-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe45
-rw-r--r--Documentation/DMA-API-HOWTO.txt9
-rw-r--r--Documentation/DocBook/drm.tmpl78
-rw-r--r--Documentation/DocBook/media/dvb/dvbapi.xml2
-rw-r--r--Documentation/DocBook/media/dvb/dvbproperty.xml180
-rw-r--r--Documentation/DocBook/media/dvb/frontend.xml2
-rw-r--r--Documentation/DocBook/media/v4l/common.xml2
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml16
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml23
-rw-r--r--Documentation/DocBook/media/v4l/io.xml59
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv12m.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml34
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-uv8.xml62
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml2
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml926
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml12
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dqevent.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml28
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml8
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml57
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querycap.xml2
-rw-r--r--Documentation/DocBook/media_api.tmpl1
-rw-r--r--Documentation/EDID/HOWTO.txt27
-rw-r--r--Documentation/IPMI.txt18
-rw-r--r--Documentation/block/cfq-iosched.txt58
-rw-r--r--Documentation/blockdev/nbd.txt38
-rw-r--r--Documentation/cgroups/blkio-controller.txt37
-rw-r--r--Documentation/coccinelle.txt4
-rw-r--r--Documentation/device-mapper/cache-policies.txt77
-rw-r--r--Documentation/device-mapper/cache.txt243
-rw-r--r--Documentation/devicetree/bindings/arc/interrupts.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/armadeus.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt8
-rw-r--r--Documentation/devicetree/bindings/clock/imx5-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/imx6q-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec4.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl330.txt21
-rw-r--r--Documentation/devicetree/bindings/dma/dma.txt81
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt44
-rw-r--r--Documentation/devicetree/bindings/drm/tilcdc/panel.txt59
-rw-r--r--Documentation/devicetree/bindings/drm/tilcdc/slave.txt18
-rw-r--r--Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt21
-rw-r--r--Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt21
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt20
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-pwm.txt48
-rw-r--r--Documentation/devicetree/bindings/leds/tca6507.txt33
-rw-r--r--Documentation/devicetree/bindings/media/gpio-ir-receiver.txt16
-rw-r--r--Documentation/devicetree/bindings/mfd/max8925.txt64
-rw-r--r--Documentation/devicetree/bindings/mips/cpu_irq.txt47
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt34
-rw-r--r--Documentation/devicetree/bindings/mmc/orion-sdio.txt17
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt12
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt20
-rw-r--r--Documentation/devicetree/bindings/mtd/elm.txt16
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt3
-rw-r--r--Documentation/devicetree/bindings/power_supply/max8925_batter.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/vt8500-pwm.txt9
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65090.txt122
-rw-r--r--Documentation/devicetree/bindings/serial/lantiq_asc.txt16
-rw-r--r--Documentation/devicetree/bindings/thermal/dove-thermal.txt18
-rw-r--r--Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt15
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.txt29
-rw-r--r--Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt (renamed from Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt)11
-rw-r--r--Documentation/devicetree/bindings/video/backlight/max8925-backlight.txt10
-rw-r--r--Documentation/devicetree/bindings/video/display-timing.txt109
-rw-r--r--Documentation/devicetree/bindings/w1/fsl-imx-owire.txt19
-rw-r--r--Documentation/dma-buf-sharing.txt6
-rwxr-xr-xDocumentation/dvb/get_dvb_firmware17
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/porting4
-rw-r--r--Documentation/filesystems/vfs.txt24
-rw-r--r--Documentation/i2c/busses/i2c-i8012
-rw-r--r--Documentation/i2c/busses/i2c-ismt36
-rw-r--r--Documentation/i2c/busses/i2c-sis6309
-rw-r--r--Documentation/i2c/smbus-protocol4
-rw-r--r--Documentation/i2c/writing-clients4
-rw-r--r--Documentation/kbuild/kconfig-language.txt23
-rw-r--r--Documentation/kbuild/kconfig.txt6
-rw-r--r--Documentation/kernel-parameters.txt62
-rw-r--r--Documentation/leds/00-INDEX2
-rw-r--r--Documentation/leds/leds-lp5521.txt63
-rw-r--r--Documentation/leds/leds-lp5523.txt27
-rw-r--r--Documentation/leds/leds-lp55xx.txt118
-rw-r--r--Documentation/media-framework.txt2
-rw-r--r--Documentation/namespaces/resource-control.txt14
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas9
-rw-r--r--Documentation/thermal/exynos_thermal_emulation53
-rw-r--r--Documentation/thermal/intel_powerclamp.txt307
-rw-r--r--Documentation/thermal/nouveau_thermal81
-rw-r--r--Documentation/thermal/sysfs-api.txt18
-rw-r--r--Documentation/video4linux/CARDLIST.au08282
-rw-r--r--Documentation/video4linux/CARDLIST.cx238852
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx3
-rw-r--r--Documentation/video4linux/CARDLIST.saa71341
-rw-r--r--Documentation/video4linux/et61x251.txt315
-rwxr-xr-x[-rw-r--r--]Documentation/video4linux/extract_xc3028.pl0
-rw-r--r--Documentation/video4linux/fimc.txt2
-rw-r--r--Documentation/video4linux/ibmcam.txt323
-rw-r--r--Documentation/video4linux/m5602.txt12
-rw-r--r--Documentation/video4linux/ov511.txt288
-rw-r--r--Documentation/video4linux/se401.txt54
-rw-r--r--Documentation/video4linux/si470x.txt7
-rw-r--r--Documentation/video4linux/soc-camera.txt146
-rw-r--r--Documentation/video4linux/stv680.txt53
-rw-r--r--Documentation/video4linux/v4l2-controls.txt22
-rw-r--r--Documentation/video4linux/v4l2-framework.txt3
-rw-r--r--Documentation/video4linux/w9968cf.txt458
-rw-r--r--Documentation/video4linux/zc0301.txt270
-rw-r--r--Documentation/virtual/kvm/api.txt108
-rw-r--r--Documentation/virtual/kvm/mmu.txt7
-rw-r--r--MAINTAINERS320
-rw-r--r--Makefile10
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/kernel/srm_env.c2
-rw-r--r--arch/alpha/kernel/traps.c2
-rw-r--r--arch/arc/Kbuild2
-rw-r--r--arch/arc/Kconfig453
-rw-r--r--arch/arc/Kconfig.debug34
-rw-r--r--arch/arc/Makefile126
-rw-r--r--arch/arc/boot/Makefile26
-rw-r--r--arch/arc/boot/dts/Makefile13
-rw-r--r--arch/arc/boot/dts/angel4.dts55
-rw-r--r--arch/arc/boot/dts/skeleton.dts10
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi37
-rw-r--r--arch/arc/configs/fpga_defconfig61
-rw-r--r--arch/arc/include/asm/Kbuild49
-rw-r--r--arch/arc/include/asm/arcregs.h433
-rw-r--r--arch/arc/include/asm/asm-offsets.h9
-rw-r--r--arch/arc/include/asm/atomic.h232
-rw-r--r--arch/arc/include/asm/barrier.h42
-rw-r--r--arch/arc/include/asm/bitops.h516
-rw-r--r--arch/arc/include/asm/bug.h37
-rw-r--r--arch/arc/include/asm/cache.h75
-rw-r--r--arch/arc/include/asm/cacheflush.h67
-rw-r--r--arch/arc/include/asm/checksum.h101
-rw-r--r--arch/arc/include/asm/clk.h22
-rw-r--r--arch/arc/include/asm/cmpxchg.h143
-rw-r--r--arch/arc/include/asm/current.h32
-rw-r--r--arch/arc/include/asm/defines.h56
-rw-r--r--arch/arc/include/asm/delay.h68
-rw-r--r--arch/arc/include/asm/disasm.h116
-rw-r--r--arch/arc/include/asm/dma-mapping.h221
-rw-r--r--arch/arc/include/asm/dma.h14
-rw-r--r--arch/arc/include/asm/elf.h78
-rw-r--r--arch/arc/include/asm/entry.h724
-rw-r--r--arch/arc/include/asm/exec.h15
-rw-r--r--arch/arc/include/asm/futex.h151
-rw-r--r--arch/arc/include/asm/io.h105
-rw-r--r--arch/arc/include/asm/irq.h25
-rw-r--r--arch/arc/include/asm/irqflags.h153
-rw-r--r--arch/arc/include/asm/kdebug.h19
-rw-r--r--arch/arc/include/asm/kgdb.h61
-rw-r--r--arch/arc/include/asm/kprobes.h62
-rw-r--r--arch/arc/include/asm/linkage.h63
-rw-r--r--arch/arc/include/asm/mach_desc.h87
-rw-r--r--arch/arc/include/asm/mmu.h23
-rw-r--r--arch/arc/include/asm/mmu_context.h213
-rw-r--r--arch/arc/include/asm/module.h28
-rw-r--r--arch/arc/include/asm/mutex.h18
-rw-r--r--arch/arc/include/asm/page.h109
-rw-r--r--arch/arc/include/asm/perf_event.h13
-rw-r--r--arch/arc/include/asm/pgalloc.h134
-rw-r--r--arch/arc/include/asm/pgtable.h405
-rw-r--r--arch/arc/include/asm/processor.h151
-rw-r--r--arch/arc/include/asm/prom.h14
-rw-r--r--arch/arc/include/asm/ptrace.h130
-rw-r--r--arch/arc/include/asm/sections.h18
-rw-r--r--arch/arc/include/asm/segment.h24
-rw-r--r--arch/arc/include/asm/serial.h25
-rw-r--r--arch/arc/include/asm/setup.h37
-rw-r--r--arch/arc/include/asm/smp.h130
-rw-r--r--arch/arc/include/asm/spinlock.h144
-rw-r--r--arch/arc/include/asm/spinlock_types.h35
-rw-r--r--arch/arc/include/asm/string.h40
-rw-r--r--arch/arc/include/asm/switch_to.h41
-rw-r--r--arch/arc/include/asm/syscall.h72
-rw-r--r--arch/arc/include/asm/syscalls.h29
-rw-r--r--arch/arc/include/asm/thread_info.h121
-rw-r--r--arch/arc/include/asm/timex.h18
-rw-r--r--arch/arc/include/asm/tlb-mmu1.h104
-rw-r--r--arch/arc/include/asm/tlb.h58
-rw-r--r--arch/arc/include/asm/tlbflush.h28
-rw-r--r--arch/arc/include/asm/uaccess.h751
-rw-r--r--arch/arc/include/asm/unaligned.h29
-rw-r--r--arch/arc/include/asm/unwind.h163
-rw-r--r--arch/arc/include/uapi/asm/Kbuild12
-rw-r--r--arch/arc/include/uapi/asm/byteorder.h18
-rw-r--r--arch/arc/include/uapi/asm/cachectl.h28
-rw-r--r--arch/arc/include/uapi/asm/elf.h26
-rw-r--r--arch/arc/include/uapi/asm/page.h39
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h48
-rw-r--r--arch/arc/include/uapi/asm/setup.h6
-rw-r--r--arch/arc/include/uapi/asm/sigcontext.h22
-rw-r--r--arch/arc/include/uapi/asm/signal.h27
-rw-r--r--arch/arc/include/uapi/asm/swab.h98
-rw-r--r--arch/arc/include/uapi/asm/unistd.h34
-rw-r--r--arch/arc/kernel/Makefile33
-rw-r--r--arch/arc/kernel/arc_hostlink.c58
-rw-r--r--arch/arc/kernel/arcksyms.c56
-rw-r--r--arch/arc/kernel/asm-offsets.c64
-rw-r--r--arch/arc/kernel/clk.c21
-rw-r--r--arch/arc/kernel/ctx_sw.c109
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S58
-rw-r--r--arch/arc/kernel/devtree.c123
-rw-r--r--arch/arc/kernel/disasm.c538
-rw-r--r--arch/arc/kernel/entry.S839
-rw-r--r--arch/arc/kernel/fpu.c55
-rw-r--r--arch/arc/kernel/head.S111
-rw-r--r--arch/arc/kernel/irq.c273
-rw-r--r--arch/arc/kernel/kgdb.c205
-rw-r--r--arch/arc/kernel/kprobes.c525
-rw-r--r--arch/arc/kernel/module.c145
-rw-r--r--arch/arc/kernel/process.c235
-rw-r--r--arch/arc/kernel/ptrace.c158
-rw-r--r--arch/arc/kernel/reset.c33
-rw-r--r--arch/arc/kernel/setup.c473
-rw-r--r--arch/arc/kernel/signal.c360
-rw-r--r--arch/arc/kernel/smp.c332
-rw-r--r--arch/arc/kernel/stacktrace.c254
-rw-r--r--arch/arc/kernel/sys.c18
-rw-r--r--arch/arc/kernel/time.c265
-rw-r--r--arch/arc/kernel/traps.c170
-rw-r--r--arch/arc/kernel/troubleshoot.c322
-rw-r--r--arch/arc/kernel/unaligned.c245
-rw-r--r--arch/arc/kernel/unwind.c1329
-rw-r--r--arch/arc/kernel/vmlinux.lds.S163
-rw-r--r--arch/arc/lib/Makefile9
-rw-r--r--arch/arc/lib/memcmp.S124
-rw-r--r--arch/arc/lib/memcpy-700.S66
-rw-r--r--arch/arc/lib/memset.S59
-rw-r--r--arch/arc/lib/strchr-700.S123
-rw-r--r--arch/arc/lib/strcmp.S96
-rw-r--r--arch/arc/lib/strcpy-700.S70
-rw-r--r--arch/arc/lib/strlen.S83
-rw-r--r--arch/arc/mm/Makefile10
-rw-r--r--arch/arc/mm/cache_arc700.c768
-rw-r--r--arch/arc/mm/dma.c94
-rw-r--r--arch/arc/mm/extable.c63
-rw-r--r--arch/arc/mm/fault.c228
-rw-r--r--arch/arc/mm/init.c187
-rw-r--r--arch/arc/mm/ioremap.c91
-rw-r--r--arch/arc/mm/tlb.c645
-rw-r--r--arch/arc/mm/tlbex.S408
-rw-r--r--arch/arc/oprofile/Makefile9
-rw-r--r--arch/arc/oprofile/common.c26
-rw-r--r--arch/arc/plat-arcfpga/Kconfig84
-rw-r--r--arch/arc/plat-arcfpga/Makefile12
-rw-r--r--arch/arc/plat-arcfpga/include/plat/irq.h31
-rw-r--r--arch/arc/plat-arcfpga/include/plat/memmap.h31
-rw-r--r--arch/arc/plat-arcfpga/include/plat/smp.h118
-rw-r--r--arch/arc/plat-arcfpga/irq.c25
-rw-r--r--arch/arc/plat-arcfpga/platform.c226
-rw-r--r--arch/arc/plat-arcfpga/smp.c171
-rw-r--r--arch/arm/Kconfig23
-rw-r--r--arch/arm/boot/dts/Makefile13
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi14
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts35
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts18
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts68
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi49
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi21
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts31
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts113
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi6
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi6
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78460.dtsi6
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts18
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi19
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts28
-rw-r--r--arch/arm/boot/dts/dove.dtsi24
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts1
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi12
-rw-r--r--arch/arm/boot/dts/imx25-karo-tx25.dts30
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts36
-rw-r--r--arch/arm/boot/dts/imx25.dtsi2
-rw-r--r--arch/arm/boot/dts/imx27-apf27.dts82
-rw-r--r--arch/arm/boot/dts/imx27-pdk.dts (renamed from arch/arm/boot/dts/imx27-3ds.dts)24
-rw-r--r--arch/arm/boot/dts/imx31-bug.dts12
-rw-r--r--arch/arm/boot/dts/imx51-apf51.dts52
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts478
-rw-r--r--arch/arm/boot/dts/imx51.dtsi53
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts126
-rw-r--r--arch/arm/boot/dts/imx53-evk.dts194
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts130
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts380
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts294
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi172
-rw-r--r--arch/arm/boot/dts/imx53.dtsi68
-rw-r--r--arch/arm/boot/dts/imx6dl.dtsi59
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts124
-rw-r--r--arch/arm/boot/dts/imx6q-sabreauto.dts64
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts216
-rw-r--r--arch/arm/boot/dts/imx6q-sabresd.dts102
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi796
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi800
-rw-r--r--arch/arm/boot/dts/kirkwood-6282.dtsi17
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts7
-rw-r--r--arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts94
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts11
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2-common.dtsi6
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310.dts126
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts116
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts102
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi8
-rw-r--r--arch/arm/boot/dts/mmp2-brownstone.dts158
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi4
-rw-r--r--arch/arm/configs/dove_defconfig28
-rw-r--r--arch/arm/configs/mvebu_defconfig26
-rw-r--r--arch/arm/include/asm/device.h6
-rw-r--r--arch/arm/include/asm/dma-iommu.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h2
-rw-r--r--arch/arm/include/asm/xen/events.h22
-rw-r--r--arch/arm/include/asm/xen/page.h4
-rw-r--r--arch/arm/kernel/kprobes.c6
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/kvm/arm.c4
-rw-r--r--arch/arm/kvm/mmu.c5
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c13
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c8
-rw-r--r--arch/arm/mach-davinci/dm644x.c10
-rw-r--r--arch/arm/mach-dove/Kconfig7
-rw-r--r--arch/arm/mach-dove/Makefile4
-rw-r--r--arch/arm/mach-dove/board-dt.c92
-rw-r--r--arch/arm/mach-dove/common.c85
-rw-r--r--arch/arm/mach-exynos/Kconfig5
-rw-r--r--arch/arm/mach-exynos/Makefile1
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c41
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c9
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c23
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c62
-rw-r--r--arch/arm/mach-exynos/dev-sysmmu.c274
-rw-r--r--arch/arm/mach-exynos/include/mach/sysmmu.h66
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c34
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c30
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c8
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c8
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c4
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c26
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c65
-rw-r--r--arch/arm/mach-kirkwood/Kconfig7
-rw-r--r--arch/arm/mach-kirkwood/Makefile1
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c6
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c16
-rw-r--r--arch/arm/mach-kirkwood/board-guruplug.c39
-rw-r--r--arch/arm/mach-kirkwood/board-mplcec4.c7
-rw-r--r--arch/arm/mach-kirkwood/board-ns2.c14
-rw-r--r--arch/arm/mach-kirkwood/board-nsa310.c70
-rw-r--r--arch/arm/mach-kirkwood/board-openblocks_a6.c44
-rw-r--r--arch/arm/mach-kirkwood/board-usi_topkick.c48
-rw-r--r--arch/arm/mach-kirkwood/common.h11
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c87
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/am33xx-restart.c34
-rw-r--r--arch/arm/mach-omap2/am35xx-emac.c3
-rw-r--r--arch/arm/mach-omap2/board-generic.c1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c8
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c14
-rw-r--r--arch/arm/mach-omap2/cclock33xx_data.c10
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c10
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c13
-rw-r--r--arch/arm/mach-omap2/clock.h11
-rw-r--r--arch/arm/mach-omap2/cm33xx.c3
-rw-r--r--arch/arm/mach-omap2/cm33xx.h9
-rw-r--r--arch/arm/mach-omap2/common.h8
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c5
-rw-r--r--arch/arm/mach-omap2/id.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c23
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c443
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c51
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_reset.c53
-rw-r--r--arch/arm/mach-omap2/pm.c20
-rw-r--r--arch/arm/mach-omap2/pm24xx.c12
-rw-r--r--arch/arm/mach-omap2/pm44xx.c21
-rw-r--r--arch/arm/mach-omap2/prm33xx.c11
-rw-r--r--arch/arm/mach-omap2/prm33xx.h4
-rw-r--r--arch/arm/mach-omap2/sleep24xx.S19
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-omap2/sr_device.c2
-rw-r--r--arch/arm/mach-pxa/tosa.c6
-rw-r--r--arch/arm/mach-s3c64xx/dma.c3
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c6
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c33
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c39
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c31
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c1
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500.c14
-rw-r--r--arch/arm/mm/dma-mapping.c108
-rw-r--r--arch/arm/plat-omap/Kconfig9
-rw-r--r--arch/arm/plat-omap/include/plat/timex.h8
-rw-r--r--arch/arm/xen/enlighten.c8
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/include/asm/unistd32.h6
-rw-r--r--arch/arm64/kernel/sys32.S5
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/avr32/include/asm/elf.h3
-rw-r--r--arch/avr32/kernel/traps.c2
-rw-r--r--arch/blackfin/Kconfig1
-rw-r--r--arch/blackfin/include/asm/elf.h3
-rw-r--r--arch/blackfin/kernel/cplbinfo.c2
-rw-r--r--arch/blackfin/kernel/process.c4
-rw-r--r--arch/c6x/include/asm/elf.h3
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c8
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c3
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c8
-rw-r--r--arch/cris/include/asm/elf.h3
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/include/asm/elf.h3
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.h1
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c4
-rw-r--r--arch/frv/mm/elf-fdpic.c49
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/include/asm/elf.h3
-rw-r--r--arch/hexagon/include/asm/elf.h5
-rw-r--r--arch/hexagon/kernel/traps.c2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/include/asm/elf.h3
-rw-r--r--arch/ia64/include/asm/kvm_host.h4
-rw-r--r--arch/ia64/kernel/kprobes.c8
-rw-r--r--arch/ia64/kernel/perfmon.c4
-rw-r--r--arch/ia64/kernel/salinfo.c6
-rw-r--r--arch/ia64/kernel/sys_ia64.c37
-rw-r--r--arch/ia64/kernel/traps.c2
-rw-r--r--arch/ia64/kvm/kvm-ia64.c8
-rw-r--r--arch/ia64/kvm/lapic.h6
-rw-r--r--arch/ia64/mm/hugetlbpage.c20
-rw-r--r--arch/ia64/pci/pci.c8
-rw-r--r--arch/m32r/Kconfig1
-rw-r--r--arch/m32r/include/asm/elf.h3
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/include/asm/elf.h3
-rw-r--r--arch/m68k/kernel/traps.c2
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/Makefile9
-rw-r--r--arch/microblaze/boot/.gitignore3
-rw-r--r--arch/microblaze/include/asm/io.h2
-rw-r--r--arch/microblaze/kernel/.gitignore1
-rw-r--r--arch/microblaze/kernel/cpu/cache.c148
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c21
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c13
-rw-r--r--arch/microblaze/kernel/cpu/pvr.c2
-rw-r--r--arch/microblaze/kernel/dma.c6
-rw-r--r--arch/microblaze/kernel/early_printk.c17
-rw-r--r--arch/microblaze/kernel/exceptions.c27
-rw-r--r--arch/microblaze/kernel/ftrace.c44
-rw-r--r--arch/microblaze/kernel/heartbeat.c2
-rw-r--r--arch/microblaze/kernel/intc.c4
-rw-r--r--arch/microblaze/kernel/kgdb.c2
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c2
-rw-r--r--arch/microblaze/kernel/module.c5
-rw-r--r--arch/microblaze/kernel/process.c29
-rw-r--r--arch/microblaze/kernel/prom.c2
-rw-r--r--arch/microblaze/kernel/prom_parse.c2
-rw-r--r--arch/microblaze/kernel/ptrace.c2
-rw-r--r--arch/microblaze/kernel/setup.c30
-rw-r--r--arch/microblaze/kernel/signal.c6
-rw-r--r--arch/microblaze/kernel/stacktrace.c2
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c3
-rw-r--r--arch/microblaze/kernel/traps.c8
-rw-r--r--arch/microblaze/kernel/unwind.c2
-rw-r--r--arch/microblaze/lib/ashldi3.c3
-rw-r--r--arch/microblaze/lib/ashrdi3.c3
-rw-r--r--arch/microblaze/lib/cmpdi2.c2
-rw-r--r--arch/microblaze/lib/lshrdi3.c3
-rw-r--r--arch/microblaze/lib/memcpy.c12
-rw-r--r--arch/microblaze/lib/memmove.c11
-rw-r--r--arch/microblaze/lib/memset.c2
-rw-r--r--arch/microblaze/lib/muldi3.c2
-rw-r--r--arch/microblaze/lib/uaccess_old.S9
-rw-r--r--arch/microblaze/lib/ucmpdi2.c2
-rw-r--r--arch/microblaze/mm/consistent.c7
-rw-r--r--arch/microblaze/mm/fault.c10
-rw-r--r--arch/microblaze/mm/highmem.c2
-rw-r--r--arch/microblaze/mm/init.c32
-rw-r--r--arch/microblaze/mm/pgtable.c17
-rw-r--r--arch/microblaze/pci/indirect_pci.c2
-rw-r--r--arch/microblaze/pci/iomap.c2
-rw-r--r--arch/microblaze/pci/pci-common.c96
-rw-r--r--arch/microblaze/pci/xilinx_pci.c16
-rw-r--r--arch/mips/Kbuild.platforms4
-rw-r--r--arch/mips/Kconfig50
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/alchemy/Platform6
-rw-r--r--arch/mips/alchemy/board-gpr.c12
-rw-r--r--arch/mips/alchemy/board-mtx1.c8
-rw-r--r--arch/mips/alchemy/common/dbdma.c14
-rw-r--r--arch/mips/alchemy/common/gpiolib.c14
-rw-r--r--arch/mips/alchemy/common/irq.c178
-rw-r--r--arch/mips/alchemy/common/platform.c4
-rw-r--r--arch/mips/alchemy/common/setup.c2
-rw-r--r--arch/mips/alchemy/common/sleeper.S20
-rw-r--r--arch/mips/alchemy/common/time.c2
-rw-r--r--arch/mips/alchemy/common/usb.c2
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c2
-rw-r--r--arch/mips/alchemy/devboards/db1000.c16
-rw-r--r--arch/mips/alchemy/devboards/db1200.c28
-rw-r--r--arch/mips/alchemy/devboards/db1300.c10
-rw-r--r--arch/mips/alchemy/devboards/db1550.c18
-rw-r--r--arch/mips/alchemy/devboards/pm.c2
-rw-r--r--arch/mips/ar7/Platform6
-rw-r--r--arch/mips/ar7/platform.c12
-rw-r--r--arch/mips/ath79/Kconfig20
-rw-r--r--arch/mips/ath79/Makefile1
-rw-r--r--arch/mips/ath79/clock.c80
-rw-r--r--arch/mips/ath79/common.c4
-rw-r--r--arch/mips/ath79/dev-common.c9
-rw-r--r--arch/mips/ath79/dev-usb.c126
-rw-r--r--arch/mips/ath79/dev-wmac.c30
-rw-r--r--arch/mips/ath79/early_printk.c2
-rw-r--r--arch/mips/ath79/gpio.c52
-rw-r--r--arch/mips/ath79/irq.c187
-rw-r--r--arch/mips/ath79/mach-ap121.c2
-rw-r--r--arch/mips/ath79/mach-ap136.c156
-rw-r--r--arch/mips/ath79/mach-ap81.c2
-rw-r--r--arch/mips/ath79/mach-db120.c2
-rw-r--r--arch/mips/ath79/mach-pb44.c6
-rw-r--r--arch/mips/ath79/machtypes.h1
-rw-r--r--arch/mips/ath79/pci.c165
-rw-r--r--arch/mips/ath79/pci.h1
-rw-r--r--arch/mips/ath79/setup.c18
-rw-r--r--arch/mips/bcm47xx/Makefile2
-rw-r--r--arch/mips/bcm47xx/nvram.c163
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/bcm47xx/sprom.c26
-rw-r--r--arch/mips/bcm47xx/wgt634u.c40
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c26
-rw-r--r--arch/mips/bcm63xx/early_printk.c4
-rw-r--r--arch/mips/boot/Makefile2
-rw-r--r--arch/mips/boot/compressed/Makefile8
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c4
-rw-r--r--arch/mips/boot/compressed/decompress.c4
-rw-r--r--arch/mips/boot/compressed/head.S4
-rw-r--r--arch/mips/boot/ecoff.h62
-rw-r--r--arch/mips/boot/elf2ecoff.c8
-rw-r--r--arch/mips/cavium-octeon/Kconfig9
-rw-r--r--arch/mips/cavium-octeon/Makefile3
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c10
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c28
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c22
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-spi.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-util.c34
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c10
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c6
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c66
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c28
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-spi.c70
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-sysinfo.c16
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c206
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S22
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c2
-rw-r--r--arch/mips/cavium-octeon/octeon_3xxx.dts34
-rw-r--r--arch/mips/cavium-octeon/octeon_68xx.dts34
-rw-r--r--arch/mips/cavium-octeon/octeon_boot.h14
-rw-r--r--arch/mips/cavium-octeon/setup.c8
-rw-r--r--arch/mips/cavium-octeon/smp.c10
-rw-r--r--arch/mips/cobalt/led.c2
-rw-r--r--arch/mips/cobalt/mtd.c2
-rw-r--r--arch/mips/cobalt/rtc.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/configs/pnx8550_jbs_defconfig98
-rw-r--r--arch/mips/configs/pnx8550_stb810_defconfig92
-rw-r--r--arch/mips/configs/rt305x_defconfig167
-rw-r--r--arch/mips/dec/int-handler.S98
-rw-r--r--arch/mips/dec/kn02xa-berr.c4
-rw-r--r--arch/mips/dec/prom/call_o32.S2
-rw-r--r--arch/mips/dec/prom/dectypes.h2
-rw-r--r--arch/mips/dec/prom/init.c2
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/dec/setup.c4
-rw-r--r--arch/mips/dec/wbflush.c6
-rw-r--r--arch/mips/emma/markeins/irq.c2
-rw-r--r--arch/mips/emma/markeins/platform.c2
-rw-r--r--arch/mips/emma/markeins/setup.c2
-rw-r--r--arch/mips/fw/arc/file.c4
-rw-r--r--arch/mips/fw/arc/identify.c2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/fw/arc/promlib.c2
-rw-r--r--arch/mips/fw/lib/call_o32.S2
-rw-r--r--arch/mips/fw/sni/sniprom.c16
-rw-r--r--arch/mips/include/asm/abi.h8
-rw-r--r--arch/mips/include/asm/addrspace.h6
-rw-r--r--arch/mips/include/asm/asm.h98
-rw-r--r--arch/mips/include/asm/atomic.h4
-rw-r--r--arch/mips/include/asm/barrier.h10
-rw-r--r--arch/mips/include/asm/bcache.h2
-rw-r--r--arch/mips/include/asm/bitops.h22
-rw-r--r--arch/mips/include/asm/bootinfo.h20
-rw-r--r--arch/mips/include/asm/break.h26
-rw-r--r--arch/mips/include/asm/cacheops.h14
-rw-r--r--arch/mips/include/asm/checksum.h2
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/include/asm/compat-signal.h4
-rw-r--r--arch/mips/include/asm/compat.h6
-rw-r--r--arch/mips/include/asm/cpu-features.h38
-rw-r--r--arch/mips/include/asm/cpu-info.h18
-rw-r--r--arch/mips/include/asm/cpu.h37
-rw-r--r--arch/mips/include/asm/dec/ioasic_addrs.h22
-rw-r--r--arch/mips/include/asm/dec/kn01.h12
-rw-r--r--arch/mips/include/asm/dec/kn02ca.h2
-rw-r--r--arch/mips/include/asm/dec/prom.h2
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/include/asm/dma.h112
-rw-r--r--arch/mips/include/asm/elf.h30
-rw-r--r--arch/mips/include/asm/emma/emma2rh.h112
-rw-r--r--arch/mips/include/asm/emma/markeins.h2
-rw-r--r--arch/mips/include/asm/fixmap.h4
-rw-r--r--arch/mips/include/asm/floppy.h4
-rw-r--r--arch/mips/include/asm/fpregdef.h10
-rw-r--r--arch/mips/include/asm/fpu.h6
-rw-r--r--arch/mips/include/asm/futex.h12
-rw-r--r--arch/mips/include/asm/fw/arc/hinv.h10
-rw-r--r--arch/mips/include/asm/fw/arc/types.h12
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_api.h6
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_error.h18
-rw-r--r--arch/mips/include/asm/gcmpregs.h100
-rw-r--r--arch/mips/include/asm/gic.h17
-rw-r--r--arch/mips/include/asm/gio_device.h14
-rw-r--r--arch/mips/include/asm/gt64120.h22
-rw-r--r--arch/mips/include/asm/hazards.h8
-rw-r--r--arch/mips/include/asm/highmem.h4
-rw-r--r--arch/mips/include/asm/inst.h348
-rw-r--r--arch/mips/include/asm/io.h30
-rw-r--r--arch/mips/include/asm/ip32/crime.h8
-rw-r--r--arch/mips/include/asm/ip32/ip32_ints.h2
-rw-r--r--arch/mips/include/asm/ip32/mace.h12
-rw-r--r--arch/mips/include/asm/irq.h6
-rw-r--r--arch/mips/include/asm/irq_cpu.h6
-rw-r--r--arch/mips/include/asm/isadep.h4
-rw-r--r--arch/mips/include/asm/jazz.h166
-rw-r--r--arch/mips/include/asm/jazzdma.h50
-rw-r--r--arch/mips/include/asm/kmap_types.h2
-rw-r--r--arch/mips/include/asm/kprobes.h2
-rw-r--r--arch/mips/include/asm/lasat/eeprom.h12
-rw-r--r--arch/mips/include/asm/lasat/lasat.h14
-rw-r--r--arch/mips/include/asm/lasat/serial.h2
-rw-r--r--arch/mips/include/asm/local.h6
-rw-r--r--arch/mips/include/asm/m48t37.h2
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h18
-rw-r--r--arch/mips/include/asm/mach-ar7/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h132
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart.h12
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h17
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h27
-rw-r--r--arch/mips/include/asm/mach-ath79/pci.h28
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h198
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000_dma.h10
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1100_mmc.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h16
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_ide.h22
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_psc.h8
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1000.h20
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h2
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h (renamed from arch/mips/include/asm/mach-bcm47xx/nvram.h)13
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h24
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/irq.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h4
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h36
-rw-r--r--arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mach-cobalt/mach-gt64120.h2
-rw-r--r--arch/mips/include/asm/mach-db1x00/bcsr.h4
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1200.h2
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1300.h2
-rw-r--r--arch/mips/include/asm/mach-emma2rh/irq.h2
-rw-r--r--arch/mips/include/asm/mach-generic/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h4
-rw-r--r--arch/mips/include/asm/mach-generic/ide.h4
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h4
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h4
-rw-r--r--arch/mips/include/asm/mach-ip27/mmzone.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/spaces.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/war.h2
-rw-r--r--arch/mips/include/asm/mach-jazz/floppy.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/clock.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/dma.h10
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/irq.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/timer.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/war.h24
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/xway_dma.h4
-rw-r--r--arch/mips/include/asm/mach-lasat/mach-gt64120.h6
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536.h422
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h122
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/gpio.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h50
-rw-r--r--arch/mips/include/asm/mach-loongson/machine.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/mem.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/irq.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/loongson1.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/platform.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/prom.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-clk.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-wdt.h4
-rw-r--r--arch/mips/include/asm/mach-malta/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-malta/irq.h2
-rw-r--r--arch/mips/include/asm/mach-malta/mach-gt64120.h2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h)0
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h)0
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h)110
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h)4
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h)4
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h)46
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h)8
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h)2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h)416
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h)96
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h)2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/war.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/war.h)4
-rw-r--r--arch/mips/include/asm/mach-pnx833x/irq-mapping.h18
-rw-r--r--arch/mips/include/asm/mach-pnx833x/pnx833x.h12
-rw-r--r--arch/mips/include/asm/mach-pnx8550/cm.h43
-rw-r--r--arch/mips/include/asm/mach-pnx8550/glb.h86
-rw-r--r--arch/mips/include/asm/mach-pnx8550/int.h140
-rw-r--r--arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h262
-rw-r--r--arch/mips/include/asm/mach-pnx8550/nand.h121
-rw-r--r--arch/mips/include/asm/mach-pnx8550/pci.h185
-rw-r--r--arch/mips/include/asm/mach-pnx8550/uart.h30
-rw-r--r--arch/mips/include/asm/mach-pnx8550/usb.h32
-rw-r--r--arch/mips/include/asm/mach-powertv/asic.h8
-rw-r--r--arch/mips/include/asm/mach-powertv/asic_regs.h4
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-powertv/interrupts.h62
-rw-r--r--arch/mips/include/asm/mach-ralink/ralink_regs.h39
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h139
-rw-r--r--arch/mips/include/asm/mach-ralink/war.h (renamed from arch/mips/include/asm/mach-pnx8550/war.h)7
-rw-r--r--arch/mips/include/asm/mach-rc32434/ddr.h2
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma.h12
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma_v.h4
-rw-r--r--arch/mips/include/asm/mach-rc32434/eth.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/gpio.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/irq.h12
-rw-r--r--arch/mips/include/asm/mach-rc32434/pci.h60
-rw-r--r--arch/mips/include/asm/mach-rc32434/rb.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/rc32434.h2
-rw-r--r--arch/mips/include/asm/mach-rc32434/timer.h18
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-sead3/irq.h2
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h4
-rw-r--r--arch/mips/include/asm/mach-wrppmc/mach-gt64120.h16
-rw-r--r--arch/mips/include/asm/mc146818-time.h4
-rw-r--r--arch/mips/include/asm/mips-boards/bonito64.h106
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h70
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h10
-rw-r--r--arch/mips/include/asm/mips-boards/malta.h10
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h8
-rw-r--r--arch/mips/include/asm/mips-boards/piix4.h8
-rw-r--r--arch/mips/include/asm/mips-boards/prom.h6
-rw-r--r--arch/mips/include/asm/mips-boards/sead3int.h4
-rw-r--r--arch/mips/include/asm/mips-boards/sim.h14
-rw-r--r--arch/mips/include/asm/mipsmtregs.h8
-rw-r--r--arch/mips/include/asm/mipsregs.h735
-rw-r--r--arch/mips/include/asm/mmu_context.h6
-rw-r--r--arch/mips/include/asm/msc01_ic.h174
-rw-r--r--arch/mips/include/asm/netlogic/common.h16
-rw-r--r--arch/mips/include/asm/netlogic/haldefs.h2
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h83
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/bridge.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/iomap.h48
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pcibus.h46
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h118
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h160
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/uart.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h242
-rw-r--r--arch/mips/include/asm/netlogic/xlr/iomap.h88
-rw-r--r--arch/mips/include/asm/netlogic/xlr/msidef.h18
-rw-r--r--arch/mips/include/asm/netlogic/xlr/pic.h58
-rw-r--r--arch/mips/include/asm/nile4.h38
-rw-r--r--arch/mips/include/asm/octeon/cvmx-address.h50
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h72
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h44
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h30
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fau.h162
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h32
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-sgmii.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-util.h18
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-xaui.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper.h12
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c.h210
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mdio.h40
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip-defs.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip.h62
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h60
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h122
-rw-r--r--arch/mips/include/asm/octeon/cvmx-scratch.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spi.h66
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spinlock.h78
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sysinfo.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-wqe.h104
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h48
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h10
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h240
-rw-r--r--arch/mips/include/asm/octeon/octeon.h12
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h2
-rw-r--r--arch/mips/include/asm/paccess.h2
-rw-r--r--arch/mips/include/asm/page.h14
-rw-r--r--arch/mips/include/asm/pci.h11
-rw-r--r--arch/mips/include/asm/pci/bridge.h30
-rw-r--r--arch/mips/include/asm/pgtable-32.h20
-rw-r--r--arch/mips/include/asm/pgtable-64.h4
-rw-r--r--arch/mips/include/asm/pgtable-bits.h42
-rw-r--r--arch/mips/include/asm/pgtable.h4
-rw-r--r--arch/mips/include/asm/processor.h46
-rw-r--r--arch/mips/include/asm/r4kcache.h12
-rw-r--r--arch/mips/include/asm/regdef.h66
-rw-r--r--arch/mips/include/asm/rtlx.h2
-rw-r--r--arch/mips/include/asm/seccomp.h2
-rw-r--r--arch/mips/include/asm/sgi/gio.h16
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h82
-rw-r--r--arch/mips/include/asm/sgi/ioc.h4
-rw-r--r--arch/mips/include/asm/sgi/ip22.h6
-rw-r--r--arch/mips/include/asm/sgi/mc.h26
-rw-r--r--arch/mips/include/asm/sgi/pi1.h20
-rw-r--r--arch/mips/include/asm/sgialib.h2
-rw-r--r--arch/mips/include/asm/sgiarcs.h136
-rw-r--r--arch/mips/include/asm/shmparam.h2
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_int.h424
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_l2c.h138
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_mc.h1192
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_regs.h806
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_scd.h276
-rw-r--r--arch/mips/include/asm/sibyte/bigsur.h20
-rw-r--r--arch/mips/include/asm/sibyte/carmel.h52
-rw-r--r--arch/mips/include/asm/sibyte/sb1250.h4
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_defs.h94
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_dma.h426
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_genbus.h26
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_int.h274
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_l2c.h92
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_ldt.h14
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mac.h568
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mc.h712
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_regs.h854
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_scd.h668
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_smbus.h158
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_syncser.h136
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_uart.h262
-rw-r--r--arch/mips/include/asm/sibyte/sentosa.h6
-rw-r--r--arch/mips/include/asm/sibyte/swarm.h26
-rw-r--r--arch/mips/include/asm/smp.h12
-rw-r--r--arch/mips/include/asm/smtc.h4
-rw-r--r--arch/mips/include/asm/sn/addrs.h54
-rw-r--r--arch/mips/include/asm/sn/agent.h12
-rw-r--r--arch/mips/include/asm/sn/arch.h8
-rw-r--r--arch/mips/include/asm/sn/fru.h12
-rw-r--r--arch/mips/include/asm/sn/gda.h24
-rw-r--r--arch/mips/include/asm/sn/intr.h16
-rw-r--r--arch/mips/include/asm/sn/io.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h8
-rw-r--r--arch/mips/include/asm/sn/klconfig.h526
-rw-r--r--arch/mips/include/asm/sn/kldir.h182
-rw-r--r--arch/mips/include/asm/sn/launch.h16
-rw-r--r--arch/mips/include/asm/sn/mapped_kernel.h4
-rw-r--r--arch/mips/include/asm/sn/nmi.h8
-rw-r--r--arch/mips/include/asm/sn/sn0/addrs.h20
-rw-r--r--arch/mips/include/asm/sn/sn0/arch.h22
-rw-r--r--arch/mips/include/asm/sn/sn0/hub.h12
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h442
-rw-r--r--arch/mips/include/asm/sn/sn0/hubmd.h214
-rw-r--r--arch/mips/include/asm/sn/sn0/hubni.h78
-rw-r--r--arch/mips/include/asm/sn/sn0/hubpi.h184
-rw-r--r--arch/mips/include/asm/sn/sn0/ip27.h28
-rw-r--r--arch/mips/include/asm/sn/types.h4
-rw-r--r--arch/mips/include/asm/sni.h104
-rw-r--r--arch/mips/include/asm/sparsemem.h2
-rw-r--r--arch/mips/include/asm/spinlock.h4
-rw-r--r--arch/mips/include/asm/spinlock_types.h2
-rw-r--r--arch/mips/include/asm/stackframe.h14
-rw-r--r--arch/mips/include/asm/string.h8
-rw-r--r--arch/mips/include/asm/switch_to.h4
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/time.h6
-rw-r--r--arch/mips/include/asm/tlb.h2
-rw-r--r--arch/mips/include/asm/topology.h2
-rw-r--r--arch/mips/include/asm/traps.h2
-rw-r--r--arch/mips/include/asm/txx9/jmr3927.h14
-rw-r--r--arch/mips/include/asm/txx9/rbtx4927.h6
-rw-r--r--arch/mips/include/asm/txx9/rbtx4938.h20
-rw-r--r--arch/mips/include/asm/txx9/rbtx4939.h14
-rw-r--r--arch/mips/include/asm/txx9/smsc_fdc37m81x.h54
-rw-r--r--arch/mips/include/asm/txx9/tx3927.h16
-rw-r--r--arch/mips/include/asm/txx9/tx4927.h18
-rw-r--r--arch/mips/include/asm/txx9/tx4927pcic.h6
-rw-r--r--arch/mips/include/asm/txx9/tx4938.h32
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h50
-rw-r--r--arch/mips/include/asm/txx9tmr.h4
-rw-r--r--arch/mips/include/asm/uaccess.h102
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/include/asm/user.h2
-rw-r--r--arch/mips/include/asm/vr41xx/pci.h2
-rw-r--r--arch/mips/include/asm/vr41xx/tb0287.h2
-rw-r--r--arch/mips/include/asm/war.h46
-rw-r--r--arch/mips/include/asm/xtalk/xtalk.h24
-rw-r--r--arch/mips/include/asm/xtalk/xwidget.h8
-rw-r--r--arch/mips/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/include/uapi/asm/break.h16
-rw-r--r--arch/mips/include/uapi/asm/cachectl.h10
-rw-r--r--arch/mips/include/uapi/asm/errno.h188
-rw-r--r--arch/mips/include/uapi/asm/fcntl.h4
-rw-r--r--arch/mips/include/uapi/asm/inst.h331
-rw-r--r--arch/mips/include/uapi/asm/ioctls.h22
-rw-r--r--arch/mips/include/uapi/asm/mman.h10
-rw-r--r--arch/mips/include/uapi/asm/ptrace.h8
-rw-r--r--arch/mips/include/uapi/asm/sembuf.h4
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h8
-rw-r--r--arch/mips/include/uapi/asm/signal.h24
-rw-r--r--arch/mips/include/uapi/asm/socket.h14
-rw-r--r--arch/mips/include/uapi/asm/sockios.h2
-rw-r--r--arch/mips/include/uapi/asm/stat.h2
-rw-r--r--arch/mips/include/uapi/asm/statfs.h6
-rw-r--r--arch/mips/include/uapi/asm/sysmips.h8
-rw-r--r--arch/mips/include/uapi/asm/termbits.h158
-rw-r--r--arch/mips/include/uapi/asm/termios.h12
-rw-r--r--arch/mips/include/uapi/asm/unistd.h60
-rw-r--r--arch/mips/jazz/Makefile2
-rw-r--r--arch/mips/jazz/irq.c4
-rw-r--r--arch/mips/jazz/jazzdma.c6
-rw-r--r--arch/mips/jazz/setup.c20
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c8
-rw-r--r--arch/mips/jz4740/clock-debugfs.c2
-rw-r--r--arch/mips/jz4740/clock.c4
-rw-r--r--arch/mips/jz4740/dma.c2
-rw-r--r--arch/mips/jz4740/gpio.c2
-rw-r--r--arch/mips/jz4740/irq.c2
-rw-r--r--arch/mips/jz4740/irq.h2
-rw-r--r--arch/mips/jz4740/platform.c12
-rw-r--r--arch/mips/jz4740/pm.c2
-rw-r--r--arch/mips/jz4740/prom.c2
-rw-r--r--arch/mips/jz4740/reset.c2
-rw-r--r--arch/mips/jz4740/setup.c2
-rw-r--r--arch/mips/jz4740/time.c2
-rw-r--r--arch/mips/jz4740/timer.c2
-rw-r--r--arch/mips/kernel/Makefile36
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c10
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c10
-rw-r--r--arch/mips/kernel/bmips_vec.S6
-rw-r--r--arch/mips/kernel/branch.c10
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c4
-rw-r--r--arch/mips/kernel/cevt-ds1287.c4
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c4
-rw-r--r--arch/mips/kernel/cevt-r4k.c6
-rw-r--r--arch/mips/kernel/cevt-sb1250.c4
-rw-r--r--arch/mips/kernel/cevt-smtc.c2
-rw-r--r--arch/mips/kernel/cevt-txx9.c6
-rw-r--r--arch/mips/kernel/cpu-bugs64.c10
-rw-r--r--arch/mips/kernel/cpu-probe.c132
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c19
-rw-r--r--arch/mips/kernel/crash.c2
-rw-r--r--arch/mips/kernel/csrc-bcm1480.c2
-rw-r--r--arch/mips/kernel/csrc-gic.c49
-rw-r--r--arch/mips/kernel/csrc-ioasic.c2
-rw-r--r--arch/mips/kernel/csrc-powertv.c6
-rw-r--r--arch/mips/kernel/csrc-sb1250.c2
-rw-r--r--arch/mips/kernel/early_printk.c5
-rw-r--r--arch/mips/kernel/ftrace.c12
-rw-r--r--arch/mips/kernel/genex.S14
-rw-r--r--arch/mips/kernel/head.S4
-rw-r--r--arch/mips/kernel/i8259.c2
-rw-r--r--arch/mips/kernel/irq-gt641xx.c4
-rw-r--r--arch/mips/kernel/irq-msc01.c6
-rw-r--r--arch/mips/kernel/irq-rm7000.c4
-rw-r--r--arch/mips/kernel/irq.c2
-rw-r--r--arch/mips/kernel/irq_cpu.c50
-rw-r--r--arch/mips/kernel/irq_txx9.c10
-rw-r--r--arch/mips/kernel/kgdb.c6
-rw-r--r--arch/mips/kernel/kprobes.c16
-rw-r--r--arch/mips/kernel/linux32.c10
-rw-r--r--arch/mips/kernel/mips_ksyms.c4
-rw-r--r--arch/mips/kernel/module-rela.c6
-rw-r--r--arch/mips/kernel/module.c10
-rw-r--r--arch/mips/kernel/octeon_switch.S104
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c30
-rw-r--r--arch/mips/kernel/proc.c26
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/ptrace.c12
-rw-r--r--arch/mips/kernel/ptrace32.c2
-rw-r--r--arch/mips/kernel/r2300_fpu.S128
-rw-r--r--arch/mips/kernel/r2300_switch.S2
-rw-r--r--arch/mips/kernel/r4k_switch.S4
-rw-r--r--arch/mips/kernel/relocate_kernel.S12
-rw-r--r--arch/mips/kernel/rtlx.c31
-rw-r--r--arch/mips/kernel/scall32-o32.S14
-rw-r--r--arch/mips/kernel/scall64-64.S6
-rw-r--r--arch/mips/kernel/scall64-n32.S10
-rw-r--r--arch/mips/kernel/scall64-o32.S18
-rw-r--r--arch/mips/kernel/setup.c101
-rw-r--r--arch/mips/kernel/signal.c12
-rw-r--r--arch/mips/kernel/signal32.c14
-rw-r--r--arch/mips/kernel/signal_n32.c10
-rw-r--r--arch/mips/kernel/smp-cmp.c2
-rw-r--r--arch/mips/kernel/smp-mt.c4
-rw-r--r--arch/mips/kernel/smtc-asm.S2
-rw-r--r--arch/mips/kernel/smtc-proc.c6
-rw-r--r--arch/mips/kernel/smtc.c21
-rw-r--r--arch/mips/kernel/sync-r4k.c2
-rw-r--r--arch/mips/kernel/syscall.c4
-rw-r--r--arch/mips/kernel/time.c12
-rw-r--r--arch/mips/kernel/traps.c34
-rw-r--r--arch/mips/kernel/unaligned.c34
-rw-r--r--arch/mips/kernel/vmlinux.lds.S8
-rw-r--r--arch/mips/kernel/vpe.c115
-rw-r--r--arch/mips/kernel/watch.c4
-rw-r--r--arch/mips/lantiq/clk.c18
-rw-r--r--arch/mips/lantiq/clk.h7
-rw-r--r--arch/mips/lantiq/dts/danube.dtsi2
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts2
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c4
-rw-r--r--arch/mips/lantiq/irq.c105
-rw-r--r--arch/mips/lantiq/prom.h2
-rw-r--r--arch/mips/lantiq/xway/clk.c43
-rw-r--r--arch/mips/lantiq/xway/reset.c9
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c15
-rw-r--r--arch/mips/lasat/Makefile2
-rw-r--r--arch/mips/lasat/ds1603.h2
-rw-r--r--arch/mips/lasat/image/Makefile2
-rw-r--r--arch/mips/lasat/image/head.S2
-rw-r--r--arch/mips/lasat/picvue.c34
-rw-r--r--arch/mips/lasat/picvue.h16
-rw-r--r--arch/mips/lasat/picvue_proc.c2
-rw-r--r--arch/mips/lasat/serial.c2
-rw-r--r--arch/mips/lasat/sysctl.c4
-rw-r--r--arch/mips/lib/bitops.c4
-rw-r--r--arch/mips/lib/csum_partial.S42
-rw-r--r--arch/mips/lib/delay.c2
-rw-r--r--arch/mips/lib/dump_tlb.c2
-rw-r--r--arch/mips/lib/memcpy.S34
-rw-r--r--arch/mips/lib/memset.S4
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c2
-rw-r--r--arch/mips/lib/strncpy_user.S2
-rw-r--r--arch/mips/lib/strnlen_user.S6
-rw-r--r--arch/mips/lib/uncached.c2
-rw-r--r--arch/mips/loongson/Makefile2
-rw-r--r--arch/mips/loongson/common/bonito-irq.c6
-rw-r--r--arch/mips/loongson/common/cmdline.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_acc.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ehci.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ide.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_isa.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_mfgpt.c6
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ohci.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_pci.c4
-rw-r--r--arch/mips/loongson/common/early_printk.c6
-rw-r--r--arch/mips/loongson/common/env.c4
-rw-r--r--arch/mips/loongson/common/gpio.c16
-rw-r--r--arch/mips/loongson/common/init.c4
-rw-r--r--arch/mips/loongson/common/irq.c6
-rw-r--r--arch/mips/loongson/common/machtype.c20
-rw-r--r--arch/mips/loongson/common/mem.c4
-rw-r--r--arch/mips/loongson/common/pci.c34
-rw-r--r--arch/mips/loongson/common/platform.c4
-rw-r--r--arch/mips/loongson/common/reset.c10
-rw-r--r--arch/mips/loongson/common/serial.c18
-rw-r--r--arch/mips/loongson/common/setup.c6
-rw-r--r--arch/mips/loongson/common/time.c6
-rw-r--r--arch/mips/loongson/common/uart_base.c4
-rw-r--r--arch/mips/loongson/fuloong-2e/irq.c12
-rw-r--r--arch/mips/loongson/fuloong-2e/reset.c4
-rw-r--r--arch/mips/loongson/lemote-2f/ec_kb3310b.h216
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c20
-rw-r--r--arch/mips/loongson/lemote-2f/machtype.c14
-rw-r--r--arch/mips/loongson/lemote-2f/reset.c10
-rw-r--r--arch/mips/loongson1/Platform2
-rw-r--r--arch/mips/loongson1/common/clock.c4
-rw-r--r--arch/mips/loongson1/common/irq.c4
-rw-r--r--arch/mips/loongson1/common/platform.c6
-rw-r--r--arch/mips/loongson1/common/prom.c6
-rw-r--r--arch/mips/loongson1/common/reset.c4
-rw-r--r--arch/mips/loongson1/common/setup.c4
-rw-r--r--arch/mips/loongson1/ls1b/board.c4
-rw-r--r--arch/mips/math-emu/Makefile1
-rw-r--r--arch/mips/math-emu/cp1emu.c8
-rw-r--r--arch/mips/math-emu/dp_add.c2
-rw-r--r--arch/mips/math-emu/dp_sqrt.c6
-rw-r--r--arch/mips/math-emu/dp_sub.c2
-rw-r--r--arch/mips/math-emu/ieee754.c18
-rw-r--r--arch/mips/math-emu/ieee754dp.c2
-rw-r--r--arch/mips/math-emu/ieee754int.h2
-rw-r--r--arch/mips/math-emu/ieee754sp.c2
-rw-r--r--arch/mips/math-emu/ieee754xcpt.c2
-rw-r--r--arch/mips/math-emu/kernel_linkage.c2
-rw-r--r--arch/mips/math-emu/sp_add.c2
-rw-r--r--arch/mips/math-emu/sp_mul.c4
-rw-r--r--arch/mips/math-emu/sp_sub.c2
-rw-r--r--arch/mips/mm/Makefile6
-rw-r--r--arch/mips/mm/c-octeon.c2
-rw-r--r--arch/mips/mm/c-r3k.c8
-rw-r--r--arch/mips/mm/c-r4k.c27
-rw-r--r--arch/mips/mm/c-tx39.c12
-rw-r--r--arch/mips/mm/cerr-sb1.c30
-rw-r--r--arch/mips/mm/cex-gen.S6
-rw-r--r--arch/mips/mm/cex-oct.S36
-rw-r--r--arch/mips/mm/cex-sb1.S8
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/mm/fault.c4
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/ioremap.c4
-rw-r--r--arch/mips/mm/page.c6
-rw-r--r--arch/mips/mm/pgtable-64.c4
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/sc-r5k.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c33
-rw-r--r--arch/mips/mm/uasm.c16
-rw-r--r--arch/mips/mti-malta/malta-amon.c6
-rw-r--r--arch/mips/mti-malta/malta-cmdline.c2
-rw-r--r--arch/mips/mti-malta/malta-display.c6
-rw-r--r--arch/mips/mti-malta/malta-init.c14
-rw-r--r--arch/mips/mti-malta/malta-int.c40
-rw-r--r--arch/mips/mti-malta/malta-memory.c4
-rw-r--r--arch/mips/mti-malta/malta-pci.c6
-rw-r--r--arch/mips/mti-malta/malta-platform.c2
-rw-r--r--arch/mips/mti-malta/malta-setup.c4
-rw-r--r--arch/mips/mti-malta/malta-smtc.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c83
-rw-r--r--arch/mips/mti-sead3/Makefile10
-rw-r--r--arch/mips/mti-sead3/leds-sead3.c5
-rw-r--r--arch/mips/mti-sead3/sead3-console.c4
-rw-r--r--arch/mips/mti-sead3/sead3-display.c2
-rw-r--r--arch/mips/mti-sead3/sead3-i2c-drv.c28
-rw-r--r--arch/mips/mti-sead3/sead3-init.c5
-rw-r--r--arch/mips/mti-sead3/sead3-memory.c138
-rw-r--r--arch/mips/mti-sead3/sead3-net.c4
-rw-r--r--arch/mips/mti-sead3/sead3-pic32-bus.c6
-rw-r--r--arch/mips/mti-sead3/sead3-pic32-i2c-drv.c58
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c27
-rw-r--r--arch/mips/mti-sead3/sead3-time.c8
-rw-r--r--arch/mips/mti-sead3/sead3.dts26
-rw-r--r--arch/mips/netlogic/Platform4
-rw-r--r--arch/mips/netlogic/common/irq.c43
-rw-r--r--arch/mips/netlogic/common/smp.c8
-rw-r--r--arch/mips/netlogic/common/smpboot.S20
-rw-r--r--arch/mips/netlogic/common/time.c56
-rw-r--r--arch/mips/netlogic/dts/xlp_evp.dts2
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c4
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c2
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c35
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c6
-rw-r--r--arch/mips/netlogic/xlr/platform-flash.c12
-rw-r--r--arch/mips/netlogic/xlr/platform.c12
-rw-r--r--arch/mips/netlogic/xlr/setup.c4
-rw-r--r--arch/mips/oprofile/common.c7
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c10
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c38
-rw-r--r--arch/mips/pci/Makefile5
-rw-r--r--arch/mips/pci/fixup-cobalt.c28
-rw-r--r--arch/mips/pci/fixup-emma2rh.c2
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c8
-rw-r--r--arch/mips/pci/fixup-ip32.c14
-rw-r--r--arch/mips/pci/fixup-lemote2f.c14
-rw-r--r--arch/mips/pci/fixup-malta.c14
-rw-r--r--arch/mips/pci/fixup-pmcmsp.c224
-rw-r--r--arch/mips/pci/fixup-pnx8550.c57
-rw-r--r--arch/mips/pci/fixup-sni.c68
-rw-r--r--arch/mips/pci/fixup-tb0219.c2
-rw-r--r--arch/mips/pci/fixup-tb0287.c2
-rw-r--r--arch/mips/pci/fixup-wrppmc.c2
-rw-r--r--arch/mips/pci/ops-bcm63xx.c16
-rw-r--r--arch/mips/pci/ops-bonito64.c4
-rw-r--r--arch/mips/pci/ops-gt64xxx_pci0.c24
-rw-r--r--arch/mips/pci/ops-lantiq.c2
-rw-r--r--arch/mips/pci/ops-loongson2.c2
-rw-r--r--arch/mips/pci/ops-msc.c22
-rw-r--r--arch/mips/pci/ops-nile4.c2
-rw-r--r--arch/mips/pci/ops-pmcmsp.c450
-rw-r--r--arch/mips/pci/ops-pnx8550.c282
-rw-r--r--arch/mips/pci/ops-rc32434.c2
-rw-r--r--arch/mips/pci/ops-sni.c8
-rw-r--r--arch/mips/pci/ops-tx4927.c8
-rw-r--r--arch/mips/pci/ops-vr41xx.c6
-rw-r--r--arch/mips/pci/pci-alchemy.c6
-rw-r--r--arch/mips/pci/pci-ar71xx.c194
-rw-r--r--arch/mips/pci/pci-ar724x.c304
-rw-r--r--arch/mips/pci/pci-bcm1480.c8
-rw-r--r--arch/mips/pci/pci-bcm1480ht.c6
-rw-r--r--arch/mips/pci/pci-bcm47xx.c2
-rw-r--r--arch/mips/pci/pci-bcm63xx.c62
-rw-r--r--arch/mips/pci/pci-bcm63xx.h2
-rw-r--r--arch/mips/pci/pci-ip27.c8
-rw-r--r--arch/mips/pci/pci-ip32.c6
-rw-r--r--arch/mips/pci/pci-lantiq.c12
-rw-r--r--arch/mips/pci/pci-lasat.c26
-rw-r--r--arch/mips/pci/pci-octeon.c30
-rw-r--r--arch/mips/pci/pci-rc32434.c6
-rw-r--r--arch/mips/pci/pci-sb1250.c8
-rw-r--r--arch/mips/pci/pci-vr41xx.c22
-rw-r--r--arch/mips/pci/pci-vr41xx.h2
-rw-r--r--arch/mips/pci/pci-xlp.c156
-rw-r--r--arch/mips/pci/pci-xlr.c34
-rw-r--r--arch/mips/pci/pci.c19
-rw-r--r--arch/mips/pci/pcie-octeon.c58
-rw-r--r--arch/mips/pmc-sierra/Platform7
-rw-r--r--arch/mips/pmcs-msp71xx/Kconfig (renamed from arch/mips/pmc-sierra/Kconfig)0
-rw-r--r--arch/mips/pmcs-msp71xx/Makefile (renamed from arch/mips/pmc-sierra/msp71xx/Makefile)0
-rw-r--r--arch/mips/pmcs-msp71xx/Platform7
-rw-r--r--arch/mips/pmcs-msp71xx/gpio.c (renamed from arch/mips/pmc-sierra/msp71xx/gpio.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/gpio_extended.c (renamed from arch/mips/pmc-sierra/msp71xx/gpio_extended.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_elb.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_elb.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_eth.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_eth.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_hwbutton.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_cic.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c)6
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_per.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_per.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_slp.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_pci.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_pci.c)2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_prom.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_prom.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_serial.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_serial.c)32
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_setup.c)8
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_smp.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smtc.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_smtc.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_time.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_time.c)2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_usb.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_usb.c)54
-rw-r--r--arch/mips/pnx833x/Platform2
-rw-r--r--arch/mips/pnx833x/common/interrupts.c110
-rw-r--r--arch/mips/pnx833x/common/platform.c34
-rw-r--r--arch/mips/pnx833x/common/prom.c2
-rw-r--r--arch/mips/pnx833x/common/reset.c2
-rw-r--r--arch/mips/pnx833x/common/setup.c2
-rw-r--r--arch/mips/pnx833x/stb22x/board.c2
-rw-r--r--arch/mips/pnx8550/Makefile3
-rw-r--r--arch/mips/pnx8550/Platform7
-rw-r--r--arch/mips/pnx8550/common/Makefile26
-rw-r--r--arch/mips/pnx8550/common/int.c236
-rw-r--r--arch/mips/pnx8550/common/pci.c134
-rw-r--r--arch/mips/pnx8550/common/platform.c162
-rw-r--r--arch/mips/pnx8550/common/proc.c110
-rw-r--r--arch/mips/pnx8550/common/prom.c128
-rw-r--r--arch/mips/pnx8550/common/reset.c40
-rw-r--r--arch/mips/pnx8550/common/setup.c142
-rw-r--r--arch/mips/pnx8550/common/time.c151
-rw-r--r--arch/mips/pnx8550/jbs/Makefile4
-rw-r--r--arch/mips/pnx8550/jbs/board_setup.c56
-rw-r--r--arch/mips/pnx8550/jbs/init.c53
-rw-r--r--arch/mips/pnx8550/jbs/irqmap.c35
-rw-r--r--arch/mips/pnx8550/stb810/Makefile4
-rw-r--r--arch/mips/pnx8550/stb810/board_setup.c41
-rw-r--r--arch/mips/pnx8550/stb810/irqmap.c22
-rw-r--r--arch/mips/pnx8550/stb810/prom_init.c46
-rw-r--r--arch/mips/power/cpu.c2
-rw-r--r--arch/mips/power/hibernate.S2
-rw-r--r--arch/mips/powertv/asic/asic-calliope.c10
-rw-r--r--arch/mips/powertv/asic/asic-cronus.c6
-rw-r--r--arch/mips/powertv/asic/asic-gaia.c2
-rw-r--r--arch/mips/powertv/asic/asic-zeus.c6
-rw-r--r--arch/mips/powertv/asic/asic_devices.c8
-rw-r--r--arch/mips/powertv/asic/asic_int.c4
-rw-r--r--arch/mips/powertv/asic/irq_asic.c4
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c10
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c12
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c8
-rw-r--r--arch/mips/powertv/asic/prealloc-gaia.c382
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c10
-rw-r--r--arch/mips/powertv/init.c2
-rw-r--r--arch/mips/powertv/ioremap.c4
-rw-r--r--arch/mips/powertv/memory.c2
-rw-r--r--arch/mips/powertv/powertv-usb.c46
-rw-r--r--arch/mips/ralink/Kconfig32
-rw-r--r--arch/mips/ralink/Makefile15
-rw-r--r--arch/mips/ralink/Platform10
-rw-r--r--arch/mips/ralink/clk.c72
-rw-r--r--arch/mips/ralink/common.h44
-rw-r--r--arch/mips/ralink/dts/Makefile1
-rw-r--r--arch/mips/ralink/dts/rt3050.dtsi106
-rw-r--r--arch/mips/ralink/dts/rt3052_eval.dts52
-rw-r--r--arch/mips/ralink/early_printk.c44
-rw-r--r--arch/mips/ralink/irq.c180
-rw-r--r--arch/mips/ralink/of.c107
-rw-r--r--arch/mips/ralink/prom.c69
-rw-r--r--arch/mips/ralink/reset.c44
-rw-r--r--arch/mips/ralink/rt305x.c242
-rw-r--r--arch/mips/rb532/devices.c12
-rw-r--r--arch/mips/rb532/gpio.c8
-rw-r--r--arch/mips/rb532/irq.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c14
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c14
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c36
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c44
-rw-r--r--arch/mips/sgi-ip22/ip22-nvram.c16
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c8
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c4
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c16
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c14
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c12
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-reset.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c16
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c8
-rw-r--r--arch/mips/sibyte/Platform6
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c8
-rw-r--r--arch/mips/sibyte/common/cfe.c10
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c18
-rw-r--r--arch/mips/sibyte/sb1250/bus_watcher.c6
-rw-r--r--arch/mips/sibyte/sb1250/irq.c8
-rw-r--r--arch/mips/sibyte/sb1250/setup.c18
-rw-r--r--arch/mips/sibyte/swarm/platform.c4
-rw-r--r--arch/mips/sibyte/swarm/rtc_xicor1241.c50
-rw-r--r--arch/mips/sni/a20r.c44
-rw-r--r--arch/mips/sni/eisa.c2
-rw-r--r--arch/mips/sni/irq.c20
-rw-r--r--arch/mips/sni/pcimt.c36
-rw-r--r--arch/mips/sni/pcit.c40
-rw-r--r--arch/mips/sni/rm200.c38
-rw-r--r--arch/mips/sni/setup.c24
-rw-r--r--arch/mips/sni/time.c30
-rw-r--r--arch/mips/txx9/Platform4
-rw-r--r--arch/mips/txx9/generic/irq_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/irq_tx4939.c4
-rw-r--r--arch/mips/txx9/generic/mem_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/pci.c4
-rw-r--r--arch/mips/txx9/generic/setup.c8
-rw-r--r--arch/mips/txx9/generic/setup_tx3927.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c4
-rw-r--r--arch/mips/txx9/generic/smsc_fdc37m81x.c48
-rw-r--r--arch/mips/txx9/rbtx4927/irq.c2
-rw-r--r--arch/mips/txx9/rbtx4927/prom.c2
-rw-r--r--arch/mips/txx9/rbtx4927/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4938/setup.c8
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c4
-rw-r--r--arch/mips/vr41xx/common/bcu.c4
-rw-r--r--arch/mips/vr41xx/common/cmu.c16
-rw-r--r--arch/mips/vr41xx/common/giu.c2
-rw-r--r--arch/mips/vr41xx/common/icu.c8
-rw-r--r--arch/mips/vr41xx/common/pmu.c2
-rw-r--r--arch/mips/vr41xx/common/rtc.c2
-rw-r--r--arch/mips/vr41xx/common/type.c2
-rw-r--r--arch/mips/wrppmc/Platform2
-rw-r--r--arch/mips/wrppmc/irq.c4
-rw-r--r--arch/mips/wrppmc/serial.c2
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/mn10300/include/asm/elf.h5
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.h1
-rw-r--r--arch/mn10300/unit-asb2305/pci.c4
-rw-r--r--arch/openrisc/Kconfig5
-rw-r--r--arch/openrisc/include/asm/bitops.h1
-rw-r--r--arch/openrisc/include/asm/elf.h3
-rw-r--r--arch/openrisc/include/asm/processor.h1
-rw-r--r--arch/openrisc/kernel/entry.S16
-rw-r--r--arch/openrisc/kernel/head.S13
-rw-r--r--arch/openrisc/mm/init.c17
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/kernel/sys_parisc.c48
-rw-r--r--arch/parisc/kernel/sys_parisc32.c10
-rw-r--r--arch/parisc/kernel/syscall_table.S6
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/include/asm/elf.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h8
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h12
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h6
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/kprobes.c6
-rw-r--r--arch/powerpc/kernel/proc_powerpc.c6
-rw-r--r--arch/powerpc/kernel/rtas_flash.c16
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c18
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kvm/Makefile9
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c30
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c18
-rw-r--r--arch/powerpc/kvm/book3s_pr.c5
-rw-r--r--arch/powerpc/kvm/booke.c70
-rw-r--r--arch/powerpc/kvm/booke.h1
-rw-r--r--arch/powerpc/kvm/booke_emulate.c3
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S49
-rw-r--r--arch/powerpc/kvm/e500.c16
-rw-r--r--arch/powerpc/kvm/e500.h1
-rw-r--r--arch/powerpc/kvm/e500_mmu.c (renamed from arch/powerpc/kvm/e500_tlb.c)659
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c699
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.h18
-rw-r--r--arch/powerpc/kvm/emulate.c5
-rw-r--r--arch/powerpc/kvm/powerpc.c17
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c66
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c2
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c2
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c8
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c2
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/elf.h5
-rw-r--r--arch/s390/include/asm/irq.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h15
-rw-r--r--arch/s390/kernel/compat_wrapper.S16
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/irq.c1
-rw-r--r--arch/s390/kernel/kprobes.c8
-rw-r--r--arch/s390/kernel/syscalls.S6
-rw-r--r--arch/s390/kernel/traps.c2
-rw-r--r--arch/s390/kvm/intercept.c45
-rw-r--r--arch/s390/kvm/interrupt.c264
-rw-r--r--arch/s390/kvm/kvm-s390.c50
-rw-r--r--arch/s390/kvm/kvm-s390.h46
-rw-r--r--arch/s390/kvm/priv.c316
-rw-r--r--arch/s390/kvm/sigp.c10
-rw-r--r--arch/s390/kvm/trace-s390.h26
-rw-r--r--arch/s390/pci/pci_debug.c4
-rw-r--r--arch/s390/pci/pci_msi.c3
-rw-r--r--arch/score/Kconfig1
-rw-r--r--arch/score/include/asm/elf.h5
-rw-r--r--arch/sh/Kconfig3
-rw-r--r--arch/sh/kernel/kprobes.c6
-rw-r--r--arch/sh/kernel/traps.c2
-rw-r--r--arch/sh/mm/alignment.c2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/include/asm/elf_32.h3
-rw-r--r--arch/sparc/kernel/kprobes.c6
-rw-r--r--arch/sparc/kernel/ldc.c3
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/signal32.c2
-rw-r--r--arch/sparc/kernel/systbls_64.S6
-rw-r--r--arch/sparc/kernel/traps_32.c2
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/unicore32/Kconfig1
-rw-r--r--arch/unicore32/kernel/traps.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/compressed/eboot.c26
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aes-i586-asm_32.S15
-rw-r--r--arch/x86/crypto/aes-x86_64-asm_64.S30
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S23
-rw-r--r--arch/x86/crypto/blowfish-x86_64-asm_64.S39
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S38
-rw-r--r--arch/x86/crypto/camellia-x86_64-asm_64.S50
-rw-r--r--arch/x86/crypto/cast5-avx-x86_64-asm_64.S48
-rw-r--r--arch/x86/crypto/cast6-avx-x86_64-asm_64.S35
-rw-r--r--arch/x86/crypto/crc32-pclmul_asm.S246
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c201
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S8
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_asm.S4
-rw-r--r--arch/x86/crypto/salsa20-i586-asm_32.S28
-rw-r--r--arch/x86/crypto/salsa20-x86_64-asm_64.S28
-rw-r--r--arch/x86/crypto/salsa20_glue.c5
-rw-r--r--arch/x86/crypto/serpent-avx-x86_64-asm_64.S35
-rw-r--r--arch/x86/crypto/serpent-sse2-i586-asm_32.S20
-rw-r--r--arch/x86/crypto/serpent-sse2-x86_64-asm_64.S20
-rw-r--r--arch/x86/crypto/sha1_ssse3_asm.S10
-rw-r--r--arch/x86/crypto/twofish-avx-x86_64-asm_64.S35
-rw-r--r--arch/x86/crypto/twofish-i586-asm_32.S11
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64-3way.S20
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64.S11
-rw-r--r--arch/x86/ia32/ia32_aout.c6
-rw-r--r--arch/x86/ia32/sys_ia32.c5
-rw-r--r--arch/x86/include/asm/efi.h9
-rw-r--r--arch/x86/include/asm/ftrace.h24
-rw-r--r--arch/x86/include/asm/kvm_host.h26
-rw-r--r--arch/x86/include/asm/kvm_para.h2
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/sys_ia32.h1
-rw-r--r--arch/x86/include/asm/thread_info.h1
-rw-r--r--arch/x86/include/asm/vmx.h18
-rw-r--r--arch/x86/include/asm/xen/events.h3
-rw-r--r--arch/x86/include/asm/xen/page.h2
-rw-r--r--arch/x86/include/uapi/asm/vmx.h9
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/p5.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/winchip.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpuid.c4
-rw-r--r--arch/x86/kernel/dumpstack.c2
-rw-r--r--arch/x86/kernel/head.c53
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/kprobes/core.c8
-rw-r--r--arch/x86/kernel/kvmclock.c11
-rw-r--r--arch/x86/kernel/nmi.c1
-rw-r--r--arch/x86/kernel/pvclock.c2
-rw-r--r--arch/x86/kernel/setup.c16
-rw-r--r--arch/x86/kvm/emulate.c673
-rw-r--r--arch/x86/kvm/i8254.c1
-rw-r--r--arch/x86/kvm/i8259.c2
-rw-r--r--arch/x86/kvm/irq.c74
-rw-r--r--arch/x86/kvm/lapic.c140
-rw-r--r--arch/x86/kvm/lapic.h34
-rw-r--r--arch/x86/kvm/mmu.c194
-rw-r--r--arch/x86/kvm/mmutrace.h6
-rw-r--r--arch/x86/kvm/paging_tmpl.h106
-rw-r--r--arch/x86/kvm/svm.c24
-rw-r--r--arch/x86/kvm/vmx.c714
-rw-r--r--arch/x86/kvm/x86.c168
-rw-r--r--arch/x86/mm/fault.c8
-rw-r--r--arch/x86/mm/numa.c14
-rw-r--r--arch/x86/mm/pageattr.c50
-rw-r--r--arch/x86/mm/srat.c125
-rw-r--r--arch/x86/pci/acpi.c9
-rw-r--r--arch/x86/pci/common.c1
-rw-r--r--arch/x86/pci/i386.c185
-rw-r--r--arch/x86/pci/legacy.c2
-rw-r--r--arch/x86/pci/numaq_32.c2
-rw-r--r--arch/x86/platform/efi/efi.c10
-rw-r--r--arch/x86/syscalls/syscall_32.tbl6
-rw-r--r--arch/x86/xen/smp.c42
-rw-r--r--arch/x86/xen/spinlock.c1
-rw-r--r--arch/xtensa/Kconfig47
-rw-r--r--arch/xtensa/Makefile6
-rw-r--r--arch/xtensa/boot/Makefile12
-rw-r--r--arch/xtensa/boot/dts/Makefile15
-rw-r--r--arch/xtensa/include/asm/atomic.h6
-rw-r--r--arch/xtensa/include/asm/checksum.h1
-rw-r--r--arch/xtensa/include/asm/elf.h3
-rw-r--r--arch/xtensa/include/asm/pgtable.h4
-rw-r--r--arch/xtensa/include/asm/processor.h4
-rw-r--r--arch/xtensa/include/asm/ptrace.h3
-rw-r--r--arch/xtensa/include/asm/regs.h1
-rw-r--r--arch/xtensa/include/asm/string.h4
-rw-r--r--arch/xtensa/include/asm/timex.h8
-rw-r--r--arch/xtensa/include/asm/traps.h24
-rw-r--r--arch/xtensa/include/uapi/asm/signal.h11
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h7
-rw-r--r--arch/xtensa/kernel/asm-offsets.c1
-rw-r--r--arch/xtensa/kernel/entry.S69
-rw-r--r--arch/xtensa/kernel/head.S9
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/ptrace.c35
-rw-r--r--arch/xtensa/kernel/setup.c42
-rw-r--r--arch/xtensa/kernel/signal.c6
-rw-r--r--arch/xtensa/kernel/syscall.c41
-rw-r--r--arch/xtensa/kernel/traps.c76
-rw-r--r--arch/xtensa/kernel/vectors.S57
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S68
-rw-r--r--arch/xtensa/oprofile/Makefile9
-rw-r--r--arch/xtensa/oprofile/backtrace.c171
-rw-r--r--arch/xtensa/oprofile/init.c26
-rw-r--r--arch/xtensa/platforms/iss/Makefile1
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c375
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c4
-rw-r--r--arch/xtensa/variants/dc233c/include/variant/core.h475
-rw-r--r--arch/xtensa/variants/dc233c/include/variant/tie-asm.h193
-rw-r--r--arch/xtensa/variants/dc233c/include/variant/tie.h150
-rw-r--r--block/Kconfig1
-rw-r--r--block/blk-cgroup.c281
-rw-r--r--block/blk-cgroup.h68
-rw-r--r--block/blk-core.c18
-rw-r--r--block/blk-exec.c4
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-ioc.c3
-rw-r--r--block/blk-lib.c6
-rw-r--r--block/blk-sysfs.c9
-rw-r--r--block/blk.h2
-rw-r--r--block/bsg.c29
-rw-r--r--block/cfq-iosched.c632
-rw-r--r--block/elevator.c25
-rw-r--r--block/genhd.c24
-rw-r--r--block/partition-generic.c6
-rw-r--r--block/partitions/check.c37
-rw-r--r--block/partitions/check.h4
-rw-r--r--block/partitions/efi.c12
-rw-r--r--block/partitions/mac.c4
-rw-r--r--block/partitions/msdos.c11
-rw-r--r--crypto/Kconfig21
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/ablkcipher.c12
-rw-r--r--crypto/aead.c15
-rw-r--r--crypto/ahash.c2
-rw-r--r--crypto/algapi.c10
-rw-r--r--crypto/async_tx/async_memcpy.c6
-rw-r--r--crypto/async_tx/async_memset.c1
-rw-r--r--crypto/async_tx/async_tx.c9
-rw-r--r--crypto/async_tx/async_xor.c4
-rw-r--r--crypto/authenc.c3
-rw-r--r--crypto/authencesn.c3
-rw-r--r--crypto/blkcipher.c12
-rw-r--r--crypto/ccm.c23
-rw-r--r--crypto/chainiv.c3
-rw-r--r--crypto/crc32.c158
-rw-r--r--crypto/crypto_user.c38
-rw-r--r--crypto/ctr.c6
-rw-r--r--crypto/cts.c3
-rw-r--r--crypto/gcm.c29
-rw-r--r--crypto/pcompress.c3
-rw-r--r--crypto/rng.c2
-rw-r--r--crypto/seqiv.c3
-rw-r--r--crypto/shash.c3
-rw-r--r--crypto/testmgr.c15
-rw-r--r--crypto/testmgr.h38
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/apei/ghes.c71
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/numa.c23
-rw-r--r--drivers/acpi/osl.c26
-rw-r--r--drivers/acpi/pci_irq.c102
-rw-r--r--drivers/acpi/pci_root.c170
-rw-r--r--drivers/acpi/pci_slot.c13
-rw-r--r--drivers/acpi/power.c112
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/ata/Kconfig21
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c24
-rw-r--r--drivers/ata/ata_piix.c17
-rw-r--r--drivers/ata/libata-acpi.c177
-rw-r--r--drivers/ata/libata-core.c66
-rw-r--r--drivers/ata/libata-eh.c27
-rw-r--r--drivers/ata/libata-scsi.c20
-rw-r--r--drivers/ata/libata-zpodd.c299
-rw-r--r--drivers/ata/libata.h27
-rw-r--r--drivers/ata/pata_samsung_cf.c4
-rw-r--r--drivers/ata/sata_rcar.c910
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c3
-rw-r--r--drivers/atm/he.c3
-rw-r--r--drivers/atm/nicstar.c25
-rw-r--r--drivers/atm/solos-pci.c3
-rw-r--r--drivers/base/devtmpfs.c3
-rw-r--r--drivers/base/dma-buf.c66
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/block/DAC960.c3
-rw-r--r--drivers/block/Kconfig23
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/drbd/drbd_main.c29
-rw-r--r--drivers/block/loop.c87
-rw-r--r--drivers/block/mtip32xx/Kconfig2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c431
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/block/nbd.c38
-rw-r--r--drivers/block/rbd.c1852
-rw-r--r--drivers/block/rsxx/Makefile2
-rw-r--r--drivers/block/rsxx/config.c213
-rw-r--r--drivers/block/rsxx/core.c649
-rw-r--r--drivers/block/rsxx/cregs.c758
-rw-r--r--drivers/block/rsxx/dev.c367
-rw-r--r--drivers/block/rsxx/dma.c998
-rw-r--r--drivers/block/rsxx/rsxx.h45
-rw-r--r--drivers/block/rsxx/rsxx_cfg.h72
-rw-r--r--drivers/block/rsxx/rsxx_priv.h399
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/xd.c1123
-rw-r--r--drivers/block/xd.h134
-rw-r--r--drivers/block/xen-blkback/blkback.c7
-rw-r--r--drivers/block/xen-blkback/xenbus.c49
-rw-r--r--drivers/block/xen-blkfront.c13
-rw-r--r--drivers/char/agp/intel-gtt.c128
-rw-r--r--drivers/char/dsp56k.c8
-rw-r--r--drivers/char/dtlk.c4
-rw-r--r--drivers/char/hw_random/virtio-rng.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c66
-rw-r--r--drivers/char/lp.c8
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c16
-rw-r--r--drivers/char/nsc_gpio.c4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/ppdev.c6
-rw-r--r--drivers/char/ps3flash.c2
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/virtio_console.c18
-rw-r--r--drivers/clk/clk.c59
-rw-r--r--drivers/clocksource/nomadik-mtu.c1
-rw-r--r--drivers/clocksource/time-armada-370-xp.c150
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c2
-rw-r--r--drivers/crypto/atmel-aes.c2
-rw-r--r--drivers/crypto/bfin_crc.c6
-rw-r--r--drivers/crypto/omap-aes.c658
-rw-r--r--drivers/crypto/omap-sham.c918
-rw-r--r--drivers/crypto/s5p-sss.c4
-rw-r--r--drivers/dca/dca-core.c5
-rw-r--r--drivers/dca/dca-sysfs.c23
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/amba-pl08x.c10
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/at_hdmac_regs.h8
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/coh901318_lli.c2
-rw-r--r--drivers/dma/dmaengine.c37
-rw-r--r--drivers/dma/dmatest.c22
-rw-r--r--drivers/dma/dw_dmac.c523
-rw-r--r--drivers/dma/dw_dmac_regs.h27
-rw-r--r--drivers/dma/edma.c61
-rw-r--r--drivers/dma/ep93xx_dma.c3
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c113
-rw-r--r--drivers/dma/ioat/dma_v3.c216
-rw-r--r--drivers/dma/ioat/hw.h11
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/iop-adma.c45
-rw-r--r--drivers/dma/ipu/ipu_idmac.c2
-rw-r--r--drivers/dma/ipu/ipu_irq.c1
-rw-r--r--drivers/dma/mmp_pdma.c6
-rw-r--r--drivers/dma/mv_xor.c40
-rw-r--r--drivers/dma/mxs-dma.c8
-rw-r--r--drivers/dma/of-dma.c267
-rw-r--r--drivers/dma/pch_dma.c13
-rw-r--r--drivers/dma/pl330.c101
-rw-r--r--drivers/dma/sh/shdma-base.c3
-rw-r--r--drivers/dma/sh/shdma.c2
-rw-r--r--drivers/dma/sirf-dma.c73
-rw-r--r--drivers/dma/ste_dma40.c491
-rw-r--r--drivers/dma/ste_dma40_ll.c29
-rw-r--r--drivers/dma/ste_dma40_ll.h130
-rw-r--r--drivers/dma/tegra20-apb-dma.c55
-rw-r--r--drivers/edac/Kconfig23
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/edac_core.h5
-rw-r--r--drivers/edac/edac_mc.c152
-rw-r--r--drivers/edac/edac_mc_sysfs.c36
-rw-r--r--drivers/edac/edac_module.c2
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/ghes_edac.c537
-rw-r--r--drivers/edac/i3200_edac.c37
-rw-r--r--drivers/edac/i5100_edac.c178
-rw-r--r--drivers/edac/i7core_edac.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firewire/core-cdev.c20
-rw-r--r--drivers/firewire/core-device.c7
-rw-r--r--drivers/firmware/efivars.c146
-rw-r--r--drivers/gpio/Kconfig18
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-em.c3
-rw-r--r--drivers/gpio/gpio-langwell.c53
-rw-r--r--drivers/gpio/gpio-lynxpoint.c469
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mxs.c31
-rw-r--r--drivers/gpio/gpio-omap.c6
-rw-r--r--drivers/gpio/gpio-palmas.c184
-rw-r--r--drivers/gpio/gpio-pca953x.c380
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c7
-rw-r--r--drivers/gpio/gpio-twl4030.c176
-rw-r--r--drivers/gpio/gpio-vt8500.c65
-rw-r--r--drivers/gpio/gpiolib-acpi.c87
-rw-r--r--drivers/gpio/gpiolib.c770
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c27
-rw-r--r--drivers/gpu/drm/ast/ast_main.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c27
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c12
-rw-r--r--drivers/gpu/drm/drm_context.c19
-rw-r--r--drivers/gpu/drm/drm_crtc.c826
-rw-r--r--drivers/gpu/drm/drm_drv.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c843
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h774
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c63
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c95
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c310
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c40
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c21
-rw-r--r--drivers/gpu/drm/drm_hashtab.c19
-rw-r--r--drivers/gpu/drm/drm_irq.c12
-rw-r--r--drivers/gpu/drm/drm_mm.c96
-rw-r--r--drivers/gpu/drm/drm_modes.c70
-rw-r--r--drivers/gpu/drm/drm_pci.c81
-rw-r--r--drivers/gpu/drm/drm_prime.c186
-rw-r--r--drivers/gpu/drm/drm_stub.c19
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1035
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c34
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c43
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c14
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c12
-rw-r--r--drivers/gpu/drm/i2c/Kconfig28
-rw-r--r--drivers/gpu/drm/i2c/Makefile3
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c906
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c254
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c94
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c131
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h475
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c526
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c33
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c333
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c305
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c33
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c370
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h436
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c540
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c503
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c79
-rw-r--r--drivers/gpu/drm/i915/intel_display.c975
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c374
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h41
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c55
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c108
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c103
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c250
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c6
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c95
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c113
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c67
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c46
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c28
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c16
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig28
-rw-r--r--drivers/gpu/drm/nouveau/Makefile29
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c346
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.h78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c371
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c309
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c187
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h127
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c101
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c279
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c481
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c143
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c218
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c244
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h103
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c130
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c96
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c297
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c233
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c173
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c149
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c307
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c186
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig (renamed from drivers/staging/omapdrm/Kconfig)0
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile (renamed from drivers/staging/omapdrm/Makefile)0
-rw-r--r--drivers/gpu/drm/omapdrm/TODO23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c (renamed from drivers/staging/omapdrm/omap_connector.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c (renamed from drivers/staging/omapdrm/omap_crtc.c)14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c (renamed from drivers/staging/omapdrm/omap_debugfs.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h (renamed from drivers/staging/omapdrm/omap_dmm_priv.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (renamed from drivers/staging/omapdrm/omap_dmm_tiler.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h (renamed from drivers/staging/omapdrm/omap_dmm_tiler.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c (renamed from drivers/staging/omapdrm/omap_drv.c)6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h (renamed from drivers/staging/omapdrm/omap_drv.h)4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c (renamed from drivers/staging/omapdrm/omap_encoder.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c (renamed from drivers/staging/omapdrm/omap_fb.c)18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c (renamed from drivers/staging/omapdrm/omap_fbdev.c)34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c (renamed from drivers/staging/omapdrm/omap_gem.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (renamed from drivers/staging/omapdrm/omap_gem_dmabuf.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c (renamed from drivers/staging/omapdrm/omap_gem_helpers.c)4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c (renamed from drivers/staging/omapdrm/omap_irq.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c (renamed from drivers/staging/omapdrm/omap_plane.c)2
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c (renamed from drivers/staging/omapdrm/tcm-sita.c)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h (renamed from drivers/staging/omapdrm/tcm-sita.h)0
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h (renamed from drivers/staging/omapdrm/tcm.h)0
-rw-r--r--drivers/gpu/drm/radeon/Kconfig33
-rw-r--r--drivers/gpu/drm/radeon/Makefile10
-rw-r--r--drivers/gpu/drm/radeon/atom.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c366
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1149
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c85
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h54
-rw-r--r--drivers/gpu/drm/radeon/ni.c339
-rw-r--r--drivers/gpu/drm/radeon/nid.h27
-rw-r--r--drivers/gpu/drm/radeon/r100.c224
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h4
-rw-r--r--drivers/gpu/drm/radeon/r100d.h11
-rw-r--r--drivers/gpu/drm/radeon/r200.c26
-rw-r--r--drivers/gpu/drm/radeon/r300.c42
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r300d.h11
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c401
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c33
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c332
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c135
-rw-r--r--drivers/gpu/drm/radeon/r600d.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c176
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c91
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c170
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h11
-rw-r--r--drivers/gpu/drm/radeon/rv770.c25
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h4
-rw-r--r--drivers/gpu/drm/radeon/si.c509
-rw-r--r--drivers/gpu/drm/radeon/sid.h30
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c1
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c13
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c585
-rw-r--r--drivers/gpu/drm/tegra/dc.h14
-rw-r--r--drivers/gpu/drm/tegra/drm.c103
-rw-r--r--drivers/gpu/drm/tegra/drm.h43
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c226
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h189
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig13
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c602
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c611
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h150
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c436
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h154
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c376
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.h26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c419
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h26
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c103
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c78
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c78
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/via/via_map.c1
-rw-r--r--drivers/gpu/drm/via/via_mm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c21
-rw-r--r--drivers/gpu/stub/Kconfig18
-rw-r--r--drivers/gpu/stub/Makefile1
-rw-r--r--drivers/gpu/stub/poulsbo.c64
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-roccat.c2
-rw-r--r--drivers/hid/hidraw.c6
-rw-r--r--drivers/hsi/hsi.c2
-rw-r--r--drivers/i2c/busses/Kconfig28
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-at91.c17
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c342
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c2
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c16
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c61
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c19
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-imx.c1
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c2
-rw-r--r--drivers/i2c/busses/i2c-isch.c17
-rw-r--r--drivers/i2c/busses/i2c-ismt.c963
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c272
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c152
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c100
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-octeon.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c62
-rw-r--r--drivers/i2c/busses/i2c-s6000.c1
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c1
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c152
-rw-r--r--drivers/i2c/busses/i2c-sis630.c360
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c79
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/busses/scx200_acb.c1
-rw-r--r--drivers/i2c/i2c-core.c69
-rw-r--r--drivers/i2c/i2c-dev.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/ide/ide-proc.c4
-rw-r--r--drivers/infiniband/core/cm.c22
-rw-r--r--drivers/infiniband/core/cma.c27
-rw-r--r--drivers/infiniband/core/fmr_pool.c3
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/ucm.c16
-rw-r--r--drivers/infiniband/core/ucma.c32
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c140
-rw-r--r--drivers/infiniband/core/uverbs_main.c13
-rw-r--r--drivers/infiniband/core/verbs.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c13
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c19
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h24
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c170
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h29
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c27
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c34
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c16
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c6
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c32
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c22
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c87
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c49
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c14
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c42
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/input/misc/max8925_onkey.c3
-rw-r--r--drivers/iommu/Kconfig76
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd_iommu.c3
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iommu.c66
-rw-r--r--drivers/iommu/omap-iommu.c4
-rw-r--r--drivers/iommu/shmobile-iommu.c395
-rw-r--r--drivers/iommu/shmobile-ipmmu.c136
-rw-r--r--drivers/iommu/shmobile-ipmmu.h34
-rw-r--r--drivers/iommu/tegra-gart.c4
-rw-r--r--drivers/iommu/tegra-smmu.c91
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c6
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c8
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/isdn/mISDN/stack.c3
-rw-r--r--drivers/leds/Kconfig12
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-88pm860x.c5
-rw-r--r--drivers/leds/leds-lm3530.c58
-rw-r--r--drivers/leds/leds-lm355x.c2
-rw-r--r--drivers/leds/leds-lm3642.c4
-rw-r--r--drivers/leds/leds-lp5521.c944
-rw-r--r--drivers/leds/leds-lp5523.c1050
-rw-r--r--drivers/leds/leds-lp55xx-common.c523
-rw-r--r--drivers/leds/leds-lp55xx-common.h134
-rw-r--r--drivers/leds/leds-lp8788.c9
-rw-r--r--drivers/leds/leds-pca9532.c6
-rw-r--r--drivers/leds/leds-pwm.c152
-rw-r--r--drivers/leds/leds-renesas-tpu.c12
-rw-r--r--drivers/leds/leds-ss4200.c3
-rw-r--r--drivers/leds/leds-sunfire.c19
-rw-r--r--drivers/leds/leds-tca6507.c72
-rw-r--r--drivers/leds/leds-wm831x-status.c2
-rw-r--r--drivers/lguest/lguest_device.c2
-rw-r--r--drivers/md/Kconfig55
-rw-r--r--drivers/md/Makefile6
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm-bio-prison.c158
-rw-r--r--drivers/md/dm-bio-prison.h58
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-block-types.h54
-rw-r--r--drivers/md/dm-cache-metadata.c1146
-rw-r--r--drivers/md/dm-cache-metadata.h142
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c464
-rw-r--r--drivers/md/dm-cache-policy-internal.h124
-rw-r--r--drivers/md/dm-cache-policy-mq.c1195
-rw-r--r--drivers/md/dm-cache-policy.c161
-rw-r--r--drivers/md/dm-cache-policy.h228
-rw-r--r--drivers/md/dm-cache-target.c2584
-rw-r--r--drivers/md/dm-crypt.c45
-rw-r--r--drivers/md/dm-delay.c12
-rw-r--r--drivers/md/dm-flakey.c11
-rw-r--r--drivers/md/dm-ioctl.c166
-rw-r--r--drivers/md/dm-kcopyd.c121
-rw-r--r--drivers/md/dm-linear.c13
-rw-r--r--drivers/md/dm-mpath.c12
-rw-r--r--drivers/md/dm-raid.c10
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-snap.c36
-rw-r--r--drivers/md/dm-stripe.c27
-rw-r--r--drivers/md/dm-table.c11
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-thin-metadata.c12
-rw-r--r--drivers/md/dm-thin.c277
-rw-r--r--drivers/md/dm-verity.c8
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c508
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/Makefile2
-rw-r--r--drivers/md/persistent-data/dm-array.c808
-rw-r--r--drivers/md/persistent-data/dm-array.h166
-rw-r--r--drivers/md/persistent-data/dm-bitset.c163
-rw-r--r--drivers/md/persistent-data/dm-bitset.h165
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c1
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h1
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c7
-rw-r--r--drivers/md/persistent-data/dm-btree.c52
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c7
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/Kconfig19
-rw-r--r--drivers/media/common/Kconfig11
-rw-r--r--drivers/media/common/Makefile3
-rw-r--r--drivers/media/common/btcx-risc.c (renamed from drivers/media/i2c/btcx-risc.c)0
-rw-r--r--drivers/media/common/btcx-risc.h (renamed from drivers/media/i2c/btcx-risc.h)0
-rw-r--r--drivers/media/common/cx2341x.c (renamed from drivers/media/i2c/cx2341x.c)0
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c5
-rw-r--r--drivers/media/common/tveeprom.c (renamed from drivers/media/i2c/tveeprom.c)290
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c16
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c59
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h10
-rw-r--r--drivers/media/dvb-core/dvb_net.c71
-rw-r--r--drivers/media/dvb-core/dvb_net.h1
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9033.c18
-rw-r--r--drivers/media/dvb-frontends/af9033.h1
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h132
-rw-r--r--drivers/media/dvb-frontends/bcm3510.h2
-rw-r--r--drivers/media/dvb-frontends/cx22700.h2
-rw-r--r--drivers/media/dvb-frontends/cx24110.h2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.h2
-rw-r--r--drivers/media/dvb-frontends/dib0090.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000.h2
-rw-r--r--drivers/media/dvb-frontends/dib8000.h2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/ds3000.c261
-rw-r--r--drivers/media/dvb-frontends/ds3000.h10
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h2
-rw-r--r--drivers/media/dvb-frontends/isl6405.h2
-rw-r--r--drivers/media/dvb-frontends/isl6421.h2
-rw-r--r--drivers/media/dvb-frontends/isl6423.h2
-rw-r--r--drivers/media/dvb-frontends/itd1000.h2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb-frontends/l64781.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.h2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c422
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.h6
-rw-r--r--drivers/media/dvb-frontends/mb86a16.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c1790
-rw-r--r--drivers/media/dvb-frontends/mt312.h2
-rw-r--r--drivers/media/dvb-frontends/mt352.h2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.h2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.h2
-rw-r--r--drivers/media/dvb-frontends/or51132.h2
-rw-r--r--drivers/media/dvb-frontends/or51211.c99
-rw-r--r--drivers/media/dvb-frontends/or51211.h2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.h2
-rw-r--r--drivers/media/dvb-frontends/sp8870.h2
-rw-r--r--drivers/media/dvb-frontends/sp887x.h2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.h2
-rw-r--r--drivers/media/dvb-frontends/stb6100.h2
-rw-r--r--drivers/media/dvb-frontends/stv0297.h2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.h2
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c40
-rw-r--r--drivers/media/dvb-frontends/stv0900_reg.h3
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c7
-rw-r--r--drivers/media/dvb-frontends/stv090x.c141
-rw-r--r--drivers/media/dvb-frontends/stv090x.h2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.h2
-rw-r--r--drivers/media/dvb-frontends/tda1002x.h5
-rw-r--r--drivers/media/dvb-frontends/tda1004x.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c22
-rw-r--r--drivers/media/dvb-frontends/tda10071.h8
-rw-r--r--drivers/media/dvb-frontends/tda10086.h2
-rw-r--r--drivers/media/dvb-frontends/tda665x.h2
-rw-r--r--drivers/media/dvb-frontends/tda8083.h2
-rw-r--r--drivers/media/dvb-frontends/tda8261.h2
-rw-r--r--drivers/media/dvb-frontends/tda8261_cfg.h2
-rw-r--r--drivers/media/dvb-frontends/tda826x.h2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c373
-rw-r--r--drivers/media/dvb-frontends/ts2020.h50
-rw-r--r--drivers/media/dvb-frontends/tua6100.h2
-rw-r--r--drivers/media/dvb-frontends/ves1820.h2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.h2
-rw-r--r--drivers/media/dvb-frontends/zl10353.h2
-rw-r--r--drivers/media/i2c/Kconfig42
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/adv7180.c3
-rw-r--r--drivers/media/i2c/adv7343.c45
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c6
-rw-r--r--drivers/media/i2c/mt9v011.c223
-rw-r--r--drivers/media/i2c/noon010pc30.c7
-rw-r--r--drivers/media/i2c/ov7670.c589
-rw-r--r--drivers/media/i2c/ov9650.c1562
-rw-r--r--drivers/media/i2c/s5c73m3/Makefile2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c1704
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c563
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c156
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h459
-rw-r--r--drivers/media/i2c/s5k6aa.c7
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c27
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c52
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c36
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c36
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c45
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c45
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c29
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c31
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c30
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c36
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c27
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c29
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c39
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c30
-rw-r--r--drivers/media/i2c/ths7303.c3
-rw-r--r--drivers/media/i2c/tvaudio.c238
-rw-r--r--drivers/media/i2c/tvp514x.c4
-rw-r--r--drivers/media/i2c/tvp5150.c7
-rw-r--r--drivers/media/i2c/tvp7002.c18
-rw-r--r--drivers/media/parport/Kconfig1
-rw-r--r--drivers/media/parport/bw-qcam.c165
-rw-r--r--drivers/media/pci/bt8xx/Makefile1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c5
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c4
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.h2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c9
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/Kconfig3
-rw-r--r--drivers/media/pci/cx23885/Makefile1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c114
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c66
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c20
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c6
-rw-r--r--drivers/media/pci/cx25821/Makefile1
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/Kconfig2
-rw-r--r--drivers/media/pci/cx88/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c2
-rw-r--r--drivers/media/pci/cx88/cx88-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c15
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.c3
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.h2
-rw-r--r--drivers/media/pci/cx88/cx88.h10
-rw-r--r--drivers/media/pci/dm1105/Kconfig1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c11
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c14
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c4
-rw-r--r--drivers/media/pci/mantis/mantis_ca.c5
-rw-r--r--drivers/media/pci/meye/meye.c286
-rw-r--r--drivers/media/pci/meye/meye.h2
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c13
-rw-r--r--drivers/media/pci/saa7134/saa7134.h7
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1073
-rw-r--r--drivers/media/pci/ttpci/Kconfig5
-rw-r--r--drivers/media/pci/ttpci/av7110.c12
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c8
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c24
-rw-r--r--drivers/media/pci/zoran/zoran_card.c3
-rw-r--r--drivers/media/pci/zoran/zoran_device.c4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.c2
-rw-r--r--drivers/media/platform/Kconfig11
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/blackfin/Kconfig7
-rw-r--r--drivers/media/platform/blackfin/Makefile4
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c189
-rw-r--r--drivers/media/platform/blackfin/ppi.c90
-rw-r--r--drivers/media/platform/coda.c32
-rw-r--r--drivers/media/platform/davinci/Kconfig22
-rw-r--r--drivers/media/platform/davinci/Makefile4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/vpbe.c12
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c9
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c35
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c65
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c5
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c6
-rw-r--r--drivers/media/platform/davinci/vpss.c70
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c48
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h5
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c34
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c6
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c6
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c84
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h17
-rw-r--r--drivers/media/platform/omap/Kconfig2
-rw-r--r--drivers/media/platform/omap/omap_vout.c36
-rw-r--r--drivers/media/platform/omap24xxcam.c2
-rw-r--r--drivers/media/platform/omap3isp/isp.c92
-rw-r--r--drivers/media/platform/omap3isp/isp.h8
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c8
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c13
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c28
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c28
-rw-r--r--drivers/media/platform/omap3isp/isphist.c21
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c40
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c5
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c190
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c165
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.h17
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite-reg.c16
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite-reg.h4
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.c192
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.h9
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c136
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c400
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.h14
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.c82
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-reg.h10
-rw-r--r--drivers/media/platform/s5p-fimc/mipi-csis.c101
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-hw.c16
-rw-r--r--drivers/media/platform/s5p-g2d/g2d-regs.h7
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c35
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h17
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c165
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h31
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c149
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c15
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c30
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c197
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c148
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c18
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h1
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c14
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c6
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c19
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c29
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c6
-rw-r--r--drivers/media/platform/sh_veu.c1266
-rw-r--r--drivers/media/platform/sh_vou.c123
-rw-r--r--drivers/media/platform/soc_camera/Kconfig7
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c6
-rw-r--r--drivers/media/platform/soc_camera/mx1_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c529
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c73
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c44
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c23
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c172
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c6
-rw-r--r--drivers/media/platform/timblogiw.c2
-rw-r--r--drivers/media/platform/via-camera.c60
-rw-r--r--drivers/media/platform/vino.c11
-rw-r--r--drivers/media/platform/vivi.c224
-rw-r--r--drivers/media/radio/Kconfig12
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-ma901.c460
-rw-r--r--drivers/media/radio/radio-miropcm20.c173
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c3
-rw-r--r--drivers/media/rc/ati_remote.c27
-rw-r--r--drivers/media/rc/ene_ir.c28
-rw-r--r--drivers/media/rc/fintek-cir.c24
-rw-r--r--drivers/media/rc/gpio-ir-recv.c55
-rw-r--r--drivers/media/rc/iguanair.c26
-rw-r--r--drivers/media/rc/imon.c4
-rw-r--r--drivers/media/rc/ir-raw.c17
-rw-r--r--drivers/media/rc/ite-cir.c26
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c86
-rw-r--r--drivers/media/rc/lirc_dev.c19
-rw-r--r--drivers/media/rc/mceusb.c37
-rw-r--r--drivers/media/rc/nuvoton-cir.c41
-rw-r--r--drivers/media/rc/rc-core-priv.h16
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/rc/redrat3.c18
-rw-r--r--drivers/media/rc/ttusbir.c10
-rw-r--r--drivers/media/rc/winbond-cir.c41
-rw-r--r--drivers/media/tuners/fc0011.c19
-rw-r--r--drivers/media/tuners/fc0012-priv.h13
-rw-r--r--drivers/media/tuners/fc0012.c113
-rw-r--r--drivers/media/tuners/fc0012.h32
-rw-r--r--drivers/media/tuners/mt2060.h2
-rw-r--r--drivers/media/tuners/mt2063.h2
-rw-r--r--drivers/media/tuners/mt20xx.h2
-rw-r--r--drivers/media/tuners/mt2131.h2
-rw-r--r--drivers/media/tuners/mt2266.h2
-rw-r--r--drivers/media/tuners/mxl5007t.h2
-rw-r--r--drivers/media/tuners/qt1010.h2
-rw-r--r--drivers/media/tuners/tda18212.c6
-rw-r--r--drivers/media/tuners/tda18218.c6
-rw-r--r--drivers/media/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/tuners/tda18271-maps.c6
-rw-r--r--drivers/media/tuners/tda18271.h2
-rw-r--r--drivers/media/tuners/tda827x.h2
-rw-r--r--drivers/media/tuners/tda8290.h2
-rw-r--r--drivers/media/tuners/tda9887.h2
-rw-r--r--drivers/media/tuners/tea5761.h2
-rw-r--r--drivers/media/tuners/tea5767.h2
-rw-r--r--drivers/media/tuners/tuner-simple.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/tuners/xc4000.h2
-rw-r--r--drivers/media/tuners/xc5000.c1
-rw-r--r--drivers/media/usb/Kconfig2
-rw-r--r--drivers/media/usb/au0828/Kconfig17
-rw-r--r--drivers/media/usb/au0828/Makefile6
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c24
-rw-r--r--drivers/media/usb/au0828/au0828-core.c13
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c13
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c31
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c289
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c54
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c38
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c30
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig8
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c5
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c60
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c179
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c5
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c277
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c8
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c270
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c296
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c96
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c293
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c359
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h5
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c123
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c1689
-rw-r--r--drivers/media/usb/em28xx/em28xx.h149
-rw-r--r--drivers/media/usb/gspca/cpia1.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c10
-rw-r--r--drivers/media/usb/gspca/gspca.h6
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c18
-rw-r--r--drivers/media/usb/gspca/konica.c6
-rw-r--r--drivers/media/usb/gspca/ov519.c6
-rw-r--r--drivers/media/usb/gspca/pac207.c36
-rw-r--r--drivers/media/usb/gspca/pac7302.c4
-rw-r--r--drivers/media/usb/gspca/pac7311.c4
-rw-r--r--drivers/media/usb/gspca/se401.c4
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c4
-rw-r--r--drivers/media/usb/gspca/sonixb.c6
-rw-r--r--drivers/media/usb/gspca/sonixj.c4
-rw-r--r--drivers/media/usb/gspca/spca561.c6
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c17
-rw-r--r--drivers/media/usb/gspca/t613.c8
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c8
-rw-r--r--drivers/media/usb/gspca/zc3xx.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c5
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c5
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c7
-rw-r--r--drivers/media/usb/s2255/s2255drv.c6
-rw-r--r--drivers/media/usb/sn9c102/sn9c102_core.c9
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c59
-rw-r--r--drivers/media/usb/tlg2300/pd-video.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c542
-rw-r--r--drivers/media/usb/tm6000/tm6000.h10
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c7
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c5
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c16
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c8
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/Kconfig11
-rw-r--r--drivers/media/v4l2-core/Makefile3
-rw-r--r--drivers/media/v4l2-core/tuner-core.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c179
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c15
-rw-r--r--drivers/memstick/core/memstick.c21
-rw-r--r--drivers/memstick/core/mspro_block.c17
-rw-r--r--drivers/memstick/host/r592.c3
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c7
-rw-r--r--drivers/mfd/88pm800.c10
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm80x.c22
-rw-r--r--drivers/mfd/Kconfig72
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c61
-rw-r--r--drivers/mfd/ab8500-debugfs.c1248
-rw-r--r--drivers/mfd/ab8500-gpadc.c90
-rw-r--r--drivers/mfd/ab8500-sysctrl.c92
-rw-r--r--drivers/mfd/abx500-core.c16
-rw-r--r--drivers/mfd/arizona-core.c55
-rw-r--r--drivers/mfd/da9052-i2c.c3
-rw-r--r--drivers/mfd/db8500-prcmu.c17
-rw-r--r--drivers/mfd/lpc_ich.c144
-rw-r--r--drivers/mfd/lpc_sch.c147
-rw-r--r--drivers/mfd/max8925-core.c89
-rw-r--r--drivers/mfd/max8925-i2c.c36
-rw-r--r--drivers/mfd/omap-usb-host.c558
-rw-r--r--drivers/mfd/omap-usb-tll.c243
-rw-r--r--drivers/mfd/palmas.c14
-rw-r--r--drivers/mfd/rtl8411.c16
-rw-r--r--drivers/mfd/rts5209.c8
-rw-r--r--drivers/mfd/rts5227.c234
-rw-r--r--drivers/mfd/rts5229.c8
-rw-r--r--drivers/mfd/rtsx_pcr.c111
-rw-r--r--drivers/mfd/rtsx_pcr.h4
-rw-r--r--drivers/mfd/syscon.c1
-rw-r--r--drivers/mfd/tps6507x.c9
-rw-r--r--drivers/mfd/tps65090.c47
-rw-r--r--drivers/mfd/twl-core.c370
-rw-r--r--drivers/mfd/vexpress-sysreg.c83
-rw-r--r--drivers/mfd/wm5102-tables.c130
-rw-r--r--drivers/mfd/wm8994-core.c7
-rw-r--r--drivers/misc/c2port/core.c22
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/carma/carma-fpga.c6
-rw-r--r--drivers/misc/eeprom/Kconfig11
-rw-r--r--drivers/misc/kgdbts.c2
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c3
-rw-r--r--drivers/misc/tifm_core.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c7
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c6
-rw-r--r--drivers/mmc/card/block.c491
-rw-r--r--drivers/mmc/card/queue.c128
-rw-r--r--drivers/mmc/card/queue.h25
-rw-r--r--drivers/mmc/core/bus.c1
-rw-r--r--drivers/mmc/core/core.c246
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/host.c134
-rw-r--r--drivers/mmc/core/mmc.c46
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c44
-rw-r--r--drivers/mmc/core/sdio.c54
-rw-r--r--drivers/mmc/core/slot-gpio.c57
-rw-r--r--drivers/mmc/host/Kconfig18
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/android-goldfish.c570
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c10
-rw-r--r--drivers/mmc/host/dw_mmc.c73
-rw-r--r--drivers/mmc/host/mvsdio.c131
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c23
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c230
-rw-r--r--drivers/mmc/host/sdhci-pci.c4
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c11
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c117
-rw-r--r--drivers/mmc/host/sdhci-s3c.c8
-rw-r--r--drivers/mmc/host/sdhci-tegra.c123
-rw-r--r--drivers/mmc/host/sdhci.c448
-rw-r--r--drivers/mmc/host/sdhci.h14
-rw-r--r--drivers/mmc/host/sh_mmcif.c281
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c96
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c87
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c2
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/ar7part.c6
-rw-r--r--drivers/mtd/bcm47xxpart.c51
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c217
-rw-r--r--drivers/mtd/cmdlinepart.c47
-rw-r--r--drivers/mtd/devices/Makefile4
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c55
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h15
-rw-r--r--drivers/mtd/devices/elm.c404
-rw-r--r--drivers/mtd/devices/m25p80.c100
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/maps/uclinux.c30
-rw-r--r--drivers/mtd/mtdcore.c9
-rw-r--r--drivers/mtd/nand/atmel_nand.c141
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c14
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c24
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c233
-rw-r--r--drivers/mtd/nand/fsmc_nand.c22
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h22
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c63
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h4
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/nand_ecc.c5
-rw-r--r--drivers/mtd/nand/nandsim.c40
-rw-r--r--drivers/mtd/nand/omap2.c583
-rw-r--r--drivers/mtd/ofpart.c7
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c12
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c49
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c43
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c9
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c11
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c42
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c25
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.h6
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.c58
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c186
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c63
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c10
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/macvtap.c21
-rw-r--r--drivers/net/ppp/ppp_generic.c33
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/usb/smsc95xx.c36
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c6
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c21
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c9
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c7
-rw-r--r--drivers/oprofile/oprofilefs.c16
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/pci/access.c6
-rw-r--r--drivers/pci/bus.c81
-rw-r--r--drivers/pci/hotplug/acpiphp.h14
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c23
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c266
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c29
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c57
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c44
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c63
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c36
-rw-r--r--drivers/pci/iov.c10
-rw-r--r--drivers/pci/pci-acpi.c43
-rw-r--r--drivers/pci/pci-driver.c12
-rw-r--r--drivers/pci/pci.c66
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aspm.c8
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/probe.c103
-rw-r--r--drivers/pci/proc.c10
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c8
-rw-r--r--drivers/pnp/isapnp/proc.c4
-rw-r--r--drivers/pnp/pnpbios/proc.c2
-rw-r--r--drivers/power/bq2415x_charger.c11
-rw-r--r--drivers/power/bq27x00_battery.c9
-rw-r--r--drivers/power/ds2782_battery.c9
-rw-r--r--drivers/pps/clients/pps-gpio.c6
-rw-r--r--drivers/pps/kapi.c2
-rw-r--r--drivers/pps/pps.c36
-rw-r--r--drivers/pwm/Kconfig18
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c52
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c445
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c53
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c93
-rw-r--r--drivers/pwm/pwm-twl-led.c1
-rw-r--r--drivers/pwm/pwm-twl.c10
-rw-r--r--drivers/pwm/pwm-vt8500.c87
-rw-r--r--drivers/remoteproc/remoteproc_core.c11
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c31
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-palmas.c339
-rw-r--r--drivers/s390/char/fs3270.c4
-rw-r--r--drivers/s390/char/tape_char.c8
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/qdio_debug.c4
-rw-r--r--drivers/s390/kvm/Makefile2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c40
-rw-r--r--drivers/s390/kvm/virtio_ccw.c926
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-sas.c2
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/aacraid/aacraid.h8
-rw-r--r--drivers/scsi/aacraid/comminit.c11
-rw-r--r--drivers/scsi/aacraid/src.c4
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c15
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h27
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c277
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c29
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c95
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/ch.c21
-rw-r--r--drivers/scsi/csiostor/csio_hw.c15
-rw-r--r--drivers/scsi/csiostor/csio_init.c11
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c266
-rw-r--r--drivers/scsi/fcoe/fcoe.h6
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c45
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c186
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c199
-rw-r--r--drivers/scsi/fcoe/libfcoe.h20
-rw-r--r--drivers/scsi/fnic/Makefile2
-rw-r--r--drivers/scsi/fnic/fnic.h60
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c314
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c6
-rw-r--r--drivers/scsi/fnic/fnic_io.h6
-rw-r--r--drivers/scsi/fnic/fnic_main.c17
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c721
-rw-r--r--drivers/scsi/fnic/fnic_trace.c273
-rw-r--r--drivers/scsi/fnic/fnic_trace.h90
-rw-r--r--drivers/scsi/gdth.c10
-rw-r--r--drivers/scsi/hpsa.c117
-rw-r--r--drivers/scsi/ipr.c1325
-rw-r--r--drivers/scsi/ipr.h102
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_libfc.h38
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c37
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h176
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c261
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c434
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c27
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h54
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c200
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c82
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c207
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c147
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c169
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c55
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c67
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c160
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c12
-rw-r--r--drivers/scsi/sg.c43
-rw-r--r--drivers/scsi/sr.c46
-rw-r--r--drivers/scsi/st.c29
-rw-r--r--drivers/scsi/storvsc_drv.c137
-rw-r--r--drivers/scsi/ufs/Kconfig74
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs.h44
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c211
-rw-r--r--drivers/scsi/ufs/ufshcd.c423
-rw-r--r--drivers/scsi/ufs/ufshcd.h202
-rw-r--r--drivers/scsi/ufs/ufshci.h44
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/binder.c19
-rw-r--r--drivers/staging/bcm/Misc.c2
-rw-r--r--drivers/staging/ccg/f_mass_storage.c2
-rw-r--r--drivers/staging/ccg/rndis.c2
-rw-r--r--drivers/staging/ccg/storage_common.c2
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c4
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c4
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c2
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c29
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig9
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile3
-rw-r--r--drivers/staging/media/davinci_vpfe/TODO37
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt154
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h1290
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c1863
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h179
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c1048
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h559
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1071
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h233
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h93
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c2104
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h203
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif_regs.h294
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c1999
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h244
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe.h86
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c740
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h97
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c1620
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h155
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c2
-rw-r--r--drivers/staging/media/go7007/go7007-driver.c15
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c24
-rw-r--r--drivers/staging/media/go7007/go7007-i2c.c10
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c5
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c11
-rw-r--r--drivers/staging/media/go7007/s2250-board.c32
-rw-r--r--drivers/staging/media/go7007/s2250-loader.c38
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c2
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c86
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c24
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c12
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c7
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c15
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c12
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c31
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c49
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c73
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c70
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c36
-rw-r--r--drivers/staging/media/solo6x10/p2m.c8
-rw-r--r--drivers/staging/media/solo6x10/v4l2-enc.c4
-rw-r--r--drivers/staging/media/solo6x10/v4l2.c5
-rw-r--r--drivers/staging/omapdrm/TODO32
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/target/iscsi/iscsi_target.c91
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c25
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c39
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_alua.c35
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c58
-rw-r--r--drivers/target/target_core_fabric_configfs.c12
-rw-r--r--drivers/target/target_core_file.c118
-rw-r--r--drivers/target/target_core_iblock.c48
-rw-r--r--drivers/target/target_core_internal.h3
-rw-r--r--drivers/target/target_core_pr.c27
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/target/target_core_rd.c18
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_spc.c42
-rw-r--r--drivers/target/target_core_tmr.c12
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/target/target_core_transport.c19
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/thermal/Kconfig55
-rw-r--r--drivers/thermal/Makefile10
-rw-r--r--drivers/thermal/cpu_cooling.c21
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c5
-rw-r--r--drivers/thermal/db8500_thermal.c4
-rw-r--r--drivers/thermal/dove_thermal.c209
-rw-r--r--drivers/thermal/exynos_thermal.c211
-rw-r--r--drivers/thermal/intel_powerclamp.c795
-rw-r--r--drivers/thermal/kirkwood_thermal.c134
-rw-r--r--drivers/thermal/rcar_thermal.c490
-rw-r--r--drivers/thermal/spear_thermal.c7
-rw-r--r--drivers/thermal/step_wise.c122
-rw-r--r--drivers/thermal/thermal_sys.c122
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/serial/Kconfig10
-rw-r--r--drivers/tty/sysrq.c15
-rw-r--r--drivers/tty/vt/vc_screen.c8
-rw-r--r--drivers/tty/vt/vt.c138
-rw-r--r--drivers/uio/uio.c19
-rw-r--r--drivers/usb/core/devices.c4
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/vfio/pci/Kconfig10
-rw-r--r--drivers/vfio/pci/vfio_pci.c75
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c52
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h19
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c281
-rw-r--r--drivers/vfio/vfio.c52
-rw-r--r--drivers/vhost/tcm_vhost.c287
-rw-r--r--drivers/vhost/tcm_vhost.h8
-rw-r--r--drivers/video/Kconfig26
-rw-r--r--drivers/video/Makefile5
-rw-r--r--drivers/video/backlight/Kconfig6
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/ams369fg06.c8
-rw-r--r--drivers/video/backlight/lp8788_bl.c333
-rw-r--r--drivers/video/backlight/max8925_bl.c31
-rw-r--r--drivers/video/backlight/pwm_bl.c20
-rw-r--r--drivers/video/console/fbcon.c58
-rw-r--r--drivers/video/console/vgacon.c22
-rw-r--r--drivers/video/display_timing.c24
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fbmem.c13
-rw-r--r--drivers/video/fbmon.c94
-rw-r--r--drivers/video/fbsysfs.c3
-rw-r--r--drivers/video/hdmi.c308
-rw-r--r--drivers/video/msm/mdp.c2
-rw-r--r--drivers/video/of_display_timing.c239
-rw-r--r--drivers/video/of_videomode.c54
-rw-r--r--drivers/video/via/hw.c6
-rw-r--r--drivers/video/via/hw.h2
-rw-r--r--drivers/video/via/lcd.c2
-rw-r--r--drivers/video/via/share.h2
-rw-r--r--drivers/video/via/via_modesetting.c8
-rw-r--r--drivers/video/via/via_modesetting.h6
-rw-r--r--drivers/video/videomode.c39
-rw-r--r--drivers/virtio/virtio_balloon.c13
-rw-r--r--drivers/virtio/virtio_mmio.c4
-rw-r--r--drivers/virtio/virtio_pci.c8
-rw-r--r--drivers/w1/masters/mxc_w1.c9
-rw-r--r--drivers/w1/slaves/Kconfig13
-rw-r--r--drivers/w1/slaves/Makefile3
-rw-r--r--drivers/w1/slaves/w1_ds2413.c177
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/cpwd.c4
-rw-r--r--drivers/watchdog/ux500_wdt.c171
-rw-r--r--drivers/xen/Kconfig34
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/events.c115
-rw-r--r--drivers/xen/evtchn.c14
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/pcpu.c35
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c471
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c483
-rw-r--r--drivers/xen/xen-stub.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c2
-rw-r--r--drivers/xen/xenfs/super.c66
-rw-r--r--drivers/zorro/proc.c4
-rw-r--r--fs/9p/acl.c37
-rw-r--r--fs/9p/acl.h20
-rw-r--r--fs/9p/fid.c17
-rw-r--r--fs/9p/v9fs.c34
-rw-r--r--fs/9p/v9fs.h10
-rw-r--r--fs/9p/vfs_dentry.c1
-rw-r--r--fs/9p/vfs_file.c10
-rw-r--r--fs/9p/vfs_inode.c6
-rw-r--r--fs/9p/vfs_inode_dotl.c85
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/9p/xattr.c33
-rw-r--r--fs/9p/xattr.h2
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/affs/amigaffs.c3
-rw-r--r--fs/affs/dir.c2
-rw-r--r--fs/afs/afs.h11
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/fsclient.c14
-rw-r--r--fs/afs/inode.c6
-rw-r--r--fs/afs/super.c6
-rw-r--r--fs/afs/write.c7
-rw-r--r--fs/aio.c3
-rw-r--r--fs/anon_inodes.c10
-rw-r--r--fs/autofs4/autofs_i.h2
-rw-r--r--fs/autofs4/dev-ioctl.c2
-rw-r--r--fs/autofs4/root.c10
-rw-r--r--fs/autofs4/waitq.c6
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/binfmt_aout.c4
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_elf_fdpic.c4
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/binfmt_misc.c4
-rw-r--r--fs/bio.c2
-rw-r--r--fs/block_dev.c8
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/backref.c5
-rw-r--r--fs/btrfs/backref.h2
-rw-r--r--fs/btrfs/btrfs_inode.h20
-rw-r--r--fs/btrfs/check-integrity.c3
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/btrfs/ctree.c68
-rw-r--r--fs/btrfs/ctree.h150
-rw-r--r--fs/btrfs/delayed-inode.c147
-rw-r--r--fs/btrfs/delayed-inode.h1
-rw-r--r--fs/btrfs/delayed-ref.c82
-rw-r--r--fs/btrfs/delayed-ref.h52
-rw-r--r--fs/btrfs/dev-replace.c6
-rw-r--r--fs/btrfs/disk-io.c227
-rw-r--r--fs/btrfs/disk-io.h7
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/extent-tree.c584
-rw-r--r--fs/btrfs/extent_io.c138
-rw-r--r--fs/btrfs/extent_io.h8
-rw-r--r--fs/btrfs/extent_map.c1
-rw-r--r--fs/btrfs/file-item.c67
-rw-r--r--fs/btrfs/file.c65
-rw-r--r--fs/btrfs/free-space-cache.c62
-rw-r--r--fs/btrfs/inode.c1068
-rw-r--r--fs/btrfs/ioctl.c263
-rw-r--r--fs/btrfs/locking.c5
-rw-r--r--fs/btrfs/ordered-data.c98
-rw-r--r--fs/btrfs/ordered-data.h14
-rw-r--r--fs/btrfs/print-tree.c1
-rw-r--r--fs/btrfs/qgroup.c55
-rw-r--r--fs/btrfs/raid56.c2099
-rw-r--r--fs/btrfs/raid56.h51
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c10
-rw-r--r--fs/btrfs/send.c55
-rw-r--r--fs/btrfs/send.h1
-rw-r--r--fs/btrfs/super.c89
-rw-r--r--fs/btrfs/sysfs.c1
-rw-r--r--fs/btrfs/transaction.c151
-rw-r--r--fs/btrfs/transaction.h8
-rw-r--r--fs/btrfs/tree-defrag.c19
-rw-r--r--fs/btrfs/tree-log.c166
-rw-r--r--fs/btrfs/ulist.c2
-rw-r--r--fs/btrfs/volumes.c636
-rw-r--r--fs/btrfs/volumes.h11
-rw-r--r--fs/buffer.c14
-rw-r--r--fs/ceph/addr.c50
-rw-r--r--fs/ceph/caps.c49
-rw-r--r--fs/ceph/dir.c6
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/ceph/file.c18
-rw-r--r--fs/ceph/inode.c63
-rw-r--r--fs/ceph/ioctl.c22
-rw-r--r--fs/ceph/locks.c2
-rw-r--r--fs/ceph/mds_client.c37
-rw-r--r--fs/ceph/mds_client.h10
-rw-r--r--fs/ceph/mdsmap.c12
-rw-r--r--fs/ceph/strings.c4
-rw-r--r--fs/ceph/super.c7
-rw-r--r--fs/ceph/super.h14
-rw-r--r--fs/ceph/xattr.c214
-rw-r--r--fs/cifs/cifs_fs_sb.h8
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifsacl.c47
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsglob.h22
-rw-r--r--fs/cifs/cifspdu.h1
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c15
-rw-r--r--fs/cifs/connect.c68
-rw-r--r--fs/cifs/dir.c18
-rw-r--r--fs/cifs/file.c46
-rw-r--r--fs/cifs/inode.c61
-rw-r--r--fs/cifs/ioctl.c2
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/cifs/readdir.c12
-rw-r--r--fs/coda/cache.c4
-rw-r--r--fs/coda/coda_fs_i.h2
-rw-r--r--fs/coda/coda_linux.c8
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/coda/file.c12
-rw-r--r--fs/coda/inode.c8
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/coda/psdev.c7
-rw-r--r--fs/coda/upcall.c10
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/coredump.c6
-rw-r--r--fs/cramfs/inode.c2
-rw-r--r--fs/dcache.c89
-rw-r--r--fs/devpts/inode.c18
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/dlm/config.c2
-rw-r--r--fs/dlm/lock.c18
-rw-r--r--fs/dlm/lockspace.c1
-rw-r--r--fs/dlm/lowcomms.c11
-rw-r--r--fs/dlm/recover.c52
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h6
-rw-r--r--fs/ecryptfs/file.c4
-rw-r--r--fs/ecryptfs/inode.c3
-rw-r--r--fs/ecryptfs/messaging.c6
-rw-r--r--fs/ecryptfs/read_write.c6
-rw-r--r--fs/efs/dir.c2
-rw-r--r--fs/exec.c20
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/exportfs/expfs.c3
-rw-r--r--fs/ext2/balloc.c28
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/inode.c12
-rw-r--r--fs/ext2/ioctl.c2
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext2/xattr.c4
-rw-r--r--fs/ext3/dir.c8
-rw-r--r--fs/ext3/inode.c16
-rw-r--r--fs/ext3/ioctl.c2
-rw-r--r--fs/ext3/namei.c5
-rw-r--r--fs/ext3/resize.c12
-rw-r--r--fs/ext3/super.c51
-rw-r--r--fs/ext3/xattr.c4
-rw-r--r--fs/ext4/acl.c7
-rw-r--r--fs/ext4/balloc.c15
-rw-r--r--fs/ext4/dir.c11
-rw-r--r--fs/ext4/ext4.h124
-rw-r--r--fs/ext4/ext4_extents.h6
-rw-r--r--fs/ext4/ext4_jbd2.c102
-rw-r--r--fs/ext4/ext4_jbd2.h51
-rw-r--r--fs/ext4/extents.c316
-rw-r--r--fs/ext4/extents_status.c620
-rw-r--r--fs/ext4/extents_status.h86
-rw-r--r--fs/ext4/file.c18
-rw-r--r--fs/ext4/hash.c6
-rw-r--r--fs/ext4/ialloc.c29
-rw-r--r--fs/ext4/indirect.c259
-rw-r--r--fs/ext4/inline.c14
-rw-r--r--fs/ext4/inode.c678
-rw-r--r--fs/ext4/ioctl.c15
-rw-r--r--fs/ext4/mballoc.c77
-rw-r--r--fs/ext4/mballoc.h4
-rw-r--r--fs/ext4/migrate.c15
-rw-r--r--fs/ext4/mmp.c4
-rw-r--r--fs/ext4/move_extent.c16
-rw-r--r--fs/ext4/namei.c500
-rw-r--r--fs/ext4/page-io.c85
-rw-r--r--fs/ext4/resize.c42
-rw-r--r--fs/ext4/super.c546
-rw-r--r--fs/ext4/xattr.c23
-rw-r--r--fs/ext4/xattr.h68
-rw-r--r--fs/f2fs/checkpoint.c63
-rw-r--r--fs/f2fs/debug.c4
-rw-r--r--fs/f2fs/dir.c31
-rw-r--r--fs/f2fs/f2fs.h44
-rw-r--r--fs/f2fs/file.c35
-rw-r--r--fs/f2fs/gc.c124
-rw-r--r--fs/f2fs/gc.h21
-rw-r--r--fs/f2fs/inode.c53
-rw-r--r--fs/f2fs/node.c20
-rw-r--r--fs/f2fs/recovery.c16
-rw-r--r--fs/f2fs/segment.c29
-rw-r--r--fs/f2fs/segment.h23
-rw-r--r--fs/f2fs/super.c92
-rw-r--r--fs/fat/dir.c6
-rw-r--r--fs/fat/fat.h2
-rw-r--r--fs/fat/file.c4
-rw-r--r--fs/fat/inode.c77
-rw-r--r--fs/fat/nfs.c3
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/file_table.c31
-rw-r--r--fs/freevxfs/vxfs_lookup.c2
-rw-r--r--fs/fs-writeback.c60
-rw-r--r--fs/fscache/cookie.c11
-rw-r--r--fs/fuse/control.c2
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/acl.c2
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/export.c4
-rw-r--r--fs/gfs2/file.c17
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/incore.h3
-rw-r--r--fs/gfs2/inode.c32
-rw-r--r--fs/gfs2/quota.c138
-rw-r--r--fs/gfs2/quota.h15
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c6
-rw-r--r--fs/gfs2/sys.c32
-rw-r--r--fs/gfs2/xattr.c4
-rw-r--r--fs/hfs/dir.c2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/Makefile4
-rw-r--r--fs/hfsplus/attributes.c399
-rw-r--r--fs/hfsplus/bfind.c93
-rw-r--r--fs/hfsplus/bnode.c8
-rw-r--r--fs/hfsplus/brec.c23
-rw-r--r--fs/hfsplus/btree.c8
-rw-r--r--fs/hfsplus/catalog.c36
-rw-r--r--fs/hfsplus/dir.c57
-rw-r--r--fs/hfsplus/extents.c4
-rw-r--r--fs/hfsplus/hfsplus_fs.h52
-rw-r--r--fs/hfsplus/hfsplus_raw.h68
-rw-r--r--fs/hfsplus/inode.c20
-rw-r--r--fs/hfsplus/ioctl.c112
-rw-r--r--fs/hfsplus/super.c56
-rw-r--r--fs/hfsplus/unicode.c7
-rw-r--r--fs/hfsplus/xattr.c709
-rw-r--r--fs/hfsplus/xattr.h60
-rw-r--r--fs/hfsplus/xattr_security.c104
-rw-r--r--fs/hfsplus/xattr_trusted.c63
-rw-r--r--fs/hfsplus/xattr_user.c63
-rw-r--r--fs/hostfs/hostfs_kern.c10
-rw-r--r--fs/hpfs/dir.c4
-rw-r--r--fs/hpfs/file.c2
-rw-r--r--fs/hppfs/hppfs.c8
-rw-r--r--fs/hugetlbfs/inode.c33
-rw-r--r--fs/inode.c21
-rw-r--r--fs/ioctl.c12
-rw-r--r--fs/isofs/compress.c2
-rw-r--r--fs/isofs/dir.c2
-rw-r--r--fs/isofs/export.c4
-rw-r--r--fs/jbd2/commit.c8
-rw-r--r--fs/jbd2/journal.c66
-rw-r--r--fs/jbd2/transaction.c31
-rw-r--r--fs/jffs2/dir.c4
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_dtree.c2
-rw-r--r--fs/lockd/clntlock.c16
-rw-r--r--fs/lockd/clntproc.c8
-rw-r--r--fs/lockd/host.c30
-rw-r--r--fs/lockd/mon.c1
-rw-r--r--fs/lockd/svclock.c16
-rw-r--r--fs/lockd/svcsubs.c11
-rw-r--r--fs/locks.c24
-rw-r--r--fs/logfs/dir.c4
-rw-r--r--fs/logfs/file.c2
-rw-r--r--fs/minix/dir.c2
-rw-r--r--fs/namei.c67
-rw-r--r--fs/namespace.c63
-rw-r--r--fs/ncpfs/dir.c10
-rw-r--r--fs/ncpfs/inode.c59
-rw-r--r--fs/ncpfs/ioctl.c29
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/ncpfs/ncp_fs_sb.h6
-rw-r--r--fs/nfs/cache_lib.c12
-rw-r--r--fs/nfs/cache_lib.h2
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/nfs/dir.c48
-rw-r--r--fs/nfs/dns_resolve.c67
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/idmap.c55
-rw-r--r--fs/nfs/inode.c18
-rw-r--r--fs/nfs/nfs2xdr.c19
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs3xdr.c18
-rw-r--r--fs/nfs/nfs4client.c14
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4filelayout.c6
-rw-r--r--fs/nfs/nfs4filelayout.h2
-rw-r--r--fs/nfs/nfs4filelayoutdev.c1
-rw-r--r--fs/nfs/nfs4namespace.c1
-rw-r--r--fs/nfs/nfs4proc.c21
-rw-r--r--fs/nfs/nfs4super.c6
-rw-r--r--fs/nfs/nfs4xdr.c16
-rw-r--r--fs/nfs/pnfs.c21
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/pnfs_dev.c9
-rw-r--r--fs/nfs/proc.c2
-rw-r--r--fs/nfs/super.c7
-rw-r--r--fs/nfs/unlink.c20
-rw-r--r--fs/nfs_common/nfsacl.c41
-rw-r--r--fs/nfsd/acl.h2
-rw-r--r--fs/nfsd/auth.c12
-rw-r--r--fs/nfsd/auth.h6
-rw-r--r--fs/nfsd/cache.h17
-rw-r--r--fs/nfsd/export.c38
-rw-r--r--fs/nfsd/fault_inject.c8
-rw-r--r--fs/nfsd/idmap.h8
-rw-r--r--fs/nfsd/nfs2acl.c23
-rw-r--r--fs/nfsd/nfs3proc.c5
-rw-r--r--fs/nfsd/nfs3xdr.c24
-rw-r--r--fs/nfsd/nfs4acl.c63
-rw-r--r--fs/nfsd/nfs4idmap.c54
-rw-r--r--fs/nfsd/nfs4proc.c7
-rw-r--r--fs/nfsd/nfs4recover.c10
-rw-r--r--fs/nfsd/nfs4state.c107
-rw-r--r--fs/nfsd/nfs4xdr.c79
-rw-r--r--fs/nfsd/nfscache.c353
-rw-r--r--fs/nfsd/nfsctl.c83
-rw-r--r--fs/nfsd/nfsproc.c12
-rw-r--r--fs/nfsd/nfssvc.c6
-rw-r--r--fs/nfsd/nfsxdr.c21
-rw-r--r--fs/nfsd/state.h4
-rw-r--r--fs/nfsd/vfs.c14
-rw-r--r--fs/nfsd/vfs.h8
-rw-r--r--fs/nfsd/xdr.h2
-rw-r--r--fs/nfsd/xdr3.h2
-rw-r--r--fs/nfsd/xdr4.h2
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/file.c2
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/nilfs2/namei.c4
-rw-r--r--fs/notify/dnotify/dnotify.c4
-rw-r--r--fs/notify/fanotify/fanotify_user.c2
-rw-r--r--fs/notify/fsnotify.c3
-rw-r--r--fs/notify/inode_mark.c19
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c1
-rw-r--r--fs/notify/inotify/inotify_user.c24
-rw-r--r--fs/notify/vfsmount_mark.c19
-rw-r--r--fs/ntfs/dir.c2
-rw-r--r--fs/ocfs2/acl.c31
-rw-r--r--fs/ocfs2/aops.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c32
-rw-r--r--fs/ocfs2/dcache.c3
-rw-r--r--fs/ocfs2/dir.c5
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c6
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c6
-rw-r--r--fs/ocfs2/dlmglue.c8
-rw-r--r--fs/ocfs2/export.c4
-rw-r--r--fs/ocfs2/file.c25
-rw-r--r--fs/ocfs2/inode.c12
-rw-r--r--fs/ocfs2/ioctl.c4
-rw-r--r--fs/ocfs2/mmap.c10
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/ocfs2/refcounttree.c6
-rw-r--r--fs/ocfs2/suballoc.c7
-rw-r--r--fs/ocfs2/suballoc.h2
-rw-r--r--fs/ocfs2/symlink.c2
-rw-r--r--fs/ocfs2/xattr.c2
-rw-r--r--fs/omfs/dir.c4
-rw-r--r--fs/open.c49
-rw-r--r--fs/openpromfs/inode.c2
-rw-r--r--fs/pipe.c20
-rw-r--r--fs/proc/base.c48
-rw-r--r--fs/proc/generic.c58
-rw-r--r--fs/proc/inode.c44
-rw-r--r--fs/proc/internal.h3
-rw-r--r--fs/proc/kcore.c3
-rw-r--r--fs/proc/nommu.c2
-rw-r--r--fs/proc/proc_devtree.c13
-rw-r--r--fs/proc/proc_net.c2
-rw-r--r--fs/proc/proc_sysctl.c23
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/proc/vmcore.c35
-rw-r--r--fs/qnx4/dir.c2
-rw-r--r--fs/qnx6/dir.c2
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/read_write.c17
-rw-r--r--fs/readdir.c2
-rw-r--r--fs/reiserfs/file.c2
-rw-r--r--fs/reiserfs/inode.c4
-rw-r--r--fs/reiserfs/ioctl.c2
-rw-r--r--fs/reiserfs/procfs.c2
-rw-r--r--fs/romfs/super.c2
-rw-r--r--fs/seq_file.c40
-rw-r--r--fs/splice.c7
-rw-r--r--fs/squashfs/dir.c2
-rw-r--r--fs/stat.c13
-rw-r--r--fs/super.c8
-rw-r--r--fs/sync.c2
-rw-r--r--fs/sysfs/bin.c9
-rw-r--r--fs/sysv/dir.c2
-rw-r--r--fs/timerfd.c10
-rw-r--r--fs/ubifs/debug.c8
-rw-r--r--fs/ubifs/dir.c2
-rw-r--r--fs/ubifs/file.c2
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/lpt_commit.c14
-rw-r--r--fs/ubifs/orphan.c12
-rw-r--r--fs/ubifs/tnc_commit.c2
-rw-r--r--fs/ubifs/ubifs.h6
-rw-r--r--fs/udf/dir.c2
-rw-r--r--fs/udf/file.c6
-rw-r--r--fs/udf/inode.c86
-rw-r--r--fs/udf/namei.c4
-rw-r--r--fs/udf/super.c11
-rw-r--r--fs/udf/udf_i.h16
-rw-r--r--fs/udf/udf_sb.h5
-rw-r--r--fs/udf/udfdecl.h5
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/xfs/xfs_dfrag.c8
-rw-r--r--fs/xfs/xfs_export.c4
-rw-r--r--fs/xfs/xfs_file.c4
-rw-r--r--fs/xfs/xfs_ioctl.c6
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_log_recover.c3
-rw-r--r--include/acpi/acpi_bus.h10
-rw-r--r--include/acpi/acpi_drivers.h5
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/ghes.h72
-rw-r--r--include/asm-generic/checksum.h4
-rw-r--r--include/asm-generic/cputime_nsecs.h2
-rw-r--r--include/asm-generic/gpio.h86
-rw-r--r--include/asm-generic/io.h8
-rw-r--r--include/asm-generic/uaccess.h14
-rw-r--r--include/drm/drmP.h34
-rw-r--r--include/drm/drm_crtc.h38
-rw-r--r--include/drm/drm_edid.h6
-rw-r--r--include/drm/drm_encoder_slave.h20
-rw-r--r--include/drm/drm_fb_cma_helper.h5
-rw-r--r--include/drm/drm_fb_helper.h18
-rw-r--r--include/drm/drm_gem_cma_helper.h4
-rw-r--r--include/drm/drm_mm.h40
-rw-r--r--include/drm/drm_pciids.h13
-rw-r--r--include/drm/intel-gtt.h22
-rw-r--r--include/drm/ttm/ttm_bo_driver.h61
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/acpi_gpio.h4
-rw-r--r--include/linux/amba/pl080.h (renamed from arch/arm/include/asm/hardware/pl080.h)2
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/blktrace_api.h1
-rw-r--r--include/linux/btrfs.h6
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/ceph/ceph_features.h38
-rw-r--r--include/linux/ceph/ceph_fs.h32
-rw-r--r--include/linux/ceph/decode.h29
-rw-r--r--include/linux/ceph/libceph.h16
-rw-r--r--include/linux/ceph/mdsmap.h4
-rw-r--r--include/linux/ceph/messenger.h2
-rw-r--r--include/linux/ceph/osd_client.h74
-rw-r--r--include/linux/ceph/osdmap.h30
-rw-r--r--include/linux/ceph/rados.h158
-rw-r--r--include/linux/coda_psdev.h2
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/completion.h3
-rw-r--r--include/linux/console.h2
-rw-r--r--include/linux/crush/crush.h2
-rw-r--r--include/linux/dcache.h8
-rw-r--r--include/linux/debug_locks.h4
-rw-r--r--include/linux/device-mapper.h49
-rw-r--r--include/linux/dm-kcopyd.h25
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dmaengine.h48
-rw-r--r--include/linux/dw_dmac.h45
-rw-r--r--include/linux/edac.h79
-rw-r--r--include/linux/elevator.h5
-rw-r--r--include/linux/elf.h4
-rw-r--r--include/linux/eventfd.h2
-rw-r--r--include/linux/fb.h8
-rw-r--r--include/linux/freezer.h3
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/fsnotify.h2
-rw-r--r--include/linux/genhd.h10
-rw-r--r--include/linux/gpio.h28
-rw-r--r--include/linux/hashtable.h40
-rw-r--r--include/linux/hdmi.h231
-rw-r--r--include/linux/hsi/hsi.h6
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/i2c/twl.h84
-rw-r--r--include/linux/idr.h170
-rw-r--r--include/linux/if_team.h6
-rw-r--r--include/linux/if_vlan.h6
-rw-r--r--include/linux/iommu.h30
-rw-r--r--include/linux/ipmi.h4
-rw-r--r--include/linux/jbd2.h44
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/kvm_host.h30
-rw-r--r--include/linux/leds-lp5521.h73
-rw-r--r--include/linux/leds-lp5523.h49
-rw-r--r--include/linux/leds_pwm.h2
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/list.h49
-rw-r--r--include/linux/llist.h25
-rw-r--r--include/linux/lockd/lockd.h5
-rw-r--r--include/linux/lzo.h15
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/mfd/88pm80x.h2
-rw-r--r--include/linux/mfd/abx500.h2
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h5
-rw-r--r--include/linux/mfd/abx500/ab8500.h12
-rw-r--r--include/linux/mfd/arizona/pdata.h12
-rw-r--r--include/linux/mfd/arizona/registers.h16
-rw-r--r--include/linux/mfd/dbx500-prcmu.h12
-rw-r--r--include/linux/mfd/lp8788.h24
-rw-r--r--include/linux/mfd/max8925.h3
-rw-r--r--include/linux/mfd/palmas.h52
-rw-r--r--include/linux/mfd/rtsx_pci.h9
-rw-r--r--include/linux/mfd/tmio.h18
-rw-r--r--include/linux/mlx4/device.h22
-rw-r--r--include/linux/mlx4/qp.h19
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/mmc/card.h22
-rw-r--r--include/linux/mmc/core.h7
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h61
-rw-r--r--include/linux/mmc/mmc.h15
-rw-r--r--include/linux/mmc/sdhci.h1
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h2
-rw-r--r--include/linux/mod_devicetable.h58
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mtd/map.h9
-rw-r--r--include/linux/nfs4.h6
-rw-r--r--include/linux/nfs_idmap.h9
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/nfsd/export.h4
-rw-r--r--include/linux/of_dma.h74
-rw-r--r--include/linux/opp.h18
-rw-r--r--include/linux/pci.h15
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pid.h3
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h13
-rw-r--r--include/linux/platform_data/elm.h54
-rw-r--r--include/linux/platform_data/exynos_thermal.h3
-rw-r--r--include/linux/platform_data/leds-lp55xx.h87
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/platform_data/mmc-sdhci-tegra.h28
-rw-r--r--include/linux/platform_data/sh_ipmmu.h18
-rw-r--r--include/linux/platform_data/usb-omap.h1
-rw-r--r--include/linux/platform_data/ux500_wdt.h19
-rw-r--r--include/linux/pwm.h30
-rw-r--r--include/linux/quota.h1
-rw-r--r--include/linux/rculist.h56
-rw-r--r--include/linux/scatterlist.h41
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/sunrpc/addr.h170
-rw-r--r--include/linux/sunrpc/auth.h7
-rw-r--r--include/linux/sunrpc/cache.h10
-rw-r--r--include/linux/sunrpc/clnt.h153
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/svcauth.h4
-rw-r--r--include/linux/sunrpc/xdr.h3
-rw-r--r--include/linux/thermal.h10
-rw-r--r--include/linux/user_namespace.h10
-rw-r--r--include/linux/vexpress.h8
-rw-r--r--include/linux/virtio.h11
-rw-r--r--include/linux/vt_kern.h3
-rw-r--r--include/linux/writeback.h6
-rw-r--r--include/media/adv7343.h52
-rw-r--r--include/media/blackfin/bfin_capture.h5
-rw-r--r--include/media/blackfin/ppi.h36
-rw-r--r--include/media/davinci/vpbe_osd.h5
-rw-r--r--include/media/davinci/vpbe_venc.h5
-rw-r--r--include/media/davinci/vpss.h16
-rw-r--r--include/media/ov7670.h2
-rw-r--r--include/media/ov9650.h27
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/s5c73m3.h55
-rw-r--r--include/media/s5p_fimc.h49
-rw-r--r--include/media/soc_camera.h107
-rw-r--r--include/media/soc_camera_platform.h10
-rw-r--r--include/media/tvp514x.h7
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-ctrls.h53
-rw-r--r--include/media/v4l2-event.h4
-rw-r--r--include/media/v4l2-image-sizes.h34
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/net/9p/9p.h14
-rw-r--r--include/net/9p/client.h12
-rw-r--r--include/net/ax25.h8
-rw-r--r--include/net/icmp.h1
-rw-r--r--include/net/inet6_hashtables.h8
-rw-r--r--include/net/inet_frag.h8
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/inet_timewait_sock.h8
-rw-r--r--include/net/ipip.h17
-rw-r--r--include/net/ipv6.h12
-rw-r--r--include/net/netrom.h16
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sock.h21
-rw-r--r--include/ras/ras_event.h4
-rw-r--r--include/rdma/ib_verbs.h73
-rw-r--r--include/scsi/Kbuild3
-rw-r--r--include/scsi/fc/Kbuild4
-rw-r--r--include/scsi/fcoe_sysfs.h11
-rw-r--r--include/scsi/libfcoe.h32
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_host.h4
-rw-r--r--include/sound/aess.h53
-rw-r--r--include/target/target_core_backend.h5
-rw-r--r--include/target/target_core_base.h7
-rw-r--r--include/trace/events/block.h104
-rw-r--r--include/trace/events/ext4.h198
-rw-r--r--include/trace/events/jbd2.h106
-rw-r--r--include/trace/events/kvm.h2
-rw-r--r--include/trace/events/writeback.h116
-rw-r--r--include/uapi/drm/i915_drm.h20
-rw-r--r--include/uapi/drm/omap_drm.h (renamed from drivers/staging/omapdrm/omap_drm.h)2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/btrfs.h (renamed from fs/btrfs/ioctl.h)18
-rw-r--r--include/uapi/linux/cdrom.h34
-rw-r--r--include/uapi/linux/dm-ioctl.h11
-rw-r--r--include/uapi/linux/dvb/frontend.h79
-rw-r--r--include/uapi/linux/dvb/version.h2
-rw-r--r--include/uapi/linux/ipmi.h10
-rw-r--r--include/uapi/linux/kvm.h27
-rw-r--r--include/uapi/linux/meye.h8
-rw-r--r--include/uapi/linux/msdos_fs.h38
-rw-r--r--include/uapi/linux/nbd.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h33
-rw-r--r--include/uapi/linux/v4l2-mediabus.h11
-rw-r--r--include/uapi/linux/vfio.h9
-rw-r--r--include/uapi/linux/videodev2.h35
-rw-r--r--include/uapi/linux/xattr.h13
-rw-r--r--include/uapi/rdma/ib_user_verbs.h16
-rw-r--r--include/uapi/scsi/Kbuild3
-rw-r--r--include/uapi/scsi/fc/Kbuild4
-rw-r--r--include/uapi/scsi/fc/fc_els.h (renamed from include/scsi/fc/fc_els.h)0
-rw-r--r--include/uapi/scsi/fc/fc_fs.h (renamed from include/scsi/fc/fc_fs.h)0
-rw-r--r--include/uapi/scsi/fc/fc_gs.h (renamed from include/scsi/fc/fc_gs.h)0
-rw-r--r--include/uapi/scsi/fc/fc_ns.h (renamed from include/scsi/fc/fc_ns.h)0
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h (renamed from include/scsi/scsi_bsg_fc.h)0
-rw-r--r--include/uapi/scsi/scsi_netlink.h (renamed from include/scsi/scsi_netlink.h)0
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h (renamed from include/scsi/scsi_netlink_fc.h)0
-rw-r--r--include/video/display_timing.h124
-rw-r--r--include/video/of_display_timing.h20
-rw-r--r--include/video/of_videomode.h18
-rw-r--r--include/video/videomode.h48
-rw-r--r--include/xen/acpi.h35
-rw-r--r--include/xen/interface/memory.h6
-rw-r--r--include/xen/interface/platform.h21
-rw-r--r--include/xen/interface/xen.h8
-rw-r--r--init/Kconfig48
-rw-r--r--ipc/mqueue.c17
-rw-r--r--ipc/shm.c11
-rw-r--r--ipc/util.c30
-rw-r--r--kernel/Makefile44
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/cgroup.c49
-rw-r--r--kernel/debug/debug_core.h2
-rw-r--r--kernel/debug/gdbstub.c3
-rw-r--r--kernel/debug/kdb/kdb_bp.c20
-rw-r--r--kernel/debug/kdb/kdb_debugger.c25
-rw-r--r--kernel/debug/kdb/kdb_main.c135
-rw-r--r--kernel/debug/kdb/kdb_private.h4
-rw-r--r--kernel/events/core.c18
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c9
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/kexec.c44
-rw-r--r--kernel/kprobes.c35
-rw-r--r--kernel/lockdep.c17
-rw-r--r--kernel/module.c142
-rw-r--r--kernel/nsproxy.c2
-rw-r--r--kernel/panic.c34
-rw-r--r--kernel/pid.c3
-rw-r--r--kernel/posix-timers.c25
-rw-r--r--kernel/printk.c9
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/core.c112
-rw-r--r--kernel/sched/cputime.c2
-rw-r--r--kernel/sched/debug.c90
-rw-r--r--kernel/sched/stats.c79
-rw-r--r--kernel/signal.c16
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/sys.c23
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/sysctl_binary.c42
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/timeconst.bc108
-rw-r--r--kernel/timeconst.pl376
-rw-r--r--kernel/trace/blktrace.c28
-rw-r--r--kernel/trace/ftrace.c70
-rw-r--r--kernel/trace/trace_output.c3
-rw-r--r--kernel/trace/trace_syscalls.c43
-rw-r--r--kernel/tracepoint.c6
-rw-r--r--kernel/user-return-notifier.c4
-rw-r--r--kernel/user.c7
-rw-r--r--kernel/user_namespace.c62
-rw-r--r--kernel/utsname.c2
-rw-r--r--kernel/utsname_sysctl.c3
-rw-r--r--kernel/workqueue.c13
-rw-r--r--lib/Kconfig.kgdb18
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bug.c3
-rw-r--r--lib/checksum.c2
-rw-r--r--lib/debugobjects.c21
-rw-r--r--lib/decompress_unlzo.c2
-rw-r--r--lib/devres.c2
-rw-r--r--lib/idr.c446
-rw-r--r--lib/kfifo.c (renamed from kernel/kfifo.c)6
-rw-r--r--lib/lru_cache.c3
-rw-r--r--lib/lzo/Makefile2
-rw-r--r--lib/lzo/lzo1x_compress.c335
-rw-r--r--lib/lzo/lzo1x_decompress.c255
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c237
-rw-r--r--lib/lzo/lzodefs.h38
-rw-r--r--lib/scatterlist.c86
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/cleancache.c2
-rw-r--r--mm/fadvise.c2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/hugetlb.c4
-rw-r--r--mm/internal.h2
-rw-r--r--mm/kmemleak.c9
-rw-r--r--mm/ksm.c15
-rw-r--r--mm/memblock.c50
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mlock.c34
-rw-r--r--mm/mmap.c35
-rw-r--r--mm/mmu_notifier.c18
-rw-r--r--mm/nommu.c12
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c287
-rw-r--r--mm/shmem.c54
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slub.c2
-rw-r--r--mm/swapfile.c2
-rw-r--r--net/9p/client.c43
-rw-r--r--net/9p/error.c4
-rw-r--r--net/9p/protocol.c49
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/9p/util.c17
-rw-r--r--net/appletalk/ddp.c9
-rw-r--r--net/atm/common.c7
-rw-r--r--net/atm/lec.c66
-rw-r--r--net/atm/proc.c2
-rw-r--r--net/atm/signaling.c3
-rw-r--r--net/ax25/af_ax25.c15
-rw-r--r--net/ax25/ax25_ds_subr.c6
-rw-r--r--net/ax25/ax25_ds_timer.c3
-rw-r--r--net/ax25/ax25_iface.c3
-rw-r--r--net/ax25/ax25_uid.c11
-rw-r--r--net/batman-adv/bat_iv_ogm.c12
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c39
-rw-r--r--net/batman-adv/distributed-arp-table.c15
-rw-r--r--net/batman-adv/gateway_client.c13
-rw-r--r--net/batman-adv/main.c6
-rw-r--r--net/batman-adv/originator.c31
-rw-r--r--net/batman-adv/originator.h3
-rw-r--r--net/batman-adv/routing.c6
-rw-r--r--net/batman-adv/send.c6
-rw-r--r--net/batman-adv/translation-table.c82
-rw-r--r--net/batman-adv/vis.c38
-rw-r--r--net/bluetooth/hci_sock.c15
-rw-r--r--net/bluetooth/rfcomm/sock.c13
-rw-r--r--net/bluetooth/sco.c14
-rw-r--r--net/bridge/br_fdb.c23
-rw-r--r--net/bridge/br_mdb.c6
-rw-r--r--net/bridge/br_multicast.c25
-rw-r--r--net/can/af_can.c18
-rw-r--r--net/can/gw.c15
-rw-r--r--net/can/proc.c3
-rw-r--r--net/ceph/ceph_common.c27
-rw-r--r--net/ceph/ceph_strings.c39
-rw-r--r--net/ceph/crush/mapper.c15
-rw-r--r--net/ceph/crypto.c7
-rw-r--r--net/ceph/debugfs.c29
-rw-r--r--net/ceph/messenger.c260
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/ceph/osd_client.c635
-rw-r--r--net/ceph/osdmap.c290
-rw-r--r--net/ceph/pagevec.c24
-rw-r--r--net/core/dev.c16
-rw-r--r--net/core/flow.c11
-rw-r--r--net/core/net-procfs.c3
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/sock_diag.c27
-rw-r--r--net/decnet/af_decnet.c9
-rw-r--r--net/decnet/dn_table.c13
-rw-r--r--net/ieee802154/dgram.c3
-rw-r--r--net/ieee802154/raw.c3
-rw-r--r--net/ipv4/af_inet.c17
-rw-r--r--net/ipv4/devinet.c10
-rw-r--r--net/ipv4/fib_frontend.c15
-rw-r--r--net/ipv4/fib_semantics.c23
-rw-r--r--net/ipv4/fib_trie.c33
-rw-r--r--net/ipv4/icmp.c23
-rw-r--r--net/ipv4/inet_connection_sock.c10
-rw-r--r--net/ipv4/inet_fragment.c10
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/inet_timewait_sock.c7
-rw-r--r--net/ipv4/ip_gre.c25
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c8
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv4/tcp_ipv4.c7
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv6/addrconf.c32
-rw-r--r--net/ipv6/addrlabel.c18
-rw-r--r--net/ipv6/inet6_connection_sock.c5
-rw-r--r--net/ipv6/ip6_fib.c12
-rw-r--r--net/ipv6/raw.c3
-rw-r--r--net/ipv6/xfrm6_tunnel.c10
-rw-r--r--net/ipx/af_ipx.c16
-rw-r--r--net/ipx/ipx_proc.c5
-rw-r--r--net/iucv/af_iucv.c21
-rw-r--r--net/key/af_key.c3
-rw-r--r--net/l2tp/l2tp_core.c12
-rw-r--r--net/l2tp/l2tp_ip.c3
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/llc/llc_sap.c3
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c45
-rw-r--r--net/mac80211/tx.c18
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c26
-rw-r--r--net/netfilter/nf_conntrack_expect.c17
-rw-r--r--net/netfilter/nf_conntrack_helper.c13
-rw-r--r--net/netfilter/nf_conntrack_netlink.c9
-rw-r--r--net/netfilter/nf_conntrack_sip.c8
-rw-r--r--net/netfilter/nf_nat_core.c3
-rw-r--r--net/netfilter/nfnetlink_cthelper.c17
-rw-r--r--net/netfilter/nfnetlink_log.c7
-rw-r--r--net/netfilter/nfnetlink_queue_core.c10
-rw-r--r--net/netfilter/xt_RATEEST.c3
-rw-r--r--net/netfilter/xt_connlimit.c8
-rw-r--r--net/netfilter/xt_hashlimit.c16
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netlink/af_netlink.c32
-rw-r--r--net/netrom/af_netrom.c12
-rw-r--r--net/netrom/nr_route.c30
-rw-r--r--net/nfc/llcp/llcp.c16
-rw-r--r--net/openvswitch/datapath.c10
-rw-r--r--net/openvswitch/flow.c13
-rw-r--r--net/openvswitch/vport.c3
-rw-r--r--net/packet/af_packet.c3
-rw-r--r--net/packet/diag.c3
-rw-r--r--net/phonet/pep.c3
-rw-r--r--net/phonet/socket.c9
-rw-r--r--net/rds/bind.c3
-rw-r--r--net/rds/connection.c9
-rw-r--r--net/rose/af_rose.c14
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_cbq.c18
-rw-r--r--net/sched/sch_drr.c10
-rw-r--r--net/sched/sch_hfsc.c15
-rw-r--r--net/sched/sch_htb.c12
-rw-r--r--net/sched/sch_qfq.c16
-rw-r--r--net/sctp/associola.c31
-rw-r--r--net/sctp/endpointola.c3
-rw-r--r--net/sctp/input.c6
-rw-r--r--net/sctp/proc.c9
-rw-r--r--net/sctp/socket.c9
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/addr.c3
-rw-r--r--net/sunrpc/auth.c11
-rw-r--r--net/sunrpc/auth_generic.c16
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c47
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c36
-rw-r--r--net/sunrpc/auth_unix.c36
-rw-r--r--net/sunrpc/cache.c83
-rw-r--r--net/sunrpc/clnt.c16
-rw-r--r--net/sunrpc/rpc_pipe.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c1
-rw-r--r--net/sunrpc/svc.c13
-rw-r--r--net/sunrpc/svc_xprt.c81
-rw-r--r--net/sunrpc/svcauth.c3
-rw-r--r--net/sunrpc/svcauth_unix.c59
-rw-r--r--net/sunrpc/xdr.c41
-rw-r--r--net/sunrpc/xprt.c6
-rw-r--r--net/sunrpc/xprtrdma/transport.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c20
-rw-r--r--net/sunrpc/xprtsock.c35
-rw-r--r--net/tipc/name_table.c8
-rw-r--r--net/tipc/node.c3
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/unix/diag.c7
-rw-r--r--net/unix/garbage.c2
-rw-r--r--net/x25/af_x25.c12
-rw-r--r--net/xfrm/xfrm_policy.c47
-rw-r--r--net/xfrm/xfrm_state.c42
-rw-r--r--scripts/Makefile.headersinst6
-rw-r--r--scripts/Makefile.modpost7
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/coccicheck39
-rw-r--r--scripts/coccinelle/misc/memcpy-assign.cocci103
-rw-r--r--scripts/coccinelle/misc/orplus.cocci55
-rw-r--r--scripts/coccinelle/misc/semicolon.cocci83
-rwxr-xr-xscripts/depmod.sh26
-rwxr-xr-xscripts/get_maintainer.pl4
-rw-r--r--scripts/kconfig/Makefile3
-rw-r--r--scripts/kconfig/conf.c7
-rw-r--r--scripts/kconfig/expr.c10
-rw-r--r--scripts/kconfig/gconf.c2
-rw-r--r--scripts/kconfig/lkc.h8
-rw-r--r--scripts/kconfig/lxdialog/check-lxdialog.sh1
-rw-r--r--scripts/kconfig/lxdialog/dialog.h1
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c121
-rw-r--r--scripts/kconfig/lxdialog/menubox.c20
-rw-r--r--scripts/kconfig/mconf.c54
-rw-r--r--scripts/kconfig/menu.c4
-rwxr-xr-xscripts/kconfig/merge_config.sh20
-rw-r--r--scripts/kconfig/nconf.c340
-rw-r--r--scripts/kconfig/nconf.gui.c2
-rw-r--r--scripts/kconfig/qconf.cc1
-rw-r--r--scripts/kconfig/symbol.c12
-rw-r--r--scripts/kconfig/util.c23
-rw-r--r--scripts/kconfig/zconf.l8
-rw-r--r--scripts/kconfig/zconf.lex.c_shipped8
-rwxr-xr-xscripts/kernel-doc2
-rw-r--r--scripts/link-vmlinux.sh9
-rw-r--r--scripts/mod/.gitignore1
-rw-r--r--scripts/mod/Makefile35
-rw-r--r--scripts/mod/devicetable-offsets.c178
-rw-r--r--scripts/mod/file2alias.c674
-rw-r--r--scripts/mod/modpost.c7
-rwxr-xr-xscripts/package/mkspec2
-rwxr-xr-xscripts/setlocalversion2
-rwxr-xr-xscripts/sign-file134
-rwxr-xr-xscripts/tags.sh50
-rw-r--r--security/apparmor/domain.c4
-rw-r--r--security/apparmor/file.c4
-rw-r--r--security/apparmor/lsm.c6
-rw-r--r--security/commoncap.c2
-rw-r--r--security/integrity/ima/ima_api.c6
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_main.c7
-rw-r--r--security/integrity/ima/ima_policy.c11
-rw-r--r--security/integrity/ima/ima_queue.c3
-rw-r--r--security/selinux/avc.c19
-rw-r--r--security/selinux/hooks.c10
-rw-r--r--security/selinux/selinuxfs.c20
-rw-r--r--security/smack/smack_lsm.c14
-rw-r--r--security/tomoyo/securityfs_if.c2
-rw-r--r--sound/core/info.c2
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/oss/msnd_pinnacle.c6
-rw-r--r--sound/oss/soundcard.c10
-rw-r--r--sound/pci/bt87x.c19
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c6
-rw-r--r--sound/pci/emu10k1/emupcm.c8
-rw-r--r--sound/pci/hda/patch_hdmi.c3
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pci/ice1712/revo.c37
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/sound_firmware.c2
-rw-r--r--tools/perf/perf.h6
-rw-r--r--tools/perf/util/evlist.c3
-rw-r--r--tools/testing/ktest/examples/include/patchcheck.conf37
-rwxr-xr-xtools/testing/ktest/ktest.pl190
-rw-r--r--tools/testing/ktest/sample.conf44
-rw-r--r--tools/testing/selftests/Makefile8
-rw-r--r--tools/testing/selftests/README.txt42
-rw-r--r--tools/testing/selftests/efivarfs/Makefile12
-rw-r--r--tools/testing/selftests/efivarfs/create-read.c38
-rw-r--r--tools/testing/selftests/efivarfs/efivarfs.sh139
-rw-r--r--tools/testing/selftests/efivarfs/open-unlink.c63
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/ioapic.c48
-rw-r--r--virt/kvm/ioapic.h4
-rw-r--r--virt/kvm/iommu.c4
-rw-r--r--virt/kvm/irq_comm.c39
-rw-r--r--virt/kvm/kvm_main.c201
4229 files changed, 176066 insertions, 70766 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 50e2a80ea28f..21640eaad371 100644
--- a/Documentation/ABI/testing/sysfs-bus-fcoe
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -1,14 +1,53 @@
-What: /sys/bus/fcoe/ctlr_X
+What: /sys/bus/fcoe/
+Date: August 2012
+KernelVersion: TBD
+Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description: The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+ ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+ <ifname> to this file will allocate and populate sysfs with a
+ fcoe_ctlr_device (ctlr_X). The user can then configure any
+ per-port settings and finally write to the fcoe_ctlr_device's
+ 'start' attribute to begin the kernel's discovery and login
+ process.
+
+	ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+		      fcoe_ctlr_device's sysfs name to this file will log the
+		      fcoe_ctlr_device out of the fabric or any otherwise-connected
+		      FCoE devices. It will also free all kernel memory allocated
+		      for this fcoe_ctlr_device and any structures associated
+		      with it, including the scsi_host.
+
+What: /sys/bus/fcoe/devices/ctlr_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description: 'FCoE Controller' instances on the fcoe bus
+Description: 'FCoE Controller' instances on the fcoe bus.
+	The FCoE Controller now has a three-stage creation process:
+	1) write the interface name to ctlr_create, 2) configure the FCoE
+	Controller (ctlr_X), and 3) enable the FCoE Controller to begin
+	discovery and login. The FCoE Controller is destroyed by
+	writing its name, i.e. ctlr_X, to the ctlr_destroy file.
+
Attributes:
fcf_dev_loss_tmo: Device loss timeout peroid (see below). Changing
this value will change the dev_loss_tmo for all
FCFs discovered by this controller.
+ mode: Display or change the FCoE Controller's mode. Possible
+		modes are 'Fabric' and 'VN2VN'. If an FCoE Controller
+		is started in 'Fabric' mode then FIP FCF discovery is
+		initiated and ultimately a fabric login is attempted.
+		If an FCoE Controller is started in 'VN2VN' mode then
+		FIP VN2VN discovery and login are performed. An FCoE
+		Controller only supports one mode at a time.
+
+ enabled: Whether an FCoE controller is enabled or disabled.
+ 0 if disabled, 1 if enabled. Writing either 0 or 1
+ to this file will enable or disable the FCoE controller.
+
lesb/link_fail: Link Error Status Block (LESB) link failure count.
lesb/vlink_fail: Link Error Status Block (LESB) virtual link
@@ -26,7 +65,7 @@ Attributes:
Notes: ctlr_X (global increment starting at 0)
-What: /sys/bus/fcoe/fcf_X
+What: /sys/bus/fcoe/devices/fcf_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
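
As a rough user-space illustration of the three-stage bring-up described in this ABI file, the sketch below writes to the control interfaces from C. The interface name "eth0", the resulting instance name "ctlr_0" and the use of the 'mode' and 'enabled' attributes are assumptions for the example only; error handling is trimmed.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a string to a sysfs attribute and close the file again. */
	static int sysfs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		/* 1) Create the FCoE Controller for the chosen interface. */
		sysfs_write("/sys/bus/fcoe/ctlr_create", "eth0");

		/* 2) Configure the new ctlr_X instance, e.g. its mode. */
		sysfs_write("/sys/bus/fcoe/devices/ctlr_0/mode", "Fabric");

		/* 3) Enable the controller to begin discovery and login. */
		sysfs_write("/sys/bus/fcoe/devices/ctlr_0/enabled", "1");

		return 0;
	}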
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 4a4fb295ceef..14129f149a75 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -488,9 +488,10 @@ will invoke the generic mapping error check interface. Doing so will ensure
that the mapping code will work correctly on all dma implementations without
any dependency on the specifics of the underlying implementation. Using the
returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. Couple of example of incorrect ways to
-check for errors that make assumptions about the underlying dma implementation
-are as follows and these are applicable to dma_map_page() as well.
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying dma
+implementation are as follows; these are applicable to dma_map_page() as
+well.
Incorrect example 1:
dma_addr_t dma_handle;
@@ -751,7 +752,7 @@ Example 1:
dma_unmap_single(dma_handle1);
map_error_handling1:
-Example 2: (if buffers are allocated a loop, unmap all mapped buffers when
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
mapping error is detected in the middle)
dma_addr_t dma_addr;
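
For reference, the checking pattern recommended above boils down to validating the handle with dma_mapping_error() instead of comparing it against a magic value. A minimal kernel-side sketch, with a placeholder device pointer and DMA direction:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Map one buffer and check the result the portable way. */
	static int foo_map_buffer(struct device *dev, void *buf, size_t size,
				  dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;	/* do not use *handle on failure */
		return 0;
	}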
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 4ee2304f82f9..f9df3b872c16 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -743,6 +743,10 @@ char *date;</synopsis>
These two operations are mandatory for GEM drivers that support DRM
PRIME.
</para>
+ <sect4>
+ <title>DRM PRIME Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+ </sect4>
</sect3>
<sect3 id="drm-gem-objects-mapping">
<title>GEM Objects Mapping</title>
@@ -978,10 +982,25 @@ int max_width, max_height;</synopsis>
If the parameters are deemed valid, drivers then create, initialize and
return an instance of struct <structname>drm_framebuffer</structname>.
If desired the instance can be embedded in a larger driver-specific
- structure. The new instance is initialized with a call to
- <function>drm_framebuffer_init</function> which takes a pointer to DRM
- frame buffer operations (struct
- <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are
+ structure. Drivers must fill its <structfield>width</structfield>,
+ <structfield>height</structfield>, <structfield>pitches</structfield>,
+ <structfield>offsets</structfield>, <structfield>depth</structfield>,
+ <structfield>bits_per_pixel</structfield> and
+ <structfield>pixel_format</structfield> fields from the values passed
+ through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
+ should call the <function>drm_helper_mode_fill_fb_struct</function>
+ helper function to do so.
+ </para>
+
+ <para>
+      The initialization of the new framebuffer instance is finalized with a
+ call to <function>drm_framebuffer_init</function> which takes a pointer
+ to DRM frame buffer operations (struct
+ <structname>drm_framebuffer_funcs</structname>). Note that this function
+ publishes the framebuffer and so from this point on it can be accessed
+ concurrently from other threads. Hence it must be the last step in the
+ driver's framebuffer initialization sequence. Frame buffer operations
+ are
<itemizedlist>
<listitem>
<synopsis>int (*create_handle)(struct drm_framebuffer *fb,
@@ -1022,16 +1041,16 @@ int max_width, max_height;</synopsis>
</itemizedlist>
</para>
<para>
- After initializing the <structname>drm_framebuffer</structname>
- instance drivers must fill its <structfield>width</structfield>,
- <structfield>height</structfield>, <structfield>pitches</structfield>,
- <structfield>offsets</structfield>, <structfield>depth</structfield>,
- <structfield>bits_per_pixel</structfield> and
- <structfield>pixel_format</structfield> fields from the values passed
- through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
- should call the <function>drm_helper_mode_fill_fb_struct</function>
- helper function to do so.
- </para>
+      The lifetime of a drm framebuffer is controlled with a reference count.
+      Drivers can grab additional references with
+      <function>drm_framebuffer_reference</function> and drop them
+      again with <function>drm_framebuffer_unreference</function>. For
+      driver-private framebuffers for which the last reference is never
+      dropped (e.g. for the fbdev framebuffer when the struct
+      <structname>drm_framebuffer</structname> is embedded into the fbdev
+      helper struct) drivers can manually clean up a framebuffer at module
+      unload time with
+      <function>drm_framebuffer_unregister_private</function>.
+    </para>
</sect2>
<sect2>
<title>Output Polling</title>
@@ -1043,6 +1062,22 @@ int max_width, max_height;</synopsis>
operation.
</para>
</sect2>
+ <sect2>
+ <title>Locking</title>
+ <para>
+      Besides some lookup structures with their own locking (which is hidden
+      behind the interface functions) most of the modeset state is protected
+      by the <code>dev-&gt;mode_config.lock</code> mutex and additionally by
+      per-crtc locks to allow cursor updates, pageflips and similar operations
+      to occur concurrently with background tasks like output detection.
+      Operations which cross domains like a full modeset always grab all
+      locks. Drivers therefore need to protect resources shared between crtcs
+      with additional locking. They also need to be careful to always grab the
+      relevant crtc locks if a modeset function touches crtc state, e.g. for
+      load detection (which only grabs the <code>mode_config.lock</code>
+      to allow concurrent screen updates on live crtcs).
+ </para>
+ </sect2>
</sect1>
<!-- Internals: kms initialization and cleanup -->
@@ -1126,6 +1161,12 @@ int max_width, max_height;</synopsis>
any new rendering to the frame buffer until the page flip completes.
</para>
<para>
+ If a page flip can be successfully scheduled the driver must set the
+      <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
+ by <code>fb</code>. This is important so that the reference counting
+ on framebuffers stays balanced.
+ </para>
+ <para>
If a page flip is already pending, the
<methodname>page_flip</methodname> operation must return
-<errorname>EBUSY</errorname>.
@@ -1609,6 +1650,10 @@ void intel_crt_init(struct drm_device *dev)
make its properties available to applications.
</para>
</sect2>
+ <sect2>
+ <title>KMS API Functions</title>
+!Edrivers/gpu/drm/drm_crtc.c
+ </sect2>
</sect1>
<!-- Internals: kms helper functions -->
@@ -2104,6 +2149,7 @@ void intel_crt_init(struct drm_device *dev)
<title>fbdev Helper Functions Reference</title>
!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
!Edrivers/gpu/drm/drm_fb_helper.c
+!Iinclude/drm/drm_fb_helper.h
</sect2>
<sect2>
<title>Display Port Helper Functions Reference</title>
@@ -2111,6 +2157,10 @@ void intel_crt_init(struct drm_device *dev)
!Iinclude/drm/drm_dp_helper.h
!Edrivers/gpu/drm/drm_dp_helper.c
</sect2>
+ <sect2>
+ <title>EDID Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_edid.c
+ </sect2>
</sect1>
<!-- Internals: vertical blanking -->
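
A minimal sketch of the framebuffer creation order described above: fill the fields from the mode_cmd first, publish with drm_framebuffer_init() last. The foo_* names are hypothetical driver code, not an existing driver, and locking and validation are omitted.

	#include <linux/slab.h>
	#include <linux/err.h>
	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_crtc_helper.h>

	struct foo_framebuffer {
		struct drm_framebuffer base;
		/* driver-specific backing storage handle would live here */
	};

	static void foo_fb_destroy(struct drm_framebuffer *fb)
	{
		drm_framebuffer_cleanup(fb);
		kfree(container_of(fb, struct foo_framebuffer, base));
	}

	static const struct drm_framebuffer_funcs foo_fb_funcs = {
		.destroy = foo_fb_destroy,
	};

	static struct drm_framebuffer *
	foo_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct foo_framebuffer *foo_fb;
		int ret;

		foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
		if (!foo_fb)
			return ERR_PTR(-ENOMEM);

		/* Fill width, height, pitches, offsets, depth, bpp and
		 * pixel_format from the values passed in by userspace. */
		drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);

		/* Publishing the framebuffer must be the very last step. */
		ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
		if (ret) {
			kfree(foo_fb);
			return ERR_PTR(ret);
		}
		return &foo_fb->base;
	}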
diff --git a/Documentation/DocBook/media/dvb/dvbapi.xml b/Documentation/DocBook/media/dvb/dvbapi.xml
index 757488b24f4f..0197bcc7842d 100644
--- a/Documentation/DocBook/media/dvb/dvbapi.xml
+++ b/Documentation/DocBook/media/dvb/dvbapi.xml
@@ -84,7 +84,7 @@ Added ISDB-T test originally written by Patrick Boettcher
<title>LINUX DVB API</title>
-<subtitle>Version 5.8</subtitle>
+<subtitle>Version 5.10</subtitle>
<!-- ADD THE CHAPTERS HERE -->
<chapter id="dvb_introdution">
&sub-intro;
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
index 957e3acaae8e..4a5eaeed0b9e 100644
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -7,14 +7,41 @@ the capability ioctls weren't implemented yet via the new way.</para>
<para>The typical usage for the <constant>FE_GET_PROPERTY/FE_SET_PROPERTY</constant>
API is to replace the ioctl's were the <link linkend="dvb-frontend-parameters">
struct <constant>dvb_frontend_parameters</constant></link> were used.</para>
+<section id="dtv-stats">
+<title>DTV stats type</title>
+<programlisting>
+struct dtv_stats {
+ __u8 scale; /* enum fecap_scale_params type */
+ union {
+ __u64 uvalue; /* for counters and relative scales */
+ __s64 svalue; /* for 1/1000 dB measures */
+ };
+} __packed;
+</programlisting>
+</section>
+<section id="dtv-fe-stats">
+<title>DTV FE stats type</title>
+<programlisting>
+#define MAX_DTV_STATS 4
+
+struct dtv_fe_stats {
+ __u8 len;
+ struct dtv_stats stat[MAX_DTV_STATS];
+} __packed;
+</programlisting>
+</section>
+
<section id="dtv-property">
<title>DTV property type</title>
<programlisting>
/* Reserved fields should be set to 0 */
+
struct dtv_property {
__u32 cmd;
+ __u32 reserved[3];
union {
__u32 data;
+ struct dtv_fe_stats st;
struct {
__u8 data[32];
__u32 len;
@@ -440,7 +467,7 @@ typedef enum fe_delivery_system {
<title><constant>DTV-ISDBT-LAYER*</constant> parameters</title>
<para>ISDB-T channels can be coded hierarchically. As opposed to DVB-T in
ISDB-T hierarchical layers can be decoded simultaneously. For that
- reason a ISDB-T demodulator has 3 viterbi and 3 reed-solomon-decoders.</para>
+	reason an ISDB-T demodulator has 3 Viterbi and 3 Reed-Solomon decoders.</para>
<para>ISDB-T has 3 hierarchical layers which each can use a part of the
available segments. The total number of segments over all layers has
to 13 in ISDB-T.</para>
@@ -850,6 +877,147 @@ enum fe_interleaving {
<para>use the special macro LNA_AUTO to set LNA auto</para>
</section>
</section>
+
+ <section id="frontend-stat-properties">
+ <title>Frontend statistics indicators</title>
+ <para>The values are returned via <constant>dtv_property.stat</constant>.
+	If the property is supported, <constant>dtv_property.stat.len</constant> is greater than zero.</para>
+	<para>For most delivery systems, <constant>dtv_property.stat.len</constant>
+	will be 1 if the statistic is supported, and the properties will
+	return a single value for each parameter.</para>
+	<para>It should be noted, however, that new OFDM delivery systems
+	like ISDB can use different modulation types for each group of
+	carriers. On such standards, up to 3 groups of statistics can be
+	provided, and <constant>dtv_property.stat.len</constant> is updated
+	to reflect the "global" metrics, plus one metric per carrier
+	group (called "layer" on ISDB).</para>
+	<para>So, in order to be consistent with other delivery systems, the first
+	value of the <link linkend="dtv-stats"><constant>dtv_property.stat.dtv_stats</constant></link>
+	array refers to the global metric. The other elements of the array
+	represent each layer, starting from layer A (index 1),
+	layer B (index 2) and so on.</para>
+	<para>The number of filled elements is stored in <constant>dtv_property.stat.len</constant>.</para>
+	<para>Each element of the <constant>dtv_property.stat.dtv_stats</constant> array consists of two elements:</para>
+ <itemizedlist mark='opencircle'>
+ <listitem><para><constant>svalue</constant> or <constant>uvalue</constant>, where
+ <constant>svalue</constant> is for signed values of the measure (dB measures)
+ and <constant>uvalue</constant> is for unsigned values (counters, relative scale)</para></listitem>
+ <listitem><para><constant>scale</constant> - Scale for the value. It can be:</para>
+ <section id = "fecap-scale-params">
+ <itemizedlist mark='bullet'>
+ <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - The parameter is supported by the frontend, but it was not possible to collect it (could be a transitory or permanent condition)</para></listitem>
+ <listitem><para><constant>FE_SCALE_DECIBEL</constant> - parameter is a signed value, measured in 1/1000 dB</para></listitem>
+			<listitem><para><constant>FE_SCALE_RELATIVE</constant> - parameter is an unsigned value, where 0 means 0% and 65535 means 100%.</para></listitem>
+			<listitem><para><constant>FE_SCALE_COUNTER</constant> - parameter is an unsigned value that counts the occurrence of an event, like bit error, block error, or elapsed time.</para></listitem>
+ </itemizedlist>
+ </section>
+ </listitem>
+ </itemizedlist>
+ <section id="DTV-STAT-SIGNAL-STRENGTH">
+ <title><constant>DTV_STAT_SIGNAL_STRENGTH</constant></title>
+ <para>Indicates the signal strength level at the analog part of the tuner or of the demod.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+		<listitem><constant>FE_SCALE_DECIBEL</constant> - signal strength is in 0.001 dBm units, power measured in milliwatts. This value is generally negative.</listitem>
+ <listitem><constant>FE_SCALE_RELATIVE</constant> - The frontend provides a 0% to 100% measurement for power (actually, 0 to 65535).</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-CNR">
+ <title><constant>DTV_STAT_CNR</constant></title>
+ <para>Indicates the Signal to Noise ratio for the main carrier.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+		<listitem><constant>FE_SCALE_DECIBEL</constant> - Signal/Noise ratio is in 0.001 dB units.</listitem>
+ <listitem><constant>FE_SCALE_RELATIVE</constant> - The frontend provides a 0% to 100% measurement for Signal/Noise (actually, 0 to 65535).</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-PRE-ERROR-BIT-COUNT">
+ <title><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></title>
+ <para>Measures the number of bit errors before the forward error correction (FEC) on the inner coding block (before Viterbi, LDPC or other inner code).</para>
+ <para>This measure is taken during the same interval as <constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant>.</para>
+ <para>In order to get the BER (Bit Error Rate) measurement, it should be divided by
+ <link linkend="DTV-STAT-PRE-TOTAL-BIT-COUNT"><constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant></link>.</para>
+ <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
+ The frontend may reset it when a channel/transponder is tuned.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of error bits counted before the inner coding.</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-PRE-TOTAL-BIT-COUNT">
+ <title><constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant></title>
+	<para>Measures the number of bits received before the inner code block, during the same period as the
+	<link linkend="DTV-STAT-PRE-ERROR-BIT-COUNT"><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></link> measurement was taken.</para>
+	<para>It should be noted that this measurement can be smaller than the total number of bits on the transport stream,
+	as the frontend may need to manually restart the measurement, losing some data between each measurement interval.</para>
+ <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
+ The frontend may reset it when a channel/transponder is tuned.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of bits counted while measuring
+ <link linkend="DTV-STAT-PRE-ERROR-BIT-COUNT"><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></link>.</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-POST-ERROR-BIT-COUNT">
+ <title><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></title>
+	<para>Measures the number of bit errors after the forward error correction (FEC) done by the inner code block (after Viterbi, LDPC or other inner code).</para>
+ <para>This measure is taken during the same interval as <constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant>.</para>
+ <para>In order to get the BER (Bit Error Rate) measurement, it should be divided by
+ <link linkend="DTV-STAT-POST-TOTAL-BIT-COUNT"><constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant></link>.</para>
+ <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
+ The frontend may reset it when a channel/transponder is tuned.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of error bits counted after the inner coding.</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-POST-TOTAL-BIT-COUNT">
+ <title><constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant></title>
+	<para>Measures the number of bits received after the inner coding, during the same period as the
+	<link linkend="DTV-STAT-POST-ERROR-BIT-COUNT"><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></link> measurement was taken.</para>
+	<para>It should be noted that this measurement can be smaller than the total number of bits on the transport stream,
+	as the frontend may need to manually restart the measurement, losing some data between each measurement interval.</para>
+ <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
+ The frontend may reset it when a channel/transponder is tuned.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of bits counted while measuring
+ <link linkend="DTV-STAT-POST-ERROR-BIT-COUNT"><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></link>.</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-ERROR-BLOCK-COUNT">
+ <title><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></title>
+ <para>Measures the number of block errors after the outer forward error correction coding (after Reed-Solomon or other outer code).</para>
+ <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
+ The frontend may reset it when a channel/transponder is tuned.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of error blocks counted after the outer coding.</listitem>
+ </itemizedlist>
+ </section>
+ <section id="DTV-STAT-TOTAL-BLOCK-COUNT">
+	<title><constant>DTV_STAT_TOTAL_BLOCK_COUNT</constant></title>
+ <para>Measures the total number of blocks received during the same period as
+ <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link> measurement was taken.</para>
+ <para>It can be used to calculate the PER indicator, by dividing
+ <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link>
+	by <link linkend="DTV-STAT-TOTAL-BLOCK-COUNT"><constant>DTV_STAT_TOTAL_BLOCK_COUNT</constant></link>.</para>
+ <para>Possible scales for this metric are:</para>
+ <itemizedlist mark='bullet'>
+ <listitem><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</listitem>
+ <listitem><constant>FE_SCALE_COUNTER</constant> - Number of blocks counted while measuring
+ <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link>.</listitem>
+ </itemizedlist>
+ </section>
+ </section>
+
<section id="frontend-property-terrestrial-systems">
<title>Properties used on terrestrial delivery systems</title>
<section id="dvbt-params">
@@ -871,6 +1039,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
<listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="dvbt2-params">
<title>DVB-T2 delivery system</title>
@@ -895,6 +1064,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-STREAM-ID"><constant>DTV_STREAM_ID</constant></link></para></listitem>
<listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="isdbt">
<title>ISDB-T delivery system</title>
@@ -948,6 +1118,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-ISDBT-LAYER-SEGMENT-COUNT"><constant>DTV_ISDBT_LAYERC_SEGMENT_COUNT</constant></link></para></listitem>
<listitem><para><link linkend="DTV-ISDBT-LAYER-TIME-INTERLEAVING"><constant>DTV_ISDBT_LAYERC_TIME_INTERLEAVING</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="atsc-params">
<title>ATSC delivery system</title>
@@ -961,6 +1132,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
<listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="atscmh-params">
<title>ATSC-MH delivery system</title>
@@ -988,6 +1160,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-C"><constant>DTV_ATSCMH_SCCC_CODE_MODE_C</constant></link></para></listitem>
<listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-D"><constant>DTV_ATSCMH_SCCC_CODE_MODE_D</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="dtmb-params">
<title>DTMB delivery system</title>
@@ -1007,6 +1180,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-INTERLEAVING"><constant>DTV_INTERLEAVING</constant></link></para></listitem>
<listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
</section>
<section id="frontend-property-cable-systems">
@@ -1028,6 +1202,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-INNER-FEC"><constant>DTV_INNER_FEC</constant></link></para></listitem>
<listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="dvbc-annex-b-params">
<title>DVB-C Annex B delivery system</title>
@@ -1043,6 +1218,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
<listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
</section>
<section id="frontend-property-satellital-systems">
@@ -1062,6 +1238,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-VOLTAGE"><constant>DTV_VOLTAGE</constant></link></para></listitem>
<listitem><para><link linkend="DTV-TONE"><constant>DTV_TONE</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
<para>Future implementations might add those two missing parameters:</para>
<itemizedlist mark='opencircle'>
<listitem><para><link linkend="DTV-DISEQC-MASTER"><constant>DTV_DISEQC_MASTER</constant></link></para></listitem>
@@ -1077,6 +1254,7 @@ enum fe_interleaving {
<listitem><para><link linkend="DTV-ROLLOFF"><constant>DTV_ROLLOFF</constant></link></para></listitem>
<listitem><para><link linkend="DTV-STREAM-ID"><constant>DTV_STREAM_ID</constant></link></para></listitem>
</itemizedlist>
+ <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
</section>
<section id="turbo-params">
<title>Turbo code delivery system</title>
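
A small user-space sketch of reading one of the statistics documented above through the FE_GET_PROPERTY ioctl. The frontend device path is an example, only DTV_STAT_CNR is queried, and the union member is accessed as .u.st per the uapi header layout; error handling is minimal.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/dvb/frontend.h>

	int main(void)
	{
		struct dtv_property prop = { .cmd = DTV_STAT_CNR };
		struct dtv_properties props = { .num = 1, .props = &prop };
		int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);
		int i;

		if (fd < 0 || ioctl(fd, FE_GET_PROPERTY, &props) < 0)
			return 1;

		/* stat.len == 0 means the frontend does not report this metric. */
		for (i = 0; i < prop.u.st.len; i++) {
			if (prop.u.st.stat[i].scale == FE_SCALE_DECIBEL)
				printf("CNR[%d]: %lld (1/1000 dB)\n", i,
				       (long long)prop.u.st.stat[i].svalue);
			else if (prop.u.st.stat[i].scale == FE_SCALE_RELATIVE)
				printf("CNR[%d]: %llu / 65535\n", i,
				       (unsigned long long)prop.u.st.stat[i].uvalue);
		}
		return 0;
	}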
diff --git a/Documentation/DocBook/media/dvb/frontend.xml b/Documentation/DocBook/media/dvb/frontend.xml
index 426c2526a454..df39ba395df0 100644
--- a/Documentation/DocBook/media/dvb/frontend.xml
+++ b/Documentation/DocBook/media/dvb/frontend.xml
@@ -230,7 +230,7 @@ typedef enum fe_status {
<entry align="char">The frontend has found a DVB signal</entry>
</row><row>
<entry align="char">FE_HAS_VITERBI</entry>
-<entry align="char">The frontend FEC code is stable</entry>
+<entry align="char">The frontend FEC inner coding (Viterbi, LDPC or other inner code) is stable</entry>
</row><row>
<entry align="char">FE_HAS_SYNC</entry>
<entry align="char">Syncronization bytes was found</entry>
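
For completeness, testing the FE_HAS_VITERBI bit described in this table from user space is a small wrapper around the FE_READ_STATUS ioctl; fd below is assumed to be an already-open frontend descriptor.

	#include <sys/ioctl.h>
	#include <linux/dvb/frontend.h>

	/* Returns non-zero if the inner FEC reports a stable lock. */
	static int inner_fec_is_stable(int fd)
	{
		fe_status_t status;

		if (ioctl(fd, FE_READ_STATUS, &status) < 0)
			return 0;
		return (status & FE_HAS_VITERBI) != 0;
	}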
diff --git a/Documentation/DocBook/media/v4l/common.xml b/Documentation/DocBook/media/v4l/common.xml
index 73c6847436c9..ae06afbbb3a9 100644
--- a/Documentation/DocBook/media/v4l/common.xml
+++ b/Documentation/DocBook/media/v4l/common.xml
@@ -609,7 +609,7 @@ to zero and the <constant>VIDIOC_G_STD</constant>,
<para>Applications can make use of the <xref linkend="input-capabilities" /> and
<xref linkend="output-capabilities"/> flags to determine whether the video standard ioctls
are available for the device.</para>
-&ENOTTY;.
+
<para>See <xref linkend="buffer" /> for a rationale. Probably
even USB cameras follow some well known video standard. It might have
been better to explicitly indicate elsewhere if a device cannot live
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 3dd9e78815d1..104a1a2b8849 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2477,6 +2477,22 @@ that used it. It was originally scheduled for removal in 2.6.35.
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 3.9</title>
+ <orderedlist>
+ <listitem>
+ <para>Added timestamp types to
+ <structfield>flags</structfield> field in
+ <structname>v4l2_buffer</structname>. See <xref
+ linkend="buffer-flags" />.</para>
+ </listitem>
+ <listitem>
+ <para>Added <constant>V4L2_EVENT_CTRL_CH_RANGE</constant> control event
+ changes flag. See <xref linkend="changes-flags"/>.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 7fe5be1d3bbb..9e8f85498678 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -203,29 +203,6 @@ and should not be used in new drivers and applications.</entry>
<entry>boolean</entry>
<entry>Mirror the picture vertically.</entry>
</row>
- <row>
- <entry><constant>V4L2_CID_HCENTER_DEPRECATED</constant> (formerly <constant>V4L2_CID_HCENTER</constant>)</entry>
- <entry>integer</entry>
- <entry>Horizontal image centering. This control is
-deprecated. New drivers and applications should use the <link
-linkend="camera-controls">Camera class controls</link>
-<constant>V4L2_CID_PAN_ABSOLUTE</constant>,
-<constant>V4L2_CID_PAN_RELATIVE</constant> and
-<constant>V4L2_CID_PAN_RESET</constant> instead.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_VCENTER_DEPRECATED</constant>
- (formerly <constant>V4L2_CID_VCENTER</constant>)</entry>
- <entry>integer</entry>
- <entry>Vertical image centering. Centering is intended to
-<emphasis>physically</emphasis> adjust cameras. For image cropping see
-<xref linkend="crop" />, for clipping <xref linkend="overlay" />. This
-control is deprecated. New drivers and applications should use the
-<link linkend="camera-controls">Camera class controls</link>
-<constant>V4L2_CID_TILT_ABSOLUTE</constant>,
-<constant>V4L2_CID_TILT_RELATIVE</constant> and
-<constant>V4L2_CID_TILT_RESET</constant> instead.</entry>
- </row>
<row id="v4l2-power-line-frequency">
<entry><constant>V4L2_CID_POWER_LINE_FREQUENCY</constant></entry>
<entry>enum</entry>
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index 388a34032653..e6c58559ca6b 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -477,7 +477,7 @@ rest should be evident.</para>
<note>
<title>Experimental</title>
- <para>This is an <link linkend="experimental"> experimental </link>
+ <para>This is an <link linkend="experimental">experimental</link>
interface and may change in the future.</para>
</note>
@@ -488,7 +488,7 @@ DMA buffer from userspace using a file descriptor previously exported for a
different or the same device (known as the importer role), or both. This
section describes the DMABUF importer role API in V4L2.</para>
- <para>Refer to <link linked="vidioc-expbuf"> DMABUF exporting </link> for
+ <para>Refer to <link linkend="vidioc-expbuf">DMABUF exporting</link> for
details about exporting V4L2 buffers as DMABUF file descriptors.</para>
<para>Input and output devices support the streaming I/O method when the
@@ -741,17 +741,19 @@ applications when an output stream.</entry>
<entry>struct timeval</entry>
<entry><structfield>timestamp</structfield></entry>
<entry></entry>
- <entry><para>For input streams this is the
-system time (as returned by the <function>gettimeofday()</function>
-function) when the first data byte was captured. For output streams
-the data will not be displayed before this time, secondary to the
-nominal frame rate determined by the current video standard in
-enqueued order. Applications can for example zero this field to
-display frames as soon as possible. The driver stores the time at
-which the first data byte was actually sent out in the
-<structfield>timestamp</structfield> field. This permits
-applications to monitor the drift between the video and system
-clock.</para></entry>
+	<entry><para>For input streams this is the time when the first data
+ byte was captured, as returned by the
+ <function>clock_gettime()</function> function for the relevant
+ clock id; see <constant>V4L2_BUF_FLAG_TIMESTAMP_*</constant> in
+ <xref linkend="buffer-flags" />. For output streams the data
+ will not be displayed before this time, secondary to the nominal
+ frame rate determined by the current video standard in enqueued
+ order. Applications can for example zero this field to display
+ frames as soon as possible. The driver stores the time at which
+ the first data byte was actually sent out in the
+ <structfield>timestamp</structfield> field. This permits
+ applications to monitor the drift between the video and system
+ clock.</para></entry>
</row>
<row>
<entry>&v4l2-timecode;</entry>
@@ -903,7 +905,7 @@ should set this to 0.</entry>
</row>
<row>
<entry></entry>
- <entry>__unsigned long</entry>
+ <entry>unsigned long</entry>
<entry><structfield>userptr</structfield></entry>
<entry>When the memory type in the containing &v4l2-buffer; is
<constant>V4L2_MEMORY_USERPTR</constant>, this is a userspace
@@ -1114,6 +1116,35 @@ Typically applications shall use this flag for output buffers if the data
in this buffer has not been created by the CPU but by some DMA-capable unit,
in which case caches have not been used.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_MASK</constant></entry>
+ <entry>0xe000</entry>
+	    <entry>Mask for the timestamp types below. To test the
+	    timestamp type, perform a bitwise AND of the buffer
+	    flags with this mask and compare the result against
+	    the timestamp type constants below.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN</constant></entry>
+ <entry>0x0000</entry>
+ <entry>Unknown timestamp type. This type is used by
+ drivers before Linux 3.9 and may be either monotonic (see
+	    below) or realtime (wall clock). The monotonic clock has been
+	    favoured in embedded systems whereas most of the drivers
+	    use the realtime clock. Either kind of timestamp is
+ available in user space via
+ <function>clock_gettime(2)</function> using clock IDs
+ <constant>CLOCK_MONOTONIC</constant> and
+ <constant>CLOCK_REALTIME</constant>, respectively.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC</constant></entry>
+ <entry>0x2000</entry>
+ <entry>The buffer timestamp has been taken from the
+ <constant>CLOCK_MONOTONIC</constant> clock. To access the
+ same clock outside V4L2, use
+	    <function>clock_gettime(2)</function>.</entry>
+ </row>
</tbody>
</tgroup>
</table>
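
The masking rule described for the new timestamp flags translates into a simple bitwise test. A sketch, assuming a struct v4l2_buffer that has just been dequeued:

	#include <stdbool.h>
	#include <linux/videodev2.h>

	/* True if the buffer timestamp was taken from CLOCK_MONOTONIC. */
	static bool buf_timestamp_is_monotonic(const struct v4l2_buffer *buf)
	{
		return (buf->flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		       V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	}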
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
index a990b34d911a..f3a3d459fcdf 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
@@ -6,7 +6,7 @@
<refnamediv>
<refname id="V4L2-PIX-FMT-NV12M"><constant>V4L2_PIX_FMT_NV12M</constant></refname>
<refname id="V4L2-PIX-FMT-NV21M"><constant>V4L2_PIX_FMT_NV21M</constant></refname>
- <refname id="V4L2-PIX-FMT-NV12MT_16X16"><constant>V4L2_PIX_FMT_NV12MT_16X16</constant></refname>
+ <refname id="V4L2-PIX-FMT-NV12MT-16X16"><constant>V4L2_PIX_FMT_NV12MT_16X16</constant></refname>
<refpurpose>Variation of <constant>V4L2_PIX_FMT_NV12</constant> and <constant>V4L2_PIX_FMT_NV21</constant> with planes
non contiguous in memory. </refpurpose>
</refnamediv>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
new file mode 100644
index 000000000000..29acc2098cc2
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
@@ -0,0 +1,34 @@
+ <refentry>
+ <refmeta>
+ <refentrytitle>
+ V4L2_PIX_FMT_SBGGR10ALAW8 ('aBA8'),
+ V4L2_PIX_FMT_SGBRG10ALAW8 ('aGA8'),
+ V4L2_PIX_FMT_SGRBG10ALAW8 ('agA8'),
+ V4L2_PIX_FMT_SRGGB10ALAW8 ('aRA8'),
+ </refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-SBGGR10ALAW8">
+ <constant>V4L2_PIX_FMT_SBGGR10ALAW8</constant>
+ </refname>
+ <refname id="V4L2-PIX-FMT-SGBRG10ALAW8">
+ <constant>V4L2_PIX_FMT_SGBRG10ALAW8</constant>
+ </refname>
+ <refname id="V4L2-PIX-FMT-SGRBG10ALAW8">
+ <constant>V4L2_PIX_FMT_SGRBG10ALAW8</constant>
+ </refname>
+ <refname id="V4L2-PIX-FMT-SRGGB10ALAW8">
+ <constant>V4L2_PIX_FMT_SRGGB10ALAW8</constant>
+ </refname>
+ <refpurpose>10-bit Bayer formats compressed to 8 bits</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+ <para>The following four pixel formats are raw sRGB / Bayer
+ formats with 10 bits per color compressed to 8 bits each,
+ using the A-LAW algorithm. Each color component consumes 8
+ bits of memory. In other respects this format is similar to
+ <xref linkend="V4L2-PIX-FMT-SRGGB8"></xref>.</para>
+ </refsect1>
+ </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-uv8.xml b/Documentation/DocBook/media/v4l/pixfmt-uv8.xml
new file mode 100644
index 000000000000..c507c1f73cd0
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/pixfmt-uv8.xml
@@ -0,0 +1,62 @@
+ <refentry id="V4L2-PIX-FMT-UV8">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_UV8 ('UV8')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname><constant>V4L2_PIX_FMT_UV8</constant></refname>
+ <refpurpose>UV plane interleaved</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+    <para>In this format there is no Y plane, only a CbCr plane, i.e.
+      the U and V components are interleaved.</para>
+ <example>
+ <title>
+ <constant>V4L2_PIX_FMT_UV8</constant>
+ pixel image
+ </title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>Cb<subscript>00</subscript></entry>
+ <entry>Cr<subscript>00</subscript></entry>
+ <entry>Cb<subscript>01</subscript></entry>
+ <entry>Cr<subscript>01</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;4:</entry>
+ <entry>Cb<subscript>10</subscript></entry>
+ <entry>Cr<subscript>10</subscript></entry>
+ <entry>Cb<subscript>11</subscript></entry>
+ <entry>Cr<subscript>11</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>Cb<subscript>20</subscript></entry>
+ <entry>Cr<subscript>20</subscript></entry>
+ <entry>Cb<subscript>21</subscript></entry>
+ <entry>Cr<subscript>21</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;12:</entry>
+ <entry>Cb<subscript>30</subscript></entry>
+ <entry>Cr<subscript>30</subscript></entry>
+ <entry>Cb<subscript>31</subscript></entry>
+ <entry>Cr<subscript>31</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index bf94f417592c..99b8d2ad6e4f 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-srggb8;
&sub-sbggr16;
&sub-srggb10;
+ &sub-srggb10alaw8;
&sub-srggb10dpcm8;
&sub-srggb12;
</section>
@@ -701,6 +702,7 @@ information.</para>
&sub-y12;
&sub-y10b;
&sub-y16;
+ &sub-uv8;
&sub-yuyv;
&sub-uyvy;
&sub-yvyu;
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index a0a936455fae..cc51372ed5e0 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -353,9 +353,9 @@
<listitem><para>The number of bits per pixel component. All components are
transferred on the same number of bits. Common values are 8, 10 and 12.</para>
</listitem>
- <listitem><para>If the pixel components are DPCM-compressed, a mention of the
- DPCM compression and the number of bits per compressed pixel component.</para>
- </listitem>
+ <listitem><para>The compression (optional). If the pixel components are
+ ALAW- or DPCM-compressed, a mention of the compression scheme and the
+ number of bits per compressed pixel component.</para></listitem>
<listitem><para>The number of bus samples per pixel. Pixels that are wider than
the bus width must be transferred in multiple samples. Common values are
1 and 2.</para></listitem>
@@ -504,6 +504,74 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
+ <row id="V4L2-MBUS-FMT-SBGGR10-ALAW8-1X8">
+ <entry>V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8</entry>
+ <entry>0x3015</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
+ <row id="V4L2-MBUS-FMT-SGBRG10-ALAW8-1X8">
+ <entry>V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8</entry>
+ <entry>0x3016</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ </row>
+ <row id="V4L2-MBUS-FMT-SGRBG10-ALAW8-1X8">
+ <entry>V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8</entry>
+ <entry>0x3017</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ </row>
+ <row id="V4L2-MBUS-FMT-SRGGB10-ALAW8-1X8">
+ <entry>V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8</entry>
+ <entry>0x3018</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ </row>
<row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8">
<entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry>
<entry>0x300b</entry>
@@ -853,10 +921,16 @@
<title>Packed YUV Formats</title>
<para>Those data formats transfer pixel data as (possibly downsampled) Y, U
- and V components. The format code is made of the following information.
+ and V components. Some formats include dummy bits in some of their samples
+ and are collectively referred to as "YDYC" (Y-Dummy-Y-Chroma) formats.
+ One cannot rely on the values of these dummy bits as those are undefined.
+ </para>
+ <para>The format code is made of the following information.
<itemizedlist>
<listitem><para>The Y, U and V components order code, as transferred on the
- bus. Possible values are YUYV, UYVY, YVYU and VYUY.</para></listitem>
+ bus. Possible values are YUYV, UYVY, YVYU and VYUY for formats with no
+ dummy bit, and YDYUYDYV, YDYVYDYU, YUYDYVYD and YVYDYUYD for YDYC formats.
+ </para></listitem>
<listitem><para>The number of bits per pixel component. All components are
transferred on the same number of bits. Common values are 8, 10 and 12.</para>
</listitem>
@@ -877,7 +951,21 @@
U, Y, V, Y order will be named <constant>V4L2_MBUS_FMT_UYVY8_2X8</constant>.
</para>
- <para>The following table lisst existing packet YUV formats.</para>
+ <para><xref linkend="v4l2-mbus-pixelcode-yuv8"/> lists existing packed YUV
+ formats and describes the organization of the pixel data in each sample.
+ When a format pattern is split across multiple samples, each of the samples
+ in the pattern is described.</para>
+
+ <para>The role of each bit transferred over the bus is identified by one
+ of the following codes.</para>
+
+ <itemizedlist>
+ <listitem><para>y<subscript>x</subscript> for luma component bit number x</para></listitem>
+ <listitem><para>u<subscript>x</subscript> for blue chroma component bit number x</para></listitem>
+ <listitem><para>v<subscript>x</subscript> for red chroma component bit number x</para></listitem>
+ <listitem><para>- for non-available bits (for positions higher than the bus width)</para></listitem>
+ <listitem><para>d for dummy bits</para></listitem>
+ </itemizedlist>
<table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-yuv8">
<title>YUV Formats</title>
@@ -885,27 +973,37 @@
<colspec colname="id" align="left" />
<colspec colname="code" align="center"/>
<colspec colname="bit" />
- <colspec colnum="4" colname="b19" align="center" />
- <colspec colnum="5" colname="b18" align="center" />
- <colspec colnum="6" colname="b17" align="center" />
- <colspec colnum="7" colname="b16" align="center" />
- <colspec colnum="8" colname="b15" align="center" />
- <colspec colnum="9" colname="b14" align="center" />
- <colspec colnum="10" colname="b13" align="center" />
- <colspec colnum="11" colname="b12" align="center" />
- <colspec colnum="12" colname="b11" align="center" />
- <colspec colnum="13" colname="b10" align="center" />
- <colspec colnum="14" colname="b09" align="center" />
- <colspec colnum="15" colname="b08" align="center" />
- <colspec colnum="16" colname="b07" align="center" />
- <colspec colnum="17" colname="b06" align="center" />
- <colspec colnum="18" colname="b05" align="center" />
- <colspec colnum="19" colname="b04" align="center" />
- <colspec colnum="20" colname="b03" align="center" />
- <colspec colnum="21" colname="b02" align="center" />
- <colspec colnum="22" colname="b01" align="center" />
- <colspec colnum="23" colname="b00" align="center" />
- <spanspec namest="b19" nameend="b00" spanname="b0" />
+ <colspec colnum="4" colname="b29" align="center" />
+ <colspec colnum="5" colname="b28" align="center" />
+ <colspec colnum="6" colname="b27" align="center" />
+ <colspec colnum="7" colname="b26" align="center" />
+ <colspec colnum="8" colname="b25" align="center" />
+ <colspec colnum="9" colname="b24" align="center" />
+ <colspec colnum="10" colname="b23" align="center" />
+ <colspec colnum="11" colname="b22" align="center" />
+ <colspec colnum="12" colname="b21" align="center" />
+ <colspec colnum="13" colname="b20" align="center" />
+ <colspec colnum="14" colname="b19" align="center" />
+ <colspec colnum="15" colname="b18" align="center" />
+ <colspec colnum="16" colname="b17" align="center" />
+ <colspec colnum="17" colname="b16" align="center" />
+ <colspec colnum="18" colname="b15" align="center" />
+ <colspec colnum="19" colname="b14" align="center" />
+ <colspec colnum="20" colname="b13" align="center" />
+ <colspec colnum="21" colname="b12" align="center" />
+ <colspec colnum="22" colname="b11" align="center" />
+ <colspec colnum="23" colname="b10" align="center" />
+ <colspec colnum="24" colname="b09" align="center" />
+ <colspec colnum="25" colname="b08" align="center" />
+ <colspec colnum="26" colname="b07" align="center" />
+ <colspec colnum="27" colname="b06" align="center" />
+ <colspec colnum="28" colname="b05" align="center" />
+ <colspec colnum="29" colname="b04" align="center" />
+ <colspec colnum="30" colname="b03" align="center" />
+ <colspec colnum="31" colname="b02" align="center" />
+ <colspec colnum="32" colname="b01" align="center" />
+ <colspec colnum="33" colname="b00" align="center" />
+ <spanspec namest="b29" nameend="b00" spanname="b0" />
<thead>
<row>
<entry>Identifier</entry>
@@ -917,6 +1015,16 @@
<entry></entry>
<entry></entry>
<entry>Bit</entry>
+ <entry>29</entry>
+ <entry>28</entry>
+ <entry>27</entry>
+ <entry>26</entry>
+ <entry>25</entry>
+ <entry>24</entry>
+ <entry>23</entry>
+ <entry>22</entry>
+ <entry>21</entry>
+ <entry>20</entry>
<entry>19</entry>
<entry>18</entry>
<entry>17</entry>
@@ -944,16 +1052,8 @@
<entry>V4L2_MBUS_FMT_Y8_1X8</entry>
<entry>0x2001</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -965,9 +1065,9 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY8-1_5X8">
- <entry>V4L2_MBUS_FMT_UYVY8_1_5X8</entry>
- <entry>0x2002</entry>
+ <row id="V4L2-MBUS-FMT-UV8-1X8">
+ <entry>V4L2_MBUS_FMT_UV8_1X8</entry>
+ <entry>0x2015</entry>
<entry></entry>
<entry>-</entry>
<entry>-</entry>
@@ -1006,6 +1106,40 @@
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
+ <row id="V4L2-MBUS-FMT-UYVY8-1_5X8">
+ <entry>V4L2_MBUS_FMT_UYVY8_1_5X8</entry>
+ <entry>0x2002</entry>
+ <entry></entry>
+ &dash-ent-10;
+ &dash-ent-10;
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ &dash-ent-10;
+ &dash-ent-10;
+ <entry>-</entry>
+ <entry>-</entry>
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1019,16 +1153,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1044,16 +1170,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1069,16 +1187,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1094,16 +1204,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1119,16 +1221,8 @@
<entry>V4L2_MBUS_FMT_VYUY8_1_5X8</entry>
<entry>0x2003</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1144,16 +1238,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1169,16 +1255,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1194,16 +1272,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1219,16 +1289,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1244,16 +1306,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1269,16 +1323,8 @@
<entry>V4L2_MBUS_FMT_YUYV8_1_5X8</entry>
<entry>0x2004</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1294,16 +1340,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1319,16 +1357,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1344,16 +1374,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1369,16 +1391,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1394,16 +1408,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1419,16 +1425,8 @@
<entry>V4L2_MBUS_FMT_YVYU8_1_5X8</entry>
<entry>0x2005</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1444,16 +1442,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1469,16 +1459,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1494,16 +1476,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1519,16 +1493,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1544,16 +1510,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1569,16 +1527,8 @@
<entry>V4L2_MBUS_FMT_UYVY8_2X8</entry>
<entry>0x2006</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1594,16 +1544,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1619,16 +1561,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1644,16 +1578,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1669,16 +1595,8 @@
<entry>V4L2_MBUS_FMT_VYUY8_2X8</entry>
<entry>0x2007</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1694,16 +1612,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1719,16 +1629,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1744,16 +1646,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1769,16 +1663,8 @@
<entry>V4L2_MBUS_FMT_YUYV8_2X8</entry>
<entry>0x2008</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1794,16 +1680,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1819,16 +1697,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1844,16 +1714,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1869,16 +1731,8 @@
<entry>V4L2_MBUS_FMT_YVYU8_2X8</entry>
<entry>0x2009</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1894,16 +1748,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>v<subscript>7</subscript></entry>
@@ -1919,16 +1765,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>y<subscript>7</subscript></entry>
@@ -1944,16 +1782,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>u<subscript>7</subscript></entry>
@@ -1969,16 +1799,8 @@
<entry>V4L2_MBUS_FMT_Y10_1X10</entry>
<entry>0x200a</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -1994,16 +1816,8 @@
<entry>V4L2_MBUS_FMT_YUYV10_2X10</entry>
<entry>0x200b</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2019,16 +1833,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2044,16 +1850,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2069,16 +1867,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2094,16 +1884,8 @@
<entry>V4L2_MBUS_FMT_YVYU10_2X10</entry>
<entry>0x200c</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2119,16 +1901,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2144,16 +1918,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2169,16 +1935,8 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-10;
+ &dash-ent-10;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2194,6 +1952,7 @@
<entry>V4L2_MBUS_FMT_Y12_1X12</entry>
<entry>0x2013</entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2219,6 +1978,7 @@
<entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
<entry>0x200f</entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2244,6 +2004,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2269,6 +2030,7 @@
<entry>V4L2_MBUS_FMT_VYUY8_1X16</entry>
<entry>0x2010</entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2294,6 +2056,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2319,6 +2082,7 @@
<entry>V4L2_MBUS_FMT_YUYV8_1X16</entry>
<entry>0x2011</entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2344,6 +2108,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2369,6 +2134,7 @@
<entry>V4L2_MBUS_FMT_YVYU8_1X16</entry>
<entry>0x2012</entry>
<entry></entry>
+ &dash-ent-10;
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2394,6 +2160,57 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ </row>
+ <row id="V4L2-MBUS-FMT-YDYUYDYV8-1X16">
+ <entry>V4L2_MBUS_FMT_YDYUYDYV8_1X16</entry>
+ <entry>0x2014</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
<entry>-</entry>
<entry>-</entry>
<entry>-</entry>
@@ -2415,10 +2232,61 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ <entry>d</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>-</entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
<row id="V4L2-MBUS-FMT-YUYV10-1X20">
<entry>V4L2_MBUS_FMT_YUYV10_1X20</entry>
<entry>0x200d</entry>
<entry></entry>
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2444,6 +2312,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2469,6 +2338,7 @@
<entry>V4L2_MBUS_FMT_YVYU10_1X20</entry>
<entry>0x200e</entry>
<entry></entry>
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2494,6 +2364,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
+ &dash-ent-10;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2515,6 +2386,41 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
+ <row id="V4L2-MBUS-FMT-YUV10-1X30">
+ <entry>V4L2_MBUS_FMT_YUV10_1X30</entry>
+ <entry>0x2016</entry>
+ <entry></entry>
+ <entry>y<subscript>9</subscript></entry>
+ <entry>y<subscript>8</subscript></entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>9</subscript></entry>
+ <entry>u<subscript>8</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ <entry>v<subscript>9</subscript></entry>
+ <entry>v<subscript>8</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 4d110b1ad3e9..a3cce18384e9 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -140,6 +140,16 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.9</revnumber>
+ <date>2012-12-03</date>
+ <authorinitials>sa, sn</authorinitials>
+ <revremark>Added timestamp types to v4l2_buffer.
+ Added <constant>V4L2_EVENT_CTRL_CH_RANGE</constant> control
+ event changes flag, see <xref linkend="changes-flags"/>.
+ </revremark>
+ </revision>
+
+ <revision>
<revnumber>3.6</revnumber>
<date>2012-07-02</date>
<authorinitials>hv</authorinitials>
@@ -472,7 +482,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.6</subtitle>
+ <subtitle>Revision 3.9</subtitle>
<chapter id="common">
&sub-common;
diff --git a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
index 98a856f9ec30..89891adb928a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
@@ -261,6 +261,12 @@
<entry>This control event was triggered because the control flags
changed.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_EVENT_CTRL_CH_RANGE</constant></entry>
+ <entry>0x0004</entry>
+ <entry>This control event was triggered because the minimum,
+ maximum, step or the default value of the control changed.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index 72dfbd20a802..e287c8fc803b 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -83,15 +83,14 @@ descriptor. The application may pass it to other DMABUF-aware devices. Refer to
<link linkend="dmabuf">DMABUF importing</link> for details about importing
DMABUF files into V4L2 nodes. It is recommended to close a DMABUF file when it
is no longer used to allow the associated memory to be reclaimed. </para>
-
</refsect1>
+
<refsect1>
- <section>
- <title>Examples</title>
+ <title>Examples</title>
- <example>
- <title>Exporting a buffer.</title>
- <programlisting>
+ <example>
+ <title>Exporting a buffer.</title>
+ <programlisting>
int buffer_export(int v4lfd, &v4l2-buf-type; bt, int index, int *dmafd)
{
&v4l2-exportbuffer; expbuf;
@@ -108,12 +107,12 @@ int buffer_export(int v4lfd, &v4l2-buf-type; bt, int index, int *dmafd)
return 0;
}
- </programlisting>
- </example>
+ </programlisting>
+ </example>
- <example>
- <title>Exporting a buffer using the multi-planar API.</title>
- <programlisting>
+ <example>
+ <title>Exporting a buffer using the multi-planar API.</title>
+ <programlisting>
int buffer_export_mp(int v4lfd, &v4l2-buf-type; bt, int index,
int dmafd[], int n_planes)
{
@@ -137,12 +136,9 @@ int buffer_export_mp(int v4lfd, &v4l2-buf-type; bt, int index,
return 0;
}
- </programlisting>
- </example>
- </section>
- </refsect1>
+ </programlisting>
+ </example>
- <refsect1>
<table pgwide="1" frame="none" id="v4l2-exportbuffer">
<title>struct <structname>v4l2_exportbuffer</structname></title>
<tgroup cols="3">
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml b/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml
index 12b1d0503e26..ee2820d6ca66 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml
@@ -64,7 +64,9 @@ return an &EINVAL;. When the <structfield>value</structfield> is out
of bounds drivers can choose to take the closest valid value or return
an &ERANGE;, whatever seems more appropriate. However,
<constant>VIDIOC_S_CTRL</constant> is a write-only ioctl, it does not
-return the actual new value.</para>
+return the actual new value. If the <structfield>value</structfield>
+is inappropriate for the control (e.g. if it refers to an unsupported
+menu index of a menu control), then &EINVAL; is returned as well.</para>
<para>These ioctls work only with user controls. For other
control classes the &VIDIOC-G-EXT-CTRLS;, &VIDIOC-S-EXT-CTRLS; or
@@ -99,7 +101,9 @@ application.</entry>
<term><errorcode>EINVAL</errorcode></term>
<listitem>
<para>The &v4l2-control; <structfield>id</structfield> is
-invalid.</para>
+invalid or the <structfield>value</structfield> is inappropriate for
+the given control (i.e. if a menu item is selected that is not supported
+by the driver according to &VIDIOC-QUERYMENU;).</para>
</listitem>
</varlistentry>
<varlistentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index 0a4b90fcf2da..4e16112df992 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -106,7 +106,9 @@ value or if an error is returned.</para>
&EINVAL;. When the value is out of bounds drivers can choose to take
the closest valid value or return an &ERANGE;, whatever seems more
appropriate. In the first case the new value is set in
-&v4l2-ext-control;.</para>
+&v4l2-ext-control;. If the new control value is inappropriate (e.g. the
+given menu index is not supported by the menu control), then this will
+also result in an &EINVAL; error.</para>
<para>The driver will only set/get these controls if all control
values are correct. This prevents the situation where only some of the
@@ -199,13 +201,46 @@ also be zero.</entry>
<row>
<entry>__u32</entry>
<entry><structfield>error_idx</structfield></entry>
- <entry>Set by the driver in case of an error. If it is equal
-to <structfield>count</structfield>, then no actual changes were made to
-controls. In other words, the error was not associated with setting a particular
-control. If it is another value, then only the controls up to <structfield>error_idx-1</structfield>
-were modified and control <structfield>error_idx</structfield> is the one that
-caused the error. The <structfield>error_idx</structfield> value is undefined
-if the ioctl returned 0 (success).</entry>
+ <entry><para>Set by the driver in case of an error. If the error is
+associated with a particular control, then <structfield>error_idx</structfield>
+is set to the index of that control. If the error is not related to a specific
+control, or the validation step failed (see below), then
+<structfield>error_idx</structfield> is set to <structfield>count</structfield>.
+The value is undefined if the ioctl returned 0 (success).</para>
+
+<para>Before controls are read from/written to hardware a validation step
+takes place: this checks that all controls in the list are valid controls
+and that no attempt is made to write to a read-only control or read from
+a write-only control, along with any other up-front checks that can be
+done without accessing the hardware. The exact validations done during
+this step are driver dependent
+since some checks might require hardware access for some devices, thus making
+it impossible to do those checks up-front. However, drivers should make a
+best-effort to do as many up-front checks as possible.</para>
+
+<para>This check is done to avoid leaving the hardware in an inconsistent state due
+to easy-to-avoid problems. But it leads to another problem: the application needs to
+know whether an error came from the validation step (meaning that the hardware
+was not touched) or from an error during the actual reading from/writing to hardware.</para>
+
+<para>The solution for that, which in hindsight is quite poor, is to set
+<structfield>error_idx</structfield> to <structfield>count</structfield>
+if the validation failed. This has the
+unfortunate side-effect that it is not possible to see which control failed the
+validation. If the validation was successful and the error happened while
+accessing the hardware, then <structfield>error_idx</structfield> is less than
+<structfield>count</structfield> and only the controls up to
+<structfield>error_idx-1</structfield> were read or written correctly, and the
+state of the remaining controls is undefined.</para>
+
+<para>Since <constant>VIDIOC_TRY_EXT_CTRLS</constant> does not access hardware
+there is also no need to handle the validation step in this special way,
+so <structfield>error_idx</structfield> will just be set to the control that
+failed the validation step instead of to <structfield>count</structfield>.
+This means that if <constant>VIDIOC_S_EXT_CTRLS</constant> fails with
+<structfield>error_idx</structfield> set to <structfield>count</structfield>,
+then you can call <constant>VIDIOC_TRY_EXT_CTRLS</constant> to try to discover
+the actual control that failed the validation step. Unfortunately, there
+is no <constant>TRY</constant> equivalent for <constant>VIDIOC_G_EXT_CTRLS</constant>.
+</para></entry>
</row>
<row>
<entry>__u32</entry>
@@ -298,8 +333,10 @@ These controls are described in <xref
<term><errorcode>EINVAL</errorcode></term>
<listitem>
<para>The &v4l2-ext-control; <structfield>id</structfield>
-is invalid or the &v4l2-ext-controls;
-<structfield>ctrl_class</structfield> is invalid. This error code is
+is invalid, the &v4l2-ext-controls;
+<structfield>ctrl_class</structfield> is invalid, or the &v4l2-ext-control;
+<structfield>value</structfield> was inappropriate (e.g. the given menu
+index is not supported by the driver). This error code is
also returned by the <constant>VIDIOC_S_EXT_CTRLS</constant> and
<constant>VIDIOC_TRY_EXT_CTRLS</constant> ioctls if two or more
control values are in conflict.</para>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querycap.xml b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
index 4c70215ae03f..d5a3c97b206a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
@@ -76,7 +76,7 @@ make sure the strings are properly NUL-terminated.</para></entry>
<row>
<entry>__u8</entry>
<entry><structfield>card</structfield>[32]</entry>
- <entry>Name of the device, a NUL-terminated ASCII string.
+ <entry>Name of the device, a NUL-terminated UTF-8 string.
For example: "Yoyodyne TV/FM". One driver may support different brands
or models of video hardware. This information is intended for users,
for example in a menu of available devices. Since multiple TV cards of
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index f2413acfe241..1f6593deb995 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -22,6 +22,7 @@
<!-- LinuxTV v4l-dvb repository. -->
<!ENTITY v4l-dvb "<ulink url='http://linuxtv.org/repo/'>http://linuxtv.org/repo/</ulink>">
+<!ENTITY dash-ent-10 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
]>
<book id="media_api">
diff --git a/Documentation/EDID/HOWTO.txt b/Documentation/EDID/HOWTO.txt
index 75a9f2a0c43d..2d0a8f09475d 100644
--- a/Documentation/EDID/HOWTO.txt
+++ b/Documentation/EDID/HOWTO.txt
@@ -28,11 +28,30 @@ Makefile environment are given here.
To create binary EDID and C source code files from the existing data
material, simply type "make".
-If you want to create your own EDID file, copy the file 1024x768.S and
-replace the settings with your own data. The CRC value in the last line
+If you want to create your own EDID file, copy the file 1024x768.S,
+replace the settings with your own data and add a new target to the
+Makefile. Please note that the EDID data structure expects the timing
+values in a different form than the standard X11 format uses.
+
+X11:
+HTimings: hdisp hsyncstart hsyncend htotal
+VTimings: vdisp vsyncstart vsyncend vtotal
+
+EDID:
+#define XPIX hdisp
+#define XBLANK htotal-hdisp
+#define XOFFSET hsyncstart-hdisp
+#define XPULSE hsyncend-hsyncstart
+
+#define YPIX vdisp
+#define YBLANK vtotal-vdisp
+#define YOFFSET (63+(vsyncstart-vdisp))
+#define YPULSE (63+(vsyncend-vsyncstart))
+
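+For example, assuming the common X11 modeline for 1024x768 at 60 Hz,
+"1024 1048 1184 1344  768 771 777 806" (used here purely for
+illustration), the conversion yields:
+
+#define XPIX 1024
+#define XBLANK 320      /* 1344-1024 */
+#define XOFFSET 24      /* 1048-1024 */
+#define XPULSE 136      /* 1184-1048 */
+
+#define YPIX 768
+#define YBLANK 38       /* 806-768 */
+#define YOFFSET (63+3)  /* 63+(771-768) */
+#define YPULSE (63+6)   /* 63+(777-771) */
+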
+The CRC value in the last line
#define CRC 0x55
-is a bit tricky. After a first version of the binary data set is
-created, it must be be checked with the "edid-decode" utility which will
+is also a bit tricky. After a first version of the binary data set is
+created, it must be checked with the "edid-decode" utility which will
most probably complain about a wrong CRC. Fortunately, the utility also
displays the correct CRC which must then be inserted into the source
file. After the make procedure is repeated, the EDID data set is ready
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 16eb4c9e9233..f13c9132e9f2 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -348,34 +348,40 @@ You can change this at module load time (for a module) with:
modprobe ipmi_si.o type=<type1>,<type2>....
ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
- irqs=<irq1>,<irq2>... trydefaults=[0|1]
+ irqs=<irq1>,<irq2>...
regspacings=<sp1>,<sp2>,... regsizes=<size1>,<size2>,...
regshifts=<shift1>,<shift2>,...
slave_addrs=<addr1>,<addr2>,...
force_kipmid=<enable1>,<enable2>,...
kipmid_max_busy_us=<ustime1>,<ustime2>,...
unload_when_empty=[0|1]
+ trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+ tryplatform=[0|1] trypci=[0|1]
-Each of these except si_trydefaults is a list, the first item for the
+Each of these, except the try... items, is a list; the first item for the
first interface, second item for the second interface, etc.
The si_type may be either "kcs", "smic", or "bt". If you leave it blank, it
defaults to "kcs".
-If you specify si_addrs as non-zero for an interface, the driver will
+If you specify addrs as non-zero for an interface, the driver will
use the memory address given as the address of the device. This
overrides si_ports.
-If you specify si_ports as non-zero for an interface, the driver will
+If you specify ports as non-zero for an interface, the driver will
use the I/O port given as the device address.
-If you specify si_irqs as non-zero for an interface, the driver will
+If you specify irqs as non-zero for an interface, the driver will
attempt to use the given interrupt for the device.
-si_trydefaults sets whether the standard IPMI interface at 0xca2 and
+trydefaults sets whether the standard IPMI interface at 0xca2 and
any interfaces specified by ACPE are tried. By default, the driver
tries it, set this value to zero to turn this off.
+The other try... items enable or disable the discovery mechanism named
+after them. They are all enabled by default; set an item to zero to
+disable it. The tryplatform option disables openfirmware discovery.
+
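+For example (the parameter values below are purely illustrative), to
+skip the legacy default interface and PCI discovery while still trying
+ACPI and DMI discovery, the driver could be loaded with:
+
+ modprobe ipmi_si trydefaults=0 trypci=0 tryacpi=1 trydmi=1
+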
The next three parameters have to do with register layout. The
registers used by the interfaces may not appear at successive
locations and they may not be in 8-bit registers. These parameters
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index d89b4fe724d7..a5eb7d19a65d 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -102,6 +102,64 @@ processing of request. Therefore, increasing the value can imporve the
performace although this can cause the latency of some I/O to increase due
to more number of requests.
+CFQ Group scheduling
+====================
+
+CFQ supports blkio cgroup and has "blkio." prefixed files in each
+blkio cgroup directory. It is weight-based and there are four knobs
+for configuration - weight[_device] and leaf_weight[_device].
+Internal cgroup nodes (the ones with children) can also have tasks in
+them, so the former two configure what proportion the cgroup as a
+whole is entitled to at its parent's level, while the latter two
+configure what proportion the tasks in the cgroup get compared to
+its direct children.
+
+Another way to think about it is assuming that each internal node has
+an implicit leaf child node which hosts all the tasks whose weight is
+configured by leaf_weight[_device]. Let's assume a blkio hierarchy
+composed of five cgroups - root, A, B, AA and AB - with the following
+weights where the names represent the hierarchy.
+
+ weight leaf_weight
+ root : 125 125
+ A : 500 750
+ B : 250 500
+ AA : 500 500
+ AB : 1000 500
+
+root never has a parent, making its weight meaningless. For backward
+compatibility, weight is always kept in sync with leaf_weight. B, AA
+and AB have no children and thus their tasks have no child cgroups to
+compete with. They always get 100% of what the cgroup won at the
+parent level. Considering only the weights which matter, the hierarchy
+looks like the following.
+
+ root
+ / | \
+ A B leaf
+ 500 250 125
+ / | \
+ AA AB leaf
+ 500 1000 750
+
+If all cgroups have active IOs and are competing with each other, disk
+time will be distributed like the following.
+
+Distribution below root. The total active weight at this level is
+A:500 + B:250 + root-leaf:125 = 875.
+
+ root-leaf : 125 / 875 =~ 14%
+ A : 500 / 875 =~ 57%
+ B(-leaf) : 250 / 875 =~ 28%
+
+A has children and further distributes its 57% among the children and
+the implicit leaf node. The total active weight at this level is
+AA:500 + AB:1000 + A-leaf:750 = 2250.
+
+ A-leaf : ( 750 / 2250) * A =~ 19%
+ AA(-leaf) : ( 500 / 2250) * A =~ 12%
+ AB(-leaf) : (1000 / 2250) * A =~ 25%
+
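+As a rough sketch, the non-root weights in the example above could be
+configured like this (the cgroup mount point and group names are
+illustrative):
+
+ mkdir -p /sys/fs/cgroup/blkio/A/AA /sys/fs/cgroup/blkio/A/AB
+ mkdir -p /sys/fs/cgroup/blkio/B
+ echo 500  > /sys/fs/cgroup/blkio/A/blkio.weight
+ echo 750  > /sys/fs/cgroup/blkio/A/blkio.leaf_weight
+ echo 250  > /sys/fs/cgroup/blkio/B/blkio.weight
+ echo 500  > /sys/fs/cgroup/blkio/A/AA/blkio.weight
+ echo 1000 > /sys/fs/cgroup/blkio/A/AB/blkio.weight
+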
CFQ IOPS Mode for group scheduling
===================================
Basic CFQ design is to provide priority based time slices. Higher priority
diff --git a/Documentation/blockdev/nbd.txt b/Documentation/blockdev/nbd.txt
index aeb93ffe6416..271e607304da 100644
--- a/Documentation/blockdev/nbd.txt
+++ b/Documentation/blockdev/nbd.txt
@@ -4,43 +4,13 @@
can use a remote server as one of its block devices. So every time
the client computer wants to read, e.g., /dev/nb0, it sends a
request over TCP to the server, which will reply with the data read.
- This can be used for stations with low disk space (or even diskless -
- if you boot from floppy) to borrow disk space from another computer.
- Unlike NFS, it is possible to put any filesystem on it, etc. It should
- even be possible to use NBD as a root filesystem (I've never tried),
- but it requires a user-level program to be in the initrd to start.
- It also allows you to run block-device in user land (making server
- and client physically the same computer, communicating using loopback).
-
- Current state: It currently works. Network block device is stable.
- I originally thought that it was impossible to swap over TCP. It
- turned out not to be true - swapping over TCP now works and seems
- to be deadlock-free, but it requires heavy patches into Linux's
- network layer.
-
+ This can be used for stations with low disk space (or even diskless)
+ to borrow disk space from another computer.
+ Unlike NFS, it is possible to put any filesystem on it, etc.
+
For more information, or to download the nbd-client and nbd-server
tools, go to http://nbd.sf.net/.
- Howto: To setup nbd, you can simply do the following:
-
- First, serve a device or file from a remote server:
-
- nbd-server <port-number> <device-or-file-to-serve-to-client>
-
- e.g.,
- root@server1 # nbd-server 1234 /dev/sdb1
-
- (serves sdb1 partition on TCP port 1234)
-
- Then, on the local (client) system:
-
- nbd-client <server-name-or-IP> <server-port-number> /dev/nb[0-n]
-
- e.g.,
- root@client1 # nbd-client server1 1234 /dev/nb0
-
- (creates the nb0 device on client1)
-
The nbd kernel module need only be installed on the client
system, as the nbd-server is completely in userspace. In fact,
the nbd-server has been successfully ported to other operating
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index b4b1fb3a83f0..da272c8f44e7 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -75,7 +75,7 @@ Throttling/Upper Limit policy
mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Specify a bandwidth rate on particular device for root group. The format
- for policy is "<major>:<minor> <byes_per_second>".
+ for policy is "<major>:<minor> <bytes_per_second>".
echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
@@ -94,13 +94,11 @@ Throttling/Upper Limit policy
Hierarchical Cgroups
====================
-- Currently none of the IO control policy supports hierarchical groups. But
- cgroup interface does allow creation of hierarchical cgroups and internally
- IO policies treat them as flat hierarchy.
+- Currently only CFQ supports hierarchical groups. For throttling,
+ the cgroup interface does allow creation of hierarchical cgroups but
+ internally it treats them as a flat hierarchy.
- So this patch will allow creation of cgroup hierarchcy but at the backend
- everything will be treated as flat. So if somebody created a hierarchy like
- as follows.
+ If somebody creates a hierarchy as follows:
root
/ \
@@ -108,16 +106,20 @@ Hierarchical Cgroups
|
test3
- CFQ and throttling will practically treat all groups at same level.
+ CFQ will handle the hierarchy correctly but throttling will
+ practically treat all groups at the same level. For details on CFQ
+ hierarchy support, refer to Documentation/block/cfq-iosched.txt.
+ Throttling will treat the hierarchy as if it looks like the
+ following.
pivot
/ / \ \
root test1 test2 test3
- Down the line we can implement hierarchical accounting/control support
- and also introduce a new cgroup file "use_hierarchy" which will control
- whether cgroup hierarchy is viewed as flat or hierarchical by the policy..
- This is how memory controller also has implemented the things.
+ Nesting cgroups, while allowed, isn't officially supported and blkio
+ generates a warning when cgroups nest. Once throttling implements
+ hierarchy support, hierarchy will be supported and the warning will
+ be removed.
Various user visible config options
===================================
@@ -172,6 +174,12 @@ Proportional weight policy files
dev weight
8:16 300
+- blkio.leaf_weight[_device]
+ - Equivalents of blkio.weight[_device] for the purpose of
+ deciding how much weight the tasks in the given cgroup have while
+ competing with the cgroup's child cgroups. For details,
+ please refer to Documentation/block/cfq-iosched.txt.
+
- blkio.time
- disk time allocated to cgroup per device in milliseconds. First
two fields specify the major and minor number of the device and
@@ -279,6 +287,11 @@ Proportional weight policy files
and minor number of the device and third field specifies the number
of times a group was dequeued from a particular device.
+- blkio.*_recursive
+ - Recursive version of various stats. These files show the
+ same information as their non-recursive counterparts but
+ include stats from all the descendant cgroups.
+
Throttling/Upper limit policy files
-----------------------------------
- blkio.throttle.read_bps_device
diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt
index cf44eb6499b4..dffa2d620d6d 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/coccinelle.txt
@@ -87,6 +87,10 @@ As any static code analyzer, Coccinelle produces false
positives. Thus, reports must be carefully checked, and patches
reviewed.
+To enable verbose messages set the V= variable, for example:
+
+ make coccicheck MODE=report V=1
+
Using Coccinelle with a single semantic patch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
new file mode 100644
index 000000000000..d7c440b444cc
--- /dev/null
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -0,0 +1,77 @@
+Guidance for writing policies
+=============================
+
+Try to keep transactionality out of it. The core is careful to
+avoid asking about anything that is migrating. This is a pain, but
+makes it easier to write the policies.
+
+Mappings are loaded into the policy at construction time.
+
+Every bio that is mapped by the target is referred to the policy.
+The policy can return a simple HIT or MISS or issue a migration.
+
+Currently there's no way for the policy to issue background work,
+e.g. to start writing back dirty blocks that are going to be evicted
+soon.
+
+Because we map bios, rather than requests, it's easy for the policy
+to get fooled by many small bios. For this reason the core target
+issues periodic ticks to the policy. It's suggested that the policy
+doesn't update states (eg, hit counts) for a block more than once
+for each tick. The core ticks by watching bios complete, and so
+tries to see when the io scheduler has let the ios run.
+
+
+Overview of supplied cache replacement policies
+===============================================
+
+multiqueue
+----------
+
+This policy is the default.
+
+The multiqueue policy has two sets of 16 queues: one set for entries
+waiting for the cache and another one for those in the cache.
+Cache entries in the queues are aged based on logical time. Entry into
+the cache is based on variable thresholds and queue selection is based
+on hit count on entry. The policy aims to take different cache miss
+costs into account and to adjust to varying load patterns automatically.
+
+Message and constructor argument pairs are:
+ 'sequential_threshold <#nr_sequential_ios>' and
+ 'random_threshold <#nr_random_ios>'.
+
+The sequential threshold indicates the number of contiguous I/Os
+required before a stream is treated as sequential. The random threshold
+is the number of intervening non-contiguous I/Os that must be seen
+before the stream is treated as random again.
+
+The sequential and random thresholds default to 512 and 4 respectively.
+
+Large, sequential ios are probably better left on the origin device
+since spindles tend to have good bandwidth. The io_tracker counts
+contiguous I/Os to try to spot when the io is in one of these sequential
+modes.
+
+cleaner
+-------
+
+The cleaner writes back all dirty blocks in a cache to decommission it.
+
+Examples
+========
+
+The syntax for a table is:
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+ <#feature_args> [<feature arg>]*
+ <policy> <#policy_args> [<policy arg>]*
+
+The syntax to send a message using the dmsetup command is:
+ dmsetup message <mapped device> 0 sequential_threshold 1024
+ dmsetup message <mapped device> 0 random_threshold 8
+
+Using dmsetup:
+ dmsetup create blah --table "0 268435456 cache /dev/sdb /dev/sdc \
+ /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
+  creates a 128GB mapped device named 'blah' with the
+ sequential threshold set to 1024 and the random_threshold set to 8.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
new file mode 100644
index 000000000000..f50470abe241
--- /dev/null
+++ b/Documentation/device-mapper/cache.txt
@@ -0,0 +1,243 @@
+Introduction
+============
+
+dm-cache is a device mapper target written by Joe Thornber, Heinz
+Mauelshagen, and Mike Snitzer.
+
+It aims to improve performance of a block device (eg, a spindle) by
+dynamically migrating some of its data to a faster, smaller device
+(eg, an SSD).
+
+This device-mapper solution allows us to insert this caching at
+different levels of the dm stack, for instance above the data device for
+a thin-provisioning pool. Caching solutions that are integrated more
+closely with the virtual memory system should give better performance.
+
+The target reuses the metadata library used in the thin-provisioning
+library.
+
+The decision as to what data to migrate and when is left to a plug-in
+policy module. Several of these have been written as we experiment,
+and we hope other people will contribute others for specific io
+scenarios (eg. a vm image server).
+
+Glossary
+========
+
+ Migration - Movement of the primary copy of a logical block from one
+ device to the other.
+ Promotion - Migration from slow device to fast device.
+ Demotion - Migration from fast device to slow device.
+
+The origin device always contains a copy of the logical block, which
+may be out of date or kept in sync with the copy on the cache device
+(depending on policy).
+
+Design
+======
+
+Sub-devices
+-----------
+
+The target is constructed by passing three devices to it (along with
+other parameters detailed later):
+
+1. An origin device - the big, slow one.
+
+2. A cache device - the small, fast one.
+
+3. A small metadata device - records which blocks are in the cache,
+ which are dirty, and extra hints for use by the policy object.
+ This information could be put on the cache device, but having it
+ separate allows the volume manager to configure it differently,
+ e.g. as a mirror for extra robustness.
+
+Fixed block size
+----------------
+
+The origin is divided up into blocks of a fixed size. This block size
+is configurable when you first create the cache. Typically we've been
+using block sizes of 256k - 1024k.
+
+Having a fixed block size simplifies the target a lot. But it is
+something of a compromise. For instance, a small part of a block may be
+getting hit a lot, yet the whole block will be promoted to the cache.
+So large block sizes are bad because they waste cache space. And small
+block sizes are bad because they increase the amount of metadata (both
+in core and on disk).
+
+Writeback/writethrough
+----------------------
+
+The cache has two modes, writeback and writethrough.
+
+If writeback, the default, is selected then a write to a block that is
+cached will go only to the cache and the block will be marked dirty in
+the metadata.
+
+If writethrough is selected then a write to a cached block will not
+complete until it has hit both the origin and cache devices. Clean
+blocks should remain clean.
+
+A simple cleaner policy is provided, which will clean (write back) all
+dirty blocks in a cache. Useful for decommissioning a cache.
+
+Migration throttling
+--------------------
+
+Migrating data between the origin and cache device uses bandwidth.
+The user can set a throttle to prevent more than a certain amount of
+migration occurring at any one time. Currently we're not taking any
+account of normal io traffic going to the devices. More work needs
+doing here to avoid migrating during those peak io moments.
+
+For the time being, a message "migration_threshold <#sectors>"
+can be used to set the maximum number of sectors being migrated,
+the default being 204800 sectors (or 100MB).
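+
+As a sketch, using the dmsetup message syntax shown in the Messages
+section below and assuming a cache device named 'my_cache' (the name
+used in the examples at the end of this document), the threshold could
+be set back to its default of 204800 sectors with:
+
+   dmsetup message my_cache 0 migration_threshold 204800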
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
+written. If no such requests are made then commits will occur every
+second. This means the cache behaves like a physical disk that has a
+write cache (the same is true of the thin-provisioning target). If
+power is lost you may lose some recent writes. The metadata should
+always be consistent in spite of any crash.
+
+The 'dirty' state for a cache block changes far too frequently for us
+to keep updating it on the fly. So we treat it as a hint. In normal
+operation it will be written when the dm device is suspended. If the
+system crashes all cache blocks will be assumed dirty when restarted.
+
+Per-block policy hints
+----------------------
+
+Policy plug-ins can store a chunk of data per cache block. It's up to
+the policy how big this chunk is, but it should be kept small. Like the
+dirty flags this data is lost if there's a crash so a safe fallback
+value should always be possible.
+
+For instance, the 'mq' policy, which is currently the default policy,
+uses this facility to store the hit count of the cache blocks. If
+there's a crash this information will be lost, which means the cache
+may be less efficient until those hit counts are regenerated.
+
+Policy hints affect performance, not correctness.
+
+Policy messaging
+----------------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these. Device-mapper
+messages are used. Refer to cache-policies.txt.
+
+Discard bitset resolution
+-------------------------
+
+We can avoid copying data during migration if we know the block has
+been discarded. A prime example of this is when mkfs discards the
+whole block device. We store a bitset tracking the discard state of
+blocks. However, we allow this bitset to have a different block size
+from the cache blocks. This is because we need to track the discard
+state for all of the origin device (compare with the dirty bitset
+which is just for the smaller cache device).
+
+Target interface
+================
+
+Constructor
+-----------
+
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+ <#feature args> [<feature arg>]*
+ <policy> <#policy args> [policy args]*
+
+ metadata dev : fast device holding the persistent metadata
+ cache dev : fast device holding cached data blocks
+ origin dev : slow device holding original data blocks
+ block size : cache unit size in sectors
+
+ #feature args : number of feature arguments passed
+ feature args : writethrough. (The default is writeback.)
+
+ policy : the replacement policy to use
+ #policy args : an even number of arguments corresponding to
+ key/value pairs passed to the policy
+ policy args : key/value pairs passed to the policy
+ E.g. 'sequential_threshold 1024'
+ See cache-policies.txt for details.
+
+Optional feature arguments are:
+ writethrough : write through caching that prohibits cache block
+ content from being different from origin block content.
+ Without this argument, the default behaviour is to write
+ back cache block contents later for performance reasons,
+ so they may differ from the corresponding origin blocks.
+
+A policy called 'default' is always registered. This is an alias for
+the policy we currently think is giving the best all-round performance.
+
+As the default policy could vary between kernels, if you are relying on
+the characteristics of a specific policy, always request it by name.
+
+Status
+------
+
+<#used metadata blocks>/<#total metadata blocks> <#read hits> <#read misses>
+<#write hits> <#write misses> <#demotions> <#promotions> <#blocks in cache>
+<#dirty> <#features> <features>* <#core args> <core args>* <#policy args>
+<policy args>*
+
+#used metadata blocks : Number of metadata blocks used
+#total metadata blocks : Total number of metadata blocks
+#read hits : Number of times a READ bio has been mapped
+ to the cache
+#read misses : Number of times a READ bio has been mapped
+ to the origin
+#write hits : Number of times a WRITE bio has been mapped
+ to the cache
+#write misses : Number of times a WRITE bio has been
+ mapped to the origin
+#demotions : Number of times a block has been removed
+ from the cache
+#promotions : Number of times a block has been moved to
+ the cache
+#blocks in cache : Number of blocks resident in the cache
+#dirty : Number of blocks in the cache that differ
+ from the origin
+#feature args : Number of feature args to follow
+feature args : 'writethrough' (optional)
+#core args : Number of core arguments (must be even)
+core args : Key/value pairs for tuning the core
+ e.g. migration_threshold
+#policy args : Number of policy arguments to follow (must be even)
+policy args : Key/value pairs
+                         e.g. 'sequential_threshold 1024'
+
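+The status line can be read back with the dmsetup tool; for example,
+assuming a cache device named 'my_cache' (the name used in the examples
+below):
+
+   dmsetup status my_cache
+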
+Messages
+--------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these. Device-mapper
+messages are used. (A sysfs interface would also be possible.)
+
+The message format is:
+
+ <key> <value>
+
+E.g.
+ dmsetup message my_cache 0 sequential_threshold 1024
+
+Examples
+========
+
+The test suite can be found here:
+
+https://github.com/jthornber/thinp-test-suite
+
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+ /dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0'
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+ /dev/mapper/ssd /dev/mapper/origin 1024 1 writeback \
+ mq 4 sequential_threshold 1024 random_threshold 8'
diff --git a/Documentation/devicetree/bindings/arc/interrupts.txt b/Documentation/devicetree/bindings/arc/interrupts.txt
new file mode 100644
index 000000000000..9a5d562435ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/interrupts.txt
@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+ The core interrupt controller provides 32 prioritised interrupts (2 levels)
+  to the ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+  The single-cell "interrupts" property of a device specifies the IRQ
+  number, between 0 and 31.
+
+  The intc is accessed via the special ARC AUX register interface, hence the
+  "reg" property is not specified.
+
+Example:
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/armadeus.txt b/Documentation/devicetree/bindings/arm/armadeus.txt
new file mode 100644
index 000000000000..9821283ff516
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armadeus.txt
@@ -0,0 +1,6 @@
+Armadeus i.MX Platforms Device Tree Bindings
+-----------------------------------------------
+
+APF51: i.MX51 based module.
+Required root node properties:
+ - compatible = "armadeus,imx51-apf51", "fsl,imx51";
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index f79818711e83..e935d7d4ac43 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -5,6 +5,14 @@ i.MX23 Evaluation Kit
Required root node properties:
- compatible = "fsl,imx23-evk", "fsl,imx23";
+i.MX25 Product Development Kit
+Required root node properties:
+ - compatible = "fsl,imx25-pdk", "fsl,imx25";
+
+i.MX27 Product Development Kit
+Required root node properties:
+ - compatible = "fsl,imx27-pdk", "fsl,imx27";
+
i.MX28 Evaluation Kit
Required root node properties:
- compatible = "fsl,imx28-evk", "fsl,imx28";
diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.txt b/Documentation/devicetree/bindings/clock/imx5-clock.txt
index 04ad47876be0..2a0c904c46ae 100644
--- a/Documentation/devicetree/bindings/clock/imx5-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx5-clock.txt
@@ -171,6 +171,7 @@ clocks and IDs.
can_sel 156
can1_serial_gate 157
can1_ipg_gate 158
+ owire_gate 159
Examples (for mx53):
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
index f73fdf595568..969b38e06ad3 100644
--- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
@@ -203,6 +203,8 @@ clocks and IDs.
pcie_ref 188
pcie_ref_125m 189
enet_ref 190
+ usbphy1_gate 191
+ usbphy2_gate 192
Examples:
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 6d21c0288e9e..e4022776ac6e 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -113,7 +113,7 @@ PROPERTIES
EXAMPLE
crypto@300000 {
compatible = "fsl,sec-v4.0";
- fsl,sec-era = <0x2>;
+ fsl,sec-era = <2>;
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
index 36e27d54260b..267565894db9 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -10,7 +10,11 @@ Required properties:
- interrupts: interrupt number to the cpu.
Optional properties:
-- dma-coherent : Present if dma operations are coherent
+ - dma-coherent : Present if dma operations are coherent
+ - #dma-cells: must be <1>. Used to represent the number of integer
+   cells in the dmas property of the client device.
+ - dma-channels: contains the total number of DMA channels supported by the DMAC
+ - dma-requests: contains the total number of DMA requests supported by the DMAC
Example:
@@ -18,16 +22,23 @@ Example:
compatible = "arm,pl330", "arm,primecell";
reg = <0x12680000 0x1000>;
interrupts = <99>;
+ #dma-cells = <1>;
+		dma-channels = <8>;
+		dma-requests = <32>;
};
Client drivers (device nodes requiring dma transfers from dev-to-mem or
-mem-to-dev) should specify the DMA channel numbers using a two-value pair
+mem-to-dev) should specify the DMA channel numbers and dma channel names
as shown below.
[property name] = <[phandle of the dma controller] [dma request id]>;
+	[property name] = "[dma channel name]";
where 'dma request id' is the dma request number which is connected
- to the client controller. The 'property name' is recommended to be
- of the form <name>-dma-channel.
+	to the client controller. The property names 'dmas' and 'dma-names'
+	are as required by the generic dma device tree binding helpers. The dma
+	names correspond 1:1 with the dma request ids in the dmas property.
- Example: tx-dma-channel = <&pdma0 12>;
+ Example: dmas = <&pdma0 12
+ &pdma1 11>;
+ dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/dma/dma.txt b/Documentation/devicetree/bindings/dma/dma.txt
new file mode 100644
index 000000000000..8f504e6bae14
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/dma.txt
@@ -0,0 +1,81 @@
+* Generic DMA Controller and DMA request bindings
+
+Generic binding to provide a way for a driver using DMA Engine to retrieve the
+DMA request or channel information that goes from a hardware device to a DMA
+controller.
+
+
+* DMA controller
+
+Required property:
+- #dma-cells: Must be at least 1. Used to provide DMA controller
+ specific information. See DMA client binding below for
+ more details.
+
+Optional properties:
+- dma-channels: Number of DMA channels supported by the controller.
+- dma-requests: Number of DMA requests signals supported by the
+ controller.
+
+Example:
+
+ dma: dma@48000000 {
+ compatible = "ti,omap-sdma";
+ reg = <0x48000000 0x1000>;
+ interrupts = <0 12 0x4
+ 0 13 0x4
+ 0 14 0x4
+ 0 15 0x4>;
+ #dma-cells = <1>;
+ dma-channels = <32>;
+ dma-requests = <127>;
+ };
+
+
+* DMA client
+
+Client drivers should specify the DMA property using a phandle to the controller
+followed by DMA controller specific data.
+
+Required property:
+- dmas: List of one or more DMA specifiers, each consisting of
+ - A phandle pointing to DMA controller node
+ - A number of integer cells, as determined by the
+ #dma-cells property in the node referenced by phandle
+ containing DMA controller specific information. This
+ typically contains a DMA request line number or a
+		channel number, but can contain any data that is
+		required for configuring a channel.
+- dma-names: Contains one identifier string for each DMA specifier in
+ the dmas property. The specific strings that can be used
+ are defined in the binding of the DMA client device.
+ Multiple DMA specifiers can be used to represent
+ alternatives and in this case the dma-names for those
+ DMA specifiers must be identical (see examples).
+
+Examples:
+
+1. A device with one DMA read channel, one DMA write channel:
+
+ i2c1: i2c@1 {
+ ...
+ dmas = <&dma 2 /* read channel */
+ &dma 3>; /* write channel */
+ dma-names = "rx", "tx";
+ ...
+ };
+
+2. A single read-write channel with three alternative DMA controllers:
+
+ dmas = <&dma1 5
+ &dma2 7
+ &dma3 2>;
+ dma-names = "rx-tx", "rx-tx", "rx-tx";
+
+3. A device with three channels, one of which has two alternatives:
+
+ dmas = <&dma1 2 /* read channel */
+ &dma1 3 /* write channel */
+ &dma2 0 /* error read */
+ &dma3 0>; /* alternative error read */
+ dma-names = "rx", "tx", "error", "error";
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c0d85dbcada5..5bb3dfb6f1d8 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -6,6 +6,26 @@ Required properties:
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupt: Should contain the DMAC interrupt number
+- nr_channels: Number of channels supported by hardware
+- is_private: The device channels should be marked as private and not used by
+  the general purpose DMA channel allocator. False if not passed.
+- chan_allocation_order: order of allocation of channel, 0 (default): ascending,
+ 1: descending
+- chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
+ increase from chan n->0
+- block_size: Maximum block size supported by the controller
+- nr_masters: Number of AHB masters supported by the controller
+- data_width: Maximum data width supported by hardware per AHB master
+ (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+- slave_info:
+ - bus_id: name of this device channel, not just a device name since
+ devices may have more than one channel e.g. "foo_tx". For using the
+ dw_generic_filter(), slave drivers must pass exactly this string as
+ param to filter function.
+ - cfg_hi: Platform-specific initializer for the CFG_HI register
+ - cfg_lo: Platform-specific initializer for the CFG_LO register
+ - src_master: src master for transfers on allocated channel.
+ - dst_master: dest master for transfers on allocated channel.
Example:
@@ -14,4 +34,28 @@ Example:
reg = <0xfc000000 0x1000>;
interrupt-parent = <&vic1>;
interrupts = <12>;
+
+ nr_channels = <8>;
+ chan_allocation_order = <1>;
+ chan_priority = <1>;
+ block_size = <0xfff>;
+ nr_masters = <2>;
+ data_width = <3 3 0 0>;
+
+ slave_info {
+ uart0-tx {
+ bus_id = "uart0-tx";
+ cfg_hi = <0x4000>; /* 0x8 << 11 */
+ cfg_lo = <0>;
+ src_master = <0>;
+ dst_master = <1>;
+ };
+ spi0-tx {
+ bus_id = "spi0-tx";
+ cfg_hi = <0x2000>; /* 0x4 << 11 */
+ cfg_lo = <0>;
+ src_master = <0>;
+ dst_master = <0>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/panel.txt b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
new file mode 100644
index 000000000000..9301c330d1a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
@@ -0,0 +1,59 @@
+Device-Tree bindings for tilcdc DRM generic panel output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,panel".
+ - panel-info: configuration info to configure LCDC correctly for the panel
+ - ac-bias: AC Bias Pin Frequency
+ - ac-bias-intrpt: AC Bias Pin Transitions per Interrupt
+ - dma-burst-sz: DMA burst size
+ - bpp: Bits per pixel
+ - fdd: FIFO DMA Request Delay
+ - sync-edge: Horizontal and Vertical Sync Edge: 0=rising 1=falling
+ - sync-ctrl: Horizontal and Vertical Sync Control: 0=ignore
+ - raster-order: Raster Data Order Select: 1=Most-to-least 0=Least-to-most
+ - fifo-th: DMA FIFO threshold
+ - display-timings: typical videomode of lcd panel. Multiple video modes
+ can be listed if the panel supports multiple timings, but the 'native-mode'
+ should be the preferred/default resolution. Refer to
+ Documentation/devicetree/bindings/video/display-timing.txt for display
+ timing binding details.
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+   muxing properly for pins that connect to the LCD panel
+
+Example:
+
+ /* Settings for CDTech_S035Q01 / LCD3 cape: */
+ lcd3 {
+ compatible = "ti,tilcdc,panel";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bone_lcd3_cape_lcd_pins>;
+ panel-info {
+ ac-bias = <255>;
+ ac-bias-intrpt = <0>;
+ dma-burst-sz = <16>;
+ bpp = <16>;
+ fdd = <0x80>;
+ sync-edge = <0>;
+ sync-ctrl = <1>;
+ raster-order = <0>;
+ fifo-th = <0>;
+ };
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 320x240 {
+ hactive = <320>;
+ vactive = <240>;
+ hback-porch = <21>;
+ hfront-porch = <58>;
+ hsync-len = <47>;
+ vback-porch = <11>;
+ vfront-porch = <23>;
+ vsync-len = <2>;
+ clock-frequency = <8000000>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/slave.txt b/Documentation/devicetree/bindings/drm/tilcdc/slave.txt
new file mode 100644
index 000000000000..3d2c52460dca
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/slave.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for tilcdc DRM encoder slave output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,slave".
+ - i2c: the phandle for the i2c device the encoder slave is connected to
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+   muxing properly for pins that connect to the encoder slave device
+
+Example:
+
+ hdmi {
+ compatible = "ti,tilcdc,slave";
+ i2c = <&i2c0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt b/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt
new file mode 100644
index 000000000000..a58ae7756fc6
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt
@@ -0,0 +1,21 @@
+Device-Tree bindings for tilcdc DRM TFP410 output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,tfp410".
+ - i2c: the phandle for the i2c device to use for DDC
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+ muxing properly for pins that connect to TFP410 device
+ - powerdn-gpio: the powerdown GPIO, pulled low to power down the
+ TFP410 device (for DPMS_OFF)
+
+Example:
+
+ dvicape {
+ compatible = "ti,tilcdc,tfp410";
+ i2c = <&i2c2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bone_dvi_cape_dvi_00A1_pins>;
+ powerdn-gpio = <&gpio2 31 0>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
new file mode 100644
index 000000000000..e5f130159ae1
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
@@ -0,0 +1,21 @@
+Device-Tree bindings for tilcdc DRM driver
+
+Required properties:
+ - compatible: value should be "ti,am33xx-tilcdc".
+ - interrupts: the interrupt number
+ - reg: base address and size of the LCDC device
+
+Recommended properties:
+ - interrupt-parent: the phandle for the interrupt controller that
+ services interrupts for this device.
+ - ti,hwmods: Name of the hwmod associated to the LCDC
+
+Example:
+
+ fb: fb@4830e000 {
+ compatible = "ti,am33xx-tilcdc";
+ reg = <0x4830e000 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+ ti,hwmods = "lcdc";
+ };
diff --git a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
new file mode 100644
index 000000000000..e9de3756752b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
@@ -0,0 +1,20 @@
+Broadcom BCM2835 I2C controller
+
+Required properties:
+- compatible : Should be "brcm,bcm2835-i2c".
+- reg: Should contain register location and length.
+- interrupts: Should contain interrupt.
+- clocks : The clock feeding the I2C controller.
+
+Recommended properties:
+- clock-frequency : desired I2C bus clock frequency in Hz.
+
+Example:
+
+i2c@20205000 {
+ compatible = "brcm,bcm2835-i2c";
+ reg = <0x7e205000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clk_i2c>;
+ clock-frequency = <100000>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
index e9611ace8792..f98d4c5b5cca 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
@@ -8,6 +8,8 @@ Required properties:
(b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
(c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
inside HDMIPHY block found on several samsung SoCs
+ (d) "samsung, exynos5440-i2c", for s3c2440-like i2c used
+ on EXYNOS5440 which does not need GPIO configuration.
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm.txt b/Documentation/devicetree/bindings/leds/leds-pwm.txt
new file mode 100644
index 000000000000..7297107cf832
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-pwm.txt
@@ -0,0 +1,48 @@
+LED connected to PWM
+
+Required properties:
+- compatible : should be "pwm-leds".
+
+Each LED is represented as a sub-node of the pwm-leds device. Each
+node's name represents the name of the corresponding LED.
+
+LED sub-node properties:
+- pwms : PWM property to point to the PWM device (phandle)/port (id) and to
+ specify the period time to be used: <&phandle id period_ns>;
+- pwm-names : (optional) Name to be used by the PWM subsystem for the PWM device
+ For the pwms and pwm-names property please refer to:
+ Documentation/devicetree/bindings/pwm/pwm.txt
+- max-brightness : Maximum brightness possible for the LED
+- label : (optional)
+ see Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger : (optional)
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+twl_pwm: pwm {
+ /* provides two PWMs (id 0, 1 for PWM1 and PWM2) */
+ compatible = "ti,twl6030-pwm";
+ #pwm-cells = <2>;
+};
+
+twl_pwmled: pwmled {
+	/* provides one PWM (id 0 for Charging indicator LED) */
+ compatible = "ti,twl6030-pwmled";
+ #pwm-cells = <2>;
+};
+
+pwmleds {
+ compatible = "pwm-leds";
+ kpad {
+ label = "omap4::keypad";
+ pwms = <&twl_pwm 0 7812500>;
+ max-brightness = <127>;
+ };
+
+ charging {
+ label = "omap4:green:chrg";
+ pwms = <&twl_pwmled 0 7812500>;
+ max-brightness = <255>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/leds/tca6507.txt b/Documentation/devicetree/bindings/leds/tca6507.txt
new file mode 100644
index 000000000000..2b6693b972fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/tca6507.txt
@@ -0,0 +1,33 @@
+LEDs connected to tca6507
+
+Required properties:
+- compatible : should be : "ti,tca6507".
+
+Each led is represented as a sub-node of the ti,tca6507 device.
+
+LED sub-node properties:
+- label : (optional) see Documentation/devicetree/bindings/leds/common.txt
+- reg : number of LED line (could be from 0 to 6)
+- linux,default-trigger : (optional)
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Examples:
+
+tca6507@45 {
+ compatible = "ti,tca6507";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x45>;
+
+ led0: red-aux@0 {
+ label = "red:aux";
+ reg = <0x0>;
+ };
+
+ led1: green-aux@1 {
+ label = "green:aux";
+ reg = <0x5>;
+ linux,default-trigger = "default-on";
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt b/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt
new file mode 100644
index 000000000000..56e726ef4bf2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt
@@ -0,0 +1,16 @@
+Device-Tree bindings for GPIO IR receiver
+
+Required properties:
+ - compatible: should be "gpio-ir-receiver".
+ - gpios: specifies GPIO used for IR signal reception.
+
+Optional properties:
+ - linux,rc-map-name: Linux specific remote control map name.
+
+Example node:
+
+ ir: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 19 1>;
+ linux,rc-map-name = "rc-rc6-mce";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/max8925.txt b/Documentation/devicetree/bindings/mfd/max8925.txt
new file mode 100644
index 000000000000..4f0dc6638e5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max8925.txt
@@ -0,0 +1,64 @@
+* Maxim max8925 Power Management IC
+
+Required parent device properties:
+- compatible : "maxim,max8925"
+- reg : the I2C slave address for the max8925 chip
+- interrupts : IRQ line for the max8925 chip
+- interrupt-controller: describes the max8925 as an interrupt
+ controller (has its own domain)
+- #interrupt-cells : should be 1.
+ - The cell is the max8925 local IRQ number
+
+Optional parent device properties:
+- maxim,tsc-irq: there are 2 IRQ lines for max8925, one is indicated in
+ interrupts property, the other is indicated here.
+
+max8925 consists of a large and varied group of sub-devices:
+
+Device Supply Names Description
+------ ------------ -----------
+max8925-onkey : : On key
+max8925-rtc : : RTC
+max8925-regulator : : Regulators
+max8925-backlight : : Backlight
+max8925-touch : : Touchscreen
+max8925-power : : Charger
+
+Example:
+
+ pmic: max8925@3c {
+ compatible = "maxim,max8925";
+ reg = <0x3c>;
+ interrupts = <1>;
+ interrupt-parent = <&intcmux4>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ maxim,tsc-irq = <0>;
+
+ regulators {
+ SDV1 {
+ regulator-min-microvolt = <637500>;
+ regulator-max-microvolt = <1425000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO1 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ };
+ backlight {
+ maxim,max8925-dual-string = <0>;
+ };
+ charger {
+ batt-detect = <0>;
+ topoff-threshold = <1>;
+ fast-charge = <7>;
+ no-temp-support = <0>;
+ no-insert-detect = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
new file mode 100644
index 000000000000..13aa4b62c62a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -0,0 +1,47 @@
+MIPS CPU interrupt controller
+
+On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU
+IRQs from a devicetree file and create an irq_domain for the IRQ controller.
+
+With the irq_domain in place we can describe how the 8 IRQs are wired to the
+platform's internal interrupt controller cascade.
+
+Below is an example of a platform describing the cascade inside the devicetree
+and the code used to load it inside arch_init_irq().
+
+Required properties:
+- compatible : Should be "mti,cpu-interrupt-controller"
+
+Example devicetree:
+ cpu-irq: cpu-irq@0 {
+ #address-cells = <0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu-irq>;
+ interrupts = <2>;
+ };
+
+
+Example platform irq.c:
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt
new file mode 100644
index 000000000000..59476fbdbfa1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt
@@ -0,0 +1,18 @@
+Broadcom BCM2835 SDHCI controller
+
+This file documents differences between the core properties described
+by mmc.txt and the properties that represent the BCM2835 controller.
+
+Required properties:
+- compatible : Should be "brcm,bcm2835-sdhci".
+- clocks : The clock feeding the SDHCI controller.
+
+Example:
+
+sdhci: sdhci {
+ compatible = "brcm,bcm2835-sdhci";
+ reg = <0x7e300000 0x100>;
+ interrupts = <2 30>;
+ clocks = <&clk_mmc>;
+ bus-width = <4>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index a591c6741d75..85aada2263d5 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -6,23 +6,45 @@ Interpreted by the OF core:
- reg: Registers location and length.
- interrupts: Interrupts used by the MMC controller.
-Required properties:
-- bus-width: Number of data lines, can be <1>, <4>, or <8>
-
Card detection:
-If no property below is supplied, standard SDHCI card detect is used.
+If no property below is supplied, host native card detect is used.
Only one of the properties in this section should be supplied:
- broken-cd: There is no card detection available; polling must be used.
- cd-gpios: Specify GPIOs for card detection, see gpio binding
- non-removable: non-removable slot (like eMMC); assume always present.
Optional properties:
+- bus-width: Number of data lines, can be <1>, <4>, or <8>. The default
+ will be <1> if the property is absent.
- wp-gpios: Specify GPIOs for write protection, see gpio binding
-- cd-inverted: when present, polarity on the cd gpio line is inverted
-- wp-inverted: when present, polarity on the wp gpio line is inverted
+- cd-inverted: when present, polarity on the CD line is inverted. See the note
+ below for the case, when a GPIO is used for the CD line
+- wp-inverted: when present, polarity on the WP line is inverted. See the note
+ below for the case, when a GPIO is used for the WP line
- max-frequency: maximum operating clock frequency
- no-1-8-v: when present, denotes that 1.8v card voltage is not supported on
this system, even if the controller claims it is.
+- cap-sd-highspeed: SD high-speed timing is supported
+- cap-mmc-highspeed: MMC high-speed timing is supported
+- cap-power-off-card: powering off the card is safe
+- cap-sdio-irq: enable SDIO IRQ signalling on this interface
+
+*NOTE* on CD and WP polarity. To use line polarity properties that are common
+to all SD/MMC host controllers, we have to fix the meaning of the "normal" and
+"inverted" line levels. We choose to follow the SDHCI standard, which specifies
+both those lines as "active low." Therefore, using the "cd-inverted" property
+means that the CD line is active high, i.e. it is high when a card is inserted.
+Similar logic applies to the "wp-inverted" property.
+
+CD and WP lines can be implemented on the hardware in one of two ways: as GPIOs,
+specified in the cd-gpios and wp-gpios properties, or as dedicated pins. The
+polarity of dedicated pins can be specified using the *-inverted properties.
+GPIO polarity can also be specified using the OF_GPIO_ACTIVE_LOW flag, which
+creates an ambiguity in the latter case. We choose to use XOR logic for GPIO CD
+and WP lines. This means the two properties are "superimposed": for example,
+leaving the OF_GPIO_ACTIVE_LOW flag clear and specifying the respective
+*-inverted property results in a double inversion and actually means the
+"normal" line polarity is in effect.
Optional SDIO properties:
- keep-power-in-suspend: Preserves card power during a suspend/resume cycle
diff --git a/Documentation/devicetree/bindings/mmc/orion-sdio.txt b/Documentation/devicetree/bindings/mmc/orion-sdio.txt
new file mode 100644
index 000000000000..84f0ebd67a13
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/orion-sdio.txt
@@ -0,0 +1,17 @@
+* Marvell orion-sdio controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the orion-sdio driver.
+
+- compatible: Should be "marvell,orion-sdio"
+- clocks: reference to the clock of the SDIO interface
+
+Example:
+
+ mvsdio@d00d4000 {
+ compatible = "marvell,orion-sdio";
+ reg = <0xd00d4000 0x200>;
+ interrupts = <54>;
+ clocks = <&gateclk 17>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
index 06cd32d08052..726fd2122a13 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
@@ -26,8 +26,16 @@ Required Properties:
* bus-width: as documented in mmc core bindings.
* wp-gpios: specifies the write protect gpio line. The format of the
- gpio specifier depends on the gpio controller. If the write-protect
- line is not available, this property is optional.
+ gpio specifier depends on the gpio controller. If a GPIO is not used
+ for write-protect, this property is optional.
+
+ * disable-wp: If the wp-gpios property isn't present then (by default)
+ we'd assume that the write protect is hooked up directly to the
+ controller's special purpose write protect line (accessible via
+ the WRTPRT register). However, it's possible that we simply don't
+ want write protect. In that case specify 'disable-wp'.
+ NOTE: This property is not required for slots known to always
+ connect to eMMC or SDIO cards.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
new file mode 100644
index 000000000000..df204e18e030
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -0,0 +1,20 @@
+* Toshiba Mobile IO SD/MMC controller
+
+The tmio-mmc driver doesn't probe its devices actively; instead, its binding to
+devices is managed by either MFD drivers or by the sh_mobile_sdhi platform
+driver. Those drivers supply the tmio-mmc driver with platform data that either
+describes hardware capabilities known to them, or is obtained by them from
+their own platform data or from their DT information. In the latter case all
+compulsory and any optional properties, common to all SD/MMC drivers, as
+described in mmc.txt, can be used. Additionally, the following tmio_mmc-specific
+optional bindings can be used.
+
+Optional properties:
+- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
+
+When used with Renesas SDHI hardware, the following compatibility strings
+configure various model-specific properties:
+
+"renesas,sh7372-sdhi": (default) compatible with SH7372
+"renesas,r8a7740-sdhi": compatible with R8A7740: certain MMC/SD commands have to
+ wait for the interface to become idle.
diff --git a/Documentation/devicetree/bindings/mtd/elm.txt b/Documentation/devicetree/bindings/mtd/elm.txt
new file mode 100644
index 000000000000..8c1528c421d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/elm.txt
@@ -0,0 +1,16 @@
+Error location module
+
+Required properties:
+- compatible: Must be "ti,am33xx-elm"
+- reg: physical base address and size of the registers map.
+- interrupts: Interrupt number for the elm.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated to the elm
+
+Example:
+elm: elm@0 {
+ compatible = "ti,am3352-elm";
+ reg = <0x48080000 0x2000>;
+ interrupts = <4>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index dab7847fc800..61c5ec850f2f 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -26,6 +26,9 @@ file systems on embedded devices.
- linux,mtd-name: allow to specify the mtd name for retro capability with
physmap-flash drivers as boot loader pass the mtd partition via the old
device name physmap-flash.
+ - use-advanced-sector-protection: boolean to enable support for the
+ advanced sector protection (Spansion: PPB - Persistent Protection
+ Bits) locking.
For JEDEC compatible devices, the following additional properties
are defined:
diff --git a/Documentation/devicetree/bindings/power_supply/max8925_batter.txt b/Documentation/devicetree/bindings/power_supply/max8925_batter.txt
new file mode 100644
index 000000000000..d7e3e0c0f71d
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/max8925_batter.txt
@@ -0,0 +1,18 @@
+max8925-battery bindings
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Optional properties:
+ - batt-detect: whether battery detection is supported
+ - topoff-threshold: set charging current in topoff mode
+ - fast-charge: set charging current in fast mode
+ - no-temp-support: whether temperature protection detection is supported
+ - no-insert-detect: whether insertion detection is supported
+
+Example:
+ charger {
+ batt-detect = <0>;
+ topoff-threshold = <1>;
+ fast-charge = <7>;
+ no-temp-support = <0>;
+ no-insert-detect = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
new file mode 100644
index 000000000000..de0eaed86651
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
@@ -0,0 +1,18 @@
+Atmel TCB PWM controller
+
+Required properties:
+- compatible: should be "atmel,tcb-pwm"
+- #pwm-cells: Should be 3. The first cell specifies the per-chip index
+ of the PWM to use, the second cell is the period in nanoseconds and
+ bit 0 in the third cell is used to encode the polarity of PWM output.
+ Set bit 0 of the third cell in PWM specifier to 1 for inverse polarity &
+ set to 0 for normal polarity.
+- tc-block: The Timer Counter block to use as a PWM chip.
+
+Example:
+
+pwm {
+ compatible = "atmel,tcb-pwm";
+ #pwm-cells = <3>;
+ tc-block = <1>;
+};
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
index bcc63678a9a5..d21d82d29855 100644
--- a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
@@ -3,14 +3,17 @@ VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
Required properties:
- compatible: should be "via,vt8500-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
+ First cell specifies the per-chip index of the PWM to use, the second
+ cell is the period in nanoseconds and bit 0 in the third cell is used to
+ encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
+ to 1 for inverse polarity & set to 0 for normal polarity.
- clocks: phandle to the PWM source clock
Example:
pwm1: pwm@d8220000 {
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
compatible = "via,vt8500-pwm";
reg = <0xd8220000 0x1000>;
clocks = <&clkpwm>;
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt
new file mode 100644
index 000000000000..313a60ba61d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps65090.txt
@@ -0,0 +1,122 @@
+TPS65090 regulators
+
+Required properties:
+- compatible: "ti,tps65090"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- regulators: A node that houses a sub-node for each regulator within the
+ device. Each sub-node is identified using the node's name, with valid
+ values listed below. The content of each sub-node is defined by the
+ standard binding for regulators; see regulator.txt.
+  The valid sub-node names are dcdc[1-3], fet[1-7] and ldo[1-2].
+- vsys[1-3]-supply: The input supply for DCDC[1-3] respectively.
+- infet[1-7]-supply: The input supply for FET[1-7] respectively.
+- vsys-l[1-2]-supply: The input supply for LDO[1-2] respectively.
+
+Optional properties:
+- ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
+  If the DCDCs are externally controlled then this property should be present.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+  If the DCDCs are externally controlled from a GPIO then the GPIO number
+  should be provided. If they are externally controlled and no GPIO entry is
+  given then the driver will just configure these rails as externally
+  controlled and will not provide any enable/disable APIs.
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+ tps65090@48 {
+ compatible = "ti,tps65090";
+ reg = <0x48>;
+ interrupts = <0 88 0x4>;
+
+ vsys1-supply = <&some_reg>;
+ vsys2-supply = <&some_reg>;
+ vsys3-supply = <&some_reg>;
+ infet1-supply = <&some_reg>;
+ infet2-supply = <&some_reg>;
+ infet3-supply = <&some_reg>;
+ infet4-supply = <&some_reg>;
+ infet5-supply = <&some_reg>;
+ infet6-supply = <&some_reg>;
+ infet7-supply = <&some_reg>;
+ vsys_l1-supply = <&some_reg>;
+ vsys_l2-supply = <&some_reg>;
+
+ regulators {
+ dcdc1 {
+ regulator-name = "dcdc1";
+ regulator-boot-on;
+ regulator-always-on;
+ ti,enable-ext-control;
+ dcdc-ext-control-gpios = <&gpio 10 0>;
+ };
+
+ dcdc2 {
+ regulator-name = "dcdc2";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3 {
+ regulator-name = "dcdc3";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet1 {
+ regulator-name = "fet1";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet2 {
+ regulator-name = "fet2";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet3 {
+ regulator-name = "fet3";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet4 {
+ regulator-name = "fet4";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet5 {
+ regulator-name = "fet5";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet6 {
+ regulator-name = "fet6";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ fet7 {
+ regulator-name = "fet7";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1 {
+ regulator-name = "ldo1";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2 {
+ regulator-name = "ldo2";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/lantiq_asc.txt b/Documentation/devicetree/bindings/serial/lantiq_asc.txt
new file mode 100644
index 000000000000..5b78591aaa46
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/lantiq_asc.txt
@@ -0,0 +1,16 @@
+Lantiq SoC ASC serial controller
+
+Required properties:
+- compatible : Should be "lantiq,asc"
+- reg : Address and length of the register set for the device
+- interrupts: the 3 (tx rx err) interrupt numbers. The interrupt specifier
+ depends on the interrupt-parent interrupt controller.
+
+Example:
+
+asc1: serial@E100C00 {
+ compatible = "lantiq,asc";
+ reg = <0xE100C00 0x400>;
+ interrupt-parent = <&icu0>;
+ interrupts = <112 113 114>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/dove-thermal.txt b/Documentation/devicetree/bindings/thermal/dove-thermal.txt
new file mode 100644
index 000000000000..6f474677d472
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/dove-thermal.txt
@@ -0,0 +1,18 @@
+* Dove Thermal
+
+This driver is for Dove SoCs which contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,dove-thermal"
+- reg : Address range of the thermal registers
+
+The reg property should contain two ranges. The first is for the
+three Thermal Manager registers, while the second range contains the
+Thermal Diode Control Registers.
+
+Example:
+
+ thermal@10078 {
+ compatible = "marvell,dove-thermal";
+ reg = <0xd001c 0x0c>, <0xd005c 0x08>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt b/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt
new file mode 100644
index 000000000000..8c0f5eb86da7
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt
@@ -0,0 +1,15 @@
+* Kirkwood Thermal
+
+This version is for Kirkwood 88F8262 & 88F6283 SoCs. Other Kirkwood
+SoCs don't contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,kirkwood-thermal"
+- reg : Address range of the thermal registers
+
+Example:
+
+ thermal@10078 {
+ compatible = "marvell,kirkwood-thermal";
+ reg = <0x10078 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
new file mode 100644
index 000000000000..28ef498a66e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -0,0 +1,29 @@
+* Renesas R-Car Thermal
+
+Required properties:
+- compatible : "renesas,rcar-thermal"
+- reg : Address range of the thermal registers.
+	The first reg range will be treated as the common register block
+	if the node has an "interrupts" property.
+
+Optional properties:
+
+- interrupts : the thermal interrupt; when present, interrupt support is used
+
+Example (non interrupt support):
+
+thermal@e61f0100 {
+ compatible = "renesas,rcar-thermal";
+ reg = <0xe61f0100 0x38>;
+};
+
+Example (interrupt support):
+
+thermal@e61f0000 {
+ compatible = "renesas,rcar-thermal";
+ reg = <0xe61f0000 0x14
+ 0xe61f0100 0x38
+ 0xe61f0200 0x38
+ 0xe61f0300 0x38>;
+ interrupts = <0 69 4>;
+};
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
index 64830118b013..36381129d141 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
+++ b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
@@ -1,10 +1,13 @@
-Marvell Armada 370 and Armada XP Global Timers
-----------------------------------------------
+Marvell Armada 370 and Armada XP Timers
+---------------------------------------
Required properties:
- compatible: Should be "marvell,armada-370-xp-timer"
-- interrupts: Should contain the list of Global Timer interrupts
-- reg: Should contain the base address of the Global Timer registers
+- interrupts: Should contain the list of Global Timer interrupts and
+ then local timer interrupts
+- reg: Should contain the location and length of the timer registers. The
+  first pair is for the Global Timer registers, the second pair for the
+  local/private timers.
- clocks: clock driving the timer hardware
Optional properties:
diff --git a/Documentation/devicetree/bindings/video/backlight/max8925-backlight.txt b/Documentation/devicetree/bindings/video/backlight/max8925-backlight.txt
new file mode 100644
index 000000000000..b4cffdaa4137
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/max8925-backlight.txt
@@ -0,0 +1,10 @@
+max8925-backlight bindings
+
+Optional properties:
+ - maxim,max8925-dual-string: whether dual string mode is supported
+
+Example:
+
+ backlights {
+ maxim,max8925-dual-string = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/video/display-timing.txt b/Documentation/devicetree/bindings/video/display-timing.txt
new file mode 100644
index 000000000000..150038552bc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/display-timing.txt
@@ -0,0 +1,109 @@
+display-timing bindings
+=======================
+
+display-timings node
+--------------------
+
+required properties:
+ - none
+
+optional properties:
+ - native-mode: The native mode for the display, in case multiple modes are
+   provided. When omitted, assume the first node is the native mode.
+
+timing subnode
+--------------
+
+required properties:
+ - hactive, vactive: display resolution
+ - hfront-porch, hback-porch, hsync-len: horizontal display timing parameters
+ in pixels
+ vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
+ lines
+ - clock-frequency: display clock in Hz
+
+optional properties:
+ - hsync-active: hsync pulse is active low/high/ignored
+ - vsync-active: vsync pulse is active low/high/ignored
+ - de-active: data-enable pulse is active low/high/ignored
+ - pixelclk-active: with
+ - active high = drive pixel data on rising edge/
+ sample data on falling edge
+ - active low = drive pixel data on falling edge/
+ sample data on rising edge
+ - ignored = ignored
+ - interlaced (bool): boolean to enable interlaced mode
+ - doublescan (bool): boolean to enable doublescan mode
+
+All optional properties that are not bool follow this logic:
+ <1>: high active
+ <0>: low active
+ omitted: not used on hardware
+
+There are different ways of describing the capabilities of a display. The
+devicetree representation corresponds to the one commonly found in datasheets
+for displays. If a display supports multiple signal timings, the native-mode
+can be specified.
+
+The parameters are defined as:
+
+ +----------+-------------------------------------+----------+-------+
+ | | ↑ | | |
+ | | |vback_porch | | |
+ | | ↓ | | |
+ +----------#######################################----------+-------+
+ | # ↑ # | |
+ | # | # | |
+ | hback # | # hfront | hsync |
+ | porch # | hactive # porch | len |
+ |<-------->#<-------+--------------------------->#<-------->|<----->|
+ | # | # | |
+ | # |vactive # | |
+ | # | # | |
+ | # ↓ # | |
+ +----------#######################################----------+-------+
+ | | ↑ | | |
+ | | |vfront_porch | | |
+ | | ↓ | | |
+ +----------+-------------------------------------+----------+-------+
+ | | ↑ | | |
+ | | |vsync_len | | |
+ | | ↓ | | |
+ +----------+-------------------------------------+----------+-------+
+
+Example:
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 1080p24 {
+ /* 1920x1080p24 */
+ clock-frequency = <52000000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hfront-porch = <25>;
+ hback-porch = <25>;
+ hsync-len = <25>;
+ vback-porch = <2>;
+ vfront-porch = <2>;
+ vsync-len = <2>;
+ hsync-active = <1>;
+ };
+ };
+
+Every required property also supports the use of ranges, so the commonly used
+datasheet description with minimum, typical and maximum values can be used.
+
+Example:
+
+ timing1: timing {
+ /* 1920x1080p24 */
+ clock-frequency = <148500000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hsync-len = <0 44 60>;
+ hfront-porch = <80 88 95>;
+ hback-porch = <100 148 160>;
+ vfront-porch = <0 4 6>;
+ vback-porch = <0 36 50>;
+ vsync-len = <0 5 6>;
+ };
diff --git a/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt b/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt
new file mode 100644
index 000000000000..ecf42c07684d
--- /dev/null
+++ b/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt
@@ -0,0 +1,19 @@
+* Freescale i.MX One wire bus master controller
+
+Required properties:
+- compatible : should be "fsl,imx21-owire"
+- reg : Address and length of the register set for the device
+
+Optional properties:
+- clocks : phandle of clock that supplies the module (required if platform
+ clock bindings use device tree)
+
+Example:
+
+- From imx53.dtsi:
+owire: owire@63fa4000 {
+ compatible = "fsl,imx53-owire", "fsl,imx21-owire";
+ reg = <0x63fa4000 0x4000>;
+ clocks = <&clks 159>;
+ status = "disabled";
+};
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 0188903bc9e1..4966b1be42ac 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -302,7 +302,11 @@ Access to a dma_buf from the kernel context involves three steps:
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
The vmap call can fail if there is no vmap support in the exporter, or if it
- runs out of vmalloc space. Fallback to kmap should be implemented.
+ runs out of vmalloc space. Fallback to kmap should be implemented. Note that
+ the dma-buf layer keeps a reference count for all vmap access and calls down
+ into the exporter's vmap function only when no vmapping exists, and only
+ unmaps it once. Protection against concurrent vmap/vunmap calls is provided
+ by taking the dma_buf->lock mutex.
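+
+   As an illustration, here is a minimal sketch of the suggested kmap
+   fallback. The function name and the single-page access are hypothetical,
+   and the begin/end_cpu_access bracketing around CPU access is omitted for
+   brevity:
+
+   static int example_cpu_touch(struct dma_buf *dmabuf)
+   {
+           void *vaddr;
+
+           /* Preferred path: one contiguous kernel mapping of the buffer. */
+           vaddr = dma_buf_vmap(dmabuf);
+           if (vaddr) {
+                   /* ... access the buffer through vaddr ... */
+                   dma_buf_vunmap(dmabuf, vaddr);
+                   return 0;
+           }
+
+           /* Fallback: map the buffer page by page with kmap. */
+           vaddr = dma_buf_kmap(dmabuf, 0);   /* page 0 only, for brevity */
+           if (!vaddr)
+                   return -ENOMEM;
+           /* ... access the first page through vaddr ... */
+           dma_buf_kunmap(dmabuf, 0, vaddr);
+           return 0;
+   }
+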
3. Finish access
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 32bc56b13b1c..5d5ee4c13fa6 100755
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -23,7 +23,7 @@ use IO::Handle;
@components = ( "sp8870", "sp887x", "tda10045", "tda10046",
"tda10046lifeview", "av7110", "dec2000t", "dec2540t",
- "dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
+ "dec3000s", "vp7041", "vp7049", "dibusb", "nxt2002", "nxt2004",
"or51211", "or51132_qam", "or51132_vsb", "bluebird",
"opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718",
"af9015", "ngene", "az6027", "lme2510_lg", "lme2510c_s7395",
@@ -289,6 +289,19 @@ sub vp7041 {
$outfile;
}
+sub vp7049 {
+ my $fwfile = "dvb-usb-vp7049-0.95.fw";
+ my $url = "http://ao2.it/sites/default/files/blog/2012/11/06/linux-support-digicom-digitune-s-vp7049-udtt7049/$fwfile";
+ my $hash = "5609fd295168aea88b25ff43a6f79c36";
+
+ checkstandard();
+
+ wgetfile($fwfile, $url);
+ verify($fwfile, $hash);
+
+ $fwfile;
+}
+
sub dibusb {
my $url = "http://www.linuxtv.org/downloads/firmware/dvb-usb-dibusb-5.0.0.11.fw";
my $outfile = "dvb-dibusb-5.0.0.11.fw";
@@ -677,7 +690,7 @@ sub drxk_terratec_h5 {
}
sub drxk_terratec_htc_stick {
- my $url = "http://ftp.terratec.de/Receiver/Cinergy_HTC_Stick/Updates/";
+ my $url = "http://ftp.terratec.de/Receiver/Cinergy_HTC_Stick/Updates/History/";
my $zipfile = "Cinergy_HTC_Stick_Drv_5.09.1202.00_XP_Vista_7.exe";
my $hash = "6722a2442a05423b781721fbc069ed5e";
my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 0);
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index f48e0c6b4c42..0706d32a61e6 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -10,6 +10,7 @@ be able to use diff(1).
--------------------------- dentry_operations --------------------------
prototypes:
int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
@@ -25,6 +26,7 @@ prototypes:
locking rules:
rename_lock ->d_lock may block rcu-walk
d_revalidate: no no yes (ref-walk) maybe
+d_weak_revalidate:no no yes no
d_hash no no no maybe
d_compare: yes no no maybe
d_delete: no yes no no
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 0472c31c163b..4db22f6491e0 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -441,3 +441,7 @@ d_make_root() drops the reference to inode if dentry allocation fails.
two, it gets "is it an O_EXCL or equivalent?" boolean argument. Note that
local filesystems can ignore the argument - they are guaranteed that the
object doesn't exist. It's remote/distributed ones that might care...
+--
+[mandatory]
+ FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate()
+in your dentry operations instead.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index e3869098163e..bc4b06b3160a 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -900,6 +900,7 @@ defined:
struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
@@ -915,8 +916,13 @@ struct dentry_operations {
d_revalidate: called when the VFS needs to revalidate a dentry. This
is called whenever a name look-up finds a dentry in the
- dcache. Most filesystems leave this as NULL, because all their
- dentries in the dcache are valid
+ dcache. Most local filesystems leave this as NULL, because all their
+ dentries in the dcache are valid. Network filesystems are different
+ since things can change on the server without the client necessarily
+ being aware of it.
+
+ This function should return a positive value if the dentry is still
+ valid, and zero or a negative error code if it isn't.
d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU).
If in rcu-walk mode, the filesystem must revalidate the dentry without
@@ -927,6 +933,20 @@ struct dentry_operations {
If a situation is encountered that rcu-walk cannot handle, return
-ECHILD and it will be called again in ref-walk mode.
+ d_weak_revalidate: called when the VFS needs to revalidate a "jumped" dentry.
+ This is called when a path-walk ends at a dentry that was not acquired by
+ doing a lookup in the parent directory. This includes "/", "." and "..",
+ as well as procfs-style symlinks and mountpoint traversal.
+
+ In this case, we are less concerned with whether the dentry is still
+ fully correct than with whether the inode is still valid. As with
+ d_revalidate, most local filesystems will set this to NULL since their
+ dcache entries are always valid.
+
+ This function has the same return code semantics as d_revalidate.
+
+ d_weak_revalidate is only called after leaving rcu-walk mode.
+
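+ As an illustration only, a minimal sketch for an imaginary network
+ filesystem is shown below; the examplefs_* names and the validity
+ check are hypothetical:
+
+ static int examplefs_d_weak_revalidate(struct dentry *dentry,
+                                        unsigned int flags)
+ {
+         struct inode *inode = dentry->d_inode;
+
+         /* A negative dentry offers nothing further to validate. */
+         if (!inode)
+                 return 0;
+
+         /*
+          * Only check that the inode is still valid on the server;
+          * examplefs_inode_still_valid() stands in for whatever
+          * revalidation the filesystem already performs.
+          */
+         return examplefs_inode_still_valid(inode) ? 1 : 0;
+ }
+
+ static const struct dentry_operations examplefs_dentry_ops = {
+         .d_revalidate           = examplefs_d_revalidate,
+         .d_weak_revalidate      = examplefs_d_weak_revalidate,
+ };
+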
d_hash: called when the VFS adds a dentry to the hash table. The first
dentry passed to d_hash is the parent directory that the name is
to be hashed into. The inode is the dentry's inode.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 157416e78cc4..d55b8ab2d10f 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -22,6 +22,8 @@ Supported adapters:
* Intel Panther Point (PCH)
* Intel Lynx Point (PCH)
* Intel Lynx Point-LP (PCH)
+ * Intel Avoton (SOC)
+ * Intel Wellsburg (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/i2c/busses/i2c-ismt b/Documentation/i2c/busses/i2c-ismt
new file mode 100644
index 000000000000..737355822c0b
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-ismt
@@ -0,0 +1,36 @@
+Kernel driver i2c-ismt
+
+Supported adapters:
+ * Intel S12xx series SOCs
+
+Authors:
+ Bill Brown <bill.e.brown@intel.com>
+
+
+Module Parameters
+-----------------
+
+* bus_speed (unsigned int)
+Allows changing the bus speed. Normally, the bus speed is set by the BIOS
+and never needs to be changed. However, some SMBus analyzers are too slow for
+monitoring the bus during debug, thus the need for this module parameter.
+Specify the bus speed in kHz.
+Available bus frequency settings:
+ 0 no change
+ 80 kHz
+ 100 kHz
+ 400 kHz
+ 1000 kHz
+
+
+Description
+-----------
+
+The S12xx series of SOCs has a pair of integrated SMBus 2.0 controllers
+targeted primarily at the microserver and storage markets.
+
+The S12xx series contains a pair of PCI functions. The output of lspci will show
+something similar to the following:
+
+ 00:13.0 System peripheral: Intel Corporation Centerton SMBus 2.0 Controller 0
+ 00:13.1 System peripheral: Intel Corporation Centerton SMBus 2.0 Controller 1
diff --git a/Documentation/i2c/busses/i2c-sis630 b/Documentation/i2c/busses/i2c-sis630
index 0b9697366930..ee7943631074 100644
--- a/Documentation/i2c/busses/i2c-sis630
+++ b/Documentation/i2c/busses/i2c-sis630
@@ -4,9 +4,11 @@ Supported adapters:
* Silicon Integrated Systems Corp (SiS)
630 chipset (Datasheet: available at http://www.sfr-fresh.com/linux)
730 chipset
+ 964 chipset
* Possible other SiS chipsets ?
Author: Alexander Malysh <amalysh@web.de>
+ Amaury Decrême <amaury.decreme@gmail.com> - SiS964 support
Module Parameters
-----------------
@@ -18,6 +20,7 @@ Module Parameters
* high_clock = [1|0] Forcibly set Host Master Clock to 56KHz (default,
what your BIOS use). DANGEROUS! This should be a bit
faster, but freeze some systems (i.e. my Laptop).
+ SIS630/730 chip only.
Description
@@ -36,6 +39,12 @@ or like this:
00:00.0 Host bridge: Silicon Integrated Systems [SiS] 730 Host (rev 02)
00:01.0 ISA bridge: Silicon Integrated Systems [SiS] 85C503/5513
+or like this:
+
+00:00.0 Host bridge: Silicon Integrated Systems [SiS] 760/M760 Host (rev 02)
+00:02.0 ISA bridge: Silicon Integrated Systems [SiS] SiS964 [MuTIOL Media IO]
+ LPC Controller (rev 36)
+
in your 'lspci' output , then this driver is for your chipset.
Thank You
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index d1f22618e14b..6012b12b3510 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -137,8 +137,8 @@ available for writes where the two data bytes are the other way
around (not SMBus compliant, but very popular.)
-SMBus Process Call: i2c_smbus_process_call()
-=============================================
+SMBus Process Call:
+===================
This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return.
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 3a94b0e6f601..6b344b516bff 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -365,8 +365,6 @@ in terms of it. Never use this function directly!
s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
s32 i2c_smbus_write_word_data(struct i2c_client *client,
u8 command, u16 value);
- s32 i2c_smbus_process_call(struct i2c_client *client,
- u8 command, u16 value);
s32 i2c_smbus_read_block_data(struct i2c_client *client,
u8 command, u8 *values);
s32 i2c_smbus_write_block_data(struct i2c_client *client,
@@ -381,6 +379,8 @@ These ones were removed from i2c-core because they had no users, but could
be added back later if needed:
s32 i2c_smbus_write_quick(struct i2c_client *client, u8 value);
+ s32 i2c_smbus_process_call(struct i2c_client *client,
+ u8 command, u16 value);
s32 i2c_smbus_block_process_call(struct i2c_client *client,
u8 command, u8 length, u8 *values);
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index a686f9cd69c1..c858f8419eba 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -388,26 +388,3 @@ config FOO
depends on BAR && m
limits FOO to module (=m) or disabled (=n).
-
-Kconfig symbol existence
-~~~~~~~~~~~~~~~~~~~~~~~~
-The following two methods produce the same kconfig symbol dependencies
-but differ greatly in kconfig symbol existence (production) in the
-generated config file.
-
-case 1:
-
-config FOO
- tristate "about foo"
- depends on BAR
-
-vs. case 2:
-
-if BAR
-config FOO
- tristate "about foo"
-endif
-
-In case 1, the symbol FOO will always exist in the config file (given
-no other dependencies). In case 2, the symbol FOO will only exist in
-the config file if BAR is enabled.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index a09f1a6a830c..b8b77bbc784f 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -46,6 +46,12 @@ KCONFIG_OVERWRITECONFIG
If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not
break symlinks when .config is a symlink to somewhere else.
+CONFIG_
+--------------------------------------------------
+If you set CONFIG_ in the environment, Kconfig will prefix all symbols
+with its value when saving the configuration, instead of using the default,
+"CONFIG_".
+
______________________________________________________________________
Environment variables for '{allyes/allmod/allno/rand}config'
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 766087781ecd..3a54fca730c0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -564,6 +564,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
UART at the specified I/O port or MMIO address,
switching to the matching ttyS device later. The
options are the same as for ttyS, above.
+ hvc<n> Use the hypervisor console device <n>. This is for
+ both Xen and PowerPC hypervisors.
If the device connected to the port is not a TTY but a braille
device, prepend "brl," before the device type, for instance
@@ -757,6 +759,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
earlyprintk= [X86,SH,BLACKFIN]
earlyprintk=vga
+ earlyprintk=xen
earlyprintk=serial[,ttySn[,baudrate]]
earlyprintk=ttySn[,baudrate]
earlyprintk=dbgp[debugController#]
@@ -774,6 +777,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The VGA output is eventually overwritten by the real
console.
+ The xen output can only be used by Xen PV guests.
+
ekgdboc= [X86,KGDB] Allow early kernel console debugging
ekgdboc=kbd
@@ -1640,42 +1645,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
that the amount of memory usable for all allocations
is not too small.
- movablemem_map=acpi
- [KNL,X86,IA-64,PPC] This parameter is similar to
- memmap except it specifies the memory map of
- ZONE_MOVABLE.
- This option inform the kernel to use Hot Pluggable bit
- in flags from SRAT from ACPI BIOS to determine which
- memory devices could be hotplugged. The corresponding
- memory ranges will be set as ZONE_MOVABLE.
- NOTE: Whatever node the kernel resides in will always
- be un-hotpluggable.
-
- movablemem_map=nn[KMG]@ss[KMG]
- [KNL,X86,IA-64,PPC] This parameter is similar to
- memmap except it specifies the memory map of
- ZONE_MOVABLE.
- If user specifies memory ranges, the info in SRAT will
- be ingored. And it works like the following:
- - If more ranges are all within one node, then from
- lowest ss to the end of the node will be ZONE_MOVABLE.
- - If a range is within a node, then from ss to the end
- of the node will be ZONE_MOVABLE.
- - If a range covers two or more nodes, then from ss to
- the end of the 1st node will be ZONE_MOVABLE, and all
- the rest nodes will only have ZONE_MOVABLE.
- If memmap is specified at the same time, the
- movablemem_map will be limited within the memmap
- areas. If kernelcore or movablecore is also specified,
- movablemem_map will have higher priority to be
- satisfied. So the administrator should be careful that
- the amount of movablemem_map areas are not too large.
- Otherwise kernel won't have enough memory to start.
- NOTE: We don't stop users specifying the node the
- kernel resides in as hotpluggable so that this
- option can be used as a workaround of firmware
- bugs.
-
MTD_Partition= [MTD]
Format: <name>,<region-number>,<size>,<offset>
@@ -2262,6 +2231,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
This sorting is done to get a device
order compatible with older (<= 2.4) kernels.
nobfsort Don't sort PCI devices into breadth-first order.
+ pcie_bus_tune_off Disable PCIe MPS (Max Payload Size)
+ tuning and use the BIOS-configured MPS defaults.
+ pcie_bus_safe Set every device's MPS to the largest value
+ supported by all devices below the root complex.
+ pcie_bus_perf Set device MPS to the largest allowable MPS
+ based on its parent bus. Also set MRRS (Max
+ Read Request Size) to the largest supported
+ value (no larger than the MPS that the device
+ or bus can support) for best performance.
+ pcie_bus_peer2peer Set every device's MPS to 128B, which
+ every device is guaranteed to support. This
+ configuration allows peer-to-peer DMA between
+ any pair of devices, possibly at the cost of
+ reduced performance. This also guarantees
+ that hot-added devices will work.
cbiosize=nn[KMG] The fixed amount of bus space which is
reserved for the CardBus bridge's IO window.
The default value is 256 bytes.
@@ -2283,6 +2267,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the default.
off: Turn ECRC off
on: Turn ECRC on.
+ hpiosize=nn[KMG] The fixed amount of bus space which is
+ reserved for hotplug bridge's IO window.
+ Default size is 256 bytes.
+ hpmemsize=nn[KMG] The fixed amount of bus space which is
+ reserved for hotplug bridge's memory window.
+ Default size is 2 megabytes.
realloc= Enable/disable reallocating PCI bridge resources
if allocations done by BIOS are too small to
accommodate resources required by all child
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 5fefe374892f..5246090ef15c 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -6,5 +6,7 @@ leds-lp5521.txt
- notes on how to use the leds-lp5521 driver.
leds-lp5523.txt
- notes on how to use the leds-lp5523 driver.
+leds-lp55xx.txt
+ - description of the lp55xx common driver.
leds-lm3556.txt
- notes on how to use the leds-lm3556 driver.
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
index 0e542ab3d4a0..270f57196339 100644
--- a/Documentation/leds/leds-lp5521.txt
+++ b/Documentation/leds/leds-lp5521.txt
@@ -17,19 +17,8 @@ lp5521:channelx, where x is 0 .. 2
All three channels can be also controlled using the engine micro programs.
More details of the instructions can be found from the public data sheet.
-Control interface for the engines:
-x is 1 .. 3
-enginex_mode : disabled, load, run
-enginex_load : store program (visible only in engine load mode)
-
-Example (start to blink the channel 2 led):
-cd /sys/class/leds/lp5521:channel2/device
-echo "load" > engine3_mode
-echo "037f4d0003ff6000" > engine3_load
-echo "run" > engine3_mode
-
-stop the engine:
-echo "disabled" > engine3_mode
+LP5521 has internal program memory for running various LED patterns.
+For details, please refer to the 'firmware' section in leds-lp55xx.txt.
sysfs contains a selftest entry.
The test communicates with the chip and checks that
@@ -47,7 +36,7 @@ The name of each channel can be configurable.
If the name field is not defined, the default name will be set to 'xxxx:channelN'
(XXXX : pdata->label or i2c client name, N : channel number)
-static struct lp5521_led_config lp5521_led_config[] = {
+static struct lp55xx_led_config lp5521_led_config[] = {
{
.name = "red",
.chan_nr = 0,
@@ -81,10 +70,10 @@ static void lp5521_enable(bool state)
/* Control of chip enable signal */
}
-static struct lp5521_platform_data lp5521_platform_data = {
+static struct lp55xx_platform_data lp5521_platform_data = {
.led_config = lp5521_led_config,
.num_channels = ARRAY_SIZE(lp5521_led_config),
- .clock_mode = LP5521_CLOCK_EXT,
+ .clock_mode = LP55XX_CLOCK_EXT,
.setup_resources = lp5521_setup,
.release_resources = lp5521_release,
.enable = lp5521_enable,
@@ -105,47 +94,9 @@ example of update_config :
LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT | \
LP5521_CLK_INT)
-static struct lp5521_platform_data lp5521_pdata = {
+static struct lp55xx_platform_data lp5521_pdata = {
.led_config = lp5521_led_config,
.num_channels = ARRAY_SIZE(lp5521_led_config),
- .clock_mode = LP5521_CLOCK_INT,
+ .clock_mode = LP55XX_CLOCK_INT,
.update_config = LP5521_CONFIGS,
};
-
-LED patterns : LP5521 has autonomous operation without external control.
-Pattern data can be defined in the platform data.
-
-example of led pattern data :
-
-/* RGB(50,5,0) 500ms on, 500ms off, infinite loop */
-static u8 pattern_red[] = {
- 0x40, 0x32, 0x60, 0x00, 0x40, 0x00, 0x60, 0x00,
- };
-
-static u8 pattern_green[] = {
- 0x40, 0x05, 0x60, 0x00, 0x40, 0x00, 0x60, 0x00,
- };
-
-static struct lp5521_led_pattern board_led_patterns[] = {
- {
- .r = pattern_red,
- .g = pattern_green,
- .size_r = ARRAY_SIZE(pattern_red),
- .size_g = ARRAY_SIZE(pattern_green),
- },
-};
-
-static struct lp5521_platform_data lp5521_platform_data = {
- .led_config = lp5521_led_config,
- .num_channels = ARRAY_SIZE(lp5521_led_config),
- .clock_mode = LP5521_CLOCK_EXT,
- .patterns = board_led_patterns,
- .num_patterns = ARRAY_SIZE(board_led_patterns),
-};
-
-Then predefined led pattern(s) can be executed via the sysfs.
-To start the pattern #1,
-# echo 1 > /sys/bus/i2c/devices/xxxx/led_pattern
-(xxxx : i2c bus & slave address)
-To end the pattern,
-# echo 0 > /sys/bus/i2c/devices/xxxx/led_pattern
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
index c2743f59f9ac..899fdad509fe 100644
--- a/Documentation/leds/leds-lp5523.txt
+++ b/Documentation/leds/leds-lp5523.txt
@@ -27,25 +27,8 @@ c) Default
If both fields are NULL, 'lp5523' is used by default.
/sys/class/leds/lp5523:channelN (N: 0 ~ 8)
-The chip provides 3 engines. Each engine can control channels without
-interaction from the main CPU. Details of the micro engine code can be found
-from the public data sheet. Leds can be muxed to different channels.
-
-Control interface for the engines:
-x is 1 .. 3
-enginex_mode : disabled, load, run
-enginex_load : microcode load (visible only in load mode)
-enginex_leds : led mux control (visible only in load mode)
-
-cd /sys/class/leds/lp5523:channel2/device
-echo "load" > engine3_mode
-echo "9d80400004ff05ff437f0000" > engine3_load
-echo "111111111" > engine3_leds
-echo "run" > engine3_mode
-
-sysfs contains a selftest entry. It measures each channel
-voltage level and checks if it looks reasonable. If the level is too high,
-the led is missing; if the level is too low, there is a short circuit.
+LP5523 has internal program memory for running various LED patterns.
+For details, please refer to the 'firmware' section in leds-lp55xx.txt.
Selftest uses always the current from the platform data.
@@ -58,7 +41,7 @@ Example platform data:
Note - chan_nr can have values between 0 and 8.
-static struct lp5523_led_config lp5523_led_config[] = {
+static struct lp55xx_led_config lp5523_led_config[] = {
{
.name = "D1",
.chan_nr = 0,
@@ -88,10 +71,10 @@ static void lp5523_enable(bool state)
/* Control chip enable signal */
}
-static struct lp5523_platform_data lp5523_platform_data = {
+static struct lp55xx_platform_data lp5523_platform_data = {
.led_config = lp5523_led_config,
.num_channels = ARRAY_SIZE(lp5523_led_config),
- .clock_mode = LP5523_CLOCK_EXT,
+ .clock_mode = LP55XX_CLOCK_EXT,
.setup_resources = lp5523_setup,
.release_resources = lp5523_release,
.enable = lp5523_enable,
diff --git a/Documentation/leds/leds-lp55xx.txt b/Documentation/leds/leds-lp55xx.txt
new file mode 100644
index 000000000000..ced41868d2d1
--- /dev/null
+++ b/Documentation/leds/leds-lp55xx.txt
@@ -0,0 +1,118 @@
+LP5521/LP5523/LP55231 Common Driver
+===================================
+
+Authors: Milo(Woogyom) Kim <milo.kim@ti.com>
+
+Description
+-----------
+LP5521 and LP5523/55231 share the common features below.
+
+ Register access via the I2C
+ Device initialization/deinitialization
+ Create LED class devices for multiple output channels
+ Device attributes for user-space interface
+ Program memory for running LED patterns
+
+The LP55xx common driver provides these features using exported functions.
+ lp55xx_init_device() / lp55xx_deinit_device()
+ lp55xx_register_leds() / lp55xx_unregister_leds()
+ lp55xx_register_sysfs() / lp55xx_unregister_sysfs()
+
+( Driver Structure Data )
+
+In the lp55xx common driver, two different data structures are used.
+
+o lp55xx_led
+ controls the individual LED output channels (e.g. LED current, channel index).
+o lp55xx_chip
+ general chip control such as I2C access and platform data.
+
+For example, LP5521 has a maximum of 3 LED channels.
+LP5523/55231 has 9 output channels.
+
+lp55xx_chip for LP5521 ... lp55xx_led #1
+ lp55xx_led #2
+ lp55xx_led #3
+
+lp55xx_chip for LP5523 ... lp55xx_led #1
+ lp55xx_led #2
+ .
+ .
+ lp55xx_led #9
+
+( Chip Dependent Code )
+
+To support device specific configurations, the special structure
+'lp55xx_device_config' is used.
+
+ Maximum number of channels
+ Reset command, chip enable command
+ Chip specific initialization
+ Brightness control register access
+ Setting LED output current
+ Program memory address access for running patterns
+ Additional device specific attributes
+
+( Firmware Interface )
+
+LP55xx family devices have internal program memory for running
+various LED patterns.
+The pattern data is either saved as a file in user space or written into
+the memory as a hex byte string through I2C.
+The LP55xx common driver supports the firmware interface for this.
+
+LP55xx chips have three program engines.
+To load and run a pattern, the programming sequence is as follows.
+ (1) Select an engine number (1/2/3)
+ (2) Mode change to load
+ (3) Write pattern data into selected area
+ (4) Mode change to run
+
+The LP55xx common driver provides these simple interfaces.
+select_engine : Select which engine is used for running program
+run_engine : Start program which is loaded via the firmware interface
+firmware : Load program data
+
+For example, to run a blinking pattern in engine #1 of LP5521:
+echo 1 > /sys/bus/i2c/devices/xxxx/select_engine
+echo 1 > /sys/class/firmware/lp5521/loading
+echo "4000600040FF6000" > /sys/class/firmware/lp5521/data
+echo 0 > /sys/class/firmware/lp5521/loading
+echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
+
+For example, to run a blinking pattern in engine #3 of LP55231:
+echo 3 > /sys/bus/i2c/devices/xxxx/select_engine
+echo 1 > /sys/class/firmware/lp55231/loading
+echo "9d0740ff7e0040007e00a0010000" > /sys/class/firmware/lp55231/data
+echo 0 > /sys/class/firmware/lp55231/loading
+echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
+
+To start blinking patterns in engines #2 and #3 simultaneously:
+for idx in 2 3
+do
+ echo $idx > /sys/class/leds/red/device/select_engine
+ sleep 0.1
+ echo 1 > /sys/class/firmware/lp5521/loading
+ echo "4000600040FF6000" > /sys/class/firmware/lp5521/data
+ echo 0 > /sys/class/firmware/lp5521/loading
+done
+echo 1 > /sys/class/leds/red/device/run_engine
+
+Here is another example, for LP5523:
+echo 2 > /sys/bus/i2c/devices/xxxx/select_engine
+echo 1 > /sys/class/firmware/lp5523/loading
+echo "9d80400004ff05ff437f0000" > /sys/class/firmware/lp5523/data
+echo 0 > /sys/class/firmware/lp5523/loading
+echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
+
+As soon as 'loading' is set to 0, the registered callback is called.
+Inside the callback, the selected engine is loaded and its memory is updated.
+To run the programmed pattern, the 'run_engine' attribute should be enabled.
+
+( 'run_engine' and 'firmware_cb' )
+The sequence for running the program data is common, but each device has its
+own register addresses for the commands.
+To support this, 'run_engine' and 'firmware_cb' are configurable in each driver.
+run_engine : controls the selected engine
+firmware_cb : callback invoked after the firmware has been loaded; it issues the
+ chip specific commands for loading and updating program memory.
diff --git a/Documentation/media-framework.txt b/Documentation/media-framework.txt
index 802875413873..77bd0a42f19d 100644
--- a/Documentation/media-framework.txt
+++ b/Documentation/media-framework.txt
@@ -336,7 +336,7 @@ Calls to media_entity_pipeline_start() can be nested. The pipeline pointer must
be identical for all nested calls to the function.
media_entity_pipeline_start() may return an error. In that case, it will
-clean up any the changes it did by itself.
+clean up any of the changes it did by itself.
When stopping the stream, drivers must notify the entities with
diff --git a/Documentation/namespaces/resource-control.txt b/Documentation/namespaces/resource-control.txt
new file mode 100644
index 000000000000..abc13c394738
--- /dev/null
+++ b/Documentation/namespaces/resource-control.txt
@@ -0,0 +1,14 @@
+There are many kinds of objects in the kernel that either have no
+individual limits or have limits that are ineffective when a set of
+processes is allowed to switch user ids. With user namespaces enabled
+in a kernel, this problem becomes more acute for people who do not
+trust their users or their users' programs to play nice.
+
+Therefore it is recommended that memory control groups be enabled in
+kernels that enable user namespaces, and it is further recommended
+that userspace configure memory control groups to limit how much
+memory users they do not trust to play nice can use.
+
+Memory control groups can be configured by installing the libcgroup
+package present on most distros, editing /etc/cgrules.conf and
+/etc/cgconfig.conf, and setting up libpam-cgroup.
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index da03146c182a..09673c7fc8ee 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,12 @@
+Release Date : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version : 06.504.01.00-rc1
+ 1. Add 4k FastPath DIF support.
+ 2. Dont load DevHandle unless FastPath enabled.
+ 3. Version and Changelog update.
+-------------------------------------------------------------------------------
Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/thermal/exynos_thermal_emulation b/Documentation/thermal/exynos_thermal_emulation
new file mode 100644
index 000000000000..b73bbfb697bb
--- /dev/null
+++ b/Documentation/thermal/exynos_thermal_emulation
@@ -0,0 +1,53 @@
+EXYNOS EMULATION MODE
+========================
+
+Copyright (C) 2012 Samsung Electronics
+
+Written by Jonghwa Lee <jonghwa3.lee@samsung.com>
+
+Description
+-----------
+
+Exynos 4x12 (4212, 4412) and 5 series provide an emulation mode for the thermal
+management unit (TMU). Thermal emulation mode supports software debugging of the
+TMU's operation. The user can set a temperature manually in software, and the TMU
+will read the current temperature from that user value instead of the sensor's value.
+
+Enabling the CONFIG_EXYNOS_THERMAL_EMUL option makes this support available.
+When it is enabled, a sysfs node named 'emulation' will be created under
+/sys/bus/platform/devices/'exynos device name'/.
+
+The sysfs node, 'emulation', contains the value 0 in its initial state. When you
+write a temperature to the node, emulation mode is enabled automatically and the
+current temperature is changed to that value.
+(Exynos also supports a user-changeable delay time before the temperature change
+ takes effect. However, this node always uses the real sensing delay, 938us.)
+
+Exynos emulation mode requires the value change and the enable to happen together:
+whenever you want to update the delay or the next temperature, you have to enable
+emulation mode at the same time (or keep the mode enabled). Otherwise, the update
+fails and the last successful value keeps being used. That is why this node only
+gives users the right to change the temperature; a single interface keeps it
+simple to use.
+
+Disabling emulation mode only requires writing the value 0 to the sysfs node.
+
+
+TEMP 120 |
+ |
+ 100 |
+ |
+ 80 |
+ | +-----------
+ 60 | | |
+ | +-------------| |
+ 40 | | | |
+ | | | |
+ 20 | | | +----------
+ | | | | |
+ 0 |______________|_____________|__________|__________|_________
+ A A A A TIME
+ |<----->| |<----->| |<----->| |
+ | 938us | | | | | |
+emulation : 0 50 | 70 | 20 | 0
+current temp : sensor 50 70 20 sensor
diff --git a/Documentation/thermal/intel_powerclamp.txt b/Documentation/thermal/intel_powerclamp.txt
new file mode 100644
index 000000000000..332de4a39b5a
--- /dev/null
+++ b/Documentation/thermal/intel_powerclamp.txt
@@ -0,0 +1,307 @@
+ =======================
+ INTEL POWERCLAMP DRIVER
+ =======================
+By: Arjan van de Ven <arjan@linux.intel.com>
+ Jacob Pan <jacob.jun.pan@linux.intel.com>
+
+Contents:
+ (*) Introduction
+ - Goals and Objectives
+
+ (*) Theory of Operation
+ - Idle Injection
+ - Calibration
+
+ (*) Performance Analysis
+ - Effectiveness and Limitations
+ - Power vs Performance
+ - Scalability
+ - Calibration
+ - Comparison with Alternative Techniques
+
+ (*) Usage and Interfaces
+ - Generic Thermal Layer (sysfs)
+ - Kernel APIs (TBD)
+
+============
+INTRODUCTION
+============
+
+Consider the situation where a system’s power consumption must be
+reduced at runtime, due to power budget, thermal constraint, or noise
+level, and where active cooling is not preferred. Software managed
+passive power reduction must be performed to prevent the hardware
+actions that are designed for catastrophic scenarios.
+
+Currently, P-states, T-states (clock modulation), and CPU offlining
+are used for CPU throttling.
+
+On Intel CPUs, C-states provide effective power reduction, but so far
+they’re only used opportunistically, based on workload. With the
+development of intel_powerclamp driver, the method of synchronizing
+idle injection across all online CPU threads was introduced. The goal
+is to achieve forced and controllable C-state residency.
+
+Test/Analysis has been made in the areas of power, performance,
+scalability, and user experience. In many cases, clear advantage is
+shown over taking the CPU offline or modulating the CPU clock.
+
+
+===================
+THEORY OF OPERATION
+===================
+
+Idle Injection
+--------------
+
+On modern Intel processors (Nehalem or later), package level C-state
+residency is available in MSRs, thus also available to the kernel.
+
+These MSRs are:
+ #define MSR_PKG_C2_RESIDENCY 0x60D
+ #define MSR_PKG_C3_RESIDENCY 0x3F8
+ #define MSR_PKG_C6_RESIDENCY 0x3F9
+ #define MSR_PKG_C7_RESIDENCY 0x3FA
+
+If the kernel can also inject idle time to the system, then a
+closed-loop control system can be established that manages package
+level C-state. The intel_powerclamp driver is conceived as such a
+control system, where the target set point is a user-selected idle
+ratio (based on power reduction), and the error is the difference
+between the actual package level C-state residency ratio and the target idle
+ratio.
+
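+A minimal sketch of how such a residency ratio could be sampled is shown
+below. The helper name is hypothetical, per-package topology and error
+handling are ignored, and the residency counters are assumed to tick at
+TSC frequency (as they do on the CPUs listed above):
+
+        #include <linux/delay.h>
+        #include <linux/math64.h>
+        #include <asm/msr.h>    /* rdmsrl(), rdtscll(), MSR_PKG_* defines */
+
+        /* Package C6 residency, in percent, over a sleep window. */
+        static unsigned int example_sample_pkg_c6(unsigned int window_ms)
+        {
+                u64 c6_start, c6_end, tsc_start, tsc_end;
+
+                rdmsrl(MSR_PKG_C6_RESIDENCY, c6_start);
+                rdtscll(tsc_start);
+
+                msleep(window_ms);
+
+                rdmsrl(MSR_PKG_C6_RESIDENCY, c6_end);
+                rdtscll(tsc_end);
+
+                return div64_u64(100 * (c6_end - c6_start),
+                                 tsc_end - tsc_start);
+        }
+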
+Injection is controlled by high priority kernel threads, spawned for
+each online CPU.
+
+These kernel threads, with SCHED_FIFO class, are created to perform
+clamping actions of controlled duty ratio and duration. Each per-CPU
+thread synchronizes its idle time and duration based on the rounding
+of jiffies, so that accumulated errors do not cause a jittery
+effect. Threads are also bound to the CPU such that they cannot be
+migrated, unless the CPU is taken offline. In that case, threads
+belonging to the offlined CPUs will be terminated immediately.
+
+Running at SCHED_FIFO and relatively high priority also allows such a
+scheme to work for both preemptible and non-preemptible kernels.
+Alignment of idle time around jiffies ensures scalability for HZ
+values. This effect can be better visualized using a Perf timechart.
+The following diagram shows the behavior of kernel thread
+kidle_inject/cpu. During idle injection, it runs monitor/mwait idle
+for a given "duration", then relinquishes the CPU to other tasks,
+until the next time interval.
+
+The NOHZ schedule tick is disabled during idle time, but interrupts
+are not masked. Tests show that the extra wakeups from scheduler tick
+have a dramatic impact on the effectiveness of the powerclamp driver
+on large scale systems (Westmere system with 80 processors).
+
+CPU0
+ ____________ ____________
+kidle_inject/0 | sleep | mwait | sleep |
+ _________| |________| |_______
+ duration
+CPU1
+ ____________ ____________
+kidle_inject/1 | sleep | mwait | sleep |
+ _________| |________| |_______
+ ^
+ |
+ |
+ roundup(jiffies, interval)
+
+Only one CPU is allowed to collect statistics and update global
+control parameters. This CPU is referred to as the controlling CPU in
+this document. The controlling CPU is elected at runtime, with a
+policy that favors BSP, taking into account the possibility of a CPU
+hot-plug.
+
+In terms of dynamics of the idle control system, package level idle
+time is considered largely as a non-causal system where its behavior
+cannot be based on the past or current input. Therefore, the
+intel_powerclamp driver attempts to enforce the desired idle time
+instantly as given input (target idle ratio). After injection,
+powerclamp monitors the actual idle for a given time window and adjusts
+the next injection accordingly to avoid over/under correction.
+
+When used in a causal control system, such as a temperature control,
+it is up to the user of this driver to implement algorithms where
+past samples and outputs are included in the feedback. For example, a
+PID-based thermal controller can use the powerclamp driver to
+maintain a desired target temperature, based on integral and
+derivative gains of the past samples.
+
+
+
+Calibration
+-----------
+During scalability testing, it is observed that synchronized actions
+among CPUs become challenging as the number of cores grows. This is
+also true for the ability of a system to enter package level C-states.
+
+To make sure the intel_powerclamp driver scales well, online
+calibration is implemented. The goals for doing such a calibration
+are:
+
+a) determine the effective range of idle injection ratio
+b) determine the amount of compensation needed at each target ratio
+
+Compensation to each target ratio consists of two parts:
+
+ a) steady state error compensation
+ This is to offset the error occurring when the system can
+ enter idle without extra wakeups (such as external interrupts).
+
+ b) dynamic error compensation
+ When an excessive amount of wakeups occurs during idle, an
+ additional idle ratio can be added to quiet interrupts, by
+ slowing down CPU activities.
+
+A debugfs file is provided for the user to examine compensation
+progress and results, such as on a Westmere system.
+[jacob@nex01 ~]$ cat
+/sys/kernel/debug/intel_powerclamp/powerclamp_calib
+controlling cpu: 0
+pct confidence steady dynamic (compensation)
+0 0 0 0
+1 1 0 0
+2 1 1 0
+3 3 1 0
+4 3 1 0
+5 3 1 0
+6 3 1 0
+7 3 1 0
+8 3 1 0
+...
+30 3 2 0
+31 3 2 0
+32 3 1 0
+33 3 2 0
+34 3 1 0
+35 3 2 0
+36 3 1 0
+37 3 2 0
+38 3 1 0
+39 3 2 0
+40 3 3 0
+41 3 1 0
+42 3 2 0
+43 3 1 0
+44 3 1 0
+45 3 2 0
+46 3 3 0
+47 3 0 0
+48 3 2 0
+49 3 3 0
+
+Calibration occurs during runtime. No offline method is available.
+Steady state compensation is used only when confidence levels of all
+adjacent ratios have reached satisfactory level. A confidence level
+is accumulated based on clean data collected at runtime. Data
+collected during a period without extra interrupts is considered
+clean.
+
+To compensate for excessive amounts of wakeup during idle, additional
+idle time is injected when such a condition is detected. Currently,
+we have a simple algorithm to double the injection ratio. A possible
+enhancement might be to throttle the offending IRQ, such as delaying
+EOI for level triggered interrupts. But it is a challenge to be
+non-intrusive to the scheduler or the IRQ core code.
+
+
+CPU Online/Offline
+------------------
+Per-CPU kernel threads are started/stopped upon receiving
+notifications of CPU hotplug activities. The intel_powerclamp driver
+keeps track of clamping kernel threads, even after they are migrated
+to other CPUs, after a CPU offline event.
+
+
+=====================
+Performance Analysis
+=====================
+This section describes the general performance data collected on
+multiple systems, including Westmere (80P) and Ivy Bridge (4P, 8P).
+
+Effectiveness and Limitations
+-----------------------------
+The maximum range that idle injection is allowed is capped at 50
+percent. As mentioned earlier, since interrupts are allowed during
+forced idle time, excessive interrupts could result in less
+effectiveness. The extreme case would be doing a ping -f to generate
+flooded network interrupts without much CPU acknowledgement. In this
+case, little can be done from the idle injection threads. In most
+normal cases, such as scp a large file, applications can be throttled
+by the powerclamp driver, since slowing down the CPU also slows down
+network protocol processing, which in turn reduces interrupts.
+
+When control parameters change at runtime by the controlling CPU, it
+may take an additional period for the rest of the CPUs to catch up
+with the changes. During this time, idle injection is out of sync,
+thus not able to enter package C-states at the expected ratio. But
+this effect is minor, since in most cases the target ratio is updated
+much less frequently than the idle injection frequency.
+
+Scalability
+-----------
+Tests also show a minor, but measurable, difference between the 4P/8P
+Ivy Bridge system and the 80P Westmere server under 50% idle ratio.
+More compensation is needed on Westmere for the same amount of
+target idle ratio. The compensation also increases as the idle ratio
+gets larger. This is why the calibration code is needed.
+
+On the IVB 8P system, compared to an offline CPU, powerclamp can
+achieve up to 40% better performance per watt (measured by a spin
+counter summed over per-CPU counting threads spawned for all running
+CPUs).
+
+====================
+Usage and Interfaces
+====================
+The powerclamp driver is registered to the generic thermal layer as a
+cooling device. Currently, it’s not bound to any thermal zones.
+
+jacob@chromoly:/sys/class/thermal/cooling_device14$ grep . *
+cur_state:0
+max_state:50
+type:intel_powerclamp
+
+Example usage:
+- To inject 25% idle time
+$ sudo sh -c "echo 25 > /sys/class/thermal/cooling_device80/cur_state"
+
+If the system is not busy and has more than 25% idle time already,
+then the powerclamp driver will not start idle injection. Running top
+will not show the idle injection kernel threads.
+
+If the system is busy (spin test below) and has less than 25% natural
+idle time, powerclamp kernel threads will do idle injection, which
+appear as running to the scheduler. But the overall system idle is still
+reflected. In this example, 24.1% idle is shown. This helps the
+system admin or user determine the cause of a slowdown when the
+powerclamp driver is in action.
+
+
+Tasks: 197 total, 1 running, 196 sleeping, 0 stopped, 0 zombie
+Cpu(s): 71.2%us, 4.7%sy, 0.0%ni, 24.1%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+Mem: 3943228k total, 1689632k used, 2253596k free, 74960k buffers
+Swap: 4087804k total, 0k used, 4087804k free, 945336k cached
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 3352 jacob 20 0 262m 644 428 S 286 0.0 0:17.16 spin
+ 3341 root -51 0 0 0 0 D 25 0.0 0:01.62 kidle_inject/0
+ 3344 root -51 0 0 0 0 D 25 0.0 0:01.60 kidle_inject/3
+ 3342 root -51 0 0 0 0 D 25 0.0 0:01.61 kidle_inject/1
+ 3343 root -51 0 0 0 0 D 25 0.0 0:01.60 kidle_inject/2
+ 2935 jacob 20 0 696m 125m 35m S 5 3.3 0:31.11 firefox
+ 1546 root 20 0 158m 20m 6640 S 3 0.5 0:26.97 Xorg
+ 2100 jacob 20 0 1223m 88m 30m S 3 2.3 0:23.68 compiz
+
+Tests have shown that by using the powerclamp driver as a cooling
+device, a PID based userspace thermal controller can manage to
+control CPU temperature effectively, when no other thermal influence
+is added. For example, an Ultrabook user can compile the kernel while
+staying under a certain temperature (below the most active trip points).
diff --git a/Documentation/thermal/nouveau_thermal b/Documentation/thermal/nouveau_thermal
new file mode 100644
index 000000000000..efceb7828f54
--- /dev/null
+++ b/Documentation/thermal/nouveau_thermal
@@ -0,0 +1,81 @@
+Kernel driver nouveau
+=====================
+
+Supported chips:
+* NV43+
+
+Authors: Martin Peres (mupuf) <martin.peres@labri.fr>
+
+Description
+-----------
+
+This driver allows reading the GPU core temperature, driving the GPU fan and
+setting temperature alarms.
+
+Currently, due to the absence of an in-kernel API to access HWMON drivers, Nouveau
+cannot access any of the i2c external monitoring chips it may find. If you
+have one of those, temperature and/or fan management through Nouveau's HWMON
+interface is likely not to work. This document may then not cover your situation
+entirely.
+
+Temperature management
+----------------------
+
+Temperature is exposed as a read-only HWMON attribute, temp1_input.
+
+In order to protect the GPU from overheating, Nouveau supports 4 configurable
+temperature thresholds:
+
+ * Fan_boost: Fan speed is set to 100% when reaching this temperature;
+ * Downclock: The GPU will be downclocked to reduce its power dissipation;
+ * Critical: The GPU is put on hold to further lower power dissipation;
+ * Shutdown: Shut the computer down to protect your GPU.
+
+WARNING: Some of these thresholds may not be used by Nouveau depending
+on your chipset.
+
+The default value for these thresholds comes from the GPU's vbios. These
+thresholds can be configured thanks to the following HWMON attributes:
+
+ * Fan_boost: temp1_auto_point1_temp and temp1_auto_point1_temp_hyst;
+ * Downclock: temp1_max and temp1_max_hyst;
+ * Critical: temp1_crit and temp1_crit_hyst;
+ * Shutdown: temp1_emergency and temp1_emergency_hyst.
+
+NOTE: Remember that the values are stored as millidegrees Celsius. Don't forget
+to multiply!
+
+Fan management
+--------------
+
+Not all cards have a drivable fan. If you do, then the following HWMON
+attributes should be available:
+
+ * pwm1_enable: Current fan management mode (NONE, MANUAL or AUTO);
+ * pwm1: Current PWM value (power percentage);
+ * pwm1_min: The minimum PWM speed allowed;
+ * pwm1_max: The maximum PWM speed allowed (bypassed when hitting Fan_boost);
+
+You may also have the following attribute:
+
+ * fan1_input: Speed in RPM of your fan.
+
+Your fan can be driven in different modes:
+
+ * 0: The fan is left untouched;
+ * 1: The fan can be driven manually (use pwm1 to change the speed);
+ * 2: The fan is driven automatically depending on the temperature.
+
+NOTE: Be sure to use the manual mode if you want to drive the fan speed manually.
+
+NOTE2: Not all fan management modes may be supported on all chipsets. We are
+working on it.
+
+Bug reports
+-----------
+
+Thermal management on Nouveau is new and may not work on all cards. If you have
+inquiries, please ping mupuf on IRC (#nouveau, freenode).
+
+Bug reports should be filed on Freedesktop's bug tracker. Please follow
+http://nouveau.freedesktop.org/wiki/Bugs
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index 88c02334e356..6859661c9d31 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -55,6 +55,8 @@ temperature) and throttle appropriate devices.
.get_trip_type: get the type of certain trip point.
.get_trip_temp: get the temperature above which the certain trip point
will be fired.
+ .set_emul_temp: set an emulated temperature, which helps in debugging
+ different temperature threshold points.
1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
@@ -153,6 +155,7 @@ Thermal zone device sys I/F, created once it's registered:
|---trip_point_[0-*]_temp: Trip point temperature
|---trip_point_[0-*]_type: Trip point type
|---trip_point_[0-*]_hyst: Hysteresis value for this trip point
+ |---emul_temp: Emulated temperature set node
Thermal cooling device sys I/F, created once it's registered:
/sys/class/thermal/cooling_device[0-*]:
@@ -252,6 +255,16 @@ passive
Valid values: 0 (disabled) or greater than 1000
RW, Optional
+emul_temp
+ Interface to set an emulated temperature for the thermal zone
+ (sensor). After this temperature is set, the thermal zone may pass
+ it to the platform emulation function, if one is registered, or
+ cache it locally. This is useful for debugging different temperature
+ thresholds and their associated cooling actions. This is a write-only
+ node, and writing 0 to it disables emulation.
+ Unit: millidegree Celsius
+ WO, Optional
+
*****************************
* Cooling device attributes *
*****************************
@@ -329,8 +342,9 @@ The framework includes a simple notification mechanism, in the form of a
netlink event. Netlink socket initialization is done during the _init_
of the framework. Drivers which intend to use the notification mechanism
just need to call thermal_generate_netlink_event() with two arguments viz
-(originator, event). Typically the originator will be an integer assigned
-to a thermal_zone_device when it registers itself with the framework. The
+(originator, event). The originator is a pointer to struct thermal_zone_device
+from which the event originated. An integer which represents the
+thermal zone device will be used in the message to identify the zone. The
event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
crosses any of the configured thresholds.
diff --git a/Documentation/video4linux/CARDLIST.au0828 b/Documentation/video4linux/CARDLIST.au0828
index a8a65753e544..55a21deab7db 100644
--- a/Documentation/video4linux/CARDLIST.au0828
+++ b/Documentation/video4linux/CARDLIST.au0828
@@ -1,5 +1,5 @@
0 -> Unknown board (au0828)
- 1 -> Hauppauge HVR950Q (au0828) [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008,2040:7260,2040:7213]
+ 1 -> Hauppauge HVR950Q (au0828) [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008,2040:7260,2040:7213,2040:7270]
2 -> Hauppauge HVR850 (au0828) [2040:7240]
3 -> DViCO FusionHDTV USB (au0828) [0fe9:d620]
4 -> Hauppauge HVR950Q rev xxF8 (au0828) [2040:7201,2040:7211,2040:7281]
diff --git a/Documentation/video4linux/CARDLIST.cx23885 b/Documentation/video4linux/CARDLIST.cx23885
index 1299b5e82d7f..9f056d512e35 100644
--- a/Documentation/video4linux/CARDLIST.cx23885
+++ b/Documentation/video4linux/CARDLIST.cx23885
@@ -36,3 +36,5 @@
35 -> TeVii S471 [d471:9022]
36 -> Hauppauge WinTV-HVR1255 [0070:2259]
37 -> Prof Revolution DVB-S2 8000 [8000:3034]
+ 38 -> Hauppauge WinTV-HVR4400 [0070:c108,0070:c138,0070:c12a,0070:c1f8]
+ 39 -> AVerTV Hybrid Express Slim HC81R [1461:d939]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index d99262dda533..3f12865b2a88 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -76,7 +76,7 @@
76 -> KWorld PlusTV 340U or UB435-Q (ATSC) (em2870) [1b80:a340]
77 -> EM2874 Leadership ISDBT (em2874)
78 -> PCTV nanoStick T2 290e (em28174)
- 79 -> Terratec Cinergy H5 (em2884) [0ccd:008e,0ccd:00ac,0ccd:10a2,0ccd:10ad]
+ 79 -> Terratec Cinergy H5 (em2884) [0ccd:10a2,0ccd:10ad]
80 -> PCTV DVB-S2 Stick (460e) (em28174)
81 -> Hauppauge WinTV HVR 930C (em2884) [2040:1605]
82 -> Terratec Cinergy HTC Stick (em2884) [0ccd:00b2]
@@ -84,3 +84,4 @@
84 -> MaxMedia UB425-TC (em2874) [1b80:e425]
85 -> PCTV QuatroStick (510e) (em2884) [2304:0242]
86 -> PCTV QuatroStick nano (520e) (em2884) [2013:0251]
+ 87 -> Terratec Cinergy HTC USB XS (em2884) [0ccd:008e,0ccd:00ac]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 94d9025aa82d..b3ad68309109 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -189,3 +189,4 @@
188 -> Sensoray 811/911 [6000:0811,6000:0911]
189 -> Kworld PC150-U [17de:a134]
190 -> Asus My Cinema PS3-100 [1043:48cd]
+191 -> Hawell HW-9004V1
diff --git a/Documentation/video4linux/et61x251.txt b/Documentation/video4linux/et61x251.txt
deleted file mode 100644
index e0cdae491858..000000000000
--- a/Documentation/video4linux/et61x251.txt
+++ /dev/null
@@ -1,315 +0,0 @@
-
- ET61X[12]51 PC Camera Controllers
- Driver for Linux
- =================================
-
- - Documentation -
-
-
-Index
-=====
-1. Copyright
-2. Disclaimer
-3. License
-4. Overview and features
-5. Module dependencies
-6. Module loading
-7. Module parameters
-8. Optional device control through "sysfs"
-9. Supported devices
-10. Notes for V4L2 application developers
-11. Contact information
-
-
-1. Copyright
-============
-Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it>
-
-
-2. Disclaimer
-=============
-Etoms is a trademark of Etoms Electronics Corp.
-This software is not developed or sponsored by Etoms Electronics.
-
-
-3. License
-==========
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-
-4. Overview and features
-========================
-This driver supports the video interface of the devices mounting the ET61X151
-or ET61X251 PC Camera Controllers.
-
-It's worth to note that Etoms Electronics has never collaborated with the
-author during the development of this project; despite several requests,
-Etoms Electronics also refused to release enough detailed specifications of
-the video compression engine.
-
-The driver relies on the Video4Linux2 and USB core modules. It has been
-designed to run properly on SMP systems as well.
-
-The latest version of the ET61X[12]51 driver can be found at the following URL:
-http://www.linux-projects.org/
-
-Some of the features of the driver are:
-
-- full compliance with the Video4Linux2 API (see also "Notes for V4L2
- application developers" paragraph);
-- available mmap or read/poll methods for video streaming through isochronous
- data transfers;
-- automatic detection of image sensor;
-- support for any window resolutions and optional panning within the maximum
- pixel area of image sensor;
-- image downscaling with arbitrary scaling factors from 1 and 2 in both
- directions (see "Notes for V4L2 application developers" paragraph);
-- two different video formats for uncompressed or compressed data in low or
- high compression quality (see also "Notes for V4L2 application developers"
- paragraph);
-- full support for the capabilities of every possible image sensors that can
- be connected to the ET61X[12]51 bridges, including, for instance, red, green,
- blue and global gain adjustments and exposure control (see "Supported
- devices" paragraph for details);
-- use of default color settings for sunlight conditions;
-- dynamic I/O interface for both ET61X[12]51 and image sensor control (see
- "Optional device control through 'sysfs'" paragraph);
-- dynamic driver control thanks to various module parameters (see "Module
- parameters" paragraph);
-- up to 64 cameras can be handled at the same time; they can be connected and
- disconnected from the host many times without turning off the computer, if
- the system supports hotplugging;
-- no known bugs.
-
-
-5. Module dependencies
-======================
-For it to work properly, the driver needs kernel support for Video4Linux and
-USB.
-
-The following options of the kernel configuration file must be enabled and
-corresponding modules must be compiled:
-
- # Multimedia devices
- #
- CONFIG_VIDEO_DEV=m
-
-To enable advanced debugging functionality on the device through /sysfs:
-
- # Multimedia devices
- #
- CONFIG_VIDEO_ADV_DEBUG=y
-
- # USB support
- #
- CONFIG_USB=m
-
-In addition, depending on the hardware being used, the modules below are
-necessary:
-
- # USB Host Controller Drivers
- #
- CONFIG_USB_EHCI_HCD=m
- CONFIG_USB_UHCI_HCD=m
- CONFIG_USB_OHCI_HCD=m
-
-And finally:
-
- # USB Multimedia devices
- #
- CONFIG_USB_ET61X251=m
-
-
-6. Module loading
-=================
-To use the driver, it is necessary to load the "et61x251" module into memory
-after all the other required modules: "videodev", "v4l2_common", "compat_ioctl32",
-"usbcore" and, depending on the USB host controller you have, "ehci-hcd",
-"uhci-hcd" or "ohci-hcd".
-
-Loading can be done as shown below:
-
- [root@localhost home]# modprobe et61x251
-
-At this point the devices should be recognized. You can invoke "dmesg" to
-analyze kernel messages and verify that the loading process has gone well:
-
- [user@localhost home]$ dmesg
-
-
-7. Module parameters
-====================
-Module parameters are listed below:
--------------------------------------------------------------------------------
-Name: video_nr
-Type: short array (min = 0, max = 64)
-Syntax: <-1|n[,...]>
-Description:    Specify the V4L2 minor number:
- -1 = use next available
- n = use minor number n
- You can specify up to 64 cameras this way.
- For example:
- video_nr=-1,2,-1 would assign minor number 2 to the second
- registered camera and use auto for the first one and for every
- other camera.
-Default: -1
--------------------------------------------------------------------------------
-Name: force_munmap
-Type: bool array (min = 0, max = 64)
-Syntax: <0|1[,...]>
-Description: Force the application to unmap previously mapped buffer memory
- before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
- all the applications support this feature. This parameter is
- specific for each detected camera.
- 0 = do not force memory unmapping
- 1 = force memory unmapping (save memory)
-Default: 0
--------------------------------------------------------------------------------
-Name: frame_timeout
-Type: uint array (min = 0, max = 64)
-Syntax: <n[,...]>
-Description: Timeout for a video frame in seconds. This parameter is
- specific for each detected camera. This parameter can be
- changed at runtime thanks to the /sys filesystem interface.
-Default: 2
--------------------------------------------------------------------------------
-Name: debug
-Type: ushort
-Syntax: <n>
-Description: Debugging information level, from 0 to 3:
- 0 = none (use carefully)
- 1 = critical errors
- 2 = significant information
- 3 = more verbose messages
- Level 3 is useful for testing only, when only one device
- is used at the same time. It also shows some more information
- about the hardware being detected. This module parameter can be
- changed at runtime thanks to the /sys filesystem interface.
-Default: 2
--------------------------------------------------------------------------------
-
-
-8. Optional device control through "sysfs"
-==========================================
-If the kernel has been compiled with the CONFIG_VIDEO_ADV_DEBUG option enabled,
-it is possible to read and write both the ET61X[12]51 and the image sensor
-registers by using the "sysfs" filesystem interface.
-
-There are four files in the /sys/class/video4linux/videoX directory for each
-registered camera: "reg", "val", "i2c_reg" and "i2c_val". The first two files
-control the ET61X[12]51 bridge, while the other two control the sensor chip.
-"reg" and "i2c_reg" hold the values of the current register index where the
-following reading/writing operations are addressed at through "val" and
-"i2c_val". Their use is not intended for end-users, unless you know what you
-are doing. Remember that you must be logged in as root before writing to them.
-
-As an example, suppose we want to read the value contained in register
-number 1 of the sensor register table - which is usually the product
-identifier - of the camera registered as "/dev/video0":
-
- [root@localhost #] cd /sys/class/video4linux/video0
- [root@localhost #] echo 1 > i2c_reg
- [root@localhost #] cat i2c_val
-
-Note that if the sensor registers cannot be read, "cat" will fail.
-To avoid race conditions, all the I/O accesses to the files are serialized.
-
-
-9. Supported devices
-====================
-Neither the names of the companies nor those of their products will be
-mentioned here. They have never collaborated with the author, so no advertising.
-
-From the point of view of a driver, a device is unambiguously identified by
-its USB vendor and product identifiers. Below is a list of known identifiers of
-devices mounting the ET61X[12]51 PC camera controllers:
-
-Vendor ID Product ID
---------- ----------
-0x102c 0x6151
-0x102c 0x6251
-0x102c 0x6253
-0x102c 0x6254
-0x102c 0x6255
-0x102c 0x6256
-0x102c 0x6257
-0x102c 0x6258
-0x102c 0x6259
-0x102c 0x625a
-0x102c 0x625b
-0x102c 0x625c
-0x102c 0x625d
-0x102c 0x625e
-0x102c 0x625f
-0x102c 0x6260
-0x102c 0x6261
-0x102c 0x6262
-0x102c 0x6263
-0x102c 0x6264
-0x102c 0x6265
-0x102c 0x6266
-0x102c 0x6267
-0x102c 0x6268
-0x102c 0x6269
-
-The following image sensors are supported:
-
-Model Manufacturer
------ ------------
-TAS5130D1B Taiwan Advanced Sensor Corporation
-
-All the available control settings of each image sensor are supported through
-the V4L2 interface.
-
-
-10. Notes for V4L2 application developers
-=========================================
-This driver follows the V4L2 API specifications. In particular, it enforces two
-rules:
-
-- exactly one I/O method, either "mmap" or "read", is associated with each
-file descriptor. Once it is selected, the application must close and reopen the
-device to switch to the other I/O method;
-
-- although it is not mandatory, previously mapped buffer memory should always
-be unmapped before calling any "VIDIOC_S_CROP" or "VIDIOC_S_FMT" ioctl's.
-The same number of buffers as before will be allocated again to match the size
-of the new video frames, so you have to map the buffers again before any I/O
-attempts on them.
-
-Consistent with the hardware limits, this driver also supports image
-downscaling with arbitrary scaling factors between 1 and 2 in both directions.
-However, the V4L2 API specifications don't correctly define how the scaling
-factor can be chosen arbitrarily by the "negotiation" of the "source" and
-"target" rectangles. To work around this flaw, we have added the convention
-that, during the negotiation, whenever the "VIDIOC_S_CROP" ioctl is issued, the
-scaling factor is restored to 1.
-
-This driver supports two different video formats: the first one is the "8-bit
-Sequential Bayer" format and can be used to obtain uncompressed video data
-from the device through the current I/O method, while the second one provides
-"raw" compressed video data (without frame headers not related to the
-compressed data). The current compression quality may vary from 0 to 1 and can
-be selected or queried thanks to the VIDIOC_S_JPEGCOMP and VIDIOC_G_JPEGCOMP
-V4L2 ioctl's.
-
-
-11. Contact information
-=======================
-The author may be contacted by e-mail at <luca.risolia@studio.unibo.it>.
-
-GPG/PGP encrypted e-mails are accepted. The GPG key ID of the author is
-'FCE635A4'; the public 1024-bit key should be available at any keyserver;
-the fingerprint is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'.
diff --git a/Documentation/video4linux/extract_xc3028.pl b/Documentation/video4linux/extract_xc3028.pl
index 47877deae6d7..47877deae6d7 100644..100755
--- a/Documentation/video4linux/extract_xc3028.pl
+++ b/Documentation/video4linux/extract_xc3028.pl
diff --git a/Documentation/video4linux/fimc.txt b/Documentation/video4linux/fimc.txt
index fd02d9a4930a..25f4d3402722 100644
--- a/Documentation/video4linux/fimc.txt
+++ b/Documentation/video4linux/fimc.txt
@@ -58,7 +58,7 @@ Not currently supported:
4.1. Media device interface
The driver supports Media Controller API as defined at
-http://http://linuxtv.org/downloads/v4l-dvb-apis/media_common.html
+http://linuxtv.org/downloads/v4l-dvb-apis/media_common.html
The media device driver name is "SAMSUNG S5P FIMC".
The purpose of this interface is to allow changing assignment of FIMC instances
diff --git a/Documentation/video4linux/ibmcam.txt b/Documentation/video4linux/ibmcam.txt
deleted file mode 100644
index a51055211e62..000000000000
--- a/Documentation/video4linux/ibmcam.txt
+++ /dev/null
@@ -1,323 +0,0 @@
-README for Linux device driver for the IBM "C-It" USB video camera
-
-INTRODUCTION:
-
-This driver does not use all features known to exist in
-the IBM camera. However, most of the needed features work well.
-
-This driver was developed using logs of observed USB traffic
-which was produced by the standard Windows driver (c-it98.sys).
-I did not have data sheets from Xirlink.
-
-Video formats:
- 128x96 [model 1]
- 176x144
- 320x240 [model 2]
- 352x240 [model 2]
- 352x288
-Frame rate: 3 - 30 frames per second (FPS)
-External interface: USB
-Internal interface: Video For Linux (V4L)
-Supported controls:
-- by V4L: Contrast, Brightness, Color, Hue
-- by driver options: frame rate, lighting conditions, video format,
- default picture settings, sharpness.
-
-SUPPORTED CAMERAS:
-
-Xirlink "C-It" camera, also known as "IBM PC Camera".
-The device uses proprietary ASIC (and compression method);
-it is manufactured by Xirlink. See http://xirlinkwebcam.sourceforge.net,
-http://www.ibmpccamera.com, or http://www.c-itnow.com/ for details and pictures.
-
-This very chipset ("X Chip", as marked at the factory)
-is used in several other cameras, and they are supported
-as well:
-
-- IBM NetCamera
-- Veo Stingray
-
-The Linux driver was developed with a camera with the following
-model number (or FCC ID): KSX-XVP510. This camera has three
-interfaces, each with one endpoint (control, iso, iso). This
-type of camera is referred to as "model 1". These cameras are
-no longer manufactured.
-
-Xirlink now manufactures new cameras which are somewhat different.
-In particular, the following models [FCC ID] belong to that category:
-
-XVP300 [KSX-X9903]
-XVP600 [KSX-X9902]
-XVP610 [KSX-X9902]
-
-(see http://www.xirlink.com/ibmpccamera/ for updates, they refer
-to these new cameras by Windows driver dated 12-27-99, v3005 BETA)
-These cameras have two interfaces, one endpoint in each (iso, bulk).
-This type of camera is referred to as "model 2". They are supported
-(with the exception of the native 352x288 mode).
-
-Some IBM NetCameras (Model 4) are made to generate only compressed
-video streams. This is great for performance, but unfortunately
-nobody knows how to decompress the stream :-( Therefore, these
-cameras are *unsupported* and if you try to use one of those, all
-you get is random colored horizontal streaks, not the image!
-If you have one of those cameras, you probably should return it
-to the store and get something that is supported.
-
-Tell me more about all that "model" business
---------------------------------------------
-
-I just invented model numbers to uniquely identify flavors of the
-hardware/firmware that were sold. It was very confusing to use
-brand names or some other internal numbering schemes. So I found
-by experimentation that all Xirlink chipsets fall into four big
-classes, and I called them "models". Each model is programmed in
-its own way, and each model sends back the video in its own way.
-
-Quirks of Model 2 cameras:
--------------------------
-
-Model 2 does not have hardware contrast control. The corresponding V4L
-control is implemented in software, which is not very nice to your
-CPU, but at least it works.
-
-This driver provides 352x288 mode by switching the camera into
-quasi-352x288 RGB mode (800 Kbits per frame) essentially limiting
-this mode to 10 frames per second or less, in ideal conditions on
-the bus (USB is shared, after all). The frame rate
-has to be programmed very conservatively. Additional concern is that
-frame rate depends on brightness setting; therefore the picture can
-be good at one brightness and broken at another! I did not want to fix
-the frame rate at slowest setting, but I had to move it pretty much down
-the scale (so that framerate option barely matters). I also noticed that
-camera after first powering up produces frames slightly faster than during
-consecutive uses. All this means that if you use 352x288 (which is
-default), be warned - you may encounter broken picture on first connect;
-try to adjust brightness - brighter image is slower, so USB will be able
-to send all data. However if you regularly use Model 2 cameras you may
-prefer 176x144 which makes perfectly good I420, with no scaling and
-lesser demands on USB (300 Kbits per second, or 26 frames per second).
-
-Another strange effect of 352x288 mode is the fine vertical grid visible
-on some colored surfaces. I am sure it is caused by me not understanding
-what the camera is trying to say. Blame trade secrets for that.
-
-The camera that I had also has a hardware quirk: if disconnected,
-it needs a few minutes to "relax" before it can be plugged in again
-(poorly designed USB processor reset circuit?)
-
-[Veo Stingray with Product ID 0x800C is also Model 2, but I haven't
-observed this particular flaw in it.]
-
-Model 2 camera can be programmed for very high sensitivity (even starlight
-may be enough), this makes it convenient for tinkering with. The driver
-code has enough comments to help a programmer to tweak the camera
-as s/he feels necessary.
-
-WHAT YOU NEED:
-
-- A supported IBM PC (C-it) camera (model 1 or 2)
-
-- A Linux box with USB support (2.3/2.4; 2.2 w/backport may work)
-
-- A Video4Linux compatible frame grabber program such as xawtv.
-
-HOW TO COMPILE THE DRIVER:
-
-You need to compile the driver only if you are a developer
-or if you want to make changes to the code. Most distributions
-precompile all modules, so you can go directly to the next
-section "HOW TO USE THE DRIVER".
-
-The ibmcam driver uses usbvideo helper library (module),
-so if you are studying the ibmcam code you will be led there.
-
-The driver itself consists of only one file in usb/ directory:
-ibmcam.c. This file is included into the Linux kernel build
-process if you configure the kernel for CONFIG_USB_IBMCAM.
-Run "make xconfig" and in USB section you will find the IBM
-camera driver. Select it, save the configuration and recompile.
-
-HOW TO USE THE DRIVER:
-
-I recommend compiling the driver as a module. This gives you
-easier access to its configuration. The camera has many more
-settings than V4L can handle, so some settings are done using
-module options.
-
-To begin with, on most modern Linux distributions the driver
-will be automatically loaded whenever you plug the supported
-camera in. Therefore, you don't need to do anything. However
-if you want to experiment with some module parameters then
-you can load and unload the driver manually, with camera
-plugged in or unplugged.
-
-Typically the module is installed with the 'modprobe' command, like this:
-
-# modprobe ibmcam framerate=1
-
-Alternatively you can use 'insmod' in similar fashion:
-
-# insmod /lib/modules/2.x.y/usb/ibmcam.o framerate=1
-
-The module can be inserted with the camera connected or disconnected.
-
-The driver can have options, though some defaults are provided.
-
-Driver options: (* indicates that option is model-dependent)
-
-Name Type Range [default] Example
--------------- -------------- -------------- ------------------
-debug Integer 0-9 [0] debug=1
-flags Integer 0-0xFF [0] flags=0x0d
-framerate Integer 0-6 [2] framerate=1
-hue_correction Integer 0-255 [128] hue_correction=115
-init_brightness Integer 0-255 [128] init_brightness=100
-init_contrast Integer 0-255 [192] init_contrast=200
-init_color Integer 0-255 [128] init_color=130
-init_hue Integer 0-255 [128] init_hue=115
-lighting Integer 0-2* [1] lighting=2
-sharpness Integer 0-6* [4] sharpness=3
-size Integer 0-2* [2] size=1
-
-Options for Model 2 only:
-
-Name Type Range [default] Example
--------------- -------------- -------------- ------------------
-init_model2_rg Integer 0..255 [0x70] init_model2_rg=128
-init_model2_rg2 Integer 0..255 [0x2f] init_model2_rg2=50
-init_model2_sat Integer 0..255 [0x34] init_model2_sat=65
-init_model2_yb Integer 0..255 [0xa0] init_model2_yb=200
-
-debug You don't need this option unless you are a developer.
- If you are a developer then you will see in the code
- what values do what. 0=off.
-
-flags This is a bit mask, and you can combine any number of
- bits to produce what you want. Usually you don't want
- any of extra features this option provides:
-
- FLAGS_RETRY_VIDIOCSYNC 1 This bit allows to retry failed
- VIDIOCSYNC ioctls without failing.
- Will work with xawtv, will not
- with xrealproducer. Default is
- not set.
- FLAGS_MONOCHROME 2 Activates monochrome (b/w) mode.
- FLAGS_DISPLAY_HINTS 4 Shows colored pixels which have
- magic meaning to developers.
- FLAGS_OVERLAY_STATS 8 Shows tiny numbers on screen,
- useful only for debugging.
- FLAGS_FORCE_TESTPATTERN 16 Shows blue screen with numbers.
- FLAGS_SEPARATE_FRAMES 32 Shows each frame separately, as
- it was received from the camera.
- Default (not set) is to mix the
- preceding frame in to compensate
- for occasional loss of Isoc data
- on high frame rates.
- FLAGS_CLEAN_FRAMES 64 Forces "cleanup" of each frame
- prior to use; relevant only if
- FLAGS_SEPARATE_FRAMES is set.
- Default is not to clean frames,
- this is a little faster but may
- produce flicker if frame rate is
- too high and Isoc data gets lost.
- FLAGS_NO_DECODING 128 This flag turns the video stream
- decoder off, and dumps the raw
- Isoc data from the camera into
- the reading process. Useful to
- developers, but not to users.
-
-framerate This setting controls frame rate of the camera. This is
- an approximate setting (in terms of "worst" ... "best")
- because camera changes frame rate depending on amount
- of light available. Setting 0 is slowest, 6 is fastest.
- Beware - fast settings are very demanding and may not
- work well with all video sizes. Be conservative.
-
-hue_correction  This highly optional setting allows adjusting the
- hue of the image in a way slightly different from
- what usual "hue" control does. Both controls affect
- YUV colorspace: regular "hue" control adjusts only
- U component, and this "hue_correction" option similarly
- adjusts only V component. However usually it is enough
- to tweak only U or V to compensate for colored light or
- color temperature; this option simply allows more
- complicated correction when and if it is necessary.
-
-init_brightness These settings specify _initial_ values which will be
-init_contrast used to set up the camera. If your V4L application has
-init_color its own controls to adjust the picture then these
-init_hue controls will be used too. These options allow you to
- preconfigure the camera when it gets connected, before
- any V4L application connects to it. Good for webcams.
-
-init_model2_rg These initial settings alter color balance of the
-init_model2_rg2 camera on hardware level. All four settings may be used
-init_model2_sat to tune the camera to specific lighting conditions. These
-init_model2_yb settings only apply to Model 2 cameras.
-
-lighting This option selects one of three hardware-defined
- photosensitivity settings of the camera. 0=bright light,
- 1=Medium (default), 2=Low light. This setting affects
- frame rate: the dimmer the lighting the lower the frame
-                rate (because a longer exposure time is needed). The
- Model 2 cameras allow values more than 2 for this option,
-                thus enabling extremely high sensitivity at the cost of frame
- rate, color saturation and imaging sensor noise.
-
-sharpness This option controls smoothing (noise reduction)
- made by camera. Setting 0 is most smooth, setting 6
- is most sharp. Be aware that CMOS sensor used in the
- camera is pretty noisy, so if you choose 6 you will
- be greeted with "snowy" image. Default is 4. Model 2
- cameras do not support this feature.
-
-size This setting chooses one of several image sizes that are
- supported by this driver. Cameras may support more, but
- it's difficult to reverse-engineer all formats.
-                The following video sizes are supported:
-
- size=0 128x96 (Model 1 only)
- size=1 160x120
- size=2 176x144
- size=3 320x240 (Model 2 only)
- size=4 352x240 (Model 2 only)
- size=5 352x288
- size=6 640x480 (Model 3 only)
-
- The 352x288 is the native size of the Model 1 sensor
- array, so it's the best resolution the camera can
- yield. The best resolution of Model 2 is 176x144, and
- larger images are produced by stretching the bitmap.
-                Model 3 has a sensor with a 640x480 grid, and it works too,
- but the frame rate will be exceptionally low (1-2 FPS);
- it may be still OK for some applications, like security.
- Choose the image size you need. The smaller image can
- support faster frame rate. Default is 352x288.
-
-For more information and the Troubleshooting FAQ visit this URL:
-
- http://www.linux-usb.org/ibmcam/
-
-WHAT NEEDS TO BE DONE:
-
-- The button on the camera is not used. I don't know how to get to it.
- I know now how to read button on Model 2, but what to do with it?
-
-- Camera reports its status back to the driver; however I don't know
- what returned data means. If camera fails at some initialization
- stage then something should be done, and I don't do that because
- I don't even know that some command failed. This is mostly Model 1
- concern because Model 2 uses different commands which do not return
- status (and seem to complete successfully every time).
-
-- Some flavors of Model 4 NetCameras produce only compressed video
- streams, and I don't know how to decode them.
-
-CREDITS:
-
-The code is based in no small part on the CPiA driver by Johannes Erdfelt,
-Randy Dunlap, and others. Big thanks to them for their pioneering work on that
-and the USB stack.
-
-I also thank John Lightsey for his donation of the Veo Stingray camera.
diff --git a/Documentation/video4linux/m5602.txt b/Documentation/video4linux/m5602.txt
deleted file mode 100644
index 4450ab13f37b..000000000000
--- a/Documentation/video4linux/m5602.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-This document describes the ALi m5602 bridge connected
-to the following supported sensors:
-OmniVision OV9650,
-Samsung s5k83a,
-Samsung s5k4aa,
-Micron mt9m111,
-Pixel plus PO1030
-
-This driver mimics the windows drivers, which have a braindead implementation sending bayer-encoded frames at VGA resolution.
-In a perfect world we should be able to reprogram the m5602 and the connected sensor in hardware instead, supporting a range of resolutions and pixelformats
-
-Anyway, have fun and please report any bugs to m560x-driver-devel@lists.sourceforge.net
diff --git a/Documentation/video4linux/ov511.txt b/Documentation/video4linux/ov511.txt
deleted file mode 100644
index b3326b167ada..000000000000
--- a/Documentation/video4linux/ov511.txt
+++ /dev/null
@@ -1,288 +0,0 @@
--------------------------------------------------------------------------------
-Readme for Linux device driver for the OmniVision OV511 USB to camera bridge IC
--------------------------------------------------------------------------------
-
-Author: Mark McClelland
-Homepage: http://alpha.dyndns.org/ov511
-
-INTRODUCTION:
-
-This is a driver for the OV511, a USB-only chip used in many "webcam" devices.
-Any camera using the OV511/OV511+ and the OV6620/OV7610/20/20AE should work.
-Video capture devices that use the Philips SAA7111A decoder also work. It
-supports streaming and capture of color or monochrome video via the Video4Linux
-API. Most V4L apps are compatible with it. Most resolutions with a width and
-height that are a multiple of 8 are supported.
-
-If you need more information, please visit the OV511 homepage at the above URL.
-
-WHAT YOU NEED:
-
-- If you want to help with the development, get the chip's specification docs at
- http://www.ovt.com/omniusbp.html
-
-- A Video4Linux compatible frame grabber program (I recommend vidcat and xawtv)
- vidcat is part of the w3cam package: http://mpx.freeshell.net/
- xawtv is available at: http://linux.bytesex.org/xawtv/
-
-HOW TO USE IT:
-
-Note: These are simplified instructions. For complete instructions see:
- http://alpha.dyndns.org/ov511/install.html
-
-You must have first compiled USB support, support for your specific USB host
-controller (UHCI or OHCI), and Video4Linux support for your kernel (I recommend
-making them modules.) Make sure "Enforce bandwidth allocation" is NOT enabled.
-
-Next, (as root):
-
- modprobe usbcore
- modprobe usb-uhci <OR> modprobe usb-ohci
- modprobe videodev
- modprobe ov511
-
-If it is not already there (it usually is), create the video device:
-
- mknod /dev/video0 c 81 0
-
-Optionally, symlink /dev/video to /dev/video0
-
-You will have to set permissions on this device to allow you to read/write
-from it:
-
- chmod 666 /dev/video
- chmod 666 /dev/video0 (if necessary)
-
-Now you are ready to run a video app! Both vidcat and xawtv work well for me
-at 640x480.
-
-[Using vidcat:]
-
- vidcat -s 640x480 -p c > test.jpg
- xview test.jpg
-
-[Using xawtv:]
-
-From the main xawtv directory:
-
- make clean
- ./configure
- make
- make install
-
-Now you should be able to run xawtv. Right click for the options dialog.
-
-MODULE PARAMETERS:
-
- You can set these with: insmod ov511 NAME=VALUE
- There is currently no way to set these on a per-camera basis.
-
- NAME: autobright
- TYPE: integer (Boolean)
- DEFAULT: 1
- DESC: Brightness is normally under automatic control and can't be set
- manually by the video app. Set to 0 for manual control.
-
- NAME: autogain
- TYPE: integer (Boolean)
- DEFAULT: 1
- DESC: Auto Gain Control enable. This feature is not yet implemented.
-
- NAME: autoexp
- TYPE: integer (Boolean)
- DEFAULT: 1
- DESC: Auto Exposure Control enable. This feature is not yet implemented.
-
- NAME: debug
- TYPE: integer (0-6)
- DEFAULT: 3
- DESC: Sets the threshold for printing debug messages. The higher the value,
- the more is printed. The levels are cumulative, and are as follows:
- 0=no debug messages
- 1=init/detection/unload and other significant messages
- 2=some warning messages
- 3=config/control function calls
- 4=most function calls and data parsing messages
- 5=highly repetitive mesgs
-
- NAME: snapshot
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: Set to 1 to enable snapshot mode. read()/VIDIOCSYNC will block until
- the snapshot button is pressed. Note: enabling this mode disables
- /proc/video/ov511/<minor#>/button
-
- NAME: cams
- TYPE: integer (1-4 for OV511, 1-31 for OV511+)
- DEFAULT: 1
- DESC: Number of cameras allowed to stream simultaneously on a single bus.
- Values higher than 1 reduce the data rate of each camera, allowing two
- or more to be used at once. If you have a complicated setup involving
- both OV511 and OV511+ cameras, trial-and-error may be necessary for
- finding the optimum setting.
-
- NAME: compress
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: Set this to 1 to turn on the camera's compression engine. This can
- potentially increase the frame rate at the expense of quality, if you
- have a fast CPU. You must load the proper compression module for your
- camera before starting your application (ov511_decomp or ov518_decomp).
-
- NAME: testpat
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: This configures the camera's sensor to transmit a colored test-pattern
- instead of an image. This does not work correctly yet.
-
- NAME: dumppix
- TYPE: integer (0-2)
- DEFAULT: 0
- DESC: Dumps raw pixel data and skips post-processing and format conversion.
- It is for debugging purposes only. Options are:
- 0: Disable (default)
- 1: Dump raw data from camera, excluding headers and trailers
- 2: Dumps data exactly as received from camera
-
- NAME: led
- TYPE: integer (0-2)
- DEFAULT: 1 (Always on)
- DESC: Controls whether the LED (the little light) on the front of the camera
- is always off (0), always on (1), or only on when driver is open (2).
- This is not supported with the OV511, and might only work with certain
- cameras (ones that actually have the LED wired to the control pin, and
- not just hard-wired to be on all the time).
-
- NAME: dump_bridge
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: Dumps the bridge (OV511[+] or OV518[+]) register values to the system
- log. Only useful for serious debugging/development purposes.
-
- NAME: dump_sensor
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: Dumps the sensor register values to the system log. Only useful for
- serious debugging/development purposes.
-
- NAME: printph
- TYPE: integer (Boolean)
- DEFAULT: 0
- DESC: Setting this to 1 will dump the first 12 bytes of each isoc frame. This
- is only useful if you are trying to debug problems with the isoc data
- stream (i.e.: camera initializes, but vidcat hangs until Ctrl-C). Be
- warned that this dumps a large number of messages to your kernel log.
-
- NAME: phy, phuv, pvy, pvuv, qhy, qhuv, qvy, qvuv
- TYPE: integer (0-63 for phy and phuv, 0-255 for rest)
- DEFAULT: OV511 default values
- DESC: These are registers 70h - 77h of the OV511, which control the
- prediction ranges and quantization thresholds of the compressor, for
- the Y and UV channels in the horizontal and vertical directions. See
- the OV511 or OV511+ data sheet for more detailed descriptions. These
- normally do not need to be changed.
-
- NAME: lightfreq
- TYPE: integer (0, 50, or 60)
- DEFAULT: 0 (use sensor default)
- DESC: Sets the sensor to match your lighting frequency. This can reduce the
- appearance of "banding", i.e. horizontal lines or waves of light and
- dark that are often caused by artificial lighting. Valid values are:
- 0 - Use default (depends on sensor, most likely 60 Hz)
- 50 - For European and Asian 50 Hz power
- 60 - For American 60 Hz power
-
- NAME: bandingfilter
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Enables the sensor's banding filter exposure algorithm. This reduces
- or stabilizes the "banding" caused by some artificial light sources
- (especially fluorescent). You might have to set lightfreq correctly for
- this to work right. As an added bonus, this sometimes makes it
-        possible to capture your monitor's output.
-
- NAME: fastset
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Allows picture settings (brightness, contrast, color, and hue) to take
- effect immediately, even in the middle of a frame. This reduces the
- time to change settings, but can ruin frames during the change. Only
- affects OmniVision sensors.
-
- NAME: force_palette
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Forces the palette (color format) to a specific value. If an
- application requests a different palette, it will be rejected, thereby
- forcing it to try others until it succeeds. This is useful for forcing
- greyscale mode with a color camera, for example. Supported modes are:
- 0 (Allows all the following formats)
- 1 VIDEO_PALETTE_GREY (Linear greyscale)
- 10 VIDEO_PALETTE_YUV420 (YUV 4:2:0 Planar)
- 15 VIDEO_PALETTE_YUV420P (YUV 4:2:0 Planar, same as 10)
-
- NAME: backlight
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Setting this flag changes the exposure algorithm for OmniVision sensors
- such that objects in the camera's view (i.e. your head) can be clearly
- seen when they are illuminated from behind. It reduces or eliminates
- the sensor's auto-exposure function, so it should only be used when
- needed. Additionally, it is only supported with the OV6620 and OV7620.
-
- NAME: unit_video
- TYPE: Up to 16 comma-separated integers
- DEFAULT: 0,0,0... (automatically assign the next available minor(s))
- DESC: You can specify up to 16 minor numbers to be assigned to ov511 devices.
- For example, "unit_video=1,3" will make the driver use /dev/video1 and
- /dev/video3 for the first two devices it detects. Additional devices
- will be assigned automatically starting at the first available device
- node (/dev/video0 in this case). Note that you cannot specify 0 as a
- minor number. This feature requires kernel version 2.4.5 or higher.
-
- NAME: remove_zeros
- TYPE: integer (Boolean)
- DEFAULT: 0 (do not skip any incoming data)
- DESC: Setting this to 1 will remove zero-padding from incoming data. This
- will compensate for the blocks of corruption that can appear when the
- camera cannot keep up with the speed of the USB bus (eg. at low frame
- resolutions). This feature is always enabled when compression is on.
-
- NAME: mirror
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Setting this to 1 will reverse ("mirror") the image horizontally. This
- might be necessary if your camera has a custom lens assembly. This has
- no effect with video capture devices.
-
- NAME: ov518_color
- TYPE: integer (Boolean)
- DEFAULT: 0 (off)
- DESC: Enable OV518 color support. This is off by default since it doesn't
- work most of the time. If you want to try it, you must also load
- ov518_decomp with the "nouv=0" parameter. If you get improper colors or
- diagonal lines through the image, restart your video app and try again.
- Repeat as necessary.
-
-WORKING FEATURES:
- o Color streaming/capture at most widths and heights that are multiples of 8.
- o Monochrome (use force_palette=1 to enable)
- o Setting/getting of saturation, contrast, brightness, and hue (only some of
-    them work with the OV7620 and OV7620AE)
- o /proc status reporting
- o SAA7111A video capture support at 320x240 and 640x480
- o Compression support
- o SMP compatibility
-
-HOW TO CONTACT ME:
-
-You can email me at mark@alpha.dyndns.org . Please prefix the subject line
-with "OV511: " so that I am certain to notice your message.
-
-CREDITS:
-
-The code is based in no small part on the CPiA driver by Johannes Erdfelt,
-Randy Dunlap, and others. Big thanks to them for their pioneering work on that
-and the USB stack. Thanks to Bret Wallach for getting camera reg IO, ISOC, and
-image capture working. Thanks to Orion Sky Lawlor, Kevin Moore, and Claudio
-Matsuoka for their work as well.
diff --git a/Documentation/video4linux/se401.txt b/Documentation/video4linux/se401.txt
deleted file mode 100644
index bd6526ec8dd7..000000000000
--- a/Documentation/video4linux/se401.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Linux driver for SE401 based USB cameras
-
-Copyright, 2001, Jeroen Vreeken
-
-
-INTRODUCTION:
-
-The SE401 chip is used in low-cost USB webcams.
-It is produced by Endpoints Inc. (www.endpoints.com).
-It interfaces directly to a CMOS image sensor and USB. The only other major
-part in an SE401 based camera is a DRAM chip.
-
-The following cameras are known to work with this driver:
-
-Aox se401 (non-branded) cameras
-Philips PVCV665 USB VGA webcam 'Vesta Fun'
-Kensington VideoCAM PC Camera Model 67014
-Kensington VideoCAM PC Camera Model 67015
-Kensington VideoCAM PC Camera Model 67016
-Kensington VideoCAM PC Camera Model 67017
-
-
-WHAT YOU NEED:
-
-- USB support
-- VIDEO4LINUX support
-
-More information about USB support for linux can be found at:
-http://www.linux-usb.org
-
-
-MODULE OPTIONS:
-
-When the driver is compiled as a module you can also use the 'flickerless'
-option. With it, exposure is limited to values that do not interfere with the
-mains frequency. Valid values for this option are 0, 50 and 60 (0=disable,
-50=50 Hz, 60=60 Hz).
-
-
-KNOWN PROBLEMS:
-
-The driver works fine with the usb-ohci and uhci host controller drivers,
-the default settings also work with usb-uhci. But sending more than one bulk
-transfer at a time with usb-uhci doesn't work yet.
-Users of usb-ohci and uhci can safely enlarge SE401_NUMSBUF in se401.h in
-order to increase the throughput (and thus framerate).
-
-
-HELP:
-
-The latest info on this driver can be found at:
-http://members.chello.nl/~j.vreeken/se401/
-Questions can be sent to me at:
-pe1rxq@amsat.org
diff --git a/Documentation/video4linux/si470x.txt b/Documentation/video4linux/si470x.txt
index 3a7823e01b4d..98c32925eb39 100644
--- a/Documentation/video4linux/si470x.txt
+++ b/Documentation/video4linux/si470x.txt
@@ -53,6 +53,9 @@ Testing is usually done with most application under Debian/testing:
- kradio - Comfortable Radio Application for KDE
- radio - ncurses-based radio application
- mplayer - The Ultimate Movie Player For Linux
+- v4l2-ctl - Collection of command line video4linux utilities
+For example, you can use:
+v4l2-ctl -d /dev/radio0 --set-ctrl=volume=10,mute=0 --set-freq=95.21 --all
There is also a library libv4l, which can be used. It's going to have a function
for frequency seeking, either by using hardware functionality as in radio-si470x
@@ -75,8 +78,10 @@ commands. Please adjust the audio devices to your needs (/dev/dsp* and hw:x,x).
If you just want to test audio (very poor quality):
cat /dev/dsp1 > /dev/dsp
-If you use OSS try:
+If you use sox + OSS try:
sox -2 --endian little -r 96000 -t oss /dev/dsp1 -t oss /dev/dsp
+or using sox + alsa:
+sox --endian little -c 2 -S -r 96000 -t alsa hw:1 -t alsa -r 96000 hw:0
If you use arts try:
arecord -D hw:1,0 -r96000 -c2 -f S16_LE | artsdsp aplay -B -
diff --git a/Documentation/video4linux/soc-camera.txt b/Documentation/video4linux/soc-camera.txt
index 3f87c7da4ca2..f62fcdbc8b9f 100644
--- a/Documentation/video4linux/soc-camera.txt
+++ b/Documentation/video4linux/soc-camera.txt
@@ -9,32 +9,36 @@ The following terms are used in this document:
of connecting to a variety of systems and interfaces, typically uses i2c for
control and configuration, and a parallel or a serial bus for data.
- camera host - an interface, to which a camera is connected. Typically a
- specialised interface, present on many SoCs, e.g., PXA27x and PXA3xx, SuperH,
+ specialised interface, present on many SoCs, e.g. PXA27x and PXA3xx, SuperH,
AVR32, i.MX27, i.MX31.
- camera host bus - a connection between a camera host and a camera. Can be
- parallel or serial, consists of data and control lines, e.g., clock, vertical
+ parallel or serial, consists of data and control lines, e.g. clock, vertical
and horizontal synchronization signals.
Purpose of the soc-camera subsystem
-----------------------------------
-The soc-camera subsystem provides a unified API between camera host drivers and
-camera sensor drivers. It implements a V4L2 interface to the user, currently
-only the mmap method is supported.
+The soc-camera subsystem initially provided a unified API between camera host
+drivers and camera sensor drivers. Later the soc-camera sensor API was
+replaced with the standard V4L2 subdev API. This also made camera driver re-use
+with non-soc-camera hosts possible. The camera host API to the soc-camera core
+has been preserved.
-This subsystem has been written to connect drivers for System-on-Chip (SoC)
-video capture interfaces with drivers for CMOS camera sensor chips to enable
-the reuse of sensor drivers with various hosts. The subsystem has been designed
-to support multiple camera host interfaces and multiple cameras per interface,
-although most applications have only one camera sensor.
+Soc-camera implements a V4L2 interface to the user, currently only the "mmap"
+method is supported by host drivers. However, the soc-camera core also provides
+support for the "read" method.
+
+The subsystem has been designed to support multiple camera host interfaces and
+multiple cameras per interface, although most applications have only one camera
+sensor.
Existing drivers
----------------
-As of 2.6.27-rc4 there are two host drivers in the mainline: pxa_camera.c for
-PXA27x SoCs and sh_mobile_ceu_camera.c for SuperH SoCs, and four sensor drivers:
-mt9m001.c, mt9m111.c, mt9v022.c and a generic soc_camera_platform.c driver. This
-list is not supposed to be updated, look for more examples in your tree.
+As of 3.7 there are seven host drivers in the mainline: atmel-isi.c,
+mx1_camera.c (broken, scheduled for removal), mx2_camera.c, mx3_camera.c,
+omap1_camera.c, pxa_camera.c, sh_mobile_ceu_camera.c, and multiple sensor
+drivers under drivers/media/i2c/soc_camera/.
Camera host API
---------------
@@ -45,38 +49,37 @@ soc_camera_host_register(struct soc_camera_host *);
function. The host object can be initialized as follows:
-static struct soc_camera_host pxa_soc_camera_host = {
- .drv_name = PXA_CAM_DRV_NAME,
- .ops = &pxa_soc_camera_host_ops,
-};
+ struct soc_camera_host *ici;
+ ici->drv_name = DRV_NAME;
+ ici->ops = &camera_host_ops;
+ ici->priv = pcdev;
+ ici->v4l2_dev.dev = &pdev->dev;
+ ici->nr = pdev->id;
All camera host methods are passed in a struct soc_camera_host_ops:
-static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
+static struct soc_camera_host_ops camera_host_ops = {
.owner = THIS_MODULE,
- .add = pxa_camera_add_device,
- .remove = pxa_camera_remove_device,
- .suspend = pxa_camera_suspend,
- .resume = pxa_camera_resume,
- .set_fmt_cap = pxa_camera_set_fmt_cap,
- .try_fmt_cap = pxa_camera_try_fmt_cap,
- .init_videobuf = pxa_camera_init_videobuf,
- .reqbufs = pxa_camera_reqbufs,
- .poll = pxa_camera_poll,
- .querycap = pxa_camera_querycap,
- .try_bus_param = pxa_camera_try_bus_param,
- .set_bus_param = pxa_camera_set_bus_param,
+ .add = camera_add_device,
+ .remove = camera_remove_device,
+ .set_fmt = camera_set_fmt_cap,
+ .try_fmt = camera_try_fmt_cap,
+ .init_videobuf2 = camera_init_videobuf2,
+ .poll = camera_poll,
+ .querycap = camera_querycap,
+ .set_bus_param = camera_set_bus_param,
+ /* The rest of host operations are optional */
};
.add and .remove methods are called when a sensor is attached to or detached
-from the host, apart from performing host-internal tasks they shall also call
-sensor driver's .init and .release methods respectively. .suspend and .resume
-methods implement host's power-management functionality and its their
-responsibility to call respective sensor's methods. .try_bus_param and
-.set_bus_param are used to negotiate physical connection parameters between the
-host and the sensor. .init_videobuf is called by soc-camera core when a
-video-device is opened, further video-buffer management is implemented completely
-by the specific camera host driver. The rest of the methods are called from
+from the host. .set_bus_param is used to configure physical connection
+parameters between the host and the sensor. .init_videobuf2 is called by the
+soc-camera core when a video-device is opened; the host driver would typically
+call vb2_queue_init() in this method. Further video-buffer management is
+implemented completely by the specific camera host driver. If the host driver
+supports non-standard pixel format conversion, it should implement the
+.get_formats and, possibly, the .put_formats operations. See below for more
+details about format conversion. The rest of the methods are called from
respective V4L2 operations.
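+
+As an illustration, a minimal .init_videobuf2 method could look like the
+sketch below. The vb2_ops and buffer structure names (camera_videobuf_ops,
+struct camera_buffer) and the choice of the dma-contig allocator are only
+assumptions made for this example, not requirements of the API:
+
+static int camera_init_videobuf2(struct vb2_queue *q,
+                                 struct soc_camera_device *icd)
+{
+        /* capture queue, filled through mmap() or user-pointer buffers */
+        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+        q->io_modes = VB2_MMAP | VB2_USERPTR;
+        /* let the vb2 callbacks find the attached camera */
+        q->drv_priv = icd;
+        q->ops = &camera_videobuf_ops;          /* the driver's vb2_ops */
+        q->mem_ops = &vb2_dma_contig_memops;    /* assumed allocator */
+        q->buf_struct_size = sizeof(struct camera_buffer);
+
+        return vb2_queue_init(q);
+}
+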
Camera API
@@ -84,37 +87,21 @@ Camera API
Sensor drivers can use struct soc_camera_link, typically provided by the
platform, and used to specify to which camera host bus the sensor is connected,
-and arbitrarily provide platform .power and .reset methods for the camera.
-soc_camera_device_register() and soc_camera_device_unregister() functions are
-used to add a sensor driver to or remove one from the system. The registration
-function takes a pointer to struct soc_camera_device as the only parameter.
-This struct can be initialized as follows:
-
- /* link to driver operations */
- icd->ops = &mt9m001_ops;
- /* link to the underlying physical (e.g., i2c) device */
- icd->control = &client->dev;
- /* window geometry */
- icd->x_min = 20;
- icd->y_min = 12;
- icd->x_current = 20;
- icd->y_current = 12;
- icd->width_min = 48;
- icd->width_max = 1280;
- icd->height_min = 32;
- icd->height_max = 1024;
- icd->y_skip_top = 1;
- /* camera bus ID, typically obtained from platform data */
- icd->iface = icl->bus_id;
-
-struct soc_camera_ops provides .probe and .remove methods, which are called by
-the soc-camera core, when a camera is matched against or removed from a camera
-host bus, .init, .release, .suspend, and .resume are called from the camera host
-driver as discussed above. Other members of this struct provide respective V4L2
-functionality.
-
-struct soc_camera_device also links to an array of struct soc_camera_data_format,
-listing pixel formats, supported by the camera.
+and optionally provide platform .power and .reset methods for the camera. This
+struct is provided to the camera driver via the I2C client device platform data
+and can be obtained using the soc_camera_i2c_to_link() macro. Care should be
+taken when using soc_camera_vdev_to_subdev() and when accessing struct
+soc_camera_device using v4l2_get_subdev_hostdata(): both only work when
+running on an soc-camera host. The actual camera driver operation is implemented
+using the V4L2 subdev API. Additionally, soc-camera camera drivers can use
+auxiliary soc-camera helper functions like soc_camera_power_on() and
+soc_camera_power_off(), which switch regulators provided by the platform and
+call board-specific power switching methods. soc_camera_apply_board_flags()
+takes camera bus configuration capability flags and applies any board
+transformations, e.g. signal polarity inversion. soc_mbus_get_fmtdesc() can be
+used to obtain a pixel format descriptor corresponding to a certain media-bus
+pixel format code. soc_camera_limit_side() can be used to restrict the
+beginning and length of a frame side, based on camera capabilities.
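+
+For illustration only, a sensor driver might retrieve its soc_camera_link in
+its probe() function and use the power helpers from its .s_power subdev
+operation. The sketch below is not taken from an in-tree driver; in
+particular, the argument lists of soc_camera_power_on()/_off() are assumed
+and should be checked against include/media/soc_camera.h:
+
+static int sensor_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
+{
+        struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+        if (!icl) {
+                dev_err(&client->dev, "missing platform data\n");
+                return -EINVAL;
+        }
+
+        /* ... register the v4l2_subdev, set up controls, etc. ... */
+        return 0;
+}
+
+static int sensor_s_power(struct v4l2_subdev *sd, int on)
+{
+        struct i2c_client *client = v4l2_get_subdevdata(sd);
+        struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+        /* switch platform regulators and board-specific power methods */
+        return on ? soc_camera_power_on(&client->dev, icl)
+                  : soc_camera_power_off(&client->dev, icl);
+}
+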
VIDIOC_S_CROP and VIDIOC_S_FMT behaviour
----------------------------------------
@@ -153,8 +140,25 @@ implemented.
User window geometry is kept in .user_width and .user_height fields in struct
soc_camera_device and used by the soc-camera core and host drivers. The core
updates these fields upon successful completion of a .s_fmt() call, but if these
-fields change elsewhere, e.g., during .s_crop() processing, the host driver is
+fields change elsewhere, e.g. during .s_crop() processing, the host driver is
responsible for updating them.
+Format conversion
+-----------------
+
+V4L2 distinguishes between pixel formats as they are stored in memory and as
+they are transferred over a media bus. Soc-camera provides support to
+conveniently manage these formats. A table of standard transformations is
+maintained by the soc-camera core, which describes what FOURCC pixel format
+will be obtained if a media-bus pixel format is stored in memory according to
+certain rules. E.g. if V4L2_MBUS_FMT_YUYV8_2X8 data is sampled with 8 bits per
+sample and stored in memory in little-endian order with no gaps between bytes,
+the data in memory will represent the V4L2_PIX_FMT_YUYV FOURCC format. These
+standard transformations will be used by soc-camera or by camera host drivers
+to configure camera drivers to produce the FOURCC format requested by the user
+with the VIDIOC_S_FMT ioctl(). Apart from those standard format conversions,
+host drivers can also provide their own conversion rules by implementing the
+.get_formats and, if required, the .put_formats methods.
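+
+As a small illustration (the helper function name is made up for this
+example), a host driver could look up the standard descriptor for a
+media-bus code like this:
+
+static u32 fourcc_for_yuyv8_2x8(void)
+{
+        /* standard memory layout for the YUYV8_2X8 media-bus code */
+        const struct soc_mbus_pixelfmt *fmt =
+                soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
+
+        /* per the table described above this yields V4L2_PIX_FMT_YUYV */
+        return fmt ? fmt->fourcc : 0;
+}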
+
--
Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
diff --git a/Documentation/video4linux/stv680.txt b/Documentation/video4linux/stv680.txt
deleted file mode 100644
index e3de33645308..000000000000
--- a/Documentation/video4linux/stv680.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-Linux driver for STV0680 based USB cameras
-
-Copyright, 2001, Kevin Sisson
-
-
-INTRODUCTION:
-
-STMicroelectronics produces the STV0680B chip, which comes in two
-types, -001 and -003. The -003 version allows the recording and downloading
-of sound clips from the camera, and allows a flash attachment. Otherwise,
-it uses the same commands as the -001 version. Both versions support a
-variety of SDRAM sizes and sensors, allowing for a maximum of 26 VGA or 20
-CIF pictures. The STV0680 supports either a serial or a USB interface, and
-video is possible through the USB interface.
-
-The following cameras are known to work with this driver, although any
-camera with Vendor/Product codes of 0553/0202 should work:
-
-Aiptek Pencam (various models)
-Nisis QuickPix 2
-Radio Shack 'Kid's digital camera' (#60-1207)
-At least one Trust Spycam model
-Several other European brand models
-
-WHAT YOU NEED:
-
-- USB support
-- VIDEO4LINUX support
-
-More information about USB support for linux can be found at:
-http://www.linux-usb.org
-
-
-MODULE OPTIONS:
-
-When the driver is compiled as a module, you can set a "swapRGB=1"
-option, if necessary, for those applications that require it
-(such as xawtv). However, the driver should detect and set this
-automatically, so this option should not normally be used.
-
-
-KNOWN PROBLEMS:
-
-The driver seems to work better with the usb-ohci than the usb-uhci host
-controller driver.
-
-HELP:
-
-The latest info on this driver can be found at:
-http://personal.clt.bellsouth.net/~kjsisson or at
-http://stv0680-usb.sourceforge.net
-
-Any questions can be sent to me at: kjsisson@bellsouth.net
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index cfe52c798d74..676f87366025 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -715,14 +715,20 @@ a control of this type whenever the first control belonging to a new control
class is added.
-Proposals for Extensions
-========================
+Adding Notify Callbacks
+=======================
+
+Sometimes the platform or bridge driver needs to be notified when a control
+from a sub-device driver changes. You can set a notify callback by calling
+this function:
-Some ideas for future extensions to the spec:
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl,
+ void (*notify)(struct v4l2_ctrl *ctrl, void *priv), void *priv);
-1) Add a V4L2_CTRL_FLAG_HEX to have values shown as hexadecimal instead of
-decimal. Useful for e.g. video_mute_yuv.
+Whenever the given control changes value, the notify callback will be called
+with a pointer to the control and the priv pointer that was passed to
+v4l2_ctrl_notify(). Note that the control's handler lock is held when the
+notify function is called.
-2) It is possible to mark in the controls array which controls have been
-successfully written and which failed by for example adding a bit to the
-control ID. Not sure if it is worth the effort, though.
+There can be only one notify function per control handler. Any attempt
+to set another notify function will cause a WARN_ON.
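+
+A minimal sketch (the bridge state structure, its fields and the chosen
+control ID are hypothetical):
+
+struct my_bridge {
+        s32 volume;
+        /* ... */
+};
+
+static void my_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+{
+        struct my_bridge *bridge = priv;
+
+        /* react to the sub-device control change */
+        bridge->volume = ctrl->val;
+}
+
+static void my_bridge_hook_volume(struct my_bridge *bridge,
+                                  struct v4l2_subdev *sd)
+{
+        struct v4l2_ctrl *ctrl =
+                v4l2_ctrl_find(sd->ctrl_handler, V4L2_CID_AUDIO_VOLUME);
+
+        if (ctrl)
+                v4l2_ctrl_notify(ctrl, my_ctrl_notify, bridge);
+}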
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index b89567ad04b7..a300b283a1a0 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -68,8 +68,7 @@ Structure of the framework
The framework closely resembles the driver structure: it has a v4l2_device
struct for the device instance data, a v4l2_subdev struct to refer to
sub-device instances, the video_device struct stores V4L2 device node data
-and in the future a v4l2_fh struct will keep track of filehandle instances
-(this is not yet implemented).
+and the v4l2_fh struct keeps track of filehandle instances.
The V4L2 framework also optionally integrates with the media framework. If a
driver sets the struct v4l2_device mdev field, sub-devices and video nodes
diff --git a/Documentation/video4linux/w9968cf.txt b/Documentation/video4linux/w9968cf.txt
deleted file mode 100644
index 9649450f3b90..000000000000
--- a/Documentation/video4linux/w9968cf.txt
+++ /dev/null
@@ -1,458 +0,0 @@
-
- W996[87]CF JPEG USB Dual Mode Camera Chip
- Driver for Linux 2.6 (basic version)
- =========================================
-
- - Documentation -
-
-
-Index
-=====
-1. Copyright
-2. Disclaimer
-3. License
-4. Overview
-5. Supported devices
-6. Module dependencies
-7. Module loading
-8. Module parameters
-9. Contact information
-10. Credits
-
-
-1. Copyright
-============
-Copyright (C) 2002-2004 by Luca Risolia <luca.risolia@studio.unibo.it>
-
-
-2. Disclaimer
-=============
-Winbond is a trademark of Winbond Electronics Corporation.
-This software is not sponsored or developed by Winbond.
-
-
-3. License
-==========
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-
-4. Overview
-===========
-This driver supports the video streaming capabilities of the devices mounting
-Winbond W9967CF and Winbond W9968CF JPEG USB Dual Mode Camera Chips. OV681
-based cameras should be supported as well.
-
-The driver is divided into two modules: the basic one, "w9968cf", is needed for
-the supported devices to work; the second one, "w9968cf-vpp", is an optional
-module, which provides some useful video post-processing functions like video
-decoding, up-scaling and colour conversions.
-
-Note that the official kernels neither include nor support the second
-module, for performance reasons. Therefore, it is always recommended to
-download and install the latest and complete release of the driver,
-replacing the existing one, if present.
-
-The latest and full-featured version of the W996[87]CF driver can be found at:
-http://www.linux-projects.org. Please refer to the documentation included in
-that package, if you are going to use it.
-
-Up to 32 cameras can be handled at the same time. They can be connected and
-disconnected from the host many times without turning off the computer, if
-your system supports the hotplug facility.
-
-To change the default settings for each camera, many parameters can be passed
-through command line when the module is loaded into memory.
-
-The driver relies on the Video4Linux, USB and I2C core modules. It has been
-designed to run properly on SMP systems as well. An additional module,
-"ovcamchip", is mandatory; it provides support for some OmniVision image
-sensors connected to the W996[87]CF chips; if found in the system, the module
-will be automatically loaded by default (provided that the kernel has been
-compiled with the automatic module loading option).
-
-
-5. Supported devices
-====================
-At the moment, known W996[87]CF and OV681 based devices are:
-- Aroma Digi Pen VGA Dual Mode ADG-5000 (unknown image sensor)
-- AVerMedia AVerTV USB (SAA7111A, Philips FI1216Mk2 tuner, PT2313L audio chip)
-- Creative Labs Video Blaster WebCam Go (OmniVision OV7610 sensor)
-- Creative Labs Video Blaster WebCam Go Plus (OmniVision OV7620 sensor)
-- Lebon LDC-035A (unknown image sensor)
-- Ezonics EZ-802 EZMega Cam (OmniVision OV8610C sensor)
-- OmniVision OV8610-EDE (OmniVision OV8610 sensor)
-- OPCOM Digi Pen VGA Dual Mode Pen Camera (unknown image sensor)
-- Pretec Digi Pen-II (OmniVision OV7620 sensor)
-- Pretec DigiPen-480 (OmniVision OV8610 sensor)
-
-If you know any other W996[87]CF or OV681 based cameras, please contact me.
-
-The list above does not imply that all those devices work with this driver: up
-until now only webcams that have an image sensor supported by the "ovcamchip"
-module work. Kernel messages will always tell you whether this is the case.
-
-Possible external microcontrollers of those webcams are not supported: this
-means that still images cannot be downloaded from the device memory.
-
-Furthermore, it's worth noting that I was only able to run tests on my
-"Creative Labs Video Blaster WebCam Go". Donations of other models, for
-additional testing and full support, would be much appreciated.
-
-
-6. Module dependencies
-======================
-For it to work properly, the driver needs kernel support for Video4Linux, USB
-and I2C, and the "ovcamchip" module for the image sensor. Make sure you are not
-actually using any external "ovcamchip" module, given that the W996[87]CF
-driver depends on the version of the module present in the official kernels.
-
-The following options of the kernel configuration file must be enabled and
-corresponding modules must be compiled:
-
- # Multimedia devices
- #
- CONFIG_VIDEO_DEV=m
-
- # I2C support
- #
- CONFIG_I2C=m
-
-The I2C core module can be compiled statically in the kernel as well.
-
- # OmniVision Camera Chip support
- #
- CONFIG_VIDEO_OVCAMCHIP=m
-
- # USB support
- #
- CONFIG_USB=m
-
-In addition, depending on the hardware being used, only one of the modules
-below is necessary:
-
- # USB Host Controller Drivers
- #
- CONFIG_USB_EHCI_HCD=m
- CONFIG_USB_UHCI_HCD=m
- CONFIG_USB_OHCI_HCD=m
-
-And finally:
-
- # USB Multimedia devices
- #
- CONFIG_USB_W9968CF=m
-
-
-7. Module loading
-=================
-To use the driver, it is necessary to load the "w9968cf" module into memory
-after every other module required.
-
-Loading can be done this way, from root:
-
- [root@localhost home]# modprobe usbcore
- [root@localhost home]# modprobe i2c-core
- [root@localhost home]# modprobe videodev
- [root@localhost home]# modprobe w9968cf
-
-At this point the pertinent devices should be recognized: "dmesg" can be used
-to analyze kernel messages:
-
- [user@localhost home]$ dmesg
-
-There are a lot of parameters the module can use to change the default
-settings for each device. To list every possible parameter with a brief
-explanation about them and which syntax to use, it is recommended to run the
-"modinfo" command:
-
- [root@locahost home]# modinfo w9968cf
-
-
-8. Module parameters
-====================
-Module parameters are listed below:
--------------------------------------------------------------------------------
-Name: ovmod_load
-Type: bool
-Syntax: <0|1>
-Description: Automatic 'ovcamchip' module loading: 0 disabled, 1 enabled.
- If enabled, 'insmod' searches for the required 'ovcamchip'
- module in the system, according to its configuration, and
- loads that module automatically. This action is performed as
-                soon as the 'w9968cf' module is loaded into memory.
-Default: 1
--------------------------------------------------------------------------------
-Name: simcams
-Type: int
-Syntax: <n>
-Description: Number of cameras allowed to stream simultaneously.
- n may vary from 0 to 32.
-Default: 32
--------------------------------------------------------------------------------
-Name: video_nr
-Type: int array (min = 0, max = 32)
-Syntax: <-1|n[,...]>
-Description:    Specify V4L minor number.
- -1 = use next available
- n = use minor number n
- You can specify up to 32 cameras this way.
- For example:
- video_nr=-1,2,-1 would assign minor number 2 to the second
- recognized camera and use auto for the first one and for every
- other camera.
-Default: -1
--------------------------------------------------------------------------------
-Name: packet_size
-Type: int array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Specify the maximum data payload size in bytes for alternate
- settings, for each device. n is scaled between 63 and 1023.
-Default: 1023
--------------------------------------------------------------------------------
-Name: max_buffers
-Type: int array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: For advanced users.
- Specify the maximum number of video frame buffers to allocate
- for each device, from 2 to 32.
-Default: 2
--------------------------------------------------------------------------------
-Name: double_buffer
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Hardware double buffering: 0 disabled, 1 enabled.
- It should be enabled if you want smooth video output: if you
-                obtain out-of-sync video, disable it, or try to
- decrease the 'clockdiv' module parameter value.
-Default: 1 for every device.
--------------------------------------------------------------------------------
-Name: clamping
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Video data clamping: 0 disabled, 1 enabled.
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: filter_type
-Type: int array (min = 0, max = 32)
-Syntax: <0|1|2[,...]>
-Description: Video filter type.
- 0 none, 1 (1-2-1) 3-tap filter, 2 (2-3-6-3-2) 5-tap filter.
- The filter is used to reduce noise and aliasing artifacts
- produced by the CCD or CMOS image sensor.
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: largeview
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Large view: 0 disabled, 1 enabled.
-Default: 1 for every device.
--------------------------------------------------------------------------------
-Name: upscaling
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Software scaling (for non-compressed video only):
- 0 disabled, 1 enabled.
- Disable it if you have a slow CPU or you don't have enough
- memory.
-Default: 0 for every device.
-Note: If 'w9968cf-vpp' is not present, this parameter is set to 0.
--------------------------------------------------------------------------------
-Name: decompression
-Type: int array (min = 0, max = 32)
-Syntax: <0|1|2[,...]>
-Description: Software video decompression:
- 0 = disables decompression
- (doesn't allow formats needing decompression).
- 1 = forces decompression
- (allows formats needing decompression only).
- 2 = allows any permitted formats.
- Formats supporting (de)compressed video are YUV422P and
- YUV420P/YUV420 in any resolutions where width and height are
- multiples of 16.
-Default: 2 for every device.
-Note: If 'w9968cf-vpp' is not present, forcing decompression is not
- allowed; in this case this parameter is set to 2.
--------------------------------------------------------------------------------
-Name: force_palette
-Type: int array (min = 0, max = 32)
-Syntax: <0|9|10|13|15|8|7|1|6|3|4|5[,...]>
-Description: Force picture palette.
- In order:
- 0 = Off - allows any of the following formats:
- 9 = UYVY 16 bpp - Original video, compression disabled
- 10 = YUV420 12 bpp - Original video, compression enabled
- 13 = YUV422P 16 bpp - Original video, compression enabled
- 15 = YUV420P 12 bpp - Original video, compression enabled
-                 8 = YUYV    16 bpp - Software conversion from UYVY
- 7 = YUV422 16 bpp - Software conversion from UYVY
- 1 = GREY 8 bpp - Software conversion from UYVY
- 6 = RGB555 16 bpp - Software conversion from UYVY
- 3 = RGB565 16 bpp - Software conversion from UYVY
- 4 = RGB24 24 bpp - Software conversion from UYVY
- 5 = RGB32 32 bpp - Software conversion from UYVY
- When not 0, this parameter will override 'decompression'.
-Default: 0 for every device. Initial palette is 9 (UYVY).
-Note: If 'w9968cf-vpp' is not present, this parameter is set to 9.
--------------------------------------------------------------------------------
-Name: force_rgb
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Read RGB video data instead of BGR:
- 1 = use RGB component ordering.
- 0 = use BGR component ordering.
- This parameter has effect when using RGBX palettes only.
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: autobright
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Image sensor automatically changes brightness:
- 0 = no, 1 = yes
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: autoexp
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Image sensor automatically changes exposure:
- 0 = no, 1 = yes
-Default: 1 for every device.
--------------------------------------------------------------------------------
-Name: lightfreq
-Type: int array (min = 0, max = 32)
-Syntax: <50|60[,...]>
-Description: Light frequency in Hz:
- 50 for European and Asian lighting, 60 for American lighting.
-Default: 50 for every device.
--------------------------------------------------------------------------------
-Name: bandingfilter
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Banding filter to reduce effects of fluorescent
- lighting:
- 0 disabled, 1 enabled.
- This filter tries to reduce the pattern of horizontal
- light/dark bands caused by some (usually fluorescent) lighting.
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: clockdiv
-Type: int array (min = 0, max = 32)
-Syntax: <-1|n[,...]>
-Description: Force pixel clock divisor to a specific value (for experts):
- n may vary from 0 to 127.
- -1 for automatic value.
- See also the 'double_buffer' module parameter.
-Default: -1 for every device.
--------------------------------------------------------------------------------
-Name: backlight
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Objects are lit from behind:
- 0 = no, 1 = yes
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: mirror
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Reverse image horizontally:
- 0 = no, 1 = yes
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: monochrome
-Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: The image sensor is monochrome:
- 0 = no, 1 = yes
-Default: 0 for every device.
--------------------------------------------------------------------------------
-Name: brightness
-Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Set picture brightness (0-65535).
- This parameter has no effect if 'autobright' is enabled.
-Default: 31000 for every device.
--------------------------------------------------------------------------------
-Name: hue
-Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Set picture hue (0-65535).
-Default: 32768 for every device.
--------------------------------------------------------------------------------
-Name: colour
-Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Set picture saturation (0-65535).
-Default: 32768 for every device.
--------------------------------------------------------------------------------
-Name: contrast
-Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Set picture contrast (0-65535).
-Default: 50000 for every device.
--------------------------------------------------------------------------------
-Name: whiteness
-Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
-Description: Set picture whiteness (0-65535).
-Default: 32768 for every device.
--------------------------------------------------------------------------------
-Name: debug
-Type: int
-Syntax: <n>
-Description: Debugging information level, from 0 to 6:
- 0 = none (use carefully)
- 1 = critical errors
- 2 = significant information
- 3 = configuration or general messages
- 4 = warnings
- 5 = called functions
- 6 = function internals
-                Levels 5 and 6 are useful for testing only, when only one
- device is used.
-Default: 2
--------------------------------------------------------------------------------
-Name: specific_debug
-Type: bool
-Syntax: <0|1>
-Description: Enable or disable specific debugging messages:
- 0 = print messages concerning every level <= 'debug' level.
- 1 = print messages concerning the level indicated by 'debug'.
-Default: 0
--------------------------------------------------------------------------------
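
The picture parameters above ('brightness', 'hue', 'colour', 'contrast',
'whiteness') only choose the initial values; the same 0-65535 scaled
properties can also be changed at runtime through the legacy V4L1 picture
ioctls. A minimal sketch, assuming the legacy <linux/videodev.h> API and an
illustrative device node path (this is not part of the driver package):

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/videodev.h>        /* legacy V4L1 API */

  /* Adjust brightness/contrast at runtime; the values use the same
   * 0-65535 scale as the module parameters listed above. */
  int set_picture(const char *devnode)
  {
          struct video_picture pict;
          int ret, fd = open(devnode, O_RDWR);

          if (fd < 0)
                  return -1;
          ret = ioctl(fd, VIDIOCGPICT, &pict);
          if (ret == 0) {
                  pict.brightness = 31000;
                  pict.contrast   = 50000;
                  ret = ioctl(fd, VIDIOCSPICT, &pict);
          }
          close(fd);
          return ret;
  }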
-
-
-9. Contact information
-======================
-I may be contacted by e-mail at <luca.risolia@studio.unibo.it>.
-
-I can accept GPG/PGP encrypted e-mail. My GPG key ID is 'FCE635A4'.
-My public 1024-bit key should be available at your keyserver; the fingerprint
-is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'.
-
-
-10. Credits
-===========
-The development would not have proceeded much further without having looked at
-the source code of other drivers and without the help of several persons; in
-particular:
-
-- the I2C interface to kernel and high-level image sensor control routines have
- been taken from the OV511 driver by Mark McClelland;
-
-- memory management code has been copied from the bttv driver by Ralph Metzler,
- Marcus Metzler and Gerd Knorr;
-
-- the low-level I2C read function has been written by Frederic Jouault;
-
-- the low-level I2C fast write function has been written by Piotr Czerczak.
diff --git a/Documentation/video4linux/zc0301.txt b/Documentation/video4linux/zc0301.txt
deleted file mode 100644
index b41c83cf09f4..000000000000
--- a/Documentation/video4linux/zc0301.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-
- ZC0301 and ZC0301P Image Processor and Control Chip
- Driver for Linux
- ===================================================
-
- - Documentation -
-
-
-Index
-=====
-1. Copyright
-2. Disclaimer
-3. License
-4. Overview and features
-5. Module dependencies
-6. Module loading
-7. Module parameters
-8. Supported devices
-9. Notes for V4L2 application developers
-10. Contact information
-11. Credits
-
-
-1. Copyright
-============
-Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it>
-
-
-2. Disclaimer
-=============
-This software is not developed or sponsored by Z-Star Microelectronics Corp.
-Trademarks are property of their respective owner.
-
-
-3. License
-==========
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-
-4. Overview and features
-========================
-This driver supports the video interface of the devices mounting the ZC0301 or
-ZC0301P Image Processors and Control Chips.
-
-The driver relies on the Video4Linux2 and USB core modules. It has been
-designed to run properly on SMP systems as well.
-
-The latest version of the ZC0301[P] driver can be found at the following URL:
-http://www.linux-projects.org/
-
-Some of the features of the driver are:
-
-- full compliance with the Video4Linux2 API (see also "Notes for V4L2
- application developers" paragraph);
-- available mmap or read/poll methods for video streaming through isochronous
- data transfers;
-- automatic detection of image sensor;
-- video format is standard JPEG;
-- dynamic driver control thanks to various module parameters (see "Module
- parameters" paragraph);
-- up to 64 cameras can be handled at the same time; they can be connected and
- disconnected from the host many times without turning off the computer, if
- the system supports hotplugging;
-
-
-5. Module dependencies
-======================
-For it to work properly, the driver needs kernel support for Video4Linux and
-USB.
-
-The following options of the kernel configuration file must be enabled and
-corresponding modules must be compiled:
-
- # Multimedia devices
- #
- CONFIG_VIDEO_DEV=m
-
- # USB support
- #
- CONFIG_USB=m
-
-In addition, depending on the hardware being used, the modules below are
-necessary:
-
- # USB Host Controller Drivers
- #
- CONFIG_USB_EHCI_HCD=m
- CONFIG_USB_UHCI_HCD=m
- CONFIG_USB_OHCI_HCD=m
-
-The ZC0301 controller also provides a built-in microphone interface. It is
-supported by the USB Audio driver thanks to the ALSA API:
-
- # Sound
- #
- CONFIG_SOUND=y
-
- # Advanced Linux Sound Architecture
- #
- CONFIG_SND=m
-
- # USB devices
- #
- CONFIG_SND_USB_AUDIO=m
-
-And finally:
-
- # V4L USB devices
- #
- CONFIG_USB_ZC0301=m
-
-
-6. Module loading
-=================
-To use the driver, it is necessary to load the "zc0301" module into memory
-after every other module required: "videodev", "v4l2_common", "compat_ioctl32",
-"usbcore" and, depending on the USB host controller you have, "ehci-hcd",
-"uhci-hcd" or "ohci-hcd".
-
-Loading can be done as shown below:
-
- [root@localhost home]# modprobe zc0301
-
-At this point the devices should be recognized. You can invoke "dmesg" to
-analyze kernel messages and verify that the loading process has gone well:
-
- [user@localhost home]$ dmesg
-
-
-7. Module parameters
-====================
-Module parameters are listed below:
--------------------------------------------------------------------------------
-Name: video_nr
-Type: short array (min = 0, max = 64)
-Syntax: <-1|n[,...]>
-Description:    Specify V4L2 minor number:
- -1 = use next available
- n = use minor number n
- You can specify up to 64 cameras this way.
- For example:
- video_nr=-1,2,-1 would assign minor number 2 to the second
- registered camera and use auto for the first one and for every
- other camera.
-Default: -1
--------------------------------------------------------------------------------
-Name: force_munmap
-Type: bool array (min = 0, max = 64)
-Syntax: <0|1[,...]>
-Description: Force the application to unmap previously mapped buffer memory
- before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
- all the applications support this feature. This parameter is
- specific for each detected camera.
- 0 = do not force memory unmapping
- 1 = force memory unmapping (save memory)
-Default: 0
--------------------------------------------------------------------------------
-Name: frame_timeout
-Type: uint array (min = 0, max = 64)
-Syntax: <n[,...]>
-Description: Timeout for a video frame in seconds. This parameter is
- specific for each detected camera. This parameter can be
- changed at runtime thanks to the /sys filesystem interface.
-Default: 2
--------------------------------------------------------------------------------
-Name: debug
-Type: ushort
-Syntax: <n>
-Description: Debugging information level, from 0 to 3:
- 0 = none (use carefully)
- 1 = critical errors
- 2 = significant information
- 3 = more verbose messages
- Level 3 is useful for testing only, when only one device
- is used at the same time. It also shows some information
- about the hardware being detected. This module parameter can be
- changed at runtime thanks to the /sys filesystem interface.
-Default: 2
--------------------------------------------------------------------------------
-
-
-8. Supported devices
-====================
-Neither the companies' names nor their products will be mentioned here.
-They have never collaborated with the author, so no advertising.
-
-From the point of view of a driver, a device is unambiguously identified by
-its vendor and product USB identifiers. Below is a list of known identifiers of
-devices mounting the ZC0301 Image Processor and Control Chips:
-
-Vendor ID Product ID
---------- ----------
-0x041e 0x4017
-0x041e 0x401c
-0x041e 0x401e
-0x041e 0x401f
-0x041e 0x4022
-0x041e 0x4034
-0x041e 0x4035
-0x041e 0x4036
-0x041e 0x403a
-0x0458 0x7007
-0x0458 0x700c
-0x0458 0x700f
-0x046d 0x08ae
-0x055f 0xd003
-0x055f 0xd004
-0x0ac8 0x0301
-0x0ac8 0x301b
-0x0ac8 0x303b
-0x10fd 0x0128
-0x10fd 0x8050
-0x10fd 0x804e
-
-The list above does not imply that all those devices work with this driver: up
-until now only the ones that mount the following image sensors are supported;
-kernel messages will always tell you whether this is the case:
-
-Model Manufacturer
------ ------------
-PAS202BCB PixArt Imaging, Inc.
-PB-0330 Photobit Corporation
-
-
-9. Notes for V4L2 application developers
-========================================
-This driver follows the V4L2 API specifications. In particular, it enforces two
-rules:
-
-- exactly one I/O method, either "mmap" or "read", is associated with each
-file descriptor. Once it is selected, the application must close and reopen the
-device to switch to the other I/O method;
-
-- although it is not mandatory, previously mapped buffer memory should always
-be unmapped before calling any "VIDIOC_S_CROP" or "VIDIOC_S_FMT" ioctl's.
-The same number of buffers as before will be allocated again to match the size
-of the new video frames, so you have to map the buffers again before any I/O
-attempts on them.
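
A minimal sketch of the second rule, assuming "mmap" streaming I/O and the
standard <linux/videodev2.h> interface (the change_format() helper and its
buffer bookkeeping are purely illustrative): unmap the old buffers, change the
format, then map the reallocated buffers again.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/videodev2.h>

  struct buffer { void *start; size_t length; };

  /* Renegotiate the frame size on an already initialized capture device. */
  int change_format(int fd, struct buffer *bufs, unsigned int n_bufs,
                    unsigned int width, unsigned int height)
  {
          struct v4l2_format fmt;
          struct v4l2_buffer buf;
          unsigned int i;

          for (i = 0; i < n_bufs; i++)      /* unmap the old buffers first */
                  munmap(bufs[i].start, bufs[i].length);

          memset(&fmt, 0, sizeof(fmt));
          fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
          fmt.fmt.pix.width = width;
          fmt.fmt.pix.height = height;
          fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG;
          if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                  return -1;

          /* the same number of buffers is allocated again: map them again */
          for (i = 0; i < n_bufs; i++) {
                  memset(&buf, 0, sizeof(buf));
                  buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                  buf.memory = V4L2_MEMORY_MMAP;
                  buf.index = i;
                  if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
                          return -1;
                  bufs[i].length = buf.length;
                  bufs[i].start = mmap(NULL, buf.length,
                                       PROT_READ | PROT_WRITE,
                                       MAP_SHARED, fd, buf.m.offset);
                  if (bufs[i].start == MAP_FAILED)
                          return -1;
          }
          return 0;
  }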
-
-
-10. Contact information
-=======================
-The author may be contacted by e-mail at <luca.risolia@studio.unibo.it>.
-
-GPG/PGP encrypted e-mails are accepted. The GPG key ID of the author is
-'FCE635A4'; the public 1024-bit key should be available at any keyserver;
-the fingerprint is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'.
-
-
-11. Credits
-===========
-- Information about the chip internals needed to enable the I2C protocol has
- been taken from the documentation of the ZC030x Video4Linux1 driver written
- by Andrew Birkett <andy@nobugs.org>;
-- The initialization values of the ZC0301 controller connected to the PAS202BCB
- and PB-0330 image sensors have been taken from the SPCA5XX driver maintained
- by Michel Xhaard <mxhaard@magic.fr>;
-- Stanislav Lechev donated one camera.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index e0fa0ea2b187..119358dfb742 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -219,19 +219,6 @@ allocation of vcpu ids. For example, if userspace wants
single-threaded guest vcpus, it should make all vcpu ids be a multiple
of the number of vcpus per vcore.
-On powerpc using book3s_hv mode, the vcpus are mapped onto virtual
-threads in one or more virtual CPU cores. (This is because the
-hardware requires all the hardware threads in a CPU core to be in the
-same partition.) The KVM_CAP_PPC_SMT capability indicates the number
-of vcpus per virtual core (vcore). The vcore id is obtained by
-dividing the vcpu id by the number of vcpus per vcore. The vcpus in a
-given vcore will always be in the same physical core as each other
-(though that might be a different physical core from time to time).
-Userspace can control the threading (SMT) mode of the guest by its
-allocation of vcpu ids. For example, if userspace wants
-single-threaded guest vcpus, it should make all vcpu ids be a multiple
-of the number of vcpus per vcore.
-
For virtual cpus that have been created with S390 user controlled virtual
machines, the resulting vcpu fd can be memory mapped at page offset
KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
@@ -345,7 +332,7 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
-/* ppc -- see arch/powerpc/include/asm/kvm.h */
+/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */
interrupt_bitmap is a bitmap of pending external interrupts. At most
one bit may be set. This interrupt has been acknowledged by the APIC
@@ -892,12 +879,12 @@ It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
be identical. This allows large pages in the guest to be backed by large
pages in the host.
-The flags field supports two flag, KVM_MEM_LOG_DIRTY_PAGES, which instructs
-kvm to keep track of writes to memory within the slot. See KVM_GET_DIRTY_LOG
-ioctl. The KVM_CAP_READONLY_MEM capability indicates the availability of the
-KVM_MEM_READONLY flag. When this flag is set for a memory region, KVM only
-allows read accesses. Writes will be posted to userspace as KVM_EXIT_MMIO
-exits.
+The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and
+KVM_MEM_READONLY. The former can be set to instruct KVM to keep track of
+writes to memory within the slot; see the KVM_GET_DIRTY_LOG ioctl for how to
+use it. The latter can be set, if the KVM_CAP_READONLY_MEM capability allows
+it, to make a new slot read-only. In this case, writes to this memory will be
+posted to userspace as KVM_EXIT_MMIO exits.
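
A minimal sketch of registering a slot with these flags from userspace (the
slot number, addresses and sizes below are illustrative only):

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Register a 2 MB slot backed by the host buffer "hva"; pass
   * KVM_MEM_LOG_DIRTY_PAGES and/or KVM_MEM_READONLY in "flags". */
  static int set_slot(int vm_fd, void *hva, uint32_t flags)
  {
          struct kvm_userspace_memory_region region = {
                  .slot            = 0,            /* illustrative slot number */
                  .flags           = flags,
                  .guest_phys_addr = 0x100000,     /* illustrative GPA */
                  .memory_size     = 0x200000,
                  .userspace_addr  = (uintptr_t)hva,
          };

          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }
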
When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
the memory region are automatically reflected into the guest. For example, an
@@ -931,7 +918,7 @@ documentation when it pops into existence).
4.37 KVM_ENABLE_CAP
Capability: KVM_CAP_ENABLE_CAP
-Architectures: ppc
+Architectures: ppc, s390
Type: vcpu ioctl
Parameters: struct kvm_enable_cap (in)
Returns: 0 on success; -1 on error
@@ -1792,6 +1779,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_VPA_SLB | 128
PPC | KVM_REG_PPC_VPA_DTL | 128
PPC | KVM_REG_PPC_EPCR | 32
+ PPC | KVM_REG_PPC_EPR | 32
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
@@ -2108,6 +2096,14 @@ KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt
KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm
KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm
KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm
+KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an
+ I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel);
+ I/O interruption parameters in parm (subchannel) and parm64 (intparm,
+ interruption subclass)
+KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
+ machine check interrupt code in parm64 (note that
+ machine checks needing further payload are not
+ supported by this ioctl)
Note that the vcpu ioctl is asynchronous to vcpu execution.
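
A minimal sketch of injecting one of these interrupts from userspace with the
KVM_S390_INTERRUPT ioctl and struct kvm_s390_interrupt (the parm/parm64
payloads below are placeholders, not meaningful machine-check data):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Inject a machine check as a floating (vm-wide) interrupt; real code
   * would fill parm with the cr 14 bits and parm64 with the machine
   * check interrupt code as described above. */
  static int inject_mchk(int vm_fd)
  {
          struct kvm_s390_interrupt irq;

          memset(&irq, 0, sizeof(irq));
          irq.type   = KVM_S390_MCHK;
          irq.parm   = 0;
          irq.parm64 = 0;

          return ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
  }
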
@@ -2359,8 +2355,8 @@ executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
-NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR
- and KVM_EXIT_PAPR the corresponding
+NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR,
+ KVM_EXIT_PAPR and KVM_EXIT_EPR the corresponding
operations are complete (and guest state is consistent) only after userspace
has re-entered the kernel with KVM_RUN. The kernel side will first finish
incomplete operations and then check for pending signals. Userspace
@@ -2463,6 +2459,41 @@ The possible hypercalls are defined in the Power Architecture Platform
Requirements (PAPR) document available from www.power.org (free
developer registration required to access it).
+ /* KVM_EXIT_S390_TSCH */
+ struct {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+ __u32 ipb;
+ __u8 dequeued;
+ } s390_tsch;
+
+s390 specific. This exit occurs when KVM_CAP_S390_CSS_SUPPORT has been enabled
+and TEST SUBCHANNEL was intercepted. If dequeued is set, a pending I/O
+interrupt for the target subchannel has been dequeued and subchannel_id,
+subchannel_nr, io_int_parm and io_int_word contain the parameters for that
+interrupt. ipb is needed for instruction parameter decoding.
+
+ /* KVM_EXIT_EPR */
+ struct {
+ __u32 epr;
+ } epr;
+
+On FSL BookE PowerPC chips, the interrupt controller has a fast path
+interrupt acknowledge path to the core. When the core successfully
+delivers an interrupt, it automatically populates the EPR register with
+the interrupt vector number and acknowledges the interrupt inside
+the interrupt controller.
+
+If the interrupt controller lives in user space, the interrupt acknowledge
+cycle has to go through it to fetch the interrupt vector to be delivered
+next; this exit is used for that purpose.
+
+It gets triggered whenever KVM_CAP_PPC_EPR is enabled and an
+external interrupt has just been delivered into the guest. User space
+should put the acknowledged interrupt vector into the 'epr' field.
+
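A minimal sketch of the corresponding run-loop handling in userspace, where
"run" is the mmap'ed struct kvm_run of the vcpu and pic_ack() is a
hypothetical callback into the userspace interrupt controller model:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  extern uint32_t pic_ack(void);   /* hypothetical: ack and return vector */

  static int run_once(int vcpu_fd, struct kvm_run *run)
  {
          if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                  return -1;

          if (run->exit_reason == KVM_EXIT_EPR)
                  /* acknowledge cycle: hand the vector back to the guest */
                  run->epr.epr = pic_ack();

          /* re-entering with KVM_RUN completes the operation, see the NOTE above */
          return 0;
  }
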
/* Fix the size of the union. */
char padding[256];
};
@@ -2584,3 +2615,34 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
where "num_sets" is the tlb_sizes[] value divided by the tlb_ways[] value.
- The tsize field of mas1 shall be set to 4K on TLB0, even though the
hardware ignores this value for TLB0.
+
+6.4 KVM_CAP_S390_CSS_SUPPORT
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success; -1 on error
+
+This capability enables support for handling of channel I/O instructions.
+
+TEST PENDING INTERRUPTION and the interrupt portion of TEST SUBCHANNEL are
+handled in-kernel, while the other I/O instructions are passed to userspace.
+
+When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
+SUBCHANNEL intercepts.
+
+6.5 KVM_CAP_PPC_EPR
+
+Architectures: ppc
+Parameters: args[0] defines whether the proxy facility is active
+Returns: 0 on success; -1 on error
+
+This capability enables or disables the delivery of interrupts through the
+external proxy facility.
+
+When enabled (args[0] != 0), every time the guest gets an external interrupt
+delivered, it automatically exits into user space with a KVM_EXIT_EPR exit
+to receive the topmost interrupt vector.
+
+When disabled (args[0] == 0), behavior is as if this facility is unsupported.
+
+When this capability is enabled, KVM_EXIT_EPR can occur.
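
A minimal sketch of toggling the facility through the KVM_ENABLE_CAP vcpu
ioctl described in section 4.37 (error handling trimmed):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* enable != 0 activates the external proxy facility, 0 disables it */
  static int set_epr_proxy(int vcpu_fd, int enable)
  {
          struct kvm_enable_cap cap;

          memset(&cap, 0, sizeof(cap));
          cap.cap     = KVM_CAP_PPC_EPR;
          cap.args[0] = enable;

          return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
  }
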
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index fa5f1dbc6b23..43fcb761ed16 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -187,13 +187,6 @@ Shadow pages contain the following information:
perform a reverse map from a pte to a gfn. When role.direct is set, any
element of this array can be calculated from the gfn field when used, in
this case, the array of gfns is not allocated. See role.direct and gfn.
- slot_bitmap:
- A bitmap containing one bit per memory slot. If the page contains a pte
- mapping a page from memory slot n, then bit n of slot_bitmap will be set
- (if a page is aliased among several slots, then it is not guaranteed that
- all slots will be marked).
- Used during dirty logging to avoid scanning a shadow page if none of its
- pages need tracking.
root_count:
A counter keeping track of how many hardware registers (guest cr3 or
pdptrs) are now pointing at the page. While this counter is nonzero, the
diff --git a/MAINTAINERS b/MAINTAINERS
index a92f485f5456..aea0adf414dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -97,12 +97,13 @@ Descriptions of section entries:
X: net/ipv6/
matches all files in and below net excluding net/ipv6/
K: Keyword perl extended regex pattern to match content in a
- patch or file. For instance:
+ patch or file, or an affected filename. For instance:
K: of_get_profile
- matches patches or files that contain "of_get_profile"
+ matches patch or file content, or filenames, that contain
+ "of_get_profile"
K: \b(printk|pr_(info|err))\b
- matches patches or files that contain one or more of the words
- printk, pr_info or pr_err
+ matches patch or file content, or filenames, that contain one or
+ more of the words printk, pr_info or pr_err
One regex pattern per line. Multiple K: lines acceptable.
Note: For the hard of thinking, this list is meant to remain in alphabetical
@@ -465,6 +466,14 @@ S: Maintained
F: drivers/scsi/aic7xxx/
F: drivers/scsi/aic7xxx_old/
+AIMSLAB FM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-aimslab*
+
AIO
M: Benjamin LaHaise <bcrl@kvack.org>
L: linux-aio@kvack.org
@@ -559,6 +568,18 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
+ANALOG DEVICES INC AD9389B DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/ad9389b*
+
+ANALOG DEVICES INC ADV7604 DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/adv7604*
+
ANALOG DEVICES INC ASOC CODEC DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
L: device-drivers-devel@blackfin.uclinux.org
@@ -1126,6 +1147,14 @@ F: arch/arm/mach-s5pv210/mach-goni.c
F: arch/arm/mach-exynos/mach-universal_c210.c
F: arch/arm/mach-exynos/mach-nuri.c
+ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Kamil Debski <k.debski@samsung.com>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/s5p-g2d/
+
ARM/SAMSUNG S5P SERIES FIMC SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1291,6 +1320,14 @@ S: Maintained
F: arch/arm64/
F: Documentation/arm64/
+AS3645A LED FLASH CONTROLLER DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/as3645a.c
+F: include/media/as3645a.h
+
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: lm-sensors@lm-sensors.org
@@ -1538,6 +1575,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/dvb-usb-v2/az6007.c
+AZTECH FM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-aztech*
+
B43 WIRELESS DRIVER
M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
@@ -1637,6 +1682,15 @@ W: http://blackfin.uclinux.org/
S: Supported
F: drivers/i2c/busses/i2c-bfin-twi.c
+BLACKFIN MEDIA DRIVER
+M: Scott Jiang <scott.jiang.linux@gmail.com>
+L: uclinux-dist-devel@blackfin.uclinux.org
+W: http://blackfin.uclinux.org/
+S: Supported
+F: drivers/media/platform/blackfin/
+F: drivers/media/i2c/adv7183*
+F: drivers/media/i2c/vs6624*
+
BLINKM RGB LED DRIVER
M: Jan-Simon Moeller <jansimon.moeller@gmx.de>
S: Maintained
@@ -1746,7 +1800,8 @@ F: drivers/bcma/
F: include/linux/bcma/
BROCADE BFA FC SCSI DRIVER
-M: Krishna C Gudipati <kgudipat@brocade.com>
+M: Anil Gurumurthy <agurumur@brocade.com>
+M: Vijaya Mohan Guvva <vmohan@brocade.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/bfa/
@@ -1820,6 +1875,14 @@ S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/
+CADET FM/AM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-cadet*
+
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
M: Jonathan Corbet <corbet@lwn.net>
L: linux-media@vger.kernel.org
@@ -2012,8 +2075,8 @@ S: Maintained
F: include/linux/clk.h
CISCO FCOE HBA DRIVER
-M: Abhijeet Joglekar <abjoglek@cisco.com>
-M: Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
+M: Hiral Patel <hiralpat@cisco.com>
+M: Suma Ramars <sramars@cisco.com>
M: Brian Uchino <buchino@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -2221,6 +2284,15 @@ F: Documentation/video4linux/cx18.txt
F: drivers/media/pci/cx18/
F: include/uapi/linux/ivtv*
+CX2341X MPEG ENCODER HELPER MODULE
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/i2c/cx2341x*
+F: include/media/cx2341x*
+
CX88 VIDEO4LINUX DRIVER
M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: linux-media@vger.kernel.org
@@ -2598,6 +2670,13 @@ S: Maintained
F: drivers/gpu/drm/tegra/
F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+DSBR100 USB FM RADIO DRIVER
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/dsbr100.c
+
DSCC4 DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -2825,6 +2904,13 @@ W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/e7xxx_edac.c
+EDAC-GHES
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-edac@vger.kernel.org
+W: bluesmoke.sourceforge.net
+S: Maintained
+F: drivers/edac/ghes-edac.c
+
EDAC-I82443BXGX
M: Tim Small <tim@buttersideup.com>
L: linux-edac@vger.kernel.org
@@ -3364,6 +3450,14 @@ W: http://www.icp-vortex.com/
S: Supported
F: drivers/scsi/gdt*
+GEMTEK FM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-gemtek*
+
GENERIC GPIO I2C DRIVER
M: Haavard Skinnemoen <hskinnemoen@gmail.com>
S: Supported
@@ -3767,6 +3861,13 @@ F: drivers/i2c/busses/i2c-sis96x.c
F: drivers/i2c/busses/i2c-via.c
F: drivers/i2c/busses/i2c-viapro.c
+I2C/SMBUS ISMT DRIVER
+M: Seth Heasley <seth.heasley@intel.com>
+M: Neil Horman <nhorman@tuxdriver.com>
+L: linux-i2c@vger.kernel.org
+F: drivers/i2c/busses/i2c-ismt.c
+F: Documentation/i2c/busses/i2c-ismt
+
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
@@ -4250,6 +4351,14 @@ F: Documentation/isapnp.txt
F: drivers/pnp/isapnp/
F: include/linux/isapnp.h
+ISA RADIO MODULE
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-isa*
+
iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
M: Peter Jones <pjones@redhat.com>
M: Konrad Rzeszutek Wilk <konrad@kernel.org>
@@ -4408,6 +4517,14 @@ W: http://lse.sourceforge.net/kdump/
S: Maintained
F: Documentation/kdump/
+KEENE FM RADIO TRANSMITTER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-keene*
+
KERNEL AUTOMOUNTER v4 (AUTOFS4)
M: Ian Kent <raven@themaw.net>
L: autofs@vger.kernel.org
@@ -4895,6 +5012,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/m88rs2000*
+MA901 MASTERKIT USB FM RADIO DRIVER
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-ma901.c
+
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
@@ -4992,6 +5116,14 @@ S: Maintained
F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
+MAXIRADIO FM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/radio-maxiradio*
+
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
M: Mauro Carvalho Chehab <mchehab@redhat.com>
P: LinuxTV.org Project
@@ -5014,6 +5146,14 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
+MEDIAVISION PRO MOVIE STUDIO DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/parport/pms*
+
MEGARAID SCSI DRIVERS
M: Neela Syam Kolli <megaraidlinux@lsi.com>
L: linux-scsi@vger.kernel.org
@@ -5087,6 +5227,14 @@ S: Supported
F: Documentation/mips/
F: arch/mips/
+MIROSOUND PCM20 FM RADIO RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/radio/radio-miropcm20*
+
MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
@@ -5125,6 +5273,38 @@ L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/msi-wmi.c
+MT9M032 SENSOR DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/mt9m032.c
+F: include/media/mt9m032.h
+
+MT9P031 SENSOR DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/mt9p031.c
+F: include/media/mt9p031.h
+
+MT9T001 SENSOR DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/mt9t001.c
+F: include/media/mt9t001.h
+
+MT9V032 SENSOR DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/mt9v032.c
+F: include/media/mt9v032.h
+
MULTIFUNCTION DEVICES (MFD)
M: Samuel Ortiz <sameo@linux.intel.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git
@@ -5266,6 +5446,7 @@ F: net/netrom/
NETWORK BLOCK DEVICE (NBD)
M: Paul Clements <Paul.Clements@steeleye.com>
S: Maintained
+L: nbd-general@lists.sourceforge.net
F: Documentation/blockdev/nbd.txt
F: drivers/block/nbd.c
F: include/linux/nbd.h
@@ -5528,7 +5709,7 @@ S: Maintained
F: drivers/mmc/host/omap.c
OMAP HS MMC SUPPORT
-M: Venkatraman S <svenkatr@ti.com>
+M: Balaji T K <balajitk@ti.com>
L: linux-mmc@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
@@ -6276,6 +6457,14 @@ L: linux-hexagon@vger.kernel.org
S: Supported
F: arch/hexagon/
+QUICKCAM PARALLEL PORT WEBCAMS
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/parport/*-qcam*
+
RADOS BLOCK DEVICE (RBD)
M: Yehuda Sadeh <yehuda@inktank.com>
M: Sage Weil <sage@inktank.com>
@@ -6333,6 +6522,12 @@ S: Maintained
F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
+RAMSAN DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
+M: Joshua Morris <josh.h.morris@us.ibm.com>
+M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S: Maintained
+F: drivers/block/rsxx/
+
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
@@ -6550,6 +6745,14 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/mmc/host/s3cmci.*
+SAA6588 RDS RECEIVER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/i2c/saa6588*
+
SAA7134 VIDEO4LINUX DRIVER
M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: linux-media@vger.kernel.org
@@ -6560,10 +6763,9 @@ F: Documentation/video4linux/saa7134/
F: drivers/media/pci/saa7134/
SAA7146 VIDEO4LINUX-2 DRIVER
-M: Michael Hunold <michael@mihu.de>
+M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
-W: http://www.mihu.de/linux/saa7146
S: Maintained
F: drivers/media/common/saa7146/
F: drivers/media/pci/saa7146/
@@ -6605,6 +6807,13 @@ S: Maintained
F: drivers/media/platform/s3c-camif/
F: include/media/s3c_camif.h
+SAMSUNG S5C73M3 CAMERA DRIVER
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Andrzej Hajda <a.hajda@samsung.com>
+L: linux-media@vger.kernel.org
+S: Supported
+F: drivers/media/i2c/s5c73m3/*
+
SERIAL DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-serial@vger.kernel.org
@@ -6618,6 +6827,14 @@ F: include/linux/dw_dmac.h
F: drivers/dma/dw_dmac_regs.h
F: drivers/dma/dw_dmac.c
+SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
+M: Seungwon Jeon <tgih.jun@samsung.com>
+M: Jaehoon Chung <jh80.chung@samsung.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: include/linux/mmc/dw_mmc.h
+F: drivers/mmc/host/dw_mmc*
+
TIMEKEEPING, NTP
M: John Stultz <john.stultz@linaro.org>
M: Thomas Gleixner <tglx@linutronix.de>
@@ -6866,6 +7083,38 @@ M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-xp/
+SI470X FM RADIO RECEIVER I2C DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/radio/si470x/radio-si470x-i2c.c
+
+SI470X FM RADIO RECEIVER USB DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/radio/si470x/radio-si470x-common.c
+F: drivers/media/radio/si470x/radio-si470x.h
+F: drivers/media/radio/si470x/radio-si470x-usb.c
+
+SH_VEU V4L2 MEM2MEM DRIVER
+M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/sh_veu.c
+F: include/media/sh_veu.h
+
+SH_VOU V4L2 OUTPUT DRIVER
+M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/sh_vou.c
+F: include/media/sh_vou.h
+
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb@kernel.org>
L: sfi-devel@simplefirmware.org
@@ -7306,6 +7555,7 @@ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
M: Julian Andres Klode <jak@jak-linux.org>
M: Marc Dietrich <marvin24@gmx.de>
L: ac100@lists.launchpad.net (moderated for non-subscribers)
+L: linux-tegra@vger.kernel.org
S: Maintained
F: drivers/staging/nvec/
@@ -7432,6 +7682,12 @@ F: lib/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
+SYNOPSYS ARC ARCHITECTURE
+M: Vineet Gupta <vgupta@synopsys.com>
+L: linux-snps-arc@vger.kernel.org
+S: Supported
+F: arch/arc/
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
@@ -7533,6 +7789,14 @@ T: git git://linuxtv.org/mkrufky/tuners.git
S: Maintained
F: drivers/media/tuners/tda8290.*
+TDA9840 MEDIA DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/i2c/tda9840*
+
TEA5761 TUNER DRIVER
M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: linux-media@vger.kernel.org
@@ -7549,6 +7813,22 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/tuners/tea5767.*
+TEA6415C MEDIA DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/i2c/tea6415c*
+
+TEA6420 MEDIA DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/i2c/tea6420*
+
TEAM DRIVER
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
@@ -7574,9 +7854,7 @@ L: linux-tegra@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
S: Supported
-F: arch/arm/mach-tegra
-F: arch/arm/boot/dts/tegra*
-F: arch/arm/configs/tegra_defconfig
+K: (?i)[^a-z]tegra
TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
@@ -8198,6 +8476,14 @@ S: Maintained
F: drivers/media/usb/uvc/
F: include/uapi/linux/uvcvideo.h
+USB VISION DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/usb/usbvision/
+
USB WEBCAM GADGET
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-usb@vger.kernel.org
@@ -8347,6 +8633,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
+VIVI VIRTUAL VIDEO DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/platform/vivi*
+
VLAN (802.1Q)
M: Patrick McHardy <kaber@trash.net>
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 6fccf6531770..473beb1ba057 100644
--- a/Makefile
+++ b/Makefile
@@ -192,7 +192,6 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
# "make" in the configured kernel build directory always uses that.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-export KBUILD_BUILDHOST := $(SUBARCH)
ARCH ?= $(SUBARCH)
CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
@@ -620,7 +619,8 @@ KBUILD_AFLAGS += -gdwarf-2
endif
ifdef CONFIG_DEBUG_INFO_REDUCED
-KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly)
+KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
+ $(call cc-option,-fno-var-tracking)
endif
ifdef CONFIG_FUNCTION_TRACER
@@ -720,11 +720,11 @@ endif # INSTALL_MOD_STRIP
export mod_strip_cmd
-ifeq ($(CONFIG_MODULE_SIG),y)
+ifdef CONFIG_MODULE_SIG_ALL
MODSECKEY = ./signing_key.priv
MODPUBKEY = ./signing_key.x509
export MODPUBKEY
-mod_sign_cmd = perl $(srctree)/scripts/sign-file $(MODSECKEY) $(MODPUBKEY)
+mod_sign_cmd = perl $(srctree)/scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODSECKEY) $(MODPUBKEY)
else
mod_sign_cmd = true
endif
@@ -1398,7 +1398,7 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
# Run depmod only if we have System.map and depmod is executable
quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
- $(KERNELRELEASE)
+ $(KERNELRELEASE) "$(patsubst "%",%,$(CONFIG_SYMBOL_PREFIX))"
# Create temporary dir for module support files
# clean it up only when building all modules
diff --git a/arch/Kconfig b/arch/Kconfig
index 40e2b12c7916..dcd91a85536a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -303,6 +303,13 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
+config HAVE_VIRT_TO_BUS
+ bool
+ help
+ An architecture should select this if it implements the
+ deprecated interface virt_to_bus(). All new architectures
+ should probably not select this.
+
config HAVE_ARCH_SECCOMP_FILTER
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 1ecbf7a1b677..5833aa441481 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index b9fc6c309d2e..e64559f0a82d 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -111,7 +111,7 @@ static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
int res;
- srm_env_t *entry = PDE(file->f_path.dentry->d_inode)->data;
+ srm_env_t *entry = PDE(file_inode(file))->data;
char *buf = (char *) __get_free_page(GFP_USER);
unsigned long ret1, ret2;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 272666d006df..4037461a6493 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -186,7 +186,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
#endif
printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
dik_show_regs(regs, r9_15);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
dik_show_trace((unsigned long *)(regs+1));
dik_show_code((unsigned int *)regs->pc);
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644
index 000000000000..082d329d3245
--- /dev/null
+++ b/arch/arc/Kbuild
@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644
index 000000000000..e6f4eca09ee3
--- /dev/null
+++ b/arch/arc/Kconfig
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+ def_bool y
+ select CLONE_BACKWARDS
+ # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+ select DEVTMPFS if !INITRAMFS_SOURCE=""
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_FIND_FIRST_BIT
+ # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_SHOW
+ select GENERIC_KERNEL_EXECVE
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SMP_IDLE_THREAD
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_IOREMAP_PROT
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_MEMBLOCK
+ select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select OF
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config MMU
+ def_bool y
+
+config NO_IOPORT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config BINFMT_ELF
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+ select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+config NO_DMA
+ def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+#New platform adds here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+ prompt "ARC Core"
+ default ARC_CPU_770
+
+config ARC_CPU_750D
+ bool "ARC750D"
+ help
+ Support for ARC750 core
+
+config ARC_CPU_770
+ bool "ARC770"
+ select ARC_CPU_REL_4_10
+ help
+ Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+ This core has a bunch of cool new features:
+ -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+                   Shared Address Spaces (for sharing TLB entries in MMU)
+ -Caches: New Prog Model, Region Flush
+ -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+ bool "Enable Big Endian Mode"
+ default n
+ help
+ Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+ bool
+
+config SMP
+ bool "Symmetric Multi-Processing (Incomplete)"
+ default n
+ select USE_GENERIC_SMP_HELPERS
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+ def_bool n
+
+config ARC_HAS_COH_LLSC
+ def_bool n
+
+config ARC_HAS_COH_RTSC
+ def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+ def_bool n
+
+endif
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "2"
+
+menuconfig ARC_CACHE
+ bool "Enable Cache Support"
+ default y
+ # if SMP, cache enabled ONLY if ARC implementation has cache coherency
+ depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+ int "Cache Line Length (as power of 2)"
+ range 5 7
+ default "6"
+ help
+ Starting with ARC700 4.9, the cache line length is configurable.
+ This option specifies "N", with line length = 2^N, so line lengths
+ of 32, 64 and 128 are specified by 5, 6 and 7, respectively.
+ Linux only supports the same line length for the I and D caches.
+
+config ARC_HAS_ICACHE
+ bool "Use Instruction Cache"
+ default y
+
+config ARC_HAS_DCACHE
+ bool "Use Data Cache"
+ default y
+
+config ARC_CACHE_PAGES
+ bool "Per Page Cache Control"
+ default y
+ depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+ help
+ This can be used to override the global I/D cache enable on a
+ per-page basis (but only for pages accessed via the MMU, i.e.
+ kernel or user virtual addresses): TLB entries have a per-page
+ cache enable bit.
+ Note that global I/D ENABLE + per-page DISABLE works, but the
+ converse, global DISABLE + per-page ENABLE, won't work.
+
+endif #ARC_CACHE
+
+config ARC_HAS_ICCM
+ bool "Use ICCM"
+ help
+ Single Cycle RAMs to store Fast Path Code
+ default n
+
+config ARC_ICCM_SZ
+ int "ICCM Size in KB"
+ default "64"
+ depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+ bool "Use DCCM"
+ help
+ Single Cycle RAMs to store Fast Path Data
+ default n
+
+config ARC_DCCM_SZ
+ int "DCCM Size in KB"
+ default "64"
+ depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+ hex "DCCM map address"
+ default "0xA0000000"
+ depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+ bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+ default y
+ help
+ Influences how gcc generates code for MPY operations.
+ If enabled, MPYxx insns are generated, provided by the Standard/XMAC
+ multiplier. Otherwise the software multiply library is used.
+
+choice
+ prompt "ARC700 MMU Version"
+ default ARC_MMU_V3 if ARC_CPU_770
+ default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+ bool "MMU v1"
+ help
+ Orig ARC700 MMU
+
+config ARC_MMU_V2
+ bool "MMU v2"
+ help
+ Fixed the deficiency of v1 - possible thrashing in the memcpy scenario
+ when 2 D-TLB and 1 I-TLB entries index into the same 2-way set.
+
+config ARC_MMU_V3
+ bool "MMU v3"
+ depends on ARC_CPU_770
+ help
+ Introduced with ARC700 4.10: New Features
+ Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+ Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+ prompt "MMU Page Size"
+ default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+ bool "8KB"
+ help
+ Choose between 8k and 16k
+
+config ARC_PAGE_SIZE_16K
+ bool "16KB"
+ depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+ bool "4KB"
+ depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+ bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ default n
+ # Timer HAS to be high priority, for any other high priority config
+ select ARC_IRQ3_LV2
+ # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+ depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+ bool
+
+config ARC_IRQ5_LV2
+ bool
+
+config ARC_IRQ6_LV2
+ bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+ bool "Enable FPU state persistence across context switch"
+ default n
+ help
+ The Double Precision Floating Point unit has dedicated regs which
+ need to be saved/restored across a context-switch.
+ Note that the ARC FPU is overly simplistic, unlike say x86, which has
+ hardware pieces to allow software to conditionally save/restore,
+ based on actual usage of the FPU by a task. Thus our implementation does
+ this for all tasks in the system.
+
+menuconfig ARC_CPU_REL_4_10
+ bool "Enable support for Rel 4.10 features"
+ default n
+ help
+ -ARC770 (and dependent features) enabled
+ -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+ bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+ default y
+ depends on ARC_CPU_770
+ # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+ depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+ bool "Insn: SWAPE (endian-swap)"
+ default y
+ depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+ bool "Insn: RTSC (64-bit r/o cycle counter)"
+ default y
+ depends on ARC_CPU_REL_4_10
+ # if SMP, enable RTSC only if counter is coherent across cores
+ depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+ hex "Linux Link Address"
+ default "0x80000000"
+ help
+ ARC700 divides the 32 bit physical address space into two equal halves
+ -Lower 2G (0 - 0x7FFF_FFFF) is user virtual, translated by the MMU
+ -Upper 2G (0x8000_0000 onwards) is untranslated, for the kernel
+ Typically the Linux kernel is linked at the start of the untranslated
+ region, hence the default value of 0x8000_0000.
+ However some customers have peripherals mapped at this address, so
+ Linux needs to be scooted a bit.
+ If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+ bool "Dedicate Register r25 for current_task pointer"
+ default y
+ help
+ This reserves Register R25 to point to the Current Task in
+ kernel mode, saving a memory access for each such reference.
+
+
+config ARC_MISALIGN_ACCESS
+ bool "Emulate unaligned memory access (userspace only)"
+ default n
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ help
+ This enables misaligned 16 and 32 bit memory accesses from user space.
+ Use ONLY if absolutely necessary, as it will be very slow and can also
+ hide potential bugs in code.
+
+config ARC_STACK_NONEXEC
+ bool "Make stack non-executable"
+ default n
+ help
+ This disables the execute permission on the stack/heap of processes,
+ which is enabled by default.
+
+config HZ
+ int "Timer Frequency"
+ default 100
+
+config ARC_METAWARE_HLINK
+ bool "Support for Metaware debugger assisted Host access"
+ default n
+ help
+ This option allows Linux userland apps to directly access the
+ host file system (open/creat/read/write etc.) with help from the
+ Metaware Debugger. This can come in handy for Linux-host communication
+ when there is no real usable peripheral such as an EMAC.
+
+menuconfig ARC_DBG
+ bool "ARC debugging"
+ default y
+
+config ARC_DW2_UNWIND
+ bool "Enable DWARF specific kernel stack unwind"
+ depends on ARC_DBG
+ default y
+ select KALLSYMS
+ help
+ Compiles the kernel with DWARF unwind information and can be used
+ to get stack backtraces.
+
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame unwind information
+
+config ARC_DBG_TLB_PARANOIA
+ bool "Paranoia Checks in Low Level TLB Handlers"
+ depends on ARC_DBG
+ default n
+
+config ARC_DBG_TLB_MISS_COUNT
+ bool "Profile TLB Misses"
+ default n
+ select DEBUG_FS
+ depends on ARC_DBG
+ help
+ Counts the number of I and D TLB Misses and exports them via debugfs.
+ The counters can be cleared via debugfs as well.
+
+config CMDLINE
+ string "Kernel command line to built-in"
+ default "print-fatal-signals=1"
+ help
+ The default command line which will be appended to the optional
+ u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+ bool "Support U-boot kernel command line passing"
+ default n
+ help
+ If you are using U-boot (www.denx.de) and wish to pass the kernel
+ command line from the U-boot environment to the Linux kernel then
+ switch this option on.
+ ARC U-boot will set up the cmdline in RAM/flash and set r2 to point
+ to it. The kernel startup code will copy the string into the cmdline
+ buffer and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+ string "Built in DTB"
+ help
+ Set the name of the DTB to embed in the vmlinux binary.
+ Leaving it blank selects the minimal "skeleton" dtb.
+
+source "kernel/Kconfig.preempt"
+
+endmenu # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644
index 000000000000..962c6099659e
--- /dev/null
+++ b/arch/arc/Kconfig.debug
@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit.
+
+config 16KSTACKS
+ bool "Use 16Kb for kernel stacks instead of 8Kb"
+ help
+ If you say Y here the kernel will use a 16KB stack size for the
+ kernel stack attached to each process/thread. The default is 8KB.
+ This increases the resident kernel footprint and will cause fewer
+ threads to run on the system, and also increases the pressure
+ on the VM subsystem for higher-order allocations.
+
+endmenu
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644
index 000000000000..92379c7cbc1a
--- /dev/null
+++ b/arch/arc/Makefile
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer-reported bug where some code built into the kernel was NOT
+# using any kernel headers, and thus missed the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44) += -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y += -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y += -marclinux
+
+ARC_LIBGCC := -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+ cflags-y += -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which coupled with -mno-mpy will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+ ARC_LIBGCC := -marc600
+ ifneq ($(atleast_gcc44),y)
+ cflags-y += -multcost=30
+ endif
+endif
+
+LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE += -mlong-calls
+
+# Finally dump everything into the kernel build system
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(KBUILD_CFLAGS)
+LDFLAGS += $(ldflags-y)
+
+head-y := arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y += arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y += arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
+
+libs-y += arch/arc/lib/ $(LIBGCC)
+
+# Default target for make without any arguments.
+KBUILD_IMAGE := bootpImage
+
+all: $(KBUILD_IMAGE)
+boot := arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal(shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+# However lib/decompress_inflate.o (.init.text) calls
+# zlib_inflate_workspacesize (.text) causing relocation errors.
+# Thus forcing all external calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644
index 000000000000..7d514c24e095
--- /dev/null
+++ b/arch/arc/boot/Makefile
@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for the ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+ grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+
+PHONY += FORCE
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644
index 000000000000..5776835d583f
--- /dev/null
+++ b/arch/arc/boot/dts/Makefile
@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y := angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+ builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
new file mode 100644
index 000000000000..bae4f936cb03
--- /dev/null
+++ b/arch/arc/boot/dts/angel4.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "snps,arc-angel4";
+ clock-frequency = <80000000>; /* 80 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+ bootargs = "console=ttyARC0,115200n8";
+ };
+
+ aliases {
+ serial0 = &arcuart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ arcuart0: serial@c0fc1000 {
+ compatible = "snps,arc-uart";
+ reg = <0xc0fc1000 0x100>;
+ interrupts = <5>;
+ clock-frequency = <80000000>;
+ current-speed = <115200>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton.dts b/arch/arc/boot/dts/skeleton.dts
new file mode 100644
index 000000000000..25a84fb5b3dc
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dts
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644
index 000000000000..a870bdd5e404
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+ compatible = "snps,arc";
+ clock-frequency = <80000000>; /* 80 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
new file mode 100644
index 000000000000..b8698067ebbe
--- /dev/null
+++ b/arch/arc/configs/fpga_defconfig
@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 000000000000..48af742f8b5a
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 000000000000..1b907c465666
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_DVFB_BCR 0x64
+#define ARC_REG_EXTARITH_BCR 0x65
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR 0x6f
+#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR 0x75
+#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT 0 /* CPU Halted */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK (1<<STATUS_H_BIT)
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK 0xff0000
+#define ECR_CODE_MASK 0x00ff00
+#define ECR_PARAM_MASK 0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_INTR_VEC_BASE 0x25
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
+#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
+#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
+
+#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI /* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG 0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " lr %0, [%1]" \
+ : "=r"(__ret) \
+ : "i"(reg)); \
+ __ret; \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ * write_aux_reg(0x69, some_val);
+ * This generates the tightest code.
+ */
+#define write_aux_reg(reg_imm, val) \
+({ \
+ __asm__ __volatile__( \
+ " sr %0, [%1] \n" \
+ : \
+ : "ir"(val), "i"(reg_imm)); \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ * reg_num = 0x69
+ * write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val) \
+({ \
+ unsigned int tmp; \
+ \
+ __asm__ __volatile__( \
+ " ld %0, [%2] \n\t" \
+ " sr %1, [%0] \n\t" \
+ : "=&r"(tmp) \
+ : "r"(val), "memory"(&reg_in_var)); \
+})
+
+#endif
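+
+/*
+ * Usage sketch (hypothetical helper, for illustration only; assumes
+ * AUX_IENABLE is the per-line interrupt enable mask): shows the accessors
+ * above with AUX register numbers defined earlier in this header.
+ */
+static inline void example_aux_access(void)
+{
+ unsigned int ident = read_aux_reg(AUX_IDENTITY); /* lr from AUX_IDENTITY */
+
+ (void)ident; /* e.g. decode family/cpu_id/chip_id */
+ write_aux_reg(AUX_IENABLE, 0); /* sr 0: mask all interrupt lines */
+}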
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+ unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ ways:4, ver:8;
+#endif
+};
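+
+/*
+ * Sketch of how READ_BCR pairs with these BCR layout structs
+ * (hypothetical helper, for illustration only).
+ */
+static inline void example_decode_bcrs(void)
+{
+ struct bcr_identity ident;
+ struct bcr_mmu_3 mmu3;
+
+ READ_BCR(AUX_IDENTITY, ident); /* ident.family, ident.cpu_id, ident.chip_id */
+ READ_BCR(ARC_REG_MMU_BCR, mmu3); /* mmu3.ver, mmu3.pg_sz, mmu3.sets, ... */
+
+ (void)ident;
+ (void)mmu3;
+}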
+
+#define EXTN_SWAP_VALID 0x1
+#define EXTN_NORM_VALID 0x2
+#define EXTN_MINMAX_VALID 0x2
+#define EXTN_BARREL_VALID 0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+ norm:2, swap:1;
+#else
+ unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+ crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, type:8, ver:8;
+#else
+ unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+ unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+ unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int addr:24, ver:8;
+#else
+ unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+ unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+ unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+ struct cpuinfo_arc_cache icache, dcache;
+ struct cpuinfo_arc_mmu mmu;
+ struct bcr_identity core;
+ unsigned int timers;
+ unsigned int vec_base;
+ unsigned int uncached_base;
+ struct cpuinfo_arc_ccm iccm, dccm;
+ struct bcr_extn extn;
+ struct bcr_extn_xymem extn_xymem;
+ struct bcr_extn_mac_mul extn_mac_mul;
+ struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..dad18768fe43
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000000..83f03ca6caf6
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v) ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bic %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(addr), "ir"(mask)
+ : "cc");
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter = i;
+ atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter += i;
+ atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter -= i;
+ atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ *addr &= ~mask;
+ atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ c = old; \
+ c; \
+})
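+
+/*
+ * Usage sketch (hypothetical helper): take a reference only if the object is
+ * still live, i.e. the add happens only while the counter is non-zero.
+ */
+static inline int example_tryget(atomic_t *refcnt)
+{
+ /* __atomic_add_unless() returns the old value; the add happened iff old != 0 */
+ return __atomic_add_unless(refcnt, 1, 0) != 0;
+}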
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 000000000000..f6cb7c4ffb35
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends() mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#define smp_read_barrier_depends() do { } while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 000000000000..647a83a8e756
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bset %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bclr %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bxor %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+/*
+ * Semantically:
+ * Test the bit
+ * if clear
+ * set it and return 0 (old value)
+ * else
+ * return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bset %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bclr %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bxor %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ * This avoids extra code being generated for pointer arithmetic, since
+ * the compiler is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit-fiddling insns (BSET/BCLR/ASL) etc
+ * only consider the bottom 5 bits of @nr, so NO need to mask them off.
+ * (GCC Quirk: however for constant @nr we still need to do the masking
+ * at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ unsigned long mask;
+
+ addr += nr >> 5;
+
+ /* ARC700 only considers 5 bits in bit-fiddling insn */
+ mask = 1 << nr;
+
+ return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr), (addr)) : \
+ __test_bit((nr), (addr)))
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so the (1-32) or (0-31) convention doesn't apply.
+ * It can be 0 to 32, based on the number of leading 0's:
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
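+
+/*
+ * e.g. x = 0x58 (binary 0101_1000): x & -x isolates the lowest set bit,
+ * giving 0x08; fls(0x08) = 4, hence ffs(0x58) = 4
+ */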
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 000000000000..2ad8f9b1c54b
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg);
+
+#define BUG() do { \
+ dump_stack(); \
+ pr_warn("Kernel BUG in %s: %s: %d!\n", \
+ __FILE__, __func__, __LINE__); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 000000000000..6632273861fd
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* If caches are not configured, set up a dummy line size for the rest of the kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS 2
+#define ARC_DCACHE_WAYS 4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
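+
+/*
+ * Usage sketch (hypothetical helper): read a memory-mapped peripheral
+ * register while bypassing the D-cache, via the ld.di accessor above.
+ */
+static inline unsigned int example_peek_uncached(void *regaddr)
+{
+ return arc_read_uncached_32(regaddr);
+}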
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* _ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 000000000000..97ee96f26505
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ) which
+ * was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches,
+ * so these are NOPs only for VIPT caches with non-aliasing D$ configs.
+ */
+#define flush_cache_dup_mm(mm) /* called on fork */
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len);
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 000000000000..10957298b7a3
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have modulo scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The 2 halfwords comprising the 32bit sum are added, any carry out of the
+ * low 16 bits is added back, and the final halfword result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
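+
+/*
+ * Worked example of the fold above: s = 0xffff0001
+ * r = ror(s, 16) = 0x0001ffff; ~s = 0x0000fffe; ~s - r = 0xfffeffff (mod 2^32)
+ * result = 0xfffeffff >> 16 = 0xfffe, i.e. ~(0xffff + 0x0001 with the carry
+ * folded back), as expected.
+ */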
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
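+
+/*
+ * Typical IPv4 usage sketch (iph being a struct iphdr *):
+ * iph->check = 0;
+ * iph->check = ip_fast_csum((void *)iph, iph->ihl);
+ */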
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 000000000000..bf9d29f5bd53
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still the better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+ return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..03cd6894855d
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " brne %0, %2, 2f \n"
+ " scond %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected),
+ "r"(new) /* can't be "ir". scond can't take limm for "b" */
+ : "cc");
+
+ return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long flags;
+ int prev;
+ volatile unsigned long *p = ptr;
+
+ atomic_ops_lock(flags);
+ prev = *p;
+ if (prev == expected)
+ *p = new;
+ atomic_ops_unlock(flags);
+ return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since it is not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee its semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
+ * which also happen to be atomic_ops_lock.
+ *
+ * Thus despite being semantically different, the implementation of
+ * atomic_cmpxchg() is the same as that of cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r"(val)
+ : "r"(ptr)
+ : "memory");
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * On ARC700, the EX insn is inherently atomic, so by default "vanilla" xchg()
+ * does not require any locking. However there's a quirk.
+ * ARC lacks a native CMPXCHG, which is thus emulated (see above) using external
+ * locking - incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
+ * Now, llist code uses cmpxchg() and xchg() on the same data, so xchg() needs to
+ * abide by the same serializing rules, and thus ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ * if (UP or LLSC)
+ * xchg doesn't need serialization
+ * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ * xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with) \
+({ \
+ unsigned long flags; \
+ typeof(*(ptr)) old_val; \
+ \
+ atomic_ops_lock(flags); \
+ old_val = _xchg(ptr, with); \
+ atomic_ops_unlock(flags); \
+ old_val; \
+})
+
+#else
+
+#define xchg(ptr, with) _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ * SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ * is natively "SMP safe", no serialization required).
+ * UP : other atomics disable IRQ, so no way a different context's atomic_xchg()
+ * could clobber them. atomic_xchg() itself would be 1 insn, so it
+ * can't be clobbered by others. Thus no serialization required when
+ * atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
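
For context, the caller-side idiom that cmpxchg() (and hence atomic_cmpxchg()) serves is the usual read/modify/compare-exchange retry loop. Below is a host-side sketch (not part of the patch); bounded_inc() is a made-up example and __sync_val_compare_and_swap() merely stands in for cmpxchg() with the same return-the-old-value convention:

/* Host-side sketch only: the classic cmpxchg() retry loop, here bumping a
 * counter but never past a cap.  The gcc builtin is used as a stand-in.
 */
#include <stdio.h>

static unsigned long bounded_inc(unsigned long *ctr, unsigned long max)
{
	unsigned long old, seen;

	do {
		old = *ctr;		/* snapshot the current value       */
		if (old >= max)
			return old;	/* cap reached, nothing to exchange */
		seen = __sync_val_compare_and_swap(ctr, old, old + 1);
	} while (seen != old);		/* lost a race: somebody else changed
					 * the counter in between, try again */

	return old + 1;
}

int main(void)
{
	unsigned long ctr = 8;

	printf("%lu\n", bounded_inc(&ctr, 10));	/* prints 9          */
	printf("%lu\n", bounded_inc(&ctr, 10));	/* prints 10         */
	printf("%lu\n", bounded_inc(&ctr, 10));	/* prints 10: capped */
	return 0;
}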
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 000000000000..87b918585c4a
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644
index 000000000000..6097bb439cc5
--- /dev/null
+++ b/arch/arc/include/asm/defines.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE 1
+#else
+#define __CONFIG_ARC_HAS_ICACHE 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE 1
+#else
+#define __CONFIG_ARC_HAS_DCACHE 0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 000000000000..442ce5d0f709
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using a precomputed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h> /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ "1: sub.f %0, %0, 1 \n"
+ " jpnz 1b \n"
+ : "+r"(loops)
+ :
+ : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically, if we multiply and divide a number by the same value, the
+ * result remains unchanged: in this case we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Dividing by 2^32 is simply a right shift by 32
+ * -We simply need to ensure that the multiply per the above eqn happens in
+ * 64-bit precision (if the CPU doesn't support it, gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (long long) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+ loops = ((long long)(usecs * 4295 * HZ) *
+ (long long)(loops_per_jiffy)) >> 32;
+
+ __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
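
The constant 4295 in __udelay() is just 2^32 / 10^6 rounded up, so the shift-based form tracks the exact loops-per-N-usecs computation to within about one loop iteration. A small host-side check follows (not part of the patch; the HZ and loops_per_jiffy values below are made up for illustration):

/* Host-side check only: compare the exact loop count with the
 * "multiply by 4295, shift right by 32" form used by __udelay() above.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long hz = 100;	/* assumed HZ                */
	const unsigned long long lpj = 500000;	/* made-up loops_per_jiffy   */
	const unsigned long long usecs = 250;

	/* exact: loops = loops_per_jiffy * HZ * usecs / 1000000 */
	unsigned long long exact = lpj * hz * usecs / 1000000;

	/* kernel form: ((usecs * 4295 * HZ) * loops_per_jiffy) >> 32 */
	unsigned long long approx = (usecs * 4295 * hz * lpj) >> 32;

	printf("exact=%llu approx=%llu\n", exact, approx);	/* 12500 vs 12499 */
	return 0;
}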
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 000000000000..f1cce3d059a1
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word) FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+ (BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
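
To make the field-extraction helpers above concrete, here is a small host-side illustration (not part of the patch) that uses the same BITS()/sign_extend() definitions to pull a signed 12-bit field out of a word:

/* Host-side illustration only: BITS() extracts an inclusive bit range,
 * sign_extend() then widens it as a signed field.
 */
#include <stdio.h>

#define IS_BIT(word, n)		((word) & (1 << n))
#define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))

static int sign_extend(int value, int bits)
{
	if (IS_BIT(value, (bits - 1)))
		value |= (0xffffffff << bits);

	return value;
}

int main(void)
{
	unsigned int word = 0xabc;	/* arbitrary 12-bit pattern, bit 11 set */

	/* 0xabc as a signed 12-bit quantity is -(0x1000 - 0xabc) = -1348 */
	printf("%d\n", sign_extend(BITS(word, 0, 11), 12));

	/* 0x7ff has bit 11 clear, so it stays at +2047 */
	printf("%d\n", sign_extend(0x7ff, 12));

	return 0;
}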
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..31f77aec0823
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * The dma_map_* APIs take CPU addresses, which are kernel logical addresses
+ * based in the untranslated address space (0x8000_0000). The DMA address (bus
+ * addr) ideally needs to be 0x0000_0000 based, hence these glue routines.
+ * However, given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which strictly requires a 0-based bus addr
+ * (e.g. the AHB-PCI bridge on the Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * The CPU accesses the page via its normal paddr, so the page needs to be
+ * explicitly made consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir) \
+do { \
+ if (__builtin_constant_p(dir)) \
+ __inline_dma_cache_sync(addr, sz, dir); \
+ else \
+ __arc_dma_cache_sync(addr, sz, dir); \
+} \
+while (0);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+ return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+#endif
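
A sketch of how a driver would typically pair these streaming-DMA helpers for a device-to-memory transfer. This is a hypothetical fragment for illustration, not part of the patch, and the function/device names are made up:

/* Hypothetical driver fragment (illustration only): map a buffer for the
 * device to fill, then hand ownership back to the CPU before reading it.
 */
static int demo_rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* flush/invalidate as needed and obtain the bus address */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the (made-up) device with 'handle' and wait for it ... */

	/* make the device's writes visible to the CPU before touching 'buf' */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	return 0;
}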
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 000000000000..ca7c45181de9
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 000000000000..f4c8d36ebecb
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT 93
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_S25H_PCREL 0x10
+#define R_ARC_S25W_PCREL 0x11
+
+/* To set parameters in the core dumps */
+#define ELF_ARCH EM_ARCOMPACT
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * -we don't load something for the wrong architecture.
+ * -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 000000000000..23daa326fc9b
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer rely on the fact that
+ * if we are NOT in user mode, the stack is already switched to kernel mode.
+ * e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ * its prologue, including the stack switch from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ * Normally the CPU does this automatically, however when doing FAKE rtie,
+ * we also need to do this explicitly. The problem in the macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address write-back load ld.ab instead of separate ld/add instructions
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h> /* For NR_syscalls definition */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct pt_regs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLER_SAVED
+ st.a r0, [sp, -4]
+ st.a r1, [sp, -4]
+ st.a r2, [sp, -4]
+ st.a r3, [sp, -4]
+ st.a r4, [sp, -4]
+ st.a r5, [sp, -4]
+ st.a r6, [sp, -4]
+ st.a r7, [sp, -4]
+ st.a r8, [sp, -4]
+ st.a r9, [sp, -4]
+ st.a r10, [sp, -4]
+ st.a r11, [sp, -4]
+ st.a r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+ ld.ab r12, [sp, 4]
+ ld.ab r11, [sp, 4]
+ ld.ab r10, [sp, 4]
+ ld.ab r9, [sp, 4]
+ ld.ab r8, [sp, 4]
+ ld.ab r7, [sp, 4]
+ ld.ab r6, [sp, 4]
+ ld.ab r5, [sp, 4]
+ ld.ab r4, [sp, 4]
+ ld.ab r3, [sp, 4]
+ ld.ab r2, [sp, 4]
+ ld.ab r1, [sp, 4]
+ ld.ab r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * on kernel stack.
+ * User mode callee regs need to be saved in case of
+ * -fork and friends for replicating from parent to child
+ * -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching the task ptr. The low level exception/ISR code saves user mode r25
+ * into task->thread.user_r25. So it needs to be retrieved from there and
+ * saved onto the kernel stack with the rest of the callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Retrieve orig r25 and save it on stack
+ ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ st.a r12, [sp, -4]
+#else
+ st.a r25, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * Kernel mode callee regs need to be saved in case of a context switch
+ * If r25 is used for caching task pointer then that need not be saved
+ * as it can be re-created from current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ sub sp, sp, 8
+#else
+ st.a r25, [sp, -4]
+ sub sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ * This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this should only be called in switch_to for loading the
+ * switched-IN task's CALLEE Reg File.
+ * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ * which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
+#else
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal, where the tracer might have changed callee regs,
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by the kernel
+ * for caching the task ptr. Ptrace would have modified the on-kernel-stack value of
+ * r25, which needs to be shoved back into task->thread.user_r25, from where the
+ * low level exception/ISR return code will retrieve it to populate r25 along with
+ * the rest of the callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld.ab r12, [sp, 4]
+ st r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+ ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hosts the stack,
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page where stack begins (grows upwards) */
+ add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP is set to kernel mode stack pointer
+ * If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * an L2 IRQ "Interrupts" L1
+ * That way, although the L2 IRQ happened in kernel mode, the stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in the
+ * L1 ISR caused SP to go whacko (some small value which looks like a
+ * USER stk) and then we take an L2 ISR.
+ * The above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting aloud.
+ * The only feasible way is to make sure this L2 happened in the
+ * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+ * safe-keeping not really needed, but it keeps the epilogue code
+ * (SP restore) simpler/uniform.
+ */
+ b.d 77f
+
+ st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+ /* If current task pointer cached in r25, time to
+ * -safekeep USER r25 in task->thread_struct->user_r25
+ * -load r25 with current task ptr
+ */
+ st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+ mov r25, r9
+#endif
+
+ /* With current tsk in r9, get its kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+#ifdef PT_REGS_CANARY
+ st 0xabcdabcd, [r9, 0]
+#endif
+
+ /* Save Pre Intr/Exception User SP on kernel stack */
+ st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
+
+ /* CAUTION:
+ * SP should be set at the very end when we are done with everything
+ * In case of 2 levels of interrupt we depend on value of SP to assume
+ * that everything else is done (loading r25 etc)
+ */
+
+ /* set SP to point to kernel mode stack */
+ mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN reg
+
+ ld \reg, [sp, PT_status32]
+ bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+ bset \reg, \reg, STATUS_L_BIT
+ sr \reg, [erstatus]
+ mov \reg, 55f
+ sr \reg, [eret]
+
+ rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For the early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of the prologue (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or the SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on the kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+ sr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ st \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG reg
+#ifdef CONFIG_SMP
+ lr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct pt_regs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also an exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION marker
+
+ st \marker, [sp, 8]
+ st r0, [sp, 4] /* orig_r0, needed only for sys calls */
+
+ /* Restore r9 used to code the early prologue */
+ EXCPN_PROLOG_RESTORE_REG r9
+
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ lr r9, [eret]
+ st.a r9, [sp, -4]
+ lr r9, [erstatus]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [erbta]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbeef
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+ SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+ /*
+ * Setup pt_regs->orig_r8.
+ * Encode syscall number (r8) in upper short word of event type (r9)
+ * N.B. #1: This is already endian safe (see ptrace.h)
+ * #2: Only r9 can be used as scratch as it is already clobbered
+ * and its contents are no longer needed by the latter part
+ * of exception prologue
+ */
+ lsl r9, r8, 16
+ or r9, r9, orig_r8_IS_SCALL
+
+ SAVE_ALL_EXCEPTION r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [erbta]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [erstatus]
+ ld.ab r9, [sp, 4]
+ sr r9, [eret]
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+ /* restore original r9, saved in int1_saved_reg
+ * It will be saved on stack in macro: SAVE_CALLER_SAVED
+ */
+#ifdef CONFIG_SMP
+ lr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld r9, [@int1_saved_reg]
+#endif
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink1, [sp, -4]
+ lr r9, [status32_l1]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l1]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee1
+ st r9, [sp, -4]
+#endif
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+ /* TODO-vineetg: on SMP we can't use a global nor can we use
+ * SCRATCH0 as we do for int1 because while int1 is using
+ * it, int2 can come
+ */
+ /* restore original r9, saved in int2_saved_reg */
+ ld r9, [@int2_saved_reg]
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink2, [sp, -4]
+ lr r9, [status32_l2]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l2]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee2
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4] /* Actual reg file */
+ sr r9, [bta_l1]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l1]
+ ld.ab r9, [sp, 4]
+ mov ilink1, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [bta_l2]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l2]
+ ld.ab r9, [sp, 4]
+ mov ilink2, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ * -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 000000000000..28abc6905e07
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16 bytes */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 000000000000..4dc64ddebece
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ * Notes:
+ * -Best-Effort: Exchange happens only if the compare succeeds.
+ * If the compare fails, it returns, leaving retry/looping to the upper layers
+ * -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ * -Compare fails: return orig value in @addr
+ * -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %4 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "=&r"(val)
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ *uval = val;
+ return val;
+}
+
+#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 000000000000..473424d7528b
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc(phy, sz) ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 000000000000..4c588f9820cf
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS 32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ 3
+#define TIMER1_IRQ 4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 000000000000..ccd84806b62f
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp));
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE scratch, save
+ lr \scratch, [status32]
+ mov \save, \scratch /* Make a copy */
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_ENABLE scratch
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* KERNEL */
+
+#endif
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 000000000000..3fbe6c472c0a
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 000000000000..f3c4934f0ca9
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 39
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _BTA = 27,
+ _LP_START = 28,
+ _LP_END = 29,
+ _LP_COUNT = 30,
+ _STATUS32 = 31,
+ _BLINK = 32,
+ _FP = 33,
+ __SP = 34,
+ _EFA = 35,
+ _RET = 36,
+ _ORIG_R8 = 37,
+ _STOP_PC = 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 000000000000..4d9c211fce70
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs);
+#else
+static inline void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 000000000000..0283e9e44e0d
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h
+ * gas considers ';' as comment vs. newline
+ */
+.macro ARC_ENTRY name
+ .global \name
+ .align 4
+ \name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name) .-##name
+ .size \ name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 000000000000..9998dc846ebb
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so that
+ * a multi-platform kernel builds with an array of such descriptors.
+ * We extend the early DT scan to also match the DT's "compatible" string
+ * against the @dt_compat of all such descriptors, and the one with the highest
+ * "DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_irq: setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp: for each CPU (e.g. setup IPI)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time: platform specific clocksource/clockevent registration
+ * [called from time_init()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+#ifdef CONFIG_SMP
+ void (*init_smp)(unsigned int);
+#endif
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+ /* the default machine is the last one linked in */
+ if (__arch_info_end - 1 < __arch_info_begin)
+ return NULL;
+ return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
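
As a usage illustration only (hypothetical board name, compatible string and callbacks, not part of this patch), a platform file would instantiate a descriptor roughly like this:

	static const char *my_board_compat[] = {
		"vendor,my-arc-board",		/* hypothetical DT compatible */
		NULL,
	};

	MACHINE_START(MY_BOARD, "my-arc-board")
		.dt_compat	= my_board_compat,
		.init_early	= my_board_init_early,	/* hypothetical callbacks */
		.init_irq	= my_board_init_irq,
	MACHINE_END
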
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000000..56b02320f1a9
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long asid; /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+ struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..0d71fb11b57c
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with the same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ * -mm_struct has an @asid field to keep a note of the task's ASID (needed at
+ *  the time of, say, switch_mm( ))
+ * -An array of mm structs @asid_mm_map[] for the asid->mm reverse mapping:
+ *  given an ASID, find the mm struct associated with it.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request for a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be a plausible but costly solution. Instead we enforce
+ * an allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associated with it, alleviating the need to flush.
+ * The quirk essentially is not allowing ASID allocated in prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), task needs to refresh
+ * its ASID, aligning it to current value of tracker. If the task doesn't get
+ * scheduled past a roll-over, and hence its ASID is not yet realigned with the
+ * tracker, such an ASID is anyway safely reusable because it is
+ * guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID 0
+#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
+#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
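
A minimal user-space model of the allocation rule spelled out in the comment above (the helper names and the printf are illustrative only, not part of the patch); it shows the roll-over of the tracker and the "task ASID newer than tracker" refresh test used by switch_mm() further below:

	#include <stdio.h>

	#define FIRST_ASID	0
	#define MAX_ASID	255
	#define NO_ASID		(MAX_ASID + 1)

	static int asid_cache = FIRST_ASID;	/* software tracker */

	static int model_new_asid(void)
	{
		if (++asid_cache > MAX_ASID)	/* roll-over: the kernel would flush_tlb_all() here */
			asid_cache = FIRST_ASID;
		return asid_cache;
	}

	int main(void)
	{
		int task_asid = NO_ASID;	/* fresh task: never had an ASID */

		/* switch_mm() rule: refresh if the task's ASID is "newer" than the tracker */
		if (task_asid > asid_cache)
			task_asid = model_new_asid();

		printf("assigned asid %d\n", task_asid);	/* prints 1 */
		return 0;
	}
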
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ struct mm_struct *prev_owner;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Relinquish the currently owned ASID (if any).
+ * Doing this unconditionally saves a cmp-n-branch; for an already unused
+ * ASID slot, the value was/remains NULL
+ */
+ asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+ /* move to new ASID */
+ if (++asid_cache > MAX_ASID) { /* ASID roll-over */
+ asid_cache = FIRST_ASID;
+ flush_tlb_all();
+ }
+
+ /*
+ * Is the next ASID already owned by someone else (i.e. we are stealing it)?
+ * If so, let the orig owner be aware of this, so when it runs, it
+ * asks for a brand new ASID. This would only happen for a long-lived
+ * task with an ASID from the prev allocation cycle (before ASID roll-over).
+ *
+ * This might look wrong - if we are re-using some other task's ASID,
+ * won't we use its stale TLB entries too? Actually switch_mm( ) takes
+ * care of such a case: it ensures that a task with an ASID from a prev alloc
+ * cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
+ * The stealing scenario described here will only happen if that task
+ * didn't get a chance to refresh its ASID - implying stale entries
+ * won't exist.
+ */
+ prev_owner = asid_mm_map[asid_cache];
+ if (prev_owner)
+ prev_owner->context.asid = NO_ASID;
+
+ /* Assign new ASID to tsk */
+ asid_mm_map[asid_cache] = mm;
+ mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+ pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+ " pid:%u, assigned asid:%lu\n",
+ (unsigned int)mm, (unsigned int)prev_owner,
+ (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+ (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+ write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+ mm->context.tsk = tsk;
+#endif
+ return 0;
+}
+
+/* Prepare the MMU for the task: set up the PID reg with the allocated ASID.
+ * If the task doesn't have an ASID (never allocated or stolen), get a new one.
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /*
+ * Get a new ASID if task doesn't have a valid one. Possible when
+ * -task never had an ASID (fresh after fork)
+ * -its ASID was stolen - past an ASID roll-over.
+ * -There's a third obscure scenario (if this task is running for the
+ * first time after an ASID rollover), where despite having a valid
+ * ASID, we force getting a new ASID - see comments at top.
+ *
+ * Both the non-alloc scenario and first-use-after-rollover can be
+ * detected using the single condition below: NO_ASID = 256
+ * while asid_cache is always a valid ASID value (0-255).
+ */
+ if (next->context.asid > asid_cache) {
+ get_new_mmu_context(next);
+ } else {
+ /*
+ * XXX: This will never happen given the chks above
+ * BUG_ON(next->context.asid > MAX_ASID);
+ */
+ write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+ }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid_mm_map[mm->context.asid] = NULL;
+ mm->context.asid = NO_ASID;
+
+ local_irq_restore(flags);
+}
+
+/* It seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for a retiring mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID
+ * valid again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /* Unconditionally get a new ASID */
+ get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 000000000000..518222bb3f8e
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ *
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+ void *unw_info;
+ int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000000..a2f88ff9f506
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 000000000000..bdf546104551
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg) clear_page(addr)
+#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+#define __pte(x) (x)
+#define __pgprot(x) (x)
+#define pte_pgprot(x) (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * 128th page, and virt_to_page( ) will return the struct page corresp to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong in case the kernel is not linked at 0x8000_0000
+ */
+#define __pa(vaddr) ((unsigned long)vaddr)
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) \
+ (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
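
A stand-alone illustration of the pfn arithmetic behind virt_to_page()/pfn_valid() above (assuming CONFIG_LINUX_LINK_BASE = 0x8000_0000 and 8K pages, i.e. PAGE_SHIFT = 13; the values are illustrative only):

	#include <stdio.h>

	#define LINK_BASE	0x80000000UL	/* assumed CONFIG_LINUX_LINK_BASE */
	#define PAGE_SHIFT	13		/* assumed 8K pages */

	int main(void)
	{
		unsigned long kaddr = 0x80100000UL;	/* link address == physical address on ARC */
		unsigned long pa    = kaddr;		/* __pa() is an identity here */
		unsigned long idx   = (pa - LINK_BASE) >> PAGE_SHIFT;

		printf("mem_map index = %lu\n", idx);	/* 0x100000 >> 13 = 128 */
		return 0;
	}
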
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 000000000000..115ad96480e6
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..36a9f20c21a3
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ * -Page Table size capped to max 1 page to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ * =Needed so that the Page Table allocator (pte_alloc_one) is not forced
+ *   to deal with struct page. That way in future we can make it allocate
+ *   multiple PG Tbls in one Page Frame
+ * =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *   pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+ pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+ return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int num, num2;
+ pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+ if (ret) {
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+ return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ __get_order_pte());
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pgtable_t pte_pg;
+
+ pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ if (pte_pg) {
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ pgtable_page_ctor(virt_to_page(pte_pg));
+ }
+
+ return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+ pgtable_page_dtor(virt_to_page(ptep));
+ free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache() do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 000000000000..b7e36684c091
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ * They are semantically the same although in different contexts
+ * VALID marks a TLB entry exists and it will only happen if PRESENT
+ * - Utilise some unused free bits to confine PTE flags to 12 bits
+ * This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ * -TLB Locking never really existed, except for initial specs
+ * -SILENT_xxx not needed for our port
+ * -Per my request, MMU V3 changes the layout of some of the bits
+ * to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ * -Switched from 8:11:13 split for page table lookup to 11:8:13
+ * -this speeds up page table allocation itself as we now have to memset 1K
+ * instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ * need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different value in TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined form a translation entry)
+ *      while from the PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ * (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
+#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
+#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
+#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
+#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
+#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
+ usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+/* While the kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * kernel vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+ _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ * can be tracked independent of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#define __P000 PAGE_U_NONE
+#define __P001 PAGE_U_R
+#define __P010 PAGE_U_R /* Pvt-W => !W */
+#define __P011 PAGE_U_R /* Pvt-W => !W */
+#define __P100 PAGE_U_X_R /* X => R */
+#define __P101 PAGE_U_X_R
+#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
+#define __P111 PAGE_U_X_R /* Pvt-W => !W */
+
+#define __S000 PAGE_U_NONE
+#define __S001 PAGE_U_R
+#define __S010 PAGE_U_W_R /* W => R */
+#define __S011 PAGE_U_W_R
+#define __S100 PAGE_U_X_R /* X => R */
+#define __S101 PAGE_U_X_R
+#define __S110 PAGE_U_X_W_R /* X => R */
+#define __S111 PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ * 32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | |
+ * | ---> index into Page Table
+ * |
+ * ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE 9
+#endif
+
+#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
+#else
+#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
+#endif
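
To make the split concrete, a worked example for the CONFIG_ARC_PAGE_SIZE_8K case (the other page sizes follow the same arithmetic; this is an illustration, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		const int bits_in_page = 13;	/* 8K pages */
		const int bits_for_pte = 8;
		const int bits_for_pgd = 32 - bits_for_pte - bits_in_page;	/* 11 */

		printf("PTRS_PER_PTE = %d\n", 1 << bits_for_pte);	/* 256  */
		printf("PTRS_PER_PGD = %d\n", 1 << bits_for_pgd);	/* 2048 */
		printf("PGDIR_SIZE   = %u\n",
		       1u << (bits_for_pte + bits_in_page));		/* 2 MB of vaddr per PGD entry */
		return 0;
	}
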
+/*
+ * Number of PGD entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirement on the lowest virtual address that a user space
+ * mapping may be placed at.
+ */
+#define FIRST_USER_ADDRESS 0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+ (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
+ pte; \
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS 30
+#define pgoff_to_pte(x) __pte(x)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+ __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
+#define pte_offset_map(dir, addr) pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to the Page Directory of the "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
+ * becomes reading a register
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of a NON "current" task.
+ * Thus use this macro only when you are certain that the mm being operated on
+ * belongs to "current", e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr) \
+({ \
+ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
+ pgd_base + pgd_index(addr); \
+})
+#else
+#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing a swap "identifier" into its constituents */
+#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
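
A quick stand-alone round-trip check of this encoding (swp_entry_t is re-modelled here as a bare unsigned long; illustrative only):

	#include <assert.h>

	int main(void)
	{
		unsigned long type = 3, off = 0x1234;

		/* encode as above: type in bits 4:0, offset from bit 13 upwards */
		unsigned long val = (type & 0x1f) | (off << 13);

		assert((val & 0x1f) == type);	/* __swp_type()   */
		assert((val >> 13) == off);	/* __swp_offset() */
		return 0;
	}
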
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 000000000000..5f26b2c1cba0
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long ksp; /* kernel mode stack pointer */
+ unsigned long callee_reg; /* pointer to callee regs */
+ unsigned long fault_address; /* dbls as brkpt holder as well */
+ unsigned long cause_code; /* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+ unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based on non-volatile data, which would
+ * otherwise get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax() do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+
+/*
+ * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs, as that would give the corresponding
+ * user-mode values.
+ */
+#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
+ * the ARC700 sr to lp_start hardware bug
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->ret = (_pc); \
+ /* Interrupts enabled in User Mode */ \
+ (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
+ | STATUS_E1_MASK | STATUS_E2_MASK; \
+ (_regs)->sp = (_usp); \
+ /* bogus seed values for debugging */ \
+ (_regs)->lp_start = 0x10; \
+ (_regs)->lp_end = 0x80; \
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places, so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris), ARC700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE (0x10000000) /* 256M */
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER 0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644
index 000000000000..692d0d0789a7
--- /dev/null
+++ b/arch/arc/include/asm/prom.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 000000000000..8ae783d20a81
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+ /*
+ * 1 word gutter after reg-file has been saved
+ * Technically not needed, since SP always points to a "full" location
+ * (vs. "empty"). But pt_regs is shared with tools...
+ */
+ long res;
+
+ /* Real registers */
+ long bta; /* bta_l1, bta_l2, erbta */
+ long lp_start;
+ long lp_end;
+ long lp_count;
+ long status32; /* status32_l1, status32_l2, erstatus */
+ long ret; /* ilink1, ilink2 or eret */
+ long blink;
+ long fp;
+ long r26; /* gp */
+ long r12;
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long sp; /* user/kernel sp depending on where we came from */
+ long orig_r0;
+
+ /* to distinguish between excp, syscall, irq */
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ /* so that assembly code is same for LE/BE */
+ unsigned long orig_r8:16, event:16;
+#else
+ unsigned long event:16, orig_r8:16;
+#endif
+ long orig_r8_word;
+ };
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ long res; /* Again this is not needed */
+ long r25;
+ long r24;
+ long r23;
+ long r22;
+ long r21;
+ long r20;
+ long r19;
+ long r18;
+ long r17;
+ long r16;
+ long r15;
+ long r14;
+ long r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL 0x0001
+#define orig_r8_IS_SCALL_RESTARTED 0x0002
+#define orig_r8_IS_BRKPT 0x0004
+#define orig_r8_IS_EXCPN 0x0004
+#define orig_r8_IS_IRQ1 0x0010
+#define orig_r8_IS_IRQ2 0x0020
+
+#endif /* __ASM_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 000000000000..6fc1159dfefe
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 000000000000..da2c45979817
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b) ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 000000000000..4dff5a1e4128
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early-8250 requires BASE_BAUD to be defined and includes this header.
+ * We put in a typical value:
+ * (core clk / 16) - i.e. the UART samples 16 times per bit.
+ * Although in a multi-platform image this might not work, especially if the
+ * clk driving the UART is different.
+ * We can't use DeviceTree as this is typically for early serial.
+ */
+
+#include <asm/clk.h>
+
+#define BASE_BAUD (arc_get_core_freq() / 16)
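
For example, assuming an 80 MHz core clock (an illustrative figure only, not something this patch fixes), BASE_BAUD works out to 80,000,000 / 16 = 5,000,000.
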
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 000000000000..229e50681497
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+struct cpuinfo_data {
+ struct id_to_str info;
+ int up_range;
+};
+
+extern int root_mountflags, end_mem;
+extern int running_on_hw;
+
+void __init setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 000000000000..c4fb211dcd25
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpumask
+ * @ipi_clear: To clear IPI received by @cpu at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(void *callmap);
+ void (*ipi_clear)(int cpu, int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However, despite these, we still provide the IRQ-disabling variant because:
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ *     needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which the core handles).
+ * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ * disabling for atomicity.
+ *
+ * However exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ * but same is not true for ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define bitops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_bitops_lock); \
+} while (0)
+
+#define bitops_unlock(flags) do { \
+ arch_spin_unlock(&smp_bitops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#define bitops_lock(flags) local_irq_save(flags)
+#define bitops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f158197ac5b0
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (tmp)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ smp_mb();
+}
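
A user-space model of the exchange-based protocol implemented above, with a C11 atomic exchange standing in for the ARC "ex" instruction (a sketch for illustration, not a drop-in replacement):

	#include <stdatomic.h>

	#define UNLOCKED	0
	#define LOCKED		1

	static atomic_uint slock = UNLOCKED;

	static void model_spin_lock(void)
	{
		/* keep swapping LOCKED in until we read back UNLOCKED */
		while (atomic_exchange(&slock, LOCKED) == LOCKED)
			;	/* spin */
	}

	static int model_spin_trylock(void)
	{
		return atomic_exchange(&slock, LOCKED) == UNLOCKED;
	}

	static void model_spin_unlock(void)
	{
		atomic_store(&slock, UNLOCKED);
	}
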
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x) ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means a writer holds the lock exclusively; deny the reader.
+ * Otherwise grant the lock to the first/subsequent reader.
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ smp_mb();
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny the writer; otherwise, if unlocked, grant it to the writer.
+ * Hence the claim that Linux rwlocks are unfair to writers:
+ * they can be starved for an indefinite time by readers.
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..8276bfd61704
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF down to 0x01 (each reader decrements the counter)
+ * Write lock   : 0x0, but only possible if the prior value was "unlocked" (0x0100_0000)
+ */
+typedef struct {
+ volatile unsigned int counter;
+ arch_spinlock_t lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
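To tie the encoding above to the rwlock code in spinlock.h, here is a brief walk-through of the counter transitions (a sketch of the expected behaviour, not part of the patch):

	/*
	 * counter == 0x0100_0000                    unlocked
	 * arch_read_lock()   : counter -= 1      -> 0x00FF_FFFF (each extra reader decrements further)
	 * arch_read_unlock() : counter += 1      -> back to 0x0100_0000 once all readers leave
	 * arch_write_lock()  : succeeds only when counter == 0x0100_0000, then sets counter = 0
	 * arch_write_unlock(): counter = 0x0100_0000
	 */
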
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 000000000000..87676c8f1412
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000000..1b171ab5fec0
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ ARC_FPU_PREV(prev, next); \
+ last = __switch_to(prev, next);\
+ ARC_FPU_NEXT(next); \
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 000000000000..33ab3048e9b2
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->orig_r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+ regs->r8 = regs->orig_r8;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ inside_ptregs -= i;
+
+ BUG_ON((i + n) > 6);
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
+
+#endif
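These accessors are consumed by generic tracing/audit code; a minimal usage sketch (hypothetical snippet, not part of the patch) might look like:

	/* fetch the first two syscall arguments of a traced task */
	unsigned long args[2];
	struct pt_regs *regs = task_pt_regs(task);

	if (syscall_get_nr(task, regs) != -1)
		syscall_get_arguments(task, regs, 0, 2, args);
	/* args[0] is taken from regs->r0, args[1] from regs->r1 */
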
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 000000000000..e53a5340ba4f
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_fork_wrapper(void);
+int sys_vfork_wrapper(void);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 000000000000..2d50a4cdd7f3
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ * No need for an ARC-specific thread_info allocator (kmalloc/free). This is
+ * a one-page allocation anyway, so the slab allocator can be short-circuited
+ * and the generic version (get_free_page) works out much better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ struct task_struct *task; /* main task structure */
+ mm_segment_t addr_limit; /* thread address space */
+ struct exec_domain *exec_domain;/* execution domain */
+ __u32 cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+ struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
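A quick numeric check of the stack-pointer masking above, assuming the default 8 KB THREAD_SIZE (illustration only, not part of the patch):

	/*
	 * sp          = 0x8fff_f123
	 * THREAD_SIZE = 0x2000 (8 KB)
	 * sp & ~(THREAD_SIZE - 1) = 0x8fff_e000
	 *   -> base of the combined thread_info + kernel stack block
	 */
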
+
+#endif /* !__ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+
+/* is terminating due to OOM killer */
+#define TIF_MEMDIE 16
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 000000000000..0a82960a75e9
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000000..a5ff961b1efc
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+; and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+; didn't want to change setup.c
+; hence extra instruction to clean
+;
+; -- should be in cache since in same line
+; as r0/r1 saves above
+;
+ld r0,[jh_ex_way_sel] ; victim pointer
+and r0,r0,1 ; clean
+xor.f r0,r0,1 ; flip
+st r0,[jh_ex_way_sel] ; store back
+asr r0,r1,12 ; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1 ; set way bit
+and r0,r0,0xff ; clean
+sr r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr r0,[eret] /* instruction address */
+xor r0,r0,r1 /* compare set # */
+and.f r0,r0,0x000fe000 /* 2-way MMU mask */
+bne 88f /* not in same set - no need to probe */
+
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and r1,r1,0xff /* Data ASID */
+or r0,r0,r1 /* Instruction address + Data ASID */
+
+lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+xor r0,r0,1 /* flip bottom bit of data index */
+b.d 89f
+sr r0,[ARC_REG_TLBINDEX] /* and put it back */
+88:
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+bmsk r1,r3,7 /* Data ASID, bits 7-0 */
+or_s r0,r0,r1 /* Instruction address + Data ASID */
+
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
+cmp r0,r1 /* if no match on indices, go around */
+xor.eq r1,r1,1 /* flip bottom bit of data index */
+sr r1,[ARC_REG_TLBINDEX] /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000000..3eb2ce0bdfa3
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
+ _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part - implemented via tlb_start_vma() - can be a NOP (for now)
+ * as we don't support aliasing configs in our VIPT D$.
+ * 2) tlb-flush part - implemented via tlb_end_vma() - can be a NOP as well,
+ * albeit for different reasons: it is better handled by moving to a new ASID
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#define tlb_start_vma(tlb, vma)
+#define tlb_end_vma(tlb, vma)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..b2f9bc7f68c8
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* XXX: Revisit for SMP */
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 000000000000..32420824375b
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ * -__clear_user(), called multiple times during ELF load, was a byte loop;
+ * converted it to do as much word clearing as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h> /* for generic string functions */
+
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ * (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ * (len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * Because it essentially checks that the buffer end is within the limit and
+ * @len is non-negative, which implies that the buffer start is within the
+ * limit too.
+ *
+ * The reason for the rewrite is that, in the majority of cases, @len is a
+ * compile-time constant, allowing the first sub-expression to be subsumed at
+ * compile time.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+ (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
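A worked example of the rewritten check (the concrete limit value below is an assumption, chosen only for illustration):

	/*
	 * Assume addr_limit (TASK_SIZE) = 0x6000_0000:
	 *   addr = 0x5fff_fff0, sz = 0x10 : 0x10 <= limit and
	 *      addr + sz = 0x6000_0000 <= limit        -> allowed
	 *   addr = 0x5fff_fff0, sz = 0x20 :
	 *      addr + sz = 0x6000_0010 >  limit        -> rejected
	 */
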
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+ * Note as an '&' earlyclobber operand to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code sz of the
+ * laddered copy 16x,8,4,2,1
+ */
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /* Note as an '&' earlyclobber operand to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = count;
+ char val;
+ unsigned int hw_count;
+
+ if (count == 0)
+ return 0;
+
+ __asm__ __volatile__(
+ " lp 2f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " stb.ab %3, [%1, 1] \n"
+ "2: sub %0, %6, %4 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+ : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+ : "memory");
+
+ return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+ long res, tmp1, cnt;
+ char val;
+
+ __asm__ __volatile__(
+ " mov %2, %1 \n"
+ "1: ldb.ab %3, [%0, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " sub.f %2, %2, 1 \n"
+ " bnz 1b \n"
+ " sub %2, %2, 1 \n"
+ "2: sub %0, %1, %2 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, 0 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+ : "0"(s), "1"(n)
+ : "memory");
+
+ return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n) __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n);
+extern long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
+ long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n) arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 000000000000..5dbe63f17b66
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 000000000000..7ca628b6ee2a
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..18fefaea73fd
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,12 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+header-y += elf.h
+header-y += page.h
+header-y += setup.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += swab.h
+header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..9da71d415c38
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..51c73f0255b3
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's fine-grained cacheflush requirements
+ */
+#define CF_I_INV 0x0002
+#define CF_D_FLUSH 0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
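For completeness, a hedged sketch of how userspace (e.g. a JIT emitting code) might invoke the syscall that consumes these flags; the (start, len, flags) argument order is inferred from the sys_cacheflush declaration elsewhere in this patch and should be treated as an assumption:

	#include <unistd.h>
	#include <sys/syscall.h>	/* __NR_cacheflush comes from the ARC uapi unistd.h */

	static void flush_jit_code(void *buf, unsigned long len)
	{
		/* make freshly written instructions coherent with the I-cache */
		syscall(__NR_cacheflush, (unsigned long)buf, len, BCACHE);
	}
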
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..0f99ac8fcbb2
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h> /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK 0x00000f00
+#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
+#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 000000000000..e5d41e08240c
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad-hoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined without
+ * using the correct uClibc header, and in their build our autoconf.h is
+ * not available
+ */
+#define PAGE_SHIFT 13
+#endif
+
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_OFFSET (0x80000000)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
+#endif
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..6afa4f702075
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ * -ptrace (gdbserver)
+ * -sigcontext (SA_SIGINFO signal frame)
+ *
+ * This is to decouple pt_regs from the user-space ABI, so that pt_regs can be
+ * changed w/o affecting the ABI.
+ * The layout (initial padding), however, is kept similar to pt_regs to allow
+ * some optimizations when copying pt_regs to/from user_regs_struct.
+ *
+ * Also, sigcontext only cares about the scratch regs, as that is what we
+ * really save/restore for signal handling.
+*/
+struct user_regs_struct {
+
+ struct scratch {
+ long pad;
+ long bta, lp_start, lp_end, lp_count;
+ long status32, ret, blink, fp, gp;
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ long sp;
+ } scratch;
+ struct callee {
+ long pad;
+ long r25, r24, r23, r22, r21, r20;
+ long r19, r18, r17, r16, r15, r14, r13;
+ } callee;
+ long efa; /* break pt addr, for break points in delay slots */
+ long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
+};
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..a6d4e44938be
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of the userspace header ABI, so the UAPI scripts have to
+ * generate it even if there's nothing to export - causing an empty
+ * <uapi/asm/setup.h>. However, to prevent "patch" from discarding it, we add
+ * this placeholder comment.
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..9678a11fc158
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+ struct user_regs_struct regs;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..fad62f7f42d6
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for the ARC sigreturn optimization.
+ * It allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
+ * which allows sigreturn-based re-entry into the kernel after handling a signal.
+ * W/o this the kernel needs to "synthesize" the sigreturn trampoline on the
+ * user mode stack, which in turn forces the following:
+ * -TLB Flush (after making the stack page executable)
+ * -Cache line Flush (to make I/D Cache lines coherent)
+ */
+#define SA_RESTORER 0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 000000000000..095599a73195
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ * -Better htonl implementation (5 instead of 9 ALU instructions)
+ * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " swape %0, %1 \n" \
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ * return x<<24 | x>>24 |
+ * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
+ * 8051fd98: asl r5,r3,24 ; get 3rd Byte
+ * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
+ * 8051fda0: and r4,r3,0xff00
+ * 8051fda8: asl r4,r4,8 ; get 1st Byte
+ * 8051fdac: and r3,r3,0x00ff0000
+ * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
+ * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of the ARC ISA (rotate instructions)
+ */
+
+#define __arch_swab32(x) \
+({ unsigned long __in = (x), __tmp; \
+ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
+ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
+ __tmp ^= __in; \
+ __tmp &= 0xff00ff; \
+ __tmp ^ __in; \
+})
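A short trace of the rotate/xor trick above on a sample word (illustration of the expected result, not part of the patch):

	/*
	 * in  = 0x11223344
	 * tmp = in<<8  | in>>24 = 0x22334411   (ror in, 24)
	 * in  = in<<24 | in>>8  = 0x44112233   (ror in, 8)
	 * tmp ^= in             = 0x66226622
	 * tmp &= 0x00ff00ff     = 0x00220022
	 * tmp ^  in             = 0x44332211   (bytes reversed)
	 */
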
+
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+ " bswap %0, %1 \n"\
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..6f30484f34b7
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_sysfs, sys_sysfs)
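A hedged sketch of how userspace TLS support might use the two arc_*tls syscalls defined above (illustrative only; the exact calling convention is an assumption, these helpers are not part of the patch):

	#include <unistd.h>
	#include <sys/syscall.h>	/* __NR_arc_settls/__NR_arc_gettls from the ARC uapi unistd.h */

	static void set_thread_pointer(void *tls_block)
	{
		/* publish this thread's TLS block to the kernel */
		syscall(__NR_arc_settls, tls_block);
	}

	static void *get_thread_pointer(void)
	{
		/* read it back, e.g. in a signal handler or after clone() */
		return (void *)syscall(__NR_arc_gettls);
	}
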
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 000000000000..c242ef07ba70
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Pass UTS_MACHINE for user_regset definition
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y += devtree.o
+
+obj-$(CONFIG_MODULES) += arcksyms.o module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+CFLAGS_fpu.o += -mdpfp
+
+ifdef CONFIG_ARC_DW2_UNWIND
+CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
+obj-y += ctx_sw.o
+else
+obj-y += ctx_sw_asm.o
+endif
+
+extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 000000000000..47b2a17cc52a
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,58 @@
+/*
+ * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h> /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h> /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ pr_warn("Hostlink buffer mmap ERROR\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ /* we only support returning the physical addr to mmap in user space */
+ put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+ return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+ .unlocked_ioctl = arc_hl_ioctl,
+ .mmap = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hostlink",
+ .fops = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+ pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+ return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
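
A hypothetical userspace counterpart to the driver above might look roughly like the sketch below: the ioctl (whose command value the driver ignores, so 0 is used purely for illustration) returns the buffer's address, which is then handed to mmap() as the offset. The device node name /dev/hostlink and the 4-page size mirror the driver but are assumptions of this sketch.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned int phys = 0;
	long pgsz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/hostlink", O_RDWR);

	if (fd < 0)
		return 1;

	/* the driver's ioctl writes the hostlink buffer's address into 'phys' */
	if (ioctl(fd, 0, &phys) < 0) {
		close(fd);
		return 1;
	}

	void *buf = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, phys);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... exchange data with the host via 'buf' ... */

	munmap(buf, 4 * pgsz);
	close(fd);
	return 0;
}
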
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 000000000000..4d9e77724bed
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,56 @@
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..0dc148ebce74
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ BLANK();
+
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+#ifdef CONFIG_ARC_CURR_IN_REG
+ DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
+#endif
+ DEFINE(THREAD_FAULT_ADDR,
+ offsetof(struct thread_struct, fault_address));
+
+ BLANK();
+
+ DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT,
+ offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+
+ DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+ DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+ BLANK();
+
+ DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+ DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
+ DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+ DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+
+ return 0;
+}
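
Kbuild compiles this file and converts each DEFINE() into a plain macro in the generated include/generated/asm-offsets.h, which the assembly later in this series pulls in via <asm/asm-offsets.h>. The excerpt below is illustrative only; the numeric values depend on the real struct layouts and kernel config:

/* excerpt of a generated asm-offsets.h (values made up for illustration) */
#define TASK_THREAD 1216	/* offsetof(struct task_struct, thread) */
#define THREAD_KSP 0		/* offsetof(struct thread_struct, ksp) */
#define PT_status32 8		/* offsetof(struct pt_regs, status32) */
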
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644
index 000000000000..66ce0dc917fb
--- /dev/null
+++ b/arch/arc/kernel/clk.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/clk.h>
+
+unsigned long core_freq = 800000000;
+
+/*
+ * As of now we default to device-tree provided clock
+ * In future we can determine this in early boot
+ */
+int arc_set_core_freq(unsigned long freq)
+{
+ core_freq = freq;
+ return 0;
+}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 000000000000..60844dac6132
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -"C" version of lowest level context switch asm macro called by schedular
+ * gcc doesn't generate the dward CFI info for hand written asm, hence can't
+ * backtrace out of it (e.g. tasks sleeping in kernel).
+ * So we cheat a bit by writing almost similar code in inline-asm.
+ * -This is a hacky way of doing things, but there is no other simple way.
+ * I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+ unsigned int tmp;
+ unsigned int prev = (unsigned int)prev_task;
+ unsigned int next = (unsigned int)next_task;
+ int num_words_to_skip = 1;
+#ifdef CONFIG_ARC_CURR_IN_REG
+ num_words_to_skip++;
+#endif
+
+ __asm__ __volatile__(
+ /* FP/BLINK save generated by gcc (standard function prologue) */
+ "st.a r13, [sp, -4] \n\t"
+ "st.a r14, [sp, -4] \n\t"
+ "st.a r15, [sp, -4] \n\t"
+ "st.a r16, [sp, -4] \n\t"
+ "st.a r17, [sp, -4] \n\t"
+ "st.a r18, [sp, -4] \n\t"
+ "st.a r19, [sp, -4] \n\t"
+ "st.a r20, [sp, -4] \n\t"
+ "st.a r21, [sp, -4] \n\t"
+ "st.a r22, [sp, -4] \n\t"
+ "st.a r23, [sp, -4] \n\t"
+ "st.a r24, [sp, -4] \n\t"
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "st.a r25, [sp, -4] \n\t"
+#endif
+ "sub sp, sp, %4 \n\t" /* create gutter at top */
+
+ /* set ksp of outgoing task in tsk->thread.ksp */
+ "st.as sp, [%3, %1] \n\t"
+
+ "sync \n\t"
+
+ /*
+ * setup _current_task with incoming tsk.
+ * optionally, set r25 to that as well
+ * For SMP extra work to get to &_current_task[cpu]
+ * (open coded SET_CURR_TASK_ON_CPU)
+ */
+#ifndef CONFIG_SMP
+ "st %2, [@_current_task] \n\t"
+#else
+ "lr r24, [identity] \n\t"
+ "lsr r24, r24, 8 \n\t"
+ "bmsk r24, r24, 7 \n\t"
+ "add2 r24, @_current_task, r24 \n\t"
+ "st %2, [r24] \n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+ "mov r25, %2 \n\t"
+#endif
+
+ /* get ksp of incoming task from tsk->thread.ksp */
+ "ld.as sp, [%2, %1] \n\t"
+
+ /* start loading its CALLEE reg file */
+
+ "add sp, sp, %4 \n\t" /* skip gutter at top */
+
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "ld.ab r25, [sp, 4] \n\t"
+#endif
+ "ld.ab r24, [sp, 4] \n\t"
+ "ld.ab r23, [sp, 4] \n\t"
+ "ld.ab r22, [sp, 4] \n\t"
+ "ld.ab r21, [sp, 4] \n\t"
+ "ld.ab r20, [sp, 4] \n\t"
+ "ld.ab r19, [sp, 4] \n\t"
+ "ld.ab r18, [sp, 4] \n\t"
+ "ld.ab r17, [sp, 4] \n\t"
+ "ld.ab r16, [sp, 4] \n\t"
+ "ld.ab r15, [sp, 4] \n\t"
+ "ld.ab r14, [sp, 4] \n\t"
+ "ld.ab r13, [sp, 4] \n\t"
+
+ /* last (ret value) = prev : although for ARC it is mov r0, r0 */
+ "mov %0, %3 \n\t"
+
+ /* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+ : "=r"(tmp)
+ : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+ "n"(num_words_to_skip * 4)
+ : "blink"
+ );
+
+ return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 000000000000..d8972345e4c2
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h> /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+ .section .sched.text,"ax",@progbits
+ .align 4
+ .global __switch_to
+ .type __switch_to, @function
+__switch_to:
+
+ /* Save regs on kernel mode stack of task */
+ st.a blink, [sp, -4]
+ st.a fp, [sp, -4]
+ SAVE_CALLEE_SAVED_KERNEL
+
+ /* Save the current SP (now the KSP) in task->thread.ksp */
+ st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+ /*
+ * Return last task in r0 (return reg)
+ * On ARC, Return reg = First Arg reg = r0.
+ * Since we already have last task in r0,
+ * don't need to do anything special to return it
+ */
+
+ /* hardware memory barrier */
+ sync
+
+ /*
+ * switch to new task, contained in r1
+ * Temp reg r3 is required to get the ptr to store val
+ */
+ SET_CURR_TASK_ON_CPU r1, r3
+
+ /* reload SP with kernel mode stack pointer in task->thread.ksp */
+ ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+ /* restore the registers */
+ RESTORE_CALLEE_SAVED_KERNEL
+ ld.ab fp, [sp, 4]
+ ld.ab blink, [sp, 4]
+ j [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 000000000000..bdee3a812052
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/prom.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+/* called from unflatten_device_tree() to bootstrap devicetree itself */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+ struct boot_param_header *devtree = dt;
+ struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model, *compat;
+ void *clk;
+ char manufacturer[16];
+ unsigned long len;
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+
+ /*
+ * The kernel could be multi-platform enabled, thus could have many
+ * "baked-in" machine descriptors. Search thru all for the best
+ * "compatible" string match.
+ */
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+
+ machine_halt();
+ }
+
+ /* compat = "<manufacturer>,<model>" */
+ compat = mdesc_best->dt_compat[0];
+
+ model = strchr(compat, ',');
+ if (model)
+ model++;
+
+ strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
+
+ pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
+ if (clk)
+ arc_set_core_freq(of_read_ulong(clk, len/4));
+
+ return mdesc_best;
+}
+
+/*
+ * Copy the flattened DT out of .init since unflattening doesn't copy strings
+ * and the normal DT APIs reference them from the original flat DT
+ */
+void __init copy_devtree(void)
+{
+ void *alloc = early_init_dt_alloc_memory_arch(
+ be32_to_cpu(initial_boot_params->totalsize), 64);
+ if (alloc) {
+ memcpy(alloc, initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+ initial_boot_params = alloc;
+ }
+}
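
For context, setup_machine_fdt() expects each platform to have registered a machine descriptor carrying a NULL-terminated dt_compat list that is matched against the DT root's "compatible" property. A hypothetical descriptor could look like the sketch below; the registration mechanism (section placement/macro), any fields beyond .name/.dt_compat, and the compatible string itself are assumptions of this sketch:

static const char *my_board_compat[] = {
	"vendor,my-arc-board",	/* matched against the DT root's "compatible" */
	NULL,
};

static struct machine_desc my_board_mdesc __used = {
	.name		= "my-arc-board",
	.dt_compat	= my_board_compat,
};
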
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 000000000000..2f390289a792
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,538 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/disasm.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
+ defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+ int fieldA = 0;
+ int fieldC = 0, fieldCisReg = 0;
+ uint16_t word1 = 0, word0 = 0;
+ int subopcode, is_linked, op_format;
+ uint16_t *ins_ptr;
+ uint16_t ins_buf[4];
+ int bytes_not_copied = 0;
+
+ memset(state, 0, sizeof(struct disasm_state));
+
+ /* This fetches the upper part of the 32 bit instruction
+ * in both Little Endian and Big Endian configurations. */
+ if (userspace) {
+ bytes_not_copied = copy_from_user(ins_buf,
+ (const void __user *) addr, 8);
+ if (bytes_not_copied > 6)
+ goto fault;
+ ins_ptr = ins_buf;
+ } else {
+ ins_ptr = (uint16_t *) addr;
+ }
+
+ word1 = *((uint16_t *)addr);
+
+ state->major_opcode = (word1 >> 11) & 0x1F;
+
+ /* Check if the instruction is 32 bit or 16 bit instruction */
+ if (state->major_opcode < 0x0B) {
+ if (bytes_not_copied > 4)
+ goto fault;
+ state->instr_len = 4;
+ word0 = *((uint16_t *)(addr+2));
+ state->words[0] = (word1 << 16) | word0;
+ } else {
+ state->instr_len = 2;
+ state->words[0] = word1;
+ }
+
+ /* Read the second word in case of limm */
+ word1 = *((uint16_t *)(addr + state->instr_len));
+ word0 = *((uint16_t *)(addr + state->instr_len + 2));
+ state->words[1] = (word1 << 16) | word0;
+
+ switch (state->major_opcode) {
+ case op_Bcc:
+ state->is_branch = 1;
+
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 16)) ?
+ FIELD_s25(state->words[0]) :
+ FIELD_s21(state->words[0]);
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->flow = direct_jump;
+ break;
+
+ case op_BLcc:
+ if (IS_BIT(state->words[0], 16)) {
+ /* Branch and Link*/
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 17)) ?
+ (FIELD_s25(state->words[0]) & ~0x3) :
+ FIELD_s21(state->words[0]);
+
+ state->flow = direct_call;
+ } else {
+ /*Branch On Compare */
+ fieldA = FIELD_s9(state->words[0]) & ~0x3;
+ state->flow = direct_jump;
+ }
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->is_branch = 1;
+ break;
+
+ case op_LD: /* LD<zz> a,[b,s9] */
+ state->write = 0;
+ state->di = BITS(state->words[0], 11, 11);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 7, 8);
+ state->aa = BITS(state->words[0], 9, 10);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->aa = 0;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src2 = FIELD_s9(state->words[0]);
+ state->dest = FIELD_A(state->words[0]);
+ state->pref = (state->dest == REG_LIMM);
+ break;
+
+ case op_ST:
+ state->write = 1;
+ state->di = BITS(state->words[0], 5, 5);
+ if (state->di)
+ break;
+ state->aa = BITS(state->words[0], 3, 4);
+ state->zz = BITS(state->words[0], 1, 2);
+ state->src1 = FIELD_C(state->words[0]);
+ if (state->src1 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->src1, regs, cregs);
+ }
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->aa = 0;
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src3 = FIELD_s9(state->words[0]);
+ break;
+
+ case op_MAJOR_4:
+ subopcode = MINOR_OPCODE(state->words[0]);
+ switch (subopcode) {
+ case 32: /* Jcc */
+ case 33: /* Jcc.D */
+ case 34: /* JLcc */
+ case 35: /* JLcc.D */
+ is_linked = 0;
+
+ if (subopcode == 33 || subopcode == 35)
+ state->delay_slot = 1;
+
+ if (subopcode == 34 || subopcode == 35)
+ is_linked = 1;
+
+ fieldCisReg = 0;
+ op_format = BITS(state->words[0], 22, 23);
+ if (op_format == 0 || ((op_format == 3) &&
+ (!IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+
+ if (fieldC == REG_LIMM) {
+ fieldC = state->words[1];
+ state->instr_len += 4;
+ } else {
+ fieldCisReg = 1;
+ }
+ } else if (op_format == 1 || ((op_format == 3)
+ && (IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+ } else {
+ /* op_format == 2 */
+ fieldC = FIELD_s12(state->words[0]);
+ }
+
+ if (!fieldCisReg) {
+ state->target = fieldC;
+ state->flow = is_linked ?
+ direct_call : direct_jump;
+ } else {
+ state->target = get_reg(fieldC, regs, cregs);
+ state->flow = is_linked ?
+ indirect_call : indirect_jump;
+ }
+ state->is_branch = 1;
+ break;
+
+ case 40: /* LPcc */
+ if (BITS(state->words[0], 22, 23) == 3) {
+ /* Conditional LPcc u7 */
+ fieldC = FIELD_C(state->words[0]);
+
+ fieldC = fieldC << 1;
+ fieldC += (addr & ~0x03);
+ state->is_branch = 1;
+ state->flow = direct_jump;
+ state->target = fieldC;
+ }
+ /* For Unconditional lp, next pc is the fall through
+ * which is updated */
+ break;
+
+ case 48 ... 55: /* LD a,[b,c] */
+ state->di = BITS(state->words[0], 15, 15);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 16, 16);
+ state->zz = BITS(state->words[0], 17, 18);
+ state->aa = BITS(state->words[0], 22, 23);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs,
+ cregs);
+ }
+ state->src2 = FIELD_C(state->words[0]);
+ if (state->src2 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->src2, regs,
+ cregs);
+ }
+ state->dest = FIELD_A(state->words[0]);
+ if (state->dest == REG_LIMM)
+ state->pref = 1;
+ break;
+
+ case 10: /* MOV */
+ /* still need to check for limm to extract instr len */
+ /* MOV is a special case because it only takes 2 args */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if (FIELD_C(state->words[0]) == REG_LIMM)
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+
+
+ default:
+ /* Not a Load, Jump or Loop instruction */
+ /* still need to check for limm to extract instr len */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM)))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+ }
+ break;
+
+ /* 16 Bit Instructions */
+ case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+ state->zz = BITS(state->words[0], 3, 4);
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->dest = FIELD_S_A(state->words[0]);
+ break;
+
+ case op_ADD_MOV_CMP:
+ /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+ if ((BITS(state->words[0], 3, 4) < 3) &&
+ (FIELD_S_H(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+
+ case op_S:
+ subopcode = BITS(state->words[0], 5, 7);
+ switch (subopcode) {
+ case 0: /* j_s */
+ case 1: /* j_s.d */
+ case 2: /* jl_s */
+ case 3: /* jl_s.d */
+ state->target = get_reg(FIELD_S_B(state->words[0]),
+ regs, cregs);
+ state->delay_slot = subopcode & 1;
+ state->flow = (subopcode >= 2) ?
+ direct_call : indirect_jump;
+ break;
+ case 7:
+ switch (BITS(state->words[0], 8, 10)) {
+ case 4: /* jeq_s [blink] */
+ case 5: /* jne_s [blink] */
+ case 6: /* j_s [blink] */
+ case 7: /* j_s.d [blink] */
+ state->delay_slot = (subopcode == 7);
+ state->flow = indirect_jump;
+ state->target = get_reg(31, regs, cregs);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case op_LD_S: /* LD_S c, [b, u7] */
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_LDB_S:
+ case op_STB_S:
+ /* no further handling required as byte accesses should not
+ * cause an unaligned access exception */
+ state->zz = 1;
+ break;
+
+ case op_LDWX_S: /* LDWX_S c, [b, u6] */
+ state->x = 1;
+ /* intentional fall-through */
+
+ case op_LDW_S: /* LDW_S c, [b, u6] */
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u6(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_ST_S: /* ST_S c, [b, u7] */
+ state->write = 1;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ break;
+
+ case op_STW_S: /* STW_S c,[b,u6] */
+ state->write = 1;
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u6(state->words[0]);
+ break;
+
+ case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+ /* note: we are ignoring possibility of:
+ * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+ * cause an unaligned exception anyway */
+ state->write = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 5, 5);
+ if (state->zz)
+ break; /* byte accesses should not come here */
+ if (!state->write) {
+ state->src1 = get_reg(28, regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ } else {
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+ cregs);
+ state->src2 = get_reg(28, regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ }
+ break;
+
+ case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+ /* note: ADD_S r0, gp, s11 is ignored */
+ state->zz = BITS(state->words[0], 9, 10);
+ state->src1 = get_reg(26, regs, cregs);
+ state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+ FIELD_S_s11(state->words[0]);
+ state->dest = 0;
+ break;
+
+ case op_Pcl: /* LD_S b,[pcl,u10] */
+ state->src1 = regs->ret & ~3;
+ state->src2 = FIELD_S_u10(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ break;
+
+ case op_BR_S:
+ state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_B_S:
+ fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+ FIELD_S_s7(state->words[0]) :
+ FIELD_S_s10(state->words[0]);
+ state->target = fieldA + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_BL_S:
+ state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_call;
+ state->is_branch = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bytes_not_copied <= (8 - state->instr_len))
+ return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ if (reg <= 12) {
+ p = &regs->r0;
+ return p[-reg];
+ }
+
+ if (cregs && (reg <= 25)) {
+ p = &cregs->r13;
+ return p[13-reg];
+ }
+
+ if (reg == 26)
+ return regs->r26;
+ if (reg == 27)
+ return regs->fp;
+ if (reg == 28)
+ return regs->sp;
+ if (reg == 31)
+ return regs->blink;
+
+ return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ switch (reg) {
+ case 0 ... 12:
+ p = &regs->r0;
+ p[-reg] = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13-reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to the next PC, which could be
+ * @pc + 2/4/6 (the ARCompact ISA allows free intermixing of 16/32 bit insns).
+ *
+ * If @pc is a branch
+ * -@tgt_if_br is set to branch target.
+ * -If branch has delay slot, @next_pc updated with actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+ struct callee_regs *cregs,
+ unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+ struct disasm_state instr;
+
+ memset(&instr, 0, sizeof(struct disasm_state));
+ disasm_instr(pc, &instr, 0, regs, cregs);
+
+ *next_pc = pc + instr.instr_len;
+
+ /* Instruction with possible two targets branch, jump and loop */
+ if (instr.is_branch)
+ *tgt_if_br = instr.target;
+
+ /* For the instructions with delay slots, the fall through is the
+ * instruction following the instruction in delay slot.
+ */
+ if (instr.delay_slot) {
+ struct disasm_state instr_d;
+
+ disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
+
+ *next_pc += instr_d.instr_len;
+ }
+
+ /* Zero Overhead Loop - end of the loop */
+ if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
+ && (regs->lp_count > 1)) {
+ *next_pc = regs->lp_start;
+ }
+
+ return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
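
A kgdb/kprobes style consumer of this interface would typically call disasm_next_pc() to plan software single-stepping. The fragment below is only a rough in-kernel sketch (the stopped PC is taken from regs->ret, as the decoder above does; breakpoint arming itself is left out):

#include <asm/disasm.h>
#include <asm/ptrace.h>

/* Plan a single-step: a breakpoint at the fall-through PC, plus one at
 * the branch target if the stopped insn can branch. */
static void plan_single_step(struct pt_regs *regs, struct callee_regs *cregs)
{
	unsigned long next_pc = 0, br_tgt = 0;
	int is_branch;

	is_branch = disasm_next_pc(regs->ret, regs, cregs, &next_pc, &br_tgt);

	/* ... arm a breakpoint at next_pc ... */
	if (is_branch) {
		/* ... and another one at br_tgt, since either may be taken */
	}
}
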
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 000000000000..ef6800ba2f03
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,839 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ * -traced syscall return code (r0) was not saved into pt_regs for restoring
+ * into user reg-file when traced task rets to user space.
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ * were not invoking post-syscall trace hook (jumping directly into
+ * ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ * -To maintain the slot size of 8 bytes/vector, added nop, which is
+ * not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
+ * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ * need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ * active (AE bit enabled). This causes a double fault for a subseq valid
+ * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ * Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather than "SET". Since it is Loop INHIBIT Bit,
+ * setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ * - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ * Minor Surgery of Low Level ISR to make it SMP safe
+ * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ * - _current_task is made an array of NR_CPUS
+ * - Access of _current_task wrapped inside a macro so that if hardware
+ * team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+/*------------------------------------------------------------------
+ * Function ABI
+ *------------------------------------------------------------------
+ *
+ * Arguments r0 - r7
+ * Caller Saved Registers r0 - r12
+ * Callee Saved Registers r13- r25
+ * Global Pointer (gp) r26
+ * Frame Pointer (fp) r27
+ * Stack Pointer (sp) r28
+ * Interrupt link register (ilink1) r29
+ * Interrupt link register (ilink2) r30
+ * Branch link register (blink) r31
+ *------------------------------------------------------------------
+ */
+
+ .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR lbl
+#if 1 /* Just in case, build breaks */
+ j \lbl
+#else
+ b \lbl
+ nop
+#endif
+.endm
+
+ .section .vector, "ax",@progbits
+ .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR res_service ; 0x0, Restart Vector (0x0)
+VECTOR mem_service ; 0x8, Mem exception (0x1)
+VECTOR instr_service ; 0x10, Instrn Error (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+VECTOR handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+.rept 25
+VECTOR handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
+VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
+VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
+VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
+ ; or Misaligned Access
+VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
+VECTOR EV_Trap ; 0x128, Trap exception (0x25)
+VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
+
+.rept 24
+VECTOR reserved ; Reserved Exceptions
+.endr
+
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+ .align 32
+ .type int1_saved_reg, @object
+ .size int1_saved_reg, 4
+int1_saved_reg:
+ .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+ .type int2_saved_reg, @object
+ .size int2_saved_reg, 4
+int2_saved_reg:
+ .zero 4
+
+#endif
+
+; ---------------------------------------------
+ .section .text, "ax",@progbits
+
+res_service: ; processor restart
+ flag 0x1 ; not implemented
+ nop
+ nop
+
+reserved: ; processor restart
+ rtie ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+; Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level2
+
+ ; TODO-vineetg for SMP this won't work
+ ; free up r9 as scratchpad
+ st r9, [@int2_saved_reg]
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l2]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT2
+
+ ;------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;------------------------------------------------------
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; bump thread_info->preempt_count (Disable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+ add r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+ ;------------------------------------------------------
+ ; setup params for Linux common ISR and invoke it
+ ;------------------------------------------------------
+ lr r0, [icause2]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x2
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+
+ARC_EXIT handle_interrupt_level2
+
+#endif
+
+; ---------------------------------------------
+; Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level1
+
+ /* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+ sr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ st r9, [@int1_saved_reg]
+#endif
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l1]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT1
+
+ lr r0, [icause1]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x1
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+ARC_EXIT handle_interrupt_level1
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY instr_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_insterror_or_kprobe
+ b ret_from_exception
+ARC_EXIT instr_service
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY mem_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_memory_error
+ b ret_from_exception
+ARC_EXIT mem_service
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_MachineCheck
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ brne r0, 0x200100, 1f
+ bl do_tlb_overlap_fault
+ b ret_from_exception
+
+1:
+ ; DEAD END: can't do much, display Regs and HALT
+ SAVE_CALLEE_SAVED_USER
+
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ j do_machine_check_fault
+
+ARC_EXIT EV_MachineCheck
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_TLBProtV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ ;Which mode (user/kernel) was the system in when Exception occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ;---------(3) Save some more regs-----------------
+ ; vineetg: Mar 6th: Random Seg Fault issue #1
+ ; ecr and efa were not saved in case an Intr sneaks in
+ ; after fake rtie
+ ;
+ lr r3, [ecr]
+ lr r4, [efa]
+
+ ; --------(4) Return from CPU Exception Mode ---------
+ ; Fake a rtie, but rtie to next label
+ ; That way, subsequently, do_page_fault ( ) executes in pure kernel
+ ; mode with further Exceptions enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ ;------ (5) Type of Protection Violation? ----------
+ ;
+ ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+ ; -Access Violation (WRITE to READ ONLY Page) - for linux COW
+ ; -Unaligned Access (READ/WRITE on odd boundary)
+ ;
+ cmp r3, 0x230400 ; Misaligned data access ?
+ beq 4f
+
+ ;========= (6a) Access Violation Processing ========
+ cmp r3, 0x230100
+ mov r1, 0x0 ; if LD exception ? write = 0
+ mov.ne r1, 0x1 ; else write = 1
+
+ mov r2, r4 ; faulting address
+ mov r0, sp ; pt_regs
+ bl do_page_fault
+ b ret_from_exception
+
+ ;========== (6b) Non aligned access ============
+4:
+ mov r0, r3 ; cause code
+ mov r1, r4 ; faulting address
+ mov r2, sp ; pt_regs
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ SAVE_CALLEE_SAVED_USER
+ mov r3, sp ; callee_regs
+#endif
+
+ bl do_misaligned_access
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ DISCARD_CALLEE_SAVED_USER
+#endif
+
+ b ret_from_exception
+
+ARC_EXIT EV_TLBProtV
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_PrivilegeV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_privilege_fault
+ b ret_from_exception
+ARC_EXIT EV_PrivilegeV
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_Extension
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_extension_fault
+ b ret_from_exception
+ARC_EXIT EV_Extension
+
+;######################### System Call Tracing #########################
+
+tracesys:
+ ; save EFA in case tracer wants the PC of traced task
+ ; using ERET won't work since next-PC has already committed
+ lr r12, [efa]
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR]
+
+ ; PRE Sys Call Ptrace hook
+ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_entry
+
+ ; Tracing code now returns the syscall num (orig or modif)
+ mov r8, r0
+
+ ; Do the Sys Call as we normally would.
+ ; Validate the Sys Call number
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+ ; Restore the sys-call args. Mere invocation of the hook above could have
+ ; clobbered them (since they are in scratch regs). The tracer could also
+ ; have deliberately changed the syscall args: r0-r7
+ ld r0, [sp, PT_r0]
+ ld r1, [sp, PT_r1]
+ ld r2, [sp, PT_r2]
+ ld r3, [sp, PT_r3]
+ ld r4, [sp, PT_r4]
+ ld r5, [sp, PT_r5]
+ ld r6, [sp, PT_r6]
+ ld r7, [sp, PT_r7]
+ ld.as r9, [sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+tracesys_exit:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ;POST Sys Call Ptrace Hook
+ bl @syscall_trace_exit
+ b ret_from_exception ; NOT ret_from_system_call as that saves r0 which
+ ; we'd already done before calling the post hook above
+
+;################### Break Point TRAP ##########################
+
+ ; ======= (5b) Trap is due to Break-Point =========
+
+trap_with_param:
+
+ ; stop_pc computation by gdb needs this info
+ stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+ mov r0, r12
+ lr r1, [efa]
+ mov r2, sp
+
+ ; Now that we have read EFA, it's safe to do "fake" rtie
+ ; and get out of CPU exception mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; Save callee regs in case gdb wants to have a look
+ ; SP will grow up by size of CALLEE Reg-File
+ ; NOTE: clobbers r12
+ SAVE_CALLEE_SAVED_USER
+
+ ; save location of saved Callee Regs @ thread_struct->pc
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ ; Call the trap handler
+ bl do_non_swi_trap
+
+ ; unwind stack to discard Callee saved Regs
+ DISCARD_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+;##################### Trap Handling ##############################
+;
+; EV_Trap caused by TRAP_S and TRAP0 instructions.
+;------------------------------------------------------------------
+; (1) System Calls
+; :parameters in r0-r7.
+; :r8 has the system call number
+; (2) Break Points
+;------------------------------------------------------------------
+
+ARC_ENTRY EV_Trap
+
+ ; Need at least 1 reg to code the early exception prolog
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_TRAP
+
+ ;------- (4) What caused the Trap --------------
+ lr r12, [ecr]
+ and.f 0, r12, ECR_PARAM_MASK
+ bnz trap_with_param
+
+ ; ======= (5a) Trap is due to System Call ========
+
+ ; Before doing anything, return from CPU Exception Mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; If syscall tracing ongoing, invoke pre/post hooks
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys ; this never comes back
+
+ ;============ This is normal System Call case ==========
+ ; Sys-call num shd not exceed the total system calls avail
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi ret_from_system_call
+
+ ; Offset into the syscall_table and call handler
+ ld.as r9,[sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+ ; fall through to ret_from_system_call
+ARC_EXIT EV_Trap
+
+ARC_ENTRY ret_from_system_call
+
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ; fall through yet again to ret_from_exception
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If ret to user mode do we need to handle signals, schedule() et al.
+
+ARC_ENTRY ret_from_exception
+
+ ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+ ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
+
+#ifdef CONFIG_PREEMPT
+ bbit0 r8, STATUS_U_BIT, resume_kernel_mode
+#else
+ bbit0 r8, STATUS_U_BIT, restore_regs
+#endif
+
+ ; Before returning to User mode check-for-and-complete any pending work
+ ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+ ; Disable IRQs to ensure that the check for pending work itself is atomic
+ ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+ ; interim IRQ).
+ IRQ_DISABLE r10
+
+ ; Fast Path return to user mode if no pending work
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_WORK_MASK
+ bz restore_regs
+
+ ; --- (Slow Path #1) task preemption ---
+ bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+ mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
+ b @schedule ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+ IRQ_ENABLE r10
+
+ ; --- (Slow Path #2) pending signal ---
+ mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+
+ bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
+
+ ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+ ; in pt_reg since the "C" ABI (kernel code) will automatically
+ ; save/restore callee-saved regs.
+ ;
+ ; However, here we need to explicitly save callee regs because
+ ; (i) If this signal causes coredump - full regfile needed
+ ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+ ; tracer might call PEEKUSR(CALLEE reg)
+ ;
+ ; NOTE: SP will grow up by size of CALLEE Reg-File
+ SAVE_CALLEE_SAVED_USER ; clobbers r12
+
+ ; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ bl @do_signal
+
+ ; Ideally we want to discard the Callee reg above, however if this was
+ ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+ RESTORE_CALLEE_SAVED_USER
+
+ b resume_user_mode_begin ; loop back to start of U mode ret
+
+ ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+ btst r9, TIF_NOTIFY_RESUME
+ blnz @do_notify_resume
+ b resume_user_mode_begin ; unconditionally back to U mode ret chks
+ ; for single exit point from this block
+
+#ifdef CONFIG_PREEMPT
+
+resume_kernel_mode:
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+ brne r8, 0, restore_regs
+
+ ; check if this task's NEED_RESCHED flag set
+ ld r9, [r10, THREAD_INFO_FLAGS]
+ bbit0 r9, TIF_NEED_RESCHED, restore_regs
+
+ IRQ_DISABLE r9
+
+ ; Invoke PREEMPTION
+ bl preempt_schedule_irq
+
+ ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+ ; fall through
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ shd definitely not happen between now and rtie
+
+restore_regs :
+
+ ; Disable Interrupts while restoring reg-file back
+ ; XXX can this be optimised out
+ IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Restore User R25
+ ; Earlier this used to be only for returning to user mode
+ ; However with 2 levels of IRQ this can also happen even if
+ ; in kernel mode
+ ld r9, [sp, PT_sp]
+ brhs r9, VMALLOC_START, 8f
+ RESTORE_USER_R25
+8:
+#endif
+
+ ; Restore REG File. In case multiple Events outstanding,
+ ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+ ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+ ; decide that.
+
+ ; if Returning from Exception
+ bbit0 r10, STATUS_AE_BIT, not_exception
+ RESTORE_ALL_SYS
+ rtie
+
+ ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+not_exception:
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
+
+ ;------------------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier
+ ; so that sched doesn't move to a new task, causing L1 to be delayed
+ ; non-deterministically. Now that we've achieved that, let's reset
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+ ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+ brne r9, orig_r8_IS_IRQ2, 149f ; in fact a L2 ISR ret path
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; decrement thread_info->preempt_count (re-enable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+ ; paranoid check, given A1 was active when A2 happened, preempt count
+ ; must not be 0 because we would have incremented it.
+ ; If this does happen we simply HALT as it means a BUG !!!
+ cmp r9, 0
+ bnz 2f
+ flag 1
+
+2:
+ sub r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+ ;return from level 2
+ RESTORE_ALL_INT2
+debug_marker_l2:
+ rtie
+
+not_level2_interrupt:
+
+#endif
+
+ bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
+
+ ;return from level 1
+
+ RESTORE_ALL_INT1
+debug_marker_l1:
+ rtie
+
+not_level1_interrupt:
+
+ ;this case is for syscalls or Exceptions (with fake rtie)
+
+ RESTORE_ALL_SYS
+debug_marker_syscall:
+ rtie
+
+ARC_EXIT ret_from_exception
+
+ARC_ENTRY ret_from_fork
+ ; when the forked child comes here from the __switch_to function
+ ; r0 has the last task pointer.
+ ; put last task in scheduler queue
+ bl @schedule_tail
+
+ ; If kernel thread, jump to its entry-point
+ ld r9, [sp, PT_status32]
+ brne r9, 0, 1f
+
+ jl.d [r14]
+ mov r0, r13 ; arg to payload
+
+1:
+ ; special case of kernel_thread entry point returning due to
+ ; kernel_execve() - pretend return from syscall to ret to userland
+ b ret_from_exception
+ARC_EXIT ret_from_fork
+
+;################### Special Sys Call Wrappers ##########################
+
+; TBD: call do_fork directly from here
+ARC_ENTRY sys_fork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_fork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_fork_wrapper
+
+ARC_ENTRY sys_vfork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_vfork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_vfork_wrapper
+
+ARC_ENTRY sys_clone_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_clone_wrapper
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
+#endif
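
The trap-based syscall convention handled by EV_Trap above (arguments in r0-r7, syscall number in r8, return value back in r0) could be exercised from userspace roughly as in the fragment below. This is only an illustrative sketch of the calling convention: errno conversion is omitted, and the choice of trap0 vs trap_s 0 as the trapping instruction is left to the ABI in use.

/* One-argument syscall helper following the convention documented in
 * EV_Trap: number in r8, first argument in r0, result back in r0. */
long arc_syscall1(long nr, long arg0)
{
	register long r8 __asm__("r8") = nr;
	register long r0 __asm__("r0") = arg0;

	__asm__ __volatile__("trap0"
			     : "+r" (r0)
			     : "r" (r8)
			     : "memory");
	return r0;
}
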
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 000000000000..f352e512cbd1
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ * mov_s r3, 0
+ * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
+ * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
+ * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+ unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+ const unsigned int zero = 0;
+
+ __asm__ __volatile__(
+ "daddh11 %0, %2, %2\n"
+ "dexcl1 %1, %3, %4\n"
+ : "=&r" (*(saveto + 1)), /* early clobber must here */
+ "=&r" (*(saveto))
+ : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+ );
+
+ __asm__ __volatile__(
+ "daddh22 %0, %2, %2\n"
+ "dexcl2 %1, %3, %4\n"
+ : "=&r"(*(saveto + 3)), /* early clobber must here */
+ "=&r"(*(saveto + 2))
+ : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+ );
+}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 000000000000..006dec3fc353
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,111 @@
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Dec 2007
+ * -Check if we are running on Simulator or on real hardware
+ * to skip certain things during boot on simulator
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <linux/linkage.h>
+#include <asm/arcregs.h>
+
+ .cpu A7
+
+ .section .init.text, "ax",@progbits
+ .type stext, @function
+ .globl stext
+stext:
+ ;-------------------------------------------------------------------
+ ; Don't clobber r0-r4 yet. It might have bootloader provided info
+ ;-------------------------------------------------------------------
+
+#ifdef CONFIG_SMP
+ ; Only Boot (Master) proceeds. Others wait in platform dependent way
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (cpu-id) ^^^ => Zero for UP ARC700
+ ; => #Core-ID if SMP (Master 0)
+ GET_CPU_ID r5
+ cmp r5, 0
+ jnz arc_platform_smp_wait_to_boot
+#endif
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
+ mov r5, __bss_start
+ mov r6, __bss_stop
+1:
+ st.ab 0, [r5,4]
+ brlt r5, r6, 1b
+
+#ifdef CONFIG_CMDLINE_UBOOT
+ ; support for bootloader provided cmdline
+ ; If cmdline passed by u-boot, then
+ ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
+ ; r1 = magic number (board identity)
+ ; r2 = addr of cmdline string (somewhere in memory/flash)
+
+ brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
+ breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
+
+ mov r5, @command_line
+1:
+ ldb.ab r6, [r2, 1]
+ breq r6, 0, .Lother_bootup_chores
+ b.d 1b
+ stb.ab r6, [r5, 1]
+#endif
+
+.Lother_bootup_chores:
+
+ ; Identify if running on ISS vs Silicon
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (chip-id) ^^^^^ ==> 0xffff for ISS
+ lr r0, [identity]
+ lsr r3, r0, 16
+ cmp r3, 0xffff
+ mov.z r4, 0
+ mov.nz r4, 1
+ st r4, [@running_on_hw]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+ mov r9, @init_task
+ SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; tsk->thread_info is really a PAGE, whose bottom hoists stack
+ GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
+
+ j start_kernel ; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+; First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+ .section .init.text, "ax",@progbits
+ .type first_lines_of_secondary, @function
+ .globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+ SET_CURR_TASK_ON_CPU r0, r1
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; set its stack base to tsk->thread_info bottom
+ GET_TSK_STACK_BASE r0, sp
+
+ j start_kernel_secondary
+
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 000000000000..551c10dff481
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/mach_desc.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC700)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ *
+ * What it does:
+ * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
+ * -Disable all IRQs (on CPU side)
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void __init arc_init_IRQ(void)
+{
+ int level_mask = 0;
+
+ write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
+
+ /* Disable all IRQs: enable them as devices request */
+ write_aux_reg(AUX_IENABLE, 0);
+
+ /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+#ifdef CONFIG_ARC_IRQ3_LV2
+ level_mask |= (1 << 3);
+#endif
+#ifdef CONFIG_ARC_IRQ5_LV2
+ level_mask |= (1 << 5);
+#endif
+#ifdef CONFIG_ARC_IRQ6_LV2
+ level_mask |= (1 << 6);
+#endif
+
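+ /* e.g. with CONFIG_ARC_IRQ3_LV2 and CONFIG_ARC_IRQ5_LV2 both set,
+ * level_mask ends up as (1 << 3) | (1 << 5) = 0x28
+ */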
+ if (level_mask) {
+ pr_info("Level-2 interrupts bitset %x\n", level_mask);
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+ }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs are directly hooked up to the
+ * intc. Platforms with an external intc, and hence cascaded IRQs, are free to
+ * override the below, per IRQ.
+ */
+
+static void arc_mask_irq(struct irq_data *data)
+{
+ arch_mask_irq(data->irq);
+}
+
+static void arc_unmask_irq(struct irq_data *data)
+{
+ arch_unmask_irq(data->irq);
+}
+
+static struct irq_chip onchip_intc = {
+ .name = "ARC In-core Intc",
+ .irq_mask = arc_mask_irq,
+ .irq_unmask = arc_unmask_irq,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (irq == TIMER0_IRQ)
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+ else
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+void __init init_onchip_IRQ(void)
+{
+ struct device_node *intc = NULL;
+
+ intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
+ if (!intc)
+ panic("DeviceTree Missing incore intc\n");
+
+ root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ &arc_intc_domain_ops, NULL);
+
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /* with this we don't need to export root_domain */
+ irq_set_default_host(root_domain);
+}
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+ init_onchip_IRQ();
+
+ /* Any external intc can be setup here */
+ if (machine_desc->init_irq)
+ machine_desc->init_irq();
+
+#ifdef CONFIG_SMP
+ /* Master CPU can initialize its side of the IPI */
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+#endif
+}
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+int __init get_hw_config_num_irq(void)
+{
+ uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
+
+ switch (val & 0x03) {
+ case 0:
+ return 16;
+ case 1:
+ return 32;
+ case 2:
+ return 8;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ * which may be in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ * e.g. suppose TIMER is high priority (Level 2) IRQ
+ * In the Timer hard-ISR, timer_interrupt() calls spin_unlock_irq() several
+ * times. Here local_irq_enable() should not re-enable lower priority
+ * interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ * soft ISRs are low priority jobs which can be very slow, thus all IRQs
+ * must be enabled while they run.
+ * Now hardware context wise we may still be in L2 ISR (not done rtie)
+ * still we must re-enable both L1 and L2 IRQs
+ * Another twist is prev scenario with flow being
+ * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
+ * here we must not re-enable L1 as the prev L1 Interrupt's h/w context will
+ * get over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+ unsigned long flags;
+ flags = arch_local_save_flags();
+
+ /* Allow both L1 and L2 at the onset */
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Called from hard ISR (between irq_enter and irq_exit) */
+ if (in_irq()) {
+
+ /* If in L2 ISR, don't re-enable any further IRQs as this can
+ * cause IRQ priorities to get upside down. e.g. it could allow
+ * L1 to be taken while in L2 hard ISR, which is wrong not only in
+ * theory, it can also cause the dreaded L1-L2-L1 scenario
+ */
+ if (flags & STATUS_A2_MASK)
+ flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Even if in L1 ISR, allow higher prio L2 IRQs */
+ else if (flags & STATUS_A1_MASK)
+ flags &= ~(STATUS_E1_MASK);
+ }
+
+ /* called from soft IRQ, ideally we want to re-enable all levels */
+
+ else if (in_softirq()) {
+
+ /* However if this is a case of L1 interrupted by L2,
+ * re-enabling both may cause a whacky L1-L2-L1 scenario
+ * because ARC700 allows level 1 to interrupt an active L2 ISR
+ * Thus we disable both
+ * However some code, executing in soft ISR wants some IRQs
+ * to be enabled so we re-enable L2 only
+ *
+ * How do we determine if L1 was interrupted by L2:
+ * -A2 is set (means in L2 ISR)
+ * -E1 is set in this ISR's pt_regs->status32 which is
+ * saved copy of status32_l2 when l2 ISR happened
+ */
+ struct pt_regs *pt = get_irq_regs();
+ if ((flags & STATUS_A2_MASK) && pt &&
+ (pt->status32 & STATUS_A1_MASK)) {
+ /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+ flags &= ~(STATUS_E1_MASK);
+ }
+ }
+
+ arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only Worry about Level 1 Bits
+ */
+void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+
+ /*
+ * The ARC IDE driver tries to re-enable interrupts from hard-ISR
+ * context, which is simply wrong
+ */
+ if (in_irq()) {
+ WARN_ONCE(1, "IRQ enabled from hard-isr");
+ return;
+ }
+
+ flags = arch_local_save_flags();
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+ arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 000000000000..2888ba5be47e
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,205 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kgdb.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+ for (regno = 27; regno < GDB_MAX_REGS; regno++)
+ gdb_regs[regno] = 0;
+
+ gdb_regs[_FP] = kernel_regs->fp;
+ gdb_regs[__SP] = kernel_regs->sp;
+ gdb_regs[_BLINK] = kernel_regs->blink;
+ gdb_regs[_RET] = kernel_regs->ret;
+ gdb_regs[_STATUS32] = kernel_regs->status32;
+ gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
+ gdb_regs[_LP_END] = kernel_regs->lp_end;
+ gdb_regs[_LP_START] = kernel_regs->lp_start;
+ gdb_regs[_BTA] = kernel_regs->bta;
+ gdb_regs[_STOP_PC] = kernel_regs->ret;
+}
+
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+ kernel_regs->fp = gdb_regs[_FP];
+ kernel_regs->sp = gdb_regs[__SP];
+ kernel_regs->blink = gdb_regs[_BLINK];
+ kernel_regs->ret = gdb_regs[_RET];
+ kernel_regs->status32 = gdb_regs[_STATUS32];
+ kernel_regs->lp_count = gdb_regs[_LP_COUNT];
+ kernel_regs->lp_end = gdb_regs[_LP_END];
+ kernel_regs->lp_start = gdb_regs[_LP_START];
+ kernel_regs->bta = gdb_regs[_BTA];
+}
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ if (task)
+ to_gdb_regs(gdb_regs, task_pt_regs(task),
+ (struct callee_regs *) task->thread.callee_reg);
+}
+
+struct single_step_data_t {
+ uint16_t opcode[2];
+ unsigned long address[2];
+ int is_branch;
+ int armed;
+} single_step_data;
+
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (single_step_data.armed) {
+ int i;
+
+ for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+ memcpy((void *) single_step_data.address[i],
+ &single_step_data.opcode[i],
+ BREAK_INSTR_SIZE);
+
+ flush_icache_range(single_step_data.address[i],
+ single_step_data.address[i] +
+ BREAK_INSTR_SIZE);
+ }
+ single_step_data.armed = 0;
+ }
+}
+
+static void place_trap(unsigned long address, void *save)
+{
+ memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+ memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+static void do_single_step(struct pt_regs *regs)
+{
+ single_step_data.is_branch = disasm_next_pc((unsigned long)
+ regs->ret, regs, (struct callee_regs *)
+ current->thread.callee_reg,
+ &single_step_data.address[0],
+ &single_step_data.address[1]);
+
+ place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+ if (single_step_data.is_branch) {
+ place_trap(single_step_data.address[1],
+ &single_step_data.opcode[1]);
+ }
+
+ single_step_data.armed++;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ undo_single_step(regs);
+
+ switch (remcomInBuffer[0]) {
+ case 's':
+ case 'c':
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ret = addr;
+
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(regs);
+ atomic_set(&kgdb_cpu_doing_single_step,
+ smp_processor_id());
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int kgdb_arch_init(void)
+{
+ single_step_data.armed = 0;
+ return 0;
+}
+
+void kgdb_trap(struct pt_regs *regs, int param)
+{
+ /* trap_s 3 is used for breakpoints that overwrite existing
+ * instructions, while trap_s 4 is used for compiled breakpoints.
+ *
+ * with trap_s 3 breakpoints the original instruction needs to be
+ * restored and continuation needs to start at the location of the
+ * breakpoint.
+ *
+ * with trap_s 4 (compiled) breakpoints, continuation needs to
+ * start after the breakpoint.
+ */
+ if (param == 3)
+ instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ instruction_pointer(regs) = ip;
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* breakpoint instruction: TRAP_S 0x3 */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = {0x78, 0x7e},
+#else
+ .gdb_bpt_instr = {0x7e, 0x78},
+#endif
+};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 000000000000..3bfeacb674de
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
+#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Attempt to probe at unaligned address */
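+ /* (ARCompact instructions are at least halfword aligned, so an odd
+ * address can never be a valid instruction boundary)
+ */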
+ if ((unsigned long)p->addr & 0x01)
+ return -EINVAL;
+
+ /* Address should not be in exception handling code */
+
+ p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+ p->opcode = *p->addr;
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = UNIMP_S_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ arch_disarm_kprobe(p);
+
+ /* Can we remove the kprobe in the middle of kprobe handling? */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+ struct pt_regs *regs)
+{
+ /* Remove the trap instructions inserted for single step and
+ * restore the original instructions
+ */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+
+ return;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long next_pc;
+ unsigned long tgt_if_br = 0;
+ int is_branch;
+ unsigned long bta;
+
+ /* Copy the opcode back to the kprobe location and execute the
+ * instruction. Because of this we will not be able to get into the
+ * same kprobe until this kprobe is done
+ */
+ *(p->addr) = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+ /* Now we insert the trap at the next location after this instruction to
+ * single step. If it is a branch we insert the trap at possible branch
+ * targets
+ */
+
+ bta = regs->bta;
+
+ if (regs->status32 & 0x40) {
+ /* We are in a delay slot with the branch taken */
+
+ next_pc = bta & ~0x01;
+
+ if (!p->ainsn.is_short) {
+ if (bta & 0x01)
+ regs->blink += 2;
+ else {
+ /* Branch not taken */
+ next_pc += 2;
+
+ /* next pc is taken from bta after executing the
+ * delay slot instruction
+ */
+ regs->bta += 2;
+ }
+ }
+
+ is_branch = 0;
+ } else
+ is_branch =
+ disasm_next_pc((unsigned long)p->addr, regs,
+ (struct callee_regs *) current->thread.callee_reg,
+ &next_pc, &tgt_if_br);
+
+ p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+ p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+ *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ if (is_branch) {
+ p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+ p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+ *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+ }
+}
+
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)addr);
+
+ if (p) {
+ /*
+ * We have reentered the kprobe_handler, since another kprobe
+ * was hit while within the handler, we save the original
+ * kprobes and single step on the instruction of the new probe
+ * without calling any user handlers to avoid recursive
+ * kprobes.
+ */
+ if (kprobe_running()) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /* If we have no pre-handler or it returned 0, we continue with
+ * normal processing. If we have a pre-handler and it returned
+ * non-zero - which is expected from setjmp_pre_handler for
+ * jprobe, we return without single stepping and leave that to
+ * the break-handler which is invoked by a kprobe from
+ * jprobe_return
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ return 1;
+ } else if (kprobe_running()) {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+ }
+ }
+
+ /* no_kprobe: */
+ preempt_enable_no_resched();
+ return 0;
+}
+
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+ struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ resume_execution(cur, addr, regs);
+
+ /* Rearm the kprobe */
+ arch_arm_kprobe(cur);
+
+ /*
+ * When we return from the trap instruction we go to the next instruction.
+ * We restored the actual instruction in resume_execution() and we need to
+ * return to the same address and execute it
+ */
+ regs->ret = addr;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+/*
+ * Fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This is applicable for applications like user probes, where we have the
+ * probe in user space and the handlers in the kernel
+ */
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single stepped
+ * caused the fault. We reset the current kprobe and let the
+ * exception be handled as if it were a regular exception. In our
+ * case it doesn't matter because the system will be halted
+ */
+ resume_execution(cur, (unsigned long)cur->addr, regs);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ preempt_enable_no_resched();
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We are here because the instructions in the pre/post handler
+ * caused the fault.
+ */
+
+ /* We increment the nmissed count for accounting,
+ * we can also use npre/npostfault count for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned zero,
+ * try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ unsigned long addr = args->err;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_IERR:
+ if (arc_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ case DIE_TRAP:
+ if (arc_post_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr = regs->sp;
+
+ kcb->jprobe_saved_regs = *regs;
+ memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+ regs->ret = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ __asm__ __volatile__("unimp_s");
+ return;
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr;
+
+ *regs = kcb->jprobe_saved_regs;
+ sp_addr = regs->sp;
+ memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static void __used kretprobe_trampoline_holder(void)
+{
+ __asm__ __volatile__(".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n" "nop\n");
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+
+ ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ regs->ret = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ /* By returning a non zero value, we are telling the kprobe handler
+ * that we don't want the post_handler to run
+ */
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ /* Registering the trampoline code for the kret probe */
+ return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
+}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 000000000000..cdd359352c0a
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+ *addr = (value & 0xffff0000) >> 16;
+ *(addr + 1) = (value & 0xffff);
+}
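+
+/*
+ * e.g. for the value 0x12345678 the two halfwords land in instruction memory
+ * as 0x1234 followed by 0x5678 - the "middle endian" order in which ARCompact
+ * encodes 32-bit literals within the instruction stream, hence the _ME suffix
+ * of the R_ARC_32_ME relocation handled below.
+ */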
+
+/* ARC specific section quirks - before relocation loop in generic loader
+ *
+ * For dwarf unwinding out of modules, this needs to
+ * 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
+ * -fasynchronous-unwind-tables it doesn't).
+ * 2. Since we are iterating thru sec hdr tbl anyways, make a note of
+ * the exact section index, for later use.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int i;
+
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
+ sechdrs[i].sh_flags |= SHF_ALLOC;
+ mod->arch.unw_sec_idx = i;
+ break;
+ }
+ }
+#endif
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (mod->arch.unw_info)
+ unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex, /* sec index for sym tbl */
+ unsigned int relsec, /* sec index for relo sec */
+ struct module *module)
+{
+ int i, n;
+ Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym_entry, *sym_sec;
+ Elf32_Addr relocation;
+ Elf32_Addr location;
+ Elf32_Addr sec_to_patch;
+ int relo_type;
+
+ sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+ pr_debug("\n========== Module Sym reloc ===========================\n");
+ pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("=========================================================\n");
+ pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("=========================================================\n");
+
+ /* Loop thru entries in relocation section */
+ for (i = 0; i < n; i++) {
+
+ /* This is where to make the change */
+ location = sec_to_patch + rel_entry[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+ relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+ pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation,
+ strtab + sym_entry->st_name);
+
+ /* This assumes modules are built with -mlong-calls
+ * so any branches/jumps are absolute 32 bit jmps;
+ * global data access again is abs 32 bit.
+ * Both of these are handled by the same relocation type
+ */
+ relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+ if (likely(R_ARC_32_ME == relo_type))
+ arc_write_me((unsigned short *)location, relocation);
+ else if (R_ARC_32 == relo_type)
+ *((Elf32_Addr *) location) = relocation;
+ else
+ goto relo_err;
+
+ }
+ return 0;
+
+relo_err:
+ pr_err("%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+ return -ENOEXEC;
+
+}
+
+/* Just before lift off: after sections have been relocated, we add the
+ * dwarf section to the unwinder table pool.
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not been applied by then
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw;
+ int unwsec = mod->arch.unw_sec_idx;
+
+ if (unwsec) {
+ unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+ sechdrs[unwsec].sh_size);
+ mod->arch.unw_info = unw;
+ }
+#endif
+ return 0;
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 000000000000..0a7531d99294
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+ task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+ return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as the sys-call return code.
+ * Ideally it should be done via copy_to_user().
+ * However we can cheat by the fact that some sys-calls do return
+ * absurdly high values.
+ * Since the TLS data ptr is not going to be in the range 0xFFFF_xxxx
+ * it won't be considered a sys-call error
+ * and it will be loads better than copy-to-user, which is a definite
+ * D-TLB Miss
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+ return task_thread_info(current)->thr_ptr;
+}
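+
+/*
+ * Illustrative userland view of the above (assuming the call number is
+ * exported to userspace as __NR_arc_gettls):
+ *
+ *     void *tls = (void *)syscall(__NR_arc_gettls);
+ *
+ * i.e. the "return value" is simply the pointer itself.
+ */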
+
+static inline void arch_idle(void)
+{
+ /* sleep, but enable all interrupts before committing */
+ __asm__("sleep 0x3");
+}
+
+void cpu_idle(void)
+{
+ /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+
+doze:
+ local_irq_disable();
+ if (!need_resched()) {
+ arch_idle();
+ goto doze;
+ } else {
+ local_irq_enable();
+ }
+
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+
+ schedule_preempt_disabled();
+ }
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * | ... |
+ * | ... |
+ * | unused |
+ * | |
+ * ------------------ <==== top of Stack (thread.ksp)
+ * | UNUSED 1 word|
+ * ------------------
+ * | r25 |
+ * ~ ~
+ * | --to-- | (CALLEE Regs of user mode)
+ * | r13 |
+ * ------------------
+ * | fp |
+ * | blink | @ret_from_fork
+ * ------------------
+ * | |
+ * ~ ~
+ * ~ ~
+ * | |
+ * ------------------
+ * | r12 |
+ * ~ ~
+ * | --to-- | (scratch Regs of user mode)
+ * | r0 |
+ * ------------------
+ * | UNUSED 1 word|
+ * ------------------ <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long arg,
+ struct task_struct *p)
+{
+ struct pt_regs *c_regs; /* child's pt_regs */
+ unsigned long *childksp; /* to unwind out of __switch_to() */
+ struct callee_regs *c_callee; /* child's callee regs */
+ struct callee_regs *parent_callee; /* parent's callee regs */
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Mark the specific anchors to begin with (see pic above) */
+ c_regs = task_pt_regs(p);
+ childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
+ c_callee = ((struct callee_regs *)childksp) - 1;
+
+ /*
+ * __switch_to() uses thread.ksp to start unwinding stack
+ * For kernel threads we don't need to create callee regs, the
+ * stack layout nevertheless needs to remain the same.
+ * Also, since __switch_to anyways unwinds callee regs, we use
+ * this to populate kernel thread entry-pt/args into callee regs,
+ * so that ret_from_kernel_thread() becomes simpler.
+ */
+ p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top */
+ childksp[0] = 0; /* fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(c_regs, 0, sizeof(struct pt_regs));
+
+ c_callee->r13 = arg; /* argument to kernel thread */
+ c_callee->r14 = usp; /* function */
+
+ return 0;
+ }
+
+ /*--------- User Task Only --------------*/
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+ childksp[0] = 0; /* for POP fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+ /* Copy parent's pt_regs onto child's kernel mode stack */
+ *c_regs = *regs;
+
+ if (usp)
+ c_regs->sp = usp;
+
+ c_regs->r0 = 0; /* fork returns 0 in child */
+
+ parent_callee = ((struct callee_regs *)regs) - 1;
+ *c_callee = *parent_callee;
+
+ if (unlikely(clone_flags & CLONE_SETTLS)) {
+ /*
+ * set task's userland tls data ptr from 4th arg
+ * the clone C-lib call is different from the clone sys-call
+ */
+ task_thread_info(p)->thr_ptr = regs->r3;
+ } else {
+ /* Normal fork case: set parent's TLS ptr in child */
+ task_thread_info(p)->thr_ptr =
+ task_thread_info(current)->thr_ptr;
+ }
+
+ return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ */
+void exit_thread(void)
+{
+}
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0;
+}
+
+/*
+ * API expected by scheduler code: if a thread is sleeping, where is it
+ * sleeping? What is this good for? It will always be the scheduler or
+ * ret_from_fork, so we hardcode that anyway.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ struct pt_regs *regs = task_pt_regs(t);
+ unsigned long blink = 0;
+
+ /*
+ * If the thread being queried for is not itself calling this, then it
+ * implies it is not executing, which in turn implies it is sleeping,
+ * which in turn implies it got switched OUT by the scheduler.
+ * In that case, its kernel mode blink can be reliably retrieved as per
+ * the picture above (right above pt_regs).
+ */
+ if (t != current && t->state != TASK_RUNNING)
+ blink = *((unsigned int *)regs - 1);
+
+ return blink;
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ if (x->e_machine != EM_ARCOMPACT)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ pr_err("ABI mismatch - you need newer toolchain\n");
+ force_sigsegv(SIGSEGV, current);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 000000000000..c6a81c58d0f3
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+ struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
+ return tmp;
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+ unsigned int stop_pc_val;
+
+#define REG_O_CHUNK(START, END, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, START), \
+ offsetof(struct user_regs_struct, END));
+
+#define REG_O_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
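+
+ /* REG_O_CHUNK copies out the [START, END) byte span of
+ * struct user_regs_struct and REG_O_ONE a single word, sourcing the
+ * bytes from the given kernel-side register block
+ */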
+
+ REG_O_CHUNK(scratch, callee, ptregs);
+ REG_O_CHUNK(callee, efa, cregs);
+ REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+ if (!ret) {
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
+ }
+
+ REG_O_ONE(stop_pc, &stop_pc_val);
+ }
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, FIRST), \
+ offsetof(struct user_regs_struct, NEXT));
+
+#define REG_IN_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+#define REG_IGNORE_ONE(LOC) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ /* TBD: disallow updates to STATUS32, orig_r8 etc*/
+ REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
+ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+
+ return ret;
+}
+
+enum arc_getset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset arc_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view user_arc_view = {
+ .name = UTS_MACHINE,
+ .e_machine = EM_ARCOMPACT,
+ .regsets = arc_regsets,
+ .n = ARRAY_SIZE(arc_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_arc_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EIO;
+
+ pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+{
+ if (tracehook_report_syscall_entry(regs))
+ return ULONG_MAX;
+
+ return regs->r8;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 000000000000..e227a2b1c943
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+ /* Halt the processor */
+ __asm__ __volatile__("flag 1\n");
+}
+
+void machine_restart(char *__unused)
+{
+ /* Soft reset : jump to reset vector */
+ pr_info("Put your restart handler here\n");
+ machine_halt();
+}
+
+void machine_power_off(void)
+{
+ /* FIXME :: power off ??? */
+ machine_halt();
+}
+
+void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 000000000000..dc0f968dae0a
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+#include <asm/cache.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/prom.h>
+#include <asm/unwind.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
+
+int running_on_hw = 1; /* vs. on ISS */
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
+
+struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+
+
+void __init read_arc_build_cfg_regs(void)
+{
+ struct bcr_perip uncached_space;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ FIX_PTR(cpu);
+
+ READ_BCR(AUX_IDENTITY, cpu->core);
+
+ cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
+
+ cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+ if (cpu->vec_base == 0)
+ cpu->vec_base = (unsigned int)_int_vec_base_lds;
+
+ READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+ cpu->uncached_base = uncached_space.start << 24;
+
+ cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
+ cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
+ cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
+ cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
+ cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
+ READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
+
+ cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
+ cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
+
+ /* Note that we read the CCM BCRs independent of kernel config
+ * This is to catch the cases where user doesn't know that
+ * CCMs are present in hardware build
+ */
+ {
+ struct bcr_iccm iccm;
+ struct bcr_dccm dccm;
+ struct bcr_dccm_base dccm_base;
+ unsigned int bcr_32bit_val;
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
+ if (bcr_32bit_val) {
+ iccm = *((struct bcr_iccm *)&bcr_32bit_val);
+ cpu->iccm.base_addr = iccm.base << 16;
+ cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
+ }
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
+ if (bcr_32bit_val) {
+ dccm = *((struct bcr_dccm *)&bcr_32bit_val);
+ cpu->dccm.sz = 0x800 << (dccm.sz);
+
+ READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
+ cpu->dccm.base_addr = dccm_base.addr << 8;
+ }
+ }
+
+ READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+
+ read_decode_mmu_bcr();
+ read_decode_cache_bcr();
+
+ READ_BCR(ARC_REG_FP_BCR, cpu->fp);
+ READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
+}
+
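+/* { {family id lower bound, name}, family id upper bound },
+ * matched against core->family in arc_cpu_mumbojumbo() below
+ */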
+static const struct cpuinfo_data arc_cpu_tbl[] = {
+ { {0x10, "ARCTangent A5"}, 0x1F},
+ { {0x20, "ARC 600" }, 0x2F},
+ { {0x30, "ARC 700" }, 0x33},
+ { {0x34, "ARC 700 R4.10"}, 0x34},
+ { {0x00, NULL } }
+};
+
+char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+ struct bcr_identity *core = &cpu->core;
+ const struct cpuinfo_data *tbl;
+ int be = 0;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ be = 1;
+#endif
+ FIX_PTR(cpu);
+
+ n += scnprintf(buf + n, len - n,
+ "\nARC IDENTITY\t: Family [%#02x]"
+ " Cpu-id [%#02x] Chip-id [%#4x]\n",
+ core->family, core->cpu_id,
+ core->chip_id);
+
+ for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
+ if ((core->family >= tbl->info.id) &&
+ (core->family <= tbl->up_range)) {
+ n += scnprintf(buf + n, len - n,
+ "processor\t: %s %s\n",
+ tbl->info.str,
+ be ? "[Big Endian]" : "");
+ break;
+ }
+ }
+
+ if (tbl->info.id == 0)
+ n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+
+ n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
+ (unsigned int)(arc_get_core_freq() / 1000000),
+ (unsigned int)(arc_get_core_freq() / 10000) % 100);
+
+ n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
+ (cpu->timers & 0x200) ? "TIMER1" : "",
+ (cpu->timers & 0x100) ? "TIMER0" : "");
+
+ n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
+ cpu->vec_base);
+
+ n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
+ cpu->uncached_base);
+
+ return buf;
+}
+
+static const struct id_to_str mul_type_nm[] = {
+ { 0x0, "N/A"},
+ { 0x1, "32x32 (spl Result Reg)" },
+ { 0x2, "32x32 (ANY Result Reg)" }
+};
+
+static const struct id_to_str mac_mul_nm[] = {
+ {0x0, "N/A"},
+ {0x1, "N/A"},
+ {0x2, "Dual 16 x 16"},
+ {0x3, "N/A"},
+ {0x4, "32x16"},
+ {0x5, "N/A"},
+ {0x6, "Dual 16x16 and 32x16"}
+};
+
+char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+
+ FIX_PTR(cpu);
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
+#define IS_USED(var) ((var) ? "(in-use)" : "(not used)")
+
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-Base]\t: %s %s %s %s %s %s\n",
+ IS_AVAIL2(cpu->extn.norm, "norm,"),
+ IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
+ IS_AVAIL1(cpu->extn.swap, "swap,"),
+ IS_AVAIL2(cpu->extn.minmax, "minmax,"),
+ IS_AVAIL1(cpu->extn.crc, "crc,"),
+ IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
+
+ n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
+ mul_type_nm[cpu->extn.mul].str);
+
+ n += scnprintf(buf + n, len - n, " MAC MPY: %s\n",
+ mac_mul_nm[cpu->extn_mac_mul.type].str);
+
+ if (cpu->core.family == 0x34) {
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
+ IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
+ IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
+ IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
+ }
+
+ n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
+ !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
+
+ if (cpu->dccm.sz)
+ n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
+ cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
+
+ if (cpu->iccm.sz)
+ n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
+ cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+
+ n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
+ !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
+
+ if (cpu->fp.ver)
+ n += scnprintf(buf + n, len - n, "SP [v%d] %s",
+ cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
+
+ if (cpu->dpfp.ver)
+ n += scnprintf(buf + n, len - n, "DP [v%d] %s",
+ cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
+
+ n += scnprintf(buf + n, len - n, "\n");
+
+#ifdef _ASM_GENERIC_UNISTD_H
+ n += scnprintf(buf + n, len - n,
+ "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
+#endif
+
+ return buf;
+}
+
+void __init arc_chk_ccms(void)
+{
+#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ /*
+ * DCCM can be arbitrarily placed in hardware.
+ * Make sure its placement/size matches what Linux is built with
+ */
+ if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ panic("Linux built with incorrect DCCM Base address\n");
+
+ if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+ panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+ panic("Linux built with incorrect ICCM Size\n");
+#endif
+#endif
+}
+
+/*
+ * Ensure that FP hardware and kernel config match
+ * -If hardware contains DPFP, kernel needs to save/restore FPU state
+ * across context switches
+ * -If hardware lacks DPFP, but kernel configured to save FPU state then
+ * kernel trying to access non-existent DPFP regs will crash
+ *
+ * We only check for Dbl precision Floating Point, because only DPFP
+ * hardware has dedicated regs which need to be saved/restored on ctx-sw
+ * (Single Precision uses core regs), thus kernel is kind of oblivious to it
+ */
+void __init arc_chk_fpu(void)
+{
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+ if (cpu->dpfp.ver) {
+#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
+ pr_warn("DPFP support broken in this kernel...\n");
+#endif
+ } else {
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ panic("H/w lacks DPFP support, apps won't work\n");
+#endif
+ }
+}
+
+/*
+ * Initialize and setup the processor core
+ * This is called by all the CPUs thus should not do special case stuff
+ * such as only for boot CPU etc
+ */
+
+void __init setup_processor(void)
+{
+ char str[512];
+ int cpu_id = smp_processor_id();
+
+ read_arc_build_cfg_regs();
+ arc_init_IRQ();
+
+ printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+
+ arc_mmu_init();
+ arc_cache_init();
+ arc_chk_ccms();
+
+ printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
+
+#ifdef CONFIG_SMP
+ printk(arc_platform_smp_cpuinfo());
+#endif
+
+ arc_chk_fpu();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_CMDLINE_UBOOT
+ /* Make sure that a whitespace is inserted before */
+ strlcat(command_line, " ", sizeof(command_line));
+#endif
+ /*
+ * Append .config cmdline to base command line, which might already
+ * contain u-boot "bootargs" (handled by head.S, if so configured)
+ */
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+ /* To force early parsing of things like mem=xxx */
+ parse_early_param();
+
+ /* Platform/board specific: e.g. early console registration */
+ if (machine_desc->init_early)
+ machine_desc->init_early();
+
+ setup_processor();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ setup_arch_memory();
+
+ /* copy flat DT out of .init and then unflatten it */
+ copy_devtree();
+ unflatten_device_tree();
+
+ /* Can be an issue if someone passes the cmd line arg "ro",
+ * but that is unlikely so keeping it as is
+ */
+ root_mountflags &= ~MS_RDONLY;
+
+ console_verbose();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+ arc_unwind_init();
+ arc_unwind_setup();
+}
+
+static int __init customize_machine(void)
+{
+ /* Add platform devices */
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
+
+ return 0;
+}
+arch_initcall(customize_machine);
+
+static int __init init_late_machine(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+
+ return 0;
+}
+late_initcall(init_late_machine);
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
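+
+/*
+ * e.g. cpu_to_ptr(1) yields the cookie (void *)0xFFFF0001 and ptr_to_cpu()
+ * recovers 1 from it; cpu 0 becomes the non-NULL cookie 0xFFFF0000.
+ */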
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *str;
+ int cpu_id = ptr_to_cpu(v);
+
+ str = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!str)
+ goto done;
+
+ seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+#ifdef CONFIG_SMP
+ seq_printf(m, arc_platform_smp_cpuinfo());
+#endif
+
+ free_page((unsigned long)str);
+done:
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * Callback returns cpu-id to iterator for show routine, NULL to stop.
+ * However since NULL is also a valid cpu-id (0), we use a round-about
+ * way to pass it w/o having to kmalloc/free a 2 byte string.
+ * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
+ */
+ return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 000000000000..ee6ef2f60a28
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,360 @@
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() supports TIF_RESTORE_SIGMASK
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard arch implementation
+ * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ * the job to do_signal()
+ *
+ * vineetg: July 2009
+ * -Modified Code to support the uClibc provided userland sigreturn stub
+ * to avoid kernel synthesizing it on user stack at runtime, costing TLB
+ * probes and Cache line flushes.
+ *
+ * vineetg: July 2009
+ * -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
+ * is done in block copy rather than one word at a time.
+ * This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ * - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ * -ViXS were still seeing crashes when using insmod to load drivers.
+ * It turned out that the code to change Execute permissions for TLB entries
+ * of user was not guarded for interrupts (mod_tlb_permission)
+ * This was causing TLB entries to be overwritten on unrelated indexes
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ * -Exception happens in Delay slot of a JMP, and before user space resumes,
+ * Signal is delivered (Ctrl + C) => SIGINT.
+ * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ * to run, but doesn't clear the Delay slot bit from status32. As a result,
+ * on resuming user mode, signal handler branches off to BTA of orig JMP
+ * -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#define MAGIC_SIGALTSTK 0x07302004
+ unsigned int sigret_magic;
+};
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err;
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (!err)
+ set_current_blocked(&set);
+
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ return err;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+ if (MAGIC_SIGALTSTK == magic)
+ return 1;
+ else
+ return 0;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+ int err;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* Since we stacked the signal on a word boundary,
+ * 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 3)
+ goto badframe;
+
+ sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto badframe;
+
+ err = restore_usr_regs(regs, sf);
+ err |= __get_user(magic, &sf->sigret_magic);
+ if (err)
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+ return regs->r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = regs->sp;
+ void __user *frame;
+
+ /* This is the X/Open sanctioned signal stack switching */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* No matter what happens, 'sp' must be word
+ * aligned otherwise nasty things could happen
+ */
+
+ /* ATPCS B01 mandates 8-byte alignment */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /* Check that we can actually write to the signal frame */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+/*
+ * translate the signal
+ */
+static inline int map_sig(int sig)
+{
+ struct thread_info *thread = current_thread_info();
+ if (thread->exec_domain && thread->exec_domain->signal_invmap
+ && sig < 32)
+ sig = thread->exec_domain->signal_invmap[sig];
+ return sig;
+}
+
+static int
+setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic = 0;
+ int err = 0;
+
+ sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
+ if (!sf)
+ return 1;
+
+ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+ * #3: struct ucontext (completely populated)
+ */
+ if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
+ err |= copy_siginfo_to_user(&sf->info, info);
+ err |= __put_user(0, &sf->uc.uc_flags);
+ err |= __put_user(NULL, &sf->uc.uc_link);
+ err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+ /* setup args 2 and 3 for user mode handler */
+ regs->r1 = (unsigned long)&sf->info;
+ regs->r2 = (unsigned long)&sf->uc;
+
+ /*
+ * small optimization to avoid unconditionally calling do_sigaltstack
+ * in sigreturn path, now that we only have rt_sigreturn
+ */
+ magic = MAGIC_SIGALTSTK;
+ }
+
+ /*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+ * inspected by userland: but are they allowed to fiddle with it?)
+ */
+ err |= stash_usr_regs(sf, regs, set);
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+
+ /* #1 arg to the user Signal handler */
+ regs->r0 = map_sig(signo);
+
+ /* setup PC of user space signal handler */
+ regs->ret = (unsigned long)ka->sa.sa_handler;
+
+ /*
+ * handler returns using sigreturn stub provided already by userspace
+ */
+ BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+ regs->blink = (unsigned long)ka->sa.sa_restorer;
+
+ /* User Stack for signal handler will be above the frame just carved */
+ regs->sp = (unsigned long)sf;
+
+ /*
+ * Bug 94183, Clear the DE bit, so that when signal handler
+ * starts to run, it doesn't use BTA
+ */
+ regs->status32 &= ~STATUS_DE_MASK;
+ regs->status32 |= STATUS_L_MASK;
+
+ return err;
+}
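+
+/*
+ * Summary of the user register state setup_rt_frame() leaves behind
+ * (derived from the code above, for readability): r0 = translated
+ * signal number, r1/r2 = &sf->info / &sf->uc (only for SA_SIGINFO),
+ * ret = handler address, blink = sa_restorer stub, sp = signal frame,
+ * and status32 has DE cleared so the handler doesn't branch to BTA.
+ */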
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /*
+ * ERESTARTNOHAND means that the syscall should
+ * only be restarted if there was no handler for
+ * the signal, and since we only get here if there
+ * is a handler, we don't restart
+ */
+ regs->r0 = -EINTR; /* ERESTART_xxx is internal */
+ break;
+
+ case -ERESTARTSYS:
+ /*
+ * ERESTARTSYS means to restart the syscall if
+ * there is no handler or the handler was
+ * registered with SA_RESTART
+ */
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+
+ case -ERESTARTNOINTR:
+ /*
+ * ERESTARTNOINTR means that the syscall should
+ * be called again after the signal handler returns.
+ * Setup reg state just as it was before doing the trap
+ * r0 has been clobbered with sys call ret code thus it
+ * needs to be reloaded with orig first arg to syscall
+ * in orig_r0. Rest of relevant reg-file:
+ * r8 (syscall num) and (r1 - r7) will be reset to
+ * their orig user space value when we ret from kernel
+ */
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ret)
+ force_sigsegv(sig, current);
+ else
+ signal_delivered(sig, info, ka, regs, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+ int restart_scall;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+ if (signr > 0) {
+ if (restart_scall) {
+ arc_restart_syscall(&ka, regs);
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+ handle_signal(signr, &ka, &info, regs);
+ return;
+ }
+
+ if (restart_scall) {
+ /* No handler for syscall: restart it */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r8 = __NR_restart_syscall;
+ regs->ret -= 4;
+ }
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+
+ /* If there's no signal to deliver, restore the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * ASM glue gaurantees that this is only called when returning to
+ * user mode
+ */
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 000000000000..3af3e06dcf02
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ * -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ * -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/mach_desc.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+struct plat_smp_ops plat_smp_ops;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
+
+/* called from init ( ) => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non Master CPU needs to wait for Master to kick start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform specific support allowing Master to directly set
+ * this CPU's PC (to be @first_lines_of_secondary()) and kick start it.
+ *
+ * In the absence of such h/w assist, platforms can override this function
+ * - make this function busy-spin on a token, eventually set by Master
+ * (from arc_platform_smp_wakeup_cpu())
+ * - Once token is available, jump to @first_lines_of_secondary
+ * (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ * If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+ /*
+ * As a hack for debugging - since debugger will single-step over the
+ * FLAG insn - wrap the halt itself in a self loop
+ */
+ __asm__ __volatile__(
+ "1: \n"
+ " flag 1 \n"
+ " b 1b \n");
+}
+
+const char *arc_platform_smp_cpuinfo(void)
+{
+ return plat_smp_ops.info;
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* MMU, Caches, Vector Table, Interrupts etc */
+ setup_processor();
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+
+ arc_local_timer_setup(cpu);
+
+ local_irq_enable();
+ preempt_disable();
+ cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, Secondary Processor is "HALT"ed:
+ * -It booted, but was halted in head.S
+ * -It was configured to halt-on-reset
+ * So we need to wake it up.
+ *
+ * Essential requirements being where to run from (PC) and stack (SP)
+*/
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long wait_till;
+
+ secondary_idle_tsk = idle;
+
+ pr_info("Idle Task [%d] %p", cpu, idle);
+ pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+ if (plat_smp_ops.cpu_kick)
+ plat_smp_ops.cpu_kick(cpu,
+ (unsigned long)first_lines_of_secondary);
+
+ /* wait for 1 sec after kicking the secondary */
+ wait_till = jiffies + HZ;
+ while (time_before(jiffies, wait_till)) {
+ if (cpu_online(cpu))
+ break;
+ }
+
+ if (!cpu_online(cpu)) {
+ pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
+ return -1;
+ }
+
+ secondary_idle_tsk = NULL;
+
+ return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/* Inter Processor Interrupt Handling */
+/*****************************************************************************/
+
+/*
+ * structures for inter-processor calls
+ * A Collection of single bit ipi messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+ IPI_NOP = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP
+};
+
+struct ipi_data {
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
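+
+/*
+ * Illustrative note: ipi_send_msg() below sets one bit per message in
+ * ipi_data.bits, so e.g. a pending IPI_RESCHEDULE (bit 1) plus
+ * IPI_CALL_FUNC (bit 2) shows up as bits == 0x6; __do_IPI() then walks
+ * the set bits starting from bit 1, which conveniently skips IPI_NOP.
+ */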
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, callmap) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ set_bit(msg, &ipi->bits);
+ }
+
+ /* Call the platform specific cross-CPU call function */
+ if (plat_smp_ops.ipi_send)
+ plat_smp_ops.ipi_send((void *)callmap);
+
+ local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ machine_halt();
+}
+
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+ unsigned long msg = 0;
+
+ do {
+ msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+ switch (msg) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+}
+
+/*
+ * arch-common ISR to handle for inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned long ops;
+
+ if (plat_smp_ops.ipi_clear)
+ plat_smp_ops.ipi_clear(cpu, irq);
+
+ /*
+ * XXX: is this loop really needed?
+ * And do we need to move ipi_clear inside?
+ */
+ while ((ops = xchg(&ipi->bits, 0)) != 0)
+ __do_IPI(&ops, ipi, cpu);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+ int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+ return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 000000000000..a63ff842564b
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,254 @@
+/*
+ * stacktrace.c : stacktracing APIs needed by rest of kernel
+ * (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: aug 2009
+ * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ * for displaying task's kernel mode call stack in /proc/<pid>/stack
+ * -Iterator based approach to have single copy of unwinding core and APIs
+ * needing unwinding, implement the logic in iterator regarding:
+ * = which frame onwards to start capture
+ * = which frame to stop capturing (wchan)
+ * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ * vineetg: March 2009
+ * -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *
+ * rajeshwarr: 2008
+ * -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ * Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static void seed_unwind_frame_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
+{
+ if (tsk == NULL && regs == NULL) {
+ unsigned long fp, sp, blink, ret;
+ frame_info->task = current;
+
+ __asm__ __volatile__(
+ "mov %0,r27\n\t"
+ "mov %1,r28\n\t"
+ "mov %2,r31\n\t"
+ "mov %3,r63\n\t"
+ : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+ );
+
+ frame_info->regs.r27 = fp;
+ frame_info->regs.r28 = sp;
+ frame_info->regs.r31 = blink;
+ frame_info->regs.r63 = ret;
+ frame_info->call_frame = 0;
+ } else if (regs == NULL) {
+
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = KSTK_FP(tsk);
+ frame_info->regs.r28 = KSTK_ESP(tsk);
+ frame_info->regs.r31 = KSTK_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+ /* In the prologue of __switch_to, first FP is saved on stack
+ * and then SP is copied to FP. Dwarf assumes cfa as FP based
+ * but we didn't save FP. The value retrieved above is FP's
+ * state in previous frame.
+ * As a workaround for this, we unwind from the start of __switch_to
+ * and adjust SP accordingly. The other limitation is that dwarf
+ * rules are not generated for the inline assembly code in the
+ * __switch_to macro.
+ */
+ frame_info->regs.r27 = 0;
+ frame_info->regs.r28 += 64;
+ frame_info->call_frame = 0;
+
+ } else {
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ }
+}
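+
+/*
+ * For clarity: the three seeding cases above are (a) tsk == regs == NULL:
+ * unwind the current context, registers read via inline asm; (b) regs ==
+ * NULL: unwind a sleeping task from its __switch_to state; (c) regs
+ * given: unwind from an exception/interrupt frame.
+ */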
+
+#endif
+
+static noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int ret = 0;
+ unsigned int address;
+ struct unwind_frame_info frame_info;
+
+ seed_unwind_frame_info(tsk, regs, &frame_info);
+
+ while (1) {
+ address = UNW_PC(&frame_info);
+
+ if (address && __kernel_text_address(address)) {
+ if (consumer_fn(address, arg) == -1)
+ break;
+ }
+
+ ret = arc_unwind(&frame_info);
+
+ if (ret == 0) {
+ frame_info.regs.r63 = frame_info.regs.r31;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return address; /* return the last address it saw */
+#else
+ /* On ARC, only the Dwarf based unwinder works. fp based backtracing is
+ * not possible (-fno-omit-frame-pointer) because of the way the function
+ * prologue is setup (callee regs saved first and then fp set, not the
+ * other way around)
+ */
+ pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *unused)
+{
+ __print_symbol(" %s\n", address);
+ return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (in_sched_functions(address))
+ return 0;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+ if (in_sched_functions(address))
+ return 0;
+
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+{
+ pr_info("\nStack Trace:\n");
+ arc_unwind_core(tsk, regs, __print_sym, NULL);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ show_stacktrace(tsk, NULL);
+}
+
+/* Expected by Rest of kernel code */
+void dump_stack(void)
+{
+ show_stacktrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
+ * Of course just returning schedule( ) would be pointless so unwind until
+ * the function is not in scheduler code
+ */
+unsigned int get_wchan(struct task_struct *tsk)
+{
+ return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ arc_unwind_core(current, NULL, __collect_all, trace);
+}
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 000000000000..f6bdd07583f3
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone sys_clone_wrapper
+#define sys_fork sys_fork_wrapper
+#define sys_vfork sys_vfork_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[NR_syscalls] = {
+ [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644
index 000000000000..f13f72807aa5
--- /dev/null
+++ b/arch/arc/kernel/time.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2011
+ * -sched_clock( ) no longer jiffies based. Uses the same clocksource
+ * as gtod
+ *
+ * Rajeshwarr/Vineetg: Mar 2008
+ * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
+ * for arch independent gettimeofday()
+ * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
+ *
+ * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
+ * Each can be programmed to go from @count to @limit and optionally
+ * interrupt when that happens.
+ * A write to Control Register clears the Interrupt
+ *
+ * We've designated TIMER0 for events (clockevents)
+ * while TIMER1 for free running (clocksource)
+ *
+ * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define ARC_TIMER_MAX 0xFFFFFFFF
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_RTSC
+
+int __cpuinit arc_counter_setup(void)
+{
+ /* RTSC insn taps into cpu clk, needs no setup */
+
+ /* For SMP, only allowed if cross-core-sync, hence usable as cs */
+ return 1;
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ struct { u32 high, low; };
+#else
+ struct { u32 low, high; };
+#endif
+ cycle_t full;
+ } stamp;
+
+ flags = arch_local_irq_save();
+
+ __asm__ __volatile(
+ " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
+ " rtsc %0, 0 \n"
+ " mov %1, 0 \n"
+ : "=r" (stamp.low), "=r" (stamp.high));
+
+ arch_local_irq_restore(flags);
+
+ return stamp.full;
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC RTSC",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTSC */
+
+static bool is_usable_as_clocksource(void)
+{
+#ifdef CONFIG_SMP
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/*
+ * set 32bit TIMER1 to keep counting monotonically and wrap around
+ */
+int __cpuinit arc_counter_setup(void)
+{
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return is_usable_as_clocksource();
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#endif
+
+/********** Clock Event Device *********/
+
+/*
+ * Arm the timer to interrupt after @limit cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ */
+static void arc_timer_event_setup(unsigned int limit)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+/*
+ * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
+ * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
+ * -Rearming is done by setting the IE bit
+ *
+ * Small optimisation: Normal code would have been
+ * if (irq_reenable)
+ * CTRL_REG = (IE | NH);
+ * else
+ * CTRL_REG = NH;
+ * However since IE is BIT0 we can fold the branch
+ */
+static void arc_timer_event_ack(unsigned int irq_reenable)
+{
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+}
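+
+/*
+ * Worked example of the fold above (assuming TIMER_CTRL_IE is BIT(0), as
+ * the comment states): irq_reenable == 1 gives (IE | NH) i.e. re-arm,
+ * while irq_reenable == 0 gives just NH i.e. ack-only for oneshot mode.
+ */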
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static void arc_clkevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .mode = CLOCK_EVT_MODE_UNUSED,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_mode = arc_clkevent_set_mode,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+
+ arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
+ clk->event_handler(clk);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction arc_timer_irq = {
+ .name = "Timer0 (clock-evt-dev)",
+ .flags = IRQF_TIMER | IRQF_PERCPU,
+ .handler = timer_irq_handler,
+};
+
+/*
+ * Setup the local event timer for @cpu
+ * N.B. weak so that some exotic ARC SoCs can completely override it
+ */
+void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
+
+ clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
+
+ clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+
+ /*
+ * setup the per-cpu timer IRQ handler - for all cpus
+ * For non boot CPU explicitly unmask at intc
+ * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
+ */
+ if (!cpu)
+ setup_irq(TIMER0_IRQ, &arc_timer_irq);
+ else
+ arch_unmask_irq(TIMER0_IRQ);
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ *
+ * -Sets up h/w timers as applicable on boot cpu
+ * -Also sets up any global state needed for timer subsystem:
+ * - for "counting" timer, registers a clocksource, usable across CPUs
+ * (provided that underlying counter h/w is synchronized across cores)
+ * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
+ */
+void __init time_init(void)
+{
+ /*
+ * sets up the timekeeping free-flowing counter which also returns
+ * whether the counter is usable as clocksource
+ */
+ if (arc_counter_setup())
+ /*
+ * CLK up to 4.29 GHz can be safely represented in 32 bits
+ * because Max 32 bit number is 4,294,967,295
+ */
+ clocksource_register_hz(&arc_counter, arc_get_core_freq());
+
+ /* sets up the periodic event timer */
+ arc_local_timer_setup(smp_processor_id());
+
+ if (machine_desc->init_time)
+ machine_desc->init_time();
+}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 000000000000..7496995371e8
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,170 @@
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/kprobes.h>
+#include <asm/unaligned.h>
+#include <asm/kgdb.h>
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg)
+{
+ show_kernel_fault_diag(str, regs, address, cause_reg);
+
+ /* DEAD END */
+ __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ * -for user faults enqueues requested signal
+ * -for kernel, check if due to copy_(to|from)_user, otherwise die()
+ */
+static noinline int handle_exception(unsigned long cause, char *str,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ if (user_mode(regs)) {
+ struct task_struct *tsk = current;
+
+ tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+ tsk->thread.cause_code = cause;
+
+ force_sig_info(info->si_signo, info, tsk);
+
+ } else {
+ /* If not due to copy_(to|from)_user, we are doomed */
+ if (fixup_exception(regs))
+ return 0;
+
+ die(str, regs, (unsigned long)info->si_addr, cause);
+ }
+
+ return 1;
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
+{ \
+ siginfo_t info = { \
+ .si_signo = signr, \
+ .si_errno = 0, \
+ .si_code = sicode, \
+ .si_addr = (void __user *)address, \
+ }; \
+ return handle_exception(cause, str, regs, &info);\
+}
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+int do_misaligned_access(unsigned long cause, unsigned long address,
+ struct pt_regs *regs, struct callee_regs *cregs)
+{
+ if (misaligned_fixup(address, regs, cause, cregs) != 0) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ return handle_exception(cause, "Misaligned Access", regs,
+ &info);
+ }
+ return 0;
+}
+
+#else
+DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
+#endif
+
+/*
+ * Entry point for misc errors such as Nested Exceptions
+ * -Duplicate TLB entry is handled separately though
+ */
+void do_machine_check_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ die("Machine Check Exception", regs, address, cause);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is the same family as the TRAP0/SWI insn (they use the same vector).
+ * The only difference being that the SWI insn takes no operand, while TRAP_S
+ * does, which reflects in the ECR Reg as an 8 bit param.
+ * Thus TRAP_S <n> can be used for a specific purpose
+ * -1 used for software breakpointing (gdb)
+ * -2 used by kprobes
+ */
+void do_non_swi_trap(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ unsigned int param = cause & 0xff;
+
+ switch (param) {
+ case 1:
+ trap_is_brkpt(cause, address, regs);
+ break;
+
+ case 2:
+ trap_is_kprobe(param, address, regs);
+ break;
+
+ case 3:
+ case 4:
+ kgdb_trap(regs, param);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ * -For a corner case, ARC kprobes implementation resorts to using
+ * this exception, hence the check
+ */
+void do_insterror_or_kprobe(unsigned long cause,
+ unsigned long address,
+ struct pt_regs *regs)
+{
+ /* Check if this exception is caused by kprobes */
+ if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
+ cause, SIGILL) == NOTIFY_STOP)
+ return;
+
+ insterror_is_error(cause, address, regs);
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 000000000000..7c10873c311f
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <asm/arcregs.h>
+
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ * -Prints 3 regs per line and a CR.
+ * -To continue, callee regs right after scratch, special handling of CR
+ */
+static noinline void print_reg_file(long *reg_rev, int start_num)
+{
+ unsigned int i;
+ char buf[512];
+ int n = 0, len = sizeof(buf);
+
+ /* weird loop because pt_regs regs rev r12..r0, r25..r13 */
+ for (i = start_num; i < start_num + 13; i++) {
+ n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
+ i, (unsigned long)*reg_rev);
+
+ if (((i + 1) % 3) == 0)
+ n += scnprintf(buf + n, len - n, "\n");
+
+ reg_rev--;
+ }
+
+ if (start_num != 0)
+ n += scnprintf(buf + n, len - n, "\n\n");
+
+ pr_info("%s", buf);
+}
+
+static void show_callee_regs(struct callee_regs *cregs)
+{
+ print_reg_file(&(cregs->r13), 13);
+}
+
+void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+{
+ struct path path;
+ char *path_nm = NULL;
+ struct mm_struct *mm;
+ struct file *exe_file;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ mmput(mm);
+
+ if (exe_file) {
+ path = exe_file->f_path;
+ path_get(&exe_file->f_path);
+ fput(exe_file);
+ path_nm = d_path(&path, buf, 255);
+ path_put(&path);
+ }
+
+done:
+ pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+}
+EXPORT_SYMBOL(print_task_path_n_nm);
+
+static void show_faulting_vma(unsigned long address, char *buf)
+{
+ struct vm_area_struct *vma;
+ struct inode *inode;
+ unsigned long ino = 0;
+ dev_t dev = 0;
+ char *nm = buf;
+
+ vma = find_vma(current->active_mm, address);
+
+ /* check against the find_vma( ) behaviour which returns the next VMA
+ * if the containing VMA is not found
+ */
+ if (vma && (vma->vm_start <= address)) {
+ struct file *file = vma->vm_file;
+ if (file) {
+ struct path *path = &file->f_path;
+ nm = d_path(path, buf, PAGE_SIZE - 1);
+ inode = vma->vm_file->f_path.dentry->d_inode;
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ }
+ pr_info(" @off 0x%lx in [%s]\n"
+ " VMA: 0x%08lx to 0x%08lx\n\n",
+ address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
+ } else
+ pr_info(" @No matching VMA found\n");
+}
+
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+ unsigned int vec, cause_code, cause_reg;
+ unsigned long address;
+
+ cause_reg = current->thread.cause_code;
+ pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+
+ /* For Data fault, this is data address not instruction addr */
+ address = current->thread.fault_address;
+
+ vec = cause_reg >> 16;
+ cause_code = (cause_reg >> 8) & 0xFF;
+
+ /* For DTLB Miss or ProtV, display the memory involved too */
+ if (vec == ECR_V_DTLB_MISS) {
+ pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+ (cause_code == 0x01) ? "Read From" :
+ ((cause_code == 0x02) ? "Write to" : "EX"),
+ address, regs->ret);
+ } else if (vec == ECR_V_ITLB_MISS) {
+ pr_cont("Insn could not be fetched\n");
+ } else if (vec == ECR_V_MACH_CHK) {
+ pr_cont("%s\n", (cause_code == 0x0) ?
+ "Double Fault" : "Other Fatal Err");
+
+ } else if (vec == ECR_V_PROTV) {
+ if (cause_code == ECR_C_PROTV_INST_FETCH)
+ pr_cont("Execute from Non-exec Page\n");
+ else if (cause_code == ECR_C_PROTV_LOAD)
+ pr_cont("Read from Non-readable Page\n");
+ else if (cause_code == ECR_C_PROTV_STORE)
+ pr_cont("Write to Non-writable Page\n");
+ else if (cause_code == ECR_C_PROTV_XCHG)
+ pr_cont("Data exchange protection violation\n");
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+ } else if (vec == ECR_V_INSN_ERR) {
+ pr_cont("Illegal Insn\n");
+ } else {
+ pr_cont("Check Programmer's Manual\n");
+ }
+}
+
+/************************************************************************
+ * API called by rest of kernel
+ ***********************************************************************/
+
+void show_regs(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct callee_regs *cregs;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!buf)
+ return;
+
+ print_task_path_n_nm(tsk, buf);
+
+ if (current->thread.cause_code)
+ show_ecr_verbose(regs);
+
+ pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
+ pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+
+ show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+
+ /* can't use print_vma_addr() yet as it doesn't check for
+ * non-inclusive vma
+ */
+
+ /* print special regs */
+ pr_info("status32: 0x%08lx\n", regs->status32);
+ pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
+ pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
+ regs->bta, regs->blink);
+ pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
+
+ /* print regs->r0 thru regs->r12
+ * Sequential printing was generating horrible code
+ */
+ print_reg_file(&(regs->r0), 0);
+
+ /* If Callee regs were saved, display them too */
+ cregs = (struct callee_regs *)current->thread.callee_reg;
+ if (cregs)
+ show_callee_regs(cregs);
+
+ free_page((unsigned long)buf);
+}
+
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg)
+{
+ current->thread.fault_address = address;
+ current->thread.cause_code = cause_reg;
+
+ /* Caller and Callee regs */
+ show_regs(regs);
+
+ /* Show stack trace if this Fatality happened in kernel mode */
+ if (!user_mode(regs))
+ show_stacktrace(current, regs);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+static struct dentry *test_dentry;
+static struct dentry *test_dir;
+static struct dentry *test_u32_dentry;
+
+static u32 clr_on_read = 1;
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+u32 numitlb, numdtlb, num_pte_not_present;
+
+static int fill_display_data(char *kbuf)
+{
+ size_t num = 0;
+ num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
+ num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
+ num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
+
+ if (clr_on_read)
+ numitlb = numdtlb = num_pte_not_present = 0;
+
+ return num;
+}
+
+static int tlb_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)__get_free_page(GFP_KERNEL);
+ return 0;
+}
+
+/* called on user read(): display the counters */
+static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
+{
+ size_t num;
+ char *kbuf = (char *)file->private_data;
+
+ /* All of the data can be shoved in one iteration */
+ if (*offset != 0)
+ return 0;
+
+ num = fill_display_data(kbuf);
+
+ /* simple_read_from_buffer() is a helper to copy to user space
+ It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
+ @3 (offset) into the user space address starting at @1 (user_buf).
+ @5 (len) is max size of user buffer
+ */
+ return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
+}
+
+/* called on user write : clears the counters */
+static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
+{
+ numitlb = numdtlb = num_pte_not_present = 0;
+ return length;
+}
+
+static int tlb_stats_close(struct inode *inode, struct file *file)
+{
+ free_page((unsigned long)(file->private_data));
+ return 0;
+}
+
+static const struct file_operations tlb_stats_file_ops = {
+ .read = tlb_stats_output,
+ .write = tlb_stats_clear,
+ .open = tlb_stats_open,
+ .release = tlb_stats_close
+};
+#endif
+
+static int __init arc_debugfs_init(void)
+{
+ test_dir = debugfs_create_dir("arc", NULL);
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
+ &tlb_stats_file_ops);
+#endif
+
+ test_u32_dentry =
+ debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
+
+ return 0;
+}
+
+module_init(arc_debugfs_init);
+
+static void __exit arc_debugfs_exit(void)
+{
+ debugfs_remove(test_u32_dentry);
+ debugfs_remove(test_dentry);
+ debugfs_remove(test_dir);
+}
+module_exit(arc_debugfs_exit);
+
+#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 000000000000..4cd81633febd
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg : May 2011
+ * -Adapted (from .26 to .35)
+ * -original contribution by Tim.yao@amlogic.com
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+#define __get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.ab %1, [%2, 1]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, 1\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v ; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << 0; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
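+
+/*
+ * Illustrative example: for bytes b0..b3 at @addr (b0 at the lowest
+ * address), the macro above assembles val = b0 | b1 << 8 | b2 << 16 |
+ * b3 << 24, i.e. a little-endian 32-bit load done one byte at a time,
+ * with any faulting ldb routed to the fixup that sets err.
+ */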
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb %1, [%2]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: mov %0, 1\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ __asm__( \
+ \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "3: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "4: stb %1, [%2]\n" \
+ "5:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "6: mov %0, 1\n" \
+ " b 5b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
+
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ int val;
+
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+ if (state->aa == 2)
+ state->src2 = 0;
+ }
+
+ if (state->zz == 0) {
+ get32_unaligned_check(val, state->src1 + state->src2);
+ } else {
+ get16_unaligned_check(val, state->src1 + state->src2);
+
+ if (state->x)
+ val = (val << 16) >> 16;
+ }
+
+ if (state->pref == 0)
+ set_reg(state->dest, val, regs, cregs);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+ if (state->aa == 3)
+ state->src3 = 0;
+ } else if (state->aa == 3) {
+ if (state->zz == 2) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+ regs, cregs);
+ } else if (!state->zz) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+ regs, cregs);
+ } else {
+ goto fault;
+ }
+ }
+
+ /* write fix-up */
+ if (!state->zz)
+ put32_unaligned_check(state->src1, state->src2 + state->src3);
+ else
+ put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ struct disasm_state state;
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+ if (!user_mode(regs) || !unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
+ pr_warn_once("%s(%d) made unaligned access which was emulated"
+ " by kernel assist\n. This can degrade application"
+ " performance significantly\n. To enable further"
+ " logging of such instances, please \n"
+ " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+ get_task_comm(buf, current), task_pid_nr(current));
+ } else {
+ /* Add rate limiting if it gets down to it */
+ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+ get_task_comm(buf, current), task_pid_nr(current),
+ address, regs->ret);
+
+ }
+
+ disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* ldb/stb should not have unaligned exception */
+ if ((state.zz == 1) || (state.di))
+ goto fault;
+
+ if (!state.write)
+ fixup_load(&state, regs, cregs);
+ else
+ fixup_store(&state, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ if (delay_mode(regs)) {
+ regs->ret = regs->bta;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+ }
+
+ return 0;
+
+fault:
+ pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+ state.words[0], address);
+
+ return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 000000000000..a8d02223da44
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...) \
+do { \
+ if (dbg_unw) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0);
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+UNW_REGISTER_INFO};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end, signed ptrType);
+
+static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+ const void *table_start, unsigned long table_size,
+ const u8 *header_start, unsigned long header_size)
+{
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+
+ table->hdrsz = header_size;
+ smp_wmb();
+ table->header = header_start;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init arc_unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind,
+ NULL, 0);
+ /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+ const struct eh_frame_hdr_table_entry *e1 = p1;
+ const struct eh_frame_hdr_table_entry *e2 = p2;
+
+ return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+ struct eh_frame_hdr_table_entry *e1 = p1;
+ struct eh_frame_hdr_table_entry *e2 = p2;
+ unsigned long v;
+
+ v = e1->start;
+ e1->start = e2->start;
+ e2->start = v;
+ v = e1->fde;
+ e1->fde = e2->fde;
+ e2->fde = v;
+}
+
+static void __init setup_unwind_table(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
+{
+ const u8 *ptr;
+ unsigned long tableSize = table->size, hdrSize;
+ unsigned n;
+ const u32 *fde;
+ struct {
+ u8 version;
+ u8 eh_frame_ptr_enc;
+ u8 fde_count_enc;
+ u8 table_enc;
+ unsigned long eh_frame_ptr;
+ unsigned int fde_count;
+ struct eh_frame_hdr_table_entry table[];
+ } __attribute__ ((__packed__)) *header;
+
+ if (table->header)
+ return;
+
+ if (table->hdrsz)
+ pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+ table->name);
+
+ if (tableSize & (sizeof(*fde) - 1))
+ return;
+
+ for (fde = table->address, n = 0;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = cie_for_fde(fde, table);
+ signed ptrType;
+
+ if (cie == &not_fde)
+ continue;
+ if (cie == NULL || cie == &bad_cie)
+ return;
+ ptrType = fde_pointer_type(cie);
+ if (ptrType < 0)
+ return;
+
+ ptr = (const u8 *)(fde + 2);
+ if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+ ptrType)) {
+ /* FIXME_Rajesh We have 4 instances of null addresses
+ * instead of the initial loc addr
+ * return;
+ */
+ }
+ ++n;
+ }
+
+ if (tableSize || !n)
+ return;
+
+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ + 2 * n * sizeof(unsigned long);
+ header = alloc(hdrSize);
+ if (!header)
+ return;
+ header->version = 1;
+ header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+ header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+ % __alignof(typeof(header->fde_count)));
+ header->fde_count = n;
+
+ BUILD_BUG_ON(offsetof(typeof(*header), table)
+ % __alignof(typeof(*header->table)));
+ for (fde = table->address, tableSize = table->size, n = 0;
+ tableSize;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
+ const u32 *cie = (const u32 *)(fde[1]);
+
+ if (fde[1] == 0xffffffff)
+ continue; /* this is a CIE */
+ ptr = (const u8 *)(fde + 2);
+ header->table[n].start = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde,
+ fde_pointer_type(cie));
+ header->table[n].fde = (unsigned long)fde;
+ ++n;
+ }
+ WARN_ON(n != header->fde_count);
+
+ sort(header->table,
+ n,
+ sizeof(*header->table),
+ cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+ table->hdrsz = hdrSize;
+ smp_wmb();
+ table->header = (const void *)header;
+}
+
+static void *__init balloc(unsigned long sz)
+{
+ return __alloc_bootmem_nopanic(sz,
+ sizeof(unsigned int),
+ __pa(MAX_DMA_ADDRESS));
+}
+
+void __init arc_unwind_setup(void)
+{
+ setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ init_unwind_table(table, module->name,
+ module->module_core, module->core_size,
+ module->module_init, module->init_size,
+ table_start, table_size,
+ NULL, 0);
+
+#ifdef UNWIND_DEBUG
+ unw_debug("Table added for [%s] %lx %lx\n",
+ module->name, table->core.pc, table->core.range);
+#endif
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info {
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table;
+ prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+
+ unlink_table(&info); /* XXX: SMP */
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
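+
+/*
+ * ULEB128 example: the byte sequence 0xe5 0x8e 0x26 decodes to
+ * 0x65 + (0x0e << 7) + (0x26 << 14) = 624485.
+ */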
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
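+
+/*
+ * SLEB128 example: 0x9b 0xf1 0x59 decodes to -624485 once the final
+ * byte's sign bit (0x40) is extended.
+ */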
+
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+ const u32 *cie;
+
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ return &bad_cie;
+
+ if (fde[1] == 0xffffffff)
+ return &not_fde; /* this is a CIE */
+
+ if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+ return NULL; /* this is not a valid FDE */
+
+ /* cie = fde + 1 - fde[1] / sizeof(*fde); */
+ cie = (u32 *) fde[1];
+
+ if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || (cie[1] != 0xffffffff))
+ return NULL; /* this is not a (valid) CIE */
+ return cie;
+}
+
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch (ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned((u16 *) ptr.p16s++);
+ else
+ value = get_unaligned((u16 *) ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
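+	/* fall through: data4 (32-bit) / data8 (64-bit) is native width */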
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned((unsigned long *)ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch (ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long __user *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned version = *ptr;
+
+ if (version != 1)
+ return -1; /* unsupported */
+
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+
+ /* check if augmentation string is nul-terminated */
+ aug = (const void *)ptr;
+ ptr = memchr(aug, 0, end - ptr);
+ if (ptr == NULL)
+ return -1;
+
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch (*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P':{
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType)
+ || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native | DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+ return delta > 0;
+ */
+ unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+ return 1;
+}
+
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("r%lu: ", reg);
+ switch (where) {
+ case Nowhere:
+ unw_debug("s ");
+ break;
+ case Memory:
+ unw_debug("c(%lu) ", value);
+ break;
+ case Register:
+ unw_debug("r(%lu) ", value);
+ break;
+ case Value:
+ unw_debug("v(%lu) ", value);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+}
+
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+ signed ptrType, struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+ u8 opcode;
+
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result =
+ processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+ state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
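+	/*
+	 * CFA opcode layout: the top two bits of the first byte select the
+	 * compact primary ops (advance_loc / offset / restore), which carry
+	 * a 6-bit operand in the same byte; 00 means the whole byte is an
+	 * extended DW_CFA_* opcode handled by the inner switch.
+	 */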
+ for (ptr.p8 = start; result && ptr.p8 < end;) {
+ switch (*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ opcode = *ptr.p8++;
+
+ switch (opcode) {
+ case DW_CFA_nop:
+ unw_debug("cfa nop ");
+ break;
+ case DW_CFA_set_loc:
+ state->loc = read_pointer(&ptr.p8, end,
+ ptrType);
+ if (state->loc == 0)
+ result = 0;
+ unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+ break;
+ case DW_CFA_advance_loc1:
+ unw_debug("\ncfa advance loc1:");
+ result = ptr.p8 < end
+ && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ value = *ptr.p8++;
+ value += *ptr.p8++ << 8;
+ unw_debug("\ncfa advance loc2:");
+ result = ptr.p8 <= end + 2
+ /* && advance_loc(*ptr.p16++, state); */
+ && advance_loc(value, state);
+ break;
+ case DW_CFA_advance_loc4:
+ unw_debug("\ncfa advance loc4:");
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_offset_extended: ");
+ set_rule(value, Memory,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ unw_debug("cfa_restore_extended: ");
+ case DW_CFA_undefined:
+ unw_debug("cfa_undefined: ");
+ case DW_CFA_same_value:
+ unw_debug("cfa_same_value: ");
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+ state);
+ break;
+ case DW_CFA_register:
+ unw_debug("cfa_register: ");
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ unw_debug("cfa_remember_state: ");
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ unw_debug("cfa_restore_state: ");
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label =
+ state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA,
+ sizeof(state->cfa));
+ memset(state->regs, 0,
+ sizeof(state->regs));
+ state->stackDepth = 0;
+ result =
+ processCFI(start, end, 0, ptrType,
+ state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa_offset: 0x%lx ",
+ state->cfa.offs);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak */
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+				unw_debug("cfa_def_cfa_register: ");
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t) 0 - get_uleb128(&ptr.p8,
+ end),
+ state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+				unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ unw_debug("\ncfa_adv_loc: ");
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ unw_debug("cfa_offset: ");
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+ state);
+ break;
+ case 3:
+ unw_debug("cfa_restore: ");
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result && ptr.p8 == end && (targetLoc == 0 || (
+ /*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc && */ state->label == NULL));
+}
+
+/* Unwind to previous frame. Returns 0 if successful, negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+ unsigned long *fptr;
+ unsigned long addr;
+
+ unw_debug("\n\nUNWIND FRAME:\n");
+ unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
+ UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+ UNW_FP(frame));
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+ {
+ unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+ unw_debug("\nStack Dump:\n");
+ for (i = 0; i < 20; i++, sptr++)
+ unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
+ unw_debug("\n");
+ }
+#endif
+
+ table = find_table(pc);
+ if (table != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native:
+ tableSize = sizeof(unsigned long);
+ break;
+ case DW_EH_PE_data2:
+ tableSize = 2;
+ break;
+ case DW_EH_PE_data4:
+ tableSize = 4;
+ break;
+ case DW_EH_PE_data8:
+ tableSize = 8;
+ break;
+ default:
+ tableSize = 0;
+ break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize && read_pointer(&ptr, end, hdr[1])
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2])) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
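+				/*
+				 * Binary search of the sorted (start, fde)
+				 * pairs in the .eh_frame_hdr lookup table.
+				 */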
+ do {
+ const u8 *cur =
+ ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3]);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3])) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr +
+ tableSize,
+ hdr[3]);
+ }
+ }
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= endLoc)
+ fde = NULL;
+ } else
+ fde = NULL;
+ }
+ if (fde == NULL) {
+ for (fde = table->address, tableSize = table->size;
+ cie = NULL, tableSize > sizeof(*fde)
+ && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ cie = cie_for_fde(fde, table);
+ if (cie == &bad_cie) {
+ cie = NULL;
+ break;
+ }
+ if (cie == NULL
+ || cie == &not_fde
+ || (ptrType = fde_pointer_type(cie)) < 0)
+ continue;
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde, ptrType);
+ if (!startLoc)
+ continue;
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= startLoc && pc < endLoc)
+ break;
+ }
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+ /* chk for ignorable or already handled
+ * nul-terminated augmentation string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+		/* get code alignment factor */
+		state.codeAlign = get_uleb128(&ptr, end);
+		/* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg =
+ state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+ end);
+ unw_debug("CIE Frame Info:\n");
+ unw_debug("return Address register 0x%lx\n",
+ retAddrReg);
+ unw_debug("data Align: %ld\n", state.dataAlign);
+ unw_debug("code Align: %lu\n", state.codeAlign);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width !=
+ sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+
+ top = STACK_TOP_UNW(frame->task);
+ bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+#else
+ if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+#endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link, (unsigned long *)
+ (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+#else
+ && link > UNW_FP(frame) && link < bottom
+#endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET)))
+ {
+ UNW_SP(frame) =
+ UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+ -
+#else
+ +
+#endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+ unw_debug("\nProcess instructions\n");
+
+ /* process instructions
+	 * For ARC, we optimize by having blink (retAddrReg) keep the
+	 * same value in leaf functions, so we should not check
+ * state.regs[retAddrReg].where == Nowhere
+ */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+/* || state.regs[retAddrReg].where == Nowhere */
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("\n");
+
+ unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+ if (REG_INVALID(i))
+ continue;
+
+ switch (state.regs[i].where) {
+ case Nowhere:
+ break;
+ case Memory:
+ unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+ break;
+ case Register:
+ unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+ break;
+ case Value:
+ unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+ break;
+ }
+ }
+
+ unw_debug("\n");
+#endif
+
+ /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+#endif
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+ endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+
+ unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
+ state.cfa.reg, state.cfa.offs, cfa);
+
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width >
+ reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch (reg_info[state.regs[i].value].width) {
+ case sizeof(u8):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u8);
+ break;
+ case sizeof(u16):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u16);
+ break;
+ case sizeof(u32):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u32);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u64);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+
+ unw_debug("\nRegister state after evaluation with realtime Stack:\n");
+ fptr = (unsigned long *)(&frame->regs);
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ FRAME_REG(i, u8) = state.regs[i].value;
+ break;
+ case sizeof(u16):
+ FRAME_REG(i, u16) = state.regs[i].value;
+ break;
+ case sizeof(u32):
+ FRAME_REG(i, u32) = state.regs[i].value;
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ FRAME_REG(i, u64) = state.regs[i].value;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory:
+ addr = cfa + state.regs[i].value * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ __get_user(FRAME_REG(i, u8),
+ (u8 __user *)addr);
+ break;
+ case sizeof(u16):
+ __get_user(FRAME_REG(i, u16),
+ (u16 __user *)addr);
+ break;
+ case sizeof(u32):
+ __get_user(FRAME_REG(i, u32),
+ (u32 __user *)addr);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ __get_user(FRAME_REG(i, u64),
+ (u64 __user *)addr);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ break;
+ }
+ unw_debug("r%d: 0x%lx ", i, *fptr);
+ }
+
+ return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d3c92f52d444
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(_stext)
+
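+/* jiffies aliases the low 32 bits of jiffies_64 (offset 4 when big-endian) */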
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+ /*
+ * ICCM starts at 0x8000_0000. So if kernel is relocated to some other
+ * address, make sure peripheral at 0x8z doesn't clash with ICCM
+ * Essentially vector is also in ICCM.
+ */
+
+ . = CONFIG_LINUX_LINK_BASE;
+
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
+ . = ALIGN(PAGE_SIZE);
+ }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ .text.arcfp : {
+ *(.text.arcfp)
+ . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+ }
+#endif
+
+ /*
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
+ *
+ * Reason for having .init.ramfs above .init is to make sure that the
+ * binary blob is tucked away to one side, reducing the displacement
+ * between .init.text and .text, avoiding any possible relocation
+ * errors because of calls from .init.text to .text
+ * Yes such calls do exist. e.g.
+ * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+ */
+
+ __init_begin = .;
+
+ .init.ramfs : { INIT_RAM_FS }
+
+ . = ALIGN(PAGE_SIZE);
+ _stext = .;
+
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+ /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(L1_CACHE_BYTES)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ }
+
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+	 * .exit.text is discarded at runtime, not link time, to deal with
+	 * references from .debug_frame
+	 * It will be init-freed, being inside [__init_begin : __init_end]
+ */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+
+ /*
+ * 1. this is .data essentially
+ * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+ */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ . = ALIGN(PAGE_SIZE);
+ .debug_frame : {
+ __start_unwind = .;
+ *(.debug_frame)
+ __end_unwind = .;
+ }
+#else
+ /DISCARD/ : { *(.debug_frame) }
+#endif
+
+ NOTES
+
+ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ STABS_DEBUG
+ DISCARDS
+
+ .arcextmap 0 : {
+ *(.gnu.linkonce.arcextmap.*)
+ *(.arcextmap.*)
+ }
+
+	/* open-coded because we need .debug_frame separately for unwinding */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ . = CONFIG_ARC_DCCM_BASE;
+ __arc_dccm_base = .;
+ .data.arcfp : {
+ *(.data.arcfp)
+ }
+ . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 000000000000..db46e200baba
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+lib-y := strchr-700.o strcmp.o strcpy-700.o strlen.o
+lib-y += memcmp.o memcpy-700.o memset.o
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 000000000000..bc813d55b6c3
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* BIG ENDIAN */
+#define WORD2 r3
+#define SHIFT r2
+#endif
+
+ARC_ENTRY memcmp
+ or r12,r0,r1
+ asl_s r12,r12,30
+ sub r3,r2,1
+ brls r2,r12,.Lbytewise
+ ld r4,[r0,0]
+ ld r5,[r1,0]
+ lsr.f lp_count,r3,3
+ lpne .Loop_end
+ ld_s WORD2,[r0,4]
+ ld_s r12,[r1,4]
+ brne r4,r5,.Leven
+ ld.a r4,[r0,8]
+ ld.a r5,[r1,8]
+ brne WORD2,r12,.Lodd
+.Loop_end:
+ asl_s SHIFT,SHIFT,3
+ bhs_s .Last_cmp
+ brne r4,r5,.Leven
+ ld r4,[r0,4]
+ ld r5,[r1,4]
+#ifdef __LITTLE_ENDIAN__
+ nop_s
+ ; one more load latency cycle
+.Last_cmp:
+ xor r0,r4,r5
+ bset r0,r0,SHIFT
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ b.d .Leven_cmp
+ and r1,r1,24
+.Leven:
+ xor r0,r4,r5
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+.Leven_cmp:
+ asl r2,r4,r1
+ asl r12,r5,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+ .balign 4
+.Lodd:
+ xor r0,WORD2,r12
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+ asl_s r2,r2,r1
+ asl_s r12,r12,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+#else /* BIG ENDIAN */
+.Last_cmp:
+ neg_s SHIFT,SHIFT
+ lsr r4,r4,SHIFT
+ lsr r5,r5,SHIFT
+ ; slow track insn
+.Leven:
+ sub.f r0,r4,r5
+ mov.ne r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+.Lodd:
+ cmp_s WORD2,r12
+
+ mov_s r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+#endif /* ENDIAN */
+ .balign 4
+.Lbytewise:
+ breq r2,0,.Lnil
+ ldb r4,[r0,0]
+ ldb r5,[r1,0]
+ lsr.f lp_count,r3
+ lpne .Lbyte_end
+ ldb_s r3,[r0,1]
+ ldb r12,[r1,1]
+ brne r4,r5,.Lbyte_even
+ ldb.a r4,[r0,2]
+ ldb.a r5,[r1,2]
+ brne r3,r12,.Lbyte_odd
+.Lbyte_end:
+ bcc .Lbyte_even
+ brne r4,r5,.Lbyte_even
+ ldb_s r3,[r0,1]
+ ldb_s r12,[r1,1]
+.Lbyte_odd:
+ j_s.d [blink]
+ sub r0,r3,r12
+.Lbyte_even:
+ j_s.d [blink]
+ sub r0,r4,r5
+.Lnil:
+ j_s.d [blink]
+ mov r0,0
+ARC_EXIT memcmp
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 000000000000..b64cc10ac918
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY memcpy
+ or r3,r0,r1
+ asl_s r3,r3,30
+ mov_s r5,r0
+ brls.d r2,r3,.Lcopy_bytewise
+ sub.f r3,r2,1
+ ld_s r12,[r1,0]
+ asr.f lp_count,r3,3
+ bbit0.d r3,2,.Lnox4
+ bmsk_s r2,r2,1
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,4]
+.Lnox4:
+ lppnz .Lendloop
+ ld_s r3,[r1,4]
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,8]
+ st.ab r3,[r5,4]
+.Lendloop:
+ breq r2,0,.Last_store
+ ld r3,[r5,0]
+#ifdef __LITTLE_ENDIAN__
+ add3 r2,-1,r2
+ ; uses long immediate
+ xor_s r12,r12,r3
+ bmsk r12,r12,r2
+ xor_s r12,r12,r3
+#else /* BIG ENDIAN */
+ sub3 r2,31,r2
+ ; uses long immediate
+ xor_s r3,r3,r12
+ bmsk r3,r3,r2
+ xor_s r12,r12,r3
+#endif /* ENDIAN */
+.Last_store:
+ j_s.d [blink]
+ st r12,[r5,0]
+
+ .balign 4
+.Lcopy_bytewise:
+ jcs [blink]
+ ldb_s r12,[r1,0]
+ lsr.f lp_count,r3
+ bhs_s .Lnox1
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,1]
+.Lnox1:
+ lppnz .Lendbloop
+ ldb_s r3,[r1,1]
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,2]
+ stb.ab r3,[r5,1]
+.Lendbloop:
+ j_s.d [blink]
+ stb r12,[r5,0]
+ARC_EXIT memcpy
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 000000000000..9b2d88d2e141
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
+
+ARC_ENTRY memset
+ mov_s r4,r0
+ or r12,r0,r2
+ bmsk.f r12,r12,1
+ extb_s r1,r1
+ asl r3,r1,8
+ beq.d .Laligned
+ or_s r1,r1,r3
+ brls r2,SMALL,.Ltiny
+ add r3,r2,r0
+ stb r1,[r3,-1]
+ bclr_s r3,r3,0
+ stw r1,[r3,-2]
+ bmsk.f r12,r0,1
+ add_s r2,r2,r12
+ sub.ne r2,r2,4
+ stb.ab r1,[r4,1]
+ and r4,r4,-2
+ stw.ab r1,[r4,2]
+ and r4,r4,-4
+.Laligned: ; This code address should be aligned for speed.
+ asl r3,r1,16
+ lsr.f lp_count,r2,2
+ or_s r1,r1,r3
+ lpne .Loop_end
+ st.ab r1,[r4,4]
+.Loop_end:
+ j_s [blink]
+
+ .balign 4
+.Ltiny:
+ mov.f lp_count,r2
+ lpne .Ltiny_end
+ stb.ab r1,[r4,1]
+.Ltiny_end:
+ j_s [blink]
+ARC_EXIT memset
+
+; memzero: @r0 = mem, @r1 = size_t
+; memset: @r0 = mem, @r1 = char, @r2 = size_t
+
+ARC_ENTRY memzero
+ ; adjust bzero args to memset args
+ mov r2, r1
+ mov r1, 0
+ b memset ;tail call so need to tinker with blink
+ARC_EXIT memzero
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 000000000000..99c10475d477
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has a relatively long pipeline and branch prediction, so we want
+ to avoid branches that are hard to predict. On the other hand, the
+ presence of the norm instruction makes it easier to operate on whole
+ words branch-free. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strchr
+ extb_s r1,r1
+ asl r5,r1,8
+ bmsk r2,r0,1
+ or r5,r5,r1
+ mov_s r3,0x01010101
+ breq.d r2,r0,.Laligned
+ asl r4,r5,16
+ sub_s r0,r0,r2
+ asl r7,r2,3
+ ld_s r2,[r0]
+#ifdef __LITTLE_ENDIAN__
+ asl r7,r3,r7
+#else
+ lsr r7,r3,r7
+#endif
+ or r5,r5,r4
+ ror r4,r3
+ sub r12,r2,r7
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0_ua
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r7
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
+ b .Lfound_char ; Likewise this one.
+; /* We require this code address to be unaligned for speed... */
+.Laligned:
+ ld_s r2,[r0]
+ or r5,r5,r4
+ ror r4,r3
+; /* ... so that this code address is aligned, for itself and ... */
+.Loop:
+ sub r12,r2,r3
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r3
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop /* ... so that this branch is unaligned. */
+ ; Found searched-for character. r0 has already advanced to next word.
+#ifdef __LITTLE_ENDIAN__
+/* We only need the information about the first matching byte
+ (i.e. the least significant matching byte) to be exact,
+ hence there is no problem with carry effects. */
+.Lfound_char:
+ sub r3,r7,1
+ bic r3,r3,r7
+ norm r2,r3
+ sub_s r0,r0,1
+ asr_s r2,r2,3
+ j.d [blink]
+ sub_s r0,r0,r2
+
+ .balign 4
+.Lfound0_ua:
+ mov r3,r7
+.Lfound0:
+ sub r3,r6,r3
+ bic r3,r3,r6
+ and r2,r3,r4
+ or_s r12,r12,r2
+ sub_s r3,r12,1
+ bic_s r3,r3,r12
+ norm r3,r3
+ add_s r0,r0,3
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ sub_s r0,r0,r12
+ j_s.d [blink]
+ mov.pl r0,0
+#else /* BIG ENDIAN */
+.Lfound_char:
+ lsr r7,r7,7
+
+ bic r2,r7,r6
+ norm r2,r2
+ sub_s r0,r0,4
+ asr_s r2,r2,3
+ j.d [blink]
+ add_s r0,r0,r2
+
+.Lfound0_ua:
+ mov_s r3,r7
+.Lfound0:
+ asl_s r2,r2,7
+ or r7,r6,r4
+ bic_s r12,r12,r2
+ sub r2,r7,r3
+ or r2,r2,r6
+ bic r12,r2,r12
+ bic.f r3,r4,r12
+ norm r3,r3
+
+ add.pl r3,r3,1
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ add_s r0,r0,r12
+ j_s.d [blink]
+ mov.mi r0,0
+#endif /* ENDIAN */
+ARC_EXIT strchr
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 000000000000..5dc802b45cf3
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* This is optimized primarily for the ARC700.
+ It would be possible to speed up the loops by one cycle / word
+   respectively one cycle / byte by forcing double source 1 alignment, unrolling
+ by a factor of two, and speculatively loading the second word / byte of
+ source 1; however, that would increase the overhead for loop setup / finish,
+ and strcmp might often terminate early. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcmp
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne r2,0,.Lcharloop
+ mov_s r12,0x01010101
+ ror r5,r12
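+	; r12 = 0x01010101, r5 = 0x80808080:
+	; (x - r12) & ~x & r5 is non-zero iff word x contains a zero byte.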
+.Lwordloop:
+ ld.ab r2,[r0,4]
+ ld.ab r3,[r1,4]
+ nop_s
+ sub r4,r2,r12
+ bic r4,r4,r2
+ and r4,r4,r5
+ brne r4,0,.Lfound0
+ breq r2,r3,.Lwordloop
+#ifdef __LITTLE_ENDIAN__
+ xor r0,r2,r3 ; mask for difference
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+#endif /* LITTLE ENDIAN */
+ cmp_s r2,r3
+ mov_s r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+
+ .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+ xor r0,r2,r3 ; mask for difference
+ or r0,r0,r4 ; or in zero indicator
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+ sub.f r0,r2,r3
+ mov.hi r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#else /* BIG ENDIAN */
+ /* The zero-detection above can mis-detect 0x01 bytes as zeroes
+	   because of carry-propagation from a less significant zero byte.
+ We can compensate for this by checking that bit0 is zero.
+ This compensation is not necessary in the step where we
+ get a low estimate for r2, because in any affected bytes
+ we already have 0x00 or 0x01, which will remain unchanged
+ when bit 7 is cleared. */
+ .balign 4
+.Lfound0:
+ lsr r0,r4,8
+ lsr_s r1,r2
+ bic_s r2,r2,r0 ; get low estimate for r2 and get ...
+ bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
+ or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
+ cmp_s r3,r2 ; ... be independent of trailing garbage
+ or_s r2,r2,r0 ; likewise for r3 > r2
+ bic_s r3,r3,r0
+ rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
+ cmp_s r2,r3
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#endif /* ENDIAN */
+
+ .balign 4
+.Lcharloop:
+ ldb.ab r2,[r0,1]
+ ldb.ab r3,[r1,1]
+ nop_s
+ breq r2,0,.Lcmpend
+ breq r2,r3,.Lcharloop
+.Lcmpend:
+ j_s.d [blink]
+ sub r0,r2,r3
+ARC_EXIT strcmp
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 000000000000..b7ca4ae81d88
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
+ If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+ it 8 byte aligned. Thus, we can do a little read-ahead, without
+ dereferencing a cache line that we should not touch.
+ Note that short and long instructions have been scheduled to avoid
+ branch stalls.
+ The beq_s to r3z could be made unaligned & long to avoid a stall
+   there, but it is not likely to be taken often, and it
+   would also be likely to cost an unaligned mispredict at the next call.  */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcpy
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne.d r2,0,charloop
+ mov_s r10,r0
+ ld_s r3,[r1,0]
+ mov r8,0x01010101
+ bbit0.d r1,2,loop_start
+ ror r12,r8
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne r3z
+ mov_s r4,r3
+ .balign 4
+loop:
+ ld.a r3,[r1,4]
+ st.ab r4,[r10,4]
+loop_start:
+ ld.a r4,[r1,4]
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne_s r3z
+ st.ab r3,[r10,4]
+ sub r2,r4,r8
+ bic r2,r2,r4
+ tst r2,r12
+ beq loop
+ mov_s r3,r4
+#ifdef __LITTLE_ENDIAN__
+r3z: bmsk.f r1,r3,7
+ lsr_s r3,r3,8
+#else
+r3z: lsr.f r1,r3,24
+ asl_s r3,r3,8
+#endif
+ bne.d r3z
+ stb.ab r1,[r10,1]
+ j_s [blink]
+
+ .balign 4
+charloop:
+ ldb.ab r3,[r1,1]
+
+
+ brne.d r3,0,charloop
+ stb.ab r3,[r10,1]
+ j [blink]
+ARC_EXIT strcpy
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 000000000000..39759e099696
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strlen
+ or r3,r0,7
+ ld r2,[r3,-7]
+ ld.a r6,[r3,-3]
+ mov r4,0x01010101
+ ; uses long immediate
+#ifdef __LITTLE_ENDIAN__
+ asl_s r1,r0,3
+ btst_s r0,2
+ asl r7,r4,r1
+ ror r5,r4
+ sub r1,r2,r7
+ bic_s r1,r1,r2
+ mov.eq r7,r4
+ sub r12,r6,r7
+ bic r12,r12,r6
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#else /* BIG ENDIAN */
+ ror r5,r4
+ btst_s r0,2
+ mov_s r1,31
+ sub3 r7,r1,r0
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ bmsk r1,r1,r7
+ sub r12,r6,r4
+ bic r12,r12,r6
+ bmsk.ne r12,r12,r7
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#endif /* ENDIAN */
+
+.Loop:
+ ld_s r2,[r3,4]
+ ld.a r6,[r3,8]
+ ; stall for load result
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ sub r12,r6,r4
+ bic r12,r12,r6
+ or r12,r12,r1
+ and r12,r12,r5
+ breq r12,0,.Loop
+.Lend:
+ and.f r1,r1,r5
+ sub.ne r3,r3,4
+ mov.eq r1,r12
+#ifdef __LITTLE_ENDIAN__
+ sub_s r2,r1,1
+ bic_s r2,r2,r1
+ norm r1,r2
+ sub_s r0,r0,3
+ lsr_s r1,r1,3
+ sub r0,r3,r0
+ j_s.d [blink]
+ sub r0,r0,r1
+#else /* BIG ENDIAN */
+ lsr_s r1,r1,7
+ mov.eq r2,r6
+ bic_s r1,r1,r2
+ norm r1,r1
+ sub r0,r3,r0
+ lsr_s r1,r1,3
+ j_s.d [blink]
+ add r0,r0,r1
+#endif /* ENDIAN */
+.Learly_end:
+ b.d .Lend
+ sub_s.ne r1,r1,r1
+ARC_EXIT strlen
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 000000000000..168dc146a8f6
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,10 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-y := extable.o ioremap.o dma.o fault.o init.o
+obj-y += tlb.o tlbex.o cache_arc700.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
new file mode 100644
index 000000000000..88d617d84234
--- /dev/null
+++ b/arch/arc/mm/cache_arc700.c
@@ -0,0 +1,768 @@
+/*
+ * ARC700 VIPT Cache Management
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
+ *
+ * vineetg: Apr 2011
+ * -Now that MMU can support larger pg sz (16K), the determiniation of
+ *  -Now that MMU can support larger pg sz (16K), the determination of
+ *
+ * vineetg: Mar 2011
+ * -optimised version of flush_icache_range( ) for making I/D coherent
+ * when vaddr is available (agnostic of num of aliases)
+ *
+ * vineetg: Mar 2011
+ * -Added documentation about I-cache aliasing on ARC700 and the way it
+ * was handled up until MMU V2.
+ * -Spotted a three year old bug when killing the 4 aliases, which needs
+ * bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
+ * instead of paddr | {0x00, 0x01, 0x10, 0x11}
+ * (Rajesh you owe me one now)
+ *
+ * vineetg: Dec 2010
+ * -Off-by-one error when computing num_of_lines to flush
+ * This broke signal handling with bionic which uses synthetic sigret stub
+ *
+ * vineetg: Mar 2010
+ * -GCC can't generate ZOL for core cache flush loops.
+ * Conv them into iterations based as opposed to while (start < end) types
+ *
+ * Vineetg: July 2009
+ * -In I-cache flush routine we used to chk for aliasing for every line INV.
+ * Instead now we setup routines per cache geometry and invoke them
+ * via function pointers.
+ *
+ * Vineetg: Jan 2009
+ * -Cache Line flush routines used to flush an extra line beyond end addr
+ * because check was while (end >= start) instead of (end > start)
+ * =Some call sites had to work around by doing -1, -4 etc to end param
+ *  =Some callers didn't care. This was especially bad in case of INV routines
+ * which would discard valid data (cause of the horrible ext2 bug
+ * in ARC IDE driver)
+ *
+ * vineetg: June 11th 2008: Fixed flush_icache_range( )
+ *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
+ * to be flushed, which it was not doing.
+ * -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
+ * however ARC cache maintenance OPs require PHY addr. Thus need to do
+ * vmalloc_to_phy.
+ * -Also added optimisation there, that for range > PAGE SIZE we flush the
+ * entire cache in one shot rather than line by line. For e.g. a module
+ * with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
+ * while cache is only 16 or 32k.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+static void __ic_line_inv_no_alias(unsigned long, int);
+static void __ic_line_inv_2_alias(unsigned long, int);
+static void __ic_line_inv_4_alias(unsigned long, int);
+
+/* Holds the ptr to flush routine, depending on size due to aliasing issues */
+static void (*___flush_icache_rtn) (unsigned long, int);
+#endif
+
+char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ unsigned int c = smp_processor_id();
+
+#define PR_CACHE(p, enb, str) \
+{ \
+ if (!(p)->ver) \
+ n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
+ else \
+ n += scnprintf(buf + n, len - n, \
+ str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
+ TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
+ enb ? "" : "DISABLED (kernel-build)"); \
+}
+
+ PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
+ PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
+
+ return buf;
+}
+
+/*
+ * Read the Cache Build Configuration Registers, Decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation done here, simply read/convert the BCRs
+ */
+void __init read_decode_cache_bcr(void)
+{
+ struct bcr_cache ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic, *p_dc;
+ unsigned int cpu = smp_processor_id();
+
+ p_ic = &cpuinfo_arc700[cpu].icache;
+ READ_BCR(ARC_REG_IC_BCR, ibcr);
+
+ if (ibcr.config == 0x3)
+ p_ic->assoc = 2;
+ p_ic->line_len = 8 << ibcr.line_len;
+ p_ic->sz = 0x200 << ibcr.sz;
+ p_ic->ver = ibcr.ver;
+
+ p_dc = &cpuinfo_arc700[cpu].dcache;
+ READ_BCR(ARC_REG_DC_BCR, dbcr);
+
+ if (dbcr.config == 0x2)
+ p_dc->assoc = 4;
+ p_dc->line_len = 16 << dbcr.line_len;
+ p_dc->sz = 0x200 << dbcr.sz;
+ p_dc->ver = dbcr.ver;
+}
+
+/*
+ * 1. Validate the Cache Geometry (compile time config matches hardware)
+ * 2. If I-cache suffers from aliasing, setup work arounds (difft flush rtn)
+ *    (aliasing D-cache configurations are not supported YET)
+ * 3. Enable the Caches, setup default flush mode for D-Cache
+ * 4. Calculate the SHMLBA used by user space
+ */
+void __init arc_cache_init(void)
+{
+ unsigned int temp;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ int way_pg_ratio = way_pg_ratio;
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ if (!ic->ver)
+ goto chk_dc;
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ /* 1. Confirm some of I-cache params which Linux assumes */
+ if ((ic->assoc != ARC_ICACHE_WAYS) ||
+ (ic->line_len != ARC_ICACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+#if (CONFIG_ARC_MMU_VER > 2)
+ if (ic->ver != 3) {
+ if (running_on_hw)
+ panic("Cache ver doesn't match MMU ver\n");
+
+ /* For ISS - suggest the toggles to use */
+ pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
+
+ }
+#endif
+
+ /*
+ * if Cache way size is <= page size then no aliasing exhibited
+ * otherwise ratio determines num of aliases.
+ * e.g. 32K I$, 2 way set assoc, 8k pg size
+ * way-sz = 32k/2 = 16k
+ * way-pg-ratio = 16k/8k = 2, so 2 aliases possible
+ * (meaning 1 line could be in 2 possible locations).
+ */
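+	/* e.g. 64K I$, 2 way, 8k pg: way-sz = 32k => 4 aliases (case 4 below) */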
+ way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
+ switch (way_pg_ratio) {
+ case 0:
+ case 1:
+ ___flush_icache_rtn = __ic_line_inv_no_alias;
+ break;
+ case 2:
+ ___flush_icache_rtn = __ic_line_inv_2_alias;
+ break;
+ case 4:
+ ___flush_icache_rtn = __ic_line_inv_4_alias;
+ break;
+ default:
+ panic("Unsupported I-Cache Sz\n");
+ }
+#endif
+
+ /* Enable/disable I-Cache */
+ temp = read_aux_reg(ARC_REG_IC_CTRL);
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ temp &= ~IC_CTRL_CACHE_DISABLE;
+#else
+ temp |= IC_CTRL_CACHE_DISABLE;
+#endif
+
+ write_aux_reg(ARC_REG_IC_CTRL, temp);
+
+chk_dc:
+ if (!dc->ver)
+ return;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ if ((dc->assoc != ARC_DCACHE_WAYS) ||
+ (dc->line_len != ARC_DCACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+
+ /* check for D-Cache aliasing */
+ if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
+ panic("D$ aliasing not handled right now\n");
+#endif
+
+	/* Set the default Invalidate Mode to "simply discard dirty lines"
+	 * as this is more frequent than flush before invalidate.
+	 * Of course we toggle this default behaviour when desired
+ */
+ temp = read_aux_reg(ARC_REG_DC_CTRL);
+ temp &= ~DC_CTRL_INV_MODE_FLUSH;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ /* Enable D-Cache: Clear Bit 0 */
+ write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+#else
+ /* Flush D cache */
+ write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+ /* Disable D cache */
+ write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
+#endif
+
+ return;
+}
+
+#define OP_INV 0x1
+#define OP_FLUSH 0x2
+#define OP_FLUSH_N_INV 0x3
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+static inline void wait_for_flush(void)
+{
+ while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+ ;
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /* Dcache provides 2 cmd: FLUSH or INV
+		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * Default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDC;
+ else
+ aux = ARC_REG_DC_FLSH;
+
+ write_aux_reg(aux, 0x1);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+ /* Switch back the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Per Line Operation on D-Cache
+ * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
+ * Its sole purpose is to help gcc generate ZOL
+ */
+static inline void __dc_line_loop(unsigned long start, unsigned long sz,
+ int aux_reg)
+{
+ int num_lines, slack;
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+	 * and have @start aligned to a cache line and an integral @num_lines.
+ * This however can be avoided for page sized since:
+ * -@start will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~DCACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
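+	/*
+	 * Worked example (hypothetical 64 byte D$ line): a request for
+	 * @start = 0x1010, @sz = 0x30 has slack = 0x10, so it becomes
+	 * @start = 0x1000, @sz = 0x40, i.e. exactly one line.
+	 */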
+
+ num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
+
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * Just as for I$, in MMU v3, D$ ops also require
+ * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
+ * But we pass phy addr for both. This works since Linux
+ * doesn't support aliasing configs for D$, yet.
+ * Thus paddr is enough to provide both tag and index.
+ */
+ write_aux_reg(ARC_REG_DC_PTAG, start);
+#endif
+ write_aux_reg(aux_reg, start);
+ start += ARC_DCACHE_LINE_LEN;
+ }
+}
+
+/*
+ * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(unsigned long start, unsigned long sz,
+ const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /*
+		 * Dcache provides 2 cmds: FLUSH or INV
+		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+		 * flush-n-inv is achieved by the INV cmd but with IM=1
+		 * The default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDL;
+ else
+ aux = ARC_REG_DC_FLDL;
+
+ __dc_line_loop(start, sz, aux);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+	/* Switch back to the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(cacheop)
+#define __dc_line_op(start, sz, cacheop)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+/*
+ * I-Cache Aliasing in ARC700 VIPT caches
+ *
+ * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
+ * to "index" into SET of cache-line and paddr from MMU to match the TAG
+ * in the WAYS of SET.
+ *
+ * However the CDU interface (to flush/inv lines from software) only takes
+ * paddr (to keep the hardware interface simple). For simpler cases, using
+ * paddr alone suffices.
+ * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
+ * way_sz = cache_sz / num_ways = 16k/2 = 8k
+ * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
+ * Ignoring the bottom 5 bits corresponding to the offset within a 32b
+ * cacheline, the bits required to calc the set-index are bits 12:5 (0 based).
+ * Since this range fits inside the bottom 13 bits of paddr, which are the
+ * same for vaddr and paddr (with 8k pg sz), paddr alone can be safely used
+ * by the CDU to unambiguously locate a cache-line.
+ *
+ * However for a differently sized cache, say 32k I$, the above math yields a
+ * need for 14 bits of vaddr to locate a cache line, which can't be provided
+ * by paddr, since bit 13 (0 based) might differ between the two.
+ *
+ * This lack of extra bits needed for correct line addressing, defines the
+ * classical problem of Cache aliasing with VIPT architectures
+ * num_aliases = 1 << extra_bits
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
+ * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
+ * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ *
+ * ------------------
+ * MMU v1/v2 (Fixed Page Size 8k)
+ * ------------------
+ * The solution was to provide the CDU with these additional vaddr bits. These
+ * would be bits [x:13], where x depends on the cache geometry.
+ * H/w folks chose [17:13] to be a future safe range, and moreover these 5 bits
+ * of vaddr could easily be "stuffed" into the paddr as bits [4:0] since the
+ * original 5 bits of paddr were anyway ignored by CDU line ops, as they
+ * represent the offset within a cache-line. The advantage of this "clumsy"
+ * interface for the additional info was that no new reg was needed in the CDU.
+ *
+ * 17:13 represented the max num of bits passable, actual bits needed were
+ * fewer, based on the num-of-aliases possible.
+ * -for 2 alias possibility, only bit 13 needed (32K cache)
+ * -for 4 alias possibility, bits 14:13 needed (64K cache)
+ *
+ * Since vaddr was not available for all instances of I$ flush requested by the
+ * core kernel, the only safe way (non-optimal though) was to kill all possible
+ * lines which could represent an alias (even if they didn't represent one
+ * in execution).
+ * e.g. for 64K I$, 4 aliases possible, so we did
+ *     flush start
+ *     flush start | 0x01
+ *     flush start | 0x02
+ *     flush start | 0x03
+ *
+ * The penalty was invoking the operation itself; since the tag match is anyway
+ * paddr based, a line which didn't represent an alias would not match the
+ * paddr, hence won't be killed.
+ *
+ * Note that aliasing concerns are independent of line-sz for a given cache
+ * geometry (size + set_assoc) because the extra bits required by line-sz are
+ * reduced from the set calc.
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
+ * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
+ * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
+ *
+ * ------------------
+ * MMU v3
+ * ------------------
+ * This ver of MMU supports variable page sizes (1k-16k): Linux will support
+ * 8k (default), 16k and 4k.
+ * However from the hardware perspective, smaller page sizes aggravate aliasing,
+ * meaning more vaddr bits are needed to disambiguate the cache-line op;
+ * the existing scheme of piggybacking won't work for certain configurations.
+ * Two new registers IC_PTAG and DC_PTAG are introduced.
+ * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ */
+
+/***********************************************************
+ * Machine specific helpers for per line I-Cache invalidate.
+ * 3 routines to account for 1, 2, 4 aliases possible
+ */
+
+static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+#endif
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
+ * reg to pass the "tag" bits and existing IVIL reg only looks
+ * at bits relevant for "index" (details above)
+ * Programming Notes:
+ * -when writing tag to PTAG reg, bit chopping can be avoided,
+ * CDU ignores non-tag bits.
+ * -Ideally "index" must be computed from vaddr, but it is not
+		 *  available in these routines. So to be safe, we kill the lines
+		 *  in all possible indexes corresponding to the number of aliases
+		 *  possible for the given cache config.
+ */
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x3 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x2 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv(unsigned long start, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+
+ /*
+ * Ensure we properly floor/ceil the non-line aligned/sized requests
+	 * so that @start is aligned to a cache line and @num_lines is integral.
+	 * However page sized flushes can be compile time optimised:
+	 * -@start will be cache-line aligned already (being page aligned)
+	 * -@sz will be an integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+ local_irq_save(flags);
+ (*___flush_icache_rtn) (start, num_lines);
+ local_irq_restore(flags);
+}
+
+/* Unlike the routines above, having vaddr for the flush op (along with paddr)
+ * prevents the need to speculatively kill the lines in multiple sets
+ * based on ratio of way_sz : pg_sz
+ */
+static void __ic_line_inv_vaddr(unsigned long phy_start,
+ unsigned long vaddr, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+ unsigned int addr;
+
+ slack = phy_start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ phy_start -= slack;
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ vaddr &= ~ICACHE_LINE_MASK;
+ addr = phy_start;
+#else
+ /* bits 17:13 of vaddr go as bits 4:0 of paddr */
+ addr = phy_start | ((vaddr >> 13) & 0x1F);
+#endif
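+	/*
+	 * Illustration of the v1/v2 "stuffing" above: for a hypothetical
+	 * vaddr of 0x0001_2000, bits 17:13 are 0x09, so they land in
+	 * paddr bits 4:0, giving addr = phy_start | 0x09.
+	 */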
+
+ local_irq_save(flags);
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /* tag comes from phy addr */
+ write_aux_reg(ARC_REG_IC_PTAG, addr);
+
+ /* index bits come from vaddr */
+ write_aux_reg(ARC_REG_IC_IVIL, vaddr);
+ vaddr += ARC_ICACHE_LINE_LEN;
+#else
+		/* this paddr contains the vaddr bits as needed */
+ write_aux_reg(ARC_REG_IC_IVIL, addr);
+#endif
+ addr += ARC_ICACHE_LINE_LEN;
+ }
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __ic_line_inv(start, sz)
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/* TBD: use pg_arch_1 to optimize this */
+void flush_dcache_page(struct page *page)
+{
+ __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH_N_INV);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_INV);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is the API for making I/D Caches consistent when modifying code
+ * (loadable modules, kprobes, etc).
+ * This is called on insmod, with the kernel virtual address for the CODE of
+ * the module. ARC cache maintenance ops require a PHY address, thus we
+ * need to convert the vmalloc addr to a PHY addr.
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+ unsigned int tot_sz, off, sz;
+ unsigned long phy, pfn;
+ unsigned long flags;
+
+ /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */
+
+ /* This is not the right API for user virtual address */
+ if (kstart < TASK_SIZE) {
+ BUG_ON("Flush icache range for user virtual addr space");
+ return;
+ }
+
+ /* Shortcut for bigger flush ranges.
+ * Here we don't care if this was kernel virtual or phy addr
+ */
+ tot_sz = kend - kstart;
+ if (tot_sz > PAGE_SIZE) {
+ flush_cache_all();
+ return;
+ }
+
+ /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+ if (likely(kstart > PAGE_OFFSET)) {
+ __ic_line_inv(kstart, kend - kstart);
+ __dc_line_op(kstart, kend - kstart, OP_FLUSH);
+ return;
+ }
+
+ /*
+ * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+ * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+ * handling of kernel vaddr.
+ *
+ * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+ * it still needs to handle a 2 page scenario, where the range
+	 *     straddles across 2 virtual pages, hence the need for a loop.
+ */
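+	/*
+	 * e.g. (hypothetical numbers) with an 8K page, a 200 byte range
+	 * starting 100 bytes before a page boundary straddles 2 vmalloc
+	 * pages: iteration 1 handles the first 100 bytes, iteration 2 the
+	 * remaining 100, each with its own vmalloc_to_pfn() translation.
+	 */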
+ while (tot_sz > 0) {
+ off = kstart % PAGE_SIZE;
+ pfn = vmalloc_to_pfn((void *)kstart);
+ phy = (pfn << PAGE_SHIFT) + off;
+ sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+ local_irq_save(flags);
+ __dc_line_op(phy, sz, OP_FLUSH);
+ __ic_line_inv(phy, sz);
+ local_irq_restore(flags);
+ kstart += sz;
+ tot_sz -= sz;
+ }
+}
+
+/*
+ * Optimised ver of flush_icache_range() with specific callers: ptrace/signals
+ * where vaddr is also available. This allows passing both vaddr and paddr
+ * bits to CDU for cache flush, short-circuiting the current pessimistic algo
+ * which kills all possible aliases.
+ * An added advantage of knowing that vaddr is a user-vaddr is avoiding various
+ * checks and handling for k-vaddr, k-paddr as done in the orig ver above.
+ */
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len)
+{
+ __ic_line_inv_vaddr(paddr, u_vaddr, len);
+ __dc_line_op(paddr, len, OP_FLUSH);
+}
+
+/*
+ * XXX: This also needs to be optimised using pg_arch_1
+ * This is called when a page-cache page is about to be mapped into a
+ * user process' address space. It offers an opportunity for a
+ * port to ensure d-cache/i-cache coherency if necessary.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+}
+
+void flush_icache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ write_aux_reg(ARC_REG_IC_IVIC, 1);
+
+	/* lr will not complete till the icache inv operation is over */
+ read_aux_reg(ARC_REG_IC_CTRL);
+ local_irq_restore(flags);
+}
+
+noinline void flush_cache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ flush_icache_all();
+ __dc_entire_op(OP_FLUSH_N_INV);
+
+ local_irq_restore(flags);
+
+}
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+ /* TBD: optimize this */
+ flush_cache_all();
+ return 0;
+}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 000000000000..12cc6485b218
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA Coherent API Notes
+ *
+ * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
+ * implemented by accessing it using a kernel virtual address, with
+ * Cache bit off in the TLB entry.
+ *
+ * The default DMA address == Phy address which is 0x8000_0000 based.
+ * A platform/device can make it zero based, by over-riding
+ * plat_{dma,kernel}_addr_to_{kernel,dma}
+ */
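+
+/*
+ * e.g. (hypothetical values) with the default identity mapping, a buffer at
+ * kernel linear addr 0x8020_0000 gets dma_handle 0x8020_0000; a platform
+ * over-riding plat_kernel_addr_to_dma() could instead hand out 0x0020_0000.
+ */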
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Helpers for Coherent DMA API.
+ */
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return paddr;
+}
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr, *kvaddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is kernel Virtual address (0x7000_0000 based) */
+ kvaddr = ioremap_nocache((unsigned long)paddr, size);
+ if (kvaddr != NULL)
+ memset(kvaddr, 0, size);
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return kvaddr;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle)
+{
+ iounmap((void __force __iomem *)kvaddr);
+
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_coherent);
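+
+/*
+ * Illustrative usage sketch of the coherent API above (hypothetical driver):
+ *
+ *	dma_addr_t handle;
+ *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
+ *	if (buf) {
+ *		... CPU accesses @buf uncached, device is programmed with @handle ...
+ *		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
+ *	}
+ */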
+
+/*
+ * Helper for streaming DMA...
+ */
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ __inline_dma_cache_sync(paddr, size, dir);
+}
+EXPORT_SYMBOL(__arc_dma_cache_sync);
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 000000000000..014172ba8432
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->ret = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+
+long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n)
+{
+ return __arc_copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_from_user_noinline);
+
+long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n)
+{
+ return __arc_copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_to_user_noinline);
+
+unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n)
+{
+ return __arc_clear_user(to, n);
+}
+EXPORT_SYMBOL(arc_clear_user_noinline);
+
+long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count)
+{
+ return __arc_strncpy_from_user(dst, src, count);
+}
+EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
+
+long arc_strnlen_user_noinline(const char __user *src, long n)
+{
+ return __arc_strnlen_user(src, n);
+}
+EXPORT_SYMBOL(arc_strnlen_user_noinline);
+#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 000000000000..af55aab803d2
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,228 @@
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <asm/pgalloc.h>
+
+static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+{
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = pgd_offset_fast(mm, address);
+ pgd_k = pgd_offset_k(address);
+
+ if (!pgd_present(*pgd_k))
+ goto bad_area;
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto bad_area;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto bad_area;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* XXX: create the TLB entry here */
+ return 0;
+
+bad_area:
+ return 1;
+}
+
+void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+ unsigned long cause_code)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ siginfo_t info;
+ int fault, ret;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= VMALLOC_START && address <= VMALLOC_END) {
+ ret = handle_vmalloc_fault(mm, address);
+ if (unlikely(ret))
+ goto bad_area_nosemaphore;
+ else
+ return;
+ }
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* Handle protection violation, execute on heap or stack */
+
+ if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+ goto bad_area;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
+ if (unlikely(fatal_signal_pending(current))) {
+ if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
+ up_read(&mm->mmap_sem);
+ if (user_mode(regs))
+ return;
+ }
+
+ if (likely(!(fault & VM_FAULT_ERROR))) {
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ /* To avoid updating stats twice for retry case */
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+
+ /* Fault Handled Gracefully */
+ up_read(&mm->mmap_sem);
+ return;
+ }
+
+ /* TBD: switch to pagefault_out_of_memory() */
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+
+ /* no man's land */
+ BUG();
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+	 * when it accesses user-memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+ if (fixup_exception(regs))
+ return;
+
+ die("Oops", regs, address, cause_code);
+
+out_of_memory:
+ if (is_global_init(tsk)) {
+ yield();
+ goto survive;
+ }
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs))
+ do_group_exit(SIGKILL); /* This will never return */
+
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ if (!user_mode(regs))
+ goto no_context;
+
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 000000000000..caf797de23fc
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLOCK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Default tot mem from .config */
+static unsigned long arc_mem_sz = 0x20000000; /* some default */
+
+/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
+static int __init setup_mem_sz(char *str)
+{
+ arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+ /* early console might not be setup yet - it will show up later */
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+
+ return 0;
+}
+early_param("mem", setup_mem_sz);
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ arc_mem_sz = size & PAGE_MASK;
+ pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
+ unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+
+ init_mm.start_code = (unsigned long)_text;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)_end;
+
+ /*
+ * We do it here, so that memory is correctly instantiated
+ * even if "mem=xxx" cmline over-ride is given and/or
+ * DT has memory node. Each causes an update to @arc_mem_sz
+ * and we finally add memory one here
+ */
+ memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
+
+ /*------------- externs in mm need setting up ---------------*/
+
+ /* first page of system - kernel .vector starts here */
+ min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
+
+ /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
+ max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+
+ max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
+
+ /*------------- reserve kernel image -----------------------*/
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+ memblock_dump_all();
+
+ /*-------------- node setup --------------------------------*/
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_NORMAL] = num_physpages;
+
+ /*
+ * We can't use the helper free_area_init(zones[]) because it uses
+ * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
+ * when our kernel doesn't start at PAGE_OFFSET, i.e.
+ * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
+ */
+ free_area_init_node(0, /* node-id */
+ zones_size, /* num pages per zone */
+ min_low_pfn, /* first pfn of node */
+ NULL); /* NO holes */
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ int codesize, datasize, initsize, reserved_pages, free_pages;
+ int tmp;
+
+ high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
+
+ totalram_pages = free_all_bootmem();
+
+ /* count all reserved pages [kernel code/data/mem_map..] */
+ reserved_pages = 0;
+ for (tmp = 0; tmp < max_mapnr; tmp++)
+ if (PageReserved(mem_map + tmp))
+ reserved_pages++;
+
+ /* XXX: nr_free_pages() is equivalent */
+ free_pages = max_mapnr - reserved_pages;
+
+ /*
+	 * For the purpose of the display below, split up "reserved mem":
+	 * kernel code/data is already shown explicitly, so show only
+	 * the other reservations (mem_map[ ] et al) here.
+ */
+ reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
+ PAGE_SHIFT);
+
+ codesize = _etext - _text;
+ datasize = _end - _etext;
+ initsize = __init_end - __init_begin;
+
+ pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
+ PAGES_TO_MB(free_pages),
+ TO_MB(arc_mem_sz),
+ TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
+ PAGES_TO_KB(reserved_pages));
+}
+
+static void __init free_init_pages(const char *what, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long addr;
+
+ pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
+ what, TO_KB(end - begin), begin, end);
+
+ /* need to check that the page we free is not a partial page */
+ for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+}
+
+/*
+ * free_initmem: Free all the __init memory.
+ */
+void __init_refok free_initmem(void)
+{
+ free_init_pages("unused kernel memory",
+ (unsigned long)__init_begin,
+ (unsigned long)__init_end);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory", start, end);
+}
+#endif
+
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ pr_err("%s(%lx, %lx)\n", __func__, start, end);
+}
+#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 000000000000..3e5c92c79936
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/cache.h>
+
+void __iomem *ioremap(unsigned long paddr, unsigned long size)
+{
+ unsigned long end;
+
+ /* Don't allow wraparound or zero size */
+ end = paddr + size - 1;
+ if (!size || (end < paddr))
+ return NULL;
+
+ /* If the region is h/w uncached, avoid MMU mappings */
+ if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+ return (void __iomem *)paddr;
+
+ return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
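+
+/*
+ * Illustrative usage sketch (hypothetical peripheral base/size):
+ *
+ *	void __iomem *regs = ioremap(0xc0fc1000, SZ_4K);
+ *	if (regs) {
+ *		... readl()/writel() on @regs ...
+ *		iounmap(regs);
+ *	}
+ */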
+
+/*
+ * ioremap with access flags
+ * Cache semantics wise it is the same as ioremap - "forced" uncached.
+ * However unlike vanilla ioremap which bypasses the ARC MMU for addresses in
+ * the ARC hardware uncached region, this one still goes through the MMU as the
+ * caller might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ unsigned long flags)
+{
+ void __iomem *vaddr;
+ struct vm_struct *area;
+ unsigned long off, end;
+ pgprot_t prot = __pgprot(flags);
+
+ /* Don't allow wraparound, zero size */
+ end = paddr + size - 1;
+ if ((!size) || (end < paddr))
+ return NULL;
+
+ /* An early platform driver might end up here */
+ if (!slab_is_available())
+ return NULL;
+
+ /* force uncached */
+ prot = pgprot_noncached(prot);
+
+ /* Mappings have to be page-aligned */
+ off = paddr & ~PAGE_MASK;
+ paddr &= PAGE_MASK;
+ size = PAGE_ALIGN(end + 1) - paddr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ area->phys_addr = paddr;
+ vaddr = (void __iomem *)area->addr;
+ if (ioremap_page_range((unsigned long)vaddr,
+ (unsigned long)vaddr + size, paddr, prot)) {
+ vunmap((void __force *)vaddr);
+ return NULL;
+ }
+ return (void __iomem *)(off + (char __iomem *)vaddr);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+
+void iounmap(const void __iomem *addr)
+{
+ if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+ return;
+
+ vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 000000000000..9b9ce23f4ec3
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,645 @@
+/*
+ * TLB Management (flush/create/diagnostics) for ARC700
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Aug 2011
+ * -Reintroduce duplicate PD fixup - some customer chips still have the issue
+ *
+ * vineetg: May 2011
+ * -No need to flush_cache_page( ) for each call to update_mmu_cache()
+ * some of the LMBench tests improved amazingly
+ * = page-fault thrice as fast (75 usec to 28 usec)
+ * = mmap twice as fast (9.6 msec to 4.6 msec),
+ * = fork (5.3 msec to 3.7 msec)
+ *
+ * vineetg: April 2011 :
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * vineetg: April 2011 : Preparing for MMU V3
+ * -MMU v2/v3 BCRs decoded differently
+ * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
+ * -tlb_entry_erase( ) can be void
+ * -local_flush_tlb_range( ):
+ * = need not "ceil" @end
+ * = walks MMU only if range spans < 32 entries, as opposed to 256
+ *
+ * Vineetg: Sept 10th 2008
+ * -Changes related to MMU v2 (Rel 4.8)
+ *
+ * Vineetg: Aug 29th 2008
+ * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
+ *  flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
+ *  cmd, it fails. Thus we need to load it with ANY valid value before
+ *  invoking the TLBIVUTLB cmd
+ *
+ * Vineetg: Aug 21st 2008:
+ *  -Reduced the duration of IRQ lockouts in TLB Flush routines
+ *  -Multiple copies of TLB erase code separated into a "single" function
+ * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
+ * in interrupt-safe region.
+ *
+ * Vineetg: April 23rd Bug #93131
+ * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
+ * flush is more than the size of the TLB itself.
+ *
+ * Rahul Trivedi : Codito Technologies 2004
+ */
+
+#include <linux/module.h>
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* Need for ARC MMU v2
+ *
+ * ARC700 MMU-v1 had a Joint-TLB for Code and Data which is 2 way set-assoc.
+ * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
+ * map into the same set, there would be contention for the 2 ways, causing
+ * severe Thrashing.
+ *
+ * Although the J-TLB is 2 way set assoc, ARC700 caches J-TLB entries into
+ * uTLBs which have much higher associativity: u-D-TLB is 8 ways, u-I-TLB is
+ * 4 ways. Given this, the thrashing problem should never happen because once
+ * the 3 J-TLB entries are created (even though the 3rd will knock out one of
+ * the prev two), the u-D-TLB and u-I-TLB will have what is required to
+ * accomplish the memcpy.
+ *
+ * Yet we still see the Thrashing because a J-TLB Write causes a flush of
+ * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
+ * The solution which James came up with was pretty neat. It utilised the assoc
+ * of the uTLBs by not invalidating always but only when absolutely necessary.
+ *
+ * - Existing TLB commands work as before
+ * - New command (TLBWriteNI) for TLB write without clearing uTLBs
+ * - New command (TLBIVUTLB) to invalidate uTLBs.
+ *
+ * The uTLBs need only be invalidated when pages are being removed from the
+ * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
+ * as a result of a miss, the removed entry is still allowed to exist in the
+ * uTLBs as it is still valid and present in the OS page table. This allows the
+ * full associativity of the uTLBs to hide the limited associativity of the main
+ * TLB.
+ *
+ * During a miss handler, the new "TLBWriteNI" command is used to load
+ * entries without clearing the uTLBs.
+ *
+ * When the OS page table is updated, TLB entries that may be associated with a
+ * removed page are removed (flushed) from the TLB using TLBWrite. In this
+ * circumstance, the uTLBs must also be cleared. This is done by using the
+ * existing TLBWrite command. An explicit IVUTLB is also required for those
+ * corner cases when TLBWrite was not executed at all because the corresp
+ * J-TLB entry got evicted/replaced.
+ */
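+
+/*
+ * In short (from the notes above):
+ *	TLB refill on a miss      -> TLBWriteNI (uTLBs left intact)
+ *	page removed from OS PT   -> TLBWrite   (uTLBs cleared too)
+ *	entry already evicted     -> explicit TLBIVUTLB
+ */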
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+int asid_cache = FIRST_ASID;
+
+/* ASID to mm struct mapping. We have one extra entry corresponding to
+ * NO_ASID to save us a compare when clearing the mm entry for the old asid;
+ * see get_new_mmu_context (asm-arc/mmu_context.h)
+ */
+struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * The procedure is to look it up in the MMU. If found, ERASE it by
+ * issuing a TlbWrite CMD with PD0 = PD1 = 0
+ */
+
+static void __tlb_entry_erase(void)
+{
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ unsigned int idx;
+
+ /* Locate the TLB entry for this vaddr + ASID */
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* No error means entry found, zero it out */
+ if (likely(!(idx & TLB_LKUP_ERR))) {
+ __tlb_entry_erase();
+ } else { /* Some sort of Error */
+
+ /* Duplicate entry error */
+ if (idx & 0x1) {
+ /* TODO we need to handle this case too */
+ pr_emerg("unhandled Duplicate flush for %x\n",
+ vaddr_n_asid);
+ }
+ /* else entry not found so nothing to do */
+ }
+}
+
+/****************************************************************************
+ * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
+ *
+ * The new IVUTLB cmd in MMU v2 explicitly invalidates the uTLBs
+ *
+ * utlb_invalidate ( )
+ *  -For v2 MMU: calls the Flush uTLB Cmd
+ *  -For v1 MMU: does nothing (except for Metal Fix v1 MMU)
+ *      This is because in v1 TLBWrite itself invalidates the uTLBs
+ ***************************************************************************/
+
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER < 3)
+ /* MMU v2 introduced the uTLB Flush command.
+ * There was however an obscure hardware bug, where uTLB flush would
+	 * fail when a prior probe for a J-TLB entry (totally unrelated) would
+	 * return a lkup err - because the entry didn't exist in the MMU.
+	 * The Workaround was to set the Index reg to some valid value prior to
+	 * the flush. This was fixed in MMU v3, hence it is not needed any more.
+ */
+ unsigned int idx;
+
+ /* make sure INDEX Reg is valid */
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* If not write some dummy val */
+ if (unlikely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
+/*
+ * Un-conditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned int entry;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* Load PD0 and PD1 with template for a Blank Entry */
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+
+ for (entry = 0; entry < mmu->num_tlb; entry++) {
+ /* write this entry to the TLB */
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to the Next ASID
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /*
+ * Small optimisation courtesy IA64
+ * flush_mm called during fork,exit,munmap etc, multiple times as well.
+ * Only for fork( ) do we need to move parent to a new MMU ctxt,
+ * all other cases are NOPs, hence this check.
+ */
+ if (atomic_read(&mm->mm_users) == 0)
+ return;
+
+ /*
+ * Workaround for Android weirdism:
+ * A binder VMA could end up in a task such that vma->mm != tsk->mm
+ * old code would cause h/w - s/w ASID to get out of sync
+ */
+ if (current->mm != mm)
+ destroy_context(mm);
+ else
+ get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a Range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive
+ * Difference between this and Kernel Range Flush is
+ * -Here the fastest way (if range is too large) is to move to next ASID
+ * without doing any explicit Shootdown
+ *  -In case of kernel Flush, the entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+ unsigned int asid;
+
+ /* If range @start to @end is more than 32 TLB entries deep,
+	 * it's better to move to a new ASID rather than searching for
+	 * individual entries and then shooting them down.
+	 *
+	 * The calc above is rough and doesn't account for unaligned parts,
+	 * since this is heuristics based anyway.
+ */
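+	/* e.g. with a hypothetical 8k page size, the cut-off below is 32 * 8k = 256k */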
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
+ /*
+ * @start moved to page start: this alone suffices for checking
+	 * the loop end condition below, w/o any need for aligning @end to a page end;
+	 * e.g. 2000 to 4001 will anyhow loop twice
+ */
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ asid = vma->vm_mm->context.asid;
+
+ if (asid != NO_ASID) {
+ while (start < end) {
+ tlb_entry_erase(start | (asid & 0xff));
+ start += PAGE_SIZE;
+ }
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ * @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start,@end alone (interpreted as user vaddr), although technically SASID
+ * is also needed. However our smart TLBProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /* exactly same as above, except for TLB entry not taking ASID */
+
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ while (start < end) {
+ tlb_entry_erase(start);
+ start += PAGE_SIZE;
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Delete TLB entry in MMU for a given page (??? address)
+ * NOTE One TLB entry contains translation for single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long flags;
+
+ /* Note that it is critical that interrupts are DISABLED between
+	 * checking the ASID and using it to flush the TLB entry
+ */
+ local_irq_save(flags);
+
+ if (vma->vm_mm->context.asid != NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) |
+ (vma->vm_mm->context.asid & 0xff));
+ utlb_invalidate();
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Routine to create a TLB entry
+ */
+void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ unsigned long flags;
+ unsigned int idx, asid_or_sasid;
+ unsigned long pd0_flags;
+
+ /*
+ * create_tlb() assumes that current->mm == vma->mm, since
+	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
+ * -completes the lazy write to SASID reg (again valid for curr tsk)
+ *
+ * Removing the assumption involves
+ * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
+ * -Fix the TLB paranoid debug code to not trigger false negatives.
+ * -More importantly it makes this handler inconsistent with fast-path
+ * TLB Refill handler which always deals with "current"
+ *
+	 * Let's see the use cases when current->mm != vma->mm and we land here
+ * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+ * Here VM wants to pre-install a TLB entry for user stack while
+ * current->mm still points to pre-execve mm (hence the condition).
+ * However the stack vaddr is soon relocated (randomization) and
+ * move_page_tables() tries to undo that TLB entry.
+ * Thus not creating TLB entry is not any worse.
+ *
+ * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+ * breakpoint in debugged task. Not creating a TLB now is not
+ * performance critical.
+ *
+	 * Neither of the cases above justifies the code churn.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ tlb_paranoid_check(vma->vm_mm->context.asid, address);
+
+ address &= PAGE_MASK;
+
+ /* update this PTE credentials */
+ pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+ /* Create HW TLB entry Flags (in PD0) from PTE Flags */
+#if (CONFIG_ARC_MMU_VER <= 2)
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
+#else
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
+#endif
+
+ /* ASID for this task */
+ asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+
+ /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
+ write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+
+ /* First verify if entry for this vaddr+ASID already exists */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /*
+ * If Not already present get a free slot from MMU.
+ * Otherwise, Probe would have located the entry and set INDEX Reg
+ * with existing location. This will cause Write CMD to over-write
+ * existing entry with new PD0 and PD1
+ */
+ if (likely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+ /*
+ * Commit the Entry to MMU
+	 * It doesn't sound safe to use the TLBWriteNI cmd here
+ * which doesn't flush uTLBs. I'd rather be safe than sorry.
+ */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+
+ local_irq_restore(flags);
+}
+
+/* arch hook called by core VM at the end of handle_mm_fault( ),
+ * when a new PTE is entered in Page Tables or an existing one
+ * is modified. We aggressively pre-install a TLB entry
+ */
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+ pte_t *ptep)
+{
+
+ create_tlb(vma, vaddress, ptep);
+}
+
+/* Read the MMU Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation is done here, simply read/convert the BCRs
+ */
+void __init read_decode_mmu_bcr(void)
+{
+ unsigned int tmp;
+ struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
+ struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ tmp = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (tmp >> 24);
+
+ if (mmu->ver <= 2) {
+ mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+ mmu->pg_sz = PAGE_SIZE;
+ mmu->sets = 1 << mmu2->sets;
+ mmu->ways = 1 << mmu2->ways;
+ mmu->u_dtlb = mmu2->u_dtlb;
+ mmu->u_itlb = mmu2->u_itlb;
+ } else {
+ mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu->pg_sz = 512 << mmu3->pg_sz;
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ mmu->u_dtlb = mmu3->u_dtlb;
+ mmu->u_itlb = mmu3->u_itlb;
+ }
+
+ mmu->num_tlb = mmu->sets * mmu->ways;
+}
+
+char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
+ p_mmu->ver, TO_KB(p_mmu->pg_sz));
+
+ n += scnprintf(buf + n, len - n,
+ "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
+ p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+ p_mmu->u_dtlb, p_mmu->u_itlb,
+ __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
+
+ return buf;
+}
+
+void __init arc_mmu_init(void)
+{
+ char str[256];
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
+
+	/* For efficiency's sake, the kernel is built at compile time for one MMU ver.
+	 * This must match the hardware it is running on.
+	 * Linux built for MMU V2, if run on MMU V1, will break down because V1
+	 *  hardware doesn't understand cmds such as WriteNI or IVUTLB.
+	 * On the other hand, Linux built for V1, if run on MMU V2, will do
+	 *   un-needed workarounds to prevent memcpy thrashing.
+	 * Similarly MMU V3 has new features which won't work on an older MMU.
+ */
+ if (mmu->ver != CONFIG_ARC_MMU_VER) {
+ panic("MMU ver %d doesn't match kernel built for %d...\n",
+ mmu->ver, CONFIG_ARC_MMU_VER);
+ }
+
+ if (mmu->pg_sz != PAGE_SIZE)
+ panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+ /*
+ * ASID mgmt data structures are compile time init
+ * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
+ */
+
+ local_flush_tlb_all();
+
+ /* Enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+
+ /* In smp we use this reg for interrupt 1 scratch */
+#ifndef CONFIG_SMP
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+#endif
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ * --------------------- -----------
+ * |way0|way1|way2|way3| |way0|way1|
+ * --------------------- -----------
+ * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
+ * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
+ * ~ ~ ~ ~
+ * [set127] | 508| 509| 510| 511| | 254| 255|
+ * --------------------- -----------
+ * For normal operations we don't (and must not) care how the above works since
+ * the MMU cmd getIndex(vaddr) abstracts that out.
+ * However for walking the WAYS of a SET, we need to know this.
+ */
+#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
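+
+/*
+ * e.g. for the 4 way geometry above, SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6,
+ * i.e. entry 6 in row [set1] of the left hand table.
+ */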
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -The MMU complains not at the time of duplicate PD installation, but at the
+ *  time of a lookup matching multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ * the duplicate one.
+ * -Knob to be verbose about it. (TODO: hook them up to debugfs)
+ */
+volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ int set, way, n;
+ unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
+ unsigned long flags, is_valid;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* re-enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+ /* read out all the ways of current set */
+ for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+ pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+ pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
+ is_valid |= pd0[way] & _PAGE_PRESENT;
+ }
+
+ /* If all the WAYS in SET are empty, skip to next SET */
+ if (!is_valid)
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+ for (way = 0; way < mmu->ways; way++) {
+ if (!pd0[way])
+ continue;
+
+ for (n = way + 1; n < mmu->ways; n++) {
+ if ((pd0[way] & PAGE_MASK) ==
+ (pd0[n] & PAGE_MASK)) {
+
+ if (dup_pd_verbose) {
+ pr_info("Duplicate PD's @"
+ "[%d:%d]/[%d:%d]\n",
+ set, way, set, n);
+ pr_info("TLBPD0[%u]: %08x\n",
+ way, pd0[way]);
+ }
+
+ /*
+ * clear entry @way and not @n. This is
+ * critical to our optimised loop
+ */
+ pd0[way] = pd1[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ __tlb_entry_erase();
+ }
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/***********************************************************************
+ * Diagnostic Routines
+ * -Called from Low Level TLB Handlers if things don't look good
+ **********************************************************************/
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+/*
+ * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
+ * don't match
+ */
+void print_asid_mismatch(int is_fast_path)
+{
+ int pid_sw, pid_hw;
+ pid_sw = current->active_mm->context.asid;
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
+ is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+
+ __asm__ __volatile__("flag 1");
+}
+
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+{
+ unsigned int pid_hw;
+
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+ print_asid_mismatch(0);
+}
+#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 000000000000..9df765dc7c3a
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,408 @@
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: April 2011 :
+ *  -MMU v1: moved out legacy code into a separate file
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *   entry, so that it doesn't knock out its I-TLB entry
+ * -Some more fine tuning:
+ * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ * -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ * Hence Leaner by 1.5 K
+ * Used Conditional arithmetic to replace excessive branching
+ * Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ * more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ * -Added Debug Code to check if sw-ASID == hw-ASID
+
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+ .cpu A7
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/tlb.h>
+#include <asm/pgtable.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+#if (CONFIG_ARC_MMU_VER == 1)
+#include <asm/tlb-mmu1.h>
+#endif
+
+;--------------------------------------------------------------------------
+; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
+; For details refer to comments before TLBMISS_FREEUP_REGS below
+;--------------------------------------------------------------------------
+
+ARCFP_DATA ex_saved_reg1
+ .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned
+ .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+ .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+ .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+ .size ex_saved_reg1, 16
+ex_saved_reg1:
+ .zero 16
+#endif
+
+;============================================================================
+; Troubleshooting Stuff
+;============================================================================
+
+; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
+; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
+; we use the MMU PID Reg to get current ASID.
+; In bizarre scenarios SW and HW ASID can get out-of-sync, which is trouble.
+; So we try to detect this in the TLB Miss handler
+
+
+.macro DBG_ASID_MISMATCH
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+ ; make sure h/w ASID is same as s/w ASID
+
+ GET_CURR_TASK_ON_CPU r3
+ ld r0, [r3, TASK_ACT_MM]
+ ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+
+ lr r1, [ARC_REG_PID]
+ and r1, r1, 0xFF
+ breq r1, r0, 5f
+
+ ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
+ lr r0, [erstatus]
+ bbit0 r0, STATUS_U_BIT, 5f
+
+	; We sure are in troubled waters; flag the error, but to do so we
+	; need to switch to the kernel mode stack to call the error routine
+ GET_TSK_STACK_BASE r3, sp
+
+	; Call printk to shout it out loud
+ mov r0, 1
+ j print_asid_mismatch
+
+5: ; ASIDs match so proceed normally
+ nop
+
+#endif
+
+.endm
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+ lr r2, [efa]
+
+#ifndef CONFIG_SMP
+ lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+ GET_CURR_TASK_ON_CPU r1
+ ld r1, [r1, TASK_ACT_MM]
+ ld r1, [r1, MM_PGD]
+#endif
+
+ lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
+ ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
+ and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
+ ; contains Ptr to Page Table
+ bz.d do_slow_path_pf ; if no Page Table, do page fault
+
+ ; Get the PTE entry: The idea is
+ ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
+ ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+ ; (3) z = pgtbl[y]
+ ; To avoid the multiply by 4 at the end, we do the -2, <<2 below
+
+ lsr r0, r2, (PAGE_SHIFT - 2)
+ and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
+ ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ and.f 0, r0, _PAGE_PRESENT
+ bz 1f
+ ld r2, [num_pte_not_present]
+ add r2, r2, 1
+ st r2, [num_pte_not_present]
+1:
+#endif
+
+.endm
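
The page-table walk that LOAD_FAULT_PTE performs corresponds roughly to the C sketch below (illustrative only, not kernel code; PGDIR_SHIFT, PAGE_MASK, PAGE_SHIFT and PTRS_PER_PTE are the constants the assembly uses, and the (PAGE_SHIFT - 2) shift simply folds the *4 byte scaling of the PTE load into the index arithmetic):

	/* Sketch of the lookup done by LOAD_FAULT_PTE; simplified, assumed types */
	static unsigned long *lookup_pte(unsigned long *pgd, unsigned long fault_addr)
	{
		unsigned long pgd_entry = pgd[fault_addr >> PGDIR_SHIFT];
		unsigned long *pgtbl = (unsigned long *)(pgd_entry & PAGE_MASK);

		if (!pgtbl)
			return NULL;		/* no page table: take the slow path */

		/* (1) drop page-offset bits, (2) mask to PTE index, (3) load PTE */
		return &pgtbl[(fault_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
	}
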
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+ and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
+ sr r3, [ARC_REG_TLBPD1] ; these go in PD1
+
+ and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+#if (CONFIG_ARC_MMU_VER <= 2) /* Need not be done with v3 onwards */
+ lsr r2, r2 ; shift PTE flags to match layout in PD0
+#endif
+
+ lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
+
+ or r3, r3, r2 ; S | vaddr | {sasid|asid}
+ sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+
+ /* Get free TLB slot: Set = computed from vaddr, way = random */
+ sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+ /* Commit the Write */
+#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
+ sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+#else
+ sr TLBWrite, [ARC_REG_TLBCOMMAND]
+#endif
+.endm
+
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save a handful of regs, as opposed to the complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus the scratch AUX reg is used (and no longer used to cache the task PGD).
+; To save the remaining 3 regs per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+ lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+#else
+ st r0, [@ex_saved_reg1]
+ mov_s r0, @ex_saved_reg1
+#endif
+ st_s r1, [r0, 4]
+ st_s r2, [r0, 8]
+ st_s r3, [r0, 12]
+
+ ; VERIFY if the ASID in MMU-PID Reg is same as
+ ; one in Linux data structures
+
+ DBG_ASID_MISMATCH
+.endm
+
+;-----------------------------------------------------------------
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+ lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ lr r0, [ARC_REG_SCRATCH_DATA0]
+#else
+ mov_s r0, @ex_saved_reg1
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ ld_s r0, [r0]
+#endif
+.endm
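
The per-cpu slot arithmetic used by TLBMISS_FREEUP_REGS / TLBMISS_RESTORE_REGS boils down to the following sketch (for illustration only; ex_saved_reg1 and L1_CACHE_SHIFT are the symbols from this file, and cpu_id stands for whatever GET_CPU_ID returns):

	/* Sketch: locate this CPU's scratch slot inside ex_saved_reg1 */
	static void *tlb_scratch_slot(void *ex_saved_reg1_base, unsigned int cpu_id)
	{
	#ifdef CONFIG_SMP
		/* one full cache line per CPU, so slots never share a line */
		return (char *)ex_saved_reg1_base + (cpu_id << L1_CACHE_SHIFT);
	#else
		return ex_saved_reg1_base;	/* single 16-byte slot */
	#endif
	}
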
+
+ARCFP_CODE ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissI
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numitlb]
+ add r0, r0, 1
+ st r0, [@numitlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Check if PTE permissions approp for executing code
+ cmp_s r2, VMALLOC_START
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
+
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
+ bnz do_slow_path_pf
+
+ ; Let Linux VM know that the page was accessed
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+ARC_EXIT EV_TLBMissI
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissD
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numdtlb]
+ add r0, r0, 1
+ st r0, [@numdtlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ ; If PTE exists, it sets up r0 = PTE, r1 = ptr to PTE
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+ mov_s r2, 0
+ lr r3, [ecr]
+ btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
+ or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
+ or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ ; Above laddering takes care of XCHG access
+ ; which is both Read and Write
+
+ ; If kernel mode access, make _PAGE_xx flags as _PAGE_K_xx
+ ; For copy_(to|from)_user, despite exception taken in kernel mode,
+ ; this code is not hit, because EFA would still be the user mode
+ ; address (EFA < 0x6000_0000).
+ ; This code is for legit kernel mode faults, vmalloc specifically
+ ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
+
+ lr r3, [efa]
+ cmp r3, VMALLOC_START - 1 ; If kernel mode access
+ asl.hi r2, r2, 3 ; make _PAGE_xx flags as _PAGE_K_xx
+ or r2, r2, _PAGE_PRESENT ; Common flag for K/U mode
+
+ ; By now, r2 setup with all the Flags we need to check in PTE
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+ ;----------------------------------------------------------------
+ ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+ lr r3, [ecr]
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
+ or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+
+#if (CONFIG_ARC_MMU_VER == 1)
+ ; MMU with 2 way set assoc J-TLB, needs some help in the pathological case
+ ; of memcpy where 3 parties contend for 2 ways, leading to a livelock.
+ ; But only for old MMU or one with Metal Fix
+ TLB_WRITE_HEURISTICS
+#endif
+
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+ ; Restore the 4 scratch regs saved by the fast path miss handler
+ TLBMISS_RESTORE_REGS
+
+ ; Slow path TLB Miss handled as a regular ARC Exception
+ ; (stack switching / save the complete reg-file).
+ ; That requires freeing up r9
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ; ------- setup args for Linux Page Fault Handler ---------
+ mov_s r0, sp
+ lr r2, [efa]
+ lr r3, [ecr]
+
+ ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
+ ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
+ ; DTLB-ld Miss
+ ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
+ ; Following code uses the fact that st/ex have one bit in common
+
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS
+ mov.z r1, 0
+ mov.nz r1, 1
+
+ ; We don't want exceptions to be disabled while the fault is handled.
+ ; Now that we have saved the context, we fake a return from exception,
+ ; hence exceptions get re-enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_page_fault
+ b ret_from_exception
+
+ARC_EXIT EV_TLBMissD
+
+ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr
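
The ECR-driven decisions in EV_TLBMissD and do_slow_path_pf above can be summarised in C roughly as follows (a sketch under the cause-code encoding stated in the comments: ld = 0x01, st = 0x02, ex = 0x03, with the _PAGE_K_xx flags being the user flags shifted left by 3; the function and parameter names here are illustrative, not kernel API):

	/* Sketch of the D-TLB miss permission/write-flag logic, not kernel code */
	static unsigned int dtlb_required_flags(unsigned int ecr_cause,
						unsigned long efa, int *write)
	{
		unsigned int flags = 0;

		if (ecr_cause & 0x01)		/* ld or ex miss: needs Read */
			flags |= _PAGE_READ;
		if (ecr_cause & 0x02)		/* st or ex miss: needs Write */
			flags |= _PAGE_WRITE;

		/* st and ex share bit 1, so that bit alone selects write = 1 */
		*write = !!(ecr_cause & 0x02);

		if (efa >= VMALLOC_START)	/* legit kernel (vmalloc) address */
			flags <<= 3;		/* _PAGE_xx -> _PAGE_K_xx */

		return flags | _PAGE_PRESENT;	/* PTE must have all these set */
	}
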
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
new file mode 100644
index 000000000000..ce417a6e70b8
--- /dev/null
+++ b/arch/arc/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
new file mode 100644
index 000000000000..c80fcad4a5a7
--- /dev/null
+++ b/arch/arc/oprofile/common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on orig code from @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ /*
+ * A failure here forces the oprofile core to switch to timer based PC
+ * sampling, which will happen if say perf is not enabled/available
+ */
+ return oprofile_perf_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+ oprofile_perf_exit();
+}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
new file mode 100644
index 000000000000..b41e786cdbc0
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_FPGA_LEGACY
+ bool "\"Legacy\" ARC FPGA dev Boards"
+ select ISS_SMP_EXTN if SMP
+ help
+ Support for ARC development boards, provided by Synopsys.
+ These are based on FPGA or ISS. e.g.
+ - ARCAngel4
+ - ML509
+ - MetaWare ISS
+
+if ARC_PLAT_FPGA_LEGACY
+
+config ARC_BOARD_ANGEL4
+ bool "ARC Angel4"
+ default y
+ help
+ ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
+
+config ARC_BOARD_ML509
+ bool "ML509"
+ help
+ ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
+
+config ISS_SMP_EXTN
+ bool "ARC SMP Extensions (ISS Models only)"
+ default n
+ depends on SMP
+ select ARC_HAS_COH_RTSC
+ help
+ SMP Extensions to ARC700, in a "simulation only" Model, supported in
+ ARC ISS (Instruction Set Simulator).
+ The SMP extensions include:
+ -IDU (Interrupt Distribution Unit)
+ -XTL (To enable CPU start/stop/set-PC for another CPU)
+ It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
+
+config ARC_SERIAL_BAUD
+ int "UART Baud rate"
+ default "115200"
+ depends on SERIAL_ARC || SERIAL_ARC_CONSOLE
+ help
+ Baud rate for the ARC UART
+
+menuconfig ARC_HAS_BVCI_LAT_UNIT
+ bool "BVCI Bus Latency Unit"
+ depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
+ help
+ IP to add artificial latency to BVCI Bus Based FPGA builds.
+ The default latency (even worst case) for FPGA is unrealistic
+ (~10 SDRAM, ~5 SSRAM).
+
+config BVCI_LAT_UNITS
+ hex "Latency Unit(s) Bitmap"
+ default "0x0"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+ help
+ There are multiple Latency Units corresponding to the many
+ interfaces of the system bus arbiter (both CPU side as well as
+ the peripheral side).
+ To add latency to ALL memory transactions, choose Unit 0; otherwise,
+ for finer grained, interface-wise latency, specify a bitmap (1 bit
+ per unit) of all units. e.g. 1,2,12 will be 0x1003
+
+ Unit 0 - System Arb and Mem Controller
+ Unit 1 - I$ and System Bus
+ Unit 2 - D$ and System Bus
+ ..
+ Unit 12 - IDE Disk controller and System Bus
+
+config BVCI_LAT_CYCLES
+ int "Latency Value in cycles"
+ range 0 63
+ default "30"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+
+endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
new file mode 100644
index 000000000000..a44e22ebc1b7
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include
+
+obj-y := platform.o irq.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
new file mode 100644
index 000000000000..41e335670f60
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, IRQ assignments to peripherals
+ */
+
+#ifndef __PLAT_IRQ_H
+#define __PLAT_IRQ_H
+
+#define UART0_IRQ 5
+#define UART1_IRQ 10
+#define UART2_IRQ 11
+
+#define VMAC_IRQ 6
+
+#define IDE_IRQ 13
+#define PCI_IRQ 14
+#define PS2_IRQ 15
+
+#ifdef CONFIG_SMP
+#define IDU_INTERRUPT_0 16
+#endif
+
+extern void __init plat_fpga_init_IRQ(void);
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
new file mode 100644
index 000000000000..1663f3388085
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/memmap.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, System Memory Map for Peripherals etc
+ */
+
+#ifndef __PLAT_MEMMAP_H
+#define __PLAT_MEMMAP_H
+
+#define UART0_BASE 0xC0FC1000
+#define UART1_BASE 0xC0FC1100
+
+#define VMAC_REG_BASEADDR 0xC0FC2000
+
+#define IDE_CONTROLLER_BASE 0xC0FC9000
+
+#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000
+
+#define PGU_BASEADDR 0xC0FC8000
+#define VLCK_ADDR 0xC0FCF028
+
+#define BVCI_LAT_UNIT_BASE 0xC0FED000
+
+#define PS2_BASE_ADDR 0xC0FCC000
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
new file mode 100644
index 000000000000..c09eb4cfc77c
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/smp.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Rajeshwar Ranga: Interrupt Distribution Unit APIs
+ */
+
+#ifndef __PLAT_ARCFPGA_SMP_H
+#define __PLAT_ARCFPGA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <asm/arcregs.h>
+
+#define ARC_AUX_IDU_REG_CMD 0x2000
+#define ARC_AUX_IDU_REG_PARAM 0x2001
+
+#define ARC_AUX_XTL_REG_CMD 0x2002
+#define ARC_AUX_XTL_REG_PARAM 0x2003
+
+#define ARC_REG_MP_BCR 0x2021
+
+#define ARC_XTL_CMD_WRITE_PC 0x04
+#define ARC_XTL_CMD_CLEAR_HALT 0x02
+
+/*
+ * Build Configuration Register which identifies the sub-components
+ */
+struct bcr_mp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
+#else
+ unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
+#endif
+};
+
+/* IDU supports 256 common interrupts */
+#define NR_IDU_IRQS 256
+
+/*
+ * The Aux Regs layout is the same bit-by-bit in both BE/LE modes.
+ * However when cast as a bitfield encoded "C" struct, gcc treats it as
+ * memory, generating different code for BE/LE, requiring structure adjustment
+ * (see include/asm/arcregs.h)
+ *
+ * However when manually "carving" the value for an Aux reg, no special handling
+ * of BE is needed because of the property described above
+ */
+#define IDU_SET_COMMAND(irq, cmd) \
+do { \
+ uint32_t __val; \
+ __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \
+ write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \
+} while (0)
+
+#define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
+#define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM)
+
+/* IDU Commands */
+#define IDU_DISABLE 0x00
+#define IDU_ENABLE 0x01
+#define IDU_IRQ_CLEAR 0x02
+#define IDU_IRQ_ASSERT 0x03
+#define IDU_IRQ_WMODE 0x04
+#define IDU_IRQ_STATUS 0x05
+#define IDU_IRQ_ACK 0x06
+#define IDU_IRQ_PEND 0x07
+#define IDU_IRQ_RMODE 0x08
+#define IDU_IRQ_WBITMASK 0x09
+#define IDU_IRQ_RBITMASK 0x0A
+
+#define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE)
+#define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE)
+
+#define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
+#define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
+
+/* IDU Interrupt Mode - Destination Encoding */
+#define IDU_IRQ_MOD_DISABLE 0x00
+#define IDU_IRQ_MOD_ROUND_RECP 0x01
+#define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02
+#define IDU_IRQ_MOD_TCPU_ALLRECP 0x03
+
+/* IDU Interrupt Mode - Triggering Mode */
+#define IDU_IRQ_MODE_LEVEL_TRIG 0x00
+#define IDU_IRQ_MODE_PULSE_TRIG 0x01
+
+#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \
+ (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
+
+struct idu_irq_config {
+ uint8_t irq;
+ uint8_t dest_mode;
+ uint8_t trig_mode;
+};
+
+struct idu_irq_status {
+ uint8_t irq;
+ bool enabled;
+ bool status;
+ bool ack;
+ bool pend;
+ uint8_t next_rr;
+};
+
+extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
+extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
+
+extern void iss_model_init_smp(unsigned int cpu);
+extern void iss_model_init_early_smp(void);
+
+#endif /* CONFIG_SMP */
+
+#endif
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
new file mode 100644
index 000000000000..d2215fd889c2
--- /dev/null
+++ b/arch/arc/plat-arcfpga/irq.c
@@ -0,0 +1,25 @@
+/*
+ * ARC FPGA Platform IRQ hookups
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <plat/irq.h>
+
+void __init plat_fpga_init_IRQ(void)
+{
+ /*
+ * SMP hack: the UART IRQ is hardwired to cpu0 (boot-cpu), but if the
+ * request_irq() comes from any other CPU, the low level IRQ unmasking
+ * essential for getting interrupts won't be done on cpu0, locking
+ * up the UART state machine.
+ */
+#ifdef CONFIG_SMP
+ arch_unmask_irq(UART0_IRQ);
+#endif
+}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
new file mode 100644
index 000000000000..4e20a1a5104d
--- /dev/null
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -0,0 +1,226 @@
+/*
+ * ARC FPGA Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+#include <plat/memmap.h>
+#include <plat/smp.h>
+#include <plat/irq.h>
+
+/*-----------------------BVCI Latency Unit -----------------------------*/
+
+#ifdef CONFIG_ARC_HAS_BVCI_LAT_UNIT
+
+int lat_cycles = CONFIG_BVCI_LAT_CYCLES;
+
+/* BVCI Bus Profiler: Latency Unit */
+static void __init setup_bvci_lat_unit(void)
+{
+#define MAX_BVCI_UNITS 12
+
+ unsigned int i;
+ unsigned int *base = (unsigned int *)BVCI_LAT_UNIT_BASE;
+ const unsigned long units_req = CONFIG_BVCI_LAT_UNITS;
+ const unsigned int REG_UNIT = 21;
+ const unsigned int REG_VAL = 22;
+
+ /*
+ * There are multiple Latency Units corresponding to the many
+ * interfaces of the system bus arbiter (both CPU side as well as
+ * the peripheral side).
+ *
+ * Unit 0 - System Arb and Mem Controller - adds latency to all
+ * memory transactions
+ * Unit 1 - I$ and System Bus
+ * Unit 2 - D$ and System Bus
+ * ..
+ * Unit 12 - IDE Disk controller and System Bus
+ *
+ * The programmer's model requires writing to the lat_unit reg first
+ * and then the latency value (cycles) to lat_value reg
+ */
+
+ if (CONFIG_BVCI_LAT_UNITS == 0) {
+ writel(0, base + REG_UNIT);
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for all Memory Transactions %d cycles\n",
+ lat_cycles);
+ } else {
+ for_each_set_bit(i, &units_req, MAX_BVCI_UNITS) {
+ writel(i + 1, base + REG_UNIT); /* loop is 0 based */
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for Unit[%d] = %d cycles\n",
+ (i + 1), lat_cycles);
+ }
+ }
+}
+#else
+static void __init setup_bvci_lat_unit(void)
+{
+}
+#endif
+
+/*----------------------- Platform Devices -----------------------------*/
+
+static unsigned long arc_uart_info[] = {
+ 0, /* uart->is_emulated (runtime @running_on_hw) */
+ 0, /* uart->port.uartclk */
+ 0, /* uart->baud */
+ 0
+};
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+/*
+ * static platform data - but only for early serial
+ * TBD: derive this from a special DT node
+ */
+static struct resource arc_uart0_res[] = {
+ {
+ .start = UART0_BASE,
+ .end = UART0_BASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = UART0_IRQ,
+ .end = UART0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device arc_uart0_dev = {
+ .name = "arc-uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(arc_uart0_res),
+ .resource = arc_uart0_res,
+ .dev = {
+ .platform_data = &arc_uart_info,
+ },
+};
+
+static struct platform_device *fpga_early_devs[] __initdata = {
+ &arc_uart0_dev,
+};
+#endif
+
+static void arc_fpga_serial_init(void)
+{
+ /* To let the driver work around an ISS bug: baudh Reg can't be set to 0 */
+ arc_uart_info[0] = !running_on_hw;
+
+ arc_uart_info[1] = arc_get_core_freq();
+
+ arc_uart_info[2] = CONFIG_ARC_SERIAL_BAUD;
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+ early_platform_add_devices(fpga_early_devs,
+ ARRAY_SIZE(fpga_early_devs));
+
+ /*
+ * ARC console driver registers itself as an early platform driver
+ * of class "earlyprintk".
+ * Install it here, followed by probe of devices.
+ * The installation here doesn't require earlyprintk in the command line.
+ * To make it depend on earlyprintk however, replace the lines below with
+ * parse_early_param();
+ * early_platform_driver_probe("earlyprintk", 1, 1);
+ * ^^
+ */
+ early_platform_driver_register_all("earlyprintk");
+ early_platform_driver_probe("earlyprintk", 1, 0);
+
+ /*
+ * This is to make sure that the arc uart remains the preferred console
+ * despite one/more of the following:
+ * -command line lacked "console=ttyARC0" or
+ * -CONFIG_VT_CONSOLE was enabled (for no reason whatsoever)
+ * Note that this needs to be done after the above early console is registered,
+ * otherwise the early console never gets a chance to run.
+ */
+ add_preferred_console("ttyARC", 0, "115200");
+#endif
+}
+
+static void __init plat_fpga_early_init(void)
+{
+ pr_info("[plat-arcfpga]: registering early dev resources\n");
+
+ setup_bvci_lat_unit();
+
+ arc_fpga_serial_init();
+
+#ifdef CONFIG_SMP
+ iss_model_init_early_smp();
+#endif
+}
+
+static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
+#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE)
+ OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
+#endif
+ {}
+};
+
+static void __init plat_fpga_populate_dev(void)
+{
+ pr_info("[plat-arcfpga]: registering device resources\n");
+
+ /*
+ * Traverses flattened DeviceTree - registering platform devices
+ * complete with their resources
+ */
+ of_platform_populate(NULL, of_default_bus_match_table,
+ plat_auxdata_lookup, NULL);
+}
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * Machine description is simply a set of platform/board specific callbacks.
+ * This is not directly related to DeviceTree based dynamic device creation;
+ * however, as part of the early device tree scan, we also select the right
+ * callback set by matching the DT compatible name.
+ */
+
+static const char *aa4_compat[] __initdata = {
+ "snps,arc-angel4",
+ NULL,
+};
+
+MACHINE_START(ANGEL4, "angel4")
+ .dt_compat = aa4_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
+
+static const char *ml509_compat[] __initdata = {
+ "snps,arc-ml509",
+ NULL,
+};
+
+MACHINE_START(ML509, "ml509")
+ .dt_compat = ml509_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
new file mode 100644
index 000000000000..91b55349a5f8
--- /dev/null
+++ b/arch/arc/plat-arcfpga/smp.c
@@ -0,0 +1,171 @@
+/*
+ * ARC700 Simulation-only Extensions for SMP
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineet Gupta - 2012 : split off arch common and plat specific SMP
+ * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit APIs
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <plat/irq.h>
+#include <plat/smp.h>
+
+static char smp_cpuinfo_buf[128];
+
+/*
+ *-------------------------------------------------------------------
+ * Platform specific callbacks expected by arch SMP code
+ *-------------------------------------------------------------------
+ */
+
+/*
+ * Master kick starting another CPU
+ */
+static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
+{
+ /* setup the start PC */
+ write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
+
+ /* Trigger WRITE_PC cmd for this cpu */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
+
+ /* Take the cpu out of Halt */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
+
+}
+
+/*
+ * Any SMP specific init any CPU does when it comes up.
+ * Here we set up the CPU to enable Inter-Processor Interrupts.
+ * Called for each CPU:
+ * -Master : init_IRQ()
+ * -Other(s) : start_kernel_secondary()
+ */
+void iss_model_init_smp(unsigned int cpu)
+{
+ /* Check if CPU is configured for more than 16 interrupts */
+ if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
+ panic("[arcfpga] IRQ system can't support IDU IPI\n");
+
+ idu_disable();
+
+ /****************************************************************
+ * IDU provides a set of Common IRQs, each of which can be dynamically
+ * attached to (1|many|all) CPUs.
+ * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
+ *
+ * Here we use a simple 1:1 mapping:
+ * A CPU 'x' is wired to Common IRQ 'x'.
+ * So an IDU ASSERT on IRQ 'x' will trigger an Interrupt on CPU 'x', which
+ * makes up for our simple IPI plumbing.
+ *
+ * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
+ * w/o having to do one-at-a-time
+ ******************************************************************/
+
+ /*
+ * Claim an IRQ which would trigger IPI on this CPU.
+ * In IDU parlance it involves setting up a cpu bitmask for the IRQ.
+ * The bitmap here contains only 1 CPU (self).
+ */
+ idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
+
+ /* Set the IRQ destination to use the bitmask above */
+ idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
+ IDU_IRQ_MODE_PULSE_TRIG);
+
+ idu_enable();
+
+ /* Attach the arch-common IPI ISR to our IDU IRQ */
+ smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
+}
+
+static void iss_model_ipi_send(void *arg)
+{
+ struct cpumask *callmap = arg;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, callmap)
+ idu_irq_assert(cpu);
+}
+
+static void iss_model_ipi_clear(int cpu, int irq)
+{
+ idu_irq_clear(IDU_INTERRUPT_0 + cpu);
+}
+
+void iss_model_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+
+ struct bcr_mp mp;
+
+ READ_BCR(ARC_REG_MP_BCR, mp);
+
+ sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
+ mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
+ IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
+
+ plat_smp_ops.info = smp_cpuinfo_buf;
+
+ plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
+ plat_smp_ops.ipi_send = iss_model_ipi_send;
+ plat_smp_ops.ipi_clear = iss_model_ipi_clear;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Low level Platform IPI Providers
+ *-------------------------------------------------------------------
+ */
+
+/* Set the Mode for the Common IRQ */
+void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
+{
+ uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
+
+ IDU_SET_PARAM(par);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
+}
+
+/* Set the target cpu Bitmask for Common IRQ */
+void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
+{
+ IDU_SET_PARAM(mask);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
+}
+
+/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
+bool idu_irq_get_ack(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
+
+/*
+ * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
+ * -Pending means CPU has not yet noticed the IRQ (e.g. disabled)
+ * -After the Interrupt has been taken, the IPI explicitly needs to be
+ * cleared, to be acknowledged.
+ */
+bool idu_irq_get_pend(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a955d89ed836..5b714695b01b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
+ select HAVE_VIRT_TO_BUS
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@@ -77,6 +78,27 @@ config ARM_DMA_USE_IOMMU
select ARM_HAS_SG_CHAIN
select NEED_SG_DMA_LENGTH
+if ARM_DMA_USE_IOMMU
+
+config ARM_DMA_IOMMU_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+ range 4 9
+ default 8
+ help
+ DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a waste of address space. Drivers which have
+ a relatively small addressing window (like 64 MiB) might run out of
+ virtual space with just a few allocations.
+
+ With this parameter you can specify the maximum PAGE_SIZE order for
+ DMA IOMMU buffers. Larger buffers will be aligned only to this
+ specified order. The order is expressed as a power of two multiplied
+ by the PAGE_SIZE.
+
+endif
+
config HAVE_PWM
bool
@@ -1654,7 +1676,6 @@ config HZ
int
default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
ARCH_S5PV210 || ARCH_EXYNOS4
- default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
default AT91_TIMER_HZ if ARCH_AT91
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
default 100
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 411ab1614a0e..9c6255884cbb 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -56,6 +56,7 @@ dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-dns320.dtb \
kirkwood-dockstar.dtb \
kirkwood-dreamplug.dtb \
kirkwood-goflexnet.dtb \
+ kirkwood-guruplug-server-plus.dtb \
kirkwood-ib62x0.dtb \
kirkwood-iconnect.dtb \
kirkwood-iomega_ix2_200.dtb \
@@ -78,11 +79,21 @@ dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
msm8960-cdp.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
armada-370-mirabox.dtb \
+ armada-370-rd.dtb \
armada-xp-db.dtb \
+ armada-xp-gp.dtb \
armada-xp-openblocks-ax3-4.dtb
-dtb-$(CONFIG_ARCH_MXC) += imx51-babbage.dtb \
+dtb-$(CONFIG_ARCH_MXC) += \
+ imx25-karo-tx25.dtb \
+ imx25-pdk.dtb \
+ imx27-apf27.dtb \
+ imx27-pdk.dtb \
+ imx31-bug.dtb \
+ imx51-apf51.dtb \
+ imx51-babbage.dtb \
imx53-ard.dtb \
imx53-evk.dtb \
+ imx53-mba53.dtb \
imx53-qsb.dtb \
imx53-smd.dtb \
imx6q-arm2.dtb \
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index c2f14e875eb6..0957645b73af 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -385,5 +385,19 @@
mac-address = [ 00 00 00 00 00 00 ];
};
};
+
+ ocmcram: ocmcram@40300000 {
+ compatible = "ti,am3352-ocmcram";
+ reg = <0x40300000 0x10000>;
+ ti,hwmods = "ocmcram";
+ ti,no_idle_on_suspend;
+ };
+
+ wkup_m3: wkup_m3@44d00000 {
+ compatible = "ti,am3353-wkup-m3";
+ reg = <0x44d00000 0x4000 /* M3 UMEM */
+ 0x44d80000 0x2000>; /* M3 DMEM */
+ ti,hwmods = "wkup_m3";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 9b82facb2561..e34b280ce6ec 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -59,5 +59,40 @@
phy = <&phy1>;
phy-mode = "rgmii-id";
};
+
+ mvsdio@d00d4000 {
+ pinctrl-0 = <&sdio_pins1>;
+ pinctrl-names = "default";
+ /*
+ * This device is disabled by default, because
+ * using the SD card connector requires
+ * changing the default CON40 connector
+ * "DB-88F6710_MPP_2xRGMII_DEVICE_Jumper" to a
+ * different connector
+ * "DB-88F6710_MPP_RGMII_SD_Jumper".
+ */
+ status = "disabled";
+ /* No CD or WP GPIOs */
+ };
+
+ usb@d0050000 {
+ status = "okay";
+ };
+
+ usb@d0051000 {
+ status = "okay";
+ };
+
+ spi0: spi@d0010600 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mx25l25635e";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <50000000>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index 3b4071336599..dd0c57dd9f30 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -52,5 +52,23 @@
phy = <&phy1>;
phy-mode = "rgmii-id";
};
+
+ mvsdio@d00d4000 {
+ pinctrl-0 = <&sdio_pins2>;
+ pinctrl-names = "default";
+ status = "okay";
+ /*
+ * No CD or WP GPIOs: SDIO interface used for
+ * Wifi/Bluetooth chip
+ */
+ };
+
+ usb@d0050000 {
+ status = "okay";
+ };
+
+ usb@d0051000 {
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
new file mode 100644
index 000000000000..f8e4855bc9a5
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -0,0 +1,68 @@
+/*
+ * Device Tree file for Marvell Armada 370 Reference Design board
+ * (RD-88F6710-A1)
+ *
+ * Copied from arch/arm/boot/dts/armada-370-db.dts
+ *
+ * Copyright (C) 2013 Florian Fainelli <florian@openwrt.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-370.dtsi"
+
+/ {
+ model = "Marvell Armada 370 Reference Design";
+ compatible = "marvell,a370-rd", "marvell,armada370", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>; /* 512 MB */
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ sata@d00a0000 {
+ nr-ports = <2>;
+ status = "okay";
+ };
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "sgmii";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
+
+ mvsdio@d00d4000 {
+ pinctrl-0 = <&sdio_pins1>;
+ pinctrl-names = "default";
+ status = "okay";
+ /* No CD or WP GPIOs */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 5b2922599f0e..6f1acc75e155 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -68,8 +68,9 @@
timer@d0020300 {
compatible = "marvell,armada-370-xp-timer";
- reg = <0xd0020300 0x30>;
- interrupts = <37>, <38>, <39>, <40>;
+ reg = <0xd0020300 0x30>,
+ <0xd0021040 0x30>;
+ interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
clocks = <&coreclk 2>;
};
@@ -137,6 +138,50 @@
reg = <0xd0010300 0x20>;
interrupts = <50>;
};
+
+ mvsdio@d00d4000 {
+ compatible = "marvell,orion-sdio";
+ reg = <0xd00d4000 0x200>;
+ interrupts = <54>;
+ clocks = <&gateclk 17>;
+ status = "disabled";
+ };
+
+ usb@d0050000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0xd0050000 0x500>;
+ interrupts = <45>;
+ status = "disabled";
+ };
+
+ usb@d0051000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0xd0051000 0x500>;
+ interrupts = <46>;
+ status = "disabled";
+ };
+
+ spi0: spi@d0010600 {
+ compatible = "marvell,orion-spi";
+ reg = <0xd0010600 0x28>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <30>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ spi1: spi@d0010680 {
+ compatible = "marvell,orion-spi";
+ reg = <0xd0010680 0x28>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <92>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 636cf7d4009e..8188d138020e 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -47,6 +47,18 @@
pinctrl {
compatible = "marvell,mv88f6710-pinctrl";
reg = <0xd0018000 0x38>;
+
+ sdio_pins1: sdio-pins1 {
+ marvell,pins = "mpp9", "mpp11", "mpp12",
+ "mpp13", "mpp14", "mpp15";
+ marvell,function = "sd0";
+ };
+
+ sdio_pins2: sdio-pins2 {
+ marvell,pins = "mpp47", "mpp48", "mpp49",
+ "mpp50", "mpp51", "mpp52";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
@@ -132,5 +144,14 @@
dmacap,memset;
};
};
+
+ usb@d0050000 {
+ clocks = <&coreclk 0>;
+ };
+
+ usb@d0051000 {
+ clocks = <&coreclk 0>;
+ };
+
};
};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index 8e53b25b5508..e83505e4c236 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -90,5 +90,36 @@
phy = <&phy3>;
phy-mode = "sgmii";
};
+
+ mvsdio@d00d4000 {
+ pinctrl-0 = <&sdio_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ /* No CD or WP GPIOs */
+ };
+
+ usb@d0050000 {
+ status = "okay";
+ };
+
+ usb@d0051000 {
+ status = "okay";
+ };
+
+ usb@d0052000 {
+ status = "okay";
+ };
+
+ spi0: spi@d0010600 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p64";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <20000000>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
new file mode 100644
index 000000000000..1c8afe2ffebc
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -0,0 +1,113 @@
+/*
+ * Device Tree file for Marvell Armada XP development board
+ * (DB-MV784MP-GP)
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-xp-mv78460.dtsi"
+
+/ {
+ model = "Marvell Armada XP Development Board DB-MV784MP-GP";
+ compatible = "marvell,axp-gp", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+
+ /*
+ * 4 GB of plug-in RAM modules by default, but only 3 GB
+ * are visible; the amount of memory available can be
+ * changed by the bootloader according to the size of the
+ * modules actually plugged in
+ */
+ reg = <0x00000000 0xC0000000>;
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012100 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012200 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012300 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+
+ sata@d00a0000 {
+ nr-ports = <2>;
+ status = "okay";
+ };
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <16>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <17>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <18>;
+ };
+
+ phy3: ethernet-phy@3 {
+ reg = <19>;
+ };
+ };
+
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0030000 {
+ status = "okay";
+ phy = <&phy2>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0034000 {
+ status = "okay";
+ phy = <&phy3>;
+ phy-mode = "rgmii-id";
+ };
+
+ spi0: spi@d0010600 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "n25q128a13";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index e041f42ed711..f56c40599f5b 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -47,6 +47,12 @@
pinctrl {
compatible = "marvell,mv78230-pinctrl";
reg = <0xd0018000 0x38>;
+
+ sdio_pins: sdio-pins {
+ marvell,pins = "mpp30", "mpp31", "mpp32",
+ "mpp33", "mpp34", "mpp35";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 9e23bd8c9536..f8f2b787d2b0 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -48,6 +48,12 @@
pinctrl {
compatible = "marvell,mv78260-pinctrl";
reg = <0xd0018000 0x38>;
+
+ sdio_pins: sdio-pins {
+ marvell,pins = "mpp30", "mpp31", "mpp32",
+ "mpp33", "mpp34", "mpp35";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index 965966110e38..936c25dc32b0 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -63,6 +63,12 @@
pinctrl {
compatible = "marvell,mv78460-pinctrl";
reg = <0xd0018000 0x38>;
+
+ sdio_pins: sdio-pins {
+ marvell,pins = "mpp30", "mpp31", "mpp32",
+ "mpp33", "mpp34", "mpp35";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index b42652fd3d8c..3818a82176a2 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -66,6 +66,18 @@
};
};
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ label = "Init Button";
+ linux,code = <116>;
+ gpios = <&gpio1 28 0>;
+ };
+ };
+
mdio {
phy0: ethernet-phy@0 {
reg = <0>;
@@ -121,5 +133,11 @@
nr-ports = <2>;
status = "okay";
};
+ usb@d0050000 {
+ status = "okay";
+ };
+ usb@d0051000 {
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 2e37ef101c90..1443949c165e 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -30,7 +30,7 @@
};
mpic: interrupt-controller@d0020000 {
- reg = <0xd0020a00 0x1d0>,
+ reg = <0xd0020a00 0x2d0>,
<0xd0021070 0x58>;
};
@@ -134,5 +134,22 @@
dmacap,memset;
};
};
+
+ usb@d0050000 {
+ clocks = <&gateclk 18>;
+ };
+
+ usb@d0051000 {
+ clocks = <&gateclk 19>;
+ };
+
+ usb@d0052000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0xd0052000 0x500>;
+ interrupts = <47>;
+ clocks = <&gateclk 20>;
+ status = "disabled";
+ };
+
};
};
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index cdee96fca6e2..7e3065abd751 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -17,12 +17,33 @@
leds {
compatible = "gpio-leds";
+ pinctrl-0 = <&pmx_gpio_18>;
+ pinctrl-names = "default";
+
power {
label = "Power";
gpios = <&gpio0 18 1>;
linux,default-trigger = "default-on";
};
};
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 1 0>;
+ };
+ };
};
&uart0 { status = "okay"; };
@@ -47,9 +68,14 @@
};
&pinctrl {
- pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;
+ pinctrl-0 = <&pmx_gpio_1 &pmx_gpio_12>;
pinctrl-names = "default";
+ pmx_gpio_1: pmx-gpio-1 {
+ marvell,pins = "mpp1";
+ marvell,function = "gpio";
+ };
+
pmx_gpio_12: pmx-gpio-12 {
marvell,pins = "mpp12";
marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 740630f9cd65..67dbe20868a2 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -55,7 +55,7 @@
reg = <0x12000 0x100>;
reg-shift = <2>;
interrupts = <7>;
- clock-frequency = <166666667>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -64,7 +64,7 @@
reg = <0x12100 0x100>;
reg-shift = <2>;
interrupts = <8>;
- clock-frequency = <166666667>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -73,7 +73,7 @@
reg = <0x12000 0x100>;
reg-shift = <2>;
interrupts = <9>;
- clock-frequency = <166666667>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -82,7 +82,7 @@
reg = <0x12100 0x100>;
reg-shift = <2>;
interrupts = <10>;
- clock-frequency = <166666667>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -156,6 +156,22 @@
status = "disabled";
};
+ ehci0: usb-host@50000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x50000 0x1000>;
+ interrupts = <24>;
+ clocks = <&gate_clk 0>;
+ status = "okay";
+ };
+
+ ehci1: usb-host@51000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x51000 0x1000>;
+ interrupts = <25>;
+ clocks = <&gate_clk 1>;
+ status = "okay";
+ };
+
sdio0: sdio@92000 {
compatible = "marvell,dove-sdhci";
reg = <0x92000 0x100>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 4db9db0a8443..1b8d4106d338 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -151,6 +151,7 @@
reg = <0>;
bus-width = <4>;
samsung,cd-pinmux-gpio = <&gpc3 2 2 3 3>;
+ disable-wp;
gpios = <&gpc3 0 2 0 3>, <&gpc3 1 2 0 3>,
<&gpc3 3 2 3 3>, <&gpc3 4 2 3 3>,
<&gpc3 5 2 3 3>, <&gpc3 6 2 3 3>,
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f50b4e854355..b1ac73e21c80 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -312,24 +312,36 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x121A0000 0x1000>;
interrupts = <0 34 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
pdma1: pdma@121B0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x121B0000 0x1000>;
interrupts = <0 35 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
mdma0: mdma@10800000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
};
mdma1: mdma@11C10000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x11C10000 0x1000>;
interrupts = <0 124 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
};
};
diff --git a/arch/arm/boot/dts/imx25-karo-tx25.dts b/arch/arm/boot/dts/imx25-karo-tx25.dts
index d81f8a0b9794..1a9d0491cdce 100644
--- a/arch/arm/boot/dts/imx25-karo-tx25.dts
+++ b/arch/arm/boot/dts/imx25-karo-tx25.dts
@@ -19,26 +19,18 @@
memory {
reg = <0x80000000 0x02000000 0x90000000 0x02000000>;
};
+};
- soc {
- aips@43f00000 {
- uart1: serial@43f90000 {
- status = "okay";
- };
- };
+&uart1 {
+ status = "okay";
+};
- spba@50000000 {
- fec: ethernet@50038000 {
- status = "okay";
- phy-mode = "rmii";
- };
- };
+&fec {
+ phy-mode = "rmii";
+ status = "okay";
+};
- emi@80000000 {
- nand@bb000000 {
- nand-on-flash-bbt;
- status = "okay";
- };
- };
- };
+&nfc {
+ nand-on-flash-bbt;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
new file mode 100644
index 000000000000..a02a860afd18
--- /dev/null
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx25.dtsi"
+
+/ {
+ model = "Freescale i.MX25 Product Development Kit";
+ compatible = "fsl,imx25-pdk", "fsl,imx25";
+
+ memory {
+ reg = <0x80000000 0x4000000>;
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&fec {
+ phy-mode = "rmii";
+ status = "okay";
+};
+
+&nfc {
+ nand-on-flash-bbt;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e1b13ebc96d6..94f33059158a 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -499,7 +499,7 @@
reg = <0x80000000 0x3b002000>;
ranges;
- nand@bb000000 {
+ nfc: nand@bb000000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx27-apf27.dts b/arch/arm/boot/dts/imx27-apf27.dts
index c0327c054de2..b464c807d8d9 100644
--- a/arch/arm/boot/dts/imx27-apf27.dts
+++ b/arch/arm/boot/dts/imx27-apf27.dts
@@ -32,58 +32,54 @@
clock-frequency = <0>;
};
};
+};
- soc {
- aipi@10000000 {
- serial@1000a000 {
- status = "okay";
- };
+&uart1 {
+ status = "okay";
+};
- ethernet@1002b000 {
- status = "okay";
- };
- };
+&fec {
+ status = "okay";
+};
- nand@d8000000 {
- status = "okay";
- nand-bus-width = <16>;
- nand-ecc-mode = "hw";
- nand-on-flash-bbt;
+&nfc {
+ status = "okay";
+ nand-bus-width = <16>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
- partition@0 {
- label = "u-boot";
- reg = <0x0 0x100000>;
- };
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x100000>;
+ };
- partition@100000 {
- label = "env";
- reg = <0x100000 0x80000>;
- };
+ partition@100000 {
+ label = "env";
+ reg = <0x100000 0x80000>;
+ };
- partition@180000 {
- label = "env2";
- reg = <0x180000 0x80000>;
- };
+ partition@180000 {
+ label = "env2";
+ reg = <0x180000 0x80000>;
+ };
- partition@200000 {
- label = "firmware";
- reg = <0x200000 0x80000>;
- };
+ partition@200000 {
+ label = "firmware";
+ reg = <0x200000 0x80000>;
+ };
- partition@280000 {
- label = "dtb";
- reg = <0x280000 0x80000>;
- };
+ partition@280000 {
+ label = "dtb";
+ reg = <0x280000 0x80000>;
+ };
- partition@300000 {
- label = "kernel";
- reg = <0x300000 0x500000>;
- };
+ partition@300000 {
+ label = "kernel";
+ reg = <0x300000 0x500000>;
+ };
- partition@800000 {
- label = "rootfs";
- reg = <0x800000 0xf800000>;
- };
- };
+ partition@800000 {
+ label = "rootfs";
+ reg = <0x800000 0xf800000>;
};
};
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-pdk.dts
index fa04c7b18bcb..41cd1105608e 100644
--- a/arch/arm/boot/dts/imx27-3ds.dts
+++ b/arch/arm/boot/dts/imx27-pdk.dts
@@ -13,25 +13,19 @@
/include/ "imx27.dtsi"
/ {
- model = "mx27_3ds";
- compatible = "freescale,imx27-3ds", "fsl,imx27";
+ model = "Freescale i.MX27 Product Development Kit";
+ compatible = "fsl,imx27-pdk", "fsl,imx27";
memory {
reg = <0x0 0x0>;
};
+};
- soc {
- aipi@10000000 { /* aipi1 */
- uart1: serial@1000a000 {
- fsl,uart-has-rtscts;
- status = "okay";
- };
- };
+&uart1 {
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
- aipi@10020000 { /* aipi2 */
- ethernet@1002b000 {
- status = "okay";
- };
- };
- };
+&fec {
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/imx31-bug.dts b/arch/arm/boot/dts/imx31-bug.dts
index 7f67402328d3..9ac6f6ba1d64 100644
--- a/arch/arm/boot/dts/imx31-bug.dts
+++ b/arch/arm/boot/dts/imx31-bug.dts
@@ -19,13 +19,9 @@
memory {
reg = <0x80000000 0x8000000>; /* 128M */
};
+};
- soc {
- aips@43f00000 { /* AIPS1 */
- uart5: serial@43fb4000 {
- fsl,uart-has-rtscts;
- status = "okay";
- };
- };
- };
+&uart5 {
+ fsl,uart-has-rtscts;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-apf51.dts b/arch/arm/boot/dts/imx51-apf51.dts
new file mode 100644
index 000000000000..92d3a66a69e2
--- /dev/null
+++ b/arch/arm/boot/dts/imx51-apf51.dts
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Armadeus Systems - <support@armadeus.com>
+ * Copyright 2012 Laurent Cans <laurent.cans@gmail.com>
+ *
+ * Based on mx51-babbage.dts
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx51.dtsi"
+
+/ {
+ model = "Armadeus Systems APF51 module";
+ compatible = "armadeus,imx51-apf51", "fsl,imx51";
+
+ memory {
+ reg = <0x90000000 0x20000000>;
+ };
+
+ clocks {
+ ckih1 {
+ clock-frequency = <0>;
+ };
+
+ osc {
+ clock-frequency = <33554432>;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_2>;
+ phy-mode = "mii";
+ phy-reset-gpios = <&gpio3 0 0>;
+ phy-reset-duration = <1>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_2>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 567e7ee72f91..aab6e43219af 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -21,239 +21,20 @@
reg = <0x90000000 0x20000000>;
};
- soc {
- display@di0 {
- compatible = "fsl,imx-parallel-display";
- crtcs = <&ipu 0>;
- interface-pix-fmt = "rgb24";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ipu_disp1_1>;
- };
-
- display@di1 {
- compatible = "fsl,imx-parallel-display";
- crtcs = <&ipu 1>;
- interface-pix-fmt = "rgb565";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ipu_disp2_1>;
- };
-
- aips@70000000 { /* aips-1 */
- spba@70000000 {
- esdhc@70004000 { /* ESDHC1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc1_1>;
- fsl,cd-controller;
- fsl,wp-controller;
- status = "okay";
- };
-
- esdhc@70008000 { /* ESDHC2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc2_1>;
- cd-gpios = <&gpio1 6 0>;
- wp-gpios = <&gpio1 5 0>;
- status = "okay";
- };
-
- uart3: serial@7000c000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart3_1>;
- fsl,uart-has-rtscts;
- status = "okay";
- };
-
- ecspi@70010000 { /* ECSPI1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ecspi1_1>;
- fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio4 24 0>, <&gpio4 25 0>;
- status = "okay";
-
- pmic: mc13892@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mc13892";
- spi-max-frequency = <6000000>;
- reg = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <8 0x4>;
-
- regulators {
- sw1_reg: sw1 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1375000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- sw2_reg: sw2 {
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1850000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- sw3_reg: sw3 {
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1850000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- sw4_reg: sw4 {
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1850000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- vpll_reg: vpll {
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- vdig_reg: vdig {
- regulator-min-microvolt = <1650000>;
- regulator-max-microvolt = <1650000>;
- regulator-boot-on;
- };
-
- vsd_reg: vsd {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3150000>;
- };
-
- vusb2_reg: vusb2 {
- regulator-min-microvolt = <2400000>;
- regulator-max-microvolt = <2775000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- vvideo_reg: vvideo {
- regulator-min-microvolt = <2775000>;
- regulator-max-microvolt = <2775000>;
- };
-
- vaudio_reg: vaudio {
- regulator-min-microvolt = <2300000>;
- regulator-max-microvolt = <3000000>;
- };
-
- vcam_reg: vcam {
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <3000000>;
- };
-
- vgen1_reg: vgen1 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- vgen2_reg: vgen2 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3150000>;
- regulator-always-on;
- };
-
- vgen3_reg: vgen3 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2900000>;
- regulator-always-on;
- };
- };
- };
-
- flash: at45db321d@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
- spi-max-frequency = <25000000>;
- reg = <1>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0x0 0x40000>;
- read-only;
- };
-
- partition@40000 {
- label = "Kernel";
- reg = <0x40000 0x3c0000>;
- };
- };
- };
-
- ssi2: ssi@70014000 {
- fsl,mode = "i2s-slave";
- status = "okay";
- };
- };
-
- iomuxc@73fa8000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 694 0x20d5 /* MX51_PAD_GPIO1_0__SD1_CD */
- 697 0x20d5 /* MX51_PAD_GPIO1_1__SD1_WP */
- 737 0x100 /* MX51_PAD_GPIO1_5__GPIO1_5 */
- 740 0x100 /* MX51_PAD_GPIO1_6__GPIO1_6 */
- 121 0x5 /* MX51_PAD_EIM_A27__GPIO2_21 */
- 402 0x85 /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
- 405 0x85 /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
- >;
- };
- };
- };
-
- uart1: serial@73fbc000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
- fsl,uart-has-rtscts;
- status = "okay";
- };
-
- uart2: serial@73fc0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart2_1>;
- status = "okay";
- };
- };
-
- aips@80000000 { /* aips-2 */
- i2c@83fc4000 { /* I2C2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2_1>;
- status = "okay";
-
- sgtl5000: codec@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- clock-frequency = <26000000>;
- VDDA-supply = <&vdig_reg>;
- VDDIO-supply = <&vvideo_reg>;
- };
- };
-
- audmux@83fd0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_audmux_1>;
- status = "okay";
- };
+ display@di0 {
+ compatible = "fsl,imx-parallel-display";
+ crtcs = <&ipu 0>;
+ interface-pix-fmt = "rgb24";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu_disp1_1>;
+ };
- ethernet@83fec000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec_1>;
- phy-mode = "mii";
- status = "okay";
- };
- };
+ display@di1 {
+ compatible = "fsl,imx-parallel-display";
+ crtcs = <&ipu 1>;
+ interface-pix-fmt = "rgb565";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu_disp2_1>;
};
gpio-keys {
@@ -281,3 +62,236 @@
mux-ext-port = <3>;
};
};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_1>;
+ fsl,cd-controller;
+ fsl,wp-controller;
+ status = "okay";
+};
+
+&esdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc2_1>;
+ cd-gpios = <&gpio1 6 0>;
+ wp-gpios = <&gpio1 5 0>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_1>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio4 24 0>, <&gpio4 25 0>;
+ status = "okay";
+
+ pmic: mc13892@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mc13892";
+ spi-max-frequency = <6000000>;
+ reg = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 0x4>;
+
+ regulators {
+ sw1_reg: sw1 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1375000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: sw3 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vpll_reg: vpll {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdig_reg: vdig {
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ };
+
+ vsd_reg: vsd {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3150000>;
+ };
+
+ vusb2_reg: vusb2 {
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vvideo_reg: vvideo {
+ regulator-min-microvolt = <2775000>;
+ regulator-max-microvolt = <2775000>;
+ };
+
+ vaudio_reg: vaudio {
+ regulator-min-microvolt = <2300000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ vcam_reg: vcam {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ flash: at45db321d@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <25000000>;
+ reg = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0x3c0000>;
+ };
+ };
+};
+
+&ssi2 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 694 0x20d5 /* MX51_PAD_GPIO1_0__SD1_CD */
+ 697 0x20d5 /* MX51_PAD_GPIO1_1__SD1_WP */
+ 737 0x100 /* MX51_PAD_GPIO1_5__GPIO1_5 */
+ 740 0x100 /* MX51_PAD_GPIO1_6__GPIO1_6 */
+ 121 0x5 /* MX51_PAD_EIM_A27__GPIO2_21 */
+ 402 0x85 /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
+ 405 0x85 /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
+ >;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_1>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clock-frequency = <26000000>;
+ VDDA-supply = <&vdig_reg>;
+ VDDIO-supply = <&vvideo_reg>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "mii";
+ status = "okay";
+};
+
+&kpp {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_kpp_1>;
+ linux,keymap = <0x00000067 /* KEY_UP */
+ 0x0001006c /* KEY_DOWN */
+ 0x00020072 /* KEY_VOLUMEDOWN */
+ 0x00030066 /* KEY_HOME */
+ 0x0100006a /* KEY_RIGHT */
+ 0x01010069 /* KEY_LEFT */
+ 0x0102001c /* KEY_ENTER */
+ 0x01030073 /* KEY_VOLUMEUP */
+ 0x02000040 /* KEY_F6 */
+ 0x02010042 /* KEY_F8 */
+ 0x02020043 /* KEY_F9 */
+ 0x02030044 /* KEY_F10 */
+ 0x0300003b /* KEY_F1 */
+ 0x0301003c /* KEY_F2 */
+ 0x0302003d /* KEY_F3 */
+ 0x03030074>; /* KEY_POWER */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 1f5d45eff45e..fcf035bf7c5a 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -221,6 +221,14 @@
#interrupt-cells = <2>;
};
+ kpp: kpp@73f94000 {
+ compatible = "fsl,imx51-kpp", "fsl,imx21-kpp";
+ reg = <0x73f94000 0x4000>;
+ interrupts = <60>;
+ clocks = <&clks 0>;
+ status = "disabled";
+ };
+
wdog1: wdog@73f98000 {
compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
reg = <0x73f98000 0x4000>;
@@ -273,6 +281,29 @@
260 0x80000000 /* MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK */
>;
};
+
+ pinctrl_fec_2: fecgrp-2 {
+ fsl,pins = <
+ 589 0x80000000 /* MX51_PAD_DI_GP3__FEC_TX_ER */
+ 592 0x80000000 /* MX51_PAD_DI2_PIN4__FEC_CRS */
+ 594 0x80000000 /* MX51_PAD_DI2_PIN2__FEC_MDC */
+ 596 0x80000000 /* MX51_PAD_DI2_PIN3__FEC_MDIO */
+ 598 0x80000000 /* MX51_PAD_DI2_DISP_CLK__FEC_RDATA1 */
+ 602 0x80000000 /* MX51_PAD_DI_GP4__FEC_RDATA2 */
+ 604 0x80000000 /* MX51_PAD_DISP2_DAT0__FEC_RDATA3 */
+ 609 0x80000000 /* MX51_PAD_DISP2_DAT1__FEC_RX_ER */
+ 618 0x80000000 /* MX51_PAD_DISP2_DAT6__FEC_TDATA1 */
+ 623 0x80000000 /* MX51_PAD_DISP2_DAT7__FEC_TDATA2 */
+ 628 0x80000000 /* MX51_PAD_DISP2_DAT8__FEC_TDATA3 */
+ 634 0x80000000 /* MX51_PAD_DISP2_DAT9__FEC_TX_EN */
+ 639 0x80000000 /* MX51_PAD_DISP2_DAT10__FEC_COL */
+ 644 0x80000000 /* MX51_PAD_DISP2_DAT11__FEC_RX_CLK */
+ 649 0x80000000 /* MX51_PAD_DISP2_DAT12__FEC_RX_DV */
+ 653 0x80000000 /* MX51_PAD_DISP2_DAT13__FEC_TX_CLK */
+ 657 0x80000000 /* MX51_PAD_DISP2_DAT14__FEC_RDATA0 */
+ 662 0x80000000 /* MX51_PAD_DISP2_DAT15__FEC_TDATA0 */
+ >;
+ };
};
ecspi1 {
@@ -409,6 +440,28 @@
49 0x1c5 /* MX51_PAD_EIM_D24__UART3_CTS */
>;
};
+
+ pinctrl_uart3_2: uart3grp-2 {
+ fsl,pins = <
+ 434 0x1c5 /* MX51_PAD_UART3_RXD__UART3_RXD */
+ 430 0x1c5 /* MX51_PAD_UART3_TXD__UART3_TXD */
+ >;
+ };
+ };
+
+ kpp {
+ pinctrl_kpp_1: kppgrp-1 {
+ fsl,pins = <
+ 438 0xe0 /* MX51_PAD_KEY_ROW0__KEY_ROW0 */
+ 439 0xe0 /* MX51_PAD_KEY_ROW1__KEY_ROW1 */
+ 440 0xe0 /* MX51_PAD_KEY_ROW2__KEY_ROW2 */
+ 441 0xe0 /* MX51_PAD_KEY_ROW3__KEY_ROW3 */
+ 442 0xe8 /* MX51_PAD_KEY_COL0__KEY_COL0 */
+ 444 0xe8 /* MX51_PAD_KEY_COL1__KEY_COL1 */
+ 446 0xe8 /* MX51_PAD_KEY_COL2__KEY_COL2 */
+ 448 0xe8 /* MX51_PAD_KEY_COL3__KEY_COL3 */
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 4be76f223526..e049fd0319e8 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -21,72 +21,6 @@
reg = <0x70000000 0x40000000>;
};
- soc {
- aips@50000000 { /* AIPS1 */
- spba@50000000 {
- esdhc@50004000 { /* ESDHC1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc1_2>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
- status = "okay";
- };
- };
-
- iomuxc@53fa8000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 1077 0x80000000 /* MX53_PAD_GPIO_1__GPIO1_1 */
- 1085 0x80000000 /* MX53_PAD_GPIO_9__GPIO1_9 */
- 486 0x80000000 /* MX53_PAD_EIM_EB3__GPIO2_31 */
- 739 0x80000000 /* MX53_PAD_GPIO_10__GPIO4_0 */
- 218 0x80000000 /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
- 226 0x80000000 /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
- 233 0x80000000 /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
- 241 0x80000000 /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
- 429 0x80000000 /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
- 435 0x80000000 /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
- 441 0x80000000 /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
- 448 0x80000000 /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
- 456 0x80000000 /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
- 464 0x80000000 /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
- 471 0x80000000 /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
- 477 0x80000000 /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
- 492 0x80000000 /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
- 500 0x80000000 /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
- 508 0x80000000 /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
- 516 0x80000000 /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
- 524 0x80000000 /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
- 532 0x80000000 /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
- 540 0x80000000 /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
- 548 0x80000000 /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
- 637 0x80000000 /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
- 642 0x80000000 /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
- 647 0x80000000 /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
- 652 0x80000000 /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
- 657 0x80000000 /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
- 662 0x80000000 /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
- 667 0x80000000 /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
- 611 0x80000000 /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
- 616 0x80000000 /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
- 607 0x80000000 /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
- >;
- };
- };
- };
-
- uart1: serial@53fbc000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_2>;
- status = "okay";
- };
- };
- };
-
eim-cs1@f4000000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -162,3 +96,63 @@
};
};
};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_2>;
+ cd-gpios = <&gpio1 1 0>;
+ wp-gpios = <&gpio1 9 0>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1077 0x80000000 /* MX53_PAD_GPIO_1__GPIO1_1 */
+ 1085 0x80000000 /* MX53_PAD_GPIO_9__GPIO1_9 */
+ 486 0x80000000 /* MX53_PAD_EIM_EB3__GPIO2_31 */
+ 739 0x80000000 /* MX53_PAD_GPIO_10__GPIO4_0 */
+ 218 0x80000000 /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
+ 226 0x80000000 /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
+ 233 0x80000000 /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
+ 241 0x80000000 /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
+ 429 0x80000000 /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
+ 435 0x80000000 /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
+ 441 0x80000000 /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
+ 448 0x80000000 /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
+ 456 0x80000000 /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
+ 464 0x80000000 /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
+ 471 0x80000000 /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
+ 477 0x80000000 /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
+ 492 0x80000000 /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
+ 500 0x80000000 /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
+ 508 0x80000000 /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
+ 516 0x80000000 /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
+ 524 0x80000000 /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
+ 532 0x80000000 /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
+ 540 0x80000000 /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
+ 548 0x80000000 /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
+ 637 0x80000000 /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
+ 642 0x80000000 /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
+ 647 0x80000000 /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
+ 652 0x80000000 /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
+ 657 0x80000000 /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
+ 662 0x80000000 /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
+ 667 0x80000000 /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
+ 611 0x80000000 /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
+ 616 0x80000000 /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
+ 607 0x80000000 /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
+ >;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index a124d1e25258..85a89b52f9b8 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -21,107 +21,6 @@
reg = <0x70000000 0x80000000>;
};
- soc {
- aips@50000000 { /* AIPS1 */
- spba@50000000 {
- esdhc@50004000 { /* ESDHC1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc1_1>;
- cd-gpios = <&gpio3 13 0>;
- wp-gpios = <&gpio3 14 0>;
- status = "okay";
- };
-
- ecspi@50010000 { /* ECSPI1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ecspi1_1>;
- fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
- status = "okay";
-
- flash: at45db321d@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
- spi-max-frequency = <25000000>;
- reg = <1>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0x0 0x40000>;
- read-only;
- };
-
- partition@40000 {
- label = "Kernel";
- reg = <0x40000 0x3c0000>;
- };
- };
- };
-
- esdhc@50020000 { /* ESDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc3_1>;
- cd-gpios = <&gpio3 11 0>;
- wp-gpios = <&gpio3 12 0>;
- status = "okay";
- };
- };
-
- iomuxc@53fa8000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 424 0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
- 449 0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
- 693 0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
- 697 0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
- 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
- 705 0x80000000 /* MX53_PAD_EIM_DA14__GPIO3_14 */
- 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
- 873 0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
- >;
- };
- };
- };
-
- uart1: serial@53fbc000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
- status = "okay";
- };
- };
-
- aips@60000000 { /* AIPS2 */
- i2c@63fc4000 { /* I2C2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2_1>;
- status = "okay";
-
- pmic: mc13892@08 {
- compatible = "fsl,mc13892", "fsl,mc13xxx";
- reg = <0x08>;
- };
-
- codec: sgtl5000@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- };
- };
-
- ethernet@63fec000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec_1>;
- phy-mode = "rmii";
- phy-reset-gpios = <&gpio7 6 0>;
- status = "okay";
- };
- };
- };
-
leds {
compatible = "gpio-leds";
@@ -132,3 +31,96 @@
};
};
};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_1>;
+ cd-gpios = <&gpio3 13 0>;
+ wp-gpios = <&gpio3 14 0>;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
+ status = "okay";
+
+ flash: at45db321d@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <25000000>;
+ reg = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0x3c0000>;
+ };
+ };
+};
+
+&esdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc3_1>;
+ cd-gpios = <&gpio3 11 0>;
+ wp-gpios = <&gpio3 12 0>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 424 0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
+ 449 0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
+ 693 0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
+ 697 0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
+ 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+ 705 0x80000000 /* MX53_PAD_EIM_DA14__GPIO3_14 */
+ 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+ 873 0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
+ >;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_1>;
+ status = "okay";
+
+ pmic: mc13892@08 {
+ compatible = "fsl,mc13892", "fsl,mc13xxx";
+ reg = <0x08>;
+ };
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio7 6 0>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
new file mode 100644
index 000000000000..e54fffd48369
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx53-tqma53.dtsi"
+
+/ {
+ model = "TQ MBa53 starter kit";
+ compatible = "tq,mba53", "tq,tqma53", "fsl,imx53";
+};
+
+&iomuxc {
+ lvds1 {
+ pinctrl_lvds1_1: lvds1-grp1 {
+ fsl,pins = <730 0x10000 /* LVDS0_TX3 */
+ 732 0x10000 /* LVDS0_CLK */
+ 734 0x10000 /* LVDS0_TX2 */
+ 736 0x10000 /* LVDS0_TX1 */
+ 738 0x10000>; /* LVDS0_TX0 */
+ };
+
+ pinctrl_lvds1_2: lvds1-grp2 {
+ fsl,pins = <720 0x10000 /* LVDS1_TX3 */
+ 722 0x10000 /* LVDS1_TX2 */
+ 724 0x10000 /* LVDS1_CLK */
+ 726 0x10000 /* LVDS1_TX1 */
+ 728 0x10000>; /* LVDS1_TX0 */
+ };
+ };
+
+ disp1 {
+ pinctrl_disp1_1: disp1-grp1 {
+ fsl,pins = <689 0x10000 /* DISP1_DRDY */
+ 482 0x10000 /* DISP1_HSYNC */
+ 489 0x10000 /* DISP1_VSYNC */
+ 684 0x10000 /* DISP1_DAT_0 */
+ 515 0x10000 /* DISP1_DAT_22 */
+ 523 0x10000 /* DISP1_DAT_23 */
+ 543 0x10000 /* DISP1_DAT_21 */
+ 553 0x10000 /* DISP1_DAT_20 */
+ 558 0x10000 /* DISP1_DAT_19 */
+ 564 0x10000 /* DISP1_DAT_18 */
+ 570 0x10000 /* DISP1_DAT_17 */
+ 575 0x10000 /* DISP1_DAT_16 */
+ 580 0x10000 /* DISP1_DAT_15 */
+ 585 0x10000 /* DISP1_DAT_14 */
+ 590 0x10000 /* DISP1_DAT_13 */
+ 595 0x10000 /* DISP1_DAT_12 */
+ 628 0x10000 /* DISP1_DAT_11 */
+ 634 0x10000 /* DISP1_DAT_10 */
+ 639 0x10000 /* DISP1_DAT_9 */
+ 644 0x10000 /* DISP1_DAT_8 */
+ 649 0x10000 /* DISP1_DAT_7 */
+ 654 0x10000 /* DISP1_DAT_6 */
+ 659 0x10000 /* DISP1_DAT_5 */
+ 664 0x10000 /* DISP1_DAT_4 */
+ 669 0x10000 /* DISP1_DAT_3 */
+ 674 0x10000 /* DISP1_DAT_2 */
+ 679 0x10000 /* DISP1_DAT_1 */
+ 684 0x10000>; /* DISP1_DAT_0 */
+ };
+ };
+};
+
+&cspi {
+ status = "okay";
+};
+
+&i2c2 {
+ codec: sgtl5000@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ };
+
+ expander: pca9554@20 {
+ compatible = "pca9554";
+ reg = <0x20>;
+ interrupts = <109>;
+ };
+
+ sensor2: lm75@49 {
+ compatible = "lm75";
+ reg = <0x49>;
+ };
+};
+
+&fec {
+ status = "okay";
+};
+
+&esdhc2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&ecspi1 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&can2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index b0075537195b..05cc5620436b 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -21,200 +21,6 @@
reg = <0x70000000 0x40000000>;
};
- soc {
- aips@50000000 { /* AIPS1 */
- spba@50000000 {
- esdhc@50004000 { /* ESDHC1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc1_1>;
- cd-gpios = <&gpio3 13 0>;
- status = "okay";
- };
-
- ssi2: ssi@50014000 {
- fsl,mode = "i2s-slave";
- status = "okay";
- };
-
- esdhc@50020000 { /* ESDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc3_1>;
- cd-gpios = <&gpio3 11 0>;
- wp-gpios = <&gpio3 12 0>;
- status = "okay";
- };
- };
-
- iomuxc@53fa8000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 1071 0x80000000 /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
- 1141 0x80000000 /* MX53_PAD_GPIO_8__GPIO1_8 */
- 982 0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
- 989 0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
- 693 0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
- 697 0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
- 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
- 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
- 1149 0x80000000 /* MX53_PAD_GPIO_16__GPIO7_11 */
- >;
- };
-
- led_pin_gpio7_7: led_gpio7_7@0 {
- fsl,pins = <
- 873 0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
- >;
- };
- };
-
- };
-
- uart1: serial@53fbc000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
- status = "okay";
- };
- };
-
- aips@60000000 { /* AIPS2 */
- i2c@63fc4000 { /* I2C2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2_1>;
- status = "okay";
-
- sgtl5000: codec@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- VDDA-supply = <&reg_3p2v>;
- VDDIO-supply = <&reg_3p2v>;
- };
- };
-
- i2c@63fc8000 { /* I2C1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1>;
- status = "okay";
-
- accelerometer: mma8450@1c {
- compatible = "fsl,mma8450";
- reg = <0x1c>;
- };
-
- pmic: dialog@48 {
- compatible = "dlg,da9053-aa", "dlg,da9052";
- reg = <0x48>;
- interrupt-parent = <&gpio7>;
- interrupts = <11 0x8>; /* low-level active IRQ at GPIO7_11 */
-
- regulators {
- buck1_reg: buck1 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <2075000>;
- regulator-always-on;
- };
-
- buck2_reg: buck2 {
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <2075000>;
- regulator-always-on;
- };
-
- buck3_reg: buck3 {
- regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <2500000>;
- regulator-always-on;
- };
-
- buck4_reg: buck4 {
- regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <2500000>;
- regulator-always-on;
- };
-
- ldo1_reg: ldo1 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- ldo2_reg: ldo2 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- ldo3_reg: ldo3 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- ldo4_reg: ldo4 {
- regulator-min-microvolt = <1725000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- ldo5_reg: ldo5 {
- regulator-min-microvolt = <1725000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- ldo6_reg: ldo6 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
- ldo7_reg: ldo7 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
- ldo8_reg: ldo8 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
- ldo9_reg: ldo9 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
- ldo10_reg: ldo10 {
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <3650000>;
- regulator-always-on;
- };
- };
- };
- };
-
- audmux@63fd0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_audmux_1>;
- status = "okay";
- };
-
- ethernet@63fec000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec_1>;
- phy-mode = "rmii";
- phy-reset-gpios = <&gpio7 6 0>;
- status = "okay";
- };
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
@@ -276,3 +82,189 @@
mux-ext-port = <5>;
};
};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_1>;
+ cd-gpios = <&gpio3 13 0>;
+ status = "okay";
+};
+
+&ssi2 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&esdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc3_1>;
+ cd-gpios = <&gpio3 11 0>;
+ wp-gpios = <&gpio3 12 0>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1071 0x80000000 /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
+ 1141 0x80000000 /* MX53_PAD_GPIO_8__GPIO1_8 */
+ 982 0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
+ 989 0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
+ 693 0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
+ 697 0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
+ 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+ 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+ 1149 0x80000000 /* MX53_PAD_GPIO_16__GPIO7_11 */
+ >;
+ };
+
+ led_pin_gpio7_7: led_gpio7_7@0 {
+ fsl,pins = <
+ 873 0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
+ >;
+ };
+ };
+
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_1>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p2v>;
+ VDDIO-supply = <&reg_3p2v>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ accelerometer: mma8450@1c {
+ compatible = "fsl,mma8450";
+ reg = <0x1c>;
+ };
+
+ pmic: dialog@48 {
+ compatible = "dlg,da9053-aa", "dlg,da9052";
+ reg = <0x48>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <11 0x8>; /* low-level active IRQ at GPIO7_11 */
+
+ regulators {
+ buck1_reg: buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ regulator-always-on;
+ };
+
+ buck2_reg: buck2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ regulator-always-on;
+ };
+
+ buck3_reg: buck3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ buck4_reg: buck4 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ ldo1_reg: ldo1 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo5_reg: ldo5 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo6_reg: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo7_reg: ldo7 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo8_reg: ldo8 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo9_reg: ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo10_reg: ldo10 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <3650000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio7 6 0>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 06c68580c842..995554c324b8 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -21,157 +21,6 @@
reg = <0x70000000 0x40000000>;
};
- soc {
- aips@50000000 { /* AIPS1 */
- spba@50000000 {
- esdhc@50004000 { /* ESDHC1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc1_1>;
- cd-gpios = <&gpio3 13 0>;
- wp-gpios = <&gpio4 11 0>;
- status = "okay";
- };
-
- esdhc@50008000 { /* ESDHC2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc2_1>;
- non-removable;
- status = "okay";
- };
-
- uart3: serial@5000c000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart3_1>;
- fsl,uart-has-rtscts;
- status = "okay";
- };
-
- ecspi@50010000 { /* ECSPI1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ecspi1_1>;
- fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
- status = "okay";
-
- zigbee: mc1323@0 {
- compatible = "fsl,mc1323";
- spi-max-frequency = <8000000>;
- reg = <0>;
- };
-
- flash: m25p32@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p32", "st,m25p";
- spi-max-frequency = <20000000>;
- reg = <1>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0x0 0x40000>;
- read-only;
- };
-
- partition@40000 {
- label = "Kernel";
- reg = <0x40000 0x3c0000>;
- };
- };
- };
-
- esdhc@50020000 { /* ESDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_esdhc3_1>;
- non-removable;
- status = "okay";
- };
- };
-
- iomuxc@53fa8000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 982 0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
- 989 0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
- 424 0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
- 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
- 449 0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
- 43 0x80000000 /* MX53_PAD_KEY_ROW2__GPIO4_11 */
- 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
- >;
- };
- };
- };
-
- uart1: serial@53fbc000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
- status = "okay";
- };
-
- uart2: serial@53fc0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart2_1>;
- status = "okay";
- };
- };
-
- aips@60000000 { /* AIPS2 */
- i2c@63fc4000 { /* I2C2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2_1>;
- status = "okay";
-
- codec: sgtl5000@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- };
-
- magnetometer: mag3110@0e {
- compatible = "fsl,mag3110";
- reg = <0x0e>;
- };
-
- touchkey: mpr121@5a {
- compatible = "fsl,mpr121";
- reg = <0x5a>;
- };
- };
-
- i2c@63fc8000 { /* I2C1 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1>;
- status = "okay";
-
- accelerometer: mma8450@1c {
- compatible = "fsl,mma8450";
- reg = <0x1c>;
- };
-
- camera: ov5642@3c {
- compatible = "ovti,ov5642";
- reg = <0x3c>;
- };
-
- pmic: dialog@48 {
- compatible = "dialog,da9053", "dialog,da9052";
- reg = <0x48>;
- };
- };
-
- ethernet@63fec000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec_1>;
- phy-mode = "rmii";
- phy-reset-gpios = <&gpio7 6 0>;
- status = "okay";
- };
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
@@ -188,3 +37,146 @@
};
};
};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1_1>;
+ cd-gpios = <&gpio3 13 0>;
+ wp-gpios = <&gpio4 11 0>;
+ status = "okay";
+};
+
+&esdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc2_1>;
+ non-removable;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_1>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
+ status = "okay";
+
+ zigbee: mc1323@0 {
+ compatible = "fsl,mc1323";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
+ };
+
+ flash: m25p32@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p32", "st,m25p";
+ spi-max-frequency = <20000000>;
+ reg = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0x3c0000>;
+ };
+ };
+};
+
+&esdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc3_1>;
+ non-removable;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 982 0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
+ 989 0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
+ 424 0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
+ 701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+ 449 0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
+ 43 0x80000000 /* MX53_PAD_KEY_ROW2__GPIO4_11 */
+ 868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+ >;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_1>;
+ status = "okay";
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ };
+
+ magnetometer: mag3110@0e {
+ compatible = "fsl,mag3110";
+ reg = <0x0e>;
+ };
+
+ touchkey: mpr121@5a {
+ compatible = "fsl,mpr121";
+ reg = <0x5a>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+ status = "okay";
+
+ accelerometer: mma8450@1c {
+ compatible = "fsl,mma8450";
+ reg = <0x1c>;
+ };
+
+ camera: ov5642@3c {
+ compatible = "ovti,ov5642";
+ reg = <0x3c>;
+ };
+
+ pmic: dialog@48 {
+ compatible = "dialog,da9053", "dialog,da9052";
+ reg = <0x48>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio7 6 0>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
new file mode 100644
index 000000000000..8278ec5ec222
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "imx53.dtsi"
+
+/ {
+ model = "TQ TQMa53";
+ compatible = "tq,tqma53", "fsl,imx53";
+
+ memory {
+ reg = <0x70000000 0x40000000>; /* Up to 1GiB */
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+};
+
+&esdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc2_1>;
+ wp-gpios = <&gpio1 2 0>;
+ cd-gpios = <&gpio1 4 0>;
+ status = "disabled";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_2>;
+ status = "disabled";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ fsl,spi-num-chipselects = <4>;
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>,
+ <&gpio3 24 0>, <&gpio3 25 0>;
+ status = "disabled";
+};
+
+&esdhc3 { /* EMMC */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc3_1>;
+ vmmc-supply = <&reg_3p3v>;
+ non-removable;
+ bus-width = <8>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ i2s {
+ pinctrl_i2s_1: i2s-grp1 {
+ fsl,pins = <
+ 1 0x10000 /* I2S_MCLK */
+ 10 0x10000 /* I2S_SCLK */
+ 17 0x10000 /* I2S_DOUT */
+ 23 0x10000 /* I2S_LRCLK*/
+ 30 0x10000 /* I2S_DIN */
+ >;
+ };
+ };
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 610 0x10000 /* MX53_PAD_EIM_CS1__IPU_DI1_PIN6 (VSYNC)*/
+ 711 0x10000 /* MX53_PAD_EIM_DA15__IPU_DI1_PIN4 (HSYNC)*/
+ 873 0x10000 /* MX53_PAD_PATA_DA_1__GPIO7_7 (LCD_BLT_EN)*/
+ 878 0x10000 /* MX53_PAD_PATA_DA_2__GPIO7_8 (LCD_RESET)*/
+ 922 0x10000 /* MX53_PAD_PATA_DATA5__GPIO2_5 (LCD_POWER)*/
+ 928 0x10000 /* MX53_PAD_PATA_DATA6__GPIO2_6 (PMIC_INT)*/
+ 982 0x10000 /* MX53_PAD_PATA_DATA14__GPIO2_14 (CSI_RST)*/
+ 989 0x10000 /* MX53_PAD_PATA_DATA15__GPIO2_15 (CSI_PWDN)*/
+ 1069 0x10000 /* MX53_PAD_GPIO_0__GPIO1_0 (SYSTEM_DOWN)*/
+ 1093 0x10000 /* MX53_PAD_GPIO_3__GPIO1_3 */
+ >;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_2>;
+ fsl,uart-has-rtscts;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+ status = "disabled";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_2>;
+ status = "disabled";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2_1>;
+ status = "disabled";
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3_1>;
+ status = "disabled";
+};
+
+&cspi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cspi_1>;
+ fsl,spi-num-chipselects = <3>;
+ cs-gpios = <&gpio1 18 0>, <&gpio1 19 0>,
+ <&gpio1 21 0>;
+ status = "disabled";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2_1>;
+ status = "okay";
+
+ pmic: mc34708@8 {
+ compatible = "fsl,mc34708";
+ reg = <0x8>;
+ fsl,mc13xxx-uses-rtc;
+ interrupt-parent = <&gpio2>;
+ interrupts = <6 8>; /* PDATA_DATA6, low active */
+ };
+
+ sensor1: lm75@48 {
+ compatible = "lm75";
+ reg = <0x48>;
+ };
+
+ eeprom: 24c64@50 {
+ compatible = "at,24c64";
+ pagesize = <32>;
+ reg = <0x50>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_1>;
+ phy-mode = "rmii";
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index edc3f1eb6699..d05aa215c7f9 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -274,6 +274,44 @@
};
};
+ csi {
+ pinctrl_csi_1: csigrp-1 {
+ fsl,pins = <
+ 286 0x1d5 /* MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN */
+ 291 0x1d5 /* MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC */
+ 280 0x1d5 /* MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC */
+ 276 0x1d5 /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
+ 409 0x1d5 /* MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 */
+ 402 0x1d5 /* MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 */
+ 395 0x1d5 /* MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 */
+ 388 0x1d5 /* MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 */
+ 381 0x1d5 /* MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 */
+ 374 0x1d5 /* MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 */
+ 367 0x1d5 /* MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 */
+ 360 0x1d5 /* MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 */
+ 352 0x1d5 /* MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 */
+ 344 0x1d5 /* MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 */
+ 336 0x1d5 /* MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 */
+ 328 0x1d5 /* MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 */
+ 320 0x1d5 /* MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 */
+ 312 0x1d5 /* MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 */
+ 304 0x1d5 /* MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 */
+ 296 0x1d5 /* MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 */
+ 276 0x1d5 /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
+ >;
+ };
+ };
+
+ cspi {
+ pinctrl_cspi_1: cspigrp-1 {
+ fsl,pins = <
+ 998 0x1d5 /* MX53_PAD_SD1_DATA0__CSPI_MISO */
+ 1008 0x1d5 /* MX53_PAD_SD1_CMD__CSPI_MOSI */
+ 1022 0x1d5 /* MX53_PAD_SD1_CLK__CSPI_SCLK */
+ >;
+ };
+ };
+
ecspi1 {
pinctrl_ecspi1_1: ecspi1grp-1 {
fsl,pins = <
@@ -349,6 +387,13 @@
853 0x80000000 /* MX53_PAD_PATA_DIOR__CAN1_RXCAN */
>;
};
+
+ pinctrl_can1_2: can1grp-2 {
+ fsl,pins = <
+ 37 0x80000000 /* MX53_PAD_KEY_COL2__CAN1_TXCAN */
+ 44 0x80000000 /* MX53_PAD_KEY_ROW2__CAN1_RXCAN */
+ >;
+ };
};
can2 {
@@ -387,6 +432,14 @@
};
};
+ owire {
+ pinctrl_owire_1: owiregrp-1 {
+ fsl,pins = <
+ 1166 0x80000000 /* MX53_PAD_GPIO_18__OWIRE_LINE */
+ >;
+ };
+ };
+
uart1 {
pinctrl_uart1_1: uart1grp-1 {
fsl,pins = <
@@ -421,6 +474,14 @@
880 0x1c5 /* MX53_PAD_PATA_DA_2__UART3_RTS */
>;
};
+
+ pinctrl_uart3_2: uart3grp-2 {
+ fsl,pins = <
+ 884 0x1c5 /* MX53_PAD_PATA_CS_0__UART3_TXD_MUX */
+ 888 0x1c5 /* MX53_PAD_PATA_CS_1__UART3_RXD_MUX */
+ >;
+ };
+
};
uart4 {
@@ -570,6 +631,13 @@
status = "disabled";
};
+ owire: owire@63fa4000 {
+ compatible = "fsl,imx53-owire", "fsl,imx21-owire";
+ reg = <0x63fa4000 0x4000>;
+ clocks = <&clks 159>;
+ status = "disabled";
+ };
+
ecspi2: ecspi@63fac000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
new file mode 100644
index 000000000000..63fafe2a606c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/include/ "imx6qdl.dtsi"
+
+/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ soc {
+ aips1: aips-bus@02000000 {
+ pxp: pxp@020f0000 {
+ reg = <0x020f0000 0x4000>;
+ interrupts = <0 98 0x04>;
+ };
+
+ epdc: epdc@020f4000 {
+ reg = <0x020f4000 0x4000>;
+ interrupts = <0 97 0x04>;
+ };
+
+ lcdif: lcdif@020f8000 {
+ reg = <0x020f8000 0x4000>;
+ interrupts = <0 39 0x04>;
+ };
+ };
+
+ aips2: aips-bus@02100000 {
+ i2c4: i2c@021f8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx1-i2c";
+ reg = <0x021f8000 0x4000>;
+ interrupts = <0 35 0x04>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 5bfa02a3f85c..53eb241fa5ad 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -21,71 +21,6 @@
reg = <0x10000000 0x80000000>;
};
- soc {
- gpmi-nand@00112000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_gpmi_nand_1>;
- status = "disabled"; /* gpmi nand conflicts with SD */
- };
-
- aips-bus@02000000 { /* AIPS1 */
- iomuxc@020e0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 176 0x80000000 /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
- >;
- };
- };
-
- arm2 {
- pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
- fsl,pins = <
- 1363 0x80000000 /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
- 1369 0x80000000 /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
- >;
- };
- };
- };
- };
-
- aips-bus@02100000 { /* AIPS2 */
- ethernet@02188000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_2>;
- phy-mode = "rgmii";
- status = "okay";
- };
-
- usdhc@02198000 { /* uSDHC3 */
- cd-gpios = <&gpio6 11 0>;
- wp-gpios = <&gpio6 14 0>;
- vmmc-supply = <&reg_3p3v>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc3_1
- &pinctrl_usdhc3_arm2>;
- status = "okay";
- };
-
- usdhc@0219c000 { /* uSDHC4 */
- non-removable;
- vmmc-supply = <&reg_3p3v>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc4_1>;
- status = "okay";
- };
-
- uart4: serial@021f0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart4_1>;
- status = "okay";
- };
- };
- };
-
regulators {
compatible = "simple-bus";
@@ -108,3 +43,62 @@
};
};
};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_1>;
+ status = "disabled"; /* gpmi nand conflicts with SD */
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 176 0x80000000 /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
+ >;
+ };
+ };
+
+ arm2 {
+ pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
+ fsl,pins = <
+ 1363 0x80000000 /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
+ 1369 0x80000000 /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
+ >;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_2>;
+ phy-mode = "rgmii";
+ status = "okay";
+};
+
+&usdhc3 {
+ cd-gpios = <&gpio6 11 0>;
+ wp-gpios = <&gpio6 14 0>;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1
+ &pinctrl_usdhc3_arm2>;
+ status = "okay";
+};
+
+&usdhc4 {
+ non-removable;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4_1>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4_1>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-sabreauto.dts b/arch/arm/boot/dts/imx6q-sabreauto.dts
index 826e4ad1477e..656d489122fe 100644
--- a/arch/arm/boot/dts/imx6q-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6q-sabreauto.dts
@@ -20,45 +20,39 @@
memory {
reg = <0x10000000 0x80000000>;
};
+};
- soc {
- aips-bus@02000000 { /* AIPS1 */
- iomuxc@020e0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 1376 0x80000000 /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
- 13 0x80000000 /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
- >;
- };
- };
- };
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1376 0x80000000 /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
+ 13 0x80000000 /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
+ >;
};
+ };
+};
- aips-bus@02100000 { /* AIPS2 */
- uart4: serial@021f0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart4_1>;
- status = "okay";
- };
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4_1>;
+ status = "okay";
+};
- ethernet@02188000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_2>;
- phy-mode = "rgmii";
- status = "okay";
- };
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_2>;
+ phy-mode = "rgmii";
+ status = "okay";
+};
- usdhc@02198000 { /* uSDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc3_1>;
- cd-gpios = <&gpio6 15 0>;
- wp-gpios = <&gpio1 13 0>;
- status = "okay";
- };
- };
- };
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1>;
+ cd-gpios = <&gpio6 15 0>;
+ wp-gpios = <&gpio1 13 0>;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index d152328285a1..2ce355cd05e5 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -21,118 +21,6 @@
reg = <0x10000000 0x40000000>;
};
- soc {
- aips-bus@02000000 { /* AIPS1 */
- spba-bus@02000000 {
- ecspi@02008000 { /* eCSPI1 */
- fsl,spi-num-chipselects = <1>;
- cs-gpios = <&gpio3 19 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ecspi1_1>;
- status = "okay";
-
- flash: m25p80@0 {
- compatible = "sst,sst25vf016b";
- spi-max-frequency = <20000000>;
- reg = <0>;
- };
- };
-
- ssi1: ssi@02028000 {
- fsl,mode = "i2s-slave";
- status = "okay";
- };
- };
-
- iomuxc@020e0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 1450 0x80000000 /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
- 1458 0x80000000 /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
- 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
- 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
- 152 0x80000000 /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
- 1262 0x80000000 /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
- 1270 0x1f0b0 /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
- 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
- >;
- };
- };
- };
- };
-
- aips-bus@02100000 { /* AIPS2 */
- usb@02184000 { /* USB OTG */
- vbus-supply = <&reg_usb_otg_vbus>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg_1>;
- disable-over-current;
- status = "okay";
- };
-
- usb@02184200 { /* USB1 */
- status = "okay";
- };
-
- ethernet@02188000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_1>;
- phy-mode = "rgmii";
- phy-reset-gpios = <&gpio3 23 0>;
- status = "okay";
- };
-
- usdhc@02198000 { /* uSDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc3_2>;
- cd-gpios = <&gpio7 0 0>;
- wp-gpios = <&gpio7 1 0>;
- vmmc-supply = <&reg_3p3v>;
- status = "okay";
- };
-
- usdhc@0219c000 { /* uSDHC4 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc4_2>;
- cd-gpios = <&gpio2 6 0>;
- wp-gpios = <&gpio2 7 0>;
- vmmc-supply = <&reg_3p3v>;
- status = "okay";
- };
-
- audmux@021d8000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_audmux_1>;
- };
-
- uart2: serial@021e8000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart2_1>;
- };
-
- i2c@021a0000 { /* I2C1 */
- status = "okay";
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1>;
-
- codec: sgtl5000@0a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- clocks = <&clks 169>;
- VDDA-supply = <&reg_2p5v>;
- VDDIO-supply = <&reg_3p3v>;
- };
- };
- };
- };
-
regulators {
compatible = "simple-bus";
@@ -176,3 +64,107 @@
mux-ext-port = <4>;
};
};
+
+&ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ compatible = "sst,sst25vf016b";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1450 0x80000000 /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
+ 1458 0x80000000 /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
+ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+ 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
+ 152 0x80000000 /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
+ 1262 0x80000000 /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
+ 1270 0x1f0b0 /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
+ 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
+ >;
+ };
+ };
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_1>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 23 0>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_2>;
+ cd-gpios = <&gpio7 0 0>;
+ wp-gpios = <&gpio7 1 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&usdhc4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4_2>;
+ cd-gpios = <&gpio2 6 0>;
+ wp-gpios = <&gpio2 7 0>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&audmux {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_1>;
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
+
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ clocks = <&clks 169>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
index a42402562b7b..2dea304a7980 100644
--- a/arch/arm/boot/dts/imx6q-sabresd.dts
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -21,61 +21,6 @@
reg = <0x10000000 0x40000000>;
};
- soc {
- aips-bus@02000000 { /* AIPS1 */
- spba-bus@02000000 {
- uart1: serial@02020000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_1>;
- status = "okay";
- };
- };
-
- iomuxc@020e0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- hog {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- 1004 0x80000000 /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
- 1012 0x80000000 /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
- 1402 0x80000000 /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
- 1410 0x80000000 /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
- 1418 0x80000000 /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
- 1426 0x80000000 /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
- >;
- };
- };
- };
- };
-
- aips-bus@02100000 { /* AIPS2 */
- ethernet@02188000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_enet_1>;
- phy-mode = "rgmii";
- status = "okay";
- };
-
- usdhc@02194000 { /* uSDHC2 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc2_1>;
- cd-gpios = <&gpio2 2 0>;
- wp-gpios = <&gpio2 3 0>;
- status = "okay";
- };
-
- usdhc@02198000 { /* uSDHC3 */
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc3_1>;
- cd-gpios = <&gpio2 0 0>;
- wp-gpios = <&gpio2 1 0>;
- status = "okay";
- };
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
@@ -92,3 +37,50 @@
};
};
};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_1>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1004 0x80000000 /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
+ 1012 0x80000000 /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
+ 1402 0x80000000 /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
+ 1410 0x80000000 /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
+ 1418 0x80000000 /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
+ 1426 0x80000000 /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
+ >;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_1>;
+ phy-mode = "rgmii";
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_1>;
+ cd-gpios = <&gpio2 2 0>;
+ wp-gpios = <&gpio2 3 0>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1>;
+ cd-gpios = <&gpio2 0 0>;
+ wp-gpios = <&gpio2 1 0>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index ff1205ea5719..cba021eb035e 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -1,33 +1,16 @@
+
/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
+ * Copyright 2013 Freescale Semiconductor, Inc.
*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
+/include/ "imx6qdl.dtsi"
/ {
- aliases {
- serial0 = &uart1;
- serial1 = &uart2;
- serial2 = &uart3;
- serial3 = &uart4;
- serial4 = &uart5;
- gpio0 = &gpio1;
- gpio1 = &gpio2;
- gpio2 = &gpio3;
- gpio3 = &gpio4;
- gpio4 = &gpio5;
- gpio5 = &gpio6;
- gpio6 = &gpio7;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -38,12 +21,19 @@
next-level-cache = <&L2>;
operating-points = <
/* kHz uV */
- 792000 1100000
+ 1200000 1275000
+ 996000 1250000
+ 792000 1150000
396000 950000
- 198000 850000
>;
clock-latency = <61036>; /* two CLK32 periods */
- cpu0-supply = <&reg_cpu>;
+ clocks = <&clks 104>, <&clks 6>, <&clks 16>,
+ <&clks 17>, <&clks 170>;
+ clock-names = "arm", "pll2_pfd2_396m", "step",
+ "pll1_sw", "pll1_sys";
+ arm-supply = <&reg_arm>;
+ pu-supply = <&reg_pu>;
+ soc-supply = <&reg_soc>;
};
cpu@1 {
@@ -65,142 +55,9 @@
};
};
- intc: interrupt-controller@00a01000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-controller;
- reg = <0x00a01000 0x1000>,
- <0x00a00100 0x100>;
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ckil {
- compatible = "fsl,imx-ckil", "fixed-clock";
- clock-frequency = <32768>;
- };
-
- ckih1 {
- compatible = "fsl,imx-ckih1", "fixed-clock";
- clock-frequency = <0>;
- };
-
- osc {
- compatible = "fsl,imx-osc", "fixed-clock";
- clock-frequency = <24000000>;
- };
- };
-
soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&intc>;
- ranges;
-
- dma-apbh@00110000 {
- compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
- reg = <0x00110000 0x2000>;
- clocks = <&clks 106>;
- };
-
- nfc: gpmi-nand@00112000 {
- compatible = "fsl,imx6q-gpmi-nand";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
- reg-names = "gpmi-nand", "bch";
- interrupts = <0 13 0x04>, <0 15 0x04>;
- interrupt-names = "gpmi-dma", "bch";
- clocks = <&clks 152>, <&clks 153>, <&clks 151>,
- <&clks 150>, <&clks 149>;
- clock-names = "gpmi_io", "gpmi_apb", "gpmi_bch",
- "gpmi_bch_apb", "per1_bch";
- fsl,gpmi-dma-channel = <0>;
- status = "disabled";
- };
-
- timer@00a00600 {
- compatible = "arm,cortex-a9-twd-timer";
- reg = <0x00a00600 0x20>;
- interrupts = <1 13 0xf01>;
- };
-
- L2: l2-cache@00a02000 {
- compatible = "arm,pl310-cache";
- reg = <0x00a02000 0x1000>;
- interrupts = <0 92 0x04>;
- cache-unified;
- cache-level = <2>;
- };
-
aips-bus@02000000 { /* AIPS1 */
- compatible = "fsl,aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x02000000 0x100000>;
- ranges;
-
spba-bus@02000000 {
- compatible = "fsl,spba-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x02000000 0x40000>;
- ranges;
-
- spdif: spdif@02004000 {
- reg = <0x02004000 0x4000>;
- interrupts = <0 52 0x04>;
- };
-
- ecspi1: ecspi@02008000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
- reg = <0x02008000 0x4000>;
- interrupts = <0 31 0x04>;
- clocks = <&clks 112>, <&clks 112>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- ecspi2: ecspi@0200c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
- reg = <0x0200c000 0x4000>;
- interrupts = <0 32 0x04>;
- clocks = <&clks 113>, <&clks 113>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- ecspi3: ecspi@02010000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
- reg = <0x02010000 0x4000>;
- interrupts = <0 33 0x04>;
- clocks = <&clks 114>, <&clks 114>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- ecspi4: ecspi@02014000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
- reg = <0x02014000 0x4000>;
- interrupts = <0 34 0x04>;
- clocks = <&clks 115>, <&clks 115>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
ecspi5: ecspi@02018000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -211,361 +68,6 @@
clock-names = "ipg", "per";
status = "disabled";
};
-
- uart1: serial@02020000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x02020000 0x4000>;
- interrupts = <0 26 0x04>;
- clocks = <&clks 160>, <&clks 161>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- esai: esai@02024000 {
- reg = <0x02024000 0x4000>;
- interrupts = <0 51 0x04>;
- };
-
- ssi1: ssi@02028000 {
- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
- reg = <0x02028000 0x4000>;
- interrupts = <0 46 0x04>;
- clocks = <&clks 178>;
- fsl,fifo-depth = <15>;
- fsl,ssi-dma-events = <38 37>;
- status = "disabled";
- };
-
- ssi2: ssi@0202c000 {
- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
- reg = <0x0202c000 0x4000>;
- interrupts = <0 47 0x04>;
- clocks = <&clks 179>;
- fsl,fifo-depth = <15>;
- fsl,ssi-dma-events = <42 41>;
- status = "disabled";
- };
-
- ssi3: ssi@02030000 {
- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
- reg = <0x02030000 0x4000>;
- interrupts = <0 48 0x04>;
- clocks = <&clks 180>;
- fsl,fifo-depth = <15>;
- fsl,ssi-dma-events = <46 45>;
- status = "disabled";
- };
-
- asrc: asrc@02034000 {
- reg = <0x02034000 0x4000>;
- interrupts = <0 50 0x04>;
- };
-
- spba@0203c000 {
- reg = <0x0203c000 0x4000>;
- };
- };
-
- vpu: vpu@02040000 {
- reg = <0x02040000 0x3c000>;
- interrupts = <0 3 0x04 0 12 0x04>;
- };
-
- aipstz@0207c000 { /* AIPSTZ1 */
- reg = <0x0207c000 0x4000>;
- };
-
- pwm1: pwm@02080000 {
- #pwm-cells = <2>;
- compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
- reg = <0x02080000 0x4000>;
- interrupts = <0 83 0x04>;
- clocks = <&clks 62>, <&clks 145>;
- clock-names = "ipg", "per";
- };
-
- pwm2: pwm@02084000 {
- #pwm-cells = <2>;
- compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
- reg = <0x02084000 0x4000>;
- interrupts = <0 84 0x04>;
- clocks = <&clks 62>, <&clks 146>;
- clock-names = "ipg", "per";
- };
-
- pwm3: pwm@02088000 {
- #pwm-cells = <2>;
- compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
- reg = <0x02088000 0x4000>;
- interrupts = <0 85 0x04>;
- clocks = <&clks 62>, <&clks 147>;
- clock-names = "ipg", "per";
- };
-
- pwm4: pwm@0208c000 {
- #pwm-cells = <2>;
- compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
- reg = <0x0208c000 0x4000>;
- interrupts = <0 86 0x04>;
- clocks = <&clks 62>, <&clks 148>;
- clock-names = "ipg", "per";
- };
-
- can1: flexcan@02090000 {
- reg = <0x02090000 0x4000>;
- interrupts = <0 110 0x04>;
- };
-
- can2: flexcan@02094000 {
- reg = <0x02094000 0x4000>;
- interrupts = <0 111 0x04>;
- };
-
- gpt: gpt@02098000 {
- compatible = "fsl,imx6q-gpt";
- reg = <0x02098000 0x4000>;
- interrupts = <0 55 0x04>;
- };
-
- gpio1: gpio@0209c000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x0209c000 0x4000>;
- interrupts = <0 66 0x04 0 67 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@020a0000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020a0000 0x4000>;
- interrupts = <0 68 0x04 0 69 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@020a4000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020a4000 0x4000>;
- interrupts = <0 70 0x04 0 71 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio4: gpio@020a8000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020a8000 0x4000>;
- interrupts = <0 72 0x04 0 73 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio5: gpio@020ac000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020ac000 0x4000>;
- interrupts = <0 74 0x04 0 75 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio6: gpio@020b0000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020b0000 0x4000>;
- interrupts = <0 76 0x04 0 77 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio7: gpio@020b4000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
- reg = <0x020b4000 0x4000>;
- interrupts = <0 78 0x04 0 79 0x04>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- kpp: kpp@020b8000 {
- reg = <0x020b8000 0x4000>;
- interrupts = <0 82 0x04>;
- };
-
- wdog1: wdog@020bc000 {
- compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
- reg = <0x020bc000 0x4000>;
- interrupts = <0 80 0x04>;
- clocks = <&clks 0>;
- };
-
- wdog2: wdog@020c0000 {
- compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
- reg = <0x020c0000 0x4000>;
- interrupts = <0 81 0x04>;
- clocks = <&clks 0>;
- status = "disabled";
- };
-
- clks: ccm@020c4000 {
- compatible = "fsl,imx6q-ccm";
- reg = <0x020c4000 0x4000>;
- interrupts = <0 87 0x04 0 88 0x04>;
- #clock-cells = <1>;
- };
-
- anatop: anatop@020c8000 {
- compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
- reg = <0x020c8000 0x1000>;
- interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
-
- regulator-1p1@110 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vdd1p1";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1375000>;
- regulator-always-on;
- anatop-reg-offset = <0x110>;
- anatop-vol-bit-shift = <8>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <4>;
- anatop-min-voltage = <800000>;
- anatop-max-voltage = <1375000>;
- };
-
- regulator-3p0@120 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vdd3p0";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <3150000>;
- regulator-always-on;
- anatop-reg-offset = <0x120>;
- anatop-vol-bit-shift = <8>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <0>;
- anatop-min-voltage = <2625000>;
- anatop-max-voltage = <3400000>;
- };
-
- regulator-2p5@130 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vdd2p5";
- regulator-min-microvolt = <2000000>;
- regulator-max-microvolt = <2750000>;
- regulator-always-on;
- anatop-reg-offset = <0x130>;
- anatop-vol-bit-shift = <8>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <0>;
- anatop-min-voltage = <2000000>;
- anatop-max-voltage = <2750000>;
- };
-
- reg_cpu: regulator-vddcore@140 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "cpu";
- regulator-min-microvolt = <725000>;
- regulator-max-microvolt = <1450000>;
- regulator-always-on;
- anatop-reg-offset = <0x140>;
- anatop-vol-bit-shift = <0>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <1>;
- anatop-min-voltage = <725000>;
- anatop-max-voltage = <1450000>;
- };
-
- regulator-vddpu@140 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vddpu";
- regulator-min-microvolt = <725000>;
- regulator-max-microvolt = <1450000>;
- regulator-always-on;
- anatop-reg-offset = <0x140>;
- anatop-vol-bit-shift = <9>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <1>;
- anatop-min-voltage = <725000>;
- anatop-max-voltage = <1450000>;
- };
-
- regulator-vddsoc@140 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vddsoc";
- regulator-min-microvolt = <725000>;
- regulator-max-microvolt = <1450000>;
- regulator-always-on;
- anatop-reg-offset = <0x140>;
- anatop-vol-bit-shift = <18>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <1>;
- anatop-min-voltage = <725000>;
- anatop-max-voltage = <1450000>;
- };
- };
-
- usbphy1: usbphy@020c9000 {
- compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
- reg = <0x020c9000 0x1000>;
- interrupts = <0 44 0x04>;
- clocks = <&clks 182>;
- };
-
- usbphy2: usbphy@020ca000 {
- compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
- reg = <0x020ca000 0x1000>;
- interrupts = <0 45 0x04>;
- clocks = <&clks 183>;
- };
-
- snvs@020cc000 {
- compatible = "fsl,sec-v4.0-mon", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x020cc000 0x4000>;
-
- snvs-rtc-lp@34 {
- compatible = "fsl,sec-v4.0-mon-rtc-lp";
- reg = <0x34 0x58>;
- interrupts = <0 19 0x04 0 20 0x04>;
- };
- };
-
- epit1: epit@020d0000 { /* EPIT1 */
- reg = <0x020d0000 0x4000>;
- interrupts = <0 56 0x04>;
- };
-
- epit2: epit@020d4000 { /* EPIT2 */
- reg = <0x020d4000 0x4000>;
- interrupts = <0 57 0x04>;
- };
-
- src: src@020d8000 {
- compatible = "fsl,imx6q-src";
- reg = <0x020d8000 0x4000>;
- interrupts = <0 91 0x04 0 96 0x04>;
- };
-
- gpc: gpc@020dc000 {
- compatible = "fsl,imx6q-gpc";
- reg = <0x020dc000 0x4000>;
- interrupts = <0 89 0x04 0 90 0x04>;
- };
-
- gpr: iomuxc-gpr@020e0000 {
- compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
- reg = <0x020e0000 0x38>;
};
iomuxc: iomuxc@020e0000 {
@@ -780,272 +282,6 @@
};
};
};
-
- dcic1: dcic@020e4000 {
- reg = <0x020e4000 0x4000>;
- interrupts = <0 124 0x04>;
- };
-
- dcic2: dcic@020e8000 {
- reg = <0x020e8000 0x4000>;
- interrupts = <0 125 0x04>;
- };
-
- sdma: sdma@020ec000 {
- compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
- reg = <0x020ec000 0x4000>;
- interrupts = <0 2 0x04>;
- clocks = <&clks 155>, <&clks 155>;
- clock-names = "ipg", "ahb";
- fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q-to1.bin";
- };
- };
-
- aips-bus@02100000 { /* AIPS2 */
- compatible = "fsl,aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x02100000 0x100000>;
- ranges;
-
- caam@02100000 {
- reg = <0x02100000 0x40000>;
- interrupts = <0 105 0x04 0 106 0x04>;
- };
-
- aipstz@0217c000 { /* AIPSTZ2 */
- reg = <0x0217c000 0x4000>;
- };
-
- usbotg: usb@02184000 {
- compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
- reg = <0x02184000 0x200>;
- interrupts = <0 43 0x04>;
- clocks = <&clks 162>;
- fsl,usbphy = <&usbphy1>;
- fsl,usbmisc = <&usbmisc 0>;
- status = "disabled";
- };
-
- usbh1: usb@02184200 {
- compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
- reg = <0x02184200 0x200>;
- interrupts = <0 40 0x04>;
- clocks = <&clks 162>;
- fsl,usbphy = <&usbphy2>;
- fsl,usbmisc = <&usbmisc 1>;
- status = "disabled";
- };
-
- usbh2: usb@02184400 {
- compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
- reg = <0x02184400 0x200>;
- interrupts = <0 41 0x04>;
- clocks = <&clks 162>;
- fsl,usbmisc = <&usbmisc 2>;
- status = "disabled";
- };
-
- usbh3: usb@02184600 {
- compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
- reg = <0x02184600 0x200>;
- interrupts = <0 42 0x04>;
- clocks = <&clks 162>;
- fsl,usbmisc = <&usbmisc 3>;
- status = "disabled";
- };
-
- usbmisc: usbmisc: usbmisc@02184800 {
- #index-cells = <1>;
- compatible = "fsl,imx6q-usbmisc";
- reg = <0x02184800 0x200>;
- clocks = <&clks 162>;
- };
-
- fec: ethernet@02188000 {
- compatible = "fsl,imx6q-fec";
- reg = <0x02188000 0x4000>;
- interrupts = <0 118 0x04 0 119 0x04>;
- clocks = <&clks 117>, <&clks 117>, <&clks 190>;
- clock-names = "ipg", "ahb", "ptp";
- status = "disabled";
- };
-
- mlb@0218c000 {
- reg = <0x0218c000 0x4000>;
- interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
- };
-
- usdhc1: usdhc@02190000 {
- compatible = "fsl,imx6q-usdhc";
- reg = <0x02190000 0x4000>;
- interrupts = <0 22 0x04>;
- clocks = <&clks 163>, <&clks 163>, <&clks 163>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
-
- usdhc2: usdhc@02194000 {
- compatible = "fsl,imx6q-usdhc";
- reg = <0x02194000 0x4000>;
- interrupts = <0 23 0x04>;
- clocks = <&clks 164>, <&clks 164>, <&clks 164>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
-
- usdhc3: usdhc@02198000 {
- compatible = "fsl,imx6q-usdhc";
- reg = <0x02198000 0x4000>;
- interrupts = <0 24 0x04>;
- clocks = <&clks 165>, <&clks 165>, <&clks 165>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
-
- usdhc4: usdhc@0219c000 {
- compatible = "fsl,imx6q-usdhc";
- reg = <0x0219c000 0x4000>;
- interrupts = <0 25 0x04>;
- clocks = <&clks 166>, <&clks 166>, <&clks 166>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
-
- i2c1: i2c@021a0000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
- reg = <0x021a0000 0x4000>;
- interrupts = <0 36 0x04>;
- clocks = <&clks 125>;
- status = "disabled";
- };
-
- i2c2: i2c@021a4000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
- reg = <0x021a4000 0x4000>;
- interrupts = <0 37 0x04>;
- clocks = <&clks 126>;
- status = "disabled";
- };
-
- i2c3: i2c@021a8000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
- reg = <0x021a8000 0x4000>;
- interrupts = <0 38 0x04>;
- clocks = <&clks 127>;
- status = "disabled";
- };
-
- romcp@021ac000 {
- reg = <0x021ac000 0x4000>;
- };
-
- mmdc0: mmdc@021b0000 { /* MMDC0 */
- compatible = "fsl,imx6q-mmdc";
- reg = <0x021b0000 0x4000>;
- };
-
- mmdc1: mmdc@021b4000 { /* MMDC1 */
- reg = <0x021b4000 0x4000>;
- };
-
- weim@021b8000 {
- reg = <0x021b8000 0x4000>;
- interrupts = <0 14 0x04>;
- };
-
- ocotp@021bc000 {
- reg = <0x021bc000 0x4000>;
- };
-
- ocotp@021c0000 {
- reg = <0x021c0000 0x4000>;
- interrupts = <0 21 0x04>;
- };
-
- tzasc@021d0000 { /* TZASC1 */
- reg = <0x021d0000 0x4000>;
- interrupts = <0 108 0x04>;
- };
-
- tzasc@021d4000 { /* TZASC2 */
- reg = <0x021d4000 0x4000>;
- interrupts = <0 109 0x04>;
- };
-
- audmux: audmux@021d8000 {
- compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
- reg = <0x021d8000 0x4000>;
- status = "disabled";
- };
-
- mipi@021dc000 { /* MIPI-CSI */
- reg = <0x021dc000 0x4000>;
- };
-
- mipi@021e0000 { /* MIPI-DSI */
- reg = <0x021e0000 0x4000>;
- };
-
- vdoa@021e4000 {
- reg = <0x021e4000 0x4000>;
- interrupts = <0 18 0x04>;
- };
-
- uart2: serial@021e8000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x021e8000 0x4000>;
- interrupts = <0 27 0x04>;
- clocks = <&clks 160>, <&clks 161>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart3: serial@021ec000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x021ec000 0x4000>;
- interrupts = <0 28 0x04>;
- clocks = <&clks 160>, <&clks 161>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart4: serial@021f0000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x021f0000 0x4000>;
- interrupts = <0 29 0x04>;
- clocks = <&clks 160>, <&clks 161>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart5: serial@021f4000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x021f4000 0x4000>;
- interrupts = <0 30 0x04>;
- clocks = <&clks 160>, <&clks 161>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
- };
-
- ipu1: ipu@02400000 {
- #crtc-cells = <1>;
- compatible = "fsl,imx6q-ipu";
- reg = <0x02400000 0x400000>;
- interrupts = <0 6 0x4 0 5 0x4>;
- clocks = <&clks 130>, <&clks 131>, <&clks 132>;
- clock-names = "bus", "di0", "di1";
};
ipu2: ipu@02800000 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
new file mode 100644
index 000000000000..06ec460b4581
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
+ };
+
+ intc: interrupt-controller@00a01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+ <0x00a00100 0x100>;
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&intc>;
+ ranges;
+
+ dma-apbh@00110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+ clocks = <&clks 106>;
+ };
+
+ gpmi: gpmi-nand@00112000 {
+ compatible = "fsl,imx6q-gpmi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <0 13 0x04>, <0 15 0x04>;
+ interrupt-names = "gpmi-dma", "bch";
+ clocks = <&clks 152>, <&clks 153>, <&clks 151>,
+ <&clks 150>, <&clks 149>;
+ clock-names = "gpmi_io", "gpmi_apb", "gpmi_bch",
+ "gpmi_bch_apb", "per1_bch";
+ fsl,gpmi-dma-channel = <0>;
+ status = "disabled";
+ };
+
+ timer@00a00600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x00a00600 0x20>;
+ interrupts = <1 13 0xf01>;
+ };
+
+ L2: l2-cache@00a02000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x00a02000 0x1000>;
+ interrupts = <0 92 0x04>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ aips-bus@02000000 { /* AIPS1 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x02000000 0x100000>;
+ ranges;
+
+ spba-bus@02000000 {
+ compatible = "fsl,spba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x02000000 0x40000>;
+ ranges;
+
+ spdif: spdif@02004000 {
+ reg = <0x02004000 0x4000>;
+ interrupts = <0 52 0x04>;
+ };
+
+ ecspi1: ecspi@02008000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02008000 0x4000>;
+ interrupts = <0 31 0x04>;
+ clocks = <&clks 112>, <&clks 112>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi2: ecspi@0200c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x0200c000 0x4000>;
+ interrupts = <0 32 0x04>;
+ clocks = <&clks 113>, <&clks 113>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi3: ecspi@02010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02010000 0x4000>;
+ interrupts = <0 33 0x04>;
+ clocks = <&clks 114>, <&clks 114>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi4: ecspi@02014000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02014000 0x4000>;
+ interrupts = <0 34 0x04>;
+ clocks = <&clks 115>, <&clks 115>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart1: serial@02020000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+ interrupts = <0 26 0x04>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ esai: esai@02024000 {
+ reg = <0x02024000 0x4000>;
+ interrupts = <0 51 0x04>;
+ };
+
+ ssi1: ssi@02028000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+ reg = <0x02028000 0x4000>;
+ interrupts = <0 46 0x04>;
+ clocks = <&clks 178>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <38 37>;
+ status = "disabled";
+ };
+
+ ssi2: ssi@0202c000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+ reg = <0x0202c000 0x4000>;
+ interrupts = <0 47 0x04>;
+ clocks = <&clks 179>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <42 41>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@02030000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+ reg = <0x02030000 0x4000>;
+ interrupts = <0 48 0x04>;
+ clocks = <&clks 180>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <46 45>;
+ status = "disabled";
+ };
+
+ asrc: asrc@02034000 {
+ reg = <0x02034000 0x4000>;
+ interrupts = <0 50 0x04>;
+ };
+
+ spba@0203c000 {
+ reg = <0x0203c000 0x4000>;
+ };
+ };
+
+ vpu: vpu@02040000 {
+ reg = <0x02040000 0x3c000>;
+ interrupts = <0 3 0x04 0 12 0x04>;
+ };
+
+ aipstz@0207c000 { /* AIPSTZ1 */
+ reg = <0x0207c000 0x4000>;
+ };
+
+ pwm1: pwm@02080000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+ interrupts = <0 83 0x04>;
+ clocks = <&clks 62>, <&clks 145>;
+ clock-names = "ipg", "per";
+ };
+
+ pwm2: pwm@02084000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+ interrupts = <0 84 0x04>;
+ clocks = <&clks 62>, <&clks 146>;
+ clock-names = "ipg", "per";
+ };
+
+ pwm3: pwm@02088000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+ interrupts = <0 85 0x04>;
+ clocks = <&clks 62>, <&clks 147>;
+ clock-names = "ipg", "per";
+ };
+
+ pwm4: pwm@0208c000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+ interrupts = <0 86 0x04>;
+ clocks = <&clks 62>, <&clks 148>;
+ clock-names = "ipg", "per";
+ };
+
+ can1: flexcan@02090000 {
+ reg = <0x02090000 0x4000>;
+ interrupts = <0 110 0x04>;
+ };
+
+ can2: flexcan@02094000 {
+ reg = <0x02094000 0x4000>;
+ interrupts = <0 111 0x04>;
+ };
+
+ gpt: gpt@02098000 {
+ compatible = "fsl,imx6q-gpt";
+ reg = <0x02098000 0x4000>;
+ interrupts = <0 55 0x04>;
+ };
+
+ gpio1: gpio@0209c000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x0209c000 0x4000>;
+ interrupts = <0 66 0x04 0 67 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@020a0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a0000 0x4000>;
+ interrupts = <0 68 0x04 0 69 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@020a4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a4000 0x4000>;
+ interrupts = <0 70 0x04 0 71 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@020a8000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a8000 0x4000>;
+ interrupts = <0 72 0x04 0 73 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio5: gpio@020ac000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020ac000 0x4000>;
+ interrupts = <0 74 0x04 0 75 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio6: gpio@020b0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b0000 0x4000>;
+ interrupts = <0 76 0x04 0 77 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio7: gpio@020b4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b4000 0x4000>;
+ interrupts = <0 78 0x04 0 79 0x04>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ kpp: kpp@020b8000 {
+ reg = <0x020b8000 0x4000>;
+ interrupts = <0 82 0x04>;
+ };
+
+ wdog1: wdog@020bc000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+ interrupts = <0 80 0x04>;
+ clocks = <&clks 0>;
+ };
+
+ wdog2: wdog@020c0000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020c0000 0x4000>;
+ interrupts = <0 81 0x04>;
+ clocks = <&clks 0>;
+ status = "disabled";
+ };
+
+ clks: ccm@020c4000 {
+ compatible = "fsl,imx6q-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <0 87 0x04 0 88 0x04>;
+ #clock-cells = <1>;
+ };
+
+ anatop: anatop@020c8000 {
+ compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
+ reg = <0x020c8000 0x1000>;
+ interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd1p1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1375000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x110>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
+ };
+
+ regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
+ };
+
+ regulator-2p5@130 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd2p5";
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2750000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x130>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2000000>;
+ anatop-max-voltage = <2750000>;
+ };
+
+ reg_arm: regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "cpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <0>;
+ anatop-vol-bit-width = <5>;
+ anatop-delay-reg-offset = <0x170>;
+ anatop-delay-bit-shift = <24>;
+ anatop-delay-bit-width = <2>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ reg_pu: regulator-vddpu@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+ anatop-delay-reg-offset = <0x170>;
+ anatop-delay-bit-shift = <26>;
+ anatop-delay-bit-width = <2>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ reg_soc: regulator-vddsoc@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddsoc";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <18>;
+ anatop-vol-bit-width = <5>;
+ anatop-delay-reg-offset = <0x170>;
+ anatop-delay-bit-shift = <28>;
+ anatop-delay-bit-width = <2>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+ };
+
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+ interrupts = <0 44 0x04>;
+ clocks = <&clks 182>;
+ };
+
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020ca000 0x1000>;
+ interrupts = <0 45 0x04>;
+ clocks = <&clks 183>;
+ };
+
+ snvs@020cc000 {
+ compatible = "fsl,sec-v4.0-mon", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x020cc000 0x4000>;
+
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+ interrupts = <0 19 0x04 0 20 0x04>;
+ };
+ };
+
+ epit1: epit@020d0000 { /* EPIT1 */
+ reg = <0x020d0000 0x4000>;
+ interrupts = <0 56 0x04>;
+ };
+
+ epit2: epit@020d4000 { /* EPIT2 */
+ reg = <0x020d4000 0x4000>;
+ interrupts = <0 57 0x04>;
+ };
+
+ src: src@020d8000 {
+ compatible = "fsl,imx6q-src";
+ reg = <0x020d8000 0x4000>;
+ interrupts = <0 91 0x04 0 96 0x04>;
+ };
+
+ gpc: gpc@020dc000 {
+ compatible = "fsl,imx6q-gpc";
+ reg = <0x020dc000 0x4000>;
+ interrupts = <0 89 0x04 0 90 0x04>;
+ };
+
+ gpr: iomuxc-gpr@020e0000 {
+ compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
+ reg = <0x020e0000 0x38>;
+ };
+
+ dcic1: dcic@020e4000 {
+ reg = <0x020e4000 0x4000>;
+ interrupts = <0 124 0x04>;
+ };
+
+ dcic2: dcic@020e8000 {
+ reg = <0x020e8000 0x4000>;
+ interrupts = <0 125 0x04>;
+ };
+
+ sdma: sdma@020ec000 {
+ compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
+ reg = <0x020ec000 0x4000>;
+ interrupts = <0 2 0x04>;
+ clocks = <&clks 155>, <&clks 155>;
+ clock-names = "ipg", "ahb";
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
+ };
+ };
+
+ aips-bus@02100000 { /* AIPS2 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x02100000 0x100000>;
+ ranges;
+
+ caam@02100000 {
+ reg = <0x02100000 0x40000>;
+ interrupts = <0 105 0x04 0 106 0x04>;
+ };
+
+ aipstz@0217c000 { /* AIPSTZ2 */
+ reg = <0x0217c000 0x4000>;
+ };
+
+ usbotg: usb@02184000 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+ interrupts = <0 43 0x04>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
+ status = "disabled";
+ };
+
+ usbh1: usb@02184200 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+ interrupts = <0 40 0x04>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc 1>;
+ status = "disabled";
+ };
+
+ usbh2: usb@02184400 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+ interrupts = <0 41 0x04>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 2>;
+ status = "disabled";
+ };
+
+ usbh3: usb@02184600 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184600 0x200>;
+ interrupts = <0 42 0x04>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 3>;
+ status = "disabled";
+ };
+
+ usbmisc: usbmisc: usbmisc@02184800 {
+ #index-cells = <1>;
+ compatible = "fsl,imx6q-usbmisc";
+ reg = <0x02184800 0x200>;
+ clocks = <&clks 162>;
+ };
+
+ fec: ethernet@02188000 {
+ compatible = "fsl,imx6q-fec";
+ reg = <0x02188000 0x4000>;
+ interrupts = <0 118 0x04 0 119 0x04>;
+ clocks = <&clks 117>, <&clks 117>, <&clks 190>;
+ clock-names = "ipg", "ahb", "ptp";
+ status = "disabled";
+ };
+
+ mlb@0218c000 {
+ reg = <0x0218c000 0x4000>;
+ interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
+ };
+
+ usdhc1: usdhc@02190000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02190000 0x4000>;
+ interrupts = <0 22 0x04>;
+ clocks = <&clks 163>, <&clks 163>, <&clks 163>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc2: usdhc@02194000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02194000 0x4000>;
+ interrupts = <0 23 0x04>;
+ clocks = <&clks 164>, <&clks 164>, <&clks 164>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc3: usdhc@02198000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02198000 0x4000>;
+ interrupts = <0 24 0x04>;
+ clocks = <&clks 165>, <&clks 165>, <&clks 165>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc4: usdhc@0219c000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x0219c000 0x4000>;
+ interrupts = <0 25 0x04>;
+ clocks = <&clks 166>, <&clks 166>, <&clks 166>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@021a0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a0000 0x4000>;
+ interrupts = <0 36 0x04>;
+ clocks = <&clks 125>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@021a4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a4000 0x4000>;
+ interrupts = <0 37 0x04>;
+ clocks = <&clks 126>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@021a8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a8000 0x4000>;
+ interrupts = <0 38 0x04>;
+ clocks = <&clks 127>;
+ status = "disabled";
+ };
+
+ romcp@021ac000 {
+ reg = <0x021ac000 0x4000>;
+ };
+
+ mmdc0: mmdc@021b0000 { /* MMDC0 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b0000 0x4000>;
+ };
+
+ mmdc1: mmdc@021b4000 { /* MMDC1 */
+ reg = <0x021b4000 0x4000>;
+ };
+
+ weim@021b8000 {
+ reg = <0x021b8000 0x4000>;
+ interrupts = <0 14 0x04>;
+ };
+
+ ocotp@021bc000 {
+ compatible = "fsl,imx6q-ocotp";
+ reg = <0x021bc000 0x4000>;
+ };
+
+ ocotp@021c0000 {
+ reg = <0x021c0000 0x4000>;
+ interrupts = <0 21 0x04>;
+ };
+
+ tzasc@021d0000 { /* TZASC1 */
+ reg = <0x021d0000 0x4000>;
+ interrupts = <0 108 0x04>;
+ };
+
+ tzasc@021d4000 { /* TZASC2 */
+ reg = <0x021d4000 0x4000>;
+ interrupts = <0 109 0x04>;
+ };
+
+ audmux: audmux@021d8000 {
+ compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
+ reg = <0x021d8000 0x4000>;
+ status = "disabled";
+ };
+
+ mipi@021dc000 { /* MIPI-CSI */
+ reg = <0x021dc000 0x4000>;
+ };
+
+ mipi@021e0000 { /* MIPI-DSI */
+ reg = <0x021e0000 0x4000>;
+ };
+
+ vdoa@021e4000 {
+ reg = <0x021e4000 0x4000>;
+ interrupts = <0 18 0x04>;
+ };
+
+ uart2: serial@021e8000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021e8000 0x4000>;
+ interrupts = <0 27 0x04>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart3: serial@021ec000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021ec000 0x4000>;
+ interrupts = <0 28 0x04>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart4: serial@021f0000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f0000 0x4000>;
+ interrupts = <0 29 0x04>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart5: serial@021f4000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f4000 0x4000>;
+ interrupts = <0 30 0x04>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+ };
+
+ ipu1: ipu@02400000 {
+ #crtc-cells = <1>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02400000 0x400000>;
+ interrupts = <0 6 0x4 0 5 0x4>;
+ clocks = <&clks 130>, <&clks 131>, <&clks 132>;
+ clock-names = "bus", "di0", "di1";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-6282.dtsi b/arch/arm/boot/dts/kirkwood-6282.dtsi
index 4ccea2130a6c..192cf76fbf93 100644
--- a/arch/arm/boot/dts/kirkwood-6282.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6282.dtsi
@@ -5,6 +5,12 @@
compatible = "marvell,88f6282-pinctrl";
reg = <0x10000 0x20>;
+ pmx_nand: pmx-nand {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3",
+ "mpp4", "mpp5", "mpp18", "mpp19";
+ marvell,function = "nand";
+ };
+
pmx_sata0: pmx-sata0 {
marvell,pins = "mpp5", "mpp21", "mpp23";
marvell,function = "sata0";
@@ -21,6 +27,12 @@
marvell,pins = "mpp8", "mpp9";
marvell,function = "twsi0";
};
+
+ pmx_twsi1: pmx-twsi1 {
+ marvell,pins = "mpp36", "mpp37";
+ marvell,function = "twsi1";
+ };
+
pmx_uart0: pmx-uart0 {
marvell,pins = "mpp10", "mpp11";
marvell,function = "uart0";
@@ -30,6 +42,11 @@
marvell,pins = "mpp13", "mpp14";
marvell,function = "uart1";
};
+ pmx_sdio: pmx-sdio {
+ marvell,pins = "mpp12", "mpp13", "mpp14",
+ "mpp15", "mpp16", "mpp17";
+ marvell,function = "sdio";
+ };
};
i2c@11100 {
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index f2d386c95b07..ef2d8c705709 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -74,6 +74,13 @@
status = "okay";
nr-ports = <1>;
};
+
+ mvsdio@90000 {
+ pinctrl-0 = <&pmx_sdio>;
+ pinctrl-names = "default";
+ status = "okay";
+ /* No CD or WP GPIOs */
+ };
};
gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
new file mode 100644
index 000000000000..9555a86297c2
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
@@ -0,0 +1,94 @@
+/dts-v1/;
+
+/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
+
+/ {
+ model = "Globalscale Technologies Guruplug Server Plus";
+ compatible = "globalscale,guruplug-server-plus", "globalscale,guruplug", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlyprintk";
+ };
+
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_led_health_r &pmx_led_health_g
+ &pmx_led_wmode_r &pmx_led_wmode_g >;
+ pinctrl-names = "default";
+
+ pmx_led_health_r: pmx-led-health-r {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_led_health_g: pmx-led-health-g {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ pmx_led_wmode_r: pmx-led-wmode-r {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ pmx_led_wmode_g: pmx-led-wmode-g {
+ marvell,pins = "mpp49";
+ marvell,function = "gpio";
+ };
+ };
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "ok";
+ };
+
+ nand@3000000 {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x00100000 0x00400000>;
+ };
+
+ partition@500000 {
+ label = "data";
+ reg = <0x00500000 0x1fb00000>;
+ };
+ };
+
+ sata@80000 {
+ status = "okay";
+ nr-ports = <1>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ health-r {
+ label = "guruplug:red:health";
+ gpios = <&gpio1 14 1>;
+ };
+ health-g {
+ label = "guruplug:green:health";
+ gpios = <&gpio1 15 1>;
+ };
+ wmode-r {
+ label = "guruplug:red:wmode";
+ gpios = <&gpio1 16 1>;
+ };
+ wmode-g {
+ label = "guruplug:green:wmode";
+ gpios = <&gpio1 17 1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index 262c65403760..662dfd81b1ce 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -20,12 +20,11 @@
pinctrl: pinctrl@10000 {
pinctrl-0 = < &pmx_nand &pmx_uart0
- &pmx_led_health &pmx_sdio
+ &pmx_led_health
&pmx_sata0 &pmx_sata1
&pmx_led_user1o
&pmx_led_user1g &pmx_led_user0o
&pmx_led_user0g &pmx_led_misc
- &pmx_sdio_cd
>;
pinctrl-names = "default";
@@ -133,6 +132,14 @@
status = "okay";
};
+
+ mvsdio@90000 {
+ pinctrl-0 = <&pmx_sdio &pmx_sdio_cd>;
+ pinctrl-names = "default";
+ status = "okay";
+ cd-gpios = <&gpio1 15 0>;
+ /* No WP GPIO */
+ };
};
gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index 77d21abfcdf7..e8e7ecef1650 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -76,4 +76,10 @@
gpios = <&gpio0 12 0>;
};
};
+
+ gpio_poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio0 31 0>;
+ };
+
};
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 5509f9659546..3a178cf708d7 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -16,6 +16,105 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+ pinctrl-0 = < &pmx_led_esata_green
+ &pmx_led_esata_red
+ &pmx_led_usb_green
+ &pmx_led_usb_red
+ &pmx_usb_power_off
+ &pmx_led_sys_green
+ &pmx_led_sys_red
+ &pmx_btn_reset
+ &pmx_btn_copy
+ &pmx_led_copy_green
+ &pmx_led_copy_red
+ &pmx_led_hdd_green
+ &pmx_led_hdd_red
+ &pmx_unknown
+ &pmx_btn_power
+ &pmx_pwr_off >;
+ pinctrl-names = "default";
+
+ pmx_led_esata_green: pmx-led-esata-green {
+ marvell,pins = "mpp12";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_esata_red: pmx-led-esata-red {
+ marvell,pins = "mpp13";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_usb_green: pmx-led-usb-green {
+ marvell,pins = "mpp15";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_usb_red: pmx-led-usb-red {
+ marvell,pins = "mpp16";
+ marvell,function = "gpio";
+ };
+
+ pmx_usb_power_off: pmx-usb-power-off {
+ marvell,pins = "mpp21";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_sys_green: pmx-led-sys-green {
+ marvell,pins = "mpp28";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_sys_red: pmx-led-sys-red {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+
+ pmx_btn_reset: pmx-btn-reset {
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+
+ pmx_btn_copy: pmx-btn-copy {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_copy_green: pmx-led-copy-green {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_copy_red: pmx-led-copy-red {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_hdd_green: pmx-led-hdd-green {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_hdd_red: pmx-led-hdd-red {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+
+ pmx_unknown: pmx-unknown {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+
+ pmx_btn_power: pmx-btn-power {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+
+ pmx_pwr_off: pmx-pwr-off {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
@@ -29,6 +128,11 @@
i2c@11000 {
status = "okay";
+
+ adt7476: adt7476a@2e {
+ compatible = "adt7476";
+ reg = <0x2e>;
+ };
};
nand@3000000 {
@@ -141,4 +245,26 @@
gpios = <&gpio1 8 0>;
};
};
+
+ gpio_poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio1 16 0>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb0_power_off: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power Off";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 21 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index 49d3d74d4d38..ede7fe0d7a87 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -75,6 +75,122 @@
reg = <0x30>;
};
};
+
+ pinctrl: pinctrl@10000 {
+ pinctrl-0 = < &pmx_nand &pmx_uart0
+ &pmx_uart1 &pmx_twsi1
+ &pmx_dip_sw0 &pmx_dip_sw1
+ &pmx_dip_sw2 &pmx_dip_sw3
+ &pmx_gpio_0 &pmx_gpio_1
+ &pmx_gpio_2 &pmx_gpio_3
+ &pmx_gpio_4 &pmx_gpio_5
+ &pmx_gpio_6 &pmx_gpio_7
+ &pmx_led_red &pmx_led_green
+ &pmx_led_yellow >;
+ pinctrl-names = "default";
+
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp10", "mpp11", "mpp15",
+ "mpp16";
+ marvell,function = "uart0";
+ };
+
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp13", "mpp14", "mpp8",
+ "mpp9";
+ marvell,function = "uart1";
+ };
+
+ pmx_sysrst: pmx-sysrst {
+ marvell,pins = "mpp6";
+ marvell,function = "sysrst";
+ };
+
+ pmx_dip_sw0: pmx-dip-sw0 {
+ marvell,pins = "mpp20";
+ marvell,function = "gpio";
+ };
+
+ pmx_dip_sw1: pmx-dip-sw1 {
+ marvell,pins = "mpp21";
+ marvell,function = "gpio";
+ };
+
+ pmx_dip_sw2: pmx-dip-sw2 {
+ marvell,pins = "mpp22";
+ marvell,function = "gpio";
+ };
+
+ pmx_dip_sw3: pmx-dip-sw3 {
+ marvell,pins = "mpp23";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_0: pmx-gpio-0 {
+ marvell,pins = "mpp24";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_1: pmx-gpio-1 {
+ marvell,pins = "mpp25";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_2: pmx-gpio-2 {
+ marvell,pins = "mpp26";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_3: pmx-gpio-3 {
+ marvell,pins = "mpp27";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_4: pmx-gpio-4 {
+ marvell,pins = "mpp28";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_5: pmx-gpio-5 {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_6: pmx-gpio-6 {
+ marvell,pins = "mpp30";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_7: pmx-gpio-7 {
+ marvell,pins = "mpp31";
+ marvell,function = "gpio";
+ };
+
+ pmx_gpio_init: pmx-init {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+
+ pmx_usb_oc: pmx-usb-oc {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_red: pmx-led-red {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_green: pmx-led-green {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_yellow: pmx-led-yellow {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ };
};
gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index cd15452a52a6..842ff95d60df 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6282.dtsi"
/ {
 model = "Universal Scientific Industrial Co. Topkick-1281P2";
@@ -16,6 +17,96 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+ /*
+ * GPIO LED layout
+ *
+ * /-SYS_LED(2)
+ * |
+ * | /-DISK_LED
+ * | |
+ * | | /-WLAN_LED(2)
+ * | | |
+ * [SW] [*] [*] [*]
+ */
+
+ /*
+ * Switch positions
+ *
+ * /-SW_LEFT(2)
+ * |
+ * | /-SW_IDLE
+ * | |
+ * | | /-SW_RIGHT
+ * | | |
+ * PS [L] [I] [R] LEDS
+ */
+ pinctrl-0 = < &pmx_led_disk_yellow
+ &pmx_sata0_pwr_enable
+ &pmx_led_sys_red
+ &pmx_led_sys_blue
+ &pmx_led_wifi_green
+ &pmx_sw_left
+ &pmx_sw_right
+ &pmx_sw_idle
+ &pmx_sw_left2
+ &pmx_led_wifi_yellow
+ &pmx_uart0
+ &pmx_nand
+ &pmx_twsi0 >;
+ pinctrl-names = "default";
+
+ pmx_led_disk_yellow: pmx-led-disk-yellow {
+ marvell,pins = "mpp21";
+ marvell,function = "gpio";
+ };
+
+ pmx_sata0_pwr_enable: pmx-sata0-pwr-enable {
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_sys_red: pmx-led-sys-red {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_sys_blue: pmx-led-sys-blue {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_wifi_green: pmx-led-wifi-green {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+
+ pmx_sw_left: pmx-sw-left {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+
+ pmx_sw_right: pmx-sw-right {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+
+ pmx_sw_idle: pmx-sw-idle {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+
+ pmx_sw_left2: pmx-sw-left2 {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_wifi_yellow: pmx-led-wifi-yellow {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ };
+
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
@@ -54,6 +145,17 @@
status = "okay";
nr-ports = <1>;
};
+
+ i2c@11000 {
+ status = "ok";
+ };
+
+ mvsdio@90000 {
+ pinctrl-0 = <&pmx_sdio>;
+ pinctrl-names = "default";
+ status = "okay";
+ /* No CD or WP GPIOs */
+ };
};
gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index d6ab442b7011..2c738d9dc82a 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -193,5 +193,13 @@
clocks = <&gate_clk 17>;
status = "okay";
};
+
+ mvsdio@90000 {
+ compatible = "marvell,orion-sdio";
+ reg = <0x90000 0x200>;
+ interrupts = <28>;
+ clocks = <&gate_clk 4>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index c9b4f27d191e..7f70a39459f6 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -29,6 +29,164 @@
};
twsi1: i2c@d4011000 {
status = "okay";
+ pmic: max8925@3c {
+ compatible = "maxium,max8925";
+ reg = <0x3c>;
+ interrupts = <1>;
+ interrupt-parent = <&intcmux4>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ maxim,tsc-irq = <0>;
+
+ regulators {
+ SDV1 {
+ regulator-min-microvolt = <637500>;
+ regulator-max-microvolt = <1425000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ SDV2 {
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2225000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ SDV3 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO1 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO2 {
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2250000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO3 {
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2250000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO4 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO5 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO6 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO7 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO8 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO9 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO10 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ };
+ LDO11 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO12 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO13 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO14 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO15 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO16 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO17 {
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2250000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO18 {
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <2250000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO19 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ LDO20 {
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <3900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ backlight {
+ maxim,max8925-dual-string = <0>;
+ };
+ charger {
+ batt-detect = <0>;
+ topoff-threshold = <1>;
+ fast-charge = <7>;
+ no-temp-support = <0>;
+ no-insert-detect = <0>;
+ };
+ };
};
rtc: rtc@d4010000 {
status = "okay";
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 0514fb41627e..1429ac05b36d 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -46,7 +46,7 @@
mrvl,intc-nr-irqs = <64>;
};
- intcmux4@d4282150 {
+ intcmux4: interrupt-controller@d4282150 {
compatible = "mrvl,mmp2-mux-intc";
interrupts = <4>;
interrupt-controller;
@@ -201,6 +201,8 @@
compatible = "mrvl,mmp-twsi";
reg = <0xd4011000 0x1000>;
interrupts = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
mrvl,i2c-fast-mode;
status = "disabled";
};
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 0b7ee92c5713..3fe8dae8d32d 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -1,26 +1,24 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_DOVE=y
CONFIG_MACH_DOVE_DB=y
CONFIG_MACH_CM_A510=y
CONFIG_MACH_DOVE_DT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_HIGHMEM=y
-CONFIG_USE_OF=y
-CONFIG_ATAGS=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
-CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
CONFIG_VFP=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -32,8 +30,9 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -57,7 +56,6 @@ CONFIG_ATA=y
CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_MV643XX_ETH=y
-# CONFIG_NETDEV_10000 is not set
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
@@ -68,10 +66,7 @@ CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
@@ -81,13 +76,11 @@ CONFIG_SPI=y
CONFIG_SPI_ORION=y
# CONFIG_HWMON is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_IO_ACCESSORS=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_DOVE=y
CONFIG_NEW_LEDS=y
@@ -104,6 +97,7 @@ CONFIG_MV_XOR=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
@@ -112,24 +106,20 @@ CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=y
CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
@@ -138,7 +128,6 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
@@ -147,5 +136,4 @@ CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_MV_CESA=y
CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/mvebu_defconfig b/arch/arm/configs/mvebu_defconfig
index cbd91bce1ca9..2ec8119cff73 100644
--- a/arch/arm/configs/mvebu_defconfig
+++ b/arch/arm/configs/mvebu_defconfig
@@ -14,16 +14,20 @@ CONFIG_MACH_ARMADA_XP=y
# CONFIG_CACHE_L2X0 is not set
# CONFIG_SWP_EMULATE is not set
CONFIG_SMP=y
-# CONFIG_LOCAL_TIMERS is not set
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
# CONFIG_COMPACTION is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_VFP=y
CONFIG_NET=y
CONFIG_INET=y
+CONFIG_BT=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_CFG80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
@@ -31,16 +35,34 @@ CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_MVNETA=y
CONFIG_MARVELL_PHY=y
+CONFIG_MWIFIEX=y
+CONFIG_MWIFIEX_SDIO=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_I2C=y
+CONFIG_SPI=y
+CONFIG_SPI_ORION=y
CONFIG_I2C_MV64XXX=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_M25P80=y
CONFIG_SERIAL_8250_DW=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_MMC=y
+CONFIG_MMC_MVSDIO=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_MV=y
CONFIG_DMADEVICES=y
CONFIG_MV_XOR=y
# CONFIG_IOMMU_SUPPORT is not set
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index b69c0d3285f8..dc662fca9230 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -27,4 +27,10 @@ struct pdev_archdata {
#endif
};
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
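The device.h hunk above adds to_dma_iommu_mapping(), a config-dependent accessor that falls back to NULL when CONFIG_ARM_DMA_USE_IOMMU is off, so callers can test for a mapping without sprinkling #ifdefs. For reference, a minimal standalone sketch of that idiom (all names below are illustrative, not from the patch):

#include <stddef.h>
#include <stdio.h>

struct mapping { int id; };
struct archdata { struct mapping *mapping; };
struct device { struct archdata archdata; };

/* With the feature enabled the macro reads archdata; otherwise it
 * evaluates to NULL, so generic code can simply test the result. */
#ifdef HAVE_IOMMU
#define to_mapping(dev) ((dev)->archdata.mapping)
#else
#define to_mapping(dev) ((struct mapping *)NULL)
#endif

int main(void)
{
        struct device dev = { .archdata = { .mapping = NULL } };

        printf("mapping %s\n", to_mapping(&dev) ? "present" : "absent");
        return 0;
}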
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 799b09409fad..a8c56acc8c98 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,6 +7,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
+#include <linux/kref.h>
struct dma_iommu_mapping {
/* iommu specific data */
@@ -29,6 +30,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index dfe98866a992..d1736a53b12d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -26,7 +26,7 @@
#include <asm/kvm_arch_timer.h>
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#define KVM_MEMORY_SLOTS 32
+#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 94b4e9020b02..5c27696de14f 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -15,4 +15,26 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
+/*
+ * We cannot use xchg because it does not support 8-byte
+ * values. However, it is safe to use {ldr,str}exd directly because all
+ * platforms which Xen can run on support those instructions.
+ */
+static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
+{
+ xen_ulong_t oldval;
+ unsigned int tmp;
+
+ wmb();
+ asm volatile("@ xchg_xen_ulong\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " strexd %1, %2, %H2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (oldval), "=&r" (tmp)
+ : "r" (val), "r" (ptr)
+ : "memory", "cc");
+ return oldval;
+}
+
#endif /* _ASM_ARM_XEN_EVENTS_H */
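The xchg_xen_ulong() helper above open-codes an 8-byte atomic exchange with an ldrexd/strexd loop because plain xchg cannot handle 64-bit operands on these cores. Outside the kernel the same operation can be written portably with the compiler's __atomic builtins; the following standalone sketch (not part of the patch, assuming GCC or Clang) shows the equivalent exchange:

#include <stdint.h>
#include <stdio.h>

/* Portable 8-byte atomic exchange; the kernel version above uses an
 * explicit ldrexd/strexd loop plus a wmb() barrier instead. */
static uint64_t xchg_u64(uint64_t *ptr, uint64_t val)
{
        return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

int main(void)
{
        uint64_t word = 0x1122334455667788ULL;
        uint64_t old = xchg_u64(&word, 0xdeadbeef00000000ULL);

        printf("old=%016llx new=%016llx\n",
               (unsigned long long)old, (unsigned long long)word);
        return 0;
}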
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index c6b9096cef95..30cdacb675af 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,6 +1,7 @@
#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H
+#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
return __set_phys_to_machine(pfn, mfn);
}
+
+#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY)
+
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 4dd41fc9e235..170e9f34003f 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
kretprobe_assert(ri, orig_ret_address, trampoline_address);
kretprobe_hash_unlock(current, &flags);
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index b0179b89a04c..1c089119b2d7 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -296,7 +296,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
bust_spinlocks(0);
die_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9ada5549216d..5a936988eb24 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -232,7 +232,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
return 0;
}
@@ -240,7 +240,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index f30e13163a96..99e07c7dd745 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -633,11 +633,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
memslot = gfn_to_memslot(vcpu->kvm, gfn);
- if (!memslot->user_alloc) {
- kvm_err("non user-alloc memslots not supported\n");
- ret = -EINVAL;
- goto out_unlock;
- }
ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
if (ret == 0)
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1f50a653eb8c..c2dfe06563df 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1253,11 +1253,24 @@ static struct vpif_capture_config da850_vpif_capture_config = {
};
/* VPIF display configuration */
+
+static struct adv7343_platform_data adv7343_pdata = {
+ .mode_config = {
+ .dac_3 = 1,
+ .dac_2 = 1,
+ .dac_1 = 1,
+ },
+ .sd_config = {
+ .sd_dac_out1 = 1,
+ },
+};
+
static struct vpif_subdev_info da850_vpif_subdev[] = {
{
.name = "adv7343",
.board_info = {
I2C_BOARD_INFO("adv7343", 0x2a),
+ .platform_data = &adv7343_pdata,
},
},
};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index e4a16f98e6a2..71735e7797cc 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -690,7 +690,7 @@ static struct vpbe_output dm644xevm_vpbe_outputs[] = {
.std = VENC_STD_ALL,
.capabilities = V4L2_OUT_CAP_STD,
},
- .subdev_name = VPBE_VENC_SUBDEV_NAME,
+ .subdev_name = DM644X_VPBE_VENC_SUBDEV_NAME,
.default_mode = "ntsc",
.num_modes = ARRAY_SIZE(dm644xevm_enc_std_timing),
.modes = dm644xevm_enc_std_timing,
@@ -702,7 +702,7 @@ static struct vpbe_output dm644xevm_vpbe_outputs[] = {
.type = V4L2_OUTPUT_TYPE_ANALOG,
.capabilities = V4L2_OUT_CAP_DV_TIMINGS,
},
- .subdev_name = VPBE_VENC_SUBDEV_NAME,
+ .subdev_name = DM644X_VPBE_VENC_SUBDEV_NAME,
.default_mode = "480p59_94",
.num_modes = ARRAY_SIZE(dm644xevm_enc_preset_timing),
.modes = dm644xevm_enc_preset_timing,
@@ -713,10 +713,10 @@ static struct vpbe_config dm644xevm_display_cfg = {
.module_name = "dm644x-vpbe-display",
.i2c_adapter_id = 1,
.osd = {
- .module_name = VPBE_OSD_SUBDEV_NAME,
+ .module_name = DM644X_VPBE_OSD_SUBDEV_NAME,
},
.venc = {
- .module_name = VPBE_VENC_SUBDEV_NAME,
+ .module_name = DM644X_VPBE_VENC_SUBDEV_NAME,
},
.num_outputs = ARRAY_SIZE(dm644xevm_vpbe_outputs),
.outputs = dm644xevm_vpbe_outputs,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 11c79a3362ef..db1dd92e00af 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -669,19 +669,14 @@ static struct resource dm644x_osd_resources[] = {
},
};
-static struct osd_platform_data dm644x_osd_data = {
- .vpbe_type = VPBE_VERSION_1,
-};
-
static struct platform_device dm644x_osd_dev = {
- .name = VPBE_OSD_SUBDEV_NAME,
+ .name = DM644X_VPBE_OSD_SUBDEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_osd_resources),
.resource = dm644x_osd_resources,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &dm644x_osd_data,
},
};
@@ -751,12 +746,11 @@ static struct platform_device dm644x_vpbe_display = {
};
static struct venc_platform_data dm644x_venc_pdata = {
- .venc_type = VPBE_VERSION_1,
.setup_clock = dm644x_venc_setup_clock,
};
static struct platform_device dm644x_venc_dev = {
- .name = VPBE_VENC_SUBDEV_NAME,
+ .name = DM644X_VPBE_VENC_SUBDEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_venc_resources),
.resource = dm644x_venc_resources,
diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig
index 603c5fd99e8a..36469d813951 100644
--- a/arch/arm/mach-dove/Kconfig
+++ b/arch/arm/mach-dove/Kconfig
@@ -2,8 +2,12 @@ if ARCH_DOVE
menu "Marvell Dove Implementations"
+config DOVE_LEGACY
+ bool
+
config MACH_DOVE_DB
bool "Marvell DB-MV88AP510 Development Board"
+ select DOVE_LEGACY
select I2C_BOARDINFO
help
Say 'Y' here if you want your kernel to support the
@@ -11,6 +15,7 @@ config MACH_DOVE_DB
config MACH_CM_A510
bool "CompuLab CM-A510 Board"
+ select DOVE_LEGACY
help
Say 'Y' here if you want your kernel to support the
CompuLab CM-A510 Board.
@@ -19,6 +24,8 @@ config MACH_DOVE_DT
bool "Marvell Dove Flattened Device Tree"
select MVEBU_CLK_CORE
select MVEBU_CLK_GATING
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
select USE_OF
help
Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-dove/Makefile b/arch/arm/mach-dove/Makefile
index 5e683baf96cf..3f0a858fb597 100644
--- a/arch/arm/mach-dove/Makefile
+++ b/arch/arm/mach-dove/Makefile
@@ -1,4 +1,6 @@
-obj-y += common.o addr-map.o irq.o mpp.o
+obj-y += common.o addr-map.o irq.o
+obj-$(CONFIG_DOVE_LEGACY) += mpp.o
obj-$(CONFIG_PCI) += pcie.o
obj-$(CONFIG_MACH_DOVE_DB) += dove-db-setup.o
+obj-$(CONFIG_MACH_DOVE_DT) += board-dt.o
obj-$(CONFIG_MACH_CM_A510) += cm-a510.o
diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
new file mode 100644
index 000000000000..fbde1dd67113
--- /dev/null
+++ b/arch/arm/mach-dove/board-dt.c
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/mach-dove/board-dt.c
+ *
+ * Marvell Dove 88AP510 System On Chip FDT Board
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/usb-ehci-orion.h>
+#include <asm/hardware/cache-tauros2.h>
+#include <asm/mach/arch.h>
+#include <mach/pm.h>
+#include <plat/common.h>
+#include <plat/irq.h>
+#include "common.h"
+
+/*
+ * There are still devices that don't even know about DT;
+ * get the clock gates here and add a clock lookup.
+ */
+static void __init dove_legacy_clk_init(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, NULL,
+ "marvell,dove-gating-clock");
+ struct of_phandle_args clkspec;
+
+ clkspec.np = np;
+ clkspec.args_count = 1;
+
+ clkspec.args[0] = CLOCK_GATING_BIT_GBE;
+ orion_clkdev_add(NULL, "mv643xx_eth_port.0",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
+ orion_clkdev_add("0", "pcie",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
+ orion_clkdev_add("1", "pcie",
+ of_clk_get_from_provider(&clkspec));
+}
+
+static void __init dove_of_clk_init(void)
+{
+ mvebu_clocks_init();
+ dove_legacy_clk_init();
+}
+
+static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
+};
+
+static void __init dove_dt_init(void)
+{
+ pr_info("Dove 88AP510 SoC\n");
+
+#ifdef CONFIG_CACHE_TAUROS2
+ tauros2_init(0);
+#endif
+ dove_setup_cpu_mbus();
+
+ /* Setup root of clk tree */
+ dove_of_clk_init();
+
+ /* Internal devices not ported to DT yet */
+ dove_ge00_init(&dove_dt_ge00_data);
+ dove_pcie_init(1, 1);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static const char * const dove_dt_board_compat[] = {
+ "marvell,dove",
+ NULL
+};
+
+DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)")
+ .map_io = dove_map_io,
+ .init_early = dove_init_early,
+ .init_irq = orion_dt_init_irq,
+ .init_time = dove_timer_init,
+ .init_machine = dove_dt_init,
+ .restart = dove_restart,
+ .dt_compat = dove_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index ea84c535a110..c6b3b2bb50e7 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -360,88 +360,3 @@ void dove_restart(char mode, const char *cmd)
while (1)
;
}
-
-#if defined(CONFIG_MACH_DOVE_DT)
-/*
- * There are still devices that doesn't even know about DT,
- * get clock gates here and add a clock lookup.
- */
-static void __init dove_legacy_clk_init(void)
-{
- struct device_node *np = of_find_compatible_node(NULL, NULL,
- "marvell,dove-gating-clock");
- struct of_phandle_args clkspec;
-
- clkspec.np = np;
- clkspec.args_count = 1;
-
- clkspec.args[0] = CLOCK_GATING_BIT_USB0;
- orion_clkdev_add(NULL, "orion-ehci.0",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CLOCK_GATING_BIT_USB1;
- orion_clkdev_add(NULL, "orion-ehci.1",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CLOCK_GATING_BIT_GBE;
- orion_clkdev_add(NULL, "mv643xx_eth_port.0",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
- orion_clkdev_add("0", "pcie",
- of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
- orion_clkdev_add("1", "pcie",
- of_clk_get_from_provider(&clkspec));
-}
-
-static void __init dove_of_clk_init(void)
-{
- mvebu_clocks_init();
- dove_legacy_clk_init();
-}
-
-static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
- .phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
-};
-
-static void __init dove_dt_init(void)
-{
- pr_info("Dove 88AP510 SoC, TCLK = %d MHz.\n",
- (dove_tclk + 499999) / 1000000);
-
-#ifdef CONFIG_CACHE_TAUROS2
- tauros2_init(0);
-#endif
- dove_setup_cpu_mbus();
-
- /* Setup root of clk tree */
- dove_of_clk_init();
-
- /* Internal devices not ported to DT yet */
- dove_rtc_init();
-
- dove_ge00_init(&dove_dt_ge00_data);
- dove_ehci0_init();
- dove_ehci1_init();
- dove_pcie_init(1, 1);
-
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-static const char * const dove_dt_board_compat[] = {
- "marvell,dove",
- NULL
-};
-
-DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)")
- .map_io = dove_map_io,
- .init_early = dove_init_early,
- .init_irq = orion_dt_init_irq,
- .init_time = dove_timer_init,
- .init_machine = dove_dt_init,
- .restart = dove_restart,
- .dt_compat = dove_dt_board_compat,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 85afb031b676..70f94c87479d 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -105,11 +105,6 @@ config EXYNOS4_SETUP_FIMD0
help
Common setup code for FIMD0.
-config EXYNOS_DEV_SYSMMU
- bool
- help
- Common setup code for SYSTEM MMU in EXYNOS platforms
-
config EXYNOS4_DEV_USB_OHCI
bool
help
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index b189881657ec..435757e57bb4 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
obj-$(CONFIG_EXYNOS_DEV_DMA) += dma.o
obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
-obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o
obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index bbcb3dea0d40..8a8468d83c8c 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -24,7 +24,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -709,53 +708,53 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 14),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.0",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.1",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.2",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 4),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.3",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 11),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.4",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 4),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.5",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.6",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.7",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.8",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.10",
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
}
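The rename in this hunk is mechanical: the old SYSMMU_CLOCK_DEVNAME() macro (deleted from mach/sysmmu.h later in this patch) only pasted the numeric id onto a fixed base name, so each entry already expanded to the literal "exynos-sysmmu.N" string that is now spelled out. A small standalone check of that expansion:

#include <stdio.h>

/* Copied from the removed mach/sysmmu.h shown further down. */
#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)

int main(void)
{
        /* Prints "exynos-sysmmu.0", matching the new literal devname. */
        printf("%s\n", SYSMMU_CLOCK_DEVNAME(mfc_l, 0));
        return 0;
}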
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index fed4c26e9dad..19af9f783c56 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -26,7 +26,6 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -129,13 +128,13 @@ static struct clk init_clocks_off[] = {
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.9",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 3),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.11",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
}, {
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 8fba0b5fb8ab..529476f8ec71 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -26,7 +26,6 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -111,21 +110,31 @@ static struct clksrc_clk clksrcs[] = {
static struct clk init_clocks_off[] = {
{
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.9",
.enable = exynos4_clk_ip_dmc_ctrl,
.ctrlbit = (1 << 24),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.12",
.enable = exynos4212_clk_ip_isp0_ctrl,
.ctrlbit = (7 << 8),
}, {
- .name = SYSMMU_CLOCK_NAME2,
- .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.13",
.enable = exynos4212_clk_ip_isp1_ctrl,
.ctrlbit = (1 << 4),
}, {
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.14",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.15",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
.name = "flite",
.devname = "exynos-fimc-lite.0",
.enable = exynos4212_clk_ip_isp0_ctrl,
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index e9d7b80bae49..b0ea31fc9fb8 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -24,7 +24,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/sysmmu.h>
#include "common.h"
@@ -859,73 +858,78 @@ static struct clk exynos5_init_clocks_off[] = {
.enable = exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 3),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.1",
.enable = &exynos5_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.0",
.enable = &exynos5_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.2",
.enable = &exynos5_clk_ip_disp1_ctrl,
.ctrlbit = (1 << 9)
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.3",
.enable = &exynos5_clk_ip_gen_ctrl,
.ctrlbit = (1 << 7),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.4",
.enable = &exynos5_clk_ip_gen_ctrl,
.ctrlbit = (1 << 6)
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.5",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 7),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.6",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.7",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 9),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.8",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 10),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.9",
.enable = &exynos5_clk_ip_isp0_ctrl,
.ctrlbit = (0x3F << 8),
}, {
- .name = SYSMMU_CLOCK_NAME2,
- .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.10",
.enable = &exynos5_clk_ip_isp1_ctrl,
.ctrlbit = (0xF << 4),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(camif0, 12),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.11",
+ .enable = &exynos5_clk_ip_disp1_ctrl,
+ .ctrlbit = (1 << 8)
+ }, {
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.12",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 11),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(camif1, 13),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.13",
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 12),
}, {
- .name = SYSMMU_CLOCK_NAME,
- .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .name = "sysmmu",
+ .devname = "exynos-sysmmu.14",
.enable = &exynos5_clk_ip_acp_ctrl,
.ctrlbit = (1 << 7)
}
diff --git a/arch/arm/mach-exynos/dev-sysmmu.c b/arch/arm/mach-exynos/dev-sysmmu.c
deleted file mode 100644
index c5b1ea301df0..000000000000
--- a/arch/arm/mach-exynos/dev-sysmmu.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/* linux/arch/arm/mach-exynos/dev-sysmmu.c
- *
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - System MMU support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-
-#include <plat/cpu.h>
-
-#include <mach/map.h>
-#include <mach/irqs.h>
-#include <mach/sysmmu.h>
-
-static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
-
-#define SYSMMU_PLATFORM_DEVICE(ipname, devid) \
-static struct sysmmu_platform_data platdata_##ipname = { \
- .dbgname = #ipname, \
-}; \
-struct platform_device SYSMMU_PLATDEV(ipname) = \
-{ \
- .name = SYSMMU_DEVNAME_BASE, \
- .id = devid, \
- .dev = { \
- .dma_mask = &exynos_sysmmu_dma_mask, \
- .coherent_dma_mask = DMA_BIT_MASK(32), \
- .platform_data = &platdata_##ipname, \
- }, \
-}
-
-SYSMMU_PLATFORM_DEVICE(mfc_l, 0);
-SYSMMU_PLATFORM_DEVICE(mfc_r, 1);
-SYSMMU_PLATFORM_DEVICE(tv, 2);
-SYSMMU_PLATFORM_DEVICE(jpeg, 3);
-SYSMMU_PLATFORM_DEVICE(rot, 4);
-SYSMMU_PLATFORM_DEVICE(fimc0, 5); /* fimc* and gsc* exist exclusively */
-SYSMMU_PLATFORM_DEVICE(fimc1, 6);
-SYSMMU_PLATFORM_DEVICE(fimc2, 7);
-SYSMMU_PLATFORM_DEVICE(fimc3, 8);
-SYSMMU_PLATFORM_DEVICE(gsc0, 5);
-SYSMMU_PLATFORM_DEVICE(gsc1, 6);
-SYSMMU_PLATFORM_DEVICE(gsc2, 7);
-SYSMMU_PLATFORM_DEVICE(gsc3, 8);
-SYSMMU_PLATFORM_DEVICE(isp, 9);
-SYSMMU_PLATFORM_DEVICE(fimd0, 10);
-SYSMMU_PLATFORM_DEVICE(fimd1, 11);
-SYSMMU_PLATFORM_DEVICE(camif0, 12);
-SYSMMU_PLATFORM_DEVICE(camif1, 13);
-SYSMMU_PLATFORM_DEVICE(2d, 14);
-
-#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
-
-#define SYSMMU_RESOURCE(core, ipname) \
- static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
-
-#define DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
- DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem), \
- DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
-
-#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq) \
- SYSMMU_RESOURCE(core, ipname) { \
- DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
- }
-
-struct sysmmu_resource_map {
- struct platform_device *pdev;
- struct resource *res;
- u32 rnum;
- struct device *pdd;
- char *clocknames;
-};
-
-#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) { \
- .pdev = &SYSMMU_PLATDEV(ipname), \
- .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
- .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
- .clocknames = SYSMMU_CLOCK_NAME, \
-}
-
-#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) { \
- .pdev = &SYSMMU_PLATDEV(ipname), \
- .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
- .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
- .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
-}
-
-#ifdef CONFIG_EXYNOS_DEV_PD
-#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) { \
- .pdev = &SYSMMU_PLATDEV(ipname), \
- .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
- .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
- .clocknames = SYSMMU_CLOCK_NAME, \
- .pdd = &exynos##core##_device_pd[pd].dev, \
-}
-
-#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
- .pdev = &SYSMMU_PLATDEV(ipname), \
- .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
- .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
- .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
- .pdd = &exynos##core##_device_pd[pd].dev, \
-}
-#else
-#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) \
- SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
-#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
- SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
-
-#endif /* CONFIG_EXYNOS_DEV_PD */
-
-#ifdef CONFIG_ARCH_EXYNOS4
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0, FIMC0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1, FIMC1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2, FIMC2);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3, FIMC3);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d, G2D, 2D);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv, TV, TV_M0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp, 2D_ACP, 2D);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot, ROTATOR, ROTATOR);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0, LCD0_M0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1, LCD1_M1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0, FIMC_LITE0, FIMC_LITE0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1, FIMC_LITE1, FIMC_LITE1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R, MFC_M0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L, MFC_M1);
-SYSMMU_RESOURCE(EXYNOS4, isp) {
- DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
- DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
- DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
- DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
-};
-
-static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
- SYSMMU_RESOURCE_MAPPING_PD(4, fimc0, fimc0, PD_CAM),
- SYSMMU_RESOURCE_MAPPING_PD(4, fimc1, fimc1, PD_CAM),
- SYSMMU_RESOURCE_MAPPING_PD(4, fimc2, fimc2, PD_CAM),
- SYSMMU_RESOURCE_MAPPING_PD(4, fimc3, fimc3, PD_CAM),
- SYSMMU_RESOURCE_MAPPING_PD(4, tv, tv, PD_TV),
- SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r, mfc_r, PD_MFC),
- SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l, mfc_l, PD_MFC),
- SYSMMU_RESOURCE_MAPPING_PD(4, rot, rot, PD_LCD0),
- SYSMMU_RESOURCE_MAPPING_PD(4, jpeg, jpeg, PD_CAM),
- SYSMMU_RESOURCE_MAPPING_PD(4, fimd0, fimd0, PD_LCD0),
-};
-
-static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
- SYSMMU_RESOURCE_MAPPING_PD(4, 2d, 2d, PD_LCD0),
- SYSMMU_RESOURCE_MAPPING_PD(4, fimd1, fimd1, PD_LCD1),
-};
-
-static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
- SYSMMU_RESOURCE_MAPPING(4, 2d, 2d_acp),
- SYSMMU_RESOURCE_MAPPING_PD(4, camif0, flite0, PD_ISP),
- SYSMMU_RESOURCE_MAPPING_PD(4, camif1, flite1, PD_ISP),
- SYSMMU_RESOURCE_MAPPING_PD(4, isp, isp, PD_ISP),
-};
-#endif /* CONFIG_ARCH_EXYNOS4 */
-
-#ifdef CONFIG_ARCH_EXYNOS5
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg, JPEG, JPEG);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1, FIMD1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d, 2D, 2D);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot, ROTATOR, ROTATOR);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv, TV, TV);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0, LITE0, LITE0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1, LITE1, LITE1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0, GSC0, GSC0);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1, GSC1, GSC1);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2, GSC2, GSC2);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3, GSC3, GSC3);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R, MFC_R);
-SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L, MFC_L);
-SYSMMU_RESOURCE(EXYNOS5, isp) {
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
- DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
-};
-
-static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
- SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg),
- SYSMMU_RESOURCE_MAPPING(5, fimd1, fimd1),
- SYSMMU_RESOURCE_MAPPING(5, 2d, 2d),
- SYSMMU_RESOURCE_MAPPING(5, rot, rot),
- SYSMMU_RESOURCE_MAPPING_PD(5, tv, tv, PD_DISP1),
- SYSMMU_RESOURCE_MAPPING_PD(5, camif0, flite0, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, camif1, flite1, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, gsc0, gsc0, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, gsc1, gsc1, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, gsc2, gsc2, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, gsc3, gsc3, PD_GSCL),
- SYSMMU_RESOURCE_MAPPING_PD(5, mfc_r, mfc_r, PD_MFC),
- SYSMMU_RESOURCE_MAPPING_PD(5, mfc_l, mfc_l, PD_MFC),
- SYSMMU_RESOURCE_MAPPING_MCPD(5, isp, isp, PD_ISP, mc_platdata),
-};
-#endif /* CONFIG_ARCH_EXYNOS5 */
-
-static int __init init_sysmmu_platform_device(void)
-{
- int i, j;
- struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
- int nmap[2] = {0, 0};
-
-#ifdef CONFIG_ARCH_EXYNOS5
- if (soc_is_exynos5250()) {
- resmap[0] = sysmmu_resmap5;
- nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
- nmap[1] = 0;
- }
-#endif
-
-#ifdef CONFIG_ARCH_EXYNOS4
- if (resmap[0] == NULL) {
- resmap[0] = sysmmu_resmap4;
- nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
- }
-
- if (soc_is_exynos4210()) {
- resmap[1] = sysmmu_resmap4210;
- nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
- }
-
- if (soc_is_exynos4412() || soc_is_exynos4212()) {
- resmap[1] = sysmmu_resmap4212;
- nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
- }
-#endif
-
- for (j = 0; j < 2; j++) {
- for (i = 0; i < nmap[j]; i++) {
- struct sysmmu_resource_map *map;
- struct sysmmu_platform_data *platdata;
-
- map = &resmap[j][i];
-
- map->pdev->dev.parent = map->pdd;
-
- platdata = map->pdev->dev.platform_data;
- platdata->clockname = map->clocknames;
-
- if (platform_device_add_resources(map->pdev, map->res,
- map->rnum)) {
- pr_err("%s: Failed to add device resources for "
- "%s.%d\n", __func__,
- map->pdev->name, map->pdev->id);
- continue;
- }
-
- if (platform_device_register(map->pdev)) {
- pr_err("%s: Failed to register %s.%d\n",
- __func__, map->pdev->name,
- map->pdev->id);
- }
- }
- }
-
- return 0;
-}
-arch_initcall(init_sysmmu_platform_device);
diff --git a/arch/arm/mach-exynos/include/mach/sysmmu.h b/arch/arm/mach-exynos/include/mach/sysmmu.h
deleted file mode 100644
index 88a4543b0001..000000000000
--- a/arch/arm/mach-exynos/include/mach/sysmmu.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - System MMU support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
-#define _ARM_MACH_EXYNOS_SYSMMU_H_
-
-struct sysmmu_platform_data {
- char *dbgname;
- /* comma(,) separated list of clock names for clock gating */
- char *clockname;
-};
-
-#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
-
-#define SYSMMU_CLOCK_NAME "sysmmu"
-#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
-
-#ifdef CONFIG_EXYNOS_DEV_SYSMMU
-#include <linux/device.h>
-struct platform_device;
-
-#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
-
-extern struct platform_device SYSMMU_PLATDEV(mfc_l);
-extern struct platform_device SYSMMU_PLATDEV(mfc_r);
-extern struct platform_device SYSMMU_PLATDEV(tv);
-extern struct platform_device SYSMMU_PLATDEV(jpeg);
-extern struct platform_device SYSMMU_PLATDEV(rot);
-extern struct platform_device SYSMMU_PLATDEV(fimc0);
-extern struct platform_device SYSMMU_PLATDEV(fimc1);
-extern struct platform_device SYSMMU_PLATDEV(fimc2);
-extern struct platform_device SYSMMU_PLATDEV(fimc3);
-extern struct platform_device SYSMMU_PLATDEV(gsc0);
-extern struct platform_device SYSMMU_PLATDEV(gsc1);
-extern struct platform_device SYSMMU_PLATDEV(gsc2);
-extern struct platform_device SYSMMU_PLATDEV(gsc3);
-extern struct platform_device SYSMMU_PLATDEV(isp);
-extern struct platform_device SYSMMU_PLATDEV(fimd0);
-extern struct platform_device SYSMMU_PLATDEV(fimd1);
-extern struct platform_device SYSMMU_PLATDEV(camif0);
-extern struct platform_device SYSMMU_PLATDEV(camif1);
-extern struct platform_device SYSMMU_PLATDEV(2d);
-
-#ifdef CONFIG_IOMMU_API
-static inline void platform_set_sysmmu(
- struct device *sysmmu, struct device *dev)
-{
- dev->archdata.iommu = sysmmu;
-}
-#endif
-
-#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
-#define platform_set_sysmmu(sysmmu, dev) do { } while (0)
-#endif
-
-#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
-
-#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 112d10e53d20..3358088c822a 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -79,6 +79,40 @@ static const struct of_dev_auxdata exynos4_auxdata_lookup[] __initconst = {
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_MDMA1, "dma-pl330.2", NULL),
OF_DEV_AUXDATA("samsung,exynos4210-tmu", EXYNOS4_PA_TMU,
"exynos-tmu", NULL),
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13620000,
+ "exynos-sysmmu.0", NULL), /* MFC_L */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13630000,
+ "exynos-sysmmu.1", NULL), /* MFC_R */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13E20000,
+ "exynos-sysmmu.2", NULL), /* TV */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11A60000,
+ "exynos-sysmmu.3", NULL), /* JPEG */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x12A30000,
+ "exynos-sysmmu.4", NULL), /* ROTATOR */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11A20000,
+ "exynos-sysmmu.5", NULL), /* FIMC0 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11A30000,
+ "exynos-sysmmu.6", NULL), /* FIMC1 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11A40000,
+ "exynos-sysmmu.7", NULL), /* FIMC2 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11A50000,
+ "exynos-sysmmu.8", NULL), /* FIMC3 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x12A20000,
+ "exynos-sysmmu.9", NULL), /* G2D(4210) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x10A40000,
+ "exynos-sysmmu.9", NULL), /* G2D(4x12) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11E20000,
+ "exynos-sysmmu.10", NULL), /* FIMD0 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x12220000,
+ "exynos-sysmmu.11", NULL), /* FIMD1(4210) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x12260000,
+ "exynos-sysmmu.12", NULL), /* IS0(4x12) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x122B0000,
+ "exynos-sysmmu.13", NULL), /* IS1(4x12) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x123B0000,
+ "exynos-sysmmu.14", NULL), /* FIMC-LITE0(4x12) */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x123C0000,
+ "exynos-sysmmu.15", NULL), /* FIMC-LITE1(4x12) */
{},
};
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 973a06637572..acaeb14db54b 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -109,6 +109,36 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"samsung-i2s.1", NULL),
OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D70000,
"samsung-i2s.2", NULL),
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11210000,
+ "exynos-sysmmu.0", "mfc"), /* MFC_L */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11200000,
+ "exynos-sysmmu.1", "mfc"), /* MFC_R */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x14650000,
+ "exynos-sysmmu.2", NULL), /* TV */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11F20000,
+ "exynos-sysmmu.3", "jpeg"), /* JPEG */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x11D40000,
+ "exynos-sysmmu.4", NULL), /* ROTATOR */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13E80000,
+ "exynos-sysmmu.5", "gscl"), /* GSCL0 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13E90000,
+ "exynos-sysmmu.6", "gscl"), /* GSCL1 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13EA0000,
+ "exynos-sysmmu.7", "gscl"), /* GSCL2 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13EB0000,
+ "exynos-sysmmu.8", "gscl"), /* GSCL3 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13260000,
+ "exynos-sysmmu.9", NULL), /* FIMC-IS0 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x132C0000,
+ "exynos-sysmmu.10", NULL), /* FIMC-IS1 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x14640000,
+ "exynos-sysmmu.11", NULL), /* FIMD1 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13C40000,
+ "exynos-sysmmu.12", NULL), /* FIMC-LITE0 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x13C50000,
+ "exynos-sysmmu.13", NULL), /* FIMC-LITE1 */
+ OF_DEV_AUXDATA("samsung,exynos-sysmmu", 0x10A60000,
+ "exynos-sysmmu.14", NULL), /* G2D */
{},
};
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b8b3fbf0bae7..1ea79730187f 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1208,25 +1208,25 @@ static struct i2c_board_info m5mols_board_info = {
.platform_data = &m5mols_platdata,
};
-static struct s5p_fimc_isp_info nuri_camera_sensors[] = {
+static struct fimc_source_info nuri_camera_sensors[] = {
{
.flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_ITU_601,
+ .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &s5k6aa_board_info,
.clk_frequency = 24000000UL,
.i2c_bus_num = 6,
}, {
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_MIPI_CSI2,
+ .fimc_bus_type = FIMC_BUS_TYPE_MIPI_CSI2,
.board_info = &m5mols_board_info,
.clk_frequency = 24000000UL,
},
};
static struct s5p_platform_fimc fimc_md_platdata = {
- .isp_info = nuri_camera_sensors,
+ .source_info = nuri_camera_sensors,
.num_clients = ARRAY_SIZE(nuri_camera_sensors),
};
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index c9d33a43103e..497fcb793dc1 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -987,12 +987,12 @@ static struct i2c_board_info m5mols_board_info = {
.platform_data = &m5mols_platdata,
};
-static struct s5p_fimc_isp_info universal_camera_sensors[] = {
+static struct fimc_source_info universal_camera_sensors[] = {
{
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_ITU_601,
+ .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &s5k6aa_board_info,
.i2c_bus_num = 0,
.clk_frequency = 24000000UL,
@@ -1000,7 +1000,7 @@ static struct s5p_fimc_isp_info universal_camera_sensors[] = {
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_MIPI_CSI2,
+ .fimc_bus_type = FIMC_BUS_TYPE_MIPI_CSI2,
.board_info = &m5mols_board_info,
.i2c_bus_num = 0,
.clk_frequency = 24000000UL,
@@ -1008,7 +1008,7 @@ static struct s5p_fimc_isp_info universal_camera_sensors[] = {
};
static struct s5p_platform_fimc fimc_md_platdata = {
- .isp_info = universal_camera_sensors,
+ .source_info = universal_camera_sensors,
.num_clients = ARRAY_SIZE(universal_camera_sensors),
};
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index fb7cb841b64c..0f39f8c93b94 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -83,6 +83,7 @@ enum imx5_clks {
ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
epit1_ipg_gate, epit1_hf_gate, epit2_ipg_gate, epit2_hf_gate,
can_sel, can1_serial_gate, can1_ipg_gate,
+ owire_gate,
clk_max
};
@@ -233,12 +234,13 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk[epit1_hf_gate] = imx_clk_gate2("epit1_hf_gate", "per_root", MXC_CCM_CCGR2, 4);
clk[epit2_ipg_gate] = imx_clk_gate2("epit2_ipg_gate", "ipg", MXC_CCM_CCGR2, 6);
clk[epit2_hf_gate] = imx_clk_gate2("epit2_hf_gate", "per_root", MXC_CCM_CCGR2, 8);
+ clk[owire_gate] = imx_clk_gate2("owire_gate", "per_root", MXC_CCM_CCGR2, 22);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX5 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
-
+
clk_register_clkdev(clk[gpt_hf_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 540138c4606c..7b025ee528a5 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -164,8 +164,8 @@ enum mx6q_clks {
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll8_mlb, pll7_usb_host, pll6_enet, ssi1_ipg,
ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
- sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref,
- clk_max
+ sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
+ usbphy2_gate, clk_max
};
static struct clk *clk[clk_max];
@@ -218,8 +218,21 @@ int __init mx6q_clocks_init(void)
clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x3);
clk[pll8_mlb] = imx_clk_pllv3(IMX_PLLV3_MLB, "pll8_mlb", "osc", base + 0xd0, 0x0);
- clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 6);
- clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 6);
+ /*
+ * Bit 20 is a reserved, read-only bit; we gate on it only so that:
+ * - usbphy clk_enable/disable become no-ops
+ * - the refcount is still tracked across usbphy clk_enable/disable,
+ * so the clk framework can enable/disable usbphy's parent when needed
+ */
+ clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
+ clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+ /*
+ * usbphy*_gate needs to be on after the system boots up, and software
+ * never needs to control it again.
+ */
+ clk[usbphy1_gate] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
+ clk[usbphy2_gate] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
@@ -446,6 +459,11 @@ int __init mx6q_clocks_init(void)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clk_prepare_enable(clk[usbphy1_gate]);
+ clk_prepare_enable(clk[usbphy2_gate]);
+ }
+
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 1786b2d1257e..9ffd103b27e4 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -22,6 +23,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/opp.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <linux/micrel_phy.h>
@@ -200,6 +202,64 @@ static void __init imx6q_init_machine(void)
imx6q_1588_init();
}
+#define OCOTP_CFG3 0x440
+#define OCOTP_CFG3_SPEED_SHIFT 16
+#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
+
+static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np) {
+ pr_warn("failed to find ocotp node\n");
+ return;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("failed to map ocotp\n");
+ goto put_node;
+ }
+
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
+ if (opp_disable(cpu_dev, 1200000000))
+ pr_warn("failed to disable 1.2 GHz OPP\n");
+
+put_node:
+ of_node_put(np);
+}
+
+static void __init imx6q_opp_init(struct device *cpu_dev)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ pr_warn("failed to find cpu0 node\n");
+ return;
+ }
+
+ cpu_dev->of_node = np;
+ if (of_init_opp_table(cpu_dev)) {
+ pr_warn("failed to init OPP table\n");
+ goto put_node;
+ }
+
+ imx6q_opp_check_1p2ghz(cpu_dev);
+
+put_node:
+ of_node_put(np);
+}
+
+struct platform_device imx6q_cpufreq_pdev = {
+ .name = "imx6q-cpufreq",
+};
+
static void __init imx6q_init_late(void)
{
/*
@@ -208,6 +268,11 @@ static void __init imx6q_init_late(void)
*/
if (imx6q_revision() > IMX_CHIP_REVISION_1_1)
imx6q_cpuidle_init();
+
+ if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
+ imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+ platform_device_register(&imx6q_cpufreq_pdev);
+ }
}
static void __init imx6q_map_io(void)
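The OPP code added above reads OCOTP_CFG3, shifts by OCOTP_CFG3_SPEED_SHIFT (16), and disables the 1.2 GHz operating point unless the two speed-grade bits equal OCOTP_CFG3_SPEED_1P2GHZ. A standalone sketch of just that fuse test, with the register value passed in instead of read via of_iomap():

#include <stdint.h>
#include <stdio.h>

/* Constants as defined in the hunk above. */
#define OCOTP_CFG3_SPEED_SHIFT  16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3

/* Returns nonzero when the speed-grade fuse allows the 1.2 GHz OPP. */
static int imx6q_fused_for_1p2ghz(uint32_t ocotp_cfg3)
{
        return ((ocotp_cfg3 >> OCOTP_CFG3_SPEED_SHIFT) & 0x3) ==
               OCOTP_CFG3_SPEED_1P2GHZ;
}

int main(void)
{
        printf("0x00030000 -> %d\n", imx6q_fused_for_1p2ghz(0x00030000));
        printf("0x00010000 -> %d\n", imx6q_fused_for_1p2ghz(0x00010000));
        return 0;
}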
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index f91cdff5a3e4..7b6a64bc5f40 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -58,6 +58,13 @@ config ARCH_KIRKWOOD_DT
Say 'Y' here if you want your kernel to support the
Marvell Kirkwood using flattened device tree.
+config MACH_GURUPLUG_DT
+ bool "Marvell GuruPlug Reference Board (Flattened Device Tree)"
+ select ARCH_KIRKWOOD_DT
+ help
+ Say 'Y' here if you want your kernel to support the
+ Marvell GuruPlug Reference Board (Flattened Device Tree).
+
config MACH_DREAMPLUG_DT
bool "Marvell DreamPlug (Flattened Device Tree)"
select ARCH_KIRKWOOD_DT
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index d6653095a1eb..4cc4bee4d0cf 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MACH_T5325) += t5325-setup.o
obj-$(CONFIG_ARCH_KIRKWOOD_DT) += board-dt.o
obj-$(CONFIG_MACH_DREAMPLUG_DT) += board-dreamplug.o
+obj-$(CONFIG_MACH_GURUPLUG_DT) += board-guruplug.o
obj-$(CONFIG_MACH_ICONNECT_DT) += board-iconnect.o
obj-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += board-dnskw.o
obj-$(CONFIG_MACH_IB62X0_DT) += board-ib62x0.o
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 08248e24ffcd..0903242c00dc 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
-#include <linux/platform_data/mmc-mvsdio.h>
#include "common.h"
static struct mv643xx_eth_platform_data dreamplug_ge00_data = {
@@ -26,10 +25,6 @@ static struct mv643xx_eth_platform_data dreamplug_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(1),
};
-static struct mvsdio_platform_data dreamplug_mvsdio_data = {
- /* unfortunately the CD signal has not been connected */
-};
-
void __init dreamplug_init(void)
{
/*
@@ -37,5 +32,4 @@ void __init dreamplug_init(void)
*/
kirkwood_ge00_init(&dreamplug_ge00_data);
kirkwood_ge01_init(&dreamplug_ge01_data);
- kirkwood_sdio_init(&dreamplug_mvsdio_data);
}
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 95cc04d14b65..2e73e9d53f70 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -55,10 +55,6 @@ static void __init kirkwood_legacy_clk_init(void)
orion_clkdev_add("0", "pcie",
of_clk_get_from_provider(&clkspec));
- clkspec.args[0] = CGC_BIT_USB0;
- orion_clkdev_add(NULL, "orion-ehci.0",
- of_clk_get_from_provider(&clkspec));
-
clkspec.args[0] = CGC_BIT_PEX1;
orion_clkdev_add("1", "pcie",
of_clk_get_from_provider(&clkspec));
@@ -66,11 +62,6 @@ static void __init kirkwood_legacy_clk_init(void)
clkspec.args[0] = CGC_BIT_GE1;
orion_clkdev_add(NULL, "mv643xx_eth_port.1",
of_clk_get_from_provider(&clkspec));
-
- clkspec.args[0] = CGC_BIT_SDIO;
- orion_clkdev_add(NULL, "mvsdio",
- of_clk_get_from_provider(&clkspec));
-
}
static void __init kirkwood_of_clk_init(void)
@@ -107,6 +98,9 @@ static void __init kirkwood_dt_init(void)
if (of_machine_is_compatible("globalscale,dreamplug"))
dreamplug_init();
+ if (of_machine_is_compatible("globalscale,guruplug"))
+ guruplug_dt_init();
+
if (of_machine_is_compatible("dlink,dns-kirkwood"))
dnskw_init();
@@ -150,14 +144,12 @@ static void __init kirkwood_dt_init(void)
if (of_machine_is_compatible("usi,topkick"))
usi_topkick_init();
- if (of_machine_is_compatible("zyxel,nsa310"))
- nsa310_init();
-
of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
}
static const char * const kirkwood_dt_board_compat[] = {
"globalscale,dreamplug",
+ "globalscale,guruplug",
"dlink,dns-320",
"dlink,dns-325",
"iom,iconnect",
diff --git a/arch/arm/mach-kirkwood/board-guruplug.c b/arch/arm/mach-kirkwood/board-guruplug.c
new file mode 100644
index 000000000000..0a0df4554d8b
--- /dev/null
+++ b/arch/arm/mach-kirkwood/board-guruplug.c
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/mach-kirkwood/board-guruplug.c
+ *
+ * Marvell Guruplug Reference Board Init for drivers not converted to
+ * flattened device tree yet.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/mmc-mvsdio.h>
+#include "common.h"
+
+static struct mv643xx_eth_platform_data guruplug_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
+};
+
+static struct mv643xx_eth_platform_data guruplug_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(1),
+};
+
+static struct mvsdio_platform_data guruplug_mvsdio_data = {
+ /* unfortunately the CD signal has not been connected */
+};
+
+void __init guruplug_dt_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_ge00_init(&guruplug_ge00_data);
+ kirkwood_ge01_init(&guruplug_ge01_data);
+ kirkwood_sdio_init(&guruplug_mvsdio_data);
+}
diff --git a/arch/arm/mach-kirkwood/board-mplcec4.c b/arch/arm/mach-kirkwood/board-mplcec4.c
index 3264925b8318..7d6dc669e17f 100644
--- a/arch/arm/mach-kirkwood/board-mplcec4.c
+++ b/arch/arm/mach-kirkwood/board-mplcec4.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
-#include <linux/platform_data/mmc-mvsdio.h>
#include "common.h"
static struct mv643xx_eth_platform_data mplcec4_ge00_data = {
@@ -23,11 +22,6 @@ static struct mv643xx_eth_platform_data mplcec4_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(2),
};
-static struct mvsdio_platform_data mplcec4_mvsdio_data = {
- .gpio_card_detect = 47, /* MPP47 used as SD card detect */
-};
-
-
void __init mplcec4_init(void)
{
/*
@@ -35,7 +29,6 @@ void __init mplcec4_init(void)
*/
kirkwood_ge00_init(&mplcec4_ge00_data);
kirkwood_ge01_init(&mplcec4_ge01_data);
- kirkwood_sdio_init(&mplcec4_mvsdio_data);
kirkwood_pcie_init(KW_PCIE0);
}
diff --git a/arch/arm/mach-kirkwood/board-ns2.c b/arch/arm/mach-kirkwood/board-ns2.c
index f4632a809f68..f2ea3b7ad726 100644
--- a/arch/arm/mach-kirkwood/board-ns2.c
+++ b/arch/arm/mach-kirkwood/board-ns2.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#include "common.h"
@@ -23,13 +22,6 @@ static struct mv643xx_eth_platform_data ns2_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-#define NS2_GPIO_POWER_OFF 31
-
-static void ns2_power_off(void)
-{
- gpio_set_value(NS2_GPIO_POWER_OFF, 1);
-}
-
void __init ns2_init(void)
{
/*
@@ -39,10 +31,4 @@ void __init ns2_init(void)
of_machine_is_compatible("lacie,netspace_mini_v2"))
ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
kirkwood_ge00_init(&ns2_ge00_data);
-
- if (gpio_request(NS2_GPIO_POWER_OFF, "power-off") == 0 &&
- gpio_direction_output(NS2_GPIO_POWER_OFF, 0) == 0)
- pm_power_off = ns2_power_off;
- else
- pr_err("ns2: failed to configure power-off GPIO\n");
}
diff --git a/arch/arm/mach-kirkwood/board-nsa310.c b/arch/arm/mach-kirkwood/board-nsa310.c
index 970174ad4a70..55ade93b93bf 100644
--- a/arch/arm/mach-kirkwood/board-nsa310.c
+++ b/arch/arm/mach-kirkwood/board-nsa310.c
@@ -10,79 +10,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
#include <mach/kirkwood.h>
#include <linux/of.h>
#include "common.h"
-#include "mpp.h"
-
-#define NSA310_GPIO_USB_POWER_OFF 21
-#define NSA310_GPIO_POWER_OFF 48
-
-static unsigned int nsa310_mpp_config[] __initdata = {
- MPP12_GPIO, /* led esata green */
- MPP13_GPIO, /* led esata red */
- MPP15_GPIO, /* led usb green */
- MPP16_GPIO, /* led usb red */
- MPP21_GPIO, /* control usb power off */
- MPP28_GPIO, /* led sys green */
- MPP29_GPIO, /* led sys red */
- MPP36_GPIO, /* key reset */
- MPP37_GPIO, /* key copy */
- MPP39_GPIO, /* led copy green */
- MPP40_GPIO, /* led copy red */
- MPP41_GPIO, /* led hdd green */
- MPP42_GPIO, /* led hdd red */
- MPP44_GPIO, /* ?? */
- MPP46_GPIO, /* key power */
- MPP48_GPIO, /* control power off */
- 0
-};
-
-static struct i2c_board_info __initdata nsa310_i2c_info[] = {
- { I2C_BOARD_INFO("adt7476", 0x2e) },
-};
-
-static void nsa310_power_off(void)
-{
- gpio_set_value(NSA310_GPIO_POWER_OFF, 1);
-}
-
-static int __init nsa310_gpio_request(unsigned int gpio, unsigned long flags,
- const char *label)
-{
- int err;
-
- err = gpio_request_one(gpio, flags, label);
- if (err)
- pr_err("NSA-310: can't setup GPIO%u (%s), err=%d\n",
- gpio, label, err);
-
- return err;
-}
-
-static void __init nsa310_gpio_init(void)
-{
- int err;
-
- err = nsa310_gpio_request(NSA310_GPIO_POWER_OFF, GPIOF_OUT_INIT_LOW,
- "Power Off");
- if (!err)
- pm_power_off = nsa310_power_off;
-
- nsa310_gpio_request(NSA310_GPIO_USB_POWER_OFF, GPIOF_OUT_INIT_LOW,
- "USB Power Off");
-}
-
-void __init nsa310_init(void)
-{
- kirkwood_mpp_conf(nsa310_mpp_config);
-
- nsa310_gpio_init();
-
- i2c_register_board_info(0, ARRAY_AND_SIZE(nsa310_i2c_info));
-}
static int __init nsa310_pci_init(void)
{
diff --git a/arch/arm/mach-kirkwood/board-openblocks_a6.c b/arch/arm/mach-kirkwood/board-openblocks_a6.c
index 815fc6451d52..b11d8fdeca93 100644
--- a/arch/arm/mach-kirkwood/board-openblocks_a6.c
+++ b/arch/arm/mach-kirkwood/board-openblocks_a6.c
@@ -11,60 +11,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data openblocks_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
-static unsigned int openblocks_a6_mpp_config[] __initdata = {
- MPP0_NF_IO2,
- MPP1_NF_IO3,
- MPP2_NF_IO4,
- MPP3_NF_IO5,
- MPP4_NF_IO6,
- MPP5_NF_IO7,
- MPP6_SYSRST_OUTn,
- MPP8_UART1_RTS,
- MPP9_UART1_CTS,
- MPP10_UART0_TXD,
- MPP11_UART0_RXD,
- MPP13_UART1_TXD,
- MPP14_UART1_RXD,
- MPP15_UART0_RTS,
- MPP16_UART0_CTS,
- MPP18_NF_IO0,
- MPP19_NF_IO1,
- MPP20_GPIO, /* DIP SW0 */
- MPP21_GPIO, /* DIP SW1 */
- MPP22_GPIO, /* DIP SW2 */
- MPP23_GPIO, /* DIP SW3 */
- MPP24_GPIO, /* GPIO 0 */
- MPP25_GPIO, /* GPIO 1 */
- MPP26_GPIO, /* GPIO 2 */
- MPP27_GPIO, /* GPIO 3 */
- MPP28_GPIO, /* GPIO 4 */
- MPP29_GPIO, /* GPIO 5 */
- MPP30_GPIO, /* GPIO 6 */
- MPP31_GPIO, /* GPIO 7 */
- MPP36_TW1_SDA,
- MPP37_TW1_SCK,
- MPP38_GPIO, /* INIT */
- MPP39_GPIO, /* USB OC */
- MPP41_GPIO, /* LED: Red */
- MPP42_GPIO, /* LED: Green */
- MPP43_GPIO, /* LED: Yellow */
- 0,
-};
-
void __init openblocks_a6_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(openblocks_a6_mpp_config);
kirkwood_ge00_init(&openblocks_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-usi_topkick.c b/arch/arm/mach-kirkwood/board-usi_topkick.c
index 23d2dd1b1b1e..1cc04ec33f0b 100644
--- a/arch/arm/mach-kirkwood/board-usi_topkick.c
+++ b/arch/arm/mach-kirkwood/board-usi_topkick.c
@@ -14,64 +14,16 @@
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
-#include <linux/platform_data/mmc-mvsdio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data topkick_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
-static struct mvsdio_platform_data topkick_mvsdio_data = {
- /* unfortunately the CD signal has not been connected */
-};
-
-/*
- * GPIO LED layout
- *
- * /-SYS_LED(2)
- * |
- * | /-DISK_LED
- * | |
- * | | /-WLAN_LED(2)
- * | | |
- * [SW] [*] [*] [*]
- */
-
-/*
- * Switch positions
- *
- * /-SW_LEFT
- * |
- * | /-SW_IDLE
- * | |
- * | | /-SW_RIGHT
- * | | |
- * PS [L] [I] [R] LEDS
- */
-
-static unsigned int topkick_mpp_config[] __initdata = {
- MPP21_GPIO, /* DISK_LED (low active) - yellow */
- MPP36_GPIO, /* SATA0 power enable (high active) */
- MPP37_GPIO, /* SYS_LED2 (low active) - red */
- MPP38_GPIO, /* SYS_LED (low active) - blue */
- MPP39_GPIO, /* WLAN_LED (low active) - green */
- MPP43_GPIO, /* SW_LEFT (low active) */
- MPP44_GPIO, /* SW_RIGHT (low active) */
- MPP45_GPIO, /* SW_IDLE (low active) */
- MPP46_GPIO, /* SW_LEFT (low active) */
- MPP48_GPIO, /* WLAN_LED2 (low active) - yellow */
- 0
-};
-
void __init usi_topkick_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(topkick_mpp_config);
-
-
kirkwood_ge00_init(&topkick_ge00_data);
- kirkwood_sdio_init(&topkick_mvsdio_data);
}
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index e956d0277dd1..5ed70565c843 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -60,6 +60,11 @@ void dreamplug_init(void);
#else
static inline void dreamplug_init(void) {};
#endif
+#ifdef CONFIG_MACH_GURUPLUG_DT
+void guruplug_dt_init(void);
+#else
+static inline void guruplug_dt_init(void) {};
+#endif
#ifdef CONFIG_MACH_TS219_DT
void qnap_dt_ts219_init(void);
#else
@@ -130,12 +135,6 @@ void ns2_init(void);
static inline void ns2_init(void) {};
#endif
-#ifdef CONFIG_MACH_NSA310_DT
-void nsa310_init(void);
-#else
-static inline void nsa310_init(void) {};
-#endif
-
#ifdef CONFIG_MACH_OPENBLOCKS_A6_DT
void openblocks_a6_init(void);
#else
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 8e3fb082c3c6..274ff58271de 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -34,6 +34,7 @@
#define ARMADA_370_XP_INT_CONTROL (0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
@@ -41,28 +42,90 @@
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+
#define ACTIVE_DOORBELLS (8)
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask global enable bit
+ * For CPU interrupts, mask/unmask the calling CPU's bit
+ */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_SET_MASK_OFFS);
+#else
writel(irqd_to_hwirq(d),
per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+#endif
}
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#else
writel(irqd_to_hwirq(d),
per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#endif
}
#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
+ unsigned long reg;
+ unsigned long new_mask = 0;
+ unsigned long online_mask = 0;
+ unsigned long count = 0;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int cpu;
+
+ for_each_cpu(cpu, mask_val) {
+ new_mask |= 1 << cpu_logical_map(cpu);
+ count++;
+ }
+
+ /*
+ * Forbid multicore interrupt affinity.
+ * This is required since the MPIC HW doesn't prevent
+ * several CPUs from acknowledging the same interrupt.
+ */
+ if (count > 1)
+ return -EINVAL;
+
+ for_each_cpu(cpu, cpu_online_mask)
+ online_mask |= 1 << cpu_logical_map(cpu);
+
+ raw_spin_lock(&irq_controller_lock);
+
+ reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+ reg = (reg & (~online_mask)) | new_mask;
+ writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+
+ raw_spin_unlock(&irq_controller_lock);
+
return 0;
}
#endif
@@ -82,10 +145,17 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
{
armada_370_xp_irq_mask(irq_get_irq_data(virq));
writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
-
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_level_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
+
+ if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_percpu_devid_irq);
+
+ } else {
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_level_irq);
+ }
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
return 0;
@@ -155,6 +225,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
+
+ /*
+ * Set the default affinity of all interrupts to the boot CPU.
+ * This is required since the MPIC doesn't prevent several CPUs
+ * from acknowledging the same interrupt.
+ */
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+
#endif
return 0;
@@ -173,7 +252,7 @@ asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
if (irqnr > 1022)
break;
- if (irqnr >= 8) {
+ if (irqnr > 0) {
irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
irqnr);
handle_IRQ(irqnr, regs);
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index ff528df70119..b068b7fe99ef 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -11,7 +11,7 @@ obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \
omap_device.o sram.o
omap-2-3-common = irq.o
-hwmod-common = omap_hwmod.o \
+hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
omap_hwmod_common_data.o
clock-common = clock.o clock_common_data.o \
clkt_dpll.o clkt_clksel.o
@@ -56,6 +56,7 @@ AFLAGS_sram34xx.o :=-Wa,-march=armv7-a
# Restart code (OMAP4/5 currently in omap4-common.c)
obj-$(CONFIG_SOC_OMAP2420) += omap2-restart.o
obj-$(CONFIG_SOC_OMAP2430) += omap2-restart.o
+obj-$(CONFIG_SOC_AM33XX) += am33xx-restart.o
obj-$(CONFIG_ARCH_OMAP3) += omap3-restart.o
# Pin multiplexing
diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c
new file mode 100644
index 000000000000..88e4fa8af031
--- /dev/null
+++ b/arch/arm/mach-omap2/am33xx-restart.c
@@ -0,0 +1,34 @@
+/*
+ * am33xx-restart.c - Code common to all AM33xx machines.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+
+#include "common.h"
+#include "prm-regbits-33xx.h"
+#include "prm33xx.h"
+
+/**
+ * am33xx_restart - trigger a software restart of the SoC
+ * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c
+ * @cmd: passed from the userspace program rebooting the system (if provided)
+ *
+ * Resets the SoC. For @cmd, see the 'reboot' syscall in
+ * kernel/sys.c. No return value.
+ */
+void am33xx_restart(char mode, const char *cmd)
+{
+ /* TODO: Handle mode and cmd if necessary */
+
+ am33xx_prm_rmw_reg_bits(AM33XX_GLOBAL_WARM_SW_RST_MASK,
+ AM33XX_GLOBAL_WARM_SW_RST_MASK,
+ AM33XX_PRM_DEVICE_MOD,
+ AM33XX_PRM_RSTCTRL_OFFSET);
+
+ /* OCP barrier */
+ (void)am33xx_prm_read_reg(AM33XX_PRM_DEVICE_MOD,
+ AM33XX_PRM_RSTCTRL_OFFSET);
+}
diff --git a/arch/arm/mach-omap2/am35xx-emac.c b/arch/arm/mach-omap2/am35xx-emac.c
index a00d39107a21..25b79a297365 100644
--- a/arch/arm/mach-omap2/am35xx-emac.c
+++ b/arch/arm/mach-omap2/am35xx-emac.c
@@ -62,8 +62,7 @@ static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,
{
struct platform_device *pdev;
- pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len,
- false);
+ pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len);
if (IS_ERR(pdev)) {
WARN(1, "Can't build omap_device for %s:%s.\n",
oh->class->name, oh->name);
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 2590463e4b57..0274ff7a2a2b 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -140,6 +140,7 @@ DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
.init_machine = omap_generic_init,
.init_time = omap3_am33xx_gptimer_timer_init,
.dt_compat = am33xx_boards_compat,
+ .restart = am33xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c26d441ca4fb..3a077df6b8df 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -40,7 +40,7 @@
#include <sound/tpa6130a2-plat.h>
#include <media/radio-si4713.h>
#include <media/si4713.h>
-#include <linux/leds-lp5523.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/platform_data/tsl2563.h>
#include <linux/lis3lv02d.h>
@@ -160,7 +160,7 @@ static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
#endif
#if defined(CONFIG_LEDS_LP5523) || defined(CONFIG_LEDS_LP5523_MODULE)
-static struct lp5523_led_config rx51_lp5523_led_config[] = {
+static struct lp55xx_led_config rx51_lp5523_led_config[] = {
{
.name = "lp5523:kb1",
.chan_nr = 0,
@@ -216,10 +216,10 @@ static void rx51_lp5523_enable(bool state)
gpio_set_value(RX51_LP5523_CHIP_EN_GPIO, !!state);
}
-static struct lp5523_platform_data rx51_lp5523_platform_data = {
+static struct lp55xx_platform_data rx51_lp5523_platform_data = {
.led_config = rx51_lp5523_led_config,
.num_channels = ARRAY_SIZE(rx51_lp5523_led_config),
- .clock_mode = LP5523_CLOCK_AUTO,
+ .clock_mode = LP55XX_CLOCK_AUTO,
.setup_resources = rx51_lp5523_setup,
.release_resources = rx51_lp5523_release,
.enable = rx51_lp5523_enable,
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index 1c7c834a5b5f..8cef477d6b00 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -49,13 +49,13 @@ static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev)
{
}
-/*
- * PWMA/B register offsets (TWL4030_MODULE_PWMA)
- */
+/* Register offsets in TWL4030_MODULE_INTBR */
#define TWL_INTBR_PMBR1 0xD
#define TWL_INTBR_GPBR1 0xC
-#define TWL_LED_PWMON 0x0
-#define TWL_LED_PWMOFF 0x1
+
+/* Register offsets in TWL_MODULE_PWM */
+#define TWL_LED_PWMON 0x3
+#define TWL_LED_PWMOFF 0x4
static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level)
{
@@ -93,8 +93,8 @@ static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level)
}
c = ((50 * (100 - level)) / 100) + 1;
- twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF);
- twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON);
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0x7F, TWL_LED_PWMOFF);
+ twl_i2c_write_u8(TWL_MODULE_PWM, c, TWL_LED_PWMON);
#else
pr_warn("Backlight not enabled\n");
#endif
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
index ea64ad606759..476b82066cb6 100644
--- a/arch/arm/mach-omap2/cclock33xx_data.c
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -284,9 +284,10 @@ DEFINE_STRUCT_CLK(dpll_disp_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
* TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
* and ALT_CLK1/2)
*/
-DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck, 0x0,
- AM33XX_CM_DIV_M2_DPLL_DISP, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
- AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck,
+ CLK_SET_RATE_PARENT, AM33XX_CM_DIV_M2_DPLL_DISP,
+ AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
/* DPLL_PER */
static struct dpll_data dpll_per_dd = {
@@ -723,7 +724,8 @@ static struct clk_hw_omap lcd_gclk_hw = {
.clksel_mask = AM33XX_CLKSEL_0_1_MASK,
};
-DEFINE_STRUCT_CLK(lcd_gclk, lcd_ck_parents, gpio_fck_ops);
+DEFINE_STRUCT_CLK_FLAGS(lcd_gclk, lcd_ck_parents,
+ gpio_fck_ops, CLK_SET_RATE_PARENT);
DEFINE_CLK_FIXED_FACTOR(mmc_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1, 2);
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 6ef87580c33f..4579c3c5338f 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -426,6 +426,7 @@ static struct clk dpll4_m5x2_ck_3630 = {
.parent_names = dpll4_m5x2_ck_parent_names,
.num_parents = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
.ops = &dpll4_m5x2_ck_3630_ops,
+ .flags = CLK_SET_RATE_PARENT,
};
static struct clk cam_mclk;
@@ -443,7 +444,14 @@ static struct clk_hw_omap cam_mclk_hw = {
.clkdm_name = "cam_clkdm",
};
-DEFINE_STRUCT_CLK(cam_mclk, cam_mclk_parent_names, aes2_ick_ops);
+static struct clk cam_mclk = {
+ .name = "cam_mclk",
+ .hw = &cam_mclk_hw.hw,
+ .parent_names = cam_mclk_parent_names,
+ .num_parents = ARRAY_SIZE(cam_mclk_parent_names),
+ .ops = &aes2_ick_ops,
+ .flags = CLK_SET_RATE_PARENT,
+};
static const struct clksel_rate clkout2_src_core_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index cebe2b31943e..3d58f335f173 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -605,15 +605,26 @@ static const char *dpll_usb_ck_parents[] = {
static struct clk dpll_usb_ck;
+static const struct clk_ops dpll_usb_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+ .init = &omap2_init_clk_clkdm,
+};
+
static struct clk_hw_omap dpll_usb_ck_hw = {
.hw = {
.clk = &dpll_usb_ck,
},
.dpll_data = &dpll_usb_dd,
+ .clkdm_name = "l3_init_clkdm",
.ops = &clkhwops_omap3_dpll,
};
-DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops);
+DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_usb_ck_ops);
static const char *dpll_usb_clkdcoldo_ck_parents[] = {
"dpll_usb_ck",
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index b40204837bd7..60ddd8612b4d 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -65,6 +65,17 @@ struct clockdomain;
.ops = &_clkops_name, \
};
+#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \
+ _clkops_name, _flags) \
+ static struct clk _name = { \
+ .name = #_name, \
+ .hw = &_name##_hw.hw, \
+ .parent_names = _parent_array_name, \
+ .num_parents = ARRAY_SIZE(_parent_array_name), \
+ .ops = &_clkops_name, \
+ .flags = _flags, \
+ };
+
#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \
static struct clk_hw_omap _name##_hw = { \
.hw = { \
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index 058ce3c0873e..325a51576576 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -241,9 +241,6 @@ int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
int i = 0;
- if (!clkctrl_offs)
- return 0;
-
omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),
MAX_MODULE_READY_TIME, i);
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
index 5fa0b62e1a79..64f4bafe7bd9 100644
--- a/arch/arm/mach-omap2/cm33xx.h
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -17,16 +17,11 @@
#ifndef __ARCH_ARM_MACH_OMAP2_CM_33XX_H
#define __ARCH_ARM_MACH_OMAP2_CM_33XX_H
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
#include "common.h"
#include "cm.h"
#include "cm-regbits-33xx.h"
-#include "cm33xx.h"
+#include "iomap.h"
/* CM base address */
#define AM33XX_CM_BASE 0x44e00000
@@ -381,6 +376,7 @@
#define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0020)
+#ifndef __ASSEMBLER__
extern bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);
extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs);
extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs);
@@ -417,4 +413,5 @@ static inline int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs,
}
#endif
+#endif /* ASSEMBLER */
#endif
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index b4350274361b..0a6b9c7a63da 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -119,6 +119,14 @@ static inline void omap2xxx_restart(char mode, const char *cmd)
}
#endif
+#ifdef CONFIG_SOC_AM33XX
+void am33xx_restart(char mode, const char *cmd);
+#else
+static inline void am33xx_restart(char mode, const char *cmd)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_OMAP3
void omap3xxx_restart(char mode, const char *cmd);
#else
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 142d9c616f1b..1ec7f0597710 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -426,7 +426,7 @@ static void __init omap_init_hdmi_audio(void)
return;
}
- pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0, 0);
+ pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
WARN(IS_ERR(pdev),
"Can't build omap_device for omap-hdmi-audio-dai.\n");
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 0a02aab5df67..3aed4b0b9563 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -500,8 +500,9 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (dd->last_rounded_rate == 0)
return -EINVAL;
- /* No freqsel on OMAP4 and OMAP3630 */
- if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
+ /* No freqsel on AM335x, OMAP4 and OMAP3630 */
+ if (!soc_is_am33xx() && !cpu_is_omap44xx() &&
+ !cpu_is_omap3630()) {
freqsel = _omap3_dpll_compute_freqsel(clk,
dd->last_rounded_n);
WARN_ON(!freqsel);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 45cc7ed4dd58..8a68f1ec66b9 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -399,8 +399,18 @@ void __init omap3xxx_check_revision(void)
}
break;
case 0xb944:
- omap_revision = AM335X_REV_ES1_0;
- cpu_rev = "1.0";
+ switch (rev) {
+ case 0:
+ omap_revision = AM335X_REV_ES1_0;
+ cpu_rev = "1.0";
+ break;
+ case 1:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = AM335X_REV_ES2_0;
+ cpu_rev = "2.0";
+ break;
+ }
break;
case 0xb8f2:
switch (rev) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index a8984989dec8..c2c798c08c2b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2055,6 +2055,23 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
}
/**
+ * _enable_preprogram - Pre-program an IP block during the _enable() process
+ * @oh: struct omap_hwmod *
+ *
+ * Some IP blocks (such as AESS) require additional programming after
+ * being enabled before they can enter idle. If a function pointer to
+ * do so is present in the hwmod data, then call it and pass along the
+ * return value; otherwise, return 0.
+ */
+static int __init _enable_preprogram(struct omap_hwmod *oh)
+{
+ if (!oh->class->enable_preprogram)
+ return 0;
+
+ return oh->class->enable_preprogram(oh);
+}
+
+/**
* _enable - enable an omap_hwmod
* @oh: struct omap_hwmod *
*
@@ -2160,6 +2177,7 @@ static int _enable(struct omap_hwmod *oh)
_update_sysc_cache(oh);
_enable_sysc(oh);
}
+ r = _enable_preprogram(oh);
} else {
if (soc_ops.disable_module)
soc_ops.disable_module(oh);
@@ -3049,11 +3067,8 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri)
{
- if (ohri->st_shift)
- pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
- oh->name, ohri->name);
-
return am33xx_prm_deassert_hardreset(ohri->rst_shift,
+ ohri->st_shift,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs,
oh->prcm.omap4.rstst_offs);
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 80c00e706d69..d43d9b608eda 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -510,6 +510,7 @@ struct omap_hwmod_omap4_prcm {
* @rev: revision of the IP class
* @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
* @reset: ptr to fn to be executed in place of the standard hwmod reset fn
+ * @enable_preprogram: ptr to fn to be executed during device enable
*
* Represent the class of a OMAP hardware "modules" (e.g. timer,
* smartreflex, gpio, uart...)
@@ -533,6 +534,7 @@ struct omap_hwmod_class {
u32 rev;
int (*pre_shutdown)(struct omap_hwmod *oh);
int (*reset)(struct omap_hwmod *oh);
+ int (*enable_preprogram)(struct omap_hwmod *oh);
};
/**
@@ -680,6 +682,12 @@ extern void __init omap_hwmod_init(void);
const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
/*
+ *
+ */
+
+extern int omap_hwmod_aess_preprogram(struct omap_hwmod *oh);
+
+/*
* Chip variant-specific hwmod init routines - XXX should be converted
* to use initcalls once the initial boot ordering is straightened out
*/
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 646c14d9fdb9..26eee4a556ad 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -262,13 +262,15 @@ static struct omap_hwmod am33xx_wkup_m3_hwmod = {
.name = "wkup_m3",
.class = &am33xx_wkup_m3_hwmod_class,
.clkdm_name = "l4_wkup_aon_clkdm",
- .flags = HWMOD_INIT_NO_RESET, /* Keep hardreset asserted */
+ /* Keep hardreset asserted */
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
.mpu_irqs = am33xx_wkup_m3_irqs,
.main_clk = "dpll_core_m4_div2_ck",
.prcm = {
.omap4 = {
.clkctrl_offs = AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET,
.rstctrl_offs = AM33XX_RM_WKUP_RSTCTRL_OFFSET,
+ .rstst_offs = AM33XX_RM_WKUP_RSTST_OFFSET,
.modulemode = MODULEMODE_SWCTRL,
},
},
@@ -414,7 +416,6 @@ static struct omap_hwmod am33xx_adc_tsc_hwmod = {
* - cEFUSE (doesn't fall under any ocp_if)
* - clkdiv32k
* - debugss
- * - ocmc ram
* - ocp watch point
* - aes0
* - sha0
@@ -481,25 +482,6 @@ static struct omap_hwmod am33xx_debugss_hwmod = {
},
};
-/* ocmcram */
-static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
- .name = "ocmcram",
-};
-
-static struct omap_hwmod am33xx_ocmcram_hwmod = {
- .name = "ocmcram",
- .class = &am33xx_ocmcram_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/* ocpwp */
static struct omap_hwmod_class am33xx_ocpwp_hwmod_class = {
.name = "ocpwp",
@@ -570,6 +552,25 @@ static struct omap_hwmod am33xx_sha0_hwmod = {
#endif
+/* ocmcram */
+static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
+ .name = "ocmcram",
+};
+
+static struct omap_hwmod am33xx_ocmcram_hwmod = {
+ .name = "ocmcram",
+ .class = &am33xx_ocmcram_hwmod_class,
+ .clkdm_name = "l3_clkdm",
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .main_clk = "l3_gclk",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+};
+
/* 'smartreflex' class */
static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
.name = "smartreflex",
@@ -783,9 +784,7 @@ static struct omap_hwmod am33xx_elm_hwmod = {
},
};
-/*
- * 'epwmss' class: ecap0,1,2, ehrpwm0,1,2
- */
+/* pwmss */
static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x4,
@@ -801,18 +800,23 @@ static struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
.sysc = &am33xx_epwmss_sysc,
};
-/* ehrpwm0 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = {
- { .name = "int", .irq = 86 + OMAP_INTC_START, },
- { .name = "tzint", .irq = 58 + OMAP_INTC_START, },
- { .irq = -1 },
+static struct omap_hwmod_class am33xx_ecap_hwmod_class = {
+ .name = "ecap",
};
-static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
- .name = "ehrpwm0",
+static struct omap_hwmod_class am33xx_eqep_hwmod_class = {
+ .name = "eqep",
+};
+
+static struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = {
+ .name = "ehrpwm",
+};
+
+/* epwmss0 */
+static struct omap_hwmod am33xx_epwmss0_hwmod = {
+ .name = "epwmss0",
.class = &am33xx_epwmss_hwmod_class,
.clkdm_name = "l4ls_clkdm",
- .mpu_irqs = am33xx_ehrpwm0_irqs,
.main_clk = "l4ls_gclk",
.prcm = {
.omap4 = {
@@ -822,63 +826,58 @@ static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
},
};
-/* ehrpwm1 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = {
- { .name = "int", .irq = 87 + OMAP_INTC_START, },
- { .name = "tzint", .irq = 59 + OMAP_INTC_START, },
+/* ecap0 */
+static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = {
+ { .irq = 31 + OMAP_INTC_START, },
{ .irq = -1 },
};
-static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
- .name = "ehrpwm1",
- .class = &am33xx_epwmss_hwmod_class,
+static struct omap_hwmod am33xx_ecap0_hwmod = {
+ .name = "ecap0",
+ .class = &am33xx_ecap_hwmod_class,
.clkdm_name = "l4ls_clkdm",
- .mpu_irqs = am33xx_ehrpwm1_irqs,
+ .mpu_irqs = am33xx_ecap0_irqs,
.main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
};
-/* ehrpwm2 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = {
- { .name = "int", .irq = 39 + OMAP_INTC_START, },
- { .name = "tzint", .irq = 60 + OMAP_INTC_START, },
+/* eqep0 */
+static struct omap_hwmod_irq_info am33xx_eqep0_irqs[] = {
+ { .irq = 79 + OMAP_INTC_START, },
{ .irq = -1 },
};
-static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
- .name = "ehrpwm2",
- .class = &am33xx_epwmss_hwmod_class,
+static struct omap_hwmod am33xx_eqep0_hwmod = {
+ .name = "eqep0",
+ .class = &am33xx_eqep_hwmod_class,
.clkdm_name = "l4ls_clkdm",
- .mpu_irqs = am33xx_ehrpwm2_irqs,
+ .mpu_irqs = am33xx_eqep0_irqs,
.main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
};
-/* ecap0 */
-static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = {
- { .irq = 31 + OMAP_INTC_START, },
+/* ehrpwm0 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = {
+ { .name = "int", .irq = 86 + OMAP_INTC_START, },
+ { .name = "tzint", .irq = 58 + OMAP_INTC_START, },
{ .irq = -1 },
};
-static struct omap_hwmod am33xx_ecap0_hwmod = {
- .name = "ecap0",
+static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
+ .name = "ehrpwm0",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .mpu_irqs = am33xx_ehrpwm0_irqs,
+ .main_clk = "l4ls_gclk",
+};
+
+/* epwmss1 */
+static struct omap_hwmod am33xx_epwmss1_hwmod = {
+ .name = "epwmss1",
.class = &am33xx_epwmss_hwmod_class,
.clkdm_name = "l4ls_clkdm",
- .mpu_irqs = am33xx_ecap0_irqs,
.main_clk = "l4ls_gclk",
.prcm = {
.omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET,
+ .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
.modulemode = MODULEMODE_SWCTRL,
},
},
@@ -892,13 +891,50 @@ static struct omap_hwmod_irq_info am33xx_ecap1_irqs[] = {
static struct omap_hwmod am33xx_ecap1_hwmod = {
.name = "ecap1",
- .class = &am33xx_epwmss_hwmod_class,
+ .class = &am33xx_ecap_hwmod_class,
.clkdm_name = "l4ls_clkdm",
.mpu_irqs = am33xx_ecap1_irqs,
.main_clk = "l4ls_gclk",
+};
+
+/* eqep1 */
+static struct omap_hwmod_irq_info am33xx_eqep1_irqs[] = {
+ { .irq = 88 + OMAP_INTC_START, },
+ { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_eqep1_hwmod = {
+ .name = "eqep1",
+ .class = &am33xx_eqep_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .mpu_irqs = am33xx_eqep1_irqs,
+ .main_clk = "l4ls_gclk",
+};
+
+/* ehrpwm1 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = {
+ { .name = "int", .irq = 87 + OMAP_INTC_START, },
+ { .name = "tzint", .irq = 59 + OMAP_INTC_START, },
+ { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
+ .name = "ehrpwm1",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .mpu_irqs = am33xx_ehrpwm1_irqs,
+ .main_clk = "l4ls_gclk",
+};
+
+/* epwmss2 */
+static struct omap_hwmod am33xx_epwmss2_hwmod = {
+ .name = "epwmss2",
+ .class = &am33xx_epwmss_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .main_clk = "l4ls_gclk",
.prcm = {
.omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
+ .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
.modulemode = MODULEMODE_SWCTRL,
},
},
@@ -912,16 +948,39 @@ static struct omap_hwmod_irq_info am33xx_ecap2_irqs[] = {
static struct omap_hwmod am33xx_ecap2_hwmod = {
.name = "ecap2",
+ .class = &am33xx_ecap_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
.mpu_irqs = am33xx_ecap2_irqs,
- .class = &am33xx_epwmss_hwmod_class,
+ .main_clk = "l4ls_gclk",
+};
+
+/* eqep2 */
+static struct omap_hwmod_irq_info am33xx_eqep2_irqs[] = {
+ { .irq = 89 + OMAP_INTC_START, },
+ { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_eqep2_hwmod = {
+ .name = "eqep2",
+ .class = &am33xx_eqep_hwmod_class,
.clkdm_name = "l4ls_clkdm",
+ .mpu_irqs = am33xx_eqep2_irqs,
+ .main_clk = "l4ls_gclk",
+};
+
+/* ehrpwm2 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = {
+ { .name = "int", .irq = 39 + OMAP_INTC_START, },
+ { .name = "tzint", .irq = 60 + OMAP_INTC_START, },
+ { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
+ .name = "ehrpwm2",
+ .class = &am33xx_ehrpwm_hwmod_class,
+ .clkdm_name = "l4ls_clkdm",
+ .mpu_irqs = am33xx_ehrpwm2_irqs,
.main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
};
/*
@@ -1824,6 +1883,7 @@ static struct omap_hwmod am33xx_tptc0_hwmod = {
.class = &am33xx_tptc_hwmod_class,
.clkdm_name = "l3_clkdm",
.mpu_irqs = am33xx_tptc0_irqs,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
.main_clk = "l3_gclk",
.prcm = {
.omap4 = {
@@ -2496,7 +2556,6 @@ static struct omap_hwmod_addr_space am33xx_cpgmac0_addr_space[] = {
{
.pa_start = 0x4a100000,
.pa_end = 0x4a100000 + SZ_2K - 1,
- .flags = ADDR_TYPE_RT,
},
/* cpsw wr */
{
@@ -2547,162 +2606,202 @@ static struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_epwmss0_addr_space[] = {
{
.pa_start = 0x48300000,
.pa_end = 0x48300000 + SZ_16 - 1,
.flags = ADDR_TYPE_RT
},
- {
- .pa_start = 0x48300200,
- .pa_end = 0x48300200 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
- },
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm0 = {
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
.master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_ehrpwm0_hwmod,
+ .slave = &am33xx_epwmss0_hwmod,
.clk = "l4ls_gclk",
- .addr = am33xx_ehrpwm0_addr_space,
+ .addr = am33xx_epwmss0_addr_space,
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = {
- {
- .pa_start = 0x48302000,
- .pa_end = 0x48302000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
+static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {
{
- .pa_start = 0x48302200,
- .pa_end = 0x48302200 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
+ .pa_start = 0x48300100,
+ .pa_end = 0x48300100 + SZ_128 - 1,
},
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_ehrpwm1_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_ecap0_hwmod,
.clk = "l4ls_gclk",
- .addr = am33xx_ehrpwm1_addr_space,
+ .addr = am33xx_ecap0_addr_space,
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_eqep0_addr_space[] = {
{
- .pa_start = 0x48304000,
- .pa_end = 0x48304000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
- {
- .pa_start = 0x48304200,
- .pa_end = 0x48304200 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
+ .pa_start = 0x48300180,
+ .pa_end = 0x48300180 + SZ_128 - 1,
},
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_ehrpwm2_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_eqep0_hwmod,
.clk = "l4ls_gclk",
- .addr = am33xx_ehrpwm2_addr_space,
+ .addr = am33xx_eqep0_addr_space,
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {
- {
- .pa_start = 0x48300000,
- .pa_end = 0x48300000 + SZ_16 - 1,
- .flags = ADDR_TYPE_RT
- },
+static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {
{
- .pa_start = 0x48300100,
- .pa_end = 0x48300100 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
+ .pa_start = 0x48300200,
+ .pa_end = 0x48300200 + SZ_128 - 1,
},
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_ecap0_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = {
+ .master = &am33xx_epwmss0_hwmod,
+ .slave = &am33xx_ehrpwm0_hwmod,
.clk = "l4ls_gclk",
- .addr = am33xx_ecap0_addr_space,
+ .addr = am33xx_ehrpwm0_addr_space,
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {
+
+static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {
{
.pa_start = 0x48302000,
.pa_end = 0x48302000 + SZ_16 - 1,
.flags = ADDR_TYPE_RT
},
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_epwmss1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_epwmss1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {
{
.pa_start = 0x48302100,
- .pa_end = 0x48302100 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
+ .pa_end = 0x48302100 + SZ_128 - 1,
},
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap1 = {
- .master = &am33xx_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = {
+ .master = &am33xx_epwmss1_hwmod,
.slave = &am33xx_ecap1_hwmod,
.clk = "l4ls_gclk",
.addr = am33xx_ecap1_addr_space,
.user = OCP_USER_MPU,
};
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_eqep1_addr_space[] = {
+ {
+ .pa_start = 0x48302180,
+ .pa_end = 0x48302180 + SZ_128 - 1,
+ },
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = {
+ .master = &am33xx_epwmss1_hwmod,
+ .slave = &am33xx_eqep1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_eqep1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = {
+ {
+ .pa_start = 0x48302200,
+ .pa_end = 0x48302200 + SZ_128 - 1,
+ },
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = {
+ .master = &am33xx_epwmss1_hwmod,
+ .slave = &am33xx_ehrpwm1_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_ehrpwm1_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {
{
.pa_start = 0x48304000,
.pa_end = 0x48304000 + SZ_16 - 1,
.flags = ADDR_TYPE_RT
},
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
+ .master = &am33xx_l4_ls_hwmod,
+ .slave = &am33xx_epwmss2_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_epwmss2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {
{
.pa_start = 0x48304100,
- .pa_end = 0x48304100 + SZ_256 - 1,
- .flags = ADDR_TYPE_RT
+ .pa_end = 0x48304100 + SZ_128 - 1,
},
{ }
};
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap2 = {
- .master = &am33xx_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = {
+ .master = &am33xx_epwmss2_hwmod,
.slave = &am33xx_ecap2_hwmod,
.clk = "l4ls_gclk",
.addr = am33xx_ecap2_addr_space,
.user = OCP_USER_MPU,
};
+static struct omap_hwmod_addr_space am33xx_eqep2_addr_space[] = {
+ {
+ .pa_start = 0x48304180,
+ .pa_end = 0x48304180 + SZ_128 - 1,
+ },
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = {
+ .master = &am33xx_epwmss2_hwmod,
+ .slave = &am33xx_eqep2_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_eqep2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = {
+ {
+ .pa_start = 0x48304200,
+ .pa_end = 0x48304200 + SZ_128 - 1,
+ },
+ { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = {
+ .master = &am33xx_epwmss2_hwmod,
+ .slave = &am33xx_ehrpwm2_hwmod,
+ .clk = "l4ls_gclk",
+ .addr = am33xx_ehrpwm2_addr_space,
+ .user = OCP_USER_MPU,
+};
+
/* l3s cfg -> gpmc */
static struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {
{
@@ -3328,6 +3427,13 @@ static struct omap_hwmod_ocp_if am33xx_l3_s__usbss = {
.flags = OCPIF_SWSUP_IDLE,
};
+/* l3 main -> ocmc */
+static struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
+ .master = &am33xx_l3_main_hwmod,
+ .slave = &am33xx_ocmcram_hwmod,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_fw__emif_fw,
&am33xx_l3_main__emif,
@@ -3385,12 +3491,18 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_ls__uart6,
&am33xx_l4_ls__spinlock,
&am33xx_l4_ls__elm,
- &am33xx_l4_ls__ehrpwm0,
- &am33xx_l4_ls__ehrpwm1,
- &am33xx_l4_ls__ehrpwm2,
- &am33xx_l4_ls__ecap0,
- &am33xx_l4_ls__ecap1,
- &am33xx_l4_ls__ecap2,
+ &am33xx_l4_ls__epwmss0,
+ &am33xx_epwmss0__ecap0,
+ &am33xx_epwmss0__eqep0,
+ &am33xx_epwmss0__ehrpwm0,
+ &am33xx_l4_ls__epwmss1,
+ &am33xx_epwmss1__ecap1,
+ &am33xx_epwmss1__eqep1,
+ &am33xx_epwmss1__ehrpwm1,
+ &am33xx_l4_ls__epwmss2,
+ &am33xx_epwmss2__ecap2,
+ &am33xx_epwmss2__eqep2,
+ &am33xx_epwmss2__ehrpwm2,
&am33xx_l3_s__gpmc,
&am33xx_l3_main__lcdc,
&am33xx_l4_ls__mcspi0,
@@ -3398,6 +3510,7 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l3_main__tptc0,
&am33xx_l3_main__tptc1,
&am33xx_l3_main__tptc2,
+ &am33xx_l3_main__ocmc,
&am33xx_l3_s__usbss,
&am33xx_l4_hs__cpgmac0,
&am33xx_cpgmac0__mdio,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 8bb2628df34e..ac7e03ec952f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3493,7 +3493,12 @@ static struct omap_hwmod am35xx_emac_hwmod = {
.name = "davinci_emac",
.mpu_irqs = am35xx_emac_mpu_irqs,
.class = &am35xx_emac_class,
- .flags = HWMOD_NO_IDLEST,
+ /*
+ * According to Mark Greer, the MPU will not return from WFI
+ * when the EMAC signals an interrupt.
+ * http://www.spinics.net/lists/arm-kernel/msg174734.html
+ */
+ .flags = (HWMOD_NO_IDLEST | HWMOD_BLOCK_WFI),
};
/* l3_core -> davinci emac interface */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7ec1083ff604..0e47d2e1687c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -322,6 +322,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
.name = "aess",
.sysc = &omap44xx_aess_sysc,
+ .enable_preprogram = omap_hwmod_aess_preprogram,
};
/* aess */
@@ -348,7 +349,7 @@ static struct omap_hwmod omap44xx_aess_hwmod = {
.clkdm_name = "abe_clkdm",
.mpu_irqs = omap44xx_aess_irqs,
.sdma_reqs = omap44xx_aess_sdma_reqs,
- .main_clk = "aess_fck",
+ .main_clk = "aess_fclk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET,
@@ -4241,6 +4242,27 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = {
static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
{
+ .name = "dmem",
+ .pa_start = 0x40180000,
+ .pa_end = 0x4018ffff
+ },
+ {
+ .name = "cmem",
+ .pa_start = 0x401a0000,
+ .pa_end = 0x401a1fff
+ },
+ {
+ .name = "smem",
+ .pa_start = 0x401c0000,
+ .pa_end = 0x401c5fff
+ },
+ {
+ .name = "pmem",
+ .pa_start = 0x401e0000,
+ .pa_end = 0x401e1fff
+ },
+ {
+ .name = "mpu",
.pa_start = 0x401f1000,
.pa_end = 0x401f13ff,
.flags = ADDR_TYPE_RT
@@ -4259,6 +4281,27 @@ static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
{
+ .name = "dmem_dma",
+ .pa_start = 0x49080000,
+ .pa_end = 0x4908ffff
+ },
+ {
+ .name = "cmem_dma",
+ .pa_start = 0x490a0000,
+ .pa_end = 0x490a1fff
+ },
+ {
+ .name = "smem_dma",
+ .pa_start = 0x490c0000,
+ .pa_end = 0x490c5fff
+ },
+ {
+ .name = "pmem_dma",
+ .pa_start = 0x490e0000,
+ .pa_end = 0x490e1fff
+ },
+ {
+ .name = "dma",
.pa_start = 0x490f1000,
.pa_end = 0x490f13ff,
.flags = ADDR_TYPE_RT
@@ -6268,7 +6311,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l3_main_1__l3_main_3,
&omap44xx_l3_main_2__l3_main_3,
&omap44xx_l4_cfg__l3_main_3,
- /* &omap44xx_aess__l4_abe, */
+ &omap44xx_aess__l4_abe,
&omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
@@ -6277,8 +6320,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__l4_wkup,
&omap44xx_mpu__mpu_private,
&omap44xx_l4_cfg__ocp_wp_noc,
- /* &omap44xx_l4_abe__aess, */
- /* &omap44xx_l4_abe__aess_dma, */
+ &omap44xx_l4_abe__aess,
+ &omap44xx_l4_abe__aess_dma,
&omap44xx_l3_main_2__c2c,
&omap44xx_l4_wkup__counter_32k,
&omap44xx_l4_cfg__ctrl_module_core,
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
new file mode 100644
index 000000000000..65e186c9df55
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -0,0 +1,53 @@
+/*
+ * OMAP IP block custom reset and preprogramming stubs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Paul Walmsley
+ *
+ * A small number of IP blocks need custom reset and preprogramming
+ * functions. The stubs in this file provide a standard way for the
+ * hwmod code to call these functions, which are to be located under
+ * drivers/.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <sound/aess.h>
+
+#include "omap_hwmod.h"
+
+/**
+ * omap_hwmod_aess_preprogram - enable AESS internal autogating
+ * @oh: struct omap_hwmod *
+ *
+ * The AESS will not IdleAck to the PRCM until its internal autogating
+ * is enabled. Since internal autogating is disabled by default after
+ * AESS reset, we must enable autogating after the hwmod code resets
+ * the AESS. Returns 0 on success, or -EINVAL if the MPU register
+ * target virtual address for the AESS cannot be determined.
+ */
+int omap_hwmod_aess_preprogram(struct omap_hwmod *oh)
+{
+ void __iomem *va;
+
+ va = omap_hwmod_get_mpu_rt_va(oh);
+ if (!va)
+ return -EINVAL;
+
+ aess_enable_autogating(va);
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index cd6682df5625..673a4c1d1d76 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -282,19 +282,19 @@ int __init omap2_common_pm_late_init(void)
* a completely different mechanism.
* Disable this part if a DT blob is available.
*/
- if (of_have_populated_dt())
- return 0;
+ if (!of_have_populated_dt()) {
- /* Init the voltage layer */
- omap_pmic_late_init();
- omap_voltage_late_init();
+ /* Init the voltage layer */
+ omap_pmic_late_init();
+ omap_voltage_late_init();
- /* Initialize the voltages */
- omap3_init_voltages();
- omap4_init_voltages();
+ /* Initialize the voltages */
+ omap3_init_voltages();
+ omap4_init_voltages();
- /* Smartreflex device init */
- omap_devinit_smartreflex();
+ /* Smartreflex device init */
+ omap_devinit_smartreflex();
+ }
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index b2a4df623545..b59d93908341 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -54,7 +54,6 @@
#include "powerdomain.h"
#include "clockdomain.h"
-static void (*omap2_sram_idle)(void);
static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
@@ -163,6 +162,8 @@ static int omap2_allow_mpu_retention(void)
static void omap2_enter_mpu_retention(void)
{
+ const int zero = 0;
+
/* The peripherals seem not to be able to wake up the MPU when
* it is in retention mode. */
if (omap2_allow_mpu_retention()) {
@@ -179,7 +180,8 @@ static void omap2_enter_mpu_retention(void)
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
}
- omap2_sram_idle();
+ /* WFI */
+ asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc");
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
}
@@ -333,11 +335,9 @@ int __init omap2_pm_init(void)
/*
* We copy the assembler sleep/wakeup routines to SRAM.
* These routines need to be in SRAM as that's the only
- * memory the MPU can see when it wakes up.
+ * memory the MPU can see when it wakes up after the entire
+ * chip enters idle.
*/
- omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
- omap24xx_idle_loop_suspend_sz);
-
omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
omap24xx_cpu_suspend_sz);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index aa6fd98f606e..ea62e75ef21d 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -77,10 +77,20 @@ static int omap4_pm_suspend(void)
omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
}
- if (ret)
+ if (ret) {
pr_crit("Could not enter target state in pm_suspend\n");
- else
+ /*
+ * OMAP4 chip PM currently works only with certain (newer)
+ * versions of bootloaders. This is due to missing code in the
+ * kernel to properly reset and initialize some devices.
+ * Warn the user about the bootloader version being one of the
+ * possible causes.
+ * http://www.spinics.net/lists/arm-kernel/msg218641.html
+ */
+ pr_warn("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
+ } else {
pr_info("Successfully put all powerdomains to target state\n");
+ }
return 0;
}
@@ -146,6 +156,13 @@ int __init omap4_pm_init(void)
}
pr_err("Power Management for TI OMAP4.\n");
+ /*
+ * OMAP4 chip PM currently works only with certain (newer)
+ * versions of bootloaders. This is due to missing code in the
+ * kernel to properly reset and initialize some devices.
+ * http://www.spinics.net/lists/arm-kernel/msg218641.html
+ */
+ pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
ret = pwrdm_for_each(pwrdms_setup, NULL);
if (ret) {
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
index 1ac73883f891..44c0d7216aa7 100644
--- a/arch/arm/mach-omap2/prm33xx.c
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -110,11 +110,11 @@ int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs)
* -EINVAL upon an argument error, -EEXIST if the submodule was already out
* of reset, or -EBUSY if the submodule did not exit reset promptly.
*/
-int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,
u16 rstctrl_offs, u16 rstst_offs)
{
int c;
- u32 mask = 1 << shift;
+ u32 mask = 1 << st_shift;
/* Check the current status to avoid de-asserting the line twice */
if (am33xx_prm_is_hardreset_asserted(shift, inst, rstctrl_offs) == 0)
@@ -122,11 +122,14 @@ int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
/* Clear the reset status by writing 1 to the status bit */
am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs);
+
/* de-assert the reset control line */
+ mask = 1 << shift;
+
am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs);
- /* wait the status to be set */
- omap_test_timeout(am33xx_prm_is_hardreset_asserted(shift, inst,
+ /* wait for the status to be set */
+ omap_test_timeout(am33xx_prm_is_hardreset_asserted(st_shift, inst,
rstst_offs),
MAX_MODULE_HARDRESET_WAIT, c);
diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h
index 3f25c563a821..9b9918dfb119 100644
--- a/arch/arm/mach-omap2/prm33xx.h
+++ b/arch/arm/mach-omap2/prm33xx.h
@@ -117,6 +117,7 @@
#define AM33XX_PM_CEFUSE_PWRSTST_OFFSET 0x0004
#define AM33XX_PM_CEFUSE_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004)
+#ifndef __ASSEMBLER__
extern u32 am33xx_prm_read_reg(s16 inst, u16 idx);
extern void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx);
extern u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
@@ -124,6 +125,7 @@ extern void am33xx_prm_global_warm_sw_reset(void);
extern int am33xx_prm_is_hardreset_asserted(u8 shift, s16 inst,
u16 rstctrl_offs);
extern int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs);
-extern int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+extern int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,
u16 rstctrl_offs, u16 rstst_offs);
+#endif /* ASSEMBLER */
#endif
diff --git a/arch/arm/mach-omap2/sleep24xx.S b/arch/arm/mach-omap2/sleep24xx.S
index ce0ccd26efbd..1d3cb25c9629 100644
--- a/arch/arm/mach-omap2/sleep24xx.S
+++ b/arch/arm/mach-omap2/sleep24xx.S
@@ -37,25 +37,6 @@
.text
/*
- * Forces OMAP into idle state
- *
- * omap24xx_idle_loop_suspend() - This bit of code just executes the WFI
- * for normal idles.
- *
- * Note: This code get's copied to internal SRAM at boot. When the OMAP
- * wakes up it continues execution at the point it went to sleep.
- */
- .align 3
-ENTRY(omap24xx_idle_loop_suspend)
- stmfd sp!, {r0, lr} @ save registers on stack
- mov r0, #0 @ clear for mcr setup
- mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
- ldmfd sp!, {r0, pc} @ restore regs and return
-
-ENTRY(omap24xx_idle_loop_suspend_sz)
- .word . - omap24xx_idle_loop_suspend
-
-/*
* omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing
* SDRC shutdown then ARM shutdown. Upon wake MPU is back on so just restore
* SDRC.
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 092aedd7ed13..c62116bbc760 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -395,6 +395,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define AM335X_CLASS 0x33500033
#define AM335X_REV_ES1_0 AM335X_CLASS
+#define AM335X_REV_ES2_0 (AM335X_CLASS | (0x1 << 8))
#define OMAP443X_CLASS 0x44300044
#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index bb829e065400..d7bc33f15344 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -152,7 +152,7 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
sr_data->enable_on_init = sr_enable_on_init;
- pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data), 0);
+ pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data));
if (IS_ERR(pdev))
pr_warning("%s: Could not build omap_device for %s: %s.\n\n",
__func__, name, oh->name);
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9e7998d3635f..3d91d2e5bf3a 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -927,8 +927,6 @@ static void tosa_restart(char mode, const char *cmd)
static void __init tosa_init(void)
{
- int dummy;
-
pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config));
pxa_set_ffuart_info(NULL);
@@ -947,10 +945,6 @@ static void __init tosa_init(void)
/* enable batt_fault */
PMCR = 0x01;
- dummy = gpiochip_reserve(TOSA_SCOOP_GPIO_BASE, 12);
- dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12);
- dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
-
pxa_set_mci_info(&tosa_mci_platform_data);
pxa_set_ficp_info(&tosa_ficp_platform_data);
pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index ec29b35f25c0..6af1aa1ef213 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -23,13 +23,12 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/amba/pl080.h>
#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>
-#include <asm/hardware/pl080.h>
-
#include "regs-sys.h"
/* dma channel state information */
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 570481591746..3a38f7b34b94 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -840,12 +840,12 @@ static struct i2c_board_info noon010pc30_board_info = {
.platform_data = &noon010pc30_pldata,
};
-static struct s5p_fimc_isp_info goni_camera_sensors[] = {
+static struct fimc_source_info goni_camera_sensors[] = {
{
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_ITU_601,
+ .bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &noon010pc30_board_info,
.i2c_bus_num = 0,
.clk_frequency = 16000000UL,
@@ -853,7 +853,7 @@ static struct s5p_fimc_isp_info goni_camera_sensors[] = {
};
static struct s5p_platform_fimc goni_fimc_md_platdata __initdata = {
- .isp_info = goni_camera_sensors,
+ .source_info = goni_camera_sensors,
.num_clients = ARRAY_SIZE(goni_camera_sensors),
};
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 30ac79c7c687..8b85d4d8fab6 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -27,6 +27,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
+#include <linux/platform_data/sh_ipmmu.h>
#include <mach/dma-register.h>
#include <mach/r8a7740.h>
#include <mach/pm-rmobile.h>
@@ -378,6 +379,37 @@ static struct platform_device tmu02_device = {
.num_resources = ARRAY_SIZE(tmu02_resources),
};
+/* IPMMUI (an IPMMU module for ICB/LMB) */
+static struct resource ipmmu_resources[] = {
+ [0] = {
+ .name = "IPMMUI",
+ .start = 0xfe951000,
+ .end = 0xfe9510ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static const char * const ipmmu_dev_names[] = {
+ "sh_mobile_lcdc_fb.0",
+ "sh_mobile_lcdc_fb.1",
+ "sh_mobile_ceu.0",
+};
+
+static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
+ .dev_names = ipmmu_dev_names,
+ .num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
+};
+
+static struct platform_device ipmmu_device = {
+ .name = "ipmmu",
+ .id = -1,
+ .dev = {
+ .platform_data = &ipmmu_platform_data,
+ },
+ .resource = ipmmu_resources,
+ .num_resources = ARRAY_SIZE(ipmmu_resources),
+};
+
static struct platform_device *r8a7740_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
@@ -392,6 +424,7 @@ static struct platform_device *r8a7740_early_devices[] __initdata = {
&tmu00_device,
&tmu01_device,
&tmu02_device,
+ &ipmmu_device,
};
/* DMA */
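The ipmmu_device registration above (repeated for sh7372 and sh73a0 later in this diff) hands the IPMMU driver its white-list of master device names through legacy platform_data. A rough sketch of how a probe routine would read it back, shown only to illustrate the platform_data pattern, not the actual drivers/iommu code:

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/platform_data/sh_ipmmu.h>

static int ipmmu_probe_sketch(struct platform_device *pdev)
{
	struct shmobile_ipmmu_platform_data *pdata =
		dev_get_platdata(&pdev->dev);
	int i;

	if (!pdata)
		return -EINVAL;

	/* the dev_names array registered in the SoC code above */
	for (i = 0; i < pdata->num_dev_names; i++)
		dev_info(&pdev->dev, "will translate DMA for %s\n",
			 pdata->dev_names[i]);

	return 0;
}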
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index d2079d5e3334..59c7146bf66f 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -33,6 +33,7 @@
#include <linux/sh_timer.h>
#include <linux/pm_domain.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/sh_ipmmu.h>
#include <mach/dma-register.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -1008,6 +1009,43 @@ static struct platform_device spu1_device = {
.num_resources = ARRAY_SIZE(spu1_resources),
};
+/* IPMMUI (an IPMMU module for ICB/LMB) */
+static struct resource ipmmu_resources[] = {
+ [0] = {
+ .name = "IPMMUI",
+ .start = 0xfe951000,
+ .end = 0xfe9510ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static const char * const ipmmu_dev_names[] = {
+ "sh_mobile_lcdc_fb.0",
+ "sh_mobile_lcdc_fb.1",
+ "sh_mobile_ceu.0",
+ "uio_pdrv_genirq.0",
+ "uio_pdrv_genirq.1",
+ "uio_pdrv_genirq.2",
+ "uio_pdrv_genirq.3",
+ "uio_pdrv_genirq.4",
+ "uio_pdrv_genirq.5",
+};
+
+static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
+ .dev_names = ipmmu_dev_names,
+ .num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
+};
+
+static struct platform_device ipmmu_device = {
+ .name = "ipmmu",
+ .id = -1,
+ .dev = {
+ .platform_data = &ipmmu_platform_data,
+ },
+ .resource = ipmmu_resources,
+ .num_resources = ARRAY_SIZE(ipmmu_resources),
+};
+
static struct platform_device *sh7372_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
@@ -1019,6 +1057,7 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
&cmt2_device,
&tmu00_device,
&tmu01_device,
+ &ipmmu_device,
};
static struct platform_device *sh7372_late_devices[] __initdata = {
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 2ecd6681692f..bdab575f88bc 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -31,6 +31,7 @@
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
+#include <linux/platform_data/sh_ipmmu.h>
#include <mach/dma-register.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -780,6 +781,35 @@ static struct platform_device pmu_device = {
.resource = pmu_resources,
};
+/* an IPMMU module for ICB */
+static struct resource ipmmu_resources[] = {
+ [0] = {
+ .name = "IPMMU",
+ .start = 0xfe951000,
+ .end = 0xfe9510ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static const char * const ipmmu_dev_names[] = {
+ "sh_mobile_lcdc_fb.0",
+};
+
+static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
+ .dev_names = ipmmu_dev_names,
+ .num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
+};
+
+static struct platform_device ipmmu_device = {
+ .name = "ipmmu",
+ .id = -1,
+ .dev = {
+ .platform_data = &ipmmu_platform_data,
+ },
+ .resource = ipmmu_resources,
+ .num_resources = ARRAY_SIZE(ipmmu_resources),
+};
+
static struct platform_device *sh73a0_early_devices_dt[] __initdata = {
&scif0_device,
&scif1_device,
@@ -796,6 +826,7 @@ static struct platform_device *sh73a0_early_devices_dt[] __initdata = {
static struct platform_device *sh73a0_early_devices[] __initdata = {
&tmu00_device,
&tmu01_device,
+ &ipmmu_device,
};
static struct platform_device *sh73a0_late_devices[] __initdata = {
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index b2ba516ca2d4..f9d754f90c59 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -16,7 +16,6 @@
#include <linux/amba/pl022.h>
#include <linux/amba/pl08x.h>
#include <linux/io.h>
-#include <asm/hardware/pl080.h>
#include <plat/pl080.h>
#include <mach/generic.h>
#include <mach/spear.h>
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index b8bd33ca88bd..8904d8a52d84 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -20,7 +20,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 3868aa4ff15e..b03457881c4b 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -28,7 +28,7 @@
#include <linux/mfd/tps6105x.h>
#include <linux/mfd/abx500/ab8500-gpio.h>
#include <linux/mfd/abx500/ab8500-codec.h>
-#include <linux/leds-lp5521.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/input.h>
#include <linux/smsc911x.h>
#include <linux/gpio_keys.h>
@@ -301,7 +301,7 @@ static struct tc3589x_platform_data mop500_tc35892_data = {
.irq_base = MOP500_EGPIO_IRQ_BASE,
};
-static struct lp5521_led_config lp5521_pri_led[] = {
+static struct lp55xx_led_config lp5521_pri_led[] = {
[0] = {
.chan_nr = 0,
.led_current = 0x2f,
@@ -319,14 +319,14 @@ static struct lp5521_led_config lp5521_pri_led[] = {
},
};
-static struct lp5521_platform_data __initdata lp5521_pri_data = {
+static struct lp55xx_platform_data __initdata lp5521_pri_data = {
.label = "lp5521_pri",
.led_config = &lp5521_pri_led[0],
.num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .clock_mode = LP55XX_CLOCK_EXT,
};
-static struct lp5521_led_config lp5521_sec_led[] = {
+static struct lp55xx_led_config lp5521_sec_led[] = {
[0] = {
.chan_nr = 0,
.led_current = 0x2f,
@@ -344,11 +344,11 @@ static struct lp5521_led_config lp5521_sec_led[] = {
},
};
-static struct lp5521_platform_data __initdata lp5521_sec_data = {
+static struct lp55xx_platform_data __initdata lp5521_sec_data = {
.label = "lp5521_sec",
.led_config = &lp5521_sec_led[0],
.num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .clock_mode = LP55XX_CLOCK_EXT,
};
static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index dda3904dc64c..c7e3759f16d3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
static void __dma_clear_buffer(struct page *page, size_t size)
{
- void *ptr;
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
- ptr = page_address(page);
- if (ptr) {
+ if (PageHighMem(page)) {
+ phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+ phys_addr_t end = base + size;
+ while (size > 0) {
+ void *ptr = kmap_atomic(page);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ page++;
+ size -= PAGE_SIZE;
+ }
+ outer_flush_range(base, end);
+ } else {
+ void *ptr = page_address(page);
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page);
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
goto no_pages;
if (IS_ENABLED(CONFIG_CMA))
- ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+ atomic_pool_init);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, NULL);
+ &page, atomic_pool_init);
if (ptr) {
int i;
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page)
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
+ void *ptr;
page = dma_alloc_from_contiguous(dev, count, order);
if (!page)
return NULL;
__dma_clear_buffer(page, size);
- __dma_remap(page, size, prot);
+ if (PageHighMem(page)) {
+ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+ if (!ptr) {
+ dma_release_from_contiguous(dev, page, count);
+ return NULL;
+ }
+ } else {
+ __dma_remap(page, size, prot);
+ ptr = page_address(page);
+ }
*ret_page = page;
- return page_address(page);
+ return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
- size_t size)
+ void *cpu_addr, size_t size)
{
- __dma_remap(page, size, pgprot_kernel);
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size);
+ else
+ __dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
- __free_from_contiguous(dev, page, size);
+ __free_from_contiguous(dev, page, cpu_addr, size);
}
}
@@ -1002,6 +1029,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
unsigned int count, start;
unsigned long flags;
+ if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
@@ -1068,12 +1098,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
return pages;
}
+ /*
+ * The IOMMU can map any pages, so highmem can also be used here
+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
while (count) {
int j, order = __fls(count);
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ pages[i] = alloc_pages(gfp, order);
while (!pages[i] && order)
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ pages[i] = alloc_pages(gfp, --order);
if (!pages[i])
goto error;
@@ -1257,11 +1292,11 @@ err_mapping:
return NULL;
}
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
dma_addr_t handle, size_t size)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(page_address(pages[0]), size);
+ __free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1344,7 +1379,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, pages, handle, size);
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
return;
}
@@ -1732,6 +1767,8 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -1745,6 +1782,8 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .set_dma_mask = arm_dma_set_mask,
};
/**
@@ -1799,6 +1838,7 @@ err2:
err:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
@@ -1815,6 +1855,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
if (mapping)
kref_put(&mapping->kref, release_iommu_mapping);
}
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
* arm_iommu_attach_device
@@ -1843,5 +1884,32 @@ int arm_iommu_attach_device(struct device *dev,
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
}
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached mapping.
+ * This also clears the device's DMA operations (its dma_map_ops pointer).
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ mapping = NULL;
+ set_dma_ops(dev, NULL);
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
#endif
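The dma-mapping.c hunks above export the ARM IOMMU mapping API and add arm_iommu_detach_device(), so modular bus or master code can manage a mapping end to end. A rough usage sketch follows; it assumes the 3.9-era prototype arm_iommu_create_mapping(bus, base, size, order) (check asm/dma-iommu.h for the exact signature) and uses made-up base/size/order values:

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/err.h>
#include <asm/dma-iommu.h>

static int attach_to_iommu_sketch(struct device *dev,
				  struct dma_iommu_mapping **out)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* base 0x80000000, 128 MiB window, order 4: placeholder values */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 4);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	*out = mapping;
	return 0;
}

static void detach_from_iommu_sketch(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	arm_iommu_detach_device(dev);		/* added by this series */
	arm_iommu_release_mapping(mapping);	/* drops the reference taken at create */
}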
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 67c859cf16bc..ce66eb9be481 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -147,15 +147,6 @@ config OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
help
PPA routine service ID for setting L2 auxiliary control register.
-config OMAP_32K_TIMER_HZ
- int "Kernel internal timer frequency for 32KHz timer"
- range 32 1024
- depends on OMAP_32K_TIMER
- default "128"
- help
- Kernel internal timer frequency should be a divisor of 32768,
- such as 64 or 128.
-
config OMAP_DM_TIMER
bool "Use dual-mode timer"
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
diff --git a/arch/arm/plat-omap/include/plat/timex.h b/arch/arm/plat-omap/include/plat/timex.h
index 6d35767bc48f..e27d2daa7790 100644
--- a/arch/arm/plat-omap/include/plat/timex.h
+++ b/arch/arm/plat-omap/include/plat/timex.h
@@ -28,14 +28,6 @@
#if !defined(__ASM_ARCH_OMAP_TIMEX_H)
#define __ASM_ARCH_OMAP_TIMEX_H
-/*
- * OMAP 32KHz timer updates time one jiffie at a time from a secondary timer,
- * and that's why the CLOCK_TICK_RATE is not 32768.
- */
-#ifdef CONFIG_OMAP_32K_TIMER
-#define CLOCK_TICK_RATE (CONFIG_OMAP_32K_TIMER_HZ)
-#else
#define CLOCK_TICK_RATE (HZ * 100000UL)
-#endif
#endif /* __ASM_ARCH_OMAP_TIMEX_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7a32976fa2a3..8dc0605a9ce9 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -59,14 +59,16 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
};
xen_ulong_t idx = fgmfn;
xen_pfn_t gpfn = lpfn;
+ int err = 0;
set_xen_guest_handle(xatp.idxs, &idx);
set_xen_guest_handle(xatp.gpfns, &gpfn);
+ set_xen_guest_handle(xatp.errs, &err);
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
- if (rc) {
- pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
- rc, lpfn, fgmfn);
+ if (rc || err) {
+ pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
+ rc, err, lpfn, fgmfn);
return 1;
}
return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f532ce5f3c5b..fd70a68387eb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
select ARM_AMBA
@@ -93,7 +94,7 @@ config IOMMU_HELPER
def_bool SWIOTLB
config GENERIC_GPIO
- def_bool y
+ bool
source "init/Kconfig"
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index e60e386178d1..12f22492df4c 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -40,7 +40,7 @@ __SYSCALL(15, sys_chmod)
__SYSCALL(16, sys_lchown16)
__SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */
__SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */
-__SYSCALL(19, compat_sys_lseek_wrapper)
+__SYSCALL(19, compat_sys_lseek)
__SYSCALL(20, sys_getpid)
__SYSCALL(21, compat_sys_mount)
__SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */
@@ -113,8 +113,8 @@ __SYSCALL(88, sys_reboot)
__SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */
__SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */
__SYSCALL(91, sys_munmap)
-__SYSCALL(92, sys_truncate)
-__SYSCALL(93, sys_ftruncate)
+__SYSCALL(92, compat_sys_truncate)
+__SYSCALL(93, compat_sys_ftruncate)
__SYSCALL(94, sys_fchmod)
__SYSCALL(95, sys_fchown16)
__SYSCALL(96, sys_getpriority)
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 6abb05721614..9416d045a687 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -58,11 +58,6 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* in registers or that take 32-bit parameters which require sign
* extension.
*/
-compat_sys_lseek_wrapper:
- sxtw x1, w1
- b sys_lseek
-ENDPROC(compat_sys_lseek_wrapper)
-
compat_sys_pread64_wrapper:
orr x3, x4, x5, lsl #32
b sys_pread64
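The unistd32.h and sys32.S hunks above drop the hand-written lseek wrapper (and route truncate/ftruncate through their compat variants) because generic compat handlers now do the argument massaging. Expressed in C, the sign extension the removed sxtw instruction provided looks roughly like this; the helper name is hypothetical and purely illustrative:

#include <linux/syscalls.h>
#include <linux/types.h>

static long compat_lseek_sketch(unsigned int fd, u32 offset32,
				unsigned int whence)
{
	/* equivalent of "sxtw x1, w1": treat the 32-bit offset as signed */
	loff_t offset = (s32)offset32;

	return sys_lseek(fd, offset, whence);
}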
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 3883f842434f..b3c5f628bdb4 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -242,7 +242,7 @@ void die(const char *str, struct pt_regs *regs, int err)
crash_kexec(regs);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
raw_spin_unlock_irq(&die_lock);
oops_exit();
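add_taint() grew a second argument in this cycle that tells lockdep whether its state is still trustworthy; the die() paths converted throughout this diff pass LOCKDEP_NOW_UNRELIABLE. A two-line illustration of the two dispositions:

#include <linux/kernel.h>

static void taint_examples(void)
{
	/* benign taint from a driver: leave lockdep running */
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* after an oops: lock state can no longer be trusted */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
}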
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 2ae6591b3a55..9b89257b2cfd 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,6 +7,7 @@ config AVR32
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_ATOMIC64
select HARDIRQS_SW_RESEND
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index e2c328739808..d232888b99d5 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -102,7 +102,4 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
-
#endif /* __ASM_AVR32_ELF_H */
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index 3d760c06f024..682b2478691a 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -61,7 +61,7 @@ void die(const char *str, struct pt_regs *regs, long err)
show_regs_log_lvl(regs, KERN_EMERG);
show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index e98f3248c8aa..600494c70e96 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,6 +33,7 @@ config BLACKFIN
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_UID16
+ select HAVE_VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index 14bc98ff668f..d15cb9b5d52c 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -132,7 +132,4 @@ do { \
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
index 0bdaa517a501..e1d0b24c6070 100644
--- a/arch/blackfin/kernel/cplbinfo.c
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -116,7 +116,7 @@ static const struct seq_operations cplbinfo_sops = {
static int cplbinfo_open(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
char cplb_type;
unsigned int cpu;
int ret;
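Several hunks in this diff (blackfin cplbinfo, the cris sync_serial drivers, ia64 salinfo) switch from open-coding file->f_path.dentry->d_inode to the new file_inode() helper. Side by side, for reference:

#include <linux/fs.h>

static struct inode *get_inode_old_style(struct file *file)
{
	return file->f_path.dentry->d_inode;	/* pre-3.9 open-coded form */
}

static struct inode *get_inode_new_style(struct file *file)
{
	return file_inode(file);		/* preferred accessor */
}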
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 8061426b7df5..9782c0329c14 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -80,12 +80,10 @@ void cpu_idle(void)
if (cpu_is_offline(smp_processor_id()))
cpu_die();
#endif
- if (!idle)
- idle = default_idle;
tick_nohz_idle_enter();
rcu_idle_enter();
while (!need_resched())
- idle();
+ default_idle();
rcu_idle_exit();
tick_nohz_idle_exit();
preempt_enable_no_resched();
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
index 32b997126adf..9a4dfc5eb249 100644
--- a/arch/c6x/include/asm/elf.h
+++ b/arch/c6x/include/asm/elf.h
@@ -77,9 +77,6 @@ do { \
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
/* C6X specific section types */
#define SHT_C6000_UNWIND 0x70000001
#define SHT_C6000_PREEMPTMAP 0x70000002
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0e5c187ac7d2..bb0ac66cf533 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -43,6 +43,7 @@ config CRIS
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select HAVE_UID16
+ select HAVE_VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index c4b71710fb0e..a1c498d18d31 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -654,7 +654,7 @@ static int sync_serial_release(struct inode *inode, struct file *file)
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
unsigned int mask = 0;
struct sync_port *port;
DEBUGPOLL(static unsigned int prev_mask = 0);
@@ -685,7 +685,7 @@ static int sync_serial_ioctl_unlocked(struct file *file,
int return_val = 0;
unsigned long flags;
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
struct sync_port *port;
if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
@@ -973,7 +973,7 @@ static long sync_serial_ioctl(struct file *file,
static ssize_t sync_serial_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
DECLARE_WAITQUEUE(wait, current);
struct sync_port *port;
unsigned long flags;
@@ -1097,7 +1097,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
static ssize_t sync_serial_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
int avail;
struct sync_port *port;
unsigned char *start;
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index f8476d9e856b..877da1908234 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -3135,11 +3135,10 @@ static long cryptocop_ioctl_unlocked(struct inode *inode,
static long
cryptocop_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
long ret;
mutex_lock(&cryptocop_mutex);
- ret = cryptocop_ioctl_unlocked(inode, filp, cmd, arg);
+ ret = cryptocop_ioctl_unlocked(file_inode(filp), filp, cmd, arg);
mutex_unlock(&cryptocop_mutex);
return ret;
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index a6a180bc566f..219f704e3221 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -609,7 +609,7 @@ static int sync_serial_release(struct inode *inode, struct file *file)
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
unsigned int mask = 0;
sync_port *port;
DEBUGPOLL( static unsigned int prev_mask = 0; );
@@ -657,7 +657,7 @@ static int sync_serial_ioctl(struct file *file,
{
int return_val = 0;
int dma_w_size = regk_dma_set_w_size1;
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
sync_port *port;
reg_sser_rw_tr_cfg tr_cfg;
reg_sser_rw_rec_cfg rec_cfg;
@@ -979,7 +979,7 @@ static long sync_serial_ioctl(struct file *file,
static ssize_t sync_serial_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
DECLARE_WAITQUEUE(wait, current);
struct sync_port *port;
int trunc_count;
@@ -1102,7 +1102,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
static ssize_t sync_serial_read(struct file * file, char * buf,
size_t count, loff_t *ppos)
{
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
int avail;
sync_port *port;
unsigned char* start;
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 8182f2dc89d0..30ded8fbf592 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -86,7 +86,4 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 2d0509d4cfee..12369b194c7b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,6 +6,7 @@ config FRV
select HAVE_PERF_EVENTS
select HAVE_UID16
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
index 9ccbc80f0b11..2bac6446db41 100644
--- a/arch/frv/include/asm/elf.h
+++ b/arch/frv/include/asm/elf.h
@@ -137,7 +137,4 @@ do { \
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h
index 089eeba4f3bc..76c4e73d643d 100644
--- a/arch/frv/mb93090-mb00/pci-frv.h
+++ b/arch/frv/mb93090-mb00/pci-frv.h
@@ -31,7 +31,6 @@ void pcibios_resource_survey(void);
/* pci-vdk.c */
extern int __nongpreldata pcibios_last_bus;
-extern struct pci_bus *__nongpreldata pci_root_bus;
extern struct pci_ops *__nongpreldata pci_root_ops;
/* pci-irq.c */
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index d186b254ce99..0aa35f0eb0db 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -26,7 +26,6 @@
unsigned int __nongpreldata pci_probe = 1;
int __nongpreldata pcibios_last_bus = -1;
-struct pci_bus *__nongpreldata pci_root_bus;
struct pci_ops *__nongpreldata pci_root_ops;
/*
@@ -416,8 +415,7 @@ int __init pcibios_init(void)
printk("PCI: Probing PCI hardware\n");
pci_add_resource(&resources, &pci_ioport_resource);
pci_add_resource(&resources, &pci_iomem_resource);
- pci_root_bus = pci_scan_root_bus(NULL, 0, pci_root_ops, NULL,
- &resources);
+ pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources);
pcibios_irq_init();
pcibios_fixup_peer_bridges();
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 385fd30b142f..836f14707a62 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -60,7 +60,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vma;
- unsigned long limit;
+ struct vm_unmapped_area_info info;
if (len > TASK_SIZE)
return -ENOMEM;
@@ -79,39 +79,24 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
}
/* search between the bottom of user VM and the stack grow area */
- addr = PAGE_SIZE;
- limit = (current->mm->start_stack - 0x00200000);
- if (addr + len <= limit) {
- limit -= len;
-
- if (addr <= limit) {
- vma = find_vma(current->mm, PAGE_SIZE);
- for (; vma; vma = vma->vm_next) {
- if (addr > limit)
- break;
- if (addr + len <= vma->vm_start)
- goto success;
- addr = vma->vm_end;
- }
- }
- }
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = (current->mm->start_stack - 0x00200000);
+ info.align_mask = 0;
+ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto success;
+ VM_BUG_ON(addr != -ENOMEM);
/* search from just above the WorkRAM area to the top of memory */
- addr = PAGE_ALIGN(0x80000000);
- limit = TASK_SIZE - len;
- if (addr <= limit) {
- vma = find_vma(current->mm, addr);
- for (; vma; vma = vma->vm_next) {
- if (addr > limit)
- break;
- if (addr + len <= vma->vm_start)
- goto success;
- addr = vma->vm_end;
- }
-
- if (!vma && addr <= limit)
- goto success;
- }
+ info.low_limit = PAGE_ALIGN(0x80000000);
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto success;
+ VM_BUG_ON(addr != -ENOMEM);
#if 0
printk("[area] l=%lx (ENOMEM) f='%s'\n",
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 05b613af223a..ae8551eb3736 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -5,6 +5,7 @@ config H8300
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select HAVE_UID16
+ select HAVE_VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
index 41193c396bff..6db71248a82f 100644
--- a/arch/h8300/include/asm/elf.h
+++ b/arch/h8300/include/asm/elf.h
@@ -54,9 +54,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#define R_H8_NONE 0
#define R_H8_DIR32 1
#define R_H8_DIR32_28 2
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
index 1ba4b3bff5ed..1f14e082588e 100644
--- a/arch/hexagon/include/asm/elf.h
+++ b/arch/hexagon/include/asm/elf.h
@@ -216,11 +216,6 @@ do { \
*/
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#endif
-
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index a41eeb8eeaa1..be5e2dd9c9d3 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -225,7 +225,7 @@ int die(const char *str, struct pt_regs *regs, long err)
do_show_stack(current, &regs->r30, pt_elr(regs));
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die.lock);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c90366ef7183..33f3fdc0b214 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,6 +26,7 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index b5298eb09adb..5a83c5cc3dc8 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,9 +201,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
-#define SET_PERSONALITY(ex) \
- set_personality((current->personality & ~PER_MASK) | PER_LINUX)
-
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 6d6a5ac48d85..cfa74983c675 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,9 +23,7 @@
#ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 32
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7026b29e277a..f8280a766a78 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->cr_iip = orig_ret_address;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
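The kprobes hunk above reflects the 3.9 hlist API change: the iterator macros no longer take a separate struct hlist_node cursor, and the first argument is the typed entry itself. A minimal safe-deletion loop in the new style, where struct item is a made-up example type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct hlist_node hlist;
	int value;
};

static void drain_sketch(struct hlist_head *head)
{
	struct item *it;
	struct hlist_node *tmp;

	/* 'tmp' only exists so entries can be freed while walking */
	hlist_for_each_entry_safe(it, tmp, head, hlist) {
		hlist_del(&it->hlist);
		kfree(it);
	}
}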
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ea39eba61ef5..433f5e8a2cd1 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2221,9 +2221,9 @@ pfm_alloc_file(pfm_context_t *ctx)
d_add(path.dentry, inode);
file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
- if (!file) {
+ if (IS_ERR(file)) {
path_put(&path);
- return ERR_PTR(-ENFILE);
+ return file;
}
file->f_flags = O_RDONLY;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 79802e540e53..aa527d7e91f2 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -301,7 +301,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
static ssize_t
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
@@ -463,7 +463,7 @@ retry:
static ssize_t
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
u8 *buf;
@@ -524,7 +524,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
static ssize_t
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef2f661..41e33f84c185 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
unsigned long pgoff, unsigned long flags)
{
long map_shared = (flags & MAP_SHARED);
- unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ unsigned long align_mask = 0;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
addr = 0;
#endif
if (!addr)
- addr = mm->free_area_cache;
+ addr = TASK_UNMAPPED_BASE;
if (map_shared && (TASK_SIZE > 0xfffffffful))
/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
* tasks, we prefer to avoid exhausting the address space too quickly by
* limiting alignment to a single page.
*/
- align_mask = SHMLBA - 1;
-
- full_search:
- start_addr = addr = (addr + align_mask) & ~align_mask;
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
- if (start_addr != TASK_UNMAPPED_BASE) {
- /* Start a new search --- just in case we missed some holes. */
- addr = TASK_UNMAPPED_BASE;
- goto full_search;
- }
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /* Remember the address where we stopped this search: */
- mm->free_area_cache = addr + len;
- return addr;
- }
- addr = (vma->vm_end + align_mask) & ~align_mask;
- }
+ align_mask = PAGE_MASK & (SHMLBA - 1);
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
asmlinkage long
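The FRV and IA-64 arch_get_unmapped_area conversions above replace open-coded VMA walks with vm_unmapped_area(), which returns either a page-aligned address or a negative errno; that is why FRV tests success with !(addr & ~PAGE_MASK). The general shape of the conversion, with made-up limits:

#include <linux/mm.h>
#include <linux/sched.h>

static unsigned long find_area_sketch(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = PAGE_SIZE;	/* placeholder lower bound */
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;		/* no extra alignment needed */
	info.align_offset = 0;

	/* page-aligned address on success, -ENOMEM otherwise */
	return vm_unmapped_area(&info);
}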
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index bd42b76000d1..f7f9f9c6caf0 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -72,7 +72,7 @@ die (const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die.lock);
if (!regs)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bd1c51555038..ad3126a58644 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
kvm_mem.guest_phys_addr;
kvm_userspace_mem.memory_size = kvm_mem.memory_size;
r = kvm_vm_ioctl_set_memory_region(kvm,
- &kvm_userspace_mem, 0);
+ &kvm_userspace_mem, false);
if (r)
goto out;
break;
@@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
unsigned long i;
unsigned long pfn;
@@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
return;
}
@@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index c5f92a926a9a..c3e2935b6db4 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
#define kvm_apic_present(x) (true)
#define kvm_lapic_enabled(x) (true)
+static inline bool kvm_apic_vid_enabled(void)
+{
+ /* IA64 has no APIC virtualization (apicv) support, so do nothing here */
+ return false;
+}
+
#endif
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b74737..76069c18ee42 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- struct vm_area_struct *vmm;
+ struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
/* This code assumes that RGN_HPAGE != 0. */
if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
addr = HPAGE_REGION_BASE;
- else
- addr = ALIGN(addr, HPAGE_SIZE);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
- return -ENOMEM;
- if (!vmm || (addr + len) <= vmm->vm_start)
- return addr;
- addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
- }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
static int __init hugetlb_setup_sz(char *str)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 55b72ad57329..60532ab27346 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -393,6 +393,14 @@ out1:
return NULL;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *controller = bridge->bus->sysdata;
+
+ ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
+ return 0;
+}
+
static int is_valid_resource(struct pci_dev *dev, int idx)
{
unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index f807721e19a5..92623818a1fe 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_DEBUG_BUGVERBOSE
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 70896161c636..8acc9da9a15e 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -128,7 +128,4 @@ typedef elf_fpreg_t elf_fpregset_t;
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif /* _ASM_M32R__ELF_H */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index efb1ce1f14a3..0e708c78e01c 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,6 +8,7 @@ config M68K
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
select HAVE_UID16
+ select HAVE_VIRT_TO_BUS
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index f83c1d0a87cf..b1c26de438be 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -113,7 +113,4 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index cbc624af4494..f32ab22e7ed3 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1176,7 +1176,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
console_verbose();
printk("%s: %08x\n",str,nr);
show_registers(fp);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
do_exit(SIGSEGV);
}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index ba3b7c8c04b8..7843d11156e6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,6 +19,7 @@ config MICROBLAZE
select HAVE_DEBUG_KMEMLEAK
select IRQ_DOMAIN
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d26fb905ee0a..0a603d3ecf24 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -69,16 +69,13 @@ export MMU DTB
all: linux.bin
-# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 = linux.bin linux.bin.gz
-BOOT_TARGETS2 = simpleImage.%
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-$(BOOT_TARGETS1): vmlinux
+linux.bin linux.bin.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-$(BOOT_TARGETS2): vmlinux
+
+simpleImage.%: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore
new file mode 100644
index 000000000000..bf0459186027
--- /dev/null
+++ b/arch/microblaze/boot/.gitignore
@@ -0,0 +1,3 @@
+*.dtb
+linux.bin*
+simpleImage.*
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 4fbfdc1ac7f8..8cb8a8566ede 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -150,7 +150,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
#define page_to_bus(page) (page_to_phys(page))
#define bus_to_virt(addr) (phys_to_virt(addr))
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
/*extern void *__ioremap(phys_addr_t address, unsigned long size,
unsigned long flags);*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
diff --git a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/microblaze/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 4b7d8a3f4aef..4254514b4c8c 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -17,82 +17,70 @@
static inline void __enable_icache_msr(void)
{
- __asm__ __volatile__ (" msrset r0, %0; \
- nop; " \
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
: : "i" (MSR_ICE) : "memory");
}
static inline void __disable_icache_msr(void)
{
- __asm__ __volatile__ (" msrclr r0, %0; \
- nop; " \
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop;" \
: : "i" (MSR_ICE) : "memory");
}
static inline void __enable_dcache_msr(void)
{
- __asm__ __volatile__ (" msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory");
}
static inline void __disable_dcache_msr(void)
{
- __asm__ __volatile__ (" msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop; " \
+ : : "i" (MSR_DCE) : "memory");
}
static inline void __enable_icache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __disable_icache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __enable_dcache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
}
static inline void __disable_dcache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
}
@@ -106,7 +94,7 @@ do { \
int align = ~(cache_line_length - 1); \
end = min(start + cache_size, end); \
start &= align; \
-} while (0);
+} while (0)
/*
* Helper macro to loop over the specified cache_size/line_length and
@@ -118,12 +106,12 @@ do { \
int step = -line_length; \
WARN_ON(step >= 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, r0; \
- bgtid %0, 1b; \
- addk %0, %0, %1; \
- " : : "r" (len), "r" (step) \
+ __asm__ __volatile__ (" 1: " #op " %0, r0;" \
+ "bgtid %0, 1b;" \
+ "addk %0, %0, %1;" \
+ : : "r" (len), "r" (step) \
: "memory"); \
-} while (0);
+} while (0)
/* Used for wdc.flush/clear which can use rB for offset which is not possible
* to use for simple wdc or wic.
@@ -142,12 +130,12 @@ do { \
count = end - start; \
WARN_ON(count < 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, %1; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
+ __asm__ __volatile__ (" 1: " #op " %0, %1;" \
+ "bgtid %1, 1b;" \
+ "addk %1, %1, %2;" \
+ : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
-} while (0);
+} while (0)
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
@@ -157,13 +145,13 @@ do { \
end = ((end & align) == end) ? end - line_length : end & align; \
WARN_ON(end - start < 0); \
\
- __asm__ __volatile__ (" 1: " #op " %1, r0; \
- cmpu %0, %1, %2; \
- bgtid %0, 1b; \
- addk %1, %1, %3; \
- " : : "r" (temp), "r" (start), "r" (end),\
+ __asm__ __volatile__ (" 1: " #op " %1, r0;" \
+ "cmpu %0, %1, %2;" \
+ "bgtid %0, 1b;" \
+ "addk %1, %1, %3;" \
+ : : "r" (temp), "r" (start), "r" (end), \
"r" (line_length) : "memory"); \
-} while (0);
+} while (0)
#define ASM_LOOP
@@ -352,7 +340,7 @@ static void __invalidate_dcache_all_noirq_wt(void)
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
@@ -361,7 +349,8 @@ static void __invalidate_dcache_all_noirq_wt(void)
#endif
}
-/* FIXME It is blindly invalidation as is expected
+/*
+ * FIXME This is a blind invalidation, as expected,
* but can't be called on noMMU in microblaze_cache_init below
*
* MS: noMMU kernel won't boot if simple wdc is used
@@ -375,7 +364,7 @@ static void __invalidate_dcache_all_wb(void)
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
- wdc)
+ wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
@@ -616,49 +605,48 @@ static const struct scache wt_nomsr_noirq = {
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
-#define INFO(s) printk(KERN_INFO "cache: " s "\n");
-
void microblaze_cache_init(void)
{
if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
if (cpuinfo.dcache_wb) {
- INFO("wb_msr");
+ pr_info("wb_msr\n");
mbc = (struct scache *)&wb_msr;
if (cpuinfo.ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
- INFO("WB won't work properly");
+ pr_info("WB won't work properly\n");
}
} else {
if (cpuinfo.ver_code >= CPUVER_7_20_A) {
- INFO("wt_msr_noirq");
+ pr_info("wt_msr_noirq\n");
mbc = (struct scache *)&wt_msr_noirq;
} else {
- INFO("wt_msr");
+ pr_info("wt_msr\n");
mbc = (struct scache *)&wt_msr;
}
}
} else {
if (cpuinfo.dcache_wb) {
- INFO("wb_nomsr");
+ pr_info("wb_nomsr\n");
mbc = (struct scache *)&wb_nomsr;
if (cpuinfo.ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
- INFO("WB won't work properly");
+ pr_info("WB won't work properly\n");
}
} else {
if (cpuinfo.ver_code >= CPUVER_7_20_A) {
- INFO("wt_nomsr_noirq");
+ pr_info("wt_nomsr_noirq\n");
mbc = (struct scache *)&wt_nomsr_noirq;
} else {
- INFO("wt_nomsr");
+ pr_info("wt_nomsr\n");
mbc = (struct scache *)&wt_nomsr;
}
}
}
-/* FIXME Invalidation is done in U-BOOT
- * WT cache: Data is already written to main memory
- * WB cache: Discard data on noMMU which caused that kernel doesn't boot
- */
+ /*
+ * FIXME Invalidation is done in U-BOOT
+ * WT cache: Data is already written to main memory
+ * WB cache: Discard data on noMMU which caused that kernel doesn't boot
+ */
/* invalidate_dcache(); */
enable_dcache();
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 916aaedf1945..ee4689415410 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -27,7 +27,7 @@
early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
#else
#define err_printk(x) \
- printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
+ pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n");
#endif
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
@@ -38,12 +38,11 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(ver_code, VERSION);
if (!ci->ver_code) {
- printk(KERN_ERR "ERROR: MB has broken PVR regs "
- "-> use DTS setting\n");
+ pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
return;
}
- temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |\
+ temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
if (ci->use_instr != temp)
err_printk("BARREL, MSR, PCMP or DIV");
@@ -59,13 +58,13 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
err_printk("HW_FPU");
ci->use_fpu = temp;
- ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |\
- PVR_UNALIGNED_EXCEPTION(pvr) |\
- PVR_ILL_OPCODE_EXCEPTION(pvr) |\
- PVR_IOPB_BUS_EXCEPTION(pvr) |\
- PVR_DOPB_BUS_EXCEPTION(pvr) |\
- PVR_DIV_ZERO_EXCEPTION(pvr) |\
- PVR_FPU_EXCEPTION(pvr) |\
+ ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
+ PVR_UNALIGNED_EXCEPTION(pvr) |
+ PVR_ILL_OPCODE_EXCEPTION(pvr) |
+ PVR_IOPB_BUS_EXCEPTION(pvr) |
+ PVR_DOPB_BUS_EXCEPTION(pvr) |
+ PVR_DIV_ZERO_EXCEPTION(pvr) |
+ PVR_FPU_EXCEPTION(pvr) |
PVR_FSL_EXCEPTION(pvr);
CI(pvr_user1, USER1);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index eab6abf5652e..0b2299bcb948 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -68,31 +68,30 @@ void __init setup_cpuinfo(void)
cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu");
if (!cpu)
- printk(KERN_ERR "You don't have cpu!!!\n");
+ pr_err("You don't have cpu!!!\n");
- printk(KERN_INFO "%s: initialising\n", __func__);
+ pr_info("%s: initialising\n", __func__);
switch (cpu_has_pvr()) {
case 0:
- printk(KERN_WARNING
- "%s: No PVR support. Using static CPU info from FDT\n",
+ pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
__func__);
set_cpuinfo_static(&cpuinfo, cpu);
break;
/* FIXME I found weird behavior with MB 7.00.a/b 7.10.a
* please do not use FULL PVR with MMU */
case 1:
- printk(KERN_INFO "%s: Using full CPU PVR support\n",
+ pr_info("%s: Using full CPU PVR support\n",
__func__);
set_cpuinfo_static(&cpuinfo, cpu);
set_cpuinfo_pvr_full(&cpuinfo, cpu);
break;
default:
- printk(KERN_WARNING "%s: Unsupported PVR setting\n", __func__);
+ pr_warn("%s: Unsupported PVR setting\n", __func__);
set_cpuinfo_static(&cpuinfo, cpu);
}
if (cpuinfo.mmu_privins)
- printk(KERN_WARNING "%s: Stream instructions enabled"
+ pr_warn("%s: Stream instructions enabled"
" - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
}
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index 3a749d5e71fd..8d0dc6db48cf 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -27,7 +27,7 @@
tmp = 0x0; /* Prevent warning about unused */ \
__asm__ __volatile__ ( \
"mfs %0, rpvr" #pvrid ";" \
- : "=r" (tmp) : : "memory"); \
+ : "=r" (tmp) : : "memory"); \
val = tmp; \
}
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index a2bfa2ca5730..da68d00fd087 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -11,7 +11,7 @@
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
-#include <asm/bug.h>
+#include <linux/bug.h>
/*
* Generic direct DMA implementation
@@ -197,8 +197,8 @@ EXPORT_SYMBOL(dma_direct_ops);
static int __init dma_init(void)
{
- dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
- return 0;
+ return 0;
}
fs_initcall(dma_init);
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index aba1f9a97d5d..60dcacc68038 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -140,20 +140,20 @@ int __init setup_early_printk(char *opt)
switch (version) {
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
case UARTLITE:
- printk(KERN_INFO "Early console on uartlite "
- "at 0x%08x\n", base_addr);
+ pr_info("Early console on uartlite at 0x%08x\n",
+ base_addr);
early_console = &early_serial_uartlite_console;
break;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
case UART16550:
- printk(KERN_INFO "Early console on uart16650 "
- "at 0x%08x\n", base_addr);
+ pr_info("Early console on uart16650 at 0x%08x\n",
+ base_addr);
early_console = &early_serial_uart16550_console;
break;
#endif
default:
- printk(KERN_INFO "Unsupported early console %d\n",
+ pr_info("Unsupported early console %d\n",
version);
return 1;
}
@@ -171,10 +171,9 @@ void __init remap_early_printk(void)
{
if (!early_console_initialized || !early_console)
return;
- printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
- base_addr);
+ pr_info("early_printk_console remapping from 0x%x to ", base_addr);
base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
- printk(KERN_CONT "0x%x\n", base_addr);
+ pr_cont("0x%x\n", base_addr);
#ifdef CONFIG_MMU
/*
@@ -197,7 +196,7 @@ void __init disable_early_printk(void)
{
if (!early_console_initialized || !early_console)
return;
- printk(KERN_WARNING "disabling early console\n");
+ pr_warn("disabling early console\n");
unregister_console(early_console);
early_console_initialized = 0;
}
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 6348dc82f428..42dd12a62ff5 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -13,11 +13,11 @@
* This file handles the architecture-dependent parts of hardware exceptions
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
-#include <linux/module.h>
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
@@ -40,7 +40,7 @@ void die(const char *str, struct pt_regs *fp, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
- printk(KERN_WARNING "Oops: %s, sig: %ld\n", str, err);
+ pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
@@ -61,9 +61,9 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
- if (kernel_mode(regs)) {
+ if (kernel_mode(regs))
die("Exception in kernel mode", regs, signr);
- }
+
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
@@ -79,8 +79,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#endif
#if 0
- printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x " \
- "ESR=%08x\n",
+ pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
(unsigned int) regs->pc, (unsigned int) regs->esr);
#endif
@@ -92,8 +91,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
- printk(KERN_WARNING "Illegal opcode exception " \
- "in kernel mode.\n");
+ pr_warn("Illegal opcode exception in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
@@ -102,8 +100,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
- printk(KERN_WARNING "Instruction bus error exception " \
- "in kernel mode.\n");
+ pr_warn("Instruction bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DBUS_EXCEPTION:
@@ -112,8 +109,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
- printk(KERN_WARNING "Data bus error exception " \
- "in kernel mode.\n");
+ pr_warn("Data bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
@@ -122,8 +118,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGFPE, regs, FPE_INTDIV, addr);
return;
}
- printk(KERN_WARNING "Divide by zero exception " \
- "in kernel mode.\n");
+ pr_warn("Divide by zero exception in kernel mode.\n");
die("Divide by zero exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
@@ -151,8 +146,8 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#endif
default:
/* FIXME what to do in unexpected exception */
- printk(KERN_WARNING "Unexpected exception %02x "
- "PC=%08x in %s mode\n", type, (unsigned int) addr,
+ pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
+ type, (unsigned int) addr,
kernel_mode(regs) ? "kernel" : "user");
}
return;
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 357d56abe24a..e8a5e9cf4ed1 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -35,18 +35,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
* happen. This tool is too much intrusive to
* ignore such a protection.
*/
- asm volatile(" 1: lwi %0, %2, 0; \
- 2: swi %3, %2, 0; \
- addik %1, r0, 0; \
- 3: \
- .section .fixup, \"ax\"; \
- 4: brid 3b; \
- addik %1, r0, 1; \
- .previous; \
- .section __ex_table,\"a\"; \
- .word 1b,4b; \
- .word 2b,4b; \
- .previous;" \
+ asm volatile(" 1: lwi %0, %2, 0;" \
+ "2: swi %3, %2, 0;" \
+ " addik %1, r0, 0;" \
+ "3:" \
+ " .section .fixup, \"ax\";" \
+ "4: brid 3b;" \
+ " addik %1, r0, 1;" \
+ " .previous;" \
+ " .section __ex_table,\"a\";" \
+ " .word 1b,4b;" \
+ " .word 2b,4b;" \
+ " .previous;" \
: "=&r" (old), "=r" (faulted)
: "r" (parent), "r" (return_hooker)
);
@@ -81,16 +81,16 @@ static int ftrace_modify_code(unsigned long addr, unsigned int value)
{
int faulted = 0;
- __asm__ __volatile__(" 1: swi %2, %1, 0; \
- addik %0, r0, 0; \
- 2: \
- .section .fixup, \"ax\"; \
- 3: brid 2b; \
- addik %0, r0, 1; \
- .previous; \
- .section __ex_table,\"a\"; \
- .word 1b,3b; \
- .previous;" \
+ __asm__ __volatile__(" 1: swi %2, %1, 0;" \
+ " addik %0, r0, 0;" \
+ "2:" \
+ " .section .fixup, \"ax\";" \
+ "3: brid 2b;" \
+ " addik %0, r0, 1;" \
+ " .previous;" \
+ " .section __ex_table,\"a\";" \
+ " .word 1b,3b;" \
+ " .previous;" \
: "=r" (faulted)
: "r" (addr), "r" (value)
);
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 154756f3c694..1879a0527776 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -61,7 +61,7 @@ void setup_heartbeat(void)
if (gpio) {
base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
- printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
+ pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
/* GPIO is configured as output */
prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 7a1a8d4354fe..8778adf72bd3 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -147,12 +147,12 @@ void __init init_IRQ(void)
intr_mask =
be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
if (intr_mask > (u32)((1ULL << nr_irq) - 1))
- printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
+ pr_info(" ERROR: Mismatch in kind-of-intr param\n");
#ifdef CONFIG_SELFMOD_INTC
selfmod_function((int *) arr_func, intc_baseaddr);
#endif
- printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
+ pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
intc->name, intc_baseaddr, nr_irq, intr_mask);
/*
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 09a5e8286137..8adc92443100 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -141,7 +141,7 @@ void kgdb_arch_exit(void)
/*
* Global data
*/
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
#ifdef __MICROBLAZEEL__
.gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
#else
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 2b25bcf05c00..9f1d02c4c5cc 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index f39257a5abcf..182e6be856cd 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
@@ -108,8 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
break;
default:
- printk(KERN_ERR "module %s: "
- "Unknown relocation: %u\n",
+ pr_err("module %s: Unknown relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 6ff2dcff3410..fa0ea609137c 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -8,36 +8,36 @@
* for more details.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h> /* for USER_DS macros */
+#include <linux/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
{
- printk(KERN_INFO " Registers dump: mode=%X\r\n", regs->pt_mode);
- printk(KERN_INFO " r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
+ pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
+ pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
regs->r1, regs->r2, regs->r3, regs->r4);
- printk(KERN_INFO " r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
+ pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
regs->r5, regs->r6, regs->r7, regs->r8);
- printk(KERN_INFO " r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
+ pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
regs->r9, regs->r10, regs->r11, regs->r12);
- printk(KERN_INFO " r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
+ pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
regs->r13, regs->r14, regs->r15, regs->r16);
- printk(KERN_INFO " r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
+ pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
regs->r17, regs->r18, regs->r19, regs->r20);
- printk(KERN_INFO " r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
+ pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
regs->r21, regs->r22, regs->r23, regs->r24);
- printk(KERN_INFO " r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
+ pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
regs->r25, regs->r26, regs->r27, regs->r28);
- printk(KERN_INFO " r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
+ pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
regs->r29, regs->r30, regs->r31, regs->pc);
- printk(KERN_INFO " msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
+ pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
regs->msr, regs->ear, regs->esr, regs->fsr);
}
@@ -97,13 +97,10 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- if (!idle)
- idle = default_idle;
-
tick_nohz_idle_enter();
rcu_idle_enter();
while (!need_resched())
- idle();
+ default_idle();
rcu_idle_exit();
tick_nohz_idle_exit();
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a744e3f18883..0a2c68f9f9b0 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -14,6 +14,7 @@
*/
#include <stdarg.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -25,7 +26,6 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
-#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index 47187cc2cf00..068762f55fd6 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -1,8 +1,8 @@
#undef DEBUG
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index b050219c42e6..39cf50841f6d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -40,7 +40,7 @@
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* Returns the address where the register at REG_OFFS in P is stashed away. */
static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 954348f83505..0263da7b83dd 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -150,33 +150,35 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
/* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
tlb1, kernel_tlb); */
- printk("Ramdisk addr 0x%08x, ", ram);
+ pr_info("Ramdisk addr 0x%08x, ", ram);
if (fdt)
- printk("FDT at 0x%08x\n", fdt);
+ pr_info("FDT at 0x%08x\n", fdt);
else
- printk("Compiled-in FDT at 0x%08x\n",
+ pr_info("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
- printk("Found romfs @ 0x%08x (0x%08x)\n",
+ pr_info("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
- printk("#### klimit %p ####\n", old_klimit);
+ pr_info("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
- printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&__bss_stop);
- printk("New klimit: 0x%08x\n", (unsigned)klimit);
+ pr_info("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- if (msr)
- printk("!!!Your kernel has setup MSR instruction but "
- "CPU don't have it %x\n", msr);
+ if (msr) {
+ pr_info("!!!Your kernel has setup MSR instruction but ");
+ pr_cont("CPU don't have it %x\n", msr);
+ }
#else
- if (!msr)
- printk("!!!Your kernel not setup MSR instruction but "
- "CPU have it %x\n", msr);
+ if (!msr) {
+ pr_info("!!!Your kernel not setup MSR instruction but ");
+ pr_cont"CPU have it %x\n", msr);
+ }
#endif
/* Do not copy reset vectors. offset = 0x2 means skip the first
@@ -216,6 +218,8 @@ static int __init debugfs_tlb(void)
d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
if (!d)
return -ENOMEM;
+
+ return 0;
}
device_initcall(debugfs_tlb);
# endif
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 9f7a8bde0686..d26d7e7a6913 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -242,7 +242,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
set_fs(USER_DS);
#ifdef DEBUG_SIG
- printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
+ pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
#endif
@@ -317,8 +317,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
int signr;
struct k_sigaction ka;
#ifdef DEBUG_SIG
- printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
- printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
+ pr_info("do signal: %p %d\n", regs, in_syscall);
+ pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
regs->r12, current_thread_info()->flags);
#endif
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
index 84bc6686102c..b4debe283a79 100644
--- a/arch/microblaze/kernel/stacktrace.c
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -9,11 +9,11 @@
* for more details.
*/
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
-#include <linux/module.h>
#include <asm/unwind.h>
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 63647c586b43..f905b3ae68c7 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -13,6 +13,7 @@
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
@@ -24,14 +25,12 @@
#include <linux/sys.h>
#include <linux/ipc.h>
#include <linux/file.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <linux/slab.h>
-
#include <asm/syscalls.h>
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 5541ac559593..30e6b5004a6a 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -8,9 +8,9 @@
* for more details.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/debug_locks.h>
@@ -26,7 +26,7 @@ static unsigned long kstack_depth_to_print; /* 0 == entire stack */
static int __init kstack_setup(char *s)
{
- return !strict_strtoul(s, 0, &kstack_depth_to_print);
+ return !kstrtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);
@@ -66,9 +66,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
}
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
words_to_show << 2, 0);
- printk(KERN_INFO "\n\n");
-
- pr_info("Call Trace:\n");
+ pr_info("\n\nCall Trace:\n");
microblaze_unwind(task, NULL);
pr_info("\n");
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 6be4ae3c3351..1f7b8d449668 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -13,13 +13,13 @@
*/
/* #define DEBUG 1 */
+#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <asm/sections.h>
#include <asm/exceptions.h>
diff --git a/arch/microblaze/lib/ashldi3.c b/arch/microblaze/lib/ashldi3.c
index beb80f316095..1af904cd972d 100644
--- a/arch/microblaze/lib/ashldi3.c
+++ b/arch/microblaze/lib/ashldi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
@@ -25,5 +25,4 @@ long long __ashldi3(long long u, word_type b)
return w.ll;
}
-
EXPORT_SYMBOL(__ashldi3);
diff --git a/arch/microblaze/lib/ashrdi3.c b/arch/microblaze/lib/ashrdi3.c
index c884a912b660..32c334c05d04 100644
--- a/arch/microblaze/lib/ashrdi3.c
+++ b/arch/microblaze/lib/ashrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
@@ -27,5 +27,4 @@ long long __ashrdi3(long long u, word_type b)
return w.ll;
}
-
EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/microblaze/lib/cmpdi2.c b/arch/microblaze/lib/cmpdi2.c
index a708400ea7b7..67abc9ac1bd4 100644
--- a/arch/microblaze/lib/cmpdi2.c
+++ b/arch/microblaze/lib/cmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/microblaze/lib/lshrdi3.c b/arch/microblaze/lib/lshrdi3.c
index dcf8d6810b7c..adcb253f11c8 100644
--- a/arch/microblaze/lib/lshrdi3.c
+++ b/arch/microblaze/lib/lshrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
@@ -25,5 +25,4 @@ long long __lshrdi3(long long u, word_type b)
return w.ll;
}
-
EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index fe9c53fafdea..f536e81b8168 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -24,10 +24,10 @@
* not any responsibility to update it.
*/
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/string.h>
@@ -103,12 +103,12 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
}
#else
/* Load the holding buffer */
- buf_hold = (*i_src++ & 0xFFFFFF00) >>8;
+ buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;
for (; c >= 4; c -= 4) {
value = *i_src++;
*i_dst++ = buf_hold | ((value & 0xFF) << 24);
- buf_hold = (value & 0xFFFFFF00) >>8;
+ buf_hold = (value & 0xFFFFFF00) >> 8;
}
#endif
/* Realign the source */
@@ -129,12 +129,12 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
}
#else
/* Load the holding buffer */
- buf_hold = (*i_src++ & 0xFFFF0000 )>>16;
+ buf_hold = (*i_src++ & 0xFFFF0000) >> 16;
for (; c >= 4; c -= 4) {
value = *i_src++;
- *i_dst++ = buf_hold | ((value & 0xFFFF)<<16);
- buf_hold = (value & 0xFFFF0000) >>16;
+ *i_dst++ = buf_hold | ((value & 0xFFFF) << 16);
+ buf_hold = (value & 0xFFFF0000) >> 16;
}
#endif
/* Realign the source */
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 2146c3752a80..3611ce70415b 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -24,10 +24,10 @@
* not any responsibility to update it.
*/
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/string.h>
#ifdef __HAVE_ARCH_MEMMOVE
@@ -129,7 +129,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
for (; c >= 4; c -= 4) {
value = *--i_src;
- *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8);
+ *--i_dst = buf_hold |
+ ((value & 0xFFFFFF00) >> 8);
buf_hold = (value & 0xFF) << 24;
}
#endif
@@ -155,7 +156,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
for (; c >= 4; c -= 4) {
value = *--i_src;
- *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16);
+ *--i_dst = buf_hold |
+ ((value & 0xFFFF0000) >> 16);
buf_hold = (value & 0xFFFF) << 16;
}
#endif
@@ -181,7 +183,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
for (; c >= 4; c -= 4) {
value = *--i_src;
- *--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
+ *--i_dst = buf_hold |
+ ((value & 0xFF000000) >> 24);
buf_hold = (value & 0xFFFFFF) << 8;
}
#endif
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index ddf67939576d..04ea72c8a81d 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -24,10 +24,10 @@
* not any responsibility to update it.
*/
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/string.h>
#ifdef __HAVE_ARCH_MEMSET
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
index d3659244ab6f..a3f9a03acdcd 100644
--- a/arch/microblaze/lib/muldi3.c
+++ b/arch/microblaze/lib/muldi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index f085995ee848..0e8cc2710c27 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -38,15 +38,14 @@ __strncpy_user:
addik r3,r7,0 /* temp_count = len */
1:
lbu r4,r6,r0
+ beqid r4,2f
sb r4,r5,r0
- addik r3,r3,-1
- beqi r3,2f /* break on len */
-
addik r5,r5,1
- bneid r4,1b
addik r6,r6,1 /* delay slot */
- addik r3,r3,1 /* undo "temp_count--" */
+
+ addik r3,r3,-1
+ bnei r3,1b /* break on len */
2:
rsubk r3,r3,r7 /* temp_count = len - temp_count */
3:
diff --git a/arch/microblaze/lib/ucmpdi2.c b/arch/microblaze/lib/ucmpdi2.c
index 63ca105b6713..d05f1585121c 100644
--- a/arch/microblaze/lib/ucmpdi2.c
+++ b/arch/microblaze/lib/ucmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index a1e2e18e0961..5226b09cbbb2 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -13,7 +13,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -37,7 +37,7 @@
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -102,8 +102,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
# endif
if ((unsigned int)ret > cpuinfo.dcache_base &&
(unsigned int)ret < cpuinfo.dcache_high)
- printk(KERN_WARNING
- "ERROR: Your cache coherent area is CACHED!!!\n");
+ pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
/* dma_handle is same as physical (shadowed) address */
*dma_handle = (dma_addr_t)ret;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 714b35a9c4f7..731f739d17a1 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -32,7 +32,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>
@@ -100,7 +100,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
/* On a kernel SLB miss we can only check for a valid exception entry */
if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
- printk(KERN_WARNING "kernel task_size exceed");
+ pr_warn("kernel task_size exceed");
_exception(SIGSEGV, regs, code, address);
}
@@ -114,9 +114,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with "
- "in_atomic(), mm = %p\n", mm);
- printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
+ pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
+ mm);
+ pr_emerg("r15 = %lx MSR = %lx\n",
regs->r15, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index 7d78838e8bfa..5a92576fad92 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -20,8 +20,8 @@
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
+#include <linux/export.h>
#include <linux/highmem.h>
-#include <linux/module.h>
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index ce80823051ba..8f8b367c079e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -89,7 +89,7 @@ static unsigned long highmem_setup(void)
reservedpages++;
}
totalram_pages += totalhigh_pages;
- printk(KERN_INFO "High memory: %luk\n",
+ pr_info("High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
return reservedpages;
@@ -142,8 +142,8 @@ void __init setup_memory(void)
((u32)_text <= (memory_start + lowmem_size - 1))) {
memory_size = lowmem_size;
PAGE_OFFSET = memory_start;
- printk(KERN_INFO "%s: Main mem: 0x%x, "
- "size 0x%08x\n", __func__, (u32) memory_start,
+ pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
+ __func__, (u32) memory_start,
(u32) memory_size);
break;
}
@@ -158,7 +158,7 @@ void __init setup_memory(void)
kernel_align_start = PAGE_DOWN((u32)_text);
/* ALIGN can be remove because _end in vmlinux.lds.S is align */
kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
- printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
+ pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
__func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size);
memblock_reserve(kernel_align_start, kernel_align_size);
@@ -181,10 +181,10 @@ void __init setup_memory(void)
max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
- printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
- printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
- printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
- printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
+ pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
+ pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
+ pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
+ pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
/*
* Find an area to use for the bootmem bitmap.
@@ -246,7 +246,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
free_page(addr);
totalram_pages++;
}
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+ pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -260,7 +260,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
totalram_pages++;
pages++;
}
- printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
+ pr_notice("Freeing initrd memory: %dk freed\n",
(int)(pages * (PAGE_SIZE / 1024)));
}
#endif
@@ -304,11 +304,11 @@ void __init mem_init(void)
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
- pr_info("Memory: %luk/%luk available (%luk kernel code, "
- "%luk reserved, %luk data, %luk bss, %luk init)\n",
+ pr_info("Memory: %luk/%luk available (%luk kernel code, ",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
+ codesize >> 10);
+ pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
@@ -394,17 +394,17 @@ asmlinkage void __init mmu_init(void)
unsigned int kstart, ksize;
if (!memblock.reserved.cnt) {
- printk(KERN_EMERG "Error memory count\n");
+ pr_emerg("Error memory count\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < 0x400000) {
- printk(KERN_EMERG "Memory must be greater than 4MB\n");
+ pr_emerg("Memory must be greater than 4MB\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
- printk(KERN_EMERG "Kernel size is greater than memory node\n");
+ pr_emerg("Kernel size is greater than memory node\n");
machine_restart(NULL);
}
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index d1c06d07fed8..10b3bd0a980d 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -26,8 +26,8 @@
*
*/
+#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
@@ -39,8 +39,6 @@
#include <asm/sections.h>
#include <asm/fixmap.h>
-#define flush_HPTE(X, va, pg) _tlbie(va)
-
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
@@ -75,9 +73,8 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
p >= memory_start && p < virt_to_phys(high_memory) &&
!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
p < virt_to_phys((unsigned long)__bss_stop))) {
- printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
- " is RAM lr %pf\n", (unsigned long)p,
- __builtin_return_address(0));
+ pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
+ (unsigned long)p, __builtin_return_address(0));
return NULL;
}
@@ -128,9 +125,10 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
- if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+ if ((__force void *)addr > high_memory &&
+ (unsigned long) addr < ioremap_bot)
vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
@@ -152,8 +150,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
if (unlikely(mem_init_done))
- flush_HPTE(0, va, pmd_val(*pd));
- /* flush_HPTE(0, va, pg); */
+ _tlbie(va);
}
return err;
}
diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c
index 4196eb6bd764..ae4fca46c9f6 100644
--- a/arch/microblaze/pci/indirect_pci.c
+++ b/arch/microblaze/pci/indirect_pci.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c
index b07abbac0319..94149f5e6ebe 100644
--- a/arch/microblaze/pci/iomap.c
+++ b/arch/microblaze/pci/iomap.c
@@ -7,7 +7,7 @@
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/export.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/pci-bridge.h>
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 96416553cb36..9ea521e4959e 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -30,10 +30,11 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci.h>
#include <linux/export.h>
#include <asm/processor.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
@@ -552,11 +553,10 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
*/
if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
- printk(KERN_DEBUG
- "Process %s (pid:%d) mapped non-existing PCI"
- "legacy memory for 0%04x:%02x\n",
- current->comm, current->pid, pci_domain_nr(bus),
- bus->number);
+ pr_debug("Process %s (pid:%d) mapped non-existing PCI",
+ current->comm, current->pid);
+ pr_debug("legacy memory for 0%04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
@@ -564,7 +564,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
}
offset += hose->isa_mem_phys;
} else {
- unsigned long io_offset = (unsigned long)hose->io_base_virt - \
+ unsigned long io_offset = (unsigned long)hose->io_base_virt -
_IO_BASE;
unsigned long roffset = offset + io_offset;
rp = &hose->io_resource;
@@ -668,7 +668,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
unsigned long long isa_mb = 0;
struct resource *res;
- printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
+ pr_info("PCI host bridge %s %s ranges:\n",
dev->full_name, primary ? "(primary)" : "");
/* Get ranges property */
@@ -685,9 +685,10 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
cpu_addr = of_translate_address(dev, ranges + 3);
size = of_read_number(ranges + pna + 3, 2);
- pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
- "cpu_addr:0x%016llx size:0x%016llx\n",
- pci_space, pci_addr, cpu_addr, size);
+ pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
+ pci_space, pci_addr);
+ pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
+ cpu_addr, size);
ranges += np;
@@ -716,14 +717,12 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
res = NULL;
switch ((pci_space >> 24) & 0x3) {
case 1: /* PCI IO space */
- printk(KERN_INFO
- " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+ pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
cpu_addr, cpu_addr + size - 1, pci_addr);
/* We support only one IO range */
if (hose->pci_io_size) {
- printk(KERN_INFO
- " \\--> Skipped (too many) !\n");
+ pr_info(" \\--> Skipped (too many) !\n");
continue;
}
/* On 32 bits, limit I/O space to 16MB */
@@ -750,15 +749,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
break;
case 2: /* PCI Memory space */
case 3: /* PCI 64 bits Memory space */
- printk(KERN_INFO
- " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
cpu_addr, cpu_addr + size - 1, pci_addr,
(pci_space & 0x40000000) ? "Prefetch" : "");
/* We support only 3 memory ranges */
if (memno >= 3) {
- printk(KERN_INFO
- " \\--> Skipped (too many) !\n");
+ pr_info(" \\--> Skipped (too many) !\n");
continue;
}
/* Handles ISA memory hole space here */
@@ -781,8 +778,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
hose->pci_mem_offset = cpu_addr - pci_addr;
else if (pci_addr != 0 &&
hose->pci_mem_offset != cpu_addr - pci_addr) {
- printk(KERN_INFO
- " \\--> Skipped (offset mismatch) !\n");
+ pr_info(" \\--> Skipped (offset mismatch) !\n");
continue;
}
@@ -809,7 +805,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
*/
if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
unsigned int next = isa_hole + 1;
- printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
+ pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
if (next < memno)
memmove(&hose->mem_resources[isa_hole],
&hose->mem_resources[next],
@@ -833,7 +829,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
int i;
if (!hose) {
- printk(KERN_ERR "No host bridge for PCI dev %s !\n",
+ pr_err("No host bridge for PCI dev %s !\n",
pci_name(dev));
return;
}
@@ -842,12 +838,12 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
if (!res->flags)
continue;
if (res->start == 0) {
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
- "is unassigned\n",
+ pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
pci_name(dev), i,
(unsigned long long)res->start,
(unsigned long long)res->end,
(unsigned int)res->flags);
+ pr_debug("is unassigned\n");
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
@@ -856,7 +852,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
pci_name(dev), i,
- (unsigned long long)res->start,\
+ (unsigned long long)res->start,
(unsigned long long)res->end,
(unsigned int)res->flags);
}
@@ -947,7 +943,7 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
pci_name(dev), i,
- (unsigned long long)res->start,\
+ (unsigned long long)res->start,
(unsigned long long)res->end,
(unsigned int)res->flags);
@@ -1154,12 +1150,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
}
}
- pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
- "[0x%x], parent %p (%s)\n",
+ pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
bus->self ? pci_name(bus->self) : "PHB",
bus->number, i,
(unsigned long long)res->start,
- (unsigned long long)res->end,
+ (unsigned long long)res->end);
+ pr_debug("[0x%x], parent %p (%s)\n",
(unsigned int)res->flags,
pr, (pr && pr->name) ? pr->name : "nil");
@@ -1174,9 +1170,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
if (reparent_resources(pr, res) == 0)
continue;
}
- printk(KERN_WARNING "PCI: Cannot allocate resource region "
- "%d of PCI bridge %d, will remap\n", i, bus->number);
-
+ pr_warn("PCI: Cannot allocate resource region ");
+ pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
res->start = res->end = 0;
res->flags = 0;
}
@@ -1198,8 +1193,8 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
pr = pci_find_parent_resource(dev, r);
if (!pr || (pr->flags & IORESOURCE_UNSET) ||
request_resource(pr, r) < 0) {
- printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
- " of device %s, will remap\n", idx, pci_name(dev));
+ pr_warn("PCI: Cannot allocate resource region %d ", idx);
+ pr_cont("of device %s, will remap\n", pci_name(dev));
if (pr)
pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
pr,
@@ -1282,8 +1277,7 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
res->end = (offset + 0xfff) & 0xfffffffful;
pr_debug("Candidate legacy IO: %pR\n", res);
if (request_resource(&hose->io_resource, res)) {
- printk(KERN_DEBUG
- "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
+ pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
pci_domain_nr(bus), bus->number, res);
kfree(res);
}
@@ -1311,8 +1305,7 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
res->end = 0xbffff + offset;
pr_debug("Candidate VGA memory: %pR\n", res);
if (request_resource(pres, res)) {
- printk(KERN_DEBUG
- "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
+ pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
pci_domain_nr(bus), bus->number, res);
kfree(res);
}
@@ -1362,10 +1355,9 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
if (r->parent || !r->start || !r->flags)
continue;
- pr_debug("PCI: Claiming %s: "
- "Resource %d: %016llx..%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)r->start,
+ pr_debug("PCI: Claiming %s: ", pci_name(dev));
+ pr_debug("Resource %d: %016llx..%016llx [%x]\n",
+ i, (unsigned long long)r->start,
(unsigned long long)r->end,
(unsigned int)r->flags);
@@ -1423,9 +1415,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
res->end = (res->end + io_offset) & 0xffffffffu;
if (!res->flags) {
- printk(KERN_WARNING "PCI: I/O resource not set for host"
- " bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ pr_warn("PCI: I/O resource not set for host ");
+ pr_cont("bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
/* Workaround for lack of IO resource only on 32-bit */
res->start = (unsigned long)hose->io_base_virt - isa_io_base;
res->end = res->start + IO_SPACE_LIMIT;
@@ -1445,9 +1437,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
if (!res->flags) {
if (i > 0)
continue;
- printk(KERN_ERR "PCI: Memory resource 0 not set for "
- "host bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ pr_err("PCI: Memory resource 0 not set for ");
+ pr_cont("host bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
/* Workaround for lack of MEM resource only on 32-bit */
res->start = hose->pci_mem_offset;
@@ -1489,7 +1481,7 @@ static void pcibios_scan_phb(struct pci_controller *hose)
bus = pci_scan_root_bus(hose->parent, hose->first_busno,
hose->ops, hose, &resources);
if (bus == NULL) {
- printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+ pr_err("Failed to create bus for PCI domain %04x\n",
hose->global_number);
pci_free_resource_list(&resources);
return;
@@ -1505,7 +1497,7 @@ static int __init pcibios_init(void)
struct pci_controller *hose, *tmp;
int next_busno = 0;
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
+ pr_info("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -1605,7 +1597,7 @@ fake_pci_bus(struct pci_controller *hose, int busnr)
static struct pci_bus bus;
if (!hose)
- printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
+ pr_err("Can't find hose for PCI bus %d!\n", busnr);
bus.number = busnr;
bus.sysdata = hose;
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
index 0687a42a5bd4..14c7da5fd039 100644
--- a/arch/microblaze/pci/xilinx_pci.c
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -18,7 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define XPLB_PCI_ADDR 0x10c
#define XPLB_PCI_DATA 0x110
@@ -82,7 +82,7 @@ xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
*
* List pci devices in very early phase.
*/
-void __init xilinx_early_pci_scan(struct pci_controller *hose)
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
u32 bus = 0;
u32 val, dev, func, offset;
@@ -91,27 +91,27 @@ void __init xilinx_early_pci_scan(struct pci_controller *hose)
for (dev = 0; dev < 2; dev++) {
/* List only first function number - up-to 8 functions */
for (func = 0; func < 1; func++) {
- printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
+ pr_info("%02x:%02x:%02x", bus, dev, func);
/* read the first 64 standardized bytes */
/* Up-to 192 bytes can be list of capabilities */
for (offset = 0; offset < 64; offset += 4) {
early_read_config_dword(hose, bus,
PCI_DEVFN(dev, func), offset, &val);
if (offset == 0 && val == 0xFFFFFFFF) {
- printk(KERN_CONT "\nABSENT");
+ pr_cont("\nABSENT");
break;
}
if (!(offset % 0x10))
- printk(KERN_CONT "\n%04x: ", offset);
+ pr_cont("\n%04x: ", offset);
- printk(KERN_CONT "%08x ", val);
+ pr_cont("%08x ", val);
}
- printk(KERN_INFO "\n");
+ pr_info("\n");
}
}
}
#else
-void __init xilinx_early_pci_scan(struct pci_controller *hose)
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
}
#endif
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 91b9d69f465c..4b597d91a8d5 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -18,10 +18,10 @@ platforms += loongson1
platforms += mti-malta
platforms += mti-sead3
platforms += netlogic
-platforms += pmc-sierra
+platforms += pmcs-msp71xx
platforms += pnx833x
-platforms += pnx8550
platforms += powertv
+platforms += ralink
platforms += rb532
platforms += sgi-ip22
platforms += sgi-ip27
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 198641589bb5..ae9c716c46bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -38,6 +38,7 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
@@ -107,12 +108,14 @@ config ATH79
config BCM47XX
bool "Broadcom BCM47XX based boards"
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
+ select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
@@ -294,6 +297,7 @@ config MIPS_MALTA
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
+ select CSRC_GIC
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
@@ -353,6 +357,7 @@ config MIPS_SEAD3
select USB_ARCH_HAS_EHCI
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
+ select USE_OF
help
This enables support for the MIPS Technologies SEAD3 evaluation
board.
@@ -384,16 +389,6 @@ config NXP_STB225
help
Support for NXP Semiconductors STB225 Development Board.
-config PNX8550_JBS
- bool "NXP PNX8550 based JBS board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config PNX8550_STB810
- bool "NXP PNX8550 based STB810 board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
select CEVT_R4K
@@ -433,6 +428,22 @@ config POWERTV
help
This enables support for the Cisco PowerTV Platform.
+config RALINK
+ bool "Ralink based machines"
+ select CEVT_R4K
+ select CSRC_R4K
+ select BOOT_RAW
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select USE_OF
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+ select HAVE_MACH_CLKDEV
+ select CLKDEV_LOOKUP
+
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
select FW_ARC
@@ -834,8 +845,9 @@ source "arch/mips/jazz/Kconfig"
source "arch/mips/jz4740/Kconfig"
source "arch/mips/lantiq/Kconfig"
source "arch/mips/lasat/Kconfig"
-source "arch/mips/pmc-sierra/Kconfig"
+source "arch/mips/pmcs-msp71xx/Kconfig"
source "arch/mips/powertv/Kconfig"
+source "arch/mips/ralink/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
@@ -916,6 +928,9 @@ config CSRC_POWERTV
config CSRC_R4K
bool
+config CSRC_GIC
+ bool
+
config CSRC_SB1250
bool
@@ -1102,19 +1117,6 @@ config SOC_PNX8335
bool
select SOC_PNX833X
-config PNX8550
- bool
- select SOC_PNX8550
-
-config SOC_PNX8550
- bool
- select DMA_NONCOHERENT
- select HW_HAS_PCI
- select SYS_HAS_CPU_MIPS32_R1
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_32BIT_KERNEL
- select GENERIC_GPIO
-
config SWAP_IO_SPACE
bool
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index f2dfd404550c..6f7978f95090 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -191,7 +191,7 @@ endif
include $(srctree)/arch/mips/Kbuild.platforms
ifdef CONFIG_PHYSICAL_START
-load-y = $(CONFIG_PHYSICAL_START)
+load-y = $(CONFIG_PHYSICAL_START)
endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index 942c5800a684..fa1bdd1aea15 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -1,7 +1,7 @@
#
# Core Alchemy code
#
-platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
+platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
#
@@ -45,7 +45,7 @@ load-$(CONFIG_MIPS_MTX1) += 0xffffffff80100000
#
# MyCable eval board
#
-platform-$(CONFIG_MIPS_XXS1500) += alchemy/
+platform-$(CONFIG_MIPS_XXS1500) += alchemy/
load-$(CONFIG_MIPS_XXS1500) += 0xffffffff80100000
#
@@ -56,7 +56,7 @@ load-$(CONFIG_MIPS_GPR) += 0xffffffff80100000
# boards can specify their own <gpio.h> in one of their include dirs.
# If they do, placing this line here at the end will make sure the
-# compiler picks the board one. If they don't, it will make sure
+# compiler picks the board one. If they don't, it will make sure
# the alchemy generic gpio header is picked up.
cflags-$(CONFIG_MIPS_ALCHEMY) += -I$(srctree)/arch/mips/include/asm/mach-au1x00
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index ba3259086b9d..cb0f6afb7389 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -135,33 +135,33 @@ static struct mtd_partition gpr_mtd_partitions[] = {
{
.name = "kernel",
.size = 0x00200000,
- .offset = 0,
+ .offset = 0,
},
{
.name = "rootfs",
.size = 0x00800000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "config",
.size = 0x00200000,
- .offset = 0x01d00000,
+ .offset = 0x01d00000,
},
{
.name = "yamon",
.size = 0x00100000,
- .offset = 0x01c00000,
+ .offset = 0x01c00000,
},
{
.name = "yamon env vars",
.size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
{
.name = "kernel+rootfs",
.size = 0x00a00000,
- .offset = 0,
+ .offset = 0,
},
};
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index a124c251c0c9..4a9baa9f6330 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -173,23 +173,23 @@ static struct mtd_partition mtx1_mtd_partitions[] = {
{
.name = "filesystem",
.size = 0x01C00000,
- .offset = 0,
+ .offset = 0,
},
{
.name = "yamon",
.size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "kernel",
.size = 0x002c0000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
{
.name = "yamon env",
.size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
};
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index cf02d7dc2df0..19d5642c16d9 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(au1xxx_ddma_del_device);
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
void (*callback)(int, void *), void *callparam)
{
- unsigned long flags;
+ unsigned long flags;
u32 used, chan;
u32 dcp;
int i;
@@ -512,7 +512,7 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
break;
}
- /* If source input is FIFO, set static address. */
+ /* If source input is FIFO, set static address. */
if (stp->dev_flags & DEV_FLAGS_IN) {
if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
@@ -635,7 +635,7 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -697,7 +697,7 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -742,7 +742,7 @@ u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
*nbytes = dp->dscr_cmd1;
rv = dp->dscr_stat;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -891,7 +891,7 @@ void au1xxx_dbdma_dump(u32 chanid)
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
dbdev_tab_t *stp, *dtp;
- au1x_dma_chan_t *cp;
+ au1x_dma_chan_t *cp;
u32 i = 0;
ctp = *((chan_tab_t **)chanid);
@@ -969,7 +969,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
index f1b50f0c01db..f9bc4f520440 100644
--- a/arch/mips/alchemy/common/gpiolib.c
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -106,14 +106,14 @@ struct gpio_chip alchemy_gpio_chip[] = {
.ngpio = ALCHEMY_GPIO1_NUM,
},
[1] = {
- .label = "alchemy-gpio2",
- .direction_input = gpio2_direction_input,
- .direction_output = gpio2_direction_output,
- .get = gpio2_get,
- .set = gpio2_set,
+ .label = "alchemy-gpio2",
+ .direction_input = gpio2_direction_input,
+ .direction_output = gpio2_direction_output,
+ .get = gpio2_get,
+ .set = gpio2_set,
.to_irq = gpio2_to_irq,
- .base = ALCHEMY_GPIO2_BASE,
- .ngpio = ALCHEMY_GPIO2_NUM,
+ .base = ALCHEMY_GPIO2_BASE,
+ .ngpio = ALCHEMY_GPIO2_NUM,
},
};
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 94fbcd19eb9c..63a71817a00c 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -84,20 +84,20 @@ static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
* needs the highest priority.
*/
struct alchemy_irqmap au1000_irqmap[] __initdata = {
- { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -106,33 +106,33 @@ struct alchemy_irqmap au1000_irqmap[] __initdata = {
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1500_irqmap[] __initdata = {
- { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -141,31 +141,31 @@ struct alchemy_irqmap au1500_irqmap[] __initdata = {
{ AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1100_irqmap[] __initdata = {
- { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -174,33 +174,33 @@ struct alchemy_irqmap au1100_irqmap[] __initdata = {
{ AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1550_irqmap[] __initdata = {
- { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -210,26 +210,26 @@ struct alchemy_irqmap au1550_irqmap[] __initdata = {
{ AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1200_irqmap[] __initdata = {
- { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -239,9 +239,9 @@ struct alchemy_irqmap au1200_irqmap[] __initdata = {
{ AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 7af941d8e717..9837a134a6d6 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -53,7 +53,7 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
.irq = _irq, \
.regshift = 2, \
.iotype = UPIO_AU, \
- .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
UPF_FIXED_TYPE, \
.type = PORT_16550A, \
.pm = alchemy_8250_pm, \
@@ -137,7 +137,7 @@ static void alchemy_ehci_power_off(struct platform_device *pdev)
}
static struct usb_ehci_pdata alchemy_ehci_pdata = {
- .no_io_watchdog = 1,
+ .no_io_watchdog = 1,
.power_on = alchemy_ehci_power_on,
.power_off = alchemy_ehci_power_off,
.power_suspend = alchemy_ehci_power_off,
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 37ffd997c616..62b4e7bbeab9 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -59,7 +59,7 @@ void __init plat_mem_setup(void)
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
- board_setup(); /* board specific setup */
+ board_setup(); /* board specific setup */
/* IO/MEM resources. */
set_io_port_base(0);
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index c7bcc7e5c822..706d933e0085 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -102,12 +102,12 @@ LEAF(alchemy_sleep_au1000)
cache 0x14, 96(t0)
.set mips0
-1: lui a0, 0xb400 /* mem_xxx */
- sw zero, 0x001c(a0) /* Precharge */
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x001c(a0) /* Precharge */
sync
sw zero, 0x0020(a0) /* Auto Refresh */
sync
- sw zero, 0x0030(a0) /* Sleep */
+ sw zero, 0x0030(a0) /* Sleep */
sync
DO_SLEEP
@@ -128,15 +128,15 @@ LEAF(alchemy_sleep_au1550)
cache 0x14, 96(t0)
.set mips0
-1: lui a0, 0xb400 /* mem_xxx */
- sw zero, 0x08c0(a0) /* Precharge */
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x08c0(a0) /* Precharge */
sync
sw zero, 0x08d0(a0) /* Self Refresh */
sync
/* wait for sdram to enter self-refresh mode */
- lui t0, 0x0100
-2: lw t1, 0x0850(a0) /* mem_sdstat */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
and t2, t1, t0
beq t2, zero, 2b
nop
@@ -144,9 +144,9 @@ LEAF(alchemy_sleep_au1550)
/* disable SDRAM clocks */
lui t0, 0xcfff
ori t0, t0, 0xffff
- lw t1, 0x0840(a0) /* mem_sdconfiga */
- and t1, t0, t1 /* clear CE[1:0] */
- sw t1, 0x0840(a0) /* mem_sdconfiga */
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
sync
DO_SLEEP
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index b67930d19325..38afb11ba2c4 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -85,7 +85,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 1500,
- .set_next_event = au1x_rtcmatch2_set_next_event,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = cpu_all_mask,
};
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
index 936af8359fb2..fcc695626117 100644
--- a/arch/mips/alchemy/common/usb.c
+++ b/arch/mips/alchemy/common/usb.c
@@ -122,7 +122,7 @@ static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
unsigned long r;
if (enable) {
- __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
wmb();
r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index f2039ef2c293..c98c9ea3372c 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -20,7 +20,7 @@ static struct bcsr_reg {
spinlock_t lock;
} bcsr_regs[BCSR_CNT];
-static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
+static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
static int bcsr_csc_base; /* linux-irq of first cascaded irq */
void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 8187845650f7..11f3ad20321c 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -276,7 +276,7 @@ static void db1100_mmcled_set(struct led_classdev *led, enum led_brightness b)
}
static struct led_classdev db1100_mmc_led = {
- .brightness_set = db1100_mmcled_set,
+ .brightness_set = db1100_mmcled_set,
};
static int db1100_mmc1_card_readonly(void *mmc_host)
@@ -314,7 +314,7 @@ static void db1100_mmc1led_set(struct led_classdev *led, enum led_brightness b)
}
static struct led_classdev db1100_mmc1_led = {
- .brightness_set = db1100_mmc1led_set,
+ .brightness_set = db1100_mmc1led_set,
};
static struct au1xmmc_platform_data db1100_mmc_platdata[2] = {
@@ -357,7 +357,7 @@ static struct resource au1100_mmc0_resources[] = {
}
};
-static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
static struct platform_device db1100_mmc0_dev = {
.name = "au1xxx-mmc",
@@ -482,7 +482,7 @@ static struct spi_board_info db1100_spi_info[] __initdata = {
.mode = 0,
.irq = AU1100_GPIO21_INT,
.platform_data = &db1100_touch_pd,
- .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
+ .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
},
};
@@ -572,7 +572,7 @@ static int __init db1000_dev_init(void)
irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
- * SRAM 1MB/2MB 0x1a000000
+ * SRAM 1MB/2MB 0x1a000000
* DS1693 RTC 0x0c000000
*/
} else if (board == BCSR_WHOAMI_PB1100) {
@@ -586,7 +586,7 @@ static int __init db1000_dev_init(void)
irq_set_irq_type(AU1100_GPIO12_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1100_GPIO13_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
- * SRAM 1MB/2MB 0x1a000000
+ * SRAM 1MB/2MB 0x1a000000
* DiskOnChip 0x0d000000
* DS1693 RTC 0x0c000000
*/
@@ -605,7 +605,7 @@ static int __init db1000_dev_init(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
- c0, d0, /*s0*/0, 0, 0);
+ c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
@@ -619,7 +619,7 @@ static int __init db1000_dev_init(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
- c1, d1, /*s1*/0, 0, 1);
+ c1, d1, /*s1*/0, 0, 1);
}
platform_add_devices(db1x00_devs, ARRAY_SIZE(db1x00_devs));
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 299b7d202bea..a84d98b8f96e 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -90,14 +90,14 @@ int __init db1200_board_setup(void)
whoami = bcsr_read(BCSR_WHOAMI);
printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
- " Board-ID %d Daughtercard ID %d\n", get_system_type(),
+ " Board-ID %d Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
/* SMBus/SPI on PSC0, Audio on PSC1 */
pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
- pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
+ pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
__raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
wmb();
@@ -129,7 +129,7 @@ int __init db1200_board_setup(void)
static struct mtd_partition db1200_spiflash_parts[] = {
{
.name = "spi_flash",
- .offset = 0,
+ .offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
@@ -200,12 +200,12 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1200_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -395,7 +395,7 @@ static void db1200_mmcled_set(struct led_classdev *led,
}
static struct led_classdev db1200_mmc_led = {
- .brightness_set = db1200_mmcled_set,
+ .brightness_set = db1200_mmcled_set,
};
/* -- */
@@ -463,7 +463,7 @@ static void pb1200_mmc1led_set(struct led_classdev *led,
}
static struct led_classdev pb1200_mmc1_led = {
- .brightness_set = pb1200_mmc1led_set,
+ .brightness_set = pb1200_mmc1led_set,
};
static void pb1200_mmc1_set_power(void *mmc_host, int state)
@@ -526,7 +526,7 @@ static struct resource au1200_mmc0_resources[] = {
}
};
-static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
static struct platform_device db1200_mmc0_dev = {
.name = "au1xxx-mmc",
@@ -601,7 +601,7 @@ static int db1200fb_panel_shutdown(void)
static struct au1200fb_platdata db1200fb_pd = {
.panel_index = db1200fb_panel_index,
.panel_init = db1200fb_panel_init,
- .panel_shutdown = db1200fb_panel_shutdown,
+ .panel_shutdown = db1200fb_panel_shutdown,
};
static struct resource au1200_lcd_res[] = {
@@ -772,11 +772,11 @@ static int __init pb1200_res_fixup(void)
}
db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
- db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
+ db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR;
- db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
+ db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR;
- db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
+ db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
return 0;
}
@@ -797,7 +797,7 @@ int __init db1200_dev_setup(void)
irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
- /* insert/eject pairs: one of both is always screaming. To avoid
+ /* insert/eject pairs: one of both is always screaming. To avoid
* issues they must not be automatically enabled when initially
* requested.
*/
@@ -813,7 +813,7 @@ int __init db1200_dev_setup(void)
spi_register_board_info(db1200_spi_devs,
ARRAY_SIZE(db1200_i2c_devs));
- /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
* or S12 on the PB1200.
*/
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index cdf37cbd3d1f..6167e73eef9c 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -80,7 +80,7 @@ static int db1300_dev_pins[] __initdata = {
AU1300_PIN_PSC0D1,
AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
AU1300_PIN_PSC1D1,
- AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
+ AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
AU1300_PIN_PSC2D1,
AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
AU1300_PIN_PSC3D1,
@@ -143,12 +143,12 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1300_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -487,7 +487,7 @@ static void db1300_mmcled_set(struct led_classdev *led,
}
static struct led_classdev db1300_mmc_led = {
- .brightness_set = db1300_mmcled_set,
+ .brightness_set = db1300_mmcled_set,
};
struct au1xmmc_platform_data db1300_sd1_platdata = {
@@ -646,7 +646,7 @@ static int db1300fb_panel_shutdown(void)
static struct au1200fb_platdata db1300fb_pd = {
.panel_index = db1300fb_panel_index,
.panel_init = db1300fb_panel_init,
- .panel_shutdown = db1300fb_panel_shutdown,
+ .panel_shutdown = db1300fb_panel_shutdown,
};
static struct resource au1300_lcd_res[] = {
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 5a9ae6095428..016cddacd7ea 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -67,7 +67,7 @@ int __init db1550_board_setup(void)
bcsr_init(PB1550_BCSR_PHYS_ADDR,
PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
- pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
+ pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
"Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
@@ -80,7 +80,7 @@ int __init db1550_board_setup(void)
static struct mtd_partition db1550_spiflash_parts[] = {
{
.name = "spi_flash",
- .offset = 0,
+ .offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
@@ -151,12 +151,12 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1550_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -495,10 +495,10 @@ static void __init db1550_devices(void)
{
alchemy_gpio_direction_output(203, 0); /* red led on */
- irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
- irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
- irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
- irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
+ irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
+ irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
irq_set_irq_type(AU1550_GPIO21_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG0# */
irq_set_irq_type(AU1550_GPIO22_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG1# */
@@ -539,7 +539,7 @@ static void __init pb1550_devices(void)
/* Pb1550, like all others, also has statuschange irqs; however they're
* wired up on one of the Au1550's shared GPIO201_205 line, which also
- * services the PCMCIA card interrupts. So we ignore statuschange and
+ * services the PCMCIA card interrupts. So we ignore statuschange and
* use the GPIO201_205 exclusively for card interrupts, since a) pcmcia
* drivers are used to shared irqs and b) statuschange isn't really use-
* ful anyway.
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index acaf91b5e461..b86bff31d1d3 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -194,7 +194,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
}
#define ATTR(x) \
- static struct kobj_attribute x##_attribute = \
+ static struct kobj_attribute x##_attribute = \
__ATTR(x, 0664, db1x_pmattr_show, \
db1x_pmattr_store);
diff --git a/arch/mips/ar7/Platform b/arch/mips/ar7/Platform
index 0bf85c416c6c..21f9102d533c 100644
--- a/arch/mips/ar7/Platform
+++ b/arch/mips/ar7/Platform
@@ -1,6 +1,6 @@
#
# Texas Instruments AR7
#
-platform-$(CONFIG_AR7) += ar7/
-cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
-load-$(CONFIG_AR7) += 0xffffffff94100000
+platform-$(CONFIG_AR7) += ar7/
+cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
+load-$(CONFIG_AR7) += 0xffffffff94100000
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 7477fd2127ad..7e2356fd5fd6 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -492,11 +492,11 @@ static struct gpio_led gt701_leds[] = {
.active_low = 1,
.default_trigger = "default-on",
},
- {
- .name = "ethernet",
- .gpio = 10,
- .active_low = 1,
- },
+ {
+ .name = "ethernet",
+ .gpio = 10,
+ .active_low = 1,
+ },
};
static struct gpio_led_platform_data ar7_led_data;
@@ -512,7 +512,7 @@ static void __init detect_leds(void)
{
char *prid, *usb_prod;
- /* Default LEDs */
+ /* Default LEDs */
ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
ar7_led_data.leds = default_leds;
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index f44feee2d67f..3995e31a73e2 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -14,6 +14,18 @@ config ATH79_MACH_AP121
Say 'Y' here if you want your kernel to support the
Atheros AP121 reference board.
+config ATH79_MACH_AP136
+ bool "Atheros AP136 reference board"
+ select SOC_QCA955X
+ select ATH79_DEV_GPIO_BUTTONS
+ select ATH79_DEV_LEDS_GPIO
+ select ATH79_DEV_SPI
+ select ATH79_DEV_USB
+ select ATH79_DEV_WMAC
+ help
+ Say 'Y' here if you want your kernel to support the
+ Atheros AP136 reference board.
+
config ATH79_MACH_AP81
bool "Atheros AP81 reference board"
select SOC_AR913X
@@ -88,6 +100,12 @@ config SOC_AR934X
select PCI_AR724X if PCI
def_bool n
+config SOC_QCA955X
+ select USB_ARCH_HAS_EHCI
+ select HW_HAS_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
config PCI_AR724X
def_bool n
@@ -104,7 +122,7 @@ config ATH79_DEV_USB
def_bool n
config ATH79_DEV_WMAC
- depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
+ depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X || SOC_QCA955X)
def_bool n
endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 2b54d98263f3..5c9ff692ff3c 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
# Machines
#
obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
+obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o
obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 579f452c0b45..765ef30e3e1c 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -198,7 +198,7 @@ static void __init ar934x_clocks_init(void)
dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
ath79_ref_clk.rate = 40 * 1000 * 1000;
else
ath79_ref_clk.rate = 25 * 1000 * 1000;
@@ -295,6 +295,82 @@ static void __init ar934x_clocks_init(void)
iounmap(dpll_base);
}
+static void __init qca955x_clocks_init(void)
+{
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40)
+ ath79_ref_clk.rate = 40 * 1000 * 1000;
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+ cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+ ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ ath79_cpu_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ath79_ddr_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+ else
+ ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ath79_ahb_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -307,6 +383,8 @@ void __init ath79_clocks_init(void)
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
+ else if (soc_is_qca955x())
+ qca955x_clocks_init();
else
BUG();
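
The qca955x_clocks_init() hunk above derives the CPU and DDR rates from the PLL
configuration registers: an integer/fractional multiplier (NINT/NFRAC) of the
reference clock, pre-divided by REFDIV, post-divided by 2^OUTDIV, and finally by
the CLK_CTRL post divider. A standalone sketch of the same arithmetic, using
hypothetical register field values rather than real hardware reads, may help
when sanity-checking the divider math; it is not part of the patch.

/*
 * Illustrative sketch only: the CPU-PLL arithmetic used by
 * qca955x_clocks_init() above, applied to hypothetical field values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ref = 40 * 1000 * 1000;	/* REF_CLK_40 strapped: 40 MHz */
	unsigned long nint = 18, frac = 0;	/* hypothetical NINT/NFRAC */
	unsigned long ref_div = 1, out_div = 0;	/* hypothetical REFDIV/OUTDIV */
	unsigned long postdiv = 0;		/* hypothetical CPU_POST_DIV */
	unsigned long cpu_pll;

	/* pll = ref * (nint + frac / 2^6) / ref_div, then divided by 2^out_div */
	cpu_pll = nint * ref / ref_div;
	cpu_pll += frac * ref / (ref_div * (1 << 6));
	cpu_pll /= (1 << out_div);

	/* with CPU_PLL_BYPASS and CPUCLK_FROM_CPUPLL both clear, as above */
	printf("cpu clock: %lu Hz\n", cpu_pll / (postdiv + 1));	/* 720000000 */
	return 0;
}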
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 5a4adfc9d79d..eb3966cd8cfc 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -72,6 +72,8 @@ void ath79_device_reset_set(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -98,6 +100,8 @@ void ath79_device_reset_clear(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index 45efc63b08b6..9516aab27139 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -36,7 +36,7 @@ static struct resource ath79_uart_resources[] = {
static struct plat_serial8250_port ath79_uart_data[] = {
{
.mapbase = AR71XX_UART_BASE,
- .irq = ATH79_MISC_IRQ_UART,
+ .irq = ATH79_MISC_IRQ(3),
.flags = AR71XX_UART_FLAGS,
.iotype = UPIO_MEM32,
.regshift = 2,
@@ -62,8 +62,8 @@ static struct resource ar933x_uart_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = ATH79_MISC_IRQ_UART,
- .end = ATH79_MISC_IRQ_UART,
+ .start = ATH79_MISC_IRQ(3),
+ .end = ATH79_MISC_IRQ(3),
.flags = IORESOURCE_IRQ,
},
};
@@ -90,7 +90,8 @@ void __init ath79_register_uart(void)
if (soc_is_ar71xx() ||
soc_is_ar724x() ||
soc_is_ar913x() ||
- soc_is_ar934x()) {
+ soc_is_ar934x() ||
+ soc_is_qca955x()) {
ath79_uart_data[0].uartclk = clk_get_rate(clk);
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index bd2bc108e1b5..8227265bcc2d 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -25,29 +25,11 @@
#include "common.h"
#include "dev-usb.h"
-static struct resource ath79_ohci_resources[2];
-
-static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32);
+static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
static struct usb_ohci_pdata ath79_ohci_pdata = {
};
-static struct platform_device ath79_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .resource = ath79_ohci_resources,
- .num_resources = ARRAY_SIZE(ath79_ohci_resources),
- .dev = {
- .dma_mask = &ath79_ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &ath79_ohci_pdata,
- },
-};
-
-static struct resource ath79_ehci_resources[2];
-
-static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32);
-
static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
.has_synopsys_hc_bug = 1,
};
@@ -57,22 +39,16 @@ static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
.has_tt = 1,
};
-static struct platform_device ath79_ehci_device = {
- .name = "ehci-platform",
- .id = -1,
- .resource = ath79_ehci_resources,
- .num_resources = ARRAY_SIZE(ath79_ehci_resources),
- .dev = {
- .dma_mask = &ath79_ehci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static void __init ath79_usb_init_resource(struct resource res[2],
- unsigned long base,
- unsigned long size,
- int irq)
+static void __init ath79_usb_register(const char *name, int id,
+ unsigned long base, unsigned long size,
+ int irq, const void *data,
+ size_t data_size)
{
+ struct resource res[2];
+ struct platform_device *pdev;
+
+ memset(res, 0, sizeof(res));
+
res[0].flags = IORESOURCE_MEM;
res[0].start = base;
res[0].end = base + size - 1;
@@ -80,6 +56,19 @@ static void __init ath79_usb_init_resource(struct resource res[2],
res[1].flags = IORESOURCE_IRQ;
res[1].start = irq;
res[1].end = irq;
+
+ pdev = platform_device_register_resndata(NULL, name, id,
+ res, ARRAY_SIZE(res),
+ data, data_size);
+
+ if (IS_ERR(pdev)) {
+ pr_err("ath79: unable to register USB at %08lx, err=%d\n",
+ base, (int) PTR_ERR(pdev));
+ return;
+ }
+
+ pdev->dev.dma_mask = &ath79_usb_dmamask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \
@@ -106,14 +95,15 @@ static void __init ath79_usb_setup(void)
mdelay(900);
- ath79_usb_init_resource(ath79_ohci_resources, AR71XX_OHCI_BASE,
- AR71XX_OHCI_SIZE, ATH79_MISC_IRQ_OHCI);
- platform_device_register(&ath79_ohci_device);
+ ath79_usb_register("ohci-platform", -1,
+ AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
+ ATH79_MISC_IRQ(6),
+ &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
- ath79_usb_init_resource(ath79_ehci_resources, AR71XX_EHCI_BASE,
- AR71XX_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v1;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
}
static void __init ar7240_usb_setup(void)
@@ -135,9 +125,10 @@ static void __init ar7240_usb_setup(void)
iounmap(usb_ctrl_base);
- ath79_usb_init_resource(ath79_ohci_resources, AR7240_OHCI_BASE,
- AR7240_OHCI_SIZE, ATH79_CPU_IRQ_USB);
- platform_device_register(&ath79_ohci_device);
+ ath79_usb_register("ohci-platform", -1,
+ AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
}
static void __init ar724x_usb_setup(void)
@@ -151,10 +142,10 @@ static void __init ar724x_usb_setup(void)
ath79_device_reset_clear(AR724X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR724X_EHCI_BASE,
- AR724X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar913x_usb_setup(void)
@@ -168,10 +159,10 @@ static void __init ar913x_usb_setup(void)
ath79_device_reset_clear(AR913X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR913X_EHCI_BASE,
- AR913X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar933x_usb_setup(void)
@@ -185,10 +176,10 @@ static void __init ar933x_usb_setup(void)
ath79_device_reset_clear(AR933X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR933X_EHCI_BASE,
- AR933X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar934x_usb_setup(void)
@@ -211,10 +202,23 @@ static void __init ar934x_usb_setup(void)
ath79_device_reset_clear(AR934X_RESET_USB_HOST);
udelay(1000);
- ath79_usb_init_resource(ath79_ehci_resources, AR934X_EHCI_BASE,
- AR934X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+}
+
+static void __init qca955x_usb_setup(void)
+{
+ ath79_usb_register("ehci-platform", 0,
+ QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
+ ATH79_IP3_IRQ(0),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+
+ ath79_usb_register("ehci-platform", 1,
+ QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
+ ATH79_IP3_IRQ(1),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
void __init ath79_register_usb(void)
@@ -231,6 +235,8 @@ void __init ath79_register_usb(void)
ar933x_usb_setup();
else if (soc_is_ar934x())
ar934x_usb_setup();
+ else if (soc_is_qca955x())
+ qca955x_usb_setup();
else
BUG();
}
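
The dev-usb.c change above folds the per-controller platform_device definitions
into one helper built around platform_device_register_resndata(), which
allocates the device, copies the resources and platform data, and registers it
in a single call. A minimal sketch of that pattern follows; the base address,
size and IRQ number are placeholders, not values taken from the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static const struct usb_ehci_pdata example_ehci_pdata = {
	.has_tt	= 1,
};

static int __init example_ehci_register(void)
{
	struct resource res[] = {
		DEFINE_RES_MEM(0x1b000000, 0x1000),	/* placeholder base/size */
		DEFINE_RES_IRQ(48),			/* placeholder IRQ */
	};
	struct platform_device *pdev;

	pdev = platform_device_register_resndata(NULL, "ehci-platform", -1,
						 res, ARRAY_SIZE(res),
						 &example_ehci_pdata,
						 sizeof(example_ehci_pdata));
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* the helper in the patch also sets dev.dma_mask */
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return 0;
}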
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index d6d893c16ad4..da190b1b87ce 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -55,8 +55,8 @@ static void __init ar913x_wmac_setup(void)
ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
}
@@ -83,8 +83,8 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -107,7 +107,7 @@ static void ar934x_wmac_setup(void)
ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
- ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if (t & AR934X_BOOTSTRAP_REF_CLK_40)
@@ -116,6 +116,24 @@ static void ar934x_wmac_setup(void)
ath79_wmac_data.is_clk_25mhz = true;
}
+static void qca955x_wmac_setup(void)
+{
+ u32 t;
+
+ ath79_wmac_device.name = "qca955x_wmac";
+
+ ath79_wmac_resources[0].start = QCA955X_WMAC_BASE;
+ ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+ t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+ if (t & QCA955X_BOOTSTRAP_REF_CLK_40)
+ ath79_wmac_data.is_clk_25mhz = false;
+ else
+ ath79_wmac_data.is_clk_25mhz = true;
+}
+
void __init ath79_register_wmac(u8 *cal_data)
{
if (soc_is_ar913x())
@@ -124,6 +142,8 @@ void __init ath79_register_wmac(u8 *cal_data)
ar933x_wmac_setup();
else if (soc_is_ar934x())
ar934x_wmac_setup();
+ else if (soc_is_qca955x())
+ qca955x_wmac_setup();
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index dc938cb2ba58..b955fafc58ba 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -74,6 +74,8 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9556:
+ case REV_ID_MAJOR_QCA9558:
_prom_putchar = prom_putchar_ar71xx;
break;
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 48fe762d2526..8d025b028bb1 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -137,49 +137,45 @@ static struct gpio_chip ath79_gpio_chip = {
.base = 0,
};
-void ath79_gpio_function_enable(u32 mask)
+static void __iomem *ath79_gpio_get_function_reg(void)
{
- void __iomem *base = ath79_gpio_base;
- unsigned long flags;
+ u32 reg = 0;
- spin_lock_irqsave(&ath79_gpio_lock, flags);
-
- __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) | mask,
- base + AR71XX_GPIO_REG_FUNC);
- /* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ if (soc_is_ar71xx() ||
+ soc_is_ar724x() ||
+ soc_is_ar913x() ||
+ soc_is_ar933x())
+ reg = AR71XX_GPIO_REG_FUNC;
+ else if (soc_is_ar934x())
+ reg = AR934X_GPIO_REG_FUNC;
+ else
+ BUG();
- spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+ return ath79_gpio_base + reg;
}
-void ath79_gpio_function_disable(u32 mask)
+void ath79_gpio_function_setup(u32 set, u32 clear)
{
- void __iomem *base = ath79_gpio_base;
+ void __iomem *reg = ath79_gpio_get_function_reg();
unsigned long flags;
spin_lock_irqsave(&ath79_gpio_lock, flags);
- __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~mask,
- base + AR71XX_GPIO_REG_FUNC);
+ __raw_writel((__raw_readl(reg) & ~clear) | set, reg);
/* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ __raw_readl(reg);
spin_unlock_irqrestore(&ath79_gpio_lock, flags);
}
-void ath79_gpio_function_setup(u32 set, u32 clear)
+void ath79_gpio_function_enable(u32 mask)
{
- void __iomem *base = ath79_gpio_base;
- unsigned long flags;
-
- spin_lock_irqsave(&ath79_gpio_lock, flags);
-
- __raw_writel((__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~clear) | set,
- base + AR71XX_GPIO_REG_FUNC);
- /* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ ath79_gpio_function_setup(mask, 0);
+}
- spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+void ath79_gpio_function_disable(u32 mask)
+{
+ ath79_gpio_function_setup(0, mask);
}
void __init ath79_gpio_init(void)
@@ -198,12 +194,14 @@ void __init ath79_gpio_init(void)
ath79_gpio_count = AR933X_GPIO_COUNT;
else if (soc_is_ar934x())
ath79_gpio_count = AR934X_GPIO_COUNT;
+ else if (soc_is_qca955x())
+ ath79_gpio_count = QCA955X_GPIO_COUNT;
else
BUG();
ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
ath79_gpio_chip.ngpio = ath79_gpio_count;
- if (soc_is_ar934x()) {
+ if (soc_is_ar934x() || soc_is_qca955x()) {
ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
}
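
The gpio.c change above collapses the enable/disable helpers into a single
set/clear read-modify-write of the function register, done under the existing
spinlock so concurrent callers cannot lose each other's updates. A generic
sketch of that pattern, with placeholder register and lock names, for
reference only:

/* Illustrative sketch only -- generic set/clear RMW under a spinlock. */
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_reg_lock);

static void example_reg_rmw(void __iomem *reg, u32 set, u32 clear)
{
	unsigned long flags;

	spin_lock_irqsave(&example_reg_lock, flags);

	__raw_writel((__raw_readl(reg) & ~clear) | set, reg);
	__raw_readl(reg);	/* read back to flush the posted write */

	spin_unlock_irqrestore(&example_reg_lock, flags);
}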
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 90d09fc15398..9c0e1761773f 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -35,44 +35,17 @@ static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
- if (pending & MISC_INT_UART)
- generic_handle_irq(ATH79_MISC_IRQ_UART);
-
- else if (pending & MISC_INT_DMA)
- generic_handle_irq(ATH79_MISC_IRQ_DMA);
-
- else if (pending & MISC_INT_PERFC)
- generic_handle_irq(ATH79_MISC_IRQ_PERFC);
-
- else if (pending & MISC_INT_TIMER)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER);
-
- else if (pending & MISC_INT_TIMER2)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER2);
-
- else if (pending & MISC_INT_TIMER3)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER3);
-
- else if (pending & MISC_INT_TIMER4)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER4);
-
- else if (pending & MISC_INT_OHCI)
- generic_handle_irq(ATH79_MISC_IRQ_OHCI);
-
- else if (pending & MISC_INT_ERROR)
- generic_handle_irq(ATH79_MISC_IRQ_ERROR);
-
- else if (pending & MISC_INT_GPIO)
- generic_handle_irq(ATH79_MISC_IRQ_GPIO);
-
- else if (pending & MISC_INT_WDOG)
- generic_handle_irq(ATH79_MISC_IRQ_WDOG);
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
- else if (pending & MISC_INT_ETHSW)
- generic_handle_irq(ATH79_MISC_IRQ_ETHSW);
+ while (pending) {
+ int bit = __ffs(pending);
- else
- spurious_interrupt();
+ generic_handle_irq(ATH79_MISC_IRQ(bit));
+ pending &= ~BIT(bit);
+ }
}
static void ar71xx_misc_irq_unmask(struct irq_data *d)
@@ -130,7 +103,10 @@ static void __init ath79_misc_irq_init(void)
if (soc_is_ar71xx() || soc_is_ar913x())
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
- else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
+ else if (soc_is_ar724x() ||
+ soc_is_ar933x() ||
+ soc_is_ar934x() ||
+ soc_is_qca955x())
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
else
BUG();
@@ -141,7 +117,7 @@ static void __init ath79_misc_irq_init(void)
handle_level_irq);
}
- irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
+ irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
}
static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
@@ -174,7 +150,89 @@ static void ar934x_ip2_irq_init(void)
irq_set_chip_and_handler(i, &dummy_irq_chip,
handle_level_irq);
- irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+ irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
+}
+
+static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+ status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
+
+ if (status == 0) {
+ spurious_interrupt();
+ goto enable;
+ }
+
+ if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP2_IRQ(0));
+ }
+
+ if (status & QCA955X_EXT_INT_WMAC_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP2_IRQ(1));
+ }
+
+enable:
+ enable_irq(irq);
+}
+
+static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+ status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
+ QCA955X_EXT_INT_USB1 |
+ QCA955X_EXT_INT_USB2;
+
+ if (status == 0) {
+ spurious_interrupt();
+ goto enable;
+ }
+
+ if (status & QCA955X_EXT_INT_USB1) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(0));
+ }
+
+ if (status & QCA955X_EXT_INT_USB2) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(1));
+ }
+
+ if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(2));
+ }
+
+enable:
+ enable_irq(irq);
+}
+
+static void qca955x_irq_init(void)
+{
+ int i;
+
+ for (i = ATH79_IP2_IRQ_BASE;
+ i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
+
+ for (i = ATH79_IP3_IRQ_BASE;
+ i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
}
asmlinkage void plat_irq_dispatch(void)
@@ -184,22 +242,22 @@ asmlinkage void plat_irq_dispatch(void)
pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
- do_IRQ(ATH79_CPU_IRQ_TIMER);
+ do_IRQ(ATH79_CPU_IRQ(7));
else if (pending & STATUSF_IP2)
ath79_ip2_handler();
else if (pending & STATUSF_IP4)
- do_IRQ(ATH79_CPU_IRQ_GE0);
+ do_IRQ(ATH79_CPU_IRQ(4));
else if (pending & STATUSF_IP5)
- do_IRQ(ATH79_CPU_IRQ_GE1);
+ do_IRQ(ATH79_CPU_IRQ(5));
else if (pending & STATUSF_IP3)
ath79_ip3_handler();
else if (pending & STATUSF_IP6)
- do_IRQ(ATH79_CPU_IRQ_MISC);
+ do_IRQ(ATH79_CPU_IRQ(6));
else
spurious_interrupt();
@@ -212,63 +270,69 @@ asmlinkage void plat_irq_dispatch(void)
* Issue a flush in the handlers to ensure that the driver sees
* the update.
*/
+
+static void ath79_default_ip2_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ(2));
+}
+
+static void ath79_default_ip3_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ(3));
+}
+
static void ar71xx_ip2_handler(void)
{
ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar724x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar913x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar933x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ_IP2);
-}
-
-static void ar934x_ip2_handler(void)
-{
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar71xx_ip3_handler(void)
{
ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar724x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar913x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar933x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar934x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
void __init arch_init_irq(void)
@@ -286,16 +350,21 @@ void __init arch_init_irq(void)
ath79_ip2_handler = ar933x_ip2_handler;
ath79_ip3_handler = ar933x_ip3_handler;
} else if (soc_is_ar934x()) {
- ath79_ip2_handler = ar934x_ip2_handler;
+ ath79_ip2_handler = ath79_default_ip2_handler;
ath79_ip3_handler = ar934x_ip3_handler;
+ } else if (soc_is_qca955x()) {
+ ath79_ip2_handler = ath79_default_ip2_handler;
+ ath79_ip3_handler = ath79_default_ip3_handler;
} else {
BUG();
}
- cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
+ cp0_perfcount_irq = ATH79_MISC_IRQ(5);
mips_cpu_irq_init();
ath79_misc_irq_init();
if (soc_is_ar934x())
ar934x_ip2_irq_init();
+ else if (soc_is_qca955x())
+ qca955x_irq_init();
}
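
The reworked MISC handler above replaces the fixed if/else chain with a loop
over the pending mask, so every asserted source is serviced on each invocation
instead of only the first match. The same idiom in isolation, with hypothetical
names and bit layout, purely for illustration:

/* Illustrative sketch only: dispatching every set bit of a pending mask. */
#include <stdio.h>

static void handle_irq(int nr)
{
	printf("dispatch MISC irq %d\n", nr);	/* stands in for generic_handle_irq() */
}

int main(void)
{
	unsigned int pending = 0x209;	/* hypothetical: bits 0, 3 and 9 set */

	while (pending) {
		int bit = __builtin_ctz(pending);	/* same role as __ffs() */

		handle_irq(bit);
		pending &= ~(1u << bit);
	}
	return 0;
}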
diff --git a/arch/mips/ath79/mach-ap121.c b/arch/mips/ath79/mach-ap121.c
index 4c20200d7c72..1bf73f2a069d 100644
--- a/arch/mips/ath79/mach-ap121.c
+++ b/arch/mips/ath79/mach-ap121.c
@@ -69,7 +69,7 @@ static struct spi_board_info ap121_spi_info[] = {
static struct ath79_spi_platform_data ap121_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
static void __init ap121_setup(void)
diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c
new file mode 100644
index 000000000000..479dd4b1d0d2
--- /dev/null
+++ b/arch/mips/ath79/mach-ap136.c
@@ -0,0 +1,156 @@
+/*
+ * Qualcomm Atheros AP136 reference board support
+ *
+ * Copyright (c) 2012 Qualcomm Atheros
+ * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-usb.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define AP136_GPIO_LED_STATUS_RED 14
+#define AP136_GPIO_LED_STATUS_GREEN 19
+#define AP136_GPIO_LED_USB 4
+#define AP136_GPIO_LED_WLAN_2G 13
+#define AP136_GPIO_LED_WLAN_5G 12
+#define AP136_GPIO_LED_WPS_RED 15
+#define AP136_GPIO_LED_WPS_GREEN 20
+
+#define AP136_GPIO_BTN_WPS 16
+#define AP136_GPIO_BTN_RFKILL 21
+
+#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */
+#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL)
+
+#define AP136_WMAC_CALDATA_OFFSET 0x1000
+#define AP136_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led ap136_leds_gpio[] __initdata = {
+ {
+ .name = "qca:green:status",
+ .gpio = AP136_GPIO_LED_STATUS_GREEN,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:status",
+ .gpio = AP136_GPIO_LED_STATUS_RED,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:green:wps",
+ .gpio = AP136_GPIO_LED_WPS_GREEN,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:wps",
+ .gpio = AP136_GPIO_LED_WPS_RED,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:wlan-2g",
+ .gpio = AP136_GPIO_LED_WLAN_2G,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:usb",
+ .gpio = AP136_GPIO_LED_USB,
+ .active_low = 1,
+ }
+};
+
+static struct gpio_keys_button ap136_gpio_keys[] __initdata = {
+ {
+ .desc = "WPS button",
+ .type = EV_KEY,
+ .code = KEY_WPS_BUTTON,
+ .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = AP136_GPIO_BTN_WPS,
+ .active_low = 1,
+ },
+ {
+ .desc = "RFKILL button",
+ .type = EV_KEY,
+ .code = KEY_RFKILL,
+ .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = AP136_GPIO_BTN_RFKILL,
+ .active_low = 1,
+ },
+};
+
+static struct spi_board_info ap136_spi_info[] = {
+ {
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 25000000,
+ .modalias = "mx25l6405d",
+ }
+};
+
+static struct ath79_spi_platform_data ap136_spi_data = {
+ .bus_num = 0,
+ .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data ap136_ath9k_data;
+
+static int ap136_pci_plat_dev_init(struct pci_dev *dev)
+{
+ if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0)
+ dev->dev.platform_data = &ap136_ath9k_data;
+
+ return 0;
+}
+
+static void __init ap136_pci_init(u8 *eeprom)
+{
+ memcpy(ap136_ath9k_data.eeprom_data, eeprom,
+ sizeof(ap136_ath9k_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void ap136_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init ap136_setup(void)
+{
+ u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+ ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
+ ap136_leds_gpio);
+ ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(ap136_gpio_keys),
+ ap136_gpio_keys);
+ ath79_register_spi(&ap136_spi_data, ap136_spi_info,
+ ARRAY_SIZE(ap136_spi_info));
+ ath79_register_usb();
+ ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET);
+ ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010",
+ "Atheros AP136-010 reference board",
+ ap136_setup);
diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c
index abe19836331c..1c78d497f930 100644
--- a/arch/mips/ath79/mach-ap81.c
+++ b/arch/mips/ath79/mach-ap81.c
@@ -78,7 +78,7 @@ static struct spi_board_info ap81_spi_info[] = {
static struct ath79_spi_platform_data ap81_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
static void __init ap81_setup(void)
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
index 42f540a724f4..4d661a1d2dae 100644
--- a/arch/mips/ath79/mach-db120.c
+++ b/arch/mips/ath79/mach-db120.c
@@ -87,7 +87,7 @@ static struct spi_board_info db120_spi_info[] = {
static struct ath79_spi_platform_data db120_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
#ifdef CONFIG_PCI
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index c5f0ea5e00c3..67b980d94fb7 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -34,8 +34,8 @@
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
- .sda_pin = PB44_GPIO_I2C_SDA,
- .scl_pin = PB44_GPIO_I2C_SCL,
+ .sda_pin = PB44_GPIO_I2C_SDA,
+ .scl_pin = PB44_GPIO_I2C_SCL,
};
static struct platform_device pb44_i2c_gpio_device = {
@@ -53,7 +53,7 @@ static struct pcf857x_platform_data pb44_pcf857x_data = {
static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
{
I2C_BOARD_INFO("pcf8575", 0x20),
- .platform_data = &pb44_pcf857x_data,
+ .platform_data = &pb44_pcf857x_data,
},
};
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index af92e5c30d66..26254058c545 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -17,6 +17,7 @@
enum ath79_mach_type {
ATH79_MACH_GENERIC = 0,
ATH79_MACH_AP121, /* Atheros AP121 reference board */
+ ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */
ATH79_MACH_AP81, /* Atheros AP81 reference board */
ATH79_MACH_DB120, /* Atheros DB120 reference board */
ATH79_MACH_PB44, /* Atheros PB44 reference board */
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
index ca83abd9d31e..730c0b03060d 100644
--- a/arch/mips/ath79/pci.c
+++ b/arch/mips/ath79/pci.c
@@ -14,10 +14,11 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/irq.h>
-#include <asm/mach-ath79/pci.h>
#include "pci.h"
static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
@@ -48,6 +49,21 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
}
};
+static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
+ {
+ .bus = 0,
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ },
+ {
+ .bus = 1,
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(1),
+ },
+};
+
int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
{
int irq = -1;
@@ -63,6 +79,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
soc_is_ar9344()) {
ath79_pci_irq_map = ar724x_pci_irq_map;
ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+ } else if (soc_is_qca955x()) {
+ ath79_pci_irq_map = qca955x_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map);
} else {
pr_crit("pci %s: invalid irq map\n",
pci_name((struct pci_dev *) dev));
@@ -74,7 +93,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
const struct ath79_pci_irq *entry;
entry = &ath79_pci_irq_map[i];
- if (entry->slot == slot && entry->pin == pin) {
+ if (entry->bus == dev->bus->number &&
+ entry->slot == slot &&
+ entry->pin == pin) {
irq = entry->irq;
break;
}
@@ -110,21 +131,143 @@ void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
ath79_pci_plat_dev_init = func;
}
-int __init ath79_register_pci(void)
+static struct platform_device *
+ath79_register_pci_ar71xx(void)
+{
+ struct platform_device *pdev;
+ struct resource res[4];
+
+ memset(res, 0, sizeof(res));
+
+ res[0].name = "cfg_base";
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = AR71XX_PCI_CFG_BASE;
+ res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1;
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = ATH79_CPU_IRQ(2);
+ res[1].end = ATH79_CPU_IRQ(2);
+
+ res[2].name = "io_base";
+ res[2].flags = IORESOURCE_IO;
+ res[2].start = 0;
+ res[2].end = 0;
+
+ res[3].name = "mem_base";
+ res[3].flags = IORESOURCE_MEM;
+ res[3].start = AR71XX_PCI_MEM_BASE;
+ res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1;
+
+ pdev = platform_device_register_simple("ar71xx-pci", -1,
+ res, ARRAY_SIZE(res));
+ return pdev;
+}
+
+static struct platform_device *
+ath79_register_pci_ar724x(int id,
+ unsigned long cfg_base,
+ unsigned long ctrl_base,
+ unsigned long crp_base,
+ unsigned long mem_base,
+ unsigned long mem_size,
+ unsigned long io_base,
+ int irq)
{
- if (soc_is_ar71xx())
- return ar71xx_pcibios_init();
+ struct platform_device *pdev;
+ struct resource res[6];
+
+ memset(res, 0, sizeof(res));
+
+ res[0].name = "cfg_base";
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = cfg_base;
+ res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1;
+
+ res[1].name = "ctrl_base";
+ res[1].flags = IORESOURCE_MEM;
+ res[1].start = ctrl_base;
+ res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1;
+
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = irq;
+ res[2].end = irq;
+
+ res[3].name = "mem_base";
+ res[3].flags = IORESOURCE_MEM;
+ res[3].start = mem_base;
+ res[3].end = mem_base + mem_size - 1;
+
+ res[4].name = "io_base";
+ res[4].flags = IORESOURCE_IO;
+ res[4].start = io_base;
+ res[4].end = io_base;
- if (soc_is_ar724x())
- return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+ res[5].name = "crp_base";
+ res[5].flags = IORESOURCE_MEM;
+ res[5].start = crp_base;
+ res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1;
- if (soc_is_ar9342() || soc_is_ar9344()) {
+ pdev = platform_device_register_simple("ar724x-pci", id,
+ res, ARRAY_SIZE(res));
+ return pdev;
+}
+
+int __init ath79_register_pci(void)
+{
+ struct platform_device *pdev = NULL;
+
+ if (soc_is_ar71xx()) {
+ pdev = ath79_register_pci_ar71xx();
+ } else if (soc_is_ar724x()) {
+ pdev = ath79_register_pci_ar724x(-1,
+ AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CRP_BASE,
+ AR724X_PCI_MEM_BASE,
+ AR724X_PCI_MEM_SIZE,
+ 0,
+ ATH79_CPU_IRQ(2));
+ } else if (soc_is_ar9342() ||
+ soc_is_ar9344()) {
u32 bootstrap;
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
- return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+ if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0)
+ return -ENODEV;
+
+ pdev = ath79_register_pci_ar724x(-1,
+ AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CRP_BASE,
+ AR724X_PCI_MEM_BASE,
+ AR724X_PCI_MEM_SIZE,
+ 0,
+ ATH79_IP2_IRQ(0));
+ } else if (soc_is_qca9558()) {
+ pdev = ath79_register_pci_ar724x(0,
+ QCA955X_PCI_CFG_BASE0,
+ QCA955X_PCI_CTRL_BASE0,
+ QCA955X_PCI_CRP_BASE0,
+ QCA955X_PCI_MEM_BASE0,
+ QCA955X_PCI_MEM_SIZE,
+ 0,
+ ATH79_IP2_IRQ(0));
+
+ pdev = ath79_register_pci_ar724x(1,
+ QCA955X_PCI_CFG_BASE1,
+ QCA955X_PCI_CTRL_BASE1,
+ QCA955X_PCI_CRP_BASE1,
+ QCA955X_PCI_MEM_BASE1,
+ QCA955X_PCI_MEM_SIZE,
+ 1,
+ ATH79_IP3_IRQ(2));
+ } else {
+ /* No PCI support */
+ return -ENODEV;
}
- return -ENODEV;
+ if (!pdev)
+ pr_err("unable to register PCI controller device\n");
+
+ return pdev ? 0 : -ENODEV;
}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
index 51c6625dcc6d..1d00a3803c37 100644
--- a/arch/mips/ath79/pci.h
+++ b/arch/mips/ath79/pci.h
@@ -14,6 +14,7 @@
#define _ATH79_PCI_H
struct ath79_pci_irq {
+ int bus;
u8 slot;
u8 pin;
int irq;
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 60d212ef8629..d5b3c9057018 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -164,13 +164,29 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA9556:
+ ath79_soc = ATH79_SOC_QCA9556;
+ chip = "9556";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA9558:
+ ath79_soc = ATH79_SOC_QCA9558;
+ chip = "9558";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
ath79_soc_rev = rev;
- sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
+ if (soc_is_qca955x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+ chip, rev);
+ else
+ sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
pr_info("SoC: %s\n", ath79_sys_type);
}
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 1a3567f07e73..f3bf6d5bfb9d 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 48a4c70b3842..cc40b74940f5 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -3,10 +3,10 @@
*
* Copyright (C) 2005 Broadcom Corporation
* Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -18,83 +18,160 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/addrspace.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
static char nvram_buf[NVRAM_SPACE];
+static u32 find_nvram_size(u32 end)
+{
+ struct nvram_header *header;
+ u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+ header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]);
+ if (header->magic == NVRAM_HEADER)
+ return nvram_sizes[i];
+ }
+
+ return 0;
+}
+
/* Probe for NVRAM header */
-static void early_nvram_init(void)
+static int nvram_find_and_copy(u32 base, u32 lim)
{
-#ifdef CONFIG_BCM47XX_SSB
- struct ssb_mipscore *mcore_ssb;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- struct bcma_drv_cc *bcma_cc;
-#endif
struct nvram_header *header;
int i;
- u32 base = 0;
- u32 lim = 0;
u32 off;
u32 *src, *dst;
+ u32 size;
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- mcore_ssb = &bcm47xx_bus.ssb.mipscore;
- base = mcore_ssb->pflash.window;
- lim = mcore_ssb->pflash.window_size;
- break;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc;
- base = bcma_cc->pflash.window;
- lim = bcma_cc->pflash.window_size;
- break;
-#endif
- }
-
+ /* TODO: when nvram is on nand flash check for bad blocks first. */
off = FLASH_MIN;
while (off <= lim) {
/* Windowed flash access */
- header = (struct nvram_header *)
- KSEG1ADDR(base + off - NVRAM_SPACE);
- if (header->magic == NVRAM_HEADER)
+ size = find_nvram_size(base + off);
+ if (size) {
+ header = (struct nvram_header *)KSEG1ADDR(base + off -
+ size);
goto found;
+ }
off <<= 1;
}
/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
header = (struct nvram_header *) KSEG1ADDR(base + 4096);
- if (header->magic == NVRAM_HEADER)
+ if (header->magic == NVRAM_HEADER) {
+ size = NVRAM_SPACE;
goto found;
+ }
header = (struct nvram_header *) KSEG1ADDR(base + 1024);
- if (header->magic == NVRAM_HEADER)
+ if (header->magic == NVRAM_HEADER) {
+ size = NVRAM_SPACE;
goto found;
+ }
- return;
+ pr_err("no nvram found\n");
+ return -ENXIO;
found:
+
+ if (header->len > size)
+ pr_err("The nvram size accoridng to the header seems to be bigger than the partition on flash\n");
+ if (header->len > NVRAM_SPACE)
+ pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ header->len, NVRAM_SPACE);
+
src = (u32 *) header;
dst = (u32 *) nvram_buf;
for (i = 0; i < sizeof(struct nvram_header); i += 4)
*dst++ = *src++;
- for (; i < header->len && i < NVRAM_SPACE; i += 4)
+ for (; i < header->len && i < NVRAM_SPACE && i < size; i += 4)
*dst++ = le32_to_cpu(*src++);
+ memset(dst, 0x0, NVRAM_SPACE - i);
+
+ return 0;
}
-int nvram_getenv(char *name, char *val, size_t val_len)
+#ifdef CONFIG_BCM47XX_SSB
+static int nvram_init_ssb(void)
+{
+ struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
+ u32 base;
+ u32 lim;
+
+ if (mcore->pflash.present) {
+ base = mcore->pflash.window;
+ lim = mcore->pflash.window_size;
+ } else {
+ pr_err("Couldn't find supported flash memory\n");
+ return -ENXIO;
+ }
+
+ return nvram_find_and_copy(base, lim);
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int nvram_init_bcma(void)
+{
+ struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc;
+ u32 base;
+ u32 lim;
+
+#ifdef CONFIG_BCMA_NFLASH
+ if (cc->nflash.boot) {
+ base = BCMA_SOC_FLASH1;
+ lim = BCMA_SOC_FLASH1_SZ;
+ } else
+#endif
+ if (cc->pflash.present) {
+ base = cc->pflash.window;
+ lim = cc->pflash.window_size;
+#ifdef CONFIG_BCMA_SFLASH
+ } else if (cc->sflash.present) {
+ base = cc->sflash.window;
+ lim = cc->sflash.size;
+#endif
+ } else {
+ pr_err("Couldn't find supported flash memory\n");
+ return -ENXIO;
+ }
+
+ return nvram_find_and_copy(base, lim);
+}
+#endif
+
+static int nvram_init(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return nvram_init_ssb();
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return nvram_init_bcma();
+#endif
+ }
+ return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
{
char *var, *value, *end, *eq;
+ int err;
if (!name)
- return NVRAM_ERR_INV_PARAM;
+ return -EINVAL;
- if (!nvram_buf[0])
- early_nvram_init();
+ if (!nvram_buf[0]) {
+ err = nvram_init();
+ if (err)
+ return err;
+ }
/* Look for name=value and return value */
var = &nvram_buf[sizeof(struct nvram_header)];
@@ -110,6 +187,6 @@ int nvram_getenv(char *name, char *val, size_t val_len)
return snprintf(val, val_len, "%s", value);
}
}
- return NVRAM_ERR_ENVNOTFOUND;
+ return -ENOENT;
}
-EXPORT_SYMBOL(nvram_getenv);
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 4d54b58dbd32..b2246cd9ca12 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -35,7 +35,7 @@
#include <asm/reboot.h>
#include <asm/time.h>
#include <bcm47xx.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
union bcm47xx_bus bcm47xx_bus;
EXPORT_SYMBOL(bcm47xx_bus);
@@ -115,7 +115,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom(&iv->sprom, NULL, false);
- if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
+ if (bcm47xx_nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
return 0;
@@ -138,7 +138,7 @@ static void __init bcm47xx_register_ssb(void)
panic("Failed to initialize SSB bus (err %d)", err);
mcore = &bcm47xx_bus.ssb.mipscore;
- if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
+ if (bcm47xx_nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
if (strstr(buf, "console=ttyS1")) {
struct ssb_serial_port port;
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 289cc0a38638..ad03c931b905 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -27,7 +27,7 @@
*/
#include <bcm47xx.h>
-#include <nvram.h>
+#include <bcm47xx_nvram.h>
static void create_key(const char *prefix, const char *postfix,
const char *name, char *buf, int len)
@@ -50,18 +50,18 @@ static int get_nvram_var(const char *prefix, const char *postfix,
create_key(prefix, postfix, name, key, sizeof(key));
- err = nvram_getenv(key, buf, len);
- if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) {
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ if (fallback && err == -ENOENT && prefix) {
create_key(NULL, postfix, name, key, sizeof(key));
- err = nvram_getenv(key, buf, len);
+ err = bcm47xx_nvram_getenv(key, buf, len);
}
return err;
}
#define NVRAM_READ_VAL(type) \
static void nvram_read_ ## type (const char *prefix, \
- const char *postfix, const char *name, \
- type *val, type allset, bool fallback) \
+ const char *postfix, const char *name, \
+ type *val, type allset, bool fallback) \
{ \
char buf[100]; \
int err; \
@@ -71,7 +71,7 @@ static void nvram_read_ ## type (const char *prefix, \
fallback); \
if (err < 0) \
return; \
- err = kstrto ## type (buf, 0, &var); \
+ err = kstrto ## type(strim(buf), 0, &var); \
if (err) { \
pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
prefix, name, postfix, buf, err); \
@@ -99,7 +99,7 @@ static void nvram_read_u32_2(const char *prefix, const char *name,
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
- err = kstrtou32(buf, 0, &val);
+ err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
@@ -120,7 +120,7 @@ static void nvram_read_leddc(const char *prefix, const char *name,
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
- err = kstrtou32(buf, 0, &val);
+ err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
@@ -144,7 +144,7 @@ static void nvram_read_macaddr(const char *prefix, const char *name,
if (err < 0)
return;
- nvram_parse_macaddr(buf, *val);
+ bcm47xx_nvram_parse_macaddr(buf, *val);
}
static void nvram_read_alpha2(const char *prefix, const char *name,
@@ -652,12 +652,10 @@ static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
- nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0,
- fallback);
+ nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true);
nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
fallback);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0,
- fallback);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true);
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi, fallback);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
index 9d111e8087ec..c63a4c287b5c 100644
--- a/arch/mips/bcm47xx/wgt634u.c
+++ b/arch/mips/bcm47xx/wgt634u.c
@@ -36,13 +36,13 @@ static struct gpio_led wgt634u_leds[] = {
};
static struct gpio_led_platform_data wgt634u_led_data = {
- .num_leds = ARRAY_SIZE(wgt634u_leds),
- .leds = wgt634u_leds,
+ .num_leds = ARRAY_SIZE(wgt634u_leds),
+ .leds = wgt634u_leds,
};
static struct platform_device wgt634u_gpio_leds = {
- .name = "leds-gpio",
- .id = -1,
+ .name = "leds-gpio",
+ .id = -1,
.dev = {
.platform_data = &wgt634u_led_data,
}
@@ -53,35 +53,35 @@ static struct platform_device wgt634u_gpio_leds = {
firmware. */
static struct mtd_partition wgt634u_partitions[] = {
{
- .name = "cfe",
- .offset = 0,
- .size = 0x60000, /* 384k */
- .mask_flags = MTD_WRITEABLE /* force read-only */
+ .name = "cfe",
+ .offset = 0,
+ .size = 0x60000, /* 384k */
+ .mask_flags = MTD_WRITEABLE /* force read-only */
},
{
- .name = "config",
+ .name = "config",
.offset = 0x60000,
- .size = 0x20000 /* 128k */
+ .size = 0x20000 /* 128k */
},
{
- .name = "linux",
+ .name = "linux",
.offset = 0x80000,
- .size = 0x140000 /* 1280k */
+ .size = 0x140000 /* 1280k */
},
{
- .name = "jffs",
+ .name = "jffs",
.offset = 0x1c0000,
- .size = 0x620000 /* 6272k */
+ .size = 0x620000 /* 6272k */
},
{
- .name = "nvram",
+ .name = "nvram",
.offset = 0x7e0000,
- .size = 0x20000 /* 128k */
+ .size = 0x20000 /* 128k */
},
};
static struct physmap_flash_data wgt634u_flash_data = {
- .parts = wgt634u_partitions,
+ .parts = wgt634u_partitions,
.nr_parts = ARRAY_SIZE(wgt634u_partitions)
};
@@ -90,9 +90,9 @@ static struct resource wgt634u_flash_resource = {
};
static struct platform_device wgt634u_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = { .platform_data = &wgt634u_flash_data, },
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = { .platform_data = &wgt634u_flash_data, },
.resource = &wgt634u_flash_resource,
.num_resources = 1,
};
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 73be9b349690..ed1949c29508 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -406,9 +406,9 @@ static struct board_info __initdata board_FAST2404 = {
.expected_cpu_id = 0x6348,
.has_uart0 = 1,
- .has_enet0 = 1,
- .has_enet1 = 1,
- .has_pci = 1,
+ .has_enet0 = 1,
+ .has_enet1 = 1,
+ .has_pci = 1,
.enet0 = {
.has_phy = 1,
@@ -591,22 +591,22 @@ static struct board_info __initdata board_96358vw2 = {
};
static struct board_info __initdata board_AGPFS0 = {
- .name = "AGPF-S0",
- .expected_cpu_id = 0x6358,
+ .name = "AGPF-S0",
+ .expected_cpu_id = 0x6358,
.has_uart0 = 1,
- .has_enet0 = 1,
- .has_enet1 = 1,
- .has_pci = 1,
+ .has_enet0 = 1,
+ .has_enet1 = 1,
+ .has_pci = 1,
.enet0 = {
- .has_phy = 1,
- .use_internal_phy = 1,
+ .has_phy = 1,
+ .use_internal_phy = 1,
},
.enet1 = {
- .force_speed_100 = 1,
- .force_duplex_full = 1,
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
},
.has_ohci0 = 1,
@@ -677,7 +677,7 @@ static struct ssb_sprom bcm63xx_sprom = {
.revision = 0x02,
.board_rev = 0x17,
.country_code = 0x0,
- .ant_available_bg = 0x3,
+ .ant_available_bg = 0x3,
.pa0b0 = 0x15ae,
.pa0b1 = 0xfa85,
.pa0b2 = 0xfe8d,
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index bf353c937df2..aa8f7f9cc7a4 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -10,7 +10,7 @@
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
-static void __init wait_xfered(void)
+static void wait_xfered(void)
{
unsigned int val;
@@ -22,7 +22,7 @@ static void __init wait_xfered(void)
} while (1);
}
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
wait_xfered();
bcm_uart0_writel(c, UART_FIFO_REG);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 85bcb5adc7cb..851261e9fdc0 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -24,7 +24,7 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections))
hostprogs-y := elf2ecoff
targets := vmlinux.ecoff
-quiet_cmd_ecoff = ECOFF $@
+quiet_cmd_ecoff = ECOFF $@
cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
$(obj)/vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX) FORCE
$(call if_changed,ecoff)
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index c2a3fb0ffc87..bbaa1d4beb6d 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
targets += piggy.o
OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
- --set-section-flags=.image=contents,alloc,load,readonly,data
+ --set-section-flags=.image=contents,alloc,load,readonly,data
$(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
$(call if_changed,objcopy)
@@ -67,9 +67,9 @@ endif
vmlinuzobjs-y += $(obj)/piggy.o
-quiet_cmd_zld = LD $@
+quiet_cmd_zld = LD $@
cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
-quiet_cmd_strip = STRIP $@
+quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -s $@
vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
$(call cmd,zld)
@@ -96,7 +96,7 @@ quiet_cmd_32 = OBJCOPY $@
vmlinuz.32: vmlinuz
$(call cmd,32)
-quiet_cmd_ecoff = ECOFF $@
+quiet_cmd_ecoff = ECOFF $@
cmd_ecoff = $< $(VMLINUZ) $@ $(e2eflag)
vmlinuz.ecoff: $(obj)/../elf2ecoff $(VMLINUZ)
$(call cmd,ecoff)
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 9a6243676e22..37fe58c19a90 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2010 "Wu Zhangjin" <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 5cad0faefa17..2c9573098c0d 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -5,8 +5,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index 4e65a8420bee..409cb483a9ff 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -32,8 +32,8 @@ start:
bne a2, a0, 1b
addiu a0, a0, 4
- PTR_LA a0, (.heap) /* heap address */
- PTR_LA sp, (.stack + 8192) /* stack address */
+ PTR_LA a0, (.heap) /* heap address */
+ PTR_LA sp, (.stack + 8192) /* stack address */
PTR_LA ra, 2f
PTR_LA k0, decompress_kernel
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
index 8c3eed2877f2..83e5c3813d67 100644
--- a/arch/mips/boot/ecoff.h
+++ b/arch/mips/boot/ecoff.h
@@ -2,48 +2,48 @@
* Some ECOFF definitions.
*/
typedef struct filehdr {
- unsigned short f_magic; /* magic number */
- unsigned short f_nscns; /* number of sections */
- long f_timdat; /* time & date stamp */
- long f_symptr; /* file pointer to symbolic header */
- long f_nsyms; /* sizeof(symbolic hdr) */
- unsigned short f_opthdr; /* sizeof(optional hdr) */
- unsigned short f_flags; /* flags */
+ unsigned short f_magic; /* magic number */
+ unsigned short f_nscns; /* number of sections */
+ long f_timdat; /* time & date stamp */
+ long f_symptr; /* file pointer to symbolic header */
+ long f_nsyms; /* sizeof(symbolic hdr) */
+ unsigned short f_opthdr; /* sizeof(optional hdr) */
+ unsigned short f_flags; /* flags */
} FILHDR;
-#define FILHSZ sizeof(FILHDR)
+#define FILHSZ sizeof(FILHDR)
#define OMAGIC 0407
#define MIPSEBMAGIC 0x160
#define MIPSELMAGIC 0x162
typedef struct scnhdr {
- char s_name[8]; /* section name */
- long s_paddr; /* physical address, aliased s_nlib */
- long s_vaddr; /* virtual address */
- long s_size; /* section size */
- long s_scnptr; /* file ptr to raw data for section */
- long s_relptr; /* file ptr to relocation */
- long s_lnnoptr; /* file ptr to gp histogram */
- unsigned short s_nreloc; /* number of relocation entries */
- unsigned short s_nlnno; /* number of gp histogram entries */
- long s_flags; /* flags */
+ char s_name[8]; /* section name */
+ long s_paddr; /* physical address, aliased s_nlib */
+ long s_vaddr; /* virtual address */
+ long s_size; /* section size */
+ long s_scnptr; /* file ptr to raw data for section */
+ long s_relptr; /* file ptr to relocation */
+ long s_lnnoptr; /* file ptr to gp histogram */
+ unsigned short s_nreloc; /* number of relocation entries */
+ unsigned short s_nlnno; /* number of gp histogram entries */
+ long s_flags; /* flags */
} SCNHDR;
#define SCNHSZ sizeof(SCNHDR)
#define SCNROUND ((long)16)
typedef struct aouthdr {
- short magic; /* see above */
- short vstamp; /* version stamp */
- long tsize; /* text size in bytes, padded to DW bdry*/
- long dsize; /* initialized data " " */
- long bsize; /* uninitialized data " " */
- long entry; /* entry pt. */
- long text_start; /* base of text used for this file */
- long data_start; /* base of data used for this file */
- long bss_start; /* base of bss used for this file */
- long gprmask; /* general purpose register mask */
- long cprmask[4]; /* co-processor register masks */
- long gp_value; /* the gp value used for this object */
+ short magic; /* see above */
+ short vstamp; /* version stamp */
+ long tsize; /* text size in bytes, padded to DW bdry*/
+ long dsize; /* initialized data " " */
+ long bsize; /* uninitialized data " " */
+ long entry; /* entry pt. */
+ long text_start; /* base of text used for this file */
+ long data_start; /* base of data used for this file */
+ long bss_start; /* base of bss used for this file */
+ long gprmask; /* general purpose register mask */
+ long cprmask[4]; /* co-processor register masks */
+ long gp_value; /* the gp value used for this object */
} AOUTHDR;
#define AOUTHSZ sizeof(AOUTHDR)
@@ -51,7 +51,7 @@ typedef struct aouthdr {
#define NMAGIC 0410
#define ZMAGIC 0413
#define SMAGIC 0411
-#define LIBMAGIC 0443
+#define LIBMAGIC 0443
#define N_TXTOFF(f, a) \
((a).magic == ZMAGIC || (a).magic == LIBMAGIC ? 0 : \
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index e19d906236af..8585078ae50e 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -29,7 +29,7 @@
/* elf2ecoff.c
This program converts an elf executable to an ECOFF executable.
- No symbol table is retained. This is useful primarily in building
+ No symbol table is retained. This is useful primarily in building
net-bootable kernels for machines (e.g., DECstation and Alpha) which
only support the ECOFF object file format. */
@@ -341,7 +341,7 @@ int main(int argc, char *argv[])
/* Figure out if we can cram the program header into an ECOFF
header... Basically, we can't handle anything but loadable
- segments, but we can ignore some kinds of segments. We can't
+ segments, but we can ignore some kinds of segments. We can't
handle holes in the address space. Segments may be out of order,
so we sort them first. */
@@ -514,7 +514,7 @@ int main(int argc, char *argv[])
for (i = 0; i < nosecs; i++) {
printf
- ("Section %d: %s phys %lx size %lx file offset %lx\n",
+ ("Section %d: %s phys %lx size %lx file offset %lx\n",
i, esecs[i].s_name, esecs[i].s_paddr,
esecs[i].s_size, esecs[i].s_scnptr);
}
@@ -551,7 +551,7 @@ int main(int argc, char *argv[])
}
/*
- * Copy the loadable sections. Zero-fill any gaps less than 64k;
+ * Copy the loadable sections. Zero-fill any gaps less than 64k;
* complain about any zero-filling, and die if we're asked to zero-fill
* more than 64k.
*/
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 2f4f6d5e05b6..75a6df7fd265 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -94,4 +94,13 @@ config SWIOTLB
select NEED_SG_DMA_LENGTH
+config OCTEON_ILM
+ tristate "Module to measure interrupt latency using Octeon CIU Timer"
+ help
+ This driver is a module to measure interrupt latency using the
+ CIU Timers on Octeon.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-ilm
+
endif # CPU_CAVIUM_OCTEON
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 6e927cf20df2..3595affb9772 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -17,7 +17,8 @@ obj-y += dma-octeon.o flash_setup.o
obj-y += octeon-memcpy.o
obj-y += executive/
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o
DTS_FILES = octeon_3xxx.dts octeon_68xx.dts
DTB_FILES = $(patsubst %.dts, %.dtb, $(DTS_FILES))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 6d5ddbc112cc..504ed61a47cd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -155,8 +155,8 @@ int cvmx_bootmem_init(void *mem_desc_ptr)
*
* Linux 64 bit: Set XKPHYS bit
* Linux 32 bit: use mmap to create mapping, use virtual address
- * CVMX 64 bit: use physical address directly
- * CVMX 32 bit: use physical address directly
+ * CVMX 64 bit: use physical address directly
+ * CVMX 32 bit: use physical address directly
*
* Note that the CVMX environment assumes the use of 1-1 TLB
* mappings so that the physical addresses can be used
@@ -398,7 +398,7 @@ error_out:
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
uint64_t cur_addr;
- uint64_t prev_addr = 0; /* zero is invalid */
+ uint64_t prev_addr = 0; /* zero is invalid */
int retval = 0;
#ifdef DEBUG
@@ -424,7 +424,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
if (cur_addr == 0 || phy_addr < cur_addr) {
/* add at front of list - special case with changing head ptr */
if (cur_addr && phy_addr + size > cur_addr)
- goto bootmem_free_done; /* error, overlapping section */
+ goto bootmem_free_done; /* error, overlapping section */
else if (phy_addr + size == cur_addr) {
/* Add to front of existing first block */
cvmx_bootmem_phy_set_next(phy_addr,
@@ -611,7 +611,7 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
}
cvmx_bootmem_unlock();
- return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+ return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index fd2015331a20..7c6497781895 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -203,10 +203,10 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
- * status for.
+ * status for.
*
* Returns The ports link status. If the link isn't fully resolved, this must
- * return zero.
+ * return zero.
*/
cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
@@ -357,16 +357,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
result.s.link_up = 1;
result.s.full_duplex = ((phy_status >> 13) & 1);
switch ((phy_status >> 14) & 3) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.u64 = 0;
break;
}
@@ -391,16 +391,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
result.s.link_up = inband_status.s.status;
result.s.full_duplex = inband_status.s.duplex;
switch (inband_status.s.speed) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.u64 = 0;
break;
}
@@ -429,9 +429,9 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
*
* @phy_addr: The address of the PHY to program
* @enable_autoneg:
- * Non zero if you want to enable auto-negotiation.
+ * Non zero if you want to enable auto-negotiation.
* @link_info: Link speed to program. If the speed is zero and auto-negotiation
- * is enabled, all possible negotiation speeds are advertised.
+ * is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
@@ -607,10 +607,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
*
* @interface: Interface to probe
* @supported_ports:
- * Number of ports Octeon supports.
+ * Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
- * simple be "support_ports".
+ * simply be "supported_ports".
*/
int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
{
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
index c1c54890bae0..607b4e659579 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -79,10 +79,10 @@ void cvmx_helper_qlm_jtag_init(void)
* @qlm: QLM to shift value into
* @bits: Number of bits to shift in (1-32).
* @data: Data to shift in. Bit 0 enters the chain first, followed by
- * bit 1, etc.
+ * bit 1, etc.
*
* Returns The low order bits of the JTAG chain that shifted out of the
- * circle.
+ * circle.
*/
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index 82b21843421c..f59c88ee9b31 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -131,7 +131,7 @@ void cvmx_helper_rgmii_internal_loopback(int port)
* @interface: Interface to setup
* @port: Port to setup (0..3)
* @cpu_clock_hz:
- * Chip frequency in Hertz
+ * Chip frequency in Hertz
*
* Returns Zero on success, negative on failure
*/
@@ -409,14 +409,14 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
/*
- * Port .en .type .p0mii Configuration
- * ---- --- ----- ------ -----------------------------------------
- * X 0 X X All links are disabled.
- * 0 1 X 0 Port 0 is RGMII
- * 0 1 X 1 Port 0 is MII
- * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
- * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
- * MII port is selected by GMX_PRT1_CFG[SPEED].
+ * Port .en .type .p0mii Configuration
+ * ---- --- ----- ------ -----------------------------------------
+ * X 0 X X All links are disabled.
+ * 0 1 X 0 Port 0 is RGMII
+ * 0 1 X 1 Port 0 is MII
+ * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
+ * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
+ * MII port is selected by GMX_PRT1_CFG[SPEED].
*/
/* In MII mode, CLK_CNT = 1. */
@@ -464,9 +464,9 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 0c0bf5d30e70..45f18cce31a9 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -523,9 +523,9 @@ int __cvmx_helper_sgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
index 2830e4bdf7f3..1f3030c72d88 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -160,16 +160,16 @@ cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
result.s.link_up = inband.s.status;
result.s.full_duplex = inband.s.duplex;
switch (inband.s.speed) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.s.speed = 0;
result.s.link_up = 0;
break;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index dfdfe8bdc9c5..65d2bc9a0bde 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -96,9 +96,9 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
uint8_t *end_of_data;
cvmx_dprintf("Packet Length: %u\n", work->len);
- cvmx_dprintf(" Input Port: %u\n", work->ipprt);
- cvmx_dprintf(" QoS: %u\n", work->qos);
- cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
+ cvmx_dprintf(" Input Port: %u\n", work->ipprt);
+ cvmx_dprintf(" QoS: %u\n", work->qos);
+ cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
if (work->word2.s.bufs == 0) {
union cvmx_ipd_wqe_fpa_queue wqe_pool;
@@ -132,14 +132,14 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
while (remaining_bytes) {
start_of_buffer =
((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- cvmx_dprintf(" Buffer Start:%llx\n",
+ cvmx_dprintf(" Buffer Start:%llx\n",
(unsigned long long)start_of_buffer);
- cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
- cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
- cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- cvmx_dprintf(" Buffer Data: %llx\n",
+ cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
+ cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
+ cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
+ cvmx_dprintf(" Buffer Data: %llx\n",
(unsigned long long)buffer_ptr.s.addr);
- cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
+ cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
cvmx_dprintf("\t\t");
data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
@@ -172,11 +172,11 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
@@ -207,11 +207,11 @@ int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 1723248e987d..7653b7e92197 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -321,9 +321,9 @@ int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index fa4963856353..d63d20dfbfb0 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -111,7 +111,7 @@ int cvmx_helper_ports_on_interface(int interface)
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
+ * DISABLED.
*/
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
@@ -187,7 +187,7 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
* the defines in executive-config.h.
*
* @ipd_port: Port to configure. This follows the IPD numbering, not the
- * per interface numbering
+ * per interface numbering
*
* Returns Zero on success, negative on failure
*/
@@ -591,7 +591,7 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
* Function to adjust internal IPD pointer alignments
*
* Returns 0 on success
- * !0 on failure
+ * !0 on failure
*/
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
@@ -1068,9 +1068,9 @@ int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
index 560e034aa024..fa327ec891cd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
@@ -85,11 +85,11 @@ void __cvmx_interrupt_gmxx_enable(int interface)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
if (mode.s.en) {
switch (mode.cn56xx.mode) {
- case 1: /* XAUI */
+ case 1: /* XAUI */
num_ports = 1;
break;
- case 2: /* SGMII */
- case 3: /* PICMG */
+ case 2: /* SGMII */
+ case 3: /* PICMG */
num_ports = 4;
break;
default: /* Disabled */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 33b72144db31..42e38c30b540 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -147,7 +147,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error on some chips */
- if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
if (OCTEON_IS_MODEL(OCTEON_CN63XX))
@@ -438,7 +438,7 @@ void cvmx_l2c_flush(void)
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
- (assoc << assoc_shift) | (set << set_shift));
+ (assoc << assoc_shift) | (set << set_shift));
CVMX_CACHE_WBIL2I(address, 0);
}
}
@@ -573,8 +573,8 @@ union __cvmx_l2c_tag {
* @index: Index of the cacheline
*
* Returns The Octeon model specific tag structure. This is
- * translated by a wrapper function to a generic form that is
- * easier for applications to use.
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
*/
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
@@ -618,12 +618,12 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
".set push\n\t"
".set mips64\n\t"
".set noreorder\n\t"
- "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
- "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
- "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
- "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
".set pop"
: [tag_val] "=r" (tag_val)
: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
@@ -664,10 +664,10 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
- tag.s.V = l2c_tadx_tag.s.valid;
- tag.s.D = l2c_tadx_tag.s.dirty;
- tag.s.L = l2c_tadx_tag.s.lock;
- tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
tag.s.addr = l2c_tadx_tag.s.tag;
} else {
union __cvmx_l2c_tag tmp_tag;
@@ -679,34 +679,34 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
* as it can represent all models.
*/
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- tag.s.V = tmp_tag.cn58xx.V;
- tag.s.D = tmp_tag.cn58xx.D;
- tag.s.L = tmp_tag.cn58xx.L;
- tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
tag.s.addr = tmp_tag.cn58xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- tag.s.V = tmp_tag.cn38xx.V;
- tag.s.D = tmp_tag.cn38xx.D;
- tag.s.L = tmp_tag.cn38xx.L;
- tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
tag.s.addr = tmp_tag.cn38xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- tag.s.V = tmp_tag.cn31xx.V;
- tag.s.D = tmp_tag.cn31xx.D;
- tag.s.L = tmp_tag.cn31xx.L;
- tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
tag.s.addr = tmp_tag.cn31xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- tag.s.V = tmp_tag.cn30xx.V;
- tag.s.D = tmp_tag.cn30xx.D;
- tag.s.L = tmp_tag.cn30xx.L;
- tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
tag.s.addr = tmp_tag.cn30xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- tag.s.V = tmp_tag.cn50xx.V;
- tag.s.D = tmp_tag.cn50xx.D;
- tag.s.L = tmp_tag.cn50xx.L;
- tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
tag.s.addr = tmp_tag.cn50xx.addr;
} else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
@@ -865,7 +865,7 @@ void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
uint64_t address;
/* Create the address based on index and association.
* Bits<20:17> select the way of the cache block involved in
- * the operation
+ * the operation
* Bits<16:7> of the effect address select the index
*/
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index f557084b1092..f2c877541597 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -99,7 +99,7 @@ void cvmx_pko_initialize_global(void)
* be called after the FPA has been initialized and filled with pages.
*
* Returns 0 on success
- * !0 on failure
+ * !0 on failure
*/
int cvmx_pko_initialize_local(void)
{
@@ -186,19 +186,19 @@ void cvmx_pko_shutdown(void)
/**
* Configure a output port and the associated queues for use.
*
- * @port: Port to configure.
+ * @port: Port to configure.
* @base_queue: First queue number to associate with this port.
* @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 0-8. A value of 8 get 8 times the traffic
- * of a value of 1. A value of 0 indicates that no rounds
- * will be participated in. These priorities can be changed
- * on the fly while the pko is enabled. A priority of 9
- * indicates that static priority should be used. If static
- * priority is used all queues with static priority must be
- * contiguous starting at the base_queue, and lower numbered
- * queues have higher priority than higher numbered queues.
- * There must be num_queues elements in the array.
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 0-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. A value of 0 indicates that no rounds
+ * will be participated in. These priorities can be changed
+ * on the fly while the pko is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
*/
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
uint64_t num_queues,
@@ -440,7 +440,7 @@ void cvmx_pko_show_queue_map()
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
@@ -473,7 +473,7 @@ int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
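Reviewer note: the cvmx_pko_config_port() kerneldoc above describes the priority array semantics (0-8 weighted rounds, 9 for static priority, static-priority queues contiguous from base_queue). A hedged sketch of a call site, assuming the priority parameter is an array of uint64_t with num_queues elements; the port and queue numbers are made up:

        static void example_pko_port_setup(void)
        {
                /* Queue 0 gets static priority (9); queues 1 and 2 share the
                 * remaining bandwidth 2:1.  Per the rules above, static-priority
                 * queues must start at base_queue. */
                uint64_t prio[3] = { 9, 2, 1 };

                cvmx_pko_config_port(2 /* port */, 0 /* base_queue */,
                                     3 /* num_queues */, prio);
        }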
diff --git a/arch/mips/cavium-octeon/executive/cvmx-spi.c b/arch/mips/cavium-octeon/executive/cvmx-spi.c
index 74afb1710cd9..ef5198d13a0e 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -69,7 +69,7 @@ static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
/**
* Get current SPI4 initialization callbacks
*
- * @callbacks: Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
@@ -92,11 +92,11 @@ void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
@@ -138,11 +138,11 @@ int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
* with its correspondent system.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, negative of failure.
@@ -160,7 +160,7 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
/* NOTE: Calendar setup is not performed during restart */
- /* Refer to cvmx_spi_start_interface() for the full sequence */
+ /* Refer to cvmx_spi_start_interface() for the full sequence */
/* Callback to perform clock detection */
INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
@@ -182,11 +182,11 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
@@ -297,11 +297,11 @@ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
* Callback to setup calendar and miscellaneous settings before clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -382,7 +382,7 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
stxx_spi4_dat.u64 = 0;
/*Minimum needed by dynamic alignment */
stxx_spi4_dat.s.alpha = 32;
- stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
+ stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
stxx_spi4_dat.u64);
@@ -416,11 +416,11 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -494,11 +494,11 @@ int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -563,11 +563,11 @@ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -620,11 +620,11 @@ int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
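Reviewer note: the kerneldoc blocks above describe the SPI4 bring-up as a chain of overridable steps (reset, clock detect, training, calendar sync, interface up) held in a cvmx_spi_callbacks_t. A sketch of how a board port could swap in its own reset step while keeping the defaults, assuming cvmx_spi_get_callbacks() fills the structure passed to it as its kerneldoc says; my_board_spi_reset() is a hypothetical function matching the reset_cb signature used by INVOKE_CB():

        extern int my_board_spi_reset(int interface, cvmx_spi_mode_t mode);  /* hypothetical */

        static void example_override_spi_reset(void)
        {
                cvmx_spi_callbacks_t cb;

                cvmx_spi_get_callbacks(&cb);        /* copy the current defaults      */
                cb.reset_cb = my_board_spi_reset;   /* replace only the reset step    */
                cvmx_spi_set_callbacks(&cb);        /* install the modified callbacks */
        }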
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index 8b18a20cc7b3..3d17fac29359 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -74,26 +74,26 @@ EXPORT_SYMBOL(cvmx_sysinfo_get);
/**
* This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr:
- * Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
+ * Pointer to global physical memory descriptor
+ * (bootmem descriptor)
+ * @board_type: Octeon board type enumeration
*
* @board_rev_major:
- * Board major revision
+ * Board major revision
* @board_rev_minor:
- * Board minor revision
+ * Board minor revision
* @cpu_clock_hz:
- * CPU clock freqency in hertz
+ * CPU clock frequency in hertz
*
* Returns 0: Failure
- * 1: success
+ * 1: success
*/
int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
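Reviewer note: the return convention documented above is inverted relative to most kernel code (0 means failure, 1 means success). A hypothetical early-boot call, assuming the parameters not visible in the truncated signature above follow the kerneldoc order (board revisions, then CPU clock in Hz); bootmem_desc, board_type and the numeric values are placeholders:

        if (!cvmx_sysinfo_minimal_initialize(bootmem_desc,   /* phy_mem_desc_ptr  */
                                             board_type,     /* board enumeration */
                                             1, 0,           /* rev major / minor */
                                             500000000))     /* cpu_clock_hz      */
                pr_err("cvmx_sysinfo_minimal_initialize() failed\n");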
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
new file mode 100644
index 000000000000..71b213dbb621
--- /dev/null
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -0,0 +1,206 @@
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#define TIMER_NUM 3
+
+static bool reset_stats;
+
+struct latency_info {
+ u64 io_interval;
+ u64 cpu_interval;
+ u64 timer_start1;
+ u64 timer_start2;
+ u64 max_latency;
+ u64 min_latency;
+ u64 latency_sum;
+ u64 average_latency;
+ u64 interrupt_cnt;
+};
+
+static struct latency_info li;
+static struct dentry *dir;
+
+static int show_latency(struct seq_file *m, void *v)
+{
+ u64 cpuclk, avg, max, min;
+ struct latency_info curr_li = li;
+
+ cpuclk = octeon_get_clock_rate();
+
+ max = (curr_li.max_latency * 1000000000) / cpuclk;
+ min = (curr_li.min_latency * 1000000000) / cpuclk;
+ avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
+
+ seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
+ curr_li.interrupt_cnt, avg, max, min);
+ return 0;
+}
+
+static int oct_ilm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_latency, NULL);
+}
+
+static const struct file_operations oct_ilm_ops = {
+ .open = oct_ilm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int reset_statistics(void *data, u64 value)
+{
+ reset_stats = true;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+
+static int init_debufs(void)
+{
+ struct dentry *show_dentry;
+ dir = debugfs_create_dir("oct_ilm", 0);
+ if (!dir) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
+ return -1;
+ }
+
+ show_dentry = debugfs_create_file("statistics", 0222, dir, NULL,
+ &oct_ilm_ops);
+ if (!show_dentry) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
+ return -1;
+ }
+
+ show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
+ &reset_statistics_ops);
+ if (!show_dentry) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void init_latency_info(struct latency_info *li, int startup)
+{
+ /* interval in milliseconds after which the interrupt will
+ * be triggered
+ */
+ int interval = 1;
+
+ if (startup) {
+ /* Calculate how much the IO clock and CPU clock advance
+ * in 'interval' milliseconds
+ */
+ li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
+ li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
+ }
+ li->timer_start1 = 0;
+ li->timer_start2 = 0;
+ li->max_latency = 0;
+ li->min_latency = (u64)-1;
+ li->latency_sum = 0;
+ li->interrupt_cnt = 0;
+}
+
+
+static void start_timer(int timer, u64 interval)
+{
+ union cvmx_ciu_timx timx;
+ unsigned long flags;
+
+ timx.u64 = 0;
+ timx.s.one_shot = 1;
+ timx.s.len = interval;
+ raw_local_irq_save(flags);
+ li.timer_start1 = read_c0_cvmcount();
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force wait until register is written. */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+ li.timer_start2 = read_c0_cvmcount();
+ raw_local_irq_restore(flags);
+}
+
+
+static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
+{
+ u64 last_latency;
+ u64 last_int_cnt;
+
+ if (reset_stats) {
+ init_latency_info(&li, 0);
+ reset_stats = false;
+ } else {
+ last_int_cnt = read_c0_cvmcount();
+ last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
+ li.interrupt_cnt++;
+ li.latency_sum += last_latency;
+ if (last_latency > li.max_latency)
+ li.max_latency = last_latency;
+ if (last_latency < li.min_latency)
+ li.min_latency = last_latency;
+ }
+ start_timer(TIMER_NUM, li.io_interval);
+ return IRQ_HANDLED;
+}
+
+static void disable_timer(int timer)
+{
+ union cvmx_ciu_timx timx;
+
+ timx.s.one_shot = 0;
+ timx.s.len = 0;
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force immediate write of timer register */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+}
+
+static __init int oct_ilm_module_init(void)
+{
+ int rc;
+ int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
+
+ rc = init_debufs();
+ if (rc) {
+ WARN(1, "Could not create debugfs entries");
+ return rc;
+ }
+
+ rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
+ "oct_ilm", 0);
+ if (rc) {
+ WARN(1, "Could not acquire IRQ %d", irq);
+ goto err_irq;
+ }
+
+ init_latency_info(&li, 1);
+ start_timer(TIMER_NUM, li.io_interval);
+
+ return 0;
+err_irq:
+ debugfs_remove_recursive(dir);
+ return rc;
+}
+
+static __exit void oct_ilm_module_exit(void)
+{
+ disable_timer(TIMER_NUM);
+ if (dir)
+ debugfs_remove_recursive(dir);
+ free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0);
+}
+
+module_exit(oct_ilm_module_exit);
+module_init(oct_ilm_module_init);
+MODULE_AUTHOR("Venkat Subbiah, Cavium");
+MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
+MODULE_LICENSE("GPL");
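Reviewer note: to restate what the new oct_ilm module measures, start_timer() records timer_start1 just before arming CIU one-shot timer 3 for cpu_interval core cycles, and the interrupt handler treats anything past that expected expiry as latency. The arithmetic below is pulled out of cvm_oct_ciu_timer_interrupt() and show_latency() as a clarity sketch, reusing the names from the file above:

        static u64 last_latency_ns(const struct latency_info *li)
        {
                u64 expected = li->timer_start1 + li->cpu_interval;  /* cycle the timer should fire at */
                u64 cycles   = read_c0_cvmcount() - expected;        /* how late the handler ran       */

                return (cycles * 1000000000ULL) / octeon_get_clock_rate();
        }

Once loaded, the results appear under debugfs (typically mounted at /sys/kernel/debug) as oct_ilm/statistics, and writing any value to oct_ilm/reset sets reset_stats so the next interrupt clears the counters.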
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 46f5dbceeecc..156aa6143e11 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1542,7 +1542,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
if (line == 3) /* MIO */
switch (bit) {
- case 2: /* IPD_DRP */
+ case 2: /* IPD_DRP */
case 8 ... 11: /* Timers */
case 48: /* PTP */
edge = true;
@@ -1553,7 +1553,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
else if (line == 6) /* PKT */
switch (bit) {
case 52 ... 53: /* ILK_DRP */
- case 8 ... 12: /* GMX_DRP */
+ case 8 ... 12: /* GMX_DRP */
edge = true;
break;
default:
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 0ba0eb96d9ac..64e08df51d65 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -116,15 +116,15 @@
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -316,9 +316,9 @@ EXC( STORE t0, -8(dst), s_exc_p1u)
src_unaligned:
#define rem t8
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, cleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -326,13 +326,13 @@ src_unaligned:
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), l_exc)
-EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), l_exc_copy)
EXC( LDREST t1, REST(1)(src), l_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
EXC( LDREST t2, REST(2)(src), l_exc_copy)
EXC( LDREST t3, REST(3)(src), l_exc_copy)
ADD src, src, 4*NBYTES
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 3c1b625a5859..389512e2abd6 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -410,7 +410,7 @@ int __init octeon_prune_device_tree(void)
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
- if (pip >= 0)
+ if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i, &mac_addr_base);
}
diff --git a/arch/mips/cavium-octeon/octeon_3xxx.dts b/arch/mips/cavium-octeon/octeon_3xxx.dts
index f28b2d0fde22..88cb42d4cc49 100644
--- a/arch/mips/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/cavium-octeon/octeon_3xxx.dts
@@ -3,7 +3,7 @@
* OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
*
* This device tree is pruned and patched by early boot code before
- * use. Because of this, it contains a super-set of the available
+ * use. Because of this, it contains a super-set of the available
* devices and properties.
*/
/ {
@@ -433,12 +433,12 @@
cavium,t-we = <45>;
cavium,t-rd-hld = <35>;
cavium,t-wr-hld = <45>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <35>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@4 {
@@ -450,12 +450,12 @@
cavium,t-we = <320>;
cavium,t-rd-hld = <320>;
cavium,t-wr-hld = <320>;
- cavium,t-pause = <320>;
- cavium,t-wait = <320>;
- cavium,t-page = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@5 {
@@ -467,12 +467,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <30>;
- cavium,t-pause = <0>;
- cavium,t-wait = <30>;
- cavium,t-page = <320>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <16>;
};
cavium,cs-config@6 {
@@ -484,12 +484,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <70>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <320>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,wait-mode;
cavium,bus-width = <16>;
};
diff --git a/arch/mips/cavium-octeon/octeon_68xx.dts b/arch/mips/cavium-octeon/octeon_68xx.dts
index 1839468932b6..79b46fcb0a11 100644
--- a/arch/mips/cavium-octeon/octeon_68xx.dts
+++ b/arch/mips/cavium-octeon/octeon_68xx.dts
@@ -3,7 +3,7 @@
* OCTEON 68XX device tree skeleton.
*
* This device tree is pruned and patched by early boot code before
- * use. Because of this, it contains a super-set of the available
+ * use. Because of this, it contains a super-set of the available
* devices and properties.
*/
/ {
@@ -469,12 +469,12 @@
cavium,t-we = <35>;
cavium,t-rd-hld = <25>;
cavium,t-wr-hld = <35>;
- cavium,t-pause = <0>;
- cavium,t-wait = <300>;
- cavium,t-page = <25>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <25>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@4 {
@@ -486,12 +486,12 @@
cavium,t-we = <320>;
cavium,t-rd-hld = <320>;
cavium,t-wr-hld = <320>;
- cavium,t-pause = <320>;
- cavium,t-wait = <320>;
- cavium,t-page = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@5 {
@@ -503,12 +503,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <300>;
- cavium,t-pause = <0>;
- cavium,t-wait = <300>;
- cavium,t-page = <310>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <310>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <16>;
};
cavium,cs-config@6 {
@@ -520,12 +520,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <30>;
- cavium,t-pause = <0>;
- cavium,t-wait = <30>;
- cavium,t-page = <310>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <310>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,wait-mode;
cavium,bus-width = <16>;
};
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
index 428864b2ba41..7b066bbca86d 100644
--- a/arch/mips/cavium-octeon/octeon_boot.h
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -31,7 +31,7 @@ struct boot_init_vector {
uint32_t k0_val;
/* Address of boot info block structure */
uint64_t boot_info_addr;
- uint32_t flags; /* flags */
+ uint32_t flags; /* flags */
uint32_t pad;
};
@@ -53,20 +53,20 @@ struct linux_app_boot_info {
/* If not to copy a lot of bootloader's structures
here is only offset of requested member */
-#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
/* hardcoded in bootloader */
-#define LABI_ADDR_IN_BOOTLOADER 0x700
+#define LABI_ADDR_IN_BOOTLOADER 0x700
#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
#define LABI_SIGNATURE 0xAABBCC01
/* from uboot-headers/octeon_mem_map.h */
-#define EXCEPTION_BASE_INCR (4 * 1024)
+#define EXCEPTION_BASE_INCR (4 * 1024)
/* Increment size for exception base addresses (4k minimum) */
-#define EXCEPTION_BASE_BASE 0
-#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
-#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
#endif /* __OCTEON_BOOT_H__ */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d7e0a09f77c2..c594a3d4f743 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -319,7 +319,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate);
* exists on most Cavium evaluation boards. If it doesn't exist, then
* this function doesn't do anything.
*
- * @s: String to write
+ * @s: String to write
*/
void octeon_write_lcd(const char *s)
{
@@ -341,7 +341,7 @@ void octeon_write_lcd(const char *s)
/**
* Return the console uart passed by the bootloader
*
- * Returns uart (0 or 1)
+ * Returns uart (0 or 1)
*/
int octeon_get_boot_uart(void)
{
@@ -805,7 +805,7 @@ void __init prom_init(void)
/*
* To do: switch parsing to new style, something like:
* parse_crashkernel(arg, sysinfo->system_dram_size,
- * &crashk_size, &crashk_base);
+ * &crashk_size, &crashk_base);
*/
#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
@@ -1013,7 +1013,7 @@ void __init plat_mem_setup(void)
}
/*
- * Emit one character to the boot UART. Exported for use by the
+ * Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
int prom_putchar(char c)
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ee1fb9f7f517..295137dfdc37 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -55,7 +55,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
/**
* Cause the function described by call_data to be executed on the passed
- * cpu. When the function has finished, increment the finished field of
+ * cpu. When the function has finished, increment the finished field of
* call_data.
*/
void octeon_send_ipi_single(int cpu, unsigned int action)
@@ -126,8 +126,8 @@ static void octeon_smp_setup(void)
#ifdef CONFIG_HOTPLUG_CPU
/*
- * The possible CPUs are all those present on the chip. We
- * will assign CPU numbers for possible cores as well. Cores
+ * The possible CPUs are all those present on the chip. We
+ * will assign CPU numbers for possible cores as well. Cores
* are always consecutively numberd from 0.
*/
for (id = 0; id < num_cores && id < NR_CPUS; id++) {
@@ -332,7 +332,7 @@ extern void kernel_entry(unsigned long arg1, ...);
static void start_after_reset(void)
{
- kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
+ kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
}
static int octeon_update_boot_vector(unsigned int cpu)
@@ -401,7 +401,7 @@ static int __cpuinit register_cavium_notifier(void)
}
late_initcall(register_cavium_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
diff --git a/arch/mips/cobalt/led.c b/arch/mips/cobalt/led.c
index d3ce6fa1dc74..32265f514e3f 100644
--- a/arch/mips/cobalt/led.c
+++ b/arch/mips/cobalt/led.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt LED platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/mtd.c b/arch/mips/cobalt/mtd.c
index 691d620b6766..8db7b5d81560 100644
--- a/arch/mips/cobalt/mtd.c
+++ b/arch/mips/cobalt/mtd.c
@@ -25,7 +25,7 @@
static struct mtd_partition cobalt_mtd_partitions[] = {
{
.name = "firmware",
- .offset = 0x0,
+ .offset = 0x0,
.size = 0x80000,
},
};
diff --git a/arch/mips/cobalt/rtc.c b/arch/mips/cobalt/rtc.c
index 3ab39898b4e4..a6bc75ada9df 100644
--- a/arch/mips/cobalt/rtc.c
+++ b/arch/mips/cobalt/rtc.c
@@ -46,7 +46,7 @@ static __init int cobalt_rtc_add(void)
return -ENOMEM;
retval = platform_device_add_resources(pdev, cobalt_rtc_resource,
- ARRAY_SIZE(cobalt_rtc_resource));
+ ARRAY_SIZE(cobalt_rtc_resource));
if (retval)
goto err_free_device;
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index ea87d43ba607..e3a3836508ec 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -1,5 +1,6 @@
CONFIG_ATH79=y
CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP136=y
CONFIG_ATH79_MACH_AP81=y
CONFIG_ATH79_MACH_DB120=y
CONFIG_ATH79_MACH_PB44=y
diff --git a/arch/mips/configs/pnx8550_jbs_defconfig b/arch/mips/configs/pnx8550_jbs_defconfig
deleted file mode 100644
index 1d1f2067f3e6..000000000000
--- a/arch/mips/configs/pnx8550_jbs_defconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_PNX8550_JBS=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SGI_IOC4=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
-CONFIG_8139TOO_8129=y
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_PNX8XXX=y
-CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/pnx8550_stb810_defconfig b/arch/mips/configs/pnx8550_stb810_defconfig
deleted file mode 100644
index 15c66a571f99..000000000000
--- a/arch/mips/configs/pnx8550_stb810_defconfig
+++ /dev/null
@@ -1,92 +0,0 @@
-CONFIG_PNX8550_STB810=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
-CONFIG_NATSEMI=y
-CONFIG_CHELSIO_T3=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
new file mode 100644
index 000000000000..d1741bcf8949
--- /dev/null
+++ b/arch/mips/configs/rt305x_defconfig
@@ -0,0 +1,167 @@
+CONFIG_RALINK=y
+CONFIG_DTB_RT305X_EVAL=y
+CONFIG_CPU_MIPS32_R2=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_HAMRADIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_EEPROM_93CX6=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_ISDN=y
+CONFIG_INPUT=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_STAGING=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_AVERAGE=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 82c852818781..22afed16ccde 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -55,70 +55,70 @@
* DS2100/3100's, aka kn01, aka Pmax:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 SCSI
- * 3 Lance Ethernet
- * 4 DZ11 serial
- * 5 RTC
- * 6 Memory Controller & Video
- * 7 FPU
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 SCSI
+ * 3 Lance Ethernet
+ * 4 DZ11 serial
+ * 5 RTC
+ * 6 Memory Controller & Video
+ * 7 FPU
*
* DS5000/200, aka kn02, aka 3max:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 TurboChannel
- * 3 RTC
- * 4 Reserved
- * 5 Memory Controller
- * 6 Reserved
- * 7 FPU
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory Controller
+ * 6 Reserved
+ * 7 FPU
*
* DS5000/1xx's, aka kn02ba, aka 3min:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 TurboChannel Slot 0
- * 3 TurboChannel Slot 1
- * 4 TurboChannel Slot 2
- * 5 TurboChannel Slot 3 (ASIC)
- * 6 Halt button
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel Slot 0
+ * 3 TurboChannel Slot 1
+ * 4 TurboChannel Slot 2
+ * 5 TurboChannel Slot 3 (ASIC)
+ * 6 Halt button
+ * 7 FPU/R4k timer
*
* DS5000/2x's, aka kn02ca, aka maxine:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Periodic Interrupt (100usec)
- * 3 RTC
- * 4 I/O write timeout
- * 5 TurboChannel (ASIC)
- * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Periodic Interrupt (100usec)
+ * 3 RTC
+ * 4 I/O write timeout
+ * 5 TurboChannel (ASIC)
+ * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
+ * 7 FPU/R4k timer
*
* DS5000/2xx's, aka kn03, aka 3maxplus:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 System Board (ASIC)
- * 3 RTC
- * 4 Reserved
- * 5 Memory
- * 6 Halt Button
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 System Board (ASIC)
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory
+ * 6 Halt Button
+ * 7 FPU/R4k timer
*
* We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return. If multiple IRQs are pending then we will
+ * then we just return. If multiple IRQs are pending then we will
* just take another exception, big deal.
*/
.align 5
@@ -146,7 +146,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,cpu_mask_nr_tbl
+ PTR_LA t1,cpu_mask_nr_tbl
1: lw t2,(t1)
nop
and t2,t0
@@ -195,7 +195,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,asic_mask_nr_tbl
+ PTR_LA t1,asic_mask_nr_tbl
2: lw t2,(t1)
nop
and t2,t0
@@ -221,7 +221,7 @@
FEXPORT(cpu_all_int) # HALT, timers, software junk
li a0,DEC_CPU_IRQ_BASE
srl t0,CAUSEB_IP
- li t1,CAUSEF_IP>>CAUSEB_IP # mask
+ li t1,CAUSEF_IP>>CAUSEB_IP # mask
b 1f
li t2,4 # nr of bits / 2
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c
index ebb73c51d821..f434b759e3b9 100644
--- a/arch/mips/dec/kn02xa-berr.c
+++ b/arch/mips/dec/kn02xa-berr.c
@@ -128,8 +128,8 @@ void __init dec_kn02xa_be_init(void)
{
volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
- /* For KN04 we need to make sure EE (?) is enabled in the MB. */
- if (current_cpu_type() == CPU_R4000SC)
+ /* For KN04 we need to make sure EE (?) is enabled in the MB. */
+ if (current_cpu_type() == CPU_R4000SC)
*mbcs |= KN4K_MB_CSR_EE;
fast_iob();
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
index 8c8498159e43..c0d1522d448f 100644
--- a/arch/mips/dec/prom/call_o32.S
+++ b/arch/mips/dec/prom/call_o32.S
@@ -14,7 +14,7 @@
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
/* Frame size for both of the above. */
#define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC)
diff --git a/arch/mips/dec/prom/dectypes.h b/arch/mips/dec/prom/dectypes.h
index 707b6f1f5a9d..69ea5b9c8190 100644
--- a/arch/mips/dec/prom/dectypes.h
+++ b/arch/mips/dec/prom/dectypes.h
@@ -1,5 +1,5 @@
#ifndef DECTYPES
-#define DECTYPES
+#define DECTYPES
#define DS2100_3100 1 /* DS2100/3100 Pmax */
#define DS5000_200 2 /* DS5000/200 3max */
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 93f1239af524..ab169046e442 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -103,7 +103,7 @@ void __init prom_init(void)
if (prom_is_rex(magic))
rex_clear_cache();
- /* Register the early console. */
+ /* Register the early console. */
register_prom_console();
/* Were we compiled with the right CPU option? */
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 8c62316f22f4..0aadac742900 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -22,7 +22,7 @@ volatile unsigned long mem_err; /* So we know an error occurred */
/*
* Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
*/
#define CHUNK_SIZE 0x400000
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index b874accd878a..741cb4235bde 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
/*
* IRQ routing and priority tables. Priorites are set as follows:
*
- * KN01 KN230 KN02 KN02-BA KN02-CA KN03
+ * KN01 KN230 KN02 KN02-BA KN02-CA KN03
*
* MEMORY CPU CPU CPU ASIC CPU CPU
* RTC CPU CPU CPU ASIC CPU CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
/*
* Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
* DS5000/150, aka 4min.
*/
static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
diff --git a/arch/mips/dec/wbflush.c b/arch/mips/dec/wbflush.c
index 43feddd5e19c..56bda4a396b5 100644
--- a/arch/mips/dec/wbflush.c
+++ b/arch/mips/dec/wbflush.c
@@ -2,9 +2,9 @@
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
- * DECstation 3100 Desktop Workstation Functional Specification
- * DECstation 5000/200 KN02 System Module Functional Specification
- * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
+ * DECstation 3100 Desktop Workstation Functional Specification
+ * DECstation 5000/200 KN02 System Module Functional Specification
+ * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index b5f08255d9c7..b880a83e4d4e 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -292,7 +292,7 @@ void __init arch_init_irq(void)
asmlinkage void plat_irq_dispatch(void)
{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
diff --git a/arch/mips/emma/markeins/platform.c b/arch/mips/emma/markeins/platform.c
index b05b08b92a34..99ea004730a7 100644
--- a/arch/mips/emma/markeins/platform.c
+++ b/arch/mips/emma/markeins/platform.c
@@ -190,7 +190,7 @@ static struct platform_device markeins_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
- .platform_data = &markeins_flash_data,
+ .platform_data = &markeins_flash_data,
},
.num_resources = 1,
.resource = &markeins_flash_resource,
diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c
index feceebcfff42..d71005835c00 100644
--- a/arch/mips/emma/markeins/setup.c
+++ b/arch/mips/emma/markeins/setup.c
@@ -28,7 +28,7 @@
#include <asm/emma/emma2rh.h>
-#define USE_CPU_COUNTER_TIMER /* whether we use cpu counter */
+#define USE_CPU_COUNTER_TIMER /* whether we use cpu counter */
extern void markeins_led(const char *);
diff --git a/arch/mips/fw/arc/file.c b/arch/mips/fw/arc/file.c
index 30335341b447..a8b08032348f 100644
--- a/arch/mips/fw/arc/file.c
+++ b/arch/mips/fw/arc/file.c
@@ -15,7 +15,7 @@
LONG
ArcGetDirectoryEntry(ULONG FileID, struct linux_vdirent *Buffer,
- ULONG N, ULONG *Count)
+ ULONG N, ULONG *Count)
{
return ARC_CALL4(get_vdirent, FileID, Buffer, N, Count);
}
@@ -69,7 +69,7 @@ ArcGetFileInformation(ULONG FileID, struct linux_finfo *Information)
}
LONG ArcSetFileInformation(ULONG FileID, ULONG AttributeFlags,
- ULONG AttributeMask)
+ ULONG AttributeMask)
{
return ARC_CALL3(set_finfo, FileID, AttributeFlags, AttributeMask);
}
diff --git a/arch/mips/fw/arc/identify.c b/arch/mips/fw/arc/identify.c
index 54a33c756f61..f90266c02c9d 100644
--- a/arch/mips/fw/arc/identify.c
+++ b/arch/mips/fw/arc/identify.c
@@ -100,7 +100,7 @@ void __init prom_identify_arch(void)
if (p == NULL) {
#ifdef CONFIG_SGI_IP27
/* IP27 PROM misbehaves, seems to not implement ARC
- GetChild(). So we just assume it's an IP27. */
+ GetChild(). So we just assume it's an IP27. */
iname = "SGI-IP27";
#else
iname = "Unknown";
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 8b8eea2b6cf6..5537b94572b2 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -1,6 +1,6 @@
/*
* memory.c: PROM library functions for acquiring/using memory descriptors
- * given to us from the ARCS firmware.
+ * given to us from the ARCS firmware.
*
* Copyright (C) 1996 by David S. Miller
* Copyright (C) 1999, 2000, 2001 by Ralf Baechle
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index b7f9dd3c93c6..7e8ba5ce95be 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -11,7 +11,7 @@
#include <asm/bcache.h>
/*
- * IP22 boardcache is not compatible with board caches. Thus we disable it
+ * IP22 boardcache is not compatible with board caches. Thus we disable it
* during romvec action. Since r4xx0.c is always compiled and linked with your
* kernel, this shouldn't cause any harm regardless what MIPS processor you
* have.
diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S
index e0a68713b3c3..b308b2a0613e 100644
--- a/arch/mips/fw/lib/call_o32.S
+++ b/arch/mips/fw/lib/call_o32.S
@@ -14,7 +14,7 @@
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
/* Frame size for static register */
#define O32_FRAMESZ (SZREG * O32_STATC)
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 96ba99202758..2c2cb182af4e 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -28,20 +28,20 @@
* registers
*/
#define PROM_GET_MEMCONF 58
-#define PROM_GET_HWCONF 61
+#define PROM_GET_HWCONF 61
#define PROM_VEC (u64 *)CKSEG1ADDR(0x1fc00000)
#define PROM_ENTRY(x) (PROM_VEC + (x))
-#define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
-#define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
-#define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
-#define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
+#define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
+#define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
+#define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
+#define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
#ifdef CONFIG_64BIT
static u8 o32_stk[16384];
-#define O32_STK &o32_stk[sizeof(o32_stk)]
+#define O32_STK &o32_stk[sizeof(o32_stk)]
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
@@ -52,13 +52,13 @@ void __PROM_O32(__prom_get_memconf, (void (*)(void *), void *, void *));
u32 __PROM_O32(__prom_get_hwconf, (u32 (*)(void), void *));
#define _prom_putchar(x) __prom_putchar(___prom_putchar, O32_STK, x)
-#define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x)
+#define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x)
#define _prom_get_memconf(x) __prom_get_memconf(___prom_get_memconf, O32_STK, x)
#define _prom_get_hwconf() __prom_get_hwconf(___prom_get_hwconf, O32_STK)
#else
#define _prom_putchar(x) ___prom_putchar(x)
-#define _prom_getenv(x) ___prom_getenv(x)
+#define _prom_getenv(x) ___prom_getenv(x)
#define _prom_get_memconf(x) ___prom_get_memconf(x)
#define _prom_get_hwconf(x) ___prom_get_hwconf(x)
#endif
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 9252d9b50e59..909bb6984866 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -14,12 +14,12 @@
struct mips_abi {
int (* const setup_frame)(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr,
- sigset_t *set);
+ struct pt_regs *regs, int signr,
+ sigset_t *set);
const unsigned long signal_return_offset;
int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr,
- sigset_t *set, siginfo_t *info);
+ struct pt_regs *regs, int signr,
+ sigset_t *set, siginfo_t *info);
const unsigned long rt_signal_return_offset;
const unsigned long restart;
};
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 569f80aacbd2..13d61c002e4f 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -51,14 +51,14 @@
* Returns the physical address of a CKSEGx / XKPHYS address
*/
#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
-#define XPHYSADDR(a) ((_ACAST64_(a)) & \
+#define XPHYSADDR(a) ((_ACAST64_(a)) & \
_CONST64_(0x000000ffffffffff))
#ifdef CONFIG_64BIT
/*
* Memory segments (64bit kernel mode addresses)
- * The compatibility segments use the full 64-bit sign extended value. Note
+ * The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
@@ -131,7 +131,7 @@
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
- * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
+ * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 608cfcfbb3ea..164a21e65b42 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -33,12 +33,12 @@
* Not used for the kernel but here seems to be the right place.
*/
#ifdef __PIC__
-#define CPRESTORE(register) \
+#define CPRESTORE(register) \
.cprestore register
-#define CPADD(register) \
+#define CPADD(register) \
.cpadd register
-#define CPLOAD(register) \
- .cpload register
+#define CPLOAD(register) \
+ .cpload register
#else
#define CPRESTORE(register)
#define CPADD(register)
@@ -48,35 +48,35 @@
/*
* LEAF - declare leaf routine
*/
-#define LEAF(symbol) \
- .globl symbol; \
- .align 2; \
- .type symbol, @function; \
- .ent symbol, 0; \
+#define LEAF(symbol) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
symbol: .frame sp, 0, ra
/*
* NESTED - declare nested routine entry point
*/
-#define NESTED(symbol, framesize, rpc) \
- .globl symbol; \
- .align 2; \
- .type symbol, @function; \
- .ent symbol, 0; \
+#define NESTED(symbol, framesize, rpc) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
symbol: .frame sp, framesize, rpc
/*
* END - mark end of function
*/
-#define END(function) \
- .end function; \
+#define END(function) \
+ .end function; \
.size function, .-function
/*
* EXPORT - export definition of symbol
*/
#define EXPORT(symbol) \
- .globl symbol; \
+ .globl symbol; \
symbol:
/*
@@ -90,16 +90,16 @@ symbol:
/*
* ABS - export absolute symbol
*/
-#define ABS(symbol,value) \
- .globl symbol; \
+#define ABS(symbol,value) \
+ .globl symbol; \
symbol = value
-#define PANIC(msg) \
+#define PANIC(msg) \
.set push; \
- .set reorder; \
- PTR_LA a0, 8f; \
- jal panic; \
-9: b 9b; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal panic; \
+9: b 9b; \
.set pop; \
TEXT(msg)
@@ -107,31 +107,31 @@ symbol = value
* Print formatted string
*/
#ifdef CONFIG_PRINTK
-#define PRINT(string) \
+#define PRINT(string) \
.set push; \
- .set reorder; \
- PTR_LA a0, 8f; \
- jal printk; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal printk; \
.set pop; \
TEXT(string)
#else
#define PRINT(string)
#endif
-#define TEXT(msg) \
+#define TEXT(msg) \
.pushsection .data; \
-8: .asciiz msg; \
+8: .asciiz msg; \
.popsection;
/*
* Build text tables
*/
-#define TTABLE(string) \
+#define TTABLE(string) \
.pushsection .text; \
- .word 1f; \
+ .word 1f; \
.popsection \
.pushsection .data; \
-1: .asciiz string; \
+1: .asciiz string; \
.popsection
/*
@@ -143,13 +143,13 @@ symbol = value
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
-#define PREF(hint,addr) \
+#define PREF(hint,addr) \
.set push; \
.set mips4; \
pref hint, addr; \
.set pop
-#define PREFX(hint,addr) \
+#define PREFX(hint,addr) \
.set push; \
.set mips4; \
prefx hint, addr; \
@@ -166,42 +166,42 @@ symbol = value
* MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
*/
#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
.set push; \
.set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
+ beqz rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
.set push; \
.set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
+ bnez rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
.set push; \
.set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
+ bnezl rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
.set push; \
.set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
+ beqzl rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
(_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
movz rd, rs, rt
#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
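For orientation, MOVN(rd, rs, rt) copies rs into rd only when rt is non-zero, and MOVZ copies only when rt is zero; on MIPS IV and later the macros emit the movn/movz instructions directly, while the older-ISA variants above open-code the same effect with a branch around a move. A minimal C sketch of the intended semantics (illustrative only, not part of the patch):

/* C equivalents of the MOVN/MOVZ assembler macros above. */
static inline long movn_c(long rd, long rs, long rt)
{
	if (rt != 0)		/* MOVN: move when rt is non-zero */
		rd = rs;
	return rd;
}

static inline long movz_c(long rd, long rs, long rt)
{
	if (rt == 0)		/* MOVZ: move when rt is zero */
		rd = rs;
	return rd;
}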
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 01cc6ba64831..08b607969a16 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -1,5 +1,5 @@
/*
- * Atomic operations that C can't guarantee us. Useful for
+ * Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* But use these as seldom as possible since they are much more slower
@@ -21,7 +21,7 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) { (i) }
/*
* atomic_read - read atomic variable
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24e972d..314ab5532019 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
@@ -43,7 +43,7 @@
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
+ * two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
@@ -57,7 +57,7 @@
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
@@ -92,7 +92,7 @@
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
# define fast_wmb() __syncw()
# define fast_rmb() barrier()
@@ -158,7 +158,7 @@
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB " sync \n"
+#define __WEAK_LLSC_MB " sync \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
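The comment block above spells out when read_barrier_depends() suffices: only when the second read is data-dependent on the first, as in the quoted p/q example. A condensed C sketch in the spirit of that example, assuming the writer pairs it with the conventional smp_wmb() (the writer side is not part of the quoted hunk):

/* CPU 0 publishes b through p; CPU 1 consumes via a dependent load. */
int a = 1, b = 2;
int *p = &a;

void writer(void)			/* runs on CPU 0 */
{
	b = 4;
	smp_wmb();			/* order the store to b before the store to p */
	p = &b;
}

void reader(void)			/* runs on CPU 1 */
{
	int *q, d;

	q = p;
	read_barrier_depends();		/* enough here: the read of *q depends on p */
	d = *q;				/* sees b's new value once q observes &b */
}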
diff --git a/arch/mips/include/asm/bcache.h b/arch/mips/include/asm/bcache.h
index 0ba9d6ef76a7..8c34484cea82 100644
--- a/arch/mips/include/asm/bcache.h
+++ b/arch/mips/include/asm/bcache.h
@@ -11,7 +11,7 @@
/* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent,
- chipset implemented caches. On machines with other CPUs the CPU does the
+ chipset implemented caches. On machines with other CPUs the CPU does the
cache thing itself. */
struct bcache_ops {
void (*bc_enable)(void);
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 46ac73abd5ee..71305a8b3d78 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -26,15 +26,15 @@
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
-#define __INS "ins "
-#define __EXT "ext "
+#define __INS "ins "
+#define __EXT "ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
-#define __INS "dins "
-#define __EXT "dext "
+#define __INS "dins "
+#define __EXT "dext "
#endif
/*
@@ -357,7 +357,7 @@ static inline int test_and_clear_bit(unsigned long nr,
"1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
- " " __SC "%2, %1 \n"
+ " " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
@@ -371,10 +371,10 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
+ " " __INS "%0, $0, %3, 1 \n"
+ " " __SC "%0, %1 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "ir" (bit)
: "memory");
@@ -387,10 +387,10 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
- " " __SC "%2, %1 \n"
+ " " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
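The __LL/__SC strings above select ll/sc (or lld/scd when _MIPS_SZLONG == 64) inside a retry loop; the store-conditional fails if another CPU touched the word, and the loop repeats. A C-level sketch of the pattern the inline assembly implements, where load_linked() and store_conditional() are hypothetical stand-ins for the ll/sc instruction pair:

/* Sketch of the ll/sc retry pattern behind test_and_clear_bit(). */
static inline int test_and_clear_bit_sketch(unsigned long nr,
					    volatile unsigned long *addr)
{
	volatile unsigned long *m = addr + (nr >> SZLONG_LOG);
	unsigned long mask = 1UL << (nr & SZLONG_MASK);
	unsigned long old, new;

	do {
		old = load_linked(m);			/* ll / lld */
		new = old & ~mask;			/* clear the bit */
	} while (!store_conditional(m, new));		/* sc / scd, retry on failure */

	return (old & mask) != 0;			/* previous bit value */
}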
@@ -444,7 +444,7 @@ static inline int test_and_change_bit(unsigned long nr,
do {
__asm__ __volatile__(
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 7a51d879e6ca..b71dd5b16085 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -44,19 +44,19 @@
/*
* Valid machtype for group PMC-MSP
*/
-#define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */
-#define MACH_MSP4200_GW 1 /* PMC-Sierra MSP4200 Gateway demo */
-#define MACH_MSP4200_FPGA 2 /* PMC-Sierra MSP4200 Emulation */
-#define MACH_MSP7120_EVAL 3 /* PMC-Sierra MSP7120 Evaluation */
-#define MACH_MSP7120_GW 4 /* PMC-Sierra MSP7120 Residential GW */
-#define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */
-#define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */
+#define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */
+#define MACH_MSP4200_GW 1 /* PMC-Sierra MSP4200 Gateway demo */
+#define MACH_MSP4200_FPGA 2 /* PMC-Sierra MSP4200 Emulation */
+#define MACH_MSP7120_EVAL 3 /* PMC-Sierra MSP7120 Evaluation */
+#define MACH_MSP7120_GW 4 /* PMC-Sierra MSP7120 Residential GW */
+#define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */
+#define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */
/*
* Valid machtype for group Mikrotik
*/
-#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
-#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
+#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
+#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
/*
* Valid machtype for Loongson family
@@ -67,7 +67,7 @@
#define MACH_LEMOTE_ML2F7 3
#define MACH_LEMOTE_YL2F89 4
#define MACH_DEXXON_GDIUM2F10 5
-#define MACH_LEMOTE_NAS 6
+#define MACH_LEMOTE_NAS 6
#define MACH_LEMOTE_LL2F 7
#define MACH_LOONGSON_END 8
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/asm/break.h
new file mode 100644
index 000000000000..0ef11429a70b
--- /dev/null
+++ b/arch/mips/include/asm/break.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_BREAK_H
+#define __ASM_BREAK_H
+
+#ifdef __UAPI_ASM_BREAK_H
+#error "Error: Do not directly include <uapi/asm/break.h>"
+#endif
+#include <uapi/asm/break.h>
+
+/*
+ * Break codes used internally to the kernel.
+ */
+#define BRK_KDB 513 /* Used in KDB_ENTER() */
+#define BRK_MEMU 514 /* Used by FPU emulator */
+#define BRK_KPROBE_BP 515 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
+#define BRK_MULOVF 1023 /* Multiply overflow */
+
+#endif /* __ASM_BREAK_H */
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index 8f99c11ab665..68f37e3eccc7 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -8,20 +8,20 @@
* (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle
* (C) Copyright 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_CACHEOPS_H
-#define __ASM_CACHEOPS_H
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
/*
* Cache Operations available on all MIPS processors with R4000-style caches
*/
-#define Index_Invalidate_I 0x00
-#define Index_Writeback_Inv_D 0x01
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
#define Index_Load_Tag_I 0x04
#define Index_Load_Tag_D 0x05
#define Index_Store_Tag_I 0x08
#define Index_Store_Tag_D 0x09
#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I 0x00
+#define Hit_Invalidate_I 0x00
#else
#define Hit_Invalidate_I 0x10
#endif
@@ -39,8 +39,8 @@
/*
* R4000SC and R4400SC-specific cacheops
*/
-#define Index_Invalidate_SI 0x02
-#define Index_Writeback_Inv_SD 0x03
+#define Index_Invalidate_SI 0x02
+#define Index_Writeback_Inv_SD 0x03
#define Index_Load_Tag_SI 0x06
#define Index_Load_Tag_SD 0x07
#define Index_Store_Tag_SI 0x0A
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index f2f7c6c264da..ac3d2b8a20d4 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -194,7 +194,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
+ const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eee10dc07ac1..466069bd8465 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -146,7 +146,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
@@ -163,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
@@ -205,7 +205,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
\
switch (sizeof(*(__ptr))) { \
case 4: \
- __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
+ __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
break; \
case 8: \
if (sizeof(long) == 8) { \
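The ll/sc sequence assembled above gives cmpxchg() its usual contract: it returns the value found at the location, and the store happens only if that value matched the expected one. A typical caller therefore wraps it in a read-modify-retry loop; a minimal sketch of such a caller (generic usage, not taken from this patch):

/* Lock-free increment built on cmpxchg(): retry until nobody raced us. */
static inline void counter_inc(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);
}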
diff --git a/arch/mips/include/asm/compat-signal.h b/arch/mips/include/asm/compat-signal.h
index 6599a901b63e..64e0b9343b8c 100644
--- a/arch/mips/include/asm/compat-signal.h
+++ b/arch/mips/include/asm/compat-signal.h
@@ -18,9 +18,9 @@ static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
BUG_ON(sizeof(*d) != sizeof(*s));
BUG_ON(_NSIG_WORDS != 2);
- err = __put_user(s->sig[0], &d->sig[0]);
+ err = __put_user(s->sig[0], &d->sig[0]);
err |= __put_user(s->sig[0] >> 32, &d->sig[1]);
- err |= __put_user(s->sig[1], &d->sig[2]);
+ err |= __put_user(s->sig[1], &d->sig[2]);
err |= __put_user(s->sig[1] >> 32, &d->sig[3]);
return err;
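The realigned __put_user() calls above implement a simple layout conversion: each 64-bit sigset word is split into two 32-bit compat words. Ignoring the user-space copy and error handling, the mapping is:

/* 64-bit sigset word i  ->  32-bit compat words 2*i and 2*i+1 */
d->sig[0] = (u32)(s->sig[0]);		/* low  32 bits of word 0 */
d->sig[1] = (u32)(s->sig[0] >> 32);	/* high 32 bits of word 0 */
d->sig[2] = (u32)(s->sig[1]);		/* low  32 bits of word 1 */
d->sig[3] = (u32)(s->sig[1] >> 32);	/* high 32 bits of word 1 */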
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index ebaae9649f8a..c4bd54a7f5ce 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -120,7 +120,7 @@ struct compat_statfs {
typedef u32 compat_old_sigset_t; /* at least 32 bits */
-#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
+#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
@@ -168,7 +168,7 @@ typedef struct compat_siginfo {
s32 _addr; /* faulting insn/memory ref. */
} _sigfault;
- /* SIGPOLL, SIGXFSZ (To do ...) */
+ /* SIGPOLL, SIGXFSZ (To do ...) */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
@@ -179,7 +179,7 @@ typedef struct compat_siginfo {
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval;/* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index c507b931b484..1a57e8b4d092 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -14,7 +14,7 @@
#include <cpu-feature-overrides.h>
#ifndef current_cpu_type
-#define current_cpu_type() current_cpu_data.cputype
+#define current_cpu_type() current_cpu_data.cputype
#endif
/*
@@ -87,10 +87,10 @@
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mdmx
-#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
-#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
@@ -98,6 +98,9 @@
#ifndef cpu_has_rixi
#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
#endif
+#ifndef cpu_has_mmips
+#define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
@@ -108,11 +111,11 @@
#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
#endif
#ifndef cpu_has_pindexed_dcache
-#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
+#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
/*
- * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
+ * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
* such as the R10000 have I-Caches that snoop local stores; the embedded ones
* don't. For maintaining I-cache coherency this means we need to flush the
* D-cache all the way back to whever the I-cache does refills from, so the
@@ -130,6 +133,19 @@
#endif
#endif
+# define cpu_has_mips_1 (cpu_data[0].isa_level & MIPS_CPU_ISA_I)
+#ifndef cpu_has_mips_2
+# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
+#endif
+#ifndef cpu_has_mips_3
+# define cpu_has_mips_3 (cpu_data[0].isa_level & MIPS_CPU_ISA_III)
+#endif
+#ifndef cpu_has_mips_4
+# define cpu_has_mips_4 (cpu_data[0].isa_level & MIPS_CPU_ISA_IV)
+#endif
+#ifndef cpu_has_mips_5
+# define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V)
+#endif
# ifndef cpu_has_mips32r1
# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
# endif
@@ -148,8 +164,8 @@
*/
#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
-#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
-#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
cpu_has_mips64r1 | cpu_has_mips64r2)
@@ -159,7 +175,7 @@
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
@@ -191,7 +207,7 @@
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_zero_reg
-# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
+# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_gp_regs
# define cpu_has_64bit_gp_regs 0
@@ -260,4 +276,8 @@
#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
#endif
+#ifndef cpu_has_vz
+#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
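The cpu_has_* macros above are plain boolean tests against cpu_data[0], so callers can branch on features at run time (platforms may still hard-wire them via cpu-feature-overrides.h). A short usage sketch; the setup_*() helpers are hypothetical placeholders:

/* Choose code paths from the detected ISA level and ASEs. */
if (cpu_has_mips_r2)
	setup_r2_fastpath();		/* hypothetical helper */
else if (cpu_has_mips32 || cpu_has_mips64)
	setup_legacy_path();		/* hypothetical helper */

if (cpu_has_mmips)
	pr_info("CPU supports microMIPS\n");
if (cpu_has_vz)
	pr_info("CPU implements the VZ ASE\n");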
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index c454550eb0c0..41401d8eb7d1 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -52,14 +52,14 @@ struct cpuinfo_mips {
unsigned int cputype;
int isa_level;
int tlbsize;
- struct cache_desc icache; /* Primary I-cache */
- struct cache_desc dcache; /* Primary D or combined I/D cache */
- struct cache_desc scache; /* Secondary cache */
- struct cache_desc tcache; /* Tertiary/split secondary cache */
- int srsets; /* Shadow register sets */
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int srsets; /* Shadow register sets */
int core; /* physical core number */
#ifdef CONFIG_64BIT
- int vmbits; /* Virtual memory size in bits */
+ int vmbits; /* Virtual memory size in bits */
#endif
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
/*
@@ -68,12 +68,12 @@ struct cpuinfo_mips {
* exception resources, ASID spaces, etc, are common
* to all TCs within the same VPE.
*/
- int vpe_id; /* Virtual Processor number */
+ int vpe_id; /* Virtual Processor number */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
- int tc_id; /* Thread Context number */
+ int tc_id; /* Thread Context number */
#endif
- void *data; /* Additional data */
+ void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
#define NUM_WATCH_REGS 4
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 90112adb1940..dd86ab205483 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -1,6 +1,6 @@
/*
* cpu.h: Values of the PRId register used to match up
- * various MIPS cpu types.
+ * various MIPS cpu types.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2004 Maciej W. Rozycki
@@ -9,14 +9,14 @@
#define _ASM_CPU_H
/* Assigned Company values for bits 23:16 of the PRId Register
- (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
+ (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
MTI, the PRId register is defined in this (backwards compatible)
way:
+----------------+----------------+----------------+----------------+
- | Company Options| Company ID | Processor ID | Revision |
+ | Company Options| Company ID | Processor ID | Revision |
+----------------+----------------+----------------+----------------+
- 31 24 23 16 15 8 7
+ 31 24 23 16 15 8 7
I don't have docs for all the previous processors, but my impression is
that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
@@ -29,7 +29,7 @@
#define PRID_COMP_ALCHEMY 0x030000
#define PRID_COMP_SIBYTE 0x040000
#define PRID_COMP_SANDCRAFT 0x050000
-#define PRID_COMP_NXP 0x060000
+#define PRID_COMP_NXP 0x060000
#define PRID_COMP_TOSHIBA 0x070000
#define PRID_COMP_LSI 0x080000
#define PRID_COMP_LEXRA 0x0b0000
@@ -38,9 +38,9 @@
#define PRID_COMP_INGENIC 0xd00000
/*
- * Assigned values for the product ID register. In order to detect a
+ * Assigned values for the product ID register. In order to detect a
* certain CPU type exactly eventually additional registers may need to
- * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
+ * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
*/
#define PRID_IMP_R2000 0x0100
#define PRID_IMP_AU1_REV1 0x0100
@@ -96,19 +96,20 @@
#define PRID_IMP_1004K 0x9900
#define PRID_IMP_1074K 0x9a00
#define PRID_IMP_M14KC 0x9c00
+#define PRID_IMP_M14KEC 0x9e00
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
*/
-#define PRID_IMP_SB1 0x0100
-#define PRID_IMP_SB1A 0x1100
+#define PRID_IMP_SB1 0x0100
+#define PRID_IMP_SB1A 0x1100
/*
* These are the PRID's for when 23:16 == PRID_COMP_SANDCRAFT
*/
-#define PRID_IMP_SR71000 0x0400
+#define PRID_IMP_SR71000 0x0400
/*
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
@@ -145,7 +146,7 @@
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC
*/
-#define PRID_IMP_JZRISC 0x0200
+#define PRID_IMP_JZRISC 0x0200
/*
* These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
@@ -188,9 +189,9 @@
#define PRID_REV_R3000A 0x0030
#define PRID_REV_R3000 0x0020
#define PRID_REV_R2000A 0x0010
-#define PRID_REV_TX3912 0x0010
-#define PRID_REV_TX3922 0x0030
-#define PRID_REV_TX3927 0x0040
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
#define PRID_REV_VR4111 0x0050
#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
#define PRID_REV_VR4121 0x0060
@@ -217,9 +218,9 @@
* FPU implementation/revision register (CP1 control register 0).
*
* +---------------------------------+----------------+----------------+
- * | 0 | Implementation | Revision |
+ * | 0 | Implementation | Revision |
* +---------------------------------+----------------+----------------+
- * 31 16 15 8 7 0
+ * 31 16 15 8 7 0
*/
#define FPIR_IMP_NONE 0x0000
@@ -264,6 +265,7 @@ enum cpu_type_enum {
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
+ CPU_M14KEC,
/*
* MIPS64 class processors
@@ -322,6 +324,7 @@ enum cpu_type_enum {
#define MIPS_CPU_ULRI 0x00200000 /* CPU has ULRI feature */
#define MIPS_CPU_PCI 0x00400000 /* CPU has Perf Ctr Int indicator */
#define MIPS_CPU_RIXI 0x00800000 /* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
/*
* CPU ASE encodings
@@ -333,6 +336,6 @@ enum cpu_type_enum {
#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */
#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
-
+#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#endif /* _ASM_CPU_H */
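The PRId layout documented above (company options in bits 31:24, company ID in 23:16, processor ID in 15:8, revision in 7:0) decodes with plain shifts and masks. A small self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Decode a MIPS PRId value using the field layout described above. */
static void decode_prid(uint32_t prid)
{
	uint32_t company_opts = (prid >> 24) & 0xff;
	uint32_t company_id   = (prid >> 16) & 0xff;
	uint32_t processor_id = (prid >>  8) & 0xff;
	uint32_t revision     =  prid        & 0xff;

	printf("opts=%#x company=%#x cpu=%#x rev=%#x\n",
	       company_opts, company_id, processor_id, revision);
}

int main(void)
{
	decode_prid(0x00040100);	/* PRID_COMP_SIBYTE | PRID_IMP_SB1 */
	return 0;
}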
diff --git a/arch/mips/include/asm/dec/ioasic_addrs.h b/arch/mips/include/asm/dec/ioasic_addrs.h
index 4cbc1f8a1129..a8665a7611c2 100644
--- a/arch/mips/include/asm/dec/ioasic_addrs.h
+++ b/arch/mips/include/asm/dec/ioasic_addrs.h
@@ -25,22 +25,22 @@
*/
#define IOASIC_SYS_ROM (0*IOASIC_SLOT_SIZE) /* system board ROM */
#define IOASIC_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
-#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
-#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
-#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
+#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
+#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
+#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
#define IOASIC_VDAC_HI (5*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
-#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
+#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
#define IOASIC_VDAC_LO (7*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
-#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
-#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
+#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
+#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
#define IOASIC_ERRADDR (9*IOASIC_SLOT_SIZE) /* bus error address (3max+) */
-#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
+#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
#define IOASIC_ACC_BUS (10*IOASIC_SLOT_SIZE) /* ACCESS.bus (maxine) */
-#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
-#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
-#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
+#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
+#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
+#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
#define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE) /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
+#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
#define IOASIC_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h
index 88d9ffd74258..0eb3241de706 100644
--- a/arch/mips/include/asm/dec/kn01.h
+++ b/arch/mips/include/asm/dec/kn01.h
@@ -57,12 +57,12 @@
/*
* System Control & Status Register bits.
*/
-#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
-#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
+#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
#define KN01_CSR_VINT (1<<9) /* PCC area detect #2 status & ack */
#define KN01_CSR_TXDIS (1<<8) /* DZ11 transmit disable */
#define KN01_CSR_VBGTRG (1<<2) /* blue DAC voltage over green (r/o) */
diff --git a/arch/mips/include/asm/dec/kn02ca.h b/arch/mips/include/asm/dec/kn02ca.h
index 92c0fe256099..69dc2a9a2d0f 100644
--- a/arch/mips/include/asm/dec/kn02ca.h
+++ b/arch/mips/include/asm/dec/kn02ca.h
@@ -68,7 +68,7 @@
#define KN03CA_IO_SSR_ISDN_RST (1<<12) /* ~ISDN (Am79C30A) reset */
#define KN03CA_IO_SSR_FLOPPY_RST (1<<7) /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
#define KN03CA_IO_SSR_AB_RST (1<<5) /* ACCESS.bus reset */
#define KN03CA_IO_SSR_RES_4 (1<<4) /* unused */
#define KN03CA_IO_SSR_RES_3 (1<<4) /* unused */
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index c0ead6313845..446577712bee 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -49,7 +49,7 @@
#ifdef CONFIG_64BIT
-#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
+#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
#else /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 006b43e38a9c..f8fc74b6cb47 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,7 +5,7 @@
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
+#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>
#endif
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index f5097f65a8ab..5b9ed1bffdbc 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -47,21 +47,21 @@
*
* Address mapping for channels 0-3:
*
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
@@ -102,55 +102,55 @@
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
-#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
-#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
-#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
-#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
-
-#define DMA_ADDR_0 0x00 /* DMA address registers */
-#define DMA_ADDR_1 0x02
-#define DMA_ADDR_2 0x04
-#define DMA_ADDR_3 0x06
-#define DMA_ADDR_4 0xC0
-#define DMA_ADDR_5 0xC4
-#define DMA_ADDR_6 0xC8
-#define DMA_ADDR_7 0xCC
-
-#define DMA_CNT_0 0x01 /* DMA count registers */
-#define DMA_CNT_1 0x03
-#define DMA_CNT_2 0x05
-#define DMA_CNT_3 0x07
-#define DMA_CNT_4 0xC2
-#define DMA_CNT_5 0xC6
-#define DMA_CNT_6 0xCA
-#define DMA_CNT_7 0xCE
-
-#define DMA_PAGE_0 0x87 /* DMA page registers */
-#define DMA_PAGE_1 0x83
-#define DMA_PAGE_2 0x81
-#define DMA_PAGE_3 0x82
-#define DMA_PAGE_5 0x8B
-#define DMA_PAGE_6 0x89
-#define DMA_PAGE_7 0x8A
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
@@ -172,7 +172,7 @@ static __inline__ void release_dma_lock(unsigned long flags)
static __inline__ void enable_dma(unsigned int dmanr)
{
if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
+ dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
@@ -204,7 +204,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}
@@ -248,10 +248,10 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
- if (dmanr <= 3) {
+ if (dmanr <= 3) {
dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
}
@@ -268,14 +268,14 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
- count--;
- if (dmanr <= 3) {
+ count--;
+ if (dmanr <= 3) {
dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
+ } else {
dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
+ }
}
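The helpers above follow the classic i8237-style programming sequence: mask the channel, reset the flip-flop, write mode, address and count, then unmask. A condensed sketch of a device-to-memory transfer on channel 2; disable_dma() is assumed from the same API family (only clear_dma_ff() and the other helpers appear in this hunk), and the DMA spinlock handling shown around release_dma_lock() is omitted:

/* Program DMA channel 2 for an I/O-to-memory ("read") transfer. */
static void start_isa_dma_read(unsigned int phys_addr, unsigned int len)
{
	disable_dma(2);			/* mask the channel while reprogramming */
	clear_dma_ff(2);		/* reset the byte-pointer flip-flop */
	set_dma_mode(2, DMA_MODE_READ);	/* I/O -> memory, single mode */
	set_dma_addr(2, phys_addr);	/* page register + 16-bit address */
	set_dma_count(2, len);		/* stored internally as count - 1 */
	enable_dma(2);			/* unmask: transfer starts on DREQ */
}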
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 455c0ac7d4ea..cf3ae2480b1d 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -11,13 +11,13 @@
/* ELF header e_flags defines. */
/* MIPS architecture level. */
-#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
-#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
-#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
-#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
-#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
-#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
-#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */
#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */
@@ -74,7 +74,7 @@
#define R_MIPS_CALL16 11
#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
- in the MIPS ELF ABI. */
+ in the MIPS ELF ABI. */
#define R_MIPS_UNUSED1 13
#define R_MIPS_UNUSED2 14
#define R_MIPS_UNUSED3 15
@@ -214,7 +214,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
- if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
__res = 0; \
\
__res; \
@@ -292,7 +292,7 @@ do { \
__SET_PERSONALITY32_O32(); \
} while (0)
#else
-#define __SET_PERSONALITY32(ex) do { } while (0)
+#define __SET_PERSONALITY32(ex) do { } while (0)
#endif
#define SET_PERSONALITY(ex) \
@@ -337,11 +337,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
-#define ELF_HWCAP (0)
+#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
+ * specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
@@ -365,11 +365,11 @@ extern const char *__elf_platform;
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
#ifndef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
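The EF_MIPS_ARCH_* values above occupy the top nibble of the ELF header's e_flags, so classifying an object is a mask-and-compare. A small sketch; the 0xf0000000 mask is the conventional EF_MIPS_ARCH field mask and is stated here as an assumption, since it is not part of the quoted hunk:

/* True if an ELF object was built for MIPS32 R2 or MIPS64 R2. */
static int is_r2_object(uint32_t e_flags)
{
	uint32_t arch = e_flags & 0xf0000000;	/* assumed EF_MIPS_ARCH mask */

	return arch == EF_MIPS_ARCH_32R2 || arch == EF_MIPS_ARCH_64R2;
}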
diff --git a/arch/mips/include/asm/emma/emma2rh.h b/arch/mips/include/asm/emma/emma2rh.h
index c1449d20ef0e..ecf059608bd8 100644
--- a/arch/mips/include/asm/emma/emma2rh.h
+++ b/arch/mips/include/asm/emma/emma2rh.h
@@ -2,7 +2,7 @@
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,7 +40,7 @@
#define EMMA2RH_BHIF_INT1_EN_2 (0x000058+REGBASE)
#define EMMA2RH_BHIF_SW_INT (0x000070+REGBASE)
#define EMMA2RH_BHIF_SW_INT_EN (0x000080+REGBASE)
-#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
+#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
#define EMMA2RH_BHIF_MAIN_CTRL (0x0000b4+REGBASE)
#define EMMA2RH_BHIF_EXCEPT_VECT_BASE_ADDRESS (0x0000c0+REGBASE)
#define EMMA2RH_GPIO_DIR (0x110d20+REGBASE)
@@ -73,7 +73,7 @@
* Memory map (physical address)
*
* Note most of the following address must be properly aligned by the
- * corresponding size. For example, if PCI_IO_SIZE is 16MB, then
+ * corresponding size. For example, if PCI_IO_SIZE is 16MB, then
* PCI_IO_BASE must be aligned along 16MB boundary.
*/
@@ -96,8 +96,8 @@
#define EMMA2RH_ROM_BASE 0x1c000000
#define EMMA2RH_ROM_SIZE 0x04000000 /* 64 MB */
-#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
-#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
+#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
+#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
#define NUM_EMMA2RH_IRQ 96
@@ -169,51 +169,51 @@ static inline u8 emma2rh_in8(u32 offset)
**/
/*---------------------------------------------------------------------------*/
-/* CNT - Control register (00H R/W) */
+/* CNT - Control register (00H R/W) */
/*---------------------------------------------------------------------------*/
-#define SPT 0x00000001
-#define STT 0x00000002
-#define ACKE 0x00000004
-#define WTIM 0x00000008
-#define SPIE 0x00000010
-#define WREL 0x00000020
-#define LREL 0x00000040
-#define IICE 0x00000080
-#define CNT_RESERVED 0x000000ff /* reserved bit 0 */
-
-#define I2C_EMMA_START (IICE | STT)
-#define I2C_EMMA_STOP (IICE | SPT)
+#define SPT 0x00000001
+#define STT 0x00000002
+#define ACKE 0x00000004
+#define WTIM 0x00000008
+#define SPIE 0x00000010
+#define WREL 0x00000020
+#define LREL 0x00000040
+#define IICE 0x00000080
+#define CNT_RESERVED 0x000000ff /* reserved bit 0 */
+
+#define I2C_EMMA_START (IICE | STT)
+#define I2C_EMMA_STOP (IICE | SPT)
#define I2C_EMMA_REPSTART I2C_EMMA_START
/*---------------------------------------------------------------------------*/
-/* STA - Status register (10H Read) */
+/* STA - Status register (10H Read) */
/*---------------------------------------------------------------------------*/
-#define MSTS 0x00000080
-#define ALD 0x00000040
-#define EXC 0x00000020
-#define COI 0x00000010
-#define TRC 0x00000008
-#define ACKD 0x00000004
-#define STD 0x00000002
-#define SPD 0x00000001
+#define MSTS 0x00000080
+#define ALD 0x00000040
+#define EXC 0x00000020
+#define COI 0x00000010
+#define TRC 0x00000008
+#define ACKD 0x00000004
+#define STD 0x00000002
+#define SPD 0x00000001
/*---------------------------------------------------------------------------*/
-/* CSEL - Clock select register (20H R/W) */
+/* CSEL - Clock select register (20H R/W) */
/*---------------------------------------------------------------------------*/
-#define FCL 0x00000080
-#define ND50 0x00000040
-#define CLD 0x00000020
-#define DAD 0x00000010
-#define SMC 0x00000008
-#define DFC 0x00000004
-#define CL 0x00000003
-#define CSEL_RESERVED 0x000000ff /* reserved bit 0 */
-
-#define FAST397 0x0000008b
-#define FAST297 0x0000008a
-#define FAST347 0x0000000b
-#define FAST260 0x0000000a
-#define FAST130 0x00000008
+#define FCL 0x00000080
+#define ND50 0x00000040
+#define CLD 0x00000020
+#define DAD 0x00000010
+#define SMC 0x00000008
+#define DFC 0x00000004
+#define CL 0x00000003
+#define CSEL_RESERVED 0x000000ff /* reserved bit 0 */
+
+#define FAST397 0x0000008b
+#define FAST297 0x0000008a
+#define FAST347 0x0000000b
+#define FAST260 0x0000000a
+#define FAST130 0x00000008
#define STANDARD108 0x00000083
#define STANDARD83 0x00000082
#define STANDARD95 0x00000003
@@ -222,32 +222,32 @@ static inline u8 emma2rh_in8(u32 offset)
#define STANDARD71 0x00000000
/*---------------------------------------------------------------------------*/
-/* SVA - Slave address register (30H R/W) */
+/* SVA - Slave address register (30H R/W) */
/*---------------------------------------------------------------------------*/
-#define SVA 0x000000fe
+#define SVA 0x000000fe
/*---------------------------------------------------------------------------*/
-/* SHR - Shift register (40H R/W) */
+/* SHR - Shift register (40H R/W) */
/*---------------------------------------------------------------------------*/
-#define SR 0x000000ff
+#define SR 0x000000ff
/*---------------------------------------------------------------------------*/
-/* INT - Interrupt register (50H R/W) */
-/* INTM - Interrupt mask register (60H R/W) */
+/* INT - Interrupt register (50H R/W) */
+/* INTM - Interrupt mask register (60H R/W) */
/*---------------------------------------------------------------------------*/
-#define INTE0 0x00000001
+#define INTE0 0x00000001
/***********************************************************************
* I2C registers
***********************************************************************
*/
-#define I2C_EMMA_CNT 0x00
-#define I2C_EMMA_STA 0x10
-#define I2C_EMMA_CSEL 0x20
-#define I2C_EMMA_SVA 0x30
-#define I2C_EMMA_SHR 0x40
-#define I2C_EMMA_INT 0x50
-#define I2C_EMMA_INTM 0x60
+#define I2C_EMMA_CNT 0x00
+#define I2C_EMMA_STA 0x10
+#define I2C_EMMA_CSEL 0x20
+#define I2C_EMMA_SVA 0x30
+#define I2C_EMMA_SHR 0x40
+#define I2C_EMMA_INT 0x50
+#define I2C_EMMA_INTM 0x60
/*
* include the board dependent part
diff --git a/arch/mips/include/asm/emma/markeins.h b/arch/mips/include/asm/emma/markeins.h
index bf2d229c2dae..e55a67477820 100644
--- a/arch/mips/include/asm/emma/markeins.h
+++ b/arch/mips/include/asm/emma/markeins.h
@@ -2,7 +2,7 @@
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 98bcc98cf29b..dfaaf493e9d4 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -95,7 +95,7 @@ static inline unsigned long fix_to_virt(const unsigned int idx)
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
+ return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
@@ -111,7 +111,7 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
* Called from pgtable_init()
*/
extern void fixrange_init(unsigned long start, unsigned long end,
- pgd_t *pgd_base);
+ pgd_t *pgd_base);
#endif
diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h
index 4456c9c47e21..d75aed36480a 100644
--- a/arch/mips/include/asm/floppy.h
+++ b/arch/mips/include/asm/floppy.h
@@ -24,9 +24,9 @@ static inline void fd_cacheflush(char * addr, long size)
* And on Mips's the CMOS info fails also ...
*
* FIXME: This information should come from the ARC configuration tree
- * or wherever a particular machine has stored this ...
+ * or wherever a particular machine has stored this ...
*/
-#define FLOPPY0_TYPE fd_drive_type(0)
+#define FLOPPY0_TYPE fd_drive_type(0)
#define FLOPPY1_TYPE fd_drive_type(1)
#define FDC1 fd_getfdaddr1()
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
index 2b5fddc8f487..429481f9028d 100644
--- a/arch/mips/include/asm/fpregdef.h
+++ b/arch/mips/include/asm/fpregdef.h
@@ -20,15 +20,15 @@
* These definitions only cover the R3000-ish 16/32 register model.
* But we're trying to be R3000 friendly anyway ...
*/
-#define fv0 $f0 /* return value */
+#define fv0 $f0 /* return value */
#define fv0f $f1
#define fv1 $f2
#define fv1f $f3
-#define fa0 $f12 /* argument registers */
+#define fa0 $f12 /* argument registers */
#define fa0f $f13
#define fa1 $f14
#define fa1f $f15
-#define ft0 $f4 /* caller saved */
+#define ft0 $f4 /* caller saved */
#define ft0f $f5
#define ft1 $f6
#define ft1f $f7
@@ -40,7 +40,7 @@
#define ft4f $f17
#define ft5 $f18
#define ft5f $f19
-#define fs0 $f20 /* callee saved */
+#define fs0 $f20 /* callee saved */
#define fs0f $f21
#define fs1 $f22
#define fs1f $f23
@@ -53,7 +53,7 @@
#define fs5 $f30
#define fs5f $f31
-#define fcr31 $31 /* FPU status register */
+#define fcr31 $31 /* FPU status register */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 7fcef8ef3fab..d088e5db4903 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -35,14 +35,14 @@ extern void _restore_fp(struct task_struct *);
#define __enable_fpu() \
do { \
- set_c0_status(ST0_CU1); \
- enable_fpu_hazard(); \
+ set_c0_status(ST0_CU1); \
+ enable_fpu_hazard(); \
} while (0)
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
+ disable_fpu_hazard(); \
} while (0)
#define enable_fpu() \
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ebf1734b411..6ea15815d3ee 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -92,24 +92,24 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("addu $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ __futex_atomic_op("addu $1, %1, %z5",
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and $1, %1, %z5",
- ret, oldval, uaddr, ~oparg);
+ ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
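Each __futex_atomic_op() call above performs one atomic read-modify-write on the user word, returning the old value; note that FUTEX_OP_ANDN is built from the "and" template by passing ~oparg. Ignoring atomicity and the -EFAULT paths, the cases reduce to the following non-atomic model:

/* Non-atomic model of what the ll/sc sequences above compute. */
static int futex_op_model(int op, u32 *uaddr, int oparg, int *oldval)
{
	*oldval = *uaddr;
	switch (op) {
	case FUTEX_OP_SET:	*uaddr = oparg;			break;
	case FUTEX_OP_ADD:	*uaddr = *oldval + oparg;	break;
	case FUTEX_OP_OR:	*uaddr = *oldval | oparg;	break;
	case FUTEX_OP_ANDN:	*uaddr = *oldval & ~oparg;	break;
	case FUTEX_OP_XOR:	*uaddr = *oldval ^ oparg;	break;
	default:		return -ENOSYS;
	}
	return 0;
}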
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index e6ff4add04e2..f8d37d1df5de 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -12,7 +12,7 @@ typedef enum configclass {
SystemClass,
ProcessorClass,
CacheClass,
-#ifndef _NT_PROM
+#ifndef _NT_PROM
MemoryClass,
AdapterClass,
ControllerClass,
@@ -34,7 +34,7 @@ typedef enum configtype {
SecondaryICache,
SecondaryDCache,
SecondaryCache,
-#ifndef _NT_PROM
+#ifndef _NT_PROM
Memory,
#endif
EISAAdapter,
@@ -93,7 +93,7 @@ typedef enum {
} IDENTIFIERFLAG;
#ifndef NULL /* for GetChild(NULL); */
-#define NULL 0
+#define NULL 0
#endif
union key_u {
@@ -125,7 +125,7 @@ typedef struct component {
IDENTIFIERFLAG Flags;
USHORT Version;
USHORT Revision;
- ULONG Key;
+ ULONG Key;
ULONG AffinityMask;
ULONG ConfigurationDataSize;
ULONG IdentifierLength;
@@ -149,7 +149,7 @@ typedef struct systemid {
typedef enum memorytype {
ExceptionBlock,
SPBPage, /* ARCS == SystemParameterBlock */
-#ifndef _NT_PROM
+#ifndef _NT_PROM
FreeContiguous,
FreeMemory,
BadMemory,
diff --git a/arch/mips/include/asm/fw/arc/types.h b/arch/mips/include/asm/fw/arc/types.h
index 2b11f87d6fb3..ad163806148a 100644
--- a/arch/mips/include/asm/fw/arc/types.h
+++ b/arch/mips/include/asm/fw/arc/types.h
@@ -15,7 +15,7 @@
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef long LONG __attribute__ ((__mode__ (__SI__)));
+typedef long LONG __attribute__ ((__mode__ (__SI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__)));
@@ -23,11 +23,11 @@ typedef void VOID;
/* The pointer types. Note that we're using a 64-bit compiler but all
pointer in the ARC structures are only 32-bit, so we need some disgusting
- workarounds. Keep your vomit bag handy. */
+ workarounds. Keep your vomit bag handy. */
typedef LONG _PCHAR;
typedef LONG _PSHORT;
typedef LONG _PLARGE_INTEGER;
-typedef LONG _PLONG;
+typedef LONG _PLONG;
typedef LONG _PUCHAR;
typedef LONG _PUSHORT;
typedef LONG _PULONG;
@@ -40,7 +40,7 @@ typedef LONG _PVOID;
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef long LONG __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__DI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__)));
@@ -51,7 +51,7 @@ typedef void VOID;
typedef CHAR *_PCHAR;
typedef SHORT *_PSHORT;
typedef LARGE_INTEGER *_PLARGE_INTEGER;
-typedef LONG *_PLONG;
+typedef LONG *_PLONG;
typedef UCHAR *_PUCHAR;
typedef USHORT *_PUSHORT;
typedef ULONG *_PULONG;
@@ -62,7 +62,7 @@ typedef VOID *_PVOID;
typedef CHAR *PCHAR;
typedef SHORT *PSHORT;
typedef LARGE_INTEGER *PLARGE_INTEGER;
-typedef LONG *PLONG;
+typedef LONG *PLONG;
typedef UCHAR *PUCHAR;
typedef USHORT *PUSHORT;
typedef ULONG *PULONG;
diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h
index 0995575db320..17347551a1b2 100644
--- a/arch/mips/include/asm/fw/cfe/cfe_api.h
+++ b/arch/mips/include/asm/fw/cfe/cfe_api.h
@@ -40,7 +40,7 @@ typedef long intptr_t;
/* Seal indicating CFE's presence, passed to user program. */
#define CFE_EPTSEAL 0x43464531
-#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
+#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
#define CFE_MI_AVAILABLE 1 /* memory is available */
#define CFE_FLG_WARMSTART 0x00000001
@@ -52,13 +52,13 @@ typedef long intptr_t;
#define CFE_STDHANDLE_CONSOLE 0
-#define CFE_DEV_NETWORK 1
+#define CFE_DEV_NETWORK 1
#define CFE_DEV_DISK 2
#define CFE_DEV_FLASH 3
#define CFE_DEV_SERIAL 4
#define CFE_DEV_CPU 5
#define CFE_DEV_NVRAM 6
-#define CFE_DEV_CLOCK 7
+#define CFE_DEV_CLOCK 7
#define CFE_DEV_OTHER 8
#define CFE_DEV_MASK 0x0F
diff --git a/arch/mips/include/asm/fw/cfe/cfe_error.h b/arch/mips/include/asm/fw/cfe/cfe_error.h
index b80374636279..fc0e91f07e22 100644
--- a/arch/mips/include/asm/fw/cfe/cfe_error.h
+++ b/arch/mips/include/asm/fw/cfe/cfe_error.h
@@ -25,7 +25,7 @@
*/
#define CFE_OK 0
-#define CFE_ERR -1 /* generic error */
+#define CFE_ERR -1 /* generic error */
#define CFE_ERR_INV_COMMAND -2
#define CFE_ERR_EOF -3
#define CFE_ERR_IOERR -4
@@ -37,12 +37,12 @@
#define CFE_ERR_ENVREADONLY -10
#define CFE_ERR_NOTELF -11
-#define CFE_ERR_NOT32BIT -12
-#define CFE_ERR_WRONGENDIAN -13
-#define CFE_ERR_BADELFVERS -14
-#define CFE_ERR_NOTMIPS -15
-#define CFE_ERR_BADELFFMT -16
-#define CFE_ERR_BADADDR -17
+#define CFE_ERR_NOT32BIT -12
+#define CFE_ERR_WRONGENDIAN -13
+#define CFE_ERR_BADELFVERS -14
+#define CFE_ERR_NOTMIPS -15
+#define CFE_ERR_BADELFFMT -16
+#define CFE_ERR_BADADDR -17
#define CFE_ERR_FILENOTFOUND -18
#define CFE_ERR_UNSUPPORTED -19
@@ -73,8 +73,8 @@
#define CFE_ERR_NOTREADY -36
-#define CFE_ERR_GETMEM -37
-#define CFE_ERR_SETMEM -38
+#define CFE_ERR_GETMEM -37
+#define CFE_ERR_SETMEM -38
#define CFE_ERR_NOTCONN -39
#define CFE_ERR_ADDRINUSE -40
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
index c0cf76a2ca89..a7359f77a48e 100644
--- a/arch/mips/include/asm/gcmpregs.h
+++ b/arch/mips/include/asm/gcmpregs.h
@@ -32,7 +32,7 @@
/* GCMP register access */
#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
-#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
+#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
@@ -45,76 +45,76 @@
/* GCB registers */
#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
-#define GCMP_GCB_GC_NUMIOCU_SHF 8
-#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
-#define GCMP_GCB_GC_NUMCORES_SHF 0
-#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
+#define GCMP_GCB_GC_NUMIOCU_SHF 8
+#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
+#define GCMP_GCB_GC_NUMCORES_SHF 0
+#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
-#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
-#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
-#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
-#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
+#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
+#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
+#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
+#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
+#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
+#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
+#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
-#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
-#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
+#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
+#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
-#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
-#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
-#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
-#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
+#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
+#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
+#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
-#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
-#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
+#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
+#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
-#define GCMP_GCB_GICBA_BASE_SHF 17
-#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
-#define GCMP_GCB_GICBA_EN_SHF 0
-#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
+#define GCMP_GCB_GICBA_BASE_SHF 17
+#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
+#define GCMP_GCB_GICBA_EN_SHF 0
+#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
/* GCB Regions */
#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
-#define GCMP_GCB_CMxBASE_BASE_SHF 16
-#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
+#define GCMP_GCB_CMxBASE_BASE_SHF 16
+#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
-#define GCMP_GCB_CMxMASK_MASK_SHF 16
-#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
-#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+#define GCMP_GCB_CMxMASK_MASK_SHF 16
+#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
+#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
+#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
+#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
+#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
+#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
+#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
/* Core local/Core other control block registers */
#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
-#define GCMP_CCB_RESETR_INRESET_SHF 0
-#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
+#define GCMP_CCB_RESETR_INRESET_SHF 0
+#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
-#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
-#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
+#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
+#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
-#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
-#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
-#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
-#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
-#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
-#define GCMP_CCB_CFG_NUMVPE_SHF 0
-#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
+#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
+#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
+#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
+#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
+#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
+#define GCMP_CCB_CFG_NUMVPE_SHF 0
+#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
-#define GCMP_CCB_OTHER_CORENUM_SHF 16
-#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
+#define GCMP_CCB_OTHER_CORENUM_SHF 16
+#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
-#define GCMP_CCB_RESETBASE_BEV_SHF 12
-#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
+#define GCMP_CCB_RESETBASE_BEV_SHF 12
+#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
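A minimal sketch of how the _SHF/_MSK pairs in this header are consumed (not from the patch; it assumes GCMPGCBMSK() yields a mask already shifted into field position):

static inline unsigned int gcmp_gcb_cmdeftgt(unsigned int gcmpb)
{
	/* isolate the CM default target field and shift it down to bit 0 */
	return (gcmpb & GCMP_GCB_GCMPB_CMDEFTGT_MSK) >>
	       GCMP_GCB_GCMPB_CMDEFTGT_SHF;
}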
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 37620db588be..bdc9786ab5a7 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -66,7 +66,7 @@
/* Register Map for Shared Section */
-#define GIC_SH_CONFIG_OFS 0x0000
+#define GIC_SH_CONFIG_OFS 0x0000
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
@@ -146,13 +146,13 @@
#define GIC_SH_PEND_223_192_OFS 0x0498
#define GIC_SH_PEND_255_224_OFS 0x049c
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
/* Maps Interrupt X to a Pin */
#define GIC_SH_MAP_TO_PIN(intr) \
(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
/* Maps Interrupt X to a VPE */
#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
@@ -326,7 +326,7 @@ struct gic_intr_map {
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
unsigned int flags; /* Misc flags */
-#define GIC_FLAG_IPI 0x01
+#define GIC_FLAG_IPI 0x01
#define GIC_FLAG_TRANSPARENT 0x02
};
@@ -343,10 +343,10 @@ struct gic_shared_intr_map {
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1 1 /* . */
-#define GIC_CPU_INT2 2 /* . */
-#define GIC_CPU_INT3 3 /* . */
-#define GIC_CPU_INT4 4 /* . */
+#define GIC_CPU_INT1 1 /* . */
+#define GIC_CPU_INT2 2 /* . */
+#define GIC_CPU_INT3 3 /* . */
+#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
/* Local GIC interrupts. */
@@ -359,6 +359,7 @@ struct gic_shared_intr_map {
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
+extern int gic_present;
extern unsigned long _gic_base;
extern unsigned int gic_irq_base;
extern unsigned int gic_irq_flags[];
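A minimal sketch of the shared-section offset arithmetic above (not from the patch; _gic_base is the extern declared in this header, and the actual register access would go through whatever accessor the platform uses):

static inline unsigned long gic_map_to_pin_addr(unsigned int intr)
{
	/* GIC_SH_MAP_TO_PIN(intr) expands to 0x0500 + 4 * intr */
	return _gic_base + GIC_SH_MAP_TO_PIN(intr);
}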
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 5437c84664bf..0878701712f8 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -6,15 +6,15 @@ struct gio_device_id {
};
struct gio_device {
- struct device dev;
+ struct device dev;
struct resource resource;
- unsigned int irq;
- unsigned int slotno;
+ unsigned int irq;
+ unsigned int slotno;
- const char *name;
+ const char *name;
struct gio_device_id id;
- unsigned id32:1;
- unsigned gio64:1;
+ unsigned id32:1;
+ unsigned gio64:1;
};
#define to_gio_device(d) container_of(d, struct gio_device, dev)
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
extern int gio_register_driver(struct gio_driver *);
extern void gio_unregister_driver(struct gio_driver *);
-#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev)
+#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev)
#define gio_set_drvdata(_dev, data) drv_set_drvdata(&(_dev)->dev, (data))
extern void gio_set_master(struct gio_device *);
diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h
index 0aa44abc77fe..2e72abb9440e 100644
--- a/arch/mips/include/asm/gt64120.h
+++ b/arch/mips/include/asm/gt64120.h
@@ -34,7 +34,7 @@
#define GT_MULTI_OFS 0x120
-/* CPU Address Decode. */
+/* CPU Address Decode. */
#define GT_SCS10LD_OFS 0x008
#define GT_SCS10HD_OFS 0x010
#define GT_SCS32LD_OFS 0x018
@@ -106,12 +106,12 @@
#define GT_ADERR_OFS 0x470
-/* SDRAM Configuration. */
+/* SDRAM Configuration. */
#define GT_SDRAM_CFG_OFS 0x448
#define GT_SDRAM_OPMODE_OFS 0x474
#define GT_SDRAM_BM_OFS 0x478
-#define GT_SDRAM_ADDRDECODE_OFS 0x47c
+#define GT_SDRAM_ADDRDECODE_OFS 0x47c
/* SDRAM Parameters. */
#define GT_SDRAM_B0_OFS 0x44c
@@ -126,14 +126,14 @@
#define GT_DEV_B3_OFS 0x468
#define GT_DEV_BOOT_OFS 0x46c
-/* ECC. */
+/* ECC. */
#define GT_ECC_ERRDATALO 0x480 /* GT-64120A only */
#define GT_ECC_ERRDATAHI 0x484 /* GT-64120A only */
#define GT_ECC_MEM 0x488 /* GT-64120A only */
#define GT_ECC_CALC 0x48c /* GT-64120A only */
#define GT_ECC_ERRADDR 0x490 /* GT-64120A only */
-/* DMA Record. */
+/* DMA Record. */
#define GT_DMA0_CNT_OFS 0x800
#define GT_DMA1_CNT_OFS 0x804
#define GT_DMA2_CNT_OFS 0x808
@@ -156,13 +156,13 @@
#define GT_DMA2_CUR_OFS 0x878
#define GT_DMA3_CUR_OFS 0x87c
-/* DMA Channel Control. */
+/* DMA Channel Control. */
#define GT_DMA0_CTRL_OFS 0x840
#define GT_DMA1_CTRL_OFS 0x844
#define GT_DMA2_CTRL_OFS 0x848
#define GT_DMA3_CTRL_OFS 0x84c
-/* DMA Arbiter. */
+/* DMA Arbiter. */
#define GT_DMA_ARB_OFS 0x860
/* Timer/Counter. */
@@ -220,7 +220,7 @@
#define GT_PCI0_CFGADDR_OFS 0xcf8
#define GT_PCI0_CFGDATA_OFS 0xcfc
-/* Interrupts. */
+/* Interrupts. */
#define GT_INTRCAUSE_OFS 0xc18
#define GT_INTRMASK_OFS 0xc1c
@@ -547,15 +547,15 @@
#define GT_DEF_BASE 0x14000000UL
#define GT_MAX_BANKSIZE (256 * 1024 * 1024) /* Max 256MB bank */
-#define GT_LATTIM_MIN 6 /* Minimum lat */
+#define GT_LATTIM_MIN 6 /* Minimum lat */
/*
* The gt64120_dep.h file must define the following macros
*
* GT_READ(ofs, data_pointer)
- * GT_WRITE(ofs, data) - read/write GT64120 registers in 32bit
+ * GT_WRITE(ofs, data) - read/write GT64120 registers in 32bit
*
- * TIMER - gt64120 timer irq, temporary solution until
+ * TIMER - gt64120 timer irq, temporary solution until
* full gt64120 cascade interrupt support is in place
*/
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index f0324e92d089..44d6a5bde4a1 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -25,7 +25,7 @@ static inline void name(void) \
}
/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
*/
extern void mips_ihb(void);
@@ -68,7 +68,7 @@ ASMMACRO(back_to_back_c0_hazard,
)
/*
* gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler. Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
* The alterantive is switching the assembler to 64-bit code which happens
@@ -114,7 +114,7 @@ ASMMACRO(back_to_back_c0_hazard,
)
/*
* gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler. Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
* The alterantive is switching the assembler to 64-bit code which happens
@@ -141,7 +141,7 @@ do { \
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
- defined(CONFIG_CPU_R5500)
+ defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 2d91888c9b74..b0dd0c84df70 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -39,8 +39,8 @@ extern pte_t *pkmap_page_table;
*/
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
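A minimal sketch showing that PKMAP_NR() and PKMAP_ADDR() above are inverses for addresses inside the pkmap window, assuming PKMAP_BASE is page aligned and <asm/page.h> provides PAGE_MASK:

static inline int pkmap_roundtrip_ok(unsigned long vaddr)
{
	return PKMAP_ADDR(PKMAP_NR(vaddr)) == (vaddr & PAGE_MASK);
}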
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 33c34adbecfa..f1eadf764071 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -11,353 +11,7 @@
#ifndef _ASM_INST_H
#define _ASM_INST_H
-/*
- * Major opcodes; before MIPS IV cop1x was called cop3.
- */
-enum major_op {
- spec_op, bcond_op, j_op, jal_op,
- beq_op, bne_op, blez_op, bgtz_op,
- addi_op, addiu_op, slti_op, sltiu_op,
- andi_op, ori_op, xori_op, lui_op,
- cop0_op, cop1_op, cop2_op, cop1x_op,
- beql_op, bnel_op, blezl_op, bgtzl_op,
- daddi_op, daddiu_op, ldl_op, ldr_op,
- spec2_op, jalx_op, mdmx_op, spec3_op,
- lb_op, lh_op, lwl_op, lw_op,
- lbu_op, lhu_op, lwr_op, lwu_op,
- sb_op, sh_op, swl_op, sw_op,
- sdl_op, sdr_op, swr_op, cache_op,
- ll_op, lwc1_op, lwc2_op, pref_op,
- lld_op, ldc1_op, ldc2_op, ld_op,
- sc_op, swc1_op, swc2_op, major_3b_op,
- scd_op, sdc1_op, sdc2_op, sd_op
-};
-
-/*
- * func field of spec opcode.
- */
-enum spec_op {
- sll_op, movc_op, srl_op, sra_op,
- sllv_op, pmon_op, srlv_op, srav_op,
- jr_op, jalr_op, movz_op, movn_op,
- syscall_op, break_op, spim_op, sync_op,
- mfhi_op, mthi_op, mflo_op, mtlo_op,
- dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
- mult_op, multu_op, div_op, divu_op,
- dmult_op, dmultu_op, ddiv_op, ddivu_op,
- add_op, addu_op, sub_op, subu_op,
- and_op, or_op, xor_op, nor_op,
- spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
- dadd_op, daddu_op, dsub_op, dsubu_op,
- tge_op, tgeu_op, tlt_op, tltu_op,
- teq_op, spec5_unused_op, tne_op, spec6_unused_op,
- dsll_op, spec7_unused_op, dsrl_op, dsra_op,
- dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
-};
-
-/*
- * func field of spec2 opcode.
- */
-enum spec2_op {
- madd_op, maddu_op, mul_op, spec2_3_unused_op,
- msub_op, msubu_op, /* more unused ops */
- clz_op = 0x20, clo_op,
- dclz_op = 0x24, dclo_op,
- sdbpp_op = 0x3f
-};
-
-/*
- * func field of spec3 opcode.
- */
-enum spec3_op {
- ext_op, dextm_op, dextu_op, dext_op,
- ins_op, dinsm_op, dinsu_op, dins_op,
- lx_op = 0x0a,
- bshfl_op = 0x20,
- dbshfl_op = 0x24,
- rdhwr_op = 0x3b
-};
-
-/*
- * rt field of bcond opcodes.
- */
-enum rt_op {
- bltz_op, bgez_op, bltzl_op, bgezl_op,
- spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
- tgei_op, tgeiu_op, tlti_op, tltiu_op,
- teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
- bltzal_op, bgezal_op, bltzall_op, bgezall_op,
- rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
- rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
- bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
-};
-
-/*
- * rs field of cop opcodes.
- */
-enum cop_op {
- mfc_op = 0x00, dmfc_op = 0x01,
- cfc_op = 0x02, mtc_op = 0x04,
- dmtc_op = 0x05, ctc_op = 0x06,
- bc_op = 0x08, cop_op = 0x10,
- copm_op = 0x18
-};
-
-/*
- * rt field of cop.bc_op opcodes
- */
-enum bcop_op {
- bcf_op, bct_op, bcfl_op, bctl_op
-};
-
-/*
- * func field of cop0 coi opcodes.
- */
-enum cop0_coi_func {
- tlbr_op = 0x01, tlbwi_op = 0x02,
- tlbwr_op = 0x06, tlbp_op = 0x08,
- rfe_op = 0x10, eret_op = 0x18
-};
-
-/*
- * func field of cop0 com opcodes.
- */
-enum cop0_com_func {
- tlbr1_op = 0x01, tlbw_op = 0x02,
- tlbp1_op = 0x08, dctr_op = 0x09,
- dctw_op = 0x0a
-};
-
-/*
- * fmt field of cop1 opcodes.
- */
-enum cop1_fmt {
- s_fmt, d_fmt, e_fmt, q_fmt,
- w_fmt, l_fmt
-};
-
-/*
- * func field of cop1 instructions using d, s or w format.
- */
-enum cop1_sdw_func {
- fadd_op = 0x00, fsub_op = 0x01,
- fmul_op = 0x02, fdiv_op = 0x03,
- fsqrt_op = 0x04, fabs_op = 0x05,
- fmov_op = 0x06, fneg_op = 0x07,
- froundl_op = 0x08, ftruncl_op = 0x09,
- fceill_op = 0x0a, ffloorl_op = 0x0b,
- fround_op = 0x0c, ftrunc_op = 0x0d,
- fceil_op = 0x0e, ffloor_op = 0x0f,
- fmovc_op = 0x11, fmovz_op = 0x12,
- fmovn_op = 0x13, frecip_op = 0x15,
- frsqrt_op = 0x16, fcvts_op = 0x20,
- fcvtd_op = 0x21, fcvte_op = 0x22,
- fcvtw_op = 0x24, fcvtl_op = 0x25,
- fcmp_op = 0x30
-};
-
-/*
- * func field of cop1x opcodes (MIPS IV).
- */
-enum cop1x_func {
- lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
- madd_d_op = 0x21, madd_e_op = 0x22,
- msub_s_op = 0x28, msub_d_op = 0x29,
- msub_e_op = 0x2a, nmadd_s_op = 0x30,
- nmadd_d_op = 0x31, nmadd_e_op = 0x32,
- nmsub_s_op = 0x38, nmsub_d_op = 0x39,
- nmsub_e_op = 0x3a
-};
-
-/*
- * func field for mad opcodes (MIPS IV).
- */
-enum mad_func {
- madd_fp_op = 0x08, msub_fp_op = 0x0a,
- nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
-};
-
-/*
- * func field for special3 lx opcodes (Cavium Octeon).
- */
-enum lx_func {
- lwx_op = 0x00,
- lhx_op = 0x04,
- lbux_op = 0x06,
- ldx_op = 0x08,
- lwux_op = 0x10,
- lhux_op = 0x14,
- lbx_op = 0x16,
-};
-
-/*
- * Damn ... bitfields depend from byteorder :-(
- */
-#ifdef __MIPSEB__
-struct j_format { /* Jump format */
- unsigned int opcode : 6;
- unsigned int target : 26;
-};
-
-struct i_format { /* Immediate format (addi, lw, ...) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- signed int simmediate : 16;
-};
-
-struct u_format { /* Unsigned immediate format (ori, xori, ...) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int uimmediate : 16;
-};
-
-struct c_format { /* Cache (>= R6000) format */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int c_op : 3;
- unsigned int cache : 2;
- unsigned int simmediate : 16;
-};
-
-struct r_format { /* Register format */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct p_format { /* Performance counter format (R10000) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct f_format { /* FPU register format */
- unsigned int opcode : 6;
- unsigned int : 1;
- unsigned int fmt : 4;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct ma_format { /* FPU multiply and add format (MIPS IV) */
- unsigned int opcode : 6;
- unsigned int fr : 5;
- unsigned int ft : 5;
- unsigned int fs : 5;
- unsigned int fd : 5;
- unsigned int func : 4;
- unsigned int fmt : 2;
-};
-
-struct b_format { /* BREAK and SYSCALL */
- unsigned int opcode:6;
- unsigned int code:20;
- unsigned int func:6;
-};
-
-#elif defined(__MIPSEL__)
-
-struct j_format { /* Jump format */
- unsigned int target : 26;
- unsigned int opcode : 6;
-};
-
-struct i_format { /* Immediate format */
- signed int simmediate : 16;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct u_format { /* Unsigned immediate format */
- unsigned int uimmediate : 16;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct c_format { /* Cache (>= R6000) format */
- unsigned int simmediate : 16;
- unsigned int cache : 2;
- unsigned int c_op : 3;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct r_format { /* Register format */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct p_format { /* Performance counter format (R10000) */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct f_format { /* FPU register format */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int fmt : 4;
- unsigned int : 1;
- unsigned int opcode : 6;
-};
-
-struct ma_format { /* FPU multiply and add format (MIPS IV) */
- unsigned int fmt : 2;
- unsigned int func : 4;
- unsigned int fd : 5;
- unsigned int fs : 5;
- unsigned int ft : 5;
- unsigned int fr : 5;
- unsigned int opcode : 6;
-};
-
-struct b_format { /* BREAK and SYSCALL */
- unsigned int func:6;
- unsigned int code:20;
- unsigned int opcode:6;
-};
-
-#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
-#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
-#endif
-
-union mips_instruction {
- unsigned int word;
- unsigned short halfword[2];
- unsigned char byte[4];
- struct j_format j_format;
- struct i_format i_format;
- struct u_format u_format;
- struct c_format c_format;
- struct r_format r_format;
- struct p_format p_format;
- struct f_format f_format;
- struct ma_format ma_format;
- struct b_format b_format;
-};
+#include <uapi/asm/inst.h>
/* HACHACHAHCAHC ... */
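A minimal sketch of decoding with the union/bitfield layout that now arrives via <uapi/asm/inst.h> (not from the patch; lw_op is the major opcode 0x23 from enum major_op above):

static inline int insn_is_lw(unsigned int word)
{
	union mips_instruction insn;

	insn.word = word;
	return insn.i_format.opcode == lw_op;
}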
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index ff2e0345e013..1be13727323f 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -7,7 +7,7 @@
* Copyright (C) 1994 - 2000, 06 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Author: Maciej W. Rozycki <macro@mips.com>
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
@@ -253,9 +253,9 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
- * ioremap_cachable - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
+ * ioremap_cachable - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
@@ -264,14 +264,14 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
* address.
*
* This version of ioremap ensures that the memory is marked cachable by
- * the CPU. Also enables full write-combining. Useful for some
+ * the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
#define ioremap_cachable(offset, size) \
__ioremap_mode((offset), (size), _page_cachable_default)
/*
- * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
+ * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
* requests a cachable mapping, ioremap_uncached_accelerated requests a
* mapping using the uncached accelerated mode which isn't supported on
* all processors.
@@ -298,7 +298,7 @@ static inline void iounmap(const volatile void __iomem *addr)
}
#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define war_octeon_io_reorder_wmb() wmb()
+#define war_octeon_io_reorder_wmb() wmb()
#else
#define war_octeon_io_reorder_wmb() do { } while (0)
#endif
@@ -317,7 +317,7 @@ static inline void pfx##write##bwlq(type val, \
\
__val = pfx##ioswab##bwlq(__mem, val); \
\
- if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
*__mem = __val; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
@@ -327,9 +327,9 @@ static inline void pfx##write##bwlq(type val, \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set mips3" "\t\t# __writeq""\n\t" \
- "dsll32 %L0, %L0, 0" "\n\t" \
- "dsrl32 %L0, %L0, 0" "\n\t" \
- "dsll32 %M0, %M0, 0" "\n\t" \
+ "dsll32 %L0, %L0, 0" "\n\t" \
+ "dsrl32 %L0, %L0, 0" "\n\t" \
+ "dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \
".set mips0" "\n" \
@@ -348,7 +348,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
- if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
@@ -356,9 +356,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set mips3" "\t\t# __readq" "\n\t" \
+ ".set mips3" "\t\t# __readq" "\n\t" \
"ld %L0, %1" "\n\t" \
- "dsra32 %M0, %L0, 0" "\n\t" \
+ "dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
".set mips0" "\n" \
: "=r" (__val) \
@@ -586,7 +586,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#else /* Sane hardware */
-#define dma_cache_wback_inv(start,size) \
+#define dma_cache_wback_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
do { (void) (start); (void) (size); } while (0)
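A minimal sketch of the usual ioremap pairing described in the comment block above, assuming the standard <linux/io.h> helpers (not from the patch; the physical address, window size and register offset are made up for illustration):

static int __init example_probe(void)
{
	void __iomem *regs = ioremap_nocache(0x1f000000, 0x100);

	if (!regs)
		return -ENOMEM;
	pr_info("id %08x\n", readl(regs));	/* hypothetical ID register */
	iounmap(regs);
	return 0;
}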
diff --git a/arch/mips/include/asm/ip32/crime.h b/arch/mips/include/asm/ip32/crime.h
index 7c36b0e5b1c6..16c94a27beba 100644
--- a/arch/mips/include/asm/ip32/crime.h
+++ b/arch/mips/include/asm/ip32/crime.h
@@ -74,7 +74,7 @@ struct sgi_crime {
#define CRIME_RE_IDLE_E_INT BIT(24)
#define CRIME_RE_EMPTY_L_INT BIT(25)
#define CRIME_RE_FULL_L_INT BIT(26)
-#define CRIME_RE_IDLE_L_INT BIT(27)
+#define CRIME_RE_IDLE_L_INT BIT(27)
#define CRIME_SOFT0_INT BIT(28)
#define CRIME_SOFT1_INT BIT(29)
#define CRIME_SOFT2_INT BIT(30)
@@ -118,7 +118,7 @@ struct sgi_crime {
#define CRIME_MEM_REF_COUNTER_MASK 0x3ff /* 10bit */
volatile unsigned long mem_error_stat;
-#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
+#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
#define CRIME_MEM_ERROR_MACE_ID 0x0000007f
#define CRIME_MEM_ERROR_MACE_ACCESS 0x00000080
#define CRIME_MEM_ERROR_RE_ID 0x00007f00
@@ -134,8 +134,8 @@ struct sgi_crime {
#define CRIME_MEM_ERROR_MEM_ECC_RD 0x00800000
#define CRIME_MEM_ERROR_MEM_ECC_RMW 0x01000000
#define CRIME_MEM_ERROR_INV 0x0e000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
#define CRIME_MEM_ERROR_INV_MEM_ADDR_RMW 0x08000000
volatile unsigned long mem_error_addr;
diff --git a/arch/mips/include/asm/ip32/ip32_ints.h b/arch/mips/include/asm/ip32/ip32_ints.h
index 85bc5302bce0..72e3368de111 100644
--- a/arch/mips/include/asm/ip32/ip32_ints.h
+++ b/arch/mips/include/asm/ip32/ip32_ints.h
@@ -13,7 +13,7 @@
/*
* This list reflects the assignment of interrupt numbers to
- * interrupting events. Order is fairly irrelevant to handling
+ * interrupting events. Order is fairly irrelevant to handling
* priority. This differs from irix.
*/
diff --git a/arch/mips/include/asm/ip32/mace.h b/arch/mips/include/asm/ip32/mace.h
index c523123df380..253ed7ea80be 100644
--- a/arch/mips/include/asm/ip32/mace.h
+++ b/arch/mips/include/asm/ip32/mace.h
@@ -250,12 +250,12 @@ struct mace_ps2 {
* -> drivers/i2c/algos/i2c-algo-sgi.c */
struct mace_i2c {
volatile unsigned long config;
-#define MACEI2C_RESET BIT(0)
-#define MACEI2C_FAST BIT(1)
-#define MACEI2C_DATA_OVERRIDE BIT(2)
-#define MACEI2C_CLOCK_OVERRIDE BIT(3)
-#define MACEI2C_DATA_STATUS BIT(4)
-#define MACEI2C_CLOCK_STATUS BIT(5)
+#define MACEI2C_RESET BIT(0)
+#define MACEI2C_FAST BIT(1)
+#define MACEI2C_DATA_OVERRIDE BIT(2)
+#define MACEI2C_CLOCK_OVERRIDE BIT(3)
+#define MACEI2C_DATA_STATUS BIT(4)
+#define MACEI2C_CLOCK_STATUS BIT(5)
volatile unsigned long control;
volatile unsigned long data;
};
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 78dbb8a86da2..7bc2cdb35057 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -32,7 +32,7 @@ struct irqaction;
extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask);
+ unsigned long hwmask);
static inline void smtc_im_ack_irq(unsigned int irq)
{
@@ -60,7 +60,7 @@ extern void smtc_forward_irq(struct irq_data *d);
* if option is enabled.
*
* Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
+ * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
* used a "fast path" per-IRQ-descriptor cache of affinity information
* to reduce latency. As there is a project afoot to optimize the
* cpumask implementations, this version is optimistically assuming
@@ -133,7 +133,7 @@ extern void free_irqno(unsigned int irq);
/*
* Before R2 the timer and performance counter interrupts were both fixed to
- * IE7. Since R2 their number has to be read from the c0_intctl register.
+ * IE7. Since R2 their number has to be read from the c0_intctl register.
*/
#define CP0_LEGACY_COMPARE_IRQ 7
#define CP0_LEGACY_PERFCNT_IRQ 7
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index ef6a07cddb23..3f11fdb3ed8c 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -17,4 +17,10 @@ extern void mips_cpu_irq_init(void);
extern void rm7k_cpu_irq_init(void);
extern void rm9k_cpu_irq_init(void);
+#ifdef CONFIG_IRQ_DOMAIN
+struct device_node;
+extern int mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent);
+#endif
+
#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/isadep.h b/arch/mips/include/asm/isadep.h
index 24c6cda79377..b4af6eb24ab9 100644
--- a/arch/mips/include/asm/isadep.h
+++ b/arch/mips/include/asm/isadep.h
@@ -18,7 +18,7 @@
* kernel or user mode? (CP0_STATUS)
*/
#define KU_MASK 0x08
-#define KU_USER 0x08
+#define KU_USER 0x08
#define KU_KERN 0x00
#else
@@ -26,7 +26,7 @@
* kernel or user mode?
*/
#define KU_MASK 0x18
-#define KU_USER 0x10
+#define KU_USER 0x10
#define KU_KERN 0x00
#endif
diff --git a/arch/mips/include/asm/jazz.h b/arch/mips/include/asm/jazz.h
index 83f449dec95e..a61970d01a81 100644
--- a/arch/mips/include/asm/jazz.h
+++ b/arch/mips/include/asm/jazz.h
@@ -16,7 +16,7 @@
* instead of 0xe0000000.
*/
-#define JAZZ_LOCAL_IO_SPACE 0xe0000000
+#define JAZZ_LOCAL_IO_SPACE 0xe0000000
/*
* Revision numbers in PICA_ASIC_REVISION
@@ -25,24 +25,24 @@
* 0xf0000001 - Rev2
* 0xf0000002 - Rev3
*/
-#define PICA_ASIC_REVISION 0xe0000008
+#define PICA_ASIC_REVISION 0xe0000008
/*
* The segments of the seven segment LED are mapped
* to the control bits as follows:
*
- * (7)
- * ---------
- * | |
- * (2) | | (6)
- * | (1) |
- * ---------
- * | |
- * (3) | | (5)
- * | (4) |
- * --------- . (0)
+ * (7)
+ * ---------
+ * | |
+ * (2) | | (6)
+ * | (1) |
+ * ---------
+ * | |
+ * (3) | | (5)
+ * | (4) |
+ * --------- . (0)
*/
-#define PICA_LED 0xe000f000
+#define PICA_LED 0xe000f000
/*
* Some characters for the LED control registers
@@ -51,24 +51,24 @@
* control each of the seven segments and the dot independently.
* It's only a toy, anyway...
*/
-#define LED_DOT 0x01
-#define LED_SPACE 0x00
-#define LED_0 0xfc
-#define LED_1 0x60
-#define LED_2 0xda
-#define LED_3 0xf2
-#define LED_4 0x66
-#define LED_5 0xb6
-#define LED_6 0xbe
-#define LED_7 0xe0
-#define LED_8 0xfe
-#define LED_9 0xf6
-#define LED_A 0xee
-#define LED_b 0x3e
-#define LED_C 0x9c
-#define LED_d 0x7a
-#define LED_E 0x9e
-#define LED_F 0x8e
+#define LED_DOT 0x01
+#define LED_SPACE 0x00
+#define LED_0 0xfc
+#define LED_1 0x60
+#define LED_2 0xda
+#define LED_3 0xf2
+#define LED_4 0x66
+#define LED_5 0xb6
+#define LED_6 0xbe
+#define LED_7 0xe0
+#define LED_8 0xfe
+#define LED_9 0xf6
+#define LED_A 0xee
+#define LED_b 0x3e
+#define LED_C 0x9c
+#define LED_d 0x7a
+#define LED_E 0x9e
+#define LED_F 0x8e
#ifndef __ASSEMBLY__
@@ -96,9 +96,9 @@ static __inline__ void pica_set_led(unsigned int bits)
* This address is just a guess and seems to differ from
* other mips machines such as RC3xxx...
*/
-#define JAZZ_KEYBOARD_ADDRESS 0xe0005000
-#define JAZZ_KEYBOARD_DATA 0xe0005000
-#define JAZZ_KEYBOARD_COMMAND 0xe0005001
+#define JAZZ_KEYBOARD_ADDRESS 0xe0005000
+#define JAZZ_KEYBOARD_DATA 0xe0005000
+#define JAZZ_KEYBOARD_COMMAND 0xe0005001
#ifndef __ASSEMBLY__
@@ -119,28 +119,28 @@ typedef struct {
/*
* For now. Needs to be changed for RC3xxx support. See below.
*/
-#define keyboard_hardware jazz_keyboard_hardware
+#define keyboard_hardware jazz_keyboard_hardware
#endif /* !__ASSEMBLY__ */
/*
* i8042 keyboard controller for most other Mips machines.
*/
-#define MIPS_KEYBOARD_ADDRESS 0xb9005000
-#define MIPS_KEYBOARD_DATA 0xb9005003
-#define MIPS_KEYBOARD_COMMAND 0xb9005007
+#define MIPS_KEYBOARD_ADDRESS 0xb9005000
+#define MIPS_KEYBOARD_DATA 0xb9005003
+#define MIPS_KEYBOARD_COMMAND 0xb9005007
/*
* Serial and parallel ports (WD 16C552) on the Mips JAZZ
*/
-#define JAZZ_SERIAL1_BASE (unsigned int)0xe0006000
-#define JAZZ_SERIAL2_BASE (unsigned int)0xe0007000
-#define JAZZ_PARALLEL_BASE (unsigned int)0xe0008000
+#define JAZZ_SERIAL1_BASE (unsigned int)0xe0006000
+#define JAZZ_SERIAL2_BASE (unsigned int)0xe0007000
+#define JAZZ_PARALLEL_BASE (unsigned int)0xe0008000
/*
* Dummy Device Address. Used in jazzdma.c
*/
-#define JAZZ_DUMMY_DEVICE 0xe000d000
+#define JAZZ_DUMMY_DEVICE 0xe000d000
/*
* JAZZ timer registers and interrupt no.
@@ -148,8 +148,8 @@ typedef struct {
* cpu level 6, but to keep compatibility with PC stuff
* it is remapped to vector 0. See arch/mips/kernel/entry.S.
*/
-#define JAZZ_TIMER_INTERVAL 0xe0000228
-#define JAZZ_TIMER_REGISTER 0xe0000230
+#define JAZZ_TIMER_INTERVAL 0xe0000228
+#define JAZZ_TIMER_REGISTER 0xe0000230
/*
* DRAM configuration register
@@ -176,13 +176,13 @@ typedef struct {
#endif
#endif /* !__ASSEMBLY__ */
-#define PICA_DRAM_CONFIG 0xe00fffe0
+#define PICA_DRAM_CONFIG 0xe00fffe0
/*
* JAZZ interrupt control registers
*/
-#define JAZZ_IO_IRQ_SOURCE 0xe0010000
-#define JAZZ_IO_IRQ_ENABLE 0xe0010002
+#define JAZZ_IO_IRQ_SOURCE 0xe0010000
+#define JAZZ_IO_IRQ_ENABLE 0xe0010002
/*
* JAZZ Interrupt Level definitions
@@ -190,20 +190,20 @@ typedef struct {
* This is somewhat broken. For reasons which nobody can remember anymore
* we remap the Jazz interrupts to the usual ISA style interrupt numbers.
*/
-#define JAZZ_IRQ_START 24
-#define JAZZ_IRQ_END (24 + 9)
-#define JAZZ_PARALLEL_IRQ (JAZZ_IRQ_START + 0)
-#define JAZZ_FLOPPY_IRQ (JAZZ_IRQ_START + 1)
-#define JAZZ_SOUND_IRQ (JAZZ_IRQ_START + 2)
-#define JAZZ_VIDEO_IRQ (JAZZ_IRQ_START + 3)
-#define JAZZ_ETHERNET_IRQ (JAZZ_IRQ_START + 4)
-#define JAZZ_SCSI_IRQ (JAZZ_IRQ_START + 5)
-#define JAZZ_KEYBOARD_IRQ (JAZZ_IRQ_START + 6)
-#define JAZZ_MOUSE_IRQ (JAZZ_IRQ_START + 7)
-#define JAZZ_SERIAL1_IRQ (JAZZ_IRQ_START + 8)
-#define JAZZ_SERIAL2_IRQ (JAZZ_IRQ_START + 9)
-
-#define JAZZ_TIMER_IRQ (MIPS_CPU_IRQ_BASE+6)
+#define JAZZ_IRQ_START 24
+#define JAZZ_IRQ_END (24 + 9)
+#define JAZZ_PARALLEL_IRQ (JAZZ_IRQ_START + 0)
+#define JAZZ_FLOPPY_IRQ (JAZZ_IRQ_START + 1)
+#define JAZZ_SOUND_IRQ (JAZZ_IRQ_START + 2)
+#define JAZZ_VIDEO_IRQ (JAZZ_IRQ_START + 3)
+#define JAZZ_ETHERNET_IRQ (JAZZ_IRQ_START + 4)
+#define JAZZ_SCSI_IRQ (JAZZ_IRQ_START + 5)
+#define JAZZ_KEYBOARD_IRQ (JAZZ_IRQ_START + 6)
+#define JAZZ_MOUSE_IRQ (JAZZ_IRQ_START + 7)
+#define JAZZ_SERIAL1_IRQ (JAZZ_IRQ_START + 8)
+#define JAZZ_SERIAL2_IRQ (JAZZ_IRQ_START + 9)
+
+#define JAZZ_TIMER_IRQ (MIPS_CPU_IRQ_BASE+6)
/*
@@ -211,46 +211,46 @@ typedef struct {
* Note: Channels 4...7 are not used with respect to the Acer PICA-61
* chipset which does not provide these DMA channels.
*/
-#define JAZZ_SCSI_DMA 0 /* SCSI */
-#define JAZZ_FLOPPY_DMA 1 /* FLOPPY */
-#define JAZZ_AUDIOL_DMA 2 /* AUDIO L */
-#define JAZZ_AUDIOR_DMA 3 /* AUDIO R */
+#define JAZZ_SCSI_DMA 0 /* SCSI */
+#define JAZZ_FLOPPY_DMA 1 /* FLOPPY */
+#define JAZZ_AUDIOL_DMA 2 /* AUDIO L */
+#define JAZZ_AUDIOR_DMA 3 /* AUDIO R */
/*
* JAZZ R4030 MCT_ADR chip (DMA controller)
* Note: Virtual Addresses !
*/
#define JAZZ_R4030_CONFIG 0xE0000000 /* R4030 config register */
-#define JAZZ_R4030_REVISION 0xE0000008 /* same as PICA_ASIC_REVISION */
+#define JAZZ_R4030_REVISION 0xE0000008 /* same as PICA_ASIC_REVISION */
#define JAZZ_R4030_INV_ADDR 0xE0000010 /* Invalid Address register */
-#define JAZZ_R4030_TRSTBL_BASE 0xE0000018 /* Translation Table Base */
-#define JAZZ_R4030_TRSTBL_LIM 0xE0000020 /* Translation Table Limit */
-#define JAZZ_R4030_TRSTBL_INV 0xE0000028 /* Translation Table Invalidate */
+#define JAZZ_R4030_TRSTBL_BASE 0xE0000018 /* Translation Table Base */
+#define JAZZ_R4030_TRSTBL_LIM 0xE0000020 /* Translation Table Limit */
+#define JAZZ_R4030_TRSTBL_INV 0xE0000028 /* Translation Table Invalidate */
-#define JAZZ_R4030_CACHE_MTNC 0xE0000030 /* Cache Maintenance */
-#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038 /* Remote Failed Address */
-#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040 /* Memory Failed Address */
+#define JAZZ_R4030_CACHE_MTNC 0xE0000030 /* Cache Maintenance */
+#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038 /* Remote Failed Address */
+#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040 /* Memory Failed Address */
-#define JAZZ_R4030_CACHE_PTAG 0xE0000048 /* I/O Cache Physical Tag */
-#define JAZZ_R4030_CACHE_LTAG 0xE0000050 /* I/O Cache Logical Tag */
-#define JAZZ_R4030_CACHE_BMASK 0xE0000058 /* I/O Cache Byte Mask */
-#define JAZZ_R4030_CACHE_BWIN 0xE0000060 /* I/O Cache Buffer Window */
+#define JAZZ_R4030_CACHE_PTAG 0xE0000048 /* I/O Cache Physical Tag */
+#define JAZZ_R4030_CACHE_LTAG 0xE0000050 /* I/O Cache Logical Tag */
+#define JAZZ_R4030_CACHE_BMASK 0xE0000058 /* I/O Cache Byte Mask */
+#define JAZZ_R4030_CACHE_BWIN 0xE0000060 /* I/O Cache Buffer Window */
/*
* Remote Speed Registers.
*
- * 0: free, 1: Ethernet, 2: SCSI, 3: Floppy,
- * 4: RTC, 5: Kb./Mouse 6: serial 1, 7: serial 2,
- * 8: parallel, 9: NVRAM, 10: CPU, 11: PROM,
+ * 0: free, 1: Ethernet, 2: SCSI, 3: Floppy,
+ * 4: RTC, 5: Kb./Mouse 6: serial 1, 7: serial 2,
+ * 8: parallel, 9: NVRAM, 10: CPU, 11: PROM,
* 12: reserved, 13: free, 14: 7seg LED, 15: ???
*/
#define JAZZ_R4030_REM_SPEED 0xE0000070 /* 16 Remote Speed Registers */
/* 0xE0000070,78,80... 0xE00000E8 */
-#define JAZZ_R4030_IRQ_ENABLE 0xE00000E8 /* Internal Interrupt Enable */
-#define JAZZ_R4030_INVAL_ADDR 0xE0000010 /* Invalid address Register */
-#define JAZZ_R4030_IRQ_SOURCE 0xE0000200 /* Interrupt Source Register */
-#define JAZZ_R4030_I386_ERROR 0xE0000208 /* i386/EISA Bus Error */
+#define JAZZ_R4030_IRQ_ENABLE 0xE00000E8 /* Internal Interrupt Enable */
+#define JAZZ_R4030_INVAL_ADDR 0xE0000010 /* Invalid address Register */
+#define JAZZ_R4030_IRQ_SOURCE 0xE0000200 /* Interrupt Source Register */
+#define JAZZ_R4030_I386_ERROR 0xE0000208 /* i386/EISA Bus Error */
/*
* Virtual (E)ISA controller address
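A minimal sketch of driving the PICA seven-segment display with the encodings above (not from the patch; pica_set_led() is the inline helper declared earlier in this header):

static const unsigned char led_hex[16] = {
	LED_0, LED_1, LED_2, LED_3, LED_4, LED_5, LED_6, LED_7,
	LED_8, LED_9, LED_A, LED_b, LED_C, LED_d, LED_E, LED_F,
};

static void pica_show_nibble(unsigned int v)
{
	pica_set_led(led_hex[v & 0xf] | LED_DOT);	/* digit plus the dot */
}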
diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
index 8bb37bba68f0..2cefc3c47241 100644
--- a/arch/mips/include/asm/jazzdma.h
+++ b/arch/mips/include/asm/jazzdma.h
@@ -10,7 +10,7 @@
extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
extern int vdma_free(unsigned long laddr);
extern int vdma_remap(unsigned long laddr, unsigned long paddr,
- unsigned long size);
+ unsigned long size);
extern unsigned long vdma_phys2log(unsigned long paddr);
extern unsigned long vdma_log2phys(unsigned long laddr);
extern void vdma_stats(void); /* for debugging only */
@@ -35,14 +35,14 @@ extern int vdma_get_enable(int channel);
* Macros to get page no. and offset of a given address
* Note that VDMA_PAGE() works for physical addresses only
*/
-#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
-#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
+#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
+#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
/*
* error code returned by vdma_alloc()
* (See also arch/mips/kernel/jazzdma.c)
*/
-#define VDMA_ERROR 0xffffffff
+#define VDMA_ERROR 0xffffffff
/*
* VDMA pagetable entry description
@@ -59,37 +59,37 @@ typedef volatile struct VDMA_PGTBL_ENTRY {
*/
#define JAZZ_R4030_CHNL_MODE 0xE0000100 /* 8 DMA Channel Mode Registers, */
/* 0xE0000100,120,140... */
-#define JAZZ_R4030_CHNL_ENABLE 0xE0000108 /* 8 DMA Channel Enable Regs, */
+#define JAZZ_R4030_CHNL_ENABLE 0xE0000108 /* 8 DMA Channel Enable Regs, */
/* 0xE0000108,128,148... */
-#define JAZZ_R4030_CHNL_COUNT 0xE0000110 /* 8 DMA Channel Byte Cnt Regs, */
+#define JAZZ_R4030_CHNL_COUNT 0xE0000110 /* 8 DMA Channel Byte Cnt Regs, */
/* 0xE0000110,130,150... */
#define JAZZ_R4030_CHNL_ADDR 0xE0000118 /* 8 DMA Channel Address Regs, */
/* 0xE0000118,138,158... */
/* channel enable register bits */
-#define R4030_CHNL_ENABLE (1<<0)
-#define R4030_CHNL_WRITE (1<<1)
-#define R4030_TC_INTR (1<<8)
-#define R4030_MEM_INTR (1<<9)
-#define R4030_ADDR_INTR (1<<10)
+#define R4030_CHNL_ENABLE (1<<0)
+#define R4030_CHNL_WRITE (1<<1)
+#define R4030_TC_INTR (1<<8)
+#define R4030_MEM_INTR (1<<9)
+#define R4030_ADDR_INTR (1<<10)
/*
* Channel mode register bits
*/
-#define R4030_MODE_ATIME_40 (0) /* device access time on remote bus */
-#define R4030_MODE_ATIME_80 (1)
-#define R4030_MODE_ATIME_120 (2)
-#define R4030_MODE_ATIME_160 (3)
-#define R4030_MODE_ATIME_200 (4)
-#define R4030_MODE_ATIME_240 (5)
-#define R4030_MODE_ATIME_280 (6)
-#define R4030_MODE_ATIME_320 (7)
-#define R4030_MODE_WIDTH_8 (1<<3) /* device data bus width */
-#define R4030_MODE_WIDTH_16 (2<<3)
-#define R4030_MODE_WIDTH_32 (3<<3)
-#define R4030_MODE_INTR_EN (1<<5)
-#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
-#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
+#define R4030_MODE_ATIME_40 (0) /* device access time on remote bus */
+#define R4030_MODE_ATIME_80 (1)
+#define R4030_MODE_ATIME_120 (2)
+#define R4030_MODE_ATIME_160 (3)
+#define R4030_MODE_ATIME_200 (4)
+#define R4030_MODE_ATIME_240 (5)
+#define R4030_MODE_ATIME_280 (6)
+#define R4030_MODE_ATIME_320 (7)
+#define R4030_MODE_WIDTH_8 (1<<3) /* device data bus width */
+#define R4030_MODE_WIDTH_16 (2<<3)
+#define R4030_MODE_WIDTH_32 (3<<3)
+#define R4030_MODE_INTR_EN (1<<5)
+#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
+#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
#endif /* _ASM_JAZZDMA_H */
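A minimal sketch of the VDMA helpers above (not from the patch; pr_debug() stands in for whatever logging the caller uses):

static unsigned long example_vdma_map(unsigned long paddr, unsigned long size)
{
	unsigned long laddr = vdma_alloc(paddr, size);

	if (laddr == VDMA_ERROR)
		return VDMA_ERROR;
	/* VDMA_PAGE()/VDMA_OFFSET() operate on physical addresses */
	pr_debug("page %u offset %u\n", VDMA_PAGE(paddr), VDMA_OFFSET(paddr));
	return laddr;
}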
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
index 58e91ed0388f..c1909dcada39 100644
--- a/arch/mips/include/asm/kmap_types.h
+++ b/arch/mips/include/asm/kmap_types.h
@@ -2,7 +2,7 @@
#define _ASM_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
+#define __WITH_KM_FENCE
#endif
#include <asm-generic/kmap_types.h>
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 1fbbca01e681..daba1f9a4f79 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -29,7 +29,7 @@
#include <asm/kdebug.h>
#include <asm/inst.h>
-#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define __ARCH_WANT_KPROBES_INSN_SLOT
struct kprobe;
struct pt_regs;
diff --git a/arch/mips/include/asm/lasat/eeprom.h b/arch/mips/include/asm/lasat/eeprom.h
index 3dac203697fa..d918b822e376 100644
--- a/arch/mips/include/asm/lasat/eeprom.h
+++ b/arch/mips/include/asm/lasat/eeprom.h
@@ -1,12 +1,12 @@
#include <asm/addrspace.h>
/* lasat 100 */
-#define AT93C_REG_100 KSEG1ADDR(0x1c810000)
-#define AT93C_RDATA_REG_100 AT93C_REG_100
-#define AT93C_RDATA_SHIFT_100 4
-#define AT93C_WDATA_SHIFT_100 4
-#define AT93C_CS_M_100 (1 << 5)
-#define AT93C_CLK_M_100 (1 << 3)
+#define AT93C_REG_100 KSEG1ADDR(0x1c810000)
+#define AT93C_RDATA_REG_100 AT93C_REG_100
+#define AT93C_RDATA_SHIFT_100 4
+#define AT93C_WDATA_SHIFT_100 4
+#define AT93C_CS_M_100 (1 << 5)
+#define AT93C_CLK_M_100 (1 << 3)
/* lasat 200 */
#define AT93C_REG_200 KSEG1ADDR(0x11000000)
diff --git a/arch/mips/include/asm/lasat/lasat.h b/arch/mips/include/asm/lasat/lasat.h
index e8ff70f80e13..9e32b4da99e2 100644
--- a/arch/mips/include/asm/lasat/lasat.h
+++ b/arch/mips/include/asm/lasat/lasat.h
@@ -100,7 +100,7 @@ struct lasat_eeprom_struct_pre7 {
/* Configuration descriptor encoding - see the doc for details */
-#define LASAT_W0_DSCTYPE(v) (((v)) & 0xf)
+#define LASAT_W0_DSCTYPE(v) (((v)) & 0xf)
#define LASAT_W0_BMID(v) (((v) >> 0x04) & 0xf)
#define LASAT_W0_CPUTYPE(v) (((v) >> 0x08) & 0xf)
#define LASAT_W0_BUSSPEED(v) (((v) >> 0x0c) & 0xf)
@@ -109,7 +109,7 @@ struct lasat_eeprom_struct_pre7 {
#define LASAT_W0_SDRAMBANKS(v) (((v) >> 0x18) & 0xf)
#define LASAT_W0_L2CACHE(v) (((v) >> 0x1c) & 0xf)
-#define LASAT_W1_EDHAC(v) (((v)) & 0xf)
+#define LASAT_W1_EDHAC(v) (((v)) & 0xf)
#define LASAT_W1_HIFN(v) (((v) >> 0x04) & 0x1)
#define LASAT_W1_ISDN(v) (((v) >> 0x05) & 0x1)
#define LASAT_W1_IDE(v) (((v) >> 0x06) & 0x1)
@@ -239,7 +239,7 @@ static inline void lasat_ndelay(unsigned int ns)
__delay(ns / lasat_ndelay_divider);
}
-#define IS_LASAT_200() (current_cpu_data.cputype == CPU_R5000)
+#define IS_LASAT_200() (current_cpu_data.cputype == CPU_R5000)
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
@@ -247,11 +247,11 @@ static inline void lasat_ndelay(unsigned int ns)
#define LASAT_SERVICEMODE_MAGIC_2 0xfedeabba
/* Lasat 100 boards */
-#define LASAT_GT_BASE (KSEG1ADDR(0x14000000))
+#define LASAT_GT_BASE (KSEG1ADDR(0x14000000))
/* Lasat 200 boards */
-#define Vrc5074_PHYS_BASE 0x1fa00000
-#define Vrc5074_BASE (KSEG1ADDR(Vrc5074_PHYS_BASE))
-#define PCI_WINDOW1 0x1a000000
+#define Vrc5074_PHYS_BASE 0x1fa00000
+#define Vrc5074_BASE (KSEG1ADDR(Vrc5074_PHYS_BASE))
+#define PCI_WINDOW1 0x1a000000
#endif /* _LASAT_H */
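A minimal sketch of decoding configuration word 0 with the accessors above (not from the patch; w0 would normally come from the EEPROM image parsed elsewhere):

static void lasat_show_w0(unsigned int w0)
{
	pr_info("dsctype %u bmid %u cputype %u busspeed %u\n",
		LASAT_W0_DSCTYPE(w0), LASAT_W0_BMID(w0),
		LASAT_W0_CPUTYPE(w0), LASAT_W0_BUSSPEED(w0));
}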
diff --git a/arch/mips/include/asm/lasat/serial.h b/arch/mips/include/asm/lasat/serial.h
index 1c37d70579b8..a2f6c7a9cfe8 100644
--- a/arch/mips/include/asm/lasat/serial.h
+++ b/arch/mips/include/asm/lasat/serial.h
@@ -1,7 +1,7 @@
#include <asm/lasat/lasat.h>
/* Lasat 100 boards serial configuration */
-#define LASAT_BASE_BAUD_100 (7372800 / 16)
+#define LASAT_BASE_BAUD_100 (7372800 / 16)
#define LASAT_UART_REGS_BASE_100 0x1c8b0000
#define LASAT_UART_REGS_SHIFT_100 2
#define LASATINT_UART_100 16
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 94fde8d0fac1..d44622cd74be 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -15,10 +15,10 @@ typedef struct
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
-#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
-#define local_add(i, l) atomic_long_add((i), (&(l)->a))
-#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
diff --git a/arch/mips/include/asm/m48t37.h b/arch/mips/include/asm/m48t37.h
index cabf86264f36..e6eaf5339e4e 100644
--- a/arch/mips/include/asm/m48t37.h
+++ b/arch/mips/include/asm/m48t37.h
@@ -9,7 +9,7 @@
extern spinlock_t rtc_lock;
struct m48t37_rtc {
- volatile u8 pad[0x7ff0]; /* NVRAM */
+ volatile u8 pad[0x7ff0]; /* NVRAM */
volatile u8 flags;
volatile u8 century;
volatile u8 alarm_sec;
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index 07d3fadb2443..a47ea0c85248 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -40,9 +40,9 @@
#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
-#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
+#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
-#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
+#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
@@ -52,7 +52,7 @@
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
/* Titan registers */
-#define TITAN_REGS_ESWITCH_BASE (0x08640000)
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
@@ -72,9 +72,9 @@
/* GPIO control registers */
#define AR7_GPIO_INPUT 0x0
-#define AR7_GPIO_OUTPUT 0x4
+#define AR7_GPIO_OUTPUT 0x4
#define AR7_GPIO_DIR 0x8
-#define AR7_GPIO_ENABLE 0xc
+#define AR7_GPIO_ENABLE 0xc
#define TITAN_GPIO_INPUT_0 0x0
#define TITAN_GPIO_INPUT_1 0x4
#define TITAN_GPIO_OUTPUT_0 0x8
@@ -88,10 +88,10 @@
#define AR7_CHIP_7200 0x2b
#define AR7_CHIP_7300 0x05
#define AR7_CHIP_TITAN 0x07
-#define TITAN_CHIP_1050 0x0f
-#define TITAN_CHIP_1055 0x0e
-#define TITAN_CHIP_1056 0x0d
-#define TITAN_CHIP_1060 0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
/* Interrupts */
#define AR7_IRQ_UART0 15
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
index 39e9757e3d93..7ad10e379e2b 100644
--- a/arch/mips/include/asm/mach-ar7/irq.h
+++ b/arch/mips/include/asm/mach-ar7/irq.h
@@ -9,7 +9,7 @@
#ifndef __ASM_AR7_IRQ_H
#define __ASM_AR7_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index a5e0f17ea77c..b86a1253a5bf 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -34,18 +34,44 @@
#define AR71XX_UART_SIZE 0x100
#define AR71XX_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
#define AR71XX_USB_CTRL_SIZE 0x100
-#define AR71XX_GPIO_BASE (AR71XX_APB_BASE + 0x00040000)
-#define AR71XX_GPIO_SIZE 0x100
+#define AR71XX_GPIO_BASE (AR71XX_APB_BASE + 0x00040000)
+#define AR71XX_GPIO_SIZE 0x100
#define AR71XX_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
#define AR71XX_PLL_SIZE 0x100
#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_PCI_MEM_BASE 0x10000000
+#define AR71XX_PCI_MEM_SIZE 0x07000000
+
+#define AR71XX_PCI_WIN0_OFFS 0x10000000
+#define AR71XX_PCI_WIN1_OFFS 0x11000000
+#define AR71XX_PCI_WIN2_OFFS 0x12000000
+#define AR71XX_PCI_WIN3_OFFS 0x13000000
+#define AR71XX_PCI_WIN4_OFFS 0x14000000
+#define AR71XX_PCI_WIN5_OFFS 0x15000000
+#define AR71XX_PCI_WIN6_OFFS 0x16000000
+#define AR71XX_PCI_WIN7_OFFS 0x07000000
+
+#define AR71XX_PCI_CFG_BASE \
+ (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE 0x100
+
#define AR7240_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
#define AR7240_USB_CTRL_SIZE 0x100
#define AR7240_OHCI_BASE 0x1b000000
#define AR7240_OHCI_SIZE 0x1000
+#define AR724X_PCI_MEM_BASE 0x10000000
+#define AR724X_PCI_MEM_SIZE 0x04000000
+
+#define AR724X_PCI_CFG_BASE 0x14000000
+#define AR724X_PCI_CFG_SIZE 0x1000
+#define AR724X_PCI_CRP_BASE (AR71XX_APB_BASE + 0x000c0000)
+#define AR724X_PCI_CRP_SIZE 0x1000
+#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE 0x100
+
#define AR724X_EHCI_BASE 0x1b000000
#define AR724X_EHCI_SIZE 0x1000
@@ -68,6 +94,25 @@
#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
#define AR934X_SRIF_SIZE 0x1000
+#define QCA955X_PCI_MEM_BASE0 0x10000000
+#define QCA955X_PCI_MEM_BASE1 0x12000000
+#define QCA955X_PCI_MEM_SIZE 0x02000000
+#define QCA955X_PCI_CFG_BASE0 0x14000000
+#define QCA955X_PCI_CFG_BASE1 0x16000000
+#define QCA955X_PCI_CFG_SIZE 0x1000
+#define QCA955X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA955X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA955X_PCI_CRP_SIZE 0x1000
+#define QCA955X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA955X_PCI_CTRL_SIZE 0x100
+
+#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA955X_WMAC_SIZE 0x20000
+#define QCA955X_EHCI0_BASE 0x1b000000
+#define QCA955X_EHCI1_BASE 0x1b400000
+#define QCA955X_EHCI_SIZE 0x1000
+
/*
* DDR_CTRL block
*/
@@ -199,6 +244,41 @@
#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define QCA955X_PLL_CPU_CONFIG_REG 0x00
+#define QCA955X_PLL_DDR_CONFIG_REG 0x04
+#define QCA955X_PLL_CLK_CTRL_REG 0x08
+
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA955X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA955X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA955X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
/*
* USB_CONFIG block
*/
@@ -238,6 +318,10 @@
#define AR934X_RESET_REG_BOOTSTRAP 0xb0
#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+#define QCA955X_RESET_REG_RESET_MODULE 0x1c
+#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -312,9 +396,11 @@
#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
-#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+
#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
@@ -333,6 +419,37 @@
AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
AR934X_PCIE_WMAC_INT_PCIE_RC3)
+#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA955X_EXT_INT_WMAC_TX BIT(1)
+#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA955X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA955X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA955X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA955X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA955X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA955X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA955X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA955X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA955X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA955X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA955X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA955X_EXT_INT_USB1 BIT(24)
+#define QCA955X_EXT_INT_USB2 BIT(28)
+
+#define QCA955X_EXT_INT_WMAC_ALL \
+ (QCA955X_EXT_INT_WMAC_MISC | QCA955X_EXT_INT_WMAC_TX | \
+ QCA955X_EXT_INT_WMAC_RXLP | QCA955X_EXT_INT_WMAC_RXHP)
+
+#define QCA955X_EXT_INT_PCIE_RC1_ALL \
+ (QCA955X_EXT_INT_PCIE_RC1 | QCA955X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT1 | QCA955X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA955X_EXT_INT_PCIE_RC2_ALL \
+ (QCA955X_EXT_INT_PCIE_RC2 | QCA955X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -344,6 +461,8 @@
#define REV_ID_MAJOR_AR9341 0x0120
#define REV_ID_MAJOR_AR9342 0x1120
#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9556 0x0130
+#define REV_ID_MAJOR_QCA9558 0x1130
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -362,7 +481,9 @@
#define AR724X_REV_ID_REVISION_MASK 0x3
-#define AR934X_REV_ID_REVISION_MASK 0xf
+#define AR934X_REV_ID_REVISION_MASK 0xf
+
+#define QCA955X_REV_ID_REVISION_MASK 0xf
/*
* SPI block
@@ -401,12 +522,15 @@
#define AR71XX_GPIO_REG_INT_ENABLE 0x24
#define AR71XX_GPIO_REG_FUNC 0x28
+#define AR934X_GPIO_REG_FUNC 0x6c
+
#define AR71XX_GPIO_COUNT 16
#define AR7240_GPIO_COUNT 18
#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
+#define QCA955X_GPIO_COUNT 24
/*
* SRIF block
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index 52730555937f..c2917b39966b 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -26,14 +26,14 @@
#define AR933X_UART_CS_PARITY_S 0
#define AR933X_UART_CS_PARITY_M 0x3
-#define AR933X_UART_CS_PARITY_NONE 0
-#define AR933X_UART_CS_PARITY_ODD 1
-#define AR933X_UART_CS_PARITY_EVEN 2
+#define AR933X_UART_CS_PARITY_NONE 0
+#define AR933X_UART_CS_PARITY_ODD 1
+#define AR933X_UART_CS_PARITY_EVEN 2
#define AR933X_UART_CS_IF_MODE_S 2
#define AR933X_UART_CS_IF_MODE_M 0x3
-#define AR933X_UART_CS_IF_MODE_NONE 0
-#define AR933X_UART_CS_IF_MODE_DTE 1
-#define AR933X_UART_CS_IF_MODE_DCE 2
+#define AR933X_UART_CS_IF_MODE_NONE 0
+#define AR933X_UART_CS_IF_MODE_DTE 1
+#define AR933X_UART_CS_IF_MODE_DCE 2
#define AR933X_UART_CS_FLOW_CTRL_S 4
#define AR933X_UART_CS_FLOW_CTRL_M 0x3
#define AR933X_UART_CS_DMA_EN BIT(6)
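A minimal sketch of composing a UART_CS value from the _S (shift) and _M (mask) pairs above, assuming the usual value-shifted-into-place register layout:

static unsigned int example_uart_cs(void)
{
	return (AR933X_UART_CS_PARITY_EVEN << AR933X_UART_CS_PARITY_S) |
	       (AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S) |
	       AR933X_UART_CS_DMA_EN;
}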
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 4f248c3d7b23..1557934aaca9 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -32,6 +32,8 @@ enum ath79_soc_type {
ATH79_SOC_AR9341,
ATH79_SOC_AR9342,
ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9556,
+ ATH79_SOC_QCA9558,
};
extern enum ath79_soc_type ath79_soc;
@@ -98,6 +100,21 @@ static inline int soc_is_ar934x(void)
return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
}
+static inline int soc_is_qca9556(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9556;
+}
+
+static inline int soc_is_qca9558(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9558;
+}
+
+static inline int soc_is_qca955x(void)
+{
+ return soc_is_qca9556() || soc_is_qca9558();
+}
+
extern void __iomem *ath79_ddr_base;
extern void __iomem *ath79_pll_base;
extern void __iomem *ath79_reset_base;
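A minimal sketch of how the new QCA955x helpers sit alongside the existing ones (not from the patch; the GPIO counts come from ar71xx_regs.h in this same series):

static unsigned int example_gpio_count(void)
{
	if (soc_is_qca955x())
		return QCA955X_GPIO_COUNT;
	if (soc_is_ar934x())
		return AR934X_GPIO_COUNT;
	return AR71XX_GPIO_COUNT;
}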
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index ea4b66dccf6e..ddb947e9221f 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -49,7 +49,7 @@
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
#define cpu_has_64bit_gp_regs 0
-#define cpu_has_64bit_addresses 0
+#define cpu_has_64bit_addresses 0
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 0968f69e2018..5c9ca76a7ebf 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,10 +10,13 @@
#define __ASM_MACH_ATH79_IRQ_H
#define MIPS_CPU_IRQ_BASE 0
-#define NR_IRQS 48
+#define NR_IRQS 51
+
+#define ATH79_CPU_IRQ(_x) (MIPS_CPU_IRQ_BASE + (_x))
#define ATH79_MISC_IRQ_BASE 8
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_IRQ(_x) (ATH79_MISC_IRQ_BASE + (_x))
#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
#define ATH79_PCI_IRQ_COUNT 6
@@ -23,25 +26,9 @@
#define ATH79_IP2_IRQ_COUNT 2
#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
-#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2)
-#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3)
-#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4)
-#define ATH79_CPU_IRQ_GE1 (MIPS_CPU_IRQ_BASE + 5)
-#define ATH79_CPU_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6)
-#define ATH79_CPU_IRQ_TIMER (MIPS_CPU_IRQ_BASE + 7)
-
-#define ATH79_MISC_IRQ_TIMER (ATH79_MISC_IRQ_BASE + 0)
-#define ATH79_MISC_IRQ_ERROR (ATH79_MISC_IRQ_BASE + 1)
-#define ATH79_MISC_IRQ_GPIO (ATH79_MISC_IRQ_BASE + 2)
-#define ATH79_MISC_IRQ_UART (ATH79_MISC_IRQ_BASE + 3)
-#define ATH79_MISC_IRQ_WDOG (ATH79_MISC_IRQ_BASE + 4)
-#define ATH79_MISC_IRQ_PERFC (ATH79_MISC_IRQ_BASE + 5)
-#define ATH79_MISC_IRQ_OHCI (ATH79_MISC_IRQ_BASE + 6)
-#define ATH79_MISC_IRQ_DMA (ATH79_MISC_IRQ_BASE + 7)
-#define ATH79_MISC_IRQ_TIMER2 (ATH79_MISC_IRQ_BASE + 8)
-#define ATH79_MISC_IRQ_TIMER3 (ATH79_MISC_IRQ_BASE + 9)
-#define ATH79_MISC_IRQ_TIMER4 (ATH79_MISC_IRQ_BASE + 10)
-#define ATH79_MISC_IRQ_ETHSW (ATH79_MISC_IRQ_BASE + 12)
+#define ATH79_IP3_IRQ_BASE (ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT)
+#define ATH79_IP3_IRQ_COUNT 3
+#define ATH79_IP3_IRQ(_x) (ATH79_IP3_IRQ_BASE + (_x))
#include_next <irq.h>
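[Editor's sketch] The irq.h hunk above drops the individually named CPU and MISC interrupt defines in favour of the ATH79_CPU_IRQ(_x) and ATH79_MISC_IRQ(_x) helpers. A hedged sketch of the resulting mapping, using index values taken from the removed defines; the surrounding function is illustrative only:

	#include <asm/mach-ath79/irq.h>

	static int example_uart_irq(void)	/* illustrative name only */
	{
		/* Formerly ATH79_MISC_IRQ_UART == ATH79_MISC_IRQ_BASE + 3;
		 * ATH79_CPU_IRQ(2) likewise stands in for the old ATH79_CPU_IRQ_IP2. */
		return ATH79_MISC_IRQ(3);
	}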
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
deleted file mode 100644
index 7868f7fa028f..000000000000
--- a/arch/mips/include/asm/mach-ath79/pci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Atheros AR71XX/AR724X PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_H
-#define __ASM_MACH_ATH79_PCI_H
-
-#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
-int ar71xx_pcibios_init(void);
-#else
-static inline int ar71xx_pcibios_init(void) { return 0; }
-#endif
-
-#if defined(CONFIG_PCI_AR724X)
-int ar724x_pcibios_init(int irq);
-#else
-static inline int ar724x_pcibios_init(int irq) { return 0; }
-#endif
-
-#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 569828d3ccab..3e11a468cdf8 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -349,7 +349,7 @@ extern void au1300_vss_block_control(int block, int enable);
#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1)
#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
-#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
+#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
/* Au1300-style (GPIC): 1 controller with up to 128 sources */
#define ALCHEMY_GPIC_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
@@ -589,7 +589,7 @@ enum soc_au1550_ints {
AU1550_GPIO14_INT,
AU1550_GPIO15_INT,
AU1550_GPIO200_INT,
- AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
+ AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
AU1550_GPIO16_INT,
AU1550_GPIO17_INT,
AU1550_GPIO20_INT,
@@ -603,7 +603,7 @@ enum soc_au1550_ints {
AU1550_GPIO28_INT,
AU1550_GPIO206_INT,
AU1550_GPIO207_INT,
- AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
+ AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
};
enum soc_au1200_ints {
@@ -636,7 +636,7 @@ enum soc_au1200_ints {
AU1200_GPIO205_INT,
AU1200_GPIO206_INT,
AU1200_GPIO207_INT,
- AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
+ AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
AU1200_USB_INT,
AU1200_LCD_INT,
AU1200_MAE_BOTH_INT,
@@ -823,7 +823,7 @@ enum soc_au1200_ints {
#define GPIC_GPIO_TO_BIT(gpio) \
(1 << ((gpio) & 0x1f))
-#define GPIC_GPIO_BANKOFF(gpio) \
+#define GPIC_GPIO_BANKOFF(gpio) \
(((gpio) >> 5) * 4)
/* Pin Control bits: who owns the pin, what does it do */
@@ -958,32 +958,32 @@ enum soc_au1200_ints {
#define MEM_STSTAT 0xB4001104
#define MEM_STNAND_CMD 0x0
-#define MEM_STNAND_ADDR 0x4
-#define MEM_STNAND_DATA 0x20
+#define MEM_STNAND_ADDR 0x4
+#define MEM_STNAND_DATA 0x20
/* Programmable Counters 0 and 1 */
#define SYS_BASE 0xB1900000
#define SYS_COUNTER_CNTRL (SYS_BASE + 0x14)
-# define SYS_CNTRL_E1S (1 << 23)
-# define SYS_CNTRL_T1S (1 << 20)
-# define SYS_CNTRL_M21 (1 << 19)
-# define SYS_CNTRL_M11 (1 << 18)
-# define SYS_CNTRL_M01 (1 << 17)
-# define SYS_CNTRL_C1S (1 << 16)
+# define SYS_CNTRL_E1S (1 << 23)
+# define SYS_CNTRL_T1S (1 << 20)
+# define SYS_CNTRL_M21 (1 << 19)
+# define SYS_CNTRL_M11 (1 << 18)
+# define SYS_CNTRL_M01 (1 << 17)
+# define SYS_CNTRL_C1S (1 << 16)
# define SYS_CNTRL_BP (1 << 14)
-# define SYS_CNTRL_EN1 (1 << 13)
-# define SYS_CNTRL_BT1 (1 << 12)
-# define SYS_CNTRL_EN0 (1 << 11)
-# define SYS_CNTRL_BT0 (1 << 10)
+# define SYS_CNTRL_EN1 (1 << 13)
+# define SYS_CNTRL_BT1 (1 << 12)
+# define SYS_CNTRL_EN0 (1 << 11)
+# define SYS_CNTRL_BT0 (1 << 10)
# define SYS_CNTRL_E0 (1 << 8)
-# define SYS_CNTRL_E0S (1 << 7)
-# define SYS_CNTRL_32S (1 << 5)
-# define SYS_CNTRL_T0S (1 << 4)
-# define SYS_CNTRL_M20 (1 << 3)
-# define SYS_CNTRL_M10 (1 << 2)
-# define SYS_CNTRL_M00 (1 << 1)
-# define SYS_CNTRL_C0S (1 << 0)
+# define SYS_CNTRL_E0S (1 << 7)
+# define SYS_CNTRL_32S (1 << 5)
+# define SYS_CNTRL_T0S (1 << 4)
+# define SYS_CNTRL_M20 (1 << 3)
+# define SYS_CNTRL_M10 (1 << 2)
+# define SYS_CNTRL_M00 (1 << 1)
+# define SYS_CNTRL_C0S (1 << 0)
/* Programmable Counter 0 Registers */
#define SYS_TOYTRIM (SYS_BASE + 0)
@@ -1003,33 +1003,33 @@ enum soc_au1200_ints {
/* I2S Controller */
#define I2S_DATA 0xB1000000
-# define I2S_DATA_MASK 0xffffff
+# define I2S_DATA_MASK 0xffffff
#define I2S_CONFIG 0xB1000004
-# define I2S_CONFIG_XU (1 << 25)
-# define I2S_CONFIG_XO (1 << 24)
-# define I2S_CONFIG_RU (1 << 23)
-# define I2S_CONFIG_RO (1 << 22)
-# define I2S_CONFIG_TR (1 << 21)
-# define I2S_CONFIG_TE (1 << 20)
-# define I2S_CONFIG_TF (1 << 19)
-# define I2S_CONFIG_RR (1 << 18)
-# define I2S_CONFIG_RE (1 << 17)
-# define I2S_CONFIG_RF (1 << 16)
-# define I2S_CONFIG_PD (1 << 11)
-# define I2S_CONFIG_LB (1 << 10)
-# define I2S_CONFIG_IC (1 << 9)
+# define I2S_CONFIG_XU (1 << 25)
+# define I2S_CONFIG_XO (1 << 24)
+# define I2S_CONFIG_RU (1 << 23)
+# define I2S_CONFIG_RO (1 << 22)
+# define I2S_CONFIG_TR (1 << 21)
+# define I2S_CONFIG_TE (1 << 20)
+# define I2S_CONFIG_TF (1 << 19)
+# define I2S_CONFIG_RR (1 << 18)
+# define I2S_CONFIG_RE (1 << 17)
+# define I2S_CONFIG_RF (1 << 16)
+# define I2S_CONFIG_PD (1 << 11)
+# define I2S_CONFIG_LB (1 << 10)
+# define I2S_CONFIG_IC (1 << 9)
# define I2S_CONFIG_FM_BIT 7
# define I2S_CONFIG_FM_MASK (0x3 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_I2S (0x0 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_LJ (0x1 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_RJ (0x2 << I2S_CONFIG_FM_BIT)
-# define I2S_CONFIG_TN (1 << 6)
-# define I2S_CONFIG_RN (1 << 5)
+# define I2S_CONFIG_TN (1 << 6)
+# define I2S_CONFIG_RN (1 << 5)
# define I2S_CONFIG_SZ_BIT 0
# define I2S_CONFIG_SZ_MASK (0x1F << I2S_CONFIG_SZ_BIT)
#define I2S_CONTROL 0xB1000008
-# define I2S_CONTROL_D (1 << 1)
+# define I2S_CONTROL_D (1 << 1)
# define I2S_CONTROL_CE (1 << 0)
@@ -1037,16 +1037,16 @@ enum soc_au1200_ints {
/* 4 byte offsets from AU1000_ETH_BASE */
#define MAC_CONTROL 0x0
-# define MAC_RX_ENABLE (1 << 2)
-# define MAC_TX_ENABLE (1 << 3)
-# define MAC_DEF_CHECK (1 << 5)
-# define MAC_SET_BL(X) (((X) & 0x3) << 6)
+# define MAC_RX_ENABLE (1 << 2)
+# define MAC_TX_ENABLE (1 << 3)
+# define MAC_DEF_CHECK (1 << 5)
+# define MAC_SET_BL(X) (((X) & 0x3) << 6)
# define MAC_AUTO_PAD (1 << 8)
# define MAC_DISABLE_RETRY (1 << 10)
# define MAC_DISABLE_BCAST (1 << 11)
# define MAC_LATE_COL (1 << 12)
-# define MAC_HASH_MODE (1 << 13)
-# define MAC_HASH_ONLY (1 << 15)
+# define MAC_HASH_MODE (1 << 13)
+# define MAC_HASH_ONLY (1 << 15)
# define MAC_PASS_ALL (1 << 16)
# define MAC_INVERSE_FILTER (1 << 17)
# define MAC_PROMISCUOUS (1 << 18)
@@ -1083,9 +1083,9 @@ enum soc_au1200_ints {
# define MAC_EN_RESET0 (1 << 1)
# define MAC_EN_TOSS (0 << 2)
# define MAC_EN_CACHEABLE (1 << 3)
-# define MAC_EN_RESET1 (1 << 4)
-# define MAC_EN_RESET2 (1 << 5)
-# define MAC_DMA_RESET (1 << 6)
+# define MAC_EN_RESET1 (1 << 4)
+# define MAC_EN_RESET2 (1 << 5)
+# define MAC_DMA_RESET (1 << 6)
/* Ethernet Controller DMA Channels */
@@ -1095,7 +1095,7 @@ enum soc_au1200_ints {
#define MAC_TX_BUFF0_STATUS 0x0
# define TX_FRAME_ABORTED (1 << 0)
# define TX_JAB_TIMEOUT (1 << 1)
-# define TX_NO_CARRIER (1 << 2)
+# define TX_NO_CARRIER (1 << 2)
# define TX_LOSS_CARRIER (1 << 3)
# define TX_EXC_DEF (1 << 4)
# define TX_LATE_COLL_ABORT (1 << 5)
@@ -1106,7 +1106,7 @@ enum soc_au1200_ints {
# define TX_COLL_CNT_MASK (0xF << 10)
# define TX_PKT_RETRY (1 << 31)
#define MAC_TX_BUFF0_ADDR 0x4
-# define TX_DMA_ENABLE (1 << 0)
+# define TX_DMA_ENABLE (1 << 0)
# define TX_T_DONE (1 << 1)
# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
#define MAC_TX_BUFF0_LEN 0x8
@@ -1125,7 +1125,7 @@ enum soc_au1200_ints {
/* offsets from MAC_RX_RING_ADDR */
#define MAC_RX_BUFF0_STATUS 0x0
# define RX_FRAME_LEN_MASK 0x3fff
-# define RX_WDOG_TIMER (1 << 14)
+# define RX_WDOG_TIMER (1 << 14)
# define RX_RUNT (1 << 15)
# define RX_OVERLEN (1 << 16)
# define RX_COLL (1 << 17)
@@ -1148,7 +1148,7 @@ enum soc_au1200_ints {
RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
#define MAC_RX_BUFF0_ADDR 0x4
-# define RX_DMA_ENABLE (1 << 0)
+# define RX_DMA_ENABLE (1 << 0)
# define RX_T_DONE (1 << 1)
# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
@@ -1173,34 +1173,34 @@ enum soc_au1200_ints {
/* SSIO */
#define SSI0_STATUS 0xB1600000
-# define SSI_STATUS_BF (1 << 4)
-# define SSI_STATUS_OF (1 << 3)
-# define SSI_STATUS_UF (1 << 2)
+# define SSI_STATUS_BF (1 << 4)
+# define SSI_STATUS_OF (1 << 3)
+# define SSI_STATUS_UF (1 << 2)
# define SSI_STATUS_D (1 << 1)
# define SSI_STATUS_B (1 << 0)
#define SSI0_INT 0xB1600004
# define SSI_INT_OI (1 << 3)
# define SSI_INT_UI (1 << 2)
# define SSI_INT_DI (1 << 1)
-#define SSI0_INT_ENABLE 0xB1600008
+#define SSI0_INT_ENABLE 0xB1600008
# define SSI_INTE_OIE (1 << 3)
# define SSI_INTE_UIE (1 << 2)
# define SSI_INTE_DIE (1 << 1)
#define SSI0_CONFIG 0xB1600020
-# define SSI_CONFIG_AO (1 << 24)
-# define SSI_CONFIG_DO (1 << 23)
+# define SSI_CONFIG_AO (1 << 24)
+# define SSI_CONFIG_DO (1 << 23)
# define SSI_CONFIG_ALEN_BIT 20
# define SSI_CONFIG_ALEN_MASK (0x7 << 20)
# define SSI_CONFIG_DLEN_BIT 16
# define SSI_CONFIG_DLEN_MASK (0x7 << 16)
-# define SSI_CONFIG_DD (1 << 11)
-# define SSI_CONFIG_AD (1 << 10)
+# define SSI_CONFIG_DD (1 << 11)
+# define SSI_CONFIG_AD (1 << 10)
# define SSI_CONFIG_BM_BIT 8
# define SSI_CONFIG_BM_MASK (0x3 << 8)
-# define SSI_CONFIG_CE (1 << 7)
-# define SSI_CONFIG_DP (1 << 6)
-# define SSI_CONFIG_DL (1 << 5)
-# define SSI_CONFIG_EP (1 << 4)
+# define SSI_CONFIG_CE (1 << 7)
+# define SSI_CONFIG_DP (1 << 6)
+# define SSI_CONFIG_DL (1 << 5)
+# define SSI_CONFIG_EP (1 << 4)
#define SSI0_ADATA 0xB1600024
# define SSI_AD_D (1 << 24)
# define SSI_AD_ADDR_BIT 16
@@ -1210,12 +1210,12 @@ enum soc_au1200_ints {
#define SSI0_CLKDIV 0xB1600028
#define SSI0_CONTROL 0xB1600100
# define SSI_CONTROL_CD (1 << 1)
-# define SSI_CONTROL_E (1 << 0)
+# define SSI_CONTROL_E (1 << 0)
/* SSI1 */
#define SSI1_STATUS 0xB1680000
#define SSI1_INT 0xB1680004
-#define SSI1_INT_ENABLE 0xB1680008
+#define SSI1_INT_ENABLE 0xB1680008
#define SSI1_CONFIG 0xB1680020
#define SSI1_ADATA 0xB1680024
#define SSI1_CLKDIV 0xB1680028
@@ -1242,8 +1242,8 @@ enum soc_au1200_ints {
#define SSI_CONFIG_AO (1 << 24)
#define SSI_CONFIG_DO (1 << 23)
-#define SSI_CONFIG_ALEN (7 << 20)
-#define SSI_CONFIG_DLEN (15 << 16)
+#define SSI_CONFIG_ALEN (7 << 20)
+#define SSI_CONFIG_DLEN (15 << 16)
#define SSI_CONFIG_DD (1 << 11)
#define SSI_CONFIG_AD (1 << 10)
#define SSI_CONFIG_BM (3 << 8)
@@ -1305,7 +1305,7 @@ struct au1k_irda_platform_data {
# define SYS_PF_CS (1 << 16) /* EXTCLK0/32KHz to gpio2 */
# define SYS_PF_EX0 (1 << 9) /* GPIO2/clock */
-/* Au1550 only. Redefines lots of pins */
+/* Au1550 only. Redefines lots of pins */
# define SYS_PF_PSC2_MASK (7 << 17)
# define SYS_PF_PSC2_AC97 0
# define SYS_PF_PSC2_SPI 0
@@ -1322,33 +1322,33 @@ struct au1k_irda_platform_data {
# define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2))
/* Au1200 only */
-#define SYS_PINFUNC_DMA (1 << 31)
-#define SYS_PINFUNC_S0A (1 << 30)
-#define SYS_PINFUNC_S1A (1 << 29)
-#define SYS_PINFUNC_LP0 (1 << 28)
-#define SYS_PINFUNC_LP1 (1 << 27)
-#define SYS_PINFUNC_LD16 (1 << 26)
-#define SYS_PINFUNC_LD8 (1 << 25)
-#define SYS_PINFUNC_LD1 (1 << 24)
-#define SYS_PINFUNC_LD0 (1 << 23)
-#define SYS_PINFUNC_P1A (3 << 21)
-#define SYS_PINFUNC_P1B (1 << 20)
-#define SYS_PINFUNC_FS3 (1 << 19)
-#define SYS_PINFUNC_P0A (3 << 17)
+#define SYS_PINFUNC_DMA (1 << 31)
+#define SYS_PINFUNC_S0A (1 << 30)
+#define SYS_PINFUNC_S1A (1 << 29)
+#define SYS_PINFUNC_LP0 (1 << 28)
+#define SYS_PINFUNC_LP1 (1 << 27)
+#define SYS_PINFUNC_LD16 (1 << 26)
+#define SYS_PINFUNC_LD8 (1 << 25)
+#define SYS_PINFUNC_LD1 (1 << 24)
+#define SYS_PINFUNC_LD0 (1 << 23)
+#define SYS_PINFUNC_P1A (3 << 21)
+#define SYS_PINFUNC_P1B (1 << 20)
+#define SYS_PINFUNC_FS3 (1 << 19)
+#define SYS_PINFUNC_P0A (3 << 17)
#define SYS_PINFUNC_CS (1 << 16)
-#define SYS_PINFUNC_CIM (1 << 15)
-#define SYS_PINFUNC_P1C (1 << 14)
-#define SYS_PINFUNC_U1T (1 << 12)
-#define SYS_PINFUNC_U1R (1 << 11)
-#define SYS_PINFUNC_EX1 (1 << 10)
-#define SYS_PINFUNC_EX0 (1 << 9)
-#define SYS_PINFUNC_U0R (1 << 8)
+#define SYS_PINFUNC_CIM (1 << 15)
+#define SYS_PINFUNC_P1C (1 << 14)
+#define SYS_PINFUNC_U1T (1 << 12)
+#define SYS_PINFUNC_U1R (1 << 11)
+#define SYS_PINFUNC_EX1 (1 << 10)
+#define SYS_PINFUNC_EX0 (1 << 9)
+#define SYS_PINFUNC_U0R (1 << 8)
#define SYS_PINFUNC_MC (1 << 7)
-#define SYS_PINFUNC_S0B (1 << 6)
-#define SYS_PINFUNC_S0C (1 << 5)
-#define SYS_PINFUNC_P0B (1 << 4)
-#define SYS_PINFUNC_U0T (1 << 3)
-#define SYS_PINFUNC_S1B (1 << 2)
+#define SYS_PINFUNC_S0B (1 << 6)
+#define SYS_PINFUNC_S0C (1 << 5)
+#define SYS_PINFUNC_P0B (1 << 4)
+#define SYS_PINFUNC_U0T (1 << 3)
+#define SYS_PINFUNC_S1B (1 << 2)
/* Power Management */
#define SYS_SCRATCH0 0xB1900018
@@ -1405,7 +1405,7 @@ struct au1k_irda_platform_data {
# define SYS_CS_DI2 (1 << 16)
# define SYS_CS_CI2 (1 << 15)
-# define SYS_CS_ML_BIT 7
+# define SYS_CS_ML_BIT 7
# define SYS_CS_ML_MASK (0x7 << SYS_CS_ML_BIT)
# define SYS_CS_DL (1 << 6)
# define SYS_CS_CL (1 << 5)
@@ -1554,8 +1554,8 @@ struct au1k_irda_platform_data {
#define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16)
#define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff)
#define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16)
-#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8)
-#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff)
+#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8)
+#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff)
#define PCI_ID_DID(x) (((x) & 0xffff) << 16)
#define PCI_ID_VID(x) ((x) & 0xffff)
#define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16)
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index ba4cf0e91c8b..7cedca5a305c 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -34,7 +34,7 @@
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
-#define NUM_AU1000_DMA_CHANNELS 8
+#define NUM_AU1000_DMA_CHANNELS 8
/* DMA Channel Register Offsets */
#define DMA_MODE_SET 0x00000000
@@ -47,7 +47,7 @@
#define DMA_DS (1 << 15)
#define DMA_BE (1 << 13)
#define DMA_DR (1 << 12)
-#define DMA_TS8 (1 << 11)
+#define DMA_TS8 (1 << 11)
#define DMA_DW_BIT 9
#define DMA_DW_MASK (0x03 << DMA_DW_BIT)
#define DMA_DW8 (0 << DMA_DW_BIT)
@@ -59,9 +59,9 @@
#define DMA_GO (1 << 5)
#define DMA_AB (1 << 4)
#define DMA_D1 (1 << 3)
-#define DMA_BE1 (1 << 2)
+#define DMA_BE1 (1 << 2)
#define DMA_D0 (1 << 1)
-#define DMA_BE0 (1 << 0)
+#define DMA_BE0 (1 << 0)
#define DMA_PERIPHERAL_ADDR 0x00000008
#define DMA_BUFFER0_START 0x0000000C
@@ -246,7 +246,7 @@ static inline void init_dma(unsigned int dmanr)
mode |= DMA_IE;
au_writel(~mode, chan->io + DMA_MODE_CLEAR);
- au_writel(mode, chan->io + DMA_MODE_SET);
+ au_writel(mode, chan->io + DMA_MODE_SET);
}
/*
diff --git a/arch/mips/include/asm/mach-au1x00/au1100_mmc.h b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
index e221659f1bca..cadab91cee26 100644
--- a/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
+++ b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
@@ -148,7 +148,7 @@ struct au1xmmc_platform_data {
/*
* SD_STATUS bit definitions.
*/
-#define SD_STATUS_DCRCW (0x00000007)
+#define SD_STATUS_DCRCW (0x00000007)
#define SD_STATUS_xx1 (0x00000008)
#define SD_STATUS_CB (0x00000010)
#define SD_STATUS_DB (0x00000020)
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 217810e18361..ca8077afac4a 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -103,7 +103,7 @@ typedef volatile struct au1xxx_ddma_desc {
* Lets have some SW data following -- make sure it's 32 bytes.
*/
u32 sw_status;
- u32 sw_context;
+ u32 sw_context;
u32 sw_reserved[6];
} au1x_ddma_desc_t;
@@ -123,7 +123,7 @@ typedef volatile struct au1xxx_ddma_desc {
#define DSCR_CMD0_CV (0x1 << 2) /* Clear Valid when done */
#define DSCR_CMD0_ST_MASK (0x3 << 0) /* Status instruction */
-#define SW_STATUS_INUSE (1 << 0)
+#define SW_STATUS_INUSE (1 << 0)
/* Command 0 device IDs. */
#define AU1550_DSCR_CMD0_UART0_TX 0
@@ -195,8 +195,8 @@ typedef volatile struct au1xxx_ddma_desc {
#define AU1300_DSCR_CMD0_SDMS_RX0 9
#define AU1300_DSCR_CMD0_SDMS_TX1 10
#define AU1300_DSCR_CMD0_SDMS_RX1 11
-#define AU1300_DSCR_CMD0_AES_TX 12
-#define AU1300_DSCR_CMD0_AES_RX 13
+#define AU1300_DSCR_CMD0_AES_TX 12
+#define AU1300_DSCR_CMD0_AES_RX 13
#define AU1300_DSCR_CMD0_PSC0_TX 14
#define AU1300_DSCR_CMD0_PSC0_RX 15
#define AU1300_DSCR_CMD0_PSC1_TX 16
@@ -205,12 +205,12 @@ typedef volatile struct au1xxx_ddma_desc {
#define AU1300_DSCR_CMD0_PSC2_RX 19
#define AU1300_DSCR_CMD0_PSC3_TX 20
#define AU1300_DSCR_CMD0_PSC3_RX 21
-#define AU1300_DSCR_CMD0_LCD 22
+#define AU1300_DSCR_CMD0_LCD 22
#define AU1300_DSCR_CMD0_NAND_FLASH 23
#define AU1300_DSCR_CMD0_SDMS_TX2 24
#define AU1300_DSCR_CMD0_SDMS_RX2 25
#define AU1300_DSCR_CMD0_CIM_SYNC 26
-#define AU1300_DSCR_CMD0_UDMA 27
+#define AU1300_DSCR_CMD0_UDMA 27
#define AU1300_DSCR_CMD0_DMA_REQ0 28
#define AU1300_DSCR_CMD0_DMA_REQ1 29
@@ -298,7 +298,7 @@ typedef volatile struct au1xxx_ddma_desc {
#define DSCR_NXTPTR_MS (1 << 27)
/* The number of DBDMA channels. */
-#define NUM_DBDMA_CHANS 16
+#define NUM_DBDMA_CHANS 16
/*
* DDMA API definitions
@@ -316,7 +316,7 @@ typedef struct dbdma_device_table {
typedef struct dbdma_chan_config {
- spinlock_t lock;
+ spinlock_t lock;
u32 chan_flags;
u32 chan_index;
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
index e306384b1414..bb91b8923a49 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
@@ -1,5 +1,5 @@
/*
- * include/asm-mips/mach-au1x00/au1xxx_ide.h version 01.30.00 Aug. 02 2005
+ * include/asm-mips/mach-au1x00/au1xxx_ide.h version 01.30.00 Aug. 02 2005
*
* BRIEF MODULE DESCRIPTION
* AMD Alchemy Au1xxx IDE interface routines over the Static Bus
@@ -27,14 +27,14 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
- * Interface and Linux Device Driver" Application Note.
+ * Interface and Linux Device Driver" Application Note.
*/
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
#define DMA_WAIT_TIMEOUT 100
-#define NUM_DESCRIPTORS PRD_ENTRIES
+#define NUM_DESCRIPTORS PRD_ENTRIES
#else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
-#define NUM_DESCRIPTORS 2
+#define NUM_DESCRIPTORS 2
#endif
#ifndef AU1XXX_ATA_RQSIZE
@@ -84,8 +84,8 @@ typedef struct {
#define TWP_MASK (0x3F << 14)
#define TCSW_MASK (0x0F << 10)
#define TPM_MASK (0x0F << 6)
-#define TA_MASK (0x3F << 0)
-#define TS_MASK (1 << 8)
+#define TA_MASK (0x3F << 0)
+#define TS_MASK (1 << 8)
/* Timing parameters PIO mode 0 */
#define SBC_IDE_PIO0_TCSOE (0x04 << 29)
@@ -96,7 +96,7 @@ typedef struct {
#define SBC_IDE_PIO0_TWP (0x10 << 14)
#define SBC_IDE_PIO0_TCSW (0x04 << 10)
#define SBC_IDE_PIO0_TPM (0x00 << 6)
-#define SBC_IDE_PIO0_TA (0x15 << 0)
+#define SBC_IDE_PIO0_TA (0x15 << 0)
/* Timing parameters PIO mode 1 */
#define SBC_IDE_PIO1_TCSOE (0x03 << 29)
#define SBC_IDE_PIO1_TOECS (0x01 << 26)
@@ -106,7 +106,7 @@ typedef struct {
#define SBC_IDE_PIO1_TWP (0x08 << 14)
#define SBC_IDE_PIO1_TCSW (0x03 << 10)
#define SBC_IDE_PIO1_TPM (0x00 << 6)
-#define SBC_IDE_PIO1_TA (0x0B << 0)
+#define SBC_IDE_PIO1_TA (0x0B << 0)
/* Timing parameters PIO mode 2 */
#define SBC_IDE_PIO2_TCSOE (0x05 << 29)
#define SBC_IDE_PIO2_TOECS (0x01 << 26)
@@ -116,7 +116,7 @@ typedef struct {
#define SBC_IDE_PIO2_TWP (0x1F << 14)
#define SBC_IDE_PIO2_TCSW (0x05 << 10)
#define SBC_IDE_PIO2_TPM (0x00 << 6)
-#define SBC_IDE_PIO2_TA (0x22 << 0)
+#define SBC_IDE_PIO2_TA (0x22 << 0)
/* Timing parameters PIO mode 3 */
#define SBC_IDE_PIO3_TCSOE (0x05 << 29)
#define SBC_IDE_PIO3_TOECS (0x01 << 26)
@@ -126,7 +126,7 @@ typedef struct {
#define SBC_IDE_PIO3_TWP (0x15 << 14)
#define SBC_IDE_PIO3_TCSW (0x05 << 10)
#define SBC_IDE_PIO3_TPM (0x00 << 6)
-#define SBC_IDE_PIO3_TA (0x1A << 0)
+#define SBC_IDE_PIO3_TA (0x1A << 0)
/* Timing parameters PIO mode 4 */
#define SBC_IDE_PIO4_TCSOE (0x04 << 29)
#define SBC_IDE_PIO4_TOECS (0x01 << 26)
@@ -136,7 +136,7 @@ typedef struct {
#define SBC_IDE_PIO4_TWP (0x0D << 14)
#define SBC_IDE_PIO4_TCSW (0x03 << 10)
#define SBC_IDE_PIO4_TPM (0x00 << 6)
-#define SBC_IDE_PIO4_TA (0x12 << 0)
+#define SBC_IDE_PIO4_TA (0x12 << 0)
/* Timing parameters MDMA mode 0 */
#define SBC_IDE_MDMA0_TCSOE (0x03 << 29)
#define SBC_IDE_MDMA0_TOECS (0x01 << 26)
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
index 4e3f3bc26c60..8a9cd754be2d 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
@@ -53,7 +53,7 @@
#define PSC_CTRL_DISABLE 0
#define PSC_CTRL_SUSPEND 2
-#define PSC_CTRL_ENABLE 3
+#define PSC_CTRL_ENABLE 3
/* AC97 Registers. */
#define PSC_AC97CFG_OFFSET 0x00000008
@@ -85,8 +85,8 @@
#define PSC_AC97CFG_SE_ENABLE (1 << 25)
#define PSC_AC97CFG_LEN_MASK (0xf << 21)
-#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
-#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
+#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
+#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
#define PSC_AC97CFG_GE_ENABLE (1)
/* Enable slots 3-12. */
@@ -95,7 +95,7 @@
/*
* The word length equation is ((x) * 2) + 2, so choose 'x' appropriately.
- * The only sensible numbers are 7, 9, or possibly 11. Nah, just do the
+ * The only sensible numbers are 7, 9, or possibly 11. Nah, just do the
* arithmetic in the macro.
*/
#define PSC_AC97CFG_SET_LEN(x) (((((x) - 2) / 2) & 0xf) << 21)
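[Editor's sketch] The comment in the hunk above gives the word-length relation len = (x * 2) + 2, and PSC_AC97CFG_SET_LEN() performs the inverse arithmetic on the desired length. A quick worked check of that formula, derived only from the macro shown; the check function is illustrative:

	#include <linux/bug.h>
	#include <asm/mach-au1x00/au1xxx_psc.h>

	static inline void example_ac97cfg_len_check(void)	/* illustrative only */
	{
		/* 16-bit slots: (16 - 2) / 2 = 7, placed in the 4-bit field at bit 21 */
		BUILD_BUG_ON(PSC_AC97CFG_SET_LEN(16) != (7 << 21));
		/* 24-bit slots -> 11; with 20 -> 9 this matches the comment's
		 * "7, 9, or possibly 11", since len = field * 2 + 2. */
		BUILD_BUG_ON(PSC_AC97CFG_SET_LEN(24) != (11 << 21));
	}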
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 73853b5a2a31..796afd051c35 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -12,14 +12,14 @@
#include <asm/mach-au1x00/au1000.h>
/* The default GPIO numberspace as documented in the Alchemy manuals.
- * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block.
+ * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block.
*/
#define ALCHEMY_GPIO1_BASE 0
#define ALCHEMY_GPIO2_BASE 200
#define ALCHEMY_GPIO1_NUM 32
#define ALCHEMY_GPIO2_NUM 16
-#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
+#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
#define ALCHEMY_GPIO2_MAX (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1)
#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off))
@@ -67,7 +67,7 @@ static inline int au1500_gpio1_to_irq(int gpio)
switch (gpio) {
case 0 ... 15:
case 20:
- case 23 ... 28: return MAKE_IRQ(1, gpio);
+ case 23 ... 28: return MAKE_IRQ(1, gpio);
}
return -ENXIO;
@@ -139,8 +139,8 @@ static inline int au1550_gpio1_to_irq(int gpio)
switch (gpio) {
case 0 ... 15:
- case 20 ... 28: return MAKE_IRQ(1, gpio);
- case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
+ case 20 ... 28: return MAKE_IRQ(1, gpio);
+ case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
}
return -ENXIO;
@@ -152,9 +152,9 @@ static inline int au1550_gpio2_to_irq(int gpio)
switch (gpio) {
case 0: return MAKE_IRQ(1, 16);
- case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
+ case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6);
- case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
+ case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
}
return -ENXIO;
@@ -190,7 +190,7 @@ static inline int au1200_gpio2_to_irq(int gpio)
case 0 ... 2: return MAKE_IRQ(0, 5 + gpio - 0);
case 3: return MAKE_IRQ(0, 22);
case 4 ... 7: return MAKE_IRQ(0, 24 + gpio - 4);
- case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
+ case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
}
return -ENXIO;
@@ -428,7 +428,7 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
/**
* alchemy_gpio2_enable - Activate GPIO2 block.
*
- * The GPIO2 block must be enabled excplicitly to work. On systems
+ * The GPIO2 block must be enabled excplicitly to work. On systems
* where this isn't done by the bootloader, this macro can be used.
*/
static inline void alchemy_gpio2_enable(void)
@@ -533,7 +533,7 @@ static inline int alchemy_irq_to_gpio(int irq)
* 2 (1 for Au1000) gpio_chips are registered.
*
*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- * the boards' gpio.h must provide the linux gpio wrapper functions,
+ * the boards' gpio.h must provide the linux gpio wrapper functions,
*
*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
* inlinable gpio functions are provided which enable access to the
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index fb9975c74c57..ce02894271c6 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -130,7 +130,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
* A gpiochip for the 75 GPIOs is registered.
*
*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-* the boards' gpio.h must provide the linux gpio wrapper functions,
+* the boards' gpio.h must provide the linux gpio wrapper functions,
*
*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
* inlinable gpio functions are provided which enable access to the
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
index 69ef3efe06e7..b8e7be8f34dd 100644
--- a/arch/mips/include/asm/mach-bcm47xx/nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
@@ -8,8 +8,8 @@
* option) any later version.
*/
-#ifndef __NVRAM_H
-#define __NVRAM_H
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
#include <linux/types.h>
#include <linux/kernel.h>
@@ -32,12 +32,9 @@ struct nvram_header {
#define NVRAM_MAX_VALUE_LEN 255
#define NVRAM_MAX_PARAM_LEN 64
-#define NVRAM_ERR_INV_PARAM -8
-#define NVRAM_ERR_ENVNOTFOUND -9
+extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len);
-extern int nvram_getenv(char *name, char *val, size_t val_len);
-
-static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
+static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
{
if (strchr(buf, ':'))
sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
@@ -51,4 +48,4 @@ static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
}
-#endif
+#endif /* __BCM47XX_NVRAM_H */
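[Editor's sketch] The rename above turns the generic nvram_* helpers into bcm47xx_nvram_* ones. A usage sketch under stated assumptions: the "et0macaddr" key and the convention that a negative return from bcm47xx_nvram_getenv() means failure are common on these boards but are not part of this hunk.

	#include <asm/mach-bcm47xx/bcm47xx_nvram.h>

	static void example_read_mac(u8 mac[6])	/* illustrative name only */
	{
		char buf[NVRAM_MAX_VALUE_LEN];

		/* Assumed: >= 0 on success, negative on failure. */
		if (bcm47xx_nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
			bcm47xx_nvram_parse_macaddr(buf, mac);
	}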
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index dbd5b5ad07a5..cb922b9cb0e9 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -182,7 +182,7 @@ enum bcm63xx_regs_set {
#define BCM_6328_PERF_BASE (0xb0000000)
#define BCM_6328_TIMER_BASE (0xb0000040)
#define BCM_6328_WDT_BASE (0xb000005c)
-#define BCM_6328_UART0_BASE (0xb0000100)
+#define BCM_6328_UART0_BASE (0xb0000100)
#define BCM_6328_UART1_BASE (0xb0000120)
#define BCM_6328_GPIO_BASE (0xb0000080)
#define BCM_6328_SPI_BASE (0xdeadbeef)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
index 03a54df5fb86..7033144aab2d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
@@ -88,7 +88,7 @@
#define bcm_mpi_readl(o) bcm_rset_readl(RSET_MPI, (o))
#define bcm_mpi_writel(v, o) bcm_rset_writel(RSET_MPI, (v), (o))
#define bcm_pcmcia_readl(o) bcm_rset_readl(RSET_PCMCIA, (o))
-#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
+#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
#define bcm_pcie_readl(o) bcm_rset_readl(RSET_PCIE, (o))
#define bcm_pcie_writel(v, o) bcm_rset_writel(RSET_PCIE, (v), (o))
#define bcm_sdram_readl(o) bcm_rset_readl(RSET_SDRAM, (o))
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
index a5bbff31c898..1e89df7244bd 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
@@ -19,7 +19,7 @@ struct bcm_enet_desc {
#define DMADESC_SOP_MASK (1 << 13)
#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK (1 << 12)
-#define DMADESC_USB_NOZERO_MASK (1 << 1)
+#define DMADESC_USB_NOZERO_MASK (1 << 1)
#define DMADESC_USB_ZERO_MASK (1 << 0)
/* status */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index c3eeb90b480a..81b4702f792a 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -143,7 +143,7 @@
CKCTL_6368_NAND_EN | \
CKCTL_6368_IPSEC_EN)
-/* System PLL Control register */
+/* System PLL Control register */
#define PERF_SYS_PLL_CTL_REG 0x8
#define SYS_PLL_SOFT_RESET 0x1
@@ -219,7 +219,7 @@
#define SOFTRESET_6338_DMAMEM_MASK (1 << 6)
#define SOFTRESET_6338_SAR_MASK (1 << 7)
#define SOFTRESET_6338_ACLC_MASK (1 << 8)
-#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
+#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
#define SOFTRESET_6338_ALL (SOFTRESET_6338_SPI_MASK | \
SOFTRESET_6338_ENET_MASK | \
SOFTRESET_6338_USBH_MASK | \
@@ -238,7 +238,7 @@
#define SOFTRESET_6348_DMAMEM_MASK (1 << 6)
#define SOFTRESET_6348_SAR_MASK (1 << 7)
#define SOFTRESET_6348_ACLC_MASK (1 << 8)
-#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
+#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
#define SOFTRESET_6348_ALL (SOFTRESET_6348_SPI_MASK | \
SOFTRESET_6348_ENET_MASK | \
@@ -560,7 +560,7 @@
#define GPIO_PINMUX_OTHR_REG 0x24
-#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
+#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
#define GPIO_PINMUX_OTHR_6328_USB_MASK (3 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_HOST (1 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_DEV (2 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
@@ -572,12 +572,12 @@
/* those bits must be kept as read in gpio basemode register*/
#define GPIO_STRAPBUS_REG 0x40
-#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
+#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
#define STRAPBUS_6358_BOOT_SEL_SERIAL (0 << 1)
#define STRAPBUS_6368_BOOT_SEL_MASK 0x3
#define STRAPBUS_6368_BOOT_SEL_NAND 0
#define STRAPBUS_6368_BOOT_SEL_SERIAL 1
-#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
+#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
/*************************************************************************
@@ -812,7 +812,7 @@
#define USBH_PRIV_SWAP_OHCI_DATA_MASK (1 << USBH_PRIV_SWAP_OHCI_DATA_SHIFT)
#define USBH_PRIV_UTMI_CTL_6368_REG 0x10
-#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
+#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
#define USBH_PRIV_UTMI_CTL_NODRIV_MASK (0xf << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT)
#define USBH_PRIV_UTMI_CTL_HOSTB_SHIFT 0
#define USBH_PRIV_UTMI_CTL_HOSTB_MASK (0xf << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT)
@@ -841,7 +841,7 @@
#define USBD_CONTROL_INIT_SEL_MASK (0xf << USBD_CONTROL_INIT_SEL_SHIFT)
#define USBD_CONTROL_FIFO_RESET_SHIFT 6
#define USBD_CONTROL_FIFO_RESET_MASK (3 << USBD_CONTROL_FIFO_RESET_SHIFT)
-#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
+#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
#define USBD_CONTROL_SETUPERRLOCK_MASK (1 << USBD_CONTROL_SETUPERRLOCK_SHIFT)
#define USBD_CONTROL_DONE_CSRS_SHIFT 0
#define USBD_CONTROL_DONE_CSRS_MASK (1 << USBD_CONTROL_DONE_CSRS_SHIFT)
@@ -852,7 +852,7 @@
#define USBD_STRAPS_APP_SELF_PWR_MASK (1 << USBD_STRAPS_APP_SELF_PWR_SHIFT)
#define USBD_STRAPS_APP_DISCON_SHIFT 9
#define USBD_STRAPS_APP_DISCON_MASK (1 << USBD_STRAPS_APP_DISCON_SHIFT)
-#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
+#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
#define USBD_STRAPS_APP_CSRPRGSUP_MASK (1 << USBD_STRAPS_APP_CSRPRGSUP_SHIFT)
#define USBD_STRAPS_APP_RMTWKUP_SHIFT 6
#define USBD_STRAPS_APP_RMTWKUP_MASK (1 << USBD_STRAPS_APP_RMTWKUP_SHIFT)
@@ -943,7 +943,7 @@
#define USBD_EPNUM_TYPEMAP_REG 0x50
#define USBD_EPNUM_TYPEMAP_TYPE_SHIFT 8
#define USBD_EPNUM_TYPEMAP_TYPE_MASK (0x3 << USBD_EPNUM_TYPEMAP_TYPE_SHIFT)
-#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
+#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
#define USBD_EPNUM_TYPEMAP_DMA_CH_MASK (0xf << USBD_EPNUM_TYPEMAP_DMACH_SHIFT)
/* Misc per-endpoint settings */
@@ -1048,8 +1048,8 @@
#define MPI_L2PREMAP_IS_CARDBUS_MASK (1 << 2)
#define MPI_PCIMODESEL_REG 0x144
-#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
-#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
+#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
+#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
#define MPI_PCIMODESEL_EXT_ARB_MASK (1 << 2)
#define MPI_PCIMODESEL_PREFETCH_SHIFT 4
#define MPI_PCIMODESEL_PREFETCH_MASK (0xf << MPI_PCIMODESEL_PREFETCH_SHIFT)
diff --git a/arch/mips/include/asm/mach-bcm63xx/irq.h b/arch/mips/include/asm/mach-bcm63xx/irq.h
index 9332e788a5c9..2bbfc8d1f307 100644
--- a/arch/mips/include/asm/mach-bcm63xx/irq.h
+++ b/arch/mips/include/asm/mach-bcm63xx/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_BCM63XX_IRQ_H
#define __ASM_MACH_BCM63XX_IRQ_H
-#define NR_IRQS 128
+#define NR_IRQS 128
#define MIPS_CPU_IRQ_BASE 0
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 502bb1815ae8..60fc4c347c44 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -51,8 +51,8 @@ enum octeon_irq {
/* 256 - 511 represent the MSI interrupts 0-255 */
#define OCTEON_IRQ_MSI_BIT0 (256)
-#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
-#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
+#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
+#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
#endif
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index dedef7d2b01f..1e7dbb192657 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -16,7 +16,7 @@
#define CP0_PRID_OCTEON_PASS1 0x000d0000
#define CP0_PRID_OCTEON_CN30XX 0x000d0200
-.macro kernel_entry_setup
+.macro kernel_entry_setup
# Registers set by bootloader:
# (only 32 bits set by bootloader, all addresses are physical
# addresses, and need to have the appropriate memory region set
@@ -28,12 +28,12 @@
.set push
.set arch=octeon
# Read the cavium mem control register
- dmfc0 v0, CP0_CVMMEMCTL_REG
+ dmfc0 v0, CP0_CVMMEMCTL_REG
# Clear the lower 6 bits, the CVMSEG size
- dins v0, $0, 0, 6
- ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
- dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
- dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
+ dins v0, $0, 0, 6
+ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
+ dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED
# Disable unaligned load/store support but leave HW fixup enabled
or v0, v0, 0x5001
@@ -69,14 +69,14 @@ skip:
and v0, v0, v1
ori v0, v0, (6 << 7)
# Write the cavium control register
- dmtc0 v0, CP0_CVMCTL_REG
+ dmtc0 v0, CP0_CVMCTL_REG
sync
# Flush dcache after config change
- cache 9, 0($0)
+ cache 9, 0($0)
# Get my core id
- rdhwr v0, $0
+ rdhwr v0, $0
# Jump the master to kernel_entry
- bne a2, zero, octeon_main_processor
+ bne a2, zero, octeon_main_processor
nop
#ifdef CONFIG_SMP
@@ -87,21 +87,21 @@ skip:
#
# This is the variable where the next core to boot os stored
- PTR_LA t0, octeon_processor_boot
+ PTR_LA t0, octeon_processor_boot
octeon_spin_wait_boot:
# Get the core id of the next to be booted
- LONG_L t1, (t0)
+ LONG_L t1, (t0)
# Keep looping if it isn't me
bne t1, v0, octeon_spin_wait_boot
nop
# Get my GP from the global variable
- PTR_LA t0, octeon_processor_gp
- LONG_L gp, (t0)
+ PTR_LA t0, octeon_processor_gp
+ LONG_L gp, (t0)
# Get my SP from the global variable
- PTR_LA t0, octeon_processor_sp
- LONG_L sp, (t0)
+ PTR_LA t0, octeon_processor_sp
+ LONG_L sp, (t0)
# Set the SP global variable to zero so the master knows we've started
- LONG_S zero, (t0)
+ LONG_S zero, (t0)
#ifdef __OCTEON__
syncw
syncw
@@ -130,7 +130,7 @@ octeon_main_processor:
/*
* Do SMP slave processor setup necessary before we can savely execute C code.
*/
- .macro smp_slave_setup
+ .macro smp_slave_setup
.endm
#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
index babc8374e378..71d4bface1dc 100644
--- a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -32,9 +32,9 @@
#define cpu_scache_line_size() 0
#ifdef CONFIG_64BIT
-#define cpu_has_llsc 0
+#define cpu_has_llsc 0
#else
-#define cpu_has_llsc 1
+#define cpu_has_llsc 1
#endif
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
index f8afec3f2943..6fe475b9e965 100644
--- a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/mach-db1x00/bcsr.h b/arch/mips/include/asm/mach-db1x00/bcsr.h
index 16f1cf5982b9..3c3ed4ae45e2 100644
--- a/arch/mips/include/asm/mach-db1x00/bcsr.h
+++ b/arch/mips/include/asm/mach-db1x00/bcsr.h
@@ -110,7 +110,7 @@ enum bcsr_whoami_boards {
BCSR_WHOAMI_DB1300,
};
-/* STATUS reg. Unless otherwise noted, they're valid on all boards.
+/* STATUS reg. Unless otherwise noted, they're valid on all boards.
* PB1200 = DB1200.
*/
#define BCSR_STATUS_PC0VS 0x0003
@@ -190,7 +190,7 @@ enum bcsr_whoami_boards {
#define BCSR_RESETS_OTPWRPROT 0x1000 /* DB1300 */
#define BCSR_RESETS_OTPCSB 0x2000 /* DB1300 */
#define BCSR_RESETS_OTGPWR 0x4000 /* DB1300 */
-#define BCSR_RESETS_USBHPWR 0x8000 /* DB1300 */
+#define BCSR_RESETS_USBHPWR 0x8000 /* DB1300 */
#define BCSR_BOARD_LCDVEE 0x0001
#define BCSR_BOARD_LCDVDD 0x0002
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
index b2a8319521e5..d3cce7326dd4 100644
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ b/arch/mips/include/asm/mach-db1x00/db1200.h
@@ -63,7 +63,7 @@
* the interrupt define and subtracting the DB1200_INT_BEGIN value.
*
* Example: IDE bis pos is = 64 - 64
- * ETH bit pos is = 65 - 64
+ * ETH bit pos is = 65 - 64
*/
enum external_db1200_ints {
DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
index 7fe5fb3ba877..3d1ede46f059 100644
--- a/arch/mips/include/asm/mach-db1x00/db1300.h
+++ b/arch/mips/include/asm/mach-db1x00/db1300.h
@@ -21,7 +21,7 @@
#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
-#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
+#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
/* SMSC9210 CS */
diff --git a/arch/mips/include/asm/mach-emma2rh/irq.h b/arch/mips/include/asm/mach-emma2rh/irq.h
index 5439eb856461..2f7155dade29 100644
--- a/arch/mips/include/asm/mach-emma2rh/irq.h
+++ b/arch/mips/include/asm/mach-emma2rh/irq.h
@@ -8,7 +8,7 @@
#ifndef __ASM_MACH_EMMA2RH_IRQ_H
#define __ASM_MACH_EMMA2RH_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
index 7c185bb06f13..42be9e9ced2c 100644
--- a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
@@ -8,6 +8,6 @@
#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
-/* Intentionally empty file ... */
+/* Intentionally empty file ... */
#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index a38f4d43e5e5..5b5cd689a2f7 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -98,7 +98,7 @@ static inline void fd_disable_irq(void)
static inline int fd_request_irq(void)
{
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- 0, "floppy", NULL);
+ 0, "floppy", NULL);
}
static inline void fd_free_irq(void)
@@ -106,7 +106,7 @@ static inline void fd_free_irq(void)
free_irq(FLOPPY_IRQ, NULL);
}
-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
static inline unsigned long fd_getfdaddr1(void)
diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h
index 9c93a5b36f2a..affa66f5c2da 100644
--- a/arch/mips/include/asm/mach-generic/ide.h
+++ b/arch/mips/include/asm/mach-generic/ide.h
@@ -51,7 +51,7 @@ static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long si
/*
* insw() and gang might be called with interrupts disabled, so we can't
* send IPIs for flushing due to the potencial of deadlocks, see the comment
- * above smp_call_function() in arch/mips/kernel/smp.c. We work around the
+ * above smp_call_function() in arch/mips/kernel/smp.c. We work around the
* problem by disabling preemption so we know we actually perform the flush
* on the processor that actually has the lines to be flushed which hopefully
* is even better for performance anyway.
@@ -123,7 +123,7 @@ static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
__ide_flush_epilogue();
}
-/* ide_insw calls insw, not __ide_insw. Why? */
+/* ide_insw calls insw, not __ide_insw. Why? */
#undef insw
#undef insl
#undef outsw
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index e014264b2be2..139cd200e79d 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -9,12 +9,12 @@
#define __ASM_MACH_GENERIC_IRQ_H
#ifndef NR_IRQS
-#define NR_IRQS 128
+#define NR_IRQS 128
#endif
#ifdef CONFIG_I8259
#ifndef I8259A_IRQ_BASE
-#define I8259A_IRQ_BASE 0
+#define I8259A_IRQ_BASE 0
#endif
#endif
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index d7a9efd3a5ce..73d717a75cb0 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -69,7 +69,7 @@
#define HIGHMEM_START (_AC(1, UL) << _AC(59, UL))
#endif
-#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
+#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index 624d66c7f290..a323efb720dc 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -51,8 +51,8 @@
* We might not get launched at the address the kernel is linked to,
* so we jump there.
*/
- PTR_LA t0, 0f
- jr t0
+ PTR_LA t0, 0f
+ jr t0
0:
.endm
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 986a3b9b59a7..ebc9377ff876 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -7,7 +7,7 @@
#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
-#define LEVELS_PER_SLICE 128
+#define LEVELS_PER_SLICE 128
struct slice_data {
unsigned long irq_enable_mask[2];
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index b2cf641f206f..defd135e7ac8 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -34,7 +34,7 @@ extern int pcibus_to_node(struct pci_bus *);
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
-#define node_distance(from, to) (__node_distances[(from)][(to)])
+#define node_distance(from, to) (__node_distances[(from)][(to)])
#include <asm-generic/topology.h>
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 50d344ca60a8..65e9c856390d 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -28,7 +28,7 @@
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
-#define cpu_icache_snoops_remote_store 1
+#define cpu_icache_snoops_remote_store 1
#define cpu_has_mipsmt 0
#define cpu_has_userlocal 0
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
index 05aabb27e5e7..5edf05d9dad8 100644
--- a/arch/mips/include/asm/mach-ip28/spaces.h
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -6,7 +6,7 @@
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- * 2004 pf
+ * 2004 pf
*/
#ifndef _ASM_MACH_IP28_SPACES_H
#define _ASM_MACH_IP28_SPACES_H
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index c8fb5aacf50a..073f0c4760ba 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -50,7 +50,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
return pa;
}
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
+/* This is almost certainly wrong but it's what dma-ip32.c used to use */
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
diff --git a/arch/mips/include/asm/mach-ip32/war.h b/arch/mips/include/asm/mach-ip32/war.h
index 7237a935a133..9807ecda5a88 100644
--- a/arch/mips/include/asm/mach-ip32/war.h
+++ b/arch/mips/include/asm/mach-ip32/war.h
@@ -17,7 +17,7 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
+#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
index 88b5acb75145..62aa1e287fba 100644
--- a/arch/mips/include/asm/mach-jazz/floppy.h
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -90,7 +90,7 @@ static inline void fd_disable_irq(void)
static inline int fd_request_irq(void)
{
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- 0, "floppy", NULL);
+ 0, "floppy", NULL);
}
static inline void fd_free_irq(void)
diff --git a/arch/mips/include/asm/mach-jz4740/clock.h b/arch/mips/include/asm/mach-jz4740/clock.h
index 1b7408dd0e23..16659cd76d4e 100644
--- a/arch/mips/include/asm/mach-jz4740/clock.h
+++ b/arch/mips/include/asm/mach-jz4740/clock.h
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h
index a3be12183599..98b4e7c0dbae 100644
--- a/arch/mips/include/asm/mach-jz4740/dma.h
+++ b/arch/mips/include/asm/mach-jz4740/dma.h
@@ -3,7 +3,7 @@
* JZ7420/JZ4740 DMA definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -40,9 +40,9 @@ enum jz4740_dma_width {
};
enum jz4740_dma_transfer_size {
- JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
- JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
- JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
+ JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
+ JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
+ JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
};
@@ -87,4 +87,4 @@ uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma);
void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma,
jz4740_dma_complete_callback_t cb);
-#endif /* __ASM_JZ4740_DMA_H__ */
+#endif /* __ASM_JZ4740_DMA_H__ */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index 1a6482ea0bb3..eaacba79cf18 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -198,7 +198,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
#define JZ_GPIO_FUNC_MEM_ADDR14 JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_ADDR15 JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_ADDR16 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_CLS JZ_GPIO_FUNC1
+#define JZ_GPIO_FUNC_LCD_CLS JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_LCD_SPL JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_DCS JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_RAS JZ_GPIO_FUNC1
diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h
index 5ad1a9c113c6..df50736749c1 100644
--- a/arch/mips/include/asm/mach-jz4740/irq.h
+++ b/arch/mips/include/asm/mach-jz4740/irq.h
@@ -3,7 +3,7 @@
* JZ4740 IRQ definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 163e81db880d..72cfebdb5a47 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -3,7 +3,7 @@
* JZ4740 platform device definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/timer.h b/arch/mips/include/asm/mach-jz4740/timer.h
index a7759fb1f73d..8750a1d04e22 100644
--- a/arch/mips/include/asm/mach-jz4740/timer.h
+++ b/arch/mips/include/asm/mach-jz4740/timer.h
@@ -3,7 +3,7 @@
* JZ4740 platform timer support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index fccac3592651..98d6a2f14aaf 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -44,7 +44,7 @@
/* BOOT_SEL - find what boot media we have */
#define BS_FLASH 0x1
-#define BS_SPI 0x4
+#define BS_SPI 0x4
/* global register ranges */
extern __iomem void *ltq_ebu_membase;
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index 5e8a6e965756..f196cceb7322 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -34,6 +34,7 @@ extern spinlock_t ebu_lock;
extern void ltq_disable_irq(struct irq_data *data);
extern void ltq_mask_and_ack_irq(struct irq_data *data);
extern void ltq_enable_irq(struct irq_data *data);
+extern int ltq_eiu_get_irq(int exin);
/* clock handling */
extern int clk_activate(struct clk *clk);
@@ -41,6 +42,7 @@ extern void clk_deactivate(struct clk *clk);
extern struct clk *clk_get_cpu(void);
extern struct clk *clk_get_fpi(void);
extern struct clk *clk_get_io(void);
+extern struct clk *clk_get_ppe(void);
/* find out what bootsource we have */
extern unsigned char ltq_boot_select(void);
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h
index b6c568c280ef..358ca979c1bd 100644
--- a/arch/mips/include/asm/mach-lantiq/war.h
+++ b/arch/mips/include/asm/mach-lantiq/war.h
@@ -7,17 +7,17 @@
#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H
#define __ASM_MIPS_MACH_LANTIQ_WAR_H
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 872943a4b90e..5f8693d5ab12 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -21,7 +21,7 @@
#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */
#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */
-#define LTQ_DMA_OWN BIT(31) /* owner bit */
+#define LTQ_DMA_OWN BIT(31) /* owner bit */
#define LTQ_DMA_C BIT(30) /* complete bit */
#define LTQ_DMA_SOP BIT(29) /* start of packet */
#define LTQ_DMA_EOP BIT(28) /* end of packet */
@@ -38,7 +38,7 @@ struct ltq_dma_channel {
int nr; /* the channel number */
int irq; /* the mapped irq */
int desc; /* the current descriptor */
- struct ltq_dma_desc *desc_base; /* the descriptor base */
+ struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
};
diff --git a/arch/mips/include/asm/mach-lasat/mach-gt64120.h b/arch/mips/include/asm/mach-lasat/mach-gt64120.h
index 1a9ad45cc135..c253d3fa5167 100644
--- a/arch/mips/include/asm/mach-lasat/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-lasat/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
@@ -18,8 +18,8 @@
*
* (Guessing ...)
*/
-#define GT_PCI_MEM_BASE 0x12000000UL
-#define GT_PCI_MEM_SIZE 0x02000000UL
+#define GT_PCI_MEM_BASE 0x12000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
#define GT_PCI_IO_BASE 0x10000000UL
#define GT_PCI_IO_SIZE 0x02000000UL
#define GT_ISA_IO_BASE PCI_IO_BASE
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 1a05d854e34c..75fd8c0f986e 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -8,9 +8,9 @@
* Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
*
* reference: /proc/cpuinfo,
- * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
- * arch/mips/kernel/proc.c(show_cpuinfo),
- * loongson2f user manual.
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
*/
#ifndef __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H
@@ -37,7 +37,7 @@
#define cpu_has_fpu 1
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_inclusive_pcaches 1
-#define cpu_has_llsc 1
+#define cpu_has_llsc 1
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
index 2a8e2bb5d539..a0ee0cb775ad 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
@@ -5,8 +5,8 @@
* Author : jlliu <liujl@lemote.com>
*/
-#ifndef _CS5536_H
-#define _CS5536_H
+#ifndef _CS5536_H
+#define _CS5536_H
#include <linux/types.h>
@@ -16,237 +16,237 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
/*
* MSR module base
*/
-#define CS5536_SB_MSR_BASE (0x00000000)
-#define CS5536_GLIU_MSR_BASE (0x10000000)
-#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
-#define CS5536_USB_MSR_BASE (0x40000000)
-#define CS5536_IDE_MSR_BASE (0x60000000)
-#define CS5536_DIVIL_MSR_BASE (0x80000000)
-#define CS5536_ACC_MSR_BASE (0xa0000000)
-#define CS5536_UNUSED_MSR_BASE (0xc0000000)
-#define CS5536_GLCP_MSR_BASE (0xe0000000)
+#define CS5536_SB_MSR_BASE (0x00000000)
+#define CS5536_GLIU_MSR_BASE (0x10000000)
+#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
+#define CS5536_USB_MSR_BASE (0x40000000)
+#define CS5536_IDE_MSR_BASE (0x60000000)
+#define CS5536_DIVIL_MSR_BASE (0x80000000)
+#define CS5536_ACC_MSR_BASE (0xa0000000)
+#define CS5536_UNUSED_MSR_BASE (0xc0000000)
+#define CS5536_GLCP_MSR_BASE (0xe0000000)
-#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
-#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
-#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
-#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
-#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
-#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
-#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
-#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
-#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
+#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
+#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
+#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
+#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
+#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
+#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
+#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
+#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
+#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
/*
* BAR SPACE OF VIRTUAL PCI :
* range for pci probe use, length is the actual size.
*/
/* IO space for all DIVIL modules */
-#define CS5536_IRQ_RANGE 0xffffffe0 /* USED FOR PCI PROBE */
-#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
-#define CS5536_SMB_RANGE 0xfffffff8
-#define CS5536_SMB_LENGTH 0x08
-#define CS5536_GPIO_RANGE 0xffffff00
-#define CS5536_GPIO_LENGTH 0x100
-#define CS5536_MFGPT_RANGE 0xffffffc0
-#define CS5536_MFGPT_LENGTH 0x40
-#define CS5536_ACPI_RANGE 0xffffffe0
-#define CS5536_ACPI_LENGTH 0x20
-#define CS5536_PMS_RANGE 0xffffff80
-#define CS5536_PMS_LENGTH 0x80
+#define CS5536_IRQ_RANGE 0xffffffe0 /* USED FOR PCI PROBE */
+#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
+#define CS5536_SMB_RANGE 0xfffffff8
+#define CS5536_SMB_LENGTH 0x08
+#define CS5536_GPIO_RANGE 0xffffff00
+#define CS5536_GPIO_LENGTH 0x100
+#define CS5536_MFGPT_RANGE 0xffffffc0
+#define CS5536_MFGPT_LENGTH 0x40
+#define CS5536_ACPI_RANGE 0xffffffe0
+#define CS5536_ACPI_LENGTH 0x20
+#define CS5536_PMS_RANGE 0xffffff80
+#define CS5536_PMS_LENGTH 0x80
/* IO space for IDE */
-#define CS5536_IDE_RANGE 0xfffffff0
-#define CS5536_IDE_LENGTH 0x10
+#define CS5536_IDE_RANGE 0xfffffff0
+#define CS5536_IDE_LENGTH 0x10
/* IO space for ACC */
-#define CS5536_ACC_RANGE 0xffffff80
-#define CS5536_ACC_LENGTH 0x80
+#define CS5536_ACC_RANGE 0xffffff80
+#define CS5536_ACC_LENGTH 0x80
/* MEM space for ALL USB modules */
-#define CS5536_OHCI_RANGE 0xfffff000
-#define CS5536_OHCI_LENGTH 0x1000
-#define CS5536_EHCI_RANGE 0xfffff000
-#define CS5536_EHCI_LENGTH 0x1000
+#define CS5536_OHCI_RANGE 0xfffff000
+#define CS5536_OHCI_LENGTH 0x1000
+#define CS5536_EHCI_RANGE 0xfffff000
+#define CS5536_EHCI_LENGTH 0x1000
/*
* PCI MSR ACCESS
*/
-#define PCI_MSR_CTRL 0xF0
-#define PCI_MSR_ADDR 0xF4
-#define PCI_MSR_DATA_LO 0xF8
-#define PCI_MSR_DATA_HI 0xFC
+#define PCI_MSR_CTRL 0xF0
+#define PCI_MSR_ADDR 0xF4
+#define PCI_MSR_DATA_LO 0xF8
+#define PCI_MSR_DATA_HI 0xFC
/**************** MSR *****************************/
/*
* GLIU STANDARD MSR
*/
-#define GLIU_CAP 0x00
-#define GLIU_CONFIG 0x01
-#define GLIU_SMI 0x02
-#define GLIU_ERROR 0x03
-#define GLIU_PM 0x04
-#define GLIU_DIAG 0x05
+#define GLIU_CAP 0x00
+#define GLIU_CONFIG 0x01
+#define GLIU_SMI 0x02
+#define GLIU_ERROR 0x03
+#define GLIU_PM 0x04
+#define GLIU_DIAG 0x05
/*
* GLIU SPEC. MSR
*/
-#define GLIU_P2D_BM0 0x20
-#define GLIU_P2D_BM1 0x21
-#define GLIU_P2D_BM2 0x22
-#define GLIU_P2D_BMK0 0x23
-#define GLIU_P2D_BMK1 0x24
-#define GLIU_P2D_BM3 0x25
-#define GLIU_P2D_BM4 0x26
-#define GLIU_COH 0x80
-#define GLIU_PAE 0x81
-#define GLIU_ARB 0x82
-#define GLIU_ASMI 0x83
-#define GLIU_AERR 0x84
-#define GLIU_DEBUG 0x85
-#define GLIU_PHY_CAP 0x86
-#define GLIU_NOUT_RESP 0x87
-#define GLIU_NOUT_WDATA 0x88
-#define GLIU_WHOAMI 0x8B
-#define GLIU_SLV_DIS 0x8C
-#define GLIU_IOD_BM0 0xE0
-#define GLIU_IOD_BM1 0xE1
-#define GLIU_IOD_BM2 0xE2
-#define GLIU_IOD_BM3 0xE3
-#define GLIU_IOD_BM4 0xE4
-#define GLIU_IOD_BM5 0xE5
-#define GLIU_IOD_BM6 0xE6
-#define GLIU_IOD_BM7 0xE7
-#define GLIU_IOD_BM8 0xE8
-#define GLIU_IOD_BM9 0xE9
-#define GLIU_IOD_SC0 0xEA
-#define GLIU_IOD_SC1 0xEB
-#define GLIU_IOD_SC2 0xEC
-#define GLIU_IOD_SC3 0xED
-#define GLIU_IOD_SC4 0xEE
-#define GLIU_IOD_SC5 0xEF
-#define GLIU_IOD_SC6 0xF0
-#define GLIU_IOD_SC7 0xF1
+#define GLIU_P2D_BM0 0x20
+#define GLIU_P2D_BM1 0x21
+#define GLIU_P2D_BM2 0x22
+#define GLIU_P2D_BMK0 0x23
+#define GLIU_P2D_BMK1 0x24
+#define GLIU_P2D_BM3 0x25
+#define GLIU_P2D_BM4 0x26
+#define GLIU_COH 0x80
+#define GLIU_PAE 0x81
+#define GLIU_ARB 0x82
+#define GLIU_ASMI 0x83
+#define GLIU_AERR 0x84
+#define GLIU_DEBUG 0x85
+#define GLIU_PHY_CAP 0x86
+#define GLIU_NOUT_RESP 0x87
+#define GLIU_NOUT_WDATA 0x88
+#define GLIU_WHOAMI 0x8B
+#define GLIU_SLV_DIS 0x8C
+#define GLIU_IOD_BM0 0xE0
+#define GLIU_IOD_BM1 0xE1
+#define GLIU_IOD_BM2 0xE2
+#define GLIU_IOD_BM3 0xE3
+#define GLIU_IOD_BM4 0xE4
+#define GLIU_IOD_BM5 0xE5
+#define GLIU_IOD_BM6 0xE6
+#define GLIU_IOD_BM7 0xE7
+#define GLIU_IOD_BM8 0xE8
+#define GLIU_IOD_BM9 0xE9
+#define GLIU_IOD_SC0 0xEA
+#define GLIU_IOD_SC1 0xEB
+#define GLIU_IOD_SC2 0xEC
+#define GLIU_IOD_SC3 0xED
+#define GLIU_IOD_SC4 0xEE
+#define GLIU_IOD_SC5 0xEF
+#define GLIU_IOD_SC6 0xF0
+#define GLIU_IOD_SC7 0xF1
/*
* SB STANDARD
*/
-#define SB_CAP 0x00
-#define SB_CONFIG 0x01
-#define SB_SMI 0x02
-#define SB_ERROR 0x03
-#define SB_MAR_ERR_EN 0x00000001
-#define SB_TAR_ERR_EN 0x00000002
-#define SB_RSVD_BIT1 0x00000004
-#define SB_EXCEP_ERR_EN 0x00000008
-#define SB_SYSE_ERR_EN 0x00000010
-#define SB_PARE_ERR_EN 0x00000020
-#define SB_TAS_ERR_EN 0x00000040
-#define SB_MAR_ERR_FLAG 0x00010000
-#define SB_TAR_ERR_FLAG 0x00020000
-#define SB_RSVD_BIT2 0x00040000
-#define SB_EXCEP_ERR_FLAG 0x00080000
-#define SB_SYSE_ERR_FLAG 0x00100000
-#define SB_PARE_ERR_FLAG 0x00200000
-#define SB_TAS_ERR_FLAG 0x00400000
-#define SB_PM 0x04
-#define SB_DIAG 0x05
+#define SB_CAP 0x00
+#define SB_CONFIG 0x01
+#define SB_SMI 0x02
+#define SB_ERROR 0x03
+#define SB_MAR_ERR_EN 0x00000001
+#define SB_TAR_ERR_EN 0x00000002
+#define SB_RSVD_BIT1 0x00000004
+#define SB_EXCEP_ERR_EN 0x00000008
+#define SB_SYSE_ERR_EN 0x00000010
+#define SB_PARE_ERR_EN 0x00000020
+#define SB_TAS_ERR_EN 0x00000040
+#define SB_MAR_ERR_FLAG 0x00010000
+#define SB_TAR_ERR_FLAG 0x00020000
+#define SB_RSVD_BIT2 0x00040000
+#define SB_EXCEP_ERR_FLAG 0x00080000
+#define SB_SYSE_ERR_FLAG 0x00100000
+#define SB_PARE_ERR_FLAG 0x00200000
+#define SB_TAS_ERR_FLAG 0x00400000
+#define SB_PM 0x04
+#define SB_DIAG 0x05
/*
* SB SPEC.
*/
-#define SB_CTRL 0x10
-#define SB_R0 0x20
-#define SB_R1 0x21
-#define SB_R2 0x22
-#define SB_R3 0x23
-#define SB_R4 0x24
-#define SB_R5 0x25
-#define SB_R6 0x26
-#define SB_R7 0x27
-#define SB_R8 0x28
-#define SB_R9 0x29
-#define SB_R10 0x2A
-#define SB_R11 0x2B
-#define SB_R12 0x2C
-#define SB_R13 0x2D
-#define SB_R14 0x2E
-#define SB_R15 0x2F
+#define SB_CTRL 0x10
+#define SB_R0 0x20
+#define SB_R1 0x21
+#define SB_R2 0x22
+#define SB_R3 0x23
+#define SB_R4 0x24
+#define SB_R5 0x25
+#define SB_R6 0x26
+#define SB_R7 0x27
+#define SB_R8 0x28
+#define SB_R9 0x29
+#define SB_R10 0x2A
+#define SB_R11 0x2B
+#define SB_R12 0x2C
+#define SB_R13 0x2D
+#define SB_R14 0x2E
+#define SB_R15 0x2F
/*
* GLCP STANDARD
*/
-#define GLCP_CAP 0x00
-#define GLCP_CONFIG 0x01
-#define GLCP_SMI 0x02
-#define GLCP_ERROR 0x03
-#define GLCP_PM 0x04
-#define GLCP_DIAG 0x05
+#define GLCP_CAP 0x00
+#define GLCP_CONFIG 0x01
+#define GLCP_SMI 0x02
+#define GLCP_ERROR 0x03
+#define GLCP_PM 0x04
+#define GLCP_DIAG 0x05
/*
* GLCP SPEC.
*/
-#define GLCP_CLK_DIS_DELAY 0x08
-#define GLCP_PM_CLK_DISABLE 0x09
-#define GLCP_GLB_PM 0x0B
-#define GLCP_DBG_OUT 0x0C
-#define GLCP_RSVD1 0x0D
-#define GLCP_SOFT_COM 0x0E
-#define SOFT_BAR_SMB_FLAG 0x00000001
-#define SOFT_BAR_GPIO_FLAG 0x00000002
-#define SOFT_BAR_MFGPT_FLAG 0x00000004
-#define SOFT_BAR_IRQ_FLAG 0x00000008
-#define SOFT_BAR_PMS_FLAG 0x00000010
-#define SOFT_BAR_ACPI_FLAG 0x00000020
-#define SOFT_BAR_IDE_FLAG 0x00000400
-#define SOFT_BAR_ACC_FLAG 0x00000800
-#define SOFT_BAR_OHCI_FLAG 0x00001000
-#define SOFT_BAR_EHCI_FLAG 0x00002000
-#define GLCP_RSVD2 0x0F
-#define GLCP_CLK_OFF 0x10
-#define GLCP_CLK_ACTIVE 0x11
-#define GLCP_CLK_DISABLE 0x12
-#define GLCP_CLK4ACK 0x13
-#define GLCP_SYS_RST 0x14
-#define GLCP_RSVD3 0x15
-#define GLCP_DBG_CLK_CTRL 0x16
-#define GLCP_CHIP_REV_ID 0x17
+#define GLCP_CLK_DIS_DELAY 0x08
+#define GLCP_PM_CLK_DISABLE 0x09
+#define GLCP_GLB_PM 0x0B
+#define GLCP_DBG_OUT 0x0C
+#define GLCP_RSVD1 0x0D
+#define GLCP_SOFT_COM 0x0E
+#define SOFT_BAR_SMB_FLAG 0x00000001
+#define SOFT_BAR_GPIO_FLAG 0x00000002
+#define SOFT_BAR_MFGPT_FLAG 0x00000004
+#define SOFT_BAR_IRQ_FLAG 0x00000008
+#define SOFT_BAR_PMS_FLAG 0x00000010
+#define SOFT_BAR_ACPI_FLAG 0x00000020
+#define SOFT_BAR_IDE_FLAG 0x00000400
+#define SOFT_BAR_ACC_FLAG 0x00000800
+#define SOFT_BAR_OHCI_FLAG 0x00001000
+#define SOFT_BAR_EHCI_FLAG 0x00002000
+#define GLCP_RSVD2 0x0F
+#define GLCP_CLK_OFF 0x10
+#define GLCP_CLK_ACTIVE 0x11
+#define GLCP_CLK_DISABLE 0x12
+#define GLCP_CLK4ACK 0x13
+#define GLCP_SYS_RST 0x14
+#define GLCP_RSVD3 0x15
+#define GLCP_DBG_CLK_CTRL 0x16
+#define GLCP_CHIP_REV_ID 0x17
/* PIC */
-#define PIC_YSEL_LOW 0x20
-#define PIC_YSEL_LOW_USB_SHIFT 8
-#define PIC_YSEL_LOW_ACC_SHIFT 16
-#define PIC_YSEL_LOW_FLASH_SHIFT 24
-#define PIC_YSEL_HIGH 0x21
-#define PIC_ZSEL_LOW 0x22
-#define PIC_ZSEL_HIGH 0x23
-#define PIC_IRQM_PRIM 0x24
-#define PIC_IRQM_LPC 0x25
-#define PIC_XIRR_STS_LOW 0x26
-#define PIC_XIRR_STS_HIGH 0x27
-#define PCI_SHDW 0x34
+#define PIC_YSEL_LOW 0x20
+#define PIC_YSEL_LOW_USB_SHIFT 8
+#define PIC_YSEL_LOW_ACC_SHIFT 16
+#define PIC_YSEL_LOW_FLASH_SHIFT 24
+#define PIC_YSEL_HIGH 0x21
+#define PIC_ZSEL_LOW 0x22
+#define PIC_ZSEL_HIGH 0x23
+#define PIC_IRQM_PRIM 0x24
+#define PIC_IRQM_LPC 0x25
+#define PIC_XIRR_STS_LOW 0x26
+#define PIC_XIRR_STS_HIGH 0x27
+#define PCI_SHDW 0x34
/*
* DIVIL STANDARD
*/
-#define DIVIL_CAP 0x00
-#define DIVIL_CONFIG 0x01
-#define DIVIL_SMI 0x02
-#define DIVIL_ERROR 0x03
-#define DIVIL_PM 0x04
-#define DIVIL_DIAG 0x05
+#define DIVIL_CAP 0x00
+#define DIVIL_CONFIG 0x01
+#define DIVIL_SMI 0x02
+#define DIVIL_ERROR 0x03
+#define DIVIL_PM 0x04
+#define DIVIL_DIAG 0x05
/*
* DIVIL SPEC.
*/
-#define DIVIL_LBAR_IRQ 0x08
-#define DIVIL_LBAR_KEL 0x09
-#define DIVIL_LBAR_SMB 0x0B
-#define DIVIL_LBAR_GPIO 0x0C
-#define DIVIL_LBAR_MFGPT 0x0D
-#define DIVIL_LBAR_ACPI 0x0E
-#define DIVIL_LBAR_PMS 0x0F
-#define DIVIL_LEG_IO 0x14
-#define DIVIL_BALL_OPTS 0x15
-#define DIVIL_SOFT_IRQ 0x16
-#define DIVIL_SOFT_RESET 0x17
+#define DIVIL_LBAR_IRQ 0x08
+#define DIVIL_LBAR_KEL 0x09
+#define DIVIL_LBAR_SMB 0x0B
+#define DIVIL_LBAR_GPIO 0x0C
+#define DIVIL_LBAR_MFGPT 0x0D
+#define DIVIL_LBAR_ACPI 0x0E
+#define DIVIL_LBAR_PMS 0x0F
+#define DIVIL_LEG_IO 0x14
+#define DIVIL_BALL_OPTS 0x15
+#define DIVIL_SOFT_IRQ 0x16
+#define DIVIL_SOFT_RESET 0x17
/* MFGPT */
#define MFGPT_IRQ 0x28
@@ -254,52 +254,52 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
/*
* IDE STANDARD
*/
-#define IDE_CAP 0x00
-#define IDE_CONFIG 0x01
-#define IDE_SMI 0x02
-#define IDE_ERROR 0x03
-#define IDE_PM 0x04
-#define IDE_DIAG 0x05
+#define IDE_CAP 0x00
+#define IDE_CONFIG 0x01
+#define IDE_SMI 0x02
+#define IDE_ERROR 0x03
+#define IDE_PM 0x04
+#define IDE_DIAG 0x05
/*
* IDE SPEC.
*/
-#define IDE_IO_BAR 0x08
-#define IDE_CFG 0x10
-#define IDE_DTC 0x12
-#define IDE_CAST 0x13
-#define IDE_ETC 0x14
-#define IDE_INTERNAL_PM 0x15
+#define IDE_IO_BAR 0x08
+#define IDE_CFG 0x10
+#define IDE_DTC 0x12
+#define IDE_CAST 0x13
+#define IDE_ETC 0x14
+#define IDE_INTERNAL_PM 0x15
/*
* ACC STANDARD
*/
-#define ACC_CAP 0x00
-#define ACC_CONFIG 0x01
-#define ACC_SMI 0x02
-#define ACC_ERROR 0x03
-#define ACC_PM 0x04
-#define ACC_DIAG 0x05
+#define ACC_CAP 0x00
+#define ACC_CONFIG 0x01
+#define ACC_SMI 0x02
+#define ACC_ERROR 0x03
+#define ACC_PM 0x04
+#define ACC_DIAG 0x05
/*
* USB STANDARD
*/
-#define USB_CAP 0x00
-#define USB_CONFIG 0x01
-#define USB_SMI 0x02
-#define USB_ERROR 0x03
-#define USB_PM 0x04
-#define USB_DIAG 0x05
+#define USB_CAP 0x00
+#define USB_CONFIG 0x01
+#define USB_SMI 0x02
+#define USB_ERROR 0x03
+#define USB_PM 0x04
+#define USB_DIAG 0x05
/*
* USB SPEC.
*/
-#define USB_OHCI 0x08
-#define USB_EHCI 0x09
+#define USB_OHCI 0x08
+#define USB_EHCI 0x09
/****************** NATIVE ***************************/
/* GPIO : I/O SPACE; REG : 32BITS */
-#define GPIOL_OUT_VAL 0x00
-#define GPIOL_OUT_EN 0x04
+#define GPIOL_OUT_VAL 0x00
+#define GPIOL_OUT_EN 0x04
#endif /* _CS5536_H */
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
index 4b493d6772c2..021d0172dad6 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
@@ -25,7 +25,7 @@ static inline void __maybe_unused enable_mfgpt0_counter(void)
#endif
#define MFGPT_TICK_RATE 14318000
-#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
+#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
#define MFGPT_BASE mfgpt_base
#define MFGPT0_CMP2 (MFGPT_BASE + 2)
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
index 0dca9c89ee7c..8a7ecb4d5c64 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
@@ -8,8 +8,8 @@
* Author : jlliu, liujl@lemote.com
*/
-#ifndef _CS5536_PCI_H
-#define _CS5536_PCI_H
+#ifndef _CS5536_PCI_H
+#define _CS5536_PCI_H
#include <linux/types.h>
#include <linux/pci_regs.h>
@@ -17,20 +17,20 @@
extern void cs5536_pci_conf_write4(int function, int reg, u32 value);
extern u32 cs5536_pci_conf_read4(int function, int reg);
-#define CS5536_ACC_INTR 9
-#define CS5536_IDE_INTR 14
-#define CS5536_USB_INTR 11
-#define CS5536_MFGPT_INTR 5
-#define CS5536_UART1_INTR 4
-#define CS5536_UART2_INTR 3
+#define CS5536_ACC_INTR 9
+#define CS5536_IDE_INTR 14
+#define CS5536_USB_INTR 11
+#define CS5536_MFGPT_INTR 5
+#define CS5536_UART1_INTR 4
+#define CS5536_UART2_INTR 3
/************** PCI BUS DEVICE FUNCTION ***************/
/*
* PCI bus device function
*/
-#define PCI_BUS_CS5536 0
-#define PCI_IDSEL_CS5536 14
+#define PCI_BUS_CS5536 0
+#define PCI_IDSEL_CS5536 14
/********** STANDARD PCI-2.2 EXPANSION ****************/
@@ -45,21 +45,21 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
(((mod_dev_id) << 16) | (sys_vendor_id))
/* VENDOR ID */
-#define CS5536_VENDOR_ID 0x1022
+#define CS5536_VENDOR_ID 0x1022
/* DEVICE ID */
-#define CS5536_ISA_DEVICE_ID 0x2090
-#define CS5536_IDE_DEVICE_ID 0x209a
-#define CS5536_ACC_DEVICE_ID 0x2093
-#define CS5536_OHCI_DEVICE_ID 0x2094
-#define CS5536_EHCI_DEVICE_ID 0x2095
+#define CS5536_ISA_DEVICE_ID 0x2090
+#define CS5536_IDE_DEVICE_ID 0x209a
+#define CS5536_ACC_DEVICE_ID 0x2093
+#define CS5536_OHCI_DEVICE_ID 0x2094
+#define CS5536_EHCI_DEVICE_ID 0x2095
/* CLASS CODE : CLASS SUB-CLASS INTERFACE */
-#define CS5536_ISA_CLASS_CODE 0x060100
+#define CS5536_ISA_CLASS_CODE 0x060100
#define CS5536_IDE_CLASS_CODE 0x010180
-#define CS5536_ACC_CLASS_CODE 0x040100
-#define CS5536_OHCI_CLASS_CODE 0x0C0310
-#define CS5536_EHCI_CLASS_CODE 0x0C0320
+#define CS5536_ACC_CLASS_CODE 0x040100
+#define CS5536_OHCI_CLASS_CODE 0x0C0310
+#define CS5536_EHCI_CLASS_CODE 0x0C0320
/* BHLC : BIST HEADER-TYPE LATENCY-TIMER CACHE-LINE-SIZE */
@@ -67,40 +67,40 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
((PCI_NONE_BIST << 24) | ((header_type) << 16) \
| ((latency_timer) << 8) | PCI_NORMAL_CACHE_LINE_SIZE);
-#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
-#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
-#define PCI_NORMAL_HEADER_TYPE 0x00
-#define PCI_NORMAL_LATENCY_TIMER 0x00
-#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
+#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
+#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
+#define PCI_NORMAL_HEADER_TYPE 0x00
+#define PCI_NORMAL_LATENCY_TIMER 0x00
+#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
/* BAR */
-#define PCI_BAR0_REG 0x10
-#define PCI_BAR1_REG 0x14
-#define PCI_BAR2_REG 0x18
-#define PCI_BAR3_REG 0x1c
-#define PCI_BAR4_REG 0x20
-#define PCI_BAR5_REG 0x24
-#define PCI_BAR_COUNT 6
-#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
+#define PCI_BAR0_REG 0x10
+#define PCI_BAR1_REG 0x14
+#define PCI_BAR2_REG 0x18
+#define PCI_BAR3_REG 0x1c
+#define PCI_BAR4_REG 0x20
+#define PCI_BAR5_REG 0x24
+#define PCI_BAR_COUNT 6
+#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
/* CARDBUS CIS POINTER */
-#define PCI_CARDBUS_CIS_POINTER 0x00000000
+#define PCI_CARDBUS_CIS_POINTER 0x00000000
-/* SUBSYSTEM VENDOR ID */
-#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
+/* SUBSYSTEM VENDOR ID */
+#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
/* SUBSYSTEM ID */
-#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
-#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
-#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
-#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
-#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
+#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
+#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
+#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
+#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
+#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
/* EXPANSION ROM BAR */
-#define PCI_EXPANSION_ROM_BAR 0x00000000
+#define PCI_EXPANSION_ROM_BAR 0x00000000
/* CAPABILITIES POINTER */
-#define PCI_CAPLIST_POINTER 0x00000000
+#define PCI_CAPLIST_POINTER 0x00000000
#define PCI_CAPLIST_USB_POINTER 0x40
/* INTERRUPT */
@@ -108,46 +108,46 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
((PCI_MAX_LATENCY << 24) | (PCI_MIN_GRANT << 16) | \
((pin) << 8) | (mod_intr))
-#define PCI_MAX_LATENCY 0x40
-#define PCI_MIN_GRANT 0x00
-#define PCI_DEFAULT_PIN 0x01
+#define PCI_MAX_LATENCY 0x40
+#define PCI_MIN_GRANT 0x00
+#define PCI_DEFAULT_PIN 0x01
/*********** EXPANSION PCI REG ************************/
/*
* ISA EXPANSION
*/
-#define PCI_UART1_INT_REG 0x50
+#define PCI_UART1_INT_REG 0x50
#define PCI_UART2_INT_REG 0x54
-#define PCI_ISA_FIXUP_REG 0x58
+#define PCI_ISA_FIXUP_REG 0x58
/*
* IDE EXPANSION
*/
-#define PCI_IDE_CFG_REG 0x40
-#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
-#define PCI_IDE_DTC_REG 0x48
-#define PCI_IDE_CAST_REG 0x4C
-#define PCI_IDE_ETC_REG 0x50
-#define PCI_IDE_PM_REG 0x54
-#define PCI_IDE_INT_REG 0x60
+#define PCI_IDE_CFG_REG 0x40
+#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
+#define PCI_IDE_DTC_REG 0x48
+#define PCI_IDE_CAST_REG 0x4C
+#define PCI_IDE_ETC_REG 0x50
+#define PCI_IDE_PM_REG 0x54
+#define PCI_IDE_INT_REG 0x60
/*
* ACC EXPANSION
*/
-#define PCI_ACC_INT_REG 0x50
+#define PCI_ACC_INT_REG 0x50
/*
* OHCI EXPANSION : INTERRUPT IS IMPLEMENTED BY THE OHCI
*/
-#define PCI_OHCI_PM_REG 0x40
-#define PCI_OHCI_INT_REG 0x50
+#define PCI_OHCI_PM_REG 0x40
+#define PCI_OHCI_INT_REG 0x50
/*
* EHCI EXPANSION
*/
-#define PCI_EHCI_LEGSMIEN_REG 0x50
-#define PCI_EHCI_LEGSMISTS_REG 0x54
-#define PCI_EHCI_FLADJ_REG 0x60
+#define PCI_EHCI_LEGSMIEN_REG 0x50
+#define PCI_EHCI_LEGSMISTS_REG 0x54
+#define PCI_EHCI_FLADJ_REG 0x60
#endif /* _CS5536_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
index 21c4ecedebe7..1f17c1815ee5 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
@@ -5,8 +5,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef _CS5536_VSM_H
-#define _CS5536_VSM_H
+#ifndef _CS5536_VSM_H
+#define _CS5536_VSM_H
#include <linux/types.h>
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
index e30e73d443df..211a7b7138fe 100644
--- a/arch/mips/include/asm/mach-loongson/gpio.h
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -10,8 +10,8 @@
* (at your option) any later version.
*/
-#ifndef __STLS2F_GPIO_H
-#define __STLS2F_GPIO_H
+#ifndef __STLS2F_GPIO_H
+#define __STLS2F_GPIO_H
#include <asm-generic/gpio.h>
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 5222a007bc21..b286534fef08 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -52,7 +52,7 @@ extern void mach_irq_dispatch(unsigned int pending);
extern int mach_i8259_irq(void);
/* We need this in some places... */
-#define delay() ({ \
+#define delay() ({ \
int x; \
for (x = 0; x < 100000; x++) \
__asm__ __volatile__(""); \
@@ -82,13 +82,13 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_BOOT_BASE 0x1fc00000
#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
-#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
-#define LOONGSON_REG_BASE 0x1fe00000
-#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
-#define LOONGSON_LIO1_BASE 0x1ff00000
-#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
#define LOONGSON_PCILO0_BASE 0x10000000
@@ -115,13 +115,13 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
-#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
-#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
-#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
-#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
-#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
-#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
@@ -132,7 +132,7 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
-#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
#define LOONGSON_PCICMD_ASTEPEN 0x00000080
#define LOONGSON_PCICMD_SERREN 0x00000100
@@ -142,7 +142,7 @@ static inline void do_perfcnt_IRQ(void)
/* Loongson h/w Configuration */
#define LOONGSON_GENCFG_OFFSET 0x4
-#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
#define LOONGSON_GENCFG_SNOOPEN 0x00000002
@@ -173,25 +173,25 @@ static inline void do_perfcnt_IRQ(void)
/* GPIO Regs - r/w */
-#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
/* ICU Configuration Regs - r/w */
#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
-#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
/* ICU Enable Regs - IntEn & IntISR are r/o. */
-#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
-#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
/* ICU */
#define LOONGSON_ICU_MBOXES 0x0000000f
-#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_MBOXES_SHIFT 0
#define LOONGSON_ICU_DMARDY 0x00000010
#define LOONGSON_ICU_DMAEMPTY 0x00000020
#define LOONGSON_ICU_COPYRDY 0x00000040
@@ -212,10 +212,10 @@ static inline void do_perfcnt_IRQ(void)
/* PCI prefetch window base & mask */
-#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
-#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
-#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
-#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
/* PCI_Hit*_Sel_* */
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 43213388c174..3810d5ca84ac 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson/mem.h b/arch/mips/include/asm/mach-loongson/mem.h
index 3b23ee8647d6..f4a36d7dbfab 100644
--- a/arch/mips/include/asm/mach-loongson/mem.h
+++ b/arch/mips/include/asm/mach-loongson/mem.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/irq.h b/arch/mips/include/asm/mach-loongson1/irq.h
index da96ed42f733..96bfb1c1c73d 100644
--- a/arch/mips/include/asm/mach-loongson1/irq.h
+++ b/arch/mips/include/asm/mach-loongson1/irq.h
@@ -3,8 +3,8 @@
*
* IRQ mappings for Loongson 1
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h
index 4e18e88cebbf..5c437c2ba6b3 100644
--- a/arch/mips/include/asm/mach-loongson1/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson1/loongson1.h
@@ -3,8 +3,8 @@
*
* Register mappings for Loongson 1
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
index 718a1228a4f3..30c13e508fff 100644
--- a/arch/mips/include/asm/mach-loongson1/platform.h
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/prom.h b/arch/mips/include/asm/mach-loongson1/prom.h
index b871dc41b8d9..34859a4d4ac4 100644
--- a/arch/mips/include/asm/mach-loongson1/prom.h
+++ b/arch/mips/include/asm/mach-loongson1/prom.h
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
index a81fa3d0dc91..fb6a3ff9318f 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -3,8 +3,8 @@
*
* Loongson 1 Clock Register Definitions.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
index f897de68c527..6574568c2084 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-wdt.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
@@ -3,8 +3,8 @@
*
* Loongson 1 watchdog register definitions.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
index 37e3583a9fdd..de3b66a3723e 100644
--- a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
@@ -23,8 +23,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
@@ -50,8 +50,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h
index 9b9da26683c2..47cfe64efbb0 100644
--- a/arch/mips/include/asm/mach-malta/irq.h
+++ b/arch/mips/include/asm/mach-malta/irq.h
@@ -2,7 +2,7 @@
#define __ASM_MACH_MIPS_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-malta/mach-gt64120.h b/arch/mips/include/asm/mach-malta/mach-gt64120.h
index 0f863148f3b6..62a4b2889fa7 100644
--- a/arch/mips/include/asm/mach-malta/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-malta/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h
index 016fa9446ba9..016fa9446ba9 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h
index ebdbab973e41..ebdbab973e41 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h
index c84bcf9570b1..ac863e2deb62 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h
@@ -43,14 +43,14 @@
* IRQs directly forwarded to the CPU
*/
#define MSP_MIPS_INTBASE 0
-#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
-#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
-#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
-#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
-#define MSP_INT_USB 4 /* IRQ for USB, C_IRQ2 */
-#define MSP_INT_SAR 5 /* IRQ for ADSL2+ SAR, C_IRQ3 */
-#define MSP_INT_CIC 6 /* IRQ for CIC block, C_IRQ4 */
-#define MSP_INT_SEC 7 /* IRQ for Sec engine, C_IRQ5 */
+#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
+#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
+#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
+#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
+#define MSP_INT_USB 4 /* IRQ for USB, C_IRQ2 */
+#define MSP_INT_SAR 5 /* IRQ for ADSL2+ SAR, C_IRQ3 */
+#define MSP_INT_CIC 6 /* IRQ for CIC block, C_IRQ4 */
+#define MSP_INT_SEC 7 /* IRQ for Sec engine, C_IRQ5 */
/*
* IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
@@ -59,93 +59,93 @@
*/
#define MSP_CIC_INTBASE (MSP_MIPS_INTBASE + 8)
#define MSP_INT_EXT0 (MSP_CIC_INTBASE + 0)
- /* External interrupt 0 */
+ /* External interrupt 0 */
#define MSP_INT_EXT1 (MSP_CIC_INTBASE + 1)
- /* External interrupt 1 */
+ /* External interrupt 1 */
#define MSP_INT_EXT2 (MSP_CIC_INTBASE + 2)
- /* External interrupt 2 */
+ /* External interrupt 2 */
#define MSP_INT_EXT3 (MSP_CIC_INTBASE + 3)
- /* External interrupt 3 */
+ /* External interrupt 3 */
#define MSP_INT_CPUIF (MSP_CIC_INTBASE + 4)
- /* CPU interface interrupt */
+ /* CPU interface interrupt */
#define MSP_INT_EXT4 (MSP_CIC_INTBASE + 5)
- /* External interrupt 4 */
+ /* External interrupt 4 */
#define MSP_INT_CIC_USB (MSP_CIC_INTBASE + 6)
- /* Cascaded IRQ for USB */
+ /* Cascaded IRQ for USB */
#define MSP_INT_MBOX (MSP_CIC_INTBASE + 7)
- /* Sec engine mailbox IRQ */
+ /* Sec engine mailbox IRQ */
#define MSP_INT_EXT5 (MSP_CIC_INTBASE + 8)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_TDM (MSP_CIC_INTBASE + 9)
- /* TDM interrupt */
+ /* TDM interrupt */
#define MSP_INT_CIC_MAC0 (MSP_CIC_INTBASE + 10)
- /* Cascaded IRQ for MAC 0 */
+ /* Cascaded IRQ for MAC 0 */
#define MSP_INT_CIC_MAC1 (MSP_CIC_INTBASE + 11)
- /* Cascaded IRQ for MAC 1 */
+ /* Cascaded IRQ for MAC 1 */
#define MSP_INT_CIC_SEC (MSP_CIC_INTBASE + 12)
- /* Cascaded IRQ for sec engine */
-#define MSP_INT_PER (MSP_CIC_INTBASE + 13)
- /* Peripheral interrupt */
-#define MSP_INT_TIMER0 (MSP_CIC_INTBASE + 14)
- /* SLP timer 0 */
-#define MSP_INT_TIMER1 (MSP_CIC_INTBASE + 15)
- /* SLP timer 1 */
-#define MSP_INT_TIMER2 (MSP_CIC_INTBASE + 16)
- /* SLP timer 2 */
-#define MSP_INT_VPE0_TIMER (MSP_CIC_INTBASE + 17)
- /* VPE0 MIPS timer */
+ /* Cascaded IRQ for sec engine */
+#define MSP_INT_PER (MSP_CIC_INTBASE + 13)
+ /* Peripheral interrupt */
+#define MSP_INT_TIMER0 (MSP_CIC_INTBASE + 14)
+ /* SLP timer 0 */
+#define MSP_INT_TIMER1 (MSP_CIC_INTBASE + 15)
+ /* SLP timer 1 */
+#define MSP_INT_TIMER2 (MSP_CIC_INTBASE + 16)
+ /* SLP timer 2 */
+#define MSP_INT_VPE0_TIMER (MSP_CIC_INTBASE + 17)
+ /* VPE0 MIPS timer */
#define MSP_INT_BLKCP (MSP_CIC_INTBASE + 18)
- /* Block Copy */
+ /* Block Copy */
#define MSP_INT_UART0 (MSP_CIC_INTBASE + 19)
- /* UART 0 */
+ /* UART 0 */
#define MSP_INT_PCI (MSP_CIC_INTBASE + 20)
- /* PCI subsystem */
+ /* PCI subsystem */
#define MSP_INT_EXT6 (MSP_CIC_INTBASE + 21)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_PCI_MSI (MSP_CIC_INTBASE + 22)
- /* PCI Message Signal */
+ /* PCI Message Signal */
#define MSP_INT_CIC_SAR (MSP_CIC_INTBASE + 23)
- /* Cascaded ADSL2+ SAR IRQ */
+ /* Cascaded ADSL2+ SAR IRQ */
#define MSP_INT_DSL (MSP_CIC_INTBASE + 24)
- /* ADSL2+ IRQ */
+ /* ADSL2+ IRQ */
#define MSP_INT_CIC_ERR (MSP_CIC_INTBASE + 25)
- /* SLP error condition */
+ /* SLP error condition */
#define MSP_INT_VPE1_TIMER (MSP_CIC_INTBASE + 26)
- /* VPE1 MIPS timer */
+ /* VPE1 MIPS timer */
#define MSP_INT_VPE0_PC (MSP_CIC_INTBASE + 27)
- /* VPE0 Performance counter */
+ /* VPE0 Performance counter */
#define MSP_INT_VPE1_PC (MSP_CIC_INTBASE + 28)
- /* VPE1 Performance counter */
+ /* VPE1 Performance counter */
#define MSP_INT_EXT7 (MSP_CIC_INTBASE + 29)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_VPE0_SW (MSP_CIC_INTBASE + 30)
- /* VPE0 Software interrupt */
+ /* VPE0 Software interrupt */
#define MSP_INT_VPE1_SW (MSP_CIC_INTBASE + 31)
- /* VPE0 Software interrupt */
+ /* VPE0 Software interrupt */
/*
* IRQs cascaded on CIC PER interrupt (MSP_INT_PER)
*/
#define MSP_PER_INTBASE (MSP_CIC_INTBASE + 32)
-/* Reserved 0-1 */
+/* Reserved 0-1 */
#define MSP_INT_UART1 (MSP_PER_INTBASE + 2)
- /* UART 1 */
-/* Reserved 3-5 */
+ /* UART 1 */
+/* Reserved 3-5 */
#define MSP_INT_2WIRE (MSP_PER_INTBASE + 6)
- /* 2-wire */
+ /* 2-wire */
#define MSP_INT_TM0 (MSP_PER_INTBASE + 7)
/* Peripheral timer block out 0 */
#define MSP_INT_TM1 (MSP_PER_INTBASE + 8)
/* Peripheral timer block out 1 */
-/* Reserved 9 */
+/* Reserved 9 */
#define MSP_INT_SPRX (MSP_PER_INTBASE + 10)
- /* SPI RX complete */
+ /* SPI RX complete */
#define MSP_INT_SPTX (MSP_PER_INTBASE + 11)
- /* SPI TX complete */
+ /* SPI TX complete */
#define MSP_INT_GPIO (MSP_PER_INTBASE + 12)
- /* GPIO */
+ /* GPIO */
#define MSP_INT_PER_ERR (MSP_PER_INTBASE + 13)
- /* Peripheral error */
-/* Reserved 14-31 */
+ /* Peripheral error */
+/* Reserved 14-31 */
#endif /* !_MSP_CIC_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h
index 156f320c69e7..daacebb047c2 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h
@@ -54,7 +54,7 @@ enum msp_gpio_mode {
MSP_GPIO_UART_OUTPUT = 0x9, /* Only GPIO 2 or 3 */
MSP_GPIO_PERIF_TIMERA = 0x9, /* Only GPIO 0 or 1 */
MSP_GPIO_PERIF_TIMERB = 0xa, /* Only GPIO 0 or 1 */
- MSP_GPIO_UNKNOWN = 0xb, /* No such GPIO or mode */
+ MSP_GPIO_UNKNOWN = 0xb, /* No such GPIO or mode */
};
/* -- Static Tables -- */
@@ -148,7 +148,7 @@ static unsigned int MSP_GPIO_MODE_ALLOWED[] = {
BASIC_MODE_REG_VALUE(mode, OFFSET_GPIO_NUMBER(gpio))
#define BASIC_MODE_SHIFT(gpio) \
BASIC_MODE_REG_SHIFT(OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE_FROM_REG(data, gpio) \
+#define BASIC_MODE_FROM_REG(data, gpio) \
BASIC_MODE_REG_FROM_REG(data, OFFSET_GPIO_NUMBER(gpio))
/*
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h
index 1d9f05474820..29f8bf79d7a5 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h
@@ -1,7 +1,7 @@
/*
* Defines for the MSP interrupt handlers.
*
- * Copyright (C) 2005, PMC-Sierra, Inc. All rights reserved.
+ * Copyright (C) 2005, PMC-Sierra, Inc. All rights reserved.
* Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
*
* ########################################################################
@@ -28,7 +28,7 @@
/*
* The PMC-Sierra MSP product line has at least two different interrupt
* controllers, the SLP register based scheme and the CIC interrupt
- * controller block mechanism. This file distinguishes between them
+ * controller block mechanism. This file distinguishes between them
* so that devices see a uniform interface.
*/
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h
index 415606903617..24948cc42461 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h
@@ -26,7 +26,7 @@
#ifndef _MSP_PCI_H_
#define _MSP_PCI_H_
-#define MSP_HAS_PCI(ID) (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
+#define MSP_HAS_PCI(ID) (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
/*
* It is convenient to program the OATRAN register so that
@@ -96,24 +96,24 @@ enum
config_status_command, /* 1 */
config_class_revision, /* 2 */
config_BIST_header_latency_cache, /* 3 */
- config_BAR0, /* 4 */
- config_BAR1, /* 5 */
- config_BAR2, /* 6 */
- config_not_used7, /* 7 */
- config_not_used8, /* 8 */
- config_not_used9, /* 9 */
- config_CIS, /* 10 */
- config_subsystem, /* 11 */
- config_not_used12, /* 12 */
+ config_BAR0, /* 4 */
+ config_BAR1, /* 5 */
+ config_BAR2, /* 6 */
+ config_not_used7, /* 7 */
+ config_not_used8, /* 8 */
+ config_not_used9, /* 9 */
+ config_CIS, /* 10 */
+ config_subsystem, /* 11 */
+ config_not_used12, /* 12 */
config_capabilities, /* 13 */
- config_not_used14, /* 14 */
+ config_not_used14, /* 14 */
config_lat_grant_irq, /* 15 */
config_message_control,/* 16 */
config_message_addr, /* 17 */
config_message_data, /* 18 */
- config_VPD_addr, /* 19 */
- config_VPD_data, /* 20 */
- config_maxregs /* 21 - number of registers */
+ config_VPD_addr, /* 19 */
+ config_VPD_data, /* 20 */
+ config_maxregs /* 21 - number of registers */
};
struct msp_pci_regs
@@ -132,15 +132,15 @@ struct msp_pci_regs
pcireg hop_unused_2C; /* +0x2C */
pcireg hop_unused_30; /* +0x30 */
pcireg hop_unused_34; /* +0x34 */
- pcireg if_control; /* +0x38 */
- pcireg oatran; /* +0x3C */
- pcireg reset_ctl; /* +0x40 */
- pcireg config_addr; /* +0x44 */
+ pcireg if_control; /* +0x38 */
+ pcireg oatran; /* +0x3C */
+ pcireg reset_ctl; /* +0x40 */
+ pcireg config_addr; /* +0x44 */
pcireg hop_unused_48; /* +0x48 */
pcireg msg_signaled_int_status; /* +0x4C */
pcireg msg_signaled_int_mask; /* +0x50 */
- pcireg if_status; /* +0x54 */
- pcireg if_mask; /* +0x58 */
+ pcireg if_status; /* +0x54 */
+ pcireg if_mask; /* +0x58 */
pcireg hop_unused_5C; /* +0x5C */
pcireg hop_unused_60; /* +0x60 */
pcireg hop_unused_64; /* +0x64 */
@@ -190,9 +190,9 @@ struct msp_pci_regs
#define BPCI_IFSTATUS_PEI (1<<30) /* Parity error as initiator */
#define BPCI_IFSTATUS_PET (1<<31) /* Parity error as target */
-#define BPCI_RESETCTL_PR (1<<0) /* True if reset asserted */
-#define BPCI_RESETCTL_RT (1<<4) /* Release time */
-#define BPCI_RESETCTL_CT (1<<8) /* Config time */
+#define BPCI_RESETCTL_PR (1<<0) /* True if reset asserted */
+#define BPCI_RESETCTL_RT (1<<4) /* Release time */
+#define BPCI_RESETCTL_CT (1<<8) /* Config time */
#define BPCI_RESETCTL_PE (1<<12) /* PCI enabled */
#define BPCI_RESETCTL_HM (1<<13) /* PCI host mode */
#define BPCI_RESETCTL_RI (1<<14) /* PCI reset in */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h
index 786d82daf8d6..4d3052ab89a2 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h
@@ -40,7 +40,7 @@
(((revision >= 0xb0) && (revision < 0xd0)))
#define FPGA_IS_5000(revision) \
((revision >= 0x80) && (revision <= 0x90))
-#define FPGA_IS_ZEUS(revision) ((revision < 0x7f))
+#define FPGA_IS_ZEUS(revision) ((revision < 0x7f))
#define FPGA_IS_DUET(revision) \
(((revision >= 0xa0) && (revision < 0xb0)))
#define FPGA_IS_MSP4200(revision) ((revision >= 0xd0))
@@ -48,7 +48,7 @@
#define MACHINE_TYPE_POLO "POLO"
#define MACHINE_TYPE_DUET "DUET"
-#define MACHINE_TYPE_ZEUS "ZEUS"
+#define MACHINE_TYPE_ZEUS "ZEUS"
#define MACHINE_TYPE_MSP2000REVB "MSP2000REVB"
#define MACHINE_TYPE_MSP5000 "MSP5000"
#define MACHINE_TYPE_MSP4200 "MSP4200"
@@ -58,7 +58,7 @@
#define MACHINE_TYPE_POLO_FPGA "POLO-FPGA"
#define MACHINE_TYPE_DUET_FPGA "DUET-FPGA"
-#define MACHINE_TYPE_ZEUS_FPGA "ZEUS_FPGA"
+#define MACHINE_TYPE_ZEUS_FPGA "ZEUS_FPGA"
#define MACHINE_TYPE_MSP2000REVB_FPGA "MSP2000REVB-FPGA"
#define MACHINE_TYPE_MSP5000_FPGA "MSP5000-FPGA"
#define MACHINE_TYPE_MSP4200_FPGA "MSP4200-FPGA"
@@ -95,7 +95,7 @@
#define ENET_MII 'M'
#define ENET_RMII 'R'
-#define ENETTXD_FALLING 'F'
+#define ENETTXD_FALLING 'F'
#define ENETTXD_RISING 'R'
#define PCI_HOST 'H'
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 7d41474e5488..2dbc7a8cec1a 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -233,4 +233,4 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
: "=&r" (tmp), "=m" (*address) \
: "0" (tmp), "m" (*address))
-#endif /* __ASM_REGOPS_H__ */
+#endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h
index 692c1b658b92..da3a8dea2282 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h
@@ -37,13 +37,13 @@
/*
########################################################################
- # Address space and device base definitions #
+ # Address space and device base definitions #
########################################################################
*/
/*
***************************************************************************
- * System Logic and Peripherals (ELB, UART0, etc) device address space *
+ * System Logic and Peripherals (ELB, UART0, etc) device address space *
***************************************************************************
*/
#define MSP_SLP_BASE 0x1c000000
@@ -53,69 +53,69 @@
#define MSP_RST_SIZE 0x0C /* System reset register space */
#define MSP_WTIMER_BASE (MSP_SLP_BASE + 0x04C)
- /* watchdog timer base */
+ /* watchdog timer base */
#define MSP_ITIMER_BASE (MSP_SLP_BASE + 0x054)
- /* internal timer base */
+ /* internal timer base */
#define MSP_UART0_BASE (MSP_SLP_BASE + 0x100)
- /* UART0 controller base */
+ /* UART0 controller base */
#define MSP_BCPY_CTRL_BASE (MSP_SLP_BASE + 0x120)
- /* Block Copy controller base */
+ /* Block Copy controller base */
#define MSP_BCPY_DESC_BASE (MSP_SLP_BASE + 0x160)
- /* Block Copy descriptor base */
+ /* Block Copy descriptor base */
/*
***************************************************************************
- * PCI address space *
+ * PCI address space *
***************************************************************************
*/
#define MSP_PCI_BASE 0x19000000
/*
***************************************************************************
- * MSbus device address space *
+ * MSbus device address space *
***************************************************************************
*/
#define MSP_MSB_BASE 0x18000000
- /* MSbus address start */
+ /* MSbus address start */
#define MSP_PER_BASE (MSP_MSB_BASE + 0x400000)
- /* Peripheral device registers */
+ /* Peripheral device registers */
#define MSP_MAC0_BASE (MSP_MSB_BASE + 0x600000)
- /* MAC A device registers */
+ /* MAC A device registers */
#define MSP_MAC1_BASE (MSP_MSB_BASE + 0x700000)
- /* MAC B device registers */
+ /* MAC B device registers */
#define MSP_MAC_SIZE 0xE0 /* MAC register space */
#define MSP_SEC_BASE (MSP_MSB_BASE + 0x800000)
- /* Security Engine registers */
+ /* Security Engine registers */
#define MSP_MAC2_BASE (MSP_MSB_BASE + 0x900000)
- /* MAC C device registers */
+ /* MAC C device registers */
#define MSP_ADSL2_BASE (MSP_MSB_BASE + 0xA80000)
- /* ADSL2 device registers */
+ /* ADSL2 device registers */
#define MSP_USB0_BASE (MSP_MSB_BASE + 0xB00000)
- /* USB0 device registers */
+ /* USB0 device registers */
#define MSP_USB1_BASE (MSP_MSB_BASE + 0x300000)
/* USB1 device registers */
#define MSP_CPUIF_BASE (MSP_MSB_BASE + 0xC00000)
- /* CPU interface registers */
+ /* CPU interface registers */
/* Devices within the MSbus peripheral block */
#define MSP_UART1_BASE (MSP_PER_BASE + 0x030)
- /* UART1 controller base */
+ /* UART1 controller base */
#define MSP_SPI_BASE (MSP_PER_BASE + 0x058)
- /* SPI/MPI control registers */
+ /* SPI/MPI control registers */
#define MSP_TWI_BASE (MSP_PER_BASE + 0x090)
- /* Two-wire control registers */
+ /* Two-wire control registers */
#define MSP_PTIMER_BASE (MSP_PER_BASE + 0x0F0)
- /* Programmable timer control */
+ /* Programmable timer control */
/*
***************************************************************************
- * Physical Memory configuration address space *
+ * Physical Memory configuration address space *
***************************************************************************
*/
#define MSP_MEM_CFG_BASE 0x17f00000
-#define MSP_MEM_INDIRECT_CTL_10 0x10
+#define MSP_MEM_INDIRECT_CTL_10 0x10
/*
* Notes:
@@ -130,10 +130,10 @@
* 3) These constants are for physical addresses which means that they
* work correctly with "ioremap" and friends. This means that device
* drivers will need to remap these addresses using ioremap and perhaps
- * the readw/writew macros. Or they could use the regptr() macro
+ * the readw/writew macros. Or they could use the regptr() macro
* defined below, but the readw/writew calls are the correct thing.
* 4) The UARTs have an additional status register offset from the base
- * address. This register isn't used in the standard 8250 driver but
+ * address. This register isn't used in the standard 8250 driver but
* may be used in other software. Consult the hardware datasheet for
* offset details.
* 5) For some unknown reason the security engine (MSP_SEC_BASE) registers
@@ -163,44 +163,44 @@
/*
***************************************************************************
- * System Logic and Peripherals (RESET, ELB, etc) registers *
+ * System Logic and Peripherals (RESET, ELB, etc) registers *
***************************************************************************
*/
/* System Control register definitions */
-#define DEV_ID_REG regptr(MSP_SLP_BASE + 0x00)
- /* Device-ID RO */
-#define FWR_ID_REG regptr(MSP_SLP_BASE + 0x04)
- /* Firmware-ID Register RW */
-#define SYS_ID_REG0 regptr(MSP_SLP_BASE + 0x08)
- /* System-ID Register-0 RW */
-#define SYS_ID_REG1 regptr(MSP_SLP_BASE + 0x0C)
- /* System-ID Register-1 RW */
+#define DEV_ID_REG regptr(MSP_SLP_BASE + 0x00)
+ /* Device-ID RO */
+#define FWR_ID_REG regptr(MSP_SLP_BASE + 0x04)
+ /* Firmware-ID Register RW */
+#define SYS_ID_REG0 regptr(MSP_SLP_BASE + 0x08)
+ /* System-ID Register-0 RW */
+#define SYS_ID_REG1 regptr(MSP_SLP_BASE + 0x0C)
+ /* System-ID Register-1 RW */
/* System Reset register definitions */
-#define RST_STS_REG regptr(MSP_SLP_BASE + 0x10)
- /* System Reset Status RO */
-#define RST_SET_REG regptr(MSP_SLP_BASE + 0x14)
- /* System Set Reset WO */
-#define RST_CLR_REG regptr(MSP_SLP_BASE + 0x18)
- /* System Clear Reset WO */
+#define RST_STS_REG regptr(MSP_SLP_BASE + 0x10)
+ /* System Reset Status RO */
+#define RST_SET_REG regptr(MSP_SLP_BASE + 0x14)
+ /* System Set Reset WO */
+#define RST_CLR_REG regptr(MSP_SLP_BASE + 0x18)
+ /* System Clear Reset WO */
/* System Clock Registers */
#define PCI_SLP_REG regptr(MSP_SLP_BASE + 0x1C)
- /* PCI clock generator RW */
+ /* PCI clock generator RW */
#define URT_SLP_REG regptr(MSP_SLP_BASE + 0x20)
- /* UART clock generator RW */
-/* reserved (MSP_SLP_BASE + 0x24) */
-/* reserved (MSP_SLP_BASE + 0x28) */
+ /* UART clock generator RW */
+/* reserved (MSP_SLP_BASE + 0x24) */
+/* reserved (MSP_SLP_BASE + 0x28) */
#define PLL1_SLP_REG regptr(MSP_SLP_BASE + 0x2C)
- /* PLL1 clock generator RW */
+ /* PLL1 clock generator RW */
#define PLL0_SLP_REG regptr(MSP_SLP_BASE + 0x30)
- /* PLL0 clock generator RW */
+ /* PLL0 clock generator RW */
#define MIPS_SLP_REG regptr(MSP_SLP_BASE + 0x34)
- /* MIPS clock generator RW */
-#define VE_SLP_REG regptr(MSP_SLP_BASE + 0x38)
+ /* MIPS clock generator RW */
+#define VE_SLP_REG regptr(MSP_SLP_BASE + 0x38)
/* Voice Eng clock generator RW */
-/* reserved (MSP_SLP_BASE + 0x3C) */
+/* reserved (MSP_SLP_BASE + 0x3C) */
#define MSB_SLP_REG regptr(MSP_SLP_BASE + 0x40)
/* MS-Bus clock generator RW */
#define SMAC_SLP_REG regptr(MSP_SLP_BASE + 0x44)
@@ -216,108 +216,108 @@
#define SE_MBOX_REG regptr(MSP_SLP_BASE + 0x78)
/* Security Engine mailbox RW */
#define VE_MBOX_REG regptr(MSP_SLP_BASE + 0x7C)
- /* Voice Engine mailbox RW */
+ /* Voice Engine mailbox RW */
/* ELB Controller Registers */
#define CS0_CNFG_REG regptr(MSP_SLP_BASE + 0x80)
- /* ELB CS0 Configuration Reg */
+ /* ELB CS0 Configuration Reg */
#define CS0_ADDR_REG regptr(MSP_SLP_BASE + 0x84)
- /* ELB CS0 Base Address Reg */
+ /* ELB CS0 Base Address Reg */
#define CS0_MASK_REG regptr(MSP_SLP_BASE + 0x88)
- /* ELB CS0 Mask Register */
+ /* ELB CS0 Mask Register */
#define CS0_ACCESS_REG regptr(MSP_SLP_BASE + 0x8C)
- /* ELB CS0 access register */
+ /* ELB CS0 access register */
#define CS1_CNFG_REG regptr(MSP_SLP_BASE + 0x90)
- /* ELB CS1 Configuration Reg */
+ /* ELB CS1 Configuration Reg */
#define CS1_ADDR_REG regptr(MSP_SLP_BASE + 0x94)
- /* ELB CS1 Base Address Reg */
+ /* ELB CS1 Base Address Reg */
#define CS1_MASK_REG regptr(MSP_SLP_BASE + 0x98)
- /* ELB CS1 Mask Register */
+ /* ELB CS1 Mask Register */
#define CS1_ACCESS_REG regptr(MSP_SLP_BASE + 0x9C)
- /* ELB CS1 access register */
+ /* ELB CS1 access register */
#define CS2_CNFG_REG regptr(MSP_SLP_BASE + 0xA0)
- /* ELB CS2 Configuration Reg */
+ /* ELB CS2 Configuration Reg */
#define CS2_ADDR_REG regptr(MSP_SLP_BASE + 0xA4)
- /* ELB CS2 Base Address Reg */
+ /* ELB CS2 Base Address Reg */
#define CS2_MASK_REG regptr(MSP_SLP_BASE + 0xA8)
- /* ELB CS2 Mask Register */
+ /* ELB CS2 Mask Register */
#define CS2_ACCESS_REG regptr(MSP_SLP_BASE + 0xAC)
- /* ELB CS2 access register */
+ /* ELB CS2 access register */
#define CS3_CNFG_REG regptr(MSP_SLP_BASE + 0xB0)
- /* ELB CS3 Configuration Reg */
+ /* ELB CS3 Configuration Reg */
#define CS3_ADDR_REG regptr(MSP_SLP_BASE + 0xB4)
- /* ELB CS3 Base Address Reg */
+ /* ELB CS3 Base Address Reg */
#define CS3_MASK_REG regptr(MSP_SLP_BASE + 0xB8)
- /* ELB CS3 Mask Register */
+ /* ELB CS3 Mask Register */
#define CS3_ACCESS_REG regptr(MSP_SLP_BASE + 0xBC)
- /* ELB CS3 access register */
+ /* ELB CS3 access register */
#define CS4_CNFG_REG regptr(MSP_SLP_BASE + 0xC0)
- /* ELB CS4 Configuration Reg */
+ /* ELB CS4 Configuration Reg */
#define CS4_ADDR_REG regptr(MSP_SLP_BASE + 0xC4)
- /* ELB CS4 Base Address Reg */
+ /* ELB CS4 Base Address Reg */
#define CS4_MASK_REG regptr(MSP_SLP_BASE + 0xC8)
- /* ELB CS4 Mask Register */
+ /* ELB CS4 Mask Register */
#define CS4_ACCESS_REG regptr(MSP_SLP_BASE + 0xCC)
- /* ELB CS4 access register */
+ /* ELB CS4 access register */
#define CS5_CNFG_REG regptr(MSP_SLP_BASE + 0xD0)
- /* ELB CS5 Configuration Reg */
+ /* ELB CS5 Configuration Reg */
#define CS5_ADDR_REG regptr(MSP_SLP_BASE + 0xD4)
- /* ELB CS5 Base Address Reg */
+ /* ELB CS5 Base Address Reg */
#define CS5_MASK_REG regptr(MSP_SLP_BASE + 0xD8)
- /* ELB CS5 Mask Register */
+ /* ELB CS5 Mask Register */
#define CS5_ACCESS_REG regptr(MSP_SLP_BASE + 0xDC)
- /* ELB CS5 access register */
+ /* ELB CS5 access register */
-/* reserved 0xE0 - 0xE8 */
+/* reserved 0xE0 - 0xE8 */
#define ELB_1PC_EN_REG regptr(MSP_SLP_BASE + 0xEC)
- /* ELB single PC card detect */
+ /* ELB single PC card detect */
-/* reserved 0xF0 - 0xF8 */
-#define ELB_CLK_CFG_REG regptr(MSP_SLP_BASE + 0xFC)
- /* SDRAM read/ELB timing Reg */
+/* reserved 0xF0 - 0xF8 */
+#define ELB_CLK_CFG_REG regptr(MSP_SLP_BASE + 0xFC)
+ /* SDRAM read/ELB timing Reg */
/* Extended UART status registers */
#define UART0_STATUS_REG regptr(MSP_UART0_BASE + 0x0c0)
- /* UART Status Register 0 */
+ /* UART Status Register 0 */
#define UART1_STATUS_REG regptr(MSP_UART1_BASE + 0x170)
- /* UART Status Register 1 */
+ /* UART Status Register 1 */
/* Performance monitoring registers */
#define PERF_MON_CTRL_REG regptr(MSP_SLP_BASE + 0x140)
- /* Performance monitor control */
+ /* Performance monitor control */
#define PERF_MON_CLR_REG regptr(MSP_SLP_BASE + 0x144)
- /* Performance monitor clear */
+ /* Performance monitor clear */
#define PERF_MON_CNTH_REG regptr(MSP_SLP_BASE + 0x148)
- /* Perf monitor counter high */
+ /* Perf monitor counter high */
#define PERF_MON_CNTL_REG regptr(MSP_SLP_BASE + 0x14C)
- /* Perf monitor counter low */
+ /* Perf monitor counter low */
/* System control registers */
#define SYS_CTRL_REG regptr(MSP_SLP_BASE + 0x150)
- /* System control register */
+ /* System control register */
#define SYS_ERR1_REG regptr(MSP_SLP_BASE + 0x154)
- /* System Error status 1 */
+ /* System Error status 1 */
#define SYS_ERR2_REG regptr(MSP_SLP_BASE + 0x158)
- /* System Error status 2 */
+ /* System Error status 2 */
#define SYS_INT_CFG_REG regptr(MSP_SLP_BASE + 0x15C)
- /* System Interrupt config */
+ /* System Interrupt config */
/* Voice Engine Memory configuration */
#define VE_MEM_REG regptr(MSP_SLP_BASE + 0x17C)
- /* Voice engine memory config */
+ /* Voice engine memory config */
/* CPU/SLP Error Status registers */
#define CPU_ERR1_REG regptr(MSP_SLP_BASE + 0x180)
- /* CPU/SLP Error status 1 */
+ /* CPU/SLP Error status 1 */
#define CPU_ERR2_REG regptr(MSP_SLP_BASE + 0x184)
- /* CPU/SLP Error status 1 */
+			/* CPU/SLP Error status 2 */
-/* Extended GPIO registers */
+/* Extended GPIO registers */
#define EXTENDED_GPIO1_REG regptr(MSP_SLP_BASE + 0x188)
#define EXTENDED_GPIO2_REG regptr(MSP_SLP_BASE + 0x18c)
#define EXTENDED_GPIO_REG EXTENDED_GPIO1_REG
@@ -325,182 +325,182 @@
/* System Error registers */
#define SLP_ERR_STS_REG regptr(MSP_SLP_BASE + 0x190)
- /* Int status for SLP errors */
+ /* Int status for SLP errors */
#define SLP_ERR_MSK_REG regptr(MSP_SLP_BASE + 0x194)
- /* Int mask for SLP errors */
+ /* Int mask for SLP errors */
#define SLP_ELB_ERST_REG regptr(MSP_SLP_BASE + 0x198)
- /* External ELB reset */
+ /* External ELB reset */
#define SLP_BOOT_STS_REG regptr(MSP_SLP_BASE + 0x19C)
- /* Boot Status */
+ /* Boot Status */
/* Extended ELB addressing */
#define CS0_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A0)
- /* CS0 Extended address */
+ /* CS0 Extended address */
#define CS1_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A4)
- /* CS1 Extended address */
+ /* CS1 Extended address */
#define CS2_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A8)
- /* CS2 Extended address */
+ /* CS2 Extended address */
#define CS3_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1AC)
- /* CS3 Extended address */
-/* reserved 0x1B0 */
+ /* CS3 Extended address */
+/* reserved 0x1B0 */
#define CS5_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1B4)
- /* CS5 Extended address */
+ /* CS5 Extended address */
/* PLL Adjustment registers */
#define PLL_LOCK_REG regptr(MSP_SLP_BASE + 0x200)
- /* PLL0 lock status */
+ /* PLL0 lock status */
#define PLL_ARST_REG regptr(MSP_SLP_BASE + 0x204)
- /* PLL Analog reset status */
+ /* PLL Analog reset status */
#define PLL0_ADJ_REG regptr(MSP_SLP_BASE + 0x208)
- /* PLL0 Adjustment value */
+ /* PLL0 Adjustment value */
#define PLL1_ADJ_REG regptr(MSP_SLP_BASE + 0x20C)
- /* PLL1 Adjustment value */
+ /* PLL1 Adjustment value */
/*
***************************************************************************
- * Peripheral Register definitions *
+ * Peripheral Register definitions *
***************************************************************************
*/
/* Peripheral status */
#define PER_CTRL_REG regptr(MSP_PER_BASE + 0x50)
- /* Peripheral control register */
+ /* Peripheral control register */
#define PER_STS_REG regptr(MSP_PER_BASE + 0x54)
- /* Peripheral status register */
+ /* Peripheral status register */
/* SPI/MPI Registers */
#define SMPI_TX_SZ_REG regptr(MSP_PER_BASE + 0x58)
- /* SPI/MPI Tx Size register */
+ /* SPI/MPI Tx Size register */
#define SMPI_RX_SZ_REG regptr(MSP_PER_BASE + 0x5C)
- /* SPI/MPI Rx Size register */
+ /* SPI/MPI Rx Size register */
#define SMPI_CTL_REG regptr(MSP_PER_BASE + 0x60)
- /* SPI/MPI Control register */
+ /* SPI/MPI Control register */
#define SMPI_MS_REG regptr(MSP_PER_BASE + 0x64)
- /* SPI/MPI Chip Select reg */
+ /* SPI/MPI Chip Select reg */
#define SMPI_CORE_DATA_REG regptr(MSP_PER_BASE + 0xC0)
- /* SPI/MPI Core Data reg */
+ /* SPI/MPI Core Data reg */
#define SMPI_CORE_CTRL_REG regptr(MSP_PER_BASE + 0xC4)
- /* SPI/MPI Core Control reg */
+ /* SPI/MPI Core Control reg */
#define SMPI_CORE_STAT_REG regptr(MSP_PER_BASE + 0xC8)
- /* SPI/MPI Core Status reg */
+ /* SPI/MPI Core Status reg */
#define SMPI_CORE_SSEL_REG regptr(MSP_PER_BASE + 0xCC)
- /* SPI/MPI Core Ssel reg */
+ /* SPI/MPI Core Ssel reg */
#define SMPI_FIFO_REG regptr(MSP_PER_BASE + 0xD0)
- /* SPI/MPI Data FIFO reg */
+ /* SPI/MPI Data FIFO reg */
-/* Peripheral Block Error Registers */
+/* Peripheral Block Error Registers */
#define PER_ERR_STS_REG regptr(MSP_PER_BASE + 0x70)
- /* Error Bit Status Register */
+ /* Error Bit Status Register */
#define PER_ERR_MSK_REG regptr(MSP_PER_BASE + 0x74)
- /* Error Bit Mask Register */
+ /* Error Bit Mask Register */
#define PER_HDR1_REG regptr(MSP_PER_BASE + 0x78)
- /* Error Header 1 Register */
+ /* Error Header 1 Register */
#define PER_HDR2_REG regptr(MSP_PER_BASE + 0x7C)
- /* Error Header 2 Register */
+ /* Error Header 2 Register */
-/* Peripheral Block Interrupt Registers */
+/* Peripheral Block Interrupt Registers */
#define PER_INT_STS_REG regptr(MSP_PER_BASE + 0x80)
- /* Interrupt status register */
+ /* Interrupt status register */
#define PER_INT_MSK_REG regptr(MSP_PER_BASE + 0x84)
- /* Interrupt Mask Register */
+ /* Interrupt Mask Register */
#define GPIO_INT_STS_REG regptr(MSP_PER_BASE + 0x88)
- /* GPIO interrupt status reg */
+ /* GPIO interrupt status reg */
#define GPIO_INT_MSK_REG regptr(MSP_PER_BASE + 0x8C)
- /* GPIO interrupt MASK Reg */
+ /* GPIO interrupt MASK Reg */
-/* POLO GPIO registers */
+/* POLO GPIO registers */
#define POLO_GPIO_DAT1_REG regptr(MSP_PER_BASE + 0x0E0)
- /* Polo GPIO[8:0] data reg */
+ /* Polo GPIO[8:0] data reg */
#define POLO_GPIO_CFG1_REG regptr(MSP_PER_BASE + 0x0E4)
- /* Polo GPIO[7:0] config reg */
+ /* Polo GPIO[7:0] config reg */
#define POLO_GPIO_CFG2_REG regptr(MSP_PER_BASE + 0x0E8)
- /* Polo GPIO[15:8] config reg */
+ /* Polo GPIO[15:8] config reg */
#define POLO_GPIO_OD1_REG regptr(MSP_PER_BASE + 0x0EC)
/* Polo GPIO[31:0] output drive */
#define POLO_GPIO_CFG3_REG regptr(MSP_PER_BASE + 0x170)
- /* Polo GPIO[23:16] config reg */
+ /* Polo GPIO[23:16] config reg */
#define POLO_GPIO_DAT2_REG regptr(MSP_PER_BASE + 0x174)
- /* Polo GPIO[15:9] data reg */
+ /* Polo GPIO[15:9] data reg */
#define POLO_GPIO_DAT3_REG regptr(MSP_PER_BASE + 0x178)
- /* Polo GPIO[23:16] data reg */
+ /* Polo GPIO[23:16] data reg */
#define POLO_GPIO_DAT4_REG regptr(MSP_PER_BASE + 0x17C)
- /* Polo GPIO[31:24] data reg */
+ /* Polo GPIO[31:24] data reg */
#define POLO_GPIO_DAT5_REG regptr(MSP_PER_BASE + 0x180)
- /* Polo GPIO[39:32] data reg */
+ /* Polo GPIO[39:32] data reg */
#define POLO_GPIO_DAT6_REG regptr(MSP_PER_BASE + 0x184)
- /* Polo GPIO[47:40] data reg */
+ /* Polo GPIO[47:40] data reg */
#define POLO_GPIO_DAT7_REG regptr(MSP_PER_BASE + 0x188)
- /* Polo GPIO[54:48] data reg */
+ /* Polo GPIO[54:48] data reg */
#define POLO_GPIO_CFG4_REG regptr(MSP_PER_BASE + 0x18C)
- /* Polo GPIO[31:24] config reg */
+ /* Polo GPIO[31:24] config reg */
#define POLO_GPIO_CFG5_REG regptr(MSP_PER_BASE + 0x190)
- /* Polo GPIO[39:32] config reg */
+ /* Polo GPIO[39:32] config reg */
#define POLO_GPIO_CFG6_REG regptr(MSP_PER_BASE + 0x194)
- /* Polo GPIO[47:40] config reg */
+ /* Polo GPIO[47:40] config reg */
#define POLO_GPIO_CFG7_REG regptr(MSP_PER_BASE + 0x198)
- /* Polo GPIO[54:48] config reg */
+ /* Polo GPIO[54:48] config reg */
#define POLO_GPIO_OD2_REG regptr(MSP_PER_BASE + 0x19C)
/* Polo GPIO[54:32] output drive */
-/* Generic GPIO registers */
+/* Generic GPIO registers */
#define GPIO_DATA1_REG regptr(MSP_PER_BASE + 0x170)
- /* GPIO[1:0] data register */
+ /* GPIO[1:0] data register */
#define GPIO_DATA2_REG regptr(MSP_PER_BASE + 0x174)
- /* GPIO[5:2] data register */
+ /* GPIO[5:2] data register */
#define GPIO_DATA3_REG regptr(MSP_PER_BASE + 0x178)
- /* GPIO[9:6] data register */
+ /* GPIO[9:6] data register */
#define GPIO_DATA4_REG regptr(MSP_PER_BASE + 0x17C)
- /* GPIO[15:10] data register */
+ /* GPIO[15:10] data register */
#define GPIO_CFG1_REG regptr(MSP_PER_BASE + 0x180)
- /* GPIO[1:0] config register */
+ /* GPIO[1:0] config register */
#define GPIO_CFG2_REG regptr(MSP_PER_BASE + 0x184)
- /* GPIO[5:2] config register */
+ /* GPIO[5:2] config register */
#define GPIO_CFG3_REG regptr(MSP_PER_BASE + 0x188)
- /* GPIO[9:6] config register */
+ /* GPIO[9:6] config register */
#define GPIO_CFG4_REG regptr(MSP_PER_BASE + 0x18C)
- /* GPIO[15:10] config register */
+ /* GPIO[15:10] config register */
#define GPIO_OD_REG regptr(MSP_PER_BASE + 0x190)
- /* GPIO[15:0] output drive */
+ /* GPIO[15:0] output drive */
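The *_REG macros in this block all expand through regptr(), so the usual access pattern is a read-modify-write on what is effectively a volatile 32-bit register pointer (or, per the comment at the top of the file, a readw()/writew()-style access on the same address). A rough sketch; the assumption that regptr() yields a directly dereferenceable pointer, and the bit values used, are illustrative only:

static void example_toggle_gpio0(void)
{
	u32 cfg;

	/* Read-modify-write of the GPIO[1:0] configuration register. */
	cfg = *GPIO_CFG1_REG;
	cfg |= 0x1;			/* assumed: mode bit for GPIO 0 */
	*GPIO_CFG1_REG = cfg;

	/* Drive the GPIO[1:0] data register; the value is made up. */
	*GPIO_DATA1_REG = 0x1;
}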
/*
***************************************************************************
- * CPU Interface register definitions *
+ * CPU Interface register definitions *
***************************************************************************
*/
#define PCI_FLUSH_REG regptr(MSP_CPUIF_BASE + 0x00)
/* PCI-SDRAM queue flush trigger */
#define OCP_ERR1_REG regptr(MSP_CPUIF_BASE + 0x04)
- /* OCP Error Attribute 1 */
+ /* OCP Error Attribute 1 */
#define OCP_ERR2_REG regptr(MSP_CPUIF_BASE + 0x08)
- /* OCP Error Attribute 2 */
+ /* OCP Error Attribute 2 */
#define OCP_STS_REG regptr(MSP_CPUIF_BASE + 0x0C)
- /* OCP Error Status */
+ /* OCP Error Status */
#define CPUIF_PM_REG regptr(MSP_CPUIF_BASE + 0x10)
- /* CPU policy configuration */
+ /* CPU policy configuration */
#define CPUIF_CFG_REG regptr(MSP_CPUIF_BASE + 0x10)
- /* Misc configuration options */
+ /* Misc configuration options */
/* Central Interrupt Controller Registers */
#define MSP_CIC_BASE (MSP_CPUIF_BASE + 0x8000)
- /* Central Interrupt registers */
+ /* Central Interrupt registers */
#define CIC_EXT_CFG_REG regptr(MSP_CIC_BASE + 0x00)
- /* External interrupt config */
+ /* External interrupt config */
#define CIC_STS_REG regptr(MSP_CIC_BASE + 0x04)
- /* CIC Interrupt Status */
+ /* CIC Interrupt Status */
#define CIC_VPE0_MSK_REG regptr(MSP_CIC_BASE + 0x08)
- /* VPE0 Interrupt Mask */
+ /* VPE0 Interrupt Mask */
#define CIC_VPE1_MSK_REG regptr(MSP_CIC_BASE + 0x0C)
- /* VPE1 Interrupt Mask */
+ /* VPE1 Interrupt Mask */
#define CIC_TC0_MSK_REG regptr(MSP_CIC_BASE + 0x10)
- /* Thread Context 0 Int Mask */
+ /* Thread Context 0 Int Mask */
#define CIC_TC1_MSK_REG regptr(MSP_CIC_BASE + 0x14)
- /* Thread Context 1 Int Mask */
+ /* Thread Context 1 Int Mask */
#define CIC_TC2_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 2 Int Mask */
+ /* Thread Context 2 Int Mask */
#define CIC_TC3_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 3 Int Mask */
+ /* Thread Context 3 Int Mask */
#define CIC_TC4_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 4 Int Mask */
+ /* Thread Context 4 Int Mask */
#define CIC_PCIMSI_STS_REG regptr(MSP_CIC_BASE + 0x18)
#define CIC_PCIMSI_MSK_REG regptr(MSP_CIC_BASE + 0x18)
#define CIC_PCIFLSH_REG regptr(MSP_CIC_BASE + 0x18)
@@ -509,7 +509,7 @@
/*
***************************************************************************
- * Memory controller registers *
+ * Memory controller registers *
***************************************************************************
*/
#define MEM_CFG1_REG regptr(MSP_MEM_CFG_BASE + 0x00)
@@ -519,7 +519,7 @@
/*
***************************************************************************
- * PCI controller registers *
+ * PCI controller registers *
***************************************************************************
*/
#define PCI_BASE_REG regptr(MSP_PCI_BASE + 0x00)
@@ -528,25 +528,25 @@
/*
########################################################################
- # Register content & macro definitions #
+ # Register content & macro definitions #
########################################################################
*/
/*
***************************************************************************
- * DEV_ID defines *
+ * DEV_ID defines *
***************************************************************************
*/
-#define DEV_ID_PCI_DIS (1 << 26) /* Set if PCI disabled */
-#define DEV_ID_PCI_HOST (1 << 20) /* Set if PCI host */
-#define DEV_ID_SINGLE_PC (1 << 19) /* Set if single PC Card */
-#define DEV_ID_FAMILY (0xff << 8) /* family ID code */
-#define POLO_ZEUS_SUB_FAMILY (0x7 << 16) /* sub family for Polo/Zeus */
+#define DEV_ID_PCI_DIS (1 << 26) /* Set if PCI disabled */
+#define DEV_ID_PCI_HOST (1 << 20) /* Set if PCI host */
+#define DEV_ID_SINGLE_PC (1 << 19) /* Set if single PC Card */
+#define DEV_ID_FAMILY (0xff << 8) /* family ID code */
+#define POLO_ZEUS_SUB_FAMILY (0x7 << 16) /* sub family for Polo/Zeus */
-#define MSPFPGA_ID (0x00 << 8) /* you are on your own here */
+#define MSPFPGA_ID (0x00 << 8) /* you are on your own here */
#define MSP5000_ID (0x50 << 8)
-#define MSP4F00_ID (0x4f << 8) /* FPGA version of MSP4200 */
-#define MSP4E00_ID (0x4f << 8) /* FPGA version of MSP7120 */
+#define MSP4F00_ID (0x4f << 8) /* FPGA version of MSP4200 */
+#define MSP4E00_ID (0x4f << 8) /* FPGA version of MSP7120 */
#define MSP4200_ID (0x42 << 8)
#define MSP4000_ID (0x40 << 8)
#define MSP2XXX_ID (0x20 << 8)
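These masks are applied to the value read from DEV_ID_REG; checking the device family might look like the hedged sketch below (direct dereference of the regptr()-style pointer is the same assumption as above):

static int example_is_msp5000_family(void)
{
	u32 id = *DEV_ID_REG;

	/* Isolate the family field and compare it with a known code. */
	return (id & DEV_ID_FAMILY) == MSP5000_ID;
}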
@@ -563,27 +563,27 @@
/*
***************************************************************************
- * RESET defines *
+ * RESET defines *
***************************************************************************
*/
-#define MSP_GR_RST (0x01 << 0) /* Global reset bit */
-#define MSP_MR_RST (0x01 << 1) /* MIPS reset bit */
-#define MSP_PD_RST (0x01 << 2) /* PVC DMA reset bit */
-#define MSP_PP_RST (0x01 << 3) /* PVC reset bit */
-/* reserved */
-#define MSP_EA_RST (0x01 << 6) /* Mac A reset bit */
-#define MSP_EB_RST (0x01 << 7) /* Mac B reset bit */
-#define MSP_SE_RST (0x01 << 8) /* Security Eng reset bit */
-#define MSP_PB_RST (0x01 << 9) /* Per block reset bit */
-#define MSP_EC_RST (0x01 << 10) /* Mac C reset bit */
-#define MSP_TW_RST (0x01 << 11) /* TWI reset bit */
-#define MSP_SPI_RST (0x01 << 12) /* SPI/MPI reset bit */
-#define MSP_U1_RST (0x01 << 13) /* UART1 reset bit */
-#define MSP_U0_RST (0x01 << 14) /* UART0 reset bit */
+#define MSP_GR_RST (0x01 << 0) /* Global reset bit */
+#define MSP_MR_RST (0x01 << 1) /* MIPS reset bit */
+#define MSP_PD_RST (0x01 << 2) /* PVC DMA reset bit */
+#define MSP_PP_RST (0x01 << 3) /* PVC reset bit */
+/* reserved */
+#define MSP_EA_RST (0x01 << 6) /* Mac A reset bit */
+#define MSP_EB_RST (0x01 << 7) /* Mac B reset bit */
+#define MSP_SE_RST (0x01 << 8) /* Security Eng reset bit */
+#define MSP_PB_RST (0x01 << 9) /* Per block reset bit */
+#define MSP_EC_RST (0x01 << 10) /* Mac C reset bit */
+#define MSP_TW_RST (0x01 << 11) /* TWI reset bit */
+#define MSP_SPI_RST (0x01 << 12) /* SPI/MPI reset bit */
+#define MSP_U1_RST (0x01 << 13) /* UART1 reset bit */
+#define MSP_U0_RST (0x01 << 14) /* UART0 reset bit */
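RST_SET_REG and RST_CLR_REG are write-only set/clear companions to RST_STS_REG, so putting a block into and out of reset is a write of the matching bit to each register. A hedged sketch (direct dereference assumed as before; any hardware-required settle time or ordering is omitted):

static void example_pulse_peripheral_reset(void)
{
	/* Assert the peripheral-block reset... */
	*RST_SET_REG = MSP_PB_RST;

	/* ...and release it again. */
	*RST_CLR_REG = MSP_PB_RST;
}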
/*
***************************************************************************
- * UART defines *
+ * UART defines *
***************************************************************************
*/
#define MSP_BASE_BAUD 25000000
@@ -591,15 +591,15 @@
/*
***************************************************************************
- * ELB defines *
+ * ELB defines *
***************************************************************************
*/
-#define PCCARD_32 0x02 /* Set if is PCCARD 32 (Cardbus) */
-#define SINGLE_PCCARD 0x01 /* Set to enable single PC card */
+#define PCCARD_32 0x02 /* Set if is PCCARD 32 (Cardbus) */
+#define SINGLE_PCCARD 0x01 /* Set to enable single PC card */
/*
***************************************************************************
- * CIC defines *
+ * CIC defines *
***************************************************************************
*/
@@ -625,7 +625,7 @@
/*
***************************************************************************
- * Memory Controller defines *
+ * Memory Controller defines *
***************************************************************************
*/
@@ -644,17 +644,17 @@
/*
***************************************************************************
- * SPI/MPI Mode *
+ * SPI/MPI Mode *
***************************************************************************
*/
#define SPI_MPI_RX_BUSY 0x00008000 /* SPI/MPI Receive Busy */
-#define SPI_MPI_FIFO_EMPTY 0x00004000 /* SPI/MPI Fifo Empty */
+#define SPI_MPI_FIFO_EMPTY 0x00004000 /* SPI/MPI Fifo Empty */
#define SPI_MPI_TX_BUSY 0x00002000 /* SPI/MPI Transmit Busy */
-#define SPI_MPI_FIFO_FULL 0x00001000 /* SPI/MPU FIFO full */
+#define SPI_MPI_FIFO_FULL	0x00001000	/* SPI/MPI FIFO full */
/*
***************************************************************************
- * SPI/MPI Control Register *
+ * SPI/MPI Control Register *
***************************************************************************
*/
#define SPI_MPI_RX_START 0x00000004 /* Start receive command */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h
index 96d4c8ce8c83..51a66dcc429d 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h
@@ -27,9 +27,9 @@
/*
* The PMC-Sierra SLP interrupts are arranged in a 3 level cascaded
- * hierarchical system. The first level are the direct MIPS interrupts
+ * hierarchical system.  The first level comprises the direct MIPS interrupts
 * and is assigned the interrupt range 0-7.  The second level is the SLM
- * interrupt controller and is assigned the range 8-39. The third level
+ * interrupt controller and is assigned the range 8-39. The third level
 * comprises the Peripheral block, the PCI block, the PCI MSI block and
* the SLP. The PCI interrupts and the SLP errors are handled by the
* relevant subsystems so the core interrupt code needs only concern
@@ -41,11 +41,11 @@
* IRQs directly connected to CPU
*/
#define MSP_MIPS_INTBASE 0
-#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
-#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
-#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
-#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
-#define MSP_INT_C_IRQ2 4 /* Wired off, C_IRQ2 */
+#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
+#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
+#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
+#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
+#define MSP_INT_C_IRQ2 4 /* Wired off, C_IRQ2 */
#define MSP_INT_VE 5 /* IRQ for Voice Engine, C_IRQ3 */
#define MSP_INT_SLP 6 /* IRQ for SLM block, C_IRQ4 */
#define MSP_INT_TIMER 7 /* IRQ for the MIPS timer, C_IRQ5 */
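The three cascade levels described above map directly onto the numbering macros: the CPU-level IRQs occupy 0-7, SLP sources start at MSP_SLP_INTBASE (8), and peripheral sources start at MSP_PER_INTBASE (MSP_SLP_INTBASE + 32) further down. Translating a source offset into a Linux IRQ number is therefore simple arithmetic, as in this illustrative helper (the function is not part of the header; real dispatch lives in the platform interrupt code):

/* Illustrative only: convert an offset within the SLP cascade range
 * into the kernel IRQ number, e.g. offset 19 -> MSP_INT_UART0. */
static inline unsigned int example_slp_offset_to_irq(unsigned int offset)
{
	return MSP_SLP_INTBASE + offset;
}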
@@ -57,85 +57,85 @@
*/
#define MSP_SLP_INTBASE (MSP_MIPS_INTBASE + 8)
#define MSP_INT_EXT0 (MSP_SLP_INTBASE + 0)
- /* External interrupt 0 */
+ /* External interrupt 0 */
#define MSP_INT_EXT1 (MSP_SLP_INTBASE + 1)
- /* External interrupt 1 */
+ /* External interrupt 1 */
#define MSP_INT_EXT2 (MSP_SLP_INTBASE + 2)
- /* External interrupt 2 */
+ /* External interrupt 2 */
#define MSP_INT_EXT3 (MSP_SLP_INTBASE + 3)
- /* External interrupt 3 */
-/* Reserved 4-7 */
+ /* External interrupt 3 */
+/* Reserved 4-7 */
/*
*************************************************************************
* DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER *
- * Some MSP produces have this interrupt labelled as Voice and some are *
- * SEC mbox ... *
+ * Some MSP products have this interrupt labelled as Voice and some are	 *
+ * SEC mbox ... *
*************************************************************************
*/
#define MSP_INT_SLP_VE (MSP_SLP_INTBASE + 8)
/* Cascaded IRQ for Voice Engine*/
#define MSP_INT_SLP_TDM (MSP_SLP_INTBASE + 9)
- /* TDM interrupt */
+ /* TDM interrupt */
#define MSP_INT_SLP_MAC0 (MSP_SLP_INTBASE + 10)
- /* Cascaded IRQ for MAC 0 */
+ /* Cascaded IRQ for MAC 0 */
#define MSP_INT_SLP_MAC1 (MSP_SLP_INTBASE + 11)
- /* Cascaded IRQ for MAC 1 */
+ /* Cascaded IRQ for MAC 1 */
#define MSP_INT_SEC (MSP_SLP_INTBASE + 12)
- /* IRQ for security engine */
-#define MSP_INT_PER (MSP_SLP_INTBASE + 13)
- /* Peripheral interrupt */
-#define MSP_INT_TIMER0 (MSP_SLP_INTBASE + 14)
- /* SLP timer 0 */
-#define MSP_INT_TIMER1 (MSP_SLP_INTBASE + 15)
- /* SLP timer 1 */
-#define MSP_INT_TIMER2 (MSP_SLP_INTBASE + 16)
- /* SLP timer 2 */
-#define MSP_INT_SLP_TIMER (MSP_SLP_INTBASE + 17)
- /* Cascaded MIPS timer */
+ /* IRQ for security engine */
+#define MSP_INT_PER (MSP_SLP_INTBASE + 13)
+ /* Peripheral interrupt */
+#define MSP_INT_TIMER0 (MSP_SLP_INTBASE + 14)
+ /* SLP timer 0 */
+#define MSP_INT_TIMER1 (MSP_SLP_INTBASE + 15)
+ /* SLP timer 1 */
+#define MSP_INT_TIMER2 (MSP_SLP_INTBASE + 16)
+ /* SLP timer 2 */
+#define MSP_INT_SLP_TIMER (MSP_SLP_INTBASE + 17)
+ /* Cascaded MIPS timer */
#define MSP_INT_BLKCP (MSP_SLP_INTBASE + 18)
- /* Block Copy */
+ /* Block Copy */
#define MSP_INT_UART0 (MSP_SLP_INTBASE + 19)
- /* UART 0 */
+ /* UART 0 */
#define MSP_INT_PCI (MSP_SLP_INTBASE + 20)
- /* PCI subsystem */
+ /* PCI subsystem */
#define MSP_INT_PCI_DBELL (MSP_SLP_INTBASE + 21)
- /* PCI doorbell */
+ /* PCI doorbell */
#define MSP_INT_PCI_MSI (MSP_SLP_INTBASE + 22)
- /* PCI Message Signal */
+ /* PCI Message Signal */
#define MSP_INT_PCI_BC0 (MSP_SLP_INTBASE + 23)
- /* PCI Block Copy 0 */
+ /* PCI Block Copy 0 */
#define MSP_INT_PCI_BC1 (MSP_SLP_INTBASE + 24)
- /* PCI Block Copy 1 */
+ /* PCI Block Copy 1 */
#define MSP_INT_SLP_ERR (MSP_SLP_INTBASE + 25)
- /* SLP error condition */
+ /* SLP error condition */
#define MSP_INT_MAC2 (MSP_SLP_INTBASE + 26)
- /* IRQ for MAC2 */
-/* Reserved 26-31 */
+ /* IRQ for MAC2 */
+/* Reserved 26-31 */
/*
* IRQs cascaded on SLP PER interrupt (MSP_INT_PER)
*/
#define MSP_PER_INTBASE (MSP_SLP_INTBASE + 32)
-/* Reserved 0-1 */
+/* Reserved 0-1 */
#define MSP_INT_UART1 (MSP_PER_INTBASE + 2)
- /* UART 1 */
-/* Reserved 3-5 */
+ /* UART 1 */
+/* Reserved 3-5 */
#define MSP_INT_2WIRE (MSP_PER_INTBASE + 6)
- /* 2-wire */
+ /* 2-wire */
#define MSP_INT_TM0 (MSP_PER_INTBASE + 7)
/* Peripheral timer block out 0 */
#define MSP_INT_TM1 (MSP_PER_INTBASE + 8)
/* Peripheral timer block out 1 */
-/* Reserved 9 */
+/* Reserved 9 */
#define MSP_INT_SPRX (MSP_PER_INTBASE + 10)
- /* SPI RX complete */
+ /* SPI RX complete */
#define MSP_INT_SPTX (MSP_PER_INTBASE + 11)
- /* SPI TX complete */
+ /* SPI TX complete */
#define MSP_INT_GPIO (MSP_PER_INTBASE + 12)
- /* GPIO */
+ /* GPIO */
#define MSP_INT_PER_ERR (MSP_PER_INTBASE + 13)
- /* Peripheral error */
-/* Reserved 14-31 */
+ /* Peripheral error */
+/* Reserved 14-31 */
#endif /* !_MSP_SLP_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
index 4c9348df9df2..aa45e6a07126 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
@@ -40,7 +40,7 @@
#define MSP_USB0_HS_END (MSP_USB0_BASE + 0x401FF)
/* Register spaces for USB host 1 */
-#define MSP_USB1_MAB_START (MSP_USB1_BASE + 0x0)
+#define MSP_USB1_MAB_START (MSP_USB1_BASE + 0x0)
#define MSP_USB1_MAB_END (MSP_USB1_BASE + 0x17)
#define MSP_USB1_ID_START (MSP_USB1_BASE + 0x40000)
#define MSP_USB1_ID_END (MSP_USB1_BASE + 0x4008f)
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
index c74eb1657f5f..a60bf9dd14ae 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
@@ -21,9 +21,9 @@
#define R10000_LLSC_WAR 0
#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
defined(CONFIG_PMC_MSP7120_FPGA)
-#define MIPS34K_MISSED_ITLB_WAR 1
+#define MIPS34K_MISSED_ITLB_WAR 1
#else
-#define MIPS34K_MISSED_ITLB_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
#endif
#endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */
diff --git a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h b/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
index 6d70264557b2..daa85ce03ef6 100644
--- a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
+++ b/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
@@ -42,15 +42,15 @@
#define PNX833X_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7)
/* Interrupts supported by PIC */
-#define PNX833X_PIC_I2C0_INT (PNX833X_PIC_IRQ_BASE + 1)
-#define PNX833X_PIC_I2C1_INT (PNX833X_PIC_IRQ_BASE + 2)
-#define PNX833X_PIC_UART0_INT (PNX833X_PIC_IRQ_BASE + 3)
-#define PNX833X_PIC_UART1_INT (PNX833X_PIC_IRQ_BASE + 4)
-#define PNX833X_PIC_TS_IN0_DV_INT (PNX833X_PIC_IRQ_BASE + 5)
-#define PNX833X_PIC_TS_IN0_DMA_INT (PNX833X_PIC_IRQ_BASE + 6)
-#define PNX833X_PIC_GPIO_INT (PNX833X_PIC_IRQ_BASE + 7)
-#define PNX833X_PIC_AUDIO_DEC_INT (PNX833X_PIC_IRQ_BASE + 8)
-#define PNX833X_PIC_VIDEO_DEC_INT (PNX833X_PIC_IRQ_BASE + 9)
+#define PNX833X_PIC_I2C0_INT (PNX833X_PIC_IRQ_BASE + 1)
+#define PNX833X_PIC_I2C1_INT (PNX833X_PIC_IRQ_BASE + 2)
+#define PNX833X_PIC_UART0_INT (PNX833X_PIC_IRQ_BASE + 3)
+#define PNX833X_PIC_UART1_INT (PNX833X_PIC_IRQ_BASE + 4)
+#define PNX833X_PIC_TS_IN0_DV_INT (PNX833X_PIC_IRQ_BASE + 5)
+#define PNX833X_PIC_TS_IN0_DMA_INT (PNX833X_PIC_IRQ_BASE + 6)
+#define PNX833X_PIC_GPIO_INT (PNX833X_PIC_IRQ_BASE + 7)
+#define PNX833X_PIC_AUDIO_DEC_INT (PNX833X_PIC_IRQ_BASE + 8)
+#define PNX833X_PIC_VIDEO_DEC_INT (PNX833X_PIC_IRQ_BASE + 9)
#define PNX833X_PIC_CONFIG_INT (PNX833X_PIC_IRQ_BASE + 10)
#define PNX833X_PIC_AOI_INT (PNX833X_PIC_IRQ_BASE + 11)
#define PNX833X_PIC_SYNC_INT (PNX833X_PIC_IRQ_BASE + 12)
diff --git a/arch/mips/include/asm/mach-pnx833x/pnx833x.h b/arch/mips/include/asm/mach-pnx833x/pnx833x.h
index 100f52870e3c..e6fc3a9d594a 100644
--- a/arch/mips/include/asm/mach-pnx833x/pnx833x.h
+++ b/arch/mips/include/asm/mach-pnx833x/pnx833x.h
@@ -73,7 +73,7 @@
#define PNX833X_RESET_CONTROL PNX833X_REG(0x8004)
-#define PNX833X_RESET_CONTROL_2 PNX833X_REG(0x8014)
+#define PNX833X_RESET_CONTROL_2 PNX833X_REG(0x8014)
#define PNX833X_PIC_REG(offs) PNX833X_REG(0x01000 + (offs))
#define PNX833X_PIC_INT_PRIORITY PNX833X_PIC_REG(0x0)
@@ -82,10 +82,10 @@
#define PNX833X_PIC_INT_SRC_INT_SRC_SHIFT 3
#define PNX833X_PIC_INT_REG(irq) PNX833X_PIC_REG(0x10 + 4*(irq))
-#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228)
+#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228)
#define PNX833X_CLOCK_CPUCP_CTL_EXIT_RESET 0x00000002ul /* bit 1 */
#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_MASK 0x00000018ul /* bits 4:3 */
-#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3
+#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3
#define PNX8335_CLOCK_PLL_CPU_CTL PNX833X_REG(0x9020)
#define PNX8335_CLOCK_PLL_CPU_CTL_FREQ_MASK 0x1f
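The _MASK/_SHIFT pairs follow the usual extract-and-insert idiom. Assuming PNX833X_CLOCK_CPUCP_CTL behaves as a register lvalue (its definition via PNX833X_REG() is not shown in this hunk), reading the CPU clock divider field could look like this sketch:

static unsigned long example_cpucp_clock_div(void)
{
	unsigned long ctl = PNX833X_CLOCK_CPUCP_CTL;	/* assumed lvalue */

	/* Extract the divider from bits 4:3 of the control register. */
	return (ctl & PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_MASK) >>
		PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT;
}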
@@ -149,7 +149,7 @@
#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_MASK (1 << 14)
#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_SHIFT 14
-#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7)
+#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7)
#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_SHIFT 7
#define PNX833X_MIU_SEL0_BURST_PAGE_LEN_MASK (0xF << 9)
@@ -160,10 +160,10 @@
#define PNX833X_MIU_CONFIG_SPI_OPCODE_MASK (0xFF << 3)
#define PNX833X_MIU_CONFIG_SPI_OPCODE_SHIFT 3
-#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2)
+#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2)
#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_SHIFT 2
-#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1)
+#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1)
#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_SHIFT 1
#define PNX833X_MIU_CONFIG_SPI_SYNC_MASK (1 << 0)
diff --git a/arch/mips/include/asm/mach-pnx8550/cm.h b/arch/mips/include/asm/mach-pnx8550/cm.h
deleted file mode 100644
index bb0a56c7d011..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/cm.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Clock module specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_CM_H
-#define __PNX8550_CM_H
-
-#define PNX8550_CM_BASE 0xBBE47000
-
-#define PNX8550_CM_PLL0_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x000)
-#define PNX8550_CM_PLL1_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x004)
-#define PNX8550_CM_PLL2_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x008)
-#define PNX8550_CM_PLL3_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x00C)
-
-// Table not complete.....
-
-#define PNX8550_CM_PLL_BLOCKED_MASK 0x80000000
-#define PNX8550_CM_PLL_LOCK_MASK 0x40000000
-#define PNX8550_CM_PLL_CURRENT_ADJ_MASK 0x3c000000
-#define PNX8550_CM_PLL_N_MASK 0x01ff0000
-#define PNX8550_CM_PLL_M_MASK 0x00003f00
-#define PNX8550_CM_PLL_P_MASK 0x0000000c
-#define PNX8550_CM_PLL_PD_MASK 0x00000002
-
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/glb.h b/arch/mips/include/asm/mach-pnx8550/glb.h
deleted file mode 100644
index 07aa85e609bc..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/glb.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PNX8550 global definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_GLB_H
-#define __PNX8550_GLB_H
-
-#define PNX8550_GLB1_BASE 0xBBE63000
-#define PNX8550_GLB2_BASE 0xBBE4d000
-#define PNX8550_RESET_BASE 0xBBE60000
-
-/* PCI Inta Output Enable Registers */
-#define PNX8550_GLB2_ENAB_INTA_O *(volatile unsigned long *)(PNX8550_GLB2_BASE + 0x050)
-
-/* Bit 1:Enable DAC Powerdown
- 0:DACs are enabled and are working normally
- 1:DACs are powerdown
-*/
-#define PNX8550_GLB_DAC_PD 0x2
-/* Bit 0:Enable of PCI inta output
- 0 = Disable PCI inta output
- 1 = Enable PCI inta output
-*/
-#define PNX8550_GLB_ENABLE_INTA_O 0x1
-
-/* PCI Direct Mappings */
-#define PNX8550_PCIMEM 0x12000000
-#define PNX8550_PCIMEM_SIZE 0x08000000
-#define PNX8550_PCIIO 0x1c000000
-#define PNX8550_PCIIO_SIZE 0x02000000 /* 32M */
-
-#define PNX8550_PORT_BASE KSEG1
-
-// GPIO def
-#define PNX8550_GPIO_BASE 0x1Be00000
-
-#define PNX8550_GPIO_DIRQ0 (PNX8550_GPIO_BASE + 0x104500)
-#define PNX8550_GPIO_MC1 (PNX8550_GPIO_BASE + 0x104004)
-#define PNX8550_GPIO_MC_31_BIT 30
-#define PNX8550_GPIO_MC_30_BIT 28
-#define PNX8550_GPIO_MC_29_BIT 26
-#define PNX8550_GPIO_MC_28_BIT 24
-#define PNX8550_GPIO_MC_27_BIT 22
-#define PNX8550_GPIO_MC_26_BIT 20
-#define PNX8550_GPIO_MC_25_BIT 18
-#define PNX8550_GPIO_MC_24_BIT 16
-#define PNX8550_GPIO_MC_23_BIT 14
-#define PNX8550_GPIO_MC_22_BIT 12
-#define PNX8550_GPIO_MC_21_BIT 10
-#define PNX8550_GPIO_MC_20_BIT 8
-#define PNX8550_GPIO_MC_19_BIT 6
-#define PNX8550_GPIO_MC_18_BIT 4
-#define PNX8550_GPIO_MC_17_BIT 2
-#define PNX8550_GPIO_MC_16_BIT 0
-
-#define PNX8550_GPIO_MODE_PRIMOP 0x1
-#define PNX8550_GPIO_MODE_NO_OPENDR 0x2
-#define PNX8550_GPIO_MODE_OPENDR 0x3
-
-// RESET module
-#define PNX8550_RST_CTL *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x0)
-#define PNX8550_RST_CAUSE *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x4)
-#define PNX8550_RST_EN_WATCHDOG *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x8)
-
-#define PNX8550_RST_REL_MIPS_RST_N 0x8
-#define PNX8550_RST_DO_SW_RST 0x4
-#define PNX8550_RST_REL_SYS_RST_OUT 0x2
-#define PNX8550_RST_ASSERT_SYS_RST_OUT 0x1
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/int.h b/arch/mips/include/asm/mach-pnx8550/int.h
deleted file mode 100644
index 0e0668b524f4..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/int.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Interrupt specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_INT_H
-#define __PNX8550_INT_H
-
-#define PNX8550_GIC_BASE 0xBBE3E000
-
-#define PNX8550_GIC_PRIMASK_0 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x000)
-#define PNX8550_GIC_PRIMASK_1 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x004)
-#define PNX8550_GIC_VECTOR_0 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x100)
-#define PNX8550_GIC_VECTOR_1 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x104)
-#define PNX8550_GIC_PEND_1_31 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x200)
-#define PNX8550_GIC_PEND_32_63 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x204)
-#define PNX8550_GIC_PEND_64_70 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x208)
-#define PNX8550_GIC_FEATURES *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x300)
-#define PNX8550_GIC_REQ(x) *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x400 + (x)*4)
-#define PNX8550_GIC_MOD_ID *(volatile unsigned long *)(PNX8550_GIC_BASE + 0xFFC)
-
-// cp0 is two software + six hw exceptions
-#define PNX8550_INT_CP0_TOTINT 8
-#define PNX8550_INT_CP0_MIN 0
-#define PNX8550_INT_CP0_MAX (PNX8550_INT_CP0_MIN + PNX8550_INT_CP0_TOTINT - 1)
-
-#define MIPS_CPU_GIC_IRQ 2
-#define MIPS_CPU_TIMER_IRQ 7
-
-// GIC are 71 exceptions connected to cp0's first hardware exception
-#define PNX8550_INT_GIC_TOTINT 71
-#define PNX8550_INT_GIC_MIN (PNX8550_INT_CP0_MAX+1)
-#define PNX8550_INT_GIC_MAX (PNX8550_INT_GIC_MIN + PNX8550_INT_GIC_TOTINT - 1)
-
-#define PNX8550_INT_UNDEF (PNX8550_INT_GIC_MIN+0)
-#define PNX8550_INT_IPC_TARGET0_MIPS (PNX8550_INT_GIC_MIN+1)
-#define PNX8550_INT_IPC_TARGET1_TM32_1 (PNX8550_INT_GIC_MIN+2)
-#define PNX8550_INT_IPC_TARGET1_TM32_2 (PNX8550_INT_GIC_MIN+3)
-#define PNX8550_INT_RESERVED_4 (PNX8550_INT_GIC_MIN+4)
-#define PNX8550_INT_USB (PNX8550_INT_GIC_MIN+5)
-#define PNX8550_INT_GPIO_EQ1 (PNX8550_INT_GIC_MIN+6)
-#define PNX8550_INT_GPIO_EQ2 (PNX8550_INT_GIC_MIN+7)
-#define PNX8550_INT_GPIO_EQ3 (PNX8550_INT_GIC_MIN+8)
-#define PNX8550_INT_GPIO_EQ4 (PNX8550_INT_GIC_MIN+9)
-
-#define PNX8550_INT_GPIO_EQ5 (PNX8550_INT_GIC_MIN+10)
-#define PNX8550_INT_GPIO_EQ6 (PNX8550_INT_GIC_MIN+11)
-#define PNX8550_INT_RESERVED_12 (PNX8550_INT_GIC_MIN+12)
-#define PNX8550_INT_QVCP1 (PNX8550_INT_GIC_MIN+13)
-#define PNX8550_INT_QVCP2 (PNX8550_INT_GIC_MIN+14)
-#define PNX8550_INT_I2C1 (PNX8550_INT_GIC_MIN+15)
-#define PNX8550_INT_I2C2 (PNX8550_INT_GIC_MIN+16)
-#define PNX8550_INT_ISO_UART1 (PNX8550_INT_GIC_MIN+17)
-#define PNX8550_INT_ISO_UART2 (PNX8550_INT_GIC_MIN+18)
-#define PNX8550_INT_UART1 (PNX8550_INT_GIC_MIN+19)
-
-#define PNX8550_INT_UART2 (PNX8550_INT_GIC_MIN+20)
-#define PNX8550_INT_QNTR (PNX8550_INT_GIC_MIN+21)
-#define PNX8550_INT_RESERVED22 (PNX8550_INT_GIC_MIN+22)
-#define PNX8550_INT_T_DSC (PNX8550_INT_GIC_MIN+23)
-#define PNX8550_INT_M_DSC (PNX8550_INT_GIC_MIN+24)
-#define PNX8550_INT_RESERVED25 (PNX8550_INT_GIC_MIN+25)
-#define PNX8550_INT_2D_DRAW_ENG (PNX8550_INT_GIC_MIN+26)
-#define PNX8550_INT_MEM_BASED_SCALAR1 (PNX8550_INT_GIC_MIN+27)
-#define PNX8550_INT_VIDEO_MPEG (PNX8550_INT_GIC_MIN+28)
-#define PNX8550_INT_VIDEO_INPUT_P1 (PNX8550_INT_GIC_MIN+29)
-
-#define PNX8550_INT_VIDEO_INPUT_P2 (PNX8550_INT_GIC_MIN+30)
-#define PNX8550_INT_SPDI1 (PNX8550_INT_GIC_MIN+31)
-#define PNX8550_INT_SPDO (PNX8550_INT_GIC_MIN+32)
-#define PNX8550_INT_AUDIO_INPUT1 (PNX8550_INT_GIC_MIN+33)
-#define PNX8550_INT_AUDIO_OUTPUT1 (PNX8550_INT_GIC_MIN+34)
-#define PNX8550_INT_AUDIO_INPUT2 (PNX8550_INT_GIC_MIN+35)
-#define PNX8550_INT_AUDIO_OUTPUT2 (PNX8550_INT_GIC_MIN+36)
-#define PNX8550_INT_MEMBASED_SCALAR2 (PNX8550_INT_GIC_MIN+37)
-#define PNX8550_INT_VPK (PNX8550_INT_GIC_MIN+38)
-#define PNX8550_INT_MPEG1_MIPS (PNX8550_INT_GIC_MIN+39)
-
-#define PNX8550_INT_MPEG1_TM (PNX8550_INT_GIC_MIN+40)
-#define PNX8550_INT_MPEG2_MIPS (PNX8550_INT_GIC_MIN+41)
-#define PNX8550_INT_MPEG2_TM (PNX8550_INT_GIC_MIN+42)
-#define PNX8550_INT_TS_DMA (PNX8550_INT_GIC_MIN+43)
-#define PNX8550_INT_EDMA (PNX8550_INT_GIC_MIN+44)
-#define PNX8550_INT_TM_DEBUG1 (PNX8550_INT_GIC_MIN+45)
-#define PNX8550_INT_TM_DEBUG2 (PNX8550_INT_GIC_MIN+46)
-#define PNX8550_INT_PCI_INTA (PNX8550_INT_GIC_MIN+47)
-#define PNX8550_INT_CLOCK_MODULE (PNX8550_INT_GIC_MIN+48)
-#define PNX8550_INT_PCI_XIO_INTA_PCI (PNX8550_INT_GIC_MIN+49)
-
-#define PNX8550_INT_PCI_XIO_INTB_DMA (PNX8550_INT_GIC_MIN+50)
-#define PNX8550_INT_PCI_XIO_INTC_GPPM (PNX8550_INT_GIC_MIN+51)
-#define PNX8550_INT_PCI_XIO_INTD_GPXIO (PNX8550_INT_GIC_MIN+52)
-#define PNX8550_INT_DVD_CSS (PNX8550_INT_GIC_MIN+53)
-#define PNX8550_INT_VLD (PNX8550_INT_GIC_MIN+54)
-#define PNX8550_INT_GPIO_TSU_7_0 (PNX8550_INT_GIC_MIN+55)
-#define PNX8550_INT_GPIO_TSU_15_8 (PNX8550_INT_GIC_MIN+56)
-#define PNX8550_INT_GPIO_CTU_IR (PNX8550_INT_GIC_MIN+57)
-#define PNX8550_INT_GPIO0 (PNX8550_INT_GIC_MIN+58)
-#define PNX8550_INT_GPIO1 (PNX8550_INT_GIC_MIN+59)
-
-#define PNX8550_INT_GPIO2 (PNX8550_INT_GIC_MIN+60)
-#define PNX8550_INT_GPIO3 (PNX8550_INT_GIC_MIN+61)
-#define PNX8550_INT_GPIO4 (PNX8550_INT_GIC_MIN+62)
-#define PNX8550_INT_GPIO5 (PNX8550_INT_GIC_MIN+63)
-#define PNX8550_INT_GPIO6 (PNX8550_INT_GIC_MIN+64)
-#define PNX8550_INT_GPIO7 (PNX8550_INT_GIC_MIN+65)
-#define PNX8550_INT_PMAN_SECURITY (PNX8550_INT_GIC_MIN+66)
-#define PNX8550_INT_I2C3 (PNX8550_INT_GIC_MIN+67)
-#define PNX8550_INT_RESERVED_68 (PNX8550_INT_GIC_MIN+68)
-#define PNX8550_INT_SPDI2 (PNX8550_INT_GIC_MIN+69)
-
-#define PNX8550_INT_I2C4 (PNX8550_INT_GIC_MIN+70)
-
-// Timer are 3 exceptions connected to cp0's 7th hardware exception
-#define PNX8550_INT_TIMER_TOTINT 3
-#define PNX8550_INT_TIMER_MIN (PNX8550_INT_GIC_MAX+1)
-#define PNX8550_INT_TIMER_MAX (PNX8550_INT_TIMER_MIN + PNX8550_INT_TIMER_TOTINT - 1)
-
-#define PNX8550_INT_TIMER1 (PNX8550_INT_TIMER_MIN+0)
-#define PNX8550_INT_TIMER2 (PNX8550_INT_TIMER_MIN+1)
-#define PNX8550_INT_TIMER3 (PNX8550_INT_TIMER_MIN+2)
-#define PNX8550_INT_WATCHDOG PNX8550_INT_TIMER3
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h b/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h
deleted file mode 100644
index bdde00c9199b..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- */
-#ifndef __ASM_MACH_KERNEL_ENTRY_INIT_H
-#define __ASM_MACH_KERNEL_ENTRY_INIT_H
-
-#include <asm/cacheops.h>
-#include <asm/addrspace.h>
-
-#define CO_CONFIGPR_VALID 0x3F1F41FF /* valid bits to write to ConfigPR */
-#define HAZARD_CP0 nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;
-#define CACHE_OPC 0xBC000000 /* MIPS cache instruction opcode */
-#define ICACHE_LINE_SIZE 32 /* Instruction cache line size bytes */
-#define DCACHE_LINE_SIZE 32 /* Data cache line size in bytes */
-
-#define ICACHE_SET_COUNT 256 /* Instruction cache set count */
-#define DCACHE_SET_COUNT 128 /* Data cache set count */
-
-#define ICACHE_SET_SIZE (ICACHE_SET_COUNT * ICACHE_LINE_SIZE)
-#define DCACHE_SET_SIZE (DCACHE_SET_COUNT * DCACHE_LINE_SIZE)
-
- .macro kernel_entry_setup
- .set push
- .set noreorder
- /*
- * PNX8550 entry point, when running a non compressed
- * kernel. When loading a zImage, the head.S code in
- * arch/mips/zboot/pnx8550 will init the caches and,
- * decompress the kernel, and branch to kernel_entry.
- */
-cache_begin: li t0, (1<<28)
- mtc0 t0, CP0_STATUS /* cp0 usable */
- HAZARD_CP0
-
- mtc0 zero, CP0_CAUSE
- HAZARD_CP0
-
-
- /* Set static virtual to phys address translation and TLB disabled */
- mfc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- and t0, ~((1<<19) | (1<<20)) /* TLB/MAP cleared */
- mtc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- /* CPU boots with kseg0 cache algo set to 0x2 -- uncached */
-
- init_icache
- nop
- init_dcache
- nop
-
- cachePr4450ICReset
- nop
-
- cachePr4450DCReset
- nop
-
- /* read ConfigPR into t0 */
- mfc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- /* enable the TLB */
- or t0, (1<<19)
-
- /* disable the ICACHE: at least 10x slower */
- /* or t0, (1<<26) */
-
- /* disable the DCACHE; CONFIG_CPU_HAS_LLSC should not be set */
- /* or t0, (1<<27) */
-
- and t0, CO_CONFIGPR_VALID
-
- /* enable TLB. */
- mtc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-cache_end:
- /* Setup CMEM_0 to MMIO address space, 2MB */
- lui t0, 0x1BE0
- addi t0, t0, 0x3
- mtc0 $8, $22, 4
- nop
-
- /* Setup CMEM_1, 128MB */
- lui t0, 0x1000
- addi t0, t0, 0xf
- mtc0 $8, $22, 5
- nop
-
-
- /* Setup CMEM_2, 32MB */
- lui t0, 0x1C00
- addi t0, t0, 0xb
- mtc0 $8, $22, 6
- nop
-
- /* Setup CMEM_3, 0MB */
- lui t0, 0x0
- addi t0, t0, 0x0
- mtc0 $8, $22, 7
- nop
-
- /* Enable cache */
- mfc0 t0, CP0_CONFIG
- HAZARD_CP0
- and t0, t0, 0xFFFFFFF8
- or t0, t0, 3
- mtc0 t0, CP0_CONFIG
- HAZARD_CP0
- .set pop
- .endm
-
- .macro init_icache
- .set push
- .set noreorder
-
- /* Get Cache Configuration */
- mfc0 t3, CP0_CONFIG, 1
- HAZARD_CP0
-
- /* get cache Line size */
-
- srl t1, t3, 19 /* C0_CONFIGPR_IL_SHIFT */
- andi t1, t1, 0x7 /* C0_CONFIGPR_IL_MASK */
- beq t1, zero, pr4450_instr_cache_invalidated /* if zero instruction cache is absent */
- nop
- addiu t0, t1, 1
- ori t1, zero, 1
- sllv t1, t1, t0
-
- /* get max cache Index */
- srl t2, t3, 22 /* C0_CONFIGPR_IS_SHIFT */
- andi t2, t2, 0x7 /* C0_CONFIGPR_IS_MASK */
- addiu t0, t2, 6
- ori t2, zero, 1
- sllv t2, t2, t0
-
- /* get max cache way */
- srl t3, t3, 16 /* C0_CONFIGPR_IA_SHIFT */
- andi t3, t3, 0x7 /* C0_CONFIGPR_IA_MASK */
- addiu t3, t3, 1
-
- /* total no of cache lines */
- multu t2, t3 /* max index * max way */
- mflo t2
- addiu t2, t2, -1
-
- move t0, zero
-pr4450_next_instruction_cache_set:
- cache Index_Invalidate_I, 0(t0)
- addu t0, t0, t1 /* add bytes in a line */
- bne t2, zero, pr4450_next_instruction_cache_set
- addiu t2, t2, -1 /* reduce no of lines to invalidate by one */
-pr4450_instr_cache_invalidated:
- .set pop
- .endm
-
- .macro init_dcache
- .set push
- .set noreorder
- move t1, zero
-
- /* Store Tag Information */
- mtc0 zero, CP0_TAGLO, 0
- HAZARD_CP0
-
- mtc0 zero, CP0_TAGHI, 0
- HAZARD_CP0
-
- /* Cache size is 16384 = 512 lines x 32 bytes per line */
- or t2, zero, (128*4)-1 /* 512 lines */
- /* Invalidate all lines */
-2:
- cache Index_Store_Tag_D, 0(t1)
- addiu t2, t2, -1
- bne t2, zero, 2b
- addiu t1, t1, 32 /* 32 bytes in a line */
- .set pop
- .endm
-
- .macro cachePr4450ICReset
- .set push
- .set noreorder
-
- /* Save CP0 status reg on entry; */
- /* disable interrupts during cache reset */
- mfc0 t0, CP0_STATUS /* T0 = interrupt status on entry */
- HAZARD_CP0
-
- mtc0 zero, CP0_STATUS /* disable CPU interrupts */
- HAZARD_CP0
-
- or t1, zero, zero /* T1 = starting cache index (0) */
- ori t2, zero, (256 - 1) /* T2 = inst cache set cnt - 1 */
-
- icache_invd_loop:
- /* 9 == register t1 */
- .word CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
- (0 * ICACHE_SET_SIZE) /* invalidate inst cache WAY0 */
- .word CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
- (1 * ICACHE_SET_SIZE) /* invalidate inst cache WAY1 */
-
- addiu t1, t1, ICACHE_LINE_SIZE /* T1 = next cache line index */
- bne t2, zero, icache_invd_loop /* T2 = 0 if all sets invalidated */
- addiu t2, t2, -1 /* decrement T2 set cnt (delay slot) */
-
- /* Initialize the latches in the instruction cache tag */
- /* that drive the way selection tri-state bus drivers, by doing a */
- /* dummy load while the instruction cache is still disabled. */
- /* TODO: Is this needed ? */
- la t1, KSEG0 /* T1 = cached memory base address */
- lw zero, 0x0000(t1) /* (dummy read of first memory word) */
-
- mtc0 t0, CP0_STATUS /* restore interrupt status on entry */
- HAZARD_CP0
- .set pop
- .endm
-
- .macro cachePr4450DCReset
- .set push
- .set noreorder
- mfc0 t0, CP0_STATUS /* T0 = interrupt status on entry */
- HAZARD_CP0
- mtc0 zero, CP0_STATUS /* disable CPU interrupts */
- HAZARD_CP0
-
- /* Writeback/invalidate entire data cache sets/ways/lines */
- or t1, zero, zero /* T1 = starting cache index (0) */
- ori t2, zero, (DCACHE_SET_COUNT - 1) /* T2 = data cache set cnt - 1 */
-
- dcache_wbinvd_loop:
- /* 9 == register t1 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (0 * DCACHE_SET_SIZE) /* writeback/invalidate WAY0 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (1 * DCACHE_SET_SIZE) /* writeback/invalidate WAY1 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (2 * DCACHE_SET_SIZE) /* writeback/invalidate WAY2 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (3 * DCACHE_SET_SIZE) /* writeback/invalidate WAY3 */
-
- addiu t1, t1, DCACHE_LINE_SIZE /* T1 = next data cache line index */
- bne t2, zero, dcache_wbinvd_loop /* T2 = 0 when wbinvd entire cache */
- addiu t2, t2, -1 /* decrement T2 set cnt (delay slot) */
-
- /* Initialize the latches in the data cache tag that drive the way
- selection tri-state bus drivers, by doing a dummy load while the
- data cache is still in the disabled mode. TODO: Is this needed ? */
- la t1, KSEG0 /* T1 = cached memory base address */
- lw zero, 0x0000(t1) /* (dummy read of first memory word) */
-
- mtc0 t0, CP0_STATUS /* restore interrupt status on entry */
- HAZARD_CP0
- .set pop
- .endm
-
-#endif /* __ASM_MACH_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-pnx8550/nand.h b/arch/mips/include/asm/mach-pnx8550/nand.h
deleted file mode 100644
index aefbc514ab09..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/nand.h
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef __PNX8550_NAND_H
-#define __PNX8550_NAND_H
-
-#define PNX8550_NAND_BASE_ADDR 0x10000000
-#define PNX8550_PCIXIO_BASE 0xBBE40000
-
-#define PNX8550_DMA_EXT_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x800)
-#define PNX8550_DMA_INT_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x804)
-#define PNX8550_DMA_TRANS_SIZE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x808)
-#define PNX8550_DMA_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x80c)
-#define PNX8550_XIO_SEL0 *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x814)
-#define PNX8550_GPXIO_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x820)
-#define PNX8550_GPXIO_WR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x824)
-#define PNX8550_GPXIO_RD *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x828)
-#define PNX8550_GPXIO_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x82C)
-#define PNX8550_XIO_FLASH_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x830)
-#define PNX8550_GPXIO_INT_STATUS *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb0)
-#define PNX8550_GPXIO_INT_ENABLE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb4)
-#define PNX8550_GPXIO_INT_CLEAR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb8)
-#define PNX8550_DMA_INT_STATUS *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd0)
-#define PNX8550_DMA_INT_ENABLE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd4)
-#define PNX8550_DMA_INT_CLEAR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd8)
-
-#define PNX8550_XIO_SEL0_EN_16BIT 0x00800000
-#define PNX8550_XIO_SEL0_USE_ACK 0x00400000
-#define PNX8550_XIO_SEL0_REN_HIGH 0x00100000
-#define PNX8550_XIO_SEL0_REN_LOW 0x00040000
-#define PNX8550_XIO_SEL0_WEN_HIGH 0x00010000
-#define PNX8550_XIO_SEL0_WEN_LOW 0x00004000
-#define PNX8550_XIO_SEL0_WAIT 0x00000200
-#define PNX8550_XIO_SEL0_OFFSET 0x00000020
-#define PNX8550_XIO_SEL0_TYPE_68360 0x00000000
-#define PNX8550_XIO_SEL0_TYPE_NOR 0x00000008
-#define PNX8550_XIO_SEL0_TYPE_NAND 0x00000010
-#define PNX8550_XIO_SEL0_TYPE_IDE 0x00000018
-#define PNX8550_XIO_SEL0_SIZE_8MB 0x00000000
-#define PNX8550_XIO_SEL0_SIZE_16MB 0x00000002
-#define PNX8550_XIO_SEL0_SIZE_32MB 0x00000004
-#define PNX8550_XIO_SEL0_SIZE_64MB 0x00000006
-#define PNX8550_XIO_SEL0_ENAB 0x00000001
-
-#define PNX8550_SEL0_DEFAULT ((PNX8550_XIO_SEL0_EN_16BIT) | \
- (PNX8550_XIO_SEL0_REN_HIGH*0)| \
- (PNX8550_XIO_SEL0_REN_LOW*2) | \
- (PNX8550_XIO_SEL0_WEN_HIGH*0)| \
- (PNX8550_XIO_SEL0_WEN_LOW*2) | \
- (PNX8550_XIO_SEL0_WAIT*4) | \
- (PNX8550_XIO_SEL0_OFFSET*0) | \
- (PNX8550_XIO_SEL0_TYPE_NAND) | \
- (PNX8550_XIO_SEL0_SIZE_32MB) | \
- (PNX8550_XIO_SEL0_ENAB))
-
-#define PNX8550_GPXIO_PENDING 0x00000200
-#define PNX8550_GPXIO_DONE 0x00000100
-#define PNX8550_GPXIO_CLR_DONE 0x00000080
-#define PNX8550_GPXIO_INIT 0x00000040
-#define PNX8550_GPXIO_READ_CMD 0x00000010
-#define PNX8550_GPXIO_BEN 0x0000000F
-
-#define PNX8550_XIO_FLASH_64MB 0x00200000
-#define PNX8550_XIO_FLASH_INC_DATA 0x00100000
-#define PNX8550_XIO_FLASH_CMD_PH 0x000C0000
-#define PNX8550_XIO_FLASH_CMD_PH2 0x00080000
-#define PNX8550_XIO_FLASH_CMD_PH1 0x00040000
-#define PNX8550_XIO_FLASH_CMD_PH0 0x00000000
-#define PNX8550_XIO_FLASH_ADR_PH 0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH3 0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH2 0x00020000
-#define PNX8550_XIO_FLASH_ADR_PH1 0x00010000
-#define PNX8550_XIO_FLASH_ADR_PH0 0x00000000
-#define PNX8550_XIO_FLASH_CMD_B(x) ((x<<8) & 0x0000FF00)
-#define PNX8550_XIO_FLASH_CMD_A(x) (x & 0x000000FF)
-
-#define PNX8550_XIO_INT_ACK 0x00004000
-#define PNX8550_XIO_INT_COMPL 0x00002000
-#define PNX8550_XIO_INT_NONSUP 0x00000200
-#define PNX8550_XIO_INT_ABORT 0x00000004
-
-#define PNX8550_DMA_CTRL_SINGLE_DATA 0x00000400
-#define PNX8550_DMA_CTRL_SND2XIO 0x00000200
-#define PNX8550_DMA_CTRL_FIX_ADDR 0x00000100
-#define PNX8550_DMA_CTRL_BURST_8 0x00000000
-#define PNX8550_DMA_CTRL_BURST_16 0x00000020
-#define PNX8550_DMA_CTRL_BURST_32 0x00000040
-#define PNX8550_DMA_CTRL_BURST_64 0x00000060
-#define PNX8550_DMA_CTRL_BURST_128 0x00000080
-#define PNX8550_DMA_CTRL_BURST_256 0x000000A0
-#define PNX8550_DMA_CTRL_BURST_512 0x000000C0
-#define PNX8550_DMA_CTRL_BURST_NORES 0x000000E0
-#define PNX8550_DMA_CTRL_INIT_DMA 0x00000010
-#define PNX8550_DMA_CTRL_CMD_TYPE 0x0000000F
-
-/* see PCI system arch, page 100 for the full list: */
-#define PNX8550_DMA_CTRL_PCI_CMD_READ 0x00000006
-#define PNX8550_DMA_CTRL_PCI_CMD_WRITE 0x00000007
-
-#define PNX8550_DMA_INT_STAT_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_STAT_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_STAT_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_STAT_PERR5 (1<<5)
-#define PNX8550_DMA_INT_STAT_PERR4 (1<<4)
-#define PNX8550_DMA_INT_STAT_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_STAT_T_ABORT (1<<1)
-
-#define PNX8550_DMA_INT_EN_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_EN_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_EN_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_EN_PERR5 (1<<5)
-#define PNX8550_DMA_INT_EN_PERR4 (1<<4)
-#define PNX8550_DMA_INT_EN_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_EN_T_ABORT (1<<1)
-
-#define PNX8550_DMA_INT_CLR_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_CLR_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_CLR_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_CLR_PERR5 (1<<5)
-#define PNX8550_DMA_INT_CLR_PERR4 (1<<4)
-#define PNX8550_DMA_INT_CLR_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_CLR_T_ABORT (1<<1)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/pci.h b/arch/mips/include/asm/mach-pnx8550/pci.h
deleted file mode 100644
index b921508d701b..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/pci.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PCI specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_PCI_H
-#define __PNX8550_PCI_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#define PCI_ACCESS_READ 0
-#define PCI_ACCESS_WRITE 1
-
-#define PCI_CMD_IOR 0x20
-#define PCI_CMD_IOW 0x30
-#define PCI_CMD_CONFIG_READ 0xa0
-#define PCI_CMD_CONFIG_WRITE 0xb0
-
-#define PCI_IO_TIMEOUT 1000
-#define PCI_IO_RETRY 5
-/* Timeout for IO and CFG accesses.
- This is in 1/1024 th of a jiffie(=10ms)
- i.e. approx 10us */
-#define PCI_IO_JIFFIES_TIMEOUT 40
-#define PCI_IO_JIFFIES_SHIFT 10
-
-#define PCI_BYTE_ENABLE_MASK 0x0000000f
-#define PCI_CFG_BUS_SHIFT 16
-#define PCI_CFG_FUNC_SHIFT 8
-#define PCI_CFG_REG_SHIFT 2
-
-#define PCI_BASE 0x1be00000
-#define PCI_SETUP 0x00040010
-#define PCI_DIS_REQGNT (1<<30)
-#define PCI_DIS_REQGNTA (1<<29)
-#define PCI_DIS_REQGNTB (1<<28)
-#define PCI_D2_SUPPORT (1<<27)
-#define PCI_D1_SUPPORT (1<<26)
-#define PCI_EN_TA (1<<24)
-#define PCI_EN_PCI2MMI (1<<23)
-#define PCI_EN_XIO (1<<22)
-#define PCI_BASE18_PREF (1<<21)
-#define SIZE_16M 0x3
-#define SIZE_32M 0x4
-#define SIZE_64M 0x5
-#define SIZE_128M 0x6
-#define PCI_SETUP_BASE18_SIZE(X) (X<<18)
-#define PCI_SETUP_BASE18_EN (1<<17)
-#define PCI_SETUP_BASE14_PREF (1<<16)
-#define PCI_SETUP_BASE14_SIZE(X) (X<<12)
-#define PCI_SETUP_BASE14_EN (1<<11)
-#define PCI_SETUP_BASE10_PREF (1<<10)
-#define PCI_SETUP_BASE10_SIZE(X) (X<<7)
-#define PCI_SETUP_CFGMANAGE_EN (1<<1)
-#define PCI_SETUP_PCIARB_EN (1<<0)
-
-#define PCI_CTRL 0x040014
-#define PCI_SWPB_DCS_PCI (1<<16)
-#define PCI_SWPB_PCI_PCI (1<<15)
-#define PCI_SWPB_PCI_DCS (1<<14)
-#define PCI_REG_WR_POST (1<<13)
-#define PCI_XIO_WR_POST (1<<12)
-#define PCI_PCI2_WR_POST (1<<13)
-#define PCI_PCI1_WR_POST (1<<12)
-#define PCI_SERR_SEEN (1<<11)
-#define PCI_B10_SPEC_RD (1<<6)
-#define PCI_B14_SPEC_RD (1<<5)
-#define PCI_B18_SPEC_RD (1<<4)
-#define PCI_B10_NOSUBWORD (1<<3)
-#define PCI_B14_NOSUBWORD (1<<2)
-#define PCI_B18_NOSUBWORD (1<<1)
-#define PCI_RETRY_TMREN (1<<0)
-
-#define PCI_BASE1_LO 0x040018
-#define PCI_BASE1_HI 0x04001C
-#define PCI_BASE2_LO 0x040020
-#define PCI_BASE2_HI 0x040024
-#define PCI_RDLIFETIM 0x040028
-#define PCI_GPPM_ADDR 0x04002C
-#define PCI_GPPM_WDAT 0x040030
-#define PCI_GPPM_RDAT 0x040034
-#define PCI_GPPM_CTRL 0x040038
-#define GPPM_DONE (1<<10)
-#define INIT_PCI_CYCLE (1<<9)
-#define GPPM_CMD(X) (((X)&0xf)<<4)
-#define GPPM_BYTEEN(X) ((X)&0xf)
-#define PCI_UNLOCKREG 0x04003C
-#define UNLOCK_SSID(X) (((X)&0xff)<<8)
-#define UNLOCK_SETUP(X) (((X)&0xff)<<0)
-#define UNLOCK_MAGIC 0xCA
-#define PCI_DEV_VEND_ID 0x040040
-#define DEVICE_ID(X) (((X)>>16)&0xffff)
-#define VENDOR_ID(X) (((X)&0xffff))
-#define PCI_CFG_CMDSTAT 0x040044
-#define PCI_CFG_STATUS(X) (((X)>>16)&0xffff)
-#define PCI_CFG_COMMAND(X) ((X)&0xffff)
-#define PCI_CLASS_REV 0x040048
-#define PCI_CLASSCODE(X) (((X)>>8)&0xffffff)
-#define PCI_REVID(X) ((X)&0xff)
-#define PCI_LAT_TMR 0x04004c
-#define PCI_BASE10 0x040050
-#define PCI_BASE14 0x040054
-#define PCI_BASE18 0x040058
-#define PCI_SUBSYS_ID 0x04006c
-#define PCI_CAP_PTR 0x040074
-#define PCI_CFG_MISC 0x04007c
-#define PCI_PMC 0x040080
-#define PCI_PWR_STATE 0x040084
-#define PCI_IO 0x040088
-#define PCI_SLVTUNING 0x04008C
-#define PCI_DMATUNING 0x040090
-#define PCI_DMAEADDR 0x040800
-#define PCI_DMAIADDR 0x040804
-#define PCI_DMALEN 0x040808
-#define PCI_DMACTRL 0x04080C
-#define PCI_XIOCTRL 0x040810
-#define PCI_SEL0PROF 0x040814
-#define PCI_SEL1PROF 0x040818
-#define PCI_SEL2PROF 0x04081C
-#define PCI_GPXIOADDR 0x040820
-#define PCI_NANDCTRLS 0x400830
-#define PCI_SEL3PROF 0x040834
-#define PCI_SEL4PROF 0x040838
-#define PCI_GPXIO_STAT 0x040FB0
-#define PCI_GPXIO_IMASK 0x040FB4
-#define PCI_GPXIO_ICLR 0x040FB8
-#define PCI_GPXIO_ISET 0x040FBC
-#define PCI_GPPM_STATUS 0x040FC0
-#define GPPM_DONE (1<<10)
-#define GPPM_ERR (1<<9)
-#define GPPM_MPAR_ERR (1<<8)
-#define GPPM_PAR_ERR (1<<7)
-#define GPPM_R_MABORT (1<<2)
-#define GPPM_R_TABORT (1<<1)
-#define PCI_GPPM_IMASK 0x040FC4
-#define PCI_GPPM_ICLR 0x040FC8
-#define PCI_GPPM_ISET 0x040FCC
-#define PCI_DMA_STATUS 0x040FD0
-#define PCI_DMA_IMASK 0x040FD4
-#define PCI_DMA_ICLR 0x040FD8
-#define PCI_DMA_ISET 0x040FDC
-#define PCI_ISTATUS 0x040FE0
-#define PCI_IMASK 0x040FE4
-#define PCI_ICLR 0x040FE8
-#define PCI_ISET 0x040FEC
-#define PCI_MOD_ID 0x040FFC
-
-/*
- * PCI configuration cycle AD bus definition
- */
-/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
-
-/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
-
-/*
- * Ethernet device DP83816 definition
- */
-#define DP83816_IRQ_ETHER 66
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/uart.h b/arch/mips/include/asm/mach-pnx8550/uart.h
deleted file mode 100644
index ad7608d44874..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/uart.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __IP3106_UART_H
-#define __IP3106_UART_H
-
-#include <int.h>
-
-/* early macros for kgdb use. fixme: clean this up */
-
-#define UART_BASE 0xbbe4a000 /* PNX8550 */
-
-#define PNX8550_UART_PORT0 (UART_BASE)
-#define PNX8550_UART_PORT1 (UART_BASE + 0x1000)
-
-#define PNX8550_UART_INT(x) (PNX8550_INT_GIC_MIN+19+x)
-#define IRQ_TO_UART(x) (x-PNX8550_INT_GIC_MIN-19)
-
-/* early macros needed for prom/kgdb */
-
-#define ip3106_lcr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x000)
-#define ip3106_mcr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x004)
-#define ip3106_baud(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x008)
-#define ip3106_cfg(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x00C)
-#define ip3106_fifo(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x028)
-#define ip3106_istat(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE0)
-#define ip3106_ien(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE4)
-#define ip3106_iclr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE8)
-#define ip3106_iset(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFEC)
-#define ip3106_pd(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFF4)
-#define ip3106_mid(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFFC)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/usb.h b/arch/mips/include/asm/mach-pnx8550/usb.h
deleted file mode 100644
index 483b7fc65d41..000000000000
--- a/arch/mips/include/asm/mach-pnx8550/usb.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * USB specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_USB_H
-#define __PNX8550_USB_H
-
-/*
- * USB Host controller
- */
-
-#define PNX8550_USB_OHCI_OP_BASE 0x1be48000
-#define PNX8550_USB_OHCI_OP_LEN 0x1000
-
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
index c7077a64b9a7..b341108d12f1 100644
--- a/arch/mips/include/asm/mach-powertv/asic.h
+++ b/arch/mips/include/asm/mach-powertv/asic.h
@@ -23,9 +23,9 @@
#include <linux/platform_device.h>
#include <asm/mach-powertv/asic_regs.h>
-#define DVR_CAPABLE (1<<0)
-#define PCIE_CAPABLE (1<<1)
-#define FFS_CAPABLE (1<<2)
+#define DVR_CAPABLE (1<<0)
+#define PCIE_CAPABLE (1<<1)
+#define FFS_CAPABLE (1<<2)
#define DISPLAY_CAPABLE (1<<3)
/* Platform Family types
@@ -111,7 +111,7 @@ enum sys_reboot_type {
* Older drivers may report as
* userReboot. */
sys_hardware_reset = 0x09, /* HW watchdog or front-panel
- * reset button reset. Older
+ * reset button reset. Older
* drivers may report as
* userReboot. */
sys_watchdogInterrupt = 0x0A /* Pre-watchdog interrupt */
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
index deecb26a077e..06712abb3e55 100644
--- a/arch/mips/include/asm/mach-powertv/asic_regs.h
+++ b/arch/mips/include/asm/mach-powertv/asic_regs.h
@@ -49,8 +49,8 @@ enum asic_type {
#define UART1_INTEN uart1_inten
#define UART1_CONFIG1 uart1_config1
#define UART1_CONFIG2 uart1_config2
-#define UART1_DIVISORHI uart1_divisorhi
-#define UART1_DIVISORLO uart1_divisorlo
+#define UART1_DIVISORHI uart1_divisorhi
+#define UART1_DIVISORLO uart1_divisorlo
#define UART1_DATA uart1_data
#define UART1_STATUS uart1_status
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 35371641575d..f8316720a218 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -4,7 +4,7 @@
* for more details.
*
* Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009 Cisco Systems, Inc.
+ * Portions Copyright (C) 2009 Cisco Systems, Inc.
* Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
*
*/
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
index 4fd652ceb52a..6c463be62156 100644
--- a/arch/mips/include/asm/mach-powertv/interrupts.h
+++ b/arch/mips/include/asm/mach-powertv/interrupts.h
@@ -16,7 +16,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
+#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
#define _ASM_MACH_POWERTV_INTERRUPTS_H_
/*
@@ -49,9 +49,9 @@
* glue logic inside SPARC ILC
* (see INT_SBAG_STAT, below,
* for individual interrupts) */
-#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
+#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
#define irq_qam_a_fec (ibase+115) /* QAM A FEC Interrupt */
-/* 114 unused (bit 18) */
+/* 114 unused (bit 18) */
#define irq_mailbox (ibase+113) /* Mailbox Debug Interrupt --
* Ored by glue logic inside
* SPARC ILC (see
@@ -99,9 +99,9 @@
#define irq_sata1 (ibase+87) /* SATA 1 Interrupt */
#define irq_dtcp (ibase+86) /* DTCP Interrupt */
#define irq_pciexp1 (ibase+85) /* PCI Express 1 Interrupt */
-/* 84 unused (bit 20) */
-/* 83 unused (bit 19) */
-/* 82 unused (bit 18) */
+/* 84 unused (bit 20) */
+/* 83 unused (bit 19) */
+/* 82 unused (bit 18) */
#define irq_sata2 (ibase+81) /* SATA2 Interrupt */
#define irq_uart2 (ibase+80) /* UART2 Interrupt */
#define irq_legacy_usb (ibase+79) /* Legacy USB Host ISR (1.1
@@ -117,22 +117,22 @@
#define irq_mod_dma (ibase+70) /* Modulator DMA Interrupt */
#define irq_byte_eng1 (ibase+69) /* Byte Engine Interrupt [1] */
#define irq_byte_eng0 (ibase+68) /* Byte Engine Interrupt [0] */
-/* 67 unused (bit 03) */
-/* 66 unused (bit 02) */
-/* 65 unused (bit 01) */
-/* 64 unused (bit 00) */
+/* 67 unused (bit 03) */
+/* 66 unused (bit 02) */
+/* 65 unused (bit 01) */
+/* 64 unused (bit 00) */
/*------------- Register: int_stat_1 */
-/* 63 unused (bit 31) */
-/* 62 unused (bit 30) */
-/* 61 unused (bit 29) */
-/* 60 unused (bit 28) */
-/* 59 unused (bit 27) */
-/* 58 unused (bit 26) */
-/* 57 unused (bit 25) */
-/* 56 unused (bit 24) */
+/* 63 unused (bit 31) */
+/* 62 unused (bit 30) */
+/* 61 unused (bit 29) */
+/* 60 unused (bit 28) */
+/* 59 unused (bit 27) */
+/* 58 unused (bit 26) */
+/* 57 unused (bit 25) */
+/* 56 unused (bit 24) */
#define irq_buf_dma_mem2mem (ibase+55) /* BufDMA Memory to Memory
* Interrupt */
-#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
+#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
* Interrupt */
#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Tramsit
* Interrupt */
@@ -140,7 +140,7 @@
* Interrupt */
#define irq_buf_dma_usbrecv (ibase+51) /* BufDMA USB Receive
* Interrupt */
-#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
+#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
* Interrupt */
#define irq_buf_dma_recv_error (ibase+49) /* BufDMA Receive Error
* Interrupt */
@@ -166,7 +166,7 @@
* Module */
#define irq_gpio2 (ibase+37) /* GP I/O IRQ 2 - From GP I/O
* Module (ABE_intN) */
-#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
+#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
* Discontinuity 1 */
#define irq_pcrcmplt2 (ibase+35) /* PCR Capture Complete or
* Discontinuity 2 */
@@ -217,18 +217,18 @@
#define irq_qpsk_hecerr (ibase+11) /* QPSK HEC Error Interrupt */
#define irq_qpsk_crcerr (ibase+10) /* QPSK AAL-5 CRC Error
* Interrupt */
-/* 9 unused (bit 09) */
-/* 8 unused (bit 08) */
-#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
+/* 9 unused (bit 09) */
+/* 8 unused (bit 08) */
+#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
* Interrupt */
-#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
+#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
* Interrupt */
-#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
+#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
* Forward Path Reference -
* every 3ms when forward Mbits
* and forward slot control
* bytes are updated. */
-#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
+#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
* Reverse Path Reference -
* delayed from forward mark by
* the ranging delay plus a
@@ -239,15 +239,15 @@
* 1.554 M upstream rates and
* every 6 ms for 256K upstream
* rate. */
-#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
+#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
* Channel 1. */
-#define irq_reservation (ibase+2) /* Partial (or Incremental)
+#define irq_reservation (ibase+2) /* Partial (or Incremental)
* Reservation Message Completed
* or Slotted aloha verify for
* channel 1. */
-#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
+#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
* Interrupt or Reservation
* increment completed for
* channel 3. */
-#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
+#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
new file mode 100644
index 000000000000..5a508f9f9432
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -0,0 +1,39 @@
+/*
+ * Ralink SoC register definitions
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RALINK_REGS_H_
+#define _RALINK_REGS_H_
+
+extern __iomem void *rt_sysc_membase;
+extern __iomem void *rt_memc_membase;
+
+static inline void rt_sysc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_sysc_membase + reg);
+}
+
+static inline u32 rt_sysc_r32(unsigned reg)
+{
+ return __raw_readl(rt_sysc_membase + reg);
+}
+
+static inline void rt_memc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_memc_membase + reg);
+}
+
+static inline u32 rt_memc_r32(unsigned reg)
+{
+ return __raw_readl(rt_memc_membase + reg);
+}
+
+#endif /* _RALINK_REGS_H_ */
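
The new ralink_regs.h above only wraps __raw_readl()/__raw_writel() around two ioremapped windows (system controller and memory controller). A minimal usage sketch follows; it is illustrative only and not part of this patch. It assumes a kernel build context, that platform setup code (outside this hunk) has already mapped rt_sysc_membase, and it uses the SYSC_REG_CHIP_ID / CHIP_ID_* definitions from the rt305x.h header added in the next file:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/mach-ralink/ralink_regs.h>
	#include <asm/mach-ralink/rt305x.h>	/* SYSC_REG_CHIP_ID, CHIP_ID_*: added below */

	/* Sketch: dump the chip id/revision via the new accessors. */
	static void __init ralink_print_chip_id(void)
	{
		u32 id = rt_sysc_r32(SYSC_REG_CHIP_ID);

		pr_info("Ralink chip id %u rev %u\n",
			(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
			id & CHIP_ID_REV_MASK);
	}
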
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
new file mode 100644
index 000000000000..7d344f2d7d0a
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT305X_REGS_H_
+#define _RT305X_REGS_H_
+
+enum rt305x_soc_type {
+ RT305X_SOC_UNKNOWN = 0,
+ RT305X_SOC_RT3050,
+ RT305X_SOC_RT3052,
+ RT305X_SOC_RT3350,
+ RT305X_SOC_RT3352,
+ RT305X_SOC_RT5350,
+};
+
+extern enum rt305x_soc_type rt305x_soc;
+
+static inline int soc_is_rt3050(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3050;
+}
+
+static inline int soc_is_rt3052(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3052;
+}
+
+static inline int soc_is_rt305x(void)
+{
+ return soc_is_rt3050() || soc_is_rt3052();
+}
+
+static inline int soc_is_rt3350(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3350;
+}
+
+static inline int soc_is_rt3352(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3352;
+}
+
+static inline int soc_is_rt5350(void)
+{
+ return rt305x_soc == RT305X_SOC_RT5350;
+}
+
+#define RT305X_SYSC_BASE 0x10000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_ID 0x0c
+#define SYSC_REG_SYSTEM_CONFIG 0x10
+
+#define RT3052_CHIP_NAME0 0x30335452
+#define RT3052_CHIP_NAME1 0x20203235
+
+#define RT3350_CHIP_NAME0 0x33335452
+#define RT3350_CHIP_NAME1 0x20203035
+
+#define RT3352_CHIP_NAME0 0x33335452
+#define RT3352_CHIP_NAME1 0x20203235
+
+#define RT5350_CHIP_NAME0 0x33355452
+#define RT5350_CHIP_NAME1 0x20203035
+
+#define CHIP_ID_ID_MASK 0xff
+#define CHIP_ID_ID_SHIFT 8
+#define CHIP_ID_REV_MASK 0xff
+
+#define RT305X_SYSCFG_CPUCLK_SHIFT 18
+#define RT305X_SYSCFG_CPUCLK_MASK 0x1
+#define RT305X_SYSCFG_CPUCLK_LOW 0x0
+#define RT305X_SYSCFG_CPUCLK_HIGH 0x1
+
+#define RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT 2
+#define RT305X_SYSCFG_CPUCLK_MASK 0x1
+#define RT305X_SYSCFG_SRAM_CS0_MODE_WDT 0x1
+
+#define RT3352_SYSCFG0_CPUCLK_SHIFT 8
+#define RT3352_SYSCFG0_CPUCLK_MASK 0x1
+#define RT3352_SYSCFG0_CPUCLK_LOW 0x0
+#define RT3352_SYSCFG0_CPUCLK_HIGH 0x1
+
+#define RT5350_SYSCFG0_CPUCLK_SHIFT 8
+#define RT5350_SYSCFG0_CPUCLK_MASK 0x3
+#define RT5350_SYSCFG0_CPUCLK_360 0x0
+#define RT5350_SYSCFG0_CPUCLK_320 0x2
+#define RT5350_SYSCFG0_CPUCLK_300 0x3
+
+/* multi function gpio pins */
+#define RT305X_GPIO_I2C_SD 1
+#define RT305X_GPIO_I2C_SCLK 2
+#define RT305X_GPIO_SPI_EN 3
+#define RT305X_GPIO_SPI_CLK 4
+/* GPIO 7-14 is shared between UART0, PCM and I2S interfaces */
+#define RT305X_GPIO_7 7
+#define RT305X_GPIO_10 10
+#define RT305X_GPIO_14 14
+#define RT305X_GPIO_UART1_TXD 15
+#define RT305X_GPIO_UART1_RXD 16
+#define RT305X_GPIO_JTAG_TDO 17
+#define RT305X_GPIO_JTAG_TDI 18
+#define RT305X_GPIO_MDIO_MDC 22
+#define RT305X_GPIO_MDIO_MDIO 23
+#define RT305X_GPIO_SDRAM_MD16 24
+#define RT305X_GPIO_SDRAM_MD31 39
+#define RT305X_GPIO_GE0_TXD0 40
+#define RT305X_GPIO_GE0_RXCLK 51
+
+#define RT305X_GPIO_MODE_I2C BIT(0)
+#define RT305X_GPIO_MODE_SPI BIT(1)
+#define RT305X_GPIO_MODE_UART0_SHIFT 2
+#define RT305X_GPIO_MODE_UART0_MASK 0x7
+#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
+#define RT305X_GPIO_MODE_UARTF 0x0
+#define RT305X_GPIO_MODE_PCM_UARTF 0x1
+#define RT305X_GPIO_MODE_PCM_I2S 0x2
+#define RT305X_GPIO_MODE_I2S_UARTF 0x3
+#define RT305X_GPIO_MODE_PCM_GPIO 0x4
+#define RT305X_GPIO_MODE_GPIO_UARTF 0x5
+#define RT305X_GPIO_MODE_GPIO_I2S 0x6
+#define RT305X_GPIO_MODE_GPIO 0x7
+#define RT305X_GPIO_MODE_UART1 BIT(5)
+#define RT305X_GPIO_MODE_JTAG BIT(6)
+#define RT305X_GPIO_MODE_MDIO BIT(7)
+#define RT305X_GPIO_MODE_SDRAM BIT(8)
+#define RT305X_GPIO_MODE_RGMII BIT(9)
+
+#endif
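
The RTxxxx_CHIP_NAME0/1 constants above are the chip's name stored as little-endian ASCII in two 32-bit SYSC registers: for the RT3052, NAME0 0x30335452 reads "RT30" and NAME1 0x20203235 reads "52  "; for the RT5350 the pair spells "RT5350  ". A sketch of how rt305x_soc could be populated from them is shown below — the real detection lives in the Ralink platform setup code, which is not part of this hunk, and distinguishing the RT3050 from the RT3052 needs extra configuration checks that are omitted here. As before, rt_sysc_membase is assumed to be mapped already:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/mach-ralink/ralink_regs.h>
	#include <asm/mach-ralink/rt305x.h>

	enum rt305x_soc_type rt305x_soc = RT305X_SOC_UNKNOWN;

	/* Sketch: match the chip-name registers against the known SoCs. */
	static void __init rt305x_detect_soc(void)
	{
		u32 n0 = rt_sysc_r32(SYSC_REG_CHIP_NAME0);
		u32 n1 = rt_sysc_r32(SYSC_REG_CHIP_NAME1);

		if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1)
			rt305x_soc = RT305X_SOC_RT3052;	/* "RT30" + "52  " */
		else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1)
			rt305x_soc = RT305X_SOC_RT3350;
		else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1)
			rt305x_soc = RT305X_SOC_RT3352;
		else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1)
			rt305x_soc = RT305X_SOC_RT5350;
	}

	/* Drivers can then branch on soc_is_rt3352(), soc_is_rt5350(), etc. */
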
diff --git a/arch/mips/include/asm/mach-pnx8550/war.h b/arch/mips/include/asm/mach-ralink/war.h
index de8894c46686..a7b712cf2d28 100644
--- a/arch/mips/include/asm/mach-pnx8550/war.h
+++ b/arch/mips/include/asm/mach-ralink/war.h
@@ -5,8 +5,8 @@
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
*/
-#ifndef __ASM_MIPS_MACH_PNX8550_WAR_H
-#define __ASM_MIPS_MACH_PNX8550_WAR_H
+#ifndef __ASM_MACH_RALINK_WAR_H
+#define __ASM_MACH_RALINK_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
@@ -17,8 +17,9 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MACH_RALINK_WAR_H */
diff --git a/arch/mips/include/asm/mach-rc32434/ddr.h b/arch/mips/include/asm/mach-rc32434/ddr.h
index 291e2cf9dde0..e1cad0c7fd52 100644
--- a/arch/mips/include/asm/mach-rc32434/ddr.h
+++ b/arch/mips/include/asm/mach-rc32434/ddr.h
@@ -138,4 +138,4 @@ struct ddr_ram {
#define RC32434_DLLED_DBE_BIT 0
#define RC32434_DLLED_DTE_BIT 1
-#endif /* _ASM_RC32434_DDR_H_ */
+#endif /* _ASM_RC32434_DDR_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/dma.h b/arch/mips/include/asm/mach-rc32434/dma.h
index 5f898b5873f7..4322191e46bf 100644
--- a/arch/mips/include/asm/mach-rc32434/dma.h
+++ b/arch/mips/include/asm/mach-rc32434/dma.h
@@ -5,7 +5,7 @@
* DMA register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
*/
#ifndef __ASM_RC32434_DMA_H
@@ -71,10 +71,10 @@ struct dma_reg {
#define DMA_CHAN_DONE_BIT (1 << 1)
#define DMA_CHAN_MODE_BIT (1 << 2)
#define DMA_CHAN_MODE_MSK 0x0000000c
-#define DMA_CHAN_MODE_AUTO 0
-#define DMA_CHAN_MODE_BURST 1
-#define DMA_CHAN_MODE_XFRT 2
-#define DMA_CHAN_MODE_RSVD 3
+#define DMA_CHAN_MODE_AUTO 0
+#define DMA_CHAN_MODE_BURST 1
+#define DMA_CHAN_MODE_XFRT 2
+#define DMA_CHAN_MODE_RSVD 3
#define DMA_CHAN_ACT_BIT (1 << 4)
/* DMA status registers */
@@ -100,4 +100,4 @@ struct dma_channel {
struct dma_reg ch[DMA_CHAN_COUNT];
};
-#endif /* __ASM_RC32434_DMA_H */
+#endif /* __ASM_RC32434_DMA_H */
diff --git a/arch/mips/include/asm/mach-rc32434/dma_v.h b/arch/mips/include/asm/mach-rc32434/dma_v.h
index 173a9f9146cd..28c54063a345 100644
--- a/arch/mips/include/asm/mach-rc32434/dma_v.h
+++ b/arch/mips/include/asm/mach-rc32434/dma_v.h
@@ -5,7 +5,7 @@
* DMA register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
*/
#ifndef _ASM_RC32434_DMA_V_H_
@@ -49,4 +49,4 @@ static inline void rc32434_chain_dma(struct dma_reg *ch, u32 dma_addr)
__raw_writel(dma_addr, &ch->dmandptr);
}
-#endif /* _ASM_RC32434_DMA_V_H_ */
+#endif /* _ASM_RC32434_DMA_V_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/eth.h b/arch/mips/include/asm/mach-rc32434/eth.h
index a25cbc56173d..c2645faadf57 100644
--- a/arch/mips/include/asm/mach-rc32434/eth.h
+++ b/arch/mips/include/asm/mach-rc32434/eth.h
@@ -26,8 +26,8 @@
*
*/
-#ifndef __ASM_RC32434_ETH_H
-#define __ASM_RC32434_ETH_H
+#ifndef __ASM_RC32434_ETH_H
+#define __ASM_RC32434_ETH_H
#define ETH0_BASE_ADDR 0x18060000
@@ -217,4 +217,4 @@ struct eth_regs {
#define ETH_TX_LE (1 << 16)
#define ETH_TX_CC 0x001E0000
-#endif /* __ASM_RC32434_ETH_H */
+#endif /* __ASM_RC32434_ETH_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 12ee8d510160..4dee0a34250c 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -5,7 +5,7 @@
* GPIO register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
* Copyright (C) 2001, 2002 Ryan Holm <ryan.holmQVist@idt.com>
* Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
*/
@@ -26,9 +26,9 @@
#define irq_to_gpio(irq) (irq - (8 + 4 * 32))
struct rb532_gpio_reg {
- u32 gpiofunc; /* GPIO Function Register
+ u32 gpiofunc; /* GPIO Function Register
* gpiofunc[x]==0 bit = gpio
- * func[x]==1 bit = altfunc
+ * func[x]==1 bit = altfunc
*/
u32 gpiocfg; /* GPIO Configuration Register
* gpiocfg[x]==0 bit = input
diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h
index 023a5b100ed0..b76dec95c04e 100644
--- a/arch/mips/include/asm/mach-rc32434/irq.h
+++ b/arch/mips/include/asm/mach-rc32434/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_RC32434_IRQ_H
#define __ASM_RC32434_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include <asm/mach-generic/irq.h>
#include <asm/mach-rc32434/rb.h>
@@ -25,12 +25,12 @@
#define UART0_IRQ (GROUP3_IRQ_BASE + 0)
-#define ETH0_DMA_RX_IRQ (GROUP1_IRQ_BASE + 0)
-#define ETH0_DMA_TX_IRQ (GROUP1_IRQ_BASE + 1)
-#define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9)
-#define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10)
+#define ETH0_DMA_RX_IRQ (GROUP1_IRQ_BASE + 0)
+#define ETH0_DMA_TX_IRQ (GROUP1_IRQ_BASE + 1)
+#define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9)
+#define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10)
#define GPIO_MAPPED_IRQ_BASE GROUP4_IRQ_BASE
#define GPIO_MAPPED_IRQ_GROUP 4
-#endif /* __ASM_RC32434_IRQ_H */
+#endif /* __ASM_RC32434_IRQ_H */
diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
index 410638f2af74..6f40d1515580 100644
--- a/arch/mips/include/asm/mach-rc32434/pci.h
+++ b/arch/mips/include/asm/mach-rc32434/pci.h
@@ -151,11 +151,11 @@ struct pci_msu {
#define PCI_CFGA_REG_PBA2 (0x18 >> 2) /* use PCIPBA_ */
#define PCI_CFGA_REG_PBA3 (0x1c >> 2) /* use PCIPBA_ */
#define PCI_CFGA_REG_SUBSYS (0x2c >> 2) /* use PCFGSS_ */
-#define PCI_CFGA_REG_3C (0x3C >> 2) /* use PCFG3C_ */
+#define PCI_CFGA_REG_3C (0x3C >> 2) /* use PCFG3C_ */
#define PCI_CFGA_REG_PBBA0C (0x44 >> 2) /* use PCIPBAC_ */
-#define PCI_CFGA_REG_PBA0M (0x48 >> 2)
+#define PCI_CFGA_REG_PBA0M (0x48 >> 2)
#define PCI_CFGA_REG_PBA1C (0x4c >> 2) /* use PCIPBAC_ */
-#define PCI_CFGA_REG_PBA1M (0x50 >> 2)
+#define PCI_CFGA_REG_PBA1M (0x50 >> 2)
#define PCI_CFGA_REG_PBA2C (0x54 >> 2) /* use PCIPBAC_ */
#define PCI_CFGA_REG_PBA2M (0x58 >> 2)
#define PCI_CFGA_REG_PBA3C (0x5c >> 2) /* use PCIPBAC_ */
@@ -164,9 +164,9 @@ struct pci_msu {
#define PCI_CFGA_FUNC_BIT 8
#define PCI_CFGA_FUNC 0x00000700
#define PCI_CFGA_DEV_BIT 11
-#define PCI_CFGA_DEV 0x0000f800
-#define PCI_CFGA_DEV_INTERN 0
-#define PCI_CFGA_BUS_BIT 16
+#define PCI_CFGA_DEV 0x0000f800
+#define PCI_CFGA_DEV_INTERN 0
+#define PCI_CFGA_BUS_BIT 16
#define PCI CFGA_BUS 0x00ff0000
#define PCI_CFGA_BUS_TYPE0 0
#define PCI_CFGA_EN (1 << 31)
@@ -201,13 +201,13 @@ struct pci_msu {
#define PCI_PBAC_P (1 << 1)
#define PCI_PBAC_SIZE_BIT 2
#define PCI_PBAC_SIZE 0x0000007c
-#define PCI_PBAC_SB (1 << 7)
-#define PCI_PBAC_PP (1 << 8)
+#define PCI_PBAC_SB (1 << 7)
+#define PCI_PBAC_PP (1 << 8)
#define PCI_PBAC_MR_BIT 9
#define PCI_PBAC_MR 0x00000600
#define PCI_PBAC_MR_RD 0
#define PCI_PBAC_MR_RD_LINE 1
-#define PCI_PBAC_MR_RD_MULT 2
+#define PCI_PBAC_MR_RD_MULT 2
#define PCI_PBAC_MRL (1 << 11)
#define PCI_PBAC_MRM (1 << 12)
#define PCI_PBAC_TRP (1 << 13)
@@ -227,14 +227,14 @@ struct pci_msu {
*/
#define PCI_LBAC_MSI (1 << 0)
-#define PCI_LBAC_MSI_MEM 0
-#define PCI_LBAC_MSI_IO 1
+#define PCI_LBAC_MSI_MEM 0
+#define PCI_LBAC_MSI_IO 1
#define PCI_LBAC_SIZE_BIT 2
#define PCI_LBAC_SIZE 0x0000007c
#define PCI_LBAC_SB (1 << 7)
#define PCI_LBAC_RT (1 << 8)
-#define PCI_LBAC_RT_NO_PREF 0
-#define PCI_LBAC_RT_PREF 1
+#define PCI_LBAC_RT_NO_PREF 0
+#define PCI_LBAC_RT_PREF 1
/*
* PCI Local Base Address [0|1|2|3] Mapping Register
@@ -279,16 +279,16 @@ struct pci_msu {
#define PCI_DMAD_PT 0x00c00000 /* preferred transaction field */
/* These are for reads (DMA channel 8) */
#define PCI_DMAD_DEVCMD_MR 0 /* memory read */
-#define PCI_DMAD_DEVCMD_MRL 1 /* memory read line */
-#define PCI_DMAD_DEVCMD_MRM 2 /* memory read multiple */
-#define PCI_DMAD_DEVCMD_IOR 3 /* I/O read */
+#define PCI_DMAD_DEVCMD_MRL 1 /* memory read line */
+#define PCI_DMAD_DEVCMD_MRM 2 /* memory read multiple */
+#define PCI_DMAD_DEVCMD_IOR 3 /* I/O read */
/* These are for writes (DMA channel 9) */
#define PCI_DMAD_DEVCMD_MW 0 /* memory write */
-#define PCI_DMAD_DEVCMD_MWI 1 /* memory write invalidate */
-#define PCI_DMAD_DEVCMD_IOW 3 /* I/O write */
+#define PCI_DMAD_DEVCMD_MWI 1 /* memory write invalidate */
+#define PCI_DMAD_DEVCMD_IOW 3 /* I/O write */
/* Swap byte field applies to both DMA channel 8 and 9 */
-#define PCI_DMAD_SB (1 << 24) /* swap byte field */
+#define PCI_DMAD_SB (1 << 24) /* swap byte field */
/*
@@ -309,7 +309,7 @@ struct pci_msu {
#define PCI_MSU_M1 (1 << 1)
#define PCI_MSU_DB (1 << 2)
-#define PCI_MSG_ADDR 0xB8088010
+#define PCI_MSG_ADDR 0xB8088010
#define PCI0_ADDR 0xB8080000
#define rc32434_pci ((struct pci_reg *) PCI0_ADDR)
#define rc32434_pci_msg ((struct pci_msu *) PCI_MSG_ADDR)
@@ -331,9 +331,9 @@ struct pci_msu {
#define PCILBA_SIZE_MASK 0x1F
#define SIZE_256MB 0x1C
#define SIZE_128MB 0x1B
-#define SIZE_64MB 0x1A
+#define SIZE_64MB 0x1A
#define SIZE_32MB 0x19
-#define SIZE_16MB 0x18
+#define SIZE_16MB 0x18
#define SIZE_4MB 0x16
#define SIZE_2MB 0x15
#define SIZE_1MB 0x14
@@ -363,7 +363,7 @@ struct pci_msu {
#define KORINA_CONFIG23_ADDR 0x8000005C
#define KORINA_CONFIG24_ADDR 0x80000060
#define KORINA_CONFIG25_ADDR 0x80000064
-#define KORINA_CMD (PCI_CFG04_CMD_IO_ENA | \
+#define KORINA_CMD (PCI_CFG04_CMD_IO_ENA | \
PCI_CFG04_CMD_MEM_ENA | \
PCI_CFG04_CMD_BM_ENA | \
PCI_CFG04_CMD_MW_INV | \
@@ -401,8 +401,8 @@ struct pci_msu {
#define KORINA_BAR3 0x48000008 /* Spare 128 MB Memory */
#define KORINA_CNFG4 KORINA_BAR0
-#define KORINA_CNFG5 KORINA_BAR1
-#define KORINA_CNFG6 KORINA_BAR2
+#define KORINA_CNFG5 KORINA_BAR1
+#define KORINA_CNFG6 KORINA_BAR2
#define KORINA_CNFG7 KORINA_BAR3
#define KORINA_SUBSYS_VENDOR_ID 0x011d
@@ -410,20 +410,20 @@ struct pci_msu {
#define KORINA_CNFG8 0
#define KORINA_CNFG9 0
#define KORINA_CNFG10 0
-#define KORINA_CNFG11 ((KORINA_SUBSYS_VENDOR_ID<<16) | \
+#define KORINA_CNFG11 ((KORINA_SUBSYS_VENDOR_ID<<16) | \
KORINA_SUBSYSTEM_ID)
#define KORINA_INT_LINE 1
#define KORINA_INT_PIN 1
#define KORINA_MIN_GNT 8
#define KORINA_MAX_LAT 0x38
#define KORINA_CNFG12 0
-#define KORINA_CNFG13 0
+#define KORINA_CNFG13 0
#define KORINA_CNFG14 0
#define KORINA_CNFG15 ((KORINA_MAX_LAT<<24) | \
(KORINA_MIN_GNT<<16) | \
(KORINA_INT_PIN<<8) | \
KORINA_INT_LINE)
-#define KORINA_RETRY_LIMIT 0x80
+#define KORINA_RETRY_LIMIT 0x80
#define KORINA_TRDY_LIMIT 0x80
#define KORINA_CNFG16 ((KORINA_RETRY_LIMIT<<8) | \
KORINA_TRDY_LIMIT)
@@ -475,7 +475,7 @@ struct pci_msu {
#define KORINA_PBA3M 0
#define KORINA_CNFG24 KORINA_PBA3M
-#define PCITC_DTIMER_VAL 8
+#define PCITC_DTIMER_VAL 8
#define PCITC_RTIMER_VAL 0x10
-#endif /* __ASM_RC32434_PCI_H */
+#endif /* __ASM_RC32434_PCI_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index 6dc5f8df1f3e..aac8ce8902e7 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -18,7 +18,7 @@
#include <linux/genhd.h>
#define REGBASE 0x18000000
-#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
+#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
#define UART0BASE 0x58000
#define RST (1 << 15)
#define DEV0BASE 0x010000
@@ -80,10 +80,10 @@ struct cf_device {
struct mpmc_device {
unsigned char state;
spinlock_t lock;
- void __iomem *base;
+ void __iomem *base;
};
extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
extern unsigned char get_latch_u5(void);
-#endif /* __ASM_RC32434_RB_H */
+#endif /* __ASM_RC32434_RB_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rc32434.h b/arch/mips/include/asm/mach-rc32434/rc32434.h
index fce25d4231fc..02fd32b4be16 100644
--- a/arch/mips/include/asm/mach-rc32434/rc32434.h
+++ b/arch/mips/include/asm/mach-rc32434/rc32434.h
@@ -16,4 +16,4 @@ static inline void rc32434_sync(void)
__asm__ volatile ("sync");
}
-#endif /* _ASM_RC32434_RC32434_H_ */
+#endif /* _ASM_RC32434_RC32434_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/timer.h b/arch/mips/include/asm/mach-rc32434/timer.h
index e49b1d57a017..cda26bb9eead 100644
--- a/arch/mips/include/asm/mach-rc32434/timer.h
+++ b/arch/mips/include/asm/mach-rc32434/timer.h
@@ -51,15 +51,15 @@ struct timer {
#define RC32434_CTC_TO_BIT 1
/* Real time clock registers */
-#define RC32434_RTC_MSK(x) BIT_TO_MASK(x)
-#define RC32434_RTC_CE_BIT 0
-#define RC32434_RTC_TO_BIT 1
-#define RC32434_RTC_RQE_BIT 2
+#define RC32434_RTC_MSK(x) BIT_TO_MASK(x)
+#define RC32434_RTC_CE_BIT 0
+#define RC32434_RTC_TO_BIT 1
+#define RC32434_RTC_RQE_BIT 2
/* Counter registers */
-#define RC32434_RCOUNT_BIT 0
-#define RC32434_RCOUNT_MSK 0x0000ffff
-#define RC32434_RCOMP_BIT 0
-#define RC32434_RCOMP_MSK 0x0000ffff
+#define RC32434_RCOUNT_BIT 0
+#define RC32434_RCOUNT_MSK 0x0000ffff
+#define RC32434_RCOMP_BIT 0
+#define RC32434_RCOMP_MSK 0x0000ffff
-#endif /* __ASM_RC32434_TIMER_H */
+#endif /* __ASM_RC32434_TIMER_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 7f3e3f9bd23a..d9c828419037 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -23,8 +23,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
@@ -53,8 +53,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
index 652ea4c38cda..5d154cfbcf4c 100644
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ b/arch/mips/include/asm/mach-sead3/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_MIPS_IRQ_H
#define __ASM_MACH_MIPS_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 176f5b32dc69..0a227d426b9c 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -21,12 +21,12 @@ extern int sb1250_m3_workaround_needed(void);
#endif
#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
-#define SIBYTE_1956_WAR 1
+#define SIBYTE_1956_WAR 1
#else
#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
+#define SIBYTE_1956_WAR 0
#endif
diff --git a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
index 83746b84a5ec..00fa3684ac98 100644
--- a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
@@ -11,11 +11,11 @@
/*
* This is the CPU physical memory map of PPMC Board:
*
- * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#)
- * 0x1C000000-0x1C000000 - LED (CS0)
- * 0x1C800000-0x1C800007 - UART 16550 port (CS1)
- * 0x1F000000-0x1F000000 - MailBox (CS3)
- * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS)
+ * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#)
+ * 0x1C000000-0x1C000000 - LED (CS0)
+ * 0x1C800000-0x1C800007 - UART 16550 port (CS1)
+ * 0x1F000000-0x1F000000 - MailBox (CS3)
+ * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS)
*/
#define WRPPMC_SDRAM_SCS0_BASE 0x00000000
@@ -39,8 +39,8 @@
*
* NOTE: We only have PCI_0 hose interface
*/
-#define GT_PCI_MEM_BASE 0x13000000UL
-#define GT_PCI_MEM_SIZE 0x02000000UL
+#define GT_PCI_MEM_BASE 0x13000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
#define GT_PCI_IO_BASE 0x11000000UL
#define GT_PCI_IO_SIZE 0x02000000UL
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index 4a08dbe37db8..9e1ad26abdc0 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -26,7 +26,7 @@
* MC146818A or Dallas DS12887 data sheet for details.
*
* BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
+ * sets the minutes. Usually you'll only notice that after reboot!
*/
static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
{
@@ -77,7 +77,7 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
* battery and quartz) will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned in
* the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
+ * sheets anyway ... -- Markus Kuhn
*/
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
diff --git a/arch/mips/include/asm/mips-boards/bonito64.h b/arch/mips/include/asm/mips-boards/bonito64.h
index d14e2adc4be5..b2048d1bcc1c 100644
--- a/arch/mips/include/asm/mips-boards/bonito64.h
+++ b/arch/mips/include/asm/mips-boards/bonito64.h
@@ -41,18 +41,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_BOOT_BASE 0x1fc00000
#define BONITO_BOOT_SIZE 0x00100000
-#define BONITO_BOOT_TOP (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
+#define BONITO_BOOT_TOP (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
#define BONITO_FLASH_BASE 0x1c000000
#define BONITO_FLASH_SIZE 0x03000000
#define BONITO_FLASH_TOP (BONITO_FLASH_BASE+BONITO_FLASH_SIZE-1)
#define BONITO_SOCKET_BASE 0x1f800000
#define BONITO_SOCKET_SIZE 0x00400000
#define BONITO_SOCKET_TOP (BONITO_SOCKET_BASE+BONITO_SOCKET_SIZE-1)
-#define BONITO_REG_BASE 0x1fe00000
-#define BONITO_REG_SIZE 0x00040000
+#define BONITO_REG_BASE 0x1fe00000
+#define BONITO_REG_SIZE 0x00040000
#define BONITO_REG_TOP (BONITO_REG_BASE+BONITO_REG_SIZE-1)
-#define BONITO_DEV_BASE 0x1ff00000
-#define BONITO_DEV_SIZE 0x00100000
+#define BONITO_DEV_BASE 0x1ff00000
+#define BONITO_DEV_SIZE 0x00100000
#define BONITO_DEV_TOP (BONITO_DEV_BASE+BONITO_DEV_SIZE-1)
#define BONITO_PCILO_BASE 0x10000000
#define BONITO_PCILO_SIZE 0x0c000000
@@ -79,14 +79,14 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* PCI Configuration Registers */
-#define BONITO_PCI_REG(x) BONITO(BONITO_PCICONFIGBASE + (x))
+#define BONITO_PCI_REG(x) BONITO(BONITO_PCICONFIGBASE + (x))
#define BONITO_PCIDID BONITO_PCI_REG(0x00)
#define BONITO_PCICMD BONITO_PCI_REG(0x04)
-#define BONITO_PCICLASS BONITO_PCI_REG(0x08)
+#define BONITO_PCICLASS BONITO_PCI_REG(0x08)
#define BONITO_PCILTIMER BONITO_PCI_REG(0x0c)
-#define BONITO_PCIBASE0 BONITO_PCI_REG(0x10)
-#define BONITO_PCIBASE1 BONITO_PCI_REG(0x14)
-#define BONITO_PCIBASE2 BONITO_PCI_REG(0x18)
+#define BONITO_PCIBASE0 BONITO_PCI_REG(0x10)
+#define BONITO_PCIBASE1 BONITO_PCI_REG(0x14)
+#define BONITO_PCIBASE2 BONITO_PCI_REG(0x18)
#define BONITO_PCIEXPRBASE BONITO_PCI_REG(0x30)
#define BONITO_PCIINT BONITO_PCI_REG(0x3c)
@@ -95,7 +95,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICMD_MABORT_CLR 0x20000000
#define BONITO_PCICMD_MTABORT_CLR 0x10000000
#define BONITO_PCICMD_TABORT_CLR 0x08000000
-#define BONITO_PCICMD_MPERR_CLR 0x01000000
+#define BONITO_PCICMD_MPERR_CLR 0x01000000
#define BONITO_PCICMD_PERRRESPEN 0x00000040
#define BONITO_PCICMD_ASTEPEN 0x00000080
#define BONITO_PCICMD_SERREN 0x00000100
@@ -139,7 +139,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* Other Bonito configuration */
-#define BONITO_BONGENCFG_OFFSET 0x4
+#define BONITO_BONGENCFG_OFFSET 0x4
#define BONITO_BONGENCFG BONITO(BONITO_REGBASE + BONITO_BONGENCFG_OFFSET)
#define BONITO_BONGENCFG_DEBUGMODE 0x00000001
@@ -165,7 +165,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* 2. IO & IDE configuration */
-#define BONITO_IODEVCFG BONITO(BONITO_REGBASE + 0x08)
+#define BONITO_IODEVCFG BONITO(BONITO_REGBASE + 0x08)
/* 3. IO & IDE configuration */
@@ -181,33 +181,33 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* GPIO Regs - r/w */
-#define BONITO_GPIODATA_OFFSET 0x1c
-#define BONITO_GPIODATA BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
+#define BONITO_GPIODATA_OFFSET 0x1c
+#define BONITO_GPIODATA BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
#define BONITO_GPIOIE BONITO(BONITO_REGBASE + 0x20)
/* ICU Configuration Regs - r/w */
#define BONITO_INTEDGE BONITO(BONITO_REGBASE + 0x24)
-#define BONITO_INTSTEER BONITO(BONITO_REGBASE + 0x28)
+#define BONITO_INTSTEER BONITO(BONITO_REGBASE + 0x28)
#define BONITO_INTPOL BONITO(BONITO_REGBASE + 0x2c)
/* ICU Enable Regs - IntEn & IntISR are r/o. */
-#define BONITO_INTENSET BONITO(BONITO_REGBASE + 0x30)
-#define BONITO_INTENCLR BONITO(BONITO_REGBASE + 0x34)
+#define BONITO_INTENSET BONITO(BONITO_REGBASE + 0x30)
+#define BONITO_INTENCLR BONITO(BONITO_REGBASE + 0x34)
#define BONITO_INTEN BONITO(BONITO_REGBASE + 0x38)
#define BONITO_INTISR BONITO(BONITO_REGBASE + 0x3c)
/* PCI mail boxes */
-#define BONITO_PCIMAIL0_OFFSET 0x40
-#define BONITO_PCIMAIL1_OFFSET 0x44
-#define BONITO_PCIMAIL2_OFFSET 0x48
-#define BONITO_PCIMAIL3_OFFSET 0x4c
-#define BONITO_PCIMAIL0 BONITO(BONITO_REGBASE + 0x40)
-#define BONITO_PCIMAIL1 BONITO(BONITO_REGBASE + 0x44)
-#define BONITO_PCIMAIL2 BONITO(BONITO_REGBASE + 0x48)
-#define BONITO_PCIMAIL3 BONITO(BONITO_REGBASE + 0x4c)
+#define BONITO_PCIMAIL0_OFFSET 0x40
+#define BONITO_PCIMAIL1_OFFSET 0x44
+#define BONITO_PCIMAIL2_OFFSET 0x48
+#define BONITO_PCIMAIL3_OFFSET 0x4c
+#define BONITO_PCIMAIL0 BONITO(BONITO_REGBASE + 0x40)
+#define BONITO_PCIMAIL1 BONITO(BONITO_REGBASE + 0x44)
+#define BONITO_PCIMAIL2 BONITO(BONITO_REGBASE + 0x48)
+#define BONITO_PCIMAIL3 BONITO(BONITO_REGBASE + 0x4c)
/* 6. PCI cache */
@@ -216,7 +216,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICACHETAG BONITO(BONITO_REGBASE + 0x54)
#define BONITO_PCIBADADDR BONITO(BONITO_REGBASE + 0x58)
-#define BONITO_PCIMSTAT BONITO(BONITO_REGBASE + 0x5c)
+#define BONITO_PCIMSTAT BONITO(BONITO_REGBASE + 0x5c)
/*
@@ -228,20 +228,20 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_CONFIGBASE 0x000
#define BONITO_BONITOBASE 0x100
-#define BONITO_LDMABASE 0x200
+#define BONITO_LDMABASE 0x200
#define BONITO_COPBASE 0x300
#define BONITO_REG_BLOCKMASK 0x300
-#define BONITO_LDMACTRL BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMASTAT BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMAADDR BONITO(BONITO_LDMABASE + 0x4)
+#define BONITO_LDMACTRL BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMASTAT BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMAADDR BONITO(BONITO_LDMABASE + 0x4)
#define BONITO_LDMAGO BONITO(BONITO_LDMABASE + 0x8)
-#define BONITO_LDMADATA BONITO(BONITO_LDMABASE + 0xc)
+#define BONITO_LDMADATA BONITO(BONITO_LDMABASE + 0xc)
#define BONITO_COPCTRL BONITO(BONITO_COPBASE + 0x0)
#define BONITO_COPSTAT BONITO(BONITO_COPBASE + 0x0)
-#define BONITO_COPPADDR BONITO(BONITO_COPBASE + 0x4)
-#define BONITO_COPDADDR BONITO(BONITO_COPBASE + 0x8)
+#define BONITO_COPPADDR BONITO(BONITO_COPBASE + 0x4)
+#define BONITO_COPDADDR BONITO(BONITO_COPBASE + 0x8)
#define BONITO_COPGO BONITO(BONITO_COPBASE + 0xc)
@@ -257,7 +257,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_IDECOPGO_DMA_SIZE_SHIFT 0
#define BONITO_IDECOPGO_DMA_WRITE 0x00010000
#define BONITO_IDECOPGO_DMAWCOUNT 0x000f0000
-#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
+#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
#define BONITO_IDECOPCTRL_DMA_STARTBIT 0x80000000
#define BONITO_IDECOPCTRL_DMA_RSTBIT 0x40000000
@@ -291,11 +291,11 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_SDCFG_DRAMMODESET 0x00200000
/* --- */
#define BONITO_SDCFG_DRAMEXTREGS 0x00400000
-#define BONITO_SDCFG_DRAMPARITY 0x00800000
+#define BONITO_SDCFG_DRAMPARITY 0x00800000
/* Added by RPF 11-9-00 */
-#define BONITO_SDCFG_DRAMBURSTLEN 0x03000000
-#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
-#define BONITO_SDCFG_DRAMMODESET_DONE 0x80000000
+#define BONITO_SDCFG_DRAMBURSTLEN 0x03000000
+#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
+#define BONITO_SDCFG_DRAMMODESET_DONE 0x80000000
/* --- */
/* PCI Cache - pciCacheCtrl */
@@ -308,7 +308,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICACHECTRL_IOBCCOH_PRES 0x00000100
#define BONITO_PCICACHECTRL_IOBCCOH_EN 0x00000200
-#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
+#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
#define BONITO_PCICACHECTRL_CPUCOH_EN 0x00000800
#define BONITO_IODEVCFG_BUFFBIT_CS0 0x00000001
@@ -343,18 +343,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* gpio */
#define BONITO_GPIO_GPIOW 0x000003ff
-#define BONITO_GPIO_GPIOW_SHIFT 0
+#define BONITO_GPIO_GPIOW_SHIFT 0
#define BONITO_GPIO_GPIOR 0x01ff0000
-#define BONITO_GPIO_GPIOR_SHIFT 16
+#define BONITO_GPIO_GPIOR_SHIFT 16
#define BONITO_GPIO_GPINR 0xfe000000
-#define BONITO_GPIO_GPINR_SHIFT 25
+#define BONITO_GPIO_GPINR_SHIFT 25
#define BONITO_GPIO_IOW(N) (1<<(BONITO_GPIO_GPIOW_SHIFT+(N)))
#define BONITO_GPIO_IOR(N) (1<<(BONITO_GPIO_GPIOR_SHIFT+(N)))
#define BONITO_GPIO_INR(N) (1<<(BONITO_GPIO_GPINR_SHIFT+(N)))
/* ICU */
#define BONITO_ICU_MBOXES 0x0000000f
-#define BONITO_ICU_MBOXES_SHIFT 0
+#define BONITO_ICU_MBOXES_SHIFT 0
#define BONITO_ICU_DMARDY 0x00000010
#define BONITO_ICU_DMAEMPTY 0x00000020
#define BONITO_ICU_COPYRDY 0x00000040
@@ -384,13 +384,13 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCIMAP_PCIMAP_2 0x00040000
#define BONITO_PCIMAP_WIN(WIN, ADDR) ((((ADDR)>>26) & BONITO_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
-#define BONITO_PCIMAP_WINSIZE (1<<26)
+#define BONITO_PCIMAP_WINSIZE (1<<26)
#define BONITO_PCIMAP_WINOFFSET(ADDR) ((ADDR) & (BONITO_PCIMAP_WINSIZE - 1))
#define BONITO_PCIMAP_WINBASE(ADDR) ((ADDR) << 26)
/* pcimembaseCfg */
-#define BONITO_PCIMEMBASECFG_MASK 0xf0000000
+#define BONITO_PCIMEMBASECFG_MASK 0xf0000000
#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK 0x0000001f
#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK_SHIFT 0
#define BONITO_PCIMEMBASECFG_MEMBASE0_TRANS 0x000003e0
@@ -406,21 +406,21 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCIMEMBASECFG_MEMBASE1_IO 0x00800000
#define BONITO_PCIMEMBASECFG_ASHIFT 23
-#define BONITO_PCIMEMBASECFG_AMASK 0x007fffff
+#define BONITO_PCIMEMBASECFG_AMASK 0x007fffff
#define BONITO_PCIMEMBASECFGSIZE(WIN, SIZE) (((~((SIZE)-1))>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)
#define BONITO_PCIMEMBASECFGBASE(WIN, BASE) (((BASE)>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS)
#define BONITO_PCIMEMBASECFG_SIZE(WIN, CFG) (((((~(CFG)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)) << (BONITO_PCIMEMBASECFG_ASHIFT - BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) | BONITO_PCIMEMBASECFG_AMASK)
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
#define BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
-#define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \
- (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
- (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
- )
+#define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \
+ (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
+ (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
+ )
/* PCICmd */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 6e23ceb0ba8c..44a09a64160a 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -1,21 +1,14 @@
/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
* Defines of the MIPS boards specific address-MAP, registers, etc.
+ *
+ * Copyright (C) 2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef __ASM_MIPS_BOARDS_GENERIC_H
#define __ASM_MIPS_BOARDS_GENERIC_H
@@ -27,39 +20,32 @@
/*
* Display register base.
*/
-#define ASCII_DISPLAY_WORD_BASE 0x1f000410
-#define ASCII_DISPLAY_POS_BASE 0x1f000418
-
-
-/*
- * Yamon Prom print address.
- */
-#define YAMON_PROM_PRINT_ADDR 0x1fc00504
-
+#define ASCII_DISPLAY_WORD_BASE 0x1f000410
+#define ASCII_DISPLAY_POS_BASE 0x1f000418
/*
* Reset register.
*/
-#define SOFTRES_REG 0x1f000500
-#define GORESET 0x42
+#define SOFTRES_REG 0x1f000500
+#define GORESET 0x42
/*
* Revision register.
*/
-#define MIPS_REVISION_REG 0x1fc00010
-#define MIPS_REVISION_CORID_QED_RM5261 0
-#define MIPS_REVISION_CORID_CORE_LV 1
-#define MIPS_REVISION_CORID_BONITO64 2
-#define MIPS_REVISION_CORID_CORE_20K 3
-#define MIPS_REVISION_CORID_CORE_FPGA 4
-#define MIPS_REVISION_CORID_CORE_MSC 5
-#define MIPS_REVISION_CORID_CORE_EMUL 6
-#define MIPS_REVISION_CORID_CORE_FPGA2 7
-#define MIPS_REVISION_CORID_CORE_FPGAR2 8
-#define MIPS_REVISION_CORID_CORE_FPGA3 9
-#define MIPS_REVISION_CORID_CORE_24K 10
-#define MIPS_REVISION_CORID_CORE_FPGA4 11
-#define MIPS_REVISION_CORID_CORE_FPGA5 12
+#define MIPS_REVISION_REG 0x1fc00010
+#define MIPS_REVISION_CORID_QED_RM5261 0
+#define MIPS_REVISION_CORID_CORE_LV 1
+#define MIPS_REVISION_CORID_BONITO64 2
+#define MIPS_REVISION_CORID_CORE_20K 3
+#define MIPS_REVISION_CORID_CORE_FPGA 4
+#define MIPS_REVISION_CORID_CORE_MSC 5
+#define MIPS_REVISION_CORID_CORE_EMUL 6
+#define MIPS_REVISION_CORID_CORE_FPGA2 7
+#define MIPS_REVISION_CORID_CORE_FPGAR2 8
+#define MIPS_REVISION_CORID_CORE_FPGA3 9
+#define MIPS_REVISION_CORID_CORE_24K 10
+#define MIPS_REVISION_CORID_CORE_FPGA4 11
+#define MIPS_REVISION_CORID_CORE_FPGA5 12
/**** Artificial corid defines ****/
/*
@@ -87,10 +73,14 @@
extern int mips_revision_sconid;
+#ifdef CONFIG_OF
+extern struct boot_param_header __dtb_start;
+#endif
+
#ifdef CONFIG_PCI
extern void mips_pcibios_init(void);
#else
#define mips_pcibios_init() do { } while (0)
#endif
-#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
+#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index d8ae7f95a522..653477e4074d 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -16,11 +16,11 @@ struct cpulaunch {
#else
#define LOG2CPULAUNCH 5
-#define LAUNCH_PC 0
-#define LAUNCH_GP 4
-#define LAUNCH_SP 8
-#define LAUNCH_A0 12
-#define LAUNCH_FLAGS 28
+#define LAUNCH_PC 0
+#define LAUNCH_GP 4
+#define LAUNCH_SP 8
+#define LAUNCH_A0 12
+#define LAUNCH_FLAGS 28
#endif
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index c1891578fa65..722bc889eab5 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -33,9 +33,9 @@
* Malta I/O ports base address for the Galileo GT64120 and Algorithmics
* Bonito system controllers.
*/
-#define MALTA_GT_PORT_BASE get_gt_port_base(GT_PCI0IOLD_OFS)
-#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
-#define MALTA_MSC_PORT_BASE get_msc_port_base(MSC01_PCI_SC2PIOBASL)
+#define MALTA_GT_PORT_BASE get_gt_port_base(GT_PCI0IOLD_OFS)
+#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
+#define MALTA_MSC_PORT_BASE get_msc_port_base(MSC01_PCI_SC2PIOBASL)
static inline unsigned long get_gt_port_base(unsigned long reg)
{
@@ -77,8 +77,8 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
/*
* Malta RTC-device indirect register access.
*/
-#define MALTA_RTC_ADR_REG 0x70
-#define MALTA_RTC_DAT_REG 0x71
+#define MALTA_RTC_ADR_REG 0x70
+#define MALTA_RTC_DAT_REG 0x71
/*
* Malta SMSC FDC37M817 Super I/O Controller register.
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index 669244815753..e330732ddf98 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -4,8 +4,8 @@
* for more details.
*
* Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
- * Carsten Langgaard <carstenl@mips.com>
- * Steven J. Hill <sjhill@mips.com>
+ * Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef _MIPS_MALTAINT_H
#define _MIPS_MALTAINT_H
@@ -24,9 +24,9 @@
#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
#define MIPSCPU_INT_MB1 3
#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
-#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
+#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
#define MIPSCPU_INT_MB2 4
-#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
+#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
#define MIPSCPU_INT_MB3 5
#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
#define MIPSCPU_INT_MB4 6
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 2971d60f2e95..a02596cf1abd 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -53,7 +53,7 @@
#define PIIX4_OCW2_SP (0x6 << 5)
#define PIIX4_OCW2_NOP (0x2 << 5)
-#define PIIX4_OCW2_SEL (0x0 << 3)
+#define PIIX4_OCW2_SEL (0x0 << 3)
#define PIIX4_OCW2_ILS_0 0
#define PIIX4_OCW2_ILS_1 1
@@ -72,9 +72,9 @@
#define PIIX4_OCW2_ILS_14 6
#define PIIX4_OCW2_ILS_15 7
-#define PIIX4_OCW3_SEL (0x1 << 3)
+#define PIIX4_OCW3_SEL (0x1 << 3)
-#define PIIX4_OCW3_IRR 0x2
-#define PIIX4_OCW3_ISR 0x3
+#define PIIX4_OCW3_IRR 0x2
+#define PIIX4_OCW3_ISR 0x3
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
index a9db576a9768..e7aed3e4ff58 100644
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ b/arch/mips/include/asm/mips-boards/prom.h
@@ -39,9 +39,9 @@ extern int get_ethernet_addr(char *ethernet_addr);
/* Memory descriptor management. */
#define PROM_MAX_PMEMBLOCKS 32
struct prom_pmemblock {
- unsigned long base; /* Within KSEG0. */
- unsigned int size; /* In bytes. */
- unsigned int type; /* free or prom memory */
+ unsigned long base; /* Within KSEG0. */
+ unsigned int size; /* In bytes. */
+ unsigned int type; /* free or prom memory */
};
#endif /* !(_MIPS_PROM_H) */
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
index d634d9a807f6..6b17aaf7d901 100644
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ b/arch/mips/include/asm/mips-boards/sead3int.h
@@ -4,8 +4,8 @@
* for more details.
*
* Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
- * Douglas Leung <douglas@mips.com>
- * Steven J. Hill <sjhill@mips.com>
+ * Douglas Leung <douglas@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef _MIPS_SEAD3INT_H
#define _MIPS_SEAD3INT_H
diff --git a/arch/mips/include/asm/mips-boards/sim.h b/arch/mips/include/asm/mips-boards/sim.h
index acb7c2331d98..b112fdc9f77d 100644
--- a/arch/mips/include/asm/mips-boards/sim.h
+++ b/arch/mips/include/asm/mips-boards/sim.h
@@ -19,18 +19,18 @@
#ifndef _ASM_MIPS_BOARDS_SIM_H
#define _ASM_MIPS_BOARDS_SIM_H
-#define STATS_ON 1
-#define STATS_OFF 2
-#define STATS_CLEAR 3
-#define STATS_DUMP 4
+#define STATS_ON 1
+#define STATS_OFF 2
+#define STATS_CLEAR 3
+#define STATS_DUMP 4
#define TRACE_ON 5
-#define TRACE_OFF 6
+#define TRACE_OFF 6
#define simcfg(code) \
({ \
- __asm__ __volatile__( \
- "sltiu $0,$0, %0" \
+ __asm__ __volatile__( \
+ "sltiu $0,$0, %0" \
::"i"(code) \
); \
})
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 5b3cb8553e9a..38b7704ee376 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -270,14 +270,14 @@ static inline void ehb(void)
#define mftc0(rt,sel) \
({ \
- unsigned long __res; \
+ unsigned long __res; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set mips32r2 \n" \
" .set noat \n" \
- " # mftc0 $1, $" #rt ", " #sel " \n" \
- " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
+ " # mftc0 $1, $" #rt ", " #sel " \n" \
+ " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res)); \
@@ -334,7 +334,7 @@ do { \
" .set noat \n" \
" move $1, %0 \n" \
" # mttc0 %0," #rd ", " #sel " \n" \
- " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
+ " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
" .set pop \n" \
: \
: "r" (v)); \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7e4e6f8fab37..12b70c25906a 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -123,16 +123,16 @@
* Status Register Values
*/
-#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
-#define FPU_CSR_COND 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
-#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
-#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
-#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
-#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
-#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
-#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
+#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
+#define FPU_CSR_COND 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
+#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
+#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
+#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
+#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
+#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
+#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
/*
* Bits 18 - 20 of the FPU Status Register will be read as 0,
@@ -145,34 +145,34 @@
* E the exception enable
* S the sticky/flag bit
*/
-#define FPU_CSR_ALL_X 0x0003f000
-#define FPU_CSR_UNI_X 0x00020000
-#define FPU_CSR_INV_X 0x00010000
-#define FPU_CSR_DIV_X 0x00008000
-#define FPU_CSR_OVF_X 0x00004000
-#define FPU_CSR_UDF_X 0x00002000
-#define FPU_CSR_INE_X 0x00001000
-
-#define FPU_CSR_ALL_E 0x00000f80
-#define FPU_CSR_INV_E 0x00000800
-#define FPU_CSR_DIV_E 0x00000400
-#define FPU_CSR_OVF_E 0x00000200
-#define FPU_CSR_UDF_E 0x00000100
-#define FPU_CSR_INE_E 0x00000080
-
-#define FPU_CSR_ALL_S 0x0000007c
-#define FPU_CSR_INV_S 0x00000040
-#define FPU_CSR_DIV_S 0x00000020
-#define FPU_CSR_OVF_S 0x00000010
-#define FPU_CSR_UDF_S 0x00000008
-#define FPU_CSR_INE_S 0x00000004
+#define FPU_CSR_ALL_X 0x0003f000
+#define FPU_CSR_UNI_X 0x00020000
+#define FPU_CSR_INV_X 0x00010000
+#define FPU_CSR_DIV_X 0x00008000
+#define FPU_CSR_OVF_X 0x00004000
+#define FPU_CSR_UDF_X 0x00002000
+#define FPU_CSR_INE_X 0x00001000
+
+#define FPU_CSR_ALL_E 0x00000f80
+#define FPU_CSR_INV_E 0x00000800
+#define FPU_CSR_DIV_E 0x00000400
+#define FPU_CSR_OVF_E 0x00000200
+#define FPU_CSR_UDF_E 0x00000100
+#define FPU_CSR_INE_E 0x00000080
+
+#define FPU_CSR_ALL_S 0x0000007c
+#define FPU_CSR_INV_S 0x00000040
+#define FPU_CSR_DIV_S 0x00000020
+#define FPU_CSR_OVF_S 0x00000010
+#define FPU_CSR_UDF_S 0x00000008
+#define FPU_CSR_INE_S 0x00000004
/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
#define FPU_CSR_RM 0x00000003
-#define FPU_CSR_RN 0x0 /* nearest */
-#define FPU_CSR_RZ 0x1 /* towards zero */
-#define FPU_CSR_RU 0x2 /* towards +Infinity */
-#define FPU_CSR_RD 0x3 /* towards -Infinity */
+#define FPU_CSR_RN 0x0 /* nearest */
+#define FPU_CSR_RZ 0x1 /* towards zero */
+#define FPU_CSR_RU 0x2 /* towards +Infinity */
+#define FPU_CSR_RD 0x3 /* towards -Infinity */
/*
@@ -214,15 +214,15 @@
* Default page size for a given kernel configuration
*/
#ifdef CONFIG_PAGE_SIZE_4KB
-#define PM_DEFAULT_MASK PM_4K
+#define PM_DEFAULT_MASK PM_4K
#elif defined(CONFIG_PAGE_SIZE_8KB)
-#define PM_DEFAULT_MASK PM_8K
+#define PM_DEFAULT_MASK PM_8K
#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define PM_DEFAULT_MASK PM_16K
+#define PM_DEFAULT_MASK PM_16K
#elif defined(CONFIG_PAGE_SIZE_32KB)
-#define PM_DEFAULT_MASK PM_32K
+#define PM_DEFAULT_MASK PM_32K
#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define PM_DEFAULT_MASK PM_64K
+#define PM_DEFAULT_MASK PM_64K
#else
#error Bad page size configuration!
#endif
@@ -260,34 +260,34 @@
/*
* PageGrain bits
*/
-#define PG_RIE (_ULCAST_(1) << 31)
-#define PG_XIE (_ULCAST_(1) << 30)
-#define PG_ELPA (_ULCAST_(1) << 29)
-#define PG_ESP (_ULCAST_(1) << 28)
+#define PG_RIE (_ULCAST_(1) << 31)
+#define PG_XIE (_ULCAST_(1) << 30)
+#define PG_ELPA (_ULCAST_(1) << 29)
+#define PG_ESP (_ULCAST_(1) << 28)
/*
* R4x00 interrupt enable / cause bits
*/
-#define IE_SW0 (_ULCAST_(1) << 8)
-#define IE_SW1 (_ULCAST_(1) << 9)
-#define IE_IRQ0 (_ULCAST_(1) << 10)
-#define IE_IRQ1 (_ULCAST_(1) << 11)
-#define IE_IRQ2 (_ULCAST_(1) << 12)
-#define IE_IRQ3 (_ULCAST_(1) << 13)
-#define IE_IRQ4 (_ULCAST_(1) << 14)
-#define IE_IRQ5 (_ULCAST_(1) << 15)
+#define IE_SW0 (_ULCAST_(1) << 8)
+#define IE_SW1 (_ULCAST_(1) << 9)
+#define IE_IRQ0 (_ULCAST_(1) << 10)
+#define IE_IRQ1 (_ULCAST_(1) << 11)
+#define IE_IRQ2 (_ULCAST_(1) << 12)
+#define IE_IRQ3 (_ULCAST_(1) << 13)
+#define IE_IRQ4 (_ULCAST_(1) << 14)
+#define IE_IRQ5 (_ULCAST_(1) << 15)
/*
* R4x00 interrupt cause bits
*/
-#define C_SW0 (_ULCAST_(1) << 8)
-#define C_SW1 (_ULCAST_(1) << 9)
-#define C_IRQ0 (_ULCAST_(1) << 10)
-#define C_IRQ1 (_ULCAST_(1) << 11)
-#define C_IRQ2 (_ULCAST_(1) << 12)
-#define C_IRQ3 (_ULCAST_(1) << 13)
-#define C_IRQ4 (_ULCAST_(1) << 14)
-#define C_IRQ5 (_ULCAST_(1) << 15)
+#define C_SW0 (_ULCAST_(1) << 8)
+#define C_SW1 (_ULCAST_(1) << 9)
+#define C_IRQ0 (_ULCAST_(1) << 10)
+#define C_IRQ1 (_ULCAST_(1) << 11)
+#define C_IRQ2 (_ULCAST_(1) << 12)
+#define C_IRQ3 (_ULCAST_(1) << 13)
+#define C_IRQ4 (_ULCAST_(1) << 14)
+#define C_IRQ5 (_ULCAST_(1) << 15)
/*
* Bitfields in the R4xx0 cp0 status register
@@ -301,7 +301,7 @@
# define KSU_KERNEL 0x00000000
#define ST0_UX 0x00000020
#define ST0_SX 0x00000040
-#define ST0_KX 0x00000080
+#define ST0_KX 0x00000080
#define ST0_DE 0x00010000
#define ST0_CE 0x00020000
@@ -315,7 +315,7 @@
/*
* Bitfields in the R[23]000 cp0 status register.
*/
-#define ST0_IEC 0x00000001
+#define ST0_IEC 0x00000001
#define ST0_KUC 0x00000002
#define ST0_IEP 0x00000004
#define ST0_KUP 0x00000008
@@ -329,7 +329,7 @@
/*
* Bits specific to the R4640/R4650
*/
-#define ST0_UM (_ULCAST_(1) << 4)
+#define ST0_UM (_ULCAST_(1) << 4)
#define ST0_IL (_ULCAST_(1) << 23)
#define ST0_DL (_ULCAST_(1) << 24)
@@ -343,22 +343,22 @@
*/
#define TX39_CONF_ICS_SHIFT 19
#define TX39_CONF_ICS_MASK 0x00380000
-#define TX39_CONF_ICS_1KB 0x00000000
-#define TX39_CONF_ICS_2KB 0x00080000
-#define TX39_CONF_ICS_4KB 0x00100000
-#define TX39_CONF_ICS_8KB 0x00180000
-#define TX39_CONF_ICS_16KB 0x00200000
+#define TX39_CONF_ICS_1KB 0x00000000
+#define TX39_CONF_ICS_2KB 0x00080000
+#define TX39_CONF_ICS_4KB 0x00100000
+#define TX39_CONF_ICS_8KB 0x00180000
+#define TX39_CONF_ICS_16KB 0x00200000
#define TX39_CONF_DCS_SHIFT 16
#define TX39_CONF_DCS_MASK 0x00070000
-#define TX39_CONF_DCS_1KB 0x00000000
-#define TX39_CONF_DCS_2KB 0x00010000
-#define TX39_CONF_DCS_4KB 0x00020000
-#define TX39_CONF_DCS_8KB 0x00030000
-#define TX39_CONF_DCS_16KB 0x00040000
-
-#define TX39_CONF_CWFON 0x00004000
-#define TX39_CONF_WBON 0x00002000
+#define TX39_CONF_DCS_1KB 0x00000000
+#define TX39_CONF_DCS_2KB 0x00010000
+#define TX39_CONF_DCS_4KB 0x00020000
+#define TX39_CONF_DCS_8KB 0x00030000
+#define TX39_CONF_DCS_16KB 0x00040000
+
+#define TX39_CONF_CWFON 0x00004000
+#define TX39_CONF_WBON 0x00002000
#define TX39_CONF_RF_SHIFT 10
#define TX39_CONF_RF_MASK 0x00000c00
#define TX39_CONF_DOZE 0x00000200
@@ -375,38 +375,38 @@
* Status register bits available in all MIPS CPUs.
*/
#define ST0_IM 0x0000ff00
-#define STATUSB_IP0 8
-#define STATUSF_IP0 (_ULCAST_(1) << 8)
-#define STATUSB_IP1 9
-#define STATUSF_IP1 (_ULCAST_(1) << 9)
-#define STATUSB_IP2 10
-#define STATUSF_IP2 (_ULCAST_(1) << 10)
-#define STATUSB_IP3 11
-#define STATUSF_IP3 (_ULCAST_(1) << 11)
-#define STATUSB_IP4 12
-#define STATUSF_IP4 (_ULCAST_(1) << 12)
-#define STATUSB_IP5 13
-#define STATUSF_IP5 (_ULCAST_(1) << 13)
-#define STATUSB_IP6 14
-#define STATUSF_IP6 (_ULCAST_(1) << 14)
-#define STATUSB_IP7 15
-#define STATUSF_IP7 (_ULCAST_(1) << 15)
-#define STATUSB_IP8 0
-#define STATUSF_IP8 (_ULCAST_(1) << 0)
-#define STATUSB_IP9 1
-#define STATUSF_IP9 (_ULCAST_(1) << 1)
-#define STATUSB_IP10 2
-#define STATUSF_IP10 (_ULCAST_(1) << 2)
-#define STATUSB_IP11 3
-#define STATUSF_IP11 (_ULCAST_(1) << 3)
-#define STATUSB_IP12 4
-#define STATUSF_IP12 (_ULCAST_(1) << 4)
-#define STATUSB_IP13 5
-#define STATUSF_IP13 (_ULCAST_(1) << 5)
-#define STATUSB_IP14 6
-#define STATUSF_IP14 (_ULCAST_(1) << 6)
-#define STATUSB_IP15 7
-#define STATUSF_IP15 (_ULCAST_(1) << 7)
+#define STATUSB_IP0 8
+#define STATUSF_IP0 (_ULCAST_(1) << 8)
+#define STATUSB_IP1 9
+#define STATUSF_IP1 (_ULCAST_(1) << 9)
+#define STATUSB_IP2 10
+#define STATUSF_IP2 (_ULCAST_(1) << 10)
+#define STATUSB_IP3 11
+#define STATUSF_IP3 (_ULCAST_(1) << 11)
+#define STATUSB_IP4 12
+#define STATUSF_IP4 (_ULCAST_(1) << 12)
+#define STATUSB_IP5 13
+#define STATUSF_IP5 (_ULCAST_(1) << 13)
+#define STATUSB_IP6 14
+#define STATUSF_IP6 (_ULCAST_(1) << 14)
+#define STATUSB_IP7 15
+#define STATUSF_IP7 (_ULCAST_(1) << 15)
+#define STATUSB_IP8 0
+#define STATUSF_IP8 (_ULCAST_(1) << 0)
+#define STATUSB_IP9 1
+#define STATUSF_IP9 (_ULCAST_(1) << 1)
+#define STATUSB_IP10 2
+#define STATUSF_IP10 (_ULCAST_(1) << 2)
+#define STATUSB_IP11 3
+#define STATUSF_IP11 (_ULCAST_(1) << 3)
+#define STATUSB_IP12 4
+#define STATUSF_IP12 (_ULCAST_(1) << 4)
+#define STATUSB_IP13 5
+#define STATUSF_IP13 (_ULCAST_(1) << 5)
+#define STATUSB_IP14 6
+#define STATUSF_IP14 (_ULCAST_(1) << 6)
+#define STATUSB_IP15 7
+#define STATUSF_IP15 (_ULCAST_(1) << 7)
#define ST0_CH 0x00040000
#define ST0_NMI 0x00080000
#define ST0_SR 0x00100000
@@ -436,36 +436,36 @@
*
* Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
*/
-#define CAUSEB_EXCCODE 2
-#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
-#define CAUSEB_IP 8
-#define CAUSEF_IP (_ULCAST_(255) << 8)
-#define CAUSEB_IP0 8
-#define CAUSEF_IP0 (_ULCAST_(1) << 8)
-#define CAUSEB_IP1 9
-#define CAUSEF_IP1 (_ULCAST_(1) << 9)
-#define CAUSEB_IP2 10
-#define CAUSEF_IP2 (_ULCAST_(1) << 10)
-#define CAUSEB_IP3 11
-#define CAUSEF_IP3 (_ULCAST_(1) << 11)
-#define CAUSEB_IP4 12
-#define CAUSEF_IP4 (_ULCAST_(1) << 12)
-#define CAUSEB_IP5 13
-#define CAUSEF_IP5 (_ULCAST_(1) << 13)
-#define CAUSEB_IP6 14
-#define CAUSEF_IP6 (_ULCAST_(1) << 14)
-#define CAUSEB_IP7 15
-#define CAUSEF_IP7 (_ULCAST_(1) << 15)
-#define CAUSEB_IV 23
-#define CAUSEF_IV (_ULCAST_(1) << 23)
-#define CAUSEB_PCI 26
-#define CAUSEF_PCI (_ULCAST_(1) << 26)
-#define CAUSEB_CE 28
-#define CAUSEF_CE (_ULCAST_(3) << 28)
-#define CAUSEB_TI 30
-#define CAUSEF_TI (_ULCAST_(1) << 30)
-#define CAUSEB_BD 31
-#define CAUSEF_BD (_ULCAST_(1) << 31)
+#define CAUSEB_EXCCODE 2
+#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
+#define CAUSEB_IP 8
+#define CAUSEF_IP (_ULCAST_(255) << 8)
+#define CAUSEB_IP0 8
+#define CAUSEF_IP0 (_ULCAST_(1) << 8)
+#define CAUSEB_IP1 9
+#define CAUSEF_IP1 (_ULCAST_(1) << 9)
+#define CAUSEB_IP2 10
+#define CAUSEF_IP2 (_ULCAST_(1) << 10)
+#define CAUSEB_IP3 11
+#define CAUSEF_IP3 (_ULCAST_(1) << 11)
+#define CAUSEB_IP4 12
+#define CAUSEF_IP4 (_ULCAST_(1) << 12)
+#define CAUSEB_IP5 13
+#define CAUSEF_IP5 (_ULCAST_(1) << 13)
+#define CAUSEB_IP6 14
+#define CAUSEF_IP6 (_ULCAST_(1) << 14)
+#define CAUSEB_IP7 15
+#define CAUSEF_IP7 (_ULCAST_(1) << 15)
+#define CAUSEB_IV 23
+#define CAUSEF_IV (_ULCAST_(1) << 23)
+#define CAUSEB_PCI 26
+#define CAUSEF_PCI (_ULCAST_(1) << 26)
+#define CAUSEB_CE 28
+#define CAUSEF_CE (_ULCAST_(3) << 28)
+#define CAUSEB_TI 30
+#define CAUSEF_TI (_ULCAST_(1) << 30)
+#define CAUSEB_BD 31
+#define CAUSEF_BD (_ULCAST_(1) << 31)
/*
* Bits in the coprocessor 0 config register.
@@ -483,11 +483,11 @@
#define CONF_BE (_ULCAST_(1) << 15)
/* Bits common to various processors. */
-#define CONF_CU (_ULCAST_(1) << 3)
-#define CONF_DB (_ULCAST_(1) << 4)
-#define CONF_IB (_ULCAST_(1) << 5)
-#define CONF_DC (_ULCAST_(7) << 6)
-#define CONF_IC (_ULCAST_(7) << 9)
+#define CONF_CU (_ULCAST_(1) << 3)
+#define CONF_DB (_ULCAST_(1) << 4)
+#define CONF_IB (_ULCAST_(1) << 5)
+#define CONF_DC (_ULCAST_(7) << 6)
+#define CONF_IC (_ULCAST_(7) << 9)
#define CONF_EB (_ULCAST_(1) << 13)
#define CONF_EM (_ULCAST_(1) << 14)
#define CONF_SM (_ULCAST_(1) << 16)
@@ -497,29 +497,29 @@
#define CONF_EC (_ULCAST_(7) << 28)
#define CONF_CM (_ULCAST_(1) << 31)
-/* Bits specific to the R4xx0. */
+/* Bits specific to the R4xx0. */
#define R4K_CONF_SW (_ULCAST_(1) << 20)
#define R4K_CONF_SS (_ULCAST_(1) << 21)
#define R4K_CONF_SB (_ULCAST_(3) << 22)
-/* Bits specific to the R5000. */
+/* Bits specific to the R5000. */
#define R5K_CONF_SE (_ULCAST_(1) << 12)
#define R5K_CONF_SS (_ULCAST_(3) << 20)
-/* Bits specific to the RM7000. */
-#define RM7K_CONF_SE (_ULCAST_(1) << 3)
+/* Bits specific to the RM7000. */
+#define RM7K_CONF_SE (_ULCAST_(1) << 3)
#define RM7K_CONF_TE (_ULCAST_(1) << 12)
#define RM7K_CONF_CLK (_ULCAST_(1) << 16)
#define RM7K_CONF_TC (_ULCAST_(1) << 17)
#define RM7K_CONF_SI (_ULCAST_(3) << 20)
#define RM7K_CONF_SC (_ULCAST_(1) << 31)
-/* Bits specific to the R10000. */
-#define R10K_CONF_DN (_ULCAST_(3) << 3)
-#define R10K_CONF_CT (_ULCAST_(1) << 5)
-#define R10K_CONF_PE (_ULCAST_(1) << 6)
-#define R10K_CONF_PM (_ULCAST_(3) << 7)
-#define R10K_CONF_EC (_ULCAST_(15)<< 9)
+/* Bits specific to the R10000. */
+#define R10K_CONF_DN (_ULCAST_(3) << 3)
+#define R10K_CONF_CT (_ULCAST_(1) << 5)
+#define R10K_CONF_PE (_ULCAST_(1) << 6)
+#define R10K_CONF_PM (_ULCAST_(3) << 7)
+#define R10K_CONF_EC (_ULCAST_(15)<< 9)
#define R10K_CONF_SB (_ULCAST_(1) << 13)
#define R10K_CONF_SK (_ULCAST_(1) << 14)
#define R10K_CONF_SS (_ULCAST_(7) << 16)
@@ -527,14 +527,14 @@
#define R10K_CONF_DC (_ULCAST_(7) << 26)
#define R10K_CONF_IC (_ULCAST_(7) << 29)
-/* Bits specific to the VR41xx. */
+/* Bits specific to the VR41xx. */
#define VR41_CONF_CS (_ULCAST_(1) << 12)
#define VR41_CONF_P4K (_ULCAST_(1) << 13)
#define VR41_CONF_BP (_ULCAST_(1) << 16)
#define VR41_CONF_M16 (_ULCAST_(1) << 20)
#define VR41_CONF_AD (_ULCAST_(1) << 23)
-/* Bits specific to the R30xx. */
+/* Bits specific to the R30xx. */
#define R30XX_CONF_FDM (_ULCAST_(1) << 19)
#define R30XX_CONF_REV (_ULCAST_(1) << 22)
#define R30XX_CONF_AC (_ULCAST_(1) << 23)
@@ -551,8 +551,8 @@
#define TX49_CONF_HALT (_ULCAST_(1) << 18)
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
-/* Bits specific to the MIPS32/64 PRA. */
-#define MIPS_CONF_MT (_ULCAST_(7) << 7)
+/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_AR (_ULCAST_(7) << 10)
#define MIPS_CONF_AT (_ULCAST_(3) << 13)
#define MIPS_CONF_M (_ULCAST_(1) << 31)
@@ -560,14 +560,14 @@
/*
* Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above.
*/
-#define MIPS_CONF1_FP (_ULCAST_(1) << 0)
-#define MIPS_CONF1_EP (_ULCAST_(1) << 1)
-#define MIPS_CONF1_CA (_ULCAST_(1) << 2)
-#define MIPS_CONF1_WR (_ULCAST_(1) << 3)
-#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
-#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
-#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
-#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
+#define MIPS_CONF1_FP (_ULCAST_(1) << 0)
+#define MIPS_CONF1_EP (_ULCAST_(1) << 1)
+#define MIPS_CONF1_CA (_ULCAST_(1) << 2)
+#define MIPS_CONF1_WR (_ULCAST_(1) << 3)
+#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
+#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
+#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
+#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
@@ -575,26 +575,28 @@
#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
#define MIPS_CONF1_TLBS (_ULCAST_(63)<< 25)
-#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
-#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
-#define MIPS_CONF2_SS (_ULCAST_(15)<< 8)
+#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
+#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
+#define MIPS_CONF2_SS (_ULCAST_(15)<< 8)
#define MIPS_CONF2_SU (_ULCAST_(15)<< 12)
#define MIPS_CONF2_TA (_ULCAST_(15)<< 16)
#define MIPS_CONF2_TL (_ULCAST_(15)<< 20)
#define MIPS_CONF2_TS (_ULCAST_(15)<< 24)
#define MIPS_CONF2_TU (_ULCAST_(7) << 28)
-#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
-#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
-#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
-#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
-#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
-#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
-#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
+#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
+#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
+#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
+#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
+#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
+#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
+#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
#define MIPS_CONF3_DSP (_ULCAST_(1) << 10)
#define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11)
#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
+#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
+#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
@@ -621,7 +623,7 @@
#ifndef __ASSEMBLY__
/*
- * Functions to access the R10000 performance counters. These are basically
+ * Functions to access the R10000 performance counters. These are basically
* mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
* performance counter number encoded into bits 1 ... 5 of the instruction.
* Only performance counters 0 to 1 actually exist, so for a non-R10000 aware
@@ -632,13 +634,13 @@
unsigned int __res; \
__asm__ __volatile__( \
"mfpc\t%0, %1" \
- : "=r" (__res) \
+ : "=r" (__res) \
: "i" (counter)); \
\
- __res; \
+ __res; \
})
-#define write_r10k_perf_cntr(counter,val) \
+#define write_r10k_perf_cntr(counter,val) \
do { \
__asm__ __volatile__( \
"mtpc\t%0, %1" \
@@ -651,13 +653,13 @@ do { \
unsigned int __res; \
__asm__ __volatile__( \
"mfps\t%0, %1" \
- : "=r" (__res) \
+ : "=r" (__res) \
: "i" (counter)); \
\
- __res; \
+ __res; \
})
-#define write_r10k_perf_cntl(counter,val) \
+#define write_r10k_perf_cntl(counter,val) \
do { \
__asm__ __volatile__( \
"mtps\t%0, %1" \
@@ -847,20 +849,20 @@ do { \
#define write_c0_context(val) __write_ulong_c0_register($4, 0, val)
#define read_c0_userlocal() __read_ulong_c0_register($4, 2)
-#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
+#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
#define read_c0_pagegrain() __read_32bit_c0_register($5, 1)
-#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
+#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
#define read_c0_wired() __read_32bit_c0_register($6, 0)
#define write_c0_wired(val) __write_32bit_c0_register($6, 0, val)
#define read_c0_info() __read_32bit_c0_register($7, 0)
-#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */
+#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */
#define write_c0_cache(val) __write_32bit_c0_register($7, 0, val)
#define read_c0_badvaddr() __read_ulong_c0_register($8, 0)
@@ -975,7 +977,7 @@ do { \
#define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val)
#define read_c0_framemask() __read_32bit_c0_register($21, 0)
-#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
+#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
@@ -1005,27 +1007,27 @@ do { \
* MIPS32 / MIPS64 performance counters
*/
#define read_c0_perfctrl0() __read_32bit_c0_register($25, 0)
-#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
+#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
#define read_c0_perfcntr0() __read_32bit_c0_register($25, 1)
-#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
+#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
#define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1)
#define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val)
#define read_c0_perfctrl1() __read_32bit_c0_register($25, 2)
-#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
+#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
#define read_c0_perfcntr1() __read_32bit_c0_register($25, 3)
-#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
+#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
#define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3)
#define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val)
#define read_c0_perfctrl2() __read_32bit_c0_register($25, 4)
-#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
+#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
#define read_c0_perfcntr2() __read_32bit_c0_register($25, 5)
-#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
+#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
#define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5)
#define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val)
#define read_c0_perfctrl3() __read_32bit_c0_register($25, 6)
-#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
+#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
#define read_c0_perfcntr3() __read_32bit_c0_register($25, 7)
-#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
+#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
@@ -1033,12 +1035,12 @@ do { \
#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
#define read_c0_derraddr0() __read_ulong_c0_register($26, 1)
-#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
+#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
#define read_c0_cacheerr() __read_32bit_c0_register($27, 0)
#define read_c0_derraddr1() __read_ulong_c0_register($27, 1)
-#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
+#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
#define read_c0_taglo() __read_32bit_c0_register($28, 0)
#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val)
@@ -1083,9 +1085,9 @@ do { \
#define write_c0_cvmctl(val) __write_64bit_c0_register($9, 7, val)
#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
-#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
/*
- * The cacheerr registers are not standardized. On OCTEON, they are
+ * The cacheerr registers are not standardized. On OCTEON, they are
* 64 bits wide.
*/
#define read_octeon_c0_icacheerr() __read_64bit_c0_register($27, 0)
@@ -1142,48 +1144,42 @@ do { \
/*
* Macros to access the floating point coprocessor control registers
*/
-#define read_32bit_cp1_register(source) \
-({ int __res; \
- __asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- /* gas fails to assemble cfc1 for some archs (octeon).*/ \
- ".set\tmips1\n\t" \
- "cfc1\t%0,"STR(source)"\n\t" \
- ".set\tpop" \
- : "=r" (__res)); \
- __res;})
+#define read_32bit_cp1_register(source) \
+({ \
+ int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " # gas fails to assemble cfc1 for some archs, \n" \
+ " # like Octeon. \n" \
+ " .set mips1 \n" \
+ " cfc1 %0,"STR(source)" \n" \
+ " .set pop \n" \
+ : "=r" (__res)); \
+ __res; \
+})
+#ifdef HAVE_AS_DSP
#define rddsp(mask) \
({ \
- unsigned int __res; \
+ unsigned int __dspctl; \
\
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # rddsp $1, %x1 \n" \
- " .word 0x7c000cb8 | (%x1 << 16) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__res) \
+ " rddsp %0, %x1 \n" \
+ : "=r" (__dspctl) \
: "i" (mask)); \
- __res; \
+ __dspctl; \
})
#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 11) \n" \
- " .set pop \n" \
- : \
+ " wrdsp %0, %x1 \n" \
+ : \
: "r" (val), "i" (mask)); \
} while (0)
-#if 0 /* Need DSP ASE capable assembler ... */
#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
@@ -1206,230 +1202,177 @@ do { \
#else
-#define mfhi0() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac0 \n" \
- " .word 0x00000810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi1() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac1 \n" \
- " .word 0x00200810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi2() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac2 \n" \
- " .word 0x00400810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi3() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac3 \n" \
- " .word 0x00600810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo0() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac0 \n" \
- " .word 0x00000812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo1() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac1 \n" \
- " .word 0x00200812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo2() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac2 \n" \
- " .word 0x00400812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo3() \
+#ifdef CONFIG_CPU_MICROMIPS
+#define rddsp(mask) \
({ \
- unsigned long __treg; \
+ unsigned int __res; \
\
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac3 \n" \
- " .word 0x00600812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mthi0(x) \
-do { \
- __asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mthi $1, $ac0 \n" \
- " .word 0x00200011 \n" \
+ " # rddsp $1, %x1 \n" \
+ " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \
+ " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
-#define mthi1(x) \
+#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mthi $1, $ac1 \n" \
- " .word 0x00200811 \n" \
+ " # wrdsp $1, %x1 \n" \
+ " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \
+ " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (mask)); \
} while (0)
-#define mthi2(x) \
-do { \
+#define _umips_dsp_mfxxx(ins) \
+({ \
+ unsigned long __treg; \
+ \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mthi $1, $ac2 \n" \
- " .word 0x00201011 \n" \
+ " .hword 0x0001 \n" \
+ " .hword %x1 \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__treg) \
+ : "i" (ins)); \
+ __treg; \
+})
-#define mthi3(x) \
+#define _umips_dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mthi $1, $ac3 \n" \
- " .word 0x00201811 \n" \
+ " .hword 0x0001 \n" \
+ " .hword %x1 \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (ins)); \
} while (0)
-#define mtlo0(x) \
-do { \
+#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
+#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
+
+#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
+#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
+
+#define mflo0() _umips_dsp_mflo(0)
+#define mflo1() _umips_dsp_mflo(1)
+#define mflo2() _umips_dsp_mflo(2)
+#define mflo3() _umips_dsp_mflo(3)
+
+#define mfhi0() _umips_dsp_mfhi(0)
+#define mfhi1() _umips_dsp_mfhi(1)
+#define mfhi2() _umips_dsp_mfhi(2)
+#define mfhi3() _umips_dsp_mfhi(3)
+
+#define mtlo0(x) _umips_dsp_mtlo(x, 0)
+#define mtlo1(x) _umips_dsp_mtlo(x, 1)
+#define mtlo2(x) _umips_dsp_mtlo(x, 2)
+#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+
+#define mthi0(x) _umips_dsp_mthi(x, 0)
+#define mthi1(x) _umips_dsp_mthi(x, 1)
+#define mthi2(x) _umips_dsp_mthi(x, 2)
+#define mthi3(x) _umips_dsp_mthi(x, 3)
+
+#else /* !CONFIG_CPU_MICROMIPS */
+#define rddsp(mask) \
+({ \
+ unsigned int __res; \
+ \
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # mtlo $1, $ac0 \n" \
- " .word 0x00200013 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ " .set push \n" \
+ " .set noat \n" \
+ " # rddsp $1, %x1 \n" \
+ " .word 0x7c000cb8 | (%x1 << 16) \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
-#define mtlo1(x) \
+#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mtlo $1, $ac1 \n" \
- " .word 0x00200813 \n" \
+ " # wrdsp $1, %x1 \n" \
+ " .word 0x7c2004f8 | (%x1 << 11) \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
+ : \
+ : "r" (val), "i" (mask)); \
} while (0)
-#define mtlo2(x) \
-do { \
+#define _dsp_mfxxx(ins) \
+({ \
+ unsigned long __treg; \
+ \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mtlo $1, $ac2 \n" \
- " .word 0x00201013 \n" \
+ " .word (0x00000810 | %1) \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__treg) \
+ : "i" (ins)); \
+ __treg; \
+})
-#define mtlo3(x) \
+#define _dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mtlo $1, $ac3 \n" \
- " .word 0x00201813 \n" \
+ " .word (0x00200011 | %1) \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (ins)); \
} while (0)
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+
+#define mflo0() _dsp_mflo(0)
+#define mflo1() _dsp_mflo(1)
+#define mflo2() _dsp_mflo(2)
+#define mflo3() _dsp_mflo(3)
+
+#define mfhi0() _dsp_mfhi(0)
+#define mfhi1() _dsp_mfhi(1)
+#define mfhi2() _dsp_mfhi(2)
+#define mfhi3() _dsp_mfhi(3)
+
+#define mtlo0(x) _dsp_mtlo(x, 0)
+#define mtlo1(x) _dsp_mtlo(x, 1)
+#define mtlo2(x) _dsp_mtlo(x, 2)
+#define mtlo3(x) _dsp_mtlo(x, 3)
+
+#define mthi0(x) _dsp_mthi(x, 0)
+#define mthi1(x) _dsp_mthi(x, 1)
+#define mthi2(x) _dsp_mthi(x, 2)
+#define mthi3(x) _dsp_mthi(x, 3)
+
+#endif /* CONFIG_CPU_MICROMIPS */
#endif
/*
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 45cfa1ad86a6..e81d719efcd1 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -77,7 +77,7 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK (smtc_asid_mask)
-#define HW_ASID_MASK 0xff
+#define HW_ASID_MASK 0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */
@@ -140,7 +140,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+ struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
@@ -238,7 +238,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
- cpu_asid(cpu, next));
+ cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
diff --git a/arch/mips/include/asm/msc01_ic.h b/arch/mips/include/asm/msc01_ic.h
index d92406ae2841..ff7f074d073c 100644
--- a/arch/mips/include/asm/msc01_ic.h
+++ b/arch/mips/include/asm/msc01_ic.h
@@ -15,45 +15,45 @@
* Register offset addresses
*****************************************************************************/
-#define MSC01_IC_RST_OFS 0x00008 /* Software reset */
-#define MSC01_IC_ENAL_OFS 0x00100 /* Int_in enable mask 31:0 */
-#define MSC01_IC_ENAH_OFS 0x00108 /* Int_in enable mask 63:32 */
-#define MSC01_IC_DISL_OFS 0x00120 /* Int_in disable mask 31:0 */
-#define MSC01_IC_DISH_OFS 0x00128 /* Int_in disable mask 63:32 */
-#define MSC01_IC_ISBL_OFS 0x00140 /* Raw int_in 31:0 */
-#define MSC01_IC_ISBH_OFS 0x00148 /* Raw int_in 63:32 */
-#define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
-#define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
-#define MSC01_IC_LVL_OFS 0x00180 /* Disable priority int_out */
-#define MSC01_IC_RAMW_OFS 0x00180 /* Shadow set RAM (EI) */
-#define MSC01_IC_OSB_OFS 0x00188 /* Raw int_out */
-#define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
-#define MSC01_IC_GENA_OFS 0x00198 /* Global HW int enable */
-#define MSC01_IC_BASE_OFS 0x001a0 /* Base address of IC_VEC */
-#define MSC01_IC_VEC_OFS 0x001b0 /* Active int's vector address */
-#define MSC01_IC_EOI_OFS 0x001c0 /* Enable lower level ints */
-#define MSC01_IC_CFG_OFS 0x001c8 /* Configuration register */
-#define MSC01_IC_TRLD_OFS 0x001d0 /* Interval timer reload val */
-#define MSC01_IC_TVAL_OFS 0x001e0 /* Interval timer current val */
-#define MSC01_IC_TCFG_OFS 0x001f0 /* Interval timer config */
-#define MSC01_IC_SUP_OFS 0x00200 /* Set up int_in line 0 */
-#define MSC01_IC_ENA_OFS 0x00800 /* Int_in enable mask 63:0 */
-#define MSC01_IC_DIS_OFS 0x00820 /* Int_in disable mask 63:0 */
-#define MSC01_IC_ISB_OFS 0x00840 /* Raw int_in 63:0 */
-#define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
+#define MSC01_IC_RST_OFS 0x00008 /* Software reset */
+#define MSC01_IC_ENAL_OFS 0x00100 /* Int_in enable mask 31:0 */
+#define MSC01_IC_ENAH_OFS 0x00108 /* Int_in enable mask 63:32 */
+#define MSC01_IC_DISL_OFS 0x00120 /* Int_in disable mask 31:0 */
+#define MSC01_IC_DISH_OFS 0x00128 /* Int_in disable mask 63:32 */
+#define MSC01_IC_ISBL_OFS 0x00140 /* Raw int_in 31:0 */
+#define MSC01_IC_ISBH_OFS 0x00148 /* Raw int_in 63:32 */
+#define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
+#define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
+#define MSC01_IC_LVL_OFS 0x00180 /* Disable priority int_out */
+#define MSC01_IC_RAMW_OFS 0x00180 /* Shadow set RAM (EI) */
+#define MSC01_IC_OSB_OFS 0x00188 /* Raw int_out */
+#define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
+#define MSC01_IC_GENA_OFS 0x00198 /* Global HW int enable */
+#define MSC01_IC_BASE_OFS 0x001a0 /* Base address of IC_VEC */
+#define MSC01_IC_VEC_OFS 0x001b0 /* Active int's vector address */
+#define MSC01_IC_EOI_OFS 0x001c0 /* Enable lower level ints */
+#define MSC01_IC_CFG_OFS 0x001c8 /* Configuration register */
+#define MSC01_IC_TRLD_OFS 0x001d0 /* Interval timer reload val */
+#define MSC01_IC_TVAL_OFS 0x001e0 /* Interval timer current val */
+#define MSC01_IC_TCFG_OFS 0x001f0 /* Interval timer config */
+#define MSC01_IC_SUP_OFS 0x00200 /* Set up int_in line 0 */
+#define MSC01_IC_ENA_OFS 0x00800 /* Int_in enable mask 63:0 */
+#define MSC01_IC_DIS_OFS 0x00820 /* Int_in disable mask 63:0 */
+#define MSC01_IC_ISB_OFS 0x00840 /* Raw int_in 63:0 */
+#define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
/*****************************************************************************
* Register field encodings
*****************************************************************************/
-#define MSC01_IC_RST_RST_SHF 0
-#define MSC01_IC_RST_RST_MSK 0x00000001
-#define MSC01_IC_RST_RST_BIT MSC01_IC_RST_RST_MSK
-#define MSC01_IC_LVL_LVL_SHF 0
-#define MSC01_IC_LVL_LVL_MSK 0x000000ff
-#define MSC01_IC_LVL_SPUR_SHF 16
-#define MSC01_IC_LVL_SPUR_MSK 0x00010000
-#define MSC01_IC_LVL_SPUR_BIT MSC01_IC_LVL_SPUR_MSK
+#define MSC01_IC_RST_RST_SHF 0
+#define MSC01_IC_RST_RST_MSK 0x00000001
+#define MSC01_IC_RST_RST_BIT MSC01_IC_RST_RST_MSK
+#define MSC01_IC_LVL_LVL_SHF 0
+#define MSC01_IC_LVL_LVL_MSK 0x000000ff
+#define MSC01_IC_LVL_SPUR_SHF 16
+#define MSC01_IC_LVL_SPUR_MSK 0x00010000
+#define MSC01_IC_LVL_SPUR_BIT MSC01_IC_LVL_SPUR_MSK
#define MSC01_IC_RAMW_RIPL_SHF 0
#define MSC01_IC_RAMW_RIPL_MSK 0x0000003f
#define MSC01_IC_RAMW_DATA_SHF 6
@@ -63,33 +63,33 @@
#define MSC01_IC_RAMW_READ_SHF 31
#define MSC01_IC_RAMW_READ_MSK 0x80000000
#define MSC01_IC_RAMW_READ_BIT MSC01_IC_RAMW_READ_MSK
-#define MSC01_IC_OSB_OSB_SHF 0
-#define MSC01_IC_OSB_OSB_MSK 0x000000ff
-#define MSC01_IC_OSA_OSA_SHF 0
-#define MSC01_IC_OSA_OSA_MSK 0x000000ff
-#define MSC01_IC_GENA_GENA_SHF 0
-#define MSC01_IC_GENA_GENA_MSK 0x00000001
-#define MSC01_IC_GENA_GENA_BIT MSC01_IC_GENA_GENA_MSK
-#define MSC01_IC_CFG_DIS_SHF 0
-#define MSC01_IC_CFG_DIS_MSK 0x00000001
-#define MSC01_IC_CFG_DIS_BIT MSC01_IC_CFG_DIS_MSK
-#define MSC01_IC_CFG_SHFT_SHF 8
-#define MSC01_IC_CFG_SHFT_MSK 0x00000f00
-#define MSC01_IC_TCFG_ENA_SHF 0
-#define MSC01_IC_TCFG_ENA_MSK 0x00000001
-#define MSC01_IC_TCFG_ENA_BIT MSC01_IC_TCFG_ENA_MSK
-#define MSC01_IC_TCFG_INT_SHF 8
-#define MSC01_IC_TCFG_INT_MSK 0x00000100
-#define MSC01_IC_TCFG_INT_BIT MSC01_IC_TCFG_INT_MSK
-#define MSC01_IC_TCFG_EDGE_SHF 16
-#define MSC01_IC_TCFG_EDGE_MSK 0x00010000
-#define MSC01_IC_TCFG_EDGE_BIT MSC01_IC_TCFG_EDGE_MSK
-#define MSC01_IC_SUP_PRI_SHF 0
-#define MSC01_IC_SUP_PRI_MSK 0x00000007
-#define MSC01_IC_SUP_EDGE_SHF 8
-#define MSC01_IC_SUP_EDGE_MSK 0x00000100
-#define MSC01_IC_SUP_EDGE_BIT MSC01_IC_SUP_EDGE_MSK
-#define MSC01_IC_SUP_STEP 8
+#define MSC01_IC_OSB_OSB_SHF 0
+#define MSC01_IC_OSB_OSB_MSK 0x000000ff
+#define MSC01_IC_OSA_OSA_SHF 0
+#define MSC01_IC_OSA_OSA_MSK 0x000000ff
+#define MSC01_IC_GENA_GENA_SHF 0
+#define MSC01_IC_GENA_GENA_MSK 0x00000001
+#define MSC01_IC_GENA_GENA_BIT MSC01_IC_GENA_GENA_MSK
+#define MSC01_IC_CFG_DIS_SHF 0
+#define MSC01_IC_CFG_DIS_MSK 0x00000001
+#define MSC01_IC_CFG_DIS_BIT MSC01_IC_CFG_DIS_MSK
+#define MSC01_IC_CFG_SHFT_SHF 8
+#define MSC01_IC_CFG_SHFT_MSK 0x00000f00
+#define MSC01_IC_TCFG_ENA_SHF 0
+#define MSC01_IC_TCFG_ENA_MSK 0x00000001
+#define MSC01_IC_TCFG_ENA_BIT MSC01_IC_TCFG_ENA_MSK
+#define MSC01_IC_TCFG_INT_SHF 8
+#define MSC01_IC_TCFG_INT_MSK 0x00000100
+#define MSC01_IC_TCFG_INT_BIT MSC01_IC_TCFG_INT_MSK
+#define MSC01_IC_TCFG_EDGE_SHF 16
+#define MSC01_IC_TCFG_EDGE_MSK 0x00010000
+#define MSC01_IC_TCFG_EDGE_BIT MSC01_IC_TCFG_EDGE_MSK
+#define MSC01_IC_SUP_PRI_SHF 0
+#define MSC01_IC_SUP_PRI_MSK 0x00000007
+#define MSC01_IC_SUP_EDGE_SHF 8
+#define MSC01_IC_SUP_EDGE_MSK 0x00000100
+#define MSC01_IC_SUP_EDGE_BIT MSC01_IC_SUP_EDGE_MSK
+#define MSC01_IC_SUP_STEP 8
/*
* MIPS System controller interrupt register base.
@@ -100,32 +100,32 @@
* Absolute register addresses
*****************************************************************************/
-#define MSC01_IC_RST (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
-#define MSC01_IC_ENAL (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
-#define MSC01_IC_ENAH (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
-#define MSC01_IC_DISL (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
-#define MSC01_IC_DISH (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
-#define MSC01_IC_ISBL (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
-#define MSC01_IC_ISBH (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
-#define MSC01_IC_ISAL (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
-#define MSC01_IC_ISAH (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
-#define MSC01_IC_LVL (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
-#define MSC01_IC_RAMW (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
-#define MSC01_IC_OSB (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
-#define MSC01_IC_OSA (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
-#define MSC01_IC_GENA (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
-#define MSC01_IC_BASE (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
-#define MSC01_IC_VEC (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
-#define MSC01_IC_EOI (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
-#define MSC01_IC_CFG (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
-#define MSC01_IC_TRLD (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
-#define MSC01_IC_TVAL (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
-#define MSC01_IC_TCFG (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
-#define MSC01_IC_SUP (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
-#define MSC01_IC_ENA (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
-#define MSC01_IC_DIS (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
-#define MSC01_IC_ISB (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
-#define MSC01_IC_ISA (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
+#define MSC01_IC_RST (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
+#define MSC01_IC_ENAL (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
+#define MSC01_IC_ENAH (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
+#define MSC01_IC_DISL (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
+#define MSC01_IC_DISH (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
+#define MSC01_IC_ISBL (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
+#define MSC01_IC_ISBH (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
+#define MSC01_IC_ISAL (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
+#define MSC01_IC_ISAH (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
+#define MSC01_IC_LVL (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
+#define MSC01_IC_RAMW (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
+#define MSC01_IC_OSB (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
+#define MSC01_IC_OSA (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
+#define MSC01_IC_GENA (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
+#define MSC01_IC_BASE (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
+#define MSC01_IC_VEC (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
+#define MSC01_IC_EOI (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
+#define MSC01_IC_CFG (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
+#define MSC01_IC_TRLD (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
+#define MSC01_IC_TVAL (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
+#define MSC01_IC_TCFG (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
+#define MSC01_IC_SUP (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
+#define MSC01_IC_ENA (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
+#define MSC01_IC_DIS (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
+#define MSC01_IC_ISB (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
+#define MSC01_IC_ISA (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
/*
* Soc-it interrupts are configurable.
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index 42bfd5f1eeec..aef560a51a7e 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -38,11 +38,11 @@
/*
* Common SMP definitions
*/
-#define RESET_VEC_PHYS 0x1fc00000
-#define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10))
-#define BOOT_THREAD_MODE 0
-#define BOOT_NMI_LOCK 4
-#define BOOT_NMI_HANDLER 8
+#define RESET_VEC_PHYS 0x1fc00000
+#define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10))
+#define BOOT_THREAD_MODE 0
+#define BOOT_NMI_LOCK 4
+#define BOOT_NMI_HANDLER 8
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
@@ -80,7 +80,7 @@ extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
struct nlm_soc_info {
- unsigned long coremask; /* cores enabled on the soc */
+ unsigned long coremask; /* cores enabled on the soc */
unsigned long ebase;
uint64_t irqmask;
uint64_t sysbase; /* only for XLP */
@@ -88,9 +88,9 @@ struct nlm_soc_info {
spinlock_t piclock;
};
-#define nlm_get_node(i) (&nlm_nodes[i])
+#define nlm_get_node(i) (&nlm_nodes[i])
#ifdef CONFIG_CPU_XLR
-#define nlm_current_node() (&nlm_nodes[0])
+#define nlm_current_node() (&nlm_nodes[0])
#else
#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
#endif
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 72a0c788b472..419d8aef8569 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -48,7 +48,7 @@
* access 64 bit addresses or data.
*
* We need to disable interrupts because we save just the lower 32 bits of
- * registers in interrupt handling. So if we get hit by an interrupt while
+ * registers in interrupt handling. So if we get hit by an interrupt while
* using the upper 32 bits of a register, we lose.
*/
static inline uint32_t nlm_save_flags_kx(void)
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 32ba6d95d47c..8ad2e0f81719 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -49,7 +49,7 @@
*/
#define write_c0_eimr(val) \
do { \
- if (sizeof(unsigned long) == 4) { \
+ if (sizeof(unsigned long) == 4) { \
unsigned long __flags; \
\
local_irq_save(__flags); \
@@ -68,6 +68,85 @@ do { \
__write_64bit_c0_register($9, 7, (val)); \
} while (0)
+/*
+ * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with
+ * standard functions will be very inefficient. This provides
+ * optimized functions for the normal operations on the registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv $1, $1, %0\n\t"
+ "dmtc0 $1, $9, 6\n\t"
+ ".set pop"
+ : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "xor $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+/*
+ * Read c0 eimr and c0 eirr, do AND of the two values, the result is
+ * the interrupts which are raised and are not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+ uint64_t val;
+
+#ifdef CONFIG_64BIT
+ val = read_c0_eimr() & read_c0_eirr();
+#else
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "dmfc0 %M0, $9, 6\n\t"
+ "dmfc0 %L0, $9, 7\n\t"
+ "and %M0, %L0\n\t"
+ "dsll %L0, %M0, 32\n\t"
+ "dsra %M0, %M0, 32\n\t"
+ "dsra %L0, %L0, 32\n\t"
+ ".set pop"
+ : "=r" (val));
+#endif
+
+ return val;
+}
+
static inline int hard_smp_processor_id(void)
{
return __read_32bit_c0_register($15, 1) & 0x3ff;
@@ -208,7 +287,7 @@ do { \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
- __asm__ __volatile__( \
+ __asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg ", " #sel "\n\t" \
".set\tmips0\n\t" \
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
index ca95133f1ad1..790f0f1e55c6 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
@@ -178,9 +178,9 @@
#define nlm_read_bridge_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_bridge_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_bridge_pcibase(node) \
+#define nlm_get_bridge_pcibase(node) \
nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
-#define nlm_get_bridge_regbase(node) \
+#define nlm_get_bridge_regbase(node) \
(nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
index 7b63a6b722a0..6d2e58a9a542 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
@@ -46,6 +46,8 @@
#define CPU_BLOCKID_FPU 9
#define CPU_BLOCKID_MAP 10
+#define ICU_DEFEATURE 0x100
+
#define LSU_DEFEATURE 0x304
#define LSU_DEBUG_ADDR 0x305
#define LSU_DEBUG_DATA0 0x306
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 2c63f9754640..9fac46fb7913 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -35,12 +35,12 @@
#ifndef __NLM_HAL_IOMAP_H__
#define __NLM_HAL_IOMAP_H__
-#define XLP_DEFAULT_IO_BASE 0x18000000
+#define XLP_DEFAULT_IO_BASE 0x18000000
#define XLP_DEFAULT_PCI_ECFG_BASE XLP_DEFAULT_IO_BASE
#define XLP_DEFAULT_PCI_CFG_BASE 0x1c000000
#define NMI_BASE 0xbfc00000
-#define XLP_IO_CLK 133333333
+#define XLP_IO_CLK 133333333
#define XLP_PCIE_CFG_SIZE 0x1000 /* 4K */
#define XLP_PCIE_DEV_BLK_SIZE (8 * XLP_PCIE_CFG_SIZE)
@@ -96,8 +96,8 @@
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
/* SD flash */
-#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
-#define XLP_IO_MMC_OFFSET(node, slot) \
+#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
+#define XLP_IO_MMC_OFFSET(node, slot) \
((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
/* PCI config header register id's */
@@ -125,26 +125,26 @@
#define XLP_PCI_SBB_WT_REG 0x3f
/* PCI IDs for SoC device */
-#define PCI_VENDOR_NETLOGIC 0x184e
-
-#define PCI_DEVICE_ID_NLM_ROOT 0x1001
-#define PCI_DEVICE_ID_NLM_ICI 0x1002
-#define PCI_DEVICE_ID_NLM_PIC 0x1003
-#define PCI_DEVICE_ID_NLM_PCIE 0x1004
-#define PCI_DEVICE_ID_NLM_EHCI 0x1007
-#define PCI_DEVICE_ID_NLM_OHCI 0x1008
-#define PCI_DEVICE_ID_NLM_NAE 0x1009
-#define PCI_DEVICE_ID_NLM_POE 0x100A
-#define PCI_DEVICE_ID_NLM_FMN 0x100B
-#define PCI_DEVICE_ID_NLM_RAID 0x100D
-#define PCI_DEVICE_ID_NLM_SAE 0x100D
-#define PCI_DEVICE_ID_NLM_RSA 0x100E
-#define PCI_DEVICE_ID_NLM_CMP 0x100F
-#define PCI_DEVICE_ID_NLM_UART 0x1010
-#define PCI_DEVICE_ID_NLM_I2C 0x1011
-#define PCI_DEVICE_ID_NLM_NOR 0x1015
-#define PCI_DEVICE_ID_NLM_NAND 0x1016
-#define PCI_DEVICE_ID_NLM_MMC 0x1018
+#define PCI_VENDOR_NETLOGIC 0x184e
+
+#define PCI_DEVICE_ID_NLM_ROOT 0x1001
+#define PCI_DEVICE_ID_NLM_ICI 0x1002
+#define PCI_DEVICE_ID_NLM_PIC 0x1003
+#define PCI_DEVICE_ID_NLM_PCIE 0x1004
+#define PCI_DEVICE_ID_NLM_EHCI 0x1007
+#define PCI_DEVICE_ID_NLM_OHCI 0x1008
+#define PCI_DEVICE_ID_NLM_NAE 0x1009
+#define PCI_DEVICE_ID_NLM_POE 0x100A
+#define PCI_DEVICE_ID_NLM_FMN 0x100B
+#define PCI_DEVICE_ID_NLM_RAID 0x100D
+#define PCI_DEVICE_ID_NLM_SAE 0x100D
+#define PCI_DEVICE_ID_NLM_RSA 0x100E
+#define PCI_DEVICE_ID_NLM_CMP 0x100F
+#define PCI_DEVICE_ID_NLM_UART 0x1010
+#define PCI_DEVICE_ID_NLM_I2C 0x1011
+#define PCI_DEVICE_ID_NLM_NOR 0x1015
+#define PCI_DEVICE_ID_NLM_NAND 0x1016
+#define PCI_DEVICE_ID_NLM_MMC 0x1018
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
index 66c323d1bd7d..b559cb9f56ea 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -33,42 +33,42 @@
*/
#ifndef __NLM_HAL_PCIBUS_H__
-#define __NLM_HAL_PCIBUS_H__
+#define __NLM_HAL_PCIBUS_H__
/* PCIE Memory and IO regions */
-#define PCIE_MEM_BASE 0xd0000000ULL
-#define PCIE_MEM_LIMIT 0xdfffffffULL
-#define PCIE_IO_BASE 0x14000000ULL
-#define PCIE_IO_LIMIT 0x15ffffffULL
+#define PCIE_MEM_BASE 0xd0000000ULL
+#define PCIE_MEM_LIMIT 0xdfffffffULL
+#define PCIE_IO_BASE 0x14000000ULL
+#define PCIE_IO_LIMIT 0x15ffffffULL
-#define PCIE_BRIDGE_CMD 0x1
-#define PCIE_BRIDGE_MSI_CAP 0x14
-#define PCIE_BRIDGE_MSI_ADDRL 0x15
-#define PCIE_BRIDGE_MSI_ADDRH 0x16
-#define PCIE_BRIDGE_MSI_DATA 0x17
+#define PCIE_BRIDGE_CMD 0x1
+#define PCIE_BRIDGE_MSI_CAP 0x14
+#define PCIE_BRIDGE_MSI_ADDRL 0x15
+#define PCIE_BRIDGE_MSI_ADDRH 0x16
+#define PCIE_BRIDGE_MSI_DATA 0x17
/* XLP Global PCIE configuration space registers */
-#define PCIE_BYTE_SWAP_MEM_BASE 0x247
-#define PCIE_BYTE_SWAP_MEM_LIM 0x248
-#define PCIE_BYTE_SWAP_IO_BASE 0x249
-#define PCIE_BYTE_SWAP_IO_LIM 0x24A
-#define PCIE_MSI_STATUS 0x25A
-#define PCIE_MSI_EN 0x25B
-#define PCIE_INT_EN0 0x261
+#define PCIE_BYTE_SWAP_MEM_BASE 0x247
+#define PCIE_BYTE_SWAP_MEM_LIM 0x248
+#define PCIE_BYTE_SWAP_IO_BASE 0x249
+#define PCIE_BYTE_SWAP_IO_LIM 0x24A
+#define PCIE_MSI_STATUS 0x25A
+#define PCIE_MSI_EN 0x25B
+#define PCIE_INT_EN0 0x261
/* PCIE_MSI_EN */
-#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
+#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
/* PCIE_INT_EN0 */
-#define PCIE_MSI_INT_EN (1 << 9)
+#define PCIE_MSI_INT_EN (1 << 9)
#ifndef __ASSEMBLY__
-#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_pcie_base(node, inst) \
+#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_pcie_base(node, inst) \
nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst))
-#define nlm_get_pcie_regbase(node, inst) \
+#define nlm_get_pcie_regbase(node, inst) \
(nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ)
int xlp_pcie_link_irt(int link);
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index b2e53a5383ab..3df53017fe51 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -36,7 +36,7 @@
#define _NLM_HAL_PIC_H
/* PIC Specific registers */
-#define PIC_CTRL 0x00
+#define PIC_CTRL 0x00
/* PIC control register defines */
#define PIC_CTRL_ITV 32 /* interrupt timeout value */
@@ -71,41 +71,41 @@
#define PIC_IRT_DB 16 /* Destination base */
#define PIC_IRT_DTE 0 /* Destination thread enables */
-#define PIC_BYTESWAP 0x02
-#define PIC_STATUS 0x04
+#define PIC_BYTESWAP 0x02
+#define PIC_STATUS 0x04
#define PIC_INTR_TIMEOUT 0x06
#define PIC_ICI0_INTR_TIMEOUT 0x08
#define PIC_ICI1_INTR_TIMEOUT 0x0a
#define PIC_ICI2_INTR_TIMEOUT 0x0c
#define PIC_IPI_CTL 0x0e
-#define PIC_INT_ACK 0x10
-#define PIC_INT_PENDING0 0x12
-#define PIC_INT_PENDING1 0x14
-#define PIC_INT_PENDING2 0x16
-
-#define PIC_WDOG0_MAXVAL 0x18
-#define PIC_WDOG0_COUNT 0x1a
-#define PIC_WDOG0_ENABLE0 0x1c
-#define PIC_WDOG0_ENABLE1 0x1e
-#define PIC_WDOG0_BEATCMD 0x20
-#define PIC_WDOG0_BEAT0 0x22
-#define PIC_WDOG0_BEAT1 0x24
-
-#define PIC_WDOG1_MAXVAL 0x26
-#define PIC_WDOG1_COUNT 0x28
-#define PIC_WDOG1_ENABLE0 0x2a
-#define PIC_WDOG1_ENABLE1 0x2c
-#define PIC_WDOG1_BEATCMD 0x2e
-#define PIC_WDOG1_BEAT0 0x30
-#define PIC_WDOG1_BEAT1 0x32
-
-#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
-#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
+#define PIC_INT_ACK 0x10
+#define PIC_INT_PENDING0 0x12
+#define PIC_INT_PENDING1 0x14
+#define PIC_INT_PENDING2 0x16
+
+#define PIC_WDOG0_MAXVAL 0x18
+#define PIC_WDOG0_COUNT 0x1a
+#define PIC_WDOG0_ENABLE0 0x1c
+#define PIC_WDOG0_ENABLE1 0x1e
+#define PIC_WDOG0_BEATCMD 0x20
+#define PIC_WDOG0_BEAT0 0x22
+#define PIC_WDOG0_BEAT1 0x24
+
+#define PIC_WDOG1_MAXVAL 0x26
+#define PIC_WDOG1_COUNT 0x28
+#define PIC_WDOG1_ENABLE0 0x2a
+#define PIC_WDOG1_ENABLE1 0x2c
+#define PIC_WDOG1_BEATCMD 0x2e
+#define PIC_WDOG1_BEAT0 0x30
+#define PIC_WDOG1_BEAT1 0x32
+
+#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
+#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
#define PIC_TIMER0_MAXVAL 0x34
#define PIC_TIMER1_MAXVAL 0x36
@@ -127,28 +127,28 @@
#define PIC_TIMER7_COUNT 0x52
#define PIC_TIMER_COUNT(i) (PIC_TIMER0_COUNT + ((i) * 2))
-#define PIC_ITE0_N0_N1 0x54
-#define PIC_ITE1_N0_N1 0x58
-#define PIC_ITE2_N0_N1 0x5c
-#define PIC_ITE3_N0_N1 0x60
-#define PIC_ITE4_N0_N1 0x64
-#define PIC_ITE5_N0_N1 0x68
-#define PIC_ITE6_N0_N1 0x6c
-#define PIC_ITE7_N0_N1 0x70
-#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
-
-#define PIC_ITE0_N2_N3 0x56
-#define PIC_ITE1_N2_N3 0x5a
-#define PIC_ITE2_N2_N3 0x5e
-#define PIC_ITE3_N2_N3 0x62
-#define PIC_ITE4_N2_N3 0x66
-#define PIC_ITE5_N2_N3 0x6a
-#define PIC_ITE6_N2_N3 0x6e
-#define PIC_ITE7_N2_N3 0x72
-#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
-
-#define PIC_IRT0 0x74
-#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
+#define PIC_ITE0_N0_N1 0x54
+#define PIC_ITE1_N0_N1 0x58
+#define PIC_ITE2_N0_N1 0x5c
+#define PIC_ITE3_N0_N1 0x60
+#define PIC_ITE4_N0_N1 0x64
+#define PIC_ITE5_N0_N1 0x68
+#define PIC_ITE6_N0_N1 0x6c
+#define PIC_ITE7_N0_N1 0x70
+#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
+
+#define PIC_ITE0_N2_N3 0x56
+#define PIC_ITE1_N2_N3 0x5a
+#define PIC_ITE2_N2_N3 0x5e
+#define PIC_ITE3_N2_N3 0x62
+#define PIC_ITE4_N2_N3 0x66
+#define PIC_ITE5_N2_N3 0x6a
+#define PIC_ITE6_N2_N3 0x6e
+#define PIC_ITE7_N2_N3 0x72
+#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
+
+#define PIC_IRT0 0x74
+#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
#define TIMER_CYCLES_MAXVAL 0xffffffffffffffffULL
@@ -261,6 +261,8 @@
#define PIC_LOCAL_SCHEDULING 1
#define PIC_GLOBAL_SCHEDULING 0
+#define PIC_CLK_HZ 133333333
+
#define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r)
#define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
@@ -315,6 +317,12 @@ nlm_pic_read_timer(uint64_t base, int timer)
return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
}
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return (uint32_t)nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
+}
+
static inline void
nlm_pic_write_timer(uint64_t base, int timer, uint64_t value)
{
@@ -376,9 +384,9 @@ nlm_pic_ack(uint64_t base, int irt_num)
}
static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
{
- nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
+ nlm_pic_write_irt_direct(base, irt, en, 0, 0, irq, hwt);
}
int nlm_irq_to_irt(int irq);
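[Reviewer note] Beyond the whitespace cleanup, this file gains real changes: nlm_pic_init_irt() takes a new "en" argument, forwarded to nlm_pic_write_irt_direct() in the slot that was previously hard-coded to 0, so callers can program a routing entry without enabling it yet; nlm_pic_read_timer32() truncates the 64-bit PIC timer to its low word; and PIC_CLK_HZ records the 133.33 MHz PIC clock. A minimal usage sketch follows -- the helper names and the assumption that "picbase" is a valid PIC register base are mine, not part of the patch:

        /* Hypothetical helpers, for illustration only (not in the patch). */
        static inline uint64_t xlp_pic_ticks_to_usecs(uint64_t ticks)
        {
                /* PIC timers tick at PIC_CLK_HZ (133333333 Hz on XLP) */
                return (ticks * 1000000ULL) / PIC_CLK_HZ;
        }

        static inline void xlp_route_irt_disabled(uint64_t picbase, int irt,
                                                  int irq, int hwt)
        {
                /* program the routing now, flip the enable bit on later */
                nlm_pic_init_irt(picbase, irt, irq, hwt, 0);
        }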
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index 258e8cc00e99..470e52bfc061 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -40,89 +40,89 @@
* @author Netlogic Microsystems
* @brief HAL for System configuration registers
*/
-#define SYS_CHIP_RESET 0x00
-#define SYS_POWER_ON_RESET_CFG 0x01
-#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
-#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
-#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
-#define SYS_EFUSE_DEVICE_CFG3 0x05
-#define SYS_EFUSE_DEVICE_CFG4 0x06
-#define SYS_EFUSE_DEVICE_CFG5 0x07
-#define SYS_EFUSE_DEVICE_CFG6 0x08
-#define SYS_EFUSE_DEVICE_CFG7 0x09
-#define SYS_PLL_CTRL 0x0a
-#define SYS_CPU_RESET 0x0b
-#define SYS_CPU_NONCOHERENT_MODE 0x0d
-#define SYS_CORE_DFS_DIS_CTRL 0x0e
-#define SYS_CORE_DFS_RST_CTRL 0x0f
-#define SYS_CORE_DFS_BYP_CTRL 0x10
-#define SYS_CORE_DFS_PHA_CTRL 0x11
-#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
-#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
-#define SYS_CORE_DFS_DIV_VALUE 0x14
-#define SYS_RESET 0x15
-#define SYS_DFS_DIS_CTRL 0x16
-#define SYS_DFS_RST_CTRL 0x17
-#define SYS_DFS_BYP_CTRL 0x18
-#define SYS_DFS_DIV_INC_CTRL 0x19
-#define SYS_DFS_DIV_DEC_CTRL 0x1a
-#define SYS_DFS_DIV_VALUE0 0x1b
-#define SYS_DFS_DIV_VALUE1 0x1c
-#define SYS_SENSE_AMP_DLY 0x1d
-#define SYS_SOC_SENSE_AMP_DLY 0x1e
-#define SYS_CTRL0 0x1f
-#define SYS_CTRL1 0x20
-#define SYS_TIMEOUT_BS1 0x21
-#define SYS_BYTE_SWAP 0x22
-#define SYS_VRM_VID 0x23
-#define SYS_PWR_RAM_CMD 0x24
-#define SYS_PWR_RAM_ADDR 0x25
-#define SYS_PWR_RAM_DATA0 0x26
-#define SYS_PWR_RAM_DATA1 0x27
-#define SYS_PWR_RAM_DATA2 0x28
-#define SYS_PWR_UCODE 0x29
-#define SYS_CPU0_PWR_STATUS 0x2a
-#define SYS_CPU1_PWR_STATUS 0x2b
-#define SYS_CPU2_PWR_STATUS 0x2c
-#define SYS_CPU3_PWR_STATUS 0x2d
-#define SYS_CPU4_PWR_STATUS 0x2e
-#define SYS_CPU5_PWR_STATUS 0x2f
-#define SYS_CPU6_PWR_STATUS 0x30
-#define SYS_CPU7_PWR_STATUS 0x31
-#define SYS_STATUS 0x32
-#define SYS_INT_POL 0x33
-#define SYS_INT_TYPE 0x34
-#define SYS_INT_STATUS 0x35
-#define SYS_INT_MASK0 0x36
-#define SYS_INT_MASK1 0x37
-#define SYS_UCO_S_ECC 0x38
-#define SYS_UCO_M_ECC 0x39
-#define SYS_UCO_ADDR 0x3a
-#define SYS_UCO_INSTR 0x3b
-#define SYS_MEM_BIST0 0x3c
-#define SYS_MEM_BIST1 0x3d
-#define SYS_MEM_BIST2 0x3e
-#define SYS_MEM_BIST3 0x3f
-#define SYS_MEM_BIST4 0x40
-#define SYS_MEM_BIST5 0x41
-#define SYS_MEM_BIST6 0x42
-#define SYS_MEM_BIST7 0x43
-#define SYS_MEM_BIST8 0x44
-#define SYS_MEM_BIST9 0x45
-#define SYS_MEM_BIST10 0x46
-#define SYS_MEM_BIST11 0x47
-#define SYS_MEM_BIST12 0x48
-#define SYS_SCRTCH0 0x49
-#define SYS_SCRTCH1 0x4a
-#define SYS_SCRTCH2 0x4b
-#define SYS_SCRTCH3 0x4c
+#define SYS_CHIP_RESET 0x00
+#define SYS_POWER_ON_RESET_CFG 0x01
+#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
+#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
+#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
+#define SYS_EFUSE_DEVICE_CFG3 0x05
+#define SYS_EFUSE_DEVICE_CFG4 0x06
+#define SYS_EFUSE_DEVICE_CFG5 0x07
+#define SYS_EFUSE_DEVICE_CFG6 0x08
+#define SYS_EFUSE_DEVICE_CFG7 0x09
+#define SYS_PLL_CTRL 0x0a
+#define SYS_CPU_RESET 0x0b
+#define SYS_CPU_NONCOHERENT_MODE 0x0d
+#define SYS_CORE_DFS_DIS_CTRL 0x0e
+#define SYS_CORE_DFS_RST_CTRL 0x0f
+#define SYS_CORE_DFS_BYP_CTRL 0x10
+#define SYS_CORE_DFS_PHA_CTRL 0x11
+#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
+#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
+#define SYS_CORE_DFS_DIV_VALUE 0x14
+#define SYS_RESET 0x15
+#define SYS_DFS_DIS_CTRL 0x16
+#define SYS_DFS_RST_CTRL 0x17
+#define SYS_DFS_BYP_CTRL 0x18
+#define SYS_DFS_DIV_INC_CTRL 0x19
+#define SYS_DFS_DIV_DEC_CTRL 0x1a
+#define SYS_DFS_DIV_VALUE0 0x1b
+#define SYS_DFS_DIV_VALUE1 0x1c
+#define SYS_SENSE_AMP_DLY 0x1d
+#define SYS_SOC_SENSE_AMP_DLY 0x1e
+#define SYS_CTRL0 0x1f
+#define SYS_CTRL1 0x20
+#define SYS_TIMEOUT_BS1 0x21
+#define SYS_BYTE_SWAP 0x22
+#define SYS_VRM_VID 0x23
+#define SYS_PWR_RAM_CMD 0x24
+#define SYS_PWR_RAM_ADDR 0x25
+#define SYS_PWR_RAM_DATA0 0x26
+#define SYS_PWR_RAM_DATA1 0x27
+#define SYS_PWR_RAM_DATA2 0x28
+#define SYS_PWR_UCODE 0x29
+#define SYS_CPU0_PWR_STATUS 0x2a
+#define SYS_CPU1_PWR_STATUS 0x2b
+#define SYS_CPU2_PWR_STATUS 0x2c
+#define SYS_CPU3_PWR_STATUS 0x2d
+#define SYS_CPU4_PWR_STATUS 0x2e
+#define SYS_CPU5_PWR_STATUS 0x2f
+#define SYS_CPU6_PWR_STATUS 0x30
+#define SYS_CPU7_PWR_STATUS 0x31
+#define SYS_STATUS 0x32
+#define SYS_INT_POL 0x33
+#define SYS_INT_TYPE 0x34
+#define SYS_INT_STATUS 0x35
+#define SYS_INT_MASK0 0x36
+#define SYS_INT_MASK1 0x37
+#define SYS_UCO_S_ECC 0x38
+#define SYS_UCO_M_ECC 0x39
+#define SYS_UCO_ADDR 0x3a
+#define SYS_UCO_INSTR 0x3b
+#define SYS_MEM_BIST0 0x3c
+#define SYS_MEM_BIST1 0x3d
+#define SYS_MEM_BIST2 0x3e
+#define SYS_MEM_BIST3 0x3f
+#define SYS_MEM_BIST4 0x40
+#define SYS_MEM_BIST5 0x41
+#define SYS_MEM_BIST6 0x42
+#define SYS_MEM_BIST7 0x43
+#define SYS_MEM_BIST8 0x44
+#define SYS_MEM_BIST9 0x45
+#define SYS_MEM_BIST10 0x46
+#define SYS_MEM_BIST11 0x47
+#define SYS_MEM_BIST12 0x48
+#define SYS_SCRTCH0 0x49
+#define SYS_SCRTCH1 0x4a
+#define SYS_SCRTCH2 0x4b
+#define SYS_SCRTCH3 0x4c
#ifndef __ASSEMBLY__
-#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
-#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
+#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
+#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif
#endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/uart.h b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
index 6a7046ca094d..86d16e1e6072 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/uart.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
@@ -91,8 +91,8 @@
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
-#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_uart_pcibase(node, inst) \
nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
#define nlm_get_uart_regbase(node, inst) \
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 68d5167c86bb..2a78929cef73 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -38,108 +38,108 @@
#include <asm/netlogic/mips-extns.h> /* for COP2 access */
/* Station IDs */
-#define FMN_STNID_CPU0 0x00
-#define FMN_STNID_CPU1 0x08
-#define FMN_STNID_CPU2 0x10
-#define FMN_STNID_CPU3 0x18
-#define FMN_STNID_CPU4 0x20
-#define FMN_STNID_CPU5 0x28
-#define FMN_STNID_CPU6 0x30
-#define FMN_STNID_CPU7 0x38
-
-#define FMN_STNID_XGS0_TX 64
-#define FMN_STNID_XMAC0_00_TX 64
-#define FMN_STNID_XMAC0_01_TX 65
-#define FMN_STNID_XMAC0_02_TX 66
-#define FMN_STNID_XMAC0_03_TX 67
-#define FMN_STNID_XMAC0_04_TX 68
-#define FMN_STNID_XMAC0_05_TX 69
-#define FMN_STNID_XMAC0_06_TX 70
-#define FMN_STNID_XMAC0_07_TX 71
-#define FMN_STNID_XMAC0_08_TX 72
-#define FMN_STNID_XMAC0_09_TX 73
-#define FMN_STNID_XMAC0_10_TX 74
-#define FMN_STNID_XMAC0_11_TX 75
-#define FMN_STNID_XMAC0_12_TX 76
-#define FMN_STNID_XMAC0_13_TX 77
-#define FMN_STNID_XMAC0_14_TX 78
-#define FMN_STNID_XMAC0_15_TX 79
-
-#define FMN_STNID_XGS1_TX 80
-#define FMN_STNID_XMAC1_00_TX 80
-#define FMN_STNID_XMAC1_01_TX 81
-#define FMN_STNID_XMAC1_02_TX 82
-#define FMN_STNID_XMAC1_03_TX 83
-#define FMN_STNID_XMAC1_04_TX 84
-#define FMN_STNID_XMAC1_05_TX 85
-#define FMN_STNID_XMAC1_06_TX 86
-#define FMN_STNID_XMAC1_07_TX 87
-#define FMN_STNID_XMAC1_08_TX 88
-#define FMN_STNID_XMAC1_09_TX 89
-#define FMN_STNID_XMAC1_10_TX 90
-#define FMN_STNID_XMAC1_11_TX 91
-#define FMN_STNID_XMAC1_12_TX 92
-#define FMN_STNID_XMAC1_13_TX 93
-#define FMN_STNID_XMAC1_14_TX 94
-#define FMN_STNID_XMAC1_15_TX 95
-
-#define FMN_STNID_GMAC 96
-#define FMN_STNID_GMACJFR_0 96
-#define FMN_STNID_GMACRFR_0 97
-#define FMN_STNID_GMACTX0 98
-#define FMN_STNID_GMACTX1 99
-#define FMN_STNID_GMACTX2 100
-#define FMN_STNID_GMACTX3 101
-#define FMN_STNID_GMACJFR_1 102
-#define FMN_STNID_GMACRFR_1 103
-
-#define FMN_STNID_DMA 104
-#define FMN_STNID_DMA_0 104
-#define FMN_STNID_DMA_1 105
-#define FMN_STNID_DMA_2 106
-#define FMN_STNID_DMA_3 107
-
-#define FMN_STNID_XGS0FR 112
-#define FMN_STNID_XMAC0JFR 112
-#define FMN_STNID_XMAC0RFR 113
-
-#define FMN_STNID_XGS1FR 114
-#define FMN_STNID_XMAC1JFR 114
-#define FMN_STNID_XMAC1RFR 115
-#define FMN_STNID_SEC 120
-#define FMN_STNID_SEC0 120
-#define FMN_STNID_SEC1 121
-#define FMN_STNID_SEC2 122
-#define FMN_STNID_SEC3 123
-#define FMN_STNID_PK0 124
-#define FMN_STNID_SEC_RSA 124
-#define FMN_STNID_SEC_RSVD0 125
-#define FMN_STNID_SEC_RSVD1 126
-#define FMN_STNID_SEC_RSVD2 127
-
-#define FMN_STNID_GMAC1 80
-#define FMN_STNID_GMAC1_FR_0 81
-#define FMN_STNID_GMAC1_TX0 82
-#define FMN_STNID_GMAC1_TX1 83
-#define FMN_STNID_GMAC1_TX2 84
-#define FMN_STNID_GMAC1_TX3 85
-#define FMN_STNID_GMAC1_FR_1 87
-#define FMN_STNID_GMAC0 96
-#define FMN_STNID_GMAC0_FR_0 97
-#define FMN_STNID_GMAC0_TX0 98
-#define FMN_STNID_GMAC0_TX1 99
-#define FMN_STNID_GMAC0_TX2 100
-#define FMN_STNID_GMAC0_TX3 101
-#define FMN_STNID_GMAC0_FR_1 103
-#define FMN_STNID_CMP_0 108
-#define FMN_STNID_CMP_1 109
-#define FMN_STNID_CMP_2 110
-#define FMN_STNID_CMP_3 111
-#define FMN_STNID_PCIE_0 116
-#define FMN_STNID_PCIE_1 117
-#define FMN_STNID_PCIE_2 118
-#define FMN_STNID_PCIE_3 119
-#define FMN_STNID_XLS_PK0 121
+#define FMN_STNID_CPU0 0x00
+#define FMN_STNID_CPU1 0x08
+#define FMN_STNID_CPU2 0x10
+#define FMN_STNID_CPU3 0x18
+#define FMN_STNID_CPU4 0x20
+#define FMN_STNID_CPU5 0x28
+#define FMN_STNID_CPU6 0x30
+#define FMN_STNID_CPU7 0x38
+
+#define FMN_STNID_XGS0_TX 64
+#define FMN_STNID_XMAC0_00_TX 64
+#define FMN_STNID_XMAC0_01_TX 65
+#define FMN_STNID_XMAC0_02_TX 66
+#define FMN_STNID_XMAC0_03_TX 67
+#define FMN_STNID_XMAC0_04_TX 68
+#define FMN_STNID_XMAC0_05_TX 69
+#define FMN_STNID_XMAC0_06_TX 70
+#define FMN_STNID_XMAC0_07_TX 71
+#define FMN_STNID_XMAC0_08_TX 72
+#define FMN_STNID_XMAC0_09_TX 73
+#define FMN_STNID_XMAC0_10_TX 74
+#define FMN_STNID_XMAC0_11_TX 75
+#define FMN_STNID_XMAC0_12_TX 76
+#define FMN_STNID_XMAC0_13_TX 77
+#define FMN_STNID_XMAC0_14_TX 78
+#define FMN_STNID_XMAC0_15_TX 79
+
+#define FMN_STNID_XGS1_TX 80
+#define FMN_STNID_XMAC1_00_TX 80
+#define FMN_STNID_XMAC1_01_TX 81
+#define FMN_STNID_XMAC1_02_TX 82
+#define FMN_STNID_XMAC1_03_TX 83
+#define FMN_STNID_XMAC1_04_TX 84
+#define FMN_STNID_XMAC1_05_TX 85
+#define FMN_STNID_XMAC1_06_TX 86
+#define FMN_STNID_XMAC1_07_TX 87
+#define FMN_STNID_XMAC1_08_TX 88
+#define FMN_STNID_XMAC1_09_TX 89
+#define FMN_STNID_XMAC1_10_TX 90
+#define FMN_STNID_XMAC1_11_TX 91
+#define FMN_STNID_XMAC1_12_TX 92
+#define FMN_STNID_XMAC1_13_TX 93
+#define FMN_STNID_XMAC1_14_TX 94
+#define FMN_STNID_XMAC1_15_TX 95
+
+#define FMN_STNID_GMAC 96
+#define FMN_STNID_GMACJFR_0 96
+#define FMN_STNID_GMACRFR_0 97
+#define FMN_STNID_GMACTX0 98
+#define FMN_STNID_GMACTX1 99
+#define FMN_STNID_GMACTX2 100
+#define FMN_STNID_GMACTX3 101
+#define FMN_STNID_GMACJFR_1 102
+#define FMN_STNID_GMACRFR_1 103
+
+#define FMN_STNID_DMA 104
+#define FMN_STNID_DMA_0 104
+#define FMN_STNID_DMA_1 105
+#define FMN_STNID_DMA_2 106
+#define FMN_STNID_DMA_3 107
+
+#define FMN_STNID_XGS0FR 112
+#define FMN_STNID_XMAC0JFR 112
+#define FMN_STNID_XMAC0RFR 113
+
+#define FMN_STNID_XGS1FR 114
+#define FMN_STNID_XMAC1JFR 114
+#define FMN_STNID_XMAC1RFR 115
+#define FMN_STNID_SEC 120
+#define FMN_STNID_SEC0 120
+#define FMN_STNID_SEC1 121
+#define FMN_STNID_SEC2 122
+#define FMN_STNID_SEC3 123
+#define FMN_STNID_PK0 124
+#define FMN_STNID_SEC_RSA 124
+#define FMN_STNID_SEC_RSVD0 125
+#define FMN_STNID_SEC_RSVD1 126
+#define FMN_STNID_SEC_RSVD2 127
+
+#define FMN_STNID_GMAC1 80
+#define FMN_STNID_GMAC1_FR_0 81
+#define FMN_STNID_GMAC1_TX0 82
+#define FMN_STNID_GMAC1_TX1 83
+#define FMN_STNID_GMAC1_TX2 84
+#define FMN_STNID_GMAC1_TX3 85
+#define FMN_STNID_GMAC1_FR_1 87
+#define FMN_STNID_GMAC0 96
+#define FMN_STNID_GMAC0_FR_0 97
+#define FMN_STNID_GMAC0_TX0 98
+#define FMN_STNID_GMAC0_TX1 99
+#define FMN_STNID_GMAC0_TX2 100
+#define FMN_STNID_GMAC0_TX3 101
+#define FMN_STNID_GMAC0_FR_1 103
+#define FMN_STNID_CMP_0 108
+#define FMN_STNID_CMP_1 109
+#define FMN_STNID_CMP_2 110
+#define FMN_STNID_CMP_3 111
+#define FMN_STNID_PCIE_0 116
+#define FMN_STNID_PCIE_1 117
+#define FMN_STNID_PCIE_2 118
+#define FMN_STNID_PCIE_3 119
+#define FMN_STNID_XLS_PK0 121
#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
@@ -175,25 +175,25 @@
#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
-#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
-#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
-#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
-#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
-#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
-
-#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
-#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
-#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
-#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
-
-#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
-#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
-#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
-#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
-
-#define FMN_STN_RX_QSIZE 256
-#define FMN_NSTATIONS 128
-#define FMN_CORE_NBUCKETS 8
+#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
+#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
+#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
+#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
+#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
+
+#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
+#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
+#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
+#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
+
+#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
+#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
+#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
+#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
+
+#define FMN_STN_RX_QSIZE 256
+#define FMN_NSTATIONS 128
+#define FMN_CORE_NBUCKETS 8
static inline void nlm_msgsnd(unsigned int stid)
{
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h
index 2e768f032e83..ff4533d6ee64 100644
--- a/arch/mips/include/asm/netlogic/xlr/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlr/iomap.h
@@ -35,66 +35,66 @@
#ifndef _ASM_NLM_IOMAP_H
#define _ASM_NLM_IOMAP_H
-#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
-#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
-#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
-#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
-#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
-#define NETLOGIC_IO_PIC_OFFSET 0x08000
-#define NETLOGIC_IO_UART_0_OFFSET 0x14000
-#define NETLOGIC_IO_UART_1_OFFSET 0x15100
+#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
+#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
+#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
+#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
+#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
+#define NETLOGIC_IO_PIC_OFFSET 0x08000
+#define NETLOGIC_IO_UART_0_OFFSET 0x14000
+#define NETLOGIC_IO_UART_1_OFFSET 0x15100
-#define NETLOGIC_IO_SIZE 0x1000
+#define NETLOGIC_IO_SIZE 0x1000
-#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
+#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
-#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
-#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
+#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
+#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
-#define NETLOGIC_IO_SRAM_OFFSET 0x07000
+#define NETLOGIC_IO_SRAM_OFFSET 0x07000
-#define NETLOGIC_IO_PCIX_OFFSET 0x09000
-#define NETLOGIC_IO_HT_OFFSET 0x0A000
+#define NETLOGIC_IO_PCIX_OFFSET 0x09000
+#define NETLOGIC_IO_HT_OFFSET 0x0A000
-#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
+#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
-#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
-#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
-#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
-#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
+#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
+#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
+#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
+#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
/* XLS devices */
-#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
-#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
-#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
-#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
+#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
+#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
+#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
+#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
-#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
-#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
-#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
-#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
+#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
+#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
+#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
+#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
-#define NETLOGIC_IO_USB_0_OFFSET 0x24000
-#define NETLOGIC_IO_USB_1_OFFSET 0x25000
+#define NETLOGIC_IO_USB_0_OFFSET 0x24000
+#define NETLOGIC_IO_USB_1_OFFSET 0x25000
-#define NETLOGIC_IO_COMP_OFFSET 0x1D000
+#define NETLOGIC_IO_COMP_OFFSET 0x1D000
/* end XLS devices */
/* XLR devices */
-#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
-#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
-#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
-#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
+#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
+#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
+#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
+#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
/* end XLR devices */
-#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
-#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
+#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
+#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
-#define NETLOGIC_IO_GPIO_OFFSET 0x18000
-#define NETLOGIC_IO_FLASH_OFFSET 0x19000
-#define NETLOGIC_IO_TB_OFFSET 0x1C000
+#define NETLOGIC_IO_GPIO_OFFSET 0x18000
+#define NETLOGIC_IO_FLASH_OFFSET 0x19000
+#define NETLOGIC_IO_TB_OFFSET 0x1C000
-#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
+#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
/*
* Base Address (Virtual) of the PCI Config address space
@@ -102,8 +102,8 @@
* Config space spans 256 (num of buses) * 256 (num functions) * 256 bytes
* ie 1<<24 = 16M
*/
-#define DEFAULT_PCI_CONFIG_BASE 0x18000000
-#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
-#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
+#define DEFAULT_PCI_CONFIG_BASE 0x18000000
+#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
+#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
#endif
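[Reviewer note] The comment above this hunk spells out the config-space geometry: 256 buses x 256 device/function numbers x 256 bytes per function = 1 << 24 = 16 MB. As a sketch of that layout only -- the real XLR PCI controller code may apply extra mapping, and the helper name is made up -- a type-0 config offset could be composed from DEFAULT_PCI_CONFIG_BASE like this:

        /* Illustrative only: bus in bits 23:16, devfn in 15:8, register in 7:0. */
        static inline uint32_t xlr_pci_cfg_addr(unsigned int bus,
                                                unsigned int devfn,
                                                unsigned int reg)
        {
                return DEFAULT_PCI_CONFIG_BASE +
                       ((bus & 0xff) << 16) + ((devfn & 0xff) << 8) + (reg & 0xff);
        }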
diff --git a/arch/mips/include/asm/netlogic/xlr/msidef.h b/arch/mips/include/asm/netlogic/xlr/msidef.h
index 7e39d40be4f5..c95d18edf12f 100644
--- a/arch/mips/include/asm/netlogic/xlr/msidef.h
+++ b/arch/mips/include/asm/netlogic/xlr/msidef.h
@@ -45,21 +45,21 @@
*/
#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR_MASK 0x000000ff
+#define MSI_DATA_VECTOR_MASK 0x000000ff
#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
MSI_DATA_VECTOR_MASK)
#define MSI_DATA_DELIVERY_MODE_SHIFT 8
-#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
-#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_LEVEL_SHIFT 14
#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_TRIGGER_SHIFT 15
-#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
-#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
/*
* Shift/mask fields for msi address
@@ -69,16 +69,16 @@
#define MSI_ADDR_BASE_LO 0xfee00000
#define MSI_ADDR_DEST_MODE_SHIFT 2
-#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
MSI_ADDR_DEST_ID_MASK)
#endif /* ASM_RMI_MSIDEF_H */
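[Reviewer note] These hunks only re-align the MSI field macros, but they show how an MSI message is assembled from shift/mask helpers. A hedged sketch of composing the address and data words from the macros above -- the function is illustrative and may not match the exact values the XLR MSI setup code actually programs:

        /* Illustration of the macro usage above; not taken from the kernel. */
        static inline void xlr_msi_compose(uint32_t *msg_addr, uint32_t *msg_data,
                                           unsigned int destid, unsigned int vector)
        {
                *msg_addr = MSI_ADDR_BASE_LO |
                            MSI_ADDR_DEST_MODE_PHYSICAL |
                            MSI_ADDR_REDIRECTION_CPU |
                            MSI_ADDR_DEST_ID(destid);

                *msg_data = MSI_DATA_TRIGGER_EDGE |
                            MSI_DATA_LEVEL_ASSERT |
                            MSI_DATA_DELIVERY_FIXED |
                            MSI_DATA_VECTOR(vector);
        }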
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h
index 9a691b1f91ba..63c99176dffe 100644
--- a/arch/mips/include/asm/netlogic/xlr/pic.h
+++ b/arch/mips/include/asm/netlogic/xlr/pic.h
@@ -35,10 +35,11 @@
#ifndef _ASM_NLM_XLR_PIC_H
#define _ASM_NLM_XLR_PIC_H
-#define PIC_CLKS_PER_SEC 66666666ULL
+#define PIC_CLK_HZ 66666666
/* PIC hardware interrupt numbers */
#define PIC_IRT_WD_INDEX 0
#define PIC_IRT_TIMER_0_INDEX 1
+#define PIC_IRT_TIMER_INDEX(i) ((i) + PIC_IRT_TIMER_0_INDEX)
#define PIC_IRT_TIMER_1_INDEX 2
#define PIC_IRT_TIMER_2_INDEX 3
#define PIC_IRT_TIMER_3_INDEX 4
@@ -99,6 +100,7 @@
/* PIC Registers */
#define PIC_CTRL 0x00
+#define PIC_CTRL_STE 8 /* timer enable start bit */
#define PIC_IPI 0x04
#define PIC_INT_ACK 0x06
@@ -116,7 +118,7 @@
#define PIC_TIMER_COUNT_0_BASE 0x120
#define PIC_TIMER_COUNT_1_BASE 0x130
-#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
+#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr))
#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i))
@@ -130,9 +132,9 @@
* 8-39. This leaves the IRQ 0-7 for cpu interrupts like
* count/compare and FMN
*/
-#define PIC_IRQ_BASE 8
-#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
-#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
+#define PIC_IRQ_BASE 8
+#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
+#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE
#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX)
@@ -168,7 +170,7 @@
#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX)
#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX)
#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX)
-#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
+#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
/* XLS defines */
#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX)
#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX)
@@ -251,12 +253,52 @@ nlm_pic_ack(uint64_t base, int irt)
}
static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
{
nlm_write_reg(base, PIC_IRT_0(irt), (1u << hwt));
/* local scheduling, invalid, level by default */
nlm_write_reg(base, PIC_IRT_1(irt),
- (1 << 30) | (1 << 6) | irq);
+ (en << 30) | (1 << 6) | irq);
+}
+
+static inline uint64_t
+nlm_pic_read_timer(uint64_t base, int timer)
+{
+ uint32_t up1, up2, low;
+
+ up1 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ up2 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+
+ if (up1 != up2) /* wrapped, get the new low */
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ return ((uint64_t)up2 << 32) | low;
+
+}
+
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+}
+
+static inline void
+nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
+{
+ uint32_t up, low;
+ uint64_t pic_ctrl = nlm_read_reg(base, PIC_CTRL);
+ int en;
+
+ en = (irq > 0);
+ up = value >> 32;
+ low = value & 0xFFFFFFFF;
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_0(timer), low);
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_1(timer), up);
+ nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0);
+
+ /* enable the timer */
+ pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
+ nlm_write_reg(base, PIC_CTRL, pic_ctrl);
}
#endif
#endif /* _ASM_NLM_XLR_PIC_H */
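[Reviewer note] The new XLR helpers are the substantive part of this file. nlm_pic_read_timer() reads the high word, then the low word, then the high word again and re-reads the low word if the high word changed -- the standard way to get a tear-free 64-bit value out of a pair of 32-bit registers. nlm_pic_set_timer() writes the period into the MAXVAL pair, programs the timer's IRT entry, and then sets the per-timer start bit (PIC_CTRL_STE + timer) in PIC_CTRL. A usage sketch; the choice of timer 1 and the millisecond arithmetic are illustrative assumptions, not from the patch:

        /* Hypothetical example: arm XLR PIC timer 1 with a ~1 ms period. */
        static inline void xlr_start_1ms_timer(uint64_t picbase, int irq, int cpu)
        {
                uint64_t period = PIC_CLK_HZ / 1000;    /* ticks per millisecond */

                nlm_pic_set_timer(picbase, 1, period, irq, cpu);
        }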
diff --git a/arch/mips/include/asm/nile4.h b/arch/mips/include/asm/nile4.h
index af0e51a9f68a..2e2436d0e94e 100644
--- a/arch/mips/include/asm/nile4.h
+++ b/arch/mips/include/asm/nile4.h
@@ -2,7 +2,7 @@
* asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
*
* Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com>
- * Sony Software Development Center Europe (SDCE), Brussels
+ * Sony Software Development Center Europe (SDCE), Brussels
*
* This file is based on the following documentation:
*
@@ -17,7 +17,7 @@
/*
- * Physical Device Address Registers (PDARs)
+ * Physical Device Address Registers (PDARs)
*/
#define NILE4_SDRAM0 0x0000 /* SDRAM Bank 0 [R/W] */
@@ -37,7 +37,7 @@
/*
- * CPU Interface Registers
+ * CPU Interface Registers
*/
#define NILE4_CPUSTAT 0x0080 /* CPU Status [R/W] */
@@ -50,7 +50,7 @@
/*
- * Memory-Interface Registers
+ * Memory-Interface Registers
*/
#define NILE4_MEMCTRL 0x00C0 /* Memory Control */
@@ -59,7 +59,7 @@
/*
- * PCI-Bus Registers
+ * PCI-Bus Registers
*/
#define NILE4_PCICTRL 0x00E0 /* PCI Control [R/W] */
@@ -70,7 +70,7 @@
/*
- * Local-Bus Registers
+ * Local-Bus Registers
*/
#define NILE4_LCNFG 0x0100 /* Local Bus Configuration [R/W] */
@@ -88,7 +88,7 @@
/*
- * DMA Registers
+ * DMA Registers
*/
#define NILE4_DMACTRL0 0x0180 /* DMA Control 0 [R/W] */
@@ -100,7 +100,7 @@
/*
- * Timer Registers
+ * Timer Registers
*/
#define NILE4_T0CTRL 0x01C0 /* SDRAM Refresh Control [R/W] */
@@ -114,7 +114,7 @@
/*
- * PCI Configuration Space Registers
+ * PCI Configuration Space Registers
*/
#define NILE4_PCI_BASE 0x0200
@@ -153,10 +153,10 @@
/*
- * Serial-Port Registers
+ * Serial-Port Registers
*/
-#define NILE4_UART_BASE 0x0300
+#define NILE4_UART_BASE 0x0300
#define NILE4_UARTRBR 0x0300 /* UART Receiver Data Buffer [R] */
#define NILE4_UARTTHR 0x0300 /* UART Transmitter Data Holding [W] */
@@ -175,7 +175,7 @@
/*
- * Interrupt Lines
+ * Interrupt Lines
*/
#define NILE4_INT_CPCE 0 /* CPU-Interface Parity-Error Interrupt */
@@ -185,7 +185,7 @@
#define NILE4_INT_UART 4 /* UART Interrupt */
#define NILE4_INT_WDOG 5 /* Watchdog Timer Interrupt */
#define NILE4_INT_GPT 6 /* General-Purpose Timer Interrupt */
-#define NILE4_INT_LBRTD 7 /* Local-Bus Ready Timer Interrupt */
+#define NILE4_INT_LBRTD 7 /* Local-Bus Ready Timer Interrupt */
#define NILE4_INT_INTA 8 /* PCI Interrupt Signal INTA# */
#define NILE4_INT_INTB 9 /* PCI Interrupt Signal INTB# */
#define NILE4_INT_INTC 10 /* PCI Interrupt Signal INTC# */
@@ -197,7 +197,7 @@
/*
- * Nile 4 Register Access
+ * Nile 4 Register Access
*/
static inline void nile4_sync(void)
@@ -247,7 +247,7 @@ static inline u8 nile4_in8(u32 offset)
/*
- * Physical Device Address Registers
+ * Physical Device Address Registers
*/
extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
@@ -255,7 +255,7 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
/*
- * PCI Master Registers
+ * PCI Master Registers
*/
#define NILE4_PCICMD_IACK 0 /* PCI Interrupt Acknowledge */
@@ -265,9 +265,9 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
/*
- * PCI Address Spaces
+ * PCI Address Spaces
*
- * Note that these are multiplexed using PCIINIT[01]!
+ * Note that these are multiplexed using PCIINIT[01]!
*/
#define NILE4_PCI_IO_BASE 0xa6000000
@@ -280,7 +280,7 @@ extern void nile4_set_pmr(u32 pmr, u32 type, u32 addr);
/*
- * Interrupt Programming
+ * Interrupt Programming
*/
#define NUM_I8259_INTERRUPTS 16
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
index 3c74d826e2e6..e2d874e681f6 100644
--- a/arch/mips/include/asm/octeon/cvmx-address.h
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -84,20 +84,20 @@ typedef enum {
* Octeon-I HW never interprets this X (<39:36> reserved
* for future expansion), software should set to 0.
*
- * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
* - 0x0 XXX0 0FFF FFFF
*
- * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
- * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
*
- * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
* - 0x0 XXXF FFFF FFFF
*
- * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
* - 0x1 00XF FFFF FFFF
*
- * - 0x1 01X0 0000 0000 to Other NCB Uncached
- * - 0x1 FFXF FFFF FFFF devices
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
*
* Decode of all Octeon addresses
*/
@@ -129,9 +129,9 @@ typedef union {
*/
struct {
uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
- uint64_t cca:3; /* ignored by octeon */
+ uint64_t cca:3; /* ignored by octeon */
uint64_t mbz:10;
- uint64_t pa:49; /* physical address */
+ uint64_t pa:49; /* physical address */
} sxkphys;
/* physical address */
@@ -253,22 +253,22 @@ typedef union {
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
-#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
#endif /* __CVMX_ADDRESS_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 1db1dc2724cb..284fa8d773ba 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -91,11 +91,11 @@ struct cvmx_bootinfo {
#if (CVMX_BOOTINFO_MIN_VER >= 1)
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These are the physical
* addresses, so care must be taken to use the correct
* XKPHYS/KSEG0 addressing depending on the application's
- * ABI. These values will be 0 if CF is not present.
+ * ABI. These values will be 0 if CF is not present.
*/
uint64_t compact_flash_common_base_addr;
uint64_t compact_flash_attribute_base_addr;
@@ -131,7 +131,7 @@ struct cvmx_bootinfo {
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/* This flag is set if the TLB mappings are not contained in the
* 0x10000000 - 0x20000000 boot bus region. */
-#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
@@ -164,9 +164,9 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_EBT5600 = 22,
CVMX_BOARD_TYPE_EBH5201 = 23,
CVMX_BOARD_TYPE_EBT5200 = 24,
- CVMX_BOARD_TYPE_CB5600 = 25,
- CVMX_BOARD_TYPE_CB5601 = 26,
- CVMX_BOARD_TYPE_CB5200 = 27,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
/* Special 'generic' board type, supports many boards */
CVMX_BOARD_TYPE_GENERIC = 28,
CVMX_BOARD_TYPE_EBH5610 = 29,
@@ -223,7 +223,7 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
/*
- * Set aside a range for customer private use. The SDK won't
+ * Set aside a range for customer private use. The SDK won't
* use any numbers in this range.
*/
CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 42db2be663f1..352f1dc2508b 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -39,7 +39,7 @@
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
/* minimum alignment of bootmem alloced blocks */
-#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
/* Flags for cvmx_bootmem_phy_mem* functions */
/* Allocate from end of block instead of beginning */
@@ -151,8 +151,8 @@ extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
* memory cannot be allocated at the specified address.
*
* @size: Size in bytes of block to allocate
- * @address: Physical address to allocate memory at. If this memory is not
- * available, the allocation fails.
+ * @address: Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
@@ -181,7 +181,7 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
* @name: name of block to free
*
* Returns 0 on failure,
- * !0 on success
+ * !0 on success
*/
@@ -210,9 +210,9 @@ extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
*
* @size: Size in bytes of block to allocate
* @address: Physical address to allocate memory at. If this
- * memory is not available, the allocation fails.
+ * memory is not available, the allocation fails.
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN
- * bytes
+ * bytes
*
* Returns a pointer to block of memory, NULL on error
*/
@@ -249,7 +249,7 @@ extern int cvmx_bootmem_free_named(char *name);
* @name: name of block to free
*
* Returns pointer to named block descriptor on success
- * 0 on failure
+ * 0 on failure
*/
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
@@ -258,20 +258,20 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
* (optional) requested address and alignment.
*
* @req_size: size of region to allocate. All requests are rounded up
- * to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ * to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
*
* @address_min: Minimum address that block can occupy.
*
* @address_max: Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
*
* @alignment: Requested alignment of the block. If this alignment
- * cannot be met, the allocation fails. This must be a
- * power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less than
- * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * cannot be met, the allocation fails. This must be a
+ * power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less than
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
*
* @flags: Flags to control options for the allocation.
*
@@ -285,21 +285,21 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
* Allocates a named block of physical memory from the free list, at
* (optional) requested address and alignment.
*
- * @param size size of region to allocate. All requests are rounded
- * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
- * bytes size
+ * @param size size of region to allocate. All requests are rounded
+ * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * bytes size
* @param min_addr Minimum address that block can occupy.
* @param max_addr Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
* @param alignment Requested alignment of the block. If this
- * alignment cannot be met, the allocation fails.
- * This must be a power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less
- * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
- * @param name name to assign to named block
- * @param flags Flags to control options for the allocation.
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
@@ -312,14 +312,14 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
- * @name: Name of memory block to find. If NULL pointer given, then
- * finds unused descriptor, if available.
+ * @name: Name of memory block to find. If NULL pointer given, then
+ * finds unused descriptor, if available.
*
* @flags: Flags to control options for the allocation.
*
* Returns Pointer to memory block descriptor, NULL if not found.
- * If NULL returned when name parameter is NULL, then no memory
- * block descriptors are available.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
*/
struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
@@ -331,31 +331,31 @@ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
* @flags: flags for passing options
*
* Returns 0 on failure
- * 1 on success
+ * 1 on success
*/
int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
/**
- * Frees a block to the bootmem allocator list. This must
+ * Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
* frees and initial population of the free memory list.
- * *
+ * *
*
* @phy_addr: physical address of block
* @size: size of block in bytes.
* @flags: flags for passing options
*
* Returns 1 on success,
- * 0 on failure
+ * 0 on failure
*/
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
/**
- * Locks the bootmem allocator. This is useful in certain situations
+ * Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index fed91125317f..024a71b2bff9 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -244,33 +244,33 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
".set noreorder\n"
"1:\n"
/* Atomic add one to ticket_ptr */
- "ll %[my_ticket], %[ticket_ptr]\n"
+ "ll %[my_ticket], %[ticket_ptr]\n"
/* and store the original value */
- "li %[ticket], 1\n"
+ "li %[ticket], 1\n"
/* in my_ticket */
- "baddu %[ticket], %[my_ticket]\n"
- "sc %[ticket], %[ticket_ptr]\n"
- "beqz %[ticket], 1b\n"
+ "baddu %[ticket], %[my_ticket]\n"
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
" nop\n"
/* Load the current now_serving ticket */
- "lbu %[ticket], %[now_serving]\n"
+ "lbu %[ticket], %[now_serving]\n"
"2:\n"
/* Jump out if now_serving == my_ticket */
- "beq %[ticket], %[my_ticket], 4f\n"
+ "beq %[ticket], %[my_ticket], 4f\n"
/* Find out how many tickets are in front of me */
- " subu %[ticket], %[my_ticket], %[ticket]\n"
+ " subu %[ticket], %[my_ticket], %[ticket]\n"
/* Use tickets in front of me minus one to delay */
"subu %[ticket], 1\n"
/* Delay will be ((tickets in front)-1)*32 loops */
- "cins %[ticket], %[ticket], 5, 7\n"
+ "cins %[ticket], %[ticket], 5, 7\n"
"3:\n"
/* Loop here until our ticket might be up */
- "bnez %[ticket], 3b\n"
- " subu %[ticket], 1\n"
+ "bnez %[ticket], 3b\n"
+ " subu %[ticket], 1\n"
/* Jump back up to check out ticket again */
- "b 2b\n"
+ "b 2b\n"
/* Load the current now_serving ticket */
- " lbu %[ticket], %[now_serving]\n"
+ " lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
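[Reviewer note] The re-indented inline assembly is a ticket lock: an ll/baddu/sc sequence atomically takes a one-byte ticket, and the taker spins until now_serving reaches it, delaying roughly ((tickets in front) - 1) * 32 loop iterations between polls (the cins ..., 5, 7 is the x32 shift). A C-level sketch of the same protocol, illustrative only, since the real code has to stay within one byte per queue and use the Octeon-specific byte add:

        /* Sketch of the ticket-lock protocol encoded by the assembly above. */
        static inline void ticket_lock_sketch(volatile uint8_t *ticket_ptr,
                                              volatile uint8_t *now_serving)
        {
                /* take a ticket atomically; GCC builtin stands in for ll/baddu/sc */
                uint8_t my_ticket = __sync_fetch_and_add(ticket_ptr, 1);

                while (*now_serving != my_ticket) {
                        /* back off in proportion to how many holders are ahead */
                        unsigned int delay =
                                (uint8_t)(my_ticket - *now_serving - 1) * 32;

                        while (delay--)
                                ;       /* busy wait; real code would use cpu_relax() */
                }
        }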
@@ -313,9 +313,9 @@ static inline __cvmx_cmd_queue_state_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd_count: Number of command words to write
* @cmds: Array of commands to write
*
@@ -411,9 +411,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
*
@@ -510,9 +510,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
* @cmd3: Command
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index 26835d1b43b8..f7dd17d0dc22 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -31,13 +31,13 @@
/* Pools in use */
/* Packet buffers */
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
/* Work queue entrys */
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
/************************* FAU allocation ********************************/
@@ -45,7 +45,7 @@
* in order of descending size so that all alignment constraints are
* automatically met. The enums are linked so that the following enum
* continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero. The macros
+ * numbering within each enum always starts with zero. The macros
* take care of the address increment size, so the values entered
* always increase by 1. FAU registers are accessed with byte
* addresses.
@@ -90,9 +90,9 @@ typedef enum {
* be taken into account.
*/
/* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH (0)
+#define CVMX_SCR_SCRATCH (0)
/* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE (8)
+#define CVMX_SCR_REG_AVAIL_BASE (8)
/*
* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
@@ -145,14 +145,14 @@ typedef enum {
* 1: include
*/
#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
diff --git a/arch/mips/include/asm/octeon/cvmx-fau.h b/arch/mips/include/asm/octeon/cvmx-fau.h
index a6939fc8ba18..ef98f7fc102f 100644
--- a/arch/mips/include/asm/octeon/cvmx-fau.h
+++ b/arch/mips/include/asm/octeon/cvmx-fau.h
@@ -37,13 +37,13 @@
*/
#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR 63, 56
-#define CVMX_FAU_BITS_LEN 55, 48
-#define CVMX_FAU_BITS_INEVAL 35, 14
-#define CVMX_FAU_BITS_TAGWAIT 13, 13
-#define CVMX_FAU_BITS_NOADD 13, 13
-#define CVMX_FAU_BITS_SIZE 12, 11
-#define CVMX_FAU_BITS_REGISTER 10, 0
+#define CVMX_FAU_BITS_SCRADDR 63, 56
+#define CVMX_FAU_BITS_LEN 55, 48
+#define CVMX_FAU_BITS_INEVAL 35, 14
+#define CVMX_FAU_BITS_TAGWAIT 13, 13
+#define CVMX_FAU_BITS_NOADD 13, 13
+#define CVMX_FAU_BITS_SIZE 12, 11
+#define CVMX_FAU_BITS_REGISTER 10, 0
typedef enum {
CVMX_FAU_OP_SIZE_8 = 0,
@@ -109,11 +109,11 @@ typedef union {
* Builds a store I/O address for writing to the FAU
*
* @noadd: 0 = Store value is atomically added to the current value
- * 1 = Store value is atomically written over the current value
+ * 1 = Store value is atomically written over the current value
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Address to store for atomic update
*/
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
@@ -127,16 +127,16 @@ static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
* Builds a I/O address for accessing the FAU
*
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* Returns Address to read from for atomic update
*/
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
@@ -152,9 +152,9 @@ static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
@@ -167,9 +167,9 @@ static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
@@ -182,7 +182,7 @@ static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Value of the register before the update
*/
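[Reviewer note] These hunks only fix continuation-line indentation in the fetch-and-add kernel-doc, but the documented contract is worth a concrete reminder: register numbers step by 8/4/2 for 64/32/16-bit access, the added value is limited to the low 22 bits for 32- and 64-bit operations, and the return value is the register contents before the add. A usage sketch with a hypothetical register assignment:

        /* EXAMPLE_PKT_COUNTER is a made-up 64-bit FAU register, for illustration. */
        #define EXAMPLE_PKT_COUNTER     ((cvmx_fau_reg_64_t)0)

        static inline int64_t example_count_packet(void)
        {
                /* atomically add 1; the value returned is the count before the add */
                return cvmx_fau_fetch_and_add64(EXAMPLE_PKT_COUNTER, 1);
        }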
@@ -209,12 +209,12 @@ static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -233,12 +233,12 @@ cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -257,11 +257,11 @@ cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -282,8 +282,8 @@ cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
@@ -301,21 +301,21 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
*
* @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @size: The size of the operation:
- * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
- * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Data to write using cvmx_send_single
*/
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
@@ -337,11 +337,11 @@ static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
@@ -357,11 +357,11 @@ static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
@@ -377,9 +377,9 @@ static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
*/
@@ -396,7 +396,7 @@ static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
@@ -414,14 +414,14 @@ static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
@@ -437,14 +437,14 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
@@ -460,12 +460,12 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*
* Returns Placed in the scratch pad register
@@ -483,9 +483,9 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
@@ -504,7 +504,7 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -516,7 +516,7 @@ static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -528,7 +528,7 @@ static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -551,7 +551,7 @@ static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
* Perform an atomic 64 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -563,7 +563,7 @@ static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -575,7 +575,7 @@ static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
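A minimal usage sketch for the FAU helpers above, assuming a hypothetical 32-bit register offset (any 4-byte-stepped offset below 2048 would do); the macro name is illustrative, not defined by this header:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-fau.h>

/* Hypothetical 32-bit FAU register; offsets step by 4 for 32 bit access. */
#define EXAMPLE_FAU_COUNTER ((cvmx_fau_reg_32_t)0)

static void example_fau_counter(void)
{
	int32_t before;

	/* Reset the counter. */
	cvmx_fau_atomic_write32(EXAMPLE_FAU_COUNTER, 0);

	/* Count an event when the old value is not needed. */
	cvmx_fau_atomic_add32(EXAMPLE_FAU_COUNTER, 1);

	/* Fetch-and-add returns the value before the update; only the
	 * low 22 bits of the increment are used. */
	before = cvmx_fau_fetch_and_add32(EXAMPLE_FAU_COUNTER, 1);
	(void)before;
}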
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index 541a1ae02b6f..aa26a2ce5a0e 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -39,9 +39,9 @@
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
-#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_NUM_POOLS 8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT 128
+#define CVMX_FPA_ALIGNMENT 128
/**
* Structure describing the data format used for stores to the FPA.
@@ -186,8 +186,8 @@ static inline void *cvmx_fpa_alloc(uint64_t pool)
/**
* Asynchronously get a new block from the FPA
*
- * @scr_addr: Local scratch address to put response in. This is a byte address,
- * but must be 8 byte aligned.
+ * @scr_addr: Local scratch address to put response in. This is a byte address,
+ * but must be 8 byte aligned.
* @pool: Pool to get the block from
*/
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
@@ -212,7 +212,7 @@ static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -234,7 +234,7 @@ static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -245,7 +245,7 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
/*
* Make sure that any previous writes to memory go out before
- * we free this buffer. This also serves as a barrier to
+ * we free this buffer. This also serves as a barrier to
* prevent GCC from reordering operations to after the
* free.
*/
@@ -259,17 +259,17 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
* @block_size: Size for each block controlled by the FPA
* @num_blocks: Number of blocks
*
* Returns 0 on Success,
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
uint64_t block_size, uint64_t num_blocks);
@@ -282,8 +282,8 @@ extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
*
* @pool: Pool to shutdown
* Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
*/
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
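A sketch of the pool setup/alloc/free flow described above; the pool number, block geometry, and the origin of the backing memory are assumptions, and the buffer must meet the CVMX_FPA_ALIGNMENT and accessibility requirements listed for cvmx_fpa_setup_pool():

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-fpa.h>

#define EXAMPLE_POOL	   0	/* Hypothetical pool number (0-7). */
#define EXAMPLE_BLOCK_SIZE 2048	/* Multiple of CVMX_FPA_MIN_BLOCK_SIZE. */
#define EXAMPLE_NUM_BLOCKS 64

static int example_fpa_pool(void *backing_memory)
{
	void *block;

	if (cvmx_fpa_setup_pool(EXAMPLE_POOL, "example-pool", backing_memory,
				EXAMPLE_BLOCK_SIZE, EXAMPLE_NUM_BLOCKS) < 0)
		return -1;

	block = cvmx_fpa_alloc(EXAMPLE_POOL);
	if (!block)
		return -1;

	/* ... use the block ... */

	/* Return the block to the pool; 0 = no cache lines to invalidate. */
	cvmx_fpa_free(block, EXAMPLE_POOL, 0);
	return 0;
}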
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 442f508eaac9..41785dd0ddd0 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -48,7 +48,7 @@ typedef enum {
* Fake IPD port, the RGMII/MII interface may use different PHY, use
* this macro to return appropriate MIX address to read the PHY.
*/
-#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* cvmx_override_board_link_get(int ipd_port) is a function
@@ -86,10 +86,10 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
*
* @phy_addr: The address of the PHY to program
* @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backware compatibility.
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backward compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
+ * is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
@@ -111,10 +111,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
- * status for.
+ * status for.
*
 * Returns The port's link status. If the link isn't fully resolved, this must
- * return zero.
+ * return zero.
*/
extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
@@ -134,10 +134,10 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
*
* @interface: Interface to probe
* @supported_ports:
- * Number of ports Octeon supports.
+ * Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
- * simple be "support_ports".
+ * simply be "supported_ports".
*/
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index 78295ba0050f..4d7a3db3a9f6 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -98,9 +98,9 @@ extern int __cvmx_helper_rgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
index 9a9b6c103ede..4debb1c5153d 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_sgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h
index 01c8ddd84ff8..f446f212bbd4 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-util.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h
@@ -57,11 +57,11 @@ extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
@@ -71,11 +71,11 @@ extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
@@ -84,7 +84,7 @@ extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
* Get the version of the CVMX libraries.
*
* Returns Version string. Note this buffer is allocated statically
- * and will be shared by all callers.
+ * and will be shared by all callers.
*/
extern const char *cvmx_helper_get_version(void);
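A short sketch of enabling RED with the two thresholds described above; the numbers are purely illustrative and would need tuning to the actual size of FPA pool 0:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-helper-util.h>

static int example_setup_red(void)
{
	/* Begin probabilistically dropping when fewer than 256 packet
	 * buffers remain free in FPA 0; drop everything below 128. */
	return cvmx_helper_setup_red(256, 128);
}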
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
index f6fbc4f45b56..5e89ed703eaa 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_xaui_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
index 691c8142cd4f..5a3090dc6f2f 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -93,12 +93,12 @@ extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD. This should be called by the user program after any additional
+ * IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* Returns 0 on success
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
@@ -128,7 +128,7 @@ extern int cvmx_helper_initialize_packet_io_local(void);
* @interface: Which interface to return port count for.
*
* Returns Port count for interface
- * -1 for uninitialized interface
+ * -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
@@ -150,7 +150,7 @@ extern int cvmx_helper_get_number_of_interfaces(void);
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
+ * DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
interface);
@@ -214,9 +214,9 @@ extern int cvmx_helper_interface_enumerate(int interface);
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
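A sketch that walks the interfaces using the helpers declared above, printing the port count and mode reported for each; it assumes packet I/O has already been initialized:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-helper.h>

static void example_dump_interfaces(void)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;

	for (interface = 0; interface < num_interfaces; interface++) {
		int ports = cvmx_helper_ports_on_interface(interface);
		cvmx_helper_interface_mode_t mode =
			cvmx_helper_interface_get_mode(interface);

		/* ports == -1 means the interface is uninitialized. */
		cvmx_dprintf("interface %d: mode %d, %d ports\n",
			     interface, (int)mode, ports);
	}
}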
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index 115a552c5c7f..e13490ebbb27 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -38,8 +38,8 @@
#include <asm/octeon/cvmx-ipd-defs.h>
enum cvmx_ipd_mode {
- CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
@@ -60,17 +60,17 @@ typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
*
* @mbuff_size: Packets buffer size in 8 byte words
* @first_mbuff_skip:
- * Number of 8 byte words to skip in the first buffer
+ * Number of 8 byte words to skip in the first buffer
* @not_first_mbuff_skip:
- * Number of 8 byte words to skip in each following buffer
+ * Number of 8 byte words to skip in each following buffer
* @first_back: Must be same as first_mbuff_skip / 128
* @second_back:
- * Must be same as not_first_mbuff_skip / 128
+ * Must be same as not_first_mbuff_skip / 128
* @wqe_fpa_pool:
- * FPA pool to get work entries from
+ * FPA pool to get work entries from
* @cache_mode:
* @back_pres_enable_flag:
- * Enable or disable port back pressure
+ * Enable or disable port back pressure
*/
static inline void cvmx_ipd_config(uint64_t mbuff_size,
uint64_t first_mbuff_skip,
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2c8ff9e33ec3..11c0a8fa8eb5 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -33,13 +33,13 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
-#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
@@ -67,91 +67,91 @@ union cvmx_l2c_tag {
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
- CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
- CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
- CVMX_L2C_EVENT_DATA_MISS = 3,
- CVMX_L2C_EVENT_DATA_HIT = 4,
- CVMX_L2C_EVENT_MISS = 5,
- CVMX_L2C_EVENT_HIT = 6,
- CVMX_L2C_EVENT_VICTIM_HIT = 7,
- CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
- CVMX_L2C_EVENT_TAG_PROBE = 9,
- CVMX_L2C_EVENT_TAG_UPDATE = 10,
- CVMX_L2C_EVENT_TAG_COMPLETE = 11,
- CVMX_L2C_EVENT_TAG_DIRTY = 12,
- CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
- CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
- CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
- CVMX_L2C_EVENT_WRITE_REQUEST = 17,
- CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
- CVMX_L2C_EVENT_XMC_NOP = 20,
- CVMX_L2C_EVENT_XMC_LDT = 21,
- CVMX_L2C_EVENT_XMC_LDI = 22,
- CVMX_L2C_EVENT_XMC_LDD = 23,
- CVMX_L2C_EVENT_XMC_STF = 24,
- CVMX_L2C_EVENT_XMC_STT = 25,
- CVMX_L2C_EVENT_XMC_STP = 26,
- CVMX_L2C_EVENT_XMC_STC = 27,
- CVMX_L2C_EVENT_XMC_DWB = 28,
- CVMX_L2C_EVENT_XMC_PL2 = 29,
- CVMX_L2C_EVENT_XMC_PSL1 = 30,
- CVMX_L2C_EVENT_XMC_IOBLD = 31,
- CVMX_L2C_EVENT_XMC_IOBST = 32,
- CVMX_L2C_EVENT_XMC_IOBDMA = 33,
- CVMX_L2C_EVENT_XMC_IOBRSP = 34,
- CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
- CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
- CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
- CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
- CVMX_L2C_EVENT_RSC_NOP = 39,
- CVMX_L2C_EVENT_RSC_STDN = 40,
- CVMX_L2C_EVENT_RSC_FILL = 41,
- CVMX_L2C_EVENT_RSC_REFL = 42,
- CVMX_L2C_EVENT_RSC_STIN = 43,
- CVMX_L2C_EVENT_RSC_SCIN = 44,
- CVMX_L2C_EVENT_RSC_SCFL = 45,
- CVMX_L2C_EVENT_RSC_SCDN = 46,
- CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
- CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
- CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
- CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
- CVMX_L2C_EVENT_LRF_REQ = 51,
- CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
- CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
CVMX_L2C_EVENT_MAX
};
/* L2C Performance Counter events for Octeon2. */
enum cvmx_l2c_tad_event {
- CVMX_L2C_TAD_EVENT_NONE = 0,
- CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
- CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
- CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
- CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
- CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
- CVMX_L2C_TAD_EVENT_SC_PASS = 6,
- CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
- CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
- CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
- CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
- CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
- CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
- CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
- CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
- CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
- CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
- CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
- CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
- CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
- CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
- CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
- CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
- CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
CVMX_L2C_TAD_EVENT_MAX
};
@@ -159,10 +159,10 @@ enum cvmx_l2c_tad_event {
* Configure one of the four L2 Cache performance counters to capture event
* occurrences.
*
- * @counter: The counter to configure. Range 0..3.
- * @event: The type of L2 Cache event occurrence to count.
+ * @counter: The counter to configure. Range 0..3.
+ * @event: The type of L2 Cache event occurrence to count.
* @clear_on_read: When asserted, any read of the performance counter
- * clears the counter.
+ * clears the counter.
*
* @note The routine does not clear the counter.
*/
@@ -184,8 +184,8 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter);
* @core: The core processor of interest.
*
* Returns The mask specifying the partitioning. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_core_way_partition(uint32_t core);
@@ -194,16 +194,16 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
*
* @core: The core that the partitioning applies to.
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
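A sketch of the way-partitioning call just described; the mask value is illustrative (a set bit blocks eviction from that way, and at least one bit must remain clear):

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>

static int example_l2c_partition(uint32_t core)
{
	/* Block this core from evicting way 0, leave all other ways open. */
	if (cvmx_l2c_set_core_way_partition(core, 0x1))
		return -1;

	/* Read the partitioning back; -1 indicates an error. */
	return cvmx_l2c_get_core_way_partition(core);
}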
@@ -211,8 +211,8 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
* Return the L2 Cache way partitioning for the hw blocks.
*
* Returns The mask specifying the reserved way. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_hw_way_partition(void);
@@ -220,16 +220,16 @@ int cvmx_l2c_get_hw_way_partition(void);
* Partitions the L2 cache for the hardware blocks.
*
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
@@ -240,7 +240,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask);
* @addr: physical address of line to lock
*
* Returns 0 on success,
- * 1 if line not locked.
+ * 1 if line not locked.
*/
int cvmx_l2c_lock_line(uint64_t addr);
@@ -258,7 +258,7 @@ int cvmx_l2c_lock_line(uint64_t addr);
* @len: Length (in bytes) of region to lock
*
* Returns Number of requested lines that where not locked.
- * 0 on success (all locked)
+ * 0 on success (all locked)
*/
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
@@ -272,7 +272,7 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
* @address: Physical address to unlock
*
* Returns 0: line not unlocked
- * 1: line unlocked
+ * 1: line unlocked
*/
int cvmx_l2c_unlock_line(uint64_t address);
@@ -290,7 +290,7 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
* Read the L2 controller tag for a given location in L2
*
* @association:
- * Which association to read line from
+ * Which association to read line from
* @index: Which way to read from.
*
* Returns l2c tag structure for line requested.
diff --git a/arch/mips/include/asm/octeon/cvmx-mdio.h b/arch/mips/include/asm/octeon/cvmx-mdio.h
index 6f0cd182cec8..9f6a4f32a83c 100644
--- a/arch/mips/include/asm/octeon/cvmx-mdio.h
+++ b/arch/mips/include/asm/octeon/cvmx-mdio.h
@@ -246,21 +246,21 @@ typedef union {
} cvmx_mdio_phy_reg_mmd_address_data_t;
/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
+#define MDIO_CLAUSE_45_READ 3
/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
@@ -291,7 +291,7 @@ static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to read
*
@@ -328,13 +328,13 @@ static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
{
@@ -370,7 +370,7 @@ static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
* read PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to read
@@ -407,7 +407,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
+ "device %2d register %2d TIME OUT(address)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -425,7 +425,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
+ "device %2d register %2d TIME OUT(data)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -434,7 +434,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
return smi_rd.s.dat;
else {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
+ "device %2d register %2d INVALID READ\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -445,14 +445,14 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
* write PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
int location, int val)
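A clause 22 sketch using cvmx_mdio_read()/cvmx_mdio_write() from above; bus 0 and PHY address 1 are assumptions, and register 0 is the standard MII control register:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-mdio.h>

static int example_mdio_enable_autoneg(void)
{
	int bus_id = 0;	/* Zero on most chips; some support multiple busses. */
	int phy_id = 1;	/* Hypothetical PHY address. */
	int bmcr;

	bmcr = cvmx_mdio_read(bus_id, phy_id, 0);
	if (bmcr < 0)
		return -1;

	/* Set the auto-negotiation enable bit (bit 12); the write helper
	 * returns -1 on error, 0 on success. */
	return cvmx_mdio_write(bus_id, phy_id, 0, bmcr | (1 << 12));
}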
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
index 05a917d6ebe5..e975c7d2e485 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -44,7 +44,7 @@ enum cvmx_pip_port_parse_mode {
*/
CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
/*
- * Input packets are assumed to be IP. Results from non IP
+ * Input packets are assumed to be IP. Results from non IP
 * packets are undefined. Pointers reference the beginning of
* the IP header.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index 9e739a640855..a76fe5a57a9f 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,8 +37,8 @@
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip-defs.h>
-#define CVMX_PIP_NUM_INPUT_PORTS 40
-#define CVMX_PIP_NUM_WATCHERS 4
+#define CVMX_PIP_NUM_INPUT_PORTS 40
+#define CVMX_PIP_NUM_WATCHERS 4
/*
* Encodes the different error and exception codes
@@ -92,10 +92,10 @@ typedef enum {
/**
* NOTES
- * late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as bad FCS
- * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
*/
typedef enum {
/* No error */
@@ -122,11 +122,11 @@ typedef enum {
* error)
*/
CVMX_PIP_UNDER_FCS_ERR = 6ull,
- /* RGM 7 = FCS error */
+ /* RGM 7 = FCS error */
CVMX_PIP_GMX_FCS_ERR = 7ull,
/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
CVMX_PIP_UNDER_ERR = 8ull,
- /* RGM 9 = Frame carrier extend error */
+ /* RGM 9 = Frame carrier extend error */
CVMX_PIP_EXTEND_ERR = 9ull,
/*
* RGM 10 = length mismatch (len did not match len in L2
@@ -161,10 +161,10 @@ typedef enum {
CVMX_PIP_PIP_L2_MAL_HDR = 18L
/*
* NOTES: xx = late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as
- * bad FCS or carrier extend error which is
- * CVMX_PIP_EXTEND_ERR
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as
+ * bad FCS or carrier extend error which is
+ * CVMX_PIP_EXTEND_ERR
*/
} cvmx_pip_rcv_err_t;
@@ -192,13 +192,13 @@ typedef struct {
/* Number of packets processed by PIP */
uint32_t packets;
/*
- * Number of indentified L2 multicast packets. Does not
+ * Number of identified L2 multicast packets. Does not
* include broadcast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t multicast_packets;
/*
- * Number of indentified L2 broadcast packets. Does not
+ * Number of identified L2 broadcast packets. Does not
* include multicast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
@@ -287,7 +287,7 @@ typedef union {
* @port_num: Port number to configure
* @port_cfg: Port hardware configuration
* @port_tag_cfg:
- * Port POW tagging configuration
+ * Port POW tagging configuration
*/
static inline void cvmx_pip_config_port(uint64_t port_num,
union cvmx_pip_prt_cfgx port_cfg,
@@ -298,20 +298,20 @@ static inline void cvmx_pip_config_port(uint64_t port_num,
}
#if 0
/**
- * @deprecated This function is a thin wrapper around the Pass1 version
- * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- * setting the group that is incompatible with this function,
- * the preferred upgrade path is to use the CSR directly.
+ * @deprecated This function is a thin wrapper around the Pass1 version
+ * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ * setting the group that is incompatible with this function,
+ * the preferred upgrade path is to use the CSR directly.
*
* Configure the global QoS packet watchers. Each watcher is
* capable of matching a field in a packet to determine the
* QoS queue for scheduling.
*
- * @watcher: Watcher number to configure (0 - 3).
+ * @watcher: Watcher number to configure (0 - 3).
* @match_type: Watcher match type
* @match_value:
- * Value the watcher will match against
- * @qos: QoS queue for packets matching this watcher
+ * Value the watcher will match against
+ * @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_watcher(uint64_t watcher,
cvmx_pip_qos_watch_types match_type,
@@ -331,7 +331,7 @@ static inline void cvmx_pip_config_watcher(uint64_t watcher,
* Configure the VLAN priority to QoS queue mapping.
*
* @vlan_priority:
- * VLAN priority (0-7)
+ * VLAN priority (0-7)
* @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
@@ -451,10 +451,10 @@ static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
*
* @interface: Interface to configure (0 or 1)
* @invert_result:
- * Invert the result of the CRC
+ * Invert the result of the CRC
* @reflect: Reflect
* @initialization_vector:
- * CRC initialization vector
+ * CRC initialization vector
*/
static inline void cvmx_pip_config_crc(uint64_t interface,
uint64_t invert_result, uint64_t reflect,
@@ -500,13 +500,13 @@ static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
*
* @mask_index: Which tag mask to modify (0..3)
* @offset: Offset into the bitmask to set bits at. Use the GCC macro
- * offsetof() to determine the offsets into packet headers.
- * For example, offsetof(ethhdr, protocol) returns the offset
- * of the ethernet protocol field. The bitmask selects which
- * bytes to include the the tag, with bit offset X selecting
- * byte at offset X from the beginning of the packet data.
+ * offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(ethhdr, protocol) returns the offset
+ * of the ethernet protocol field. The bitmask selects which
+ * bytes to include in the tag, with bit offset X selecting
+ * byte at offset X from the beginning of the packet data.
* @len: Number of bytes to include. Usually this is the sizeof()
- * the field.
+ * the field.
*/
static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
uint64_t len)
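A sketch of the offsetof() pattern the comment above suggests, folding the Ethernet protocol field into the tag; mask index 0 is an arbitrary choice:

#include <linux/if_ether.h>
#include <linux/stddef.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pip.h>

static void example_pip_tag_mask(void)
{
	/* Clear mask 0, then include the two bytes of the Ethernet
	 * protocol field in the bytes used to build the tag. */
	cvmx_pip_tag_mask_clear(0);
	cvmx_pip_tag_mask_set(0, offsetof(struct ethhdr, h_proto), 2);
}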
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index c6daeedf1f81..f7d2a6718849 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -69,16 +69,16 @@
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
/* use this for queues that are not used */
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
-#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
typedef enum {
@@ -269,13 +269,13 @@ extern void cvmx_pko_shutdown(void);
/**
* Configure a output port and the associated queues for use.
*
- * @port: Port to configure.
+ * @port: Port to configure.
* @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 1-8. A value of 8 get 8 times the traffic
- * of a value of 1. There must be num_queues elements in the
- * array.
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
*/
extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
uint64_t base_queue,
@@ -285,7 +285,7 @@ extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
/**
* Ring the packet output doorbell. This tells the packet
* output hardware that "len" command words have been added
- * to its pending list. This command includes the required
+ * to its pending list. This command includes the required
* CVMX_SYNCWS before the doorbell ring.
*
* @port: Port the packet is for
@@ -322,18 +322,18 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* The use_locking parameter allows the caller to use three
* possible locking modes.
* - CVMX_PKO_LOCK_NONE
- * - PKO doesn't do any locking. It is the responsibility
- * of the application to make sure that no other core
- * is accessing the same queue at the same time.
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ * is accessing the same queue at the same time.
* - CVMX_PKO_LOCK_ATOMIC_TAG
- * - PKO performs an atomic tagswitch to insure exclusive
- * access to the output queue. This will maintain
- * packet ordering on output.
+ * - PKO performs an atomic tagswitch to ensure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
* - CVMX_PKO_LOCK_CMD_QUEUE
- * - PKO uses the common command queue locks to insure
- * exclusive access to the output queue. This is a
- * memory based ll/sc. This is the most portable
- * locking mechanism.
+ * - PKO uses the common command queue locks to ensure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
*
* NOTE: If atomic locking is used, the POW entry CANNOT be
* descheduled, as it does not contain a valid WQE pointer.
@@ -341,7 +341,7 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*/
static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
@@ -351,11 +351,11 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
/*
* Must do a full switch here to handle all cases. We
* use a fake WQE pointer, as the POW does not access
- * this memory. The WQE pointer and group are only
+ * this memory. The WQE pointer and group are only
* used if this work is descheduled, which is not
* supported by the
* cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
- * combination. Note that this is a special case in
+ * combination. Note that this is a special case in
* which these fake values can be used - this is not a
* general technique.
*/
@@ -377,10 +377,10 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -418,12 +418,12 @@ static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
 * @addr: Physical address of a work queue entry or physical address
- * to zero on complete.
+ * to zero on complete.
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -588,7 +588,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
@@ -601,7 +601,7 @@ extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
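A sketch of configuring a PKO port with per-queue priorities as described for cvmx_pko_config_port(); the port and queue numbers are illustrative, and the element type of the priority array is assumed to be uint64_t:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pko.h>

static int example_pko_config(void)
{
	/* One priority per queue, values 1-8; 8 gets eight times the
	 * traffic of 1. */
	uint64_t priority[2] = { 8, 1 };

	if (cvmx_pko_config_port(0 /* port */, 0 /* base_queue */,
				 2 /* num_queues */,
				 priority) != CVMX_PKO_SUCCESS)
		return -1;
	return 0;
}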
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 92742b241a51..4b4d0ecfd9eb 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -70,7 +70,7 @@ enum cvmx_pow_tag_type {
* The work queue entry from the order - NEVER tag switch from
* NULL to NULL
*/
- CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
/* A tag switch to NULL, and there is no space reserved in POW
* - NEVER tag switch to NULL_NULL
* - NEVER tag switch from NULL_NULL
@@ -90,7 +90,7 @@ typedef enum {
} cvmx_pow_wait_t;
/**
- * POW tag operations. These are used in the data stored to the POW.
+ * POW tag operations. These are used in the data stored to the POW.
*/
typedef enum {
/*
@@ -341,14 +341,14 @@ typedef union {
* lists. The two memory-input queue lists associated
* with each QOS level are:
*
- * - qosgrp = 0, qosgrp = 8: QOS0
- * - qosgrp = 1, qosgrp = 9: QOS1
- * - qosgrp = 2, qosgrp = 10: QOS2
- * - qosgrp = 3, qosgrp = 11: QOS3
- * - qosgrp = 4, qosgrp = 12: QOS4
- * - qosgrp = 5, qosgrp = 13: QOS5
- * - qosgrp = 6, qosgrp = 14: QOS6
- * - qosgrp = 7, qosgrp = 15: QOS7
+ * - qosgrp = 0, qosgrp = 8: QOS0
+ * - qosgrp = 1, qosgrp = 9: QOS1
+ * - qosgrp = 2, qosgrp = 10: QOS2
+ * - qosgrp = 3, qosgrp = 11: QOS3
+ * - qosgrp = 4, qosgrp = 12: QOS4
+ * - qosgrp = 5, qosgrp = 13: QOS5
+ * - qosgrp = 6, qosgrp = 14: QOS6
+ * - qosgrp = 7, qosgrp = 15: QOS7
*/
uint64_t qosgrp:4;
/*
@@ -942,11 +942,11 @@ typedef union {
* operations.
*
* NOTE: The following is the behavior of the pending switch bit at the PP
- * for POW stores (i.e. when did<7:3> == 0xc)
- * - did<2:0> == 0 => pending switch bit is set
- * - did<2:0> == 1 => no affect on the pending switch bit
- * - did<2:0> == 3 => pending switch bit is cleared
- * - did<2:0> == 7 => no affect on the pending switch bit
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ * - did<2:0> == 1 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ * - did<2:0> == 7 => no effect on the pending switch bit
* - did<2:0> == others => must not be used
 * - No other loads/stores have an effect on the pending switch bit
* - The switch bus from POW can clear the pending switch bit
@@ -1053,7 +1053,7 @@ static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
}
#ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
/**
@@ -1097,7 +1097,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
* so the caller must ensure that there is not a pending tag switch.
*
 * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1131,7 +1131,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
* requesting the new work.
*
 * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1148,7 +1148,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
}
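A minimal work loop around the synchronous request above; CVMX_POW_WAIT is assumed to be the blocking member of cvmx_pow_wait_t, and the processing step is left out:

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-pow.h>

static void example_pow_work_loop(void)
{
	for (;;) {
		/* Stall until work is available or the request times out;
		 * NULL means nothing was returned. */
		cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);

		if (!work)
			continue;

		/* ... process the work queue entry ... */
	}
}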
/**
- * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
* This function waits for any previous tag switch to complete before
* requesting the null_rd.
*
@@ -1183,11 +1183,11 @@ static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
* there is not a pending tag switch.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1212,11 +1212,11 @@ static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
* tag switch to complete before requesting the new work.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1234,7 +1234,7 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
* to wait for the response.
*
* @scr_addr: Scratch memory address to get result from Byte address,
- * must be 8 byte aligned.
+ * must be 8 byte aligned.
*
* Returns Returns the WQE from the scratch register, or NULL if no
* work was available.
@@ -1260,7 +1260,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
* @wqe_ptr: pointer to a work queue entry returned by the POW
*
* Returns 0 if pointer is valid
- * 1 if invalid (no work was returned)
+ * 1 if invalid (no work was returned)
*/
static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
{
@@ -1314,7 +1314,7 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1361,7 +1361,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1390,7 +1390,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
* previous tag switch has completed.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @group: group value for the work queue entry.
@@ -1429,7 +1429,7 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1468,10 +1468,10 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
* before requesting the tag switch.
*
* @wqp: pointer to work queue entry to submit. This entry is updated
- * to match the other parameters
+ * to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
- * @group: group value for the work queue entry.
+ * @group: group value for the work queue entry.
*/
static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1560,7 +1560,7 @@ static inline void cvmx_pow_tag_sw_null(void)
* unrelated to the tag that the core currently holds.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @qos: Input queue to add to.
@@ -1592,7 +1592,7 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
ptr.sio.offset = cvmx_ptr_to_phys(wqp);
/*
- * SYNC write to memory before the work submit. This is
+ * SYNC write to memory before the work submit. This is
* necessary as POW may read values from DRAM at this time.
*/
CVMX_SYNCWS;
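A hedged submission sketch matching the parameter list documented above (wqp, tag, tag_type, qos, group); the wrapper name and the CVMX_POW_TAG_TYPE_ORDERED enumerator are assumptions for illustration.

static inline void my_submit_ordered(cvmx_wqe_t *wqp, uint32_t tag)
{
        /* Input queue 0, group 0, ORDERED tag; the SYNCWS above runs inside. */
        cvmx_pow_work_submit(wqp, tag, CVMX_POW_TAG_TYPE_ORDERED,
                             0 /* qos */, 0 /* group */);
}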
@@ -1604,11 +1604,11 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
* indicates which groups each core will accept work from. There are
* 16 groups.
*
- * @core_num: core to apply mask to
+ * @core_num: core to apply mask to
* @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
- * representing groups 0-15.
- * Each 1 bit in the mask enables the core to accept work from
- * the corresponding group.
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
*/
static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
{
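A sketch of typical calls using the 16 valid mask bits described above; cvmx_get_core_num() is assumed to be available from cvmx.h and the wrapper names are hypothetical.

static inline void my_accept_all_groups(void)
{
        /* Bits 0-15 enable groups 0-15 for the calling core. */
        cvmx_pow_set_group_mask(cvmx_get_core_num(), 0xffff);
}

static inline void my_accept_groups_0_and_3(void)
{
        /* Restrict the calling core to groups 0 and 3 only. */
        cvmx_pow_set_group_mask(cvmx_get_core_num(), (1 << 0) | (1 << 3));
}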
@@ -1623,14 +1623,14 @@ static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
* This function sets POW static priorities for a core. Each input queue has
* an associated priority value.
*
- * @core_num: core to apply priorities to
- * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
- * Highest priority is 0 and lowest is 7. A priority value
- * of 0xF instructs POW to skip the Input Queue when
- * scheduling to this specific core.
- * NOTE: priorities should not have gaps in values, meaning
- * {0,1,1,1,1,1,1,1} is a valid configuration while
- * {0,2,2,2,2,2,2,2} is not.
+ * @core_num: core to apply priorities to
+ * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
*/
static inline void cvmx_pow_set_priority(uint64_t core_num,
const uint8_t priority[])
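Using the gap-free example given in the comment above, a call might look like this sketch; cvmx_get_core_num() and the wrapper name are assumed for illustration.

static inline void my_set_default_priorities(void)
{
        /* Queue 0 highest, queues 1-7 share the next level, no gaps in values. */
        const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };

        cvmx_pow_set_priority(cvmx_get_core_num(), prio);
}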
@@ -1708,8 +1708,8 @@ static inline void cvmx_pow_set_priority(uint64_t core_num,
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched_nocheck(
uint32_t tag,
@@ -1794,8 +1794,8 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1819,8 +1819,8 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
 * Deschedules the current work queue entry.
*
* @no_sched: no schedule flag value to be set on the work queue
- * entry. If this is set the entry will not be
- * rescheduled.
+ * entry. If this is set the entry will not be
+ * rescheduled.
*/
static inline void cvmx_pow_desched(uint64_t no_sched)
{
@@ -1863,7 +1863,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
*****************************************************/
/*
- * Number of bits of the tag used by software. The SW bits are always
+ * Number of bits of the tag used by software. The SW bits are always
* a contiguous block of the high starting at bit 31. The hardware
* bits are always the low bits. By default, the top 8 bits of the
* tag are reserved for software, and the low 24 are set by the IPD
@@ -1890,7 +1890,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* are defined here.
*/
/* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO 0x1
@@ -1905,12 +1905,12 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* This function creates a 32 bit tag value from the two values provided.
*
* @sw_bits: The upper bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * hw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * hw_bits parameter.
*
* @hw_bits: The lower bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * sw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * sw_bits parameter.
*
* Returns 32 bit value of the combined hw and sw bits.
*/
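With the default split noted earlier (top 8 tag bits reserved for software, low 24 set by hardware), the combination described above reduces to a shift and two masks; the helper name below is hypothetical and assumes that default split.

static inline uint32_t my_compose_tag(uint64_t sw_bits, uint64_t hw_bits)
{
        /* Upper 8 bits come from sw_bits, lower 24 bits from hw_bits. */
        return (uint32_t)(((sw_bits & 0xff) << 24) | (hw_bits & 0xffffff));
}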
@@ -1957,7 +1957,7 @@ static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
*
* @buffer: Buffer to store capture into
* @buffer_size:
- * The size of the supplied buffer
+ * The size of the supplied buffer
*
* Returns Zero on success, negative on failure
*/
@@ -1968,7 +1968,7 @@ extern int cvmx_pow_capture(void *buffer, int buffer_size);
*
* @buffer: POW capture from cvmx_pow_capture()
* @buffer_size:
- * Size of the buffer
+ * Size of the buffer
*/
extern void cvmx_pow_display(void *buffer, int buffer_size);
diff --git a/arch/mips/include/asm/octeon/cvmx-scratch.h b/arch/mips/include/asm/octeon/cvmx-scratch.h
index 96b70cfd6245..8d21cc5e4e40 100644
--- a/arch/mips/include/asm/octeon/cvmx-scratch.h
+++ b/arch/mips/include/asm/octeon/cvmx-scratch.h
@@ -39,7 +39,7 @@
* Note: This define must be a long, not a long long in order to
* compile without warnings for both 32bit and 64bit.
*/
-#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
/**
* Reads an 8 bit value from the processor local scratchpad memory.
diff --git a/arch/mips/include/asm/octeon/cvmx-spi.h b/arch/mips/include/asm/octeon/cvmx-spi.h
index 3bf53b537bcf..d5038cc4b475 100644
--- a/arch/mips/include/asm/octeon/cvmx-spi.h
+++ b/arch/mips/include/asm/octeon/cvmx-spi.h
@@ -84,11 +84,11 @@ static inline int cvmx_spi_is_spi_interface(int interface)
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
@@ -102,11 +102,11 @@ extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
 * with its corresponding system.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* Returns Zero on success, negative of failure.
*/
@@ -154,7 +154,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
/**
* Get current SPI4 initialization callbacks
*
- * @callbacks: Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
@@ -171,11 +171,11 @@ extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
@@ -187,11 +187,11 @@ extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
* detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -204,11 +204,11 @@ extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -221,11 +221,11 @@ extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -238,11 +238,11 @@ extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -255,11 +255,11 @@ extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
index a672abb1bc4f..4f09cff8b8c0 100644
--- a/arch/mips/include/asm/octeon/cvmx-spinlock.h
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -26,7 +26,7 @@
***********************license end**************************************/
/**
- * Implementation of spinlocks for Octeon CVMX. Although similar in
+ * Implementation of spinlocks for Octeon CVMX. Although similar in
* function to Linux kernel spinlocks, they are not compatible.
* Octeon CVMX spinlocks are only used to synchronize with the boot
* monitor and other non-Linux programs running in the system.
@@ -50,8 +50,8 @@ typedef struct {
} cvmx_spinlock_t;
/* note - macros not expanded in inline ASM, so values hardcoded */
-#define CVMX_SPINLOCK_UNLOCKED_VAL 0
-#define CVMX_SPINLOCK_LOCKED_VAL 1
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
@@ -96,7 +96,7 @@ static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
* @lock: pointer to lock structure
*
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
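Because the return values follow the Linux semantics stated above, the trylock fits the familiar pattern; this is a sketch with a hypothetical caller name.

static inline int my_try_critical_section(cvmx_spinlock_t *lock)
{
        if (cvmx_spinlock_trylock(lock) != 0)
                return -1;              /* 1: lock held by someone else */

        /* ... critical section ... */

        cvmx_spinlock_unlock(lock);
        return 0;                       /* 0: lock was taken */
}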
@@ -104,16 +104,16 @@ static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bnez %[tmp], 2f \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set reorder \n" :
+ " bnez %[tmp], 2f \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -129,14 +129,14 @@ static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
- " bnez %[tmp], 1b \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n" :
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -163,17 +163,17 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
unsigned int tmp;
unsigned int sav;
- __asm__ __volatile__(".set noreorder \n"
- ".set noat \n"
+ __asm__ __volatile__(".set noreorder \n"
+ ".set noat \n"
"1: ll %[tmp], %[val] \n"
- " bbit1 %[tmp], 31, 1b \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
: : "memory");
@@ -187,7 +187,7 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
*
* @word: word to lock bit 31 of
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
@@ -198,15 +198,15 @@ static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
".set noat\n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bbit1 %[tmp], 31, 2f \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 2f \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp)
: : "memory");
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
index 61dd5741afe4..2131197422e5 100644
--- a/arch/mips/include/asm/octeon/cvmx-sysinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -85,7 +85,7 @@ struct cvmx_sysinfo {
char board_serial_number[OCTEON_SERIAL_LEN];
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These values will be 0 if
* CF is not present. Note that these addresses are physical
* addresses, and it is up to the application to use the
@@ -123,25 +123,25 @@ extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
/**
* This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
+ * (bootmem descriptor) @board_type: Octeon board
+ * type enumeration
*
* @board_rev_major:
- * Board major revision
+ * Board major revision
* @board_rev_minor:
- * Board minor revision
+ * Board minor revision
* @cpu_clock_hz:
- * CPU clock freqency in hertz
+ * CPU clock frequency in hertz
*
* Returns 0: Failure
- * 1: success
+ * 1: success
*/
extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index df762389e271..aa0d3d0de75c 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -101,23 +101,23 @@ typedef union {
* - 1 = Malformed L4
* - 2 = L4 Checksum Error: the L4 checksum value is
* - 3 = UDP Length Error: The UDP length field would
- * make the UDP data longer than what remains in
- * the IP packet (as defined by the IP header
- * length field).
+ * make the UDP data longer than what remains in
+ * the IP packet (as defined by the IP header
+ * length field).
* - 4 = Bad L4 Port: either the source or destination
- * TCP/UDP port is 0.
+ * TCP/UDP port is 0.
* - 8 = TCP FIN Only: the packet is TCP and only the
- * FIN flag set.
+ * FIN flag set.
* - 9 = TCP No Flags: the packet is TCP and no flags
- * are set.
+ * are set.
* - 10 = TCP FIN RST: the packet is TCP and both FIN
- * and RST are set.
+ * and RST are set.
* - 11 = TCP SYN URG: the packet is TCP and both SYN
- * and URG are set.
+ * and URG are set.
* - 12 = TCP SYN RST: the packet is TCP and both SYN
- * and RST are set.
+ * and RST are set.
* - 13 = TCP SYN FIN: the packet is TCP and both SYN
- * and FIN are set.
+ * and FIN are set.
*/
uint64_t L4_error:1;
/* set if the packet is a fragment */
@@ -127,16 +127,16 @@ typedef union {
* failure indicated in err_code below, decode:
*
* - 1 = Not IP: the IP version field is neither 4 nor
- * 6.
+ * 6.
* - 2 = IPv4 Header Checksum Error: the IPv4 header
- * has a checksum violation.
+ * has a checksum violation.
* - 3 = IP Malformed Header: the packet is not long
- * enough to contain the IP header.
+ * enough to contain the IP header.
* - 4 = IP Malformed: the packet is not long enough
* to contain the bytes indicated by the IP
* header. Pad is allowed.
* - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
- * Hop Count field are zero.
+ * Hop Count field are zero.
* - 6 = IP Options
*/
uint64_t IP_exc:1;
@@ -243,46 +243,46 @@ typedef union {
* decode:
*
* - 1 = partial error: a packet was partially
- * received, but internal buffering / bandwidth
- * was not adequate to receive the entire
- * packet.
+ * received, but internal buffering / bandwidth
+ * was not adequate to receive the entire
+ * packet.
* - 2 = jabber error: the RGMII packet was too large
- * and is truncated.
+ * and is truncated.
* - 3 = overrun error: the RGMII packet is longer
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 4 = oversize error: the RGMII packet is longer
- * than allowed.
+ * than allowed.
* - 5 = alignment error: the RGMII packet is not an
- * integer number of bytes
- * and had an FCS error (100M and 10M only).
+ * integer number of bytes
+ * and had an FCS error (100M and 10M only).
* - 6 = fragment error: the RGMII packet is shorter
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 7 = GMX FCS error: the RGMII packet had an FCS
- * error.
+ * error.
* - 8 = undersize error: the RGMII packet is shorter
- * than allowed.
+ * than allowed.
* - 9 = extend error: the RGMII packet had an extend
- * error.
+ * error.
* - 10 = length mismatch error: the RGMII packet had
- * a length that did not match the length field
- * in the L2 HDR.
+ * a length that did not match the length field
+ * in the L2 HDR.
* - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
- * packet had one or more data reception errors
- * (RXERR) or the SPI4 packet had one or more
- * DIP4 errors.
+ * packet had one or more data reception errors
+ * (RXERR) or the SPI4 packet had one or more
+ * DIP4 errors.
* - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
- * packet was not large enough to cover the
- * skipped bytes or the SPI4 packet was
- * terminated with an About EOPS.
+ * packet was not large enough to cover the
+ * skipped bytes or the SPI4 packet was
+ * terminated with an Abort EOPS.
* - 13 = RGMII nibble error/SPI4 Port NXA Error: the
- * RGMII packet had a studder error (data not
- * repeated - 10/100M only) or the SPI4 packet
- * was sent to an NXA.
+ * RGMII packet had a studder error (data not
+ * repeated - 10/100M only) or the SPI4 packet
+ * was sent to an NXA.
* - 16 = FCS error: a SPI4.2 packet had an FCS error.
* - 17 = Skip error: a packet was not large enough to
- * cover the skipped bytes.
+ * cover the skipped bytes.
* - 18 = L2 header malformed: the packet is not long
- * enough to contain the L2.
+ * enough to contain the L2.
*/
uint64_t rcv_error:1;
@@ -309,7 +309,7 @@ typedef struct {
/*****************************************************************
* WORD 0
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -323,14 +323,14 @@ typedef struct {
/**
* Next pointer used by hardware for list maintenance.
* May be written/read by HW before the work queue
- * entry is scheduled to a PP
+ * entry is scheduled to a PP
* (Only 36 bits used in Octeon 1)
*/
uint64_t next_ptr:40;
/*****************************************************************
* WORD 1
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -362,8 +362,8 @@ typedef struct {
/**
* WORD 2 HW WRITE: the following 64-bits are filled in by
- * hardware when a packet arrives This indicates a variety of
- * status and error conditions.
+ * hardware when a packet arrives. This indicates a variety of
+ * status and error conditions.
*/
cvmx_pip_wqe_word2 word2;
@@ -373,15 +373,15 @@ typedef struct {
union cvmx_buf_ptr packet_ptr;
/**
- * HW WRITE: octeon will fill in a programmable amount from the
- * packet, up to (at most, but perhaps less) the amount
- * needed to fill the work queue entry to 128 bytes
+ * HW WRITE: octeon will fill in a programmable amount from the
+ * packet, up to (at most, but perhaps less) the amount
+ * needed to fill the work queue entry to 128 bytes
*
- * If the packet is recognized to be IP, the hardware starts
- * (except that the IPv4 header is padded for appropriate
- * alignment) writing here where the IP header starts. If the
- * packet is not recognized to be IP, the hardware starts
- * writing the beginning of the packet here.
+ * If the packet is recognized to be IP, the hardware starts
+ * (except that the IPv4 header is padded for appropriate
+ * alignment) writing here where the IP header starts. If the
+ * packet is not recognized to be IP, the hardware starts
+ * writing the beginning of the packet here.
*/
uint8_t packet_data[96];
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index db58beab6cb2..f991e7701d3d 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -76,14 +76,14 @@ enum cvmx_mips_space {
#endif
#if CVMX_ENABLE_DEBUG_PRINTS
-#define cvmx_dprintf printk
+#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif
-#define CVMX_MAX_CORES (16)
-#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
-#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
+#define CVMX_MAX_CORES (16)
+#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
@@ -133,8 +133,8 @@ static inline uint64_t cvmx_build_io_address(uint64_t major_did,
*
* Example: cvmx_build_bits(39,24,value)
* <pre>
- * 6 5 4 3 3 2 1
- * 3 5 7 9 1 3 5 7 0
+ * 6 5 4 3 3 2 1
+ * 3 5 7 9 1 3 5 7 0
* +-------+-------+-------+-------+-------+-------+-------+------+
* 000000000000000000000000___________value000000000000000000000000
* </pre>
@@ -183,7 +183,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
* memory pointer (void *).
*
* @physical_address:
- * Hardware physical address to memory
+ * Hardware physical address to memory
* Returns Pointer to memory
*/
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
@@ -207,10 +207,10 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_WRITE64(TYPE, ST) \
-static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
-{ \
- *CASTPTR(volatile TYPE##_t, addr) = val; \
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
}
@@ -221,19 +221,19 @@ static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_READ64(TYPE, LT) \
-static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
-{ \
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
return *CASTPTR(volatile TYPE##_t, addr); \
}
/* The following defines 8 functions for writing to a 64bit address. Each
takes two arguments, the address and the value to write.
- cvmx_write64_int64 cvmx_write64_uint64
- cvmx_write64_int32 cvmx_write64_uint32
- cvmx_write64_int16 cvmx_write64_uint16
- cvmx_write64_int8 cvmx_write64_uint8 */
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
@@ -246,10 +246,10 @@ CVMX_BUILD_WRITE64(uint8, "sb");
/* The following defines 8 functions for reading from a 64bit address. Each
takes the address as the only argument
- cvmx_read64_int64 cvmx_read64_uint64
- cvmx_read64_int32 cvmx_read64_uint32
- cvmx_read64_int16 cvmx_read64_uint16
- cvmx_read64_int8 cvmx_read64_uint8 */
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
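A sketch using two of the generated accessors named in the comments above; the wrapper name is hypothetical and the addresses are assumed to be valid 64-bit I/O addresses (for example, built with cvmx_build_io_address()).

static inline void my_mirror_word(uint64_t src_addr, uint64_t dst_addr)
{
        uint64_t val = cvmx_read64_uint64(src_addr);

        cvmx_write64_uint64(dst_addr, val);
}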
@@ -389,7 +389,7 @@ static inline void cvmx_wait(uint64_t cycles)
/**
* Reads a chip global cycle counter. This counts CPU cycles since
- * chip reset. The counter is 64 bit.
+ * chip reset. The counter is 64 bit.
 * This register does not exist on CN38XX pass 1 silicon
*
* Returns Global chip cycle count since chip reset.
@@ -453,7 +453,7 @@ static inline uint32_t cvmx_octeon_num_cores(void)
/**
* Read a byte of fuse data
- * @byte_addr: address to read
+ * @byte_addr: address to read
*
* Returns fuse value: 0 or 1
*/
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8008da2f8779..90e05a8d4b15 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -35,7 +35,7 @@
#include <asm/octeon/cvmx-rnm-defs.h>
enum octeon_feature {
- /* CN68XX uses port kinds for packet interface */
+ /* CN68XX uses port kinds for packet interface */
OCTEON_FEATURE_PKND,
/* CN68XX has different fields in word0 - word2 */
OCTEON_FEATURE_CN68XX_WQE,
@@ -51,7 +51,7 @@ enum octeon_feature {
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
- /* Does this Octeon support SRIOs */
+ /* Does this Octeon support SRIOs */
OCTEON_FEATURE_SRIO,
/* Does this Octeon support Interlaken */
OCTEON_FEATURE_ILK,
@@ -75,7 +75,7 @@ enum octeon_feature {
/* Octeon MDIO block supports clause 45 transactions for 10
* Gig support */
OCTEON_FEATURE_MDIO_CLAUSE_45,
- /*
+ /*
* CN52XX and CN56XX used a block named NPEI for PCIe
* access. Newer chips replaced this with SLI+DPI.
*/
@@ -94,10 +94,10 @@ static inline int cvmx_fuse_read(int fuse);
* be kept out of fast path code.
*
* @feature: Feature to check for. This should always be a constant so the
- * compiler can remove the switch statement through optimization.
+ * compiler can remove the switch statement through optimization.
*
* Returns Non zero if the feature exists. Zero if the feature does not
- * exist.
+ * exist.
*/
static inline int octeon_has_feature(enum octeon_feature feature)
{
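As the comment stresses, the argument should be a compile-time constant so the switch can be optimized away; a typical call-site sketch follows, with a hypothetical wrapper name.

static void my_probe_pcie(void)
{
        /* A constant enum value lets the compiler fold the whole check. */
        if (octeon_has_feature(OCTEON_FEATURE_PCIE))
                pr_info("PCIe-capable Octeon detected\n");
}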
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 349bb2ba840c..e2c122c6a657 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -29,7 +29,7 @@
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
- * to determine what model of chip the software is running on. Models
+ * to determine what model of chip the software is running on. Models
* ending in 'XX' match multiple models (families), while specific
* models match only that model. If a pass (revision) is specified,
* then only that revision will be matched. Care should be taken when
@@ -40,183 +40,183 @@
* subject to change at anytime without notice.
*
* NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
- * macros should be used outside of this file. All other macros are
+ * macros should be used outside of this file. All other macros are
* for internal use only, and may change without notice.
*/
-#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_FAMILY_MASK 0x00ffff00
/* Flag bits in top byte */
/* Ignores revision in model checks */
-#define OM_IGNORE_REVISION 0x01000000
+#define OM_IGNORE_REVISION 0x01000000
/* Check submodels */
-#define OM_CHECK_SUBMODEL 0x02000000
+#define OM_CHECK_SUBMODEL 0x02000000
/* Match all models previous than the one specified */
#define OM_MATCH_PREVIOUS_MODELS 0x04000000
/* Ignores the minor revison on newer parts */
#define OM_IGNORE_MINOR_REVISION 0x08000000
-#define OM_FLAG_MASK 0xff000000
+#define OM_FLAG_MASK 0xff000000
/* Match all cn5XXX Octeon models. */
-#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
/* Match all cn6XXX Octeon models. */
-#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
-#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
/*
* CNF7XXX models with new revision encoding
*/
-#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
-#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN6XXX models with new revision encoding
*/
-#define OCTEON_CN68XX_PASS1_0 0x000d9100
-#define OCTEON_CN68XX_PASS1_1 0x000d9101
-#define OCTEON_CN68XX_PASS1_2 0x000d9102
-#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS1_2 0x000d9102
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
-#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
-#define OCTEON_CN66XX_PASS1_0 0x000d9200
-#define OCTEON_CN66XX_PASS1_2 0x000d9202
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
-#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS1_0 0x000d9000
-#define OCTEON_CN63XX_PASS1_1 0x000d9001
-#define OCTEON_CN63XX_PASS1_2 0x000d9002
-#define OCTEON_CN63XX_PASS2_0 0x000d9008
-#define OCTEON_CN63XX_PASS2_1 0x000d9009
-#define OCTEON_CN63XX_PASS2_2 0x000d900a
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
-#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
-#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN5XXX models with new revision encoding
*/
-#define OCTEON_CN58XX_PASS1_0 0x000d0300
-#define OCTEON_CN58XX_PASS1_1 0x000d0301
-#define OCTEON_CN58XX_PASS1_2 0x000d0303
-#define OCTEON_CN58XX_PASS2_0 0x000d0308
-#define OCTEON_CN58XX_PASS2_1 0x000d0309
-#define OCTEON_CN58XX_PASS2_2 0x000d030a
-#define OCTEON_CN58XX_PASS2_3 0x000d030b
-
-#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
-#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
-
-#define OCTEON_CN56XX_PASS1_0 0x000d0400
-#define OCTEON_CN56XX_PASS1_1 0x000d0401
-#define OCTEON_CN56XX_PASS2_0 0x000d0408
-#define OCTEON_CN56XX_PASS2_1 0x000d0409
-
-#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
-#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
-
-#define OCTEON_CN57XX OCTEON_CN56XX
-#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN55XX OCTEON_CN56XX
-#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN54XX OCTEON_CN56XX
-#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN50XX_PASS1_0 0x000d0600
-
-#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
/*
* NOTE: Octeon CN5000F model is not identifiable using the
 * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
*/
-#define OCTEON_CN52XX_PASS1_0 0x000d0700
-#define OCTEON_CN52XX_PASS2_0 0x000d0708
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
-#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
-#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
/*
* CN3XXX models with old revision enconding
*/
-#define OCTEON_CN38XX_PASS1 0x000d0000
-#define OCTEON_CN38XX_PASS2 0x000d0001
-#define OCTEON_CN38XX_PASS3 0x000d0003
-#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+#define OCTEON_CN38XX_PASS1 0x000d0000
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
-#define OCTEON_CN36XX OCTEON_CN38XX
-#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
-#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
-#define OCTEON_CN31XX_PASS1 0x000d0100
-#define OCTEON_CN31XX_PASS1_1 0x000d0102
-#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
/*
* This model is only used for internal checks, it is not a valid
* model for the OCTEON_MODEL environment variable. This matches the
* CN3010 and CN3005 but NOT the CN3020.
*/
-#define OCTEON_CN30XX_PASS1 0x000d0200
-#define OCTEON_CN30XX_PASS1_1 0x000d0202
-#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
-#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
/*
* This matches the complete family of CN3xxx CPUs, and not subsequent
* models
*/
-#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
-#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
-#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
/* These are used to cover entire families of OCTEON processors */
#define OCTEON_FAM_1 (OCTEON_CN3XXX)
@@ -243,18 +243,18 @@
*/
/* Masks used for the various types of model/family/revision matching */
-#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
-#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
/* CN5XXX and later use different layout of bits in the revision ID field */
-#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
-#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
/* forward declarations */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
@@ -264,7 +264,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
/* NOTE: This for internal use only! */
#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
-((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
+((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -276,7 +276,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
)) || \
- (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
+ (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -320,7 +320,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
* Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
* is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
* I.e.:
- * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
*/
#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
#define OCTEON_IS_COMMON_BINARY() 1
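A runtime usage sketch consistent with the comment above and with the NPEI/SLI+DPI note in octeon-feature.h earlier in this patch; both init helpers are hypothetical names, not real kernel functions.

static void my_init_pcie_block(void)
{
        /* Runtime check only; #if use must go through CVMX_COMPILED_FOR(). */
        if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
                setup_npei_pcie();      /* hypothetical: NPEI-based chips */
        else
                setup_sli_dpi_pcie();   /* hypothetical: newer SLI+DPI chips */
}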
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 254e9954ed71..a2eed23c49a9 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -75,15 +75,15 @@ struct octeon_boot_descriptor {
uint32_t argc;
uint32_t argv[OCTEON_ARGV_MAX_ARGS];
-#define BOOT_FLAG_INIT_CORE (1 << 0)
-#define OCTEON_BL_FLAG_DEBUG (1 << 1)
-#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
/* If set, use uart1 for console */
-#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
/* If set, use PCI console */
-#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
/* Call exit on break on serial port */
-#define OCTEON_BL_FLAG_BREAK (1 << 5)
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
uint32_t flags;
uint32_t core_mask;
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index c66734bd3382..64ba56a02843 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -22,7 +22,7 @@
#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
/*
- * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
+ * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
* place BAR1 so it is the same for both.
*/
#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
index 9ce5a1e7e14c..2474fc5d1751 100644
--- a/arch/mips/include/asm/paccess.h
+++ b/arch/mips/include/asm/paccess.h
@@ -43,7 +43,7 @@ struct __large_pstruct { unsigned long buf[100]; };
case 1: __get_dbe_asm("lb"); break; \
case 2: __get_dbe_asm("lh"); break; \
case 4: __get_dbe_asm("lw"); break; \
- case 8: __get_dbe_asm("ld"); break; \
+ case 8: __get_dbe_asm("ld"); break; \
default: __get_dbe_unknown(); break; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index dbaec94046da..99fc547af9d3 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
@@ -95,11 +95,11 @@ extern void copy_user_highpage(struct page *to, struct page *from,
#ifdef CONFIG_64BIT_PHYS_ADDR
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
- #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+ #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
- #define pte_val(x) ((x).pte)
+ #define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
#else
@@ -191,8 +191,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
unsigned long __pfn = (pfn); \
int __n = pfn_to_nid(__pfn); \
((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn + \
- NODE_DATA(__n)->node_spanned_pages) \
- : 0); \
+ NODE_DATA(__n)->node_spanned_pages) \
+ : 0); \
})
#endif
@@ -206,7 +206,7 @@ extern int __virt_addr_valid(const volatile void *kaddr);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
+#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
PHYS_OFFSET)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \
PHYS_OFFSET)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index d69ea743272b..b8e24fd4cbc5 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -12,7 +12,7 @@
/*
* This file essentially defines the interface between board
- * specific PCI code and MIPS common PCI code. Should potentially put
+ * specific PCI code and MIPS common PCI code. Should potentially put
* into include/asm/pci.h file.
*/
@@ -20,7 +20,7 @@
#include <linux/of.h>
/*
- * Each pci channel is a top-level PCI bus seem by CPU. A machine with
+ * Each pci channel is a top-level PCI bus seen by the CPU. A machine with
* multiple PCI channels may have multiple PCI host controllers or a
* single controller supporting multiple channels.
*/
@@ -99,7 +99,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
struct pci_dev;
/*
- * The PCI address space does equal the physical memory address space. The
+ * The PCI address space does equal the physical memory address space. The
* networking and block device layers use this boolean for bounce buffer
* decisions. This is set if any hose does not have an IOMMU.
*/
@@ -144,8 +144,13 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
extern char * (*pcibios_plat_setup)(char *str);
+#ifdef CONFIG_OF
/* this function parses memory ranges from a device node */
extern void pci_load_of_ranges(struct pci_controller *hose,
struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node) {}
+#endif
#endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index be44fb0266da..af2c8a351ca7 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -85,7 +85,7 @@ typedef volatile struct bridge_s {
#define b_wid_llp b_widget.w_llp_cfg
#define b_wid_tflush b_widget.w_tflush
- /* bridge-specific widget configuration 0x000058-0x00007F */
+ /* bridge-specific widget configuration 0x000058-0x00007F */
bridgereg_t _pad_000058;
bridgereg_t b_wid_aux_err; /* 0x00005C */
bridgereg_t _pad_000060;
@@ -167,8 +167,8 @@ typedef volatile struct bridge_s {
bridgereg_t __pad; /* 0x0002{80,,,88} */
bridgereg_t reg; /* 0x0002{84,,,8C} */
} b_rrb_map[2]; /* 0x000280 */
-#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
-#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
+#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
+#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
bridgereg_t _pad_000290;
bridgereg_t b_resp_status; /* 0x000294 */
@@ -233,7 +233,7 @@ typedef volatile struct bridge_s {
u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
/* External Address Translation Entry RAM 0x080000-0x0FFFFF */
- bridge_ate_t b_ext_ate_ram[0x10000];
+ bridge_ate_t b_ext_ate_ram[0x10000];
/* Reserved 0x100000-0x1FFFFF */
char _pad_100000[0x200000-0x100000];
@@ -400,7 +400,7 @@ typedef struct bridge_err_cmdword_s {
#define BRIDGE_REV_A 0x1
#define BRIDGE_REV_B 0x2
#define BRIDGE_REV_C 0x3
-#define BRIDGE_REV_D 0x4
+#define BRIDGE_REV_D 0x4
/* Bridge widget status register bits definition */
@@ -691,21 +691,21 @@ typedef struct bridge_err_cmdword_s {
#define BRIDGE_CREDIT 3
/* RRB assignment register */
-#define BRIDGE_RRB_EN 0x8 /* after shifting down */
-#define BRIDGE_RRB_DEV 0x7 /* after shifting down */
-#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */
-#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */
+#define BRIDGE_RRB_EN 0x8 /* after shifting down */
+#define BRIDGE_RRB_DEV 0x7 /* after shifting down */
+#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */
+#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */
/* RRB status register */
-#define BRIDGE_RRB_VALID(r) (0x00010000<<(r))
-#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r))
+#define BRIDGE_RRB_VALID(r) (0x00010000<<(r))
+#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r))
/* RRB clear register */
-#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r))
+#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r))
/* xbox system controller declarations */
-#define XBOX_BRIDGE_WID 8
-#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */
+#define XBOX_BRIDGE_WID 8
+#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */
#define XBOX_RPS_EXISTS 1 << 6 /* RPS bit in status register */
#define XBOX_RPS_FAIL 1 << 4 /* RPS status bit in register */
@@ -838,7 +838,7 @@ struct bridge_controller {
bridge_t *base;
nasid_t nasid;
unsigned int widget_id;
- unsigned int irq_cpu;
+ unsigned int irq_cpu;
u64 baddr;
unsigned int pci_int[8];
};
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 5d56bb230345..b4204c179b97 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -47,7 +47,7 @@
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
-#define VMALLOC_START MAP_BASE
+#define VMALLOC_START MAP_BASE
#define PKMAP_BASE (0xfe000000UL)
@@ -136,7 +136,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
+#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
@@ -155,7 +155,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
(((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
+ (((_pte).pte >> 10) << 6 ))
#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
(((off) & 0x38) << 2 ) | \
@@ -167,14 +167,14 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
+#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+ ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x) (((x).val >> 8) & 0x1f)
-#define __swp_offset(x) ((x).val >> 13)
+#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+ ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
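The two encodings above pack a swap type and offset into a non-present PTE while keeping the VALID and GLOBAL bits clear. A small user-space sketch of the second (default) layout — type in five bits from bit 8, offset from bit 13 — purely for illustration:

#include <assert.h>
#include <stdio.h>

/* Mirrors the !CONFIG_64BIT_PHYS_ADDR layout above. */
#define SWP_TYPE(val)		(((val) >> 8) & 0x1f)
#define SWP_OFFSET(val)		((val) >> 13)
#define SWP_ENTRY(type, off)	(((type) << 8) | ((off) << 13))

int main(void)
{
	unsigned long e = SWP_ENTRY(3ul, 0x1234ul);

	assert(SWP_TYPE(e) == 3 && SWP_OFFSET(e) == 0x1234);
	/* Bits 0 (GLOBAL) and 1 (VALID) stay clear, as required. */
	printf("entry=%#lx low bits=%#lx\n", e, e & 0x3);
	return 0;
}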
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -184,7 +184,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
#else
/*
@@ -194,7 +194,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
(((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
+ (((_pte).pte >> 8) << 4))
#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
(((off) & 0x8) << 2) | \
@@ -208,7 +208,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
#else
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#endif
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 013d5f781263..e1c49a96807d 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -115,7 +115,7 @@
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD (1)
+#define USER_PTRS_PER_PGD (1)
#else
#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
#endif
@@ -288,7 +288,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __swp_type(x) (((x).val >> 32) & 0xff)
#define __swp_offset(x) ((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index f6a0439a4085..32aea4852fb0 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -21,7 +21,7 @@
* Similar to the Alpha port, we need to keep track of the ref
* and mod bits in software. We have a software "yeah you can read
* from this page" bit, and a hardware one which actually lets the
- * process read from the page. On the same token we have a software
+ * process read from the page. By the same token we have a software
* writable bit and the real hardware one which actually lets the
* process write to the page, this keeps a mod bit via the hardware
* dirty bit.
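Purely as an illustration of the scheme this comment describes (bit positions below are made up, not the real _PAGE_* layout): a read fault on a software-readable page records the reference in a software bit and only then grants the hardware permission bit, which is how the accessed state ends up tracked in software.

#include <stdio.h>

#define SW_READABLE	(1ul << 7)	/* software: "yeah you can read" */
#define SW_ACCESSED	(1ul << 9)	/* software reference bit */
#define HW_VALID	(1ul << 1)	/* hardware bit that lets the read happen */

static unsigned long fixup_read_fault(unsigned long pte)
{
	if (!(pte & SW_READABLE))
		return pte;			/* genuine protection fault */
	return pte | SW_ACCESSED | HW_VALID;
}

int main(void)
{
	printf("%#lx\n", fixup_read_fault(SW_READABLE));	/* 0x282 */
	return 0;
}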
@@ -41,9 +41,9 @@
#define _PAGE_GLOBAL (1 << 0)
#define _PAGE_VALID_SHIFT 1
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ (1 << 1) /* synonym */
+#define _PAGE_SILENT_READ (1 << 1) /* synonym */
#define _PAGE_DIRTY_SHIFT 2
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1 << 2)
#define _CACHE_SHIFT 3
#define _CACHE_MASK (7 << 3)
@@ -52,7 +52,7 @@
* The following bits are implemented in software
*
* _PAGE_FILE semantics: set:pagecache unset:swap
- */
+ */
#define _PAGE_PRESENT_SHIFT 6
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT 7
@@ -134,7 +134,7 @@
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#else
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -143,7 +143,7 @@
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
#else
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif
/* Page cannot be executed */
@@ -159,10 +159,10 @@
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-/* synonym */
+/* synonym */
#define _PAGE_SILENT_READ (_PAGE_VALID)
-/* The MIPS dirty bit */
+/* The MIPS dirty bit */
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _PAGE_SILENT_WRITE (_PAGE_DIRTY)
@@ -175,7 +175,7 @@
#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
#ifndef _PFN_SHIFT
-#define _PFN_SHIFT PAGE_SHIFT
+#define _PFN_SHIFT PAGE_SHIFT
#endif
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
@@ -230,28 +230,28 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
/* No penalty for being coherent on the SB1, so just
use it for "noncoherent" spaces, too. Shouldn't hurt. */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#else
-#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */
-#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */
+#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
+#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */
+#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */
+#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */
#endif
#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index ec50d52cfb74..fdc62fb5630d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -112,7 +112,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* it better already be global)
*/
if (pte_none(*buddy)) {
- buddy->pte_low |= _PAGE_GLOBAL;
+ buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
}
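A standalone restatement of the buddy logic above, simplified and with an invented bit value; it assumes the PTE being installed is global and that the split low/high halves of CONFIG_64BIT_PHYS_ADDR are in use. The point is that a MIPS TLB entry maps an even/odd page pair and its effective G bit is the AND of both halves, so an empty buddy has to carry G too.

#include <stdio.h>

#define PAGE_GLOBAL	(1u << 0)	/* illustrative position only */

struct pte2 { unsigned int pte_low, pte_high; };

static void set_global_pair(struct pte2 *even, struct pte2 *odd)
{
	even->pte_low  |= PAGE_GLOBAL;
	even->pte_high |= PAGE_GLOBAL;
	/* An empty buddy must also become global, or the TLB entry's
	 * G bit (AND of the pair) would silently drop. */
	if (odd->pte_low == 0 && odd->pte_high == 0) {
		odd->pte_low  |= PAGE_GLOBAL;
		odd->pte_high |= PAGE_GLOBAL;
	}
}

int main(void)
{
	struct pte2 even = { 0, 0 }, odd = { 0, 0 };

	set_global_pair(&even, &odd);
	printf("odd buddy: %#x/%#x\n", odd.pte_low, odd.pte_high);
	return 0;
}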
@@ -319,7 +319,7 @@ static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
- * Macro to make mark a page protection value as "uncacheable". Note
+ * Macro to mark a page protection value as "uncacheable". Note
* that "protection" is really a misnomer here as the protection value
* contains the memory attribute bits, dirty bits, and various other
* bits as well.
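A sketch of what such a macro boils down to, restating the _CACHE_* encoding from pgtable-bits.h earlier in this patch; this is only the general shape, not necessarily the exact definition that follows this comment in the header:

#include <stdio.h>

#define _CACHE_SHIFT		3
#define _CACHE_MASK		(7 << _CACHE_SHIFT)
#define _CACHE_UNCACHED		(2 << _CACHE_SHIFT)

/* "Uncacheable" just swaps the 3-bit cache attribute field; every other
 * protection/state bit in the value is preserved. */
static unsigned long make_uncached(unsigned long prot)
{
	return (prot & ~(unsigned long)_CACHE_MASK) | _CACHE_UNCACHED;
}

int main(void)
{
	printf("%#lx\n", make_uncached(0x2f));	/* 0x17: attr 5 -> 2, other bits kept */
	return 0;
}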
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index bd98b503f04c..2a5fa7abb346 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -112,8 +112,8 @@ struct mips_fpu_struct {
typedef __u32 dspreg_t;
struct mips_dsp_state {
- dspreg_t dspr[NUM_DSP_REGS];
- unsigned int dspcontrol;
+ dspreg_t dspr[NUM_DSP_REGS];
+ unsigned int dspcontrol;
};
#define INIT_CPUMASK { \
@@ -137,46 +137,46 @@ union mips_watch_reg_state {
struct octeon_cop2_state {
/* DMFC2 rt, 0x0201 */
- unsigned long cop2_crc_iv;
+ unsigned long cop2_crc_iv;
/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
- unsigned long cop2_crc_length;
+ unsigned long cop2_crc_length;
/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
- unsigned long cop2_crc_poly;
+ unsigned long cop2_crc_poly;
/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
- unsigned long cop2_llm_dat[2];
+ unsigned long cop2_llm_dat[2];
/* DMFC2 rt, 0x0084 */
- unsigned long cop2_3des_iv;
+ unsigned long cop2_3des_iv;
/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
- unsigned long cop2_3des_key[3];
+ unsigned long cop2_3des_key[3];
/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
- unsigned long cop2_3des_result;
+ unsigned long cop2_3des_result;
/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
- unsigned long cop2_aes_inp0;
+ unsigned long cop2_aes_inp0;
/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
- unsigned long cop2_aes_iv[2];
+ unsigned long cop2_aes_iv[2];
/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
* rt, 0x0107 */
- unsigned long cop2_aes_key[4];
+ unsigned long cop2_aes_key[4];
/* DMFC2 rt, 0x0110 */
- unsigned long cop2_aes_keylen;
+ unsigned long cop2_aes_keylen;
/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
- unsigned long cop2_aes_result[2];
+ unsigned long cop2_aes_result[2];
/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
* rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
* 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
* 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
* 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
- unsigned long cop2_hsh_datw[15];
+ unsigned long cop2_hsh_datw[15];
/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
* rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
* 0x0256; DMFC2 rt, 0x0257 - Pass2 */
- unsigned long cop2_hsh_ivw[8];
+ unsigned long cop2_hsh_ivw[8];
/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
- unsigned long cop2_gfm_mult[2];
+ unsigned long cop2_gfm_mult[2];
/* DMFC2 rt, 0x025E - Pass2 */
- unsigned long cop2_gfm_poly;
+ unsigned long cop2_gfm_poly;
/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
- unsigned long cop2_gfm_result[2];
+ unsigned long cop2_gfm_result[2];
};
#define INIT_OCTEON_COP2 {0,}
@@ -249,9 +249,9 @@ struct thread_struct {
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
#define INIT_THREAD { \
- /* \
- * Saved main processor registers \
- */ \
+ /* \
+ * Saved main processor registers \
+ */ \
.reg16 = 0, \
.reg17 = 0, \
.reg18 = 0, \
@@ -332,7 +332,7 @@ unsigned long get_wchan(struct task_struct *p);
* aborts compilation on some CPUs. It's simply not possible to unwind
* some CPU's stackframes.
*
- * __builtin_return_address works only for non-leaf functions. We avoid the
+ * __builtin_return_address works only for non-leaf functions. We avoid the
* overhead of a function call by forcing the compiler to save the return
* address register on the stack.
*/
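One way to get the effect this comment describes — force the compiler to spill the return-address register so __builtin_return_address(0) is usable in place, without a helper call — is an empty asm that claims $31 as clobbered. A hedged sketch of the idea, buildable with a MIPS gcc, not necessarily the exact macro the header goes on to define:

/* Listing $31 (ra) in the clobbers makes GCC save it to the stack,
 * after which the builtin can read the return address cheaply. */
#define return_address()				\
({							\
	__asm__ __volatile__("" : : : "$31");		\
	__builtin_return_address(0);			\
})

static inline void *whoami(void)
{
	return return_address();	/* caller's address */
}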
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 54ea47da59a1..a0b2650516ac 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -22,10 +22,10 @@
* for indexed cache operations. Two issues here:
*
* - The MIPS32 and MIPS64 specs permit an implementation to directly derive
- * the index bits from the virtual address. This breaks with tradition
- * set by the R4000. To keep unpleasant surprises from happening we pick
+ * the index bits from the virtual address. This breaks with tradition
+ * set by the R4000. To keep unpleasant surprises from happening we pick
* an address in KSEG0 / CKSEG0.
- * - We need a properly sign extended address for 64-bit code. To get away
+ * - We need a properly sign extended address for 64-bit code. To get away
* without ifdefs we let the compiler do it by a type cast.
*/
#define INDEX_BASE CKSEG0
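The "let the compiler do it by a type cast" remark can be shown in isolation: pushing a 32-bit KSEG0 address through a signed 32-bit type sign-extends it to the canonical 64-bit form with no #ifdef (relying on GCC's documented modulo behaviour for the unsigned-to-signed conversion):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kseg0_32 = 0x80000000u;		/* 32-bit KSEG0 base */
	uint64_t kseg0_64 = (uint64_t)(int64_t)(int32_t)kseg0_32;

	/* prints 0xffffffff80000000: a valid CKSEG0 address on 64-bit MIPS */
	printf("%#llx\n", (unsigned long long)kseg0_64);
	return 0;
}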
@@ -347,7 +347,7 @@ static inline void blast_##pfx##cache##lsize(void) \
unsigned long end = start + current_cpu_data.desc.waysize; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
- current_cpu_data.desc.waybit; \
+ current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
@@ -359,7 +359,7 @@ static inline void blast_##pfx##cache##lsize(void) \
__##pfx##flush_epilogue \
} \
\
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
@@ -381,7 +381,7 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
- current_cpu_data.desc.waybit; \
+ current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
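For readers of the macro soup above, a flattened sketch of the loop it generates, with invented cache geometry and the 32-way unrolled cache instruction collapsed to a stub: each way is selected by ORing the way-select bits above the index, and every line in one way's worth of index space is visited.

#include <stdio.h>

static void cache_index_op(unsigned long addr)
{
	(void)addr;		/* stands in for the real indexed cache op */
}

int main(void)
{
	/* Invented geometry: 4-way, 16KB per way, 32-byte lines. */
	unsigned long waysize = 16384, waybit = 14, ways = 4, lsize = 32;
	unsigned long start = 0x80000000ul;	/* INDEX_BASE (KSEG0) */
	unsigned long end = start + waysize;
	unsigned long ws_inc = 1ul << waybit;
	unsigned long ws_end = ways << waybit;
	unsigned long ws, addr, n = 0;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += lsize) {
			cache_index_op(addr | ws);
			n++;
		}
	printf("%lu index ops\n", n);	/* 4 ways * 512 lines = 2048 */
	return 0;
}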
diff --git a/arch/mips/include/asm/regdef.h b/arch/mips/include/asm/regdef.h
index 785a5189b374..3c687df1d515 100644
--- a/arch/mips/include/asm/regdef.h
+++ b/arch/mips/include/asm/regdef.h
@@ -19,44 +19,44 @@
/*
* Symbolic register names for 32 bit ABI
*/
-#define zero $0 /* wired zero */
-#define AT $1 /* assembler temp - uppercase because of ".set at" */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define t0 $8 /* caller saved */
-#define t1 $9
-#define t2 $10
-#define t3 $11
-#define t4 $12
+#define zero $0 /* wired zero */
+#define AT $1 /* assembler temp - uppercase because of ".set at" */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* caller saved */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
#define ta0 $12
-#define t5 $13
+#define t5 $13
#define ta1 $13
-#define t6 $14
+#define t6 $14
#define ta2 $14
-#define t7 $15
+#define t7 $15
#define ta3 $15
-#define s0 $16 /* callee saved */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* caller saved */
-#define t9 $25
-#define jp $25 /* PIC jump register */
-#define k0 $26 /* kernel scratch */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define fp $30 /* frame pointer */
+#define s0 $16 /* callee saved */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* caller saved */
+#define t9 $25
+#define jp $25 /* PIC jump register */
+#define k0 $26 /* kernel scratch */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define fp $30 /* frame pointer */
#define s8 $30 /* same as fp! */
-#define ra $31 /* return address */
+#define ra $31 /* return address */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
diff --git a/arch/mips/include/asm/rtlx.h b/arch/mips/include/asm/rtlx.h
index 4ca3063ed2ce..90985b61dbd9 100644
--- a/arch/mips/include/asm/rtlx.h
+++ b/arch/mips/include/asm/rtlx.h
@@ -38,7 +38,7 @@ enum rtlx_state {
#define RTLX_BUFFER_SIZE 2048
/* each channel supports read and write.
- linux (vpe0) reads lx_buffer and writes rt_buffer
+ linux (vpe0) reads lx_buffer and writes rt_buffer
SP (vpe1) reads rt_buffer and writes lx_buffer
*/
struct rtlx_channel {
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index ae6306ebdcad..f29c75cf83c6 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -10,7 +10,7 @@
/*
* Kludge alert:
*
- * The generic seccomp code currently allows only a single compat ABI. Until
+ * The generic seccomp code currently allows only a single compat ABI. Until
* this is fixed we prioritize O32 as the compat ABI over N32.
*/
#ifdef CONFIG_MIPS32_O32
diff --git a/arch/mips/include/asm/sgi/gio.h b/arch/mips/include/asm/sgi/gio.h
index 889cf028c95d..24be2b425be8 100644
--- a/arch/mips/include/asm/sgi/gio.h
+++ b/arch/mips/include/asm/sgi/gio.h
@@ -18,18 +18,18 @@
* three physical connectors, but only two slots, GFX and EXP0.
*
* There is 10MB of GIO address space for GIO64 slot devices
- * slot# slot type address range size
+ * slot# slot type address range size
* ----- --------- ----------------------- -----
- * 0 GFX 0x1f000000 - 0x1f3fffff 4MB
- * 1 EXP0 0x1f400000 - 0x1f5fffff 2MB
- * 2 EXP1 0x1f600000 - 0x1f9fffff 4MB
+ * 0 GFX 0x1f000000 - 0x1f3fffff 4MB
+ * 1 EXP0 0x1f400000 - 0x1f5fffff 2MB
+ * 2 EXP1 0x1f600000 - 0x1f9fffff 4MB
*
* There are un-slotted devices, HPC, I/O and misc devices, which are grouped
* into the HPC address space.
- * - MISC 0x1fb00000 - 0x1fbfffff 1MB
+ * - MISC 0x1fb00000 - 0x1fbfffff 1MB
*
* Following space is reserved and unused
- * - RESERVED 0x18000000 - 0x1effffff 112MB
+ * - RESERVED 0x18000000 - 0x1effffff 112MB
*
* GIO bus IDs
*
@@ -39,10 +39,10 @@
* the slot undefined.
*
* 32-bit IDs are divided into
- * bits 0:6 the product ID; ranges from 0x00 to 0x7F.
+ * bits 0:6 the product ID; ranges from 0x00 to 0x7F.
* bit 7 0=GIO Product ID is 8 bits wide
* 1=GIO Product ID is 32 bits wide.
- * bits 8:15 manufacturer version for the product.
+ * bits 8:15 manufacturer version for the product.
* bit 16 0=GIO32 and GIO32-bis, 1=GIO64.
* bit 17 0=no ROM present
* 1=ROM present on this board AND next three words
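The bit layout spelled out in this comment folds into a tiny decoder; the struct and field names below are ad hoc, only the bit positions come from the comment, and the sample ID word is made up:

#include <stdio.h>

struct gio_id {
	unsigned int product;	/* bits 0:6  */
	unsigned int wide_id;	/* bit  7    */
	unsigned int version;	/* bits 8:15, manufacturer version */
	unsigned int gio64;	/* bit  16   */
	unsigned int has_rom;	/* bit  17   */
};

static struct gio_id gio_decode(unsigned int id)
{
	struct gio_id d = {
		.product = id & 0x7f,
		.wide_id = (id >> 7) & 1,
		.version = (id >> 8) & 0xff,
		.gio64   = (id >> 16) & 1,
		.has_rom = (id >> 17) & 1,
	};
	return d;
}

int main(void)
{
	struct gio_id d = gio_decode(0x00032345);

	printf("product=%#x version=%#x gio64=%u rom=%u\n",
	       d.product, d.version, d.gio64, d.has_rom);
	return 0;
}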
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index c4729f531919..59920b345942 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -65,39 +65,39 @@ struct hpc3_scsiregs {
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 bcd; /* byte count info */
#define HPC3_SBCD_BCNTMSK 0x00003fff /* bytes to transfer from/to memory */
-#define HPC3_SBCD_XIE 0x00004000 /* Send IRQ when done with cur buf */
-#define HPC3_SBCD_EOX 0x00008000 /* Indicates this is last buf in chain */
+#define HPC3_SBCD_XIE 0x00004000 /* Send IRQ when done with cur buf */
+#define HPC3_SBCD_EOX 0x00008000 /* Indicates this is last buf in chain */
volatile u32 ctrl; /* control register */
-#define HPC3_SCTRL_IRQ 0x01 /* IRQ asserted, either dma done or parity */
+#define HPC3_SCTRL_IRQ 0x01 /* IRQ asserted, either dma done or parity */
#define HPC3_SCTRL_ENDIAN 0x02 /* DMA endian mode, 0=big 1=little */
-#define HPC3_SCTRL_DIR 0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
+#define HPC3_SCTRL_DIR 0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
#define HPC3_SCTRL_FLUSH 0x08 /* Tells HPC3 to flush scsi fifos */
#define HPC3_SCTRL_ACTIVE 0x10 /* SCSI DMA channel is active */
#define HPC3_SCTRL_AMASK 0x20 /* DMA active inhibits PIO */
#define HPC3_SCTRL_CRESET 0x40 /* Resets dma channel and external controller */
-#define HPC3_SCTRL_PERR 0x80 /* Bad parity on HPC3 iface to scsi controller */
+#define HPC3_SCTRL_PERR 0x80 /* Bad parity on HPC3 iface to scsi controller */
volatile u32 gfptr; /* current GIO fifo ptr */
volatile u32 dfptr; /* current device fifo ptr */
volatile u32 dconfig; /* DMA configuration register */
#define HPC3_SDCFG_HCLK 0x00001 /* Enable DMA half clock mode */
-#define HPC3_SDCFG_D1 0x00006 /* Cycles to spend in D1 state */
-#define HPC3_SDCFG_D2 0x00038 /* Cycles to spend in D2 state */
-#define HPC3_SDCFG_D3 0x001c0 /* Cycles to spend in D3 state */
+#define HPC3_SDCFG_D1 0x00006 /* Cycles to spend in D1 state */
+#define HPC3_SDCFG_D2 0x00038 /* Cycles to spend in D2 state */
+#define HPC3_SDCFG_D3 0x001c0 /* Cycles to spend in D3 state */
#define HPC3_SDCFG_HWAT 0x00e00 /* DMA high water mark */
-#define HPC3_SDCFG_HW 0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
+#define HPC3_SDCFG_HW 0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
#define HPC3_SDCFG_SWAP 0x02000 /* Byte swap all DMA accesses */
#define HPC3_SDCFG_EPAR 0x04000 /* Enable parity checking for DMA */
#define HPC3_SDCFG_POLL 0x08000 /* hd_dreq polarity control */
#define HPC3_SDCFG_ERLY 0x30000 /* hd_dreq behavior control bits */
volatile u32 pconfig; /* PIO configuration register */
-#define HPC3_SPCFG_P3 0x0003 /* Cycles to spend in P3 state */
-#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
-#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
-#define HPC3_SPCFG_P1 0x0e00 /* Cycles to spend in P1 state */
-#define HPC3_SPCFG_HW 0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
+#define HPC3_SPCFG_P3 0x0003 /* Cycles to spend in P3 state */
+#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
+#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
+#define HPC3_SPCFG_P1 0x0e00 /* Cycles to spend in P1 state */
+#define HPC3_SPCFG_HW 0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
#define HPC3_SPCFG_SWAP 0x2000 /* Byte swap all PIO accesses */
#define HPC3_SPCFG_EPAR 0x4000 /* Enable parity checking for PIO */
#define HPC3_SPCFG_FUJI 0x8000 /* Fujitsu scsi controller mode for faster dma/pio */
@@ -108,13 +108,13 @@ struct hpc3_scsiregs {
/* SEEQ ethernet HPC3 registers, only one seeq per HPC3. */
struct hpc3_ethregs {
/* Receiver registers. */
- volatile u32 rx_cbptr; /* current dma buffer ptr, diagnostic use only */
- volatile u32 rx_ndptr; /* next dma descriptor ptr */
+ volatile u32 rx_cbptr; /* current dma buffer ptr, diagnostic use only */
+ volatile u32 rx_ndptr; /* next dma descriptor ptr */
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 rx_bcd; /* byte count info */
#define HPC3_ERXBCD_BCNTMSK 0x00003fff /* bytes to be sent to memory */
-#define HPC3_ERXBCD_XIE 0x20000000 /* HPC3 interrupts cpu at end of this buf */
-#define HPC3_ERXBCD_EOX 0x80000000 /* flags this as end of descriptor chain */
+#define HPC3_ERXBCD_XIE 0x20000000 /* HPC3 interrupts cpu at end of this buf */
+#define HPC3_ERXBCD_EOX 0x80000000 /* flags this as end of descriptor chain */
volatile u32 rx_ctrl; /* control register */
#define HPC3_ERXCTRL_STAT50 0x0000003f /* Receive status reg bits of Seeq8003 */
@@ -131,23 +131,23 @@ struct hpc3_ethregs {
volatile u32 reset; /* reset register */
#define HPC3_ERST_CRESET 0x1 /* Reset dma channel and external controller */
#define HPC3_ERST_CLRIRQ 0x2 /* Clear channel interrupt */
-#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
+#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
- volatile u32 dconfig; /* DMA configuration register */
-#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
-#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
-#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
+ volatile u32 dconfig; /* DMA configuration register */
+#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
-#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
-#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
-#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
+#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
- volatile u32 pconfig; /* PIO configuration register */
-#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
-#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
-#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */
+ volatile u32 pconfig; /* PIO configuration register */
+#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
u32 _unused2[0x1000/4 - 8]; /* padding */
@@ -158,9 +158,9 @@ struct hpc3_ethregs {
volatile u32 tx_bcd; /* byte count info */
#define HPC3_ETXBCD_BCNTMSK 0x00003fff /* bytes to be read from memory */
#define HPC3_ETXBCD_ESAMP 0x10000000 /* if set, too late to add descriptor */
-#define HPC3_ETXBCD_XIE 0x20000000 /* Interrupt cpu at end of cur desc */
-#define HPC3_ETXBCD_EOP 0x40000000 /* Last byte of cur buf is end of packet */
-#define HPC3_ETXBCD_EOX 0x80000000 /* This buf is the end of desc chain */
+#define HPC3_ETXBCD_XIE 0x20000000 /* Interrupt cpu at end of cur desc */
+#define HPC3_ETXBCD_EOP 0x40000000 /* Last byte of cur buf is end of packet */
+#define HPC3_ETXBCD_EOX 0x80000000 /* This buf is the end of desc chain */
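As an aside on how these descriptor flag bits combine, a sketch that builds a tx byte-count word for the last buffer of a packet at the end of a chain, with an interrupt requested. The constants are restated from above; whether the driver really combines them exactly like this is not shown in this excerpt.

#include <stdio.h>

#define HPC3_ETXBCD_BCNTMSK	0x00003fff	/* bytes to be read from memory */
#define HPC3_ETXBCD_XIE		0x20000000	/* interrupt CPU at end of this desc */
#define HPC3_ETXBCD_EOP		0x40000000	/* last byte of this buf ends the packet */
#define HPC3_ETXBCD_EOX		0x80000000u	/* this buf ends the descriptor chain */

static unsigned int tx_bcd_word(unsigned int len, int last_of_packet, int last_of_chain)
{
	unsigned int w = len & HPC3_ETXBCD_BCNTMSK;

	if (last_of_packet)
		w |= HPC3_ETXBCD_EOP;
	if (last_of_chain)
		w |= HPC3_ETXBCD_EOX | HPC3_ETXBCD_XIE;
	return w;
}

int main(void)
{
	printf("%#x\n", tx_bcd_word(1514, 1, 1));	/* 0xe00005ea */
	return 0;
}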
volatile u32 tx_ctrl; /* control register */
#define HPC3_ETXCTRL_STAT30 0x0000000f /* Rdonly copy of seeq tx stat reg */
@@ -215,10 +215,10 @@ struct hpc3_regs {
volatile u32 istat1; /* Irq status, only bits <9:5> reliable. */
volatile u32 bestat; /* Bus error interrupt status reg. */
-#define HPC3_BESTAT_BLMASK 0x000ff /* Bus lane where bad parity occurred */
-#define HPC3_BESTAT_CTYPE 0x00100 /* Bus cycle type, 0=PIO 1=DMA */
+#define HPC3_BESTAT_BLMASK 0x000ff /* Bus lane where bad parity occurred */
+#define HPC3_BESTAT_CTYPE 0x00100 /* Bus cycle type, 0=PIO 1=DMA */
#define HPC3_BESTAT_PIDSHIFT 9
-#define HPC3_BESTAT_PIDMASK 0x3f700 /* DMA channel parity identifier */
+#define HPC3_BESTAT_PIDMASK 0x3f700 /* DMA channel parity identifier */
u32 _unused1[0x14000/4 - 5]; /* padding */
@@ -259,7 +259,7 @@ struct hpc3_regs {
#define HPC3_DMACFG_RTIME 0x00200000
/* 5 bit burst count for DMA device */
#define HPC3_DMACFG_BURST_MASK 0x07c00000
-#define HPC3_DMACFG_BURST_SHIFT 22
+#define HPC3_DMACFG_BURST_SHIFT 22
/* Use live pbus_dreq unsynchronized signal */
#define HPC3_DMACFG_DRQLIVE 0x08000000
volatile u32 pbus_piocfg[16][64];
@@ -288,20 +288,20 @@ struct hpc3_regs {
/* PBUS PROM control regs. */
volatile u32 pbus_promwe; /* PROM write enable register */
-#define HPC3_PROM_WENAB 0x1 /* Enable writes to the PROM */
+#define HPC3_PROM_WENAB 0x1 /* Enable writes to the PROM */
u32 _unused5[0x0800/4 - 1];
volatile u32 pbus_promswap; /* Chip select swap reg */
#define HPC3_PROM_SWAP 0x1 /* invert GIO addr bit to select prom0 or prom1 */
u32 _unused6[0x0800/4 - 1];
- volatile u32 pbus_gout; /* PROM general purpose output reg */
+ volatile u32 pbus_gout; /* PROM general purpose output reg */
#define HPC3_PROM_STAT 0x1 /* General purpose status bit in gout */
u32 _unused7[0x1000/4 - 1];
volatile u32 rtcregs[14]; /* Dallas clock registers */
u32 _unused8[50];
- volatile u32 bbram[8192-50-14]; /* Battery backed ram */
+ volatile u32 bbram[8192-50-14]; /* Battery backed ram */
};
/*
diff --git a/arch/mips/include/asm/sgi/ioc.h b/arch/mips/include/asm/sgi/ioc.h
index 380347b648e2..53c6b1ca6860 100644
--- a/arch/mips/include/asm/sgi/ioc.h
+++ b/arch/mips/include/asm/sgi/ioc.h
@@ -138,7 +138,7 @@ struct sgioc_regs {
u8 _sysid[3];
volatile u8 sysid;
#define SGIOC_SYSID_FULLHOUSE 0x01
-#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
+#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
#define SGIOC_SYSID_CHIPREV(x) (((x) & 0xe0) >> 5)
u32 _unused2;
u8 _read[3];
@@ -150,7 +150,7 @@ struct sgioc_regs {
#define SGIOC_DMASEL_ISDNB 0x01 /* enable isdn B */
#define SGIOC_DMASEL_ISDNA 0x02 /* enable isdn A */
#define SGIOC_DMASEL_PPORT 0x04 /* use parallel DMA */
-#define SGIOC_DMASEL_SCLK667MHZ 0x10 /* use 6.67MHZ serial clock */
+#define SGIOC_DMASEL_SCLK667MHZ 0x10 /* use 6.67MHZ serial clock */
#define SGIOC_DMASEL_SCLKEXT 0x20 /* use external serial clock */
u32 _unused4;
u8 _reset[3];
diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h
index c0501f91719b..8db1a3588cf2 100644
--- a/arch/mips/include/asm/sgi/ip22.h
+++ b/arch/mips/include/asm/sgi/ip22.h
@@ -38,8 +38,8 @@
#define SGI_SOFT_0_IRQ SGINT_CPU + 0
#define SGI_SOFT_1_IRQ SGINT_CPU + 1
-#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
-#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
+#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
+#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
#define SGI_8254_0_IRQ SGINT_CPU + 4
#define SGI_8254_1_IRQ SGINT_CPU + 5
#define SGI_BUSERR_IRQ SGINT_CPU + 6
@@ -51,7 +51,7 @@
#define SGI_WD93_1_IRQ SGINT_LOCAL0 + 2 /* 2nd onboard WD93 */
#define SGI_ENET_IRQ SGINT_LOCAL0 + 3 /* onboard ethernet */
#define SGI_MCDMA_IRQ SGINT_LOCAL0 + 4 /* MC DMA done */
-#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5 /* Parallel port */
+#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5 /* Parallel port */
#define SGI_GIO_1_IRQ SGINT_LOCAL0 + 6 /* GE / GIO-1 / 2nd-HPC */
#define SGI_MAP_0_IRQ SGINT_LOCAL0 + 7 /* Mappable interrupt 0 */
diff --git a/arch/mips/include/asm/sgi/mc.h b/arch/mips/include/asm/sgi/mc.h
index 1576c2394de8..3a070cec97e7 100644
--- a/arch/mips/include/asm/sgi/mc.h
+++ b/arch/mips/include/asm/sgi/mc.h
@@ -29,10 +29,10 @@ struct sgimc_regs {
#define SGIMC_CCTRL0_IENAB 0x00002000 /* Allow interrupts from MC */
#define SGIMC_CCTRL0_ESNOOP 0x00004000 /* Snooping I/O enable */
#define SGIMC_CCTRL0_EPROMWR 0x00008000 /* Prom writes from cpu enable */
-#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
+#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
#define SGIMC_CCTRL0_LENDIAN 0x00020000 /* Put MC in little-endian mode */
-#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
-#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
+#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
+#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
#define SGIMC_CCTRL0_R4KNOCHKPARR 0x04000000 /* Don't chk parity on mem data reads */
#define SGIMC_CCTRL0_GIOBTOB 0x08000000 /* Allow GIO back to back writes */
u32 _unused1;
@@ -40,13 +40,13 @@ struct sgimc_regs {
#define SGIMC_CCTRL1_EGIOTIMEO 0x00000010 /* GIO bus timeout enable */
#define SGIMC_CCTRL1_FIXEDEHPC 0x00001000 /* Fixed HPC endianness */
#define SGIMC_CCTRL1_LITTLEHPC 0x00002000 /* Little endian HPC */
-#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
-#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
+#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
+#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
u32 _unused2;
- volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
+ volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
u32 _unused3;
volatile u32 systemid; /* MC system ID register, readonly */
@@ -81,11 +81,11 @@ struct sgimc_regs {
#define SGIMC_GIOPAR_RTIMEGFX 0x00000040 /* GFX device has realtime attr */
#define SGIMC_GIOPAR_RTIMEEXP0 0x00000080 /* EXP(slot0) has realtime attr */
#define SGIMC_GIOPAR_RTIMEEXP1 0x00000100 /* EXP(slot1) has realtime attr */
-#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
+#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
#define SGIMC_GIOPAR_ONEBUS 0x00000400 /* Exists one GIO64 pipelined bus */
#define SGIMC_GIOPAR_MASTERGFX 0x00000800 /* GFX can act as a bus master */
-#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
-#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
#define SGIMC_GIOPAR_PLINEEXP0 0x00004000 /* EXP(slot0) has pipeline attr */
#define SGIMC_GIOPAR_PLINEEXP1 0x00008000 /* EXP(slot1) has pipeline attr */
@@ -107,9 +107,9 @@ struct sgimc_regs {
#define SGIMC_MCONFIG_SBANKS 0x00004000 /* Number of subbanks */
u32 _unused13;
- volatile u32 cmacc; /* Mem access config for CPU */
+ volatile u32 cmacc; /* Mem access config for CPU */
u32 _unused14;
- volatile u32 gmacc; /* Mem access config for GIO */
+ volatile u32 gmacc; /* Mem access config for GIO */
/* This define applies to both cmacc and gmacc registers above. */
#define SGIMC_MACC_ALIASBIG 0x20000000 /* 512MB home for alias */
diff --git a/arch/mips/include/asm/sgi/pi1.h b/arch/mips/include/asm/sgi/pi1.h
index c9506915dc5c..96b1a0771ec3 100644
--- a/arch/mips/include/asm/sgi/pi1.h
+++ b/arch/mips/include/asm/sgi/pi1.h
@@ -28,16 +28,16 @@ struct pi1_regs {
#define PI1_STAT_BUSY 0x80
u8 _dmactrl[3];
volatile u8 dmactrl;
-#define PI1_DMACTRL_FIFO_EMPTY 0x01 /* fifo empty R/O */
-#define PI1_DMACTRL_ABORT 0x02 /* reset DMA and internal fifo W/O */
-#define PI1_DMACTRL_STDMODE 0x00 /* bits 2-3 */
-#define PI1_DMACTRL_SGIMODE 0x04 /* bits 2-3 */
-#define PI1_DMACTRL_RICOHMODE 0x08 /* bits 2-3 */
-#define PI1_DMACTRL_HPMODE 0x0c /* bits 2-3 */
-#define PI1_DMACTRL_BLKMODE 0x10 /* block mode */
-#define PI1_DMACTRL_FIFO_CLEAR 0x20 /* clear fifo W/O */
-#define PI1_DMACTRL_READ 0x40 /* read */
-#define PI1_DMACTRL_RUN 0x80 /* pedal to the metal */
+#define PI1_DMACTRL_FIFO_EMPTY 0x01 /* fifo empty R/O */
+#define PI1_DMACTRL_ABORT 0x02 /* reset DMA and internal fifo W/O */
+#define PI1_DMACTRL_STDMODE 0x00 /* bits 2-3 */
+#define PI1_DMACTRL_SGIMODE 0x04 /* bits 2-3 */
+#define PI1_DMACTRL_RICOHMODE 0x08 /* bits 2-3 */
+#define PI1_DMACTRL_HPMODE 0x0c /* bits 2-3 */
+#define PI1_DMACTRL_BLKMODE 0x10 /* block mode */
+#define PI1_DMACTRL_FIFO_CLEAR 0x20 /* clear fifo W/O */
+#define PI1_DMACTRL_READ 0x40 /* read */
+#define PI1_DMACTRL_RUN 0x80 /* pedal to the metal */
u8 _intstat[3];
volatile u8 intstat;
#define PI1_INTSTAT_ACK 0x04
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index f58115769457..753275accd18 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -37,7 +37,7 @@ extern char prom_getchar(void);
* in chain if CURR is NULL.
*/
extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
-#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
+#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
/* Called by prom_init to setup the physical memory pmemblock
* array.
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 3dce7c788b3e..26ddfff28c8e 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -16,33 +16,33 @@
#include <asm/fw/arc/types.h>
/* Various ARCS error codes. */
-#define PROM_ESUCCESS 0x00
-#define PROM_E2BIG 0x01
-#define PROM_EACCESS 0x02
-#define PROM_EAGAIN 0x03
-#define PROM_EBADF 0x04
-#define PROM_EBUSY 0x05
-#define PROM_EFAULT 0x06
-#define PROM_EINVAL 0x07
-#define PROM_EIO 0x08
-#define PROM_EISDIR 0x09
-#define PROM_EMFILE 0x0a
-#define PROM_EMLINK 0x0b
-#define PROM_ENAMETOOLONG 0x0c
-#define PROM_ENODEV 0x0d
-#define PROM_ENOENT 0x0e
-#define PROM_ENOEXEC 0x0f
-#define PROM_ENOMEM 0x10
-#define PROM_ENOSPC 0x11
-#define PROM_ENOTDIR 0x12
-#define PROM_ENOTTY 0x13
-#define PROM_ENXIO 0x14
-#define PROM_EROFS 0x15
+#define PROM_ESUCCESS 0x00
+#define PROM_E2BIG 0x01
+#define PROM_EACCESS 0x02
+#define PROM_EAGAIN 0x03
+#define PROM_EBADF 0x04
+#define PROM_EBUSY 0x05
+#define PROM_EFAULT 0x06
+#define PROM_EINVAL 0x07
+#define PROM_EIO 0x08
+#define PROM_EISDIR 0x09
+#define PROM_EMFILE 0x0a
+#define PROM_EMLINK 0x0b
+#define PROM_ENAMETOOLONG 0x0c
+#define PROM_ENODEV 0x0d
+#define PROM_ENOENT 0x0e
+#define PROM_ENOEXEC 0x0f
+#define PROM_ENOMEM 0x10
+#define PROM_ENOSPC 0x11
+#define PROM_ENOTDIR 0x12
+#define PROM_ENOTTY 0x13
+#define PROM_ENXIO 0x14
+#define PROM_EROFS 0x15
/* SGI ARCS specific errno's. */
-#define PROM_EADDRNOTAVAIL 0x1f
-#define PROM_ETIMEDOUT 0x20
-#define PROM_ECONNABORTED 0x21
-#define PROM_ENOCONNECT 0x22
+#define PROM_EADDRNOTAVAIL 0x1f
+#define PROM_ETIMEDOUT 0x20
+#define PROM_ECONNABORTED 0x21
+#define PROM_ENOCONNECT 0x22
/* Device classes, types, and identifiers for prom
* device inventory queries.
@@ -77,14 +77,14 @@ enum linux_identifier {
/* A prom device tree component. */
struct linux_component {
- enum linux_devclass class; /* node class */
- enum linux_devtypes type; /* node type */
- enum linux_identifier iflags; /* node flags */
- USHORT vers; /* node version */
- USHORT rev; /* node revision */
- ULONG key; /* completely magic */
- ULONG amask; /* XXX affinity mask??? */
- ULONG cdsize; /* size of configuration data */
+ enum linux_devclass class; /* node class */
+ enum linux_devtypes type; /* node type */
+ enum linux_identifier iflags; /* node flags */
+ USHORT vers; /* node version */
+ USHORT rev; /* node revision */
+ ULONG key; /* completely magic */
+ ULONG amask; /* XXX affinity mask??? */
+ ULONG cdsize; /* size of configuration data */
ULONG ilen; /* length of string identifier */
_PULONG iname; /* string identifier */
};
@@ -177,13 +177,13 @@ struct linux_finfo {
struct linux_bigint end;
struct linux_bigint cur;
enum linux_devtypes dtype;
- unsigned long namelen;
- unsigned char attr;
- char name[32]; /* XXX imperical, should be define */
+ unsigned long namelen;
+ unsigned char attr;
+ char name[32]; /* XXX empirical, should be a #define */
};
/* This describes the vector containing function pointers to the ARC
- firmware functions. */
+ firmware functions. */
struct linux_romvec {
LONG load; /* Load an executable image. */
LONG invoke; /* Invoke a standalone image. */
@@ -244,7 +244,7 @@ struct linux_romvec {
*/
typedef struct _SYSTEM_PARAMETER_BLOCK {
ULONG magic; /* magic cookie */
-#define PROMBLOCK_MAGIC 0x53435241
+#define PROMBLOCK_MAGIC 0x53435241
ULONG len; /* length of parm block */
USHORT ver; /* ARCS firmware version */
@@ -294,16 +294,16 @@ struct linux_cdata {
};
/* Common SGI ARCS firmware file descriptors. */
-#define SGIPROM_STDIN 0
-#define SGIPROM_STDOUT 1
+#define SGIPROM_STDIN 0
+#define SGIPROM_STDOUT 1
/* Common SGI ARCS firmware file types. */
-#define SGIPROM_ROFILE 0x01 /* read-only file */
-#define SGIPROM_HFILE 0x02 /* hidden file */
-#define SGIPROM_SFILE 0x04 /* System file */
-#define SGIPROM_AFILE 0x08 /* Archive file */
-#define SGIPROM_DFILE 0x10 /* Directory file */
-#define SGIPROM_DELFILE 0x20 /* Deleted file */
+#define SGIPROM_ROFILE 0x01 /* read-only file */
+#define SGIPROM_HFILE 0x02 /* hidden file */
+#define SGIPROM_SFILE 0x04 /* System file */
+#define SGIPROM_AFILE 0x08 /* Archive file */
+#define SGIPROM_DFILE 0x10 /* Directory file */
+#define SGIPROM_DELFILE 0x20 /* Deleted file */
/* SGI ARCS boot record information. */
struct sgi_partition {
@@ -318,7 +318,7 @@ struct sgi_partition {
unsigned char tsect0, tsect1, tsect2, tsect3;
};
-#define SGIBBLOCK_MAGIC 0xaa55
+#define SGIBBLOCK_MAGIC 0xaa55
#define SGIBBLOCK_MAXPART 0x0004
struct sgi_bootblock {
@@ -332,34 +332,34 @@ struct sgi_bparm_block {
unsigned short bytes_sect; /* bytes per sector */
unsigned char sect_clust; /* sectors per cluster */
unsigned short sect_resv; /* reserved sectors */
- unsigned char nfats; /* # of allocation tables */
+ unsigned char nfats; /* # of allocation tables */
unsigned short nroot_dirents; /* # of root directory entries */
unsigned short sect_volume; /* sectors in volume */
unsigned char media_type; /* media descriptor */
unsigned short sect_fat; /* sectors per allocation table */
unsigned short sect_track; /* sectors per track */
- unsigned short nheads; /* # of heads */
- unsigned short nhsects; /* # of hidden sectors */
+ unsigned short nheads; /* # of heads */
+ unsigned short nhsects; /* # of hidden sectors */
};
struct sgi_bsector {
- unsigned char jmpinfo[3];
- unsigned char manuf_name[8];
+ unsigned char jmpinfo[3];
+ unsigned char manuf_name[8];
struct sgi_bparm_block info;
};
/* Debugging block used with SGI symmon symbolic debugger. */
-#define SMB_DEBUG_MAGIC 0xfeeddead
+#define SMB_DEBUG_MAGIC 0xfeeddead
struct linux_smonblock {
- unsigned long magic;
- void (*handler)(void); /* Breakpoint routine. */
- unsigned long dtable_base; /* Base addr of dbg table. */
- int (*printf)(const char *fmt, ...);
- unsigned long btable_base; /* Breakpoint table. */
- unsigned long mpflushreqs; /* SMP cache flush request list. */
- unsigned long ntab; /* Name table. */
- unsigned long stab; /* Symbol table. */
- int smax; /* Max # of symbols. */
+ unsigned long magic;
+ void (*handler)(void); /* Breakpoint routine. */
+ unsigned long dtable_base; /* Base addr of dbg table. */
+ int (*printf)(const char *fmt, ...);
+ unsigned long btable_base; /* Breakpoint table. */
+ unsigned long mpflushreqs; /* SMP cache flush request list. */
+ unsigned long ntab; /* Name table. */
+ unsigned long stab; /* Symbol table. */
+ int smax; /* Max # of symbols. */
};
/*
@@ -369,7 +369,7 @@ struct linux_smonblock {
#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
#define __arc_clobbers \
- "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
+ "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
"$12", "$13", "$14", "$15", "$16", "$24", "$25", "$31"
#define ARC_CALL0(dest) \
@@ -447,7 +447,7 @@ struct linux_smonblock {
"daddu\t$29, 32\n\t" \
"move\t%0, $2" \
: "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
+ : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
"r" (__a4) \
: __arc_clobbers); \
__res; \
@@ -468,8 +468,8 @@ struct linux_smonblock {
"daddu\t$29, 32\n\t" \
"move\t%0, $2" \
: "=r" (__res), "=r" (__vec) \
- : "1" (__vec), \
- "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
+ : "1" (__vec), \
+ "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
"r" (__a5) \
: __arc_clobbers); \
__res; \
@@ -512,7 +512,7 @@ struct linux_smonblock {
long __a1 = (long) (a1); \
long __a2 = (long) (a2); \
long __a3 = (long) (a3); \
- long (*__vec)(long, long, long) = (void *) romvec->dest; \
+ long (*__vec)(long, long, long) = (void *) romvec->dest; \
\
__res = __vec(__a1, __a2, __a3); \
__res; \
diff --git a/arch/mips/include/asm/shmparam.h b/arch/mips/include/asm/shmparam.h
index 09290720751c..324d04042bdf 100644
--- a/arch/mips/include/asm/shmparam.h
+++ b/arch/mips/include/asm/shmparam.h
@@ -8,6 +8,6 @@
#define __ARCH_FORCE_SHMLBA 1
-#define SHMLBA 0x40000 /* attach addr a multiple of this */
+#define SHMLBA 0x40000 /* attach addr a multiple of this */
#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_int.h b/arch/mips/include/asm/sibyte/bcm1480_int.h
index fffb224d2297..6b82ed3c2359 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_int.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_int.h
@@ -60,253 +60,253 @@
* Interrupt sources (Table 22)
*/
-#define K_BCM1480_INT_SOURCES 128
+#define K_BCM1480_INT_SOURCES 128
#define _BCM1480_INT_HIGH(k) (k)
#define _BCM1480_INT_LOW(k) ((k)+64)
-#define K_BCM1480_INT_ADDR_TRAP _BCM1480_INT_HIGH(1)
-#define K_BCM1480_INT_GPIO_0 _BCM1480_INT_HIGH(4)
-#define K_BCM1480_INT_GPIO_1 _BCM1480_INT_HIGH(5)
-#define K_BCM1480_INT_GPIO_2 _BCM1480_INT_HIGH(6)
-#define K_BCM1480_INT_GPIO_3 _BCM1480_INT_HIGH(7)
-#define K_BCM1480_INT_PCI_INTA _BCM1480_INT_HIGH(8)
-#define K_BCM1480_INT_PCI_INTB _BCM1480_INT_HIGH(9)
-#define K_BCM1480_INT_PCI_INTC _BCM1480_INT_HIGH(10)
-#define K_BCM1480_INT_PCI_INTD _BCM1480_INT_HIGH(11)
-#define K_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_HIGH(12)
-#define K_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_HIGH(13)
-#define K_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_HIGH(14)
-#define K_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_HIGH(15)
-#define K_BCM1480_INT_TIMER_0 _BCM1480_INT_HIGH(20)
-#define K_BCM1480_INT_TIMER_1 _BCM1480_INT_HIGH(21)
-#define K_BCM1480_INT_TIMER_2 _BCM1480_INT_HIGH(22)
-#define K_BCM1480_INT_TIMER_3 _BCM1480_INT_HIGH(23)
-#define K_BCM1480_INT_DM_CH_0 _BCM1480_INT_HIGH(28)
-#define K_BCM1480_INT_DM_CH_1 _BCM1480_INT_HIGH(29)
-#define K_BCM1480_INT_DM_CH_2 _BCM1480_INT_HIGH(30)
-#define K_BCM1480_INT_DM_CH_3 _BCM1480_INT_HIGH(31)
-#define K_BCM1480_INT_MAC_0 _BCM1480_INT_HIGH(36)
-#define K_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_HIGH(37)
-#define K_BCM1480_INT_MAC_1 _BCM1480_INT_HIGH(38)
-#define K_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_HIGH(39)
-#define K_BCM1480_INT_MAC_2 _BCM1480_INT_HIGH(40)
-#define K_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_HIGH(41)
-#define K_BCM1480_INT_MAC_3 _BCM1480_INT_HIGH(42)
-#define K_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_HIGH(43)
-#define K_BCM1480_INT_PMI_LOW _BCM1480_INT_HIGH(52)
-#define K_BCM1480_INT_PMI_HIGH _BCM1480_INT_HIGH(53)
-#define K_BCM1480_INT_PMO_LOW _BCM1480_INT_HIGH(54)
-#define K_BCM1480_INT_PMO_HIGH _BCM1480_INT_HIGH(55)
-#define K_BCM1480_INT_MBOX_0_0 _BCM1480_INT_HIGH(56)
-#define K_BCM1480_INT_MBOX_0_1 _BCM1480_INT_HIGH(57)
-#define K_BCM1480_INT_MBOX_0_2 _BCM1480_INT_HIGH(58)
-#define K_BCM1480_INT_MBOX_0_3 _BCM1480_INT_HIGH(59)
-#define K_BCM1480_INT_MBOX_1_0 _BCM1480_INT_HIGH(60)
-#define K_BCM1480_INT_MBOX_1_1 _BCM1480_INT_HIGH(61)
-#define K_BCM1480_INT_MBOX_1_2 _BCM1480_INT_HIGH(62)
-#define K_BCM1480_INT_MBOX_1_3 _BCM1480_INT_HIGH(63)
+#define K_BCM1480_INT_ADDR_TRAP _BCM1480_INT_HIGH(1)
+#define K_BCM1480_INT_GPIO_0 _BCM1480_INT_HIGH(4)
+#define K_BCM1480_INT_GPIO_1 _BCM1480_INT_HIGH(5)
+#define K_BCM1480_INT_GPIO_2 _BCM1480_INT_HIGH(6)
+#define K_BCM1480_INT_GPIO_3 _BCM1480_INT_HIGH(7)
+#define K_BCM1480_INT_PCI_INTA _BCM1480_INT_HIGH(8)
+#define K_BCM1480_INT_PCI_INTB _BCM1480_INT_HIGH(9)
+#define K_BCM1480_INT_PCI_INTC _BCM1480_INT_HIGH(10)
+#define K_BCM1480_INT_PCI_INTD _BCM1480_INT_HIGH(11)
+#define K_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_HIGH(12)
+#define K_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_HIGH(13)
+#define K_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_HIGH(14)
+#define K_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_HIGH(15)
+#define K_BCM1480_INT_TIMER_0 _BCM1480_INT_HIGH(20)
+#define K_BCM1480_INT_TIMER_1 _BCM1480_INT_HIGH(21)
+#define K_BCM1480_INT_TIMER_2 _BCM1480_INT_HIGH(22)
+#define K_BCM1480_INT_TIMER_3 _BCM1480_INT_HIGH(23)
+#define K_BCM1480_INT_DM_CH_0 _BCM1480_INT_HIGH(28)
+#define K_BCM1480_INT_DM_CH_1 _BCM1480_INT_HIGH(29)
+#define K_BCM1480_INT_DM_CH_2 _BCM1480_INT_HIGH(30)
+#define K_BCM1480_INT_DM_CH_3 _BCM1480_INT_HIGH(31)
+#define K_BCM1480_INT_MAC_0 _BCM1480_INT_HIGH(36)
+#define K_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_HIGH(37)
+#define K_BCM1480_INT_MAC_1 _BCM1480_INT_HIGH(38)
+#define K_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_HIGH(39)
+#define K_BCM1480_INT_MAC_2 _BCM1480_INT_HIGH(40)
+#define K_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_HIGH(41)
+#define K_BCM1480_INT_MAC_3 _BCM1480_INT_HIGH(42)
+#define K_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_HIGH(43)
+#define K_BCM1480_INT_PMI_LOW _BCM1480_INT_HIGH(52)
+#define K_BCM1480_INT_PMI_HIGH _BCM1480_INT_HIGH(53)
+#define K_BCM1480_INT_PMO_LOW _BCM1480_INT_HIGH(54)
+#define K_BCM1480_INT_PMO_HIGH _BCM1480_INT_HIGH(55)
+#define K_BCM1480_INT_MBOX_0_0 _BCM1480_INT_HIGH(56)
+#define K_BCM1480_INT_MBOX_0_1 _BCM1480_INT_HIGH(57)
+#define K_BCM1480_INT_MBOX_0_2 _BCM1480_INT_HIGH(58)
+#define K_BCM1480_INT_MBOX_0_3 _BCM1480_INT_HIGH(59)
+#define K_BCM1480_INT_MBOX_1_0 _BCM1480_INT_HIGH(60)
+#define K_BCM1480_INT_MBOX_1_1 _BCM1480_INT_HIGH(61)
+#define K_BCM1480_INT_MBOX_1_2 _BCM1480_INT_HIGH(62)
+#define K_BCM1480_INT_MBOX_1_3 _BCM1480_INT_HIGH(63)
-#define K_BCM1480_INT_BAD_ECC _BCM1480_INT_LOW(1)
-#define K_BCM1480_INT_COR_ECC _BCM1480_INT_LOW(2)
-#define K_BCM1480_INT_IO_BUS _BCM1480_INT_LOW(3)
-#define K_BCM1480_INT_PERF_CNT _BCM1480_INT_LOW(4)
-#define K_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_LOW(5)
-#define K_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_LOW(6)
-#define K_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_LOW(7)
-#define K_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_LOW(8)
-#define K_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_LOW(9)
-#define K_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_LOW(10)
-#define K_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_LOW(11)
-#define K_BCM1480_INT_PCI_ERROR _BCM1480_INT_LOW(16)
-#define K_BCM1480_INT_PCI_RESET _BCM1480_INT_LOW(17)
-#define K_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_LOW(18)
-#define K_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_LOW(19)
-#define K_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_LOW(20)
-#define K_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_LOW(21)
-#define K_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_LOW(22)
-#define K_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_LOW(23)
-#define K_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_LOW(24)
-#define K_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_LOW(25)
-#define K_BCM1480_INT_LDT_SMI _BCM1480_INT_LOW(32)
-#define K_BCM1480_INT_LDT_NMI _BCM1480_INT_LOW(33)
-#define K_BCM1480_INT_LDT_INIT _BCM1480_INT_LOW(34)
-#define K_BCM1480_INT_LDT_STARTUP _BCM1480_INT_LOW(35)
-#define K_BCM1480_INT_LDT_EXT _BCM1480_INT_LOW(36)
-#define K_BCM1480_INT_SMB_0 _BCM1480_INT_LOW(40)
-#define K_BCM1480_INT_SMB_1 _BCM1480_INT_LOW(41)
-#define K_BCM1480_INT_PCMCIA _BCM1480_INT_LOW(42)
-#define K_BCM1480_INT_UART_0 _BCM1480_INT_LOW(44)
-#define K_BCM1480_INT_UART_1 _BCM1480_INT_LOW(45)
-#define K_BCM1480_INT_UART_2 _BCM1480_INT_LOW(46)
-#define K_BCM1480_INT_UART_3 _BCM1480_INT_LOW(47)
-#define K_BCM1480_INT_GPIO_4 _BCM1480_INT_LOW(52)
-#define K_BCM1480_INT_GPIO_5 _BCM1480_INT_LOW(53)
-#define K_BCM1480_INT_GPIO_6 _BCM1480_INT_LOW(54)
-#define K_BCM1480_INT_GPIO_7 _BCM1480_INT_LOW(55)
-#define K_BCM1480_INT_GPIO_8 _BCM1480_INT_LOW(56)
-#define K_BCM1480_INT_GPIO_9 _BCM1480_INT_LOW(57)
-#define K_BCM1480_INT_GPIO_10 _BCM1480_INT_LOW(58)
-#define K_BCM1480_INT_GPIO_11 _BCM1480_INT_LOW(59)
-#define K_BCM1480_INT_GPIO_12 _BCM1480_INT_LOW(60)
-#define K_BCM1480_INT_GPIO_13 _BCM1480_INT_LOW(61)
-#define K_BCM1480_INT_GPIO_14 _BCM1480_INT_LOW(62)
-#define K_BCM1480_INT_GPIO_15 _BCM1480_INT_LOW(63)
+#define K_BCM1480_INT_BAD_ECC _BCM1480_INT_LOW(1)
+#define K_BCM1480_INT_COR_ECC _BCM1480_INT_LOW(2)
+#define K_BCM1480_INT_IO_BUS _BCM1480_INT_LOW(3)
+#define K_BCM1480_INT_PERF_CNT _BCM1480_INT_LOW(4)
+#define K_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_LOW(5)
+#define K_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_LOW(6)
+#define K_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_LOW(7)
+#define K_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_LOW(8)
+#define K_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_LOW(9)
+#define K_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_LOW(10)
+#define K_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_LOW(11)
+#define K_BCM1480_INT_PCI_ERROR _BCM1480_INT_LOW(16)
+#define K_BCM1480_INT_PCI_RESET _BCM1480_INT_LOW(17)
+#define K_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_LOW(18)
+#define K_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_LOW(19)
+#define K_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_LOW(20)
+#define K_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_LOW(21)
+#define K_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_LOW(22)
+#define K_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_LOW(23)
+#define K_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_LOW(24)
+#define K_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_LOW(25)
+#define K_BCM1480_INT_LDT_SMI _BCM1480_INT_LOW(32)
+#define K_BCM1480_INT_LDT_NMI _BCM1480_INT_LOW(33)
+#define K_BCM1480_INT_LDT_INIT _BCM1480_INT_LOW(34)
+#define K_BCM1480_INT_LDT_STARTUP _BCM1480_INT_LOW(35)
+#define K_BCM1480_INT_LDT_EXT _BCM1480_INT_LOW(36)
+#define K_BCM1480_INT_SMB_0 _BCM1480_INT_LOW(40)
+#define K_BCM1480_INT_SMB_1 _BCM1480_INT_LOW(41)
+#define K_BCM1480_INT_PCMCIA _BCM1480_INT_LOW(42)
+#define K_BCM1480_INT_UART_0 _BCM1480_INT_LOW(44)
+#define K_BCM1480_INT_UART_1 _BCM1480_INT_LOW(45)
+#define K_BCM1480_INT_UART_2 _BCM1480_INT_LOW(46)
+#define K_BCM1480_INT_UART_3 _BCM1480_INT_LOW(47)
+#define K_BCM1480_INT_GPIO_4 _BCM1480_INT_LOW(52)
+#define K_BCM1480_INT_GPIO_5 _BCM1480_INT_LOW(53)
+#define K_BCM1480_INT_GPIO_6 _BCM1480_INT_LOW(54)
+#define K_BCM1480_INT_GPIO_7 _BCM1480_INT_LOW(55)
+#define K_BCM1480_INT_GPIO_8 _BCM1480_INT_LOW(56)
+#define K_BCM1480_INT_GPIO_9 _BCM1480_INT_LOW(57)
+#define K_BCM1480_INT_GPIO_10 _BCM1480_INT_LOW(58)
+#define K_BCM1480_INT_GPIO_11 _BCM1480_INT_LOW(59)
+#define K_BCM1480_INT_GPIO_12 _BCM1480_INT_LOW(60)
+#define K_BCM1480_INT_GPIO_13 _BCM1480_INT_LOW(61)
+#define K_BCM1480_INT_GPIO_14 _BCM1480_INT_LOW(62)
+#define K_BCM1480_INT_GPIO_15 _BCM1480_INT_LOW(63)
/*
* Mask values for each interrupt
*/
-#define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F))
-#define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F))
-#define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6)
+#define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F))
+#define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F))
+#define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6)
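The HIGH/LOW helpers and the mask macros above encode a 128-source space split across two 64-bit register banks: bit 6 of the source number picks the bank and the low six bits pick the bit. A tiny sketch of the arithmetic; reading _BCM1480_INT_OFFSET's result as a byte stride between the two banks is my interpretation, not something this excerpt states.

#include <stdio.h>

/* Mirrors _BCM1480_INT_MASK1() and _BCM1480_INT_OFFSET() above. */
static unsigned long long int_mask1(unsigned int src)
{
	return 1ull << (src & 0x3f);
}

static unsigned int int_reg_offset(unsigned int src)
{
	return (src & 0x40) << 6;	/* 0 for "high" sources, 0x1000 for "low" */
}

int main(void)
{
	unsigned int uart0 = 64 + 44;	/* K_BCM1480_INT_UART_0 = _BCM1480_INT_LOW(44) */

	printf("offset=%#x mask=%#llx\n", int_reg_offset(uart0), int_mask1(uart0));
	return 0;
}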
-#define M_BCM1480_INT_CASCADE _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
+#define M_BCM1480_INT_CASCADE _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
-#define M_BCM1480_INT_ADDR_TRAP _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
-#define M_BCM1480_INT_GPIO_0 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
-#define M_BCM1480_INT_GPIO_1 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
-#define M_BCM1480_INT_GPIO_2 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
-#define M_BCM1480_INT_GPIO_3 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
-#define M_BCM1480_INT_PCI_INTA _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
-#define M_BCM1480_INT_PCI_INTB _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
-#define M_BCM1480_INT_PCI_INTC _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
-#define M_BCM1480_INT_PCI_INTD _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
-#define M_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
-#define M_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
-#define M_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
-#define M_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
-#define M_BCM1480_INT_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
-#define M_BCM1480_INT_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
-#define M_BCM1480_INT_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
-#define M_BCM1480_INT_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
-#define M_BCM1480_INT_DM_CH_0 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
-#define M_BCM1480_INT_DM_CH_1 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
-#define M_BCM1480_INT_DM_CH_2 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
-#define M_BCM1480_INT_DM_CH_3 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
-#define M_BCM1480_INT_MAC_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
-#define M_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
-#define M_BCM1480_INT_MAC_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
-#define M_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
-#define M_BCM1480_INT_MAC_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
-#define M_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
-#define M_BCM1480_INT_MAC_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
-#define M_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
-#define M_BCM1480_INT_PMI_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
-#define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
-#define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
-#define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
-#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
-#define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
-#define M_BCM1480_INT_MBOX_0_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
-#define M_BCM1480_INT_MBOX_1_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
-#define M_BCM1480_INT_MBOX_1_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
-#define M_BCM1480_INT_MBOX_1_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
-#define M_BCM1480_INT_MBOX_1_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
-#define M_BCM1480_INT_BAD_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
-#define M_BCM1480_INT_COR_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
-#define M_BCM1480_INT_IO_BUS _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
-#define M_BCM1480_INT_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
-#define M_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
-#define M_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
-#define M_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
-#define M_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
-#define M_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
-#define M_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
-#define M_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
-#define M_BCM1480_INT_PCI_ERROR _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
-#define M_BCM1480_INT_PCI_RESET _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
-#define M_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
-#define M_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
-#define M_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
-#define M_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
-#define M_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
-#define M_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
-#define M_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
-#define M_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
-#define M_BCM1480_INT_LDT_SMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
-#define M_BCM1480_INT_LDT_NMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
-#define M_BCM1480_INT_LDT_INIT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
-#define M_BCM1480_INT_LDT_STARTUP _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
-#define M_BCM1480_INT_LDT_EXT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
-#define M_BCM1480_INT_SMB_0 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
-#define M_BCM1480_INT_SMB_1 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
-#define M_BCM1480_INT_PCMCIA _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
-#define M_BCM1480_INT_UART_0 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
-#define M_BCM1480_INT_UART_1 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
-#define M_BCM1480_INT_UART_2 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
-#define M_BCM1480_INT_UART_3 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
-#define M_BCM1480_INT_GPIO_4 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
-#define M_BCM1480_INT_GPIO_5 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
-#define M_BCM1480_INT_GPIO_6 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
-#define M_BCM1480_INT_GPIO_7 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
-#define M_BCM1480_INT_GPIO_8 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
-#define M_BCM1480_INT_GPIO_9 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
-#define M_BCM1480_INT_GPIO_10 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
-#define M_BCM1480_INT_GPIO_11 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
-#define M_BCM1480_INT_GPIO_12 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
-#define M_BCM1480_INT_GPIO_13 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
-#define M_BCM1480_INT_GPIO_14 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
-#define M_BCM1480_INT_GPIO_15 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
+#define M_BCM1480_INT_ADDR_TRAP _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
+#define M_BCM1480_INT_GPIO_0 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
+#define M_BCM1480_INT_GPIO_1 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
+#define M_BCM1480_INT_GPIO_2 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
+#define M_BCM1480_INT_GPIO_3 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
+#define M_BCM1480_INT_PCI_INTA _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
+#define M_BCM1480_INT_PCI_INTB _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
+#define M_BCM1480_INT_PCI_INTC _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
+#define M_BCM1480_INT_PCI_INTD _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
+#define M_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
+#define M_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
+#define M_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
+#define M_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
+#define M_BCM1480_INT_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
+#define M_BCM1480_INT_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
+#define M_BCM1480_INT_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
+#define M_BCM1480_INT_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
+#define M_BCM1480_INT_DM_CH_0 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
+#define M_BCM1480_INT_DM_CH_1 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
+#define M_BCM1480_INT_DM_CH_2 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
+#define M_BCM1480_INT_DM_CH_3 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
+#define M_BCM1480_INT_MAC_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
+#define M_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
+#define M_BCM1480_INT_MAC_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
+#define M_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
+#define M_BCM1480_INT_MAC_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
+#define M_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
+#define M_BCM1480_INT_MAC_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
+#define M_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
+#define M_BCM1480_INT_PMI_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
+#define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
+#define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
+#define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
+#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
+#define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
+#define M_BCM1480_INT_MBOX_0_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
+#define M_BCM1480_INT_MBOX_1_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
+#define M_BCM1480_INT_MBOX_1_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
+#define M_BCM1480_INT_MBOX_1_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
+#define M_BCM1480_INT_MBOX_1_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
+#define M_BCM1480_INT_BAD_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
+#define M_BCM1480_INT_COR_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
+#define M_BCM1480_INT_IO_BUS _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
+#define M_BCM1480_INT_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
+#define M_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
+#define M_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
+#define M_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
+#define M_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
+#define M_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
+#define M_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
+#define M_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
+#define M_BCM1480_INT_PCI_ERROR _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
+#define M_BCM1480_INT_PCI_RESET _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
+#define M_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
+#define M_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
+#define M_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
+#define M_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
+#define M_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
+#define M_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
+#define M_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
+#define M_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
+#define M_BCM1480_INT_LDT_SMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
+#define M_BCM1480_INT_LDT_NMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
+#define M_BCM1480_INT_LDT_INIT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
+#define M_BCM1480_INT_LDT_STARTUP _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
+#define M_BCM1480_INT_LDT_EXT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
+#define M_BCM1480_INT_SMB_0 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
+#define M_BCM1480_INT_SMB_1 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
+#define M_BCM1480_INT_PCMCIA _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
+#define M_BCM1480_INT_UART_0 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
+#define M_BCM1480_INT_UART_1 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
+#define M_BCM1480_INT_UART_2 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
+#define M_BCM1480_INT_UART_3 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
+#define M_BCM1480_INT_GPIO_4 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
+#define M_BCM1480_INT_GPIO_5 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
+#define M_BCM1480_INT_GPIO_6 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
+#define M_BCM1480_INT_GPIO_7 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
+#define M_BCM1480_INT_GPIO_8 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
+#define M_BCM1480_INT_GPIO_9 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
+#define M_BCM1480_INT_GPIO_10 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
+#define M_BCM1480_INT_GPIO_11 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
+#define M_BCM1480_INT_GPIO_12 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
+#define M_BCM1480_INT_GPIO_13 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
+#define M_BCM1480_INT_GPIO_14 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
+#define M_BCM1480_INT_GPIO_15 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
/*
* Interrupt mappings (Table 18)
*/
-#define K_BCM1480_INT_MAP_I0 0 /* interrupt pins on processor */
-#define K_BCM1480_INT_MAP_I1 1
-#define K_BCM1480_INT_MAP_I2 2
-#define K_BCM1480_INT_MAP_I3 3
-#define K_BCM1480_INT_MAP_I4 4
-#define K_BCM1480_INT_MAP_I5 5
-#define K_BCM1480_INT_MAP_NMI 6 /* nonmaskable */
-#define K_BCM1480_INT_MAP_DINT 7 /* debug interrupt */
+#define K_BCM1480_INT_MAP_I0 0 /* interrupt pins on processor */
+#define K_BCM1480_INT_MAP_I1 1
+#define K_BCM1480_INT_MAP_I2 2
+#define K_BCM1480_INT_MAP_I3 3
+#define K_BCM1480_INT_MAP_I4 4
+#define K_BCM1480_INT_MAP_I5 5
+#define K_BCM1480_INT_MAP_NMI 6 /* nonmaskable */
+#define K_BCM1480_INT_MAP_DINT 7 /* debug interrupt */
/*
* Interrupt LDT Set Register (Table 19)
*/
-#define S_BCM1480_INT_HT_INTMSG 0
-#define M_BCM1480_INT_HT_INTMSG _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
-#define V_BCM1480_INT_HT_INTMSG(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
-#define G_BCM1480_INT_HT_INTMSG(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
+#define S_BCM1480_INT_HT_INTMSG 0
+#define M_BCM1480_INT_HT_INTMSG _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
+#define V_BCM1480_INT_HT_INTMSG(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
+#define G_BCM1480_INT_HT_INTMSG(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
-#define K_BCM1480_INT_HT_INTMSG_FIXED 0
+#define K_BCM1480_INT_HT_INTMSG_FIXED 0
#define K_BCM1480_INT_HT_INTMSG_ARBITRATED 1
-#define K_BCM1480_INT_HT_INTMSG_SMI 2
-#define K_BCM1480_INT_HT_INTMSG_NMI 3
-#define K_BCM1480_INT_HT_INTMSG_INIT 4
-#define K_BCM1480_INT_HT_INTMSG_STARTUP 5
-#define K_BCM1480_INT_HT_INTMSG_EXTINT 6
+#define K_BCM1480_INT_HT_INTMSG_SMI 2
+#define K_BCM1480_INT_HT_INTMSG_NMI 3
+#define K_BCM1480_INT_HT_INTMSG_INIT 4
+#define K_BCM1480_INT_HT_INTMSG_STARTUP 5
+#define K_BCM1480_INT_HT_INTMSG_EXTINT 6
#define K_BCM1480_INT_HT_INTMSG_RESERVED 7
-#define M_BCM1480_INT_HT_TRIGGERMODE _SB_MAKEMASK1(3)
-#define V_BCM1480_INT_HT_EDGETRIGGER 0
-#define V_BCM1480_INT_HT_LEVELTRIGGER M_BCM1480_INT_HT_TRIGGERMODE
+#define M_BCM1480_INT_HT_TRIGGERMODE _SB_MAKEMASK1(3)
+#define V_BCM1480_INT_HT_EDGETRIGGER 0
+#define V_BCM1480_INT_HT_LEVELTRIGGER M_BCM1480_INT_HT_TRIGGERMODE
-#define M_BCM1480_INT_HT_DESTMODE _SB_MAKEMASK1(4)
-#define V_BCM1480_INT_HT_PHYSICALDEST 0
-#define V_BCM1480_INT_HT_LOGICALDEST M_BCM1480_INT_HT_DESTMODE
+#define M_BCM1480_INT_HT_DESTMODE _SB_MAKEMASK1(4)
+#define V_BCM1480_INT_HT_PHYSICALDEST 0
+#define V_BCM1480_INT_HT_LOGICALDEST M_BCM1480_INT_HT_DESTMODE
-#define S_BCM1480_INT_HT_INTDEST 5
-#define M_BCM1480_INT_HT_INTDEST _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
-#define V_BCM1480_INT_HT_INTDEST(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
-#define G_BCM1480_INT_HT_INTDEST(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
+#define S_BCM1480_INT_HT_INTDEST 5
+#define M_BCM1480_INT_HT_INTDEST _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
+#define V_BCM1480_INT_HT_INTDEST(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
+#define G_BCM1480_INT_HT_INTDEST(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
-#define S_BCM1480_INT_HT_VECTOR 13
-#define M_BCM1480_INT_HT_VECTOR _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
-#define V_BCM1480_INT_HT_VECTOR(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
-#define G_BCM1480_INT_HT_VECTOR(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
+#define S_BCM1480_INT_HT_VECTOR 13
+#define M_BCM1480_INT_HT_VECTOR _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
+#define V_BCM1480_INT_HT_VECTOR(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
+#define G_BCM1480_INT_HT_VECTOR(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
/*
* Vector prefix (Table 4-7)
*/
#define M_BCM1480_HTVECT_RAISE_INTLDT_HIGH 0x00
-#define M_BCM1480_HTVECT_RAISE_MBOX_0 0x40
+#define M_BCM1480_HTVECT_RAISE_MBOX_0 0x40
#define M_BCM1480_HTVECT_RAISE_INTLDT_LO 0x80
-#define M_BCM1480_HTVECT_RAISE_MBOX_1 0xC0
+#define M_BCM1480_HTVECT_RAISE_MBOX_1 0xC0
#endif /* _BCM1480_INT_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_l2c.h b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
index 725d38cb9d1c..910e5c7e1b08 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_l2c.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
@@ -39,120 +39,120 @@
* Format of level 2 cache management address (Table 55)
*/
-#define S_BCM1480_L2C_MGMT_INDEX 5
-#define M_BCM1480_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
-#define V_BCM1480_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
-#define G_BCM1480_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
+#define S_BCM1480_L2C_MGMT_INDEX 5
+#define M_BCM1480_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
+#define V_BCM1480_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
+#define G_BCM1480_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
-#define S_BCM1480_L2C_MGMT_WAY 17
-#define M_BCM1480_L2C_MGMT_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
-#define V_BCM1480_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
-#define G_BCM1480_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
+#define S_BCM1480_L2C_MGMT_WAY 17
+#define M_BCM1480_L2C_MGMT_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
+#define V_BCM1480_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
+#define G_BCM1480_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
-#define M_BCM1480_L2C_MGMT_DIRTY _SB_MAKEMASK1(20)
-#define M_BCM1480_L2C_MGMT_VALID _SB_MAKEMASK1(21)
+#define M_BCM1480_L2C_MGMT_DIRTY _SB_MAKEMASK1(20)
+#define M_BCM1480_L2C_MGMT_VALID _SB_MAKEMASK1(21)
-#define S_BCM1480_L2C_MGMT_ECC_DIAG 22
-#define M_BCM1480_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define V_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define G_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
+#define S_BCM1480_L2C_MGMT_ECC_DIAG 22
+#define M_BCM1480_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define V_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define G_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
-#define A_BCM1480_L2C_MGMT_TAG_BASE 0x00D0000000
+#define A_BCM1480_L2C_MGMT_TAG_BASE 0x00D0000000
-#define BCM1480_L2C_ENTRIES_PER_WAY 4096
-#define BCM1480_L2C_NUM_WAYS 8
+#define BCM1480_L2C_ENTRIES_PER_WAY 4096
+#define BCM1480_L2C_NUM_WAYS 8
/*
* Level 2 Cache Tag register (Table 59)
*/
-#define S_BCM1480_L2C_TAG_MBZ 0
-#define M_BCM1480_L2C_TAG_MBZ _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
+#define S_BCM1480_L2C_TAG_MBZ 0
+#define M_BCM1480_L2C_TAG_MBZ _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
-#define S_BCM1480_L2C_TAG_INDEX 5
-#define M_BCM1480_L2C_TAG_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
-#define V_BCM1480_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
-#define G_BCM1480_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
+#define S_BCM1480_L2C_TAG_INDEX 5
+#define M_BCM1480_L2C_TAG_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
+#define V_BCM1480_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
+#define G_BCM1480_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
/* Note that index bit 16 is also tag bit 40 */
-#define S_BCM1480_L2C_TAG_TAG 17
-#define M_BCM1480_L2C_TAG_TAG _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
-#define V_BCM1480_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
-#define G_BCM1480_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
+#define S_BCM1480_L2C_TAG_TAG 17
+#define M_BCM1480_L2C_TAG_TAG _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
+#define V_BCM1480_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
+#define G_BCM1480_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
-#define S_BCM1480_L2C_TAG_ECC 40
-#define M_BCM1480_L2C_TAG_ECC _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
-#define V_BCM1480_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
-#define G_BCM1480_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
+#define S_BCM1480_L2C_TAG_ECC 40
+#define M_BCM1480_L2C_TAG_ECC _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
+#define V_BCM1480_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
+#define G_BCM1480_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
-#define S_BCM1480_L2C_TAG_WAY 46
-#define M_BCM1480_L2C_TAG_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
-#define V_BCM1480_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
-#define G_BCM1480_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
+#define S_BCM1480_L2C_TAG_WAY 46
+#define M_BCM1480_L2C_TAG_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
+#define V_BCM1480_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
+#define G_BCM1480_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
-#define M_BCM1480_L2C_TAG_DIRTY _SB_MAKEMASK1(49)
-#define M_BCM1480_L2C_TAG_VALID _SB_MAKEMASK1(50)
+#define M_BCM1480_L2C_TAG_DIRTY _SB_MAKEMASK1(49)
+#define M_BCM1480_L2C_TAG_VALID _SB_MAKEMASK1(50)
-#define S_BCM1480_L2C_DATA_ECC 51
-#define M_BCM1480_L2C_DATA_ECC _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
-#define V_BCM1480_L2C_DATA_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
-#define G_BCM1480_L2C_DATA_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
+#define S_BCM1480_L2C_DATA_ECC 51
+#define M_BCM1480_L2C_DATA_ECC _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
+#define V_BCM1480_L2C_DATA_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
+#define G_BCM1480_L2C_DATA_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
/*
* L2 Misc0 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC0_WAY_REMOTE 0
-#define M_BCM1480_L2C_MISC0_WAY_REMOTE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
+#define S_BCM1480_L2C_MISC0_WAY_REMOTE 0
+#define M_BCM1480_L2C_MISC0_WAY_REMOTE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
#define G_BCM1480_L2C_MISC0_WAY_REMOTE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_REMOTE, M_BCM1480_L2C_MISC0_WAY_REMOTE)
-#define S_BCM1480_L2C_MISC0_WAY_LOCAL 8
-#define M_BCM1480_L2C_MISC0_WAY_LOCAL _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
+#define S_BCM1480_L2C_MISC0_WAY_LOCAL 8
+#define M_BCM1480_L2C_MISC0_WAY_LOCAL _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
#define G_BCM1480_L2C_MISC0_WAY_LOCAL(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_LOCAL, M_BCM1480_L2C_MISC0_WAY_LOCAL)
-#define S_BCM1480_L2C_MISC0_WAY_ENABLE 16
-#define M_BCM1480_L2C_MISC0_WAY_ENABLE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
+#define S_BCM1480_L2C_MISC0_WAY_ENABLE 16
+#define M_BCM1480_L2C_MISC0_WAY_ENABLE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
#define G_BCM1480_L2C_MISC0_WAY_ENABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_ENABLE, M_BCM1480_L2C_MISC0_WAY_ENABLE)
#define S_BCM1480_L2C_MISC0_CACHE_DISABLE 24
#define M_BCM1480_L2C_MISC0_CACHE_DISABLE _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_DISABLE)
#define G_BCM1480_L2C_MISC0_CACHE_DISABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_DISABLE, M_BCM1480_L2C_MISC0_CACHE_DISABLE)
-#define S_BCM1480_L2C_MISC0_CACHE_QUAD 26
-#define M_BCM1480_L2C_MISC0_CACHE_QUAD _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
+#define S_BCM1480_L2C_MISC0_CACHE_QUAD 26
+#define M_BCM1480_L2C_MISC0_CACHE_QUAD _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
#define G_BCM1480_L2C_MISC0_CACHE_QUAD(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_QUAD, M_BCM1480_L2C_MISC0_CACHE_QUAD)
-#define S_BCM1480_L2C_MISC0_MC_PRIORITY 30
-#define M_BCM1480_L2C_MISC0_MC_PRIORITY _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
+#define S_BCM1480_L2C_MISC0_MC_PRIORITY 30
+#define M_BCM1480_L2C_MISC0_MC_PRIORITY _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
-#define S_BCM1480_L2C_MISC0_ECC_CLEANUP 31
-#define M_BCM1480_L2C_MISC0_ECC_CLEANUP _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
+#define S_BCM1480_L2C_MISC0_ECC_CLEANUP 31
+#define M_BCM1480_L2C_MISC0_ECC_CLEANUP _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
/*
* L2 Misc1 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_0 0
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_0 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_0 0
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_0 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_0(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_0, M_BCM1480_L2C_MISC1_WAY_AGENT_0)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_1 8
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_1 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_1 8
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_1 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_1(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_1, M_BCM1480_L2C_MISC1_WAY_AGENT_1)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_2 16
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_2 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_2 16
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_2 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_2(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_2, M_BCM1480_L2C_MISC1_WAY_AGENT_2)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_3 24
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_3 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_3 24
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_3 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_3(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_3, M_BCM1480_L2C_MISC1_WAY_AGENT_3)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_4 32
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_4 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_4 32
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_4 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_4(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_4, M_BCM1480_L2C_MISC1_WAY_AGENT_4)
@@ -160,16 +160,16 @@
* L2 Misc2 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_8 0
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_8 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_8 0
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_8 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_8(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_8, M_BCM1480_L2C_MISC2_WAY_AGENT_8)
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_9 8
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_9 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_9 8
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_9 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_9(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_9, M_BCM1480_L2C_MISC2_WAY_AGENT_9)
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_A 16
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_A _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_A 16
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_A _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_A(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_A, M_BCM1480_L2C_MISC2_WAY_AGENT_A)
diff --git a/arch/mips/include/asm/sibyte/bcm1480_mc.h b/arch/mips/include/asm/sibyte/bcm1480_mc.h
index 4307a758e3bf..86908fdb4032 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_mc.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_mc.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1280/BCM1480 Board Support Package
*
- * Memory Controller constants File: bcm1480_mc.h
+ * Memory Controller constants File: bcm1480_mc.h
*
* This module contains constants and macros useful for
* programming the memory controller.
@@ -39,33 +39,33 @@
* Memory Channel Configuration Register (Table 81)
*/
-#define S_BCM1480_MC_INTLV0 0
-#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0_DEFAULT V_BCM1480_MC_INTLV0(0)
-
-#define S_BCM1480_MC_INTLV1 8
-#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1_DEFAULT V_BCM1480_MC_INTLV1(0)
-
-#define S_BCM1480_MC_INTLV2 16
-#define M_BCM1480_MC_INTLV2 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
-#define G_BCM1480_MC_INTLV2(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2_DEFAULT V_BCM1480_MC_INTLV2(0)
-
-#define S_BCM1480_MC_CS_MODE 32
-#define M_BCM1480_MC_CS_MODE _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
-#define G_BCM1480_MC_CS_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE_DEFAULT V_BCM1480_MC_CS_MODE(0)
-
-#define V_BCM1480_MC_CONFIG_DEFAULT (V_BCM1480_MC_INTLV0_DEFAULT | \
- V_BCM1480_MC_INTLV1_DEFAULT | \
- V_BCM1480_MC_INTLV2_DEFAULT | \
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0_DEFAULT V_BCM1480_MC_INTLV0(0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1_DEFAULT V_BCM1480_MC_INTLV1(0)
+
+#define S_BCM1480_MC_INTLV2 16
+#define M_BCM1480_MC_INTLV2 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
+#define G_BCM1480_MC_INTLV2(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2_DEFAULT V_BCM1480_MC_INTLV2(0)
+
+#define S_BCM1480_MC_CS_MODE 32
+#define M_BCM1480_MC_CS_MODE _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
+#define G_BCM1480_MC_CS_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE_DEFAULT V_BCM1480_MC_CS_MODE(0)
+
+#define V_BCM1480_MC_CONFIG_DEFAULT (V_BCM1480_MC_INTLV0_DEFAULT | \
+ V_BCM1480_MC_INTLV1_DEFAULT | \
+ V_BCM1480_MC_INTLV2_DEFAULT | \
V_BCM1480_MC_CS_MODE_DEFAULT)
#define K_BCM1480_MC_CS01_MODE 0x03
@@ -80,254 +80,254 @@
* Chip Select Start Address Register (Table 82)
*/
-#define S_BCM1480_MC_CS0_START 0
-#define M_BCM1480_MC_CS0_START _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
-#define V_BCM1480_MC_CS0_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
-#define G_BCM1480_MC_CS0_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
+#define S_BCM1480_MC_CS0_START 0
+#define M_BCM1480_MC_CS0_START _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
+#define V_BCM1480_MC_CS0_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
+#define G_BCM1480_MC_CS0_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
-#define S_BCM1480_MC_CS1_START 16
-#define M_BCM1480_MC_CS1_START _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
-#define V_BCM1480_MC_CS1_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
-#define G_BCM1480_MC_CS1_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
+#define S_BCM1480_MC_CS1_START 16
+#define M_BCM1480_MC_CS1_START _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
+#define V_BCM1480_MC_CS1_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
+#define G_BCM1480_MC_CS1_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
-#define S_BCM1480_MC_CS2_START 32
-#define M_BCM1480_MC_CS2_START _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
-#define V_BCM1480_MC_CS2_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
-#define G_BCM1480_MC_CS2_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
+#define S_BCM1480_MC_CS2_START 32
+#define M_BCM1480_MC_CS2_START _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
+#define V_BCM1480_MC_CS2_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
+#define G_BCM1480_MC_CS2_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
-#define S_BCM1480_MC_CS3_START 48
-#define M_BCM1480_MC_CS3_START _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
-#define V_BCM1480_MC_CS3_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
-#define G_BCM1480_MC_CS3_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
+#define S_BCM1480_MC_CS3_START 48
+#define M_BCM1480_MC_CS3_START _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
+#define V_BCM1480_MC_CS3_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
+#define G_BCM1480_MC_CS3_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
/*
* Chip Select End Address Register (Table 83)
*/
-#define S_BCM1480_MC_CS0_END 0
-#define M_BCM1480_MC_CS0_END _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
-#define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
-#define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
+#define S_BCM1480_MC_CS0_END 0
+#define M_BCM1480_MC_CS0_END _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
+#define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
+#define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
-#define S_BCM1480_MC_CS1_END 16
-#define M_BCM1480_MC_CS1_END _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
-#define V_BCM1480_MC_CS1_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
-#define G_BCM1480_MC_CS1_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
+#define S_BCM1480_MC_CS1_END 16
+#define M_BCM1480_MC_CS1_END _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
+#define V_BCM1480_MC_CS1_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
+#define G_BCM1480_MC_CS1_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
-#define S_BCM1480_MC_CS2_END 32
-#define M_BCM1480_MC_CS2_END _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
-#define V_BCM1480_MC_CS2_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
-#define G_BCM1480_MC_CS2_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
+#define S_BCM1480_MC_CS2_END 32
+#define M_BCM1480_MC_CS2_END _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
+#define V_BCM1480_MC_CS2_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
+#define G_BCM1480_MC_CS2_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
-#define S_BCM1480_MC_CS3_END 48
-#define M_BCM1480_MC_CS3_END _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
-#define V_BCM1480_MC_CS3_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
-#define G_BCM1480_MC_CS3_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
+#define S_BCM1480_MC_CS3_END 48
+#define M_BCM1480_MC_CS3_END _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
+#define V_BCM1480_MC_CS3_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
+#define G_BCM1480_MC_CS3_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
/*
* Row Address Bit Select Register 0 (Table 84)
*/
-#define S_BCM1480_MC_ROW00 0
-#define M_BCM1480_MC_ROW00 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
-#define V_BCM1480_MC_ROW00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
-#define G_BCM1480_MC_ROW00(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
-
-#define S_BCM1480_MC_ROW01 8
-#define M_BCM1480_MC_ROW01 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
-#define V_BCM1480_MC_ROW01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
-#define G_BCM1480_MC_ROW01(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
-
-#define S_BCM1480_MC_ROW02 16
-#define M_BCM1480_MC_ROW02 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
-#define V_BCM1480_MC_ROW02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
-#define G_BCM1480_MC_ROW02(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
-
-#define S_BCM1480_MC_ROW03 24
-#define M_BCM1480_MC_ROW03 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
-#define V_BCM1480_MC_ROW03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
-#define G_BCM1480_MC_ROW03(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
-
-#define S_BCM1480_MC_ROW04 32
-#define M_BCM1480_MC_ROW04 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
-#define V_BCM1480_MC_ROW04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
-#define G_BCM1480_MC_ROW04(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
-
-#define S_BCM1480_MC_ROW05 40
-#define M_BCM1480_MC_ROW05 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
-#define V_BCM1480_MC_ROW05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
-#define G_BCM1480_MC_ROW05(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
-
-#define S_BCM1480_MC_ROW06 48
-#define M_BCM1480_MC_ROW06 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
-#define V_BCM1480_MC_ROW06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
-#define G_BCM1480_MC_ROW06(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
-
-#define S_BCM1480_MC_ROW07 56
-#define M_BCM1480_MC_ROW07 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
-#define V_BCM1480_MC_ROW07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
-#define G_BCM1480_MC_ROW07(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
+#define S_BCM1480_MC_ROW00 0
+#define M_BCM1480_MC_ROW00 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
+#define V_BCM1480_MC_ROW00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
+#define G_BCM1480_MC_ROW00(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
+
+#define S_BCM1480_MC_ROW01 8
+#define M_BCM1480_MC_ROW01 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
+#define V_BCM1480_MC_ROW01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
+#define G_BCM1480_MC_ROW01(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
+
+#define S_BCM1480_MC_ROW02 16
+#define M_BCM1480_MC_ROW02 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
+#define V_BCM1480_MC_ROW02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
+#define G_BCM1480_MC_ROW02(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
+
+#define S_BCM1480_MC_ROW03 24
+#define M_BCM1480_MC_ROW03 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
+#define V_BCM1480_MC_ROW03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
+#define G_BCM1480_MC_ROW03(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
+
+#define S_BCM1480_MC_ROW04 32
+#define M_BCM1480_MC_ROW04 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
+#define V_BCM1480_MC_ROW04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
+#define G_BCM1480_MC_ROW04(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
+
+#define S_BCM1480_MC_ROW05 40
+#define M_BCM1480_MC_ROW05 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
+#define V_BCM1480_MC_ROW05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
+#define G_BCM1480_MC_ROW05(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
+
+#define S_BCM1480_MC_ROW06 48
+#define M_BCM1480_MC_ROW06 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
+#define V_BCM1480_MC_ROW06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
+#define G_BCM1480_MC_ROW06(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
+
+#define S_BCM1480_MC_ROW07 56
+#define M_BCM1480_MC_ROW07 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
+#define V_BCM1480_MC_ROW07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
+#define G_BCM1480_MC_ROW07(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
/*
* Row Address Bit Select Register 1 (Table 85)
*/
-#define S_BCM1480_MC_ROW08 0
-#define M_BCM1480_MC_ROW08 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
-#define V_BCM1480_MC_ROW08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
-#define G_BCM1480_MC_ROW08(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
+#define S_BCM1480_MC_ROW08 0
+#define M_BCM1480_MC_ROW08 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
+#define V_BCM1480_MC_ROW08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
+#define G_BCM1480_MC_ROW08(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
-#define S_BCM1480_MC_ROW09 8
-#define M_BCM1480_MC_ROW09 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
-#define V_BCM1480_MC_ROW09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
-#define G_BCM1480_MC_ROW09(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
+#define S_BCM1480_MC_ROW09 8
+#define M_BCM1480_MC_ROW09 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
+#define V_BCM1480_MC_ROW09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
+#define G_BCM1480_MC_ROW09(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
-#define S_BCM1480_MC_ROW10 16
-#define M_BCM1480_MC_ROW10 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
-#define V_BCM1480_MC_ROW10(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
-#define G_BCM1480_MC_ROW10(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
+#define S_BCM1480_MC_ROW10 16
+#define M_BCM1480_MC_ROW10 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
+#define V_BCM1480_MC_ROW10(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
+#define G_BCM1480_MC_ROW10(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
-#define S_BCM1480_MC_ROW11 24
-#define M_BCM1480_MC_ROW11 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
-#define V_BCM1480_MC_ROW11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
-#define G_BCM1480_MC_ROW11(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
+#define S_BCM1480_MC_ROW11 24
+#define M_BCM1480_MC_ROW11 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
+#define V_BCM1480_MC_ROW11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
+#define G_BCM1480_MC_ROW11(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
-#define S_BCM1480_MC_ROW12 32
-#define M_BCM1480_MC_ROW12 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
-#define V_BCM1480_MC_ROW12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
-#define G_BCM1480_MC_ROW12(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
+#define S_BCM1480_MC_ROW12 32
+#define M_BCM1480_MC_ROW12 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
+#define V_BCM1480_MC_ROW12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
+#define G_BCM1480_MC_ROW12(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
-#define S_BCM1480_MC_ROW13 40
-#define M_BCM1480_MC_ROW13 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
-#define V_BCM1480_MC_ROW13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
-#define G_BCM1480_MC_ROW13(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
+#define S_BCM1480_MC_ROW13 40
+#define M_BCM1480_MC_ROW13 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
+#define V_BCM1480_MC_ROW13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
+#define G_BCM1480_MC_ROW13(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
-#define S_BCM1480_MC_ROW14 48
-#define M_BCM1480_MC_ROW14 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
-#define V_BCM1480_MC_ROW14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
-#define G_BCM1480_MC_ROW14(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
+#define S_BCM1480_MC_ROW14 48
+#define M_BCM1480_MC_ROW14 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
+#define V_BCM1480_MC_ROW14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
+#define G_BCM1480_MC_ROW14(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
-#define K_BCM1480_MC_ROWX_BIT_SPACING 8
+#define K_BCM1480_MC_ROWX_BIT_SPACING 8
/*
* Column Address Bit Select Register 0 (Table 86)
*/
-#define S_BCM1480_MC_COL00 0
-#define M_BCM1480_MC_COL00 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
-#define V_BCM1480_MC_COL00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
-#define G_BCM1480_MC_COL00(x) _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
-
-#define S_BCM1480_MC_COL01 8
-#define M_BCM1480_MC_COL01 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
-#define V_BCM1480_MC_COL01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
-#define G_BCM1480_MC_COL01(x) _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
-
-#define S_BCM1480_MC_COL02 16
-#define M_BCM1480_MC_COL02 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
-#define V_BCM1480_MC_COL02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
-#define G_BCM1480_MC_COL02(x) _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
-
-#define S_BCM1480_MC_COL03 24
-#define M_BCM1480_MC_COL03 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
-#define V_BCM1480_MC_COL03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
-#define G_BCM1480_MC_COL03(x) _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
-
-#define S_BCM1480_MC_COL04 32
-#define M_BCM1480_MC_COL04 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
-#define V_BCM1480_MC_COL04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
-#define G_BCM1480_MC_COL04(x) _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
-
-#define S_BCM1480_MC_COL05 40
-#define M_BCM1480_MC_COL05 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
-#define V_BCM1480_MC_COL05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
-#define G_BCM1480_MC_COL05(x) _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
-
-#define S_BCM1480_MC_COL06 48
-#define M_BCM1480_MC_COL06 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
-#define V_BCM1480_MC_COL06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
-#define G_BCM1480_MC_COL06(x) _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
-
-#define S_BCM1480_MC_COL07 56
-#define M_BCM1480_MC_COL07 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
-#define V_BCM1480_MC_COL07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
-#define G_BCM1480_MC_COL07(x) _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
+#define S_BCM1480_MC_COL00 0
+#define M_BCM1480_MC_COL00 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
+#define V_BCM1480_MC_COL00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
+#define G_BCM1480_MC_COL00(x) _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
+
+#define S_BCM1480_MC_COL01 8
+#define M_BCM1480_MC_COL01 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
+#define V_BCM1480_MC_COL01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
+#define G_BCM1480_MC_COL01(x) _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
+
+#define S_BCM1480_MC_COL02 16
+#define M_BCM1480_MC_COL02 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
+#define V_BCM1480_MC_COL02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
+#define G_BCM1480_MC_COL02(x) _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
+
+#define S_BCM1480_MC_COL03 24
+#define M_BCM1480_MC_COL03 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
+#define V_BCM1480_MC_COL03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
+#define G_BCM1480_MC_COL03(x) _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
+
+#define S_BCM1480_MC_COL04 32
+#define M_BCM1480_MC_COL04 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
+#define V_BCM1480_MC_COL04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
+#define G_BCM1480_MC_COL04(x) _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
+
+#define S_BCM1480_MC_COL05 40
+#define M_BCM1480_MC_COL05 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
+#define V_BCM1480_MC_COL05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
+#define G_BCM1480_MC_COL05(x) _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
+
+#define S_BCM1480_MC_COL06 48
+#define M_BCM1480_MC_COL06 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
+#define V_BCM1480_MC_COL06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
+#define G_BCM1480_MC_COL06(x) _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
+
+#define S_BCM1480_MC_COL07 56
+#define M_BCM1480_MC_COL07 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
+#define V_BCM1480_MC_COL07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
+#define G_BCM1480_MC_COL07(x) _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
/*
* Column Address Bit Select Register 1 (Table 87)
*/
-#define S_BCM1480_MC_COL08 0
-#define M_BCM1480_MC_COL08 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
-#define V_BCM1480_MC_COL08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
-#define G_BCM1480_MC_COL08(x) _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
+#define S_BCM1480_MC_COL08 0
+#define M_BCM1480_MC_COL08 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
+#define V_BCM1480_MC_COL08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
+#define G_BCM1480_MC_COL08(x) _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
-#define S_BCM1480_MC_COL09 8
-#define M_BCM1480_MC_COL09 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
-#define V_BCM1480_MC_COL09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
-#define G_BCM1480_MC_COL09(x) _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
+#define S_BCM1480_MC_COL09 8
+#define M_BCM1480_MC_COL09 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
+#define V_BCM1480_MC_COL09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
+#define G_BCM1480_MC_COL09(x) _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
-#define S_BCM1480_MC_COL10 16 /* not a valid position, must be prog as 0 */
+#define S_BCM1480_MC_COL10 16 /* not a valid position, must be prog as 0 */
-#define S_BCM1480_MC_COL11 24
-#define M_BCM1480_MC_COL11 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
-#define V_BCM1480_MC_COL11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
-#define G_BCM1480_MC_COL11(x) _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
+#define S_BCM1480_MC_COL11 24
+#define M_BCM1480_MC_COL11 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
+#define V_BCM1480_MC_COL11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
+#define G_BCM1480_MC_COL11(x) _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
-#define S_BCM1480_MC_COL12 32
-#define M_BCM1480_MC_COL12 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
-#define V_BCM1480_MC_COL12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
-#define G_BCM1480_MC_COL12(x) _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
+#define S_BCM1480_MC_COL12 32
+#define M_BCM1480_MC_COL12 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
+#define V_BCM1480_MC_COL12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
+#define G_BCM1480_MC_COL12(x) _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
-#define S_BCM1480_MC_COL13 40
-#define M_BCM1480_MC_COL13 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
-#define V_BCM1480_MC_COL13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
-#define G_BCM1480_MC_COL13(x) _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
+#define S_BCM1480_MC_COL13 40
+#define M_BCM1480_MC_COL13 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
+#define V_BCM1480_MC_COL13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
+#define G_BCM1480_MC_COL13(x) _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
-#define S_BCM1480_MC_COL14 48
-#define M_BCM1480_MC_COL14 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
-#define V_BCM1480_MC_COL14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
-#define G_BCM1480_MC_COL14(x) _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
+#define S_BCM1480_MC_COL14 48
+#define M_BCM1480_MC_COL14 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
+#define V_BCM1480_MC_COL14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
+#define G_BCM1480_MC_COL14(x) _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
-#define K_BCM1480_MC_COLX_BIT_SPACING 8
+#define K_BCM1480_MC_COLX_BIT_SPACING 8
/*
* CS0 and CS1 Bank Address Bit Select Register (Table 88)
*/
-#define S_BCM1480_MC_CS01_BANK0 0
-#define M_BCM1480_MC_CS01_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
-#define V_BCM1480_MC_CS01_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
-#define G_BCM1480_MC_CS01_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
+#define S_BCM1480_MC_CS01_BANK0 0
+#define M_BCM1480_MC_CS01_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
+#define V_BCM1480_MC_CS01_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
+#define G_BCM1480_MC_CS01_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
-#define S_BCM1480_MC_CS01_BANK1 8
-#define M_BCM1480_MC_CS01_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
-#define V_BCM1480_MC_CS01_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
-#define G_BCM1480_MC_CS01_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
+#define S_BCM1480_MC_CS01_BANK1 8
+#define M_BCM1480_MC_CS01_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
+#define V_BCM1480_MC_CS01_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
+#define G_BCM1480_MC_CS01_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
-#define S_BCM1480_MC_CS01_BANK2 16
-#define M_BCM1480_MC_CS01_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
-#define V_BCM1480_MC_CS01_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
-#define G_BCM1480_MC_CS01_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
+#define S_BCM1480_MC_CS01_BANK2 16
+#define M_BCM1480_MC_CS01_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
+#define V_BCM1480_MC_CS01_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
+#define G_BCM1480_MC_CS01_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
/*
* CS2 and CS3 Bank Address Bit Select Register (Table 89)
*/
-#define S_BCM1480_MC_CS23_BANK0 0
-#define M_BCM1480_MC_CS23_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
-#define V_BCM1480_MC_CS23_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
-#define G_BCM1480_MC_CS23_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
+#define S_BCM1480_MC_CS23_BANK0 0
+#define M_BCM1480_MC_CS23_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
+#define V_BCM1480_MC_CS23_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
+#define G_BCM1480_MC_CS23_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
-#define S_BCM1480_MC_CS23_BANK1 8
-#define M_BCM1480_MC_CS23_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
-#define V_BCM1480_MC_CS23_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
-#define G_BCM1480_MC_CS23_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
+#define S_BCM1480_MC_CS23_BANK1 8
+#define M_BCM1480_MC_CS23_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
+#define V_BCM1480_MC_CS23_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
+#define G_BCM1480_MC_CS23_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
-#define S_BCM1480_MC_CS23_BANK2 16
-#define M_BCM1480_MC_CS23_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
-#define V_BCM1480_MC_CS23_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
-#define G_BCM1480_MC_CS23_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
+#define S_BCM1480_MC_CS23_BANK2 16
+#define M_BCM1480_MC_CS23_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
+#define V_BCM1480_MC_CS23_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
+#define G_BCM1480_MC_CS23_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
#define K_BCM1480_MC_CSXX_BANKX_BIT_SPACING 8
@@ -335,19 +335,19 @@
* DRAM Command Register (Table 90)
*/
-#define S_BCM1480_MC_COMMAND 0
-#define M_BCM1480_MC_COMMAND _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
-#define V_BCM1480_MC_COMMAND(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
-#define G_BCM1480_MC_COMMAND(x) _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
+#define S_BCM1480_MC_COMMAND 0
+#define M_BCM1480_MC_COMMAND _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
+#define V_BCM1480_MC_COMMAND(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
+#define G_BCM1480_MC_COMMAND(x) _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
-#define K_BCM1480_MC_COMMAND_EMRS 0
-#define K_BCM1480_MC_COMMAND_MRS 1
-#define K_BCM1480_MC_COMMAND_PRE 2
-#define K_BCM1480_MC_COMMAND_AR 3
-#define K_BCM1480_MC_COMMAND_SETRFSH 4
-#define K_BCM1480_MC_COMMAND_CLRRFSH 5
-#define K_BCM1480_MC_COMMAND_SETPWRDN 6
-#define K_BCM1480_MC_COMMAND_CLRPWRDN 7
+#define K_BCM1480_MC_COMMAND_EMRS 0
+#define K_BCM1480_MC_COMMAND_MRS 1
+#define K_BCM1480_MC_COMMAND_PRE 2
+#define K_BCM1480_MC_COMMAND_AR 3
+#define K_BCM1480_MC_COMMAND_SETRFSH 4
+#define K_BCM1480_MC_COMMAND_CLRRFSH 5
+#define K_BCM1480_MC_COMMAND_SETPWRDN 6
+#define K_BCM1480_MC_COMMAND_CLRPWRDN 7
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define K_BCM1480_MC_COMMAND_EMRS2 8
@@ -356,61 +356,61 @@
#define K_BCM1480_MC_COMMAND_DISABLE_MCLK 11
#endif
-#define V_BCM1480_MC_COMMAND_EMRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
-#define V_BCM1480_MC_COMMAND_MRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
-#define V_BCM1480_MC_COMMAND_PRE V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
-#define V_BCM1480_MC_COMMAND_AR V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
-#define V_BCM1480_MC_COMMAND_SETRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
-#define V_BCM1480_MC_COMMAND_CLRRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
-#define V_BCM1480_MC_COMMAND_SETPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
-#define V_BCM1480_MC_COMMAND_CLRPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
+#define V_BCM1480_MC_COMMAND_EMRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
+#define V_BCM1480_MC_COMMAND_MRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
+#define V_BCM1480_MC_COMMAND_PRE V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
+#define V_BCM1480_MC_COMMAND_AR V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
+#define V_BCM1480_MC_COMMAND_SETRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
+#define V_BCM1480_MC_COMMAND_CLRRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
+#define V_BCM1480_MC_COMMAND_SETPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
+#define V_BCM1480_MC_COMMAND_CLRPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define V_BCM1480_MC_COMMAND_EMRS2 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
-#define V_BCM1480_MC_COMMAND_EMRS3 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
+#define V_BCM1480_MC_COMMAND_EMRS2 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
+#define V_BCM1480_MC_COMMAND_EMRS3 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
#define V_BCM1480_MC_COMMAND_ENABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_ENABLE_MCLK)
#define V_BCM1480_MC_COMMAND_DISABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_DISABLE_MCLK)
#endif
#define S_BCM1480_MC_CS0 4
-#define M_BCM1480_MC_CS0 _SB_MAKEMASK1(4)
-#define M_BCM1480_MC_CS1 _SB_MAKEMASK1(5)
-#define M_BCM1480_MC_CS2 _SB_MAKEMASK1(6)
-#define M_BCM1480_MC_CS3 _SB_MAKEMASK1(7)
-#define M_BCM1480_MC_CS4 _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_CS5 _SB_MAKEMASK1(9)
-#define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10)
-#define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11)
+#define M_BCM1480_MC_CS0 _SB_MAKEMASK1(4)
+#define M_BCM1480_MC_CS1 _SB_MAKEMASK1(5)
+#define M_BCM1480_MC_CS2 _SB_MAKEMASK1(6)
+#define M_BCM1480_MC_CS3 _SB_MAKEMASK1(7)
+#define M_BCM1480_MC_CS4 _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_CS5 _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10)
+#define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11)
-#define M_BCM1480_MC_CS _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
-#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
-#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
+#define M_BCM1480_MC_CS _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
+#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
+#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
-#define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16)
/*
* DRAM Mode Register (Table 91)
*/
-#define S_BCM1480_MC_EMODE 0
-#define M_BCM1480_MC_EMODE _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
-#define G_BCM1480_MC_EMODE(x) _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE_DEFAULT V_BCM1480_MC_EMODE(0)
+#define S_BCM1480_MC_EMODE 0
+#define M_BCM1480_MC_EMODE _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
+#define G_BCM1480_MC_EMODE(x) _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE_DEFAULT V_BCM1480_MC_EMODE(0)
-#define S_BCM1480_MC_MODE 16
-#define M_BCM1480_MC_MODE _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
-#define G_BCM1480_MC_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE_DEFAULT V_BCM1480_MC_MODE(0)
+#define S_BCM1480_MC_MODE 16
+#define M_BCM1480_MC_MODE _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
+#define G_BCM1480_MC_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE_DEFAULT V_BCM1480_MC_MODE(0)
-#define S_BCM1480_MC_DRAM_TYPE 32
-#define M_BCM1480_MC_DRAM_TYPE _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
-#define V_BCM1480_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
-#define G_BCM1480_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
+#define S_BCM1480_MC_DRAM_TYPE 32
+#define M_BCM1480_MC_DRAM_TYPE _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
+#define V_BCM1480_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
+#define G_BCM1480_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
-#define K_BCM1480_MC_DRAM_TYPE_JEDEC 0
-#define K_BCM1480_MC_DRAM_TYPE_FCRAM 1
+#define K_BCM1480_MC_DRAM_TYPE_JEDEC 0
+#define K_BCM1480_MC_DRAM_TYPE_FCRAM 1
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define K_BCM1480_MC_DRAM_TYPE_DDR2 2
@@ -418,27 +418,27 @@
#define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1 0
-#define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
-#define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
+#define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
+#define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define V_BCM1480_MC_DRAM_TYPE_DDR2 V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_DDR2)
#endif
-#define M_BCM1480_MC_GANGED _SB_MAKEMASK1(36)
-#define M_BCM1480_MC_BY9_INTF _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_FORCE_ECC64 _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_ECC_DISABLE _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_GANGED _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_BY9_INTF _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_FORCE_ECC64 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_ECC_DISABLE _SB_MAKEMASK1(39)
-#define S_BCM1480_MC_PG_POLICY 40
-#define M_BCM1480_MC_PG_POLICY _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
-#define V_BCM1480_MC_PG_POLICY(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
-#define G_BCM1480_MC_PG_POLICY(x) _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
+#define S_BCM1480_MC_PG_POLICY 40
+#define M_BCM1480_MC_PG_POLICY _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
+#define V_BCM1480_MC_PG_POLICY(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
+#define G_BCM1480_MC_PG_POLICY(x) _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
-#define K_BCM1480_MC_PG_POLICY_CLOSED 0
+#define K_BCM1480_MC_PG_POLICY_CLOSED 0
#define K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK 1
-#define V_BCM1480_MC_PG_POLICY_CLOSED V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
+#define V_BCM1480_MC_PG_POLICY_CLOSED V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
#define V_BCM1480_MC_PG_POLICY_CAS_TIME_CHK V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
@@ -447,32 +447,32 @@
#endif
#define V_BCM1480_MC_DRAMMODE_DEFAULT V_BCM1480_MC_EMODE_DEFAULT | V_BCM1480_MC_MODE_DEFAULT | V_BCM1480_MC_DRAM_TYPE_JEDEC | \
- V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
+ V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
/*
* Memory Clock Configuration Register (Table 92)
*/
-#define S_BCM1480_MC_CLK_RATIO 0
-#define M_BCM1480_MC_CLK_RATIO _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
-#define V_BCM1480_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
-#define G_BCM1480_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
+#define S_BCM1480_MC_CLK_RATIO 0
+#define M_BCM1480_MC_CLK_RATIO _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
+#define V_BCM1480_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
+#define G_BCM1480_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
-#define V_BCM1480_MC_CLK_RATIO_DEFAULT V_BCM1480_MC_CLK_RATIO(10)
+#define V_BCM1480_MC_CLK_RATIO_DEFAULT V_BCM1480_MC_CLK_RATIO(10)
-#define S_BCM1480_MC_REF_RATE 8
-#define M_BCM1480_MC_REF_RATE _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
-#define V_BCM1480_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
-#define G_BCM1480_MC_REF_RATE(x) _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
+#define S_BCM1480_MC_REF_RATE 8
+#define M_BCM1480_MC_REF_RATE _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
+#define V_BCM1480_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
+#define G_BCM1480_MC_REF_RATE(x) _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
-#define K_BCM1480_MC_REF_RATE_100MHz 0x31
-#define K_BCM1480_MC_REF_RATE_200MHz 0x62
-#define K_BCM1480_MC_REF_RATE_400MHz 0xC4
+#define K_BCM1480_MC_REF_RATE_100MHz 0x31
+#define K_BCM1480_MC_REF_RATE_200MHz 0x62
+#define K_BCM1480_MC_REF_RATE_400MHz 0xC4
-#define V_BCM1480_MC_REF_RATE_100MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
-#define V_BCM1480_MC_REF_RATE_200MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
-#define V_BCM1480_MC_REF_RATE_400MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
-#define V_BCM1480_MC_REF_RATE_DEFAULT V_BCM1480_MC_REF_RATE_400MHz
+#define V_BCM1480_MC_REF_RATE_100MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
+#define V_BCM1480_MC_REF_RATE_200MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
+#define V_BCM1480_MC_REF_RATE_400MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
+#define V_BCM1480_MC_REF_RATE_DEFAULT V_BCM1480_MC_REF_RATE_400MHz
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define M_BCM1480_MC_AUTO_REF_DIS _SB_MAKEMASK1(16)
@@ -518,19 +518,19 @@
#define M_BCM1480_MC_CS_ODD_ODT_EN _SB_MAKEMASK1(32)
-#define S_BCM1480_MC_ODT0 0
+#define S_BCM1480_MC_ODT0 0
#define M_BCM1480_MC_ODT0 _SB_MAKEMASK(8, S_BCM1480_MC_ODT0)
#define V_BCM1480_MC_ODT0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT0)
-#define S_BCM1480_MC_ODT2 8
+#define S_BCM1480_MC_ODT2 8
#define M_BCM1480_MC_ODT2 _SB_MAKEMASK(8, S_BCM1480_MC_ODT2)
#define V_BCM1480_MC_ODT2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT2)
-#define S_BCM1480_MC_ODT4 16
+#define S_BCM1480_MC_ODT4 16
#define M_BCM1480_MC_ODT4 _SB_MAKEMASK(8, S_BCM1480_MC_ODT4)
#define V_BCM1480_MC_ODT4(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT4)
-#define S_BCM1480_MC_ODT6 24
+#define S_BCM1480_MC_ODT6 24
#define M_BCM1480_MC_ODT6 _SB_MAKEMASK(8, S_BCM1480_MC_ODT6)
#define V_BCM1480_MC_ODT6(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT6)
#endif
@@ -539,139 +539,139 @@
* Memory DLL Configuration Register (Table 93)
*/
-#define S_BCM1480_MC_ADDR_COARSE_ADJ 0
-#define M_BCM1480_MC_ADDR_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define V_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define G_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
+#define S_BCM1480_MC_ADDR_COARSE_ADJ 0
+#define M_BCM1480_MC_ADDR_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define V_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define G_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
#define V_BCM1480_MC_ADDR_COARSE_ADJ_DEFAULT V_BCM1480_MC_ADDR_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_ADDR_FREQ_RANGE 8
-#define M_BCM1480_MC_ADDR_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define G_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_ADDR_FREQ_RANGE 8
+#define M_BCM1480_MC_ADDR_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define G_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_ADDR_FINE_ADJ 8
-#define M_BCM1480_MC_ADDR_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define V_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define G_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
+#define S_BCM1480_MC_ADDR_FINE_ADJ 8
+#define M_BCM1480_MC_ADDR_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define V_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define G_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
#define V_BCM1480_MC_ADDR_FINE_ADJ_DEFAULT V_BCM1480_MC_ADDR_FINE_ADJ(0x8)
-#define S_BCM1480_MC_DQI_COARSE_ADJ 16
-#define M_BCM1480_MC_DQI_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define V_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define G_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
+#define S_BCM1480_MC_DQI_COARSE_ADJ 16
+#define M_BCM1480_MC_DQI_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define V_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define G_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
#define V_BCM1480_MC_DQI_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQI_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQI_FREQ_RANGE 24
-#define M_BCM1480_MC_DQI_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define G_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQI_FREQ_RANGE 24
+#define M_BCM1480_MC_DQI_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define G_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DQI_FINE_ADJ 24
-#define M_BCM1480_MC_DQI_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
-#define V_BCM1480_MC_DQI_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
-#define G_BCM1480_MC_DQI_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
+#define S_BCM1480_MC_DQI_FINE_ADJ 24
+#define M_BCM1480_MC_DQI_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
+#define V_BCM1480_MC_DQI_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
+#define G_BCM1480_MC_DQI_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
#define V_BCM1480_MC_DQI_FINE_ADJ_DEFAULT V_BCM1480_MC_DQI_FINE_ADJ(0x8)
-#define S_BCM1480_MC_DQO_COARSE_ADJ 32
-#define M_BCM1480_MC_DQO_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define V_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define G_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
+#define S_BCM1480_MC_DQO_COARSE_ADJ 32
+#define M_BCM1480_MC_DQO_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define V_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define G_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
#define V_BCM1480_MC_DQO_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQO_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQO_FREQ_RANGE 40
-#define M_BCM1480_MC_DQO_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define G_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQO_FREQ_RANGE 40
+#define M_BCM1480_MC_DQO_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define G_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DQO_FINE_ADJ 40
-#define M_BCM1480_MC_DQO_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
-#define V_BCM1480_MC_DQO_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
-#define G_BCM1480_MC_DQO_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
+#define S_BCM1480_MC_DQO_FINE_ADJ 40
+#define M_BCM1480_MC_DQO_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
+#define V_BCM1480_MC_DQO_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
+#define G_BCM1480_MC_DQO_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
#define V_BCM1480_MC_DQO_FINE_ADJ_DEFAULT V_BCM1480_MC_DQO_FINE_ADJ(0x8)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_PDSEL 44
-#define M_BCM1480_MC_DLL_PDSEL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_PDSEL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
-#define G_BCM1480_MC_DLL_PDSEL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_DEFAULT_PDSEL V_BCM1480_MC_DLL_PDSEL(0x0)
-
-#define M_BCM1480_MC_DLL_REGBYPASS _SB_MAKEMASK1(46)
-#define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47)
+#define S_BCM1480_MC_DLL_PDSEL 44
+#define M_BCM1480_MC_DLL_PDSEL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_PDSEL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
+#define G_BCM1480_MC_DLL_PDSEL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_DEFAULT_PDSEL V_BCM1480_MC_DLL_PDSEL(0x0)
+
+#define M_BCM1480_MC_DLL_REGBYPASS _SB_MAKEMASK1(46)
+#define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47)
#endif
-#define S_BCM1480_MC_DLL_DEFAULT 48
-#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
-#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
-#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
+#define S_BCM1480_MC_DLL_DEFAULT 48
+#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
+#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT V_BCM1480_MC_DLL_DEFAULT(0x10)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define S_BCM1480_MC_DLL_REGCTRL 54
-#define M_BCM1480_MC_DLL_REGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
-#define V_BCM1480_MC_DLL_REGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
-#define G_BCM1480_MC_DLL_REGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
+#define M_BCM1480_MC_DLL_REGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
+#define V_BCM1480_MC_DLL_REGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
+#define G_BCM1480_MC_DLL_REGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
#define V_BCM1480_MC_DLL_DEFAULT_REGCTRL V_BCM1480_MC_DLL_REGCTRL(0x0)
#endif
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_FREQ_RANGE 56
-#define M_BCM1480_MC_DLL_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define G_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DLL_FREQ_RANGE 56
+#define M_BCM1480_MC_DLL_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define G_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DLL_STEP_SIZE 56
-#define M_BCM1480_MC_DLL_STEP_SIZE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
-#define V_BCM1480_MC_DLL_STEP_SIZE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
-#define G_BCM1480_MC_DLL_STEP_SIZE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
+#define S_BCM1480_MC_DLL_STEP_SIZE 56
+#define M_BCM1480_MC_DLL_STEP_SIZE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
+#define V_BCM1480_MC_DLL_STEP_SIZE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
+#define G_BCM1480_MC_DLL_STEP_SIZE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
#define V_BCM1480_MC_DLL_STEP_SIZE_DEFAULT V_BCM1480_MC_DLL_STEP_SIZE(0x8)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define S_BCM1480_MC_DLL_BGCTRL 60
-#define M_BCM1480_MC_DLL_BGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_BGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
-#define G_BCM1480_MC_DLL_BGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL V_BCM1480_MC_DLL_BGCTRL(0x0)
+#define M_BCM1480_MC_DLL_BGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_BGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
+#define G_BCM1480_MC_DLL_BGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL V_BCM1480_MC_DLL_BGCTRL(0x0)
#endif
-#define M_BCM1480_MC_DLL_BYPASS _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_DLL_BYPASS _SB_MAKEMASK1(63)
/*
* Memory Drive Configuration Register (Table 94)
*/
-#define S_BCM1480_MC_RTT_BYP_PULLDOWN 0
-#define M_BCM1480_MC_RTT_BYP_PULLDOWN _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
+#define S_BCM1480_MC_RTT_BYP_PULLDOWN 0
+#define M_BCM1480_MC_RTT_BYP_PULLDOWN _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
#define V_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN)
#define G_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN, M_BCM1480_MC_RTT_BYP_PULLDOWN)
-#define S_BCM1480_MC_RTT_BYP_PULLUP 6
-#define M_BCM1480_MC_RTT_BYP_PULLUP _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define V_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define G_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
+#define S_BCM1480_MC_RTT_BYP_PULLUP 6
+#define M_BCM1480_MC_RTT_BYP_PULLUP _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define V_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define G_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
-#define M_BCM1480_MC_RTT_BYPASS _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_RTT_COMP_MOV_AVG _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_RTT_BYPASS _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_RTT_COMP_MOV_AVG _SB_MAKEMASK1(9)
#define S_BCM1480_MC_PVT_BYP_C1_PULLDOWN 10
#define M_BCM1480_MC_PVT_BYP_C1_PULLDOWN _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
#define V_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
#define G_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN, M_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
-#define S_BCM1480_MC_PVT_BYP_C1_PULLUP 15
-#define M_BCM1480_MC_PVT_BYP_C1_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C1_PULLUP 15
+#define M_BCM1480_MC_PVT_BYP_C1_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
#define V_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
#define G_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP, M_BCM1480_MC_PVT_BYP_C1_PULLUP)
@@ -680,153 +680,153 @@
#define V_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
#define G_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN, M_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
-#define S_BCM1480_MC_PVT_BYP_C2_PULLUP 25
-#define M_BCM1480_MC_PVT_BYP_C2_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C2_PULLUP 25
+#define M_BCM1480_MC_PVT_BYP_C2_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
#define V_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
#define G_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP, M_BCM1480_MC_PVT_BYP_C2_PULLUP)
-#define M_BCM1480_MC_PVT_BYPASS _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_PVT_COMP_MOV_AVG _SB_MAKEMASK1(31)
+#define M_BCM1480_MC_PVT_BYPASS _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_PVT_COMP_MOV_AVG _SB_MAKEMASK1(31)
-#define M_BCM1480_MC_CLK_CLASS _SB_MAKEMASK1(34)
-#define M_BCM1480_MC_DATA_CLASS _SB_MAKEMASK1(35)
-#define M_BCM1480_MC_ADDR_CLASS _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_CLK_CLASS _SB_MAKEMASK1(34)
+#define M_BCM1480_MC_DATA_CLASS _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_ADDR_CLASS _SB_MAKEMASK1(36)
-#define M_BCM1480_MC_DQ_ODT_75 _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_DQ_ODT_150 _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_DQS_ODT_75 _SB_MAKEMASK1(39)
-#define M_BCM1480_MC_DQS_ODT_150 _SB_MAKEMASK1(40)
-#define M_BCM1480_MC_DQS_DIFF _SB_MAKEMASK1(41)
+#define M_BCM1480_MC_DQ_ODT_75 _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_DQ_ODT_150 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_DQS_ODT_75 _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_DQS_ODT_150 _SB_MAKEMASK1(40)
+#define M_BCM1480_MC_DQS_DIFF _SB_MAKEMASK1(41)
/*
* ECC Test Data Register (Table 95)
*/
-#define S_BCM1480_MC_DATA_INVERT 0
-#define M_DATA_ECC_INVERT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_DATA_INVERT 0
+#define M_DATA_ECC_INVERT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
/*
* ECC Test ECC Register (Table 96)
*/
-#define S_BCM1480_MC_ECC_INVERT 0
-#define M_BCM1480_MC_ECC_INVERT _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_ECC_INVERT 0
+#define M_BCM1480_MC_ECC_INVERT _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
/*
* SDRAM Timing Register (Table 97)
*/
-#define S_BCM1480_MC_tRCD 0
-#define M_BCM1480_MC_tRCD _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
-#define V_BCM1480_MC_tRCD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
-#define G_BCM1480_MC_tRCD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
-#define K_BCM1480_MC_tRCD_DEFAULT 3
-#define V_BCM1480_MC_tRCD_DEFAULT V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
-
-#define S_BCM1480_MC_tCL 4
-#define M_BCM1480_MC_tCL _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
-#define V_BCM1480_MC_tCL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
-#define G_BCM1480_MC_tCL(x) _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
-#define K_BCM1480_MC_tCL_DEFAULT 2
-#define V_BCM1480_MC_tCL_DEFAULT V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
-
-#define M_BCM1480_MC_tCrDh _SB_MAKEMASK1(8)
-
-#define S_BCM1480_MC_tWR 9
-#define M_BCM1480_MC_tWR _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
-#define V_BCM1480_MC_tWR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
-#define G_BCM1480_MC_tWR(x) _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
-#define K_BCM1480_MC_tWR_DEFAULT 2
-#define V_BCM1480_MC_tWR_DEFAULT V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
-
-#define S_BCM1480_MC_tCwD 12
-#define M_BCM1480_MC_tCwD _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
-#define V_BCM1480_MC_tCwD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
-#define G_BCM1480_MC_tCwD(x) _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
-#define K_BCM1480_MC_tCwD_DEFAULT 1
-#define V_BCM1480_MC_tCwD_DEFAULT V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
-
-#define S_BCM1480_MC_tRP 16
-#define M_BCM1480_MC_tRP _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
-#define V_BCM1480_MC_tRP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
-#define G_BCM1480_MC_tRP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
-#define K_BCM1480_MC_tRP_DEFAULT 4
-#define V_BCM1480_MC_tRP_DEFAULT V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
-
-#define S_BCM1480_MC_tRRD 20
-#define M_BCM1480_MC_tRRD _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
-#define V_BCM1480_MC_tRRD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
-#define G_BCM1480_MC_tRRD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
-#define K_BCM1480_MC_tRRD_DEFAULT 2
-#define V_BCM1480_MC_tRRD_DEFAULT V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
-
-#define S_BCM1480_MC_tRCw 24
-#define M_BCM1480_MC_tRCw _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
-#define V_BCM1480_MC_tRCw(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
-#define G_BCM1480_MC_tRCw(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
-#define K_BCM1480_MC_tRCw_DEFAULT 10
-#define V_BCM1480_MC_tRCw_DEFAULT V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
-
-#define S_BCM1480_MC_tRCr 32
-#define M_BCM1480_MC_tRCr _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
-#define V_BCM1480_MC_tRCr(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
-#define G_BCM1480_MC_tRCr(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
-#define K_BCM1480_MC_tRCr_DEFAULT 9
-#define V_BCM1480_MC_tRCr_DEFAULT V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
+#define S_BCM1480_MC_tRCD 0
+#define M_BCM1480_MC_tRCD _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
+#define V_BCM1480_MC_tRCD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
+#define G_BCM1480_MC_tRCD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
+#define K_BCM1480_MC_tRCD_DEFAULT 3
+#define V_BCM1480_MC_tRCD_DEFAULT V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
+
+#define S_BCM1480_MC_tCL 4
+#define M_BCM1480_MC_tCL _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
+#define V_BCM1480_MC_tCL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
+#define G_BCM1480_MC_tCL(x) _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
+#define K_BCM1480_MC_tCL_DEFAULT 2
+#define V_BCM1480_MC_tCL_DEFAULT V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
+
+#define M_BCM1480_MC_tCrDh _SB_MAKEMASK1(8)
+
+#define S_BCM1480_MC_tWR 9
+#define M_BCM1480_MC_tWR _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
+#define V_BCM1480_MC_tWR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
+#define G_BCM1480_MC_tWR(x) _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
+#define K_BCM1480_MC_tWR_DEFAULT 2
+#define V_BCM1480_MC_tWR_DEFAULT V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
+
+#define S_BCM1480_MC_tCwD 12
+#define M_BCM1480_MC_tCwD _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
+#define V_BCM1480_MC_tCwD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
+#define G_BCM1480_MC_tCwD(x) _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
+#define K_BCM1480_MC_tCwD_DEFAULT 1
+#define V_BCM1480_MC_tCwD_DEFAULT V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
+
+#define S_BCM1480_MC_tRP 16
+#define M_BCM1480_MC_tRP _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
+#define V_BCM1480_MC_tRP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
+#define G_BCM1480_MC_tRP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
+#define K_BCM1480_MC_tRP_DEFAULT 4
+#define V_BCM1480_MC_tRP_DEFAULT V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
+
+#define S_BCM1480_MC_tRRD 20
+#define M_BCM1480_MC_tRRD _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
+#define V_BCM1480_MC_tRRD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
+#define G_BCM1480_MC_tRRD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
+#define K_BCM1480_MC_tRRD_DEFAULT 2
+#define V_BCM1480_MC_tRRD_DEFAULT V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
+
+#define S_BCM1480_MC_tRCw 24
+#define M_BCM1480_MC_tRCw _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
+#define V_BCM1480_MC_tRCw(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
+#define G_BCM1480_MC_tRCw(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
+#define K_BCM1480_MC_tRCw_DEFAULT 10
+#define V_BCM1480_MC_tRCw_DEFAULT V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
+
+#define S_BCM1480_MC_tRCr 32
+#define M_BCM1480_MC_tRCr _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
+#define V_BCM1480_MC_tRCr(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
+#define G_BCM1480_MC_tRCr(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
+#define K_BCM1480_MC_tRCr_DEFAULT 9
+#define V_BCM1480_MC_tRCr_DEFAULT V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_tFAW 40
-#define M_BCM1480_MC_tFAW _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
-#define V_BCM1480_MC_tFAW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
-#define G_BCM1480_MC_tFAW(x) _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
-#define K_BCM1480_MC_tFAW_DEFAULT 0
-#define V_BCM1480_MC_tFAW_DEFAULT V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
+#define S_BCM1480_MC_tFAW 40
+#define M_BCM1480_MC_tFAW _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
+#define V_BCM1480_MC_tFAW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
+#define G_BCM1480_MC_tFAW(x) _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
+#define K_BCM1480_MC_tFAW_DEFAULT 0
+#define V_BCM1480_MC_tFAW_DEFAULT V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
#endif
-#define S_BCM1480_MC_tRFC 48
-#define M_BCM1480_MC_tRFC _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
-#define V_BCM1480_MC_tRFC(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
-#define G_BCM1480_MC_tRFC(x) _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
-#define K_BCM1480_MC_tRFC_DEFAULT 12
-#define V_BCM1480_MC_tRFC_DEFAULT V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
-
-#define S_BCM1480_MC_tFIFO 56
-#define M_BCM1480_MC_tFIFO _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
-#define V_BCM1480_MC_tFIFO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
-#define G_BCM1480_MC_tFIFO(x) _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
-#define K_BCM1480_MC_tFIFO_DEFAULT 0
-#define V_BCM1480_MC_tFIFO_DEFAULT V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
-
-#define S_BCM1480_MC_tW2R 58
-#define M_BCM1480_MC_tW2R _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
-#define V_BCM1480_MC_tW2R(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
-#define G_BCM1480_MC_tW2R(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
-#define K_BCM1480_MC_tW2R_DEFAULT 1
-#define V_BCM1480_MC_tW2R_DEFAULT V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
-
-#define S_BCM1480_MC_tR2W 60
-#define M_BCM1480_MC_tR2W _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
-#define V_BCM1480_MC_tR2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
-#define G_BCM1480_MC_tR2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
-#define K_BCM1480_MC_tR2W_DEFAULT 0
-#define V_BCM1480_MC_tR2W_DEFAULT V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
+#define S_BCM1480_MC_tRFC 48
+#define M_BCM1480_MC_tRFC _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
+#define V_BCM1480_MC_tRFC(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
+#define G_BCM1480_MC_tRFC(x) _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
+#define K_BCM1480_MC_tRFC_DEFAULT 12
+#define V_BCM1480_MC_tRFC_DEFAULT V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
+
+#define S_BCM1480_MC_tFIFO 56
+#define M_BCM1480_MC_tFIFO _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
+#define V_BCM1480_MC_tFIFO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
+#define G_BCM1480_MC_tFIFO(x) _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
+#define K_BCM1480_MC_tFIFO_DEFAULT 0
+#define V_BCM1480_MC_tFIFO_DEFAULT V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
+
+#define S_BCM1480_MC_tW2R 58
+#define M_BCM1480_MC_tW2R _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
+#define V_BCM1480_MC_tW2R(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
+#define G_BCM1480_MC_tW2R(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
+#define K_BCM1480_MC_tW2R_DEFAULT 1
+#define V_BCM1480_MC_tW2R_DEFAULT V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
+
+#define S_BCM1480_MC_tR2W 60
+#define M_BCM1480_MC_tR2W _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
+#define V_BCM1480_MC_tR2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
+#define G_BCM1480_MC_tR2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
+#define K_BCM1480_MC_tR2W_DEFAULT 0
+#define V_BCM1480_MC_tR2W_DEFAULT V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
#define M_BCM1480_MC_tR2R _SB_MAKEMASK1(62)
-#define V_BCM1480_MC_TIMING_DEFAULT (M_BCM1480_MC_tR2R | \
- V_BCM1480_MC_tFIFO_DEFAULT | \
- V_BCM1480_MC_tR2W_DEFAULT | \
- V_BCM1480_MC_tW2R_DEFAULT | \
- V_BCM1480_MC_tRFC_DEFAULT | \
- V_BCM1480_MC_tRCr_DEFAULT | \
- V_BCM1480_MC_tRCw_DEFAULT | \
- V_BCM1480_MC_tRRD_DEFAULT | \
- V_BCM1480_MC_tRP_DEFAULT | \
- V_BCM1480_MC_tCwD_DEFAULT | \
- V_BCM1480_MC_tWR_DEFAULT | \
- M_BCM1480_MC_tCrDh | \
- V_BCM1480_MC_tCL_DEFAULT | \
- V_BCM1480_MC_tRCD_DEFAULT)
+#define V_BCM1480_MC_TIMING_DEFAULT (M_BCM1480_MC_tR2R | \
+ V_BCM1480_MC_tFIFO_DEFAULT | \
+ V_BCM1480_MC_tR2W_DEFAULT | \
+ V_BCM1480_MC_tW2R_DEFAULT | \
+ V_BCM1480_MC_tRFC_DEFAULT | \
+ V_BCM1480_MC_tRCr_DEFAULT | \
+ V_BCM1480_MC_tRCw_DEFAULT | \
+ V_BCM1480_MC_tRRD_DEFAULT | \
+ V_BCM1480_MC_tRP_DEFAULT | \
+ V_BCM1480_MC_tCwD_DEFAULT | \
+ V_BCM1480_MC_tWR_DEFAULT | \
+ M_BCM1480_MC_tCrDh | \
+ V_BCM1480_MC_tCL_DEFAULT | \
+ V_BCM1480_MC_tRCD_DEFAULT)
/*
* SDRAM Timing Register 2
@@ -834,33 +834,33 @@
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_tAL 0
-#define M_BCM1480_MC_tAL _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
-#define V_BCM1480_MC_tAL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
-#define G_BCM1480_MC_tAL(x) _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
-#define K_BCM1480_MC_tAL_DEFAULT 0
-#define V_BCM1480_MC_tAL_DEFAULT V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
-
-#define S_BCM1480_MC_tRTP 4
-#define M_BCM1480_MC_tRTP _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
-#define V_BCM1480_MC_tRTP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
-#define G_BCM1480_MC_tRTP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
-#define K_BCM1480_MC_tRTP_DEFAULT 2
-#define V_BCM1480_MC_tRTP_DEFAULT V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
-
-#define S_BCM1480_MC_tW2W 8
-#define M_BCM1480_MC_tW2W _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
-#define V_BCM1480_MC_tW2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
-#define G_BCM1480_MC_tW2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
-#define K_BCM1480_MC_tW2W_DEFAULT 0
-#define V_BCM1480_MC_tW2W_DEFAULT V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
-
-#define S_BCM1480_MC_tRAP 12
-#define M_BCM1480_MC_tRAP _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
-#define V_BCM1480_MC_tRAP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
-#define G_BCM1480_MC_tRAP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
-#define K_BCM1480_MC_tRAP_DEFAULT 0
-#define V_BCM1480_MC_tRAP_DEFAULT V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
+#define S_BCM1480_MC_tAL 0
+#define M_BCM1480_MC_tAL _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
+#define V_BCM1480_MC_tAL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
+#define G_BCM1480_MC_tAL(x) _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
+#define K_BCM1480_MC_tAL_DEFAULT 0
+#define V_BCM1480_MC_tAL_DEFAULT V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
+
+#define S_BCM1480_MC_tRTP 4
+#define M_BCM1480_MC_tRTP _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
+#define V_BCM1480_MC_tRTP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
+#define G_BCM1480_MC_tRTP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
+#define K_BCM1480_MC_tRTP_DEFAULT 2
+#define V_BCM1480_MC_tRTP_DEFAULT V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
+
+#define S_BCM1480_MC_tW2W 8
+#define M_BCM1480_MC_tW2W _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
+#define V_BCM1480_MC_tW2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
+#define G_BCM1480_MC_tW2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
+#define K_BCM1480_MC_tW2W_DEFAULT 0
+#define V_BCM1480_MC_tW2W_DEFAULT V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
+
+#define S_BCM1480_MC_tRAP 12
+#define M_BCM1480_MC_tRAP _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
+#define V_BCM1480_MC_tRAP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
+#define G_BCM1480_MC_tRAP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
+#define K_BCM1480_MC_tRAP_DEFAULT 0
+#define V_BCM1480_MC_tRAP_DEFAULT V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
#endif
@@ -874,111 +874,111 @@
* Global Configuration Register (Table 99)
*/
-#define S_BCM1480_MC_BLK_SET_MARK 8
-#define M_BCM1480_MC_BLK_SET_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
-#define V_BCM1480_MC_BLK_SET_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
-#define G_BCM1480_MC_BLK_SET_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
+#define S_BCM1480_MC_BLK_SET_MARK 8
+#define M_BCM1480_MC_BLK_SET_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
+#define V_BCM1480_MC_BLK_SET_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
+#define G_BCM1480_MC_BLK_SET_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
-#define S_BCM1480_MC_BLK_CLR_MARK 12
-#define M_BCM1480_MC_BLK_CLR_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
-#define V_BCM1480_MC_BLK_CLR_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
-#define G_BCM1480_MC_BLK_CLR_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
+#define S_BCM1480_MC_BLK_CLR_MARK 12
+#define M_BCM1480_MC_BLK_CLR_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
+#define V_BCM1480_MC_BLK_CLR_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
+#define G_BCM1480_MC_BLK_CLR_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
-#define M_BCM1480_MC_PKT_PRIORITY _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_PKT_PRIORITY _SB_MAKEMASK1(16)
-#define S_BCM1480_MC_MAX_AGE 20
-#define M_BCM1480_MC_MAX_AGE _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
-#define V_BCM1480_MC_MAX_AGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
-#define G_BCM1480_MC_MAX_AGE(x) _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
+#define S_BCM1480_MC_MAX_AGE 20
+#define M_BCM1480_MC_MAX_AGE _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
+#define V_BCM1480_MC_MAX_AGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
+#define G_BCM1480_MC_MAX_AGE(x) _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
-#define M_BCM1480_MC_BERR_DISABLE _SB_MAKEMASK1(29)
-#define M_BCM1480_MC_FORCE_SEQ _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_VGEN _SB_MAKEMASK1(32)
+#define M_BCM1480_MC_BERR_DISABLE _SB_MAKEMASK1(29)
+#define M_BCM1480_MC_FORCE_SEQ _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_VGEN _SB_MAKEMASK1(32)
-#define S_BCM1480_MC_SLEW 33
-#define M_BCM1480_MC_SLEW _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
-#define V_BCM1480_MC_SLEW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
-#define G_BCM1480_MC_SLEW(x) _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
+#define S_BCM1480_MC_SLEW 33
+#define M_BCM1480_MC_SLEW _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
+#define V_BCM1480_MC_SLEW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
+#define G_BCM1480_MC_SLEW(x) _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
-#define M_BCM1480_MC_SSTL_VOLTAGE _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_SSTL_VOLTAGE _SB_MAKEMASK1(35)
/*
* Global Channel Interleave Register (Table 100)
*/
-#define S_BCM1480_MC_INTLV0 0
-#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-
-#define S_BCM1480_MC_INTLV1 8
-#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-
-#define S_BCM1480_MC_INTLV_MODE 16
-#define M_BCM1480_MC_INTLV_MODE _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
-#define V_BCM1480_MC_INTLV_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
-#define G_BCM1480_MC_INTLV_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
-
-#define K_BCM1480_MC_INTLV_MODE_NONE 0x0
-#define K_BCM1480_MC_INTLV_MODE_01 0x1
-#define K_BCM1480_MC_INTLV_MODE_23 0x2
-#define K_BCM1480_MC_INTLV_MODE_01_23 0x3
-#define K_BCM1480_MC_INTLV_MODE_0123 0x4
-
-#define V_BCM1480_MC_INTLV_MODE_NONE V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
-#define V_BCM1480_MC_INTLV_MODE_01 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
-#define V_BCM1480_MC_INTLV_MODE_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
-#define V_BCM1480_MC_INTLV_MODE_01_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
-#define V_BCM1480_MC_INTLV_MODE_0123 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+
+#define S_BCM1480_MC_INTLV_MODE 16
+#define M_BCM1480_MC_INTLV_MODE _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
+#define V_BCM1480_MC_INTLV_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
+#define G_BCM1480_MC_INTLV_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
+
+#define K_BCM1480_MC_INTLV_MODE_NONE 0x0
+#define K_BCM1480_MC_INTLV_MODE_01 0x1
+#define K_BCM1480_MC_INTLV_MODE_23 0x2
+#define K_BCM1480_MC_INTLV_MODE_01_23 0x3
+#define K_BCM1480_MC_INTLV_MODE_0123 0x4
+
+#define V_BCM1480_MC_INTLV_MODE_NONE V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
+#define V_BCM1480_MC_INTLV_MODE_01 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
+#define V_BCM1480_MC_INTLV_MODE_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
+#define V_BCM1480_MC_INTLV_MODE_01_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
+#define V_BCM1480_MC_INTLV_MODE_0123 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
/*
* ECC Status Register
*/
-#define S_BCM1480_MC_ECC_ERR_ADDR 0
-#define M_BCM1480_MC_ECC_ERR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
-#define V_BCM1480_MC_ECC_ERR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
-#define G_BCM1480_MC_ECC_ERR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
+#define S_BCM1480_MC_ECC_ERR_ADDR 0
+#define M_BCM1480_MC_ECC_ERR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
+#define V_BCM1480_MC_ECC_ERR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
+#define G_BCM1480_MC_ECC_ERR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define M_BCM1480_MC_ECC_ERR_RMW _SB_MAKEMASK1(60)
+#define M_BCM1480_MC_ECC_ERR_RMW _SB_MAKEMASK1(60)
#endif
-#define M_BCM1480_MC_ECC_MULT_ERR_DET _SB_MAKEMASK1(61)
-#define M_BCM1480_MC_ECC_UERR_DET _SB_MAKEMASK1(62)
-#define M_BCM1480_MC_ECC_CERR_DET _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_ECC_MULT_ERR_DET _SB_MAKEMASK1(61)
+#define M_BCM1480_MC_ECC_UERR_DET _SB_MAKEMASK1(62)
+#define M_BCM1480_MC_ECC_CERR_DET _SB_MAKEMASK1(63)
/*
* Global ECC Address Register (Table 102)
*/
-#define S_BCM1480_MC_ECC_CORR_ADDR 0
-#define M_BCM1480_MC_ECC_CORR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
-#define V_BCM1480_MC_ECC_CORR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
-#define G_BCM1480_MC_ECC_CORR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
+#define S_BCM1480_MC_ECC_CORR_ADDR 0
+#define M_BCM1480_MC_ECC_CORR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
+#define V_BCM1480_MC_ECC_CORR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
+#define G_BCM1480_MC_ECC_CORR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
/*
* Global ECC Correction Register (Table 103)
*/
-#define S_BCM1480_MC_ECC_CORRECT 0
-#define M_BCM1480_MC_ECC_CORRECT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
-#define V_BCM1480_MC_ECC_CORRECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
-#define G_BCM1480_MC_ECC_CORRECT(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
+#define S_BCM1480_MC_ECC_CORRECT 0
+#define M_BCM1480_MC_ECC_CORRECT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
+#define V_BCM1480_MC_ECC_CORRECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
+#define G_BCM1480_MC_ECC_CORRECT(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
/*
* Global ECC Performance Counters Control Register (Table 104)
*/
-#define S_BCM1480_MC_CHANNEL_SELECT 0
-#define M_BCM1480_MC_CHANNEL_SELECT _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
-#define V_BCM1480_MC_CHANNEL_SELECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
-#define G_BCM1480_MC_CHANNEL_SELECT(x) _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
-#define K_BCM1480_MC_CHANNEL_SELECT_0 0x1
-#define K_BCM1480_MC_CHANNEL_SELECT_1 0x2
-#define K_BCM1480_MC_CHANNEL_SELECT_2 0x4
-#define K_BCM1480_MC_CHANNEL_SELECT_3 0x8
+#define S_BCM1480_MC_CHANNEL_SELECT 0
+#define M_BCM1480_MC_CHANNEL_SELECT _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
+#define V_BCM1480_MC_CHANNEL_SELECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
+#define G_BCM1480_MC_CHANNEL_SELECT(x) _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
+#define K_BCM1480_MC_CHANNEL_SELECT_0 0x1
+#define K_BCM1480_MC_CHANNEL_SELECT_1 0x2
+#define K_BCM1480_MC_CHANNEL_SELECT_2 0x4
+#define K_BCM1480_MC_CHANNEL_SELECT_3 0x8
#endif /* _BCM1480_MC_H */
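
The hunks above all touch macro families that follow the SiByte S_/M_/V_/G_ naming
convention: S_* is a field's bit offset, M_* its mask, V_*(x) packs a value into the
field and G_*(x) extracts it again. A minimal, self-contained sketch of how such a
family is typically consumed, assuming stand-in SKETCH_* helpers that only approximate
the real _SB_MAKEMASK/_SB_MAKEVALUE/_SB_GETVALUE macros from the sibyte headers
(nothing below is part of the patch itself):

/*
 * Illustrative only: SKETCH_* approximates the _SB_* helpers; the example
 * field mirrors the shape of S/M/V/G_BCM1480_MC_CLK_RATIO (6 bits at bit 0).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAKEMASK(w, s)    ((((uint64_t)1 << (w)) - 1) << (s))
#define SKETCH_MAKEVALUE(x, s)   ((uint64_t)(x) << (s))
#define SKETCH_GETVALUE(x, s, m) (((uint64_t)(x) & (m)) >> (s))

#define S_EXAMPLE_CLK_RATIO    0
#define M_EXAMPLE_CLK_RATIO    SKETCH_MAKEMASK(6, S_EXAMPLE_CLK_RATIO)
#define V_EXAMPLE_CLK_RATIO(x) SKETCH_MAKEVALUE(x, S_EXAMPLE_CLK_RATIO)
#define G_EXAMPLE_CLK_RATIO(x) SKETCH_GETVALUE(x, S_EXAMPLE_CLK_RATIO, M_EXAMPLE_CLK_RATIO)

int main(void)
{
	uint64_t reg = 0;

	reg |= V_EXAMPLE_CLK_RATIO(10);		/* encode a 6-bit field value */
	printf("ratio = %llu\n",
	       (unsigned long long)G_EXAMPLE_CLK_RATIO(reg));	/* prints 10 */
	return 0;
}

As far as these hunks show, the changes are whitespace realignment only; every shift,
width and value is carried over unchanged.
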
diff --git a/arch/mips/include/asm/sibyte/bcm1480_regs.h b/arch/mips/include/asm/sibyte/bcm1480_regs.h
index 84d168ddfebb..ec0dacf6f0cb 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_regs.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_regs.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1255/BCM1280/BCM1455/BCM1480 Board Support Package
*
- * Register Definitions File: bcm1480_regs.h
+ * Register Definitions File: bcm1480_regs.h
*
* This module contains the addresses of the on-chip peripherals
* on the BCM1280 and BCM1480.
@@ -80,48 +80,48 @@
* Memory Controller Registers (Section 6)
********************************************************************* */
-#define A_BCM1480_MC_BASE_0 0x0010050000
-#define A_BCM1480_MC_BASE_1 0x0010051000
-#define A_BCM1480_MC_BASE_2 0x0010052000
-#define A_BCM1480_MC_BASE_3 0x0010053000
-#define BCM1480_MC_REGISTER_SPACING 0x1000
+#define A_BCM1480_MC_BASE_0 0x0010050000
+#define A_BCM1480_MC_BASE_1 0x0010051000
+#define A_BCM1480_MC_BASE_2 0x0010052000
+#define A_BCM1480_MC_BASE_3 0x0010053000
+#define BCM1480_MC_REGISTER_SPACING 0x1000
-#define A_BCM1480_MC_BASE(ctlid) (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
+#define A_BCM1480_MC_BASE(ctlid) (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
#define A_BCM1480_MC_REGISTER(ctlid, reg) (A_BCM1480_MC_BASE(ctlid)+(reg))
-#define R_BCM1480_MC_CONFIG 0x0000000100
-#define R_BCM1480_MC_CS_START 0x0000000120
-#define R_BCM1480_MC_CS_END 0x0000000140
-#define S_BCM1480_MC_CS_STARTEND 24
-
-#define R_BCM1480_MC_CS01_ROW0 0x0000000180
-#define R_BCM1480_MC_CS01_ROW1 0x00000001A0
-#define R_BCM1480_MC_CS23_ROW0 0x0000000200
-#define R_BCM1480_MC_CS23_ROW1 0x0000000220
-#define R_BCM1480_MC_CS01_COL0 0x0000000280
-#define R_BCM1480_MC_CS01_COL1 0x00000002A0
-#define R_BCM1480_MC_CS23_COL0 0x0000000300
-#define R_BCM1480_MC_CS23_COL1 0x0000000320
-
-#define R_BCM1480_MC_CSX_BASE 0x0000000180
-#define R_BCM1480_MC_CSX_ROW0 0x0000000000 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_ROW1 0x0000000020 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL0 0x0000000100 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL1 0x0000000120 /* relative to CSX_BASE */
-#define BCM1480_MC_CSX_SPACING 0x0000000080 /* CS23 relative to CS01 */
-
-#define R_BCM1480_MC_CS01_BA 0x0000000380
-#define R_BCM1480_MC_CS23_BA 0x00000003A0
-#define R_BCM1480_MC_DRAMCMD 0x0000000400
-#define R_BCM1480_MC_DRAMMODE 0x0000000420
-#define R_BCM1480_MC_CLOCK_CFG 0x0000000440
-#define R_BCM1480_MC_MCLK_CFG R_BCM1480_MC_CLOCK_CFG
-#define R_BCM1480_MC_TEST_DATA 0x0000000480
-#define R_BCM1480_MC_TEST_ECC 0x00000004A0
-#define R_BCM1480_MC_TIMING1 0x00000004C0
-#define R_BCM1480_MC_TIMING2 0x00000004E0
-#define R_BCM1480_MC_DLL_CFG 0x0000000500
-#define R_BCM1480_MC_DRIVE_CFG 0x0000000520
+#define R_BCM1480_MC_CONFIG 0x0000000100
+#define R_BCM1480_MC_CS_START 0x0000000120
+#define R_BCM1480_MC_CS_END 0x0000000140
+#define S_BCM1480_MC_CS_STARTEND 24
+
+#define R_BCM1480_MC_CS01_ROW0 0x0000000180
+#define R_BCM1480_MC_CS01_ROW1 0x00000001A0
+#define R_BCM1480_MC_CS23_ROW0 0x0000000200
+#define R_BCM1480_MC_CS23_ROW1 0x0000000220
+#define R_BCM1480_MC_CS01_COL0 0x0000000280
+#define R_BCM1480_MC_CS01_COL1 0x00000002A0
+#define R_BCM1480_MC_CS23_COL0 0x0000000300
+#define R_BCM1480_MC_CS23_COL1 0x0000000320
+
+#define R_BCM1480_MC_CSX_BASE 0x0000000180
+#define R_BCM1480_MC_CSX_ROW0 0x0000000000 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_ROW1 0x0000000020 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL0 0x0000000100 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL1 0x0000000120 /* relative to CSX_BASE */
+#define BCM1480_MC_CSX_SPACING 0x0000000080 /* CS23 relative to CS01 */
+
+#define R_BCM1480_MC_CS01_BA 0x0000000380
+#define R_BCM1480_MC_CS23_BA 0x00000003A0
+#define R_BCM1480_MC_DRAMCMD 0x0000000400
+#define R_BCM1480_MC_DRAMMODE 0x0000000420
+#define R_BCM1480_MC_CLOCK_CFG 0x0000000440
+#define R_BCM1480_MC_MCLK_CFG R_BCM1480_MC_CLOCK_CFG
+#define R_BCM1480_MC_TEST_DATA 0x0000000480
+#define R_BCM1480_MC_TEST_ECC 0x00000004A0
+#define R_BCM1480_MC_TIMING1 0x00000004C0
+#define R_BCM1480_MC_TIMING2 0x00000004E0
+#define R_BCM1480_MC_DLL_CFG 0x0000000500
+#define R_BCM1480_MC_DRIVE_CFG 0x0000000520
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define R_BCM1480_MC_ODT 0x0000000460
@@ -129,55 +129,55 @@
#endif
/* Global registers (single instance) */
-#define A_BCM1480_MC_GLB_CONFIG 0x0010054100
-#define A_BCM1480_MC_GLB_INTLV 0x0010054120
-#define A_BCM1480_MC_GLB_ECC_STATUS 0x0010054140
-#define A_BCM1480_MC_GLB_ECC_ADDR 0x0010054160
-#define A_BCM1480_MC_GLB_ECC_CORRECT 0x0010054180
+#define A_BCM1480_MC_GLB_CONFIG 0x0010054100
+#define A_BCM1480_MC_GLB_INTLV 0x0010054120
+#define A_BCM1480_MC_GLB_ECC_STATUS 0x0010054140
+#define A_BCM1480_MC_GLB_ECC_ADDR 0x0010054160
+#define A_BCM1480_MC_GLB_ECC_CORRECT 0x0010054180
#define A_BCM1480_MC_GLB_PERF_CNT_CONTROL 0x00100541A0
/* *********************************************************************
* L2 Cache Control Registers (Section 5)
********************************************************************* */
-#define A_BCM1480_L2_BASE 0x0010040000
+#define A_BCM1480_L2_BASE 0x0010040000
-#define A_BCM1480_L2_READ_TAG 0x0010040018
-#define A_BCM1480_L2_ECC_TAG 0x0010040038
-#define A_BCM1480_L2_MISC0_VALUE 0x0010040058
-#define A_BCM1480_L2_MISC1_VALUE 0x0010040078
-#define A_BCM1480_L2_MISC2_VALUE 0x0010040098
-#define A_BCM1480_L2_MISC_CONFIG 0x0010040040 /* x040 */
-#define A_BCM1480_L2_CACHE_DISABLE 0x0010040060 /* x060 */
+#define A_BCM1480_L2_READ_TAG 0x0010040018
+#define A_BCM1480_L2_ECC_TAG 0x0010040038
+#define A_BCM1480_L2_MISC0_VALUE 0x0010040058
+#define A_BCM1480_L2_MISC1_VALUE 0x0010040078
+#define A_BCM1480_L2_MISC2_VALUE 0x0010040098
+#define A_BCM1480_L2_MISC_CONFIG 0x0010040040 /* x040 */
+#define A_BCM1480_L2_CACHE_DISABLE 0x0010040060 /* x060 */
#define A_BCM1480_L2_MAKECACHEDISABLE(x) (A_BCM1480_L2_CACHE_DISABLE | (((x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_ENABLE_3_0 0x0010040080 /* x080 */
-#define A_BCM1480_L2_WAY_ENABLE_7_4 0x00100400A0 /* x0A0 */
+#define A_BCM1480_L2_WAY_ENABLE_3_0 0x0010040080 /* x080 */
+#define A_BCM1480_L2_WAY_ENABLE_7_4 0x00100400A0 /* x0A0 */
#define A_BCM1480_L2_MAKE_WAY_ENABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_ENABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_DISABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((~x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_DISABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((~x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_LOCAL_3_0 0x0010040100 /* x100 */
-#define A_BCM1480_L2_WAY_LOCAL_7_4 0x0010040120 /* x120 */
-#define A_BCM1480_L2_WAY_REMOTE_3_0 0x0010040140 /* x140 */
-#define A_BCM1480_L2_WAY_REMOTE_7_4 0x0010040160 /* x160 */
-#define A_BCM1480_L2_WAY_AGENT_3_0 0x00100400C0 /* xxC0 */
-#define A_BCM1480_L2_WAY_AGENT_7_4 0x00100400E0 /* xxE0 */
+#define A_BCM1480_L2_WAY_LOCAL_3_0 0x0010040100 /* x100 */
+#define A_BCM1480_L2_WAY_LOCAL_7_4 0x0010040120 /* x120 */
+#define A_BCM1480_L2_WAY_REMOTE_3_0 0x0010040140 /* x140 */
+#define A_BCM1480_L2_WAY_REMOTE_7_4 0x0010040160 /* x160 */
+#define A_BCM1480_L2_WAY_AGENT_3_0 0x00100400C0 /* xxC0 */
+#define A_BCM1480_L2_WAY_AGENT_7_4 0x00100400E0 /* xxE0 */
#define A_BCM1480_L2_WAY_ENABLE(A, banks) (A | (((~(banks))&0x0F) << 8))
-#define A_BCM1480_L2_BANK_BASE 0x00D0300000
-#define A_BCM1480_L2_BANK_ADDRESS(b) (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
-#define A_BCM1480_L2_MGMT_TAG_BASE 0x00D0000000
+#define A_BCM1480_L2_BANK_BASE 0x00D0300000
+#define A_BCM1480_L2_BANK_ADDRESS(b) (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
+#define A_BCM1480_L2_MGMT_TAG_BASE 0x00D0000000
/* *********************************************************************
* PCI-X Interface Registers (Section 7)
********************************************************************* */
-#define A_BCM1480_PCI_BASE 0x0010061400
+#define A_BCM1480_PCI_BASE 0x0010061400
-#define A_BCM1480_PCI_RESET 0x0010061400
-#define A_BCM1480_PCI_DLL 0x0010061500
+#define A_BCM1480_PCI_RESET 0x0010061400
+#define A_BCM1480_PCI_DLL 0x0010061500
-#define A_BCM1480_PCI_TYPE00_HEADER 0x002E000000
+#define A_BCM1480_PCI_TYPE00_HEADER 0x002E000000
/* *********************************************************************
* Ethernet MAC Registers (Section 11) and DMA Registers (Section 10.6)
@@ -185,19 +185,19 @@
/* No register changes with Rev.C BCM1250, but one additional MAC */
-#define A_BCM1480_MAC_BASE_2 0x0010066000
+#define A_BCM1480_MAC_BASE_2 0x0010066000
#ifndef A_MAC_BASE_2
-#define A_MAC_BASE_2 A_BCM1480_MAC_BASE_2
+#define A_MAC_BASE_2 A_BCM1480_MAC_BASE_2
#endif
-#define A_BCM1480_MAC_BASE_3 0x0010067000
-#define A_MAC_BASE_3 A_BCM1480_MAC_BASE_3
+#define A_BCM1480_MAC_BASE_3 0x0010067000
+#define A_MAC_BASE_3 A_BCM1480_MAC_BASE_3
-#define R_BCM1480_MAC_DMA_OODPKTLOST 0x00000038
+#define R_BCM1480_MAC_DMA_OODPKTLOST 0x00000038
#ifndef R_MAC_DMA_OODPKTLOST
-#define R_MAC_DMA_OODPKTLOST R_BCM1480_MAC_DMA_OODPKTLOST
+#define R_MAC_DMA_OODPKTLOST R_BCM1480_MAC_DMA_OODPKTLOST
#endif
@@ -208,18 +208,18 @@
/* No significant differences from BCM1250, two DUARTs */
/* Conventions, per user manual:
- * DUART generic, channels A,B,C,D
- * DUART0 implementing channels A,B
- * DUART1 inplementing channels C,D
+ * DUART generic, channels A,B,C,D
+ * DUART0 implementing channels A,B
+ * DUART1 implementing channels C,D
*/
-#define BCM1480_DUART_NUM_PORTS 4
+#define BCM1480_DUART_NUM_PORTS 4
-#define A_BCM1480_DUART0 0x0010060000
-#define A_BCM1480_DUART1 0x0010060400
-#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
+#define A_BCM1480_DUART0 0x0010060000
+#define A_BCM1480_DUART1 0x0010060400
+#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
-#define BCM1480_DUART_CHANREG_SPACING 0x100
+#define BCM1480_DUART_CHANREG_SPACING 0x100
#define A_BCM1480_DUART_CHANREG(chan, reg) \
(A_BCM1480_DUART(chan) + \
BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
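
The macros above resolve a logical channel number to an absolute register address:
channels A,B (0,1) sit on DUART0, channels C,D (2,3) on DUART1, and the per-channel
block inside a DUART is selected by the spacing factor ((chan) & 1) + 1. A small
stand-alone sketch of the same arithmetic, with the constants copied from this hunk
and a hypothetical helper pair that is not part of the patch:

/* Illustrative only: mirrors A_BCM1480_DUART() and A_BCM1480_DUART_CHANREG(). */
#include <stdint.h>
#include <stdio.h>

#define DUART0_BASE	0x0010060000ULL
#define DUART1_BASE	0x0010060400ULL
#define CHANREG_SPACING	0x100ULL

static uint64_t duart_base(unsigned int chan)
{
	/* Channels 0,1 (A,B) live on DUART0; channels 2,3 (C,D) on DUART1. */
	return ((chan & 2) == 0) ? DUART0_BASE : DUART1_BASE;
}

static uint64_t duart_chanreg(unsigned int chan, uint64_t reg)
{
	/* Same shape as A_BCM1480_DUART_CHANREG(chan, reg). */
	return duart_base(chan) + CHANREG_SPACING * ((chan & 1) + 1) + reg;
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 4; chan++)
		printf("chan %u -> base 0x%010llx, chanreg(+0) 0x%010llx\n",
		       chan, (unsigned long long)duart_base(chan),
		       (unsigned long long)duart_chanreg(chan, 0));
	return 0;
}

Passing reg = 0 just prints the start of each channel's register block; the individual
register offsets come from the shared DUART definitions, which this hunk does not touch.
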
@@ -249,43 +249,43 @@
* These constants are the absolute addresses.
*/
-#define A_BCM1480_DUART_MODE_REG_1_C 0x0010060400
-#define A_BCM1480_DUART_MODE_REG_2_C 0x0010060410
-#define A_BCM1480_DUART_STATUS_C 0x0010060420
-#define A_BCM1480_DUART_CLK_SEL_C 0x0010060430
-#define A_BCM1480_DUART_FULL_CTL_C 0x0010060440
-#define A_BCM1480_DUART_CMD_C 0x0010060450
-#define A_BCM1480_DUART_RX_HOLD_C 0x0010060460
-#define A_BCM1480_DUART_TX_HOLD_C 0x0010060470
-#define A_BCM1480_DUART_OPCR_C 0x0010060480
-#define A_BCM1480_DUART_AUX_CTRL_C 0x0010060490
-
-#define A_BCM1480_DUART_MODE_REG_1_D 0x0010060500
-#define A_BCM1480_DUART_MODE_REG_2_D 0x0010060510
-#define A_BCM1480_DUART_STATUS_D 0x0010060520
-#define A_BCM1480_DUART_CLK_SEL_D 0x0010060530
-#define A_BCM1480_DUART_FULL_CTL_D 0x0010060540
-#define A_BCM1480_DUART_CMD_D 0x0010060550
-#define A_BCM1480_DUART_RX_HOLD_D 0x0010060560
-#define A_BCM1480_DUART_TX_HOLD_D 0x0010060570
-#define A_BCM1480_DUART_OPCR_D 0x0010060580
-#define A_BCM1480_DUART_AUX_CTRL_D 0x0010060590
-
-#define A_BCM1480_DUART_INPORT_CHNG_CD 0x0010060600
-#define A_BCM1480_DUART_AUX_CTRL_CD 0x0010060610
-#define A_BCM1480_DUART_ISR_C 0x0010060620
-#define A_BCM1480_DUART_IMR_C 0x0010060630
-#define A_BCM1480_DUART_ISR_D 0x0010060640
-#define A_BCM1480_DUART_IMR_D 0x0010060650
-#define A_BCM1480_DUART_OUT_PORT_CD 0x0010060660
-#define A_BCM1480_DUART_OPCR_CD 0x0010060670
-#define A_BCM1480_DUART_IN_PORT_CD 0x0010060680
-#define A_BCM1480_DUART_ISR_CD 0x0010060690
-#define A_BCM1480_DUART_IMR_CD 0x00100606A0
-#define A_BCM1480_DUART_SET_OPR_CD 0x00100606B0
-#define A_BCM1480_DUART_CLEAR_OPR_CD 0x00100606C0
-#define A_BCM1480_DUART_INPORT_CHNG_C 0x00100606D0
-#define A_BCM1480_DUART_INPORT_CHNG_D 0x00100606E0
+#define A_BCM1480_DUART_MODE_REG_1_C 0x0010060400
+#define A_BCM1480_DUART_MODE_REG_2_C 0x0010060410
+#define A_BCM1480_DUART_STATUS_C 0x0010060420
+#define A_BCM1480_DUART_CLK_SEL_C 0x0010060430
+#define A_BCM1480_DUART_FULL_CTL_C 0x0010060440
+#define A_BCM1480_DUART_CMD_C 0x0010060450
+#define A_BCM1480_DUART_RX_HOLD_C 0x0010060460
+#define A_BCM1480_DUART_TX_HOLD_C 0x0010060470
+#define A_BCM1480_DUART_OPCR_C 0x0010060480
+#define A_BCM1480_DUART_AUX_CTRL_C 0x0010060490
+
+#define A_BCM1480_DUART_MODE_REG_1_D 0x0010060500
+#define A_BCM1480_DUART_MODE_REG_2_D 0x0010060510
+#define A_BCM1480_DUART_STATUS_D 0x0010060520
+#define A_BCM1480_DUART_CLK_SEL_D 0x0010060530
+#define A_BCM1480_DUART_FULL_CTL_D 0x0010060540
+#define A_BCM1480_DUART_CMD_D 0x0010060550
+#define A_BCM1480_DUART_RX_HOLD_D 0x0010060560
+#define A_BCM1480_DUART_TX_HOLD_D 0x0010060570
+#define A_BCM1480_DUART_OPCR_D 0x0010060580
+#define A_BCM1480_DUART_AUX_CTRL_D 0x0010060590
+
+#define A_BCM1480_DUART_INPORT_CHNG_CD 0x0010060600
+#define A_BCM1480_DUART_AUX_CTRL_CD 0x0010060610
+#define A_BCM1480_DUART_ISR_C 0x0010060620
+#define A_BCM1480_DUART_IMR_C 0x0010060630
+#define A_BCM1480_DUART_ISR_D 0x0010060640
+#define A_BCM1480_DUART_IMR_D 0x0010060650
+#define A_BCM1480_DUART_OUT_PORT_CD 0x0010060660
+#define A_BCM1480_DUART_OPCR_CD 0x0010060670
+#define A_BCM1480_DUART_IN_PORT_CD 0x0010060680
+#define A_BCM1480_DUART_ISR_CD 0x0010060690
+#define A_BCM1480_DUART_IMR_CD 0x00100606A0
+#define A_BCM1480_DUART_SET_OPR_CD 0x00100606B0
+#define A_BCM1480_DUART_CLEAR_OPR_CD 0x00100606C0
+#define A_BCM1480_DUART_INPORT_CHNG_C 0x00100606D0
+#define A_BCM1480_DUART_INPORT_CHNG_D 0x00100606E0
/* *********************************************************************
@@ -301,8 +301,8 @@
/* One additional GPIO register, placed _before_ the BCM1250's GPIO block base */
-#define A_BCM1480_GPIO_INT_ADD_TYPE 0x0010061A78
-#define R_BCM1480_GPIO_INT_ADD_TYPE (-8)
+#define A_BCM1480_GPIO_INT_ADD_TYPE 0x0010061A78
+#define R_BCM1480_GPIO_INT_ADD_TYPE (-8)
#define A_GPIO_INT_ADD_TYPE A_BCM1480_GPIO_INT_ADD_TYPE
#define R_GPIO_INT_ADD_TYPE R_BCM1480_GPIO_INT_ADD_TYPE
@@ -321,30 +321,30 @@
/* Watchdog timers */
-#define A_BCM1480_SCD_WDOG_2 0x0010022050
-#define A_BCM1480_SCD_WDOG_3 0x0010022150
+#define A_BCM1480_SCD_WDOG_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_3 0x0010022150
-#define BCM1480_SCD_NUM_WDOGS 4
+#define BCM1480_SCD_NUM_WDOGS 4
-#define A_BCM1480_SCD_WDOG_BASE(w) (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
+#define A_BCM1480_SCD_WDOG_BASE(w) (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
#define A_BCM1480_SCD_WDOG_REGISTER(w, r) (A_BCM1480_SCD_WDOG_BASE(w) + (r))
-#define A_BCM1480_SCD_WDOG_INIT_2 0x0010022050
-#define A_BCM1480_SCD_WDOG_CNT_2 0x0010022058
-#define A_BCM1480_SCD_WDOG_CFG_2 0x0010022060
+#define A_BCM1480_SCD_WDOG_INIT_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_CNT_2 0x0010022058
+#define A_BCM1480_SCD_WDOG_CFG_2 0x0010022060
-#define A_BCM1480_SCD_WDOG_INIT_3 0x0010022150
-#define A_BCM1480_SCD_WDOG_CNT_3 0x0010022158
-#define A_BCM1480_SCD_WDOG_CFG_3 0x0010022160
+#define A_BCM1480_SCD_WDOG_INIT_3 0x0010022150
+#define A_BCM1480_SCD_WDOG_CNT_3 0x0010022158
+#define A_BCM1480_SCD_WDOG_CFG_3 0x0010022160
/* BCM1480 has two additional compare registers */
#define A_BCM1480_SCD_ZBBUS_CYCLE_COUNT A_SCD_ZBBUS_CYCLE_COUNT
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE 0x0010020C00
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0 A_SCD_ZBBUS_CYCLE_CP0
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1 A_SCD_ZBBUS_CYCLE_CP1
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2 0x0010020C10
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3 0x0010020C18
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE 0x0010020C00
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0 A_SCD_ZBBUS_CYCLE_CP0
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1 A_SCD_ZBBUS_CYCLE_CP1
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2 0x0010020C10
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3 0x0010020C18
/* *********************************************************************
* System Control Registers (Section 4.2)
@@ -352,7 +352,7 @@
/* Scratch register in different place */
-#define A_BCM1480_SCD_SCRATCH 0x100200A0
+#define A_BCM1480_SCD_SCRATCH 0x100200A0
/* *********************************************************************
* System Address Trap Registers (Section 4.9)
@@ -364,68 +364,68 @@
* System Interrupt Mapper Registers (Sections 4.3-4.5)
********************************************************************* */
-#define A_BCM1480_IMR_CPU0_BASE 0x0010020000
-#define A_BCM1480_IMR_CPU1_BASE 0x0010022000
-#define A_BCM1480_IMR_CPU2_BASE 0x0010024000
-#define A_BCM1480_IMR_CPU3_BASE 0x0010026000
-#define BCM1480_IMR_REGISTER_SPACING 0x2000
+#define A_BCM1480_IMR_CPU0_BASE 0x0010020000
+#define A_BCM1480_IMR_CPU1_BASE 0x0010022000
+#define A_BCM1480_IMR_CPU2_BASE 0x0010024000
+#define A_BCM1480_IMR_CPU3_BASE 0x0010026000
+#define BCM1480_IMR_REGISTER_SPACING 0x2000
#define BCM1480_IMR_REGISTER_SPACING_SHIFT 13
-#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
+#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
#define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
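
For illustration only (not part of the patch): a short, self-contained sketch of how the per-CPU interrupt-mapper macros compose, built solely from the base, spacing and register-offset values visible in this hunk (ULL suffixes added).

#include <assert.h>

#define A_BCM1480_IMR_CPU0_BASE          0x0010020000ULL
#define A_BCM1480_IMR_CPU1_BASE          0x0010022000ULL
#define BCM1480_IMR_REGISTER_SPACING     0x2000
#define A_BCM1480_IMR_MAPPER(cpu)        (A_BCM1480_IMR_CPU0_BASE + (cpu) * BCM1480_IMR_REGISTER_SPACING)
#define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu) + (reg))
#define R_BCM1480_IMR_INTERRUPT_MASK_H   0x0028

int main(void)
{
	/* The mapper for CPU 1 is exactly the CPU1 base listed above. */
	assert(A_BCM1480_IMR_MAPPER(1) == A_BCM1480_IMR_CPU1_BASE);
	/* CPU 2's high interrupt-mask register: 0x0010024000 + 0x28. */
	assert(A_BCM1480_IMR_REGISTER(2, R_BCM1480_IMR_INTERRUPT_MASK_H) == 0x0010024028ULL);
	return 0;
}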
/* Most IMR registers are 128 bits, implemented as non-contiguous
64-bit registers high (_H) and low (_L) */
-#define BCM1480_IMR_HL_SPACING 0x1000
+#define BCM1480_IMR_HL_SPACING 0x1000
-#define R_BCM1480_IMR_INTERRUPT_DIAG_H 0x0010
-#define R_BCM1480_IMR_LDT_INTERRUPT_H 0x0018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H 0x0020
-#define R_BCM1480_IMR_INTERRUPT_MASK_H 0x0028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_H 0x0038
+#define R_BCM1480_IMR_INTERRUPT_DIAG_H 0x0010
+#define R_BCM1480_IMR_LDT_INTERRUPT_H 0x0018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H 0x0020
+#define R_BCM1480_IMR_INTERRUPT_MASK_H 0x0028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_H 0x0038
#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_H 0x0040
-#define R_BCM1480_IMR_LDT_INTERRUPT_SET 0x0048
-#define R_BCM1480_IMR_MAILBOX_0_CPU 0x00C0
-#define R_BCM1480_IMR_MAILBOX_0_SET_CPU 0x00C8
-#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU 0x00D0
-#define R_BCM1480_IMR_MAILBOX_1_CPU 0x00E0
-#define R_BCM1480_IMR_MAILBOX_1_SET_CPU 0x00E8
-#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU 0x00F0
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H 0x0100
-#define BCM1480_IMR_INTERRUPT_STATUS_COUNT 8
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H 0x0200
-#define BCM1480_IMR_INTERRUPT_MAP_COUNT 64
-
-#define R_BCM1480_IMR_INTERRUPT_DIAG_L 0x1010
-#define R_BCM1480_IMR_LDT_INTERRUPT_L 0x1018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L 0x1020
-#define R_BCM1480_IMR_INTERRUPT_MASK_L 0x1028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_L 0x1038
+#define R_BCM1480_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_BCM1480_IMR_MAILBOX_0_CPU 0x00C0
+#define R_BCM1480_IMR_MAILBOX_0_SET_CPU 0x00C8
+#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU 0x00D0
+#define R_BCM1480_IMR_MAILBOX_1_CPU 0x00E0
+#define R_BCM1480_IMR_MAILBOX_1_SET_CPU 0x00E8
+#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU 0x00F0
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H 0x0100
+#define BCM1480_IMR_INTERRUPT_STATUS_COUNT 8
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H 0x0200
+#define BCM1480_IMR_INTERRUPT_MAP_COUNT 64
+
+#define R_BCM1480_IMR_INTERRUPT_DIAG_L 0x1010
+#define R_BCM1480_IMR_LDT_INTERRUPT_L 0x1018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L 0x1020
+#define R_BCM1480_IMR_INTERRUPT_MASK_L 0x1028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_L 0x1038
#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_L 0x1040
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L 0x1100
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L 0x1200
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L 0x1100
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L 0x1200
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE 0x0010028000
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE 0x0010028100
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE 0x0010028200
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE 0x0010028300
-#define BCM1480_IMR_ALIAS_MAILBOX_SPACING 0100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE 0x0010028000
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE 0x0010028100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE 0x0010028200
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE 0x0010028300
+#define BCM1480_IMR_ALIAS_MAILBOX_SPACING 0100
#define A_BCM1480_IMR_ALIAS_MAILBOX(cpu) (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
- (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
+ (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
#define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */
/*
* these macros work together to build the address of a mailbox
* register, e.g., A_BCM1480_MAILBOX_REGISTER(0,R_BCM1480_IMR_MAILBOX_SET,2)
* for mbox_0_set_cpu2 returns 0x00100240C8
*/
-#define R_BCM1480_IMR_MAILBOX_CPU 0x00
-#define R_BCM1480_IMR_MAILBOX_SET 0x08
-#define R_BCM1480_IMR_MAILBOX_CLR 0x10
+#define R_BCM1480_IMR_MAILBOX_CPU 0x00
+#define R_BCM1480_IMR_MAILBOX_SET 0x08
+#define R_BCM1480_IMR_MAILBOX_CLR 0x10
#define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20
#define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
(A_BCM1480_IMR_CPU0_BASE + \
@@ -440,22 +440,22 @@
/* BCM1480 has four more performance counter registers, and two control
registers. */
-#define A_BCM1480_SCD_PERF_CNT_BASE 0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_BASE 0x00100204C0
-#define A_BCM1480_SCD_PERF_CNT_CFG0 0x00100204C0
-#define A_BCM1480_SCD_PERF_CNT_CFG_0 A_BCM1480_SCD_PERF_CNT_CFG0
-#define A_BCM1480_SCD_PERF_CNT_CFG1 0x00100204C8
-#define A_BCM1480_SCD_PERF_CNT_CFG_1 A_BCM1480_SCD_PERF_CNT_CFG1
+#define A_BCM1480_SCD_PERF_CNT_CFG0 0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_CFG_0 A_BCM1480_SCD_PERF_CNT_CFG0
+#define A_BCM1480_SCD_PERF_CNT_CFG1 0x00100204C8
+#define A_BCM1480_SCD_PERF_CNT_CFG_1 A_BCM1480_SCD_PERF_CNT_CFG1
-#define A_BCM1480_SCD_PERF_CNT_0 A_SCD_PERF_CNT_0
-#define A_BCM1480_SCD_PERF_CNT_1 A_SCD_PERF_CNT_1
-#define A_BCM1480_SCD_PERF_CNT_2 A_SCD_PERF_CNT_2
-#define A_BCM1480_SCD_PERF_CNT_3 A_SCD_PERF_CNT_3
+#define A_BCM1480_SCD_PERF_CNT_0 A_SCD_PERF_CNT_0
+#define A_BCM1480_SCD_PERF_CNT_1 A_SCD_PERF_CNT_1
+#define A_BCM1480_SCD_PERF_CNT_2 A_SCD_PERF_CNT_2
+#define A_BCM1480_SCD_PERF_CNT_3 A_SCD_PERF_CNT_3
-#define A_BCM1480_SCD_PERF_CNT_4 0x00100204F0
-#define A_BCM1480_SCD_PERF_CNT_5 0x00100204F8
-#define A_BCM1480_SCD_PERF_CNT_6 0x0010020500
-#define A_BCM1480_SCD_PERF_CNT_7 0x0010020508
+#define A_BCM1480_SCD_PERF_CNT_4 0x00100204F0
+#define A_BCM1480_SCD_PERF_CNT_5 0x00100204F8
+#define A_BCM1480_SCD_PERF_CNT_6 0x0010020500
+#define A_BCM1480_SCD_PERF_CNT_7 0x0010020508
#define BCM1480_SCD_NUM_PERF_CNT 8
#define BCM1480_SCD_PERF_CNT_SPACING 8
@@ -468,7 +468,7 @@
/* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
-#define A_BCM1480_BUS_ERR_STATUS_DEBUG 0x00100208D8
+#define A_BCM1480_BUS_ERR_STATUS_DEBUG 0x00100208D8
/* *********************************************************************
* System Debug Controller Registers (Section 19)
@@ -497,46 +497,46 @@
#define BCM1480_HT_PORT_SPACING 0x800
#define A_BCM1480_HT_PORT_HEADER(x) (A_BCM1480_HT_PORT0_HEADER + ((x)*BCM1480_HT_PORT_SPACING))
-#define A_BCM1480_HT_PORT0_HEADER 0x00FE000000
-#define A_BCM1480_HT_PORT1_HEADER 0x00FE000800
-#define A_BCM1480_HT_PORT2_HEADER 0x00FE001000
-#define A_BCM1480_HT_TYPE00_HEADER 0x00FE002000
+#define A_BCM1480_HT_PORT0_HEADER 0x00FE000000
+#define A_BCM1480_HT_PORT1_HEADER 0x00FE000800
+#define A_BCM1480_HT_PORT2_HEADER 0x00FE001000
+#define A_BCM1480_HT_TYPE00_HEADER 0x00FE002000
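
For illustration only (not part of the patch): a quick consistency check showing that the HyperTransport port-header macro reproduces the absolute per-port constants listed above (values copied from this hunk, ULL suffixes added).

#include <assert.h>

#define A_BCM1480_HT_PORT0_HEADER   0x00FE000000ULL
#define A_BCM1480_HT_PORT1_HEADER   0x00FE000800ULL
#define A_BCM1480_HT_PORT2_HEADER   0x00FE001000ULL
#define BCM1480_HT_PORT_SPACING     0x800
#define A_BCM1480_HT_PORT_HEADER(x) (A_BCM1480_HT_PORT0_HEADER + ((x) * BCM1480_HT_PORT_SPACING))

int main(void)
{
	/* Ports are laid out 0x800 apart, starting at port 0's header. */
	assert(A_BCM1480_HT_PORT_HEADER(1) == A_BCM1480_HT_PORT1_HEADER);
	assert(A_BCM1480_HT_PORT_HEADER(2) == A_BCM1480_HT_PORT2_HEADER);
	return 0;
}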
/* *********************************************************************
* Node Controller Registers (Section 9)
********************************************************************* */
-#define A_BCM1480_NC_BASE 0x00DFBD0000
+#define A_BCM1480_NC_BASE 0x00DFBD0000
-#define A_BCM1480_NC_RLD_FIELD 0x00DFBD0000
-#define A_BCM1480_NC_RLD_TRIGGER 0x00DFBD0020
-#define A_BCM1480_NC_RLD_BAD_ERROR 0x00DFBD0040
-#define A_BCM1480_NC_RLD_COR_ERROR 0x00DFBD0060
-#define A_BCM1480_NC_RLD_ECC_STATUS 0x00DFBD0080
-#define A_BCM1480_NC_RLD_WAY_ENABLE 0x00DFBD00A0
-#define A_BCM1480_NC_RLD_RANDOM_LFSR 0x00DFBD00C0
+#define A_BCM1480_NC_RLD_FIELD 0x00DFBD0000
+#define A_BCM1480_NC_RLD_TRIGGER 0x00DFBD0020
+#define A_BCM1480_NC_RLD_BAD_ERROR 0x00DFBD0040
+#define A_BCM1480_NC_RLD_COR_ERROR 0x00DFBD0060
+#define A_BCM1480_NC_RLD_ECC_STATUS 0x00DFBD0080
+#define A_BCM1480_NC_RLD_WAY_ENABLE 0x00DFBD00A0
+#define A_BCM1480_NC_RLD_RANDOM_LFSR 0x00DFBD00C0
-#define A_BCM1480_NC_INTERRUPT_STATUS 0x00DFBD00E0
-#define A_BCM1480_NC_INTERRUPT_ENABLE 0x00DFBD0100
-#define A_BCM1480_NC_TIMEOUT_COUNTER 0x00DFBD0120
+#define A_BCM1480_NC_INTERRUPT_STATUS 0x00DFBD00E0
+#define A_BCM1480_NC_INTERRUPT_ENABLE 0x00DFBD0100
+#define A_BCM1480_NC_TIMEOUT_COUNTER 0x00DFBD0120
#define A_BCM1480_NC_TIMEOUT_COUNTER_SEL 0x00DFBD0140
-#define A_BCM1480_NC_CREDIT_STATUS_REG0 0x00DFBD0200
-#define A_BCM1480_NC_CREDIT_STATUS_REG1 0x00DFBD0220
-#define A_BCM1480_NC_CREDIT_STATUS_REG2 0x00DFBD0240
-#define A_BCM1480_NC_CREDIT_STATUS_REG3 0x00DFBD0260
-#define A_BCM1480_NC_CREDIT_STATUS_REG4 0x00DFBD0280
-#define A_BCM1480_NC_CREDIT_STATUS_REG5 0x00DFBD02A0
-#define A_BCM1480_NC_CREDIT_STATUS_REG6 0x00DFBD02C0
-#define A_BCM1480_NC_CREDIT_STATUS_REG7 0x00DFBD02E0
-#define A_BCM1480_NC_CREDIT_STATUS_REG8 0x00DFBD0300
-#define A_BCM1480_NC_CREDIT_STATUS_REG9 0x00DFBD0320
+#define A_BCM1480_NC_CREDIT_STATUS_REG0 0x00DFBD0200
+#define A_BCM1480_NC_CREDIT_STATUS_REG1 0x00DFBD0220
+#define A_BCM1480_NC_CREDIT_STATUS_REG2 0x00DFBD0240
+#define A_BCM1480_NC_CREDIT_STATUS_REG3 0x00DFBD0260
+#define A_BCM1480_NC_CREDIT_STATUS_REG4 0x00DFBD0280
+#define A_BCM1480_NC_CREDIT_STATUS_REG5 0x00DFBD02A0
+#define A_BCM1480_NC_CREDIT_STATUS_REG6 0x00DFBD02C0
+#define A_BCM1480_NC_CREDIT_STATUS_REG7 0x00DFBD02E0
+#define A_BCM1480_NC_CREDIT_STATUS_REG8 0x00DFBD0300
+#define A_BCM1480_NC_CREDIT_STATUS_REG9 0x00DFBD0320
#define A_BCM1480_NC_CREDIT_STATUS_REG10 0x00DFBE0000
#define A_BCM1480_NC_CREDIT_STATUS_REG11 0x00DFBE0020
#define A_BCM1480_NC_CREDIT_STATUS_REG12 0x00DFBE0040
-#define A_BCM1480_NC_SR_TIMEOUT_COUNTER 0x00DFBE0060
+#define A_BCM1480_NC_SR_TIMEOUT_COUNTER 0x00DFBE0060
#define A_BCM1480_NC_SR_TIMEOUT_COUNTER_SEL 0x00DFBE0080
@@ -544,43 +544,43 @@
* H&R Block Configuration Registers (Section 12.4)
********************************************************************* */
-#define A_BCM1480_HR_BASE_0 0x00DF820000
-#define A_BCM1480_HR_BASE_1 0x00DF8A0000
-#define A_BCM1480_HR_BASE_2 0x00DF920000
-#define BCM1480_HR_REGISTER_SPACING 0x80000
+#define A_BCM1480_HR_BASE_0 0x00DF820000
+#define A_BCM1480_HR_BASE_1 0x00DF8A0000
+#define A_BCM1480_HR_BASE_2 0x00DF920000
+#define BCM1480_HR_REGISTER_SPACING 0x80000
-#define A_BCM1480_HR_BASE(idx) (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
-#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
+#define A_BCM1480_HR_BASE(idx) (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
+#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
-#define R_BCM1480_HR_CFG 0x0000000000
+#define R_BCM1480_HR_CFG 0x0000000000
#define R_BCM1480_HR_MAPPING 0x0000010010
-#define BCM1480_HR_RULE_SPACING 0x0000000010
-#define BCM1480_HR_NUM_RULES 16
-#define BCM1480_HR_OP_OFFSET 0x0000000100
-#define BCM1480_HR_TYPE_OFFSET 0x0000000108
-#define R_BCM1480_HR_RULE_OP(idx) (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
-#define R_BCM1480_HR_RULE_TYPE(idx) (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define BCM1480_HR_RULE_SPACING 0x0000000010
+#define BCM1480_HR_NUM_RULES 16
+#define BCM1480_HR_OP_OFFSET 0x0000000100
+#define BCM1480_HR_TYPE_OFFSET 0x0000000108
+#define R_BCM1480_HR_RULE_OP(idx) (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define R_BCM1480_HR_RULE_TYPE(idx) (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
-#define BCM1480_HR_LEAF_SPACING 0x0000000010
-#define BCM1480_HR_NUM_LEAVES 10
-#define BCM1480_HR_LEAF_OFFSET 0x0000000300
-#define R_BCM1480_HR_HA_LEAF0(idx) (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
+#define BCM1480_HR_LEAF_SPACING 0x0000000010
+#define BCM1480_HR_NUM_LEAVES 10
+#define BCM1480_HR_LEAF_OFFSET 0x0000000300
+#define R_BCM1480_HR_HA_LEAF0(idx) (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
-#define R_BCM1480_HR_EX_LEAF0 0x00000003A0
+#define R_BCM1480_HR_EX_LEAF0 0x00000003A0
-#define BCM1480_HR_PATH_SPACING 0x0000000010
-#define BCM1480_HR_NUM_PATHS 16
-#define BCM1480_HR_PATH_OFFSET 0x0000000600
-#define R_BCM1480_HR_PATH(idx) (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
+#define BCM1480_HR_PATH_SPACING 0x0000000010
+#define BCM1480_HR_NUM_PATHS 16
+#define BCM1480_HR_PATH_OFFSET 0x0000000600
+#define R_BCM1480_HR_PATH(idx) (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
-#define R_BCM1480_HR_PATH_DEFAULT 0x0000000700
+#define R_BCM1480_HR_PATH_DEFAULT 0x0000000700
-#define BCM1480_HR_ROUTE_SPACING 8
-#define BCM1480_HR_NUM_ROUTES 512
-#define BCM1480_HR_ROUTE_OFFSET 0x0000001000
-#define R_BCM1480_HR_RT_WORD(idx) (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
+#define BCM1480_HR_ROUTE_SPACING 8
+#define BCM1480_HR_NUM_ROUTES 512
+#define BCM1480_HR_ROUTE_OFFSET 0x0000001000
+#define R_BCM1480_HR_RT_WORD(idx) (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
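
For illustration only (not part of the patch): a compact sketch composing the H&R block base macro with the per-rule and route-word offset macros defined above; every value is taken from this hunk (ULL suffixes added).

#include <assert.h>

#define A_BCM1480_HR_BASE_0             0x00DF820000ULL
#define A_BCM1480_HR_BASE_1             0x00DF8A0000ULL
#define BCM1480_HR_REGISTER_SPACING     0x80000
#define A_BCM1480_HR_BASE(idx)          (A_BCM1480_HR_BASE_0 + ((idx) * BCM1480_HR_REGISTER_SPACING))
#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
#define BCM1480_HR_RULE_SPACING         0x10
#define BCM1480_HR_OP_OFFSET            0x100
#define R_BCM1480_HR_RULE_OP(idx)       (BCM1480_HR_OP_OFFSET + ((idx) * BCM1480_HR_RULE_SPACING))
#define BCM1480_HR_ROUTE_SPACING        8
#define BCM1480_HR_ROUTE_OFFSET         0x1000
#define R_BCM1480_HR_RT_WORD(idx)       (BCM1480_HR_ROUTE_OFFSET + ((idx) * BCM1480_HR_ROUTE_SPACING))

int main(void)
{
	/* The block-1 base derived from block 0 matches the listed constant. */
	assert(A_BCM1480_HR_BASE(1) == A_BCM1480_HR_BASE_1);
	/* Rule 4's opcode word on H&R block 1: 0x00DF8A0000 + 0x100 + 4*0x10. */
	assert(A_BCM1480_HR_REGISTER(1, R_BCM1480_HR_RULE_OP(4)) == 0x00DF8A0140ULL);
	/* Route word 3 sits 24 bytes into the route table. */
	assert(R_BCM1480_HR_RT_WORD(3) == 0x1018);
	return 0;
}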
/* checked to here - ehs */
@@ -588,55 +588,55 @@
* Packet Manager DMA Registers (Section 12.5)
********************************************************************* */
-#define A_BCM1480_PM_BASE 0x0010056000
+#define A_BCM1480_PM_BASE 0x0010056000
-#define A_BCM1480_PMI_LCL_0 0x0010058000
-#define A_BCM1480_PMO_LCL_0 0x001005C000
-#define A_BCM1480_PMI_OFFSET_0 (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_OFFSET_0 (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_LCL_0 0x0010058000
+#define A_BCM1480_PMO_LCL_0 0x001005C000
+#define A_BCM1480_PMI_OFFSET_0 (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_OFFSET_0 (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
-#define BCM1480_PM_LCL_REGISTER_SPACING 0x100
-#define BCM1480_PM_NUM_CHANNELS 32
+#define BCM1480_PM_LCL_REGISTER_SPACING 0x100
+#define BCM1480_PM_NUM_CHANNELS 32
-#define A_BCM1480_PMI_LCL_BASE(idx) (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
-#define A_BCM1480_PMO_LCL_BASE(idx) (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMO_LCL_REGISTER(idx, reg) (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMI_LCL_BASE(idx) (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMO_LCL_BASE(idx) (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMO_LCL_REGISTER(idx, reg) (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
-#define BCM1480_PM_INT_PACKING 8
-#define BCM1480_PM_INT_FUNCTION_SPACING 0x40
-#define BCM1480_PM_INT_NUM_FUNCTIONS 3
+#define BCM1480_PM_INT_PACKING 8
+#define BCM1480_PM_INT_FUNCTION_SPACING 0x40
+#define BCM1480_PM_INT_NUM_FUNCTIONS 3
/*
* DMA channel registers relative to A_BCM1480_PMI_LCL_BASE(n) and A_BCM1480_PMO_LCL_BASE(n)
*/
-#define R_BCM1480_PM_BASE_SIZE 0x0000000000
-#define R_BCM1480_PM_CNT 0x0000000008
-#define R_BCM1480_PM_PFCNT 0x0000000010
-#define R_BCM1480_PM_LAST 0x0000000018
-#define R_BCM1480_PM_PFINDX 0x0000000020
-#define R_BCM1480_PM_INT_WMK 0x0000000028
-#define R_BCM1480_PM_CONFIG0 0x0000000030
-#define R_BCM1480_PM_LOCALDEBUG 0x0000000078
-#define R_BCM1480_PM_CACHEABILITY 0x0000000080 /* PMI only */
-#define R_BCM1480_PM_INT_CNFG 0x0000000088
-#define R_BCM1480_PM_DESC_MERGE_TIMER 0x0000000090
-#define R_BCM1480_PM_LOCALDEBUG_PIB 0x00000000F8 /* PMI only */
-#define R_BCM1480_PM_LOCALDEBUG_POB 0x00000000F8 /* PMO only */
+#define R_BCM1480_PM_BASE_SIZE 0x0000000000
+#define R_BCM1480_PM_CNT 0x0000000008
+#define R_BCM1480_PM_PFCNT 0x0000000010
+#define R_BCM1480_PM_LAST 0x0000000018
+#define R_BCM1480_PM_PFINDX 0x0000000020
+#define R_BCM1480_PM_INT_WMK 0x0000000028
+#define R_BCM1480_PM_CONFIG0 0x0000000030
+#define R_BCM1480_PM_LOCALDEBUG 0x0000000078
+#define R_BCM1480_PM_CACHEABILITY 0x0000000080 /* PMI only */
+#define R_BCM1480_PM_INT_CNFG 0x0000000088
+#define R_BCM1480_PM_DESC_MERGE_TIMER 0x0000000090
+#define R_BCM1480_PM_LOCALDEBUG_PIB 0x00000000F8 /* PMI only */
+#define R_BCM1480_PM_LOCALDEBUG_POB 0x00000000F8 /* PMO only */
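
For illustration only (not part of the patch): a small sketch of how a packet-manager channel's register address is formed from the input-DMA base, the 0x100 per-channel spacing and a channel-relative offset from the list above (constants copied from this hunk, ULL suffixes added).

#include <assert.h>

#define A_BCM1480_PMI_LCL_0                  0x0010058000ULL
#define BCM1480_PM_LCL_REGISTER_SPACING      0x100
#define A_BCM1480_PMI_LCL_BASE(idx)          (A_BCM1480_PMI_LCL_0 + ((idx) * BCM1480_PM_LCL_REGISTER_SPACING))
#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
#define R_BCM1480_PM_CNT                     0x08
#define R_BCM1480_PM_CONFIG0                 0x30

int main(void)
{
	/* Input-DMA channel 5: base 0x0010058500, count register at +0x08. */
	assert(A_BCM1480_PMI_LCL_BASE(5) == 0x0010058500ULL);
	assert(A_BCM1480_PMI_LCL_REGISTER(5, R_BCM1480_PM_CNT) == 0x0010058508ULL);
	/* Channel 31, the last of the 32 channels (BCM1480_PM_NUM_CHANNELS). */
	assert(A_BCM1480_PMI_LCL_REGISTER(31, R_BCM1480_PM_CONFIG0) == 0x0010059F30ULL);
	return 0;
}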
/*
* Global Registers (Not Channelized)
*/
-#define A_BCM1480_PMI_GLB_0 0x0010056000
-#define A_BCM1480_PMO_GLB_0 0x0010057000
+#define A_BCM1480_PMI_GLB_0 0x0010056000
+#define A_BCM1480_PMO_GLB_0 0x0010057000
/*
* PM to TX Mapping Register relative to A_BCM1480_PMI_GLB_0 and A_BCM1480_PMO_GLB_0
*/
-#define R_BCM1480_PM_PMO_MAPPING 0x00000008C8 /* PMO only */
+#define R_BCM1480_PM_PMO_MAPPING 0x00000008C8 /* PMO only */
#define A_BCM1480_PM_PMO_MAPPING (A_BCM1480_PMO_GLB_0 + R_BCM1480_PM_PMO_MAPPING)
@@ -645,32 +645,32 @@
*/
-#define A_BCM1480_PMI_INT_0 0x0010056800
-#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMI_INT_OFFSET_0 (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_INT_0 0x0010057800
-#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMO_INT_OFFSET_0 (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_INT_0 0x0010056800
+#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMI_INT_OFFSET_0 (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_INT_0 0x0010057800
+#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMO_INT_OFFSET_0 (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
/*
* Interrupt registers relative to A_BCM1480_PMI_INT_0 and A_BCM1480_PMO_INT_0
*/
-#define R_BCM1480_PM_INT_ST 0x0000000000
-#define R_BCM1480_PM_INT_MSK 0x0000000040
-#define R_BCM1480_PM_INT_CLR 0x0000000080
-#define R_BCM1480_PM_MRGD_INT 0x00000000C0
+#define R_BCM1480_PM_INT_ST 0x0000000000
+#define R_BCM1480_PM_INT_MSK 0x0000000040
+#define R_BCM1480_PM_INT_CLR 0x0000000080
+#define R_BCM1480_PM_MRGD_INT 0x00000000C0
/*
* Debug registers (global)
*/
#define A_BCM1480_PM_GLOBALDEBUGMODE_PMI 0x0010056000
-#define A_BCM1480_PM_GLOBALDEBUG_PID 0x00100567F8
-#define A_BCM1480_PM_GLOBALDEBUG_PIB 0x0010056FF8
+#define A_BCM1480_PM_GLOBALDEBUG_PID 0x00100567F8
+#define A_BCM1480_PM_GLOBALDEBUG_PIB 0x0010056FF8
#define A_BCM1480_PM_GLOBALDEBUGMODE_PMO 0x0010057000
-#define A_BCM1480_PM_GLOBALDEBUG_POD 0x00100577F8
-#define A_BCM1480_PM_GLOBALDEBUG_POB 0x0010057FF8
+#define A_BCM1480_PM_GLOBALDEBUG_POD 0x00100577F8
+#define A_BCM1480_PM_GLOBALDEBUG_POB 0x0010057FF8
/* *********************************************************************
* Switch performance counters
@@ -715,16 +715,16 @@
* High-Speed Port Registers (Section 13)
********************************************************************* */
-#define A_BCM1480_HSP_BASE_0 0x00DF810000
-#define A_BCM1480_HSP_BASE_1 0x00DF890000
-#define A_BCM1480_HSP_BASE_2 0x00DF910000
-#define BCM1480_HSP_REGISTER_SPACING 0x80000
+#define A_BCM1480_HSP_BASE_0 0x00DF810000
+#define A_BCM1480_HSP_BASE_1 0x00DF890000
+#define A_BCM1480_HSP_BASE_2 0x00DF910000
+#define BCM1480_HSP_REGISTER_SPACING 0x80000
-#define A_BCM1480_HSP_BASE(idx) (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
+#define A_BCM1480_HSP_BASE(idx) (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
#define A_BCM1480_HSP_REGISTER(idx, reg) (A_BCM1480_HSP_BASE(idx) + (reg))
-#define R_BCM1480_HSP_RX_SPI4_CFG_0 0x0000000000
-#define R_BCM1480_HSP_RX_SPI4_CFG_1 0x0000000008
+#define R_BCM1480_HSP_RX_SPI4_CFG_0 0x0000000000
+#define R_BCM1480_HSP_RX_SPI4_CFG_1 0x0000000008
#define R_BCM1480_HSP_RX_SPI4_DESKEW_OVERRIDE 0x0000000010
#define R_BCM1480_HSP_RX_SPI4_DESKEW_DATAPATH 0x0000000018
#define R_BCM1480_HSP_RX_SPI4_PORT_INT_EN 0x0000000020
@@ -733,34 +733,34 @@
#define R_BCM1480_HSP_RX_SPI4_CALENDAR_0 0x0000000200
#define R_BCM1480_HSP_RX_SPI4_CALENDAR_1 0x0000000208
-#define R_BCM1480_HSP_RX_PLL_CNFG 0x0000000800
-#define R_BCM1480_HSP_RX_CALIBRATION 0x0000000808
-#define R_BCM1480_HSP_RX_TEST 0x0000000810
-#define R_BCM1480_HSP_RX_DIAG_DETAILS 0x0000000818
-#define R_BCM1480_HSP_RX_DIAG_CRC_0 0x0000000820
-#define R_BCM1480_HSP_RX_DIAG_CRC_1 0x0000000828
-#define R_BCM1480_HSP_RX_DIAG_HTCMD 0x0000000830
-#define R_BCM1480_HSP_RX_DIAG_PKTCTL 0x0000000838
+#define R_BCM1480_HSP_RX_PLL_CNFG 0x0000000800
+#define R_BCM1480_HSP_RX_CALIBRATION 0x0000000808
+#define R_BCM1480_HSP_RX_TEST 0x0000000810
+#define R_BCM1480_HSP_RX_DIAG_DETAILS 0x0000000818
+#define R_BCM1480_HSP_RX_DIAG_CRC_0 0x0000000820
+#define R_BCM1480_HSP_RX_DIAG_CRC_1 0x0000000828
+#define R_BCM1480_HSP_RX_DIAG_HTCMD 0x0000000830
+#define R_BCM1480_HSP_RX_DIAG_PKTCTL 0x0000000838
#define R_BCM1480_HSP_RX_VIS_FLCTRL_COUNTER 0x0000000870
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0 0x0000020020
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1 0x0000020028
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2 0x0000020030
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3 0x0000020038
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4 0x0000020040
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5 0x0000020048
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6 0x0000020050
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7 0x0000020058
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0 0x0000020020
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1 0x0000020028
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2 0x0000020030
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3 0x0000020038
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4 0x0000020040
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5 0x0000020048
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6 0x0000020050
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7 0x0000020058
#define R_BCM1480_HSP_RX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_RX_PKT_RAMALLOC_0 + 8*(idx))
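
For illustration only (not part of the patch): a sketch composing the high-speed-port base macro with the RX packet RAM-allocation index macro defined just above, using only values visible in this hunk (ULL suffixes added).

#include <assert.h>

#define A_BCM1480_HSP_BASE_0               0x00DF810000ULL
#define A_BCM1480_HSP_BASE_1               0x00DF890000ULL
#define BCM1480_HSP_REGISTER_SPACING       0x80000
#define A_BCM1480_HSP_BASE(idx)            (A_BCM1480_HSP_BASE_0 + ((idx) * BCM1480_HSP_REGISTER_SPACING))
#define A_BCM1480_HSP_REGISTER(idx, reg)   (A_BCM1480_HSP_BASE(idx) + (reg))
#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0    0x0000020020ULL
#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3    0x0000020038ULL
#define R_BCM1480_HSP_RX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_RX_PKT_RAMALLOC_0 + 8 * (idx))

int main(void)
{
	/* The indexed form reproduces the enumerated offset for entry 3. */
	assert(R_BCM1480_HSP_RX_PKT_RAMALLOC(3) == R_BCM1480_HSP_RX_PKT_RAMALLOC_3);
	/* Absolute address of that entry on high-speed port 1. */
	assert(A_BCM1480_HSP_REGISTER(1, R_BCM1480_HSP_RX_PKT_RAMALLOC(3)) == 0x00DF8B0038ULL);
	return 0;
}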
/* XXX Following registers were shuffled. Renamed/renumbered per errata. */
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_0 0x0000020078
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_1 0x0000020080
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_2 0x0000020088
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_3 0x0000020090
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_4 0x0000020098
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_5 0x00000200A0
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_0 0x0000020078
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_1 0x0000020080
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_2 0x0000020088
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_3 0x0000020090
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_4 0x0000020098
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_5 0x00000200A0
#define R_BCM1480_HSP_RX_SPI_WATERMARK_0 0x00000200B0
#define R_BCM1480_HSP_RX_SPI_WATERMARK_1 0x00000200B8
@@ -772,30 +772,30 @@
#define R_BCM1480_HSP_RX_SPI_WATERMARK_7 0x00000200E8
#define R_BCM1480_HSP_RX_SPI_WATERMARK(idx) (R_BCM1480_HSP_RX_SPI_WATERMARK_0 + 8*(idx))
-#define R_BCM1480_HSP_RX_VIS_CMDQ_0 0x00000200F0
-#define R_BCM1480_HSP_RX_VIS_CMDQ_1 0x00000200F8
-#define R_BCM1480_HSP_RX_VIS_CMDQ_2 0x0000020100
-#define R_BCM1480_HSP_RX_RAM_READCTL 0x0000020108
-#define R_BCM1480_HSP_RX_RAM_READWINDOW 0x0000020110
-#define R_BCM1480_HSP_RX_RF_READCTL 0x0000020118
-#define R_BCM1480_HSP_RX_RF_READWINDOW 0x0000020120
+#define R_BCM1480_HSP_RX_VIS_CMDQ_0 0x00000200F0
+#define R_BCM1480_HSP_RX_VIS_CMDQ_1 0x00000200F8
+#define R_BCM1480_HSP_RX_VIS_CMDQ_2 0x0000020100
+#define R_BCM1480_HSP_RX_RAM_READCTL 0x0000020108
+#define R_BCM1480_HSP_RX_RAM_READWINDOW 0x0000020110
+#define R_BCM1480_HSP_RX_RF_READCTL 0x0000020118
+#define R_BCM1480_HSP_RX_RF_READWINDOW 0x0000020120
-#define R_BCM1480_HSP_TX_SPI4_CFG_0 0x0000040000
-#define R_BCM1480_HSP_TX_SPI4_CFG_1 0x0000040008
+#define R_BCM1480_HSP_TX_SPI4_CFG_0 0x0000040000
+#define R_BCM1480_HSP_TX_SPI4_CFG_1 0x0000040008
#define R_BCM1480_HSP_TX_SPI4_TRAINING_FMT 0x0000040010
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0 0x0000040020
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1 0x0000040028
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2 0x0000040030
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3 0x0000040038
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4 0x0000040040
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5 0x0000040048
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6 0x0000040050
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7 0x0000040058
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0 0x0000040020
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1 0x0000040028
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2 0x0000040030
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3 0x0000040038
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4 0x0000040040
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5 0x0000040048
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6 0x0000040050
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7 0x0000040058
#define R_BCM1480_HSP_TX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_TX_PKT_RAMALLOC_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_NPC_RAMALLOC 0x0000040078
-#define R_BCM1480_HSP_TX_RSP_RAMALLOC 0x0000040080
-#define R_BCM1480_HSP_TX_PC_RAMALLOC 0x0000040088
+#define R_BCM1480_HSP_TX_NPC_RAMALLOC 0x0000040078
+#define R_BCM1480_HSP_TX_RSP_RAMALLOC 0x0000040080
+#define R_BCM1480_HSP_TX_PC_RAMALLOC 0x0000040088
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_0 0x0000040090
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_1 0x0000040098
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_2 0x00000400A0
@@ -805,37 +805,37 @@
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_2 0x00000400C0
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_3 0x00000400C8
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_RXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT 0x00000400D0
-#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT 0x00000400D8
+#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT 0x00000400D0
+#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT 0x00000400D8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 0x00000400E0
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_1 0x00000400E8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_2 0x00000400F0
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_3 0x00000400F8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT 0x0000040100
-#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT 0x0000040108
+#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT 0x0000040100
+#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT 0x0000040108
#define R_BCM1480_HSP_TX_SPI4_CALENDAR_0 0x0000040200
#define R_BCM1480_HSP_TX_SPI4_CALENDAR_1 0x0000040208
-#define R_BCM1480_HSP_TX_PLL_CNFG 0x0000040800
-#define R_BCM1480_HSP_TX_CALIBRATION 0x0000040808
-#define R_BCM1480_HSP_TX_TEST 0x0000040810
+#define R_BCM1480_HSP_TX_PLL_CNFG 0x0000040800
+#define R_BCM1480_HSP_TX_CALIBRATION 0x0000040808
+#define R_BCM1480_HSP_TX_TEST 0x0000040810
-#define R_BCM1480_HSP_TX_VIS_CMDQ_0 0x0000040840
-#define R_BCM1480_HSP_TX_VIS_CMDQ_1 0x0000040848
-#define R_BCM1480_HSP_TX_VIS_CMDQ_2 0x0000040850
-#define R_BCM1480_HSP_TX_RAM_READCTL 0x0000040860
-#define R_BCM1480_HSP_TX_RAM_READWINDOW 0x0000040868
-#define R_BCM1480_HSP_TX_RF_READCTL 0x0000040870
-#define R_BCM1480_HSP_TX_RF_READWINDOW 0x0000040878
+#define R_BCM1480_HSP_TX_VIS_CMDQ_0 0x0000040840
+#define R_BCM1480_HSP_TX_VIS_CMDQ_1 0x0000040848
+#define R_BCM1480_HSP_TX_VIS_CMDQ_2 0x0000040850
+#define R_BCM1480_HSP_TX_RAM_READCTL 0x0000040860
+#define R_BCM1480_HSP_TX_RAM_READWINDOW 0x0000040868
+#define R_BCM1480_HSP_TX_RF_READCTL 0x0000040870
+#define R_BCM1480_HSP_TX_RF_READWINDOW 0x0000040878
#define R_BCM1480_HSP_TX_SPI4_PORT_INT_STATUS 0x0000040880
#define R_BCM1480_HSP_TX_SPI4_PORT_INT_EN 0x0000040888
#define R_BCM1480_HSP_TX_NEXT_ADDR_BASE 0x000040400
-#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
+#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
@@ -843,60 +843,60 @@
* Physical Address Map (Table 10 and Figure 7)
********************************************************************* */
-#define A_BCM1480_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
-#define A_BCM1480_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
-#define A_BCM1480_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
-#define A_BCM1480_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
-#define A_BCM1480_PHYS_GENBUS _SB_MAKE64(0x0010090000)
-#define A_BCM1480_PHYS_GENBUS_END _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES _SB_MAKE64(0x0029000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES _SB_MAKE64(0x002C000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES _SB_MAKE64(0x002E000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES _SB_MAKE64(0x002F000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES _SB_MAKE64(0x0030000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES _SB_MAKE64(0x0040000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS _SB_MAKE64(0x0060000000)
-#define A_BCM1480_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
-#define A_BCM1480_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS _SB_MAKE64(0x00A8000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS _SB_MAKE64(0x00A9000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS _SB_MAKE64(0x00AC000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS _SB_MAKE64(0x00AE000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS _SB_MAKE64(0x00AF000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS _SB_MAKE64(0x00B0000000)
-#define A_BCM1480_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
-#define A_BCM1480_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
-#define A_BCM1480_PHYS_HS_SUBSYS _SB_MAKE64(0x00DF000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
-#define A_BCM1480_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
-#define A_BCM1480_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
-#define A_BCM1480_PHYS_PCI_UPPER _SB_MAKE64(0x1000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES _SB_MAKE64(0x2000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS _SB_MAKE64(0x3000000000)
-#define A_BCM1480_PHYS_HT_NODE_ALIAS _SB_MAKE64(0x4000000000)
-#define A_BCM1480_PHYS_HT_FULLACCESS _SB_MAKE64(0xF000000000)
+#define A_BCM1480_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_BCM1480_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_BCM1480_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_BCM1480_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_BCM1480_PHYS_GENBUS _SB_MAKE64(0x0010090000)
+#define A_BCM1480_PHYS_GENBUS_END _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES _SB_MAKE64(0x0029000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES _SB_MAKE64(0x002C000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES _SB_MAKE64(0x002E000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES _SB_MAKE64(0x002F000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES _SB_MAKE64(0x0030000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES _SB_MAKE64(0x0040000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS _SB_MAKE64(0x0060000000)
+#define A_BCM1480_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_BCM1480_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS _SB_MAKE64(0x00A8000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS _SB_MAKE64(0x00A9000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS _SB_MAKE64(0x00AC000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS _SB_MAKE64(0x00AE000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS _SB_MAKE64(0x00AF000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS _SB_MAKE64(0x00B0000000)
+#define A_BCM1480_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_BCM1480_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_BCM1480_PHYS_HS_SUBSYS _SB_MAKE64(0x00DF000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_BCM1480_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_BCM1480_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_BCM1480_PHYS_PCI_UPPER _SB_MAKE64(0x1000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES _SB_MAKE64(0x2000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS _SB_MAKE64(0x3000000000)
+#define A_BCM1480_PHYS_HT_NODE_ALIAS _SB_MAKE64(0x4000000000)
+#define A_BCM1480_PHYS_HT_FULLACCESS _SB_MAKE64(0xF000000000)
/* *********************************************************************
* L2 Cache as RAM (Table 54)
********************************************************************* */
-#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
-#define BCM1480_PHYS_L2CACHE_NUM_WAYS 8
-#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000100000)
-#define A_BCM1480_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0300000)
-#define A_BCM1480_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D0320000)
-#define A_BCM1480_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D0340000)
-#define A_BCM1480_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D0360000)
-#define A_BCM1480_PHYS_L2CACHE_WAY4 _SB_MAKE64(0x00D0380000)
-#define A_BCM1480_PHYS_L2CACHE_WAY5 _SB_MAKE64(0x00D03A0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY6 _SB_MAKE64(0x00D03C0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY7 _SB_MAKE64(0x00D03E0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define BCM1480_PHYS_L2CACHE_NUM_WAYS 8
+#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000100000)
+#define A_BCM1480_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0300000)
+#define A_BCM1480_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D0320000)
+#define A_BCM1480_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D0340000)
+#define A_BCM1480_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D0360000)
+#define A_BCM1480_PHYS_L2CACHE_WAY4 _SB_MAKE64(0x00D0380000)
+#define A_BCM1480_PHYS_L2CACHE_WAY5 _SB_MAKE64(0x00D03A0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY6 _SB_MAKE64(0x00D03C0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY7 _SB_MAKE64(0x00D03E0000)
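
For illustration only (not part of the patch): a quick consistency sketch for the L2-as-RAM way layout above. It assumes only that _SB_MAKE64() yields a plain 64-bit constant, so local uint64_t stand-ins are used instead of the sb1250_defs.h helper.

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the _SB_MAKE64() values above, treated as plain 64-bit constants. */
static const uint64_t way_size   = 0x0000020000ULL;  /* A_BCM1480_PHYS_L2CACHE_WAY_SIZE   */
static const uint64_t total_size = 0x0000100000ULL;  /* A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE */
static const uint64_t way0       = 0x00D0300000ULL;  /* A_BCM1480_PHYS_L2CACHE_WAY0       */
static const uint64_t way1       = 0x00D0320000ULL;  /* A_BCM1480_PHYS_L2CACHE_WAY1       */
static const uint64_t way7       = 0x00D03E0000ULL;  /* A_BCM1480_PHYS_L2CACHE_WAY7       */

int main(void)
{
	/* Eight ways of 128 KiB each give the 1 MiB total. */
	assert(8 * way_size == total_size);
	/* Way N starts at WAY0 + N * WAY_SIZE. */
	assert(way0 + 1 * way_size == way1);
	assert(way0 + 7 * way_size == way7);
	return 0;
}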
#endif /* _BCM1480_REGS_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_scd.h b/arch/mips/include/asm/sibyte/bcm1480_scd.h
index 2af3706b9648..8a1e2b05a626 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_scd.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_scd.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1280/BCM1400 Board Support Package
*
- * SCD Constants and Macros File: bcm1480_scd.h
+ * SCD Constants and Macros File: bcm1480_scd.h
*
* This module contains constants and macros useful for
* manipulating the System Control and Debug module.
@@ -74,11 +74,11 @@
* New part definitions
*/
-#define K_SYS_PART_BCM1480 0x1406
-#define K_SYS_PART_BCM1280 0x1206
-#define K_SYS_PART_BCM1455 0x1407
-#define K_SYS_PART_BCM1255 0x1257
-#define K_SYS_PART_BCM1158 0x1156
+#define K_SYS_PART_BCM1480 0x1406
+#define K_SYS_PART_BCM1280 0x1206
+#define K_SYS_PART_BCM1455 0x1407
+#define K_SYS_PART_BCM1255 0x1257
+#define K_SYS_PART_BCM1158 0x1156
/*
* Manufacturing Information Register (Table 14)
@@ -91,73 +91,73 @@
* Entire register is different from 1250, all new constants below
*/
-#define M_BCM1480_SYS_RESERVED0 _SB_MAKEMASK1(0)
-#define M_BCM1480_SYS_HT_MINRSTCNT _SB_MAKEMASK1(1)
-#define M_BCM1480_SYS_RESERVED2 _SB_MAKEMASK1(2)
-#define M_BCM1480_SYS_RESERVED3 _SB_MAKEMASK1(3)
-#define M_BCM1480_SYS_RESERVED4 _SB_MAKEMASK1(4)
-#define M_BCM1480_SYS_IOB_DIV _SB_MAKEMASK1(5)
-
-#define S_BCM1480_SYS_PLL_DIV _SB_MAKE64(6)
-#define M_BCM1480_SYS_PLL_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
-#define V_BCM1480_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
-#define G_BCM1480_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
-
-#define S_BCM1480_SYS_SW_DIV _SB_MAKE64(11)
-#define M_BCM1480_SYS_SW_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
-#define V_BCM1480_SYS_SW_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
-#define G_BCM1480_SYS_SW_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
-
-#define M_BCM1480_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
-#define M_BCM1480_SYS_DUART1_ENABLE _SB_MAKEMASK1(17)
-
-#define S_BCM1480_SYS_BOOT_MODE _SB_MAKE64(18)
-#define M_BCM1480_SYS_BOOT_MODE _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
-#define V_BCM1480_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
-#define G_BCM1480_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
-#define K_BCM1480_SYS_BOOT_MODE_ROM32 0
-#define K_BCM1480_SYS_BOOT_MODE_ROM8 1
+#define M_BCM1480_SYS_RESERVED0 _SB_MAKEMASK1(0)
+#define M_BCM1480_SYS_HT_MINRSTCNT _SB_MAKEMASK1(1)
+#define M_BCM1480_SYS_RESERVED2 _SB_MAKEMASK1(2)
+#define M_BCM1480_SYS_RESERVED3 _SB_MAKEMASK1(3)
+#define M_BCM1480_SYS_RESERVED4 _SB_MAKEMASK1(4)
+#define M_BCM1480_SYS_IOB_DIV _SB_MAKEMASK1(5)
+
+#define S_BCM1480_SYS_PLL_DIV _SB_MAKE64(6)
+#define M_BCM1480_SYS_PLL_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
+#define V_BCM1480_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
+#define G_BCM1480_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
+
+#define S_BCM1480_SYS_SW_DIV _SB_MAKE64(11)
+#define M_BCM1480_SYS_SW_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
+#define V_BCM1480_SYS_SW_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
+#define G_BCM1480_SYS_SW_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
+
+#define M_BCM1480_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+#define M_BCM1480_SYS_DUART1_ENABLE _SB_MAKEMASK1(17)
+
+#define S_BCM1480_SYS_BOOT_MODE _SB_MAKE64(18)
+#define M_BCM1480_SYS_BOOT_MODE _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
+#define V_BCM1480_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
+#define G_BCM1480_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
+#define K_BCM1480_SYS_BOOT_MODE_ROM32 0
+#define K_BCM1480_SYS_BOOT_MODE_ROM8 1
#define K_BCM1480_SYS_BOOT_MODE_SMBUS_SMALL 2
#define K_BCM1480_SYS_BOOT_MODE_SMBUS_BIG 3
-#define M_BCM1480_SYS_BOOT_MODE_SMBUS _SB_MAKEMASK1(19)
-
-#define M_BCM1480_SYS_PCI_HOST _SB_MAKEMASK1(20)
-#define M_BCM1480_SYS_PCI_ARBITER _SB_MAKEMASK1(21)
-#define M_BCM1480_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
-#define M_BCM1480_SYS_GENCLK_EN _SB_MAKEMASK1(23)
-#define M_BCM1480_SYS_GEN_PARITY_EN _SB_MAKEMASK1(24)
-#define M_BCM1480_SYS_RESERVED25 _SB_MAKEMASK1(25)
-
-#define S_BCM1480_SYS_CONFIG 26
-#define M_BCM1480_SYS_CONFIG _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
-#define V_BCM1480_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
-#define G_BCM1480_SYS_CONFIG(x) _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
-
-#define M_BCM1480_SYS_RESERVED32 _SB_MAKEMASK(32, 15)
-
-#define S_BCM1480_SYS_NODEID 47
-#define M_BCM1480_SYS_NODEID _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
-#define V_BCM1480_SYS_NODEID(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
-#define G_BCM1480_SYS_NODEID(x) _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
-
-#define M_BCM1480_SYS_CCNUMA_EN _SB_MAKEMASK1(51)
-#define M_BCM1480_SYS_CPU_RESET_0 _SB_MAKEMASK1(52)
-#define M_BCM1480_SYS_CPU_RESET_1 _SB_MAKEMASK1(53)
-#define M_BCM1480_SYS_CPU_RESET_2 _SB_MAKEMASK1(54)
-#define M_BCM1480_SYS_CPU_RESET_3 _SB_MAKEMASK1(55)
-#define S_BCM1480_SYS_DISABLECPU0 56
-#define M_BCM1480_SYS_DISABLECPU0 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
-#define S_BCM1480_SYS_DISABLECPU1 57
-#define M_BCM1480_SYS_DISABLECPU1 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
-#define S_BCM1480_SYS_DISABLECPU2 58
-#define M_BCM1480_SYS_DISABLECPU2 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
-#define S_BCM1480_SYS_DISABLECPU3 59
-#define M_BCM1480_SYS_DISABLECPU3 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
-
-#define M_BCM1480_SYS_SB_SOFTRES _SB_MAKEMASK1(60)
-#define M_BCM1480_SYS_EXT_RESET _SB_MAKEMASK1(61)
-#define M_BCM1480_SYS_SYSTEM_RESET _SB_MAKEMASK1(62)
-#define M_BCM1480_SYS_SW_FLAG _SB_MAKEMASK1(63)
+#define M_BCM1480_SYS_BOOT_MODE_SMBUS _SB_MAKEMASK1(19)
+
+#define M_BCM1480_SYS_PCI_HOST _SB_MAKEMASK1(20)
+#define M_BCM1480_SYS_PCI_ARBITER _SB_MAKEMASK1(21)
+#define M_BCM1480_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_BCM1480_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_BCM1480_SYS_GEN_PARITY_EN _SB_MAKEMASK1(24)
+#define M_BCM1480_SYS_RESERVED25 _SB_MAKEMASK1(25)
+
+#define S_BCM1480_SYS_CONFIG 26
+#define M_BCM1480_SYS_CONFIG _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
+#define V_BCM1480_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
+#define G_BCM1480_SYS_CONFIG(x) _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
+
+#define M_BCM1480_SYS_RESERVED32 _SB_MAKEMASK(32, 15)
+
+#define S_BCM1480_SYS_NODEID 47
+#define M_BCM1480_SYS_NODEID _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
+#define V_BCM1480_SYS_NODEID(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
+#define G_BCM1480_SYS_NODEID(x) _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
+
+#define M_BCM1480_SYS_CCNUMA_EN _SB_MAKEMASK1(51)
+#define M_BCM1480_SYS_CPU_RESET_0 _SB_MAKEMASK1(52)
+#define M_BCM1480_SYS_CPU_RESET_1 _SB_MAKEMASK1(53)
+#define M_BCM1480_SYS_CPU_RESET_2 _SB_MAKEMASK1(54)
+#define M_BCM1480_SYS_CPU_RESET_3 _SB_MAKEMASK1(55)
+#define S_BCM1480_SYS_DISABLECPU0 56
+#define M_BCM1480_SYS_DISABLECPU0 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
+#define S_BCM1480_SYS_DISABLECPU1 57
+#define M_BCM1480_SYS_DISABLECPU1 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
+#define S_BCM1480_SYS_DISABLECPU2 58
+#define M_BCM1480_SYS_DISABLECPU2 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
+#define S_BCM1480_SYS_DISABLECPU3 59
+#define M_BCM1480_SYS_DISABLECPU3 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
+
+#define M_BCM1480_SYS_SB_SOFTRES _SB_MAKEMASK1(60)
+#define M_BCM1480_SYS_EXT_RESET _SB_MAKEMASK1(61)
+#define M_BCM1480_SYS_SYSTEM_RESET _SB_MAKEMASK1(62)
+#define M_BCM1480_SYS_SW_FLAG _SB_MAKEMASK1(63)
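
For illustration only (not part of the patch): the S_/M_/V_/G_ quadruplets above follow the usual SiByte shift/mask pattern. The sketch below uses local stand-ins for the _SB_MAKEMASK/_SB_MAKEVALUE/_SB_GETVALUE helpers, whose exact definitions live in sb1250_defs.h and are assumed here to be plain shift-and-mask, to show how a PLL divider and node ID would be pulled out of a raw system-config value. Only the field shifts and widths are taken from this hunk.

#include <assert.h>
#include <stdint.h>

/* Local stand-ins (assumed behaviour) for the sb1250_defs.h helpers. */
#define MAKEMASK(width, shift)   ((((uint64_t)1 << (width)) - 1) << (shift))
#define MAKEVALUE(v, shift)      ((uint64_t)(v) << (shift))
#define GETVALUE(x, shift, mask) (((uint64_t)(x) & (mask)) >> (shift))

/* Field positions copied from the defines above. */
#define S_SYS_PLL_DIV 6                       /* 5-bit field */
#define S_SYS_NODEID  47                      /* 4-bit field */
#define M_SYS_PLL_DIV MAKEMASK(5, S_SYS_PLL_DIV)
#define M_SYS_NODEID  MAKEMASK(4, S_SYS_NODEID)

int main(void)
{
	/* Build a synthetic system-config value: PLL divider 12, node ID 3. */
	uint64_t syscfg = MAKEVALUE(12, S_SYS_PLL_DIV) | MAKEVALUE(3, S_SYS_NODEID);

	/* Extract the fields back, mirroring G_BCM1480_SYS_PLL_DIV()/G_BCM1480_SYS_NODEID(). */
	assert(GETVALUE(syscfg, S_SYS_PLL_DIV, M_SYS_PLL_DIV) == 12);
	assert(GETVALUE(syscfg, S_SYS_NODEID,  M_SYS_NODEID)  == 3);
	return 0;
}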
/*
* Scratch Register (Table 16)
@@ -193,23 +193,23 @@
* Registers: SCD_WDOG_CFG_x
*/
-#define M_BCM1480_SCD_WDOG_ENABLE _SB_MAKEMASK1(0)
+#define M_BCM1480_SCD_WDOG_ENABLE _SB_MAKEMASK1(0)
-#define S_BCM1480_SCD_WDOG_RESET_TYPE 2
-#define M_BCM1480_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
+#define S_BCM1480_SCD_WDOG_RESET_TYPE 2
+#define M_BCM1480_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
#define V_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE)
#define G_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE, M_BCM1480_SCD_WDOG_RESET_TYPE)
-#define K_BCM1480_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
-#define K_BCM1480_SCD_WDOG_RESET_SOFT 1
-#define K_BCM1480_SCD_WDOG_RESET_CPU0 3
-#define K_BCM1480_SCD_WDOG_RESET_CPU1 5
-#define K_BCM1480_SCD_WDOG_RESET_CPU2 9
-#define K_BCM1480_SCD_WDOG_RESET_CPU3 17
+#define K_BCM1480_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_BCM1480_SCD_WDOG_RESET_SOFT 1
+#define K_BCM1480_SCD_WDOG_RESET_CPU0 3
+#define K_BCM1480_SCD_WDOG_RESET_CPU1 5
+#define K_BCM1480_SCD_WDOG_RESET_CPU2 9
+#define K_BCM1480_SCD_WDOG_RESET_CPU3 17
#define K_BCM1480_SCD_WDOG_RESET_ALL_CPUS 31
-#define M_BCM1480_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(8)
+#define M_BCM1480_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(8)
/*
* General Timer Initial Count Registers (Table 26)
@@ -243,32 +243,32 @@
* The clear/enable bits are in different locations on the 1250 and 1480.
*/
-#define S_SPC_CFG_SRC4 32
-#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
-#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
-#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
+#define S_SPC_CFG_SRC4 32
+#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
+#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
+#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
-#define S_SPC_CFG_SRC5 40
-#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
-#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
-#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
+#define S_SPC_CFG_SRC5 40
+#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
+#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
+#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
-#define S_SPC_CFG_SRC6 48
-#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
-#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
-#define G_SPC_CFG_SRC6(x) _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
+#define S_SPC_CFG_SRC6 48
+#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
+#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
+#define G_SPC_CFG_SRC6(x) _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
-#define S_SPC_CFG_SRC7 56
-#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
-#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
-#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
+#define S_SPC_CFG_SRC7 56
+#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
+#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
+#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
/*
* System Performance Counter Control Register (Table 32)
* Register: PERF_CNT_CFG_1
* BCM1480 specific
*/
-#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0)
+#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0)
#define M_BCM1480_SPC_CFG_ENABLE _SB_MAKEMASK1(1)
#if SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_SPC_CFG_CLEAR M_BCM1480_SPC_CFG_CLEAR
@@ -280,12 +280,12 @@
* Registers: PERF_CNT_x
*/
-#define S_BCM1480_SPC_CNT_COUNT 0
-#define M_BCM1480_SPC_CNT_COUNT _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
-#define V_BCM1480_SPC_CNT_COUNT(x) _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
-#define G_BCM1480_SPC_CNT_COUNT(x) _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
+#define S_BCM1480_SPC_CNT_COUNT 0
+#define M_BCM1480_SPC_CNT_COUNT _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
+#define V_BCM1480_SPC_CNT_COUNT(x) _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
+#define G_BCM1480_SPC_CNT_COUNT(x) _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
-#define M_BCM1480_SPC_CNT_OFLOW _SB_MAKEMASK1(40)
+#define M_BCM1480_SPC_CNT_OFLOW _SB_MAKEMASK1(40)
/*
@@ -325,45 +325,45 @@
#define M_BCM1480_ATRAP_INDEX _SB_MAKEMASK(4, 0)
#define M_BCM1480_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
-#define S_BCM1480_ATRAP_CFG_CNT 0
-#define M_BCM1480_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
-#define V_BCM1480_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
-#define G_BCM1480_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
+#define S_BCM1480_ATRAP_CFG_CNT 0
+#define M_BCM1480_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
+#define V_BCM1480_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
+#define G_BCM1480_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
#define M_BCM1480_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
-#define M_BCM1480_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
-#define M_BCM1480_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_BCM1480_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_BCM1480_ATRAP_CFG_INV _SB_MAKEMASK1(5)
#define M_BCM1480_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
#define M_BCM1480_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
-#define S_BCM1480_ATRAP_CFG_AGENTID 8
-#define M_BCM1480_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
-#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
-#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
+#define S_BCM1480_ATRAP_CFG_AGENTID 8
+#define M_BCM1480_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
+#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
+#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
-#define K_BCM1480_BUS_AGENT_CPU0 0
-#define K_BCM1480_BUS_AGENT_CPU1 1
-#define K_BCM1480_BUS_AGENT_NC 2
-#define K_BCM1480_BUS_AGENT_IOB 3
-#define K_BCM1480_BUS_AGENT_SCD 4
-#define K_BCM1480_BUS_AGENT_L2C 6
-#define K_BCM1480_BUS_AGENT_MC 7
-#define K_BCM1480_BUS_AGENT_CPU2 8
-#define K_BCM1480_BUS_AGENT_CPU3 9
-#define K_BCM1480_BUS_AGENT_PM 10
+#define K_BCM1480_BUS_AGENT_CPU0 0
+#define K_BCM1480_BUS_AGENT_CPU1 1
+#define K_BCM1480_BUS_AGENT_NC 2
+#define K_BCM1480_BUS_AGENT_IOB 3
+#define K_BCM1480_BUS_AGENT_SCD 4
+#define K_BCM1480_BUS_AGENT_L2C 6
+#define K_BCM1480_BUS_AGENT_MC 7
+#define K_BCM1480_BUS_AGENT_CPU2 8
+#define K_BCM1480_BUS_AGENT_CPU3 9
+#define K_BCM1480_BUS_AGENT_PM 10
-#define S_BCM1480_ATRAP_CFG_CATTR 12
-#define M_BCM1480_ATRAP_CFG_CATTR _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
-#define V_BCM1480_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
-#define G_BCM1480_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
+#define S_BCM1480_ATRAP_CFG_CATTR 12
+#define M_BCM1480_ATRAP_CFG_CATTR _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
+#define V_BCM1480_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
+#define G_BCM1480_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
#define K_BCM1480_ATRAP_CFG_CATTR_IGNORE 0
-#define K_BCM1480_ATRAP_CFG_CATTR_UNC 1
+#define K_BCM1480_ATRAP_CFG_CATTR_UNC 1
#define K_BCM1480_ATRAP_CFG_CATTR_NONCOH 2
#define K_BCM1480_ATRAP_CFG_CATTR_COHERENT 3
-#define M_BCM1480_ATRAP_CFG_CATTRINV _SB_MAKEMASK1(14)
+#define M_BCM1480_ATRAP_CFG_CATTRINV _SB_MAKEMASK1(14)
/*
@@ -381,10 +381,10 @@
#define M_BCM1480_SCD_TRSEQ_TID_MATCH_EN _SB_MAKEMASK1(25)
-#define S_BCM1480_SCD_TRSEQ_SWFUNC 26
-#define M_BCM1480_SCD_TRSEQ_SWFUNC _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define V_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define G_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
+#define S_BCM1480_SCD_TRSEQ_SWFUNC 26
+#define M_BCM1480_SCD_TRSEQ_SWFUNC _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define V_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define G_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
/*
* Trace Control Register (Table 49)
@@ -394,13 +394,13 @@
* are defined below.
*/
-#define S_BCM1480_SCD_TRACE_CFG_MODE 16
-#define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define G_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
+#define S_BCM1480_SCD_TRACE_CFG_MODE 16
+#define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define G_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
#define K_BCM1480_SCD_TRACE_CFG_MODE_BLOCKERS 0
-#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
+#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
#define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID 2
#endif /* _BCM1480_SCD_H */
diff --git a/arch/mips/include/asm/sibyte/bigsur.h b/arch/mips/include/asm/sibyte/bigsur.h
index 2d1a26d3436a..ae29dae41554 100644
--- a/arch/mips/include/asm/sibyte/bigsur.h
+++ b/arch/mips/include/asm/sibyte/bigsur.h
@@ -24,25 +24,25 @@
#ifdef CONFIG_SIBYTE_BIGSUR
#define SIBYTE_BOARD_NAME "BCM91x80A/B (BigSur)"
#define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#endif
/* Generic bus chip selects */
-#define LEDS_CS 3
-#define LEDS_PHYS 0x100a0000
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
#ifdef SIBYTE_HAVE_IDE
-#define IDE_CS 4
-#define IDE_PHYS 0x100b0000
-#define K_GPIO_GB_IDE 4
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
#ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS 6
-#define PCMCIA_PHYS 0x11000000
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
#define K_GPIO_PC_READY 9
-#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
#endif
#endif /* __ASM_SIBYTE_BIGSUR_H */
diff --git a/arch/mips/include/asm/sibyte/carmel.h b/arch/mips/include/asm/sibyte/carmel.h
index 11cad71323e8..793edba73aa4 100644
--- a/arch/mips/include/asm/sibyte/carmel.h
+++ b/arch/mips/include/asm/sibyte/carmel.h
@@ -23,35 +23,35 @@
#define SIBYTE_BOARD_NAME "Carmel"
-#define GPIO_PHY_INTERRUPT 2
-#define GPIO_NONMASKABLE_INT 3
-#define GPIO_CF_INSERTED 6
-#define GPIO_MONTEREY_RESET 7
-#define GPIO_QUADUART_INT 8
-#define GPIO_CF_INT 9
-#define GPIO_FPGA_CCLK 10
-#define GPIO_FPGA_DOUT 11
-#define GPIO_FPGA_DIN 12
-#define GPIO_FPGA_PGM 13
-#define GPIO_FPGA_DONE 14
-#define GPIO_FPGA_INIT 15
+#define GPIO_PHY_INTERRUPT 2
+#define GPIO_NONMASKABLE_INT 3
+#define GPIO_CF_INSERTED 6
+#define GPIO_MONTEREY_RESET 7
+#define GPIO_QUADUART_INT 8
+#define GPIO_CF_INT 9
+#define GPIO_FPGA_CCLK 10
+#define GPIO_FPGA_DOUT 11
+#define GPIO_FPGA_DIN 12
+#define GPIO_FPGA_PGM 13
+#define GPIO_FPGA_DONE 14
+#define GPIO_FPGA_INIT 15
-#define LEDS_CS 2
-#define LEDS_PHYS 0x100C0000
-#define MLEDS_CS 3
-#define MLEDS_PHYS 0x100A0000
-#define UART_CS 4
-#define UART_PHYS 0x100D0000
-#define ARAVALI_CS 5
-#define ARAVALI_PHYS 0x11000000
-#define IDE_CS 6
-#define IDE_PHYS 0x100B0000
-#define ARAVALI2_CS 7
-#define ARAVALI2_PHYS 0x100E0000
+#define LEDS_CS 2
+#define LEDS_PHYS 0x100C0000
+#define MLEDS_CS 3
+#define MLEDS_PHYS 0x100A0000
+#define UART_CS 4
+#define UART_PHYS 0x100D0000
+#define ARAVALI_CS 5
+#define ARAVALI_PHYS 0x11000000
+#define IDE_CS 6
+#define IDE_PHYS 0x100B0000
+#define ARAVALI2_CS 7
+#define ARAVALI2_PHYS 0x100E0000
#if defined(CONFIG_SIBYTE_CARMEL)
-#define K_GPIO_GB_IDE 9
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define K_GPIO_GB_IDE 9
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250.h b/arch/mips/include/asm/sibyte/sb1250.h
index 80c1a052662a..d45dff9753d3 100644
--- a/arch/mips/include/asm/sibyte/sb1250.h
+++ b/arch/mips/include/asm/sibyte/sb1250.h
@@ -27,8 +27,8 @@
#define SB1250_NR_IRQS 64
-#define BCM1480_NR_IRQS 128
-#define BCM1480_NR_IRQS_HALF 64
+#define BCM1480_NR_IRQS 128
+#define BCM1480_NR_IRQS_HALF 64
#define SB1250_DUART_MINOR_BASE 64
diff --git a/arch/mips/include/asm/sibyte/sb1250_defs.h b/arch/mips/include/asm/sibyte/sb1250_defs.h
index 09365f9111fa..4364eb8d22ab 100644
--- a/arch/mips/include/asm/sibyte/sb1250_defs.h
+++ b/arch/mips/include/asm/sibyte/sb1250_defs.h
@@ -51,15 +51,15 @@
*
* Use like:
*
- * #define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_112x_PASS1
+ * #define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_112x_PASS1
*
* Generate defines only for that revision of chip.
*
- * #if SIBYTE_HDR_FEATURE(chip,pass)
+ * #if SIBYTE_HDR_FEATURE(chip,pass)
*
* True if header features for that revision or later of
- * that particular chip type are enabled in SIBYTE_HDR_FEATURES.
- * (Use this to bracket #defines for features present in a given
+ * that particular chip type are enabled in SIBYTE_HDR_FEATURES.
+ * (Use this to bracket #defines for features present in a given
* revision and later.)
*
* Note that there is no implied ordering between chip types.
@@ -69,12 +69,12 @@
* SIBYTE_HDR_FEATURE(112x, PASS1) is OK, but
* SIBYTE_HDR_FEATURE(1120, pass1) is not (for two reasons).
*
- * #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
+ * #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
*
* Same as SIBYTE_HDR_FEATURE, but true for the named revision
* and earlier revisions of the named chip type.
*
- * #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
+ * #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
*
* Same as SIBYTE_HDR_FEATURE, but only true for the named
* revision of the named chip type. (Note that this CANNOT
@@ -82,7 +82,7 @@
* particular chip/revision. It will be true any time this
* chip/revision is included in SIBYTE_HDR_FEATURES.)
*
- * #if SIBYTE_HDR_FEATURE_CHIP(chip)
+ * #if SIBYTE_HDR_FEATURE_CHIP(chip)
*
* True if header features for (any revision of) that chip type
* are enabled in SIBYTE_HDR_FEATURES. (Use this to bracket
@@ -95,47 +95,47 @@
* ordering, so be careful when adding support for new minor revs.
********************************************************************* */
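
As a minimal sketch of the convention the comment above describes (illustrative only, not part of this patch): a platform source file would pin SIBYTE_HDR_FEATURES before pulling in the SiByte headers, then bracket revision-specific constants with the feature-test macros. The guarded constant below is hypothetical and only names in the headers themselves are assumed to exist.

/* Illustrative sketch, not part of this patch: expose only BCM1250
 * pass2 definitions from the SiByte headers, then guard a constant
 * intended for that revision and later. */
#define SIBYTE_HDR_FEATURES	SIBYTE_HDR_FMASK_1250_PASS2
#include <asm/sibyte/sb1250_defs.h>

#if SIBYTE_HDR_FEATURE(1250, PASS2)
#define K_EXAMPLE_PASS2_FEATURE	1	/* hypothetical constant, for illustration only */
#endif
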
-#define SIBYTE_HDR_FMASK_1250_ALL 0x000000ff
-#define SIBYTE_HDR_FMASK_1250_PASS1 0x00000001
-#define SIBYTE_HDR_FMASK_1250_PASS2 0x00000002
-#define SIBYTE_HDR_FMASK_1250_PASS3 0x00000004
+#define SIBYTE_HDR_FMASK_1250_ALL 0x000000ff
+#define SIBYTE_HDR_FMASK_1250_PASS1 0x00000001
+#define SIBYTE_HDR_FMASK_1250_PASS2 0x00000002
+#define SIBYTE_HDR_FMASK_1250_PASS3 0x00000004
-#define SIBYTE_HDR_FMASK_112x_ALL 0x00000f00
-#define SIBYTE_HDR_FMASK_112x_PASS1 0x00000100
+#define SIBYTE_HDR_FMASK_112x_ALL 0x00000f00
+#define SIBYTE_HDR_FMASK_112x_PASS1 0x00000100
#define SIBYTE_HDR_FMASK_1480_ALL 0x0000f000
#define SIBYTE_HDR_FMASK_1480_PASS1 0x00001000
#define SIBYTE_HDR_FMASK_1480_PASS2 0x00002000
-/* Bit mask for chip/revision. (use _ALL for all revisions of a chip). */
-#define SIBYTE_HDR_FMASK(chip, pass) \
+/* Bit mask for chip/revision. (use _ALL for all revisions of a chip). */
+#define SIBYTE_HDR_FMASK(chip, pass) \
(SIBYTE_HDR_FMASK_ ## chip ## _ ## pass)
-#define SIBYTE_HDR_FMASK_ALLREVS(chip) \
+#define SIBYTE_HDR_FMASK_ALLREVS(chip) \
(SIBYTE_HDR_FMASK_ ## chip ## _ALL)
/* Default constant value for all chips, all revisions */
-#define SIBYTE_HDR_FMASK_ALL \
+#define SIBYTE_HDR_FMASK_ALL \
(SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL \
| SIBYTE_HDR_FMASK_1480_ALL)
/* This one is used for the "original" BCM1250/BCM112x chips. We use this
to weed out constants and macros that do not exist on later chips like
- the BCM1480 */
+ the BCM1480 */
#define SIBYTE_HDR_FMASK_1250_112x_ALL \
(SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL)
#define SIBYTE_HDR_FMASK_1250_112x SIBYTE_HDR_FMASK_1250_112x_ALL
#ifndef SIBYTE_HDR_FEATURES
-#define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_ALL
+#define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_ALL
#endif
/* Bit mask for revisions of chip exclusively before the named revision. */
-#define SIBYTE_HDR_FMASK_BEFORE(chip, pass) \
+#define SIBYTE_HDR_FMASK_BEFORE(chip, pass) \
((SIBYTE_HDR_FMASK(chip, pass) - 1) & SIBYTE_HDR_FMASK_ALLREVS(chip))
-/* Bit mask for revisions of chip exclusively after the named revision. */
-#define SIBYTE_HDR_FMASK_AFTER(chip, pass) \
+/* Bit mask for revisions of chip exclusively after the named revision. */
+#define SIBYTE_HDR_FMASK_AFTER(chip, pass) \
(~(SIBYTE_HDR_FMASK(chip, pass) \
| (SIBYTE_HDR_FMASK(chip, pass) - 1)) & SIBYTE_HDR_FMASK_ALLREVS(chip))
@@ -168,38 +168,38 @@
/* *********************************************************************
* Naming schemes for constants in these files:
*
- * M_xxx MASK constant (identifies bits in a register).
- * For multi-bit fields, all bits in the field will
- * be set.
+ * M_xxx MASK constant (identifies bits in a register).
+ * For multi-bit fields, all bits in the field will
+ * be set.
*
- * K_xxx "Code" constant (value for data in a multi-bit
- * field). The value is right justified.
+ * K_xxx "Code" constant (value for data in a multi-bit
+ * field). The value is right justified.
*
- * V_xxx "Value" constant. This is the same as the
- * corresponding "K_xxx" constant, except it is
- * shifted to the correct position in the register.
+ * V_xxx "Value" constant. This is the same as the
+ * corresponding "K_xxx" constant, except it is
+ * shifted to the correct position in the register.
*
- * S_xxx SHIFT constant. This is the number of bits that
- * a field value (code) needs to be shifted
- * (towards the left) to put the value in the right
- * position for the register.
+ * S_xxx SHIFT constant. This is the number of bits that
+ * a field value (code) needs to be shifted
+ * (towards the left) to put the value in the right
+ * position for the register.
*
- * A_xxx ADDRESS constant. This will be a physical
- * address. Use the PHYS_TO_K1 macro to generate
- * a K1SEG address.
+ * A_xxx ADDRESS constant. This will be a physical
+ * address. Use the PHYS_TO_K1 macro to generate
+ * a K1SEG address.
*
- * R_xxx RELATIVE offset constant. This is an offset from
- * an A_xxx constant (usually the first register in
- * a group).
+ * R_xxx RELATIVE offset constant. This is an offset from
+ * an A_xxx constant (usually the first register in
+ * a group).
*
- * G_xxx(X) GET value. This macro obtains a multi-bit field
- * from a register, masks it, and shifts it to
- * the bottom of the register (retrieving a K_xxx
- * value, for example).
+ * G_xxx(X) GET value. This macro obtains a multi-bit field
+ * from a register, masks it, and shifts it to
+ * the bottom of the register (retrieving a K_xxx
+ * value, for example).
*
- * V_xxx(X) VALUE. This macro computes the value of a
- * K_xxx constant shifted to the correct position
- * in the register.
+ * V_xxx(X) VALUE. This macro computes the value of a
+ * K_xxx constant shifted to the correct position
+ * in the register.
********************************************************************* */
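
As a concrete sketch of how the S_/M_/V_/G_ constants in this naming scheme combine for one multi-bit field, here is a hypothetical pair of helpers built on the DMA ring-size field from sb1250_dma.h (diffed just below); the helper names are invented for illustration and assume a MIPS kernel build where these headers are on the include path.

/* Hypothetical helpers, not part of this patch: insert and extract the
 * DMA ring-size field using the convention described above. */
#include <linux/types.h>
#include <asm/sibyte/sb1250_dma.h>

static inline u64 example_set_ringsz(u64 cfg, u64 ringsz)
{
	cfg &= ~M_DMA_RINGSZ;		/* clear the field (M_xxx mask) */
	cfg |= V_DMA_RINGSZ(ringsz);	/* shift the value into place (V_xxx) */
	return cfg;
}

static inline u64 example_get_ringsz(u64 cfg)
{
	return G_DMA_RINGSZ(cfg);	/* mask and right-justify (G_xxx) */
}
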
diff --git a/arch/mips/include/asm/sibyte/sb1250_dma.h b/arch/mips/include/asm/sibyte/sb1250_dma.h
index 6c44dfb52878..ea81713b78d6 100644
--- a/arch/mips/include/asm/sibyte/sb1250_dma.h
+++ b/arch/mips/include/asm/sibyte/sb1250_dma.h
@@ -51,15 +51,15 @@
*/
-#define M_DMA_DROP _SB_MAKEMASK1(0)
+#define M_DMA_DROP _SB_MAKEMASK1(0)
-#define M_DMA_CHAIN_SEL _SB_MAKEMASK1(1)
-#define M_DMA_RESERVED1 _SB_MAKEMASK1(2)
+#define M_DMA_CHAIN_SEL _SB_MAKEMASK1(1)
+#define M_DMA_RESERVED1 _SB_MAKEMASK1(2)
#define S_DMA_DESC_TYPE _SB_MAKE64(1)
#define M_DMA_DESC_TYPE _SB_MAKEMASK(2, S_DMA_DESC_TYPE)
-#define V_DMA_DESC_TYPE(x) _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
-#define G_DMA_DESC_TYPE(x) _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
+#define V_DMA_DESC_TYPE(x) _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
+#define G_DMA_DESC_TYPE(x) _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
#define K_DMA_DESC_TYPE_RING_AL 0
#define K_DMA_DESC_TYPE_CHAIN_AL 1
@@ -69,31 +69,31 @@
#define K_DMA_DESC_TYPE_RING_UAL_RMW 3
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_EOP_INT_EN _SB_MAKEMASK1(3)
-#define M_DMA_HWM_INT_EN _SB_MAKEMASK1(4)
-#define M_DMA_LWM_INT_EN _SB_MAKEMASK1(5)
-#define M_DMA_TBX_EN _SB_MAKEMASK1(6)
-#define M_DMA_TDX_EN _SB_MAKEMASK1(7)
+#define M_DMA_EOP_INT_EN _SB_MAKEMASK1(3)
+#define M_DMA_HWM_INT_EN _SB_MAKEMASK1(4)
+#define M_DMA_LWM_INT_EN _SB_MAKEMASK1(5)
+#define M_DMA_TBX_EN _SB_MAKEMASK1(6)
+#define M_DMA_TDX_EN _SB_MAKEMASK1(7)
-#define S_DMA_INT_PKTCNT _SB_MAKE64(8)
-#define M_DMA_INT_PKTCNT _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
-#define V_DMA_INT_PKTCNT(x) _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
-#define G_DMA_INT_PKTCNT(x) _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
+#define S_DMA_INT_PKTCNT _SB_MAKE64(8)
+#define M_DMA_INT_PKTCNT _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
+#define V_DMA_INT_PKTCNT(x) _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
+#define G_DMA_INT_PKTCNT(x) _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
-#define S_DMA_RINGSZ _SB_MAKE64(16)
-#define M_DMA_RINGSZ _SB_MAKEMASK(16, S_DMA_RINGSZ)
-#define V_DMA_RINGSZ(x) _SB_MAKEVALUE(x, S_DMA_RINGSZ)
-#define G_DMA_RINGSZ(x) _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
+#define S_DMA_RINGSZ _SB_MAKE64(16)
+#define M_DMA_RINGSZ _SB_MAKEMASK(16, S_DMA_RINGSZ)
+#define V_DMA_RINGSZ(x) _SB_MAKEVALUE(x, S_DMA_RINGSZ)
+#define G_DMA_RINGSZ(x) _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
-#define S_DMA_HIGH_WATERMARK _SB_MAKE64(32)
-#define M_DMA_HIGH_WATERMARK _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
-#define V_DMA_HIGH_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
-#define G_DMA_HIGH_WATERMARK(x) _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
+#define S_DMA_HIGH_WATERMARK _SB_MAKE64(32)
+#define M_DMA_HIGH_WATERMARK _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
+#define V_DMA_HIGH_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
+#define G_DMA_HIGH_WATERMARK(x) _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
-#define S_DMA_LOW_WATERMARK _SB_MAKE64(48)
-#define M_DMA_LOW_WATERMARK _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
-#define V_DMA_LOW_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
-#define G_DMA_LOW_WATERMARK(x) _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
+#define S_DMA_LOW_WATERMARK _SB_MAKE64(48)
+#define M_DMA_LOW_WATERMARK _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
+#define V_DMA_LOW_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
+#define G_DMA_LOW_WATERMARK(x) _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
/*
* Ethernet and Serial DMA Configuration Register 1 (Table 7-5)
@@ -103,11 +103,11 @@
* Registers: DMA_CONFIG1_SER_x_TX
*/
-#define M_DMA_HDR_CF_EN _SB_MAKEMASK1(0)
-#define M_DMA_ASIC_XFR_EN _SB_MAKEMASK1(1)
-#define M_DMA_PRE_ADDR_EN _SB_MAKEMASK1(2)
-#define M_DMA_FLOW_CTL_EN _SB_MAKEMASK1(3)
-#define M_DMA_NO_DSCR_UPDT _SB_MAKEMASK1(4)
+#define M_DMA_HDR_CF_EN _SB_MAKEMASK1(0)
+#define M_DMA_ASIC_XFR_EN _SB_MAKEMASK1(1)
+#define M_DMA_PRE_ADDR_EN _SB_MAKEMASK1(2)
+#define M_DMA_FLOW_CTL_EN _SB_MAKEMASK1(3)
+#define M_DMA_NO_DSCR_UPDT _SB_MAKEMASK1(4)
#define M_DMA_L2CA _SB_MAKEMASK1(5)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -116,37 +116,37 @@
#define M_DMA_TX_FC_PAUSE_EN _SB_MAKEMASK1(7)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_MBZ1 _SB_MAKEMASK(6, 15)
+#define M_DMA_MBZ1 _SB_MAKEMASK(6, 15)
-#define S_DMA_HDR_SIZE _SB_MAKE64(21)
-#define M_DMA_HDR_SIZE _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
-#define V_DMA_HDR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
-#define G_DMA_HDR_SIZE(x) _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
+#define S_DMA_HDR_SIZE _SB_MAKE64(21)
+#define M_DMA_HDR_SIZE _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
+#define V_DMA_HDR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
+#define G_DMA_HDR_SIZE(x) _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
-#define M_DMA_MBZ2 _SB_MAKEMASK(5, 32)
+#define M_DMA_MBZ2 _SB_MAKEMASK(5, 32)
-#define S_DMA_ASICXFR_SIZE _SB_MAKE64(37)
-#define M_DMA_ASICXFR_SIZE _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
-#define V_DMA_ASICXFR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
-#define G_DMA_ASICXFR_SIZE(x) _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
+#define S_DMA_ASICXFR_SIZE _SB_MAKE64(37)
+#define M_DMA_ASICXFR_SIZE _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
+#define V_DMA_ASICXFR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
+#define G_DMA_ASICXFR_SIZE(x) _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
-#define S_DMA_INT_TIMEOUT _SB_MAKE64(48)
-#define M_DMA_INT_TIMEOUT _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
-#define V_DMA_INT_TIMEOUT(x) _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
-#define G_DMA_INT_TIMEOUT(x) _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
+#define S_DMA_INT_TIMEOUT _SB_MAKE64(48)
+#define M_DMA_INT_TIMEOUT _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
+#define V_DMA_INT_TIMEOUT(x) _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
+#define G_DMA_INT_TIMEOUT(x) _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
/*
* Ethernet and Serial DMA Descriptor base address (Table 7-6)
*/
-#define M_DMA_DSCRBASE_MBZ _SB_MAKEMASK(4, 0)
+#define M_DMA_DSCRBASE_MBZ _SB_MAKEMASK(4, 0)
/*
* ASIC Mode Base Address (Table 7-7)
*/
-#define M_DMA_ASIC_BASE_MBZ _SB_MAKEMASK(20, 0)
+#define M_DMA_ASIC_BASE_MBZ _SB_MAKEMASK(20, 0)
/*
* DMA Descriptor Count Registers (Table 7-8)
@@ -159,10 +159,10 @@
* Current Descriptor Address Register (Table 7-11)
*/
-#define S_DMA_CURDSCR_ADDR _SB_MAKE64(0)
-#define M_DMA_CURDSCR_ADDR _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
-#define S_DMA_CURDSCR_COUNT _SB_MAKE64(40)
-#define M_DMA_CURDSCR_COUNT _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
+#define S_DMA_CURDSCR_ADDR _SB_MAKE64(0)
+#define M_DMA_CURDSCR_ADDR _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
+#define S_DMA_CURDSCR_COUNT _SB_MAKE64(40)
+#define M_DMA_CURDSCR_COUNT _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_DMA_TX_CH_PAUSE_ON _SB_MAKEMASK1(56)
@@ -172,13 +172,13 @@
* Receive Packet Drop Registers
*/
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_OODLOST_RX _SB_MAKE64(0)
-#define M_DMA_OODLOST_RX _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
-#define G_DMA_OODLOST_RX(x) _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
+#define S_DMA_OODLOST_RX _SB_MAKE64(0)
+#define M_DMA_OODLOST_RX _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
+#define G_DMA_OODLOST_RX(x) _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
-#define S_DMA_EOP_COUNT_RX _SB_MAKE64(16)
-#define M_DMA_EOP_COUNT_RX _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
-#define G_DMA_EOP_COUNT_RX(x) _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
+#define S_DMA_EOP_COUNT_RX _SB_MAKE64(16)
+#define M_DMA_EOP_COUNT_RX _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
+#define G_DMA_EOP_COUNT_RX(x) _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
/* *********************************************************************
@@ -189,26 +189,26 @@
* Descriptor doubleword "A" (Table 7-12)
*/
-#define S_DMA_DSCRA_OFFSET _SB_MAKE64(0)
-#define M_DMA_DSCRA_OFFSET _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
-#define V_DMA_DSCRA_OFFSET(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
-#define G_DMA_DSCRA_OFFSET(x) _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
+#define S_DMA_DSCRA_OFFSET _SB_MAKE64(0)
+#define M_DMA_DSCRA_OFFSET _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
+#define V_DMA_DSCRA_OFFSET(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
+#define G_DMA_DSCRA_OFFSET(x) _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
/* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRA_A_ADDR _SB_MAKE64(5)
-#define M_DMA_DSCRA_A_ADDR _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
+#define S_DMA_DSCRA_A_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRA_A_ADDR _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
#define M_DMA_DSCRA_A_ADDR_OFFSET (M_DMA_DSCRA_OFFSET | M_DMA_DSCRA_A_ADDR)
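
A hedged illustration of the note above (the wrapper is hypothetical; only the macros defined here are assumed): the buffer's physical address is masked in place rather than shifted into the field, and the separate offset field is OR'd in via its V_ macro.

/* Hypothetical illustration of the note above: build the descriptor
 * doubleword "A" address bits without shifting the physical address. */
static inline u64 example_dscra_addr(u64 phys, u64 offset)
{
	return (phys & M_DMA_DSCRA_A_ADDR) | V_DMA_DSCRA_OFFSET(offset);
}
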
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRA_A_ADDR_UA _SB_MAKE64(0)
-#define M_DMA_DSCRA_A_ADDR_UA _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
+#define S_DMA_DSCRA_A_ADDR_UA _SB_MAKE64(0)
+#define M_DMA_DSCRA_A_ADDR_UA _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_DSCRA_A_SIZE _SB_MAKE64(40)
-#define M_DMA_DSCRA_A_SIZE _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
-#define V_DMA_DSCRA_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
-#define G_DMA_DSCRA_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
+#define S_DMA_DSCRA_A_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRA_A_SIZE _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
+#define V_DMA_DSCRA_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
+#define G_DMA_DSCRA_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define S_DMA_DSCRA_DSCR_CNT _SB_MAKE64(40)
@@ -216,43 +216,43 @@
#define G_DMA_DSCRA_DSCR_CNT(x) _SB_GETVALUE(x, S_DMA_DSCRA_DSCR_CNT, M_DMA_DSCRA_DSCR_CNT)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_DSCRA_INTERRUPT _SB_MAKEMASK1(49)
+#define M_DMA_DSCRA_INTERRUPT _SB_MAKEMASK1(49)
#define M_DMA_DSCRA_OFFSETB _SB_MAKEMASK1(50)
-#define S_DMA_DSCRA_STATUS _SB_MAKE64(51)
-#define M_DMA_DSCRA_STATUS _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
-#define V_DMA_DSCRA_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
-#define G_DMA_DSCRA_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
+#define S_DMA_DSCRA_STATUS _SB_MAKE64(51)
+#define M_DMA_DSCRA_STATUS _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
+#define V_DMA_DSCRA_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
+#define G_DMA_DSCRA_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
/*
* Descriptor doubleword "B" (Table 7-13)
*/
-#define S_DMA_DSCRB_OPTIONS _SB_MAKE64(0)
-#define M_DMA_DSCRB_OPTIONS _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
-#define V_DMA_DSCRB_OPTIONS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
-#define G_DMA_DSCRB_OPTIONS(x) _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
+#define S_DMA_DSCRB_OPTIONS _SB_MAKE64(0)
+#define M_DMA_DSCRB_OPTIONS _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
+#define V_DMA_DSCRB_OPTIONS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
+#define G_DMA_DSCRB_OPTIONS(x) _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRB_A_SIZE _SB_MAKE64(8)
-#define M_DMA_DSCRB_A_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
-#define V_DMA_DSCRB_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
-#define G_DMA_DSCRB_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
+#define S_DMA_DSCRB_A_SIZE _SB_MAKE64(8)
+#define M_DMA_DSCRB_A_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
+#define V_DMA_DSCRB_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
+#define G_DMA_DSCRB_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define R_DMA_DSCRB_ADDR _SB_MAKE64(0x10)
+#define R_DMA_DSCRB_ADDR _SB_MAKE64(0x10)
/* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRB_B_ADDR _SB_MAKE64(5)
-#define M_DMA_DSCRB_B_ADDR _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
+#define S_DMA_DSCRB_B_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRB_B_ADDR _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
-#define S_DMA_DSCRB_B_SIZE _SB_MAKE64(40)
-#define M_DMA_DSCRB_B_SIZE _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
-#define V_DMA_DSCRB_B_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
-#define G_DMA_DSCRB_B_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
+#define S_DMA_DSCRB_B_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRB_B_SIZE _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
+#define V_DMA_DSCRB_B_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
+#define G_DMA_DSCRB_B_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
-#define M_DMA_DSCRB_B_VALID _SB_MAKEMASK1(49)
+#define M_DMA_DSCRB_B_VALID _SB_MAKEMASK1(49)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define S_DMA_DSCRB_PKT_SIZE_MSB _SB_MAKE64(48)
@@ -261,24 +261,24 @@
#define G_DMA_DSCRB_PKT_SIZE_MSB(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE_MSB, M_DMA_DSCRB_PKT_SIZE_MSB)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_DSCRB_PKT_SIZE _SB_MAKE64(50)
-#define M_DMA_DSCRB_PKT_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
-#define V_DMA_DSCRB_PKT_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
-#define G_DMA_DSCRB_PKT_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
+#define S_DMA_DSCRB_PKT_SIZE _SB_MAKE64(50)
+#define M_DMA_DSCRB_PKT_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
+#define V_DMA_DSCRB_PKT_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
+#define G_DMA_DSCRB_PKT_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
/*
* from pass2 some bits in dscr_b are also used for rx status
*/
-#define S_DMA_DSCRB_STATUS _SB_MAKE64(0)
-#define M_DMA_DSCRB_STATUS _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
-#define V_DMA_DSCRB_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
-#define G_DMA_DSCRB_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
+#define S_DMA_DSCRB_STATUS _SB_MAKE64(0)
+#define M_DMA_DSCRB_STATUS _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
+#define V_DMA_DSCRB_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
+#define G_DMA_DSCRB_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
/*
* Ethernet Descriptor Status Bits (Table 7-15)
*/
-#define M_DMA_ETHRX_BADIP4CS _SB_MAKEMASK1(51)
+#define M_DMA_ETHRX_BADIP4CS _SB_MAKEMASK1(51)
#define M_DMA_ETHRX_DSCRERR _SB_MAKEMASK1(52)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -292,70 +292,70 @@
#define M_DMA_ETH_CRC_FLAG _SB_MAKEMASK1(2)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_ETHRX_RXCH 53
-#define M_DMA_ETHRX_RXCH _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
-#define V_DMA_ETHRX_RXCH(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
-#define G_DMA_ETHRX_RXCH(x) _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
+#define S_DMA_ETHRX_RXCH 53
+#define M_DMA_ETHRX_RXCH _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
+#define V_DMA_ETHRX_RXCH(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
+#define G_DMA_ETHRX_RXCH(x) _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
-#define S_DMA_ETHRX_PKTTYPE 55
-#define M_DMA_ETHRX_PKTTYPE _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
-#define V_DMA_ETHRX_PKTTYPE(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
-#define G_DMA_ETHRX_PKTTYPE(x) _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
+#define S_DMA_ETHRX_PKTTYPE 55
+#define M_DMA_ETHRX_PKTTYPE _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
+#define V_DMA_ETHRX_PKTTYPE(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
+#define G_DMA_ETHRX_PKTTYPE(x) _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
#define K_DMA_ETHRX_PKTTYPE_IPV4 0
#define K_DMA_ETHRX_PKTTYPE_ARPV4 1
-#define K_DMA_ETHRX_PKTTYPE_802 2
+#define K_DMA_ETHRX_PKTTYPE_802 2
#define K_DMA_ETHRX_PKTTYPE_OTHER 3
#define K_DMA_ETHRX_PKTTYPE_USER0 4
#define K_DMA_ETHRX_PKTTYPE_USER1 5
#define K_DMA_ETHRX_PKTTYPE_USER2 6
#define K_DMA_ETHRX_PKTTYPE_USER3 7
-#define M_DMA_ETHRX_MATCH_HASH _SB_MAKEMASK1(58)
-#define M_DMA_ETHRX_MATCH_EXACT _SB_MAKEMASK1(59)
-#define M_DMA_ETHRX_BCAST _SB_MAKEMASK1(60)
-#define M_DMA_ETHRX_MCAST _SB_MAKEMASK1(61)
-#define M_DMA_ETHRX_BAD _SB_MAKEMASK1(62)
-#define M_DMA_ETHRX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_ETHRX_MATCH_HASH _SB_MAKEMASK1(58)
+#define M_DMA_ETHRX_MATCH_EXACT _SB_MAKEMASK1(59)
+#define M_DMA_ETHRX_BCAST _SB_MAKEMASK1(60)
+#define M_DMA_ETHRX_MCAST _SB_MAKEMASK1(61)
+#define M_DMA_ETHRX_BAD _SB_MAKEMASK1(62)
+#define M_DMA_ETHRX_SOP _SB_MAKEMASK1(63)
/*
* Ethernet Transmit Status Bits (Table 7-16)
*/
-#define M_DMA_ETHTX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_ETHTX_SOP _SB_MAKEMASK1(63)
/*
* Ethernet Transmit Options (Table 7-17)
*/
-#define K_DMA_ETHTX_NOTSOP _SB_MAKE64(0x00)
-#define K_DMA_ETHTX_APPENDCRC _SB_MAKE64(0x01)
-#define K_DMA_ETHTX_REPLACECRC _SB_MAKE64(0x02)
+#define K_DMA_ETHTX_NOTSOP _SB_MAKE64(0x00)
+#define K_DMA_ETHTX_APPENDCRC _SB_MAKE64(0x01)
+#define K_DMA_ETHTX_REPLACECRC _SB_MAKE64(0x02)
#define K_DMA_ETHTX_APPENDCRC_APPENDPAD _SB_MAKE64(0x03)
#define K_DMA_ETHTX_APPENDVLAN_REPLACECRC _SB_MAKE64(0x04)
#define K_DMA_ETHTX_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x05)
#define K_DMA_ETHTX_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x6)
-#define K_DMA_ETHTX_NOMODS _SB_MAKE64(0x07)
-#define K_DMA_ETHTX_RESERVED1 _SB_MAKE64(0x08)
+#define K_DMA_ETHTX_NOMODS _SB_MAKE64(0x07)
+#define K_DMA_ETHTX_RESERVED1 _SB_MAKE64(0x08)
#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC _SB_MAKE64(0x09)
#define K_DMA_ETHTX_REPLACESADDR_REPLACECRC _SB_MAKE64(0x0A)
#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC_APPENDPAD _SB_MAKE64(0x0B)
#define K_DMA_ETHTX_REPLACESADDR_APPENDVLAN_REPLACECRC _SB_MAKE64(0x0C)
#define K_DMA_ETHTX_REPLACESADDR_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x0D)
#define K_DMA_ETHTX_REPLACESADDR_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x0E)
-#define K_DMA_ETHTX_RESERVED2 _SB_MAKE64(0x0F)
+#define K_DMA_ETHTX_RESERVED2 _SB_MAKE64(0x0F)
/*
* Serial Receive Options (Table 7-18)
*/
-#define M_DMA_SERRX_CRC_ERROR _SB_MAKEMASK1(56)
-#define M_DMA_SERRX_ABORT _SB_MAKEMASK1(57)
-#define M_DMA_SERRX_OCTET_ERROR _SB_MAKEMASK1(58)
+#define M_DMA_SERRX_CRC_ERROR _SB_MAKEMASK1(56)
+#define M_DMA_SERRX_ABORT _SB_MAKEMASK1(57)
+#define M_DMA_SERRX_OCTET_ERROR _SB_MAKEMASK1(58)
#define M_DMA_SERRX_LONGFRAME_ERROR _SB_MAKEMASK1(59)
#define M_DMA_SERRX_SHORTFRAME_ERROR _SB_MAKEMASK1(60)
#define M_DMA_SERRX_OVERRUN_ERROR _SB_MAKEMASK1(61)
-#define M_DMA_SERRX_GOOD _SB_MAKEMASK1(62)
-#define M_DMA_SERRX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_SERRX_GOOD _SB_MAKEMASK1(62)
+#define M_DMA_SERRX_SOP _SB_MAKEMASK1(63)
/*
* Serial Transmit Status Bits (Table 7-20)
@@ -367,10 +367,10 @@
* Serial Transmit Options (Table 7-21)
*/
-#define K_DMA_SERTX_RESERVED _SB_MAKEMASK1(0)
-#define K_DMA_SERTX_APPENDCRC _SB_MAKEMASK1(1)
-#define K_DMA_SERTX_APPENDPAD _SB_MAKEMASK1(2)
-#define K_DMA_SERTX_ABORT _SB_MAKEMASK1(3)
+#define K_DMA_SERTX_RESERVED _SB_MAKEMASK1(0)
+#define K_DMA_SERTX_APPENDCRC _SB_MAKEMASK1(1)
+#define K_DMA_SERTX_APPENDPAD _SB_MAKEMASK1(2)
+#define K_DMA_SERTX_ABORT _SB_MAKEMASK1(3)
/* *********************************************************************
@@ -385,19 +385,19 @@
* Register: DM_DSCR_BASE_3
*/
-#define M_DM_DSCR_BASE_MBZ _SB_MAKEMASK(4, 0)
+#define M_DM_DSCR_BASE_MBZ _SB_MAKEMASK(4, 0)
/* Note: Just mask the base address and then OR it in. */
-#define S_DM_DSCR_BASE_ADDR _SB_MAKE64(4)
-#define M_DM_DSCR_BASE_ADDR _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
+#define S_DM_DSCR_BASE_ADDR _SB_MAKE64(4)
+#define M_DM_DSCR_BASE_ADDR _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
-#define S_DM_DSCR_BASE_RINGSZ _SB_MAKE64(40)
-#define M_DM_DSCR_BASE_RINGSZ _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
+#define S_DM_DSCR_BASE_RINGSZ _SB_MAKE64(40)
+#define M_DM_DSCR_BASE_RINGSZ _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
#define V_DM_DSCR_BASE_RINGSZ(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_RINGSZ)
#define G_DM_DSCR_BASE_RINGSZ(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_RINGSZ, M_DM_DSCR_BASE_RINGSZ)
-#define S_DM_DSCR_BASE_PRIORITY _SB_MAKE64(56)
-#define M_DM_DSCR_BASE_PRIORITY _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
+#define S_DM_DSCR_BASE_PRIORITY _SB_MAKE64(56)
+#define M_DM_DSCR_BASE_PRIORITY _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
#define V_DM_DSCR_BASE_PRIORITY(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_PRIORITY)
#define G_DM_DSCR_BASE_PRIORITY(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_PRIORITY, M_DM_DSCR_BASE_PRIORITY)
@@ -407,12 +407,12 @@
#define K_DM_DSCR_BASE_PRIORITY_8 3
#define K_DM_DSCR_BASE_PRIORITY_16 4
-#define M_DM_DSCR_BASE_ACTIVE _SB_MAKEMASK1(59)
+#define M_DM_DSCR_BASE_ACTIVE _SB_MAKEMASK1(59)
#define M_DM_DSCR_BASE_INTERRUPT _SB_MAKEMASK1(60)
-#define M_DM_DSCR_BASE_RESET _SB_MAKEMASK1(61) /* write register */
-#define M_DM_DSCR_BASE_ERROR _SB_MAKEMASK1(61) /* read register */
-#define M_DM_DSCR_BASE_ABORT _SB_MAKEMASK1(62)
-#define M_DM_DSCR_BASE_ENABL _SB_MAKEMASK1(63)
+#define M_DM_DSCR_BASE_RESET _SB_MAKEMASK1(61) /* write register */
+#define M_DM_DSCR_BASE_ERROR _SB_MAKEMASK1(61) /* read register */
+#define M_DM_DSCR_BASE_ABORT _SB_MAKEMASK1(62)
+#define M_DM_DSCR_BASE_ENABL _SB_MAKEMASK1(63)
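
In the same spirit as the note above about masking the base address and OR'ing the rest in, a hypothetical sketch of composing a descriptor-base value; the wrapper and the chosen priority are invented for illustration and only the macros defined here are assumed.

/* Hypothetical sketch, not part of this patch: mask in an aligned base
 * address, OR in a ring size and an arbitrary priority, set enable. */
static inline u64 example_dm_dscr_base(u64 base_phys, u64 ringsz)
{
	return (base_phys & M_DM_DSCR_BASE_ADDR) |
	       V_DM_DSCR_BASE_RINGSZ(ringsz) |
	       V_DM_DSCR_BASE_PRIORITY(K_DM_DSCR_BASE_PRIORITY_8) |
	       M_DM_DSCR_BASE_ENABL;
}
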
/*
* Data Mover Descriptor Count Register (Table 7-25)
@@ -428,14 +428,14 @@
* Register: DM_CUR_DSCR_ADDR_3
*/
-#define S_DM_CUR_DSCR_DSCR_ADDR _SB_MAKE64(0)
-#define M_DM_CUR_DSCR_DSCR_ADDR _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
+#define S_DM_CUR_DSCR_DSCR_ADDR _SB_MAKE64(0)
+#define M_DM_CUR_DSCR_DSCR_ADDR _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
#define S_DM_CUR_DSCR_DSCR_COUNT _SB_MAKE64(48)
#define M_DM_CUR_DSCR_DSCR_COUNT _SB_MAKEMASK(16, S_DM_CUR_DSCR_DSCR_COUNT)
#define V_DM_CUR_DSCR_DSCR_COUNT(r) _SB_MAKEVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT)
#define G_DM_CUR_DSCR_DSCR_COUNT(r) _SB_GETVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT,\
- M_DM_CUR_DSCR_DSCR_COUNT)
+ M_DM_CUR_DSCR_DSCR_COUNT)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -450,15 +450,15 @@
#define M_DM_PARTIAL_CRC_PARTIAL _SB_MAKEMASK(32, S_DM_PARTIAL_CRC_PARTIAL)
#define V_DM_PARTIAL_CRC_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_CRC_PARTIAL)
#define G_DM_PARTIAL_CRC_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_CRC_PARTIAL,\
- M_DM_PARTIAL_CRC_PARTIAL)
+ M_DM_PARTIAL_CRC_PARTIAL)
#define S_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKE64(32)
#define M_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKEMASK(16, S_DM_PARTIAL_TCPCS_PARTIAL)
#define V_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL)
#define G_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL,\
- M_DM_PARTIAL_TCPCS_PARTIAL)
+ M_DM_PARTIAL_TCPCS_PARTIAL)
-#define M_DM_PARTIAL_ODD_BYTE _SB_MAKEMASK1(48)
+#define M_DM_PARTIAL_ODD_BYTE _SB_MAKEMASK1(48)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
@@ -468,17 +468,17 @@
* Register: CRC_DEF_0
* Register: CRC_DEF_1
*/
-#define S_CRC_DEF_CRC_INIT _SB_MAKE64(0)
-#define M_CRC_DEF_CRC_INIT _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
-#define V_CRC_DEF_CRC_INIT(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
-#define G_CRC_DEF_CRC_INIT(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
- M_CRC_DEF_CRC_INIT)
-
-#define S_CRC_DEF_CRC_POLY _SB_MAKE64(32)
-#define M_CRC_DEF_CRC_POLY _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
-#define V_CRC_DEF_CRC_POLY(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
-#define G_CRC_DEF_CRC_POLY(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
- M_CRC_DEF_CRC_POLY)
+#define S_CRC_DEF_CRC_INIT _SB_MAKE64(0)
+#define M_CRC_DEF_CRC_INIT _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
+#define V_CRC_DEF_CRC_INIT(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
+#define G_CRC_DEF_CRC_INIT(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
+ M_CRC_DEF_CRC_INIT)
+
+#define S_CRC_DEF_CRC_POLY _SB_MAKE64(32)
+#define M_CRC_DEF_CRC_POLY _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
+#define V_CRC_DEF_CRC_POLY(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
+#define G_CRC_DEF_CRC_POLY(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
+ M_CRC_DEF_CRC_POLY)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
@@ -488,50 +488,50 @@
* Register: CTCP_DEF_0
* Register: CTCP_DEF_1
*/
-#define S_CTCP_DEF_CRC_TXOR _SB_MAKE64(0)
-#define M_CTCP_DEF_CRC_TXOR _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
-#define V_CTCP_DEF_CRC_TXOR(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
-#define G_CTCP_DEF_CRC_TXOR(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
- M_CTCP_DEF_CRC_TXOR)
-
-#define S_CTCP_DEF_TCPCS_INIT _SB_MAKE64(32)
-#define M_CTCP_DEF_TCPCS_INIT _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
+#define S_CTCP_DEF_CRC_TXOR _SB_MAKE64(0)
+#define M_CTCP_DEF_CRC_TXOR _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
+#define V_CTCP_DEF_CRC_TXOR(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
+#define G_CTCP_DEF_CRC_TXOR(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
+ M_CTCP_DEF_CRC_TXOR)
+
+#define S_CTCP_DEF_TCPCS_INIT _SB_MAKE64(32)
+#define M_CTCP_DEF_TCPCS_INIT _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
#define V_CTCP_DEF_TCPCS_INIT(r) _SB_MAKEVALUE(r, S_CTCP_DEF_TCPCS_INIT)
#define G_CTCP_DEF_TCPCS_INIT(r) _SB_GETVALUE(r, S_CTCP_DEF_TCPCS_INIT,\
- M_CTCP_DEF_TCPCS_INIT)
+ M_CTCP_DEF_TCPCS_INIT)
-#define S_CTCP_DEF_CRC_WIDTH _SB_MAKE64(48)
-#define M_CTCP_DEF_CRC_WIDTH _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
-#define V_CTCP_DEF_CRC_WIDTH(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
-#define G_CTCP_DEF_CRC_WIDTH(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
- M_CTCP_DEF_CRC_WIDTH)
+#define S_CTCP_DEF_CRC_WIDTH _SB_MAKE64(48)
+#define M_CTCP_DEF_CRC_WIDTH _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
+#define V_CTCP_DEF_CRC_WIDTH(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
+#define G_CTCP_DEF_CRC_WIDTH(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
+ M_CTCP_DEF_CRC_WIDTH)
-#define K_CTCP_DEF_CRC_WIDTH_4 0
-#define K_CTCP_DEF_CRC_WIDTH_2 1
-#define K_CTCP_DEF_CRC_WIDTH_1 2
+#define K_CTCP_DEF_CRC_WIDTH_4 0
+#define K_CTCP_DEF_CRC_WIDTH_2 1
+#define K_CTCP_DEF_CRC_WIDTH_1 2
#define M_CTCP_DEF_CRC_BIT_ORDER _SB_MAKEMASK1(50)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
/*
- * Data Mover Descriptor Doubleword "A" (Table 7-26)
+ * Data Mover Descriptor Doubleword "A" (Table 7-26)
*/
-#define S_DM_DSCRA_DST_ADDR _SB_MAKE64(0)
-#define M_DM_DSCRA_DST_ADDR _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
+#define S_DM_DSCRA_DST_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRA_DST_ADDR _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
-#define M_DM_DSCRA_UN_DEST _SB_MAKEMASK1(40)
-#define M_DM_DSCRA_UN_SRC _SB_MAKEMASK1(41)
-#define M_DM_DSCRA_INTERRUPT _SB_MAKEMASK1(42)
+#define M_DM_DSCRA_UN_DEST _SB_MAKEMASK1(40)
+#define M_DM_DSCRA_UN_SRC _SB_MAKEMASK1(41)
+#define M_DM_DSCRA_INTERRUPT _SB_MAKEMASK1(42)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-#define M_DM_DSCRA_THROTTLE _SB_MAKEMASK1(43)
+#define M_DM_DSCRA_THROTTLE _SB_MAKEMASK1(43)
#endif /* up to 1250 PASS1 */
-#define S_DM_DSCRA_DIR_DEST _SB_MAKE64(44)
-#define M_DM_DSCRA_DIR_DEST _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
-#define V_DM_DSCRA_DIR_DEST(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
-#define G_DM_DSCRA_DIR_DEST(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
+#define S_DM_DSCRA_DIR_DEST _SB_MAKE64(44)
+#define M_DM_DSCRA_DIR_DEST _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
+#define G_DM_DSCRA_DIR_DEST(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
#define K_DM_DSCRA_DIR_DEST_INCR 0
#define K_DM_DSCRA_DIR_DEST_DECR 1
@@ -541,24 +541,24 @@
#define V_DM_DSCRA_DIR_DEST_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_DECR, S_DM_DSCRA_DIR_DEST)
#define V_DM_DSCRA_DIR_DEST_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_CONST, S_DM_DSCRA_DIR_DEST)
-#define S_DM_DSCRA_DIR_SRC _SB_MAKE64(46)
-#define M_DM_DSCRA_DIR_SRC _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
-#define G_DM_DSCRA_DIR_SRC(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
+#define S_DM_DSCRA_DIR_SRC _SB_MAKE64(46)
+#define M_DM_DSCRA_DIR_SRC _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
+#define G_DM_DSCRA_DIR_SRC(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
-#define K_DM_DSCRA_DIR_SRC_INCR 0
-#define K_DM_DSCRA_DIR_SRC_DECR 1
+#define K_DM_DSCRA_DIR_SRC_INCR 0
+#define K_DM_DSCRA_DIR_SRC_DECR 1
#define K_DM_DSCRA_DIR_SRC_CONST 2
-#define V_DM_DSCRA_DIR_SRC_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
#define V_DM_DSCRA_DIR_SRC_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_CONST, S_DM_DSCRA_DIR_SRC)
-#define M_DM_DSCRA_ZERO_MEM _SB_MAKEMASK1(48)
-#define M_DM_DSCRA_PREFETCH _SB_MAKEMASK1(49)
-#define M_DM_DSCRA_L2C_DEST _SB_MAKEMASK1(50)
-#define M_DM_DSCRA_L2C_SRC _SB_MAKEMASK1(51)
+#define M_DM_DSCRA_ZERO_MEM _SB_MAKEMASK1(48)
+#define M_DM_DSCRA_PREFETCH _SB_MAKEMASK1(49)
+#define M_DM_DSCRA_L2C_DEST _SB_MAKEMASK1(50)
+#define M_DM_DSCRA_L2C_SRC _SB_MAKEMASK1(51)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_DM_DSCRA_RD_BKOFF _SB_MAKEMASK1(52)
@@ -566,27 +566,27 @@
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_DM_DSCRA_TCPCS_EN _SB_MAKEMASK1(54)
-#define M_DM_DSCRA_TCPCS_RES _SB_MAKEMASK1(55)
-#define M_DM_DSCRA_TCPCS_AP _SB_MAKEMASK1(56)
-#define M_DM_DSCRA_CRC_EN _SB_MAKEMASK1(57)
-#define M_DM_DSCRA_CRC_RES _SB_MAKEMASK1(58)
-#define M_DM_DSCRA_CRC_AP _SB_MAKEMASK1(59)
-#define M_DM_DSCRA_CRC_DFN _SB_MAKEMASK1(60)
-#define M_DM_DSCRA_CRC_XBIT _SB_MAKEMASK1(61)
+#define M_DM_DSCRA_TCPCS_EN _SB_MAKEMASK1(54)
+#define M_DM_DSCRA_TCPCS_RES _SB_MAKEMASK1(55)
+#define M_DM_DSCRA_TCPCS_AP _SB_MAKEMASK1(56)
+#define M_DM_DSCRA_CRC_EN _SB_MAKEMASK1(57)
+#define M_DM_DSCRA_CRC_RES _SB_MAKEMASK1(58)
+#define M_DM_DSCRA_CRC_AP _SB_MAKEMASK1(59)
+#define M_DM_DSCRA_CRC_DFN _SB_MAKEMASK1(60)
+#define M_DM_DSCRA_CRC_XBIT _SB_MAKEMASK1(61)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DM_DSCRA_RESERVED2 _SB_MAKEMASK(3, 61)
+#define M_DM_DSCRA_RESERVED2 _SB_MAKEMASK(3, 61)
/*
- * Data Mover Descriptor Doubleword "B" (Table 7-25)
+ * Data Mover Descriptor Doubleword "B" (Table 7-25)
*/
-#define S_DM_DSCRB_SRC_ADDR _SB_MAKE64(0)
-#define M_DM_DSCRB_SRC_ADDR _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
+#define S_DM_DSCRB_SRC_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRB_SRC_ADDR _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
-#define S_DM_DSCRB_SRC_LENGTH _SB_MAKE64(40)
-#define M_DM_DSCRB_SRC_LENGTH _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
+#define S_DM_DSCRB_SRC_LENGTH _SB_MAKE64(40)
+#define M_DM_DSCRB_SRC_LENGTH _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
#define V_DM_DSCRB_SRC_LENGTH(x) _SB_MAKEVALUE(x, S_DM_DSCRB_SRC_LENGTH)
#define G_DM_DSCRB_SRC_LENGTH(x) _SB_GETVALUE(x, S_DM_DSCRB_SRC_LENGTH, M_DM_DSCRB_SRC_LENGTH)
diff --git a/arch/mips/include/asm/sibyte/sb1250_genbus.h b/arch/mips/include/asm/sibyte/sb1250_genbus.h
index a96ded17bdc9..04c009c36937 100644
--- a/arch/mips/include/asm/sibyte/sb1250_genbus.h
+++ b/arch/mips/include/asm/sibyte/sb1250_genbus.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Generic Bus Constants File: sb1250_genbus.h
+ * Generic Bus Constants File: sb1250_genbus.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's Generic Bus interface
@@ -40,10 +40,10 @@
* Generic Bus Region Configuration Registers (Table 11-4)
*/
-#define S_IO_RDY_ACTIVE 0
+#define S_IO_RDY_ACTIVE 0
#define M_IO_RDY_ACTIVE _SB_MAKEMASK1(S_IO_RDY_ACTIVE)
-#define S_IO_ENA_RDY 1
+#define S_IO_ENA_RDY 1
#define M_IO_ENA_RDY _SB_MAKEMASK1(S_IO_ENA_RDY)
#define S_IO_WIDTH_SEL 2
@@ -52,7 +52,7 @@
#define K_IO_WIDTH_SEL_2 1
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define K_IO_WIDTH_SEL_1L 2
+#define K_IO_WIDTH_SEL_1L 2
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define K_IO_WIDTH_SEL_4 3
#define V_IO_WIDTH_SEL(x) _SB_MAKEVALUE(x, S_IO_WIDTH_SEL)
@@ -111,7 +111,7 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_EARLY_CS _SB_MAKEMASK1(3)
+#define M_IO_EARLY_CS _SB_MAKEMASK1(3)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_ALE_TO_CS 4
@@ -121,10 +121,10 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_IO_BURST_WIDTH _SB_MAKE64(6)
-#define M_IO_BURST_WIDTH _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
-#define V_IO_BURST_WIDTH(x) _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
-#define G_IO_BURST_WIDTH(x) _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
+#define S_IO_BURST_WIDTH _SB_MAKE64(6)
+#define M_IO_BURST_WIDTH _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
+#define V_IO_BURST_WIDTH(x) _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
+#define G_IO_BURST_WIDTH(x) _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_CS_WIDTH 8
@@ -149,7 +149,7 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_RDY_SYNC _SB_MAKEMASK1(3)
+#define M_IO_RDY_SYNC _SB_MAKEMASK1(3)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_WRITE_WIDTH 4
@@ -191,7 +191,7 @@
#define M_IO_ILL_ADDR_INT _SB_MAKEMASK1(11)
#define M_IO_MULT_CS_INT _SB_MAKEMASK1(12)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_COH_ERR _SB_MAKEMASK1(14)
+#define M_IO_COH_ERR _SB_MAKEMASK1(14)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
@@ -370,8 +370,8 @@
#define S_GPIO_INTR_TYPEX(n) (((n)/2)*2)
#define M_GPIO_INTR_TYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_TYPEX(n))
-#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
-#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
+#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
+#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
#define S_GPIO_INTR_TYPE0 0
#define M_GPIO_INTR_TYPE0 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE0)
diff --git a/arch/mips/include/asm/sibyte/sb1250_int.h b/arch/mips/include/asm/sibyte/sb1250_int.h
index dbea73ddd2fe..36afcb2766c6 100644
--- a/arch/mips/include/asm/sibyte/sb1250_int.h
+++ b/arch/mips/include/asm/sibyte/sb1250_int.h
@@ -45,71 +45,71 @@
* First, the interrupt numbers.
*/
-#define K_INT_SOURCES 64
-
-#define K_INT_WATCHDOG_TIMER_0 0
-#define K_INT_WATCHDOG_TIMER_1 1
-#define K_INT_TIMER_0 2
-#define K_INT_TIMER_1 3
-#define K_INT_TIMER_2 4
-#define K_INT_TIMER_3 5
-#define K_INT_SMB_0 6
-#define K_INT_SMB_1 7
-#define K_INT_UART_0 8
-#define K_INT_UART_1 9
-#define K_INT_SER_0 10
-#define K_INT_SER_1 11
-#define K_INT_PCMCIA 12
-#define K_INT_ADDR_TRAP 13
-#define K_INT_PERF_CNT 14
-#define K_INT_TRACE_FREEZE 15
-#define K_INT_BAD_ECC 16
-#define K_INT_COR_ECC 17
-#define K_INT_IO_BUS 18
-#define K_INT_MAC_0 19
-#define K_INT_MAC_1 20
-#define K_INT_MAC_2 21
-#define K_INT_DM_CH_0 22
-#define K_INT_DM_CH_1 23
-#define K_INT_DM_CH_2 24
-#define K_INT_DM_CH_3 25
-#define K_INT_MBOX_0 26
-#define K_INT_MBOX_1 27
-#define K_INT_MBOX_2 28
-#define K_INT_MBOX_3 29
+#define K_INT_SOURCES 64
+
+#define K_INT_WATCHDOG_TIMER_0 0
+#define K_INT_WATCHDOG_TIMER_1 1
+#define K_INT_TIMER_0 2
+#define K_INT_TIMER_1 3
+#define K_INT_TIMER_2 4
+#define K_INT_TIMER_3 5
+#define K_INT_SMB_0 6
+#define K_INT_SMB_1 7
+#define K_INT_UART_0 8
+#define K_INT_UART_1 9
+#define K_INT_SER_0 10
+#define K_INT_SER_1 11
+#define K_INT_PCMCIA 12
+#define K_INT_ADDR_TRAP 13
+#define K_INT_PERF_CNT 14
+#define K_INT_TRACE_FREEZE 15
+#define K_INT_BAD_ECC 16
+#define K_INT_COR_ECC 17
+#define K_INT_IO_BUS 18
+#define K_INT_MAC_0 19
+#define K_INT_MAC_1 20
+#define K_INT_MAC_2 21
+#define K_INT_DM_CH_0 22
+#define K_INT_DM_CH_1 23
+#define K_INT_DM_CH_2 24
+#define K_INT_DM_CH_3 25
+#define K_INT_MBOX_0 26
+#define K_INT_MBOX_1 27
+#define K_INT_MBOX_2 28
+#define K_INT_MBOX_3 29
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define K_INT_CYCLE_CP0_INT 30
#define K_INT_CYCLE_CP1_INT 31
#endif /* 1250 PASS2 || 112x PASS1 */
-#define K_INT_GPIO_0 32
-#define K_INT_GPIO_1 33
-#define K_INT_GPIO_2 34
-#define K_INT_GPIO_3 35
-#define K_INT_GPIO_4 36
-#define K_INT_GPIO_5 37
-#define K_INT_GPIO_6 38
-#define K_INT_GPIO_7 39
-#define K_INT_GPIO_8 40
-#define K_INT_GPIO_9 41
-#define K_INT_GPIO_10 42
-#define K_INT_GPIO_11 43
-#define K_INT_GPIO_12 44
-#define K_INT_GPIO_13 45
-#define K_INT_GPIO_14 46
-#define K_INT_GPIO_15 47
-#define K_INT_LDT_FATAL 48
-#define K_INT_LDT_NONFATAL 49
-#define K_INT_LDT_SMI 50
-#define K_INT_LDT_NMI 51
-#define K_INT_LDT_INIT 52
-#define K_INT_LDT_STARTUP 53
-#define K_INT_LDT_EXT 54
-#define K_INT_PCI_ERROR 55
-#define K_INT_PCI_INTA 56
-#define K_INT_PCI_INTB 57
-#define K_INT_PCI_INTC 58
-#define K_INT_PCI_INTD 59
-#define K_INT_SPARE_2 60
+#define K_INT_GPIO_0 32
+#define K_INT_GPIO_1 33
+#define K_INT_GPIO_2 34
+#define K_INT_GPIO_3 35
+#define K_INT_GPIO_4 36
+#define K_INT_GPIO_5 37
+#define K_INT_GPIO_6 38
+#define K_INT_GPIO_7 39
+#define K_INT_GPIO_8 40
+#define K_INT_GPIO_9 41
+#define K_INT_GPIO_10 42
+#define K_INT_GPIO_11 43
+#define K_INT_GPIO_12 44
+#define K_INT_GPIO_13 45
+#define K_INT_GPIO_14 46
+#define K_INT_GPIO_15 47
+#define K_INT_LDT_FATAL 48
+#define K_INT_LDT_NONFATAL 49
+#define K_INT_LDT_SMI 50
+#define K_INT_LDT_NMI 51
+#define K_INT_LDT_INIT 52
+#define K_INT_LDT_STARTUP 53
+#define K_INT_LDT_EXT 54
+#define K_INT_PCI_ERROR 55
+#define K_INT_PCI_INTA 56
+#define K_INT_PCI_INTB 57
+#define K_INT_PCI_INTC 58
+#define K_INT_PCI_INTD 59
+#define K_INT_SPARE_2 60
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define K_INT_MAC_0_CH1 61
#define K_INT_MAC_1_CH1 62
@@ -120,70 +120,70 @@
* Mask values for each interrupt
*/
-#define M_INT_WATCHDOG_TIMER_0 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
-#define M_INT_WATCHDOG_TIMER_1 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
-#define M_INT_TIMER_0 _SB_MAKEMASK1(K_INT_TIMER_0)
-#define M_INT_TIMER_1 _SB_MAKEMASK1(K_INT_TIMER_1)
-#define M_INT_TIMER_2 _SB_MAKEMASK1(K_INT_TIMER_2)
-#define M_INT_TIMER_3 _SB_MAKEMASK1(K_INT_TIMER_3)
-#define M_INT_SMB_0 _SB_MAKEMASK1(K_INT_SMB_0)
-#define M_INT_SMB_1 _SB_MAKEMASK1(K_INT_SMB_1)
-#define M_INT_UART_0 _SB_MAKEMASK1(K_INT_UART_0)
-#define M_INT_UART_1 _SB_MAKEMASK1(K_INT_UART_1)
-#define M_INT_SER_0 _SB_MAKEMASK1(K_INT_SER_0)
-#define M_INT_SER_1 _SB_MAKEMASK1(K_INT_SER_1)
-#define M_INT_PCMCIA _SB_MAKEMASK1(K_INT_PCMCIA)
-#define M_INT_ADDR_TRAP _SB_MAKEMASK1(K_INT_ADDR_TRAP)
-#define M_INT_PERF_CNT _SB_MAKEMASK1(K_INT_PERF_CNT)
-#define M_INT_TRACE_FREEZE _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
-#define M_INT_BAD_ECC _SB_MAKEMASK1(K_INT_BAD_ECC)
-#define M_INT_COR_ECC _SB_MAKEMASK1(K_INT_COR_ECC)
-#define M_INT_IO_BUS _SB_MAKEMASK1(K_INT_IO_BUS)
-#define M_INT_MAC_0 _SB_MAKEMASK1(K_INT_MAC_0)
-#define M_INT_MAC_1 _SB_MAKEMASK1(K_INT_MAC_1)
-#define M_INT_MAC_2 _SB_MAKEMASK1(K_INT_MAC_2)
-#define M_INT_DM_CH_0 _SB_MAKEMASK1(K_INT_DM_CH_0)
-#define M_INT_DM_CH_1 _SB_MAKEMASK1(K_INT_DM_CH_1)
-#define M_INT_DM_CH_2 _SB_MAKEMASK1(K_INT_DM_CH_2)
-#define M_INT_DM_CH_3 _SB_MAKEMASK1(K_INT_DM_CH_3)
-#define M_INT_MBOX_0 _SB_MAKEMASK1(K_INT_MBOX_0)
-#define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1)
-#define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2)
-#define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3)
-#define M_INT_MBOX_ALL _SB_MAKEMASK(4, K_INT_MBOX_0)
+#define M_INT_WATCHDOG_TIMER_0 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
+#define M_INT_WATCHDOG_TIMER_1 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
+#define M_INT_TIMER_0 _SB_MAKEMASK1(K_INT_TIMER_0)
+#define M_INT_TIMER_1 _SB_MAKEMASK1(K_INT_TIMER_1)
+#define M_INT_TIMER_2 _SB_MAKEMASK1(K_INT_TIMER_2)
+#define M_INT_TIMER_3 _SB_MAKEMASK1(K_INT_TIMER_3)
+#define M_INT_SMB_0 _SB_MAKEMASK1(K_INT_SMB_0)
+#define M_INT_SMB_1 _SB_MAKEMASK1(K_INT_SMB_1)
+#define M_INT_UART_0 _SB_MAKEMASK1(K_INT_UART_0)
+#define M_INT_UART_1 _SB_MAKEMASK1(K_INT_UART_1)
+#define M_INT_SER_0 _SB_MAKEMASK1(K_INT_SER_0)
+#define M_INT_SER_1 _SB_MAKEMASK1(K_INT_SER_1)
+#define M_INT_PCMCIA _SB_MAKEMASK1(K_INT_PCMCIA)
+#define M_INT_ADDR_TRAP _SB_MAKEMASK1(K_INT_ADDR_TRAP)
+#define M_INT_PERF_CNT _SB_MAKEMASK1(K_INT_PERF_CNT)
+#define M_INT_TRACE_FREEZE _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
+#define M_INT_BAD_ECC _SB_MAKEMASK1(K_INT_BAD_ECC)
+#define M_INT_COR_ECC _SB_MAKEMASK1(K_INT_COR_ECC)
+#define M_INT_IO_BUS _SB_MAKEMASK1(K_INT_IO_BUS)
+#define M_INT_MAC_0 _SB_MAKEMASK1(K_INT_MAC_0)
+#define M_INT_MAC_1 _SB_MAKEMASK1(K_INT_MAC_1)
+#define M_INT_MAC_2 _SB_MAKEMASK1(K_INT_MAC_2)
+#define M_INT_DM_CH_0 _SB_MAKEMASK1(K_INT_DM_CH_0)
+#define M_INT_DM_CH_1 _SB_MAKEMASK1(K_INT_DM_CH_1)
+#define M_INT_DM_CH_2 _SB_MAKEMASK1(K_INT_DM_CH_2)
+#define M_INT_DM_CH_3 _SB_MAKEMASK1(K_INT_DM_CH_3)
+#define M_INT_MBOX_0 _SB_MAKEMASK1(K_INT_MBOX_0)
+#define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1)
+#define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2)
+#define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3)
+#define M_INT_MBOX_ALL _SB_MAKEMASK(4, K_INT_MBOX_0)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_INT_CYCLE_CP0_INT _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT)
#define M_INT_CYCLE_CP1_INT _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT)
#endif /* 1250 PASS2 || 112x PASS1 */
-#define M_INT_GPIO_0 _SB_MAKEMASK1(K_INT_GPIO_0)
-#define M_INT_GPIO_1 _SB_MAKEMASK1(K_INT_GPIO_1)
-#define M_INT_GPIO_2 _SB_MAKEMASK1(K_INT_GPIO_2)
-#define M_INT_GPIO_3 _SB_MAKEMASK1(K_INT_GPIO_3)
-#define M_INT_GPIO_4 _SB_MAKEMASK1(K_INT_GPIO_4)
-#define M_INT_GPIO_5 _SB_MAKEMASK1(K_INT_GPIO_5)
-#define M_INT_GPIO_6 _SB_MAKEMASK1(K_INT_GPIO_6)
-#define M_INT_GPIO_7 _SB_MAKEMASK1(K_INT_GPIO_7)
-#define M_INT_GPIO_8 _SB_MAKEMASK1(K_INT_GPIO_8)
-#define M_INT_GPIO_9 _SB_MAKEMASK1(K_INT_GPIO_9)
-#define M_INT_GPIO_10 _SB_MAKEMASK1(K_INT_GPIO_10)
-#define M_INT_GPIO_11 _SB_MAKEMASK1(K_INT_GPIO_11)
-#define M_INT_GPIO_12 _SB_MAKEMASK1(K_INT_GPIO_12)
-#define M_INT_GPIO_13 _SB_MAKEMASK1(K_INT_GPIO_13)
-#define M_INT_GPIO_14 _SB_MAKEMASK1(K_INT_GPIO_14)
-#define M_INT_GPIO_15 _SB_MAKEMASK1(K_INT_GPIO_15)
-#define M_INT_LDT_FATAL _SB_MAKEMASK1(K_INT_LDT_FATAL)
-#define M_INT_LDT_NONFATAL _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
-#define M_INT_LDT_SMI _SB_MAKEMASK1(K_INT_LDT_SMI)
-#define M_INT_LDT_NMI _SB_MAKEMASK1(K_INT_LDT_NMI)
-#define M_INT_LDT_INIT _SB_MAKEMASK1(K_INT_LDT_INIT)
-#define M_INT_LDT_STARTUP _SB_MAKEMASK1(K_INT_LDT_STARTUP)
-#define M_INT_LDT_EXT _SB_MAKEMASK1(K_INT_LDT_EXT)
-#define M_INT_PCI_ERROR _SB_MAKEMASK1(K_INT_PCI_ERROR)
-#define M_INT_PCI_INTA _SB_MAKEMASK1(K_INT_PCI_INTA)
-#define M_INT_PCI_INTB _SB_MAKEMASK1(K_INT_PCI_INTB)
-#define M_INT_PCI_INTC _SB_MAKEMASK1(K_INT_PCI_INTC)
-#define M_INT_PCI_INTD _SB_MAKEMASK1(K_INT_PCI_INTD)
-#define M_INT_SPARE_2 _SB_MAKEMASK1(K_INT_SPARE_2)
+#define M_INT_GPIO_0 _SB_MAKEMASK1(K_INT_GPIO_0)
+#define M_INT_GPIO_1 _SB_MAKEMASK1(K_INT_GPIO_1)
+#define M_INT_GPIO_2 _SB_MAKEMASK1(K_INT_GPIO_2)
+#define M_INT_GPIO_3 _SB_MAKEMASK1(K_INT_GPIO_3)
+#define M_INT_GPIO_4 _SB_MAKEMASK1(K_INT_GPIO_4)
+#define M_INT_GPIO_5 _SB_MAKEMASK1(K_INT_GPIO_5)
+#define M_INT_GPIO_6 _SB_MAKEMASK1(K_INT_GPIO_6)
+#define M_INT_GPIO_7 _SB_MAKEMASK1(K_INT_GPIO_7)
+#define M_INT_GPIO_8 _SB_MAKEMASK1(K_INT_GPIO_8)
+#define M_INT_GPIO_9 _SB_MAKEMASK1(K_INT_GPIO_9)
+#define M_INT_GPIO_10 _SB_MAKEMASK1(K_INT_GPIO_10)
+#define M_INT_GPIO_11 _SB_MAKEMASK1(K_INT_GPIO_11)
+#define M_INT_GPIO_12 _SB_MAKEMASK1(K_INT_GPIO_12)
+#define M_INT_GPIO_13 _SB_MAKEMASK1(K_INT_GPIO_13)
+#define M_INT_GPIO_14 _SB_MAKEMASK1(K_INT_GPIO_14)
+#define M_INT_GPIO_15 _SB_MAKEMASK1(K_INT_GPIO_15)
+#define M_INT_LDT_FATAL _SB_MAKEMASK1(K_INT_LDT_FATAL)
+#define M_INT_LDT_NONFATAL _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
+#define M_INT_LDT_SMI _SB_MAKEMASK1(K_INT_LDT_SMI)
+#define M_INT_LDT_NMI _SB_MAKEMASK1(K_INT_LDT_NMI)
+#define M_INT_LDT_INIT _SB_MAKEMASK1(K_INT_LDT_INIT)
+#define M_INT_LDT_STARTUP _SB_MAKEMASK1(K_INT_LDT_STARTUP)
+#define M_INT_LDT_EXT _SB_MAKEMASK1(K_INT_LDT_EXT)
+#define M_INT_PCI_ERROR _SB_MAKEMASK1(K_INT_PCI_ERROR)
+#define M_INT_PCI_INTA _SB_MAKEMASK1(K_INT_PCI_INTA)
+#define M_INT_PCI_INTB _SB_MAKEMASK1(K_INT_PCI_INTB)
+#define M_INT_PCI_INTC _SB_MAKEMASK1(K_INT_PCI_INTC)
+#define M_INT_PCI_INTD _SB_MAKEMASK1(K_INT_PCI_INTD)
+#define M_INT_SPARE_2 _SB_MAKEMASK1(K_INT_SPARE_2)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_INT_MAC_0_CH1 _SB_MAKEMASK1(K_INT_MAC_0_CH1)
#define M_INT_MAC_1_CH1 _SB_MAKEMASK1(K_INT_MAC_1_CH1)
@@ -208,9 +208,9 @@
*/
#define S_INT_LDT_INTMSG 0
-#define M_INT_LDT_INTMSG _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
-#define V_INT_LDT_INTMSG(x) _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
-#define G_INT_LDT_INTMSG(x) _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
+#define M_INT_LDT_INTMSG _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
+#define V_INT_LDT_INTMSG(x) _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
+#define G_INT_LDT_INTMSG(x) _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
#define K_INT_LDT_INTMSG_FIXED 0
#define K_INT_LDT_INTMSG_ARBITRATED 1
@@ -221,28 +221,28 @@
#define K_INT_LDT_INTMSG_EXTINT 6
#define K_INT_LDT_INTMSG_RESERVED 7
-#define M_INT_LDT_EDGETRIGGER 0
-#define M_INT_LDT_LEVELTRIGGER _SB_MAKEMASK1(3)
+#define M_INT_LDT_EDGETRIGGER 0
+#define M_INT_LDT_LEVELTRIGGER _SB_MAKEMASK1(3)
-#define M_INT_LDT_PHYSICALDEST 0
-#define M_INT_LDT_LOGICALDEST _SB_MAKEMASK1(4)
+#define M_INT_LDT_PHYSICALDEST 0
+#define M_INT_LDT_LOGICALDEST _SB_MAKEMASK1(4)
-#define S_INT_LDT_INTDEST 5
-#define M_INT_LDT_INTDEST _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
-#define V_INT_LDT_INTDEST(x) _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
-#define G_INT_LDT_INTDEST(x) _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
+#define S_INT_LDT_INTDEST 5
+#define M_INT_LDT_INTDEST _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
+#define V_INT_LDT_INTDEST(x) _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
+#define G_INT_LDT_INTDEST(x) _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
-#define S_INT_LDT_VECTOR 13
-#define M_INT_LDT_VECTOR _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
-#define V_INT_LDT_VECTOR(x) _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
-#define G_INT_LDT_VECTOR(x) _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
+#define S_INT_LDT_VECTOR 13
+#define M_INT_LDT_VECTOR _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
+#define V_INT_LDT_VECTOR(x) _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
+#define G_INT_LDT_VECTOR(x) _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
/*
* Vector format (Table 4-6)
*/
#define M_LDTVECT_RAISEINT 0x00
-#define M_LDTVECT_RAISEMBOX 0x40
+#define M_LDTVECT_RAISEMBOX 0x40
#endif /* 1250/112x */
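(Editorial note, not part of the patch: the S_/M_/V_/K_ field macros realigned above are normally combined to build a whole LDT interrupt-mapping word. A minimal C sketch, assuming the helper name and the <linux/types.h> include; only the macro names come from the header being patched.)

#include <linux/types.h>
#include <asm/sibyte/sb1250_int.h>

/* Illustrative only: fixed-delivery, level-triggered interrupt aimed at
 * 'dest' with interrupt vector 'vector', built from the fields above. */
static inline u64 ldt_int_map_sketch(unsigned int dest, unsigned int vector)
{
	return V_INT_LDT_INTMSG(K_INT_LDT_INTMSG_FIXED) |
	       M_INT_LDT_LEVELTRIGGER |
	       V_INT_LDT_INTDEST(dest) |
	       V_INT_LDT_VECTOR(vector);
}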
diff --git a/arch/mips/include/asm/sibyte/sb1250_l2c.h b/arch/mips/include/asm/sibyte/sb1250_l2c.h
index b61a7491607d..30092d7cfdc2 100644
--- a/arch/mips/include/asm/sibyte/sb1250_l2c.h
+++ b/arch/mips/include/asm/sibyte/sb1250_l2c.h
@@ -39,71 +39,71 @@
* Level 2 Cache Tag register (Table 5-3)
*/
-#define S_L2C_TAG_MBZ 0
-#define M_L2C_TAG_MBZ _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
+#define S_L2C_TAG_MBZ 0
+#define M_L2C_TAG_MBZ _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
-#define S_L2C_TAG_INDEX 5
-#define M_L2C_TAG_INDEX _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
-#define V_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
-#define G_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
+#define S_L2C_TAG_INDEX 5
+#define M_L2C_TAG_INDEX _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
+#define V_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
+#define G_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
-#define S_L2C_TAG_TAG 17
-#define M_L2C_TAG_TAG _SB_MAKEMASK(23, S_L2C_TAG_TAG)
-#define V_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
-#define G_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
+#define S_L2C_TAG_TAG 17
+#define M_L2C_TAG_TAG _SB_MAKEMASK(23, S_L2C_TAG_TAG)
+#define V_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
+#define G_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
-#define S_L2C_TAG_ECC 40
-#define M_L2C_TAG_ECC _SB_MAKEMASK(6, S_L2C_TAG_ECC)
-#define V_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
-#define G_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
+#define S_L2C_TAG_ECC 40
+#define M_L2C_TAG_ECC _SB_MAKEMASK(6, S_L2C_TAG_ECC)
+#define V_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
+#define G_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
-#define S_L2C_TAG_WAY 46
-#define M_L2C_TAG_WAY _SB_MAKEMASK(2, S_L2C_TAG_WAY)
-#define V_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
-#define G_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
+#define S_L2C_TAG_WAY 46
+#define M_L2C_TAG_WAY _SB_MAKEMASK(2, S_L2C_TAG_WAY)
+#define V_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
+#define G_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
-#define M_L2C_TAG_DIRTY _SB_MAKEMASK1(48)
-#define M_L2C_TAG_VALID _SB_MAKEMASK1(49)
+#define M_L2C_TAG_DIRTY _SB_MAKEMASK1(48)
+#define M_L2C_TAG_VALID _SB_MAKEMASK1(49)
/*
* Format of level 2 cache management address (table 5-2)
*/
-#define S_L2C_MGMT_INDEX 5
-#define M_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
-#define V_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
-#define G_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
+#define S_L2C_MGMT_INDEX 5
+#define M_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
+#define V_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
+#define G_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
-#define S_L2C_MGMT_QUADRANT 15
-#define M_L2C_MGMT_QUADRANT _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
-#define V_L2C_MGMT_QUADRANT(x) _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
-#define G_L2C_MGMT_QUADRANT(x) _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
+#define S_L2C_MGMT_QUADRANT 15
+#define M_L2C_MGMT_QUADRANT _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
+#define V_L2C_MGMT_QUADRANT(x) _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
+#define G_L2C_MGMT_QUADRANT(x) _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
#define S_L2C_MGMT_HALF 16
-#define M_L2C_MGMT_HALF _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
+#define M_L2C_MGMT_HALF _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
-#define S_L2C_MGMT_WAY 17
-#define M_L2C_MGMT_WAY _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
-#define V_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
-#define G_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
+#define S_L2C_MGMT_WAY 17
+#define M_L2C_MGMT_WAY _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
+#define V_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
+#define G_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
-#define S_L2C_MGMT_ECC_DIAG 21
-#define M_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
-#define V_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
-#define G_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
+#define S_L2C_MGMT_ECC_DIAG 21
+#define M_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
+#define V_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
+#define G_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
-#define S_L2C_MGMT_TAG 23
-#define M_L2C_MGMT_TAG _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
-#define V_L2C_MGMT_TAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
-#define G_L2C_MGMT_TAG(x) _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
+#define S_L2C_MGMT_TAG 23
+#define M_L2C_MGMT_TAG _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
+#define V_L2C_MGMT_TAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
+#define G_L2C_MGMT_TAG(x) _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
-#define M_L2C_MGMT_DIRTY _SB_MAKEMASK1(19)
-#define M_L2C_MGMT_VALID _SB_MAKEMASK1(20)
+#define M_L2C_MGMT_DIRTY _SB_MAKEMASK1(19)
+#define M_L2C_MGMT_VALID _SB_MAKEMASK1(20)
-#define A_L2C_MGMT_TAG_BASE 0x00D0000000
+#define A_L2C_MGMT_TAG_BASE 0x00D0000000
-#define L2C_ENTRIES_PER_WAY 4096
-#define L2C_NUM_WAYS 4
+#define L2C_ENTRIES_PER_WAY 4096
+#define L2C_NUM_WAYS 4
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
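(Editorial note, not part of the patch: the L2C management-address fields above are typically OR'd onto A_L2C_MGMT_TAG_BASE to address one tag entry. A hedged sketch; the helper name is an assumption, the macros are the ones realigned in this hunk.)

#include <linux/types.h>
#include <asm/sibyte/sb1250_l2c.h>

/* Illustrative only: management-space address selecting one tag entry
 * in the given way at the given index. */
static inline u64 l2c_mgmt_addr_sketch(unsigned int way, unsigned int index)
{
	return A_L2C_MGMT_TAG_BASE |
	       V_L2C_MGMT_WAY(way) |
	       V_L2C_MGMT_INDEX(index);
}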
diff --git a/arch/mips/include/asm/sibyte/sb1250_ldt.h b/arch/mips/include/asm/sibyte/sb1250_ldt.h
index bf7f320d1a87..2340c29dc0c7 100644
--- a/arch/mips/include/asm/sibyte/sb1250_ldt.h
+++ b/arch/mips/include/asm/sibyte/sb1250_ldt.h
@@ -66,7 +66,7 @@
#define R_LDT_TYPE1_SRICMD 0x0050
#define R_LDT_TYPE1_SRITXNUM 0x0054
#define R_LDT_TYPE1_SRIRXNUM 0x0058
-#define R_LDT_TYPE1_ERRSTATUS 0x0068
+#define R_LDT_TYPE1_ERRSTATUS 0x0068
#define R_LDT_TYPE1_SRICTRL 0x006C
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define R_LDT_TYPE1_ADDSTATUS 0x0070
@@ -258,7 +258,7 @@
#define M_LDT_LINKCTRL_DWFCOUT_EN _SB_MAKEMASK1_32(31)
/*
- * LDT Link frequency register (Table 8-20) offset 0x48
+ * LDT Link frequency register (Table 8-20) offset 0x48
*/
#define S_LDT_LINKFREQ_FREQ 8
@@ -301,8 +301,8 @@
#define S_LDT_SRICMD_TXINITIALOFFSET 28
#define M_LDT_SRICMD_TXINITIALOFFSET _SB_MAKEMASK_32(3, S_LDT_SRICMD_TXINITIALOFFSET)
-#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
-#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
+#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
+#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
#define M_LDT_SRICMD_LINKFREQDIRECT _SB_MAKEMASK1_32(31)
@@ -318,16 +318,16 @@
#define M_LDT_ERRCTL_OVFSYNCFLOOD_EN _SB_MAKEMASK1_32(5)
#define M_LDT_ERRCTL_EOCNXAFATAL_EN _SB_MAKEMASK1_32(6)
#define M_LDT_ERRCTL_EOCNXANONFATAL_EN _SB_MAKEMASK1_32(7)
-#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
+#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
#define M_LDT_ERRCTL_CRCFATAL_EN _SB_MAKEMASK1_32(9)
#define M_LDT_ERRCTL_CRCNONFATAL_EN _SB_MAKEMASK1_32(10)
#define M_LDT_ERRCTL_SERRFATAL_EN _SB_MAKEMASK1_32(11)
#define M_LDT_ERRCTL_SRCTAGFATAL_EN _SB_MAKEMASK1_32(12)
#define M_LDT_ERRCTL_SRCTAGNONFATAL_EN _SB_MAKEMASK1_32(13)
-#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
+#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
#define M_LDT_ERRCTL_MAPNXAFATAL_EN _SB_MAKEMASK1_32(15)
#define M_LDT_ERRCTL_MAPNXANONFATAL_EN _SB_MAKEMASK1_32(16)
-#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
+#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
#define M_LDT_ERRCTL_PROTOERR _SB_MAKEMASK1_32(24)
#define M_LDT_ERRCTL_OVFERR _SB_MAKEMASK1_32(25)
diff --git a/arch/mips/include/asm/sibyte/sb1250_mac.h b/arch/mips/include/asm/sibyte/sb1250_mac.h
index cfc4d7870882..3fa94fc74042 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mac.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mac.h
@@ -47,86 +47,86 @@
*/
-#define M_MAC_RESERVED0 _SB_MAKEMASK1(0)
-#define M_MAC_TX_HOLD_SOP_EN _SB_MAKEMASK1(1)
-#define M_MAC_RETRY_EN _SB_MAKEMASK1(2)
-#define M_MAC_RET_DRPREQ_EN _SB_MAKEMASK1(3)
-#define M_MAC_RET_UFL_EN _SB_MAKEMASK1(4)
-#define M_MAC_BURST_EN _SB_MAKEMASK1(5)
-
-#define S_MAC_TX_PAUSE _SB_MAKE64(6)
-#define M_MAC_TX_PAUSE_CNT _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
-#define V_MAC_TX_PAUSE_CNT(x) _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
-
-#define K_MAC_TX_PAUSE_CNT_512 0
-#define K_MAC_TX_PAUSE_CNT_1K 1
-#define K_MAC_TX_PAUSE_CNT_2K 2
-#define K_MAC_TX_PAUSE_CNT_4K 3
-#define K_MAC_TX_PAUSE_CNT_8K 4
-#define K_MAC_TX_PAUSE_CNT_16K 5
-#define K_MAC_TX_PAUSE_CNT_32K 6
-#define K_MAC_TX_PAUSE_CNT_64K 7
-
-#define V_MAC_TX_PAUSE_CNT_512 V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
-#define V_MAC_TX_PAUSE_CNT_1K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
-#define V_MAC_TX_PAUSE_CNT_2K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
-#define V_MAC_TX_PAUSE_CNT_4K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
-#define V_MAC_TX_PAUSE_CNT_8K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
-#define V_MAC_TX_PAUSE_CNT_16K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
-#define V_MAC_TX_PAUSE_CNT_32K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
-#define V_MAC_TX_PAUSE_CNT_64K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
-
-#define M_MAC_RESERVED1 _SB_MAKEMASK(8, 9)
-
-#define M_MAC_AP_STAT_EN _SB_MAKEMASK1(17)
+#define M_MAC_RESERVED0 _SB_MAKEMASK1(0)
+#define M_MAC_TX_HOLD_SOP_EN _SB_MAKEMASK1(1)
+#define M_MAC_RETRY_EN _SB_MAKEMASK1(2)
+#define M_MAC_RET_DRPREQ_EN _SB_MAKEMASK1(3)
+#define M_MAC_RET_UFL_EN _SB_MAKEMASK1(4)
+#define M_MAC_BURST_EN _SB_MAKEMASK1(5)
+
+#define S_MAC_TX_PAUSE _SB_MAKE64(6)
+#define M_MAC_TX_PAUSE_CNT _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
+#define V_MAC_TX_PAUSE_CNT(x) _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
+
+#define K_MAC_TX_PAUSE_CNT_512 0
+#define K_MAC_TX_PAUSE_CNT_1K 1
+#define K_MAC_TX_PAUSE_CNT_2K 2
+#define K_MAC_TX_PAUSE_CNT_4K 3
+#define K_MAC_TX_PAUSE_CNT_8K 4
+#define K_MAC_TX_PAUSE_CNT_16K 5
+#define K_MAC_TX_PAUSE_CNT_32K 6
+#define K_MAC_TX_PAUSE_CNT_64K 7
+
+#define V_MAC_TX_PAUSE_CNT_512 V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
+#define V_MAC_TX_PAUSE_CNT_1K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
+#define V_MAC_TX_PAUSE_CNT_2K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
+#define V_MAC_TX_PAUSE_CNT_4K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
+#define V_MAC_TX_PAUSE_CNT_8K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
+#define V_MAC_TX_PAUSE_CNT_16K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
+#define V_MAC_TX_PAUSE_CNT_32K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
+#define V_MAC_TX_PAUSE_CNT_64K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
+
+#define M_MAC_RESERVED1 _SB_MAKEMASK(8, 9)
+
+#define M_MAC_AP_STAT_EN _SB_MAKEMASK1(17)
#if SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_TIMESTAMP _SB_MAKEMASK1(18)
#endif
-#define M_MAC_DRP_ERRPKT_EN _SB_MAKEMASK1(19)
-#define M_MAC_DRP_FCSERRPKT_EN _SB_MAKEMASK1(20)
-#define M_MAC_DRP_CODEERRPKT_EN _SB_MAKEMASK1(21)
-#define M_MAC_DRP_DRBLERRPKT_EN _SB_MAKEMASK1(22)
-#define M_MAC_DRP_RNTPKT_EN _SB_MAKEMASK1(23)
-#define M_MAC_DRP_OSZPKT_EN _SB_MAKEMASK1(24)
-#define M_MAC_DRP_LENERRPKT_EN _SB_MAKEMASK1(25)
+#define M_MAC_DRP_ERRPKT_EN _SB_MAKEMASK1(19)
+#define M_MAC_DRP_FCSERRPKT_EN _SB_MAKEMASK1(20)
+#define M_MAC_DRP_CODEERRPKT_EN _SB_MAKEMASK1(21)
+#define M_MAC_DRP_DRBLERRPKT_EN _SB_MAKEMASK1(22)
+#define M_MAC_DRP_RNTPKT_EN _SB_MAKEMASK1(23)
+#define M_MAC_DRP_OSZPKT_EN _SB_MAKEMASK1(24)
+#define M_MAC_DRP_LENERRPKT_EN _SB_MAKEMASK1(25)
-#define M_MAC_RESERVED3 _SB_MAKEMASK(6, 26)
+#define M_MAC_RESERVED3 _SB_MAKEMASK(6, 26)
-#define M_MAC_BYPASS_SEL _SB_MAKEMASK1(32)
-#define M_MAC_HDX_EN _SB_MAKEMASK1(33)
+#define M_MAC_BYPASS_SEL _SB_MAKEMASK1(32)
+#define M_MAC_HDX_EN _SB_MAKEMASK1(33)
-#define S_MAC_SPEED_SEL _SB_MAKE64(34)
-#define M_MAC_SPEED_SEL _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
+#define S_MAC_SPEED_SEL _SB_MAKE64(34)
+#define M_MAC_SPEED_SEL _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
#define V_MAC_SPEED_SEL(x) _SB_MAKEVALUE(x, S_MAC_SPEED_SEL)
#define G_MAC_SPEED_SEL(x) _SB_GETVALUE(x, S_MAC_SPEED_SEL, M_MAC_SPEED_SEL)
-#define K_MAC_SPEED_SEL_10MBPS 0
-#define K_MAC_SPEED_SEL_100MBPS 1
+#define K_MAC_SPEED_SEL_10MBPS 0
+#define K_MAC_SPEED_SEL_100MBPS 1
#define K_MAC_SPEED_SEL_1000MBPS 2
#define K_MAC_SPEED_SEL_RESERVED 3
-#define V_MAC_SPEED_SEL_10MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
-#define V_MAC_SPEED_SEL_100MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
+#define V_MAC_SPEED_SEL_10MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
+#define V_MAC_SPEED_SEL_100MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
#define V_MAC_SPEED_SEL_1000MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_1000MBPS)
#define V_MAC_SPEED_SEL_RESERVED V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_RESERVED)
-#define M_MAC_TX_CLK_EDGE_SEL _SB_MAKEMASK1(36)
-#define M_MAC_LOOPBACK_SEL _SB_MAKEMASK1(37)
-#define M_MAC_FAST_SYNC _SB_MAKEMASK1(38)
-#define M_MAC_SS_EN _SB_MAKEMASK1(39)
+#define M_MAC_TX_CLK_EDGE_SEL _SB_MAKEMASK1(36)
+#define M_MAC_LOOPBACK_SEL _SB_MAKEMASK1(37)
+#define M_MAC_FAST_SYNC _SB_MAKEMASK1(38)
+#define M_MAC_SS_EN _SB_MAKEMASK1(39)
#define S_MAC_BYPASS_CFG _SB_MAKE64(40)
-#define M_MAC_BYPASS_CFG _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
-#define V_MAC_BYPASS_CFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
-#define G_MAC_BYPASS_CFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
+#define M_MAC_BYPASS_CFG _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
+#define V_MAC_BYPASS_CFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
+#define G_MAC_BYPASS_CFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
#define K_MAC_BYPASS_GMII 0
-#define K_MAC_BYPASS_ENCODED 1
-#define K_MAC_BYPASS_SOP 2
-#define K_MAC_BYPASS_EOP 3
+#define K_MAC_BYPASS_ENCODED 1
+#define K_MAC_BYPASS_SOP 2
+#define K_MAC_BYPASS_EOP 3
-#define M_MAC_BYPASS_16 _SB_MAKEMASK1(42)
+#define M_MAC_BYPASS_16 _SB_MAKEMASK1(42)
#define M_MAC_BYPASS_FCS_CHK _SB_MAKEMASK1(43)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -137,30 +137,30 @@
#define M_MAC_SPLIT_CH_SEL _SB_MAKEMASK1(45)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_MAC_BYPASS_IFG _SB_MAKE64(46)
-#define M_MAC_BYPASS_IFG _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
+#define S_MAC_BYPASS_IFG _SB_MAKE64(46)
+#define M_MAC_BYPASS_IFG _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
#define V_MAC_BYPASS_IFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_IFG)
#define G_MAC_BYPASS_IFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_IFG, M_MAC_BYPASS_IFG)
-#define K_MAC_FC_CMD_DISABLED 0
-#define K_MAC_FC_CMD_ENABLED 1
+#define K_MAC_FC_CMD_DISABLED 0
+#define K_MAC_FC_CMD_ENABLED 1
#define K_MAC_FC_CMD_ENAB_FALSECARR 2
-#define V_MAC_FC_CMD_DISABLED V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
-#define V_MAC_FC_CMD_ENABLED V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
+#define V_MAC_FC_CMD_DISABLED V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
+#define V_MAC_FC_CMD_ENABLED V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
#define V_MAC_FC_CMD_ENAB_FALSECARR V_MAC_FC_CMD(K_MAC_FC_CMD_ENAB_FALSECARR)
-#define M_MAC_FC_SEL _SB_MAKEMASK1(54)
+#define M_MAC_FC_SEL _SB_MAKEMASK1(54)
-#define S_MAC_FC_CMD _SB_MAKE64(55)
-#define M_MAC_FC_CMD _SB_MAKEMASK(2, S_MAC_FC_CMD)
-#define V_MAC_FC_CMD(x) _SB_MAKEVALUE(x, S_MAC_FC_CMD)
-#define G_MAC_FC_CMD(x) _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
+#define S_MAC_FC_CMD _SB_MAKE64(55)
+#define M_MAC_FC_CMD _SB_MAKEMASK(2, S_MAC_FC_CMD)
+#define V_MAC_FC_CMD(x) _SB_MAKEVALUE(x, S_MAC_FC_CMD)
+#define G_MAC_FC_CMD(x) _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
-#define S_MAC_RX_CH_SEL _SB_MAKE64(57)
-#define M_MAC_RX_CH_SEL _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
-#define V_MAC_RX_CH_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
-#define G_MAC_RX_CH_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
+#define S_MAC_RX_CH_SEL _SB_MAKE64(57)
+#define M_MAC_RX_CH_SEL _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
+#define V_MAC_RX_CH_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
+#define G_MAC_RX_CH_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
/*
@@ -170,18 +170,18 @@
* Register: MAC_ENABLE_2
*/
-#define M_MAC_RXDMA_EN0 _SB_MAKEMASK1(0)
-#define M_MAC_RXDMA_EN1 _SB_MAKEMASK1(1)
-#define M_MAC_TXDMA_EN0 _SB_MAKEMASK1(4)
-#define M_MAC_TXDMA_EN1 _SB_MAKEMASK1(5)
+#define M_MAC_RXDMA_EN0 _SB_MAKEMASK1(0)
+#define M_MAC_RXDMA_EN1 _SB_MAKEMASK1(1)
+#define M_MAC_TXDMA_EN0 _SB_MAKEMASK1(4)
+#define M_MAC_TXDMA_EN1 _SB_MAKEMASK1(5)
-#define M_MAC_PORT_RESET _SB_MAKEMASK1(8)
+#define M_MAC_PORT_RESET _SB_MAKEMASK1(8)
#if (SIBYTE_HDR_FEATURE_CHIP(1250) || SIBYTE_HDR_FEATURE_CHIP(112x))
-#define M_MAC_RX_ENABLE _SB_MAKEMASK1(10)
-#define M_MAC_TX_ENABLE _SB_MAKEMASK1(11)
-#define M_MAC_BYP_RX_ENABLE _SB_MAKEMASK1(12)
-#define M_MAC_BYP_TX_ENABLE _SB_MAKEMASK1(13)
+#define M_MAC_RX_ENABLE _SB_MAKEMASK1(10)
+#define M_MAC_TX_ENABLE _SB_MAKEMASK1(11)
+#define M_MAC_BYP_RX_ENABLE _SB_MAKEMASK1(12)
+#define M_MAC_BYP_TX_ENABLE _SB_MAKEMASK1(13)
#endif
/*
@@ -203,13 +203,13 @@
#define S_MAC_TXD_WEIGHT0 _SB_MAKE64(0)
#define M_MAC_TXD_WEIGHT0 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT0)
-#define V_MAC_TXD_WEIGHT0(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
-#define G_MAC_TXD_WEIGHT0(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
+#define V_MAC_TXD_WEIGHT0(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
+#define G_MAC_TXD_WEIGHT0(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
#define S_MAC_TXD_WEIGHT1 _SB_MAKE64(4)
#define M_MAC_TXD_WEIGHT1 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT1)
-#define V_MAC_TXD_WEIGHT1(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
-#define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
+#define V_MAC_TXD_WEIGHT1(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
+#define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
/*
* MAC Fifo Threshold registers (Table 9-14)
@@ -218,53 +218,53 @@
* Register: MAC_THRSH_CFG_2
*/
-#define S_MAC_TX_WR_THRSH _SB_MAKE64(0)
+#define S_MAC_TX_WR_THRSH _SB_MAKE64(0)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
-/* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
#endif /* up to 1250 PASS1 */
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
+#define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
-#define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
+#define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
+#define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
-#define S_MAC_TX_RD_THRSH _SB_MAKE64(8)
+#define S_MAC_TX_RD_THRSH _SB_MAKE64(8)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
-/* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
#endif /* up to 1250 PASS1 */
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
+#define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
-#define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
+#define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
+#define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
-#define S_MAC_TX_RL_THRSH _SB_MAKE64(16)
-#define M_MAC_TX_RL_THRSH _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
-#define V_MAC_TX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
-#define G_MAC_TX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
+#define S_MAC_TX_RL_THRSH _SB_MAKE64(16)
+#define M_MAC_TX_RL_THRSH _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
+#define V_MAC_TX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
+#define G_MAC_TX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
-#define S_MAC_RX_PL_THRSH _SB_MAKE64(24)
-#define M_MAC_RX_PL_THRSH _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
-#define V_MAC_RX_PL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
-#define G_MAC_RX_PL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
+#define S_MAC_RX_PL_THRSH _SB_MAKE64(24)
+#define M_MAC_RX_PL_THRSH _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
+#define V_MAC_RX_PL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
+#define G_MAC_RX_PL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
-#define S_MAC_RX_RD_THRSH _SB_MAKE64(32)
-#define M_MAC_RX_RD_THRSH _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
-#define V_MAC_RX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
-#define G_MAC_RX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
+#define S_MAC_RX_RD_THRSH _SB_MAKE64(32)
+#define M_MAC_RX_RD_THRSH _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
+#define V_MAC_RX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
+#define G_MAC_RX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
-#define S_MAC_RX_RL_THRSH _SB_MAKE64(40)
-#define M_MAC_RX_RL_THRSH _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
-#define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
-#define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
+#define S_MAC_RX_RL_THRSH _SB_MAKE64(40)
+#define M_MAC_RX_RL_THRSH _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
+#define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
+#define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_ENC_FC_THRSH _SB_MAKE64(56)
-#define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
-#define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
-#define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
+#define S_MAC_ENC_FC_THRSH _SB_MAKE64(56)
+#define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
+#define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
+#define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
/*
@@ -275,79 +275,79 @@
*/
/* XXXCGD: ??? Unused in pass2? */
-#define S_MAC_IFG_RX _SB_MAKE64(0)
-#define M_MAC_IFG_RX _SB_MAKEMASK(6, S_MAC_IFG_RX)
-#define V_MAC_IFG_RX(x) _SB_MAKEVALUE(x, S_MAC_IFG_RX)
-#define G_MAC_IFG_RX(x) _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
+#define S_MAC_IFG_RX _SB_MAKE64(0)
+#define M_MAC_IFG_RX _SB_MAKEMASK(6, S_MAC_IFG_RX)
+#define V_MAC_IFG_RX(x) _SB_MAKEVALUE(x, S_MAC_IFG_RX)
+#define G_MAC_IFG_RX(x) _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_PRE_LEN _SB_MAKE64(0)
-#define M_MAC_PRE_LEN _SB_MAKEMASK(6, S_MAC_PRE_LEN)
-#define V_MAC_PRE_LEN(x) _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
-#define G_MAC_PRE_LEN(x) _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
+#define S_MAC_PRE_LEN _SB_MAKE64(0)
+#define M_MAC_PRE_LEN _SB_MAKEMASK(6, S_MAC_PRE_LEN)
+#define V_MAC_PRE_LEN(x) _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
+#define G_MAC_PRE_LEN(x) _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_MAC_IFG_TX _SB_MAKE64(6)
-#define M_MAC_IFG_TX _SB_MAKEMASK(6, S_MAC_IFG_TX)
-#define V_MAC_IFG_TX(x) _SB_MAKEVALUE(x, S_MAC_IFG_TX)
-#define G_MAC_IFG_TX(x) _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
-
-#define S_MAC_IFG_THRSH _SB_MAKE64(12)
-#define M_MAC_IFG_THRSH _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
-#define V_MAC_IFG_THRSH(x) _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
-#define G_MAC_IFG_THRSH(x) _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
-
-#define S_MAC_BACKOFF_SEL _SB_MAKE64(18)
-#define M_MAC_BACKOFF_SEL _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
-#define V_MAC_BACKOFF_SEL(x) _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
-#define G_MAC_BACKOFF_SEL(x) _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
-
-#define S_MAC_LFSR_SEED _SB_MAKE64(22)
-#define M_MAC_LFSR_SEED _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
-#define V_MAC_LFSR_SEED(x) _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
-#define G_MAC_LFSR_SEED(x) _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
-
-#define S_MAC_SLOT_SIZE _SB_MAKE64(30)
-#define M_MAC_SLOT_SIZE _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
-#define V_MAC_SLOT_SIZE(x) _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
-#define G_MAC_SLOT_SIZE(x) _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
-
-#define S_MAC_MIN_FRAMESZ _SB_MAKE64(40)
-#define M_MAC_MIN_FRAMESZ _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
-#define V_MAC_MIN_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
-#define G_MAC_MIN_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
-
-#define S_MAC_MAX_FRAMESZ _SB_MAKE64(48)
-#define M_MAC_MAX_FRAMESZ _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
-#define V_MAC_MAX_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
-#define G_MAC_MAX_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
+#define S_MAC_IFG_TX _SB_MAKE64(6)
+#define M_MAC_IFG_TX _SB_MAKEMASK(6, S_MAC_IFG_TX)
+#define V_MAC_IFG_TX(x) _SB_MAKEVALUE(x, S_MAC_IFG_TX)
+#define G_MAC_IFG_TX(x) _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
+
+#define S_MAC_IFG_THRSH _SB_MAKE64(12)
+#define M_MAC_IFG_THRSH _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
+#define V_MAC_IFG_THRSH(x) _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
+#define G_MAC_IFG_THRSH(x) _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
+
+#define S_MAC_BACKOFF_SEL _SB_MAKE64(18)
+#define M_MAC_BACKOFF_SEL _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
+#define V_MAC_BACKOFF_SEL(x) _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
+#define G_MAC_BACKOFF_SEL(x) _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
+
+#define S_MAC_LFSR_SEED _SB_MAKE64(22)
+#define M_MAC_LFSR_SEED _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
+#define V_MAC_LFSR_SEED(x) _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
+#define G_MAC_LFSR_SEED(x) _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
+
+#define S_MAC_SLOT_SIZE _SB_MAKE64(30)
+#define M_MAC_SLOT_SIZE _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
+#define V_MAC_SLOT_SIZE(x) _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
+#define G_MAC_SLOT_SIZE(x) _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
+
+#define S_MAC_MIN_FRAMESZ _SB_MAKE64(40)
+#define M_MAC_MIN_FRAMESZ _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
+#define V_MAC_MIN_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
+#define G_MAC_MIN_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
+
+#define S_MAC_MAX_FRAMESZ _SB_MAKE64(48)
+#define M_MAC_MAX_FRAMESZ _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
+#define V_MAC_MAX_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
+#define G_MAC_MAX_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
/*
* These constants are used to configure the fields within the Frame
* Configuration Register.
*/
-#define K_MAC_IFG_RX_10 _SB_MAKE64(0) /* See table 176, not used */
-#define K_MAC_IFG_RX_100 _SB_MAKE64(0)
-#define K_MAC_IFG_RX_1000 _SB_MAKE64(0)
+#define K_MAC_IFG_RX_10 _SB_MAKE64(0) /* See table 176, not used */
+#define K_MAC_IFG_RX_100 _SB_MAKE64(0)
+#define K_MAC_IFG_RX_1000 _SB_MAKE64(0)
-#define K_MAC_IFG_TX_10 _SB_MAKE64(20)
-#define K_MAC_IFG_TX_100 _SB_MAKE64(20)
-#define K_MAC_IFG_TX_1000 _SB_MAKE64(8)
+#define K_MAC_IFG_TX_10 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_100 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_1000 _SB_MAKE64(8)
-#define K_MAC_IFG_THRSH_10 _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_100 _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_1000 _SB_MAKE64(0)
+#define K_MAC_IFG_THRSH_10 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_100 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_1000 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_10 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_100 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_1000 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_10 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_100 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_1000 _SB_MAKE64(0)
-#define V_MAC_IFG_RX_10 V_MAC_IFG_RX(K_MAC_IFG_RX_10)
+#define V_MAC_IFG_RX_10 V_MAC_IFG_RX(K_MAC_IFG_RX_10)
#define V_MAC_IFG_RX_100 V_MAC_IFG_RX(K_MAC_IFG_RX_100)
#define V_MAC_IFG_RX_1000 V_MAC_IFG_RX(K_MAC_IFG_RX_1000)
-#define V_MAC_IFG_TX_10 V_MAC_IFG_TX(K_MAC_IFG_TX_10)
+#define V_MAC_IFG_TX_10 V_MAC_IFG_TX(K_MAC_IFG_TX_10)
#define V_MAC_IFG_TX_100 V_MAC_IFG_TX(K_MAC_IFG_TX_100)
#define V_MAC_IFG_TX_1000 V_MAC_IFG_TX(K_MAC_IFG_TX_1000)
@@ -359,15 +359,15 @@
#define V_MAC_SLOT_SIZE_100 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_100)
#define V_MAC_SLOT_SIZE_1000 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_1000)
-#define K_MAC_MIN_FRAMESZ_FIFO _SB_MAKE64(9)
+#define K_MAC_MIN_FRAMESZ_FIFO _SB_MAKE64(9)
#define K_MAC_MIN_FRAMESZ_DEFAULT _SB_MAKE64(64)
#define K_MAC_MAX_FRAMESZ_DEFAULT _SB_MAKE64(1518)
-#define K_MAC_MAX_FRAMESZ_JUMBO _SB_MAKE64(9216)
+#define K_MAC_MAX_FRAMESZ_JUMBO _SB_MAKE64(9216)
-#define V_MAC_MIN_FRAMESZ_FIFO V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
+#define V_MAC_MIN_FRAMESZ_FIFO V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
#define V_MAC_MIN_FRAMESZ_DEFAULT V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_DEFAULT)
#define V_MAC_MAX_FRAMESZ_DEFAULT V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_DEFAULT)
-#define V_MAC_MAX_FRAMESZ_JUMBO V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
+#define V_MAC_MAX_FRAMESZ_JUMBO V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
/*
* MAC VLAN Tag Registers (Table 9-16)
@@ -376,23 +376,23 @@
* Register: MAC_VLANTAG_2
*/
-#define S_MAC_VLAN_TAG _SB_MAKE64(0)
-#define M_MAC_VLAN_TAG _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
-#define V_MAC_VLAN_TAG(x) _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
-#define G_MAC_VLAN_TAG(x) _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
+#define S_MAC_VLAN_TAG _SB_MAKE64(0)
+#define M_MAC_VLAN_TAG _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
+#define V_MAC_VLAN_TAG(x) _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
+#define G_MAC_VLAN_TAG(x) _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define S_MAC_TX_PKT_OFFSET _SB_MAKE64(32)
-#define M_MAC_TX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
-#define V_MAC_TX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
-#define G_MAC_TX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
+#define S_MAC_TX_PKT_OFFSET _SB_MAKE64(32)
+#define M_MAC_TX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
+#define V_MAC_TX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
+#define G_MAC_TX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
-#define S_MAC_TX_CRC_OFFSET _SB_MAKE64(40)
-#define M_MAC_TX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
-#define V_MAC_TX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
-#define G_MAC_TX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
+#define S_MAC_TX_CRC_OFFSET _SB_MAKE64(40)
+#define M_MAC_TX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
+#define V_MAC_TX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
+#define G_MAC_TX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
-#define M_MAC_CH_BASE_FC_EN _SB_MAKEMASK1(48)
+#define M_MAC_CH_BASE_FC_EN _SB_MAKEMASK1(48)
#endif /* 1250 PASS3 || 112x PASS1 */
/*
@@ -412,29 +412,29 @@
* on each channel.
*/
-#define S_MAC_RX_CH0 _SB_MAKE64(0)
-#define S_MAC_RX_CH1 _SB_MAKE64(8)
-#define S_MAC_TX_CH0 _SB_MAKE64(16)
-#define S_MAC_TX_CH1 _SB_MAKE64(24)
+#define S_MAC_RX_CH0 _SB_MAKE64(0)
+#define S_MAC_RX_CH1 _SB_MAKE64(8)
+#define S_MAC_TX_CH0 _SB_MAKE64(16)
+#define S_MAC_TX_CH1 _SB_MAKE64(24)
#define S_MAC_TXCHANNELS _SB_MAKE64(16) /* this is 1st TX chan */
-#define S_MAC_CHANWIDTH _SB_MAKE64(8) /* bits between channels */
+#define S_MAC_CHANWIDTH _SB_MAKE64(8) /* bits between channels */
/*
- * These are the same as RX channel 0. The idea here
+ * These are the same as RX channel 0. The idea here
* is that you'll use one of the "S_" things above
* and pass just the six bits to a DMA-channel-specific ISR
*/
-#define M_MAC_INT_CHANNEL _SB_MAKEMASK(8, 0)
-#define M_MAC_INT_EOP_COUNT _SB_MAKEMASK1(0)
-#define M_MAC_INT_EOP_TIMER _SB_MAKEMASK1(1)
-#define M_MAC_INT_EOP_SEEN _SB_MAKEMASK1(2)
-#define M_MAC_INT_HWM _SB_MAKEMASK1(3)
-#define M_MAC_INT_LWM _SB_MAKEMASK1(4)
-#define M_MAC_INT_DSCR _SB_MAKEMASK1(5)
-#define M_MAC_INT_ERR _SB_MAKEMASK1(6)
-#define M_MAC_INT_DZERO _SB_MAKEMASK1(7) /* only for TX channels */
-#define M_MAC_INT_DROP _SB_MAKEMASK1(7) /* only for RX channels */
+#define M_MAC_INT_CHANNEL _SB_MAKEMASK(8, 0)
+#define M_MAC_INT_EOP_COUNT _SB_MAKEMASK1(0)
+#define M_MAC_INT_EOP_TIMER _SB_MAKEMASK1(1)
+#define M_MAC_INT_EOP_SEEN _SB_MAKEMASK1(2)
+#define M_MAC_INT_HWM _SB_MAKEMASK1(3)
+#define M_MAC_INT_LWM _SB_MAKEMASK1(4)
+#define M_MAC_INT_DSCR _SB_MAKEMASK1(5)
+#define M_MAC_INT_ERR _SB_MAKEMASK1(6)
+#define M_MAC_INT_DZERO _SB_MAKEMASK1(7) /* only for TX channels */
+#define M_MAC_INT_DROP _SB_MAKEMASK1(7) /* only for RX channels */
/*
* In the following definitions we use ch (0/1) and txrx (TX=1, RX=0, see
@@ -442,34 +442,34 @@
*/
#define S_MAC_STATUS_CH_OFFSET(ch, txrx) _SB_MAKE64(((ch) + 2 * (txrx)) * S_MAC_CHANWIDTH)
-#define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
#define M_MAC_STATUS_EOP_COUNT(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_COUNT, S_MAC_STATUS_CH_OFFSET(ch, txrx))
#define M_MAC_STATUS_EOP_TIMER(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_TIMER, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_OTHER_ERR _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
-
-
-#define M_MAC_RX_UNDRFL _SB_MAKEMASK1(40)
-#define M_MAC_RX_OVRFL _SB_MAKEMASK1(41)
-#define M_MAC_TX_UNDRFL _SB_MAKEMASK1(42)
-#define M_MAC_TX_OVRFL _SB_MAKEMASK1(43)
-#define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44)
-#define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45)
-#define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46)
+#define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_OTHER_ERR _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
+
+
+#define M_MAC_RX_UNDRFL _SB_MAKEMASK1(40)
+#define M_MAC_RX_OVRFL _SB_MAKEMASK1(41)
+#define M_MAC_TX_UNDRFL _SB_MAKEMASK1(42)
+#define M_MAC_TX_OVRFL _SB_MAKEMASK1(43)
+#define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44)
+#define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45)
+#define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */
+#define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define S_MAC_COUNTER_ADDR _SB_MAKE64(47)
-#define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
-#define V_MAC_COUNTER_ADDR(x) _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
-#define G_MAC_COUNTER_ADDR(x) _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
+#define S_MAC_COUNTER_ADDR _SB_MAKE64(47)
+#define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
+#define V_MAC_COUNTER_ADDR(x) _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
+#define G_MAC_COUNTER_ADDR(x) _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_TX_PAUSE_ON _SB_MAKEMASK1(52)
@@ -482,42 +482,42 @@
* Register: MAC_FIFO_PTRS_2
*/
-#define S_MAC_TX_WRPTR _SB_MAKE64(0)
-#define M_MAC_TX_WRPTR _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
-#define V_MAC_TX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
-#define G_MAC_TX_WRPTR(x) _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_TX_WRPTR _SB_MAKE64(0)
+#define M_MAC_TX_WRPTR _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
+#define V_MAC_TX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
+#define G_MAC_TX_WRPTR(x) _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
-#define S_MAC_TX_RDPTR _SB_MAKE64(8)
-#define M_MAC_TX_RDPTR _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
-#define V_MAC_TX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
-#define G_MAC_TX_RDPTR(x) _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_TX_RDPTR _SB_MAKE64(8)
+#define M_MAC_TX_RDPTR _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
+#define V_MAC_TX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
+#define G_MAC_TX_RDPTR(x) _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
-#define S_MAC_RX_WRPTR _SB_MAKE64(16)
-#define M_MAC_RX_WRPTR _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
-#define V_MAC_RX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
-#define G_MAC_RX_WRPTR(x) _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_RX_WRPTR _SB_MAKE64(16)
+#define M_MAC_RX_WRPTR _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
+#define V_MAC_RX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
+#define G_MAC_RX_WRPTR(x) _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
-#define S_MAC_RX_RDPTR _SB_MAKE64(24)
-#define M_MAC_RX_RDPTR _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
-#define V_MAC_RX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
-#define G_MAC_RX_RDPTR(x) _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_RX_RDPTR _SB_MAKE64(24)
+#define M_MAC_RX_RDPTR _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
+#define V_MAC_RX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
+#define G_MAC_RX_RDPTR(x) _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
/*
- * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
+ * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
* Register: MAC_EOPCNT_0
* Register: MAC_EOPCNT_1
* Register: MAC_EOPCNT_2
*/
-#define S_MAC_TX_EOP_COUNTER _SB_MAKE64(0)
-#define M_MAC_TX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
-#define V_MAC_TX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
-#define G_MAC_TX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
+#define S_MAC_TX_EOP_COUNTER _SB_MAKE64(0)
+#define M_MAC_TX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
+#define V_MAC_TX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
+#define G_MAC_TX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
-#define S_MAC_RX_EOP_COUNTER _SB_MAKE64(8)
-#define M_MAC_RX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
-#define V_MAC_RX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
-#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
+#define S_MAC_RX_EOP_COUNTER _SB_MAKE64(8)
+#define M_MAC_RX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
+#define V_MAC_RX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
+#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
/*
* MAC Receive Address Filter Exact Match Registers (Table 9-21)
@@ -562,27 +562,27 @@
* Register: MAC_TYPE_CFG_2
*/
-#define S_TYPECFG_TYPESIZE _SB_MAKE64(16)
+#define S_TYPECFG_TYPESIZE _SB_MAKE64(16)
#define S_TYPECFG_TYPE0 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE0 _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
-#define V_TYPECFG_TYPE0(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
-#define G_TYPECFG_TYPE0(x) _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
+#define M_TYPECFG_TYPE0 _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
+#define V_TYPECFG_TYPE0(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
+#define G_TYPECFG_TYPE0(x) _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
#define S_TYPECFG_TYPE1 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE1 _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
-#define V_TYPECFG_TYPE1(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
-#define G_TYPECFG_TYPE1(x) _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
+#define M_TYPECFG_TYPE1 _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
+#define V_TYPECFG_TYPE1(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
+#define G_TYPECFG_TYPE1(x) _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
#define S_TYPECFG_TYPE2 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE2 _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
-#define V_TYPECFG_TYPE2(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
-#define G_TYPECFG_TYPE2(x) _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
+#define M_TYPECFG_TYPE2 _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
+#define V_TYPECFG_TYPE2(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
+#define G_TYPECFG_TYPE2(x) _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
#define S_TYPECFG_TYPE3 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE3 _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
-#define V_TYPECFG_TYPE3(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
-#define G_TYPECFG_TYPE3(x) _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
+#define M_TYPECFG_TYPE3 _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
+#define V_TYPECFG_TYPE3(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
+#define G_TYPECFG_TYPE3(x) _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
/*
* MAC Receive Address Filter Control Registers (Table 9-24)
@@ -591,38 +591,38 @@
* Register: MAC_ADFILTER_CFG_2
*/
-#define M_MAC_ALLPKT_EN _SB_MAKEMASK1(0)
-#define M_MAC_UCAST_EN _SB_MAKEMASK1(1)
-#define M_MAC_UCAST_INV _SB_MAKEMASK1(2)
-#define M_MAC_MCAST_EN _SB_MAKEMASK1(3)
-#define M_MAC_MCAST_INV _SB_MAKEMASK1(4)
-#define M_MAC_BCAST_EN _SB_MAKEMASK1(5)
-#define M_MAC_DIRECT_INV _SB_MAKEMASK1(6)
+#define M_MAC_ALLPKT_EN _SB_MAKEMASK1(0)
+#define M_MAC_UCAST_EN _SB_MAKEMASK1(1)
+#define M_MAC_UCAST_INV _SB_MAKEMASK1(2)
+#define M_MAC_MCAST_EN _SB_MAKEMASK1(3)
+#define M_MAC_MCAST_INV _SB_MAKEMASK1(4)
+#define M_MAC_BCAST_EN _SB_MAKEMASK1(5)
+#define M_MAC_DIRECT_INV _SB_MAKEMASK1(6)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_ALLMCAST_EN _SB_MAKEMASK1(7)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define S_MAC_IPHDR_OFFSET _SB_MAKE64(8)
-#define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
+#define S_MAC_IPHDR_OFFSET _SB_MAKE64(8)
+#define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
#define V_MAC_IPHDR_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_IPHDR_OFFSET)
#define G_MAC_IPHDR_OFFSET(x) _SB_GETVALUE(x, S_MAC_IPHDR_OFFSET, M_MAC_IPHDR_OFFSET)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_RX_CRC_OFFSET _SB_MAKE64(16)
-#define M_MAC_RX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
+#define S_MAC_RX_CRC_OFFSET _SB_MAKE64(16)
+#define M_MAC_RX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
#define V_MAC_RX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_CRC_OFFSET)
#define G_MAC_RX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_CRC_OFFSET, M_MAC_RX_CRC_OFFSET)
-#define S_MAC_RX_PKT_OFFSET _SB_MAKE64(24)
-#define M_MAC_RX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
+#define S_MAC_RX_PKT_OFFSET _SB_MAKE64(24)
+#define M_MAC_RX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
#define V_MAC_RX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_PKT_OFFSET)
#define G_MAC_RX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_PKT_OFFSET, M_MAC_RX_PKT_OFFSET)
#define M_MAC_FWDPAUSE_EN _SB_MAKEMASK1(32)
#define M_MAC_VLAN_DET_EN _SB_MAKEMASK1(33)
-#define S_MAC_RX_CH_MSN_SEL _SB_MAKE64(34)
-#define M_MAC_RX_CH_MSN_SEL _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
+#define S_MAC_RX_CH_MSN_SEL _SB_MAKE64(34)
+#define M_MAC_RX_CH_MSN_SEL _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
#define V_MAC_RX_CH_MSN_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_MSN_SEL)
#define G_MAC_RX_CH_MSN_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_MSN_SEL, M_MAC_RX_CH_MSN_SEL)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
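(Editorial note, not part of the patch: the per-channel status masks above are parameterised by (ch, txrx) exactly as the header comment describes, so a DMA-channel ISR usually tests one channel's slice of the status word. A minimal sketch under that assumption; the function and parameter names are illustrative.)

#include <linux/types.h>
#include <asm/sibyte/sb1250_mac.h>

/* Illustrative only: true if channel 0's receive side (txrx = 0, per the
 * header comment) reports an error in the MAC status word. */
static inline int mac_rx_ch0_err_sketch(u64 status)
{
	return (status & M_MAC_STATUS_ERR(0, 0)) != 0;
}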
diff --git a/arch/mips/include/asm/sibyte/sb1250_mc.h b/arch/mips/include/asm/sibyte/sb1250_mc.h
index 15048dcaf22f..8368e411131f 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mc.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mc.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Memory Controller constants File: sb1250_mc.h
+ * Memory Controller constants File: sb1250_mc.h
*
* This module contains constants and macros useful for
* programming the memory controller.
@@ -39,96 +39,96 @@
* Memory Channel Config Register (table 6-14)
*/
-#define S_MC_RESERVED0 0
-#define M_MC_RESERVED0 _SB_MAKEMASK(8, S_MC_RESERVED0)
+#define S_MC_RESERVED0 0
+#define M_MC_RESERVED0 _SB_MAKEMASK(8, S_MC_RESERVED0)
-#define S_MC_CHANNEL_SEL 8
-#define M_MC_CHANNEL_SEL _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
-#define V_MC_CHANNEL_SEL(x) _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
-#define G_MC_CHANNEL_SEL(x) _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
+#define S_MC_CHANNEL_SEL 8
+#define M_MC_CHANNEL_SEL _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
+#define V_MC_CHANNEL_SEL(x) _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
+#define G_MC_CHANNEL_SEL(x) _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
-#define S_MC_BANK0_MAP 16
-#define M_MC_BANK0_MAP _SB_MAKEMASK(4, S_MC_BANK0_MAP)
-#define V_MC_BANK0_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
-#define G_MC_BANK0_MAP(x) _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
+#define S_MC_BANK0_MAP 16
+#define M_MC_BANK0_MAP _SB_MAKEMASK(4, S_MC_BANK0_MAP)
+#define V_MC_BANK0_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
+#define G_MC_BANK0_MAP(x) _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
-#define K_MC_BANK0_MAP_DEFAULT 0x00
-#define V_MC_BANK0_MAP_DEFAULT V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
+#define K_MC_BANK0_MAP_DEFAULT 0x00
+#define V_MC_BANK0_MAP_DEFAULT V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
-#define S_MC_BANK1_MAP 20
-#define M_MC_BANK1_MAP _SB_MAKEMASK(4, S_MC_BANK1_MAP)
-#define V_MC_BANK1_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
-#define G_MC_BANK1_MAP(x) _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
+#define S_MC_BANK1_MAP 20
+#define M_MC_BANK1_MAP _SB_MAKEMASK(4, S_MC_BANK1_MAP)
+#define V_MC_BANK1_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
+#define G_MC_BANK1_MAP(x) _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
-#define K_MC_BANK1_MAP_DEFAULT 0x08
-#define V_MC_BANK1_MAP_DEFAULT V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
+#define K_MC_BANK1_MAP_DEFAULT 0x08
+#define V_MC_BANK1_MAP_DEFAULT V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
-#define S_MC_BANK2_MAP 24
-#define M_MC_BANK2_MAP _SB_MAKEMASK(4, S_MC_BANK2_MAP)
-#define V_MC_BANK2_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
-#define G_MC_BANK2_MAP(x) _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
+#define S_MC_BANK2_MAP 24
+#define M_MC_BANK2_MAP _SB_MAKEMASK(4, S_MC_BANK2_MAP)
+#define V_MC_BANK2_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
+#define G_MC_BANK2_MAP(x) _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
-#define K_MC_BANK2_MAP_DEFAULT 0x09
-#define V_MC_BANK2_MAP_DEFAULT V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
+#define K_MC_BANK2_MAP_DEFAULT 0x09
+#define V_MC_BANK2_MAP_DEFAULT V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
-#define S_MC_BANK3_MAP 28
-#define M_MC_BANK3_MAP _SB_MAKEMASK(4, S_MC_BANK3_MAP)
-#define V_MC_BANK3_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
-#define G_MC_BANK3_MAP(x) _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
+#define S_MC_BANK3_MAP 28
+#define M_MC_BANK3_MAP _SB_MAKEMASK(4, S_MC_BANK3_MAP)
+#define V_MC_BANK3_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
+#define G_MC_BANK3_MAP(x) _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
-#define K_MC_BANK3_MAP_DEFAULT 0x0C
-#define V_MC_BANK3_MAP_DEFAULT V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
+#define K_MC_BANK3_MAP_DEFAULT 0x0C
+#define V_MC_BANK3_MAP_DEFAULT V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
-#define M_MC_RESERVED1 _SB_MAKEMASK(8, 32)
+#define M_MC_RESERVED1 _SB_MAKEMASK(8, 32)
#define S_MC_QUEUE_SIZE 40
-#define M_MC_QUEUE_SIZE _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE(x) _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
-#define G_MC_QUEUE_SIZE(x) _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE_DEFAULT V_MC_QUEUE_SIZE(0x0A)
-
-#define S_MC_AGE_LIMIT 44
-#define M_MC_AGE_LIMIT _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT(x) _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
-#define G_MC_AGE_LIMIT(x) _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT_DEFAULT V_MC_AGE_LIMIT(8)
-
-#define S_MC_WR_LIMIT 48
-#define M_MC_WR_LIMIT _SB_MAKEMASK(4, S_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT(x) _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
-#define G_MC_WR_LIMIT(x) _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT_DEFAULT V_MC_WR_LIMIT(5)
+#define M_MC_QUEUE_SIZE _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE(x) _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
+#define G_MC_QUEUE_SIZE(x) _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE_DEFAULT V_MC_QUEUE_SIZE(0x0A)
+
+#define S_MC_AGE_LIMIT 44
+#define M_MC_AGE_LIMIT _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT(x) _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
+#define G_MC_AGE_LIMIT(x) _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT_DEFAULT V_MC_AGE_LIMIT(8)
+
+#define S_MC_WR_LIMIT 48
+#define M_MC_WR_LIMIT _SB_MAKEMASK(4, S_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT(x) _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
+#define G_MC_WR_LIMIT(x) _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT_DEFAULT V_MC_WR_LIMIT(5)
#define M_MC_IOB1HIGHPRIORITY _SB_MAKEMASK1(52)
-#define M_MC_RESERVED2 _SB_MAKEMASK(3, 53)
+#define M_MC_RESERVED2 _SB_MAKEMASK(3, 53)
-#define S_MC_CS_MODE 56
-#define M_MC_CS_MODE _SB_MAKEMASK(4, S_MC_CS_MODE)
-#define V_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_MC_CS_MODE)
-#define G_MC_CS_MODE(x) _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
+#define S_MC_CS_MODE 56
+#define M_MC_CS_MODE _SB_MAKEMASK(4, S_MC_CS_MODE)
+#define V_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_MC_CS_MODE)
+#define G_MC_CS_MODE(x) _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
-#define K_MC_CS_MODE_MSB_CS 0
-#define K_MC_CS_MODE_INTLV_CS 15
+#define K_MC_CS_MODE_MSB_CS 0
+#define K_MC_CS_MODE_INTLV_CS 15
#define K_MC_CS_MODE_MIXED_CS_10 12
#define K_MC_CS_MODE_MIXED_CS_30 6
#define K_MC_CS_MODE_MIXED_CS_32 3
-#define V_MC_CS_MODE_MSB_CS V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
-#define V_MC_CS_MODE_INTLV_CS V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
+#define V_MC_CS_MODE_MSB_CS V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
+#define V_MC_CS_MODE_INTLV_CS V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
#define V_MC_CS_MODE_MIXED_CS_10 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_10)
#define V_MC_CS_MODE_MIXED_CS_30 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_30)
#define V_MC_CS_MODE_MIXED_CS_32 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_32)
-#define M_MC_ECC_DISABLE _SB_MAKEMASK1(60)
-#define M_MC_BERR_DISABLE _SB_MAKEMASK1(61)
-#define M_MC_FORCE_SEQ _SB_MAKEMASK1(62)
-#define M_MC_DEBUG _SB_MAKEMASK1(63)
+#define M_MC_ECC_DISABLE _SB_MAKEMASK1(60)
+#define M_MC_BERR_DISABLE _SB_MAKEMASK1(61)
+#define M_MC_FORCE_SEQ _SB_MAKEMASK1(62)
+#define M_MC_DEBUG _SB_MAKEMASK1(63)
-#define V_MC_CONFIG_DEFAULT V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
+#define V_MC_CONFIG_DEFAULT V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
V_MC_BANK0_MAP_DEFAULT | V_MC_BANK1_MAP_DEFAULT | \
V_MC_BANK2_MAP_DEFAULT | V_MC_BANK3_MAP_DEFAULT | V_MC_CHANNEL_SEL(0) | \
- M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
+ M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
/*
@@ -137,96 +137,96 @@
* Note: this field has been updated to be consistent with the errata to 0.2
*/
-#define S_MC_CLK_RATIO 0
-#define M_MC_CLK_RATIO _SB_MAKEMASK(4, S_MC_CLK_RATIO)
-#define V_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
-#define G_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
+#define S_MC_CLK_RATIO 0
+#define M_MC_CLK_RATIO _SB_MAKEMASK(4, S_MC_CLK_RATIO)
+#define V_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
+#define G_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
-#define K_MC_CLK_RATIO_2X 4
-#define K_MC_CLK_RATIO_25X 5
-#define K_MC_CLK_RATIO_3X 6
-#define K_MC_CLK_RATIO_35X 7
-#define K_MC_CLK_RATIO_4X 8
+#define K_MC_CLK_RATIO_2X 4
+#define K_MC_CLK_RATIO_25X 5
+#define K_MC_CLK_RATIO_3X 6
+#define K_MC_CLK_RATIO_35X 7
+#define K_MC_CLK_RATIO_4X 8
#define K_MC_CLK_RATIO_45X 9
#define V_MC_CLK_RATIO_2X V_MC_CLK_RATIO(K_MC_CLK_RATIO_2X)
-#define V_MC_CLK_RATIO_25X V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
-#define V_MC_CLK_RATIO_3X V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
-#define V_MC_CLK_RATIO_35X V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
-#define V_MC_CLK_RATIO_4X V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
-#define V_MC_CLK_RATIO_45X V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
-#define V_MC_CLK_RATIO_DEFAULT V_MC_CLK_RATIO_25X
-
-#define S_MC_REF_RATE 8
-#define M_MC_REF_RATE _SB_MAKEMASK(8, S_MC_REF_RATE)
-#define V_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_MC_REF_RATE)
-#define G_MC_REF_RATE(x) _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
-
-#define K_MC_REF_RATE_100MHz 0x62
-#define K_MC_REF_RATE_133MHz 0x81
-#define K_MC_REF_RATE_200MHz 0xC4
-
-#define V_MC_REF_RATE_100MHz V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
-#define V_MC_REF_RATE_133MHz V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
-#define V_MC_REF_RATE_200MHz V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
-#define V_MC_REF_RATE_DEFAULT V_MC_REF_RATE_100MHz
-
-#define S_MC_CLOCK_DRIVE 16
-#define M_MC_CLOCK_DRIVE _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
-#define V_MC_CLOCK_DRIVE(x) _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
-#define G_MC_CLOCK_DRIVE(x) _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
+#define V_MC_CLK_RATIO_25X V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
+#define V_MC_CLK_RATIO_3X V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
+#define V_MC_CLK_RATIO_35X V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
+#define V_MC_CLK_RATIO_4X V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
+#define V_MC_CLK_RATIO_45X V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
+#define V_MC_CLK_RATIO_DEFAULT V_MC_CLK_RATIO_25X
+
+#define S_MC_REF_RATE 8
+#define M_MC_REF_RATE _SB_MAKEMASK(8, S_MC_REF_RATE)
+#define V_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_MC_REF_RATE)
+#define G_MC_REF_RATE(x) _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
+
+#define K_MC_REF_RATE_100MHz 0x62
+#define K_MC_REF_RATE_133MHz 0x81
+#define K_MC_REF_RATE_200MHz 0xC4
+
+#define V_MC_REF_RATE_100MHz V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
+#define V_MC_REF_RATE_133MHz V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
+#define V_MC_REF_RATE_200MHz V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
+#define V_MC_REF_RATE_DEFAULT V_MC_REF_RATE_100MHz
+
+#define S_MC_CLOCK_DRIVE 16
+#define M_MC_CLOCK_DRIVE _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
+#define V_MC_CLOCK_DRIVE(x) _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
+#define G_MC_CLOCK_DRIVE(x) _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
#define V_MC_CLOCK_DRIVE_DEFAULT V_MC_CLOCK_DRIVE(0xF)
-#define S_MC_DATA_DRIVE 20
-#define M_MC_DATA_DRIVE _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE(x) _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
-#define G_MC_DATA_DRIVE(x) _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE_DEFAULT V_MC_DATA_DRIVE(0x0)
+#define S_MC_DATA_DRIVE 20
+#define M_MC_DATA_DRIVE _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE(x) _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
+#define G_MC_DATA_DRIVE(x) _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE_DEFAULT V_MC_DATA_DRIVE(0x0)
-#define S_MC_ADDR_DRIVE 24
-#define M_MC_ADDR_DRIVE _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE(x) _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
-#define G_MC_ADDR_DRIVE(x) _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE_DEFAULT V_MC_ADDR_DRIVE(0x0)
+#define S_MC_ADDR_DRIVE 24
+#define M_MC_ADDR_DRIVE _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE(x) _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
+#define G_MC_ADDR_DRIVE(x) _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE_DEFAULT V_MC_ADDR_DRIVE(0x0)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_REF_DISABLE _SB_MAKEMASK1(30)
+#define M_MC_REF_DISABLE _SB_MAKEMASK1(30)
#endif /* 1250 PASS3 || 112x PASS1 */
-#define M_MC_DLL_BYPASS _SB_MAKEMASK1(31)
-
-#define S_MC_DQI_SKEW 32
-#define M_MC_DQI_SKEW _SB_MAKEMASK(8, S_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
-#define G_MC_DQI_SKEW(x) _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW_DEFAULT V_MC_DQI_SKEW(0)
-
-#define S_MC_DQO_SKEW 40
-#define M_MC_DQO_SKEW _SB_MAKEMASK(8, S_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
-#define G_MC_DQO_SKEW(x) _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW_DEFAULT V_MC_DQO_SKEW(0)
-
-#define S_MC_ADDR_SKEW 48
-#define M_MC_ADDR_SKEW _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW(x) _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
-#define G_MC_ADDR_SKEW(x) _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW_DEFAULT V_MC_ADDR_SKEW(0x0F)
-
-#define S_MC_DLL_DEFAULT 56
-#define M_MC_DLL_DEFAULT _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
-#define V_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
-#define G_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
+#define M_MC_DLL_BYPASS _SB_MAKEMASK1(31)
+
+#define S_MC_DQI_SKEW 32
+#define M_MC_DQI_SKEW _SB_MAKEMASK(8, S_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
+#define G_MC_DQI_SKEW(x) _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW_DEFAULT V_MC_DQI_SKEW(0)
+
+#define S_MC_DQO_SKEW 40
+#define M_MC_DQO_SKEW _SB_MAKEMASK(8, S_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
+#define G_MC_DQO_SKEW(x) _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW_DEFAULT V_MC_DQO_SKEW(0)
+
+#define S_MC_ADDR_SKEW 48
+#define M_MC_ADDR_SKEW _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW(x) _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
+#define G_MC_ADDR_SKEW(x) _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW_DEFAULT V_MC_ADDR_SKEW(0x0F)
+
+#define S_MC_DLL_DEFAULT 56
+#define M_MC_DLL_DEFAULT _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
+#define V_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
+#define G_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
#define V_MC_DLL_DEFAULT_DEFAULT V_MC_DLL_DEFAULT(0x10)
-#define V_MC_CLKCONFIG_DEFAULT V_MC_DLL_DEFAULT_DEFAULT | \
- V_MC_ADDR_SKEW_DEFAULT | \
- V_MC_DQO_SKEW_DEFAULT | \
- V_MC_DQI_SKEW_DEFAULT | \
- V_MC_ADDR_DRIVE_DEFAULT | \
- V_MC_DATA_DRIVE_DEFAULT | \
- V_MC_CLOCK_DRIVE_DEFAULT | \
- V_MC_REF_RATE_DEFAULT
+#define V_MC_CLKCONFIG_DEFAULT V_MC_DLL_DEFAULT_DEFAULT | \
+ V_MC_ADDR_SKEW_DEFAULT | \
+ V_MC_DQO_SKEW_DEFAULT | \
+ V_MC_DQI_SKEW_DEFAULT | \
+ V_MC_ADDR_DRIVE_DEFAULT | \
+ V_MC_DATA_DRIVE_DEFAULT | \
+ V_MC_CLOCK_DRIVE_DEFAULT | \
+ V_MC_REF_RATE_DEFAULT
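For reference, the S_/M_/V_/G_ families used throughout this file are the usual SiByte shift/mask/insert/extract helpers; the underlying _SB_* macros are defined in sb1250_defs.h rather than here. A minimal sketch of their conventional form (an assumption for illustration, not the literal definitions from that header):

	#include <stdint.h>
	#define _SB_MAKE64(x)		((uint64_t)(x))			/* widen to 64 bits */
	#define _SB_MAKEMASK1(n)	(_SB_MAKE64(1) << (n))		/* single-bit mask at bit n */
	#define _SB_MAKEMASK(v, n)	(((_SB_MAKE64(1) << (v)) - 1) << (n))	/* v-bit-wide mask at bit n */
	#define _SB_MAKEVALUE(v, n)	(_SB_MAKE64(v) << (n))			/* insert a field value at bit n */
	#define _SB_GETVALUE(v, n, m)	((_SB_MAKE64(v) & (m)) >> (n))		/* extract a field (mask m, shift n) */

Under that reading, V_MC_REF_RATE(K_MC_REF_RATE_100MHz) places 0x62 into bits 15..8 of the clock-configuration word, G_MC_REF_RATE() reads it back, and V_MC_CLKCONFIG_DEFAULT is simply the OR of the per-field defaults above.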
@@ -234,68 +234,68 @@
* DRAM Command Register (Table 6-13)
*/
-#define S_MC_COMMAND 0
-#define M_MC_COMMAND _SB_MAKEMASK(4, S_MC_COMMAND)
-#define V_MC_COMMAND(x) _SB_MAKEVALUE(x, S_MC_COMMAND)
-#define G_MC_COMMAND(x) _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
-
-#define K_MC_COMMAND_EMRS 0
-#define K_MC_COMMAND_MRS 1
-#define K_MC_COMMAND_PRE 2
-#define K_MC_COMMAND_AR 3
-#define K_MC_COMMAND_SETRFSH 4
-#define K_MC_COMMAND_CLRRFSH 5
-#define K_MC_COMMAND_SETPWRDN 6
-#define K_MC_COMMAND_CLRPWRDN 7
-
-#define V_MC_COMMAND_EMRS V_MC_COMMAND(K_MC_COMMAND_EMRS)
-#define V_MC_COMMAND_MRS V_MC_COMMAND(K_MC_COMMAND_MRS)
-#define V_MC_COMMAND_PRE V_MC_COMMAND(K_MC_COMMAND_PRE)
-#define V_MC_COMMAND_AR V_MC_COMMAND(K_MC_COMMAND_AR)
-#define V_MC_COMMAND_SETRFSH V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
-#define V_MC_COMMAND_CLRRFSH V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
-#define V_MC_COMMAND_SETPWRDN V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
-#define V_MC_COMMAND_CLRPWRDN V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
-
-#define M_MC_CS0 _SB_MAKEMASK1(4)
-#define M_MC_CS1 _SB_MAKEMASK1(5)
-#define M_MC_CS2 _SB_MAKEMASK1(6)
-#define M_MC_CS3 _SB_MAKEMASK1(7)
+#define S_MC_COMMAND 0
+#define M_MC_COMMAND _SB_MAKEMASK(4, S_MC_COMMAND)
+#define V_MC_COMMAND(x) _SB_MAKEVALUE(x, S_MC_COMMAND)
+#define G_MC_COMMAND(x) _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
+
+#define K_MC_COMMAND_EMRS 0
+#define K_MC_COMMAND_MRS 1
+#define K_MC_COMMAND_PRE 2
+#define K_MC_COMMAND_AR 3
+#define K_MC_COMMAND_SETRFSH 4
+#define K_MC_COMMAND_CLRRFSH 5
+#define K_MC_COMMAND_SETPWRDN 6
+#define K_MC_COMMAND_CLRPWRDN 7
+
+#define V_MC_COMMAND_EMRS V_MC_COMMAND(K_MC_COMMAND_EMRS)
+#define V_MC_COMMAND_MRS V_MC_COMMAND(K_MC_COMMAND_MRS)
+#define V_MC_COMMAND_PRE V_MC_COMMAND(K_MC_COMMAND_PRE)
+#define V_MC_COMMAND_AR V_MC_COMMAND(K_MC_COMMAND_AR)
+#define V_MC_COMMAND_SETRFSH V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
+#define V_MC_COMMAND_CLRRFSH V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
+#define V_MC_COMMAND_SETPWRDN V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
+#define V_MC_COMMAND_CLRPWRDN V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
+
+#define M_MC_CS0 _SB_MAKEMASK1(4)
+#define M_MC_CS1 _SB_MAKEMASK1(5)
+#define M_MC_CS2 _SB_MAKEMASK1(6)
+#define M_MC_CS3 _SB_MAKEMASK1(7)
/*
* DRAM Mode Register (Table 6-14)
*/
-#define S_MC_EMODE 0
-#define M_MC_EMODE _SB_MAKEMASK(15, S_MC_EMODE)
-#define V_MC_EMODE(x) _SB_MAKEVALUE(x, S_MC_EMODE)
-#define G_MC_EMODE(x) _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
-#define V_MC_EMODE_DEFAULT V_MC_EMODE(0)
-
-#define S_MC_MODE 16
-#define M_MC_MODE _SB_MAKEMASK(15, S_MC_MODE)
-#define V_MC_MODE(x) _SB_MAKEVALUE(x, S_MC_MODE)
-#define G_MC_MODE(x) _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
-#define V_MC_MODE_DEFAULT V_MC_MODE(0x22)
-
-#define S_MC_DRAM_TYPE 32
-#define M_MC_DRAM_TYPE _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
-#define V_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
-#define G_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
-
-#define K_MC_DRAM_TYPE_JEDEC 0
-#define K_MC_DRAM_TYPE_FCRAM 1
+#define S_MC_EMODE 0
+#define M_MC_EMODE _SB_MAKEMASK(15, S_MC_EMODE)
+#define V_MC_EMODE(x) _SB_MAKEVALUE(x, S_MC_EMODE)
+#define G_MC_EMODE(x) _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
+#define V_MC_EMODE_DEFAULT V_MC_EMODE(0)
+
+#define S_MC_MODE 16
+#define M_MC_MODE _SB_MAKEMASK(15, S_MC_MODE)
+#define V_MC_MODE(x) _SB_MAKEVALUE(x, S_MC_MODE)
+#define G_MC_MODE(x) _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
+#define V_MC_MODE_DEFAULT V_MC_MODE(0x22)
+
+#define S_MC_DRAM_TYPE 32
+#define M_MC_DRAM_TYPE _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
+#define V_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
+#define G_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
+
+#define K_MC_DRAM_TYPE_JEDEC 0
+#define K_MC_DRAM_TYPE_FCRAM 1
#define K_MC_DRAM_TYPE_SGRAM 2
-#define V_MC_DRAM_TYPE_JEDEC V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
-#define V_MC_DRAM_TYPE_FCRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
-#define V_MC_DRAM_TYPE_SGRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
+#define V_MC_DRAM_TYPE_JEDEC V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
+#define V_MC_DRAM_TYPE_FCRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
+#define V_MC_DRAM_TYPE_SGRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
#define M_MC_EXTERNALDECODE _SB_MAKEMASK1(35)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36)
-#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37)
+#define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36)
+#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37)
#endif /* 1250 PASS3 || 112x PASS1 */
@@ -308,99 +308,99 @@
#define M_MC_r2wIDLE_TWOCYCLES _SB_MAKEMASK1(61)
#define M_MC_r2rIDLE_TWOCYCLES _SB_MAKEMASK1(62)
-#define S_MC_tFIFO 56
-#define M_MC_tFIFO _SB_MAKEMASK(4, S_MC_tFIFO)
-#define V_MC_tFIFO(x) _SB_MAKEVALUE(x, S_MC_tFIFO)
-#define G_MC_tFIFO(x) _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
-#define K_MC_tFIFO_DEFAULT 1
-#define V_MC_tFIFO_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
+#define S_MC_tFIFO 56
+#define M_MC_tFIFO _SB_MAKEMASK(4, S_MC_tFIFO)
+#define V_MC_tFIFO(x) _SB_MAKEVALUE(x, S_MC_tFIFO)
+#define G_MC_tFIFO(x) _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
+#define K_MC_tFIFO_DEFAULT 1
+#define V_MC_tFIFO_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
-#define S_MC_tRFC 52
-#define M_MC_tRFC _SB_MAKEMASK(4, S_MC_tRFC)
-#define V_MC_tRFC(x) _SB_MAKEVALUE(x, S_MC_tRFC)
-#define G_MC_tRFC(x) _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
-#define K_MC_tRFC_DEFAULT 12
-#define V_MC_tRFC_DEFAULT V_MC_tRFC(K_MC_tRFC_DEFAULT)
+#define S_MC_tRFC 52
+#define M_MC_tRFC _SB_MAKEMASK(4, S_MC_tRFC)
+#define V_MC_tRFC(x) _SB_MAKEVALUE(x, S_MC_tRFC)
+#define G_MC_tRFC(x) _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
+#define K_MC_tRFC_DEFAULT 12
+#define V_MC_tRFC_DEFAULT V_MC_tRFC(K_MC_tRFC_DEFAULT)
#if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define M_MC_tRFC_PLUS16 _SB_MAKEMASK1(51) /* 1250C3 and later. */
+#define M_MC_tRFC_PLUS16 _SB_MAKEMASK1(51) /* 1250C3 and later. */
#endif
-#define S_MC_tCwCr 40
-#define M_MC_tCwCr _SB_MAKEMASK(4, S_MC_tCwCr)
-#define V_MC_tCwCr(x) _SB_MAKEVALUE(x, S_MC_tCwCr)
-#define G_MC_tCwCr(x) _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
-#define K_MC_tCwCr_DEFAULT 4
-#define V_MC_tCwCr_DEFAULT V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
-
-#define S_MC_tRCr 28
-#define M_MC_tRCr _SB_MAKEMASK(4, S_MC_tRCr)
-#define V_MC_tRCr(x) _SB_MAKEVALUE(x, S_MC_tRCr)
-#define G_MC_tRCr(x) _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
-#define K_MC_tRCr_DEFAULT 9
-#define V_MC_tRCr_DEFAULT V_MC_tRCr(K_MC_tRCr_DEFAULT)
-
-#define S_MC_tRCw 24
-#define M_MC_tRCw _SB_MAKEMASK(4, S_MC_tRCw)
-#define V_MC_tRCw(x) _SB_MAKEVALUE(x, S_MC_tRCw)
-#define G_MC_tRCw(x) _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
-#define K_MC_tRCw_DEFAULT 10
-#define V_MC_tRCw_DEFAULT V_MC_tRCw(K_MC_tRCw_DEFAULT)
-
-#define S_MC_tRRD 20
-#define M_MC_tRRD _SB_MAKEMASK(4, S_MC_tRRD)
-#define V_MC_tRRD(x) _SB_MAKEVALUE(x, S_MC_tRRD)
-#define G_MC_tRRD(x) _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
-#define K_MC_tRRD_DEFAULT 2
-#define V_MC_tRRD_DEFAULT V_MC_tRRD(K_MC_tRRD_DEFAULT)
-
-#define S_MC_tRP 16
-#define M_MC_tRP _SB_MAKEMASK(4, S_MC_tRP)
-#define V_MC_tRP(x) _SB_MAKEVALUE(x, S_MC_tRP)
-#define G_MC_tRP(x) _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
-#define K_MC_tRP_DEFAULT 4
-#define V_MC_tRP_DEFAULT V_MC_tRP(K_MC_tRP_DEFAULT)
-
-#define S_MC_tCwD 8
-#define M_MC_tCwD _SB_MAKEMASK(4, S_MC_tCwD)
-#define V_MC_tCwD(x) _SB_MAKEVALUE(x, S_MC_tCwD)
-#define G_MC_tCwD(x) _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
-#define K_MC_tCwD_DEFAULT 1
-#define V_MC_tCwD_DEFAULT V_MC_tCwD(K_MC_tCwD_DEFAULT)
-
-#define M_tCrDh _SB_MAKEMASK1(7)
+#define S_MC_tCwCr 40
+#define M_MC_tCwCr _SB_MAKEMASK(4, S_MC_tCwCr)
+#define V_MC_tCwCr(x) _SB_MAKEVALUE(x, S_MC_tCwCr)
+#define G_MC_tCwCr(x) _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
+#define K_MC_tCwCr_DEFAULT 4
+#define V_MC_tCwCr_DEFAULT V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
+
+#define S_MC_tRCr 28
+#define M_MC_tRCr _SB_MAKEMASK(4, S_MC_tRCr)
+#define V_MC_tRCr(x) _SB_MAKEVALUE(x, S_MC_tRCr)
+#define G_MC_tRCr(x) _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
+#define K_MC_tRCr_DEFAULT 9
+#define V_MC_tRCr_DEFAULT V_MC_tRCr(K_MC_tRCr_DEFAULT)
+
+#define S_MC_tRCw 24
+#define M_MC_tRCw _SB_MAKEMASK(4, S_MC_tRCw)
+#define V_MC_tRCw(x) _SB_MAKEVALUE(x, S_MC_tRCw)
+#define G_MC_tRCw(x) _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
+#define K_MC_tRCw_DEFAULT 10
+#define V_MC_tRCw_DEFAULT V_MC_tRCw(K_MC_tRCw_DEFAULT)
+
+#define S_MC_tRRD 20
+#define M_MC_tRRD _SB_MAKEMASK(4, S_MC_tRRD)
+#define V_MC_tRRD(x) _SB_MAKEVALUE(x, S_MC_tRRD)
+#define G_MC_tRRD(x) _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
+#define K_MC_tRRD_DEFAULT 2
+#define V_MC_tRRD_DEFAULT V_MC_tRRD(K_MC_tRRD_DEFAULT)
+
+#define S_MC_tRP 16
+#define M_MC_tRP _SB_MAKEMASK(4, S_MC_tRP)
+#define V_MC_tRP(x) _SB_MAKEVALUE(x, S_MC_tRP)
+#define G_MC_tRP(x) _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
+#define K_MC_tRP_DEFAULT 4
+#define V_MC_tRP_DEFAULT V_MC_tRP(K_MC_tRP_DEFAULT)
+
+#define S_MC_tCwD 8
+#define M_MC_tCwD _SB_MAKEMASK(4, S_MC_tCwD)
+#define V_MC_tCwD(x) _SB_MAKEVALUE(x, S_MC_tCwD)
+#define G_MC_tCwD(x) _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
+#define K_MC_tCwD_DEFAULT 1
+#define V_MC_tCwD_DEFAULT V_MC_tCwD(K_MC_tCwD_DEFAULT)
+
+#define M_tCrDh _SB_MAKEMASK1(7)
#define M_MC_tCrDh M_tCrDh
-#define S_MC_tCrD 4
-#define M_MC_tCrD _SB_MAKEMASK(3, S_MC_tCrD)
-#define V_MC_tCrD(x) _SB_MAKEVALUE(x, S_MC_tCrD)
-#define G_MC_tCrD(x) _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
-#define K_MC_tCrD_DEFAULT 2
-#define V_MC_tCrD_DEFAULT V_MC_tCrD(K_MC_tCrD_DEFAULT)
-
-#define S_MC_tRCD 0
-#define M_MC_tRCD _SB_MAKEMASK(4, S_MC_tRCD)
-#define V_MC_tRCD(x) _SB_MAKEVALUE(x, S_MC_tRCD)
-#define G_MC_tRCD(x) _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
-#define K_MC_tRCD_DEFAULT 3
-#define V_MC_tRCD_DEFAULT V_MC_tRCD(K_MC_tRCD_DEFAULT)
-
-#define V_MC_TIMING_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
- V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
- V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
- V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
- V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
- V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
- V_MC_tRP(K_MC_tRP_DEFAULT) | \
- V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
- V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
- V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
- M_MC_r2rIDLE_TWOCYCLES
+#define S_MC_tCrD 4
+#define M_MC_tCrD _SB_MAKEMASK(3, S_MC_tCrD)
+#define V_MC_tCrD(x) _SB_MAKEVALUE(x, S_MC_tCrD)
+#define G_MC_tCrD(x) _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
+#define K_MC_tCrD_DEFAULT 2
+#define V_MC_tCrD_DEFAULT V_MC_tCrD(K_MC_tCrD_DEFAULT)
+
+#define S_MC_tRCD 0
+#define M_MC_tRCD _SB_MAKEMASK(4, S_MC_tRCD)
+#define V_MC_tRCD(x) _SB_MAKEVALUE(x, S_MC_tRCD)
+#define G_MC_tRCD(x) _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
+#define K_MC_tRCD_DEFAULT 3
+#define V_MC_tRCD_DEFAULT V_MC_tRCD(K_MC_tRCD_DEFAULT)
+
+#define V_MC_TIMING_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
+ V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
+ V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
+ V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
+ V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
+ V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
+ V_MC_tRP(K_MC_tRP_DEFAULT) | \
+ V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
+ V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
+ V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
+ M_MC_r2rIDLE_TWOCYCLES
/*
* Errata says these are not the default
- * M_MC_w2rIDLE_TWOCYCLES | \
- * M_MC_r2wIDLE_TWOCYCLES | \
+ * M_MC_w2rIDLE_TWOCYCLES | \
+ * M_MC_r2wIDLE_TWOCYCLES | \
*/
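Assuming the shift-and-mask helpers sketched earlier (and <stdint.h> for uint64_t), the composite timing default can be decomposed again with the G_ accessors, since none of the bit fields in this register overlap:

	uint64_t timing = V_MC_TIMING_DEFAULT;	/* OR of the per-field defaults above */
	unsigned trcd = G_MC_tRCD(timing);	/* == K_MC_tRCD_DEFAULT == 3 */
	unsigned trp  = G_MC_tRP(timing);	/* == K_MC_tRP_DEFAULT  == 4 */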
@@ -408,143 +408,143 @@
* Chip Select Start Address Register (Table 6-17)
*/
-#define S_MC_CS0_START 0
-#define M_MC_CS0_START _SB_MAKEMASK(16, S_MC_CS0_START)
-#define V_MC_CS0_START(x) _SB_MAKEVALUE(x, S_MC_CS0_START)
-#define G_MC_CS0_START(x) _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
+#define S_MC_CS0_START 0
+#define M_MC_CS0_START _SB_MAKEMASK(16, S_MC_CS0_START)
+#define V_MC_CS0_START(x) _SB_MAKEVALUE(x, S_MC_CS0_START)
+#define G_MC_CS0_START(x) _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
-#define S_MC_CS1_START 16
-#define M_MC_CS1_START _SB_MAKEMASK(16, S_MC_CS1_START)
-#define V_MC_CS1_START(x) _SB_MAKEVALUE(x, S_MC_CS1_START)
-#define G_MC_CS1_START(x) _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
+#define S_MC_CS1_START 16
+#define M_MC_CS1_START _SB_MAKEMASK(16, S_MC_CS1_START)
+#define V_MC_CS1_START(x) _SB_MAKEVALUE(x, S_MC_CS1_START)
+#define G_MC_CS1_START(x) _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
-#define S_MC_CS2_START 32
-#define M_MC_CS2_START _SB_MAKEMASK(16, S_MC_CS2_START)
-#define V_MC_CS2_START(x) _SB_MAKEVALUE(x, S_MC_CS2_START)
-#define G_MC_CS2_START(x) _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
+#define S_MC_CS2_START 32
+#define M_MC_CS2_START _SB_MAKEMASK(16, S_MC_CS2_START)
+#define V_MC_CS2_START(x) _SB_MAKEVALUE(x, S_MC_CS2_START)
+#define G_MC_CS2_START(x) _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
-#define S_MC_CS3_START 48
-#define M_MC_CS3_START _SB_MAKEMASK(16, S_MC_CS3_START)
-#define V_MC_CS3_START(x) _SB_MAKEVALUE(x, S_MC_CS3_START)
-#define G_MC_CS3_START(x) _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
+#define S_MC_CS3_START 48
+#define M_MC_CS3_START _SB_MAKEMASK(16, S_MC_CS3_START)
+#define V_MC_CS3_START(x) _SB_MAKEVALUE(x, S_MC_CS3_START)
+#define G_MC_CS3_START(x) _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
/*
* Chip Select End Address Register (Table 6-18)
*/
-#define S_MC_CS0_END 0
-#define M_MC_CS0_END _SB_MAKEMASK(16, S_MC_CS0_END)
-#define V_MC_CS0_END(x) _SB_MAKEVALUE(x, S_MC_CS0_END)
-#define G_MC_CS0_END(x) _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
+#define S_MC_CS0_END 0
+#define M_MC_CS0_END _SB_MAKEMASK(16, S_MC_CS0_END)
+#define V_MC_CS0_END(x) _SB_MAKEVALUE(x, S_MC_CS0_END)
+#define G_MC_CS0_END(x) _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
-#define S_MC_CS1_END 16
-#define M_MC_CS1_END _SB_MAKEMASK(16, S_MC_CS1_END)
-#define V_MC_CS1_END(x) _SB_MAKEVALUE(x, S_MC_CS1_END)
-#define G_MC_CS1_END(x) _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
+#define S_MC_CS1_END 16
+#define M_MC_CS1_END _SB_MAKEMASK(16, S_MC_CS1_END)
+#define V_MC_CS1_END(x) _SB_MAKEVALUE(x, S_MC_CS1_END)
+#define G_MC_CS1_END(x) _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
-#define S_MC_CS2_END 32
-#define M_MC_CS2_END _SB_MAKEMASK(16, S_MC_CS2_END)
-#define V_MC_CS2_END(x) _SB_MAKEVALUE(x, S_MC_CS2_END)
-#define G_MC_CS2_END(x) _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
+#define S_MC_CS2_END 32
+#define M_MC_CS2_END _SB_MAKEMASK(16, S_MC_CS2_END)
+#define V_MC_CS2_END(x) _SB_MAKEVALUE(x, S_MC_CS2_END)
+#define G_MC_CS2_END(x) _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
-#define S_MC_CS3_END 48
-#define M_MC_CS3_END _SB_MAKEMASK(16, S_MC_CS3_END)
-#define V_MC_CS3_END(x) _SB_MAKEVALUE(x, S_MC_CS3_END)
-#define G_MC_CS3_END(x) _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
+#define S_MC_CS3_END 48
+#define M_MC_CS3_END _SB_MAKEMASK(16, S_MC_CS3_END)
+#define V_MC_CS3_END(x) _SB_MAKEVALUE(x, S_MC_CS3_END)
+#define G_MC_CS3_END(x) _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
/*
* Chip Select Interleave Register (Table 6-19)
*/
-#define S_MC_INTLV_RESERVED 0
-#define M_MC_INTLV_RESERVED _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
+#define S_MC_INTLV_RESERVED 0
+#define M_MC_INTLV_RESERVED _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
-#define S_MC_INTERLEAVE 7
-#define M_MC_INTERLEAVE _SB_MAKEMASK(18, S_MC_INTERLEAVE)
-#define V_MC_INTERLEAVE(x) _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
+#define S_MC_INTERLEAVE 7
+#define M_MC_INTERLEAVE _SB_MAKEMASK(18, S_MC_INTERLEAVE)
+#define V_MC_INTERLEAVE(x) _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
-#define S_MC_INTLV_MBZ 25
-#define M_MC_INTLV_MBZ _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
+#define S_MC_INTLV_MBZ 25
+#define M_MC_INTLV_MBZ _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
/*
* Row Address Bits Register (Table 6-20)
*/
-#define S_MC_RAS_RESERVED 0
-#define M_MC_RAS_RESERVED _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
+#define S_MC_RAS_RESERVED 0
+#define M_MC_RAS_RESERVED _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
-#define S_MC_RAS_SELECT 12
-#define M_MC_RAS_SELECT _SB_MAKEMASK(25, S_MC_RAS_SELECT)
-#define V_MC_RAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
+#define S_MC_RAS_SELECT 12
+#define M_MC_RAS_SELECT _SB_MAKEMASK(25, S_MC_RAS_SELECT)
+#define V_MC_RAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
-#define S_MC_RAS_MBZ 37
-#define M_MC_RAS_MBZ _SB_MAKEMASK(27, S_MC_RAS_MBZ)
+#define S_MC_RAS_MBZ 37
+#define M_MC_RAS_MBZ _SB_MAKEMASK(27, S_MC_RAS_MBZ)
/*
* Column Address Bits Register (Table 6-21)
*/
-#define S_MC_CAS_RESERVED 0
-#define M_MC_CAS_RESERVED _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
+#define S_MC_CAS_RESERVED 0
+#define M_MC_CAS_RESERVED _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
-#define S_MC_CAS_SELECT 5
-#define M_MC_CAS_SELECT _SB_MAKEMASK(18, S_MC_CAS_SELECT)
-#define V_MC_CAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
+#define S_MC_CAS_SELECT 5
+#define M_MC_CAS_SELECT _SB_MAKEMASK(18, S_MC_CAS_SELECT)
+#define V_MC_CAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
-#define S_MC_CAS_MBZ 23
-#define M_MC_CAS_MBZ _SB_MAKEMASK(41, S_MC_CAS_MBZ)
+#define S_MC_CAS_MBZ 23
+#define M_MC_CAS_MBZ _SB_MAKEMASK(41, S_MC_CAS_MBZ)
/*
* Bank Address Address Bits Register (Table 6-22)
*/
-#define S_MC_BA_RESERVED 0
-#define M_MC_BA_RESERVED _SB_MAKEMASK(5, S_MC_BA_RESERVED)
+#define S_MC_BA_RESERVED 0
+#define M_MC_BA_RESERVED _SB_MAKEMASK(5, S_MC_BA_RESERVED)
-#define S_MC_BA_SELECT 5
-#define M_MC_BA_SELECT _SB_MAKEMASK(20, S_MC_BA_SELECT)
-#define V_MC_BA_SELECT(x) _SB_MAKEVALUE(x, S_MC_BA_SELECT)
+#define S_MC_BA_SELECT 5
+#define M_MC_BA_SELECT _SB_MAKEMASK(20, S_MC_BA_SELECT)
+#define V_MC_BA_SELECT(x) _SB_MAKEVALUE(x, S_MC_BA_SELECT)
-#define S_MC_BA_MBZ 25
-#define M_MC_BA_MBZ _SB_MAKEMASK(39, S_MC_BA_MBZ)
+#define S_MC_BA_MBZ 25
+#define M_MC_BA_MBZ _SB_MAKEMASK(39, S_MC_BA_MBZ)
/*
* Chip Select Attribute Register (Table 6-23)
*/
-#define K_MC_CS_ATTR_CLOSED 0
-#define K_MC_CS_ATTR_CASCHECK 1
-#define K_MC_CS_ATTR_HINT 2
-#define K_MC_CS_ATTR_OPEN 3
+#define K_MC_CS_ATTR_CLOSED 0
+#define K_MC_CS_ATTR_CASCHECK 1
+#define K_MC_CS_ATTR_HINT 2
+#define K_MC_CS_ATTR_OPEN 3
-#define S_MC_CS0_PAGE 0
-#define M_MC_CS0_PAGE _SB_MAKEMASK(2, S_MC_CS0_PAGE)
-#define V_MC_CS0_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
-#define G_MC_CS0_PAGE(x) _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
+#define S_MC_CS0_PAGE 0
+#define M_MC_CS0_PAGE _SB_MAKEMASK(2, S_MC_CS0_PAGE)
+#define V_MC_CS0_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
+#define G_MC_CS0_PAGE(x) _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
-#define S_MC_CS1_PAGE 16
-#define M_MC_CS1_PAGE _SB_MAKEMASK(2, S_MC_CS1_PAGE)
-#define V_MC_CS1_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
-#define G_MC_CS1_PAGE(x) _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
+#define S_MC_CS1_PAGE 16
+#define M_MC_CS1_PAGE _SB_MAKEMASK(2, S_MC_CS1_PAGE)
+#define V_MC_CS1_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
+#define G_MC_CS1_PAGE(x) _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
-#define S_MC_CS2_PAGE 32
-#define M_MC_CS2_PAGE _SB_MAKEMASK(2, S_MC_CS2_PAGE)
-#define V_MC_CS2_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
-#define G_MC_CS2_PAGE(x) _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
+#define S_MC_CS2_PAGE 32
+#define M_MC_CS2_PAGE _SB_MAKEMASK(2, S_MC_CS2_PAGE)
+#define V_MC_CS2_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
+#define G_MC_CS2_PAGE(x) _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
-#define S_MC_CS3_PAGE 48
-#define M_MC_CS3_PAGE _SB_MAKEMASK(2, S_MC_CS3_PAGE)
-#define V_MC_CS3_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
-#define G_MC_CS3_PAGE(x) _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
+#define S_MC_CS3_PAGE 48
+#define M_MC_CS3_PAGE _SB_MAKEMASK(2, S_MC_CS3_PAGE)
+#define V_MC_CS3_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
+#define G_MC_CS3_PAGE(x) _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
/*
* ECC Test ECC Register (Table 6-25)
*/
-#define S_MC_ECC_INVERT 0
-#define M_MC_ECC_INVERT _SB_MAKEMASK(8, S_MC_ECC_INVERT)
+#define S_MC_ECC_INVERT 0
+#define M_MC_ECC_INVERT _SB_MAKEMASK(8, S_MC_ECC_INVERT)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_regs.h b/arch/mips/include/asm/sibyte/sb1250_regs.h
index 29b9f0b26b3a..ee86ca0fad32 100644
--- a/arch/mips/include/asm/sibyte/sb1250_regs.h
+++ b/arch/mips/include/asm/sibyte/sb1250_regs.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Register Definitions File: sb1250_regs.h
+ * Register Definitions File: sb1250_regs.h
*
* This module contains the addresses of the on-chip peripherals
* on the SB1250.
@@ -61,45 +61,45 @@
*/
#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
-#define A_MC_BASE_0 0x0010051000
-#define A_MC_BASE_1 0x0010052000
-#define MC_REGISTER_SPACING 0x1000
+#define A_MC_BASE_0 0x0010051000
+#define A_MC_BASE_1 0x0010052000
+#define MC_REGISTER_SPACING 0x1000
-#define A_MC_BASE(ctlid) ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
+#define A_MC_BASE(ctlid) ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
#define A_MC_REGISTER(ctlid, reg) (A_MC_BASE(ctlid)+(reg))
-#define R_MC_CONFIG 0x0000000100
-#define R_MC_DRAMCMD 0x0000000120
-#define R_MC_DRAMMODE 0x0000000140
-#define R_MC_TIMING1 0x0000000160
-#define R_MC_TIMING2 0x0000000180
-#define R_MC_CS_START 0x00000001A0
-#define R_MC_CS_END 0x00000001C0
-#define R_MC_CS_INTERLEAVE 0x00000001E0
-#define S_MC_CS_STARTEND 16
-
-#define R_MC_CSX_BASE 0x0000000200
-#define R_MC_CSX_ROW 0x0000000000 /* relative to CSX_BASE, above */
-#define R_MC_CSX_COL 0x0000000020 /* relative to CSX_BASE, above */
-#define R_MC_CSX_BA 0x0000000040 /* relative to CSX_BASE, above */
-#define MC_CSX_SPACING 0x0000000060 /* relative to CSX_BASE, above */
-
-#define R_MC_CS0_ROW 0x0000000200
-#define R_MC_CS0_COL 0x0000000220
-#define R_MC_CS0_BA 0x0000000240
-#define R_MC_CS1_ROW 0x0000000260
-#define R_MC_CS1_COL 0x0000000280
-#define R_MC_CS1_BA 0x00000002A0
-#define R_MC_CS2_ROW 0x00000002C0
-#define R_MC_CS2_COL 0x00000002E0
-#define R_MC_CS2_BA 0x0000000300
-#define R_MC_CS3_ROW 0x0000000320
-#define R_MC_CS3_COL 0x0000000340
-#define R_MC_CS3_BA 0x0000000360
-#define R_MC_CS_ATTR 0x0000000380
-#define R_MC_TEST_DATA 0x0000000400
-#define R_MC_TEST_ECC 0x0000000420
-#define R_MC_MCLK_CFG 0x0000000500
+#define R_MC_CONFIG 0x0000000100
+#define R_MC_DRAMCMD 0x0000000120
+#define R_MC_DRAMMODE 0x0000000140
+#define R_MC_TIMING1 0x0000000160
+#define R_MC_TIMING2 0x0000000180
+#define R_MC_CS_START 0x00000001A0
+#define R_MC_CS_END 0x00000001C0
+#define R_MC_CS_INTERLEAVE 0x00000001E0
+#define S_MC_CS_STARTEND 16
+
+#define R_MC_CSX_BASE 0x0000000200
+#define R_MC_CSX_ROW 0x0000000000 /* relative to CSX_BASE, above */
+#define R_MC_CSX_COL 0x0000000020 /* relative to CSX_BASE, above */
+#define R_MC_CSX_BA 0x0000000040 /* relative to CSX_BASE, above */
+#define MC_CSX_SPACING 0x0000000060 /* relative to CSX_BASE, above */
+
+#define R_MC_CS0_ROW 0x0000000200
+#define R_MC_CS0_COL 0x0000000220
+#define R_MC_CS0_BA 0x0000000240
+#define R_MC_CS1_ROW 0x0000000260
+#define R_MC_CS1_COL 0x0000000280
+#define R_MC_CS1_BA 0x00000002A0
+#define R_MC_CS2_ROW 0x00000002C0
+#define R_MC_CS2_COL 0x00000002E0
+#define R_MC_CS2_BA 0x0000000300
+#define R_MC_CS3_ROW 0x0000000320
+#define R_MC_CS3_COL 0x0000000340
+#define R_MC_CS3_BA 0x0000000360
+#define R_MC_CS_ATTR 0x0000000380
+#define R_MC_TEST_DATA 0x0000000400
+#define R_MC_TEST_ECC 0x0000000420
+#define R_MC_MCLK_CFG 0x0000000500
#endif /* 1250 & 112x */
@@ -109,14 +109,14 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* This L2C only on 1250/112x */
-#define A_L2_READ_TAG 0x0010040018
-#define A_L2_ECC_TAG 0x0010040038
+#define A_L2_READ_TAG 0x0010040018
+#define A_L2_ECC_TAG 0x0010040038
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define A_L2_READ_MISC 0x0010040058
+#define A_L2_READ_MISC 0x0010040058
#endif /* 1250 PASS3 || 112x PASS1 */
-#define A_L2_WAY_DISABLE 0x0010041000
-#define A_L2_MAKEDISABLE(x) (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
-#define A_L2_MGMT_TAG_BASE 0x00D0000000
+#define A_L2_WAY_DISABLE 0x0010041000
+#define A_L2_MAKEDISABLE(x) (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
+#define A_L2_MGMT_TAG_BASE 0x00D0000000
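The L2 way-disable encoding is carried in the address itself: A_L2_MAKEDISABLE() takes a 4-bit mask, inverts it, and places it in address bits 11..8 above A_L2_WAY_DISABLE, which suggests the argument is the set of ways to leave enabled. A worked expansion, using only the arithmetic above:

	/* A_L2_MAKEDISABLE(0x3) == 0x0010041000 | ((~0x3 & 0x0F) << 8)
	 *                       == 0x0010041000 | 0x0C00
	 *                       == 0x0010041C00
	 */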
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_L2_CACHE_DISABLE 0x0010042000
@@ -124,10 +124,10 @@
#define A_L2_MISC_CONFIG 0x0010043000
#endif /* 1250 PASS2 || 112x PASS1 */
-/* Backward-compatibility definitions. */
+/* Backward-compatibility definitions. */
/* XXX: discourage people from using these constants. */
-#define A_L2_READ_ADDRESS A_L2_READ_TAG
-#define A_L2_EEC_ADDRESS A_L2_ECC_TAG
+#define A_L2_READ_ADDRESS A_L2_READ_TAG
+#define A_L2_EEC_ADDRESS A_L2_ECC_TAG
#endif
@@ -137,8 +137,8 @@
********************************************************************* */
#if SIBYTE_HDR_FEATURE_1250_112x /* This PCI/HT only on 1250/112x */
-#define A_PCI_TYPE00_HEADER 0x00DE000000
-#define A_PCI_TYPE01_HEADER 0x00DE000800
+#define A_PCI_TYPE00_HEADER 0x00DE000000
+#define A_PCI_TYPE01_HEADER 0x00DE000800
#endif
@@ -146,121 +146,121 @@
* Ethernet DMA and MACs
********************************************************************* */
-#define A_MAC_BASE_0 0x0010064000
-#define A_MAC_BASE_1 0x0010065000
+#define A_MAC_BASE_0 0x0010064000
+#define A_MAC_BASE_1 0x0010065000
#if SIBYTE_HDR_FEATURE_CHIP(1250)
-#define A_MAC_BASE_2 0x0010066000
+#define A_MAC_BASE_2 0x0010066000
#endif /* 1250 */
-#define MAC_SPACING 0x1000
-#define MAC_DMA_TXRX_SPACING 0x0400
-#define MAC_DMA_CHANNEL_SPACING 0x0100
-#define DMA_RX 0
-#define DMA_TX 1
+#define MAC_SPACING 0x1000
+#define MAC_DMA_TXRX_SPACING 0x0400
+#define MAC_DMA_CHANNEL_SPACING 0x0100
+#define DMA_RX 0
+#define DMA_TX 1
#define MAC_NUM_DMACHAN 2 /* channels per direction */
/* XXX: not correct; depends on SOC type. */
-#define MAC_NUM_PORTS 3
+#define MAC_NUM_PORTS 3
-#define A_MAC_CHANNEL_BASE(macnum) \
- (A_MAC_BASE_0 + \
- MAC_SPACING*(macnum))
+#define A_MAC_CHANNEL_BASE(macnum) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum))
-#define A_MAC_REGISTER(macnum,reg) \
- (A_MAC_BASE_0 + \
- MAC_SPACING*(macnum) + (reg))
+#define A_MAC_REGISTER(macnum,reg) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum) + (reg))
#define R_MAC_DMA_CHANNELS 0x800 /* Relative to A_MAC_CHANNEL_BASE */
#define A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) \
- ((A_MAC_CHANNEL_BASE(macnum)) + \
- R_MAC_DMA_CHANNELS + \
- (MAC_DMA_TXRX_SPACING*(txrx)) + \
- (MAC_DMA_CHANNEL_SPACING*(chan)))
+ ((A_MAC_CHANNEL_BASE(macnum)) + \
+ R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
#define R_MAC_DMA_CHANNEL_BASE(txrx, chan) \
- (R_MAC_DMA_CHANNELS + \
- (MAC_DMA_TXRX_SPACING*(txrx)) + \
- (MAC_DMA_CHANNEL_SPACING*(chan)))
+ (R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
-#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg) \
- (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) + \
- (reg))
+#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg) \
+ (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) + \
+ (reg))
-#define R_MAC_DMA_REGISTER(txrx, chan, reg) \
- (R_MAC_DMA_CHANNEL_BASE(txrx, chan) + \
- (reg))
+#define R_MAC_DMA_REGISTER(txrx, chan, reg) \
+ (R_MAC_DMA_CHANNEL_BASE(txrx, chan) + \
+ (reg))
/*
* DMA channel registers, relative to A_MAC_DMA_CHANNEL_BASE
*/
-#define R_MAC_DMA_CONFIG0 0x00000000
-#define R_MAC_DMA_CONFIG1 0x00000008
-#define R_MAC_DMA_DSCR_BASE 0x00000010
-#define R_MAC_DMA_DSCR_CNT 0x00000018
-#define R_MAC_DMA_CUR_DSCRA 0x00000020
-#define R_MAC_DMA_CUR_DSCRB 0x00000028
-#define R_MAC_DMA_CUR_DSCRADDR 0x00000030
+#define R_MAC_DMA_CONFIG0 0x00000000
+#define R_MAC_DMA_CONFIG1 0x00000008
+#define R_MAC_DMA_DSCR_BASE 0x00000010
+#define R_MAC_DMA_DSCR_CNT 0x00000018
+#define R_MAC_DMA_CUR_DSCRA 0x00000020
+#define R_MAC_DMA_CUR_DSCRB 0x00000028
+#define R_MAC_DMA_CUR_DSCRADDR 0x00000030
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define R_MAC_DMA_OODPKTLOST_RX 0x00000038 /* rx only */
+#define R_MAC_DMA_OODPKTLOST_RX 0x00000038 /* rx only */
#endif /* 1250 PASS3 || 112x PASS1 */
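The nested MAC/DMA addressing macros above reduce to plain base-plus-offset arithmetic; a worked example using only the constants defined in this file:

	/* A_MAC_DMA_REGISTER(0, DMA_TX, 1, R_MAC_DMA_CONFIG0)
	 *   == A_MAC_BASE_0 + R_MAC_DMA_CHANNELS
	 *      + MAC_DMA_TXRX_SPACING*DMA_TX + MAC_DMA_CHANNEL_SPACING*1 + 0x00
	 *   == 0x0010064000 + 0x800 + 0x400 + 0x100
	 *   == 0x0010064D00
	 */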
/*
* RMON Counters
*/
-#define R_MAC_RMON_TX_BYTES 0x00000000
-#define R_MAC_RMON_COLLISIONS 0x00000008
-#define R_MAC_RMON_LATE_COL 0x00000010
-#define R_MAC_RMON_EX_COL 0x00000018
-#define R_MAC_RMON_FCS_ERROR 0x00000020
-#define R_MAC_RMON_TX_ABORT 0x00000028
+#define R_MAC_RMON_TX_BYTES 0x00000000
+#define R_MAC_RMON_COLLISIONS 0x00000008
+#define R_MAC_RMON_LATE_COL 0x00000010
+#define R_MAC_RMON_EX_COL 0x00000018
+#define R_MAC_RMON_FCS_ERROR 0x00000020
+#define R_MAC_RMON_TX_ABORT 0x00000028
/* Counter #6 (0x30) now reserved */
-#define R_MAC_RMON_TX_BAD 0x00000038
-#define R_MAC_RMON_TX_GOOD 0x00000040
-#define R_MAC_RMON_TX_RUNT 0x00000048
-#define R_MAC_RMON_TX_OVERSIZE 0x00000050
-#define R_MAC_RMON_RX_BYTES 0x00000080
-#define R_MAC_RMON_RX_MCAST 0x00000088
-#define R_MAC_RMON_RX_BCAST 0x00000090
-#define R_MAC_RMON_RX_BAD 0x00000098
-#define R_MAC_RMON_RX_GOOD 0x000000A0
-#define R_MAC_RMON_RX_RUNT 0x000000A8
-#define R_MAC_RMON_RX_OVERSIZE 0x000000B0
-#define R_MAC_RMON_RX_FCS_ERROR 0x000000B8
-#define R_MAC_RMON_RX_LENGTH_ERROR 0x000000C0
-#define R_MAC_RMON_RX_CODE_ERROR 0x000000C8
-#define R_MAC_RMON_RX_ALIGN_ERROR 0x000000D0
+#define R_MAC_RMON_TX_BAD 0x00000038
+#define R_MAC_RMON_TX_GOOD 0x00000040
+#define R_MAC_RMON_TX_RUNT 0x00000048
+#define R_MAC_RMON_TX_OVERSIZE 0x00000050
+#define R_MAC_RMON_RX_BYTES 0x00000080
+#define R_MAC_RMON_RX_MCAST 0x00000088
+#define R_MAC_RMON_RX_BCAST 0x00000090
+#define R_MAC_RMON_RX_BAD 0x00000098
+#define R_MAC_RMON_RX_GOOD 0x000000A0
+#define R_MAC_RMON_RX_RUNT 0x000000A8
+#define R_MAC_RMON_RX_OVERSIZE 0x000000B0
+#define R_MAC_RMON_RX_FCS_ERROR 0x000000B8
+#define R_MAC_RMON_RX_LENGTH_ERROR 0x000000C0
+#define R_MAC_RMON_RX_CODE_ERROR 0x000000C8
+#define R_MAC_RMON_RX_ALIGN_ERROR 0x000000D0
/* Updated to spec 0.2 */
-#define R_MAC_CFG 0x00000100
-#define R_MAC_THRSH_CFG 0x00000108
-#define R_MAC_VLANTAG 0x00000110
-#define R_MAC_FRAMECFG 0x00000118
-#define R_MAC_EOPCNT 0x00000120
-#define R_MAC_FIFO_PTRS 0x00000128
-#define R_MAC_ADFILTER_CFG 0x00000200
-#define R_MAC_ETHERNET_ADDR 0x00000208
-#define R_MAC_PKT_TYPE 0x00000210
+#define R_MAC_CFG 0x00000100
+#define R_MAC_THRSH_CFG 0x00000108
+#define R_MAC_VLANTAG 0x00000110
+#define R_MAC_FRAMECFG 0x00000118
+#define R_MAC_EOPCNT 0x00000120
+#define R_MAC_FIFO_PTRS 0x00000128
+#define R_MAC_ADFILTER_CFG 0x00000200
+#define R_MAC_ETHERNET_ADDR 0x00000208
+#define R_MAC_PKT_TYPE 0x00000210
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define R_MAC_ADMASK0 0x00000218
#define R_MAC_ADMASK1 0x00000220
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define R_MAC_HASH_BASE 0x00000240
-#define R_MAC_ADDR_BASE 0x00000280
-#define R_MAC_CHLO0_BASE 0x00000300
-#define R_MAC_CHUP0_BASE 0x00000320
-#define R_MAC_ENABLE 0x00000400
-#define R_MAC_STATUS 0x00000408
-#define R_MAC_INT_MASK 0x00000410
-#define R_MAC_TXD_CTL 0x00000420
-#define R_MAC_MDIO 0x00000428
+#define R_MAC_HASH_BASE 0x00000240
+#define R_MAC_ADDR_BASE 0x00000280
+#define R_MAC_CHLO0_BASE 0x00000300
+#define R_MAC_CHUP0_BASE 0x00000320
+#define R_MAC_ENABLE 0x00000400
+#define R_MAC_STATUS 0x00000408
+#define R_MAC_INT_MASK 0x00000410
+#define R_MAC_TXD_CTL 0x00000420
+#define R_MAC_MDIO 0x00000428
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define R_MAC_STATUS1 0x00000430
+#define R_MAC_STATUS1 0x00000430
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define R_MAC_DEBUG_STATUS 0x00000448
+#define R_MAC_DEBUG_STATUS 0x00000448
#define MAC_HASH_COUNT 8
#define MAC_ADDR_COUNT 8
@@ -273,11 +273,11 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
-#define R_DUART_NUM_PORTS 2
+#define R_DUART_NUM_PORTS 2
-#define A_DUART 0x0010060000
+#define A_DUART 0x0010060000
-#define DUART_CHANREG_SPACING 0x100
+#define DUART_CHANREG_SPACING 0x100
#define A_DUART_CHANREG(chan, reg) \
(A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
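Note the "(chan) + 1" above: channel 0's register block therefore starts at A_DUART + 0x100 and channel 1's at A_DUART + 0x200, matching the absolute A_DUART_..._A and ..._B addresses listed further down:

	/* A_DUART_CHANREG(0, 0x00) == 0x0010060000 + 0x100*1 == 0x0010060100  (channel A block) */
	/* A_DUART_CHANREG(1, 0x00) == 0x0010060000 + 0x100*2 == 0x0010060200  (channel B block) */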
@@ -341,44 +341,44 @@
* These constants are the absolute addresses.
*/
-#define A_DUART_MODE_REG_1_A 0x0010060100
-#define A_DUART_MODE_REG_2_A 0x0010060110
-#define A_DUART_STATUS_A 0x0010060120
-#define A_DUART_CLK_SEL_A 0x0010060130
-#define A_DUART_CMD_A 0x0010060150
-#define A_DUART_RX_HOLD_A 0x0010060160
-#define A_DUART_TX_HOLD_A 0x0010060170
-
-#define A_DUART_MODE_REG_1_B 0x0010060200
-#define A_DUART_MODE_REG_2_B 0x0010060210
-#define A_DUART_STATUS_B 0x0010060220
-#define A_DUART_CLK_SEL_B 0x0010060230
-#define A_DUART_CMD_B 0x0010060250
-#define A_DUART_RX_HOLD_B 0x0010060260
-#define A_DUART_TX_HOLD_B 0x0010060270
-
-#define A_DUART_INPORT_CHNG 0x0010060300
-#define A_DUART_AUX_CTRL 0x0010060310
-#define A_DUART_ISR_A 0x0010060320
-#define A_DUART_IMR_A 0x0010060330
-#define A_DUART_ISR_B 0x0010060340
-#define A_DUART_IMR_B 0x0010060350
-#define A_DUART_OUT_PORT 0x0010060360
-#define A_DUART_OPCR 0x0010060370
-#define A_DUART_IN_PORT 0x0010060380
-#define A_DUART_ISR 0x0010060390
-#define A_DUART_IMR 0x00100603A0
-#define A_DUART_SET_OPR 0x00100603B0
-#define A_DUART_CLEAR_OPR 0x00100603C0
-#define A_DUART_INPORT_CHNG_A 0x00100603D0
-#define A_DUART_INPORT_CHNG_B 0x00100603E0
+#define A_DUART_MODE_REG_1_A 0x0010060100
+#define A_DUART_MODE_REG_2_A 0x0010060110
+#define A_DUART_STATUS_A 0x0010060120
+#define A_DUART_CLK_SEL_A 0x0010060130
+#define A_DUART_CMD_A 0x0010060150
+#define A_DUART_RX_HOLD_A 0x0010060160
+#define A_DUART_TX_HOLD_A 0x0010060170
+
+#define A_DUART_MODE_REG_1_B 0x0010060200
+#define A_DUART_MODE_REG_2_B 0x0010060210
+#define A_DUART_STATUS_B 0x0010060220
+#define A_DUART_CLK_SEL_B 0x0010060230
+#define A_DUART_CMD_B 0x0010060250
+#define A_DUART_RX_HOLD_B 0x0010060260
+#define A_DUART_TX_HOLD_B 0x0010060270
+
+#define A_DUART_INPORT_CHNG 0x0010060300
+#define A_DUART_AUX_CTRL 0x0010060310
+#define A_DUART_ISR_A 0x0010060320
+#define A_DUART_IMR_A 0x0010060330
+#define A_DUART_ISR_B 0x0010060340
+#define A_DUART_IMR_B 0x0010060350
+#define A_DUART_OUT_PORT 0x0010060360
+#define A_DUART_OPCR 0x0010060370
+#define A_DUART_IN_PORT 0x0010060380
+#define A_DUART_ISR 0x0010060390
+#define A_DUART_IMR 0x00100603A0
+#define A_DUART_SET_OPR 0x00100603B0
+#define A_DUART_CLEAR_OPR 0x00100603C0
+#define A_DUART_INPORT_CHNG_A 0x00100603D0
+#define A_DUART_INPORT_CHNG_B 0x00100603E0
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_DUART_FULL_CTL_A 0x0010060140
#define A_DUART_FULL_CTL_B 0x0010060240
-#define A_DUART_OPCR_A 0x0010060180
-#define A_DUART_OPCR_B 0x0010060280
+#define A_DUART_OPCR_A 0x0010060180
+#define A_DUART_OPCR_B 0x0010060280
#define A_DUART_INPORT_CHNG_DEBUG 0x00100603F0
#endif /* 1250 PASS2 || 112x PASS1 */
@@ -391,94 +391,94 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* sync serial only on 1250/112x */
-#define A_SER_BASE_0 0x0010060400
-#define A_SER_BASE_1 0x0010060800
-#define SER_SPACING 0x400
+#define A_SER_BASE_0 0x0010060400
+#define A_SER_BASE_1 0x0010060800
+#define SER_SPACING 0x400
-#define SER_DMA_TXRX_SPACING 0x80
+#define SER_DMA_TXRX_SPACING 0x80
-#define SER_NUM_PORTS 2
+#define SER_NUM_PORTS 2
-#define A_SER_CHANNEL_BASE(sernum) \
- (A_SER_BASE_0 + \
- SER_SPACING*(sernum))
+#define A_SER_CHANNEL_BASE(sernum) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum))
-#define A_SER_REGISTER(sernum,reg) \
- (A_SER_BASE_0 + \
- SER_SPACING*(sernum) + (reg))
+#define A_SER_REGISTER(sernum,reg) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum) + (reg))
#define R_SER_DMA_CHANNELS 0 /* Relative to A_SER_BASE_x */
#define A_SER_DMA_CHANNEL_BASE(sernum,txrx) \
- ((A_SER_CHANNEL_BASE(sernum)) + \
- R_SER_DMA_CHANNELS + \
- (SER_DMA_TXRX_SPACING*(txrx)))
+ ((A_SER_CHANNEL_BASE(sernum)) + \
+ R_SER_DMA_CHANNELS + \
+ (SER_DMA_TXRX_SPACING*(txrx)))
-#define A_SER_DMA_REGISTER(sernum, txrx, reg) \
- (A_SER_DMA_CHANNEL_BASE(sernum, txrx) + \
- (reg))
+#define A_SER_DMA_REGISTER(sernum, txrx, reg) \
+ (A_SER_DMA_CHANNEL_BASE(sernum, txrx) + \
+ (reg))
/*
* DMA channel registers, relative to A_SER_DMA_CHANNEL_BASE
*/
-#define R_SER_DMA_CONFIG0 0x00000000
-#define R_SER_DMA_CONFIG1 0x00000008
-#define R_SER_DMA_DSCR_BASE 0x00000010
-#define R_SER_DMA_DSCR_CNT 0x00000018
-#define R_SER_DMA_CUR_DSCRA 0x00000020
-#define R_SER_DMA_CUR_DSCRB 0x00000028
-#define R_SER_DMA_CUR_DSCRADDR 0x00000030
-
-#define R_SER_DMA_CONFIG0_RX 0x00000000
-#define R_SER_DMA_CONFIG1_RX 0x00000008
-#define R_SER_DMA_DSCR_BASE_RX 0x00000010
-#define R_SER_DMA_DSCR_COUNT_RX 0x00000018
-#define R_SER_DMA_CUR_DSCR_A_RX 0x00000020
-#define R_SER_DMA_CUR_DSCR_B_RX 0x00000028
+#define R_SER_DMA_CONFIG0 0x00000000
+#define R_SER_DMA_CONFIG1 0x00000008
+#define R_SER_DMA_DSCR_BASE 0x00000010
+#define R_SER_DMA_DSCR_CNT 0x00000018
+#define R_SER_DMA_CUR_DSCRA 0x00000020
+#define R_SER_DMA_CUR_DSCRB 0x00000028
+#define R_SER_DMA_CUR_DSCRADDR 0x00000030
+
+#define R_SER_DMA_CONFIG0_RX 0x00000000
+#define R_SER_DMA_CONFIG1_RX 0x00000008
+#define R_SER_DMA_DSCR_BASE_RX 0x00000010
+#define R_SER_DMA_DSCR_COUNT_RX 0x00000018
+#define R_SER_DMA_CUR_DSCR_A_RX 0x00000020
+#define R_SER_DMA_CUR_DSCR_B_RX 0x00000028
#define R_SER_DMA_CUR_DSCR_ADDR_RX 0x00000030
-#define R_SER_DMA_CONFIG0_TX 0x00000080
-#define R_SER_DMA_CONFIG1_TX 0x00000088
-#define R_SER_DMA_DSCR_BASE_TX 0x00000090
-#define R_SER_DMA_DSCR_COUNT_TX 0x00000098
-#define R_SER_DMA_CUR_DSCR_A_TX 0x000000A0
-#define R_SER_DMA_CUR_DSCR_B_TX 0x000000A8
+#define R_SER_DMA_CONFIG0_TX 0x00000080
+#define R_SER_DMA_CONFIG1_TX 0x00000088
+#define R_SER_DMA_DSCR_BASE_TX 0x00000090
+#define R_SER_DMA_DSCR_COUNT_TX 0x00000098
+#define R_SER_DMA_CUR_DSCR_A_TX 0x000000A0
+#define R_SER_DMA_CUR_DSCR_B_TX 0x000000A8
#define R_SER_DMA_CUR_DSCR_ADDR_TX 0x000000B0
-#define R_SER_MODE 0x00000100
-#define R_SER_MINFRM_SZ 0x00000108
-#define R_SER_MAXFRM_SZ 0x00000110
-#define R_SER_ADDR 0x00000118
-#define R_SER_USR0_ADDR 0x00000120
-#define R_SER_USR1_ADDR 0x00000128
-#define R_SER_USR2_ADDR 0x00000130
-#define R_SER_USR3_ADDR 0x00000138
-#define R_SER_CMD 0x00000140
-#define R_SER_TX_RD_THRSH 0x00000160
-#define R_SER_TX_WR_THRSH 0x00000168
-#define R_SER_RX_RD_THRSH 0x00000170
+#define R_SER_MODE 0x00000100
+#define R_SER_MINFRM_SZ 0x00000108
+#define R_SER_MAXFRM_SZ 0x00000110
+#define R_SER_ADDR 0x00000118
+#define R_SER_USR0_ADDR 0x00000120
+#define R_SER_USR1_ADDR 0x00000128
+#define R_SER_USR2_ADDR 0x00000130
+#define R_SER_USR3_ADDR 0x00000138
+#define R_SER_CMD 0x00000140
+#define R_SER_TX_RD_THRSH 0x00000160
+#define R_SER_TX_WR_THRSH 0x00000168
+#define R_SER_RX_RD_THRSH 0x00000170
#define R_SER_LINE_MODE 0x00000178
-#define R_SER_DMA_ENABLE 0x00000180
-#define R_SER_INT_MASK 0x00000190
-#define R_SER_STATUS 0x00000188
-#define R_SER_STATUS_DEBUG 0x000001A8
-#define R_SER_RX_TABLE_BASE 0x00000200
-#define SER_RX_TABLE_COUNT 16
-#define R_SER_TX_TABLE_BASE 0x00000300
-#define SER_TX_TABLE_COUNT 16
+#define R_SER_DMA_ENABLE 0x00000180
+#define R_SER_INT_MASK 0x00000190
+#define R_SER_STATUS 0x00000188
+#define R_SER_STATUS_DEBUG 0x000001A8
+#define R_SER_RX_TABLE_BASE 0x00000200
+#define SER_RX_TABLE_COUNT 16
+#define R_SER_TX_TABLE_BASE 0x00000300
+#define SER_TX_TABLE_COUNT 16
/* RMON Counters */
-#define R_SER_RMON_TX_BYTE_LO 0x000001C0
-#define R_SER_RMON_TX_BYTE_HI 0x000001C8
-#define R_SER_RMON_RX_BYTE_LO 0x000001D0
-#define R_SER_RMON_RX_BYTE_HI 0x000001D8
-#define R_SER_RMON_TX_UNDERRUN 0x000001E0
-#define R_SER_RMON_RX_OVERFLOW 0x000001E8
-#define R_SER_RMON_RX_ERRORS 0x000001F0
-#define R_SER_RMON_RX_BADADDR 0x000001F8
+#define R_SER_RMON_TX_BYTE_LO 0x000001C0
+#define R_SER_RMON_TX_BYTE_HI 0x000001C8
+#define R_SER_RMON_RX_BYTE_LO 0x000001D0
+#define R_SER_RMON_RX_BYTE_HI 0x000001D8
+#define R_SER_RMON_TX_UNDERRUN 0x000001E0
+#define R_SER_RMON_RX_OVERFLOW 0x000001E8
+#define R_SER_RMON_RX_ERRORS 0x000001F0
+#define R_SER_RMON_RX_BADADDR 0x000001F8
#endif /* 1250/112x */
@@ -486,38 +486,38 @@
* Generic Bus Registers
********************************************************************* */
-#define IO_EXT_CFG_COUNT 8
+#define IO_EXT_CFG_COUNT 8
#define A_IO_EXT_BASE 0x0010061000
#define A_IO_EXT_REG(r) (A_IO_EXT_BASE + (r))
-#define A_IO_EXT_CFG_BASE 0x0010061000
-#define A_IO_EXT_MULT_SIZE_BASE 0x0010061100
+#define A_IO_EXT_CFG_BASE 0x0010061000
+#define A_IO_EXT_MULT_SIZE_BASE 0x0010061100
#define A_IO_EXT_START_ADDR_BASE 0x0010061200
-#define A_IO_EXT_TIME_CFG0_BASE 0x0010061600
-#define A_IO_EXT_TIME_CFG1_BASE 0x0010061700
+#define A_IO_EXT_TIME_CFG0_BASE 0x0010061600
+#define A_IO_EXT_TIME_CFG1_BASE 0x0010061700
#define IO_EXT_REGISTER_SPACING 8
#define A_IO_EXT_CS_BASE(cs) (A_IO_EXT_CFG_BASE+IO_EXT_REGISTER_SPACING*(cs))
#define R_IO_EXT_REG(reg, cs) ((cs)*IO_EXT_REGISTER_SPACING + (reg))
#define R_IO_EXT_CFG 0x0000
-#define R_IO_EXT_MULT_SIZE 0x0100
+#define R_IO_EXT_MULT_SIZE 0x0100
#define R_IO_EXT_START_ADDR 0x0200
-#define R_IO_EXT_TIME_CFG0 0x0600
-#define R_IO_EXT_TIME_CFG1 0x0700
-
-
-#define A_IO_INTERRUPT_STATUS 0x0010061A00
-#define A_IO_INTERRUPT_DATA0 0x0010061A10
-#define A_IO_INTERRUPT_DATA1 0x0010061A18
-#define A_IO_INTERRUPT_DATA2 0x0010061A20
-#define A_IO_INTERRUPT_DATA3 0x0010061A28
-#define A_IO_INTERRUPT_ADDR0 0x0010061A30
-#define A_IO_INTERRUPT_ADDR1 0x0010061A40
-#define A_IO_INTERRUPT_PARITY 0x0010061A50
-#define A_IO_PCMCIA_CFG 0x0010061A60
-#define A_IO_PCMCIA_STATUS 0x0010061A70
+#define R_IO_EXT_TIME_CFG0 0x0600
+#define R_IO_EXT_TIME_CFG1 0x0700
+
+
+#define A_IO_INTERRUPT_STATUS 0x0010061A00
+#define A_IO_INTERRUPT_DATA0 0x0010061A10
+#define A_IO_INTERRUPT_DATA1 0x0010061A18
+#define A_IO_INTERRUPT_DATA2 0x0010061A20
+#define A_IO_INTERRUPT_DATA3 0x0010061A28
+#define A_IO_INTERRUPT_ADDR0 0x0010061A30
+#define A_IO_INTERRUPT_ADDR1 0x0010061A40
+#define A_IO_INTERRUPT_PARITY 0x0010061A50
+#define A_IO_PCMCIA_CFG 0x0010061A60
+#define A_IO_PCMCIA_STATUS 0x0010061A70
#define A_IO_DRIVE_0 0x0010061300
#define A_IO_DRIVE_1 0x0010061308
#define A_IO_DRIVE_2 0x0010061310
@@ -527,76 +527,76 @@
#define R_IO_DRIVE(x) ((x)*IO_DRIVE_REGISTER_SPACING)
#define A_IO_DRIVE(x) (A_IO_DRIVE_BASE + R_IO_DRIVE(x))
-#define R_IO_INTERRUPT_STATUS 0x0A00
-#define R_IO_INTERRUPT_DATA0 0x0A10
-#define R_IO_INTERRUPT_DATA1 0x0A18
-#define R_IO_INTERRUPT_DATA2 0x0A20
-#define R_IO_INTERRUPT_DATA3 0x0A28
-#define R_IO_INTERRUPT_ADDR0 0x0A30
-#define R_IO_INTERRUPT_ADDR1 0x0A40
-#define R_IO_INTERRUPT_PARITY 0x0A50
-#define R_IO_PCMCIA_CFG 0x0A60
-#define R_IO_PCMCIA_STATUS 0x0A70
+#define R_IO_INTERRUPT_STATUS 0x0A00
+#define R_IO_INTERRUPT_DATA0 0x0A10
+#define R_IO_INTERRUPT_DATA1 0x0A18
+#define R_IO_INTERRUPT_DATA2 0x0A20
+#define R_IO_INTERRUPT_DATA3 0x0A28
+#define R_IO_INTERRUPT_ADDR0 0x0A30
+#define R_IO_INTERRUPT_ADDR1 0x0A40
+#define R_IO_INTERRUPT_PARITY 0x0A50
+#define R_IO_PCMCIA_CFG 0x0A60
+#define R_IO_PCMCIA_STATUS 0x0A70
/* *********************************************************************
* GPIO Registers
********************************************************************* */
-#define A_GPIO_CLR_EDGE 0x0010061A80
-#define A_GPIO_INT_TYPE 0x0010061A88
-#define A_GPIO_INPUT_INVERT 0x0010061A90
-#define A_GPIO_GLITCH 0x0010061A98
-#define A_GPIO_READ 0x0010061AA0
-#define A_GPIO_DIRECTION 0x0010061AA8
-#define A_GPIO_PIN_CLR 0x0010061AB0
-#define A_GPIO_PIN_SET 0x0010061AB8
+#define A_GPIO_CLR_EDGE 0x0010061A80
+#define A_GPIO_INT_TYPE 0x0010061A88
+#define A_GPIO_INPUT_INVERT 0x0010061A90
+#define A_GPIO_GLITCH 0x0010061A98
+#define A_GPIO_READ 0x0010061AA0
+#define A_GPIO_DIRECTION 0x0010061AA8
+#define A_GPIO_PIN_CLR 0x0010061AB0
+#define A_GPIO_PIN_SET 0x0010061AB8
#define A_GPIO_BASE 0x0010061A80
-#define R_GPIO_CLR_EDGE 0x00
-#define R_GPIO_INT_TYPE 0x08
-#define R_GPIO_INPUT_INVERT 0x10
-#define R_GPIO_GLITCH 0x18
-#define R_GPIO_READ 0x20
-#define R_GPIO_DIRECTION 0x28
-#define R_GPIO_PIN_CLR 0x30
-#define R_GPIO_PIN_SET 0x38
+#define R_GPIO_CLR_EDGE 0x00
+#define R_GPIO_INT_TYPE 0x08
+#define R_GPIO_INPUT_INVERT 0x10
+#define R_GPIO_GLITCH 0x18
+#define R_GPIO_READ 0x20
+#define R_GPIO_DIRECTION 0x28
+#define R_GPIO_PIN_CLR 0x30
+#define R_GPIO_PIN_SET 0x38
/* *********************************************************************
* SMBus Registers
********************************************************************* */
-#define A_SMB_XTRA_0 0x0010060000
-#define A_SMB_XTRA_1 0x0010060008
-#define A_SMB_FREQ_0 0x0010060010
-#define A_SMB_FREQ_1 0x0010060018
-#define A_SMB_STATUS_0 0x0010060020
-#define A_SMB_STATUS_1 0x0010060028
-#define A_SMB_CMD_0 0x0010060030
-#define A_SMB_CMD_1 0x0010060038
-#define A_SMB_START_0 0x0010060040
-#define A_SMB_START_1 0x0010060048
-#define A_SMB_DATA_0 0x0010060050
-#define A_SMB_DATA_1 0x0010060058
-#define A_SMB_CONTROL_0 0x0010060060
-#define A_SMB_CONTROL_1 0x0010060068
-#define A_SMB_PEC_0 0x0010060070
-#define A_SMB_PEC_1 0x0010060078
-
-#define A_SMB_0 0x0010060000
-#define A_SMB_1 0x0010060008
-#define SMB_REGISTER_SPACING 0x8
-#define A_SMB_BASE(idx) (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
+#define A_SMB_XTRA_0 0x0010060000
+#define A_SMB_XTRA_1 0x0010060008
+#define A_SMB_FREQ_0 0x0010060010
+#define A_SMB_FREQ_1 0x0010060018
+#define A_SMB_STATUS_0 0x0010060020
+#define A_SMB_STATUS_1 0x0010060028
+#define A_SMB_CMD_0 0x0010060030
+#define A_SMB_CMD_1 0x0010060038
+#define A_SMB_START_0 0x0010060040
+#define A_SMB_START_1 0x0010060048
+#define A_SMB_DATA_0 0x0010060050
+#define A_SMB_DATA_1 0x0010060058
+#define A_SMB_CONTROL_0 0x0010060060
+#define A_SMB_CONTROL_1 0x0010060068
+#define A_SMB_PEC_0 0x0010060070
+#define A_SMB_PEC_1 0x0010060078
+
+#define A_SMB_0 0x0010060000
+#define A_SMB_1 0x0010060008
+#define SMB_REGISTER_SPACING 0x8
+#define A_SMB_BASE(idx) (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
#define A_SMB_REGISTER(idx, reg) (A_SMB_BASE(idx)+(reg))
-#define R_SMB_XTRA 0x0000000000
-#define R_SMB_FREQ 0x0000000010
-#define R_SMB_STATUS 0x0000000020
-#define R_SMB_CMD 0x0000000030
-#define R_SMB_START 0x0000000040
-#define R_SMB_DATA 0x0000000050
-#define R_SMB_CONTROL 0x0000000060
-#define R_SMB_PEC 0x0000000070
+#define R_SMB_XTRA 0x0000000000
+#define R_SMB_FREQ 0x0000000010
+#define R_SMB_STATUS 0x0000000020
+#define R_SMB_CMD 0x0000000030
+#define R_SMB_START 0x0000000040
+#define R_SMB_DATA 0x0000000050
+#define R_SMB_CONTROL 0x0000000060
+#define R_SMB_PEC 0x0000000070
/* *********************************************************************
* Timer Registers
@@ -607,55 +607,55 @@
*/
#define A_SCD_WDOG_0 0x0010020050
-#define A_SCD_WDOG_1 0x0010020150
-#define SCD_WDOG_SPACING 0x100
+#define A_SCD_WDOG_1 0x0010020150
+#define SCD_WDOG_SPACING 0x100
#define SCD_NUM_WDOGS 2
-#define A_SCD_WDOG_BASE(w) (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
+#define A_SCD_WDOG_BASE(w) (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
#define A_SCD_WDOG_REGISTER(w, r) (A_SCD_WDOG_BASE(w) + (r))
#define R_SCD_WDOG_INIT 0x0000000000
#define R_SCD_WDOG_CNT 0x0000000008
#define R_SCD_WDOG_CFG 0x0000000010
-#define A_SCD_WDOG_INIT_0 0x0010020050
-#define A_SCD_WDOG_CNT_0 0x0010020058
-#define A_SCD_WDOG_CFG_0 0x0010020060
+#define A_SCD_WDOG_INIT_0 0x0010020050
+#define A_SCD_WDOG_CNT_0 0x0010020058
+#define A_SCD_WDOG_CFG_0 0x0010020060
-#define A_SCD_WDOG_INIT_1 0x0010020150
-#define A_SCD_WDOG_CNT_1 0x0010020158
-#define A_SCD_WDOG_CFG_1 0x0010020160
+#define A_SCD_WDOG_INIT_1 0x0010020150
+#define A_SCD_WDOG_CNT_1 0x0010020158
+#define A_SCD_WDOG_CFG_1 0x0010020160
/*
* Generic timers
*/
#define A_SCD_TIMER_0 0x0010020070
-#define A_SCD_TIMER_1 0x0010020078
+#define A_SCD_TIMER_1 0x0010020078
#define A_SCD_TIMER_2 0x0010020170
-#define A_SCD_TIMER_3 0x0010020178
+#define A_SCD_TIMER_3 0x0010020178
#define SCD_NUM_TIMERS 4
-#define A_SCD_TIMER_BASE(w) (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
+#define A_SCD_TIMER_BASE(w) (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
#define A_SCD_TIMER_REGISTER(w, r) (A_SCD_TIMER_BASE(w) + (r))
#define R_SCD_TIMER_INIT 0x0000000000
#define R_SCD_TIMER_CNT 0x0000000010
#define R_SCD_TIMER_CFG 0x0000000020
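A_SCD_TIMER_BASE() folds the timers' irregular spacing (pairs 8 bytes apart, with the second pair 0x100 above the first) into one expression; worked out with the constants above and cross-checked against the absolute addresses below:

	/* A_SCD_TIMER_BASE(2) == A_SCD_TIMER_0 + 0x08*(2&1) + 0x100*((2&2)>>1)
	 *                     == 0x0010020070 + 0x00 + 0x100 == 0x0010020170
	 * A_SCD_TIMER_REGISTER(2, R_SCD_TIMER_CNT)
	 *                     == 0x0010020170 + 0x10 == 0x0010020180  (== A_SCD_TIMER_CNT_2)
	 */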
-#define A_SCD_TIMER_INIT_0 0x0010020070
-#define A_SCD_TIMER_CNT_0 0x0010020080
-#define A_SCD_TIMER_CFG_0 0x0010020090
+#define A_SCD_TIMER_INIT_0 0x0010020070
+#define A_SCD_TIMER_CNT_0 0x0010020080
+#define A_SCD_TIMER_CFG_0 0x0010020090
-#define A_SCD_TIMER_INIT_1 0x0010020078
-#define A_SCD_TIMER_CNT_1 0x0010020088
-#define A_SCD_TIMER_CFG_1 0x0010020098
+#define A_SCD_TIMER_INIT_1 0x0010020078
+#define A_SCD_TIMER_CNT_1 0x0010020088
+#define A_SCD_TIMER_CFG_1 0x0010020098
-#define A_SCD_TIMER_INIT_2 0x0010020170
-#define A_SCD_TIMER_CNT_2 0x0010020180
-#define A_SCD_TIMER_CFG_2 0x0010020190
+#define A_SCD_TIMER_INIT_2 0x0010020170
+#define A_SCD_TIMER_CNT_2 0x0010020180
+#define A_SCD_TIMER_CFG_2 0x0010020190
-#define A_SCD_TIMER_INIT_3 0x0010020178
-#define A_SCD_TIMER_CNT_3 0x0010020188
-#define A_SCD_TIMER_CFG_3 0x0010020198
+#define A_SCD_TIMER_INIT_3 0x0010020178
+#define A_SCD_TIMER_CNT_3 0x0010020188
+#define A_SCD_TIMER_CFG_3 0x0010020198
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_SCD_SCRATCH 0x0010020C10
@@ -671,28 +671,28 @@
* System Control Registers
********************************************************************* */
-#define A_SCD_SYSTEM_REVISION 0x0010020000
-#define A_SCD_SYSTEM_CFG 0x0010020008
-#define A_SCD_SYSTEM_MANUF 0x0010038000
+#define A_SCD_SYSTEM_REVISION 0x0010020000
+#define A_SCD_SYSTEM_CFG 0x0010020008
+#define A_SCD_SYSTEM_MANUF 0x0010038000
/* *********************************************************************
* System Address Trap Registers
********************************************************************* */
-#define A_ADDR_TRAP_INDEX 0x00100200B0
-#define A_ADDR_TRAP_REG 0x00100200B8
-#define A_ADDR_TRAP_UP_0 0x0010020400
-#define A_ADDR_TRAP_UP_1 0x0010020408
-#define A_ADDR_TRAP_UP_2 0x0010020410
-#define A_ADDR_TRAP_UP_3 0x0010020418
-#define A_ADDR_TRAP_DOWN_0 0x0010020420
-#define A_ADDR_TRAP_DOWN_1 0x0010020428
-#define A_ADDR_TRAP_DOWN_2 0x0010020430
-#define A_ADDR_TRAP_DOWN_3 0x0010020438
-#define A_ADDR_TRAP_CFG_0 0x0010020440
-#define A_ADDR_TRAP_CFG_1 0x0010020448
-#define A_ADDR_TRAP_CFG_2 0x0010020450
-#define A_ADDR_TRAP_CFG_3 0x0010020458
+#define A_ADDR_TRAP_INDEX 0x00100200B0
+#define A_ADDR_TRAP_REG 0x00100200B8
+#define A_ADDR_TRAP_UP_0 0x0010020400
+#define A_ADDR_TRAP_UP_1 0x0010020408
+#define A_ADDR_TRAP_UP_2 0x0010020410
+#define A_ADDR_TRAP_UP_3 0x0010020418
+#define A_ADDR_TRAP_DOWN_0 0x0010020420
+#define A_ADDR_TRAP_DOWN_1 0x0010020428
+#define A_ADDR_TRAP_DOWN_2 0x0010020430
+#define A_ADDR_TRAP_DOWN_3 0x0010020438
+#define A_ADDR_TRAP_CFG_0 0x0010020440
+#define A_ADDR_TRAP_CFG_1 0x0010020448
+#define A_ADDR_TRAP_CFG_2 0x0010020450
+#define A_ADDR_TRAP_CFG_3 0x0010020458
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define A_ADDR_TRAP_REG_DEBUG 0x0010020460
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
@@ -708,31 +708,31 @@
* System Interrupt Mapper Registers
********************************************************************* */
-#define A_IMR_CPU0_BASE 0x0010020000
-#define A_IMR_CPU1_BASE 0x0010022000
-#define IMR_REGISTER_SPACING 0x2000
-#define IMR_REGISTER_SPACING_SHIFT 13
+#define A_IMR_CPU0_BASE 0x0010020000
+#define A_IMR_CPU1_BASE 0x0010022000
+#define IMR_REGISTER_SPACING 0x2000
+#define IMR_REGISTER_SPACING_SHIFT 13
#define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
#define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
-#define R_IMR_INTERRUPT_DIAG 0x0010
-#define R_IMR_INTERRUPT_LDT 0x0018
-#define R_IMR_INTERRUPT_MASK 0x0028
-#define R_IMR_INTERRUPT_TRACE 0x0038
-#define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040
-#define R_IMR_LDT_INTERRUPT_SET 0x0048
-#define R_IMR_LDT_INTERRUPT 0x0018
-#define R_IMR_LDT_INTERRUPT_CLR 0x0020
-#define R_IMR_MAILBOX_CPU 0x00c0
-#define R_IMR_ALIAS_MAILBOX_CPU 0x1000
-#define R_IMR_MAILBOX_SET_CPU 0x00C8
-#define R_IMR_ALIAS_MAILBOX_SET_CPU 0x1008
-#define R_IMR_MAILBOX_CLR_CPU 0x00D0
-#define R_IMR_INTERRUPT_STATUS_BASE 0x0100
-#define R_IMR_INTERRUPT_STATUS_COUNT 7
-#define R_IMR_INTERRUPT_MAP_BASE 0x0200
-#define R_IMR_INTERRUPT_MAP_COUNT 64
+#define R_IMR_INTERRUPT_DIAG 0x0010
+#define R_IMR_INTERRUPT_LDT 0x0018
+#define R_IMR_INTERRUPT_MASK 0x0028
+#define R_IMR_INTERRUPT_TRACE 0x0038
+#define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040
+#define R_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_IMR_LDT_INTERRUPT 0x0018
+#define R_IMR_LDT_INTERRUPT_CLR 0x0020
+#define R_IMR_MAILBOX_CPU 0x00c0
+#define R_IMR_ALIAS_MAILBOX_CPU 0x1000
+#define R_IMR_MAILBOX_SET_CPU 0x00C8
+#define R_IMR_ALIAS_MAILBOX_SET_CPU 0x1008
+#define R_IMR_MAILBOX_CLR_CPU 0x00D0
+#define R_IMR_INTERRUPT_STATUS_BASE 0x0100
+#define R_IMR_INTERRUPT_STATUS_COUNT 7
+#define R_IMR_INTERRUPT_MAP_BASE 0x0200
+#define R_IMR_INTERRUPT_MAP_COUNT 64
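The per-CPU interrupt-mapper registers follow the same base-plus-offset pattern: A_IMR_MAPPER(cpu) selects that CPU's 0x2000-byte block and A_IMR_REGISTER() adds the register offset. For example:

	/* A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)
	 *   == A_IMR_CPU0_BASE + 1*IMR_REGISTER_SPACING + R_IMR_INTERRUPT_MASK
	 *   == 0x0010020000 + 0x2000 + 0x0028 == 0x0010022028
	 */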
/*
* these macros work together to build the address of a mailbox
@@ -746,11 +746,11 @@
* System Performance Counter Registers
********************************************************************* */
-#define A_SCD_PERF_CNT_CFG 0x00100204C0
-#define A_SCD_PERF_CNT_0 0x00100204D0
-#define A_SCD_PERF_CNT_1 0x00100204D8
-#define A_SCD_PERF_CNT_2 0x00100204E0
-#define A_SCD_PERF_CNT_3 0x00100204E8
+#define A_SCD_PERF_CNT_CFG 0x00100204C0
+#define A_SCD_PERF_CNT_0 0x00100204D0
+#define A_SCD_PERF_CNT_1 0x00100204D8
+#define A_SCD_PERF_CNT_2 0x00100204E0
+#define A_SCD_PERF_CNT_3 0x00100204E8
#define SCD_NUM_PERF_CNT 4
#define SCD_PERF_CNT_SPACING 8
@@ -760,46 +760,46 @@
* System Bus Watcher Registers
********************************************************************* */
-#define A_SCD_BUS_ERR_STATUS 0x0010020880
+#define A_SCD_BUS_ERR_STATUS 0x0010020880
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_SCD_BUS_ERR_STATUS_DEBUG 0x00100208D0
-#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
+#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
#endif /* 1250 PASS2 || 112x PASS1 */
-#define A_BUS_ERR_DATA_0 0x00100208A0
-#define A_BUS_ERR_DATA_1 0x00100208A8
-#define A_BUS_ERR_DATA_2 0x00100208B0
-#define A_BUS_ERR_DATA_3 0x00100208B8
-#define A_BUS_L2_ERRORS 0x00100208C0
-#define A_BUS_MEM_IO_ERRORS 0x00100208C8
+#define A_BUS_ERR_DATA_0 0x00100208A0
+#define A_BUS_ERR_DATA_1 0x00100208A8
+#define A_BUS_ERR_DATA_2 0x00100208B0
+#define A_BUS_ERR_DATA_3 0x00100208B8
+#define A_BUS_L2_ERRORS 0x00100208C0
+#define A_BUS_MEM_IO_ERRORS 0x00100208C8
/* *********************************************************************
* System Debug Controller Registers
********************************************************************* */
-#define A_SCD_JTAG_BASE 0x0010000000
+#define A_SCD_JTAG_BASE 0x0010000000
/* *********************************************************************
* System Trace Buffer Registers
********************************************************************* */
-#define A_SCD_TRACE_CFG 0x0010020A00
-#define A_SCD_TRACE_READ 0x0010020A08
-#define A_SCD_TRACE_EVENT_0 0x0010020A20
-#define A_SCD_TRACE_EVENT_1 0x0010020A28
-#define A_SCD_TRACE_EVENT_2 0x0010020A30
-#define A_SCD_TRACE_EVENT_3 0x0010020A38
-#define A_SCD_TRACE_SEQUENCE_0 0x0010020A40
-#define A_SCD_TRACE_SEQUENCE_1 0x0010020A48
-#define A_SCD_TRACE_SEQUENCE_2 0x0010020A50
-#define A_SCD_TRACE_SEQUENCE_3 0x0010020A58
-#define A_SCD_TRACE_EVENT_4 0x0010020A60
-#define A_SCD_TRACE_EVENT_5 0x0010020A68
-#define A_SCD_TRACE_EVENT_6 0x0010020A70
-#define A_SCD_TRACE_EVENT_7 0x0010020A78
-#define A_SCD_TRACE_SEQUENCE_4 0x0010020A80
-#define A_SCD_TRACE_SEQUENCE_5 0x0010020A88
-#define A_SCD_TRACE_SEQUENCE_6 0x0010020A90
-#define A_SCD_TRACE_SEQUENCE_7 0x0010020A98
+#define A_SCD_TRACE_CFG 0x0010020A00
+#define A_SCD_TRACE_READ 0x0010020A08
+#define A_SCD_TRACE_EVENT_0 0x0010020A20
+#define A_SCD_TRACE_EVENT_1 0x0010020A28
+#define A_SCD_TRACE_EVENT_2 0x0010020A30
+#define A_SCD_TRACE_EVENT_3 0x0010020A38
+#define A_SCD_TRACE_SEQUENCE_0 0x0010020A40
+#define A_SCD_TRACE_SEQUENCE_1 0x0010020A48
+#define A_SCD_TRACE_SEQUENCE_2 0x0010020A50
+#define A_SCD_TRACE_SEQUENCE_3 0x0010020A58
+#define A_SCD_TRACE_EVENT_4 0x0010020A60
+#define A_SCD_TRACE_EVENT_5 0x0010020A68
+#define A_SCD_TRACE_EVENT_6 0x0010020A70
+#define A_SCD_TRACE_EVENT_7 0x0010020A78
+#define A_SCD_TRACE_SEQUENCE_4 0x0010020A80
+#define A_SCD_TRACE_SEQUENCE_5 0x0010020A88
+#define A_SCD_TRACE_SEQUENCE_6 0x0010020A90
+#define A_SCD_TRACE_SEQUENCE_7 0x0010020A98
#define TRACE_REGISTER_SPACING 8
#define TRACE_NUM_REGISTERS 8
@@ -814,8 +814,8 @@
* System Generic DMA Registers
********************************************************************* */
-#define A_DM_0 0x0010020B00
-#define A_DM_1 0x0010020B20
+#define A_DM_0 0x0010020B00
+#define A_DM_1 0x0010020B20
#define A_DM_2 0x0010020B40
#define A_DM_3 0x0010020B60
#define DM_REGISTER_SPACING 0x20
@@ -854,39 +854,39 @@
********************************************************************* */
#if SIBYTE_HDR_FEATURE_1250_112x
-#define A_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
-#define A_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
-#define A_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
-#define A_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
#define A_PHYS_GENBUS _SB_MAKE64(0x0010090000)
#define A_PHYS_GENBUS_END _SB_MAKE64(0x0040000000)
#define A_PHYS_LDTPCI_IO_MATCH_BYTES_32 _SB_MAKE64(0x0040000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
-#define A_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
-#define A_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
-#define A_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
-#define A_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
-#define A_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
-#define A_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
-#define A_PHYS_LDT_EXP _SB_MAKE64(0x8000000000)
-#define A_PHYS_PCI_FULLACCESS_BYTES _SB_MAKE64(0xF000000000)
-#define A_PHYS_PCI_FULLACCESS_BITS _SB_MAKE64(0xF100000000)
-#define A_PHYS_RESERVED _SB_MAKE64(0xF200000000)
-#define A_PHYS_RESERVED_SPECIAL_LDT _SB_MAKE64(0xFD00000000)
-
-#define A_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
-#define PHYS_L2CACHE_NUM_WAYS 4
-#define A_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000080000)
-#define A_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0180000)
-#define A_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D01A0000)
-#define A_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D01C0000)
-#define A_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D01E0000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
+#define A_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_PHYS_LDT_EXP _SB_MAKE64(0x8000000000)
+#define A_PHYS_PCI_FULLACCESS_BYTES _SB_MAKE64(0xF000000000)
+#define A_PHYS_PCI_FULLACCESS_BITS _SB_MAKE64(0xF100000000)
+#define A_PHYS_RESERVED _SB_MAKE64(0xF200000000)
+#define A_PHYS_RESERVED_SPECIAL_LDT _SB_MAKE64(0xFD00000000)
+
+#define A_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define PHYS_L2CACHE_NUM_WAYS 4
+#define A_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000080000)
+#define A_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0180000)
+#define A_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D01A0000)
+#define A_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D01C0000)
+#define A_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D01E0000)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_scd.h b/arch/mips/include/asm/sibyte/sb1250_scd.h
index 615e165dbd21..d725f2f41afa 100644
--- a/arch/mips/include/asm/sibyte/sb1250_scd.h
+++ b/arch/mips/include/asm/sibyte/sb1250_scd.h
@@ -44,10 +44,10 @@
#define M_SYS_RESERVED _SB_MAKEMASK(8, 0)
-#define S_SYS_REVISION _SB_MAKE64(8)
-#define M_SYS_REVISION _SB_MAKEMASK(8, S_SYS_REVISION)
-#define V_SYS_REVISION(x) _SB_MAKEVALUE(x, S_SYS_REVISION)
-#define G_SYS_REVISION(x) _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
+#define S_SYS_REVISION _SB_MAKE64(8)
+#define M_SYS_REVISION _SB_MAKEMASK(8, S_SYS_REVISION)
+#define V_SYS_REVISION(x) _SB_MAKEVALUE(x, S_SYS_REVISION)
+#define G_SYS_REVISION(x) _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
#define K_SYS_REVISION_BCM1250_PASS1 0x01
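Throughout these headers each register field comes as an S_ (shift), M_ (mask), V_ (value builder) and G_ (getter) quadruple; a brief sketch of how they pair up, using the revision field above (not from the patch, and the sample usage assumes the header is included):

#include <linux/types.h>

static inline unsigned int decode_revision(u64 sysrev)
{
	/* Mask out bits 15:8 of the revision register and shift them down. */
	return (unsigned int)G_SYS_REVISION(sysrev);
}

/* Building a field goes the other way: place an 8-bit value at bit 8. */
static const u64 rev_pass1_field = V_SYS_REVISION(K_SYS_REVISION_BCM1250_PASS1);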
@@ -93,10 +93,10 @@
#define K_SYS_REVISION_BCM1480_B0 0x11
/*Cache size - 23:20 of revision register*/
-#define S_SYS_L2C_SIZE _SB_MAKE64(20)
-#define M_SYS_L2C_SIZE _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
-#define V_SYS_L2C_SIZE(x) _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
-#define G_SYS_L2C_SIZE(x) _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
+#define S_SYS_L2C_SIZE _SB_MAKE64(20)
+#define M_SYS_L2C_SIZE _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
+#define V_SYS_L2C_SIZE(x) _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
+#define G_SYS_L2C_SIZE(x) _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
#define K_SYS_L2C_SIZE_1MB 0
#define K_SYS_L2C_SIZE_512KB 5
@@ -109,40 +109,40 @@
/* Number of CPU cores, bits 27:24 of revision register*/
-#define S_SYS_NUM_CPUS _SB_MAKE64(24)
-#define M_SYS_NUM_CPUS _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
-#define V_SYS_NUM_CPUS(x) _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
-#define G_SYS_NUM_CPUS(x) _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
+#define S_SYS_NUM_CPUS _SB_MAKE64(24)
+#define M_SYS_NUM_CPUS _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
+#define V_SYS_NUM_CPUS(x) _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
+#define G_SYS_NUM_CPUS(x) _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
/* XXX: discourage people from using these constants. */
-#define S_SYS_PART _SB_MAKE64(16)
-#define M_SYS_PART _SB_MAKEMASK(16, S_SYS_PART)
-#define V_SYS_PART(x) _SB_MAKEVALUE(x, S_SYS_PART)
-#define G_SYS_PART(x) _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
+#define S_SYS_PART _SB_MAKE64(16)
+#define M_SYS_PART _SB_MAKEMASK(16, S_SYS_PART)
+#define V_SYS_PART(x) _SB_MAKEVALUE(x, S_SYS_PART)
+#define G_SYS_PART(x) _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
/* XXX: discourage people from using these constants. */
-#define K_SYS_PART_SB1250 0x1250
-#define K_SYS_PART_BCM1120 0x1121
-#define K_SYS_PART_BCM1125 0x1123
-#define K_SYS_PART_BCM1125H 0x1124
-#define K_SYS_PART_BCM1122 0x1113
+#define K_SYS_PART_SB1250 0x1250
+#define K_SYS_PART_BCM1120 0x1121
+#define K_SYS_PART_BCM1125 0x1123
+#define K_SYS_PART_BCM1125H 0x1124
+#define K_SYS_PART_BCM1122 0x1113
/* The "peripheral set" (SOC type) is the low 4 bits of the "part" field. */
-#define S_SYS_SOC_TYPE _SB_MAKE64(16)
-#define M_SYS_SOC_TYPE _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
-#define V_SYS_SOC_TYPE(x) _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
-#define G_SYS_SOC_TYPE(x) _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
-
-#define K_SYS_SOC_TYPE_BCM1250 0x0
-#define K_SYS_SOC_TYPE_BCM1120 0x1
-#define K_SYS_SOC_TYPE_BCM1250_ALT 0x2 /* 1250pass2 w/ 1/4 L2. */
-#define K_SYS_SOC_TYPE_BCM1125 0x3
-#define K_SYS_SOC_TYPE_BCM1125H 0x4
-#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5 /* 1250pass2 w/ 1/2 L2. */
-#define K_SYS_SOC_TYPE_BCM1x80 0x6
-#define K_SYS_SOC_TYPE_BCM1x55 0x7
+#define S_SYS_SOC_TYPE _SB_MAKE64(16)
+#define M_SYS_SOC_TYPE _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
+#define V_SYS_SOC_TYPE(x) _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
+#define G_SYS_SOC_TYPE(x) _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
+
+#define K_SYS_SOC_TYPE_BCM1250 0x0
+#define K_SYS_SOC_TYPE_BCM1120 0x1
+#define K_SYS_SOC_TYPE_BCM1250_ALT 0x2 /* 1250pass2 w/ 1/4 L2. */
+#define K_SYS_SOC_TYPE_BCM1125 0x3
+#define K_SYS_SOC_TYPE_BCM1125H 0x4
+#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5 /* 1250pass2 w/ 1/2 L2. */
+#define K_SYS_SOC_TYPE_BCM1x80 0x6
+#define K_SYS_SOC_TYPE_BCM1x55 0x7
/*
* Calculate correct SOC type given a copy of system revision register.
@@ -169,10 +169,10 @@
? K_SYS_SOC_TYPE_BCM1250 : G_SYS_SOC_TYPE(sysrev))
#endif
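As a usage sketch (not part of the patch), the getters defined earlier decode a copy of the revision register; note that the macro ending above folds the _ALT part numbers back to the plain BCM1250 type, so prefer it over a raw G_SYS_SOC_TYPE() where that distinction matters:

#include <linux/types.h>

static inline void decode_sysrev(u64 sysrev, int *soc, int *ncpus)
{
	*soc   = (int)G_SYS_SOC_TYPE(sysrev);	/* low 4 bits of the part field */
	*ncpus = (int)G_SYS_NUM_CPUS(sysrev);	/* bits 27:24 */
}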
-#define S_SYS_WID _SB_MAKE64(32)
-#define M_SYS_WID _SB_MAKEMASK(32, S_SYS_WID)
-#define V_SYS_WID(x) _SB_MAKEVALUE(x, S_SYS_WID)
-#define G_SYS_WID(x) _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
+#define S_SYS_WID _SB_MAKE64(32)
+#define M_SYS_WID _SB_MAKEMASK(32, S_SYS_WID)
+#define V_SYS_WID(x) _SB_MAKEVALUE(x, S_SYS_WID)
+#define G_SYS_WID(x) _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
/*
* System Manufacturing Register
@@ -181,37 +181,37 @@
#if SIBYTE_HDR_FEATURE_1250_112x
/* Wafer ID: bits 31:0 */
-#define S_SYS_WAFERID1_200 _SB_MAKE64(0)
-#define M_SYS_WAFERID1_200 _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
-#define V_SYS_WAFERID1_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
-#define G_SYS_WAFERID1_200(x) _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
+#define S_SYS_WAFERID1_200 _SB_MAKE64(0)
+#define M_SYS_WAFERID1_200 _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
+#define V_SYS_WAFERID1_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
+#define G_SYS_WAFERID1_200(x) _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
-#define S_SYS_BIN _SB_MAKE64(32)
-#define M_SYS_BIN _SB_MAKEMASK(4, S_SYS_BIN)
-#define V_SYS_BIN(x) _SB_MAKEVALUE(x, S_SYS_BIN)
-#define G_SYS_BIN(x) _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
+#define S_SYS_BIN _SB_MAKE64(32)
+#define M_SYS_BIN _SB_MAKEMASK(4, S_SYS_BIN)
+#define V_SYS_BIN(x) _SB_MAKEVALUE(x, S_SYS_BIN)
+#define G_SYS_BIN(x) _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
/* Wafer ID: bits 39:36 */
-#define S_SYS_WAFERID2_200 _SB_MAKE64(36)
-#define M_SYS_WAFERID2_200 _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
-#define V_SYS_WAFERID2_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
-#define G_SYS_WAFERID2_200(x) _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
+#define S_SYS_WAFERID2_200 _SB_MAKE64(36)
+#define M_SYS_WAFERID2_200 _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
+#define V_SYS_WAFERID2_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
+#define G_SYS_WAFERID2_200(x) _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
/* Wafer ID: bits 39:0 */
-#define S_SYS_WAFERID_300 _SB_MAKE64(0)
-#define M_SYS_WAFERID_300 _SB_MAKEMASK(40, S_SYS_WAFERID_300)
-#define V_SYS_WAFERID_300(x) _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
-#define G_SYS_WAFERID_300(x) _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
-
-#define S_SYS_XPOS _SB_MAKE64(40)
-#define M_SYS_XPOS _SB_MAKEMASK(6, S_SYS_XPOS)
-#define V_SYS_XPOS(x) _SB_MAKEVALUE(x, S_SYS_XPOS)
-#define G_SYS_XPOS(x) _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
-
-#define S_SYS_YPOS _SB_MAKE64(46)
-#define M_SYS_YPOS _SB_MAKEMASK(6, S_SYS_YPOS)
-#define V_SYS_YPOS(x) _SB_MAKEVALUE(x, S_SYS_YPOS)
-#define G_SYS_YPOS(x) _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
+#define S_SYS_WAFERID_300 _SB_MAKE64(0)
+#define M_SYS_WAFERID_300 _SB_MAKEMASK(40, S_SYS_WAFERID_300)
+#define V_SYS_WAFERID_300(x) _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
+#define G_SYS_WAFERID_300(x) _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
+
+#define S_SYS_XPOS _SB_MAKE64(40)
+#define M_SYS_XPOS _SB_MAKEMASK(6, S_SYS_XPOS)
+#define V_SYS_XPOS(x) _SB_MAKEVALUE(x, S_SYS_XPOS)
+#define G_SYS_XPOS(x) _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
+
+#define S_SYS_YPOS _SB_MAKE64(46)
+#define M_SYS_YPOS _SB_MAKEMASK(6, S_SYS_YPOS)
+#define V_SYS_YPOS(x) _SB_MAKEVALUE(x, S_SYS_YPOS)
+#define G_SYS_YPOS(x) _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
#endif
@@ -221,55 +221,55 @@
*/
#if SIBYTE_HDR_FEATURE_1250_112x
-#define M_SYS_LDT_PLL_BYP _SB_MAKEMASK1(3)
+#define M_SYS_LDT_PLL_BYP _SB_MAKEMASK1(3)
#define M_SYS_PCI_SYNC_TEST_MODE _SB_MAKEMASK1(4)
-#define M_SYS_IOB0_DIV _SB_MAKEMASK1(5)
-#define M_SYS_IOB1_DIV _SB_MAKEMASK1(6)
-
-#define S_SYS_PLL_DIV _SB_MAKE64(7)
-#define M_SYS_PLL_DIV _SB_MAKEMASK(5, S_SYS_PLL_DIV)
-#define V_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
-#define G_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
-
-#define M_SYS_SER0_ENABLE _SB_MAKEMASK1(12)
-#define M_SYS_SER0_RSTB_EN _SB_MAKEMASK1(13)
-#define M_SYS_SER1_ENABLE _SB_MAKEMASK1(14)
-#define M_SYS_SER1_RSTB_EN _SB_MAKEMASK1(15)
-#define M_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
-
-#define S_SYS_BOOT_MODE _SB_MAKE64(17)
-#define M_SYS_BOOT_MODE _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
-#define V_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
-#define G_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
-#define K_SYS_BOOT_MODE_ROM32 0
-#define K_SYS_BOOT_MODE_ROM8 1
+#define M_SYS_IOB0_DIV _SB_MAKEMASK1(5)
+#define M_SYS_IOB1_DIV _SB_MAKEMASK1(6)
+
+#define S_SYS_PLL_DIV _SB_MAKE64(7)
+#define M_SYS_PLL_DIV _SB_MAKEMASK(5, S_SYS_PLL_DIV)
+#define V_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
+#define G_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
+
+#define M_SYS_SER0_ENABLE _SB_MAKEMASK1(12)
+#define M_SYS_SER0_RSTB_EN _SB_MAKEMASK1(13)
+#define M_SYS_SER1_ENABLE _SB_MAKEMASK1(14)
+#define M_SYS_SER1_RSTB_EN _SB_MAKEMASK1(15)
+#define M_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+
+#define S_SYS_BOOT_MODE _SB_MAKE64(17)
+#define M_SYS_BOOT_MODE _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
+#define V_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
+#define G_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
+#define K_SYS_BOOT_MODE_ROM32 0
+#define K_SYS_BOOT_MODE_ROM8 1
#define K_SYS_BOOT_MODE_SMBUS_SMALL 2
#define K_SYS_BOOT_MODE_SMBUS_BIG 3
-#define M_SYS_PCI_HOST _SB_MAKEMASK1(19)
-#define M_SYS_PCI_ARBITER _SB_MAKEMASK1(20)
-#define M_SYS_SOUTH_ON_LDT _SB_MAKEMASK1(21)
-#define M_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
-#define M_SYS_GENCLK_EN _SB_MAKEMASK1(23)
-#define M_SYS_LDT_TEST_EN _SB_MAKEMASK1(24)
-#define M_SYS_GEN_PARITY_EN _SB_MAKEMASK1(25)
+#define M_SYS_PCI_HOST _SB_MAKEMASK1(19)
+#define M_SYS_PCI_ARBITER _SB_MAKEMASK1(20)
+#define M_SYS_SOUTH_ON_LDT _SB_MAKEMASK1(21)
+#define M_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_SYS_LDT_TEST_EN _SB_MAKEMASK1(24)
+#define M_SYS_GEN_PARITY_EN _SB_MAKEMASK1(25)
-#define S_SYS_CONFIG 26
-#define M_SYS_CONFIG _SB_MAKEMASK(6, S_SYS_CONFIG)
-#define V_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_SYS_CONFIG)
-#define G_SYS_CONFIG(x) _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
+#define S_SYS_CONFIG 26
+#define M_SYS_CONFIG _SB_MAKEMASK(6, S_SYS_CONFIG)
+#define V_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_SYS_CONFIG)
+#define G_SYS_CONFIG(x) _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
/* The following bits are writeable by JTAG only. */
-#define M_SYS_CLKSTOP _SB_MAKEMASK1(32)
-#define M_SYS_CLKSTEP _SB_MAKEMASK1(33)
+#define M_SYS_CLKSTOP _SB_MAKEMASK1(32)
+#define M_SYS_CLKSTEP _SB_MAKEMASK1(33)
-#define S_SYS_CLKCOUNT 34
-#define M_SYS_CLKCOUNT _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
-#define V_SYS_CLKCOUNT(x) _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
-#define G_SYS_CLKCOUNT(x) _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
+#define S_SYS_CLKCOUNT 34
+#define M_SYS_CLKCOUNT _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
+#define V_SYS_CLKCOUNT(x) _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
+#define G_SYS_CLKCOUNT(x) _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
-#define M_SYS_PLL_BYPASS _SB_MAKEMASK1(42)
+#define M_SYS_PLL_BYPASS _SB_MAKEMASK1(42)
#define S_SYS_PLL_IREF 43
#define M_SYS_PLL_IREF _SB_MAKEMASK(2, S_SYS_PLL_IREF)
@@ -280,26 +280,26 @@
#define S_SYS_PLL_VREG 47
#define M_SYS_PLL_VREG _SB_MAKEMASK(2, S_SYS_PLL_VREG)
-#define M_SYS_MEM_RESET _SB_MAKEMASK1(49)
-#define M_SYS_L2C_RESET _SB_MAKEMASK1(50)
-#define M_SYS_IO_RESET_0 _SB_MAKEMASK1(51)
-#define M_SYS_IO_RESET_1 _SB_MAKEMASK1(52)
-#define M_SYS_SCD_RESET _SB_MAKEMASK1(53)
+#define M_SYS_MEM_RESET _SB_MAKEMASK1(49)
+#define M_SYS_L2C_RESET _SB_MAKEMASK1(50)
+#define M_SYS_IO_RESET_0 _SB_MAKEMASK1(51)
+#define M_SYS_IO_RESET_1 _SB_MAKEMASK1(52)
+#define M_SYS_SCD_RESET _SB_MAKEMASK1(53)
/* End of bits writable by JTAG only. */
-#define M_SYS_CPU_RESET_0 _SB_MAKEMASK1(54)
-#define M_SYS_CPU_RESET_1 _SB_MAKEMASK1(55)
+#define M_SYS_CPU_RESET_0 _SB_MAKEMASK1(54)
+#define M_SYS_CPU_RESET_1 _SB_MAKEMASK1(55)
-#define M_SYS_UNICPU0 _SB_MAKEMASK1(56)
-#define M_SYS_UNICPU1 _SB_MAKEMASK1(57)
+#define M_SYS_UNICPU0 _SB_MAKEMASK1(56)
+#define M_SYS_UNICPU1 _SB_MAKEMASK1(57)
-#define M_SYS_SB_SOFTRES _SB_MAKEMASK1(58)
-#define M_SYS_EXT_RESET _SB_MAKEMASK1(59)
-#define M_SYS_SYSTEM_RESET _SB_MAKEMASK1(60)
+#define M_SYS_SB_SOFTRES _SB_MAKEMASK1(58)
+#define M_SYS_EXT_RESET _SB_MAKEMASK1(59)
+#define M_SYS_SYSTEM_RESET _SB_MAKEMASK1(60)
-#define M_SYS_MISR_MODE _SB_MAKEMASK1(61)
-#define M_SYS_MISR_RESET _SB_MAKEMASK1(62)
+#define M_SYS_MISR_MODE _SB_MAKEMASK1(61)
+#define M_SYS_MISR_RESET _SB_MAKEMASK1(62)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_SYS_SW_FLAG _SB_MAKEMASK1(63)
@@ -313,46 +313,46 @@
* Registers: SCD_MBOX_CPU_x
*/
-#define S_MBOX_INT_3 0
-#define M_MBOX_INT_3 _SB_MAKEMASK(16, S_MBOX_INT_3)
-#define S_MBOX_INT_2 16
-#define M_MBOX_INT_2 _SB_MAKEMASK(16, S_MBOX_INT_2)
-#define S_MBOX_INT_1 32
-#define M_MBOX_INT_1 _SB_MAKEMASK(16, S_MBOX_INT_1)
-#define S_MBOX_INT_0 48
-#define M_MBOX_INT_0 _SB_MAKEMASK(16, S_MBOX_INT_0)
+#define S_MBOX_INT_3 0
+#define M_MBOX_INT_3 _SB_MAKEMASK(16, S_MBOX_INT_3)
+#define S_MBOX_INT_2 16
+#define M_MBOX_INT_2 _SB_MAKEMASK(16, S_MBOX_INT_2)
+#define S_MBOX_INT_1 32
+#define M_MBOX_INT_1 _SB_MAKEMASK(16, S_MBOX_INT_1)
+#define S_MBOX_INT_0 48
+#define M_MBOX_INT_0 _SB_MAKEMASK(16, S_MBOX_INT_0)
/*
* Watchdog Registers (Table 4-8) (Table 4-9) (Table 4-10)
* Registers: SCD_WDOG_INIT_CNT_x
*/
-#define V_SCD_WDOG_FREQ 1000000
+#define V_SCD_WDOG_FREQ 1000000
-#define S_SCD_WDOG_INIT 0
-#define M_SCD_WDOG_INIT _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
+#define S_SCD_WDOG_INIT 0
+#define M_SCD_WDOG_INIT _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
-#define S_SCD_WDOG_CNT 0
-#define M_SCD_WDOG_CNT _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
+#define S_SCD_WDOG_CNT 0
+#define M_SCD_WDOG_CNT _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
-#define S_SCD_WDOG_ENABLE 0
-#define M_SCD_WDOG_ENABLE _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
+#define S_SCD_WDOG_ENABLE 0
+#define M_SCD_WDOG_ENABLE _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
-#define S_SCD_WDOG_RESET_TYPE 2
-#define M_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
+#define S_SCD_WDOG_RESET_TYPE 2
+#define M_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
#define V_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_SCD_WDOG_RESET_TYPE)
#define G_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_SCD_WDOG_RESET_TYPE, M_SCD_WDOG_RESET_TYPE)
-#define K_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
-#define K_SCD_WDOG_RESET_SOFT 1
-#define K_SCD_WDOG_RESET_CPU0 3
-#define K_SCD_WDOG_RESET_CPU1 5
+#define K_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_SCD_WDOG_RESET_SOFT 1
+#define K_SCD_WDOG_RESET_CPU0 3
+#define K_SCD_WDOG_RESET_CPU1 5
#define K_SCD_WDOG_RESET_BOTH_CPUS 7
/* This feature is present in 1250 C0 and later, but *not* in 112x A revs. */
#if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define S_SCD_WDOG_HAS_RESET 8
-#define M_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
+#define S_SCD_WDOG_HAS_RESET 8
+#define M_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
#endif
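A hedged sketch of turning a timeout into watchdog register values with the constants above (the 5-second figure and the soft-reset choice are arbitrary, and the actual register writes are omitted):

#include <linux/types.h>

static inline u64 wdog_init_count(unsigned int seconds)
{
	/* The counter ticks at V_SCD_WDOG_FREQ (1 MHz); 23 usable bits. */
	return ((u64)seconds * V_SCD_WDOG_FREQ) & M_SCD_WDOG_INIT;
}

/* Enable the watchdog and request a soft reset when it fires. */
static const u64 wdog_cfg = M_SCD_WDOG_ENABLE |
			    V_SCD_WDOG_RESET_TYPE(K_SCD_WDOG_RESET_SOFT);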
@@ -360,46 +360,46 @@
* Timer Registers (Table 4-11) (Table 4-12) (Table 4-13)
*/
-#define V_SCD_TIMER_FREQ 1000000
+#define V_SCD_TIMER_FREQ 1000000
-#define S_SCD_TIMER_INIT 0
-#define M_SCD_TIMER_INIT _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
-#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
-#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
+#define S_SCD_TIMER_INIT 0
+#define M_SCD_TIMER_INIT _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
+#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
+#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
#define V_SCD_TIMER_WIDTH 23
-#define S_SCD_TIMER_CNT 0
-#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
-#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
-#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
+#define S_SCD_TIMER_CNT 0
+#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
+#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
+#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
-#define M_SCD_TIMER_ENABLE _SB_MAKEMASK1(0)
-#define M_SCD_TIMER_MODE _SB_MAKEMASK1(1)
+#define M_SCD_TIMER_ENABLE _SB_MAKEMASK1(0)
+#define M_SCD_TIMER_MODE _SB_MAKEMASK1(1)
#define M_SCD_TIMER_MODE_CONTINUOUS M_SCD_TIMER_MODE
/*
* System Performance Counters
*/
-#define S_SPC_CFG_SRC0 0
-#define M_SPC_CFG_SRC0 _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
-#define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
-#define G_SPC_CFG_SRC0(x) _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
+#define S_SPC_CFG_SRC0 0
+#define M_SPC_CFG_SRC0 _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
+#define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
+#define G_SPC_CFG_SRC0(x) _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
-#define S_SPC_CFG_SRC1 8
-#define M_SPC_CFG_SRC1 _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
-#define V_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
-#define G_SPC_CFG_SRC1(x) _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
+#define S_SPC_CFG_SRC1 8
+#define M_SPC_CFG_SRC1 _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
+#define V_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
+#define G_SPC_CFG_SRC1(x) _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
-#define S_SPC_CFG_SRC2 16
-#define M_SPC_CFG_SRC2 _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
-#define V_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
-#define G_SPC_CFG_SRC2(x) _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
+#define S_SPC_CFG_SRC2 16
+#define M_SPC_CFG_SRC2 _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
+#define V_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
+#define G_SPC_CFG_SRC2(x) _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
-#define S_SPC_CFG_SRC3 24
-#define M_SPC_CFG_SRC3 _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
-#define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
-#define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
+#define S_SPC_CFG_SRC3 24
+#define M_SPC_CFG_SRC3 _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
+#define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
+#define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
#if SIBYTE_HDR_FEATURE_1250_112x
#define M_SPC_CFG_CLEAR _SB_MAKEMASK1(32)
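A sketch (not from the patch) of programming the performance-counter config word and locating counter n via the spacing constants defined further up; the event source numbers 0x01/0x02 are placeholders, and M_SPC_CFG_CLEAR only exists on the 1250/112x parts:

#include <linux/types.h>

/* Two event sources plus a counter clear; sources 2 and 3 left at zero. */
static const u64 spc_cfg = V_SPC_CFG_SRC0(0x01) |
			   V_SPC_CFG_SRC1(0x02) |
			   M_SPC_CFG_CLEAR;

static inline u64 perf_cnt_addr(int n)	/* 0 <= n < SCD_NUM_PERF_CNT */
{
	return A_SCD_PERF_CNT_0 + (u64)n * SCD_PERF_CNT_SPACING;
}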
@@ -411,58 +411,58 @@
* Bus Watcher
*/
-#define S_SCD_BERR_TID 8
-#define M_SCD_BERR_TID _SB_MAKEMASK(10, S_SCD_BERR_TID)
-#define V_SCD_BERR_TID(x) _SB_MAKEVALUE(x, S_SCD_BERR_TID)
-#define G_SCD_BERR_TID(x) _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
+#define S_SCD_BERR_TID 8
+#define M_SCD_BERR_TID _SB_MAKEMASK(10, S_SCD_BERR_TID)
+#define V_SCD_BERR_TID(x) _SB_MAKEVALUE(x, S_SCD_BERR_TID)
+#define G_SCD_BERR_TID(x) _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
-#define S_SCD_BERR_RID 18
-#define M_SCD_BERR_RID _SB_MAKEMASK(4, S_SCD_BERR_RID)
-#define V_SCD_BERR_RID(x) _SB_MAKEVALUE(x, S_SCD_BERR_RID)
-#define G_SCD_BERR_RID(x) _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
+#define S_SCD_BERR_RID 18
+#define M_SCD_BERR_RID _SB_MAKEMASK(4, S_SCD_BERR_RID)
+#define V_SCD_BERR_RID(x) _SB_MAKEVALUE(x, S_SCD_BERR_RID)
+#define G_SCD_BERR_RID(x) _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
-#define S_SCD_BERR_DCODE 22
-#define M_SCD_BERR_DCODE _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
-#define V_SCD_BERR_DCODE(x) _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
-#define G_SCD_BERR_DCODE(x) _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
+#define S_SCD_BERR_DCODE 22
+#define M_SCD_BERR_DCODE _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
+#define V_SCD_BERR_DCODE(x) _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
+#define G_SCD_BERR_DCODE(x) _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
-#define M_SCD_BERR_MULTERRS _SB_MAKEMASK1(30)
+#define M_SCD_BERR_MULTERRS _SB_MAKEMASK1(30)
-#define S_SCD_L2ECC_CORR_D 0
-#define M_SCD_L2ECC_CORR_D _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
-#define V_SCD_L2ECC_CORR_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
-#define G_SCD_L2ECC_CORR_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
+#define S_SCD_L2ECC_CORR_D 0
+#define M_SCD_L2ECC_CORR_D _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
+#define V_SCD_L2ECC_CORR_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
+#define G_SCD_L2ECC_CORR_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
-#define S_SCD_L2ECC_BAD_D 8
-#define M_SCD_L2ECC_BAD_D _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
-#define V_SCD_L2ECC_BAD_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
-#define G_SCD_L2ECC_BAD_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
+#define S_SCD_L2ECC_BAD_D 8
+#define M_SCD_L2ECC_BAD_D _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
+#define V_SCD_L2ECC_BAD_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
+#define G_SCD_L2ECC_BAD_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
-#define S_SCD_L2ECC_CORR_T 16
-#define M_SCD_L2ECC_CORR_T _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
-#define V_SCD_L2ECC_CORR_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
-#define G_SCD_L2ECC_CORR_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
+#define S_SCD_L2ECC_CORR_T 16
+#define M_SCD_L2ECC_CORR_T _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
+#define V_SCD_L2ECC_CORR_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
+#define G_SCD_L2ECC_CORR_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
-#define S_SCD_L2ECC_BAD_T 24
-#define M_SCD_L2ECC_BAD_T _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
-#define V_SCD_L2ECC_BAD_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
-#define G_SCD_L2ECC_BAD_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
+#define S_SCD_L2ECC_BAD_T 24
+#define M_SCD_L2ECC_BAD_T _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
+#define V_SCD_L2ECC_BAD_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
+#define G_SCD_L2ECC_BAD_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
-#define S_SCD_MEM_ECC_CORR 0
-#define M_SCD_MEM_ECC_CORR _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
-#define V_SCD_MEM_ECC_CORR(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
-#define G_SCD_MEM_ECC_CORR(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
+#define S_SCD_MEM_ECC_CORR 0
+#define M_SCD_MEM_ECC_CORR _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
+#define V_SCD_MEM_ECC_CORR(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
+#define G_SCD_MEM_ECC_CORR(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
-#define S_SCD_MEM_ECC_BAD 8
-#define M_SCD_MEM_ECC_BAD _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
-#define V_SCD_MEM_ECC_BAD(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
-#define G_SCD_MEM_ECC_BAD(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
+#define S_SCD_MEM_ECC_BAD 8
+#define M_SCD_MEM_ECC_BAD _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
+#define V_SCD_MEM_ECC_BAD(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
+#define G_SCD_MEM_ECC_BAD(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
-#define S_SCD_MEM_BUSERR 16
-#define M_SCD_MEM_BUSERR _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
-#define V_SCD_MEM_BUSERR(x) _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
-#define G_SCD_MEM_BUSERR(x) _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
+#define S_SCD_MEM_BUSERR 16
+#define M_SCD_MEM_BUSERR _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
+#define V_SCD_MEM_BUSERR(x) _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
+#define G_SCD_MEM_BUSERR(x) _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
/*
@@ -473,28 +473,28 @@
#define M_ATRAP_INDEX _SB_MAKEMASK(4, 0)
#define M_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
-#define S_ATRAP_CFG_CNT 0
-#define M_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
-#define V_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
-#define G_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
+#define S_ATRAP_CFG_CNT 0
+#define M_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
+#define V_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
+#define G_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
#define M_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
-#define M_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
-#define M_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_ATRAP_CFG_INV _SB_MAKEMASK1(5)
#define M_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
#define M_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
-#define S_ATRAP_CFG_AGENTID 8
-#define M_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
-#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
-#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
+#define S_ATRAP_CFG_AGENTID 8
+#define M_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
+#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
+#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
#define K_BUS_AGENT_CPU0 0
#define K_BUS_AGENT_CPU1 1
#define K_BUS_AGENT_IOB0 2
#define K_BUS_AGENT_IOB1 3
-#define K_BUS_AGENT_SCD 4
-#define K_BUS_AGENT_L2C 6
+#define K_BUS_AGENT_SCD 4
+#define K_BUS_AGENT_L2C 6
#define K_BUS_AGENT_MC 7
#define S_ATRAP_CFG_CATTR 12
@@ -503,13 +503,13 @@
#define G_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_ATRAP_CFG_CATTR, M_ATRAP_CFG_CATTR)
#define K_ATRAP_CFG_CATTR_IGNORE 0
-#define K_ATRAP_CFG_CATTR_UNC 1
+#define K_ATRAP_CFG_CATTR_UNC 1
#define K_ATRAP_CFG_CATTR_CACHEABLE 2
-#define K_ATRAP_CFG_CATTR_NONCOH 3
+#define K_ATRAP_CFG_CATTR_NONCOH 3
#define K_ATRAP_CFG_CATTR_COHERENT 4
#define K_ATRAP_CFG_CATTR_NOTUNC 5
#define K_ATRAP_CFG_CATTR_NOTNONCOH 6
-#define K_ATRAP_CFG_CATTR_NOTCOHERENT 7
+#define K_ATRAP_CFG_CATTR_NOTCOHERENT 7
#endif /* 1250/112x */
@@ -517,16 +517,16 @@
* Trace Buffer Config register
*/
-#define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0)
-#define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1)
-#define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2)
-#define M_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3)
-#define M_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4)
-#define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
-#define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6)
-#define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7)
+#define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0)
+#define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1)
+#define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2)
+#define M_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3)
+#define M_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4)
+#define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
+#define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6)
+#define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8)
+#define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
/*
@@ -534,121 +534,121 @@
* a slightly different place in the register.
*/
#if SIBYTE_HDR_FEATURE_1250_112x
-#define S_SCD_TRACE_CFG_CUR_ADDR 10
+#define S_SCD_TRACE_CFG_CUR_ADDR 10
#else
#if SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SCD_TRACE_CFG_CUR_ADDR 24
+#define S_SCD_TRACE_CFG_CUR_ADDR 24
#endif /* 1480 */
-#endif /* 1250/112x */
+#endif /* 1250/112x */
-#define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
-#define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
-#define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
+#define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
+#define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
+#define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
/*
* Trace Event registers
*/
-#define S_SCD_TREVT_ADDR_MATCH 0
-#define M_SCD_TREVT_ADDR_MATCH _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
-#define V_SCD_TREVT_ADDR_MATCH(x) _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
-#define G_SCD_TREVT_ADDR_MATCH(x) _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
-
-#define M_SCD_TREVT_REQID_MATCH _SB_MAKEMASK1(4)
-#define M_SCD_TREVT_DATAID_MATCH _SB_MAKEMASK1(5)
-#define M_SCD_TREVT_RESPID_MATCH _SB_MAKEMASK1(6)
-#define M_SCD_TREVT_INTERRUPT _SB_MAKEMASK1(7)
-#define M_SCD_TREVT_DEBUG_PIN _SB_MAKEMASK1(9)
-#define M_SCD_TREVT_WRITE _SB_MAKEMASK1(10)
-#define M_SCD_TREVT_READ _SB_MAKEMASK1(11)
-
-#define S_SCD_TREVT_REQID 12
-#define M_SCD_TREVT_REQID _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
-#define V_SCD_TREVT_REQID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
-#define G_SCD_TREVT_REQID(x) _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
-
-#define S_SCD_TREVT_RESPID 16
-#define M_SCD_TREVT_RESPID _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
-#define V_SCD_TREVT_RESPID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
-#define G_SCD_TREVT_RESPID(x) _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
-
-#define S_SCD_TREVT_DATAID 20
-#define M_SCD_TREVT_DATAID _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
-#define V_SCD_TREVT_DATAID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
-#define G_SCD_TREVT_DATAID(x) _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
-
-#define S_SCD_TREVT_COUNT 24
-#define M_SCD_TREVT_COUNT _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
-#define V_SCD_TREVT_COUNT(x) _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
-#define G_SCD_TREVT_COUNT(x) _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
+#define S_SCD_TREVT_ADDR_MATCH 0
+#define M_SCD_TREVT_ADDR_MATCH _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
+#define V_SCD_TREVT_ADDR_MATCH(x) _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
+#define G_SCD_TREVT_ADDR_MATCH(x) _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
+
+#define M_SCD_TREVT_REQID_MATCH _SB_MAKEMASK1(4)
+#define M_SCD_TREVT_DATAID_MATCH _SB_MAKEMASK1(5)
+#define M_SCD_TREVT_RESPID_MATCH _SB_MAKEMASK1(6)
+#define M_SCD_TREVT_INTERRUPT _SB_MAKEMASK1(7)
+#define M_SCD_TREVT_DEBUG_PIN _SB_MAKEMASK1(9)
+#define M_SCD_TREVT_WRITE _SB_MAKEMASK1(10)
+#define M_SCD_TREVT_READ _SB_MAKEMASK1(11)
+
+#define S_SCD_TREVT_REQID 12
+#define M_SCD_TREVT_REQID _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
+#define V_SCD_TREVT_REQID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
+#define G_SCD_TREVT_REQID(x) _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
+
+#define S_SCD_TREVT_RESPID 16
+#define M_SCD_TREVT_RESPID _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
+#define V_SCD_TREVT_RESPID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
+#define G_SCD_TREVT_RESPID(x) _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
+
+#define S_SCD_TREVT_DATAID 20
+#define M_SCD_TREVT_DATAID _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
+#define V_SCD_TREVT_DATAID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
+#define G_SCD_TREVT_DATAID(x) _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
+
+#define S_SCD_TREVT_COUNT 24
+#define M_SCD_TREVT_COUNT _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
+#define V_SCD_TREVT_COUNT(x) _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
+#define G_SCD_TREVT_COUNT(x) _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
/*
* Trace Sequence registers
*/
-#define S_SCD_TRSEQ_EVENT4 0
-#define M_SCD_TRSEQ_EVENT4 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
-#define V_SCD_TRSEQ_EVENT4(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
-#define G_SCD_TRSEQ_EVENT4(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
-
-#define S_SCD_TRSEQ_EVENT3 4
-#define M_SCD_TRSEQ_EVENT3 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
-#define V_SCD_TRSEQ_EVENT3(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
-#define G_SCD_TRSEQ_EVENT3(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
-
-#define S_SCD_TRSEQ_EVENT2 8
-#define M_SCD_TRSEQ_EVENT2 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
-#define V_SCD_TRSEQ_EVENT2(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
-#define G_SCD_TRSEQ_EVENT2(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
-
-#define S_SCD_TRSEQ_EVENT1 12
-#define M_SCD_TRSEQ_EVENT1 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
-#define V_SCD_TRSEQ_EVENT1(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
-#define G_SCD_TRSEQ_EVENT1(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
-
-#define K_SCD_TRSEQ_E0 0
-#define K_SCD_TRSEQ_E1 1
-#define K_SCD_TRSEQ_E2 2
-#define K_SCD_TRSEQ_E3 3
-#define K_SCD_TRSEQ_E0_E1 4
-#define K_SCD_TRSEQ_E1_E2 5
-#define K_SCD_TRSEQ_E2_E3 6
-#define K_SCD_TRSEQ_E0_E1_E2 7
-#define K_SCD_TRSEQ_E0_E1_E2_E3 8
-#define K_SCD_TRSEQ_E0E1 9
-#define K_SCD_TRSEQ_E0E1E2 10
-#define K_SCD_TRSEQ_E0E1E2E3 11
-#define K_SCD_TRSEQ_E0E1_E2 12
-#define K_SCD_TRSEQ_E0E1_E2E3 13
-#define K_SCD_TRSEQ_E0E1_E2_E3 14
-#define K_SCD_TRSEQ_IGNORED 15
-
-#define K_SCD_TRSEQ_TRIGGER_ALL (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
-
-#define S_SCD_TRSEQ_FUNCTION 16
-#define M_SCD_TRSEQ_FUNCTION _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
-#define V_SCD_TRSEQ_FUNCTION(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
-#define G_SCD_TRSEQ_FUNCTION(x) _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
-
-#define K_SCD_TRSEQ_FUNC_NOP 0
-#define K_SCD_TRSEQ_FUNC_START 1
-#define K_SCD_TRSEQ_FUNC_STOP 2
-#define K_SCD_TRSEQ_FUNC_FREEZE 3
-
-#define V_SCD_TRSEQ_FUNC_NOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
-#define V_SCD_TRSEQ_FUNC_START V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
-#define V_SCD_TRSEQ_FUNC_STOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
-#define V_SCD_TRSEQ_FUNC_FREEZE V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
-
-#define M_SCD_TRSEQ_ASAMPLE _SB_MAKEMASK1(18)
-#define M_SCD_TRSEQ_DSAMPLE _SB_MAKEMASK1(19)
-#define M_SCD_TRSEQ_DEBUGPIN _SB_MAKEMASK1(20)
-#define M_SCD_TRSEQ_DEBUGCPU _SB_MAKEMASK1(21)
-#define M_SCD_TRSEQ_CLEARUSE _SB_MAKEMASK1(22)
-#define M_SCD_TRSEQ_ALLD_A _SB_MAKEMASK1(23)
-#define M_SCD_TRSEQ_ALL_A _SB_MAKEMASK1(24)
+#define S_SCD_TRSEQ_EVENT4 0
+#define M_SCD_TRSEQ_EVENT4 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
+#define V_SCD_TRSEQ_EVENT4(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
+#define G_SCD_TRSEQ_EVENT4(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
+
+#define S_SCD_TRSEQ_EVENT3 4
+#define M_SCD_TRSEQ_EVENT3 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
+#define V_SCD_TRSEQ_EVENT3(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
+#define G_SCD_TRSEQ_EVENT3(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
+
+#define S_SCD_TRSEQ_EVENT2 8
+#define M_SCD_TRSEQ_EVENT2 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
+#define V_SCD_TRSEQ_EVENT2(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
+#define G_SCD_TRSEQ_EVENT2(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
+
+#define S_SCD_TRSEQ_EVENT1 12
+#define M_SCD_TRSEQ_EVENT1 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
+#define V_SCD_TRSEQ_EVENT1(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
+#define G_SCD_TRSEQ_EVENT1(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
+
+#define K_SCD_TRSEQ_E0 0
+#define K_SCD_TRSEQ_E1 1
+#define K_SCD_TRSEQ_E2 2
+#define K_SCD_TRSEQ_E3 3
+#define K_SCD_TRSEQ_E0_E1 4
+#define K_SCD_TRSEQ_E1_E2 5
+#define K_SCD_TRSEQ_E2_E3 6
+#define K_SCD_TRSEQ_E0_E1_E2 7
+#define K_SCD_TRSEQ_E0_E1_E2_E3 8
+#define K_SCD_TRSEQ_E0E1 9
+#define K_SCD_TRSEQ_E0E1E2 10
+#define K_SCD_TRSEQ_E0E1E2E3 11
+#define K_SCD_TRSEQ_E0E1_E2 12
+#define K_SCD_TRSEQ_E0E1_E2E3 13
+#define K_SCD_TRSEQ_E0E1_E2_E3 14
+#define K_SCD_TRSEQ_IGNORED 15
+
+#define K_SCD_TRSEQ_TRIGGER_ALL (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
+
+#define S_SCD_TRSEQ_FUNCTION 16
+#define M_SCD_TRSEQ_FUNCTION _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
+#define V_SCD_TRSEQ_FUNCTION(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
+#define G_SCD_TRSEQ_FUNCTION(x) _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
+
+#define K_SCD_TRSEQ_FUNC_NOP 0
+#define K_SCD_TRSEQ_FUNC_START 1
+#define K_SCD_TRSEQ_FUNC_STOP 2
+#define K_SCD_TRSEQ_FUNC_FREEZE 3
+
+#define V_SCD_TRSEQ_FUNC_NOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
+#define V_SCD_TRSEQ_FUNC_START V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
+#define V_SCD_TRSEQ_FUNC_STOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
+#define V_SCD_TRSEQ_FUNC_FREEZE V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
+
+#define M_SCD_TRSEQ_ASAMPLE _SB_MAKEMASK1(18)
+#define M_SCD_TRSEQ_DSAMPLE _SB_MAKEMASK1(19)
+#define M_SCD_TRSEQ_DEBUGPIN _SB_MAKEMASK1(20)
+#define M_SCD_TRSEQ_DEBUGCPU _SB_MAKEMASK1(21)
+#define M_SCD_TRSEQ_CLEARUSE _SB_MAKEMASK1(22)
+#define M_SCD_TRSEQ_ALLD_A _SB_MAKEMASK1(23)
+#define M_SCD_TRSEQ_ALL_A _SB_MAKEMASK1(24)
#endif
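Before the next file, a rough sketch of arming the trace buffer with the sequence and config bits defined above; this is not part of the patch, and the trigger and bit choices are illustrative only, to be checked against the SB1250 manual:

#include <linux/types.h>

/* One sequence entry: all four event slots ignored, start on address samples. */
static const u64 trseq = K_SCD_TRSEQ_TRIGGER_ALL |
			 V_SCD_TRSEQ_FUNC_START |
			 M_SCD_TRSEQ_ASAMPLE;

/* Trace config register values: reset first, then start capturing. */
static const u64 trace_reset = M_SCD_TRACE_CFG_RESET;
static const u64 trace_start = M_SCD_TRACE_CFG_START;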
diff --git a/arch/mips/include/asm/sibyte/sb1250_smbus.h b/arch/mips/include/asm/sibyte/sb1250_smbus.h
index 128d6b75b819..3cb73e89bbbc 100644
--- a/arch/mips/include/asm/sibyte/sb1250_smbus.h
+++ b/arch/mips/include/asm/sibyte/sb1250_smbus.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * SMBUS Constants File: sb1250_smbus.h
+ * SMBUS Constants File: sb1250_smbus.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's SMbus devices.
@@ -40,83 +40,83 @@
* SMBus Clock Frequency Register (Table 14-2)
*/
-#define S_SMB_FREQ_DIV 0
-#define M_SMB_FREQ_DIV _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
-#define V_SMB_FREQ_DIV(x) _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
+#define S_SMB_FREQ_DIV 0
+#define M_SMB_FREQ_DIV _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
+#define V_SMB_FREQ_DIV(x) _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
#define K_SMB_FREQ_400KHZ 0x1F
#define K_SMB_FREQ_100KHZ 0x7D
#define K_SMB_FREQ_10KHZ 1250
-#define S_SMB_CMD 0
-#define M_SMB_CMD _SB_MAKEMASK(8, S_SMB_CMD)
-#define V_SMB_CMD(x) _SB_MAKEVALUE(x, S_SMB_CMD)
+#define S_SMB_CMD 0
+#define M_SMB_CMD _SB_MAKEMASK(8, S_SMB_CMD)
+#define V_SMB_CMD(x) _SB_MAKEVALUE(x, S_SMB_CMD)
/*
* SMBus control register (Table 14-4)
*/
-#define M_SMB_ERR_INTR _SB_MAKEMASK1(0)
-#define M_SMB_FINISH_INTR _SB_MAKEMASK1(1)
+#define M_SMB_ERR_INTR _SB_MAKEMASK1(0)
+#define M_SMB_FINISH_INTR _SB_MAKEMASK1(1)
-#define S_SMB_DATA_OUT 4
-#define M_SMB_DATA_OUT _SB_MAKEMASK1(S_SMB_DATA_OUT)
-#define V_SMB_DATA_OUT(x) _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
+#define S_SMB_DATA_OUT 4
+#define M_SMB_DATA_OUT _SB_MAKEMASK1(S_SMB_DATA_OUT)
+#define V_SMB_DATA_OUT(x) _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
-#define M_SMB_DATA_DIR _SB_MAKEMASK1(5)
-#define M_SMB_DATA_DIR_OUTPUT M_SMB_DATA_DIR
-#define M_SMB_CLK_OUT _SB_MAKEMASK1(6)
-#define M_SMB_DIRECT_ENABLE _SB_MAKEMASK1(7)
+#define M_SMB_DATA_DIR _SB_MAKEMASK1(5)
+#define M_SMB_DATA_DIR_OUTPUT M_SMB_DATA_DIR
+#define M_SMB_CLK_OUT _SB_MAKEMASK1(6)
+#define M_SMB_DIRECT_ENABLE _SB_MAKEMASK1(7)
/*
* SMBus status registers (Table 14-5)
*/
-#define M_SMB_BUSY _SB_MAKEMASK1(0)
-#define M_SMB_ERROR _SB_MAKEMASK1(1)
-#define M_SMB_ERROR_TYPE _SB_MAKEMASK1(2)
+#define M_SMB_BUSY _SB_MAKEMASK1(0)
+#define M_SMB_ERROR _SB_MAKEMASK1(1)
+#define M_SMB_ERROR_TYPE _SB_MAKEMASK1(2)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SMB_SCL_IN 5
-#define M_SMB_SCL_IN _SB_MAKEMASK1(S_SMB_SCL_IN)
-#define V_SMB_SCL_IN(x) _SB_MAKEVALUE(x, S_SMB_SCL_IN)
-#define G_SMB_SCL_IN(x) _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
+#define S_SMB_SCL_IN 5
+#define M_SMB_SCL_IN _SB_MAKEMASK1(S_SMB_SCL_IN)
+#define V_SMB_SCL_IN(x) _SB_MAKEVALUE(x, S_SMB_SCL_IN)
+#define G_SMB_SCL_IN(x) _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_SMB_REF 6
-#define M_SMB_REF _SB_MAKEMASK1(S_SMB_REF)
-#define V_SMB_REF(x) _SB_MAKEVALUE(x, S_SMB_REF)
-#define G_SMB_REF(x) _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
+#define S_SMB_REF 6
+#define M_SMB_REF _SB_MAKEMASK1(S_SMB_REF)
+#define V_SMB_REF(x) _SB_MAKEVALUE(x, S_SMB_REF)
+#define G_SMB_REF(x) _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
-#define S_SMB_DATA_IN 7
-#define M_SMB_DATA_IN _SB_MAKEMASK1(S_SMB_DATA_IN)
-#define V_SMB_DATA_IN(x) _SB_MAKEVALUE(x, S_SMB_DATA_IN)
-#define G_SMB_DATA_IN(x) _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
+#define S_SMB_DATA_IN 7
+#define M_SMB_DATA_IN _SB_MAKEMASK1(S_SMB_DATA_IN)
+#define V_SMB_DATA_IN(x) _SB_MAKEVALUE(x, S_SMB_DATA_IN)
+#define G_SMB_DATA_IN(x) _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
/*
* SMBus Start/Command registers (Table 14-9)
*/
-#define S_SMB_ADDR 0
-#define M_SMB_ADDR _SB_MAKEMASK(7, S_SMB_ADDR)
-#define V_SMB_ADDR(x) _SB_MAKEVALUE(x, S_SMB_ADDR)
-#define G_SMB_ADDR(x) _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
+#define S_SMB_ADDR 0
+#define M_SMB_ADDR _SB_MAKEMASK(7, S_SMB_ADDR)
+#define V_SMB_ADDR(x) _SB_MAKEVALUE(x, S_SMB_ADDR)
+#define G_SMB_ADDR(x) _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
-#define M_SMB_QDATA _SB_MAKEMASK1(7)
+#define M_SMB_QDATA _SB_MAKEMASK1(7)
-#define S_SMB_TT 8
-#define M_SMB_TT _SB_MAKEMASK(3, S_SMB_TT)
-#define V_SMB_TT(x) _SB_MAKEVALUE(x, S_SMB_TT)
-#define G_SMB_TT(x) _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
+#define S_SMB_TT 8
+#define M_SMB_TT _SB_MAKEMASK(3, S_SMB_TT)
+#define V_SMB_TT(x) _SB_MAKEVALUE(x, S_SMB_TT)
+#define G_SMB_TT(x) _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
-#define K_SMB_TT_WR1BYTE 0
-#define K_SMB_TT_WR2BYTE 1
-#define K_SMB_TT_WR3BYTE 2
-#define K_SMB_TT_CMD_RD1BYTE 3
-#define K_SMB_TT_CMD_RD2BYTE 4
-#define K_SMB_TT_RD1BYTE 5
-#define K_SMB_TT_QUICKCMD 6
-#define K_SMB_TT_EEPROMREAD 7
+#define K_SMB_TT_WR1BYTE 0
+#define K_SMB_TT_WR2BYTE 1
+#define K_SMB_TT_WR3BYTE 2
+#define K_SMB_TT_CMD_RD1BYTE 3
+#define K_SMB_TT_CMD_RD2BYTE 4
+#define K_SMB_TT_RD1BYTE 5
+#define K_SMB_TT_QUICKCMD 6
+#define K_SMB_TT_EEPROMREAD 7
#define V_SMB_TT_WR1BYTE V_SMB_TT(K_SMB_TT_WR1BYTE)
#define V_SMB_TT_WR2BYTE V_SMB_TT(K_SMB_TT_WR2BYTE)
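A minimal sketch of composing SMBus command and start words from the fields above; the device address 0x50 and command byte 0x00 are made-up example values, and the exact pairing of command byte and transaction-type code should be checked against the tables cited in the comments:

#include <linux/types.h>

static inline u64 smb_start_word(u8 devaddr)
{
	/* 7-bit device address plus a transaction-type code. */
	return V_SMB_ADDR(devaddr) | V_SMB_TT_WR2BYTE;
}

/* Command byte for the command register (placeholder value). */
static const u64 smb_cmd = V_SMB_CMD(0x00);

/* e.g. start = smb_start_word(0x50); */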
@@ -127,51 +127,51 @@
#define V_SMB_TT_QUICKCMD V_SMB_TT(K_SMB_TT_QUICKCMD)
#define V_SMB_TT_EEPROMREAD V_SMB_TT(K_SMB_TT_EEPROMREAD)
-#define M_SMB_PEC _SB_MAKEMASK1(15)
+#define M_SMB_PEC _SB_MAKEMASK1(15)
/*
* SMBus Data Register (Table 14-6) and SMBus Extra Register (Table 14-7)
*/
-#define S_SMB_LB 0
-#define M_SMB_LB _SB_MAKEMASK(8, S_SMB_LB)
-#define V_SMB_LB(x) _SB_MAKEVALUE(x, S_SMB_LB)
+#define S_SMB_LB 0
+#define M_SMB_LB _SB_MAKEMASK(8, S_SMB_LB)
+#define V_SMB_LB(x) _SB_MAKEVALUE(x, S_SMB_LB)
-#define S_SMB_MB 8
-#define M_SMB_MB _SB_MAKEMASK(8, S_SMB_MB)
-#define V_SMB_MB(x) _SB_MAKEVALUE(x, S_SMB_MB)
+#define S_SMB_MB 8
+#define M_SMB_MB _SB_MAKEMASK(8, S_SMB_MB)
+#define V_SMB_MB(x) _SB_MAKEVALUE(x, S_SMB_MB)
/*
* SMBus Packet Error Check register (Table 14-8)
*/
-#define S_SPEC_PEC 0
-#define M_SPEC_PEC _SB_MAKEMASK(8, S_SPEC_PEC)
-#define V_SPEC_MB(x) _SB_MAKEVALUE(x, S_SPEC_PEC)
+#define S_SPEC_PEC 0
+#define M_SPEC_PEC _SB_MAKEMASK(8, S_SPEC_PEC)
+#define V_SPEC_MB(x) _SB_MAKEVALUE(x, S_SPEC_PEC)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SMB_CMDH 8
-#define M_SMB_CMDH _SB_MAKEMASK(8, S_SMB_CMDH)
-#define V_SMB_CMDH(x) _SB_MAKEVALUE(x, S_SMB_CMDH)
+#define S_SMB_CMDH 8
+#define M_SMB_CMDH _SB_MAKEMASK(8, S_SMB_CMDH)
+#define V_SMB_CMDH(x) _SB_MAKEVALUE(x, S_SMB_CMDH)
#define M_SMB_EXTEND _SB_MAKEMASK1(14)
-#define S_SMB_DFMT 8
-#define M_SMB_DFMT _SB_MAKEMASK(3, S_SMB_DFMT)
-#define V_SMB_DFMT(x) _SB_MAKEVALUE(x, S_SMB_DFMT)
-#define G_SMB_DFMT(x) _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
+#define S_SMB_DFMT 8
+#define M_SMB_DFMT _SB_MAKEMASK(3, S_SMB_DFMT)
+#define V_SMB_DFMT(x) _SB_MAKEVALUE(x, S_SMB_DFMT)
+#define G_SMB_DFMT(x) _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
-#define K_SMB_DFMT_1BYTE 0
-#define K_SMB_DFMT_2BYTE 1
-#define K_SMB_DFMT_3BYTE 2
-#define K_SMB_DFMT_4BYTE 3
-#define K_SMB_DFMT_NODATA 4
-#define K_SMB_DFMT_CMD4BYTE 5
-#define K_SMB_DFMT_CMD5BYTE 6
-#define K_SMB_DFMT_RESERVED 7
+#define K_SMB_DFMT_1BYTE 0
+#define K_SMB_DFMT_2BYTE 1
+#define K_SMB_DFMT_3BYTE 2
+#define K_SMB_DFMT_4BYTE 3
+#define K_SMB_DFMT_NODATA 4
+#define K_SMB_DFMT_CMD4BYTE 5
+#define K_SMB_DFMT_CMD5BYTE 6
+#define K_SMB_DFMT_RESERVED 7
#define V_SMB_DFMT_1BYTE V_SMB_DFMT(K_SMB_DFMT_1BYTE)
#define V_SMB_DFMT_2BYTE V_SMB_DFMT(K_SMB_DFMT_2BYTE)
@@ -182,13 +182,13 @@
#define V_SMB_DFMT_CMD5BYTE V_SMB_DFMT(K_SMB_DFMT_CMD5BYTE)
#define V_SMB_DFMT_RESERVED V_SMB_DFMT(K_SMB_DFMT_RESERVED)
-#define S_SMB_AFMT 11
-#define M_SMB_AFMT _SB_MAKEMASK(2, S_SMB_AFMT)
-#define V_SMB_AFMT(x) _SB_MAKEVALUE(x, S_SMB_AFMT)
-#define G_SMB_AFMT(x) _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
+#define S_SMB_AFMT 11
+#define M_SMB_AFMT _SB_MAKEMASK(2, S_SMB_AFMT)
+#define V_SMB_AFMT(x) _SB_MAKEVALUE(x, S_SMB_AFMT)
+#define G_SMB_AFMT(x) _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
-#define K_SMB_AFMT_NONE 0
-#define K_SMB_AFMT_ADDR 1
+#define K_SMB_AFMT_NONE 0
+#define K_SMB_AFMT_ADDR 1
#define K_SMB_AFMT_ADDR_CMD1BYTE 2
#define K_SMB_AFMT_ADDR_CMD2BYTE 3
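And a sketch of splitting a data-register value back into bytes with the LB/MB fields above ('data' stands for whatever was read from the data register; not part of the patch):

#include <linux/types.h>

static inline u16 smb_data_to_u16(u64 data)
{
	u8 lo = (u8)(data & M_SMB_LB);			/* bits 7:0 */
	u8 hi = (u8)((data & M_SMB_MB) >> S_SMB_MB);	/* bits 15:8 */

	return (u16)(lo | (hi << 8));
}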
diff --git a/arch/mips/include/asm/sibyte/sb1250_syncser.h b/arch/mips/include/asm/sibyte/sb1250_syncser.h
index 274e9179d326..b3acc75cf0f2 100644
--- a/arch/mips/include/asm/sibyte/sb1250_syncser.h
+++ b/arch/mips/include/asm/sibyte/sb1250_syncser.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Synchronous Serial Constants File: sb1250_syncser.h
+ * Synchronous Serial Constants File: sb1250_syncser.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's Synchronous Serial
@@ -39,108 +39,108 @@
* Serial Mode Configuration Register
*/
-#define M_SYNCSER_CRC_MODE _SB_MAKEMASK1(0)
-#define M_SYNCSER_MSB_FIRST _SB_MAKEMASK1(1)
+#define M_SYNCSER_CRC_MODE _SB_MAKEMASK1(0)
+#define M_SYNCSER_MSB_FIRST _SB_MAKEMASK1(1)
-#define S_SYNCSER_FLAG_NUM 2
-#define M_SYNCSER_FLAG_NUM _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
-#define V_SYNCSER_FLAG_NUM _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
+#define S_SYNCSER_FLAG_NUM 2
+#define M_SYNCSER_FLAG_NUM _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
+#define V_SYNCSER_FLAG_NUM _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
-#define M_SYNCSER_FLAG_EN _SB_MAKEMASK1(6)
-#define M_SYNCSER_HDLC_EN _SB_MAKEMASK1(7)
-#define M_SYNCSER_LOOP_MODE _SB_MAKEMASK1(8)
-#define M_SYNCSER_LOOPBACK _SB_MAKEMASK1(9)
+#define M_SYNCSER_FLAG_EN _SB_MAKEMASK1(6)
+#define M_SYNCSER_HDLC_EN _SB_MAKEMASK1(7)
+#define M_SYNCSER_LOOP_MODE _SB_MAKEMASK1(8)
+#define M_SYNCSER_LOOPBACK _SB_MAKEMASK1(9)
/*
* Serial Clock Source and Line Interface Mode Register
*/
-#define M_SYNCSER_RXCLK_INV _SB_MAKEMASK1(0)
-#define M_SYNCSER_RXCLK_EXT _SB_MAKEMASK1(1)
+#define M_SYNCSER_RXCLK_INV _SB_MAKEMASK1(0)
+#define M_SYNCSER_RXCLK_EXT _SB_MAKEMASK1(1)
-#define S_SYNCSER_RXSYNC_DLY 2
-#define M_SYNCSER_RXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
-#define V_SYNCSER_RXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
+#define S_SYNCSER_RXSYNC_DLY 2
+#define M_SYNCSER_RXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
+#define V_SYNCSER_RXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
-#define M_SYNCSER_RXSYNC_LOW _SB_MAKEMASK1(4)
-#define M_SYNCSER_RXSTRB_LOW _SB_MAKEMASK1(5)
+#define M_SYNCSER_RXSYNC_LOW _SB_MAKEMASK1(4)
+#define M_SYNCSER_RXSTRB_LOW _SB_MAKEMASK1(5)
-#define M_SYNCSER_RXSYNC_EDGE _SB_MAKEMASK1(6)
-#define M_SYNCSER_RXSYNC_INT _SB_MAKEMASK1(7)
+#define M_SYNCSER_RXSYNC_EDGE _SB_MAKEMASK1(6)
+#define M_SYNCSER_RXSYNC_INT _SB_MAKEMASK1(7)
-#define M_SYNCSER_TXCLK_INV _SB_MAKEMASK1(8)
-#define M_SYNCSER_TXCLK_EXT _SB_MAKEMASK1(9)
+#define M_SYNCSER_TXCLK_INV _SB_MAKEMASK1(8)
+#define M_SYNCSER_TXCLK_EXT _SB_MAKEMASK1(9)
-#define S_SYNCSER_TXSYNC_DLY 10
-#define M_SYNCSER_TXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
-#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
+#define S_SYNCSER_TXSYNC_DLY 10
+#define M_SYNCSER_TXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
+#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
-#define M_SYNCSER_TXSYNC_LOW _SB_MAKEMASK1(12)
-#define M_SYNCSER_TXSTRB_LOW _SB_MAKEMASK1(13)
+#define M_SYNCSER_TXSYNC_LOW _SB_MAKEMASK1(12)
+#define M_SYNCSER_TXSTRB_LOW _SB_MAKEMASK1(13)
-#define M_SYNCSER_TXSYNC_EDGE _SB_MAKEMASK1(14)
-#define M_SYNCSER_TXSYNC_INT _SB_MAKEMASK1(15)
+#define M_SYNCSER_TXSYNC_EDGE _SB_MAKEMASK1(14)
+#define M_SYNCSER_TXSYNC_INT _SB_MAKEMASK1(15)
/*
* Serial Command Register
*/
-#define M_SYNCSER_CMD_RX_EN _SB_MAKEMASK1(0)
-#define M_SYNCSER_CMD_TX_EN _SB_MAKEMASK1(1)
-#define M_SYNCSER_CMD_RX_RESET _SB_MAKEMASK1(2)
-#define M_SYNCSER_CMD_TX_RESET _SB_MAKEMASK1(3)
-#define M_SYNCSER_CMD_TX_PAUSE _SB_MAKEMASK1(5)
+#define M_SYNCSER_CMD_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_CMD_TX_EN _SB_MAKEMASK1(1)
+#define M_SYNCSER_CMD_RX_RESET _SB_MAKEMASK1(2)
+#define M_SYNCSER_CMD_TX_RESET _SB_MAKEMASK1(3)
+#define M_SYNCSER_CMD_TX_PAUSE _SB_MAKEMASK1(5)
/*
* Serial DMA Enable Register
*/
-#define M_SYNCSER_DMA_RX_EN _SB_MAKEMASK1(0)
-#define M_SYNCSER_DMA_TX_EN _SB_MAKEMASK1(4)
+#define M_SYNCSER_DMA_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_DMA_TX_EN _SB_MAKEMASK1(4)
/*
* Serial Status Register
*/
-#define M_SYNCSER_RX_CRCERR _SB_MAKEMASK1(0)
-#define M_SYNCSER_RX_ABORT _SB_MAKEMASK1(1)
-#define M_SYNCSER_RX_OCTET _SB_MAKEMASK1(2)
-#define M_SYNCSER_RX_LONGFRM _SB_MAKEMASK1(3)
-#define M_SYNCSER_RX_SHORTFRM _SB_MAKEMASK1(4)
-#define M_SYNCSER_RX_OVERRUN _SB_MAKEMASK1(5)
-#define M_SYNCSER_RX_SYNC_ERR _SB_MAKEMASK1(6)
-#define M_SYNCSER_TX_CRCERR _SB_MAKEMASK1(8)
-#define M_SYNCSER_TX_UNDERRUN _SB_MAKEMASK1(9)
-#define M_SYNCSER_TX_SYNC_ERR _SB_MAKEMASK1(10)
-#define M_SYNCSER_TX_PAUSE_COMPLETE _SB_MAKEMASK1(11)
-#define M_SYNCSER_RX_EOP_COUNT _SB_MAKEMASK1(16)
-#define M_SYNCSER_RX_EOP_TIMER _SB_MAKEMASK1(17)
-#define M_SYNCSER_RX_EOP_SEEN _SB_MAKEMASK1(18)
-#define M_SYNCSER_RX_HWM _SB_MAKEMASK1(19)
-#define M_SYNCSER_RX_LWM _SB_MAKEMASK1(20)
-#define M_SYNCSER_RX_DSCR _SB_MAKEMASK1(21)
-#define M_SYNCSER_RX_DERR _SB_MAKEMASK1(22)
-#define M_SYNCSER_TX_EOP_COUNT _SB_MAKEMASK1(24)
-#define M_SYNCSER_TX_EOP_TIMER _SB_MAKEMASK1(25)
-#define M_SYNCSER_TX_EOP_SEEN _SB_MAKEMASK1(26)
-#define M_SYNCSER_TX_HWM _SB_MAKEMASK1(27)
-#define M_SYNCSER_TX_LWM _SB_MAKEMASK1(28)
-#define M_SYNCSER_TX_DSCR _SB_MAKEMASK1(29)
-#define M_SYNCSER_TX_DERR _SB_MAKEMASK1(30)
-#define M_SYNCSER_TX_DZERO _SB_MAKEMASK1(31)
+#define M_SYNCSER_RX_CRCERR _SB_MAKEMASK1(0)
+#define M_SYNCSER_RX_ABORT _SB_MAKEMASK1(1)
+#define M_SYNCSER_RX_OCTET _SB_MAKEMASK1(2)
+#define M_SYNCSER_RX_LONGFRM _SB_MAKEMASK1(3)
+#define M_SYNCSER_RX_SHORTFRM _SB_MAKEMASK1(4)
+#define M_SYNCSER_RX_OVERRUN _SB_MAKEMASK1(5)
+#define M_SYNCSER_RX_SYNC_ERR _SB_MAKEMASK1(6)
+#define M_SYNCSER_TX_CRCERR _SB_MAKEMASK1(8)
+#define M_SYNCSER_TX_UNDERRUN _SB_MAKEMASK1(9)
+#define M_SYNCSER_TX_SYNC_ERR _SB_MAKEMASK1(10)
+#define M_SYNCSER_TX_PAUSE_COMPLETE _SB_MAKEMASK1(11)
+#define M_SYNCSER_RX_EOP_COUNT _SB_MAKEMASK1(16)
+#define M_SYNCSER_RX_EOP_TIMER _SB_MAKEMASK1(17)
+#define M_SYNCSER_RX_EOP_SEEN _SB_MAKEMASK1(18)
+#define M_SYNCSER_RX_HWM _SB_MAKEMASK1(19)
+#define M_SYNCSER_RX_LWM _SB_MAKEMASK1(20)
+#define M_SYNCSER_RX_DSCR _SB_MAKEMASK1(21)
+#define M_SYNCSER_RX_DERR _SB_MAKEMASK1(22)
+#define M_SYNCSER_TX_EOP_COUNT _SB_MAKEMASK1(24)
+#define M_SYNCSER_TX_EOP_TIMER _SB_MAKEMASK1(25)
+#define M_SYNCSER_TX_EOP_SEEN _SB_MAKEMASK1(26)
+#define M_SYNCSER_TX_HWM _SB_MAKEMASK1(27)
+#define M_SYNCSER_TX_LWM _SB_MAKEMASK1(28)
+#define M_SYNCSER_TX_DSCR _SB_MAKEMASK1(29)
+#define M_SYNCSER_TX_DERR _SB_MAKEMASK1(30)
+#define M_SYNCSER_TX_DZERO _SB_MAKEMASK1(31)
/*
* Sequencer Table Entry format
*/
-#define M_SYNCSER_SEQ_LAST _SB_MAKEMASK1(0)
-#define M_SYNCSER_SEQ_BYTE _SB_MAKEMASK1(1)
+#define M_SYNCSER_SEQ_LAST _SB_MAKEMASK1(0)
+#define M_SYNCSER_SEQ_BYTE _SB_MAKEMASK1(1)
-#define S_SYNCSER_SEQ_COUNT 2
-#define M_SYNCSER_SEQ_COUNT _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
-#define V_SYNCSER_SEQ_COUNT(x) _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
+#define S_SYNCSER_SEQ_COUNT 2
+#define M_SYNCSER_SEQ_COUNT _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
+#define V_SYNCSER_SEQ_COUNT(x) _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
-#define M_SYNCSER_SEQ_ENABLE _SB_MAKEMASK1(6)
-#define M_SYNCSER_SEQ_STROBE _SB_MAKEMASK1(7)
+#define M_SYNCSER_SEQ_ENABLE _SB_MAKEMASK1(6)
+#define M_SYNCSER_SEQ_STROBE _SB_MAKEMASK1(7)
#endif
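
The sb1250_syncser.h hunks above all follow the same S_/M_/V_/G_ naming convention used throughout these SiByte headers: S_* is a field's shift, M_* its mask, V_*(x) positions a value in the field and G_*(x) extracts it. A minimal standalone sketch of that convention, assuming the _SB_* helpers behave roughly as shown below (their real definitions live in the SiByte sb1250_defs.h header); the G_ accessor here is added only for illustration:

#include <stdint.h>
#include <stdio.h>

#define _SB_MAKE64(x)          ((uint64_t)(x))
#define _SB_MAKEMASK1(n)       (_SB_MAKE64(1) << (n))                 /* one bit at position n */
#define _SB_MAKEMASK(v, n)     (((_SB_MAKE64(1) << (v)) - 1) << (n))  /* v bits starting at n */
#define _SB_MAKEVALUE(x, n)    (_SB_MAKE64(x) << (n))                 /* place x into the field */
#define _SB_GETVALUE(x, n, m)  ((_SB_MAKE64(x) & (m)) >> (n))         /* pull the field back out */

#define S_SYNCSER_TXSYNC_DLY    10
#define M_SYNCSER_TXSYNC_DLY    _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
#define G_SYNCSER_TXSYNC_DLY(x) _SB_GETVALUE(x, S_SYNCSER_TXSYNC_DLY, M_SYNCSER_TXSYNC_DLY)

int main(void)
{
	uint64_t cfg = 0;

	/* Set the 2-bit TXSYNC delay field (bits 11:10) to 3. */
	cfg = (cfg & ~M_SYNCSER_TXSYNC_DLY) | V_SYNCSER_TXSYNC_DLY(3);
	printf("cfg = 0x%llx, delay = %llu\n",
	       (unsigned long long)cfg,
	       (unsigned long long)G_SYNCSER_TXSYNC_DLY(cfg));   /* 0xc00, 3 */
	return 0;
}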
diff --git a/arch/mips/include/asm/sibyte/sb1250_uart.h b/arch/mips/include/asm/sibyte/sb1250_uart.h
index bb99ecac5817..a43dc1976286 100644
--- a/arch/mips/include/asm/sibyte/sb1250_uart.h
+++ b/arch/mips/include/asm/sibyte/sb1250_uart.h
@@ -45,33 +45,33 @@
* Register: DUART_MODE_REG_1_B
*/
-#define S_DUART_BITS_PER_CHAR 0
-#define M_DUART_BITS_PER_CHAR _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
+#define S_DUART_BITS_PER_CHAR 0
+#define M_DUART_BITS_PER_CHAR _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
#define V_DUART_BITS_PER_CHAR(x) _SB_MAKEVALUE(x, S_DUART_BITS_PER_CHAR)
#define K_DUART_BITS_PER_CHAR_RSV0 0
#define K_DUART_BITS_PER_CHAR_RSV1 1
-#define K_DUART_BITS_PER_CHAR_7 2
-#define K_DUART_BITS_PER_CHAR_8 3
+#define K_DUART_BITS_PER_CHAR_7 2
+#define K_DUART_BITS_PER_CHAR_8 3
#define V_DUART_BITS_PER_CHAR_RSV0 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV0)
#define V_DUART_BITS_PER_CHAR_RSV1 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV1)
-#define V_DUART_BITS_PER_CHAR_7 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
-#define V_DUART_BITS_PER_CHAR_8 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
+#define V_DUART_BITS_PER_CHAR_7 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
+#define V_DUART_BITS_PER_CHAR_8 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
#define M_DUART_PARITY_TYPE_EVEN 0x00
-#define M_DUART_PARITY_TYPE_ODD _SB_MAKEMASK1(2)
+#define M_DUART_PARITY_TYPE_ODD _SB_MAKEMASK1(2)
-#define S_DUART_PARITY_MODE 3
-#define M_DUART_PARITY_MODE _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
-#define V_DUART_PARITY_MODE(x) _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
+#define S_DUART_PARITY_MODE 3
+#define M_DUART_PARITY_MODE _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
+#define V_DUART_PARITY_MODE(x) _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
-#define K_DUART_PARITY_MODE_ADD 0
+#define K_DUART_PARITY_MODE_ADD 0
#define K_DUART_PARITY_MODE_ADD_FIXED 1
#define K_DUART_PARITY_MODE_NONE 2
-#define V_DUART_PARITY_MODE_ADD V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
+#define V_DUART_PARITY_MODE_ADD V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
@@ -81,7 +81,7 @@
#define M_DUART_RX_IRQ_SEL_RXRDY 0
#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6)
-#define M_DUART_RX_RTS_ENA _SB_MAKEMASK1(7)
+#define M_DUART_RX_RTS_ENA _SB_MAKEMASK1(7)
/*
* DUART Mode Register #2 (Table 10-4)
@@ -89,18 +89,18 @@
* Register: DUART_MODE_REG_2_B
*/
-#define M_DUART_MODE_RESERVED1 _SB_MAKEMASK(3, 0) /* ignored */
+#define M_DUART_MODE_RESERVED1 _SB_MAKEMASK(3, 0) /* ignored */
-#define M_DUART_STOP_BIT_LEN_2 _SB_MAKEMASK1(3)
-#define M_DUART_STOP_BIT_LEN_1 0
+#define M_DUART_STOP_BIT_LEN_2 _SB_MAKEMASK1(3)
+#define M_DUART_STOP_BIT_LEN_1 0
-#define M_DUART_TX_CTS_ENA _SB_MAKEMASK1(4)
+#define M_DUART_TX_CTS_ENA _SB_MAKEMASK1(4)
-#define M_DUART_MODE_RESERVED2 _SB_MAKEMASK1(5) /* must be zero */
+#define M_DUART_MODE_RESERVED2 _SB_MAKEMASK1(5) /* must be zero */
#define S_DUART_CHAN_MODE 6
-#define M_DUART_CHAN_MODE _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
+#define M_DUART_CHAN_MODE _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
#define V_DUART_CHAN_MODE(x) _SB_MAKEVALUE(x, S_DUART_CHAN_MODE)
#define K_DUART_CHAN_MODE_NORMAL 0
@@ -117,34 +117,34 @@
* Register: DUART_CMD_B
*/
-#define M_DUART_RX_EN _SB_MAKEMASK1(0)
-#define M_DUART_RX_DIS _SB_MAKEMASK1(1)
-#define M_DUART_TX_EN _SB_MAKEMASK1(2)
-#define M_DUART_TX_DIS _SB_MAKEMASK1(3)
+#define M_DUART_RX_EN _SB_MAKEMASK1(0)
+#define M_DUART_RX_DIS _SB_MAKEMASK1(1)
+#define M_DUART_TX_EN _SB_MAKEMASK1(2)
+#define M_DUART_TX_DIS _SB_MAKEMASK1(3)
#define S_DUART_MISC_CMD 4
-#define M_DUART_MISC_CMD _SB_MAKEMASK(3, S_DUART_MISC_CMD)
-#define V_DUART_MISC_CMD(x) _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
-
-#define K_DUART_MISC_CMD_NOACTION0 0
-#define K_DUART_MISC_CMD_NOACTION1 1
-#define K_DUART_MISC_CMD_RESET_RX 2
-#define K_DUART_MISC_CMD_RESET_TX 3
-#define K_DUART_MISC_CMD_NOACTION4 4
+#define M_DUART_MISC_CMD _SB_MAKEMASK(3, S_DUART_MISC_CMD)
+#define V_DUART_MISC_CMD(x) _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
+
+#define K_DUART_MISC_CMD_NOACTION0 0
+#define K_DUART_MISC_CMD_NOACTION1 1
+#define K_DUART_MISC_CMD_RESET_RX 2
+#define K_DUART_MISC_CMD_RESET_TX 3
+#define K_DUART_MISC_CMD_NOACTION4 4
#define K_DUART_MISC_CMD_RESET_BREAK_INT 5
-#define K_DUART_MISC_CMD_START_BREAK 6
-#define K_DUART_MISC_CMD_STOP_BREAK 7
-
-#define V_DUART_MISC_CMD_NOACTION0 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
-#define V_DUART_MISC_CMD_NOACTION1 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
-#define V_DUART_MISC_CMD_RESET_RX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
-#define V_DUART_MISC_CMD_RESET_TX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
-#define V_DUART_MISC_CMD_NOACTION4 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
+#define K_DUART_MISC_CMD_START_BREAK 6
+#define K_DUART_MISC_CMD_STOP_BREAK 7
+
+#define V_DUART_MISC_CMD_NOACTION0 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
+#define V_DUART_MISC_CMD_NOACTION1 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
+#define V_DUART_MISC_CMD_RESET_RX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
+#define V_DUART_MISC_CMD_RESET_TX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
+#define V_DUART_MISC_CMD_NOACTION4 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
#define V_DUART_MISC_CMD_RESET_BREAK_INT V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_BREAK_INT)
-#define V_DUART_MISC_CMD_START_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
-#define V_DUART_MISC_CMD_STOP_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
+#define V_DUART_MISC_CMD_START_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
+#define V_DUART_MISC_CMD_STOP_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
-#define M_DUART_CMD_RESERVED _SB_MAKEMASK1(7)
+#define M_DUART_CMD_RESERVED _SB_MAKEMASK1(7)
/*
* DUART Status Register (Table 10-6)
@@ -153,14 +153,14 @@
* READ-ONLY
*/
-#define M_DUART_RX_RDY _SB_MAKEMASK1(0)
-#define M_DUART_RX_FFUL _SB_MAKEMASK1(1)
-#define M_DUART_TX_RDY _SB_MAKEMASK1(2)
-#define M_DUART_TX_EMT _SB_MAKEMASK1(3)
-#define M_DUART_OVRUN_ERR _SB_MAKEMASK1(4)
-#define M_DUART_PARITY_ERR _SB_MAKEMASK1(5)
-#define M_DUART_FRM_ERR _SB_MAKEMASK1(6)
-#define M_DUART_RCVD_BRK _SB_MAKEMASK1(7)
+#define M_DUART_RX_RDY _SB_MAKEMASK1(0)
+#define M_DUART_RX_FFUL _SB_MAKEMASK1(1)
+#define M_DUART_TX_RDY _SB_MAKEMASK1(2)
+#define M_DUART_TX_EMT _SB_MAKEMASK1(3)
+#define M_DUART_OVRUN_ERR _SB_MAKEMASK1(4)
+#define M_DUART_PARITY_ERR _SB_MAKEMASK1(5)
+#define M_DUART_FRM_ERR _SB_MAKEMASK1(6)
+#define M_DUART_RCVD_BRK _SB_MAKEMASK1(7)
/*
* DUART Baud Rate Register (Table 10-7)
@@ -168,8 +168,8 @@
* Register: DUART_CLK_SEL_B
*/
-#define M_DUART_CLK_COUNTER _SB_MAKEMASK(12, 0)
-#define V_DUART_BAUD_RATE(x) (100000000/((x)*20)-1)
+#define M_DUART_CLK_COUNTER _SB_MAKEMASK(12, 0)
+#define V_DUART_BAUD_RATE(x) (100000000/((x)*20)-1)
/*
* DUART Data Registers (Table 10-8 and 10-9)
@@ -179,33 +179,33 @@
* Register: DUART_TX_HOLD_B
*/
-#define M_DUART_RX_DATA _SB_MAKEMASK(8, 0)
-#define M_DUART_TX_DATA _SB_MAKEMASK(8, 0)
+#define M_DUART_RX_DATA _SB_MAKEMASK(8, 0)
+#define M_DUART_TX_DATA _SB_MAKEMASK(8, 0)
/*
* DUART Input Port Register (Table 10-10)
* Register: DUART_IN_PORT
*/
-#define M_DUART_IN_PIN0_VAL _SB_MAKEMASK1(0)
-#define M_DUART_IN_PIN1_VAL _SB_MAKEMASK1(1)
-#define M_DUART_IN_PIN2_VAL _SB_MAKEMASK1(2)
-#define M_DUART_IN_PIN3_VAL _SB_MAKEMASK1(3)
-#define M_DUART_IN_PIN4_VAL _SB_MAKEMASK1(4)
-#define M_DUART_IN_PIN5_VAL _SB_MAKEMASK1(5)
-#define M_DUART_RIN0_PIN _SB_MAKEMASK1(6)
-#define M_DUART_RIN1_PIN _SB_MAKEMASK1(7)
+#define M_DUART_IN_PIN0_VAL _SB_MAKEMASK1(0)
+#define M_DUART_IN_PIN1_VAL _SB_MAKEMASK1(1)
+#define M_DUART_IN_PIN2_VAL _SB_MAKEMASK1(2)
+#define M_DUART_IN_PIN3_VAL _SB_MAKEMASK1(3)
+#define M_DUART_IN_PIN4_VAL _SB_MAKEMASK1(4)
+#define M_DUART_IN_PIN5_VAL _SB_MAKEMASK1(5)
+#define M_DUART_RIN0_PIN _SB_MAKEMASK1(6)
+#define M_DUART_RIN1_PIN _SB_MAKEMASK1(7)
/*
* DUART Input Port Change Status Register (Tables 10-11, 10-12, and 10-13)
* Register: DUART_INPORT_CHNG
*/
-#define S_DUART_IN_PIN_VAL 0
-#define M_DUART_IN_PIN_VAL _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
+#define S_DUART_IN_PIN_VAL 0
+#define M_DUART_IN_PIN_VAL _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
-#define S_DUART_IN_PIN_CHNG 4
-#define M_DUART_IN_PIN_CHNG _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
+#define S_DUART_IN_PIN_CHNG 4
+#define M_DUART_IN_PIN_CHNG _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
/*
@@ -213,46 +213,46 @@
* Register: DUART_OPCR
*/
-#define M_DUART_OPCR_RESERVED0 _SB_MAKEMASK1(0) /* must be zero */
-#define M_DUART_OPC2_SEL _SB_MAKEMASK1(1)
-#define M_DUART_OPCR_RESERVED1 _SB_MAKEMASK1(2) /* must be zero */
-#define M_DUART_OPC3_SEL _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED2 _SB_MAKEMASK(4, 4) /* must be zero */
+#define M_DUART_OPCR_RESERVED0 _SB_MAKEMASK1(0) /* must be zero */
+#define M_DUART_OPC2_SEL _SB_MAKEMASK1(1)
+#define M_DUART_OPCR_RESERVED1 _SB_MAKEMASK1(2) /* must be zero */
+#define M_DUART_OPC3_SEL _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED2 _SB_MAKEMASK(4, 4) /* must be zero */
/*
* DUART Aux Control Register (Table 10-15)
* Register: DUART_AUX_CTRL
*/
-#define M_DUART_IP0_CHNG_ENA _SB_MAKEMASK1(0)
-#define M_DUART_IP1_CHNG_ENA _SB_MAKEMASK1(1)
-#define M_DUART_IP2_CHNG_ENA _SB_MAKEMASK1(2)
-#define M_DUART_IP3_CHNG_ENA _SB_MAKEMASK1(3)
-#define M_DUART_ACR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_IP0_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_IP1_CHNG_ENA _SB_MAKEMASK1(1)
+#define M_DUART_IP2_CHNG_ENA _SB_MAKEMASK1(2)
+#define M_DUART_IP3_CHNG_ENA _SB_MAKEMASK1(3)
+#define M_DUART_ACR_RESERVED _SB_MAKEMASK(4, 4)
-#define M_DUART_CTS_CHNG_ENA _SB_MAKEMASK1(0)
-#define M_DUART_CIN_CHNG_ENA _SB_MAKEMASK1(2)
+#define M_DUART_CTS_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_CIN_CHNG_ENA _SB_MAKEMASK1(2)
/*
* DUART Interrupt Status Register (Table 10-16)
* Register: DUART_ISR
*/
-#define M_DUART_ISR_TX_A _SB_MAKEMASK1(0)
+#define M_DUART_ISR_TX_A _SB_MAKEMASK1(0)
-#define S_DUART_ISR_RX_A 1
-#define M_DUART_ISR_RX_A _SB_MAKEMASK1(S_DUART_ISR_RX_A)
-#define V_DUART_ISR_RX_A(x) _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
-#define G_DUART_ISR_RX_A(x) _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
+#define S_DUART_ISR_RX_A 1
+#define M_DUART_ISR_RX_A _SB_MAKEMASK1(S_DUART_ISR_RX_A)
+#define V_DUART_ISR_RX_A(x) _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
+#define G_DUART_ISR_RX_A(x) _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
-#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
#define M_DUART_ISR_ALL_A _SB_MAKEMASK(4, 0)
-#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
-#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
-#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
-#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
#define M_DUART_ISR_ALL_B _SB_MAKEMASK(4, 4)
/*
@@ -262,29 +262,29 @@
* Register: DUART_ISR_B
*/
-#define M_DUART_ISR_TX _SB_MAKEMASK1(0)
-#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
-#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
+#define M_DUART_ISR_TX _SB_MAKEMASK1(0)
+#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
+#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
#define M_DUART_ISR_ALL _SB_MAKEMASK(4, 0)
-#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Interrupt Mask Register (Table 10-19)
* Register: DUART_IMR
*/
-#define M_DUART_IMR_TX_A _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX_A _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK_A _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX_A _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX_A _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN_A _SB_MAKEMASK1(3)
#define M_DUART_IMR_ALL_A _SB_MAKEMASK(4, 0)
-#define M_DUART_IMR_TX_B _SB_MAKEMASK1(4)
-#define M_DUART_IMR_RX_B _SB_MAKEMASK1(5)
-#define M_DUART_IMR_BRK_B _SB_MAKEMASK1(6)
-#define M_DUART_IMR_IN_B _SB_MAKEMASK1(7)
-#define M_DUART_IMR_ALL_B _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_IMR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_IMR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_IMR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_IMR_ALL_B _SB_MAKEMASK(4, 4)
/*
* DUART Channel A Interrupt Mask Register (Table 10-20)
@@ -293,12 +293,12 @@
* Register: DUART_IMR_B
*/
-#define M_DUART_IMR_TX _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN _SB_MAKEMASK1(3)
#define M_DUART_IMR_ALL _SB_MAKEMASK(4, 0)
-#define M_DUART_IMR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_RESERVED _SB_MAKEMASK(4, 4)
/*
@@ -306,33 +306,33 @@
* Register: DUART_SET_OPR
*/
-#define M_DUART_SET_OPR0 _SB_MAKEMASK1(0)
-#define M_DUART_SET_OPR1 _SB_MAKEMASK1(1)
-#define M_DUART_SET_OPR2 _SB_MAKEMASK1(2)
-#define M_DUART_SET_OPR3 _SB_MAKEMASK1(3)
-#define M_DUART_OPSR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_SET_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_SET_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_SET_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_SET_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPSR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Output Port Clear Register (Table 10-23)
* Register: DUART_CLEAR_OPR
*/
-#define M_DUART_CLR_OPR0 _SB_MAKEMASK1(0)
-#define M_DUART_CLR_OPR1 _SB_MAKEMASK1(1)
-#define M_DUART_CLR_OPR2 _SB_MAKEMASK1(2)
-#define M_DUART_CLR_OPR3 _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_CLR_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_CLR_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_CLR_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_CLR_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Output Port RTS Register (Table 10-24)
* Register: DUART_OUT_PORT
*/
-#define M_DUART_OUT_PIN_SET0 _SB_MAKEMASK1(0)
-#define M_DUART_OUT_PIN_SET1 _SB_MAKEMASK1(1)
-#define M_DUART_OUT_PIN_CLR0 _SB_MAKEMASK1(2)
-#define M_DUART_OUT_PIN_CLR1 _SB_MAKEMASK1(3)
-#define M_DUART_OPRR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_OUT_PIN_SET0 _SB_MAKEMASK1(0)
+#define M_DUART_OUT_PIN_SET1 _SB_MAKEMASK1(1)
+#define M_DUART_OUT_PIN_CLR0 _SB_MAKEMASK1(2)
+#define M_DUART_OUT_PIN_CLR1 _SB_MAKEMASK1(3)
+#define M_DUART_OPRR_RESERVED _SB_MAKEMASK(4, 4)
#define M_DUART_OUT_PIN_SET(chan) \
(chan == 0 ? M_DUART_OUT_PIN_SET0 : M_DUART_OUT_PIN_SET1)
@@ -344,15 +344,15 @@
* Full Interrupt Control Register
*/
-#define S_DUART_SIG_FULL _SB_MAKE64(0)
-#define M_DUART_SIG_FULL _SB_MAKEMASK(4, S_DUART_SIG_FULL)
-#define V_DUART_SIG_FULL(x) _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
-#define G_DUART_SIG_FULL(x) _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
+#define S_DUART_SIG_FULL _SB_MAKE64(0)
+#define M_DUART_SIG_FULL _SB_MAKEMASK(4, S_DUART_SIG_FULL)
+#define V_DUART_SIG_FULL(x) _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
+#define G_DUART_SIG_FULL(x) _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
-#define S_DUART_INT_TIME _SB_MAKE64(4)
-#define M_DUART_INT_TIME _SB_MAKEMASK(4, S_DUART_INT_TIME)
-#define V_DUART_INT_TIME(x) _SB_MAKEVALUE(x, S_DUART_INT_TIME)
-#define G_DUART_INT_TIME(x) _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
+#define S_DUART_INT_TIME _SB_MAKE64(4)
+#define M_DUART_INT_TIME _SB_MAKEMASK(4, S_DUART_INT_TIME)
+#define V_DUART_INT_TIME(x) _SB_MAKEVALUE(x, S_DUART_INT_TIME)
+#define G_DUART_INT_TIME(x) _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
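
V_DUART_BAUD_RATE() in the hunk above turns a requested baud rate into the 12-bit counter written to the DUART clock-select register, assuming a 100 MHz reference divided by 20 per counter tick. A standalone check of that arithmetic, reusing only the two definitions shown in the diff:

#include <stdio.h>

#define V_DUART_BAUD_RATE(x)   (100000000/((x)*20)-1)
#define M_DUART_CLK_COUNTER    0xfff     /* 12-bit field, per the mask above */

int main(void)
{
	unsigned int baud[] = { 9600, 38400, 115200 };

	for (int i = 0; i < 3; i++) {
		unsigned int cnt = V_DUART_BAUD_RATE(baud[i]) & M_DUART_CLK_COUNTER;
		printf("%6u baud -> counter %u\n", baud[i], cnt);
	}
	/* 115200 baud gives 100000000/(115200*20) - 1 = 42 */
	return 0;
}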
diff --git a/arch/mips/include/asm/sibyte/sentosa.h b/arch/mips/include/asm/sibyte/sentosa.h
index 64c47874f32d..0351a46eebbd 100644
--- a/arch/mips/include/asm/sibyte/sentosa.h
+++ b/arch/mips/include/asm/sibyte/sentosa.h
@@ -30,11 +30,11 @@
/* Generic bus chip selects */
#ifdef CONFIG_SIBYTE_RHONE
-#define LEDS_CS 6
-#define LEDS_PHYS 0x1d0a0000
+#define LEDS_CS 6
+#define LEDS_PHYS 0x1d0a0000
#endif
/* GPIOs */
-#define K_GPIO_DBG_LED 0
+#define K_GPIO_DBG_LED 0
#endif /* __ASM_SIBYTE_SENTOSA_H */
diff --git a/arch/mips/include/asm/sibyte/swarm.h b/arch/mips/include/asm/sibyte/swarm.h
index 114d9d29ca9d..187cfb1f67cb 100644
--- a/arch/mips/include/asm/sibyte/swarm.h
+++ b/arch/mips/include/asm/sibyte/swarm.h
@@ -24,41 +24,41 @@
#ifdef CONFIG_SIBYTE_SWARM
#define SIBYTE_BOARD_NAME "BCM91250A (SWARM)"
#define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#endif
#ifdef CONFIG_SIBYTE_LITTLESUR
#define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#define SIBYTE_DEFAULT_CONSOLE "cfe0"
#endif
#ifdef CONFIG_SIBYTE_CRHONE
#define SIBYTE_BOARD_NAME "BCM91125C (CRhone)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 0
+#define SIBYTE_HAVE_IDE 0
#endif
#ifdef CONFIG_SIBYTE_CRHINE
#define SIBYTE_BOARD_NAME "BCM91120C (CRhine)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 0
+#define SIBYTE_HAVE_IDE 0
#endif
/* Generic bus chip selects */
-#define LEDS_CS 3
-#define LEDS_PHYS 0x100a0000
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
#ifdef SIBYTE_HAVE_IDE
-#define IDE_CS 4
-#define IDE_PHYS 0x100b0000
-#define K_GPIO_GB_IDE 4
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
#ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS 6
-#define PCMCIA_PHYS 0x11000000
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
#define K_GPIO_PC_READY 9
-#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
#endif
#endif /* __ASM_SIBYTE_SWARM_H */
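
swarm.h above composes the IDE interrupt number from the GPIO line the IDE interrupt is routed through, K_INT_GB_IDE being K_INT_GPIO_0 plus K_GPIO_GB_IDE. A standalone check of that composition, with a placeholder value for K_INT_GPIO_0 (the real one comes from the SB1250 interrupt map):

#include <stdio.h>

#define K_INT_GPIO_0    32            /* placeholder for this demo */
#define K_GPIO_GB_IDE   4
#define K_INT_GB_IDE    (K_INT_GPIO_0 + K_GPIO_GB_IDE)

int main(void)
{
	printf("IDE interrupt = %d (GPIO %d)\n", K_INT_GB_IDE, K_GPIO_GB_IDE);
	return 0;
}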
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index f33b5fd6972b..eb6008758484 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -26,7 +26,7 @@ extern cpumask_t cpu_sibling_map[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
/* Map from cpu id to sequential logical cpu number. This will only
- not be idempotent when cpus failed to come on-line. */
+ not be idempotent when cpus failed to come on-line. */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu) __cpu_number_map[cpu]
@@ -36,7 +36,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
+#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
@@ -62,14 +62,14 @@ static inline void smp_send_reschedule(int cpu)
#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
return mp_ops->cpu_disable();
}
static inline void __cpu_die(unsigned int cpu)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->cpu_die(cpu);
}
@@ -81,14 +81,14 @@ extern asmlinkage void smp_call_function_interrupt(void);
static inline void arch_send_call_function_single_ipi(int cpu)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
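
The smp.h hunks only re-indent the extern declarations, but they show the indirection MIPS SMP uses: generic inline helpers call through a per-platform plat_smp_ops table. A minimal standalone sketch of that pattern, using just the callbacks visible in the diff (the real structure has more members and is declared elsewhere, and mp_ops is installed once during platform setup):

#include <stdio.h>

struct cpumask;                          /* opaque for this sketch */

struct plat_smp_ops {                    /* subset of the real structure */
	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
	int  (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
};

#define SMP_CALL_FUNCTION 0x2            /* same action flag as in the diff */

static void demo_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	(void)mask;
	printf("IPI action 0x%x sent\n", action);
}

static struct plat_smp_ops demo_ops = {
	.send_ipi_mask = demo_send_ipi_mask,
};

static struct plat_smp_ops *mp_ops = &demo_ops;    /* set once at boot */

static void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);  /* same shape as the header */
}

int main(void)
{
	arch_send_call_function_ipi_mask(NULL);
	return 0;
}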
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
index 8935426a56ab..e56b439b7871 100644
--- a/arch/mips/include/asm/smtc.h
+++ b/arch/mips/include/asm/smtc.h
@@ -14,8 +14,8 @@
extern unsigned int smtc_status;
-#define SMTC_TLB_SHARED 0x00000001
-#define SMTC_MTC_ACTIVE 0x00000002
+#define SMTC_TLB_SHARED 0x00000001
+#define SMTC_MTC_ACTIVE 0x00000002
/*
* TLB/ASID Management information
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
index 2367b56dcdef..66814f8ba8e8 100644
--- a/arch/mips/include/asm/sn/addrs.h
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -88,8 +88,8 @@
#define SWIN_SIZE_BITS 24
#define SWIN_SIZE (UINT64_CAST 1 << 24)
-#define SWIN_SIZEMASK (SWIN_SIZE - 1)
-#define SWIN_WIDGET_MASK 0xF
+#define SWIN_SIZEMASK (SWIN_SIZE - 1)
+#define SWIN_WIDGET_MASK 0xF
/*
* Convert smallwindow address to xtalk address.
@@ -97,8 +97,8 @@
* 'addr' can be physical or virtual address, but will be converted
* to Xtalk address in the range 0 -> SWINZ_SIZEMASK
*/
-#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
-#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
+#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
/*
* Verify if addr belongs to small window address on node with "nasid"
*
@@ -108,7 +108,7 @@
*
*
*/
-#define NODE_SWIN_ADDR(nasid, addr) \
+#define NODE_SWIN_ADDR(nasid, addr) \
(((addr) >= NODE_SWIN_BASE(nasid, 0)) && \
((addr) < (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
))
@@ -150,7 +150,7 @@
#endif
-#define HUB_REGISTER_WIDGET 1
+#define HUB_REGISTER_WIDGET 1
#define IALIAS_BASE NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
#define IALIAS_SIZE 0x800000 /* 8 Megabytes */
#define IS_IALIAS(_a) (((_a) >= IALIAS_BASE) && \
@@ -174,16 +174,16 @@
* WARNING: They won't work in assembler.
*
* BDDIR_ENTRY_LO returns the address of the low double-word of the dir
- * entry corresponding to a physical (Cac or Uncac) address.
+ * entry corresponding to a physical (Cac or Uncac) address.
* BDDIR_ENTRY_HI returns the address of the high double-word of the entry.
* BDPRT_ENTRY returns the address of the double-word protection entry
- * corresponding to the page containing the physical address.
+ * corresponding to the page containing the physical address.
* BDPRT_ENTRY_S Stores the value into the protection entry.
* BDPRT_ENTRY_L Load the value from the protection entry.
* BDECC_ENTRY returns the address of the ECC byte corresponding to a
- * double-word at a specified physical address.
+ * double-word at a specified physical address.
* BDECC_ENTRY_H returns the address of the two ECC bytes corresponding to a
- * quad-word at a specified physical address.
+ * quad-word at a specified physical address.
*/
#define NODE_BDOOR_BASE(_n) (NODE_HSPEC_BASE(_n) + (NODE_ADDRSPACE_SIZE/2))
@@ -226,11 +226,11 @@
#define BDADDR_IS_DIR(_ba) ((UINT64_CAST (_ba) & 0x200) != 0)
#define BDADDR_IS_PRT(_ba) ((UINT64_CAST (_ba) & 0x200) == 0)
-#define BDDIR_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+#define BDDIR_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
(UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2 | \
(UINT64_CAST(_ba) & 0x1f << 4) << 3)
-#define BDPRT_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+#define BDPRT_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
(UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2)
#define BDECC_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
@@ -251,23 +251,23 @@
/*
* WARNING:
* When certain Hub chip workaround are defined, it's not sufficient
- * to dereference the *_HUB_ADDR() macros. You should instead use
+ * to dereference the *_HUB_ADDR() macros. You should instead use
* HUB_L() and HUB_S() if you must deal with pointers to hub registers.
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
+#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#ifdef CONFIG_SGI_IP27
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
+#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#endif /* CONFIG_SGI_IP27 */
#ifndef __ASSEMBLY__
#define HUB_L(_a) *(_a)
-#define HUB_S(_a, _d) *(_a) = (_d)
+#define HUB_S(_a, _d) *(_a) = (_d)
#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r))
#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))
@@ -330,14 +330,14 @@
#define KLI_LAUNCH 0 /* Dir. entries */
#define KLI_KLCONFIG 1
-#define KLI_NMI 2
+#define KLI_NMI 2
#define KLI_GDA 3
#define KLI_FREEMEM 4
-#define KLI_SYMMON_STK 5
+#define KLI_SYMMON_STK 5
#define KLI_PI_ERROR 6
#define KLI_KERN_VARS 7
-#define KLI_KERN_XP 8
-#define KLI_KERN_PARTID 9
+#define KLI_KERN_XP 8
+#define KLI_KERN_PARTID 9
#ifndef __ASSEMBLY__
@@ -350,8 +350,8 @@
#define KLD_SYMMON_STK(nasid) (KLD_BASE(nasid) + KLI_SYMMON_STK)
#define KLD_FREEMEM(nasid) (KLD_BASE(nasid) + KLI_FREEMEM)
#define KLD_KERN_VARS(nasid) (KLD_BASE(nasid) + KLI_KERN_VARS)
-#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
-#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
+#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
+#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
#define LAUNCH_OFFSET(nasid, slice) \
(KLD_LAUNCH(nasid)->offset + \
@@ -365,7 +365,7 @@
KLD_NMI(nasid)->stride * (slice))
#define NMI_ADDR(nasid, slice) \
TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice))
-#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
+#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
#define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset
#define KLCONFIG_ADDR(nasid) \
@@ -390,8 +390,8 @@
/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
* relocatable program
*/
-#define UNIX_DEBUG_LOADADDR 0x300000
-#define SYMMON_LOADADDR(nasid) \
+#define UNIX_DEBUG_LOADADDR 0x300000
+#define SYMMON_LOADADDR(nasid) \
TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset
@@ -420,8 +420,8 @@
#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
-#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
-#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
+#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
+#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET)
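
The HUB_L()/HUB_S() and LOCAL_HUB_*() macros above layer a register offset onto the hub's local window base and then dereference it as a 64-bit register. A standalone sketch of that layering, with the hub window replaced by a plain array so it runs anywhere; HUBREG_CAST is assumed to be a cast to a volatile 64-bit pointer, and the two register offsets are placeholders for the demo:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_hub[0x100];                      /* stand-in for the IALIAS window */

#define HUBREG_CAST         (volatile uint64_t *)
#define IALIAS_BASE         ((uintptr_t)fake_hub)     /* real value is a fixed hub address */
#define LOCAL_HUB_ADDR(_x)  (HUBREG_CAST (IALIAS_BASE + (_x)))

#define HUB_L(_a)           *(_a)
#define HUB_S(_a, _d)       *(_a) = (_d)
#define LOCAL_HUB_L(_r)     HUB_L(LOCAL_HUB_ADDR(_r))
#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))

#define PI_INT_PEND_MOD 0x0                           /* placeholder offsets for the demo */
#define PI_INT_PEND0    0x8

int main(void)
{
	LOCAL_HUB_S(PI_INT_PEND_MOD, 7);                  /* store to the "register" */
	printf("readback: %llu\n",
	       (unsigned long long)LOCAL_HUB_L(PI_INT_PEND_MOD));
	(void)LOCAL_HUB_L(PI_INT_PEND0);                  /* readback to flush, as in the real code */
	return 0;
}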
diff --git a/arch/mips/include/asm/sn/agent.h b/arch/mips/include/asm/sn/agent.h
index dc81114d4742..e33d09293019 100644
--- a/arch/mips/include/asm/sn/agent.h
+++ b/arch/mips/include/asm/sn/agent.h
@@ -25,21 +25,21 @@
*/
#if defined(CONFIG_SGI_IP27)
-#define HUB_NIC_ADDR(_cpuid) \
- REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
+#define HUB_NIC_ADDR(_cpuid) \
+ REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
MD_MLAN_CTL)
#endif
-#define SET_HUB_NIC(_my_cpuid, _val) \
+#define SET_HUB_NIC(_my_cpuid, _val) \
(HUB_S(HUB_NIC_ADDR(_my_cpuid), (_val)))
-#define SET_MY_HUB_NIC(_v) \
+#define SET_MY_HUB_NIC(_v) \
SET_HUB_NIC(cpuid(), (_v))
-#define GET_HUB_NIC(_my_cpuid) \
+#define GET_HUB_NIC(_my_cpuid) \
(HUB_L(HUB_NIC_ADDR(_my_cpuid)))
-#define GET_MY_HUB_NIC() \
+#define GET_MY_HUB_NIC() \
GET_HUB_NIC(cpuid())
#endif /* _ASM_SGI_SN_AGENT_H */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index bd75945e10ff..471e6870d876 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -28,14 +28,14 @@ typedef u64 hubreg_t;
#define INVALID_CNODEID (cnodeid_t)-1
#define INVALID_PNODEID (pnodeid_t)-1
#define INVALID_MODULE (moduleid_t)-1
-#define INVALID_PARTID (partid_t)-1
+#define INVALID_PARTID (partid_t)-1
extern nasid_t get_nasid(void);
extern cnodeid_t get_cpu_cnode(cpuid_t);
extern int get_cpu_slice(cpuid_t);
/*
- * NO ONE should access these arrays directly. The only reason we refer to
+ * NO ONE should access these arrays directly. The only reason we refer to
* them here is to avoid the procedure call that would be required in the
* macros below. (Really want private data members here :-)
*/
@@ -44,12 +44,12 @@ extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
/*
* These macros are used by various parts of the kernel to convert
- * between the three different kinds of node numbering. At least some
+ * between the three different kinds of node numbering. At least some
* of them may change to procedure calls in the future, but the macros
* will continue to work. Don't use the arrays above directly.
*/
-#define NASID_TO_REGION(nnode) \
+#define NASID_TO_REGION(nnode) \
((nnode) >> \
(is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
diff --git a/arch/mips/include/asm/sn/fru.h b/arch/mips/include/asm/sn/fru.h
index b3e3606723b7..bbb83257c8e2 100644
--- a/arch/mips/include/asm/sn/fru.h
+++ b/arch/mips/include/asm/sn/fru.h
@@ -21,24 +21,24 @@ typedef struct kf_mem_s {
* is this necessary ?
*/
confidence_t km_dimm[MAX_DIMMS];
- /* confidence level that dimm[i] is bad
+ /* confidence level that dimm[i] is bad
*I think this is the right number
*/
} kf_mem_t;
typedef struct kf_cpu_s {
- confidence_t kc_confidence; /* confidence level that cpu is bad */
- confidence_t kc_icache; /* confidence level that instr. cache is bad */
- confidence_t kc_dcache; /* confidence level that data cache is bad */
- confidence_t kc_scache; /* confidence level that sec. cache is bad */
+ confidence_t kc_confidence; /* confidence level that cpu is bad */
+ confidence_t kc_icache; /* confidence level that instr. cache is bad */
+ confidence_t kc_dcache; /* confidence level that data cache is bad */
+ confidence_t kc_scache; /* confidence level that sec. cache is bad */
confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
} kf_cpu_t;
typedef struct kf_pci_bus_s {
confidence_t kpb_belief; /* confidence level that the pci bus is bad */
confidence_t kpb_pcidev_belief[MAX_PCIDEV];
- /* confidence level that the pci dev is bad */
+ /* confidence level that the pci dev is bad */
} kf_pci_bus_t;
#endif /* __ASM_SN_FRU_H */
diff --git a/arch/mips/include/asm/sn/gda.h b/arch/mips/include/asm/sn/gda.h
index 9cb6ff770915..85fa1b5f639d 100644
--- a/arch/mips/include/asm/sn/gda.h
+++ b/arch/mips/include/asm/sn/gda.h
@@ -8,7 +8,7 @@
* Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
*
* gda.h -- Contains the data structure for the global data area,
- * The GDA contains information communicated between the
+ * The GDA contains information communicated between the
* PROM, SYMMON, and the kernel.
*/
#ifndef _ASM_SN_GDA_H
@@ -23,8 +23,8 @@
*
* Version # | Change
* -------------+-------------------------------------------------------
- * 1 | Initial SN0 version
- * 2 | Prom sets g_partid field to the partition number. 0 IS
+ * 1 | Initial SN0 version
+ * 2 | Prom sets g_partid field to the partition number. 0 IS
* | a valid partition #.
*/
@@ -60,7 +60,7 @@ typedef struct gda {
/* Pointer to a mask of nodes with copies
* of the kernel. */
char g_padding[56]; /* pad out to 128 bytes */
- nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
+ nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
* indexed by cnodeid.
*/
} gda_t;
@@ -74,7 +74,7 @@ typedef struct gda {
* revisions assume GDA is NOT set up, and read partition
* information from the board info.
*/
-#define PART_GDA_VERSION 2
+#define PART_GDA_VERSION 2
/*
* The following requests can be sent to the PROM during startup.
@@ -83,17 +83,17 @@ typedef struct gda {
#define PROMOP_MAGIC 0x0ead0000
#define PROMOP_MAGIC_MASK 0x0fff0000
-#define PROMOP_BIST_SHIFT 11
-#define PROMOP_BIST_MASK (0x3 << 11)
+#define PROMOP_BIST_SHIFT 11
+#define PROMOP_BIST_MASK (0x3 << 11)
#define PROMOP_REG PI_ERR_STACK_ADDR_A
#define PROMOP_INVALID (PROMOP_MAGIC | 0x00)
-#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
-#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
-#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
-#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
-#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
+#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
+#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
+#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
+#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
+#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
#define PROMOP_CMD_MASK 0x00f0
#define PROMOP_OPTIONS_MASK 0xfff0
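
The PROMOP_* words above pack a magic pattern into the upper bits and the command into bits 7:4, so the PROM can validate a request before acting on it. A standalone decode using only the constants shown in the hunk:

#include <stdio.h>

#define PROMOP_MAGIC       0x0ead0000
#define PROMOP_MAGIC_MASK  0x0fff0000
#define PROMOP_CMD_MASK    0x00f0
#define PROMOP_RESTART     (PROMOP_MAGIC | 0x30)

int main(void)
{
	unsigned int req = PROMOP_RESTART;

	if ((req & PROMOP_MAGIC_MASK) == PROMOP_MAGIC)
		printf("valid PROM request, command 0x%x\n", req & PROMOP_CMD_MASK);
	return 0;
}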
diff --git a/arch/mips/include/asm/sn/intr.h b/arch/mips/include/asm/sn/intr.h
index 6718b644b970..fc1348193957 100644
--- a/arch/mips/include/asm/sn/intr.h
+++ b/arch/mips/include/asm/sn/intr.h
@@ -14,8 +14,8 @@
#define INT_PEND0_BASELVL 0
#define INT_PEND1_BASELVL 64
-#define N_INTPENDJUNK_BITS 8
-#define INTPENDJUNK_CLRBIT 0x80
+#define N_INTPENDJUNK_BITS 8
+#define INTPENDJUNK_CLRBIT 0x80
/*
* Macros to manipulate the interrupt register on the calling hub chip.
@@ -32,7 +32,7 @@
* We do an uncached load of the int_pend0 register to ensure this.
*/
-#define LOCAL_HUB_CLR_INTR(level) \
+#define LOCAL_HUB_CLR_INTR(level) \
do { \
LOCAL_HUB_S(PI_INT_PEND_MOD, (level)); \
LOCAL_HUB_L(PI_INT_PEND0); \
@@ -40,7 +40,7 @@ do { \
#define REMOTE_HUB_CLR_INTR(hub, level) \
do { \
- nasid_t __hub = (hub); \
+ nasid_t __hub = (hub); \
\
REMOTE_HUB_S(__hub, PI_INT_PEND_MOD, (level)); \
REMOTE_HUB_L(__hub, PI_INT_PEND0); \
@@ -102,8 +102,8 @@ do { \
#define LLP_PFAIL_INTR_A 41 /* see ml/SN/SN0/sysctlr.c */
#define LLP_PFAIL_INTR_B 42
-#define TLB_INTR_A 43 /* used for tlb flush random */
-#define TLB_INTR_B 44
+#define TLB_INTR_A 43 /* used for tlb flush random */
+#define TLB_INTR_B 44
#define IP27_INTR_0 45 /* Reserved for PROM use */
#define IP27_INTR_1 46 /* do not use in Kernel */
@@ -116,8 +116,8 @@ do { \
#define BRIDGE_ERROR_INTR 53 /* Setup by PROM to catch */
/* Bridge Errors */
-#define DEBUG_INTR_A 54
-#define DEBUG_INTR_B 55 /* Used by symmon to stop all cpus */
+#define DEBUG_INTR_A 54
+#define DEBUG_INTR_B 55 /* Used by symmon to stop all cpus */
#define IO_ERROR_INTR 57 /* Setup by PROM */
#define CLK_ERR_INTR 58
#define COR_ERR_INTR_A 59
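
The interrupt levels above are absolute bit numbers split across two pending registers, with INT_PEND0_BASELVL at 0 and INT_PEND1_BASELVL at 64, so a level such as DEBUG_INTR_B (55) presumably lands in bit 55 of the first register. A hedged sketch of that mapping:

#include <stdio.h>

#define INT_PEND0_BASELVL 0
#define INT_PEND1_BASELVL 64
#define DEBUG_INTR_B      55

int main(void)
{
	int level = DEBUG_INTR_B;
	int reg = (level < INT_PEND1_BASELVL) ? 0 : 1;    /* which PI_INT_PENDn */
	int bit = level - (reg ? INT_PEND1_BASELVL : INT_PEND0_BASELVL);

	printf("level %d -> PI_INT_PEND%d bit %d\n", level, reg, bit);
	return 0;
}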
diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h
index 24c6775fbb0f..d5174d04538c 100644
--- a/arch/mips/include/asm/sn/io.h
+++ b/arch/mips/include/asm/sn/io.h
@@ -31,7 +31,7 @@
#define HUB_PIO_MAP_TO_MEM 0
#define HUB_PIO_MAP_TO_IO 1
-#define IIO_ITTE_INVALID_WIDGET 3 /* an invalid widget */
+#define IIO_ITTE_INVALID_WIDGET 3 /* an invalid widget */
#define IIO_ITTE_PUT(nasid, bigwin, io_or_mem, widget, addr) \
REMOTE_HUB_S((nasid), IIO_ITTE(bigwin), \
@@ -52,7 +52,7 @@
* value _x is expected to be a widget number in the range
* 0, 8 - 0xF
*/
-#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
(_x) : \
(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
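
IIO_IOPRB() above maps the sparse widget numbering (0, then 8 through 0xF) onto consecutive 8-byte PRB register slots. A standalone check of that arithmetic, assuming HUB_WIDGET_ID_MIN is 0x8 as the range comment suggests, with a zero placeholder for the IIO_IOPRB_0 base:

#include <stdio.h>

#define IIO_IOPRB_0        0x0        /* placeholder base for this demo */
#define HUB_WIDGET_ID_MIN  0x8        /* assumed; widgets are 0, 8 - 0xF */

#define IIO_IOPRB(_x)	(IIO_IOPRB_0 + (((_x) < HUB_WIDGET_ID_MIN ? \
			 (_x) : (_x) - (HUB_WIDGET_ID_MIN - 1)) << 3))

int main(void)
{
	int widgets[] = { 0x0, 0x8, 0x9, 0xF };

	for (int i = 0; i < 4; i++)
		printf("widget 0x%x -> PRB at base + 0x%x\n",
		       widgets[i], IIO_IOPRB(widgets[i]));
	/* 0x0 -> +0, 0x8 -> +8, 0x9 -> +16, 0xF -> +64 */
	return 0;
}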
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index 099677774d71..e33f0363235b 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -62,8 +62,8 @@ struct ioc3_sioregs {
volatile u8 fill3[0x170 - 0x169 - 1];
- struct ioc3_uartregs uartb; /* 0x20170 */
- struct ioc3_uartregs uarta; /* 0x20178 */
+ struct ioc3_uartregs uartb; /* 0x20170 */
+ struct ioc3_uartregs uarta; /* 0x20178 */
};
/* Register layout of IOC3 in configuration space. */
@@ -106,7 +106,7 @@ struct ioc3 {
volatile u32 ppbr_l_b; /* 0x00094 */
volatile u32 ppcr_b; /* 0x00098 */
- /* Keyboard and Mouse Registers */
+ /* Keyboard and Mouse Registers */
volatile u32 km_csr; /* 0x0009c */
volatile u32 k_rd; /* 0x000a0 */
volatile u32 m_rd; /* 0x000a4 */
@@ -208,7 +208,7 @@ struct ioc3_erxbuf {
/*
* Ethernet TX Descriptor
*/
-#define ETXD_DATALEN 104
+#define ETXD_DATALEN 104
struct ioc3_etxd {
u32 cmd; /* command field */
u32 bufcnt; /* buffer counts field */
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
index fe02900b930d..467c313d5767 100644
--- a/arch/mips/include/asm/sn/klconfig.h
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 1999, 2000 by Ralf Baechle
*/
-#ifndef _ASM_SN_KLCONFIG_H
-#define _ASM_SN_KLCONFIG_H
+#ifndef _ASM_SN_KLCONFIG_H
+#define _ASM_SN_KLCONFIG_H
/*
* The KLCONFIG structures store info about the various BOARDs found
@@ -20,11 +20,11 @@
/*
* WARNING:
* Certain assembly language routines (notably xxxxx.s) in the IP27PROM
- * will depend on the format of the data structures in this file. In
- * most cases, rearranging the fields can seriously break things.
- * Adding fields in the beginning or middle can also break things.
- * Add fields if necessary, to the end of a struct in such a way
- * that offsets of existing fields do not change.
+ * will depend on the format of the data structures in this file. In
+ * most cases, rearranging the fields can seriously break things.
+ * Adding fields in the beginning or middle can also break things.
+ * Add fields if necessary, to the end of a struct in such a way
+ * that offsets of existing fields do not change.
*/
#include <linux/types.h>
@@ -35,7 +35,7 @@
#include <asm/sn/sn0/addrs.h>
//#include <sys/SN/router.h>
// XXX Stolen from <sys/SN/router.h>:
-#define MAX_ROUTER_PORTS (6) /* Max. number of ports on a router */
+#define MAX_ROUTER_PORTS (6) /* Max. number of ports on a router */
#include <asm/sn/fru.h>
//#include <sys/graph.h>
//#include <sys/xtalk/xbow.h>
@@ -63,14 +63,14 @@
typedef u64 nic_t;
-#define KLCFGINFO_MAGIC 0xbeedbabe
+#define KLCFGINFO_MAGIC 0xbeedbabe
typedef s32 klconf_off_t;
/*
* Some IMPORTANT OFFSETS. These are the offsets on all NODES.
*/
-#define MAX_MODULE_ID 255
+#define MAX_MODULE_ID 255
#define SIZE_PAD 4096 /* 4k padding for structures */
/*
* 1 NODE brd, 2 Router brd (1 8p, 1 meta), 6 Widgets,
@@ -86,25 +86,25 @@ typedef s32 klconf_off_t;
/* All bits in this field are currently used. Try the pad fields if
you need more flag bits */
-#define ENABLE_BOARD 0x01
-#define FAILED_BOARD 0x02
-#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
+#define ENABLE_BOARD 0x01
+#define FAILED_BOARD 0x02
+#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
are discovered twice. Use one of them */
#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */
-#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
+#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
#define GLOBAL_MASTER_IO6 0x20
-#define THIRD_NIC_PRESENT 0x40 /* for future use */
-#define SECOND_NIC_PRESENT 0x80 /* addons like MIO are present */
+#define THIRD_NIC_PRESENT 0x40 /* for future use */
+#define SECOND_NIC_PRESENT 0x80 /* addons like MIO are present */
/* klinfo->flags fields */
-#define KLINFO_ENABLE 0x01 /* This component is enabled */
-#define KLINFO_FAILED 0x02 /* This component failed */
-#define KLINFO_DEVICE 0x04 /* This component is a device */
-#define KLINFO_VISITED 0x08 /* This component has been visited */
-#define KLINFO_CONTROLLER 0x10 /* This component is a device controller */
-#define KLINFO_INSTALL 0x20 /* Install a driver */
-#define KLINFO_HEADLESS 0x40 /* Headless (or hubless) component */
+#define KLINFO_ENABLE 0x01 /* This component is enabled */
+#define KLINFO_FAILED 0x02 /* This component failed */
+#define KLINFO_DEVICE 0x04 /* This component is a device */
+#define KLINFO_VISITED 0x08 /* This component has been visited */
+#define KLINFO_CONTROLLER 0x10 /* This component is a device controller */
+#define KLINFO_INSTALL 0x20 /* Install a driver */
+#define KLINFO_HEADLESS 0x40 /* Headless (or hubless) component */
#define IS_CONSOLE_IOC3(i) ((((klinfo_t *)i)->flags) & KLINFO_INSTALL)
#define GB2 0x80000000
@@ -116,30 +116,30 @@ typedef s32 klconf_off_t;
is used in the code to allocate various areas.
*/
-#define BOARD_STRUCT 0
-#define COMPONENT_STRUCT 1
-#define ERRINFO_STRUCT 2
-#define KLMALLOC_TYPE_MAX (ERRINFO_STRUCT + 1)
-#define DEVICE_STRUCT 3
+#define BOARD_STRUCT 0
+#define COMPONENT_STRUCT 1
+#define ERRINFO_STRUCT 2
+#define KLMALLOC_TYPE_MAX (ERRINFO_STRUCT + 1)
+#define DEVICE_STRUCT 3
typedef struct console_s {
- unsigned long uart_base;
- unsigned long config_base;
- unsigned long memory_base;
+ unsigned long uart_base;
+ unsigned long config_base;
+ unsigned long memory_base;
short baud;
short flag;
int type;
nasid_t nasid;
char wid;
- char npci;
+ char npci;
nic_t baseio_nic;
} console_t;
typedef struct klc_malloc_hdr {
- klconf_off_t km_base;
- klconf_off_t km_limit;
- klconf_off_t km_current;
+ klconf_off_t km_base;
+ klconf_off_t km_limit;
+ klconf_off_t km_current;
} klc_malloc_hdr_t;
/* Functions/macros needed to use this structure */
@@ -148,7 +148,7 @@ typedef struct kl_config_hdr {
u64 ch_magic; /* set this to KLCFGINFO_MAGIC */
u32 ch_version; /* structure version number */
klconf_off_t ch_malloc_hdr_off; /* offset of ch_malloc_hdr */
- klconf_off_t ch_cons_off; /* offset of ch_cons */
+ klconf_off_t ch_cons_off; /* offset of ch_cons */
klconf_off_t ch_board_info; /* the link list of boards */
console_t ch_cons_info; /* address info of the console */
klc_malloc_hdr_t ch_malloc_hdr[KLMALLOC_TYPE_MAX];
@@ -157,27 +157,27 @@ typedef struct kl_config_hdr {
} kl_config_hdr_t;
-#define KL_CONFIG_HDR(_nasid) ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
+#define KL_CONFIG_HDR(_nasid) ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
#define KL_CONFIG_INFO_OFFSET(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_board_info)
+ (KL_CONFIG_HDR(_nasid)->ch_board_info)
#define KL_CONFIG_INFO_SET_OFFSET(_nasid, _off) \
- (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
+ (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
-#define KL_CONFIG_INFO(_nasid) \
- (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ? \
+#define KL_CONFIG_INFO(_nasid) \
+ (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ? \
NODE_OFFSET_TO_K1((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
0)
#define KL_CONFIG_MAGIC(_nasid) (KL_CONFIG_HDR(_nasid)->ch_magic)
#define KL_CONFIG_CHECK_MAGIC(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
+ (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
#define KL_CONFIG_HDR_INIT_MAGIC(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
+ (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
/* --- New Macros for the changed kl_config_hdr_t structure --- */
-#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
+#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
((unsigned long)_k + (_k->ch_malloc_hdr_off)))
#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
@@ -190,29 +190,29 @@ typedef struct kl_config_hdr {
/* ------------------------------------------------------------- */
#define KL_CONFIG_INFO_START(_nasid) \
- (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
+ (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
#define KL_CONFIG_BOARD_NASID(_brd) ((_brd)->brd_nasid)
#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off) ((_brd)->brd_next = (_off))
-#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
+#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
-#define XBOW_PORT_TYPE_HUB(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
-#define XBOW_PORT_TYPE_IO(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
+#define XBOW_PORT_TYPE_HUB(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
+#define XBOW_PORT_TYPE_IO(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
-#define XBOW_PORT_IS_ENABLED(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
-#define XBOW_PORT_NASID(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
+#define XBOW_PORT_IS_ENABLED(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
+#define XBOW_PORT_NASID(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
-#define XBOW_PORT_IO 0x1
-#define XBOW_PORT_HUB 0x2
+#define XBOW_PORT_IO 0x1
+#define XBOW_PORT_HUB 0x2
#define XBOW_PORT_ENABLE 0x4
-#define SN0_PORT_FENCE_SHFT 0
-#define SN0_PORT_FENCE_MASK (1 << SN0_PORT_FENCE_SHFT)
+#define SN0_PORT_FENCE_SHFT 0
+#define SN0_PORT_FENCE_MASK (1 << SN0_PORT_FENCE_SHFT)
/*
* The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
@@ -242,28 +242,28 @@ typedef struct kl_config_hdr {
*
KLCONFIG
- +------------+ +------------+ +------------+ +------------+
- | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
- +------------+ | +------------+ | +------------+ | +------------+
- | board info | | | board info | | |errinfo,bptr| | | board info |
- +------------+ | +------------+ | +------------+ | +------------+
- | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
- +------------+ +------------+ +------------+ +------------+
+ +------------+ +------------+ +------------+ +------------+
+ | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | board info | | | board info | | |errinfo,bptr| | | board info |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+ +------------+ +------------+ +------------+ +------------+
+------------+
| board info |
- +------------+ +--------------------------------+
+ +------------+ +--------------------------------+
| compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
- +------------+ +--------------------------------+
+ +------------+ +--------------------------------+
| compt 2 |--+
- +------------+ | +--------------------------------+
- | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
- +------------+ +--------------------------------+
+ +------------+ | +--------------------------------+
+ | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+ +------------+ +--------------------------------+
| errinfo |--+
- +------------+ | +--------------------------------+
- +--->|r/l brd errinfo,compt err flags |
- +--------------------------------+
+ +------------+ | +--------------------------------+
+ +--->|r/l brd errinfo,compt err flags |
+ +--------------------------------+
*
* Each BOARD consists of COMPONENTs and the BOARD structure has
@@ -311,7 +311,7 @@ typedef struct kl_config_hdr {
*/
#define KL_CPU_R4000 0x1 /* Standard R4000 */
#define KL_CPU_TFP 0x2 /* TFP processor */
-#define KL_CPU_R10000 0x3 /* R10000 (T5) */
+#define KL_CPU_R10000 0x3 /* R10000 (T5) */
#define KL_CPU_NONE (-1) /* no cpu present in slot */
/*
@@ -320,13 +320,13 @@ typedef struct kl_config_hdr {
#define KLCLASS_MASK 0xf0
#define KLCLASS_NONE 0x00
-#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
+#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
#define KLCLASS_CPU KLCLASS_NODE
-#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
+#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
and the non-graphics widget boards */
-#define KLCLASS_ROUTER 0x30 /* Router board */
-#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
- so that we can record error info */
+#define KLCLASS_ROUTER 0x30 /* Router board */
+#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
+ so that we can record error info */
#define KLCLASS_GFX 0x50 /* graphics boards */
#define KLCLASS_PSEUDO_GFX 0x60 /* HDTV type cards that use a gfx
@@ -336,7 +336,7 @@ typedef struct kl_config_hdr {
#define KLCLASS_MAX 7 /* Bump this if a new CLASS is added */
#define KLTYPE_MAX 10 /* Bump this if a new CLASS is added */
-#define KLCLASS_UNKNOWN 0xf0
+#define KLCLASS_UNKNOWN 0xf0
#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
@@ -353,36 +353,36 @@ typedef struct kl_config_hdr {
#define KLTYPE_WEIRDIO (KLCLASS_IO | 0x0)
#define KLTYPE_BASEIO (KLCLASS_IO | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
-#define KLTYPE_IO6 KLTYPE_BASEIO /* Additional name */
+#define KLTYPE_IO6 KLTYPE_BASEIO /* Additional name */
#define KLTYPE_4CHSCSI (KLCLASS_IO | 0x2)
-#define KLTYPE_MSCSI KLTYPE_4CHSCSI /* Additional name */
-#define KLTYPE_ETHERNET (KLCLASS_IO | 0x3)
-#define KLTYPE_MENET KLTYPE_ETHERNET /* Additional name */
-#define KLTYPE_FDDI (KLCLASS_IO | 0x4)
+#define KLTYPE_MSCSI KLTYPE_4CHSCSI /* Additional name */
+#define KLTYPE_ETHERNET (KLCLASS_IO | 0x3)
+#define KLTYPE_MENET KLTYPE_ETHERNET /* Additional name */
+#define KLTYPE_FDDI (KLCLASS_IO | 0x4)
#define KLTYPE_UNUSED (KLCLASS_IO | 0x5) /* XXX UNUSED */
-#define KLTYPE_HAROLD (KLCLASS_IO | 0x6) /* PCI SHOE BOX */
+#define KLTYPE_HAROLD (KLCLASS_IO | 0x6) /* PCI SHOE BOX */
#define KLTYPE_PCI KLTYPE_HAROLD
-#define KLTYPE_VME (KLCLASS_IO | 0x7) /* Any 3rd party VME card */
-#define KLTYPE_MIO (KLCLASS_IO | 0x8)
-#define KLTYPE_FC (KLCLASS_IO | 0x9)
-#define KLTYPE_LINC (KLCLASS_IO | 0xA)
-#define KLTYPE_TPU (KLCLASS_IO | 0xB) /* Tensor Processing Unit */
-#define KLTYPE_GSN_A (KLCLASS_IO | 0xC) /* Main GSN board */
-#define KLTYPE_GSN_B (KLCLASS_IO | 0xD) /* Auxiliary GSN board */
+#define KLTYPE_VME (KLCLASS_IO | 0x7) /* Any 3rd party VME card */
+#define KLTYPE_MIO (KLCLASS_IO | 0x8)
+#define KLTYPE_FC (KLCLASS_IO | 0x9)
+#define KLTYPE_LINC (KLCLASS_IO | 0xA)
+#define KLTYPE_TPU (KLCLASS_IO | 0xB) /* Tensor Processing Unit */
+#define KLTYPE_GSN_A (KLCLASS_IO | 0xC) /* Main GSN board */
+#define KLTYPE_GSN_B (KLCLASS_IO | 0xD) /* Auxiliary GSN board */
#define KLTYPE_GFX (KLCLASS_GFX | 0x0) /* unknown graphics type */
#define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
#define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
#define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
-#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
-#define KLTYPE_ROUTER2 KLTYPE_ROUTER /* Obsolete! */
+#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_ROUTER2 KLTYPE_ROUTER /* Obsolete! */
#define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
#define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
#define KLTYPE_MIDPLANE8 (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
-#define KLTYPE_MIDPLANE KLTYPE_MIDPLANE8
+#define KLTYPE_MIDPLANE KLTYPE_MIDPLANE8
#define KLTYPE_PBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
@@ -398,11 +398,11 @@ typedef struct kl_config_hdr {
* When bringup started nic names had not standardized and so we
* had to hard code. (For people interested in history.)
*/
-#define KLTYPE_XTHD (KLCLASS_PSEUDO_GFX | 0x9)
+#define KLTYPE_XTHD (KLCLASS_PSEUDO_GFX | 0x9)
#define KLTYPE_UNKNOWN (KLCLASS_UNKNOWN | 0xf)
-#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
+#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
#define IS_MIO_PRESENT(l) ((l->brd_type == KLTYPE_BASEIO) && \
(l->brd_flags & SECOND_NIC_PRESENT))
#define IS_MIO_IOC3(l, n) (IS_MIO_PRESENT(l) && (n > 2))
@@ -416,33 +416,33 @@ typedef struct kl_config_hdr {
#define LOCAL_BOARD 1
#define REMOTE_BOARD 2
-#define LBOARD_STRUCT_VERSION 2
+#define LBOARD_STRUCT_VERSION 2
typedef struct lboard_s {
- klconf_off_t brd_next; /* Next BOARD */
- unsigned char struct_type; /* type of structure, local or remote */
- unsigned char brd_type; /* type+class */
- unsigned char brd_sversion; /* version of this structure */
- unsigned char brd_brevision; /* board revision */
- unsigned char brd_promver; /* board prom version, if any */
- unsigned char brd_flags; /* Enabled, Disabled etc */
- unsigned char brd_slot; /* slot number */
- unsigned short brd_debugsw; /* Debug switches */
- moduleid_t brd_module; /* module to which it belongs */
- partid_t brd_partition; /* Partition number */
- unsigned short brd_diagval; /* diagnostic value */
- unsigned short brd_diagparm; /* diagnostic parameter */
- unsigned char brd_inventory; /* inventory history */
- unsigned char brd_numcompts; /* Number of components */
- nic_t brd_nic; /* Number in CAN */
- nasid_t brd_nasid; /* passed parameter */
- klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
- klconf_off_t brd_errinfo; /* Board's error information */
+ klconf_off_t brd_next; /* Next BOARD */
+ unsigned char struct_type; /* type of structure, local or remote */
+ unsigned char brd_type; /* type+class */
+ unsigned char brd_sversion; /* version of this structure */
+ unsigned char brd_brevision; /* board revision */
+ unsigned char brd_promver; /* board prom version, if any */
+ unsigned char brd_flags; /* Enabled, Disabled etc */
+ unsigned char brd_slot; /* slot number */
+ unsigned short brd_debugsw; /* Debug switches */
+ moduleid_t brd_module; /* module to which it belongs */
+ partid_t brd_partition; /* Partition number */
+ unsigned short brd_diagval; /* diagnostic value */
+ unsigned short brd_diagparm; /* diagnostic parameter */
+ unsigned char brd_inventory; /* inventory history */
+ unsigned char brd_numcompts; /* Number of components */
+ nic_t brd_nic; /* Number in CAN */
+ nasid_t brd_nasid; /* passed parameter */
+ klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+ klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
- vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
+ vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
confidence_t brd_confidence; /* confidence that the board is bad */
- nasid_t brd_owner; /* who owns this board */
- unsigned char brd_nic_flags; /* To handle 8 more NICs */
+ nasid_t brd_owner; /* who owns this board */
+ unsigned char brd_nic_flags; /* To handle 8 more NICs */
char brd_name[32];
} lboard_t;
@@ -456,23 +456,23 @@ typedef struct lboard_s {
#define KLCF_CLASS(_brd) KLCLASS((_brd)->brd_type)
#define KLCF_TYPE(_brd) KLTYPE((_brd)->brd_type)
-#define KLCF_REMOTE(_brd) (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
+#define KLCF_REMOTE(_brd) (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
#define KLCF_NUM_COMPS(_brd) ((_brd)->brd_numcompts)
#define KLCF_MODULE_ID(_brd) ((_brd)->brd_module)
-#define KLCF_NEXT(_brd) \
- ((_brd)->brd_next ? \
+#define KLCF_NEXT(_brd) \
+ ((_brd)->brd_next ? \
(lboard_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), (_brd)->brd_next)):\
NULL)
-#define KLCF_COMP(_brd, _ndx) \
- (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
+#define KLCF_COMP(_brd, _ndx) \
+ (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
(_brd)->brd_compts[(_ndx)]))
#define KLCF_COMP_ERROR(_brd, _comp) \
- (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
+ (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
#define KLCF_COMP_TYPE(_comp) ((_comp)->struct_type)
-#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid) /* Widget ID */
+#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid) /* Widget ID */
@@ -481,73 +481,73 @@ typedef struct lboard_s {
* component.
*/
-typedef struct klinfo_s { /* Generic info */
- unsigned char struct_type; /* type of this structure */
- unsigned char struct_version; /* version of this structure */
- unsigned char flags; /* Enabled, disabled etc */
- unsigned char revision; /* component revision */
- unsigned short diagval; /* result of diagnostics */
- unsigned short diagparm; /* diagnostic parameter */
- unsigned char inventory; /* previous inventory status */
- nic_t nic; /* MUst be aligned properly */
- unsigned char physid; /* physical id of component */
- unsigned int virtid; /* virtual id as seen by system */
- unsigned char widid; /* Widget id - if applicable */
- nasid_t nasid; /* node number - from parent */
+typedef struct klinfo_s { /* Generic info */
+ unsigned char struct_type; /* type of this structure */
+ unsigned char struct_version; /* version of this structure */
+ unsigned char flags; /* Enabled, disabled etc */
+ unsigned char revision; /* component revision */
+ unsigned short diagval; /* result of diagnostics */
+ unsigned short diagparm; /* diagnostic parameter */
+ unsigned char inventory; /* previous inventory status */
+ nic_t nic; /* Must be aligned properly */
+ unsigned char physid; /* physical id of component */
+ unsigned int virtid; /* virtual id as seen by system */
+ unsigned char widid; /* Widget id - if applicable */
+ nasid_t nasid; /* node number - from parent */
char pad1; /* pad out structure. */
char pad2; /* pad out structure. */
- COMPONENT *arcs_compt; /* ptr to the arcs struct for ease*/
- klconf_off_t errinfo; /* component specific errors */
- unsigned short pad3; /* pci fields have moved over to */
- unsigned short pad4; /* klbri_t */
+ COMPONENT *arcs_compt; /* ptr to the arcs struct for ease*/
+ klconf_off_t errinfo; /* component specific errors */
+ unsigned short pad3; /* pci fields have moved over to */
+ unsigned short pad4; /* klbri_t */
} klinfo_t ;
#define KLCONFIG_INFO_ENABLED(_i) ((_i)->flags & KLINFO_ENABLE)
/*
* Component structures.
* Following are the currently identified components:
- * CPU, HUB, MEM_BANK,
- * XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
- * BRIDGE, IOC3, SuperIO, SCSI, FDDI
- * ROUTER
- * GRAPHICS
+ * CPU, HUB, MEM_BANK,
+ * XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
+ * BRIDGE, IOC3, SuperIO, SCSI, FDDI
+ * ROUTER
+ * GRAPHICS
*/
#define KLSTRUCT_UNKNOWN 0
-#define KLSTRUCT_CPU 1
-#define KLSTRUCT_HUB 2
-#define KLSTRUCT_MEMBNK 3
-#define KLSTRUCT_XBOW 4
-#define KLSTRUCT_BRI 5
-#define KLSTRUCT_IOC3 6
-#define KLSTRUCT_PCI 7
-#define KLSTRUCT_VME 8
+#define KLSTRUCT_CPU 1
+#define KLSTRUCT_HUB 2
+#define KLSTRUCT_MEMBNK 3
+#define KLSTRUCT_XBOW 4
+#define KLSTRUCT_BRI 5
+#define KLSTRUCT_IOC3 6
+#define KLSTRUCT_PCI 7
+#define KLSTRUCT_VME 8
#define KLSTRUCT_ROU 9
-#define KLSTRUCT_GFX 10
-#define KLSTRUCT_SCSI 11
-#define KLSTRUCT_FDDI 12
-#define KLSTRUCT_MIO 13
-#define KLSTRUCT_DISK 14
-#define KLSTRUCT_TAPE 15
-#define KLSTRUCT_CDROM 16
-#define KLSTRUCT_HUB_UART 17
-#define KLSTRUCT_IOC3ENET 18
-#define KLSTRUCT_IOC3UART 19
+#define KLSTRUCT_GFX 10
+#define KLSTRUCT_SCSI 11
+#define KLSTRUCT_FDDI 12
+#define KLSTRUCT_MIO 13
+#define KLSTRUCT_DISK 14
+#define KLSTRUCT_TAPE 15
+#define KLSTRUCT_CDROM 16
+#define KLSTRUCT_HUB_UART 17
+#define KLSTRUCT_IOC3ENET 18
+#define KLSTRUCT_IOC3UART 19
#define KLSTRUCT_UNUSED 20 /* XXX UNUSED */
-#define KLSTRUCT_IOC3PCKM 21
-#define KLSTRUCT_RAD 22
-#define KLSTRUCT_HUB_TTY 23
-#define KLSTRUCT_IOC3_TTY 24
+#define KLSTRUCT_IOC3PCKM 21
+#define KLSTRUCT_RAD 22
+#define KLSTRUCT_HUB_TTY 23
+#define KLSTRUCT_IOC3_TTY 24
/* Early Access IO proms are compatible
only with KLSTRUCT values up to 24. */
-#define KLSTRUCT_FIBERCHANNEL 25
+#define KLSTRUCT_FIBERCHANNEL 25
#define KLSTRUCT_MOD_SERIAL_NUM 26
-#define KLSTRUCT_IOC3MS 27
-#define KLSTRUCT_TPU 28
-#define KLSTRUCT_GSN_A 29
-#define KLSTRUCT_GSN_B 30
-#define KLSTRUCT_XTHD 31
+#define KLSTRUCT_IOC3MS 27
+#define KLSTRUCT_TPU 28
+#define KLSTRUCT_GSN_A 29
+#define KLSTRUCT_GSN_B 30
+#define KLSTRUCT_XTHD 31
/*
* These are the indices of various components within a lboard structure.
@@ -583,7 +583,7 @@ typedef u64 *router_t;
* The port info in ip27_cfg area translates to a lboard_t in the
* KLCONFIG area. But since KLCONFIG does not use pointers, lboard_t
* is stored in terms of a nasid and an offset from start of KLCONFIG
- * area on that nasid.
+ * area on that nasid.
*/
typedef struct klport_s {
nasid_t port_nasid;
@@ -591,20 +591,20 @@ typedef struct klport_s {
klconf_off_t port_offset;
} klport_t;
-typedef struct klcpu_s { /* CPU */
- klinfo_t cpu_info;
- unsigned short cpu_prid; /* Processor PRID value */
- unsigned short cpu_fpirr; /* FPU IRR value */
- unsigned short cpu_speed; /* Speed in MHZ */
- unsigned short cpu_scachesz; /* secondary cache size in MB */
- unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
+typedef struct klcpu_s { /* CPU */
+ klinfo_t cpu_info;
+ unsigned short cpu_prid; /* Processor PRID value */
+ unsigned short cpu_fpirr; /* FPU IRR value */
+ unsigned short cpu_speed; /* Speed in MHZ */
+ unsigned short cpu_scachesz; /* secondary cache size in MB */
+ unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
} klcpu_t ;
#define CPU_STRUCT_VERSION 2
typedef struct klhub_s { /* HUB */
- klinfo_t hub_info;
- unsigned int hub_flags; /* PCFG_HUB_xxx flags */
+ klinfo_t hub_info;
+ unsigned int hub_flags; /* PCFG_HUB_xxx flags */
klport_t hub_port; /* hub is connected to this */
nic_t hub_box_nic; /* nic of containing box */
klconf_off_t hub_mfg_nic; /* MFG NIC string */
@@ -612,36 +612,36 @@ typedef struct klhub_s { /* HUB */
} klhub_t ;
typedef struct klhub_uart_s { /* HUB */
- klinfo_t hubuart_info;
- unsigned int hubuart_flags; /* PCFG_HUB_xxx flags */
+ klinfo_t hubuart_info;
+ unsigned int hubuart_flags; /* PCFG_HUB_xxx flags */
nic_t hubuart_box_nic; /* nic of containing box */
} klhub_uart_t ;
-#define MEMORY_STRUCT_VERSION 2
+#define MEMORY_STRUCT_VERSION 2
typedef struct klmembnk_s { /* MEMORY BANK */
- klinfo_t membnk_info;
- short membnk_memsz; /* Total memory in megabytes */
+ klinfo_t membnk_info;
+ short membnk_memsz; /* Total memory in megabytes */
short membnk_dimm_select; /* bank to physical addr mapping*/
short membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
short membnk_attr;
} klmembnk_t ;
#define KLCONFIG_MEMBNK_SIZE(_info, _bank) \
- ((_info)->membnk_bnksz[(_bank)])
+ ((_info)->membnk_bnksz[(_bank)])
#define MEMBNK_PREMIUM 1
#define KLCONFIG_MEMBNK_PREMIUM(_info, _bank) \
- ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
+ ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
#define MAX_SERIAL_NUM_SIZE 10
typedef struct klmod_serial_num_s {
- klinfo_t snum_info;
+ klinfo_t snum_info;
union {
- char snum_str[MAX_SERIAL_NUM_SIZE];
- unsigned long long snum_int;
+ char snum_str[MAX_SERIAL_NUM_SIZE];
+ unsigned long long snum_int;
} snum;
} klmod_serial_num_t;
@@ -650,43 +650,43 @@ typedef struct klmod_serial_num_s {
serial number struct as a component without losing compatibility
between prom versions. */
-#define GET_SNUM_COMP(_l) ((klmod_serial_num_t *)\
+#define GET_SNUM_COMP(_l) ((klmod_serial_num_t *)\
KLCF_COMP(_l, _l->brd_numcompts))
#define MAX_XBOW_LINKS 16
-typedef struct klxbow_s { /* XBOW */
- klinfo_t xbow_info ;
+typedef struct klxbow_s { /* XBOW */
+ klinfo_t xbow_info ;
klport_t xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
- int xbow_master_hub_link;
- /* type of brd connected+component struct ptr+flags */
+ int xbow_master_hub_link;
+ /* type of brd connected+component struct ptr+flags */
} klxbow_t ;
#define MAX_PCI_SLOTS 8
typedef struct klpci_device_s {
s32 pci_device_id; /* 32 bits of vendor/device ID. */
- s32 pci_device_pad; /* 32 bits of padding. */
+ s32 pci_device_pad; /* 32 bits of padding. */
} klpci_device_t;
#define BRIDGE_STRUCT_VERSION 2
-typedef struct klbri_s { /* BRIDGE */
- klinfo_t bri_info ;
- unsigned char bri_eprominfo ; /* IO6prom connected to bridge */
- unsigned char bri_bustype ; /* PCI/VME BUS bridge/GIO */
- pci_t pci_specific ; /* PCI Board config info */
+typedef struct klbri_s { /* BRIDGE */
+ klinfo_t bri_info ;
+ unsigned char bri_eprominfo ; /* IO6prom connected to bridge */
+ unsigned char bri_bustype ; /* PCI/VME BUS bridge/GIO */
+ pci_t pci_specific ; /* PCI Board config info */
klpci_device_t bri_devices[MAX_PCI_DEVS] ; /* PCI IDs */
klconf_off_t bri_mfg_nic ;
} klbri_t ;
#define MAX_IOC3_TTY 2
-typedef struct klioc3_s { /* IOC3 */
- klinfo_t ioc3_info ;
- unsigned char ioc3_ssram ; /* Info about ssram */
- unsigned char ioc3_nvram ; /* Info about nvram */
- klinfo_t ioc3_superio ; /* Info about superio */
+typedef struct klioc3_s { /* IOC3 */
+ klinfo_t ioc3_info ;
+ unsigned char ioc3_ssram ; /* Info about ssram */
+ unsigned char ioc3_nvram ; /* Info about nvram */
+ klinfo_t ioc3_superio ; /* Info about superio */
klconf_off_t ioc3_tty_off ;
klinfo_t ioc3_enet ;
klconf_off_t ioc3_enet_off ;
@@ -695,27 +695,27 @@ typedef struct klioc3_s { /* IOC3 */
#define MAX_VME_SLOTS 8
-typedef struct klvmeb_s { /* VME BRIDGE - PCI CTLR */
- klinfo_t vmeb_info ;
+typedef struct klvmeb_s { /* VME BRIDGE - PCI CTLR */
+ klinfo_t vmeb_info ;
vmeb_t vmeb_specific ;
- klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+ klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
} klvmeb_t ;
-typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
+typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
klinfo_t vmed_info ;
vmed_t vmed_specific ;
- klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+ klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
} klvmed_t ;
#define ROUTER_VECTOR_VERS 2
/* XXX - Don't we need the number of ports here?!? */
-typedef struct klrou_s { /* ROUTER */
- klinfo_t rou_info ;
- unsigned int rou_flags ; /* PCFG_ROUTER_xxx flags */
- nic_t rou_box_nic ; /* nic of the containing module */
- klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
- klconf_off_t rou_mfg_nic ; /* MFG NIC string */
+typedef struct klrou_s { /* ROUTER */
+ klinfo_t rou_info ;
+ unsigned int rou_flags ; /* PCFG_ROUTER_xxx flags */
+ nic_t rou_box_nic ; /* nic of the containing module */
+ klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
+ klconf_off_t rou_mfg_nic ; /* MFG NIC string */
u64 rou_vector; /* vector from master node */
} klrou_t ;
@@ -732,30 +732,30 @@ typedef struct klrou_s { /* ROUTER */
#define KLGFX_COOKIE 0x0c0de000
typedef struct klgfx_s { /* GRAPHICS Device */
- klinfo_t gfx_info;
- klconf_off_t old_gndevs; /* for compatibility with older proms */
- klconf_off_t old_gdoff0; /* for compatibility with older proms */
+ klinfo_t gfx_info;
+ klconf_off_t old_gndevs; /* for compatibility with older proms */
+ klconf_off_t old_gdoff0; /* for compatibility with older proms */
unsigned int cookie; /* for compatibility with older proms */
unsigned int moduleslot;
struct klgfx_s *gfx_next_pipe;
graphics_t gfx_specific;
- klconf_off_t pad0; /* for compatibility with older proms */
- klconf_off_t gfx_mfg_nic;
+ klconf_off_t pad0; /* for compatibility with older proms */
+ klconf_off_t gfx_mfg_nic;
} klgfx_t;
typedef struct klxthd_s {
- klinfo_t xthd_info ;
- klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
+ klinfo_t xthd_info ;
+ klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
} klxthd_t ;
-typedef struct kltpu_s { /* TPU board */
- klinfo_t tpu_info ;
- klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
+typedef struct kltpu_s { /* TPU board */
+ klinfo_t tpu_info ;
+ klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
} kltpu_t ;
-typedef struct klgsn_s { /* GSN board */
- klinfo_t gsn_info ;
- klconf_off_t gsn_mfg_nic ; /* MFG NIC string */
+typedef struct klgsn_s { /* GSN board */
+ klinfo_t gsn_info ;
+ klconf_off_t gsn_mfg_nic ; /* MFG NIC string */
} klgsn_t ;
#define MAX_SCSI_DEVS 16
@@ -767,57 +767,57 @@ typedef struct klgsn_s { /* GSN board */
* that as the size to be klmalloced.
*/
-typedef struct klscsi_s { /* SCSI Controller */
- klinfo_t scsi_info ;
- scsi_t scsi_specific ;
- unsigned char scsi_numdevs ;
+typedef struct klscsi_s { /* SCSI Controller */
+ klinfo_t scsi_info ;
+ scsi_t scsi_specific ;
+ unsigned char scsi_numdevs ;
klconf_off_t scsi_devinfo[MAX_SCSI_DEVS] ;
} klscsi_t ;
-typedef struct klscdev_s { /* SCSI device */
- klinfo_t scdev_info ;
+typedef struct klscdev_s { /* SCSI device */
+ klinfo_t scdev_info ;
struct scsidisk_data *scdev_cfg ; /* driver fills up this */
} klscdev_t ;
-typedef struct klttydev_s { /* TTY device */
- klinfo_t ttydev_info ;
+typedef struct klttydev_s { /* TTY device */
+ klinfo_t ttydev_info ;
struct terminal_data *ttydev_cfg ; /* driver fills up this */
} klttydev_t ;
-typedef struct klenetdev_s { /* ENET device */
- klinfo_t enetdev_info ;
+typedef struct klenetdev_s { /* ENET device */
+ klinfo_t enetdev_info ;
struct net_data *enetdev_cfg ; /* driver fills up this */
} klenetdev_t ;
-typedef struct klkbddev_s { /* KBD device */
- klinfo_t kbddev_info ;
+typedef struct klkbddev_s { /* KBD device */
+ klinfo_t kbddev_info ;
struct keyboard_data *kbddev_cfg ; /* driver fills up this */
} klkbddev_t ;
-typedef struct klmsdev_s { /* mouse device */
- klinfo_t msdev_info ;
- void *msdev_cfg ;
+typedef struct klmsdev_s { /* mouse device */
+ klinfo_t msdev_info ;
+ void *msdev_cfg ;
} klmsdev_t ;
#define MAX_FDDI_DEVS 10 /* XXX Is this true */
-typedef struct klfddi_s { /* FDDI */
- klinfo_t fddi_info ;
- fddi_t fddi_specific ;
+typedef struct klfddi_s { /* FDDI */
+ klinfo_t fddi_info ;
+ fddi_t fddi_specific ;
klconf_off_t fddi_devinfo[MAX_FDDI_DEVS] ;
} klfddi_t ;
-typedef struct klmio_s { /* MIO */
- klinfo_t mio_info ;
- mio_t mio_specific ;
+typedef struct klmio_s { /* MIO */
+ klinfo_t mio_info ;
+ mio_t mio_specific ;
} klmio_t ;
typedef union klcomp_s {
klcpu_t kc_cpu;
klhub_t kc_hub;
- klmembnk_t kc_mem;
- klxbow_t kc_xbow;
+ klmembnk_t kc_mem;
+ klxbow_t kc_xbow;
klbri_t kc_bri;
klioc3_t kc_ioc3;
klvmeb_t kc_vmeb;
@@ -831,11 +831,11 @@ typedef union klcomp_s {
klmod_serial_num_t kc_snum ;
} klcomp_t;
-typedef union kldev_s { /* for device structure allocation */
+typedef union kldev_s { /* for device structure allocation */
klscdev_t kc_scsi_dev ;
klttydev_t kc_tty_dev ;
klenetdev_t kc_enet_dev ;
- klkbddev_t kc_kbd_dev ;
+ klkbddev_t kc_kbd_dev ;
} kldev_t ;
/* Data structure interface routines. TBD */
diff --git a/arch/mips/include/asm/sn/kldir.h b/arch/mips/include/asm/sn/kldir.h
index 1327e12e9645..bfb3aec94539 100644
--- a/arch/mips/include/asm/sn/kldir.h
+++ b/arch/mips/include/asm/sn/kldir.h
@@ -16,8 +16,8 @@
* The kldir memory area resides at a fixed place in each node's memory and
* provides pointers to most other IP27 memory areas. This allows us to
* resize and/or relocate memory areas at a later time without breaking all
- * firmware and kernels that use them. Indices in the array are
- * permanently dedicated to areas listed below. Some memory areas (marked
+ * firmware and kernels that use them. Indices in the array are
+ * permanently dedicated to areas listed below. Some memory areas (marked
* below) reside at a permanently fixed location, but are included in the
* directory for completeness.
*/
@@ -28,98 +28,98 @@
* The upper portion of the memory map applies during boot
* only and is overwritten by IRIX/SYMMON.
*
- * MEMORY MAP PER NODE
+ * MEMORY MAP PER NODE
*
- * 0x2000000 (32M) +-----------------------------------------+
- * | IO6 BUFFERS FOR FLASH ENET IOC3 |
- * 0x1F80000 (31.5M) +-----------------------------------------+
- * | IO6 TEXT/DATA/BSS/stack |
- * 0x1C00000 (30M) +-----------------------------------------+
- * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
- * 0x0800000 (28M) +-----------------------------------------+
- * | IP27 PROM TEXT/DATA/BSS/stack |
- * 0x1B00000 (27M) +-----------------------------------------+
- * | IP27 CFG |
- * 0x1A00000 (26M) +-----------------------------------------+
- * | Graphics PROM |
- * 0x1800000 (24M) +-----------------------------------------+
- * | 3rd Party PROM drivers |
- * 0x1600000 (22M) +-----------------------------------------+
- * | |
- * | Free |
- * | |
- * +-----------------------------------------+
- * | UNIX DEBUG Version |
- * 0x190000 (2M--) +-----------------------------------------+
- * | SYMMON |
- * | (For UNIX Debug only) |
- * 0x34000 (208K) +-----------------------------------------+
- * | SYMMON STACK [NUM_CPU_PER_NODE] |
- * | (For UNIX Debug only) |
- * 0x25000 (148K) +-----------------------------------------+
- * | KLCONFIG - II (temp) |
- * | |
- * | ---------------------------- |
- * | |
- * | UNIX NON-DEBUG Version |
- * 0x19000 (100K) +-----------------------------------------+
+ * 0x2000000 (32M) +-----------------------------------------+
+ * | IO6 BUFFERS FOR FLASH ENET IOC3 |
+ * 0x1F80000 (31.5M) +-----------------------------------------+
+ * | IO6 TEXT/DATA/BSS/stack |
+ * 0x1C00000 (30M) +-----------------------------------------+
+ * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M) +-----------------------------------------+
+ * | IP27 PROM TEXT/DATA/BSS/stack |
+ * 0x1B00000 (27M) +-----------------------------------------+
+ * | IP27 CFG |
+ * 0x1A00000 (26M) +-----------------------------------------+
+ * | Graphics PROM |
+ * 0x1800000 (24M) +-----------------------------------------+
+ * | 3rd Party PROM drivers |
+ * 0x1600000 (22M) +-----------------------------------------+
+ * | |
+ * | Free |
+ * | |
+ * +-----------------------------------------+
+ * | UNIX DEBUG Version |
+ * 0x190000 (2M--) +-----------------------------------------+
+ * | SYMMON |
+ * | (For UNIX Debug only) |
+ * 0x34000 (208K) +-----------------------------------------+
+ * | SYMMON STACK [NUM_CPU_PER_NODE] |
+ * | (For UNIX Debug only) |
+ * 0x25000 (148K) +-----------------------------------------+
+ * | KLCONFIG - II (temp) |
+ * | |
+ * | ---------------------------- |
+ * | |
+ * | UNIX NON-DEBUG Version |
+ * 0x19000 (100K) +-----------------------------------------+
*
*
* The lower portion of the memory map contains information that is
* permanent and is used by the IP27PROM, IO6PROM and IRIX.
*
- * 0x19000 (100K) +-----------------------------------------+
- * | |
- * | PI Error Spools (32K) |
- * | |
- * 0x12000 (72K) +-----------------------------------------+
- * | Unused |
- * 0x11c00 (71K) +-----------------------------------------+
- * | CPU 1 NMI Eframe area |
- * 0x11a00 (70.5K) +-----------------------------------------+
- * | CPU 0 NMI Eframe area |
- * 0x11800 (70K) +-----------------------------------------+
- * | CPU 1 NMI Register save area |
- * 0x11600 (69.5K) +-----------------------------------------+
- * | CPU 0 NMI Register save area |
- * 0x11400 (69K) +-----------------------------------------+
- * | GDA (1k) |
- * 0x11000 (68K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
+ * 0x19000 (100K) +-----------------------------------------+
+ * | |
+ * | PI Error Spools (32K) |
+ * | |
+ * 0x12000 (72K) +-----------------------------------------+
+ * | Unused |
+ * 0x11c00 (71K) +-----------------------------------------+
+ * | CPU 1 NMI Eframe area |
+ * 0x11a00 (70.5K) +-----------------------------------------+
+ * | CPU 0 NMI Eframe area |
+ * 0x11800 (70K) +-----------------------------------------+
+ * | CPU 1 NMI Register save area |
+ * 0x11600 (69.5K) +-----------------------------------------+
+ * | CPU 0 NMI Register save area |
+ * 0x11400 (69K) +-----------------------------------------+
+ * | GDA (1k) |
+ * 0x11000 (68K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
* 0x10800 (66k) +-----------------------------------------+
- * | cache error eframe |
- * 0x10400 (65K) +-----------------------------------------+
- * | Exception Handlers (UALIAS copy) |
- * 0x10000 (64K) +-----------------------------------------+
- * | |
- * | |
- * | KLCONFIG - I (permanent) (48K) |
- * | |
- * | |
- * | |
- * 0x4000 (16K) +-----------------------------------------+
- * | NMI Handler (Protected Page) |
- * 0x3000 (12K) +-----------------------------------------+
- * | ARCS PVECTORS (master node only) |
- * 0x2c00 (11K) +-----------------------------------------+
- * | ARCS TVECTORS (master node only) |
- * 0x2800 (10K) +-----------------------------------------+
- * | LAUNCH [NUM_CPU] |
- * 0x2400 (9K) +-----------------------------------------+
- * | Low memory directory (KLDIR) |
- * 0x2000 (8K) +-----------------------------------------+
- * | ARCS SPB (1K) |
- * 0x1000 (4K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
- * 0x800 (2k) +-----------------------------------------+
- * | cache error eframe |
- * 0x400 (1K) +-----------------------------------------+
- * | Exception Handlers |
- * 0x0 (0K) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x10400 (65K) +-----------------------------------------+
+ * | Exception Handlers (UALIAS copy) |
+ * 0x10000 (64K) +-----------------------------------------+
+ * | |
+ * | |
+ * | KLCONFIG - I (permanent) (48K) |
+ * | |
+ * | |
+ * | |
+ * 0x4000 (16K) +-----------------------------------------+
+ * | NMI Handler (Protected Page) |
+ * 0x3000 (12K) +-----------------------------------------+
+ * | ARCS PVECTORS (master node only) |
+ * 0x2c00 (11K) +-----------------------------------------+
+ * | ARCS TVECTORS (master node only) |
+ * 0x2800 (10K) +-----------------------------------------+
+ * | LAUNCH [NUM_CPU] |
+ * 0x2400 (9K) +-----------------------------------------+
+ * | Low memory directory (KLDIR) |
+ * 0x2000 (8K) +-----------------------------------------+
+ * | ARCS SPB (1K) |
+ * 0x1000 (4K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x800 (2k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x400 (1K) +-----------------------------------------+
+ * | Exception Handlers |
+ * 0x0 (0K) +-----------------------------------------+
*/
#ifdef __ASSEMBLY__
@@ -202,13 +202,13 @@
#ifndef __ASSEMBLY__
typedef struct kldir_ent_s {
- u64 magic; /* Indicates validity of entry */
+ u64 magic; /* Indicates validity of entry */
off_t offset; /* Offset from start of node space */
unsigned long pointer; /* Pointer to area in some cases */
- size_t size; /* Size in bytes */
+ size_t size; /* Size in bytes */
u64 count; /* Repeat count if array, 1 if not */
- size_t stride; /* Stride if array, 0 if not */
- char rsvd[16]; /* Pad entry to 0x40 bytes */
+ size_t stride; /* Stride if array, 0 if not */
+ char rsvd[16]; /* Pad entry to 0x40 bytes */
/* NOTE: These 16 bytes are used in the Partition KLDIR
entry to store partition info. Refer to klpart.h for this. */
} kldir_ent_t;
diff --git a/arch/mips/include/asm/sn/launch.h b/arch/mips/include/asm/sn/launch.h
index b7c2226312c6..04226d8d30c4 100644
--- a/arch/mips/include/asm/sn/launch.h
+++ b/arch/mips/include/asm/sn/launch.h
@@ -19,7 +19,7 @@
*
* The master stores launch parameters in the launch structure
* corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor. The slave calls the desired
+ * an interrupt to the slave processor. The slave calls the desired
* function, then returns to the slave loop. The master may poll or wait
* for the slaves to finish.
*
@@ -33,7 +33,7 @@
#define LAUNCH_PADSZ 0xa0
#endif
-#define LAUNCH_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define LAUNCH_OFF_MAGIC 0x00 /* Struct offsets for assembly */
#define LAUNCH_OFF_BUSY 0x08
#define LAUNCH_OFF_CALL 0x10
#define LAUNCH_OFF_CALLC 0x18
@@ -44,7 +44,7 @@
#define LAUNCH_OFF_BEVNORMAL 0x40
#define LAUNCH_OFF_BEVECC 0x48
-#define LAUNCH_STATE_DONE 0 /* Return value of LAUNCH_POLL */
+#define LAUNCH_STATE_DONE 0 /* Return value of LAUNCH_POLL */
#define LAUNCH_STATE_SENT 1
#define LAUNCH_STATE_RECD 2
@@ -65,16 +65,16 @@ typedef int launch_state_t;
typedef void (*launch_proc_t)(u64 call_parm);
typedef struct launch_s {
- volatile u64 magic; /* Magic number */
- volatile u64 busy; /* Slave currently active */
+ volatile u64 magic; /* Magic number */
+ volatile u64 busy; /* Slave currently active */
volatile launch_proc_t call_addr; /* Func. for slave to call */
volatile u64 call_addr_c; /* 1's complement of call_addr*/
volatile u64 call_parm; /* Single parm passed to call*/
volatile void *stack_addr; /* Stack pointer for slave function */
volatile void *gp_addr; /* Global pointer for slave func. */
- volatile char *bevutlb;/* Address of bev utlb ex handler */
- volatile char *bevnormal;/*Address of bev normal ex handler */
- volatile char *bevecc;/* Address of bev cache err handler */
+ volatile char *bevutlb;/* Address of bev utlb ex handler */
+ volatile char *bevnormal;/*Address of bev normal ex handler */
+ volatile char *bevecc;/* Address of bev cache err handler */
volatile char pad[160]; /* Pad to LAUNCH_SIZEOF */
} launch_t;
diff --git a/arch/mips/include/asm/sn/mapped_kernel.h b/arch/mips/include/asm/sn/mapped_kernel.h
index 721496a0bb92..401f3b0eee17 100644
--- a/arch/mips/include/asm/sn/mapped_kernel.h
+++ b/arch/mips/include/asm/sn/mapped_kernel.h
@@ -48,7 +48,7 @@
#endif /* CONFIG_MAPPED_KERNEL */
-#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
-#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
+#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
+#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
#endif /* __ASM_SN_MAPPED_KERNEL_H */
diff --git a/arch/mips/include/asm/sn/nmi.h b/arch/mips/include/asm/sn/nmi.h
index 1af49897d4e1..12ac210f12a1 100644
--- a/arch/mips/include/asm/sn/nmi.h
+++ b/arch/mips/include/asm/sn/nmi.h
@@ -19,7 +19,7 @@
*
* The master stores launch parameters in the launch structure
* corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor. The slave calls the desired
+ * an interrupt to the slave processor. The slave calls the desired
* function, followed by an optional rendezvous function, then returns to
* the slave loop. The master does not wait for the slaves before
* returning.
@@ -31,7 +31,7 @@
#define NMI_MAGIC 0x48414d4d455201
#define NMI_SIZEOF 0x40
-#define NMI_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define NMI_OFF_MAGIC 0x00 /* Struct offsets for assembly */
#define NMI_OFF_FLAGS 0x08
#define NMI_OFF_CALL 0x10
#define NMI_OFF_CALLC 0x18
@@ -53,8 +53,8 @@
typedef struct nmi_s {
volatile unsigned long magic; /* Magic number */
volatile unsigned long flags; /* Combination of flags above */
- volatile void *call_addr; /* Routine for slave to call */
- volatile void *call_addr_c; /* 1's complement of address */
+ volatile void *call_addr; /* Routine for slave to call */
+ volatile void *call_addr_c; /* 1's complement of address */
volatile void *call_parm; /* Single parm passed to call */
volatile unsigned long gmaster; /* Flag true only on global master*/
} nmi_t;
diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h
index b06190093bbc..6b53070f400f 100644
--- a/arch/mips/include/asm/sn/sn0/addrs.h
+++ b/arch/mips/include/asm/sn/sn0/addrs.h
@@ -29,7 +29,7 @@
* chapter of the Hub specification.
*
* NOTE: This header file is included both by C and by assembler source
- * files. Please bracket any language-dependent definitions
+ * files. Please bracket any language-dependent definitions
* appropriately.
*/
@@ -102,14 +102,14 @@
#define BWIN_INDEX_BITS 3
#define BWIN_SIZE (UINT64_CAST 1 << BWIN_SIZE_BITS)
-#define BWIN_SIZEMASK (BWIN_SIZE - 1)
-#define BWIN_WIDGET_MASK 0x7
+#define BWIN_SIZEMASK (BWIN_SIZE - 1)
+#define BWIN_WIDGET_MASK 0x7
#define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
-#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
+#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
(UINT64_CAST(bigwin) << BWIN_SIZE_BITS))
-#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
-#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
+#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
/*
* Verify if addr belongs to large window address of node with "nasid"
*
@@ -120,7 +120,7 @@
*
*/
-#define NODE_BWIN_ADDR(nasid, addr) \
+#define NODE_BWIN_ADDR(nasid, addr) \
(((addr) >= NODE_BWIN_BASE0(nasid)) && \
((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
BWIN_SIZE)))
@@ -129,7 +129,7 @@
* The following define the major position-independent aliases used
* in SN0.
* CALIAS -- Varies in size, points to the first n bytes of memory
- * on the reader's node.
+ * on the reader's node.
*/
#define CALIAS_BASE CAC_BASE
@@ -146,7 +146,7 @@
#ifndef __ASSEMBLY__
#define KERN_NMI_ADDR(nasid, slice) \
- TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + \
+ TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + \
(IP27_NMI_KREGS_CPU_SIZE * (slice)))
#endif /* !__ASSEMBLY__ */
@@ -203,7 +203,7 @@
#define IO6PROM_BASE PHYS_TO_K0(0x01c00000)
#define IO6PROM_SIZE 0x400000
-#define IO6PROM_BASE_MAPPED (UNCAC_BASE | 0x11c00000)
+#define IO6PROM_BASE_MAPPED (UNCAC_BASE | 0x11c00000)
#define IO6DPROM_BASE PHYS_TO_K0(0x01c00000)
#define IO6DPROM_SIZE 0x200000
diff --git a/arch/mips/include/asm/sn/sn0/arch.h b/arch/mips/include/asm/sn/sn0/arch.h
index f734f2007f24..425a67e6a947 100644
--- a/arch/mips/include/asm/sn/sn0/arch.h
+++ b/arch/mips/include/asm/sn/sn0/arch.h
@@ -12,23 +12,23 @@
#define _ASM_SN_SN0_ARCH_H
-#ifndef SN0XXL /* 128 cpu SMP max */
+#ifndef SN0XXL /* 128 cpu SMP max */
/*
* This is the maximum number of nodes that can be part of a kernel.
* Effectively, it's the maximum number of compact node ids (cnodeid_t).
*/
-#define MAX_COMPACT_NODES 64
+#define MAX_COMPACT_NODES 64
/*
* MAXCPUS refers to the maximum number of CPUs in a single kernel.
* This is not necessarily the same as MAXNODES * CPUS_PER_NODE
*/
-#define MAXCPUS 128
+#define MAXCPUS 128
#else /* SN0XXL system */
-#define MAX_COMPACT_NODES 128
-#define MAXCPUS 256
+#define MAX_COMPACT_NODES 128
+#define MAXCPUS 256
#endif /* SN0XXL */
@@ -41,9 +41,9 @@
/*
* MAX_REGIONS refers to the maximum number of hardware partitioned regions.
*/
-#define MAX_REGIONS 64
-#define MAX_NONPREMIUM_REGIONS 16
-#define MAX_PREMIUM_REGIONS MAX_REGIONS
+#define MAX_REGIONS 64
+#define MAX_NONPREMIUM_REGIONS 16
+#define MAX_PREMIUM_REGIONS MAX_REGIONS
/*
* MAX_PARITIONS refers to the maximum number of logically defined
@@ -57,12 +57,12 @@
* Slot constants for SN0
*/
#ifdef CONFIG_SGI_SN_N_MODE
-#define MAX_MEM_SLOTS 16 /* max slots per node */
+#define MAX_MEM_SLOTS 16 /* max slots per node */
#else /* !CONFIG_SGI_SN_N_MODE, assume CONFIG_SGI_SN_M_MODE */
-#define MAX_MEM_SLOTS 32 /* max slots per node */
+#define MAX_MEM_SLOTS 32 /* max slots per node */
#endif /* CONFIG_SGI_SN_M_MODE */
-#define SLOT_SHIFT (27)
+#define SLOT_SHIFT (27)
#define SLOT_MIN_MEM_SIZE (32*1024*1024)
#define CPUS_PER_NODE 2 /* CPUs on a single hub */
diff --git a/arch/mips/include/asm/sn/sn0/hub.h b/arch/mips/include/asm/sn/sn0/hub.h
index 3e228f8e7969..d78dd76d5dcf 100644
--- a/arch/mips/include/asm/sn/sn0/hub.h
+++ b/arch/mips/include/asm/sn/sn0/hub.h
@@ -19,8 +19,8 @@
#define HUB_REV_2_0 2
#define HUB_REV_2_1 3
#define HUB_REV_2_2 4
-#define HUB_REV_2_3 5
-#define HUB_REV_2_4 6
+#define HUB_REV_2_3 5
+#define HUB_REV_2_4 6
#define MAX_HUB_PATH 80
@@ -32,9 +32,9 @@
//#include <asm/sn/sn0/hubcore.h>
/* Translation of uncached attributes */
-#define UATTR_HSPEC 0
-#define UATTR_IO 1
-#define UATTR_MSPEC 2
-#define UATTR_UNCAC 3
+#define UATTR_HSPEC 0
+#define UATTR_IO 1
+#define UATTR_MSPEC 2
+#define UATTR_UNCAC 3
#endif /* _ASM_SN_SN0_HUB_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 46286d8302a7..5998b13e9764 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SGI_SN_SN0_HUBIO_H
-#define _ASM_SGI_SN_SN0_HUBIO_H
+#ifndef _ASM_SGI_SN_SN0_HUBIO_H
+#define _ASM_SGI_SN_SN0_HUBIO_H
/*
* Hub I/O interface registers
@@ -22,7 +22,7 @@
* Slightly friendlier names for some common registers.
* The hardware definitions follow.
*/
-#define IIO_WIDGET IIO_WID /* Widget identification */
+#define IIO_WIDGET IIO_WID /* Widget identification */
#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
#define IIO_WIDGET_TOUT IIO_WRTO /* Widget request timeout */
@@ -37,21 +37,21 @@
#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/
#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
-#define IIO_BTE_CRB_CNT IIO_IBCN /* IO BTE CRB count */
+#define IIO_BTE_CRB_CNT IIO_IBCN /* IO BTE CRB count */
#define IIO_LLP_CSR_IS_UP 0x00002000
-#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
-#define IIO_LLP_CSR_LLP_STAT_SHFT 12
+#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT 12
/* key to IIO_PROTECT_OVRRD */
#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
/* BTE register names */
#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
-#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
+#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
-#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
+#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
#define IIO_BTE_OFF_1 IIO_IBLS_1 - IIO_IBLS_0 /* Offset from base to BTE 1 */
@@ -83,11 +83,11 @@
#define IIO_WSTAT 0x400008 /* Widget status */
#define IIO_WCR 0x400020 /* Widget control */
-#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
-#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
-#define IIO_WSTAT_TXRETRY_MASK (0x7F)
-#define IIO_WSTAT_TXRETRY_SHFT (16)
-#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK (0x7F)
+#define IIO_WSTAT_TXRETRY_SHFT (16)
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
IIO_WSTAT_TXRETRY_MASK)
#define IIO_ILAPR 0x400100 /* Local Access Protection */
@@ -130,12 +130,12 @@
#define IIO_IGFX_INIT(widget, node, cpu, valid) (\
(((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
(((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
- (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
- (((valid) & IIO_IGFX_VLD_MASK) << IIO_IGFX_VLD_SHIFT) )
+ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
+ (((valid) & IIO_IGFX_VLD_MASK) << IIO_IGFX_VLD_SHIFT) )
/* Scratch registers (not all bits available) */
#define IIO_SCRATCH_REG0 0x400150
-#define IIO_SCRATCH_REG1 0x400158
+#define IIO_SCRATCH_REG1 0x400158
#define IIO_SCRATCH_MASK 0x0000000f00f11fff
#define IIO_SCRATCH_BIT0_0 0x0000000800000000
@@ -174,43 +174,43 @@
typedef union hubii_wid_u {
u64 wid_reg_value;
struct {
- u64 wid_rsvd: 32, /* unused */
+ u64 wid_rsvd: 32, /* unused */
wid_rev_num: 4, /* revision number */
wid_part_num: 16, /* the widget type: hub=c101 */
wid_mfg_num: 11, /* Manufacturer id (IBM) */
wid_rsvd1: 1; /* Reserved */
- } wid_fields_s;
+ } wid_fields_s;
} hubii_wid_t;
typedef union hubii_wcr_u {
u64 wcr_reg_value;
struct {
- u64 wcr_rsvd: 41, /* unused */
+ u64 wcr_rsvd: 41, /* unused */
wcr_e_thresh: 5, /* elasticity threshold */
wcr_dir_con: 1, /* widget direct connect */
wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
wcr_xbar_crd: 3, /* LLP crossbar credit */
wcr_rsvd1: 8, /* Reserved */
- wcr_tag_mode: 1, /* Tag mode */
+ wcr_tag_mode: 1, /* Tag mode */
wcr_widget_id: 4; /* LLP crossbar credit */
- } wcr_fields_s;
+ } wcr_fields_s;
} hubii_wcr_t;
-#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
typedef union hubii_wstat_u {
- u64 reg_value;
+ u64 reg_value;
struct {
u64 rsvd1: 31,
crazy: 1, /* Crazy bit */
rsvd2: 8,
- llp_tx_cnt: 8, /* LLP Xmit retry counter */
+ llp_tx_cnt: 8, /* LLP Xmit retry counter */
rsvd3: 6,
tx_max_rtry: 1, /* LLP Retry Timeout Signal */
rsvd4: 2,
xt_tail_to: 1, /* Xtalk Tail Timeout */
- xt_crd_to: 1, /* Xtalk Credit Timeout */
+ xt_crd_to: 1, /* Xtalk Credit Timeout */
pending: 4; /* Pending Requests */
} wstat_fields_s;
} hubii_wstat_t;
@@ -219,50 +219,50 @@ typedef union hubii_wstat_u {
typedef union hubii_ilcsr_u {
u64 icsr_reg_value;
struct {
- u64 icsr_rsvd: 22, /* unused */
- icsr_max_burst: 10, /* max burst */
- icsr_rsvd4: 6, /* reserved */
- icsr_max_retry: 10, /* max retry */
- icsr_rsvd3: 2, /* reserved */
- icsr_lnk_stat: 2, /* link status */
- icsr_bm8: 1, /* Bit mode 8 */
- icsr_llp_en: 1, /* LLP enable bit */
- icsr_rsvd2: 1, /* reserved */
- icsr_wrm_reset: 1, /* Warm reset bit */
+ u64 icsr_rsvd: 22, /* unused */
+ icsr_max_burst: 10, /* max burst */
+ icsr_rsvd4: 6, /* reserved */
+ icsr_max_retry: 10, /* max retry */
+ icsr_rsvd3: 2, /* reserved */
+ icsr_lnk_stat: 2, /* link status */
+ icsr_bm8: 1, /* Bit mode 8 */
+ icsr_llp_en: 1, /* LLP enable bit */
+ icsr_rsvd2: 1, /* reserved */
+ icsr_wrm_reset: 1, /* Warm reset bit */
icsr_rsvd1: 2, /* Data ready offset */
- icsr_null_to: 6; /* Null timeout */
+ icsr_null_to: 6; /* Null timeout */
- } icsr_fields_s;
+ } icsr_fields_s;
} hubii_ilcsr_t;
typedef union hubii_iowa_u {
u64 iowa_reg_value;
struct {
- u64 iowa_rsvd: 48, /* unused */
+ u64 iowa_rsvd: 48, /* unused */
iowa_wxoac: 8, /* xtalk widget access bits */
iowa_rsvd1: 7, /* xtalk widget access bits */
iowa_w0oac: 1; /* xtalk widget access bits */
- } iowa_fields_s;
+ } iowa_fields_s;
} hubii_iowa_t;
typedef union hubii_iiwa_u {
u64 iiwa_reg_value;
struct {
- u64 iiwa_rsvd: 48, /* unused */
+ u64 iiwa_rsvd: 48, /* unused */
iiwa_wxiac: 8, /* hub wid access bits */
iiwa_rsvd1: 7, /* reserved */
iiwa_w0iac: 1; /* hub wid0 access */
- } iiwa_fields_s;
+ } iiwa_fields_s;
} hubii_iiwa_t;
typedef union hubii_illr_u {
u64 illr_reg_value;
struct {
- u64 illr_rsvd: 32, /* unused */
+ u64 illr_rsvd: 32, /* unused */
illr_cb_cnt: 16, /* checkbit error count */
illr_sn_cnt: 16; /* sequence number count */
- } illr_fields_s;
+ } illr_fields_s;
} hubii_illr_t;
/* The structures below are defined to extract and modify the ii
@@ -273,7 +273,7 @@ performance registers */
typedef union io_perf_sel {
u64 perf_sel_reg;
struct {
- u64 perf_rsvd : 48,
+ u64 perf_rsvd : 48,
perf_icct : 8,
perf_ippr1 : 4,
perf_ippr0 : 4;
@@ -301,7 +301,7 @@ typedef union io_perf_cnt {
#define IIO_LLP_SN_MAX 0xffff
/* IO PRB Entries */
-#define IIO_NUM_IPRBS (9)
+#define IIO_NUM_IPRBS (9)
#define IIO_IOPRB_0 0x400198 /* PRB entry 0 */
#define IIO_IOPRB_8 0x4001a0 /* PRB entry 8 */
#define IIO_IOPRB_9 0x4001a8 /* PRB entry 9 */
@@ -318,21 +318,21 @@ typedef union io_perf_cnt {
#define IIO_IMEM 0x4001e8 /* Miscellaneous Enable Mask */
#define IIO_IXTT 0x4001f0 /* Crosstalk tail timeout */
#define IIO_IECLR 0x4001f8 /* IO error clear */
-#define IIO_IBCN 0x400200 /* IO BTE CRB count */
+#define IIO_IBCN 0x400200 /* IO BTE CRB count */
/*
* IIO_IMEM Register fields.
*/
-#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
+#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
/* PIO Read address Table Entries */
#define IIO_IPCA 0x400300 /* PRB Counter adjust */
#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
#define IIO_PRTE_0 0x400308 /* PIO Read address table entry 0 */
#define IIO_PRTE(_x) (IIO_PRTE_0 + (8 * (_x)))
-#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
#define IIO_IPDR 0x400388 /* PIO table entry deallocation */
#define IIO_ICDR 0x400390 /* CRB Entry Deallocation */
#define IIO_IFDR 0x400398 /* IOQ FIFO Depth */
@@ -369,35 +369,35 @@ typedef union io_perf_cnt {
/*
* IIO PIO Deallocation register field masks : (IIO_IPDR)
*/
-#define IIO_IPDR_PND (1 << 4)
+#define IIO_IPDR_PND (1 << 4)
/*
* IIO CRB deallocation register field masks: (IIO_ICDR)
*/
-#define IIO_ICDR_PND (1 << 4)
+#define IIO_ICDR_PND (1 << 4)
/*
* IIO CRB control register Fields: IIO_ICCR
*/
-#define IIO_ICCR_PENDING (0x10000)
-#define IIO_ICCR_CMD_MASK (0xFF)
-#define IIO_ICCR_CMD_SHFT (7)
-#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
-#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
-#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
-#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
+#define IIO_ICCR_PENDING (0x10000)
+#define IIO_ICCR_CMD_MASK (0xFF)
+#define IIO_ICCR_CMD_SHFT (7)
+#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
+#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
* via a WB
*/
-#define IIO_ICCR_CMD_FLUSH (0x800)
+#define IIO_ICCR_CMD_FLUSH (0x800)
/*
* CRB manipulation macros
* The CRB macros are slightly complicated, since there are up to
- * four registers associated with each CRB entry.
+ * four registers associated with each CRB entry.
*/
#define IIO_NUM_CRBS 15 /* Number of CRBs */
-#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
-#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
#define IIO_ICRB_OFFSET 8
#define IIO_ICRB_0 0x400400
/* XXX - This is now tuneable:
@@ -405,9 +405,9 @@ typedef union io_perf_cnt {
*/
#define IIO_ICRB_A(_x) (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
-#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
+#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
#define IIO_ICRB_C(_x) (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
-#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
+#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
/* XXX - IBUE register coming for Hub 2 */
@@ -444,16 +444,16 @@ typedef union io_perf_cnt {
typedef union icrba_u {
u64 reg_value;
struct {
- u64 resvd: 6,
+ u64 resvd: 6,
stall_bte0: 1, /* Stall BTE 0 */
stall_bte1: 1, /* Stall BTE 1 */
error: 1, /* CRB has an error */
- ecode: 3, /* Error Code */
+ ecode: 3, /* Error Code */
lnetuce: 1, /* SN0net Uncorrectable error */
- mark: 1, /* CRB Has been marked */
+ mark: 1, /* CRB Has been marked */
xerr: 1, /* Error bit set in xtalk header */
sidn: 4, /* SIDN field from xtalk */
- tnum: 5, /* TNUM field in xtalk */
+ tnum: 5, /* TNUM field in xtalk */
addr: 38, /* Address of request */
valid: 1, /* Valid status */
iow: 1; /* IO Write operation */
@@ -467,15 +467,15 @@ typedef union h1_icrba_u {
u64 reg_value;
struct {
- u64 resvd: 6,
- unused: 1, /* Unused but RW!! */
+ u64 resvd: 6,
+ unused: 1, /* Unused but RW!! */
error: 1, /* CRB has an error */
- ecode: 4, /* Error Code */
+ ecode: 4, /* Error Code */
lnetuce: 1, /* SN0net Uncorrectable error */
- mark: 1, /* CRB Has been marked */
+ mark: 1, /* CRB Has been marked */
xerr: 1, /* Error bit set in xtalk header */
sidn: 4, /* SIDN field from xtalk */
- tnum: 5, /* TNUM field in xtalk */
+ tnum: 5, /* TNUM field in xtalk */
addr: 38, /* Address of request */
valid: 1, /* Valid status */
iow: 1; /* IO Write operation */
@@ -488,21 +488,21 @@ typedef union h1_icrba_u {
#endif /* !__ASSEMBLY__ */
-#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
/*
* values for "ecode" field
*/
-#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
-#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
-#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
* e.g. WINV to a Read only line.
*/
-#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
-#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
-#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
-#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
-#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
@@ -513,10 +513,10 @@ typedef union h1_icrba_u {
typedef union icrbb_u {
u64 reg_value;
struct {
- u64 rsvd1: 5,
- btenum: 1, /* BTE to which entry belongs to */
- cohtrans: 1, /* Coherent transaction */
- xtsize: 2, /* Xtalk operation size
+ u64 rsvd1: 5,
+ btenum: 1, /* BTE to which entry belongs to */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
* 0: Double Word
* 1: 32 Bytes.
* 2: 128 Bytes,
@@ -526,11 +526,11 @@ typedef union icrbb_u {
srcinit: 2, /* Source Initiator:
* See below for field values.
*/
- useold: 1, /* Use OLD command for processing */
+ useold: 1, /* Use OLD command for processing */
imsgtype: 2, /* Incoming message type
* see below for field values
*/
- imsg: 8, /* Incoming message */
+ imsg: 8, /* Incoming message */
initator: 3, /* Initiator of original request
* See below for field values.
*/
@@ -538,12 +538,12 @@ typedef union icrbb_u {
* See below for field values.
*/
rsvd2: 7,
- ackcnt: 11, /* Invalidate ack count */
+ ackcnt: 11, /* Invalidate ack count */
resp: 1, /* data response given to processor */
- ack: 1, /* indicates data ack received */
+ ack: 1, /* indicates data ack received */
hold: 1, /* entry is gathering inval acks */
wb_pend:1, /* waiting for writeback to complete */
- intvn: 1, /* Intervention */
+ intvn: 1, /* Intervention */
stall_ib: 1, /* Stall Ibuf (from crosstalk) */
stall_intr: 1; /* Stall internal interrupts */
} icrbb_field_s;
@@ -556,9 +556,9 @@ typedef union h1_icrbb_u {
u64 reg_value;
struct {
u64 rsvd1: 5,
- btenum: 1, /* BTE to which entry belongs to */
- cohtrans: 1, /* Coherent transaction */
- xtsize: 2, /* Xtalk operation size
+ btenum: 1, /* BTE to which entry belongs to */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
* 0: Double Word
* 1: 32 Bytes.
* 2: 128 Bytes,
@@ -568,99 +568,99 @@ typedef union h1_icrbb_u {
srcinit: 2, /* Source Initiator:
* See below for field values.
*/
- useold: 1, /* Use OLD command for processing */
+ useold: 1, /* Use OLD command for processing */
imsgtype: 2, /* Incoming message type
* see below for field values
*/
- imsg: 8, /* Incoming message */
+ imsg: 8, /* Incoming message */
initator: 3, /* Initiator of original request
* See below for field values.
*/
- rsvd2: 1,
+ rsvd2: 1,
pcache: 1, /* entry belongs to partial cache */
reqtype: 5, /* Identifies type of request
* See below for field values.
*/
- stl_ib: 1, /* stall Ibus coming from xtalk */
+ stl_ib: 1, /* stall Ibus coming from xtalk */
stl_intr: 1, /* Stall internal interrupts */
- stl_bte0: 1, /* Stall BTE 0 */
+ stl_bte0: 1, /* Stall BTE 0 */
stl_bte1: 1, /* Stall BTE 1 */
- intrvn: 1, /* Req was target of intervention */
- ackcnt: 11, /* Invalidate ack count */
+ intrvn: 1, /* Req was target of intervention */
+ ackcnt: 11, /* Invalidate ack count */
resp: 1, /* data response given to processor */
- ack: 1, /* indicates data ack received */
+ ack: 1, /* indicates data ack received */
hold: 1, /* entry is gathering inval acks */
wb_pend:1, /* waiting for writeback to complete */
- sleep: 1, /* xtalk req sleeping till IO-sync */
+ sleep: 1, /* xtalk req sleeping till IO-sync */
pnd_reply: 1, /* replies not issued due to IOQ full */
pnd_req: 1; /* reqs not issued due to IOQ full */
} h1_icrbb_field_s;
} h1_icrbb_t;
-#define b_imsgtype icrbb_field_s.imsgtype
-#define b_btenum icrbb_field_s.btenum
-#define b_cohtrans icrbb_field_s.cohtrans
-#define b_xtsize icrbb_field_s.xtsize
-#define b_srcnode icrbb_field_s.srcnode
-#define b_srcinit icrbb_field_s.srcinit
-#define b_imsgtype icrbb_field_s.imsgtype
-#define b_imsg icrbb_field_s.imsg
-#define b_initiator icrbb_field_s.initiator
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_btenum icrbb_field_s.btenum
+#define b_cohtrans icrbb_field_s.cohtrans
+#define b_xtsize icrbb_field_s.xtsize
+#define b_srcnode icrbb_field_s.srcnode
+#define b_srcinit icrbb_field_s.srcinit
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_imsg icrbb_field_s.imsg
+#define b_initiator icrbb_field_s.initiator
#endif /* !__ASSEMBLY__ */
/*
* values for field xtsize
*/
-#define IIO_ICRB_XTSIZE_DW 0 /* Xtalk operation size is 8 bytes */
-#define IIO_ICRB_XTSIZE_32 1 /* Xtalk operation size is 32 bytes */
-#define IIO_ICRB_XTSIZE_128 2 /* Xtalk operation size is 128 bytes */
+#define IIO_ICRB_XTSIZE_DW 0 /* Xtalk operation size is 8 bytes */
+#define IIO_ICRB_XTSIZE_32 1 /* Xtalk operation size is 32 bytes */
+#define IIO_ICRB_XTSIZE_128 2 /* Xtalk operation size is 128 bytes */
/*
* values for field srcinit
*/
-#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
-#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
-#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
-#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
+#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
+#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
+#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
+#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
/*
* Values for field imsgtype
*/
-#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
-#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
-#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
-#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
/*
* values for field initiator.
*/
-#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
-#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
-#define IIO_ICRB_INIT_SN0NET 0x2 /* Message originated in SN0net */
-#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
-#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN0NET 0x2 /* Message originated in SN0net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
/*
* Values for field reqtype.
*/
/* XXX - Need to fix this for Hub 2 */
-#define IIO_ICRB_REQ_DWRD 0 /* Request type double word */
-#define IIO_ICRB_REQ_QCLRD 1 /* Request is Qrtr Cache line Rd */
-#define IIO_ICRB_REQ_BLKRD 2 /* Request is block read */
-#define IIO_ICRB_REQ_RSHU 6 /* Request is BTE block read */
-#define IIO_ICRB_REQ_REXU 7 /* request is BTE Excl Read */
-#define IIO_ICRB_REQ_RDEX 8 /* Request is Read Exclusive */
-#define IIO_ICRB_REQ_WINC 9 /* Request is Write Invalidate */
-#define IIO_ICRB_REQ_BWINV 10 /* Request is BTE Winv */
-#define IIO_ICRB_REQ_PIORD 11 /* Request is PIO read */
-#define IIO_ICRB_REQ_PIOWR 12 /* Request is PIO Write */
-#define IIO_ICRB_REQ_PRDM 13 /* Request is Fetch&Op */
-#define IIO_ICRB_REQ_PWRM 14 /* Request is Store &Op */
-#define IIO_ICRB_REQ_PTPWR 15 /* Request is Peer to peer */
-#define IIO_ICRB_REQ_WB 16 /* Request is Write back */
-#define IIO_ICRB_REQ_DEX 17 /* Retained DEX Cache line */
+#define IIO_ICRB_REQ_DWRD 0 /* Request type double word */
+#define IIO_ICRB_REQ_QCLRD 1 /* Request is Qrtr Cache line Rd */
+#define IIO_ICRB_REQ_BLKRD 2 /* Request is block read */
+#define IIO_ICRB_REQ_RSHU 6 /* Request is BTE block read */
+#define IIO_ICRB_REQ_REXU 7 /* request is BTE Excl Read */
+#define IIO_ICRB_REQ_RDEX 8 /* Request is Read Exclusive */
+#define IIO_ICRB_REQ_WINC 9 /* Request is Write Invalidate */
+#define IIO_ICRB_REQ_BWINV 10 /* Request is BTE Winv */
+#define IIO_ICRB_REQ_PIORD 11 /* Request is PIO read */
+#define IIO_ICRB_REQ_PIOWR 12 /* Request is PIO Write */
+#define IIO_ICRB_REQ_PRDM 13 /* Request is Fetch&Op */
+#define IIO_ICRB_REQ_PWRM 14 /* Request is Store &Op */
+#define IIO_ICRB_REQ_PTPWR 15 /* Request is Peer to peer */
+#define IIO_ICRB_REQ_WB 16 /* Request is Write back */
+#define IIO_ICRB_REQ_DEX 17 /* Retained DEX Cache line */
/*
* Fields in CRB Register C
@@ -674,8 +674,8 @@ typedef union icrbc_s {
u64 rsvd: 6,
sleep: 1,
pricnt: 4, /* Priority count sent with Read req */
- pripsc: 4, /* Priority Pre scalar */
- bteop: 1, /* BTE Operation */
+ pripsc: 4, /* Priority Pre scalar */
+ bteop: 1, /* BTE Operation */
push_be: 34, /* Push address Byte enable
* Holds push addr, if CRB is for BTE
* If CRB belongs to Partial cache,
@@ -684,20 +684,20 @@ typedef union icrbc_s {
*/
suppl: 11, /* Supplemental field */
barrop: 1, /* Barrier Op bit set in xtalk req */
- doresp: 1, /* Xtalk req needs a response */
- gbr: 1; /* GBR bit set in xtalk packet */
+ doresp: 1, /* Xtalk req needs a response */
+ gbr: 1; /* GBR bit set in xtalk packet */
} icrbc_field_s;
} icrbc_t;
-#define c_pricnt icrbc_field_s.pricnt
-#define c_pripsc icrbc_field_s.pripsc
-#define c_bteop icrbc_field_s.bteop
-#define c_bteaddr icrbc_field_s.push_be /* push_be field has 2 names */
-#define c_benable icrbc_field_s.push_be /* push_be field has 2 names */
-#define c_suppl icrbc_field_s.suppl
-#define c_barrop icrbc_field_s.barrop
-#define c_doresp icrbc_field_s.doresp
-#define c_gbr icrbc_field_s.gbr
+#define c_pricnt icrbc_field_s.pricnt
+#define c_pripsc icrbc_field_s.pripsc
+#define c_bteop icrbc_field_s.bteop
+#define c_bteaddr icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_benable icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_suppl icrbc_field_s.suppl
+#define c_barrop icrbc_field_s.barrop
+#define c_doresp icrbc_field_s.doresp
+#define c_gbr icrbc_field_s.gbr
#endif /* !__ASSEMBLY__ */
/*
@@ -708,31 +708,31 @@ typedef union icrbc_s {
typedef union icrbd_s {
u64 reg_value;
struct {
- u64 rsvd: 38,
+ u64 rsvd: 38,
toutvld: 1, /* Timeout in progress for this CRB */
- ctxtvld: 1, /* Context field below is valid */
+ ctxtvld: 1, /* Context field below is valid */
rsvd2: 1,
- context: 15, /* Bit vector:
+ context: 15, /* Bit vector:
* Has a bit set for each CRB entry
* which needs to be deallocated
* before this CRB entry is processed.
* Set only for barrier operations.
*/
- timeout: 8; /* Timeout Upper 8 bits */
+ timeout: 8; /* Timeout Upper 8 bits */
} icrbd_field_s;
} icrbd_t;
-#define icrbd_toutvld icrbd_field_s.toutvld
-#define icrbd_ctxtvld icrbd_field_s.ctxtvld
-#define icrbd_context icrbd_field_s.context
+#define icrbd_toutvld icrbd_field_s.toutvld
+#define icrbd_ctxtvld icrbd_field_s.ctxtvld
+#define icrbd_context icrbd_field_s.context
typedef union hubii_ifdr_u {
u64 hi_ifdr_value;
struct {
u64 ifdr_rsvd: 49,
- ifdr_maxrp: 7,
- ifdr_rsvd1: 1,
+ ifdr_maxrp: 7,
+ ifdr_rsvd1: 1,
ifdr_maxrq: 7;
} hi_ifdr_fields;
} hubii_ifdr_t;
@@ -789,26 +789,26 @@ typedef union hubii_ifdr_u {
typedef union iprte_a {
u64 entry;
struct {
- u64 rsvd1 : 7, /* Reserved field */
- valid : 1, /* Maps to a timeout entry */
- rsvd2 : 1,
- srcnode : 9, /* Node which did this PIO */
- initiator : 2, /* If T5A or T5B or IO */
- rsvd3 : 3,
- addr : 38, /* Physical address of PIO */
- rsvd4 : 3;
+ u64 rsvd1 : 7, /* Reserved field */
+ valid : 1, /* Maps to a timeout entry */
+ rsvd2 : 1,
+ srcnode : 9, /* Node which did this PIO */
+ initiator : 2, /* If T5A or T5B or IO */
+ rsvd3 : 3,
+ addr : 38, /* Physical address of PIO */
+ rsvd4 : 3;
} iprte_fields;
} iprte_a_t;
-#define iprte_valid iprte_fields.valid
-#define iprte_timeout iprte_fields.timeout
-#define iprte_srcnode iprte_fields.srcnode
-#define iprte_init iprte_fields.initiator
-#define iprte_addr iprte_fields.addr
+#define iprte_valid iprte_fields.valid
+#define iprte_timeout iprte_fields.timeout
+#define iprte_srcnode iprte_fields.srcnode
+#define iprte_init iprte_fields.initiator
+#define iprte_addr iprte_fields.addr
#endif /* !__ASSEMBLY__ */
-#define IPRTE_ADDRSHFT 3
+#define IPRTE_ADDRSHFT 3
/*
* Hub IIO PRB Register format.
@@ -823,14 +823,14 @@ typedef union iprte_a {
typedef union iprb_u {
u64 reg_value;
struct {
- u64 rsvd1: 15,
+ u64 rsvd1: 15,
error: 1, /* Widget rcvd wr resp pkt w/ error */
- ovflow: 5, /* Overflow count. perf measurement */
+ ovflow: 5, /* Overflow count. perf measurement */
fire_and_forget: 1, /* Launch Write without response */
mode: 2, /* Widget operation Mode */
rsvd2: 2,
bnakctr: 14,
- rsvd3: 2,
+ rsvd3: 2,
anakctr: 14,
xtalkctr: 8;
} iprb_fields_s;
@@ -838,13 +838,13 @@ typedef union iprb_u {
#define iprb_regval reg_value
-#define iprb_error iprb_fields_s.error
-#define iprb_ovflow iprb_fields_s.ovflow
-#define iprb_ff iprb_fields_s.fire_and_forget
-#define iprb_mode iprb_fields_s.mode
-#define iprb_bnakctr iprb_fields_s.bnakctr
-#define iprb_anakctr iprb_fields_s.anakctr
-#define iprb_xtalkctr iprb_fields_s.xtalkctr
+#define iprb_error iprb_fields_s.error
+#define iprb_ovflow iprb_fields_s.ovflow
+#define iprb_ff iprb_fields_s.fire_and_forget
+#define iprb_mode iprb_fields_s.mode
+#define iprb_bnakctr iprb_fields_s.bnakctr
+#define iprb_anakctr iprb_fields_s.anakctr
+#define iprb_xtalkctr iprb_fields_s.xtalkctr
#endif /* !__ASSEMBLY__ */
@@ -853,10 +853,10 @@ typedef union iprb_u {
* For details of the meanings of NAK and Accept, refer the PIO flow
* document
*/
-#define IPRB_MODE_NORMAL (0)
-#define IPRB_MODE_COLLECT_A (1) /* PRB in collect A mode */
-#define IPRB_MODE_SERVICE_A (2) /* NAK B and Accept A */
-#define IPRB_MODE_SERVICE_B (3) /* NAK A and Accept B */
+#define IPRB_MODE_NORMAL (0)
+#define IPRB_MODE_COLLECT_A (1) /* PRB in collect A mode */
+#define IPRB_MODE_SERVICE_A (2) /* NAK B and Accept A */
+#define IPRB_MODE_SERVICE_B (3) /* NAK A and Accept B */
/*
* IO CRB entry C_A to E_A : Partial (cache) CRBS
@@ -865,31 +865,31 @@ typedef union iprb_u {
typedef union icrbp_a {
u64 ip_reg; /* the entire register value */
struct {
- u64 error: 1, /* 63, error occurred */
- ln_uce: 1, /* 62: uncorrectable memory */
- ln_ae: 1, /* 61: protection violation */
- ln_werr:1, /* 60: write access error */
- ln_aerr:1, /* 59: sn0net: Address error */
- ln_perr:1, /* 58: sn0net: poison error */
- timeout:1, /* 57: CRB timed out */
- l_bdpkt:1, /* 56: truncated pkt on sn0net */
- c_bdpkt:1, /* 55: truncated pkt on xtalk */
- c_err: 1, /* 54: incoming xtalk req, err set*/
+ u64 error: 1, /* 63, error occurred */
+ ln_uce: 1, /* 62: uncorrectable memory */
+ ln_ae: 1, /* 61: protection violation */
+ ln_werr:1, /* 60: write access error */
+ ln_aerr:1, /* 59: sn0net: Address error */
+ ln_perr:1, /* 58: sn0net: poison error */
+ timeout:1, /* 57: CRB timed out */
+ l_bdpkt:1, /* 56: truncated pkt on sn0net */
+ c_bdpkt:1, /* 55: truncated pkt on xtalk */
+ c_err: 1, /* 54: incoming xtalk req, err set*/
rsvd1: 12, /* 53-42: reserved */
- valid: 1, /* 41: Valid status */
+ valid: 1, /* 41: Valid status */
sidn: 4, /* 40-37: SIDN field of xtalk rqst */
tnum: 5, /* 36-32: TNUM of xtalk request */
- bo: 1, /* 31: barrier op set in xtalk rqst*/
- resprqd:1, /* 30: xtalk rqst requires response*/
- gbr: 1, /* 29: gbr bit set in xtalk rqst */
+ bo: 1, /* 31: barrier op set in xtalk rqst*/
+ resprqd:1, /* 30: xtalk rqst requires response*/
+ gbr: 1, /* 29: gbr bit set in xtalk rqst */
size: 2, /* 28-27: size of xtalk request */
excl: 4, /* 26-23: exclusive bit(s) */
stall: 3, /* 22-20: stall (xtalk, bte 0/1) */
- intvn: 1, /* 19: rqst target of intervention*/
- resp: 1, /* 18: Data response given to t5 */
- ack: 1, /* 17: Data ack received. */
- hold: 1, /* 16: crb gathering invalidate acks*/
- wb: 1, /* 15: writeback pending. */
+ intvn: 1, /* 19: rqst target of intervention*/
+ resp: 1, /* 18: Data response given to t5 */
+ ack: 1, /* 17: Data ack received. */
+ hold: 1, /* 16: crb gathering invalidate acks*/
+ wb: 1, /* 15: writeback pending. */
ack_cnt:11, /* 14-04: counter of invalidate acks*/
tscaler:4; /* 03-00: Timeout prescaler */
} ip_fmt;
@@ -908,13 +908,13 @@ typedef union hubii_idsr {
u64 iin_reg;
struct {
u64 rsvd1 : 35,
- isent : 1,
- rsvd2 : 3,
- ienable: 1,
- rsvd : 7,
- node : 9,
- rsvd4 : 1,
- level : 7;
+ isent : 1,
+ rsvd2 : 3,
+ ienable: 1,
+ rsvd : 7,
+ node : 9,
+ rsvd4 : 1,
+ level : 7;
} iin_fmt;
} hubii_idsr_t;
#endif /* !__ASSEMBLY__ */
@@ -966,7 +966,7 @@ typedef union hubii_idsr {
* Value of 3 is required by Xbow 1.1
* We may be able to increase this to 4 with Xbow 1.2.
*/
-#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_CREDIT 3
#define HUBII_XBOW_REV2_CREDIT 4
#endif /* _ASM_SGI_SN_SN0_HUBIO_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubmd.h b/arch/mips/include/asm/sn/sn0/hubmd.h
index 14c225d80664..305d002be182 100644
--- a/arch/mips/include/asm/sn/sn0/hubmd.h
+++ b/arch/mips/include/asm/sn/sn0/hubmd.h
@@ -8,16 +8,16 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SN_SN0_HUBMD_H
-#define _ASM_SN_SN0_HUBMD_H
+#ifndef _ASM_SN_SN0_HUBMD_H
+#define _ASM_SN_SN0_HUBMD_H
/*
* Hub Memory/Directory interface registers
*/
-#define CACHE_SLINE_SIZE 128 /* Secondary cache line size on SN0 */
+#define CACHE_SLINE_SIZE 128 /* Secondary cache line size on SN0 */
-#define MAX_REGIONS 64
+#define MAX_REGIONS 64
/* Hardware page size and shift */
@@ -34,62 +34,62 @@
#define MD_IO_PROT_OVRRD 0x200008 /* Clear my bit in MD_IO_PROTECT */
#define MD_HSPEC_PROTECT 0x200010 /* BDDIR, LBOOT, RBOOT protection */
#define MD_MEMORY_CONFIG 0x200018 /* Memory/Directory DIMM control */
-#define MD_REFRESH_CONTROL 0x200020 /* Memory/Directory refresh ctrl */
-#define MD_FANDOP_CAC_STAT 0x200028 /* Fetch-and-op cache status */
-#define MD_MIG_DIFF_THRESH 0x200030 /* Page migr. count diff thresh. */
-#define MD_MIG_VALUE_THRESH 0x200038 /* Page migr. count abs. thresh. */
-#define MD_MIG_CANDIDATE 0x200040 /* Latest page migration candidate */
-#define MD_MIG_CANDIDATE_CLR 0x200048 /* Clear page migration candidate */
-#define MD_DIR_ERROR 0x200050 /* Directory DIMM error */
-#define MD_DIR_ERROR_CLR 0x200058 /* Directory DIMM error clear */
-#define MD_PROTOCOL_ERROR 0x200060 /* Directory protocol error */
+#define MD_REFRESH_CONTROL 0x200020 /* Memory/Directory refresh ctrl */
+#define MD_FANDOP_CAC_STAT 0x200028 /* Fetch-and-op cache status */
+#define MD_MIG_DIFF_THRESH 0x200030 /* Page migr. count diff thresh. */
+#define MD_MIG_VALUE_THRESH 0x200038 /* Page migr. count abs. thresh. */
+#define MD_MIG_CANDIDATE 0x200040 /* Latest page migration candidate */
+#define MD_MIG_CANDIDATE_CLR 0x200048 /* Clear page migration candidate */
+#define MD_DIR_ERROR 0x200050 /* Directory DIMM error */
+#define MD_DIR_ERROR_CLR 0x200058 /* Directory DIMM error clear */
+#define MD_PROTOCOL_ERROR 0x200060 /* Directory protocol error */
#define MD_PROTOCOL_ERROR_CLR 0x200068 /* Directory protocol error clear */
-#define MD_MEM_ERROR 0x200070 /* Memory DIMM error */
-#define MD_MEM_ERROR_CLR 0x200078 /* Memory DIMM error clear */
-#define MD_MISC_ERROR 0x200080 /* Miscellaneous MD error */
+#define MD_MEM_ERROR 0x200070 /* Memory DIMM error */
+#define MD_MEM_ERROR_CLR 0x200078 /* Memory DIMM error clear */
+#define MD_MISC_ERROR 0x200080 /* Miscellaneous MD error */
#define MD_MISC_ERROR_CLR 0x200088 /* Miscellaneous MD error clear */
#define MD_MEM_DIMM_INIT 0x200090 /* Memory DIMM mode initization. */
-#define MD_DIR_DIMM_INIT 0x200098 /* Directory DIMM mode init. */
-#define MD_MOQ_SIZE 0x2000a0 /* MD outgoing queue size */
+#define MD_DIR_DIMM_INIT 0x200098 /* Directory DIMM mode init. */
+#define MD_MOQ_SIZE 0x2000a0 /* MD outgoing queue size */
#define MD_MLAN_CTL 0x2000a8 /* NIC (Microlan) control register */
-#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
-#define MD_PERF_CNT0 0x210010 /* Performance counter 0 */
-#define MD_PERF_CNT1 0x210018 /* Performance counter 1 */
-#define MD_PERF_CNT2 0x210020 /* Performance counter 2 */
-#define MD_PERF_CNT3 0x210028 /* Performance counter 3 */
-#define MD_PERF_CNT4 0x210030 /* Performance counter 4 */
-#define MD_PERF_CNT5 0x210038 /* Performance counter 5 */
-
-#define MD_UREG0_0 0x220000 /* uController/UART 0 register */
-#define MD_UREG0_1 0x220008 /* uController/UART 0 register */
-#define MD_UREG0_2 0x220010 /* uController/UART 0 register */
-#define MD_UREG0_3 0x220018 /* uController/UART 0 register */
-#define MD_UREG0_4 0x220020 /* uController/UART 0 register */
-#define MD_UREG0_5 0x220028 /* uController/UART 0 register */
-#define MD_UREG0_6 0x220030 /* uController/UART 0 register */
-#define MD_UREG0_7 0x220038 /* uController/UART 0 register */
+#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
+#define MD_PERF_CNT0 0x210010 /* Performance counter 0 */
+#define MD_PERF_CNT1 0x210018 /* Performance counter 1 */
+#define MD_PERF_CNT2 0x210020 /* Performance counter 2 */
+#define MD_PERF_CNT3 0x210028 /* Performance counter 3 */
+#define MD_PERF_CNT4 0x210030 /* Performance counter 4 */
+#define MD_PERF_CNT5 0x210038 /* Performance counter 5 */
+
+#define MD_UREG0_0 0x220000 /* uController/UART 0 register */
+#define MD_UREG0_1 0x220008 /* uController/UART 0 register */
+#define MD_UREG0_2 0x220010 /* uController/UART 0 register */
+#define MD_UREG0_3 0x220018 /* uController/UART 0 register */
+#define MD_UREG0_4 0x220020 /* uController/UART 0 register */
+#define MD_UREG0_5 0x220028 /* uController/UART 0 register */
+#define MD_UREG0_6 0x220030 /* uController/UART 0 register */
+#define MD_UREG0_7 0x220038 /* uController/UART 0 register */
#define MD_SLOTID_USTAT 0x220048 /* Hub slot ID & UART/uCtlr status */
-#define MD_LED0 0x220050 /* Eight-bit LED for CPU A */
-#define MD_LED1 0x220058 /* Eight-bit LED for CPU B */
-
-#define MD_UREG1_0 0x220080 /* uController/UART 1 register */
-#define MD_UREG1_1 0x220088 /* uController/UART 1 register */
-#define MD_UREG1_2 0x220090 /* uController/UART 1 register */
-#define MD_UREG1_3 0x220098 /* uController/UART 1 register */
-#define MD_UREG1_4 0x2200a0 /* uController/UART 1 register */
-#define MD_UREG1_5 0x2200a8 /* uController/UART 1 register */
-#define MD_UREG1_6 0x2200b0 /* uController/UART 1 register */
-#define MD_UREG1_7 0x2200b8 /* uController/UART 1 register */
-#define MD_UREG1_8 0x2200c0 /* uController/UART 1 register */
-#define MD_UREG1_9 0x2200c8 /* uController/UART 1 register */
-#define MD_UREG1_10 0x2200d0 /* uController/UART 1 register */
-#define MD_UREG1_11 0x2200d8 /* uController/UART 1 register */
-#define MD_UREG1_12 0x2200e0 /* uController/UART 1 register */
-#define MD_UREG1_13 0x2200e8 /* uController/UART 1 register */
-#define MD_UREG1_14 0x2200f0 /* uController/UART 1 register */
-#define MD_UREG1_15 0x2200f8 /* uController/UART 1 register */
+#define MD_LED0 0x220050 /* Eight-bit LED for CPU A */
+#define MD_LED1 0x220058 /* Eight-bit LED for CPU B */
+
+#define MD_UREG1_0 0x220080 /* uController/UART 1 register */
+#define MD_UREG1_1 0x220088 /* uController/UART 1 register */
+#define MD_UREG1_2 0x220090 /* uController/UART 1 register */
+#define MD_UREG1_3 0x220098 /* uController/UART 1 register */
+#define MD_UREG1_4 0x2200a0 /* uController/UART 1 register */
+#define MD_UREG1_5 0x2200a8 /* uController/UART 1 register */
+#define MD_UREG1_6 0x2200b0 /* uController/UART 1 register */
+#define MD_UREG1_7 0x2200b8 /* uController/UART 1 register */
+#define MD_UREG1_8 0x2200c0 /* uController/UART 1 register */
+#define MD_UREG1_9 0x2200c8 /* uController/UART 1 register */
+#define MD_UREG1_10 0x2200d0 /* uController/UART 1 register */
+#define MD_UREG1_11 0x2200d8 /* uController/UART 1 register */
+#define MD_UREG1_12 0x2200e0 /* uController/UART 1 register */
+#define MD_UREG1_13 0x2200e8 /* uController/UART 1 register */
+#define MD_UREG1_14 0x2200f0 /* uController/UART 1 register */
+#define MD_UREG1_15 0x2200f8 /* uController/UART 1 register */
#ifdef CONFIG_SGI_SN_N_MODE
#define MD_MEM_BANKS 4 /* 4 banks of memory max in N mode */
@@ -106,14 +106,14 @@
* Bits not used by the MD are used by software.
*/
-#define MD_SIZE_EMPTY 0 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_EMPTY 0 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_8MB 1
#define MD_SIZE_16MB 2
#define MD_SIZE_32MB 3 /* Broken in Hub 1 */
-#define MD_SIZE_64MB 4 /* Valid in MEMORY_CONFIG */
-#define MD_SIZE_128MB 5 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_64MB 4 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_128MB 5 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_256MB 6
-#define MD_SIZE_512MB 7 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_512MB 7 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_1GB 8
#define MD_SIZE_2GB 9
#define MD_SIZE_4GB 10
@@ -207,16 +207,16 @@
/* MD_SLOTID_USTAT bit definitions */
-#define MSU_CORECLK_TST_SHFT 7 /* You don't wanna know */
+#define MSU_CORECLK_TST_SHFT 7 /* You don't wanna know */
#define MSU_CORECLK_TST_MASK (UINT64_CAST 1 << 7)
#define MSU_CORECLK_TST (UINT64_CAST 1 << 7)
-#define MSU_CORECLK_SHFT 6 /* You don't wanna know */
+#define MSU_CORECLK_SHFT 6 /* You don't wanna know */
#define MSU_CORECLK_MASK (UINT64_CAST 1 << 6)
#define MSU_CORECLK (UINT64_CAST 1 << 6)
-#define MSU_NETSYNC_SHFT 5 /* You don't wanna know */
+#define MSU_NETSYNC_SHFT 5 /* You don't wanna know */
#define MSU_NETSYNC_MASK (UINT64_CAST 1 << 5)
#define MSU_NETSYNC (UINT64_CAST 1 << 5)
-#define MSU_FPROMRDY_SHFT 4 /* Flash PROM ready bit */
+#define MSU_FPROMRDY_SHFT 4 /* Flash PROM ready bit */
#define MSU_FPROMRDY_MASK (UINT64_CAST 1 << 4)
#define MSU_FPROMRDY (UINT64_CAST 1 << 4)
#define MSU_I2CINTR_SHFT 3 /* I2C interrupt bit */
@@ -228,8 +228,8 @@
#define MSU_SN00_SLOTID_SHFT 7
#define MSU_SN00_SLOTID_MASK (UINT64_CAST 0x80)
-#define MSU_PIMM_PSC_SHFT 4
-#define MSU_PIMM_PSC_MASK (0xf << MSU_PIMM_PSC_SHFT)
+#define MSU_PIMM_PSC_SHFT 4
+#define MSU_PIMM_PSC_MASK (0xf << MSU_PIMM_PSC_SHFT)
/* MD_MIG_DIFF_THRESH bit definitions */
@@ -260,7 +260,7 @@
/* Other MD definitions */
-#define MD_BANK_SHFT 29 /* log2(512 MB) */
+#define MD_BANK_SHFT 29 /* log2(512 MB) */
#define MD_BANK_MASK (UINT64_CAST 7 << 29)
#define MD_BANK_SIZE (UINT64_CAST 1 << MD_BANK_SHFT) /* 512 MB */
#define MD_BANK_OFFSET(_b) (UINT64_CAST (_b) << MD_BANK_SHFT)
@@ -300,32 +300,32 @@
* Format C: STATE != shared (FINE must be 0)
*/
-#define MD_PDIR_MASK 0xffffffffffff /* Whole entry */
+#define MD_PDIR_MASK 0xffffffffffff /* Whole entry */
#define MD_PDIR_ECC_SHFT 0 /* ABC low or high */
#define MD_PDIR_ECC_MASK 0x7f
-#define MD_PDIR_PRIO_SHFT 8 /* ABC low */
+#define MD_PDIR_PRIO_SHFT 8 /* ABC low */
#define MD_PDIR_PRIO_MASK (0xf << 8)
-#define MD_PDIR_AX_SHFT 7 /* ABC low */
+#define MD_PDIR_AX_SHFT 7 /* ABC low */
#define MD_PDIR_AX_MASK (1 << 7)
#define MD_PDIR_AX (1 << 7)
-#define MD_PDIR_FINE_SHFT 12 /* ABC low */
+#define MD_PDIR_FINE_SHFT 12 /* ABC low */
#define MD_PDIR_FINE_MASK (1 << 12)
#define MD_PDIR_FINE (1 << 12)
-#define MD_PDIR_OCT_SHFT 13 /* A low */
+#define MD_PDIR_OCT_SHFT 13 /* A low */
#define MD_PDIR_OCT_MASK (7 << 13)
-#define MD_PDIR_STATE_SHFT 13 /* BC low */
+#define MD_PDIR_STATE_SHFT 13 /* BC low */
#define MD_PDIR_STATE_MASK (7 << 13)
-#define MD_PDIR_ONECNT_SHFT 16 /* BC low */
+#define MD_PDIR_ONECNT_SHFT 16 /* BC low */
#define MD_PDIR_ONECNT_MASK (0x3f << 16)
-#define MD_PDIR_PTR_SHFT 22 /* C low */
+#define MD_PDIR_PTR_SHFT 22 /* C low */
#define MD_PDIR_PTR_MASK (UINT64_CAST 0x7ff << 22)
-#define MD_PDIR_VECMSB_SHFT 22 /* AB low */
+#define MD_PDIR_VECMSB_SHFT 22 /* AB low */
#define MD_PDIR_VECMSB_BITMASK 0x3ffffff
#define MD_PDIR_VECMSB_BITSHFT 27
#define MD_PDIR_VECMSB_MASK (UINT64_CAST MD_PDIR_VECMSB_BITMASK << 22)
-#define MD_PDIR_CWOFF_SHFT 7 /* C high */
+#define MD_PDIR_CWOFF_SHFT 7 /* C high */
#define MD_PDIR_CWOFF_MASK (7 << 7)
-#define MD_PDIR_VECLSB_SHFT 10 /* AB high */
+#define MD_PDIR_VECLSB_SHFT 10 /* AB high */
#define MD_PDIR_VECLSB_BITMASK (UINT64_CAST 0x3fffffffff)
#define MD_PDIR_VECLSB_BITSHFT 0
#define MD_PDIR_VECLSB_MASK (MD_PDIR_VECLSB_BITMASK << 10)
@@ -349,25 +349,25 @@
* Format C: STATE != shared
*/
-#define MD_SDIR_MASK 0xffff /* Whole entry */
+#define MD_SDIR_MASK 0xffff /* Whole entry */
#define MD_SDIR_ECC_SHFT 0 /* AC low or high */
#define MD_SDIR_ECC_MASK 0x1f
-#define MD_SDIR_PRIO_SHFT 6 /* AC low */
+#define MD_SDIR_PRIO_SHFT 6 /* AC low */
#define MD_SDIR_PRIO_MASK (1 << 6)
-#define MD_SDIR_AX_SHFT 5 /* AC low */
+#define MD_SDIR_AX_SHFT 5 /* AC low */
#define MD_SDIR_AX_MASK (1 << 5)
#define MD_SDIR_AX (1 << 5)
-#define MD_SDIR_STATE_SHFT 7 /* AC low */
+#define MD_SDIR_STATE_SHFT 7 /* AC low */
#define MD_SDIR_STATE_MASK (7 << 7)
-#define MD_SDIR_PTR_SHFT 10 /* C low */
+#define MD_SDIR_PTR_SHFT 10 /* C low */
#define MD_SDIR_PTR_MASK (0x3f << 10)
-#define MD_SDIR_CWOFF_SHFT 5 /* C high */
+#define MD_SDIR_CWOFF_SHFT 5 /* C high */
#define MD_SDIR_CWOFF_MASK (7 << 5)
-#define MD_SDIR_VECMSB_SHFT 11 /* A low */
+#define MD_SDIR_VECMSB_SHFT 11 /* A low */
#define MD_SDIR_VECMSB_BITMASK 0x1f
#define MD_SDIR_VECMSB_BITSHFT 7
#define MD_SDIR_VECMSB_MASK (MD_SDIR_VECMSB_BITMASK << 11)
-#define MD_SDIR_VECLSB_SHFT 5 /* A high */
+#define MD_SDIR_VECLSB_SHFT 5 /* A high */
#define MD_SDIR_VECLSB_BITMASK 0x7ff
#define MD_SDIR_VECLSB_BITSHFT 0
#define MD_SDIR_VECLSB_MASK (MD_SDIR_VECLSB_BITMASK << 5)
@@ -390,7 +390,7 @@
/* Premium SIMM protection entry shifts and masks. */
-#define MD_PPROT_SHFT 0 /* Prot. field */
+#define MD_PPROT_SHFT 0 /* Prot. field */
#define MD_PPROT_MASK 7
#define MD_PPROT_MIGMD_SHFT 3 /* Migration mode */
#define MD_PPROT_MIGMD_MASK (3 << 3)
@@ -403,7 +403,7 @@
/* Standard SIMM protection entry shifts and masks. */
-#define MD_SPROT_SHFT 0 /* Prot. field */
+#define MD_SPROT_SHFT 0 /* Prot. field */
#define MD_SPROT_MASK 7
#define MD_SPROT_MIGMD_SHFT 3 /* Migration mode */
#define MD_SPROT_MIGMD_MASK (3 << 3)
@@ -431,13 +431,13 @@
#define CPU_LED_ADDR(_nasid, _slice) \
(private.p_sn00 ? \
- REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) : \
+ REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) : \
REMOTE_HUB_ADDR((_nasid), MD_LED0 + ((_slice) << 3)))
#define SET_CPU_LEDS(_nasid, _slice, _val) \
(HUB_S(CPU_LED_ADDR(_nasid, _slice), (_val)))
-#define SET_MY_LEDS(_v) \
+#define SET_MY_LEDS(_v) \
SET_CPU_LEDS(get_nasid(), get_slice(), (_v))
/*
@@ -541,7 +541,7 @@
*/
struct dir_error_reg {
- u64 uce_vld: 1, /* 63: valid directory uce */
+ u64 uce_vld: 1, /* 63: valid directory uce */
ae_vld: 1, /* 62: valid dir prot ecc error */
ce_vld: 1, /* 61: valid correctable ECC err*/
rsvd1: 19, /* 60-42: reserved */
@@ -555,13 +555,13 @@ struct dir_error_reg {
};
typedef union md_dir_error {
- u64 derr_reg; /* the entire register */
+ u64 derr_reg; /* the entire register */
struct dir_error_reg derr_fmt; /* the register format */
} md_dir_error_t;
struct mem_error_reg {
- u64 uce_vld: 1, /* 63: valid memory uce */
+ u64 uce_vld: 1, /* 63: valid memory uce */
ce_vld: 1, /* 62: valid correctable ECC err*/
rsvd1: 22, /* 61-40: reserved */
bad_syn: 8, /* 39-32: bad mem ecc syndrome */
@@ -573,8 +573,8 @@ struct mem_error_reg {
typedef union md_mem_error {
- u64 merr_reg; /* the entire register */
- struct mem_error_reg merr_fmt; /* format of the mem_error reg */
+ u64 merr_reg; /* the entire register */
+ struct mem_error_reg merr_fmt; /* format of the mem_error reg */
} md_mem_error_t;
@@ -594,7 +594,7 @@ struct proto_error_reg {
};
typedef union md_proto_error {
- u64 perr_reg; /* the entire register */
+ u64 perr_reg; /* the entire register */
struct proto_error_reg perr_fmt; /* format of the register */
} md_proto_error_t;
@@ -695,33 +695,33 @@ typedef union md_pdir_loent {
* represent directory memory information.
*/
-typedef union md_dir_high {
- md_sdir_high_t md_sdir_high;
- md_pdir_high_t md_pdir_high;
+typedef union md_dir_high {
+ md_sdir_high_t md_sdir_high;
+ md_pdir_high_t md_pdir_high;
} md_dir_high_t;
-typedef union md_dir_low {
- md_sdir_low_t md_sdir_low;
- md_pdir_low_t md_pdir_low;
+typedef union md_dir_low {
+ md_sdir_low_t md_sdir_low;
+ md_pdir_low_t md_pdir_low;
} md_dir_low_t;
-typedef struct bddir_entry {
- md_dir_low_t md_dir_low;
- md_dir_high_t md_dir_high;
+typedef struct bddir_entry {
+ md_dir_low_t md_dir_low;
+ md_dir_high_t md_dir_high;
} bddir_entry_t;
typedef struct dir_mem_entry {
- u64 prcpf[MAX_REGIONS];
- bddir_entry_t directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
+ u64 prcpf[MAX_REGIONS];
+ bddir_entry_t directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
} dir_mem_entry_t;
typedef union md_perf_sel {
- u64 perf_sel_reg;
+ u64 perf_sel_reg;
struct {
u64 perf_rsvd : 60,
- perf_en : 1,
+ perf_en : 1,
perf_sel : 3;
} perf_sel_bits;
} md_perf_sel_t;
@@ -730,7 +730,7 @@ typedef union md_perf_cnt {
u64 perf_cnt;
struct {
u64 perf_rsvd : 44,
- perf_cnt : 20;
+ perf_cnt : 20;
} perf_cnt_bits;
} md_perf_cnt_t;
diff --git a/arch/mips/include/asm/sn/sn0/hubni.h b/arch/mips/include/asm/sn/sn0/hubni.h
index b40d3ef97a12..b73c4bee65f2 100644
--- a/arch/mips/include/asm/sn/sn0/hubni.h
+++ b/arch/mips/include/asm/sn/sn0/hubni.h
@@ -25,38 +25,38 @@
#define NI_BASE_TABLES 0x630000
#define NI_STATUS_REV_ID 0x600000 /* Hub network status, rev, and ID */
-#define NI_PORT_RESET 0x600008 /* Reset the network interface */
+#define NI_PORT_RESET 0x600008 /* Reset the network interface */
#define NI_PROTECTION 0x600010 /* NI register access permissions */
-#define NI_GLOBAL_PARMS 0x600018 /* LLP parameters */
+#define NI_GLOBAL_PARMS 0x600018 /* LLP parameters */
#define NI_SCRATCH_REG0 0x600100 /* Scratch register 0 (64 bits) */
#define NI_SCRATCH_REG1 0x600108 /* Scratch register 1 (64 bits) */
#define NI_DIAG_PARMS 0x600110 /* Parameters for diags */
#define NI_VECTOR_PARMS 0x600200 /* Vector PIO routing parameters */
-#define NI_VECTOR 0x600208 /* Vector PIO route */
-#define NI_VECTOR_DATA 0x600210 /* Vector PIO data */
-#define NI_VECTOR_STATUS 0x600300 /* Vector PIO return status */
-#define NI_RETURN_VECTOR 0x600308 /* Vector PIO return vector */
-#define NI_VECTOR_READ_DATA 0x600310 /* Vector PIO read data */
+#define NI_VECTOR 0x600208 /* Vector PIO route */
+#define NI_VECTOR_DATA 0x600210 /* Vector PIO data */
+#define NI_VECTOR_STATUS 0x600300 /* Vector PIO return status */
+#define NI_RETURN_VECTOR 0x600308 /* Vector PIO return vector */
+#define NI_VECTOR_READ_DATA 0x600310 /* Vector PIO read data */
#define NI_VECTOR_CLEAR 0x600380 /* Vector PIO read & clear status */
-#define NI_IO_PROTECT 0x600400 /* PIO protection bits */
-#define NI_IO_PROT_OVRRD 0x600408 /* PIO protection bit override */
-
-#define NI_AGE_CPU0_MEMORY 0x600500 /* CPU 0 memory age control */
-#define NI_AGE_CPU0_PIO 0x600508 /* CPU 0 PIO age control */
-#define NI_AGE_CPU1_MEMORY 0x600510 /* CPU 1 memory age control */
-#define NI_AGE_CPU1_PIO 0x600518 /* CPU 1 PIO age control */
-#define NI_AGE_GBR_MEMORY 0x600520 /* GBR memory age control */
-#define NI_AGE_GBR_PIO 0x600528 /* GBR PIO age control */
-#define NI_AGE_IO_MEMORY 0x600530 /* IO memory age control */
-#define NI_AGE_IO_PIO 0x600538 /* IO PIO age control */
+#define NI_IO_PROTECT 0x600400 /* PIO protection bits */
+#define NI_IO_PROT_OVRRD 0x600408 /* PIO protection bit override */
+
+#define NI_AGE_CPU0_MEMORY 0x600500 /* CPU 0 memory age control */
+#define NI_AGE_CPU0_PIO 0x600508 /* CPU 0 PIO age control */
+#define NI_AGE_CPU1_MEMORY 0x600510 /* CPU 1 memory age control */
+#define NI_AGE_CPU1_PIO 0x600518 /* CPU 1 PIO age control */
+#define NI_AGE_GBR_MEMORY 0x600520 /* GBR memory age control */
+#define NI_AGE_GBR_PIO 0x600528 /* GBR PIO age control */
+#define NI_AGE_IO_MEMORY 0x600530 /* IO memory age control */
+#define NI_AGE_IO_PIO 0x600538 /* IO PIO age control */
#define NI_AGE_REG_MIN NI_AGE_CPU0_MEMORY
#define NI_AGE_REG_MAX NI_AGE_IO_PIO
-#define NI_PORT_PARMS 0x608000 /* LLP Parameters */
-#define NI_PORT_ERROR 0x608008 /* LLP Errors */
-#define NI_PORT_ERROR_CLEAR 0x608088 /* Clear the error bits */
+#define NI_PORT_PARMS 0x608000 /* LLP Parameters */
+#define NI_PORT_ERROR 0x608008 /* LLP Errors */
+#define NI_PORT_ERROR_CLEAR 0x608088 /* Clear the error bits */
#define NI_META_TABLE0 0x638000 /* First meta routing table entry */
#define NI_META_TABLE(_x) (NI_META_TABLE0 + (8 * (_x)))
@@ -76,13 +76,13 @@
#define NSRI_LINKUP_SHFT 29
#define NSRI_LINKUP_MASK (UINT64_CAST 0x1 << 29)
#define NSRI_DOWNREASON_SHFT 28 /* 0=failed, 1=never came */
-#define NSRI_DOWNREASON_MASK (UINT64_CAST 0x1 << 28) /* out of reset. */
+#define NSRI_DOWNREASON_MASK (UINT64_CAST 0x1 << 28) /* out of reset. */
#define NSRI_MORENODES_SHFT 18
#define NSRI_MORENODES_MASK (UINT64_CAST 1 << 18) /* Max. # of nodes */
#define MORE_MEMORY 0
#define MORE_NODES 1
#define NSRI_REGIONSIZE_SHFT 17
-#define NSRI_REGIONSIZE_MASK (UINT64_CAST 1 << 17) /* Granularity */
+#define NSRI_REGIONSIZE_MASK (UINT64_CAST 1 << 17) /* Granularity */
#define REGIONSIZE_FINE 1
#define REGIONSIZE_COARSE 0
#define NSRI_NODEID_SHFT 8
@@ -90,14 +90,14 @@
#define NSRI_REV_SHFT 4
#define NSRI_REV_MASK (UINT64_CAST 0xf << 4) /* Chip Revision */
#define NSRI_CHIPID_SHFT 0
-#define NSRI_CHIPID_MASK (UINT64_CAST 0xf) /* Chip type ID */
+#define NSRI_CHIPID_MASK (UINT64_CAST 0xf) /* Chip type ID */
/*
- * In fine mode, each node is a region. In coarse mode, there are
+ * In fine mode, each node is a region. In coarse mode, there are
* eight nodes per region.
*/
#define NASID_TO_FINEREG_SHFT 0
-#define NASID_TO_COARSEREG_SHFT 3
+#define NASID_TO_COARSEREG_SHFT 3
/* NI_PORT_RESET mask definitions */
@@ -111,21 +111,21 @@
/* NI_GLOBAL_PARMS mask and shift definitions */
-#define NGP_MAXRETRY_SHFT 48 /* Maximum retries */
+#define NGP_MAXRETRY_SHFT 48 /* Maximum retries */
#define NGP_MAXRETRY_MASK (UINT64_CAST 0x3ff << 48)
-#define NGP_TAILTOWRAP_SHFT 32 /* Tail timeout wrap */
+#define NGP_TAILTOWRAP_SHFT 32 /* Tail timeout wrap */
#define NGP_TAILTOWRAP_MASK (UINT64_CAST 0xffff << 32)
-#define NGP_CREDITTOVAL_SHFT 16 /* Tail timeout wrap */
+#define NGP_CREDITTOVAL_SHFT 16 /* Tail timeout wrap */
#define NGP_CREDITTOVAL_MASK (UINT64_CAST 0xf << 16)
-#define NGP_TAILTOVAL_SHFT 4 /* Tail timeout value */
+#define NGP_TAILTOVAL_SHFT 4 /* Tail timeout value */
#define NGP_TAILTOVAL_MASK (UINT64_CAST 0xf << 4)
/* NI_DIAG_PARMS mask and shift definitions */
#define NDP_PORTTORESET (UINT64_CAST 1 << 18) /* Port tmout reset */
#define NDP_LLP8BITMODE (UINT64_CAST 1 << 12) /* LLP 8-bit mode */
-#define NDP_PORTDISABLE (UINT64_CAST 1 << 6) /* Port disable */
+#define NDP_PORTDISABLE (UINT64_CAST 1 << 6) /* Port disable */
#define NDP_SENDERROR (UINT64_CAST 1) /* Send data error */
/*
@@ -137,7 +137,7 @@
#define NVP_PIOID_MASK (UINT64_CAST 0x3ff << 40)
#define NVP_WRITEID_SHFT 32
#define NVP_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define NVP_ADDRESS_MASK (UINT64_CAST 0xffff8) /* Bits 19:3 */
+#define NVP_ADDRESS_MASK (UINT64_CAST 0xffff8) /* Bits 19:3 */
#define NVP_TYPE_SHFT 0
#define NVP_TYPE_MASK (UINT64_CAST 0x3)
@@ -151,7 +151,7 @@
#define NVS_PIOID_MASK (UINT64_CAST 0x3ff << 40)
#define NVS_WRITEID_SHFT 32
#define NVS_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define NVS_ADDRESS_MASK (UINT64_CAST 0xfffffff8) /* Bits 31:3 */
+#define NVS_ADDRESS_MASK (UINT64_CAST 0xfffffff8) /* Bits 31:3 */
#define NVS_TYPE_SHFT 0
#define NVS_TYPE_MASK (UINT64_CAST 0x7)
#define NVS_ERROR_MASK (UINT64_CAST 0x4) /* bit set means error */
@@ -161,10 +161,10 @@
#define PIOTYPE_WRITE 1 /* VECTOR_PARMS and VECTOR_STATUS */
#define PIOTYPE_UNDEFINED 2 /* VECTOR_PARMS and VECTOR_STATUS */
#define PIOTYPE_EXCHANGE 3 /* VECTOR_PARMS and VECTOR_STATUS */
-#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
-#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
-#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
-#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
+#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
+#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
+#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
+#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
/* NI_AGE_XXX mask and shift definitions */
@@ -215,7 +215,7 @@
#define NPE_FATAL_ERRORS (NPE_LINKRESET | NPE_INTERNALERROR | \
NPE_BADMESSAGE | NPE_BADDEST | \
- NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
+ NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
NPE_TAILTO_MASK)
/* NI_META_TABLE mask and shift definitions */
@@ -231,7 +231,7 @@
typedef union hubni_port_error_u {
u64 nipe_reg_value;
struct {
- u64 nipe_rsvd: 26, /* unused */
+ u64 nipe_rsvd: 26, /* unused */
nipe_lnk_reset: 1, /* link reset */
nipe_intl_err: 1, /* internal error */
nipe_bad_msg: 1, /* bad message */
diff --git a/arch/mips/include/asm/sn/sn0/hubpi.h b/arch/mips/include/asm/sn/sn0/hubpi.h
index e39f5f9da040..7b83655913c5 100644
--- a/arch/mips/include/asm/sn/sn0/hubpi.h
+++ b/arch/mips/include/asm/sn/sn0/hubpi.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SN_SN0_HUBPI_H
-#define _ASM_SN_SN0_HUBPI_H
+#ifndef _ASM_SN_SN0_HUBPI_H
+#define _ASM_SN_SN0_HUBPI_H
#include <linux/types.h>
@@ -25,13 +25,13 @@
/* General protection and control registers */
-#define PI_CPU_PROTECT 0x000000 /* CPU Protection */
-#define PI_PROT_OVERRD 0x000008 /* Clear CPU Protection bit */
-#define PI_IO_PROTECT 0x000010 /* Interrupt Pending Protection */
+#define PI_CPU_PROTECT 0x000000 /* CPU Protection */
+#define PI_PROT_OVERRD 0x000008 /* Clear CPU Protection bit */
+#define PI_IO_PROTECT 0x000010 /* Interrupt Pending Protection */
#define PI_REGION_PRESENT 0x000018 /* Indicates whether region exists */
-#define PI_CPU_NUM 0x000020 /* CPU Number ID */
-#define PI_CALIAS_SIZE 0x000028 /* Cached Alias Size */
-#define PI_MAX_CRB_TIMEOUT 0x000030 /* Maximum Timeout for CRB */
+#define PI_CPU_NUM 0x000020 /* CPU Number ID */
+#define PI_CALIAS_SIZE 0x000028 /* Cached Alias Size */
+#define PI_MAX_CRB_TIMEOUT 0x000030 /* Maximum Timeout for CRB */
#define PI_CRB_SFACTOR 0x000038 /* Scale factor for CRB timeout */
/* CALIAS values */
@@ -54,28 +54,28 @@
/* Processor control and status checking */
-#define PI_CPU_PRESENT_A 0x000040 /* CPU Present A */
-#define PI_CPU_PRESENT_B 0x000048 /* CPU Present B */
-#define PI_CPU_ENABLE_A 0x000050 /* CPU Enable A */
-#define PI_CPU_ENABLE_B 0x000058 /* CPU Enable B */
-#define PI_REPLY_LEVEL 0x000060 /* Reply Level */
+#define PI_CPU_PRESENT_A 0x000040 /* CPU Present A */
+#define PI_CPU_PRESENT_B 0x000048 /* CPU Present B */
+#define PI_CPU_ENABLE_A 0x000050 /* CPU Enable A */
+#define PI_CPU_ENABLE_B 0x000058 /* CPU Enable B */
+#define PI_REPLY_LEVEL 0x000060 /* Reply Level */
#define PI_HARDRESET_BIT 0x020068 /* Bit cleared by s/w on SR */
-#define PI_NMI_A 0x000070 /* NMI to CPU A */
-#define PI_NMI_B 0x000078 /* NMI to CPU B */
+#define PI_NMI_A 0x000070 /* NMI to CPU A */
+#define PI_NMI_B 0x000078 /* NMI to CPU B */
#define PI_NMI_OFFSET (PI_NMI_B - PI_NMI_A)
-#define PI_SOFTRESET 0x000080 /* Softreset (to both CPUs) */
+#define PI_SOFTRESET 0x000080 /* Softreset (to both CPUs) */
-/* Regular Interrupt register checking. */
+/* Regular Interrupt register checking. */
#define PI_INT_PEND_MOD 0x000090 /* Write to set pending ints */
-#define PI_INT_PEND0 0x000098 /* Read to get pending ints */
-#define PI_INT_PEND1 0x0000a0 /* Read to get pending ints */
-#define PI_INT_MASK0_A 0x0000a8 /* Interrupt Mask 0 for CPU A */
-#define PI_INT_MASK1_A 0x0000b0 /* Interrupt Mask 1 for CPU A */
-#define PI_INT_MASK0_B 0x0000b8 /* Interrupt Mask 0 for CPU B */
-#define PI_INT_MASK1_B 0x0000c0 /* Interrupt Mask 1 for CPU B */
+#define PI_INT_PEND0 0x000098 /* Read to get pending ints */
+#define PI_INT_PEND1 0x0000a0 /* Read to get pending ints */
+#define PI_INT_MASK0_A 0x0000a8 /* Interrupt Mask 0 for CPU A */
+#define PI_INT_MASK1_A 0x0000b0 /* Interrupt Mask 1 for CPU A */
+#define PI_INT_MASK0_B 0x0000b8 /* Interrupt Mask 0 for CPU B */
+#define PI_INT_MASK1_B 0x0000c0 /* Interrupt Mask 1 for CPU B */
-#define PI_INT_MASK_OFFSET 0x10 /* Offset from A to B */
+#define PI_INT_MASK_OFFSET 0x10 /* Offset from A to B */
/* Crosscall interrupts */
@@ -83,49 +83,49 @@
#define PI_CC_PEND_SET_B 0x0000d0 /* CC Interrupt Pending Set, CPU B */
#define PI_CC_PEND_CLR_A 0x0000d8 /* CC Interrupt Pending Clr, CPU A */
#define PI_CC_PEND_CLR_B 0x0000e0 /* CC Interrupt Pending Clr, CPU B */
-#define PI_CC_MASK 0x0000e8 /* CC Interrupt mask */
+#define PI_CC_MASK 0x0000e8 /* CC Interrupt mask */
-#define PI_INT_SET_OFFSET 0x08 /* Offset from A to B */
+#define PI_INT_SET_OFFSET 0x08 /* Offset from A to B */
/* Realtime Counter and Profiler control registers */
-#define PI_RT_COUNT 0x030100 /* Real Time Counter */
-#define PI_RT_COMPARE_A 0x000108 /* Real Time Compare A */
-#define PI_RT_COMPARE_B 0x000110 /* Real Time Compare B */
+#define PI_RT_COUNT 0x030100 /* Real Time Counter */
+#define PI_RT_COMPARE_A 0x000108 /* Real Time Compare A */
+#define PI_RT_COMPARE_B 0x000110 /* Real Time Compare B */
#define PI_PROFILE_COMPARE 0x000118 /* L5 int to both cpus when == RTC */
-#define PI_RT_PEND_A 0x000120 /* Set if RT int for A pending */
-#define PI_RT_PEND_B 0x000128 /* Set if RT int for B pending */
+#define PI_RT_PEND_A 0x000120 /* Set if RT int for A pending */
+#define PI_RT_PEND_B 0x000128 /* Set if RT int for B pending */
#define PI_PROF_PEND_A 0x000130 /* Set if Prof int for A pending */
#define PI_PROF_PEND_B 0x000138 /* Set if Prof int for B pending */
-#define PI_RT_EN_A 0x000140 /* RT int for CPU A enable */
-#define PI_RT_EN_B 0x000148 /* RT int for CPU B enable */
-#define PI_PROF_EN_A 0x000150 /* PROF int for CPU A enable */
-#define PI_PROF_EN_B 0x000158 /* PROF int for CPU B enable */
-#define PI_RT_LOCAL_CTRL 0x000160 /* RT control register */
+#define PI_RT_EN_A 0x000140 /* RT int for CPU A enable */
+#define PI_RT_EN_B 0x000148 /* RT int for CPU B enable */
+#define PI_PROF_EN_A 0x000150 /* PROF int for CPU A enable */
+#define PI_PROF_EN_B 0x000158 /* PROF int for CPU B enable */
+#define PI_RT_LOCAL_CTRL 0x000160 /* RT control register */
#define PI_RT_FILTER_CTRL 0x000168 /* GCLK Filter control register */
#define PI_COUNT_OFFSET 0x08 /* A to B offset for all counts */
/* Built-In Self Test support */
-#define PI_BIST_WRITE_DATA 0x000200 /* BIST write data */
-#define PI_BIST_READ_DATA 0x000208 /* BIST read data */
-#define PI_BIST_COUNT_TARG 0x000210 /* BIST Count and Target */
-#define PI_BIST_READY 0x000218 /* BIST Ready indicator */
-#define PI_BIST_SHIFT_LOAD 0x000220 /* BIST control */
-#define PI_BIST_SHIFT_UNLOAD 0x000228 /* BIST control */
-#define PI_BIST_ENTER_RUN 0x000230 /* BIST control */
+#define PI_BIST_WRITE_DATA 0x000200 /* BIST write data */
+#define PI_BIST_READ_DATA 0x000208 /* BIST read data */
+#define PI_BIST_COUNT_TARG 0x000210 /* BIST Count and Target */
+#define PI_BIST_READY 0x000218 /* BIST Ready indicator */
+#define PI_BIST_SHIFT_LOAD 0x000220 /* BIST control */
+#define PI_BIST_SHIFT_UNLOAD 0x000228 /* BIST control */
+#define PI_BIST_ENTER_RUN 0x000230 /* BIST control */
/* Graphics control registers */
-#define PI_GFX_PAGE_A 0x000300 /* Graphics page A */
-#define PI_GFX_CREDIT_CNTR_A 0x000308 /* Graphics credit counter A */
-#define PI_GFX_BIAS_A 0x000310 /* Graphics bias A */
+#define PI_GFX_PAGE_A 0x000300 /* Graphics page A */
+#define PI_GFX_CREDIT_CNTR_A 0x000308 /* Graphics credit counter A */
+#define PI_GFX_BIAS_A 0x000310 /* Graphics bias A */
#define PI_GFX_INT_CNTR_A 0x000318 /* Graphics interrupt counter A */
#define PI_GFX_INT_CMP_A 0x000320 /* Graphics interrupt comparator A */
-#define PI_GFX_PAGE_B 0x000328 /* Graphics page B */
-#define PI_GFX_CREDIT_CNTR_B 0x000330 /* Graphics credit counter B */
-#define PI_GFX_BIAS_B 0x000338 /* Graphics bias B */
+#define PI_GFX_PAGE_B 0x000328 /* Graphics page B */
+#define PI_GFX_CREDIT_CNTR_B 0x000330 /* Graphics credit counter B */
+#define PI_GFX_BIAS_B 0x000338 /* Graphics bias B */
#define PI_GFX_INT_CNTR_B 0x000340 /* Graphics interrupt counter B */
#define PI_GFX_INT_CMP_B 0x000348 /* Graphics interrupt comparator B */
@@ -138,24 +138,24 @@
#define PI_ERR_INT_MASK_B 0x000410 /* Error Interrupt mask for CPU B */
#define PI_ERR_STACK_ADDR_A 0x000418 /* Error stack address for CPU A */
#define PI_ERR_STACK_ADDR_B 0x000420 /* Error stack address for CPU B */
-#define PI_ERR_STACK_SIZE 0x000428 /* Error Stack Size */
-#define PI_ERR_STATUS0_A 0x000430 /* Error Status 0A */
+#define PI_ERR_STACK_SIZE 0x000428 /* Error Stack Size */
+#define PI_ERR_STATUS0_A 0x000430 /* Error Status 0A */
#define PI_ERR_STATUS0_A_RCLR 0x000438 /* Error Status 0A clear on read */
-#define PI_ERR_STATUS1_A 0x000440 /* Error Status 1A */
+#define PI_ERR_STATUS1_A 0x000440 /* Error Status 1A */
#define PI_ERR_STATUS1_A_RCLR 0x000448 /* Error Status 1A clear on read */
-#define PI_ERR_STATUS0_B 0x000450 /* Error Status 0B */
+#define PI_ERR_STATUS0_B 0x000450 /* Error Status 0B */
#define PI_ERR_STATUS0_B_RCLR 0x000458 /* Error Status 0B clear on read */
-#define PI_ERR_STATUS1_B 0x000460 /* Error Status 1B */
+#define PI_ERR_STATUS1_B 0x000460 /* Error Status 1B */
#define PI_ERR_STATUS1_B_RCLR 0x000468 /* Error Status 1B clear on read */
-#define PI_SPOOL_CMP_A 0x000470 /* Spool compare for CPU A */
-#define PI_SPOOL_CMP_B 0x000478 /* Spool compare for CPU B */
-#define PI_CRB_TIMEOUT_A 0x000480 /* Timed out CRB entries for A */
-#define PI_CRB_TIMEOUT_B 0x000488 /* Timed out CRB entries for B */
+#define PI_SPOOL_CMP_A 0x000470 /* Spool compare for CPU A */
+#define PI_SPOOL_CMP_B 0x000478 /* Spool compare for CPU B */
+#define PI_CRB_TIMEOUT_A 0x000480 /* Timed out CRB entries for A */
+#define PI_CRB_TIMEOUT_B 0x000488 /* Timed out CRB entries for B */
#define PI_SYSAD_ERRCHK_EN 0x000490 /* Enables SYSAD error checking */
-#define PI_BAD_CHECK_BIT_A 0x000498 /* Force SYSAD check bit error */
-#define PI_BAD_CHECK_BIT_B 0x0004a0 /* Force SYSAD check bit error */
-#define PI_NACK_CNT_A 0x0004a8 /* Consecutive NACK counter */
-#define PI_NACK_CNT_B 0x0004b0 /* " " for CPU B */
+#define PI_BAD_CHECK_BIT_A 0x000498 /* Force SYSAD check bit error */
+#define PI_BAD_CHECK_BIT_B 0x0004a0 /* Force SYSAD check bit error */
+#define PI_NACK_CNT_A 0x0004a8 /* Consecutive NACK counter */
+#define PI_NACK_CNT_B 0x0004b0 /* " " for CPU B */
#define PI_NACK_CMP 0x0004b8 /* NACK count compare */
#define PI_STACKADDR_OFFSET (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
#define PI_ERRSTAT_OFFSET (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
@@ -168,7 +168,7 @@
#define PI_ERR_SPUR_MSG_A 0x00000008
#define PI_ERR_WRB_TERR_B 0x00000010 /* WRB TERR */
#define PI_ERR_WRB_TERR_A 0x00000020
-#define PI_ERR_WRB_WERR_B 0x00000040 /* WRB WERR */
+#define PI_ERR_WRB_WERR_B 0x00000040 /* WRB WERR */
#define PI_ERR_WRB_WERR_A 0x00000080
#define PI_ERR_SYSSTATE_B 0x00000100 /* SysState parity error */
#define PI_ERR_SYSSTATE_A 0x00000200
@@ -196,32 +196,32 @@
* The following three macros define all possible error int pends.
*/
-#define PI_FATAL_ERR_CPU_A (PI_ERR_SYSSTATE_TAG_A | \
- PI_ERR_BAD_SPOOL_A | \
- PI_ERR_SYSCMD_ADDR_A | \
- PI_ERR_SYSCMD_DATA_A | \
- PI_ERR_SYSAD_ADDR_A | \
+#define PI_FATAL_ERR_CPU_A (PI_ERR_SYSSTATE_TAG_A | \
+ PI_ERR_BAD_SPOOL_A | \
+ PI_ERR_SYSCMD_ADDR_A | \
+ PI_ERR_SYSCMD_DATA_A | \
+ PI_ERR_SYSAD_ADDR_A | \
PI_ERR_SYSAD_DATA_A | \
PI_ERR_SYSSTATE_A)
-#define PI_MISC_ERR_CPU_A (PI_ERR_UNCAC_UNCORR_A | \
- PI_ERR_WRB_WERR_A | \
- PI_ERR_WRB_TERR_A | \
- PI_ERR_SPUR_MSG_A | \
+#define PI_MISC_ERR_CPU_A (PI_ERR_UNCAC_UNCORR_A | \
+ PI_ERR_WRB_WERR_A | \
+ PI_ERR_WRB_TERR_A | \
+ PI_ERR_SPUR_MSG_A | \
PI_ERR_SPOOL_CMP_A)
-#define PI_FATAL_ERR_CPU_B (PI_ERR_SYSSTATE_TAG_B | \
- PI_ERR_BAD_SPOOL_B | \
- PI_ERR_SYSCMD_ADDR_B | \
- PI_ERR_SYSCMD_DATA_B | \
- PI_ERR_SYSAD_ADDR_B | \
+#define PI_FATAL_ERR_CPU_B (PI_ERR_SYSSTATE_TAG_B | \
+ PI_ERR_BAD_SPOOL_B | \
+ PI_ERR_SYSCMD_ADDR_B | \
+ PI_ERR_SYSCMD_DATA_B | \
+ PI_ERR_SYSAD_ADDR_B | \
PI_ERR_SYSAD_DATA_B | \
PI_ERR_SYSSTATE_B)
-#define PI_MISC_ERR_CPU_B (PI_ERR_UNCAC_UNCORR_B | \
- PI_ERR_WRB_WERR_B | \
- PI_ERR_WRB_TERR_B | \
- PI_ERR_SPUR_MSG_B | \
+#define PI_MISC_ERR_CPU_B (PI_ERR_UNCAC_UNCORR_B | \
+ PI_ERR_WRB_WERR_B | \
+ PI_ERR_WRB_TERR_B | \
+ PI_ERR_SPUR_MSG_B | \
PI_ERR_SPOOL_CMP_B)
#define PI_ERR_GENERIC (PI_ERR_MD_UNCORR)
@@ -242,24 +242,24 @@
#define PI_ERR_ST0_CMD_SHFT 17
#define PI_ERR_ST0_ADDR_MASK 0x3ffffffffe000000
#define PI_ERR_ST0_ADDR_SHFT 25
-#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
-#define PI_ERR_ST0_OVERRUN_SHFT 62
+#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
+#define PI_ERR_ST0_OVERRUN_SHFT 62
#define PI_ERR_ST0_VALID_MASK 0x8000000000000000
#define PI_ERR_ST0_VALID_SHFT 63
/* Fields in PI_ERR_STATUS1_[AB] */
#define PI_ERR_ST1_SPOOL_MASK 0x00000000001fffff
#define PI_ERR_ST1_SPOOL_SHFT 0
-#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
-#define PI_ERR_ST1_TOUTCNT_SHFT 21
+#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
+#define PI_ERR_ST1_TOUTCNT_SHFT 21
#define PI_ERR_ST1_INVCNT_MASK 0x0000007fe0000000
#define PI_ERR_ST1_INVCNT_SHFT 29
#define PI_ERR_ST1_CRBNUM_MASK 0x0000038000000000
#define PI_ERR_ST1_CRBNUM_SHFT 39
#define PI_ERR_ST1_WRBRRB_MASK 0x0000040000000000
#define PI_ERR_ST1_WRBRRB_SHFT 42
-#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
-#define PI_ERR_ST1_CRBSTAT_SHFT 43
+#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
+#define PI_ERR_ST1_CRBSTAT_SHFT 43
#define PI_ERR_ST1_MSGSRC_MASK 0xffe0000000000000
#define PI_ERR_ST1_MSGSRC_SHFT 53
@@ -274,8 +274,8 @@
#define PI_ERR_STK_CRBNUM_SHFT 9
#define PI_ERR_STK_WRBRRB_MASK 0x0000000000001000
#define PI_ERR_STK_WRBRRB_SHFT 12
-#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
-#define PI_ERR_STK_CRBSTAT_SHFT 13
+#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
+#define PI_ERR_STK_CRBSTAT_SHFT 13
#define PI_ERR_STK_CMD_MASK 0x000000007f800000
#define PI_ERR_STK_CMD_SHFT 23
#define PI_ERR_STK_ADDR_MASK 0xffffffff80000000
@@ -364,11 +364,11 @@ typedef u64 rtc_time_t;
/* Bits in PI_SYSAD_ERRCHK_EN */
#define PI_SYSAD_ERRCHK_ECCGEN 0x01 /* Enable ECC generation */
-#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
-#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
+#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
+#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
#define PI_SYSAD_ERRCHK_CMDP 0x08 /* Enable SysCmd parity checking */
#define PI_SYSAD_ERRCHK_STATE 0x10 /* Enable SysState parity checking */
-#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
+#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
#define PI_SYSAD_CHECK_ALL 0x3f /* Generate and check all signals. */
/* Interrupt pending bits on R10000 */
diff --git a/arch/mips/include/asm/sn/sn0/ip27.h b/arch/mips/include/asm/sn/sn0/ip27.h
index 3c97e0855c8d..3b5efeefcc3f 100644
--- a/arch/mips/include/asm/sn/sn0/ip27.h
+++ b/arch/mips/include/asm/sn/sn0/ip27.h
@@ -21,14 +21,14 @@
#ifndef __ASSEMBLY__
-#define CAUSE_BERRINTR IE_IRQ5
+#define CAUSE_BERRINTR IE_IRQ5
-#define ECCF_CACHE_ERR 0
-#define ECCF_TAGLO 1
-#define ECCF_ECC 2
-#define ECCF_ERROREPC 3
-#define ECCF_PADDR 4
-#define ECCF_SIZE (5 * sizeof(long))
+#define ECCF_CACHE_ERR 0
+#define ECCF_TAGLO 1
+#define ECCF_ECC 2
+#define ECCF_ERROREPC 3
+#define ECCF_PADDR 4
+#define ECCF_SIZE (5 * sizeof(long))
#endif /* !__ASSEMBLY__ */
@@ -39,8 +39,8 @@
* the processor number of the calling processor. The proc parameters
* must be a register.
*/
-#define KL_GET_CPUNUM(proc) \
- dli proc, LOCAL_HUB(0); \
+#define KL_GET_CPUNUM(proc) \
+ dli proc, LOCAL_HUB(0); \
ld proc, PI_CPU_NUM(proc)
#endif /* __ASSEMBLY__ */
@@ -71,15 +71,15 @@
#define NUM_CAUSE_INTRS 8
-#define SCACHE_LINESIZE 128
-#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
+#define SCACHE_LINESIZE 128
+#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
#include <asm/sn/addrs.h>
-#define LED_CYCLE_MASK 0x0f
-#define LED_CYCLE_SHFT 4
+#define LED_CYCLE_MASK 0x0f
+#define LED_CYCLE_SHFT 4
#define SEND_NMI(_nasid, _slice) \
- REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+ REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
#endif /* _ASM_SN_SN0_IP27_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index 74d0bb260b86..c4813d67aec3 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
-typedef unsigned long cpuid_t;
+typedef unsigned long cpuid_t;
typedef unsigned long cnodemask_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
typedef signed short cnodeid_t; /* node id in compact-id space */
@@ -19,7 +19,7 @@ typedef signed char partid_t; /* partition ID type */
typedef signed short moduleid_t; /* user-visible module number type */
typedef signed short cmoduleid_t; /* kernel compact module id type */
typedef unsigned char clusterid_t; /* Clusterid of the cell */
-typedef unsigned long pfn_t;
+typedef unsigned long pfn_t;
typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
diff --git a/arch/mips/include/asm/sni.h b/arch/mips/include/asm/sni.h
index 8c1eb02c6d16..a107201a2e1e 100644
--- a/arch/mips/include/asm/sni.h
+++ b/arch/mips/include/asm/sni.h
@@ -13,27 +13,27 @@
extern unsigned int sni_brd_type;
-#define SNI_BRD_10 2
-#define SNI_BRD_10NEW 3
-#define SNI_BRD_TOWER_OASIC 4
-#define SNI_BRD_MINITOWER 5
-#define SNI_BRD_PCI_TOWER 6
-#define SNI_BRD_RM200 7
-#define SNI_BRD_PCI_MTOWER 8
-#define SNI_BRD_PCI_DESKTOP 9
-#define SNI_BRD_PCI_TOWER_CPLUS 10
+#define SNI_BRD_10 2
+#define SNI_BRD_10NEW 3
+#define SNI_BRD_TOWER_OASIC 4
+#define SNI_BRD_MINITOWER 5
+#define SNI_BRD_PCI_TOWER 6
+#define SNI_BRD_RM200 7
+#define SNI_BRD_PCI_MTOWER 8
+#define SNI_BRD_PCI_DESKTOP 9
+#define SNI_BRD_PCI_TOWER_CPLUS 10
#define SNI_BRD_PCI_MTOWER_CPLUS 11
/* RM400 cpu types */
-#define SNI_CPU_M8021 0x01
-#define SNI_CPU_M8030 0x04
-#define SNI_CPU_M8031 0x06
-#define SNI_CPU_M8034 0x0f
-#define SNI_CPU_M8037 0x07
-#define SNI_CPU_M8040 0x05
-#define SNI_CPU_M8043 0x09
-#define SNI_CPU_M8050 0x0b
-#define SNI_CPU_M8053 0x0d
+#define SNI_CPU_M8021 0x01
+#define SNI_CPU_M8030 0x04
+#define SNI_CPU_M8031 0x06
+#define SNI_CPU_M8034 0x0f
+#define SNI_CPU_M8037 0x07
+#define SNI_CPU_M8040 0x05
+#define SNI_CPU_M8043 0x09
+#define SNI_CPU_M8050 0x0b
+#define SNI_CPU_M8053 0x0d
#define SNI_PORT_BASE CKSEG1ADDR(0xb4000000)
@@ -52,14 +52,14 @@ extern unsigned int sni_brd_type;
#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0044)
#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff004c)
#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0054)
-#define IT_INT2 0x01
-#define IT_INTD 0x02
-#define IT_INTC 0x04
-#define IT_INTB 0x08
-#define IT_INTA 0x10
-#define IT_EISA 0x20
-#define IT_SCSI 0x40
-#define IT_ETH 0x80
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff005c)
#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0064)
#define PCIMT_ECCREG CKSEG1ADDR(0xbfff006c)
@@ -86,14 +86,14 @@ extern unsigned int sni_brd_type;
#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0040)
#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff0048)
#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0050)
-#define IT_INT2 0x01
-#define IT_INTD 0x02
-#define IT_INTC 0x04
-#define IT_INTB 0x08
-#define IT_INTA 0x10
-#define IT_EISA 0x20
-#define IT_SCSI 0x40
-#define IT_ETH 0x80
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff0058)
#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0060)
#define PCIMT_ECCREG CKSEG1ADDR(0xbfff0068)
@@ -137,29 +137,29 @@ extern unsigned int sni_brd_type;
/*
* A20R based boards
*/
-#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000)
-#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000)
-#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000)
+#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000)
+#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000)
+#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000)
-#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
-#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
+#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
+#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
-#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c)
+#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c)
-#define SNI_PCIT_INT_START 24
-#define SNI_PCIT_INT_END 30
+#define SNI_PCIT_INT_START 24
+#define SNI_PCIT_INT_END 30
-#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
-#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
-#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
-#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
-#define PCIT_IRQ_INTD (SNI_PCIT_INT_START + 3)
-#define PCIT_IRQ_SCSI0 (SNI_PCIT_INT_START + 4)
-#define PCIT_IRQ_SCSI1 (SNI_PCIT_INT_START + 5)
+#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
+#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
+#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
+#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
+#define PCIT_IRQ_INTD (SNI_PCIT_INT_START + 3)
+#define PCIT_IRQ_SCSI0 (SNI_PCIT_INT_START + 4)
+#define PCIT_IRQ_SCSI1 (SNI_PCIT_INT_START + 5)
/*
- * Interrupt 0-16 are EISA interrupts. Interrupts from 16 on are assigned
+ * Interrupt 0-16 are EISA interrupts. Interrupts from 16 on are assigned
* to the other interrupts generated by ASIC PCI.
*
* INT2 is a wired-or of the push button interrupt, high temperature interrupt
@@ -204,12 +204,12 @@ extern unsigned int sni_brd_type;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define __SNI_END 3
#endif
-#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000)
+#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000)
#define SNI_IDPROM_MEMSIZE (SNI_IDPROM_BASE + (0x28 ^ __SNI_END))
#define SNI_IDPROM_BRDTYPE (SNI_IDPROM_BASE + (0x29 ^ __SNI_END))
#define SNI_IDPROM_CPUTYPE (SNI_IDPROM_BASE + (0x30 ^ __SNI_END))
-#define SNI_IDPROM_SIZE 0x1000
+#define SNI_IDPROM_SIZE 0x1000
/* board specific init functions */
extern void sni_a20r_init(void);
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 65900dab3ad3..d2da53c2c2f8 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -11,7 +11,7 @@
#else
# define SECTION_SIZE_BITS 28
#endif
-#define MAX_PHYSMEM_BITS 35
+#define MAX_PHYSMEM_BITS 35
#endif /* CONFIG_SPARSEMEM */
#endif /* _MIPS_SPARSEMEM_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ca61e846ab0f..5130c88d6420 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -17,7 +17,7 @@
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
- * Simple spin lock operations. There are two variants, one clears IRQ's
+ * Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* These are fair FIFO ticket locks
@@ -222,7 +222,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define arch_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index c52f36013a9d..9b2528e612c0 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -11,7 +11,7 @@
typedef union {
/*
- * bits 0..15 : serving_now
+ * bits 0..15 : serving_now
* bits 16..31 : ticket
*/
u32 lock;
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index cb41af5f3406..c99384018161 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -218,17 +218,17 @@
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
- .set mips64
- pref 0, 0($28) /* Prefetch the current pointer */
- pref 0, PT_R31(sp) /* Prefetch the $31(ra) */
+ .set mips64
+ pref 0, 0($28) /* Prefetch the current pointer */
+ pref 0, PT_R31(sp) /* Prefetch the $31(ra) */
/* The Octeon multiplier state is affected by general multiply
instructions. It must be saved before and kernel code might
corrupt it */
- jal octeon_mult_save
- LONG_L v1, 0($28) /* Load the current pointer */
+ jal octeon_mult_save
+ LONG_L v1, 0($28) /* Load the current pointer */
/* Restore $31(ra) that was changed by the jal */
- LONG_L ra, PT_R31(sp)
- pref 0, 0(v1) /* Prefetch the current thread */
+ LONG_L ra, PT_R31(sp)
+ pref 0, 0(v1) /* Prefetch the current thread */
#endif
.set pop
.endm
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
index 436e3ad352d9..29030cb398ee 100644
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -35,7 +35,7 @@ static __inline__ char *strcpy(char *__dest, __const__ char *__src)
".set\tat\n\t"
".set\treorder"
: "=r" (__dest), "=r" (__src)
- : "0" (__dest), "1" (__src)
+ : "0" (__dest), "1" (__src)
: "memory");
return __xdest;
@@ -62,9 +62,9 @@ static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
"2:\n\t"
".set\tat\n\t"
".set\treorder"
- : "=r" (__dest), "=r" (__src), "=r" (__n)
- : "0" (__dest), "1" (__src), "2" (__n)
- : "memory");
+ : "=r" (__dest), "=r" (__src), "=r" (__n)
+ : "0" (__dest), "1" (__src), "2" (__n)
+ : "memory");
return __xdest;
}
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 4f8ddba8c360..fd16bcb6c311 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -30,7 +30,7 @@ extern struct task_struct *ll_task;
#ifdef CONFIG_MIPS_MT_FPAFF
/*
- * Handle the scheduler resume end of FPU affinity management. We do this
+ * Handle the scheduler resume end of FPU affinity management. We do this
* inline to try to keep the overhead down. If we have been forced to run on
* a "CPU" with an FPU because of a previous high level of FP computation,
* but did not actually use the FPU during the most recent time-slice (CU1
@@ -72,7 +72,7 @@ do { \
__save_dsp(prev); \
__clear_software_ll_bit(); \
__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
- (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)
#define finish_arch_switch(prev) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index b2050b9e64b1..178f7924149a 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -44,7 +44,7 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
+ .exec_domain = &default_exec_domain, \
.flags = _TIF_FIXADE, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 761f2e92119e..debc8009bd58 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -6,8 +6,8 @@
* include/asm-mips/time.h
* header file for the new style time.c file and time services.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
static inline int init_mips_clocksource(void)
{
-#ifdef CONFIG_CSRC_R4K
+#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
return init_r4k_clocksource();
#else
return 0;
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 80d9dfcf1e88..c67842bc8ef3 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,7 +5,7 @@
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
*/
-#define tlb_start_vma(tlb, vma) \
+#define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 259145e07e97..12609a17dc8b 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -11,7 +11,7 @@
#include <topology.h>
#ifdef CONFIG_SMP
-#define smt_capable() (smp_num_siblings > 1)
+#define smt_capable() (smp_num_siblings > 1)
#endif
#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 420ca06b2f42..f41cf3ee82a7 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -14,7 +14,7 @@
/*
* Possible status responses for a board_be_handler backend.
*/
-#define MIPS_BE_DISCARD 0 /* return with no action */
+#define MIPS_BE_DISCARD 0 /* return with no action */
#define MIPS_BE_FIXUP 1 /* return to the fixup code */
#define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */
diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h
index 8808d7f82da0..aab959dc30ba 100644
--- a/arch/mips/include/asm/txx9/jmr3927.h
+++ b/arch/mips/include/asm/txx9/jmr3927.h
@@ -40,7 +40,7 @@
#define JMR3927_PCIIO_BASE (KSEG1 + JMR3927_PCIIO)
#define JMR3927_IOC_REV_ADDR (JMR3927_IOC_BASE + 0x00000000)
-#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
+#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
#define JMR3927_IOC_LED_ADDR (JMR3927_IOC_BASE + 0x00020000)
#define JMR3927_IOC_DIPSW_ADDR (JMR3927_IOC_BASE + 0x00030000)
#define JMR3927_IOC_BREV_ADDR (JMR3927_IOC_BASE + 0x00040000)
@@ -115,9 +115,9 @@
#define JMR3927_NR_IRQ_IRC 16 /* On-Chip IRC */
#define JMR3927_NR_IRQ_IOC 8 /* PCI/MODEM/INT[6:7] */
-#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
-#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
-#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
+#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
+#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
+#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
#define JMR3927_IRQ_IRC_INT0 (JMR3927_IRQ_IRC + TX3927_IR_INT0)
#define JMR3927_IRQ_IRC_INT1 (JMR3927_IRQ_IRC + TX3927_IR_INT1)
@@ -127,11 +127,11 @@
#define JMR3927_IRQ_IRC_INT5 (JMR3927_IRQ_IRC + TX3927_IR_INT5)
#define JMR3927_IRQ_IRC_SIO0 (JMR3927_IRQ_IRC + TX3927_IR_SIO0)
#define JMR3927_IRQ_IRC_SIO1 (JMR3927_IRQ_IRC + TX3927_IR_SIO1)
-#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
+#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
#define JMR3927_IRQ_IRC_DMA (JMR3927_IRQ_IRC + TX3927_IR_DMA)
#define JMR3927_IRQ_IRC_PIO (JMR3927_IRQ_IRC + TX3927_IR_PIO)
#define JMR3927_IRQ_IRC_PCI (JMR3927_IRQ_IRC + TX3927_IR_PCI)
-#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
+#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
#define JMR3927_IRQ_IOC_PCIA (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIA)
#define JMR3927_IRQ_IOC_PCIB (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIB)
#define JMR3927_IRQ_IOC_PCIC (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIC)
@@ -147,7 +147,7 @@
#define JMR3927_IRQ_ETHER0 JMR3927_IRQ_IRC_INT3
/* Clocks */
-#define JMR3927_CORECLK 132710400 /* 132.7MHz */
+#define JMR3927_CORECLK 132710400 /* 132.7MHz */
/*
* TX3927 Pin Configuration:
diff --git a/arch/mips/include/asm/txx9/rbtx4927.h b/arch/mips/include/asm/txx9/rbtx4927.h
index b2adab3d1acc..4060ad26ca99 100644
--- a/arch/mips/include/asm/txx9/rbtx4927.h
+++ b/arch/mips/include/asm/txx9/rbtx4927.h
@@ -1,6 +1,6 @@
/*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
@@ -38,7 +38,7 @@
#define RBTX4927_IMASK_ADDR (IO_BASE + TXX9_CE(2) + 0x00002000)
#define RBTX4927_IMSTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x00002006)
#define RBTX4927_SOFTINT_ADDR (IO_BASE + TXX9_CE(2) + 0x00003000)
-#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
+#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
#define RBTX4927_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f002)
#define RBTX4927_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f006)
#define RBTX4927_BRAMRTC_BASE (IO_BASE + TXX9_CE(2) + 0x00010000)
@@ -50,7 +50,7 @@
#define rbtx4927_imask_addr ((__u8 __iomem *)RBTX4927_IMASK_ADDR)
#define rbtx4927_imstat_addr ((__u8 __iomem *)RBTX4927_IMSTAT_ADDR)
#define rbtx4927_softint_addr ((__u8 __iomem *)RBTX4927_SOFTINT_ADDR)
-#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
+#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
#define rbtx4927_softresetlock_addr \
((__u8 __iomem *)RBTX4927_SOFTRESETLOCK_ADDR)
#define rbtx4927_pcireset_addr ((__u8 __iomem *)RBTX4927_PCIRESET_ADDR)
diff --git a/arch/mips/include/asm/txx9/rbtx4938.h b/arch/mips/include/asm/txx9/rbtx4938.h
index 9f0441a28126..9c969dd3c6eb 100644
--- a/arch/mips/include/asm/txx9/rbtx4938.h
+++ b/arch/mips/include/asm/txx9/rbtx4938.h
@@ -36,7 +36,7 @@
#define RBTX4938_SPICS_ADDR (IO_BASE + TXX9_CE(2) + 0x00005002)
#define RBTX4938_SFPWR_ADDR (IO_BASE + TXX9_CE(2) + 0x00005008)
#define RBTX4938_SFVOL_ADDR (IO_BASE + TXX9_CE(2) + 0x0000500a)
-#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
+#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
#define RBTX4938_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x00007002)
#define RBTX4938_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007004)
#define RBTX4938_ETHER_BASE (IO_BASE + TXX9_CE(2) + 0x00020000)
@@ -78,7 +78,7 @@
#define rbtx4938_spics_addr ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
#define rbtx4938_sfpwr_addr ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
#define rbtx4938_sfvol_addr ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
-#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
+#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
#define rbtx4938_softresetlock_addr \
((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
@@ -94,7 +94,7 @@
/* These are the virtual IRQ numbers, we divide all IRQ's into
* 'spaces', the 'space' determines where and how to enable/disable
- * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
+ * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
* IRQ hardware is supported.
*/
#define RBTX4938_NR_IRQ_IOC 8
@@ -103,18 +103,18 @@
#define RBTX4938_IRQ_IOC (TXX9_IRQ_BASE + TX4938_NUM_IR)
#define RBTX4938_IRQ_END (RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC)
-#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
-#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
-#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
-#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
+#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
+#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
+#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
+#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
#define RBTX4938_IRQ_IRC_DMA(ch, n) (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n))
#define RBTX4938_IRQ_IRC_PIO (RBTX4938_IRQ_IRC + TX4938_IR_PIO)
#define RBTX4938_IRQ_IRC_PDMAC (RBTX4938_IRQ_IRC + TX4938_IR_PDMAC)
#define RBTX4938_IRQ_IRC_PCIC (RBTX4938_IRQ_IRC + TX4938_IR_PCIC)
-#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
+#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
#define RBTX4938_IRQ_IRC_NDFMC (RBTX4938_IRQ_IRC + TX4938_IR_NDFMC)
-#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
-#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
+#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
+#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
#define RBTX4938_IRQ_IRC_ACLC (RBTX4938_IRQ_IRC + TX4938_IR_ACLC)
#define RBTX4938_IRQ_IRC_ACLCPME (RBTX4938_IRQ_IRC + TX4938_IR_ACLCPME)
#define RBTX4938_IRQ_IRC_PCIC1 (RBTX4938_IRQ_IRC + TX4938_IR_PCIC1)
diff --git a/arch/mips/include/asm/txx9/rbtx4939.h b/arch/mips/include/asm/txx9/rbtx4939.h
index e517899794a8..6157bfd90848 100644
--- a/arch/mips/include/asm/txx9/rbtx4939.h
+++ b/arch/mips/include/asm/txx9/rbtx4939.h
@@ -17,7 +17,7 @@
/* Address map */
#define RBTX4939_IOC_REG_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
-#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
+#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
#define RBTX4939_IOC_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000002)
#define RBTX4939_CONFIG1_ADDR (IO_BASE + TXX9_CE(1) + 0x00000004)
#define RBTX4939_CONFIG2_ADDR (IO_BASE + TXX9_CE(1) + 0x00000006)
@@ -46,9 +46,9 @@
#define RBTX4939_VPSIN_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500c)
#define RBTX4939_7SEG_ADDR(s, ch) \
(IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
-#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
+#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
#define RBTX4939_RESETEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00007002)
-#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
+#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
#define RBTX4939_ETHER_BASE (IO_BASE + TXX9_CE(1) + 0x00020000)
/* Ethernet port address */
@@ -77,11 +77,11 @@
#define RBTX4939_PE2_CIR 0x08
#define RBTX4939_PE2_SPI 0x10
#define RBTX4939_PE2_GPIO 0x20
-#define RBTX4939_PE3_VP 0x01
+#define RBTX4939_PE3_VP 0x01
#define RBTX4939_PE3_VP_P 0x02
#define RBTX4939_PE3_VP_S 0x04
-#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
+#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
#define rbtx4939_ioc_rev_addr ((u8 __iomem *)RBTX4939_IOC_REV_ADDR)
#define rbtx4939_config1_addr ((u8 __iomem *)RBTX4939_CONFIG1_ADDR)
#define rbtx4939_config2_addr ((u8 __iomem *)RBTX4939_CONFIG2_ADDR)
@@ -110,9 +110,9 @@
#define rbtx4939_vpsin_addr ((u8 __iomem *)RBTX4939_VPSIN_ADDR)
#define rbtx4939_7seg_addr(s, ch) \
((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
-#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
+#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
#define rbtx4939_reseten_addr ((u8 __iomem *)RBTX4939_RESETEN_ADDR)
-#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
+#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
/*
* IRQ mappings
diff --git a/arch/mips/include/asm/txx9/smsc_fdc37m81x.h b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
index d1d6332b4ca6..926d08f18463 100644
--- a/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
+++ b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
@@ -18,43 +18,43 @@
/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
#define SMSC_FDC37M81X_CONFIG_DATA 0x01
-#define SMSC_FDC37M81X_CONF 0x02
-#define SMSC_FDC37M81X_INDEX 0x03
-#define SMSC_FDC37M81X_DNUM 0x07
-#define SMSC_FDC37M81X_DID 0x20
-#define SMSC_FDC37M81X_DREV 0x21
-#define SMSC_FDC37M81X_PCNT 0x22
-#define SMSC_FDC37M81X_PMGT 0x23
-#define SMSC_FDC37M81X_OSC 0x24
-#define SMSC_FDC37M81X_CONFPA0 0x26
-#define SMSC_FDC37M81X_CONFPA1 0x27
-#define SMSC_FDC37M81X_TEST4 0x2B
-#define SMSC_FDC37M81X_TEST5 0x2C
-#define SMSC_FDC37M81X_TEST1 0x2D
-#define SMSC_FDC37M81X_TEST2 0x2E
-#define SMSC_FDC37M81X_TEST3 0x2F
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
/* Logical device numbers */
-#define SMSC_FDC37M81X_FDD 0x00
-#define SMSC_FDC37M81X_PARALLEL 0x03
-#define SMSC_FDC37M81X_SERIAL1 0x04
-#define SMSC_FDC37M81X_SERIAL2 0x05
-#define SMSC_FDC37M81X_KBD 0x07
-#define SMSC_FDC37M81X_AUXIO 0x08
-#define SMSC_FDC37M81X_NONE 0xff
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_PARALLEL 0x03
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
+#define SMSC_FDC37M81X_AUXIO 0x08
+#define SMSC_FDC37M81X_NONE 0xff
/* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_ACTIVE 0x30
#define SMSC_FDC37M81X_BASEADDR0 0x60
#define SMSC_FDC37M81X_BASEADDR1 0x61
-#define SMSC_FDC37M81X_INT 0x70
-#define SMSC_FDC37M81X_INT2 0x72
-#define SMSC_FDC37M81X_LDCR_F0 0xF0
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_LDCR_F0 0xF0
/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
-#define SMSC_FDC37M81X_CHIP_ID 0x4d
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
unsigned long smsc_fdc37m81x_init(unsigned long port);
diff --git a/arch/mips/include/asm/txx9/tx3927.h b/arch/mips/include/asm/txx9/tx3927.h
index dc30c8d42061..149fab4f8327 100644
--- a/arch/mips/include/asm/txx9/tx3927.h
+++ b/arch/mips/include/asm/txx9/tx3927.h
@@ -8,8 +8,8 @@
#ifndef __ASM_TXX9_TX3927_H
#define __ASM_TXX9_TX3927_H
-#define TX3927_REG_BASE 0xfffe0000UL
-#define TX3927_REG_SIZE 0x00010000
+#define TX3927_REG_BASE 0xfffe0000UL
+#define TX3927_REG_SIZE 0x00010000
#define TX3927_SDRAMC_REG (TX3927_REG_BASE + 0x8000)
#define TX3927_ROMC_REG (TX3927_REG_BASE + 0x9000)
#define TX3927_DMA_REG (TX3927_REG_BASE + 0xb000)
@@ -191,8 +191,8 @@ struct tx3927_ccfg_reg {
#define TX3927_DMA_CCR_XFSZ_1W TX3927_DMA_CCR_XFSZ(2)
#define TX3927_DMA_CCR_XFSZ_4W TX3927_DMA_CCR_XFSZ(4)
#define TX3927_DMA_CCR_XFSZ_8W TX3927_DMA_CCR_XFSZ(5)
-#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
-#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
+#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
+#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
#define TX3927_DMA_CCR_MEMIO 0x00000002
#define TX3927_DMA_CCR_ONEAD 0x00000001
@@ -250,7 +250,7 @@ struct tx3927_ccfg_reg {
/* see PCI_BASE_ADDRESS_XXX in linux/pci.h */
/* bits for PBAPMC */
-#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
+#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
#define TX3927_PCIC_PBAPMC_PBAEN 0x00000002
#define TX3927_PCIC_PBAPMC_BMCEN 0x00000001
@@ -282,7 +282,7 @@ struct tx3927_ccfg_reg {
#define TX3927_CCFG_TLBOFF 0x00020000
#define TX3927_CCFG_BEOW 0x00010000
#define TX3927_CCFG_WR 0x00008000
-#define TX3927_CCFG_TOE 0x00004000
+#define TX3927_CCFG_TOE 0x00004000
#define TX3927_CCFG_PCIXARB 0x00002000
#define TX3927_CCFG_PCI3 0x00001000
#define TX3927_CCFG_PSNP 0x00000800
@@ -301,8 +301,8 @@ struct tx3927_ccfg_reg {
#define TX3927_PCFG_SELALL 0x0003ffff
#define TX3927_PCFG_SELCS 0x00020000
#define TX3927_PCFG_SELDSF 0x00010000
-#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
-#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
+#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
+#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
#define TX3927_PCFG_SELSIO_ALL 0x00003000
#define TX3927_PCFG_SELSIO(ch) (0x00001000<<(ch))
#define TX3927_PCFG_SELTMR_ALL 0x00000e00
diff --git a/arch/mips/include/asm/txx9/tx4927.h b/arch/mips/include/asm/txx9/tx4927.h
index 18c98c52afdb..284eea752d55 100644
--- a/arch/mips/include/asm/txx9/tx4927.h
+++ b/arch/mips/include/asm/txx9/tx4927.h
@@ -1,6 +1,6 @@
/*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2006 MontaVista Software Inc.
*
@@ -33,11 +33,11 @@
#include <asm/txx9/tx4927pcic.h>
#ifdef CONFIG_64BIT
-#define TX4927_REG_BASE 0xffffffffff1f0000UL
+#define TX4927_REG_BASE 0xffffffffff1f0000UL
#else
-#define TX4927_REG_BASE 0xff1f0000UL
+#define TX4927_REG_BASE 0xff1f0000UL
#endif
-#define TX4927_REG_SIZE 0x00010000
+#define TX4927_REG_SIZE 0x00010000
#define TX4927_SDRAMC_REG (TX4927_REG_BASE + 0x8000)
#define TX4927_EBUSC_REG (TX4927_REG_BASE + 0x9000)
@@ -118,10 +118,10 @@ struct tx4927_ccfg_reg {
#define TX4927_CCFG_DIVMODE_2 (0x4 << 17)
#define TX4927_CCFG_DIVMODE_3 (0x5 << 17)
#define TX4927_CCFG_DIVMODE_4 (0x6 << 17)
-#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
+#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
#define TX4927_CCFG_BEOW 0x00010000
#define TX4927_CCFG_WR 0x00008000
-#define TX4927_CCFG_TOE 0x00004000
+#define TX4927_CCFG_TOE 0x00004000
#define TX4927_CCFG_PCIARB 0x00002000
#define TX4927_CCFG_PCIDIVMODE_MASK 0x00001800
#define TX4927_CCFG_PCIDIVMODE_2_5 0x00000000
@@ -136,10 +136,10 @@ struct tx4927_ccfg_reg {
/* PCFG : Pin Configuration */
#define TX4927_PCFG_SDCLKDLY_MASK 0x30000000
-#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
#define TX4927_PCFG_SYSCLKEN 0x08000000
-#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
-#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000
#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
#define TX4927_PCFG_SEL2 0x00000200
diff --git a/arch/mips/include/asm/txx9/tx4927pcic.h b/arch/mips/include/asm/txx9/tx4927pcic.h
index c470b8a5fe57..9eab2698caec 100644
--- a/arch/mips/include/asm/txx9/tx4927pcic.h
+++ b/arch/mips/include/asm/txx9/tx4927pcic.h
@@ -93,7 +93,7 @@ struct tx4927_pcic_reg {
/* bits for PBACFG */
#define TX4927_PCIC_PBACFG_FIXPA 0x00000008
-#define TX4927_PCIC_PBACFG_RPBA 0x00000004
+#define TX4927_PCIC_PBACFG_RPBA 0x00000004
#define TX4927_PCIC_PBACFG_PBAEN 0x00000002
#define TX4927_PCIC_PBACFG_BMCEN 0x00000001
@@ -165,7 +165,7 @@ struct tx4927_pcic_reg {
#define TX4927_PCIC_PDMCFG_CHNEN 0x00000080
#define TX4927_PCIC_PDMCFG_XFRACT 0x00000040
#define TX4927_PCIC_PDMCFG_BSWAP 0x00000020
-#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
+#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
#define TX4927_PCIC_PDMCFG_XFRSIZE_1DW 0x00000000
#define TX4927_PCIC_PDMCFG_XFRSIZE_1QW 0x00000004
#define TX4927_PCIC_PDMCFG_XFRSIZE_4QW 0x00000008
@@ -174,7 +174,7 @@ struct tx4927_pcic_reg {
/* bits for PDMSTS */
#define TX4927_PCIC_PDMSTS_REQCNT_MASK 0x3f000000
-#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
+#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
#define TX4927_PCIC_PDMSTS_FIFOWP_MASK 0x000c0000
#define TX4927_PCIC_PDMSTS_FIFORP_MASK 0x00030000
#define TX4927_PCIC_PDMSTS_ERRINT 0x00000800
diff --git a/arch/mips/include/asm/txx9/tx4938.h b/arch/mips/include/asm/txx9/tx4938.h
index 8a178f186f7d..6ca767ee6467 100644
--- a/arch/mips/include/asm/txx9/tx4938.h
+++ b/arch/mips/include/asm/txx9/tx4938.h
@@ -16,11 +16,11 @@
#include <asm/txx9/tx4927.h>
#ifdef CONFIG_64BIT
-#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
#else
-#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
#endif
-#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
+#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
/* NDFMC, SRAMC, PCIC1, SPIC: TX4938 only */
#define TX4938_NDFMC_REG (TX4938_REG_BASE + 0x5000)
@@ -72,16 +72,16 @@ struct tx4938_ccfg_reg {
#define TX4938_NUM_IR_DMA 4
#define TX4938_IR_DMA(ch, n) ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */
#define TX4938_IR_PIO 14
-#define TX4938_IR_PDMAC 15
+#define TX4938_IR_PDMAC 15
#define TX4938_IR_PCIC 16
#define TX4938_NUM_IR_TMR 3
#define TX4938_IR_TMR(n) (17 + (n))
-#define TX4938_IR_NDFMC 21
+#define TX4938_IR_NDFMC 21
#define TX4938_IR_PCIERR 22
#define TX4938_IR_PCIPME 23
#define TX4938_IR_ACLC 24
#define TX4938_IR_ACLCPME 25
-#define TX4938_IR_PCIC1 26
+#define TX4938_IR_PCIC1 26
#define TX4938_IR_SPI 31
#define TX4938_NUM_IR 32
/* multiplex */
@@ -105,10 +105,10 @@ struct tx4938_ccfg_reg {
#define TX4938_CCFG_PCI1_66 0x00200000
#define TX4938_CCFG_DIVMODE_MASK 0x001e0000
#define TX4938_CCFG_DIVMODE_2 (0x4 << 17)
-#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
+#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
#define TX4938_CCFG_DIVMODE_3 (0x5 << 17)
#define TX4938_CCFG_DIVMODE_4 (0x6 << 17)
-#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
+#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
#define TX4938_CCFG_DIVMODE_8 (0x0 << 17)
#define TX4938_CCFG_DIVMODE_10 (0xb << 17)
#define TX4938_CCFG_DIVMODE_12 (0x1 << 17)
@@ -116,7 +116,7 @@ struct tx4938_ccfg_reg {
#define TX4938_CCFG_DIVMODE_18 (0x9 << 17)
#define TX4938_CCFG_BEOW 0x00010000
#define TX4938_CCFG_WR 0x00008000
-#define TX4938_CCFG_TOE 0x00004000
+#define TX4938_CCFG_TOE 0x00004000
#define TX4938_CCFG_PCIARB 0x00002000
#define TX4938_CCFG_PCIDIVMODE_MASK 0x00001c00
#define TX4938_CCFG_PCIDIVMODE_4 (0x1 << 10)
@@ -141,10 +141,10 @@ struct tx4938_ccfg_reg {
#define TX4938_PCFG_SPI_SEL 0x0800000000000000ULL
#define TX4938_PCFG_NDF_SEL 0x0400000000000000ULL
#define TX4938_PCFG_SDCLKDLY_MASK 0x30000000
-#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
#define TX4938_PCFG_SYSCLKEN 0x08000000
-#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
-#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
#define TX4938_PCFG_PCICLKEN_ALL 0x003f0000
#define TX4938_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
#define TX4938_PCFG_SEL2 0x00000200
@@ -230,8 +230,8 @@ struct tx4938_ccfg_reg {
#define TX4938_DMA_CCR_XFSZ_2W TX4938_DMA_CCR_XFSZ(3)
#define TX4938_DMA_CCR_XFSZ_4W TX4938_DMA_CCR_XFSZ(4)
#define TX4938_DMA_CCR_XFSZ_8W TX4938_DMA_CCR_XFSZ(5)
-#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
-#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
+#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
+#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
#define TX4938_DMA_CCR_MEMIO 0x00000002
#define TX4938_DMA_CCR_SNGAD 0x00000001
@@ -263,9 +263,9 @@ struct tx4938_ccfg_reg {
#define TX4938_REV_PCODE() \
((__u32)__raw_readq(&tx4938_ccfgptr->crir) >> 16)
-#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
+#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
#define tx4938_ccfg_set(bits) tx4927_ccfg_set(bits)
-#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
+#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
#define TX4938_SDRAMC_CR(ch) TX4927_SDRAMC_CR(ch)
#define TX4938_SDRAMC_BA(ch) TX4927_SDRAMC_BA(ch)
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index d4f342cd5939..6d667087f2aa 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -14,11 +14,11 @@
#include <asm/txx9/tx4938.h>
#ifdef CONFIG_64BIT
-#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
#else
-#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
#endif
-#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
+#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
#define TX4939_ATA_REG(ch) (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
#define TX4939_NDFMC_REG (TX4939_REG_BASE + 0x5000)
@@ -189,14 +189,14 @@ struct tx4939_vpc_desc {
#define TX4939_IR_INT(n) (3 + (n))
#define TX4939_NUM_IR_ETH 2
#define TX4939_IR_ETH(n) ((n) ? 43 : 6)
-#define TX4939_IR_VIDEO 7
+#define TX4939_IR_VIDEO 7
#define TX4939_IR_CIR 8
#define TX4939_NUM_IR_SIO 4
#define TX4939_IR_SIO(n) ((n) ? 43 + (n) : 9) /* 9,44-46 */
#define TX4939_NUM_IR_DMA 4
#define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
#define TX4939_IR_IRC 14
-#define TX4939_IR_PDMAC 15
+#define TX4939_IR_PDMAC 15
#define TX4939_NUM_IR_TMR 6
#define TX4939_IR_TMR(n) (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */
#define TX4939_NUM_IR_ATA 2
@@ -210,10 +210,10 @@ struct tx4939_vpc_desc {
#define TX4939_IR_I2C 33
#define TX4939_IR_SPI 34
#define TX4939_IR_PCIC 35
-#define TX4939_IR_PCIC1 36
+#define TX4939_IR_PCIC1 36
#define TX4939_IR_PCIERR 37
#define TX4939_IR_PCIPME 38
-#define TX4939_IR_NDFMC 39
+#define TX4939_IR_NDFMC 39
#define TX4939_IR_ACLCPME 40
#define TX4939_IR_RTC 41
#define TX4939_IR_RND 42
@@ -239,7 +239,7 @@ struct tx4939_vpc_desc {
#define TX4939_CCFG_PCI66 0x00800000
#define TX4939_CCFG_PCIMODE 0x00400000
#define TX4939_CCFG_SSCG 0x00100000
-#define TX4939_CCFG_MULCLK_MASK 0x000e0000
+#define TX4939_CCFG_MULCLK_MASK 0x000e0000
#define TX4939_CCFG_MULCLK_8 (0x7 << 17)
#define TX4939_CCFG_MULCLK_9 (0x0 << 17)
#define TX4939_CCFG_MULCLK_10 (0x1 << 17)
@@ -250,7 +250,7 @@ struct tx4939_vpc_desc {
#define TX4939_CCFG_MULCLK_15 (0x6 << 17)
#define TX4939_CCFG_BEOW 0x00010000
#define TX4939_CCFG_WR 0x00008000
-#define TX4939_CCFG_TOE 0x00004000
+#define TX4939_CCFG_TOE 0x00004000
#define TX4939_CCFG_PCIARB 0x00002000
#define TX4939_CCFG_YDIVMODE_MASK 0x00001c00
#define TX4939_CCFG_YDIVMODE_2 (0x0 << 10)
@@ -275,7 +275,7 @@ struct tx4939_vpc_desc {
#define TX4939_PCFG_I2CMODE 0x1000000000000000ULL
#define TX4939_PCFG_I2SMODE_MASK 0x0c00000000000000ULL
#define TX4939_PCFG_I2SMODE_GPIO 0x0c00000000000000ULL
-#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
+#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
#define TX4939_PCFG_I2SMODE_I2S_ALT 0x0400000000000000ULL
#define TX4939_PCFG_I2SMODE_ACLC 0x0000000000000000ULL
#define TX4939_PCFG_SIO3MODE 0x0200000000000000ULL
@@ -392,15 +392,15 @@ struct tx4939_vpc_desc {
/*
* CRYPTO
*/
-#define TX4939_CRYPTO_CSR_SAESO 0x08000000
-#define TX4939_CRYPTO_CSR_SAESI 0x04000000
-#define TX4939_CRYPTO_CSR_SDESO 0x02000000
-#define TX4939_CRYPTO_CSR_SDESI 0x01000000
+#define TX4939_CRYPTO_CSR_SAESO 0x08000000
+#define TX4939_CRYPTO_CSR_SAESI 0x04000000
+#define TX4939_CRYPTO_CSR_SDESO 0x02000000
+#define TX4939_CRYPTO_CSR_SDESI 0x01000000
#define TX4939_CRYPTO_CSR_INDXBST_MASK 0x00700000
#define TX4939_CRYPTO_CSR_INDXBST(n) ((n) << 20)
-#define TX4939_CRYPTO_CSR_TOINT 0x00080000
-#define TX4939_CRYPTO_CSR_DCINT 0x00040000
-#define TX4939_CRYPTO_CSR_GBINT 0x00010000
+#define TX4939_CRYPTO_CSR_TOINT 0x00080000
+#define TX4939_CRYPTO_CSR_DCINT 0x00040000
+#define TX4939_CRYPTO_CSR_GBINT 0x00010000
#define TX4939_CRYPTO_CSR_INDXAST_MASK 0x0000e000
#define TX4939_CRYPTO_CSR_INDXAST(n) ((n) << 13)
#define TX4939_CRYPTO_CSR_CSWAP_MASK 0x00001800
@@ -418,7 +418,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_CSR_PDINT_END 0x00000040
#define TX4939_CRYPTO_CSR_PDINT_NEXT 0x00000080
#define TX4939_CRYPTO_CSR_PDINT_NONE 0x000000c0
-#define TX4939_CRYPTO_CSR_GINTE 0x00000008
+#define TX4939_CRYPTO_CSR_GINTE 0x00000008
#define TX4939_CRYPTO_CSR_RSTD 0x00000004
#define TX4939_CRYPTO_CSR_RSTC 0x00000002
#define TX4939_CRYPTO_CSR_ENCR 0x00000001
@@ -442,7 +442,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_DESC_START 0x00000200
#define TX4939_CRYPTO_DESC_END 0x00000100
#define TX4939_CRYPTO_DESC_XOR 0x00000010
-#define TX4939_CRYPTO_DESC_LAST 0x00000008
+#define TX4939_CRYPTO_DESC_LAST 0x00000008
#define TX4939_CRYPTO_DESC_ERR_MASK 0x00000006
#define TX4939_CRYPTO_DESC_ERR_NONE 0x00000000
#define TX4939_CRYPTO_DESC_ERR_TOUT 0x00000002
@@ -457,7 +457,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_NR_SET 6
-#define TX4939_CRYPTO_RCSR_INTE 0x00000008
+#define TX4939_CRYPTO_RCSR_INTE 0x00000008
#define TX4939_CRYPTO_RCSR_RST 0x00000004
#define TX4939_CRYPTO_RCSR_FIN 0x00000002
#define TX4939_CRYPTO_RCSR_ST 0x00000001
@@ -480,8 +480,8 @@ struct tx4939_vpc_desc {
#define TX4939_VPC_CTRLA_PDINT_ALL 0x00000000
#define TX4939_VPC_CTRLA_PDINT_NEXT 0x00000010
#define TX4939_VPC_CTRLA_PDINT_NONE 0x00000030
-#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
-#define TX4939_VPC_CTRLA_VDMODE 0x00000004
+#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
+#define TX4939_VPC_CTRLA_VDMODE 0x00000004
#define TX4939_VPC_CTRLA_VDFOR 0x00000002
#define TX4939_VPC_CTRLA_ENVPC 0x00000001
@@ -512,9 +512,9 @@ struct tx4939_vpc_desc {
((__u32)((__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_BCFG_MASK) \
>> 32))
-#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
+#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
#define tx4939_ccfg_set(bits) tx4938_ccfg_set(bits)
-#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
+#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
#define TX4939_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
#define TX4939_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
@@ -522,7 +522,7 @@ struct tx4939_vpc_desc {
#define TX4939_EBUSC_WIDTH(ch) \
(16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
-/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
+/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
#define TX4939_SCLK0(mst) \
((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2)
diff --git a/arch/mips/include/asm/txx9tmr.h b/arch/mips/include/asm/txx9tmr.h
index 67f70a8f09bd..466a3def3866 100644
--- a/arch/mips/include/asm/txx9tmr.h
+++ b/arch/mips/include/asm/txx9tmr.h
@@ -59,9 +59,9 @@ void txx9_clockevent_init(unsigned long baseaddr, int irq,
void txx9_tmr_init(unsigned long baseaddr);
#ifdef CONFIG_CPU_TX39XX
-#define TXX9_TIMER_BITS 24
+#define TXX9_TIMER_BITS 24
#else
-#define TXX9_TIMER_BITS 32
+#define TXX9_TIMER_BITS 32
#endif
#endif /* __ASM_TXX9TMR_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 3b92efef56d3..bd87e36bf26a 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -87,12 +87,12 @@ extern u64 __ua_limit;
/*
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- * to write to a block, it is always safe to read from it.
+ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ * to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Checks if a pointer to a block of memory in user space is valid.
*
@@ -124,10 +124,10 @@ extern u64 __ua_limit;
/*
* put_user: - Write a simple value into user space.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -138,15 +138,15 @@ extern u64 __ua_limit;
*
* Returns zero on success, or -EFAULT on error.
*/
-#define put_user(x,ptr) \
+#define put_user(x,ptr) \
__put_user_check((x), (ptr), sizeof(*(ptr)))
/*
* get_user: - Get a simple variable from user space.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -163,10 +163,10 @@ extern u64 __ua_limit;
/*
* __put_user: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -185,10 +185,10 @@ extern u64 __ua_limit;
/*
* __get_user: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -390,10 +390,10 @@ extern void __put_user_unknown(void);
/*
* put_user_unaligned: - Write a simple value into user space.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -409,10 +409,10 @@ extern void __put_user_unknown(void);
/*
* get_user_unaligned: - Get a simple variable from user space.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -429,10 +429,10 @@ extern void __put_user_unknown(void);
/*
* __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -451,10 +451,10 @@ extern void __put_user_unknown(void);
/*
* __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -543,7 +543,7 @@ do { \
*/
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
- unsigned long long __gu_tmp; \
+ unsigned long long __gu_tmp; \
\
__asm__ __volatile__( \
"1: ulw %1, (%3) \n" \
@@ -631,7 +631,7 @@ do { \
#define __put_user_unaligned_asm_ll32(ptr) \
{ \
__asm__ __volatile__( \
- "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
+ "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
"2: sw %D2, 4(%3) \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
@@ -658,7 +658,7 @@ extern void __put_user_unaligned_unknown(void);
#ifdef MODULE
#define __MODULE_JAL(destination) \
".set\tnoat\n\t" \
- __UA_LA "\t$1, " #destination "\n\t" \
+ __UA_LA "\t$1, " #destination "\n\t" \
"jalr\t$1\n\t" \
".set\tat\n\t"
#else
@@ -694,11 +694,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
/*
* __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
+ * @to: Destination address, in user space.
* @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -716,7 +716,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -731,7 +731,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -744,18 +744,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
/*
* copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
+ * @to: Destination address, in user space.
* @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space.
*
@@ -774,7 +774,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
@@ -827,11 +827,11 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
/*
* __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
+ * @to: Destination address, in kernel space.
* @from: Source address, in user space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -853,17 +853,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
__cu_len; \
})
/*
* copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
+ * @to: Destination address, in kernel space.
* @from: Source address, in user space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space.
*
@@ -885,7 +885,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
@@ -901,7 +901,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
__cu_len; \
})
@@ -915,18 +915,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_from = (from); \
__cu_len = (n); \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
- access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+ access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
*
* Zero a block of memory in user space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -966,7 +966,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
+ * least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
@@ -1005,7 +1005,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
/*
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
+ * least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
@@ -1060,7 +1060,7 @@ static inline long __strlen_user(const char __user *s)
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
@@ -1108,7 +1108,7 @@ static inline long __strnlen_user(const char __user *s, long n)
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
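(The kernel-doc comments retabbed in the hunks above spell out the calling convention for the user-copy helpers: copy_from_user()/copy_to_user() return the number of bytes that could not be copied, so zero means success and a non-zero result is normally turned into -EFAULT. A minimal, hypothetical ioctl-style sketch of that contract — the struct and handler names are illustrative and not part of this patch:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	struct demo_arg {		/* hypothetical user/kernel exchange buffer */
		int value;
	};

	static long demo_handle_ioctl(void __user *uarg)
	{
		struct demo_arg karg;

		/* Non-zero return means some bytes were left uncopied. */
		if (copy_from_user(&karg, uarg, sizeof(karg)))
			return -EFAULT;

		karg.value += 1;	/* work on the kernel-space copy */

		if (copy_to_user(uarg, &karg, sizeof(karg)))
			return -EFAULT;

		return 0;
	}
)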
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 7e0bf17c9324..058e941626a6 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc.
diff --git a/arch/mips/include/asm/user.h b/arch/mips/include/asm/user.h
index afa83a4c1888..6bad61b0a53a 100644
--- a/arch/mips/include/asm/user.h
+++ b/arch/mips/include/asm/user.h
@@ -20,7 +20,7 @@
* upage: 1 page consisting of a user struct that tells gdb
* what is present in the file. Directly after this is a
* copy of the task_struct, which is currently not used by gdb,
- * but it may come in handy at some point. All of the registers
+ * but it may come in handy at some point. All of the registers
* are stored as part of the upage. The upage should always be
* only one page long.
* data: The data segment follows next. We use current->end_text to
diff --git a/arch/mips/include/asm/vr41xx/pci.h b/arch/mips/include/asm/vr41xx/pci.h
index c231a3d6cfd8..a866918cfea5 100644
--- a/arch/mips/include/asm/vr41xx/pci.h
+++ b/arch/mips/include/asm/vr41xx/pci.h
@@ -20,7 +20,7 @@
#ifndef __NEC_VR41XX_PCI_H
#define __NEC_VR41XX_PCI_H
-#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
+#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
struct pci_master_address_conversion {
uint32_t bus_base_address;
diff --git a/arch/mips/include/asm/vr41xx/tb0287.h b/arch/mips/include/asm/vr41xx/tb0287.h
index 61bead68abf0..d58b5678f243 100644
--- a/arch/mips/include/asm/vr41xx/tb0287.h
+++ b/arch/mips/include/asm/vr41xx/tb0287.h
@@ -1,7 +1,7 @@
/*
* tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
*
- * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
+ * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
*
* This code is largely based on tb0219.h.
*
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
index 65e344532ded..9344e247a6c8 100644
--- a/arch/mips/include/asm/war.h
+++ b/arch/mips/include/asm/war.h
@@ -83,30 +83,30 @@
#endif
/*
- * Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata:
+ * Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata:
*
* 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
- * Hit_Invalidate_D and Create_Dirty_Excl_D should only be
- * executed if there is no other dcache activity. If the dcache is
- * accessed for another instruction immeidately preceding when these
- * cache instructions are executing, it is possible that the dcache
- * tag match outputs used by these cache instructions will be
- * incorrect. These cache instructions should be preceded by at least
- * four instructions that are not any kind of load or store
- * instruction.
- *
- * This is not allowed: lw
- * nop
- * nop
- * nop
- * cache Hit_Writeback_Invalidate_D
- *
- * This is allowed: lw
- * nop
- * nop
- * nop
- * nop
- * cache Hit_Writeback_Invalidate_D
+ * Hit_Invalidate_D and Create_Dirty_Excl_D should only be
+ * executed if there is no other dcache activity. If the dcache is
+ * accessed for another instruction immeidately preceding when these
+ * cache instructions are executing, it is possible that the dcache
+ * tag match outputs used by these cache instructions will be
+ * incorrect. These cache instructions should be preceded by at least
+ * four instructions that are not any kind of load or store
+ * instruction.
+ *
+ * This is not allowed: lw
+ * nop
+ * nop
+ * nop
+ * cache Hit_Writeback_Invalidate_D
+ *
+ * This is allowed: lw
+ * nop
+ * nop
+ * nop
+ * nop
+ * cache Hit_Writeback_Invalidate_D
*/
#ifndef R4600_V1_HIT_CACHEOP_WAR
#error Check setting of R4600_V1_HIT_CACHEOP_WAR for your platform
@@ -118,7 +118,7 @@
*
* R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
* Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
- * operate correctly if the internal data cache refill buffer is empty. These
+ * operate correctly if the internal data cache refill buffer is empty. These
* CACHE instructions should be separated from any potential data cache miss
* by a load instruction to an uncached address to empty the response buffer."
* (Revision 2.0 device errata from IDT available on http://www.idt.com/
diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h
index 79bac882a739..680e7efebbaf 100644
--- a/arch/mips/include/asm/xtalk/xtalk.h
+++ b/arch/mips/include/asm/xtalk/xtalk.h
@@ -16,15 +16,15 @@
/*
* User-level device driver visible types
*/
-typedef char xwidgetnum_t; /* xtalk widget number (0..15) */
+typedef char xwidgetnum_t; /* xtalk widget number (0..15) */
#define XWIDGET_NONE -1
-typedef int xwidget_part_num_t; /* xtalk widget part number */
+typedef int xwidget_part_num_t; /* xtalk widget part number */
#define XWIDGET_PART_NUM_NONE -1
-typedef int xwidget_rev_num_t; /* xtalk widget revision number */
+typedef int xwidget_rev_num_t; /* xtalk widget revision number */
#define XWIDGET_REV_NUM_NONE -1
@@ -37,15 +37,15 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
/* It is often convenient to fold the XIO target port
* number into the XIO address.
*/
-#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull)
-#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull)
-#define XIO_PORT_BITS (0xF000000000000000ull)
-#define XIO_PORT_SHIFT (60)
-
-#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0)
-#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS)
-#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
-#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
+#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull)
+#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull)
+#define XIO_PORT_BITS (0xF000000000000000ull)
+#define XIO_PORT_SHIFT (60)
+
+#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0)
+#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS)
+#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
+#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/xtalk/xwidget.h b/arch/mips/include/asm/xtalk/xwidget.h
index b4a13d7405ee..32e4e884f9b9 100644
--- a/arch/mips/include/asm/xtalk/xwidget.h
+++ b/arch/mips/include/asm/xtalk/xwidget.h
@@ -45,12 +45,12 @@
#define WIDGET_PENDING 0x0000001f
/* WIDGET_ERR_UPPER_ADDR */
-#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff
+#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff
/* WIDGET_CONTROL */
#define WIDGET_F_BAD_PKT 0x00010000
#define WIDGET_LLP_XBAR_CRD 0x0000f000
-#define WIDGET_LLP_XBAR_CRD_SHFT 12
+#define WIDGET_LLP_XBAR_CRD_SHFT 12
#define WIDGET_CLR_RLLP_CNT 0x00000800
#define WIDGET_CLR_TLLP_CNT 0x00000400
#define WIDGET_SYS_END 0x00000200
@@ -86,8 +86,8 @@
/*
* according to the crosstalk spec, only 32-bits access to the widget
- * configuration registers is allowed. some widgets may allow 64-bits
- * access but software should not depend on it. registers beyond the
+ * configuration registers is allowed. some widgets may allow 64-bits
+ * access but software should not depend on it. registers beyond the
* widget target flush register are widget dependent thus will not be
* defined here
*/
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 77d4fb33f75a..350ccccadcb9 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -8,6 +8,7 @@ header-y += byteorder.h
header-y += cachectl.h
header-y += errno.h
header-y += fcntl.h
+header-y += inst.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 9161e684cb4c..002c39ea20c3 100644
--- a/arch/mips/include/uapi/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
@@ -6,8 +6,8 @@
* Copyright (C) 1995, 2003 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_BREAK_H
-#define __ASM_BREAK_H
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
/*
* The following break codes are or were in use for specific purposes in
@@ -16,22 +16,14 @@
* non-Linux/MIPS object files or make use of them in the future.
*/
#define BRK_USERBP 0 /* User bp (used by debuggers) */
-#define BRK_KERNELBP 1 /* Break in the kernel */
-#define BRK_ABORT 2 /* Sometimes used by abort(3) to SIGIOT */
-#define BRK_BD_TAKEN 3 /* For bd slot emulation - not implemented */
-#define BRK_BD_NOTTAKEN 4 /* For bd slot emulation - not implemented */
#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
#define BRK_OVERFLOW 6 /* Overflow check */
#define BRK_DIVZERO 7 /* Divide by zero check */
#define BRK_RANGE 8 /* Range error check */
-#define BRK_STACKOVERFLOW 9 /* For Ada stackchecking */
-#define BRK_NORLD 10 /* No rld found - not used by Linux/MIPS */
-#define _BRK_THREADBP 11 /* For threads, user bp (used by debuggers) */
-#define BRK_BUG 512 /* Used by BUG() */
-#define BRK_KDB 513 /* Used in KDB_ENTER() */
+#define BRK_BUG 12 /* Used by BUG() */
#define BRK_MEMU 514 /* Used by FPU emulator */
#define BRK_KPROBE_BP 515 /* Kprobe break */
#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
#define BRK_MULOVF 1023 /* Multiply overflow */
-#endif /* __ASM_BREAK_H */
+#endif /* __UAPI_ASM_BREAK_H */
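(The codes kept above are the ones the MIPS breakpoint trap handler still recognises; the code number travels in the immediate field of a break instruction. A rough, hedged illustration of that mechanism — this pattern is not part of the patch, and the helper name is made up:

	#include <asm/break.h>		/* BRK_OVERFLOW et al., as exported above */

	/* Illustrative sketch only: embed a break code in the instruction;
	 * the kernel's breakpoint handler reads the code field back out and,
	 * as I understand it, maps the overflow/divide-by-zero codes to SIGFPE. */
	static inline void demo_raise_overflow_break(void)
	{
		__asm__ __volatile__("break %0" : : "i" (BRK_OVERFLOW));
	}
)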
diff --git a/arch/mips/include/uapi/asm/cachectl.h b/arch/mips/include/uapi/asm/cachectl.h
index f3ce721861d3..230390908773 100644
--- a/arch/mips/include/uapi/asm/cachectl.h
+++ b/arch/mips/include/uapi/asm/cachectl.h
@@ -5,15 +5,15 @@
*
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle
*/
-#ifndef _ASM_CACHECTL
-#define _ASM_CACHECTL
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
/*
* Options for cacheflush system call
*/
-#define ICACHE (1<<0) /* flush instruction cache */
-#define DCACHE (1<<1) /* writeback and flush data cache */
-#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+#define ICACHE (1<<0) /* flush instruction cache */
+#define DCACHE (1<<1) /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
/*
* Caching modes for the cachectl(2) call
diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h
index bd67b15042ec..31575e2fd1bd 100644
--- a/arch/mips/include/uapi/asm/errno.h
+++ b/arch/mips/include/uapi/asm/errno.h
@@ -14,95 +14,95 @@
#include <asm-generic/errno-base.h>
-#define ENOMSG 35 /* No message of desired type */
-#define EIDRM 36 /* Identifier removed */
-#define ECHRNG 37 /* Channel number out of range */
-#define EL2NSYNC 38 /* Level 2 not synchronized */
-#define EL3HLT 39 /* Level 3 halted */
-#define EL3RST 40 /* Level 3 reset */
-#define ELNRNG 41 /* Link number out of range */
-#define EUNATCH 42 /* Protocol driver not attached */
-#define ENOCSI 43 /* No CSI structure available */
-#define EL2HLT 44 /* Level 2 halted */
-#define EDEADLK 45 /* Resource deadlock would occur */
-#define ENOLCK 46 /* No record locks available */
-#define EBADE 50 /* Invalid exchange */
-#define EBADR 51 /* Invalid request descriptor */
-#define EXFULL 52 /* Exchange full */
-#define ENOANO 53 /* No anode */
-#define EBADRQC 54 /* Invalid request code */
-#define EBADSLT 55 /* Invalid slot */
-#define EDEADLOCK 56 /* File locking deadlock error */
-#define EBFONT 59 /* Bad font file format */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* No data available */
-#define ETIME 62 /* Timer expired */
-#define ENOSR 63 /* Out of streams resources */
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* Object is remote */
-#define ENOLINK 67 /* Link has been severed */
-#define EADV 68 /* Advertise error */
-#define ESRMNT 69 /* Srmount error */
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-#define EDOTDOT 73 /* RFS specific error */
-#define EMULTIHOP 74 /* Multihop attempted */
-#define EBADMSG 77 /* Not a data message */
-#define ENAMETOOLONG 78 /* File name too long */
-#define EOVERFLOW 79 /* Value too large for defined data type */
-#define ENOTUNIQ 80 /* Name not unique on network */
-#define EBADFD 81 /* File descriptor in bad state */
-#define EREMCHG 82 /* Remote address changed */
-#define ELIBACC 83 /* Can not access a needed shared library */
-#define ELIBBAD 84 /* Accessing a corrupted shared library */
-#define ELIBSCN 85 /* .lib section in a.out corrupted */
-#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
-#define ELIBEXEC 87 /* Cannot exec a shared library directly */
-#define EILSEQ 88 /* Illegal byte sequence */
-#define ENOSYS 89 /* Function not implemented */
-#define ELOOP 90 /* Too many symbolic links encountered */
-#define ERESTART 91 /* Interrupted system call should be restarted */
-#define ESTRPIPE 92 /* Streams pipe error */
-#define ENOTEMPTY 93 /* Directory not empty */
-#define EUSERS 94 /* Too many users */
-#define ENOTSOCK 95 /* Socket operation on non-socket */
-#define EDESTADDRREQ 96 /* Destination address required */
-#define EMSGSIZE 97 /* Message too long */
-#define EPROTOTYPE 98 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 99 /* Protocol not available */
-#define EPROTONOSUPPORT 120 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
-#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
-#define EPFNOSUPPORT 123 /* Protocol family not supported */
-#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
-#define EADDRINUSE 125 /* Address already in use */
-#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
-#define ENETDOWN 127 /* Network is down */
-#define ENETUNREACH 128 /* Network is unreachable */
-#define ENETRESET 129 /* Network dropped connection because of reset */
-#define ECONNABORTED 130 /* Software caused connection abort */
-#define ECONNRESET 131 /* Connection reset by peer */
-#define ENOBUFS 132 /* No buffer space available */
-#define EISCONN 133 /* Transport endpoint is already connected */
-#define ENOTCONN 134 /* Transport endpoint is not connected */
-#define EUCLEAN 135 /* Structure needs cleaning */
-#define ENOTNAM 137 /* Not a XENIX named type file */
-#define ENAVAIL 138 /* No XENIX semaphores available */
-#define EISNAM 139 /* Is a named type file */
-#define EREMOTEIO 140 /* Remote I/O error */
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
#define EINIT 141 /* Reserved */
#define EREMDEV 142 /* Error 142 */
-#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
-#define ETOOMANYREFS 144 /* Too many references: cannot splice */
-#define ETIMEDOUT 145 /* Connection timed out */
-#define ECONNREFUSED 146 /* Connection refused */
-#define EHOSTDOWN 147 /* Host is down */
-#define EHOSTUNREACH 148 /* No route to host */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define EALREADY 149 /* Operation already in progress */
-#define EINPROGRESS 150 /* Operation now in progress */
-#define ESTALE 151 /* Stale NFS file handle */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale NFS file handle */
#define ECANCELED 158 /* AIO operation canceled */
/*
@@ -110,16 +110,16 @@
*/
#define ENOMEDIUM 159 /* No medium found */
#define EMEDIUMTYPE 160 /* Wrong medium type */
-#define ENOKEY 161 /* Required key not available */
-#define EKEYEXPIRED 162 /* Key has expired */
-#define EKEYREVOKED 163 /* Key has been revoked */
-#define EKEYREJECTED 164 /* Key was rejected by service */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
/* for robust mutexes */
-#define EOWNERDEAD 165 /* Owner died */
-#define ENOTRECOVERABLE 166 /* State not recoverable */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
-#define ERFKILL 167 /* Operation not possible due to RF-kill */
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
#define EHWPOISON 168 /* Memory page has hardware error */
diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h
index 75eddedcfc3e..0bda78f70e1e 100644
--- a/arch/mips/include/uapi/asm/fcntl.h
+++ b/arch/mips/include/uapi/asm/fcntl.h
@@ -12,7 +12,7 @@
#define O_APPEND 0x0008
#define O_DSYNC 0x0010 /* used to be O_SYNC, see below */
#define O_NONBLOCK 0x0080
-#define O_CREAT 0x0100 /* not fcntl */
+#define O_CREAT 0x0100 /* not fcntl */
#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
@@ -50,7 +50,7 @@
/*
* The flavours of struct flock. "struct flock" is the ABI compliant
- * variant. Finally struct flock64 is the LFS variant of struct flock. As
+ * variant. Finally struct flock64 is the LFS variant of struct flock. As
* a historic accident and inconsistence with the ABI definition it doesn't
* contain all the same fields as struct flock.
*/
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
new file mode 100644
index 000000000000..4d078815eaa5
--- /dev/null
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -0,0 +1,331 @@
+/*
+ * Format of an instruction in memory.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
+ */
+#ifndef _UAPI_ASM_INST_H
+#define _UAPI_ASM_INST_H
+
+/*
+ * Major opcodes; before MIPS IV cop1x was called cop3.
+ */
+enum major_op {
+ spec_op, bcond_op, j_op, jal_op,
+ beq_op, bne_op, blez_op, bgtz_op,
+ addi_op, addiu_op, slti_op, sltiu_op,
+ andi_op, ori_op, xori_op, lui_op,
+ cop0_op, cop1_op, cop2_op, cop1x_op,
+ beql_op, bnel_op, blezl_op, bgtzl_op,
+ daddi_op, daddiu_op, ldl_op, ldr_op,
+ spec2_op, jalx_op, mdmx_op, spec3_op,
+ lb_op, lh_op, lwl_op, lw_op,
+ lbu_op, lhu_op, lwr_op, lwu_op,
+ sb_op, sh_op, swl_op, sw_op,
+ sdl_op, sdr_op, swr_op, cache_op,
+ ll_op, lwc1_op, lwc2_op, pref_op,
+ lld_op, ldc1_op, ldc2_op, ld_op,
+ sc_op, swc1_op, swc2_op, major_3b_op,
+ scd_op, sdc1_op, sdc2_op, sd_op
+};
+
+/*
+ * func field of spec opcode.
+ */
+enum spec_op {
+ sll_op, movc_op, srl_op, sra_op,
+ sllv_op, pmon_op, srlv_op, srav_op,
+ jr_op, jalr_op, movz_op, movn_op,
+ syscall_op, break_op, spim_op, sync_op,
+ mfhi_op, mthi_op, mflo_op, mtlo_op,
+ dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
+ mult_op, multu_op, div_op, divu_op,
+ dmult_op, dmultu_op, ddiv_op, ddivu_op,
+ add_op, addu_op, sub_op, subu_op,
+ and_op, or_op, xor_op, nor_op,
+ spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
+ dadd_op, daddu_op, dsub_op, dsubu_op,
+ tge_op, tgeu_op, tlt_op, tltu_op,
+ teq_op, spec5_unused_op, tne_op, spec6_unused_op,
+ dsll_op, spec7_unused_op, dsrl_op, dsra_op,
+ dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+};
+
+/*
+ * func field of spec2 opcode.
+ */
+enum spec2_op {
+ madd_op, maddu_op, mul_op, spec2_3_unused_op,
+ msub_op, msubu_op, /* more unused ops */
+ clz_op = 0x20, clo_op,
+ dclz_op = 0x24, dclo_op,
+ sdbpp_op = 0x3f
+};
+
+/*
+ * func field of spec3 opcode.
+ */
+enum spec3_op {
+ ext_op, dextm_op, dextu_op, dext_op,
+ ins_op, dinsm_op, dinsu_op, dins_op,
+ lx_op = 0x0a,
+ bshfl_op = 0x20,
+ dbshfl_op = 0x24,
+ rdhwr_op = 0x3b
+};
+
+/*
+ * rt field of bcond opcodes.
+ */
+enum rt_op {
+ bltz_op, bgez_op, bltzl_op, bgezl_op,
+ spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
+ tgei_op, tgeiu_op, tlti_op, tltiu_op,
+ teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+ bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+ rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
+ rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+};
+
+/*
+ * rs field of cop opcodes.
+ */
+enum cop_op {
+ mfc_op = 0x00, dmfc_op = 0x01,
+ cfc_op = 0x02, mtc_op = 0x04,
+ dmtc_op = 0x05, ctc_op = 0x06,
+ bc_op = 0x08, cop_op = 0x10,
+ copm_op = 0x18
+};
+
+/*
+ * rt field of cop.bc_op opcodes
+ */
+enum bcop_op {
+ bcf_op, bct_op, bcfl_op, bctl_op
+};
+
+/*
+ * func field of cop0 coi opcodes.
+ */
+enum cop0_coi_func {
+ tlbr_op = 0x01, tlbwi_op = 0x02,
+ tlbwr_op = 0x06, tlbp_op = 0x08,
+ rfe_op = 0x10, eret_op = 0x18
+};
+
+/*
+ * func field of cop0 com opcodes.
+ */
+enum cop0_com_func {
+ tlbr1_op = 0x01, tlbw_op = 0x02,
+ tlbp1_op = 0x08, dctr_op = 0x09,
+ dctw_op = 0x0a
+};
+
+/*
+ * fmt field of cop1 opcodes.
+ */
+enum cop1_fmt {
+ s_fmt, d_fmt, e_fmt, q_fmt,
+ w_fmt, l_fmt
+};
+
+/*
+ * func field of cop1 instructions using d, s or w format.
+ */
+enum cop1_sdw_func {
+ fadd_op = 0x00, fsub_op = 0x01,
+ fmul_op = 0x02, fdiv_op = 0x03,
+ fsqrt_op = 0x04, fabs_op = 0x05,
+ fmov_op = 0x06, fneg_op = 0x07,
+ froundl_op = 0x08, ftruncl_op = 0x09,
+ fceill_op = 0x0a, ffloorl_op = 0x0b,
+ fround_op = 0x0c, ftrunc_op = 0x0d,
+ fceil_op = 0x0e, ffloor_op = 0x0f,
+ fmovc_op = 0x11, fmovz_op = 0x12,
+ fmovn_op = 0x13, frecip_op = 0x15,
+ frsqrt_op = 0x16, fcvts_op = 0x20,
+ fcvtd_op = 0x21, fcvte_op = 0x22,
+ fcvtw_op = 0x24, fcvtl_op = 0x25,
+ fcmp_op = 0x30
+};
+
+/*
+ * func field of cop1x opcodes (MIPS IV).
+ */
+enum cop1x_func {
+ lwxc1_op = 0x00, ldxc1_op = 0x01,
+ pfetch_op = 0x07, swxc1_op = 0x08,
+ sdxc1_op = 0x09, madd_s_op = 0x20,
+ madd_d_op = 0x21, madd_e_op = 0x22,
+ msub_s_op = 0x28, msub_d_op = 0x29,
+ msub_e_op = 0x2a, nmadd_s_op = 0x30,
+ nmadd_d_op = 0x31, nmadd_e_op = 0x32,
+ nmsub_s_op = 0x38, nmsub_d_op = 0x39,
+ nmsub_e_op = 0x3a
+};
+
+/*
+ * func field for mad opcodes (MIPS IV).
+ */
+enum mad_func {
+ madd_fp_op = 0x08, msub_fp_op = 0x0a,
+ nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
+};
+
+/*
+ * func field for special3 lx opcodes (Cavium Octeon).
+ */
+enum lx_func {
+ lwx_op = 0x00,
+ lhx_op = 0x04,
+ lbux_op = 0x06,
+ ldx_op = 0x08,
+ lwux_op = 0x10,
+ lhux_op = 0x14,
+ lbx_op = 0x16,
+};
+
+/*
+ * Damn ... bitfields depend on byteorder :-(
+ */
+#ifdef __MIPSEB__
+#define BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#elif defined(__MIPSEL__)
+
+#define BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
+#endif
+
+struct j_format {
+ BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+ BITFIELD_FIELD(unsigned int target : 26,
+ ;))
+};
+
+struct i_format { /* signed immediate format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(signed int simmediate : 16,
+ ;))))
+};
+
+struct u_format { /* unsigned immediate format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int uimmediate : 16,
+ ;))))
+};
+
+struct c_format { /* Cache (>= R6000) format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int c_op : 3,
+ BITFIELD_FIELD(unsigned int cache : 2,
+ BITFIELD_FIELD(unsigned int simmediate : 16,
+ ;)))))
+};
+
+struct r_format { /* Register format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct p_format { /* Performance counter format (R10000) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct f_format { /* FPU register format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int : 1,
+ BITFIELD_FIELD(unsigned int fmt : 4,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int fr : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 4,
+ BITFIELD_FIELD(unsigned int fmt : 2,
+ ;)))))))
+};
+
+struct b_format { /* BREAK and SYSCALL */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int code : 20,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))
+};
+
+struct ps_format { /* MIPS-3D / paired single format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct v_format { /* MDMX vector format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int sel : 4,
+ BITFIELD_FIELD(unsigned int fmt : 1,
+ BITFIELD_FIELD(unsigned int vt : 5,
+ BITFIELD_FIELD(unsigned int vs : 5,
+ BITFIELD_FIELD(unsigned int vd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+union mips_instruction {
+ unsigned int word;
+ unsigned short halfword[2];
+ unsigned char byte[4];
+ struct j_format j_format;
+ struct i_format i_format;
+ struct u_format u_format;
+ struct c_format c_format;
+ struct r_format r_format;
+ struct p_format p_format;
+ struct f_format f_format;
+ struct ma_format ma_format;
+ struct b_format b_format;
+ struct ps_format ps_format;
+ struct v_format v_format;
+};
+
+#endif /* _UAPI_ASM_INST_H */
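[Editor's note -- illustrative sketch, not part of the patch above.] The new header only defines the encodings; a short userspace sketch may make the decoding side clearer. It assumes the file is reachable as <asm/inst.h> (it lives under uapi/, so that is the expected exported path) and that the word being decoded matches the host's endianness, which is exactly what the BITFIELD_FIELD macro accounts for. The enum constants and bitfield names come straight from the header above.

/* Hypothetical decoder sketch -- not part of this patch. */
#include <stdio.h>
#include <asm/inst.h>		/* the header added above */

static void decode(unsigned int word)
{
	union mips_instruction insn;

	insn.word = word;

	switch (insn.i_format.opcode) {
	case addiu_op:
		printf("addiu r%u, r%u, %d\n",
		       (unsigned int)insn.i_format.rt,
		       (unsigned int)insn.i_format.rs,
		       (int)insn.i_format.simmediate);
		break;
	case j_op:
		/* 26-bit target is a word index within the current 256 MB region */
		printf("j target 0x%x (word index)\n",
		       (unsigned int)insn.j_format.target);
		break;
	default:
		printf("major opcode %u\n", (unsigned int)insn.i_format.opcode);
	}
}

int main(void)
{
	decode(0x24020001);	/* addiu v0, zero, 1 */
	return 0;
}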
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index addd56b60694..b1e637757fe3 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -41,7 +41,7 @@
#define TIOCPKT_START 0x08 /* start output */
#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */
#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */
-#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
+#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */
#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */
#define TIOCNOTTY 0x5471 /* void tty association */
@@ -63,9 +63,9 @@
#define FIONREAD 0x467f
#define TIOCINQ FIONREAD
-#define TIOCGETP 0x7408
-#define TIOCSETP 0x7409
-#define TIOCSETN 0x740a /* TIOCSETP wo flush */
+#define TIOCGETP 0x7408
+#define TIOCSETP 0x7409
+#define TIOCSETN 0x740a /* TIOCSETP wo flush */
/* #define TIOCSETA _IOW('t', 20, struct termios) set termios struct */
/* #define TIOCSETAW _IOW('t', 21, struct termios) drain output, set */
@@ -74,9 +74,9 @@
/* #define TIOCSETD _IOW('t', 27, int) set line discipline */
/* 127-124 compat */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x7416 /* Return the session ID of FD */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x7416 /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 0x2A, struct termios2)
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
@@ -104,10 +104,10 @@
#define TIOCGLCKTRMIOS 0x548b
#define TIOCSLCKTRMIOS 0x548c
#define TIOCSERGSTRUCT 0x548d /* For debugging only */
-#define TIOCSERGETLSR 0x548e /* Get line status register */
-#define TIOCSERGETMULTI 0x548f /* Get multiport config */
+#define TIOCSERGETLSR 0x548e /* Get line status register */
+#define TIOCSERGETMULTI 0x548f /* Get multiport config */
#define TIOCSERSETMULTI 0x5490 /* Set multiport config */
-#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */
+#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */
#endif /* __ASM_IOCTLS_H */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 9a936ac9a942..cfcb876cae6b 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -64,7 +64,7 @@
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
-#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
@@ -73,14 +73,14 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
-#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
-#define MADV_HWPOISON 100 /* poison a page for testing */
+#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
-#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
-#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index 1bc1f52f40d7..4d58d8468705 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -49,8 +49,8 @@ struct pt_regs {
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
- unsigned long long mpl[3]; /* MTM{0,1,2} */
- unsigned long long mtp[3]; /* MTP{0,1,2} */
+ unsigned long long mpl[3]; /* MTM{0,1,2} */
+ unsigned long long mtp[3]; /* MTP{0,1,2} */
#endif
} __attribute__ ((aligned (8)));
@@ -67,14 +67,14 @@ struct pt_regs {
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
-/* Calls to trace a 64bit program from a 32bit program. */
+/* Calls to trace a 64bit program from a 32bit program. */
#define PTRACE_PEEKTEXT_3264 0xc0
#define PTRACE_PEEKDATA_3264 0xc1
#define PTRACE_POKETEXT_3264 0xc2
#define PTRACE_POKEDATA_3264 0xc3
#define PTRACE_GET_THREAD_AREA_3264 0xc4
-/* Read and write watchpoint registers. */
+/* Read and write watchpoint registers. */
enum pt_watch_style {
pt_watch_style_mips32,
pt_watch_style_mips64
diff --git a/arch/mips/include/uapi/asm/sembuf.h b/arch/mips/include/uapi/asm/sembuf.h
index 7281a4decaa0..e1085ac880f2 100644
--- a/arch/mips/include/uapi/asm/sembuf.h
+++ b/arch/mips/include/uapi/asm/sembuf.h
@@ -12,8 +12,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ __kernel_time_t sem_otime; /* last semop time */
+ __kernel_time_t sem_ctime; /* last change time */
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused1;
unsigned long __unused2;
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 73446508d846..6a8714193fb9 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -11,7 +11,7 @@
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
-#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
+#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
#define HAVE_ARCH_SIGINFO_T
@@ -55,7 +55,7 @@ typedef struct siginfo {
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
@@ -91,9 +91,9 @@ typedef struct siginfo {
short _addr_lsb;
} _sigfault;
- /* SIGPOLL, SIGXFSZ (To do ...) */
+ /* SIGPOLL, SIGXFSZ (To do ...) */
struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index 6783c887a678..d6b18b4d0f3a 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -24,28 +24,28 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SIGHUP 1 /* Hangup (POSIX). */
#define SIGINT 2 /* Interrupt (ANSI). */
#define SIGQUIT 3 /* Quit (POSIX). */
-#define SIGILL 4 /* Illegal instruction (ANSI). */
-#define SIGTRAP 5 /* Trace trap (POSIX). */
-#define SIGIOT 6 /* IOT trap (4.2 BSD). */
-#define SIGABRT SIGIOT /* Abort (ANSI). */
+#define SIGILL 4 /* Illegal instruction (ANSI). */
+#define SIGTRAP 5 /* Trace trap (POSIX). */
+#define SIGIOT 6 /* IOT trap (4.2 BSD). */
+#define SIGABRT SIGIOT /* Abort (ANSI). */
#define SIGEMT 7
#define SIGFPE 8 /* Floating-point exception (ANSI). */
#define SIGKILL 9 /* Kill, unblockable (POSIX). */
-#define SIGBUS 10 /* BUS error (4.2 BSD). */
+#define SIGBUS 10 /* BUS error (4.2 BSD). */
#define SIGSEGV 11 /* Segmentation violation (ANSI). */
#define SIGSYS 12
-#define SIGPIPE 13 /* Broken pipe (POSIX). */
-#define SIGALRM 14 /* Alarm clock (POSIX). */
-#define SIGTERM 15 /* Termination (ANSI). */
+#define SIGPIPE 13 /* Broken pipe (POSIX). */
+#define SIGALRM 14 /* Alarm clock (POSIX). */
+#define SIGTERM 15 /* Termination (ANSI). */
#define SIGUSR1 16 /* User-defined signal 1 (POSIX). */
#define SIGUSR2 17 /* User-defined signal 2 (POSIX). */
#define SIGCHLD 18 /* Child status has changed (POSIX). */
-#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
+#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
#define SIGPWR 19 /* Power failure restart (System V). */
#define SIGWINCH 20 /* Window size change (4.3 BSD, Sun). */
#define SIGURG 21 /* Urgent condition on socket (4.2 BSD). */
-#define SIGIO 22 /* I/O now possible (4.2 BSD). */
-#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
+#define SIGIO 22 /* I/O now possible (4.2 BSD). */
+#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
#define SIGSTOP 23 /* Stop, unblockable (POSIX). */
#define SIGTSTP 24 /* Keyboard stop (POSIX). */
#define SIGCONT 25 /* Continue (POSIX). */
@@ -54,7 +54,7 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SIGVTALRM 28 /* Virtual alarm clock (4.2 BSD). */
#define SIGPROF 29 /* Profiling alarm clock (4.2 BSD). */
#define SIGXCPU 30 /* CPU limit exceeded (4.2 BSD). */
-#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */
+#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 3e68bfbda6bc..47132f44c955 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -24,21 +24,21 @@
SIGPIPE when they die. */
#define SO_DONTROUTE 0x0010 /* Don't do local routing. */
#define SO_BROADCAST 0x0020 /* Allow transmission of
- broadcast messages. */
+ broadcast messages. */
#define SO_LINGER 0x0080 /* Block on close of a reliable
socket to transmit pending data. */
#define SO_OOBINLINE 0x0100 /* Receive out-of-band data in-band. */
#define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_TYPE 0x1008 /* Compatible name for SO_STYLE. */
-#define SO_STYLE SO_TYPE /* Synonym */
+#define SO_STYLE SO_TYPE /* Synonym */
#define SO_ERROR 0x1007 /* get error status and clear */
#define SO_SNDBUF 0x1001 /* Send buffer size. */
#define SO_RCVBUF 0x1002 /* Receive buffer. */
#define SO_SNDLOWAT 0x1003 /* send low-water mark */
#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
#define SO_SNDTIMEO 0x1005 /* send timeout */
-#define SO_RCVTIMEO 0x1006 /* receive timeout */
+#define SO_RCVTIMEO 0x1006 /* receive timeout */
#define SO_ACCEPTCONN 0x1009
#define SO_PROTOCOL 0x1028 /* protocol type */
#define SO_DOMAIN 0x1029 /* domain/socket family */
@@ -59,11 +59,11 @@
#define SO_BINDTODEVICE 25
/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
#define SO_GET_FILTER SO_ATTACH_FILTER
-#define SO_PEERNAME 28
+#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
#define SCM_TIMESTAMP SO_TIMESTAMP
@@ -79,7 +79,7 @@
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
-#define SO_RXQ_OVFL 40
+#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
diff --git a/arch/mips/include/uapi/asm/sockios.h b/arch/mips/include/uapi/asm/sockios.h
index ed1a5f78d22f..419fbe661da3 100644
--- a/arch/mips/include/uapi/asm/sockios.h
+++ b/arch/mips/include/uapi/asm/sockios.h
@@ -14,7 +14,7 @@
/* Socket-level I/O control calls. */
#define FIOGETOWN _IOR('f', 123, int)
-#define FIOSETOWN _IOW('f', 124, int)
+#define FIOSETOWN _IOW('f', 124, int)
#define SIOCATMARK _IOR('s', 7, int)
#define SIOCSPGRP _IOW('s', 8, pid_t)
diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h
index fe9a4c3ec5a1..b47bc541bbc0 100644
--- a/arch/mips/include/uapi/asm/stat.h
+++ b/arch/mips/include/uapi/asm/stat.h
@@ -23,7 +23,7 @@ struct stat {
__u32 st_nlink;
uid_t st_uid;
gid_t st_gid;
- unsigned st_rdev;
+ unsigned st_rdev;
long st_pad2[2];
off_t st_size;
long st_pad3;
diff --git a/arch/mips/include/uapi/asm/statfs.h b/arch/mips/include/uapi/asm/statfs.h
index 0f805c7a42a5..3305c834fc16 100644
--- a/arch/mips/include/uapi/asm/statfs.h
+++ b/arch/mips/include/uapi/asm/statfs.h
@@ -15,7 +15,7 @@
#include <linux/types.h>
-typedef __kernel_fsid_t fsid_t;
+typedef __kernel_fsid_t fsid_t;
#endif
@@ -31,7 +31,7 @@ struct statfs {
long f_bavail;
/* Linux specials */
- __kernel_fsid_t f_fsid;
+ __kernel_fsid_t f_fsid;
long f_namelen;
long f_flags;
long f_spare[5];
@@ -73,7 +73,7 @@ struct statfs64 { /* Same as struct statfs */
long f_bavail;
/* Linux specials */
- __kernel_fsid_t f_fsid;
+ __kernel_fsid_t f_fsid;
long f_namelen;
long f_flags;
long f_spare[5];
diff --git a/arch/mips/include/uapi/asm/sysmips.h b/arch/mips/include/uapi/asm/sysmips.h
index 4f47b7d6a5f7..ae637e907856 100644
--- a/arch/mips/include/uapi/asm/sysmips.h
+++ b/arch/mips/include/uapi/asm/sysmips.h
@@ -16,10 +16,10 @@
* sysmips(2) is deprecated - though some existing software uses it.
* We only support the following commands.
*/
-#define SETNAME 1 /* set hostname */
+#define SETNAME 1 /* set hostname */
#define FLUSH_CACHE 3 /* writeback and invalidate caches */
-#define MIPS_FIXADE 7 /* control address error fixing */
-#define MIPS_RDNVRAM 10 /* read NVRAM */
-#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
+#define MIPS_FIXADE 7 /* control address error fixing */
+#define MIPS_RDNVRAM 10 /* read NVRAM */
+#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
#endif /* _ASM_SYSMIPS_H */
diff --git a/arch/mips/include/uapi/asm/termbits.h b/arch/mips/include/uapi/asm/termbits.h
index 76630b396fac..2750203e1e7d 100644
--- a/arch/mips/include/uapi/asm/termbits.h
+++ b/arch/mips/include/uapi/asm/termbits.h
@@ -53,7 +53,7 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0 /* Interrupt character [ISIG]. */
+#define VINTR 0 /* Interrupt character [ISIG]. */
#define VQUIT 1 /* Quit character [ISIG]. */
#define VERASE 2 /* Erase character [ICANON]. */
#define VKILL 3 /* Kill-line character [ICANON]. */
@@ -72,7 +72,7 @@ struct ktermios {
#define VDSUSP 11 /* Delayed suspend character [ISIG]. */
#endif
#define VREPRINT 12 /* Reprint-line character [ICANON]. */
-#define VDISCARD 13 /* Discard character [IEXTEN]. */
+#define VDISCARD 13 /* Discard character [IEXTEN]. */
#define VWERASE 14 /* Word-erase character [ICANON]. */
#define VLNEXT 15 /* Literal-next character [IEXTEN]. */
#define VEOF 16 /* End-of-file character [ICANON]. */
@@ -92,7 +92,7 @@ struct ktermios {
#define IXON 0002000 /* Enable start/stop output control. */
#define IXANY 0004000 /* Any character will restart after stop. */
#define IXOFF 0010000 /* Enable start/stop input control. */
-#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
+#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
#define IUTF8 0040000 /* Input is UTF-8 */
/* c_oflag bits */
@@ -105,123 +105,123 @@ struct ktermios {
#define OFILL 0000100
#define OFDEL 0000200
#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
+#define NL0 0000000
+#define NL1 0000400
#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
+#define BS0 0000000
+#define BS1 0020000
#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
+#define VT0 0000000
+#define VT1 0040000
#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define FF0 0000000
+#define FF1 0100000
/*
#define PAGEOUT ???
-#define WRAP ???
+#define WRAP ???
*/
/* c_cflag bit meaning */
#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CSIZE 0000060 /* Number of bits per byte (mask). */
-#define CS5 0000000 /* 5 bits per byte. */
-#define CS6 0000020 /* 6 bits per byte. */
-#define CS7 0000040 /* 7 bits per byte. */
-#define CS8 0000060 /* 8 bits per byte. */
+#define CS5 0000000 /* 5 bits per byte. */
+#define CS6 0000020 /* 6 bits per byte. */
+#define CS7 0000040 /* 7 bits per byte. */
+#define CS8 0000060 /* 8 bits per byte. */
#define CSTOPB 0000100 /* Two stop bits instead of one. */
#define CREAD 0000200 /* Enable receiver. */
#define PARENB 0000400 /* Parity enable. */
-#define PARODD 0001000 /* Odd parity instead of even. */
+#define PARODD 0001000 /* Odd parity instead of even. */
#define HUPCL 0002000 /* Hang up on last close. */
#define CLOCAL 0004000 /* Ignore modem status lines. */
#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001 /* Enable signals. */
#define ICANON 0000002 /* Do erase and kill processing. */
#define XCASE 0000004
-#define ECHO 0000010 /* Enable echo. */
+#define ECHO 0000010 /* Enable echo. */
#define ECHOE 0000020 /* Visual erase for ERASE. */
-#define ECHOK 0000040 /* Echo NL after KILL. */
-#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
+#define ECHOK 0000040 /* Echo NL after KILL. */
+#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
#define NOFLSH 0000200 /* Disable flush after interrupt. */
#define IEXTEN 0000400 /* Enable DISCARD and LNEXT. */
-#define ECHOCTL 0001000 /* Echo control characters as ^X. */
-#define ECHOPRT 0002000 /* Hardcopy visual erase. */
+#define ECHOCTL 0001000 /* Echo control characters as ^X. */
+#define ECHOPRT 0002000 /* Hardcopy visual erase. */
#define ECHOKE 0004000 /* Visual erase for KILL. */
#define FLUSHO 0020000
#define PENDIN 0040000 /* Retype pending input (state). */
-#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
-#define ITOSTOP TOSTOP
-#define EXTPROC 0200000 /* External processing on pty */
+#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
+#define ITOSTOP TOSTOP
+#define EXTPROC 0200000 /* External processing on pty */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
/* tcflow() and TCXONC use these */
-#define TCOOFF 0 /* Suspend output. */
-#define TCOON 1 /* Restart suspended output. */
-#define TCIOFF 2 /* Send a STOP character. */
-#define TCION 3 /* Send a START character. */
+#define TCOOFF 0 /* Suspend output. */
+#define TCOON 1 /* Restart suspended output. */
+#define TCIOFF 2 /* Send a STOP character. */
+#define TCION 3 /* Send a START character. */
/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0 /* Discard data received but not yet read. */
-#define TCOFLUSH 1 /* Discard data written but not yet sent. */
-#define TCIOFLUSH 2 /* Discard all pending data. */
+#define TCIFLUSH 0 /* Discard data received but not yet read. */
+#define TCOFLUSH 1 /* Discard data written but not yet sent. */
+#define TCIOFLUSH 2 /* Discard all pending data. */
/* tcsetattr uses these */
-#define TCSANOW TCSETS /* Change immediately. */
-#define TCSADRAIN TCSETSW /* Change when pending output is written. */
-#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
+#define TCSANOW TCSETS /* Change immediately. */
+#define TCSADRAIN TCSETSW /* Change when pending output is written. */
+#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
#endif /* _ASM_TERMBITS_H */
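[Editor's note -- illustrative sketch, not part of the patch.] As a side note on how the baud-rate bits above fit together: the output speed code sits in the low CBAUD bits of c_cflag, and the input speed code occupies the same mask shifted up by IBSHIFT, which is exactly the range CIBAUD covers (with the values above, CIBAUD == CBAUD << IBSHIFT). A minimal sketch using constants copied verbatim from this header:

/* Illustrative only; constants copied from the termbits.h hunks above. */
#include <stdio.h>

#define CBAUD	0010017
#define CIBAUD	002003600000	/* input baud rate */
#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
#define B9600	0000015
#define B38400	0000017

int main(void)
{
	/* 38400 out, 9600 in, encoded the way the header lays them out */
	unsigned long cflag = B38400 | ((unsigned long)B9600 << IBSHIFT);

	printf("output code %#lo\n", cflag & CBAUD);		   /* 017 */
	printf("input  code %#lo\n", (cflag & CIBAUD) >> IBSHIFT); /* 015 */
	return 0;
}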
diff --git a/arch/mips/include/uapi/asm/termios.h b/arch/mips/include/uapi/asm/termios.h
index 574fbdfb7202..baeb2fa87451 100644
--- a/arch/mips/include/uapi/asm/termios.h
+++ b/arch/mips/include/uapi/asm/termios.h
@@ -31,12 +31,12 @@ struct tchars {
};
struct ltchars {
- char t_suspc; /* stop process signal */
- char t_dsuspc; /* delayed stop process signal */
- char t_rprntc; /* reprint line */
- char t_flushc; /* flush output (toggles) */
- char t_werasc; /* word erase */
- char t_lnextc; /* literal next character */
+ char t_suspc; /* stop process signal */
+ char t_dsuspc; /* delayed stop process signal */
+ char t_rprntc; /* reprint line */
+ char t_flushc; /* flush output (toggles) */
+ char t_werasc; /* word erase */
+ char t_lnextc; /* literal next character */
};
/* TIOCGSIZE, TIOCSSIZE not defined yet. Only needed for SunOS source
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 0eebf3c3e03c..16338b84fa79 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -20,16 +20,16 @@
* Linux o32 style syscalls are in the range from 4000 to 4999.
*/
#define __NR_Linux 4000
-#define __NR_syscall (__NR_Linux + 0)
-#define __NR_exit (__NR_Linux + 1)
-#define __NR_fork (__NR_Linux + 2)
-#define __NR_read (__NR_Linux + 3)
-#define __NR_write (__NR_Linux + 4)
-#define __NR_open (__NR_Linux + 5)
-#define __NR_close (__NR_Linux + 6)
-#define __NR_waitpid (__NR_Linux + 7)
-#define __NR_creat (__NR_Linux + 8)
-#define __NR_link (__NR_Linux + 9)
+#define __NR_syscall (__NR_Linux + 0)
+#define __NR_exit (__NR_Linux + 1)
+#define __NR_fork (__NR_Linux + 2)
+#define __NR_read (__NR_Linux + 3)
+#define __NR_write (__NR_Linux + 4)
+#define __NR_open (__NR_Linux + 5)
+#define __NR_close (__NR_Linux + 6)
+#define __NR_waitpid (__NR_Linux + 7)
+#define __NR_creat (__NR_Linux + 8)
+#define __NR_link (__NR_Linux + 9)
#define __NR_unlink (__NR_Linux + 10)
#define __NR_execve (__NR_Linux + 11)
#define __NR_chdir (__NR_Linux + 12)
@@ -386,16 +386,16 @@
* Linux 64-bit syscalls are in the range from 5000 to 5999.
*/
#define __NR_Linux 5000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
+#define __NR_read (__NR_Linux + 0)
+#define __NR_write (__NR_Linux + 1)
+#define __NR_open (__NR_Linux + 2)
+#define __NR_close (__NR_Linux + 3)
+#define __NR_stat (__NR_Linux + 4)
+#define __NR_fstat (__NR_Linux + 5)
+#define __NR_lstat (__NR_Linux + 6)
+#define __NR_poll (__NR_Linux + 7)
+#define __NR_lseek (__NR_Linux + 8)
+#define __NR_mmap (__NR_Linux + 9)
#define __NR_mprotect (__NR_Linux + 10)
#define __NR_munmap (__NR_Linux + 11)
#define __NR_brk (__NR_Linux + 12)
@@ -711,16 +711,16 @@
* Linux N32 syscalls are in the range from 6000 to 6999.
*/
#define __NR_Linux 6000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
+#define __NR_read (__NR_Linux + 0)
+#define __NR_write (__NR_Linux + 1)
+#define __NR_open (__NR_Linux + 2)
+#define __NR_close (__NR_Linux + 3)
+#define __NR_stat (__NR_Linux + 4)
+#define __NR_fstat (__NR_Linux + 5)
+#define __NR_lstat (__NR_Linux + 6)
+#define __NR_poll (__NR_Linux + 7)
+#define __NR_lseek (__NR_Linux + 8)
+#define __NR_mmap (__NR_Linux + 9)
#define __NR_mprotect (__NR_Linux + 10)
#define __NR_munmap (__NR_Linux + 11)
#define __NR_brk (__NR_Linux + 12)
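[Editor's note -- illustrative sketch, not part of the patch.] A quick arithmetic check of the numbering scheme above: the same symbolic name lands in a different range per ABI, so __NR_open resolves to 4000 + 5 = 4005 under o32, 5000 + 2 = 5002 for 64-bit, and 6000 + 2 = 6002 for N32, which is why userspace must be built against the header matching its ABI.

/* Sketch only; bases and offsets taken from the unistd.h hunks above. */
#include <stdio.h>

int main(void)
{
	printf("o32 __NR_open = %d\n", 4000 + 5);	/* 4005 */
	printf("64  __NR_open = %d\n", 5000 + 2);	/* 5002 */
	printf("N32 __NR_open = %d\n", 6000 + 2);	/* 6002 */
	return 0;
}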
diff --git a/arch/mips/jazz/Makefile b/arch/mips/jazz/Makefile
index dd9d99bfcf7a..624b0ee3e5d4 100644
--- a/arch/mips/jazz/Makefile
+++ b/arch/mips/jazz/Makefile
@@ -2,4 +2,4 @@
# Makefile for the Jazz family specific parts of the kernel
#
-obj-y := irq.o jazzdma.o reset.o setup.o
+obj-y := irq.o jazzdma.o reset.o setup.o
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f21868b28b24..e1ea4f625f7a 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -111,7 +111,7 @@ asmlinkage void plat_irq_dispatch(void)
}
static void r4030_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
BUG_ON(HZ != 100);
- cd->cpumask = cpumask_of(cpu);
+ cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(JAZZ_TIMER_IRQ, action);
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 2d8e447cb828..db6f5afff4ff 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -63,7 +63,7 @@ static inline void vdma_pgtbl_init(void)
static int __init vdma_init(void)
{
/*
- * Allocate 32k of memory for DMA page tables. This needs to be page
+ * Allocate 32k of memory for DMA page tables. This needs to be page
* aligned and should be uncached to avoid cache flushing after every
* update.
*/
@@ -218,14 +218,14 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
printk
("vdma_map: Invalid logical address: %08lx\n",
laddr);
- return -EINVAL; /* invalid logical address */
+ return -EINVAL; /* invalid logical address */
}
if (paddr > 0x1fffffff) {
if (vdma_debug)
printk
("vdma_map: Invalid physical address: %08lx\n",
paddr);
- return -EINVAL; /* invalid physical address */
+ return -EINVAL; /* invalid physical address */
}
pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 820e926dacbc..e4374a5651ce 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -137,9 +137,9 @@ static struct resource jazz_esp_rsrc[] = {
};
static struct platform_device jazz_esp_pdev = {
- .name = "jazz_esp",
- .num_resources = ARRAY_SIZE(jazz_esp_rsrc),
- .resource = jazz_esp_rsrc
+ .name = "jazz_esp",
+ .num_resources = ARRAY_SIZE(jazz_esp_rsrc),
+ .resource = jazz_esp_rsrc
};
static struct resource jazz_sonic_rsrc[] = {
@@ -156,9 +156,9 @@ static struct resource jazz_sonic_rsrc[] = {
};
static struct platform_device jazz_sonic_pdev = {
- .name = "jazzsonic",
- .num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
- .resource = jazz_sonic_rsrc
+ .name = "jazzsonic",
+ .num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
+ .resource = jazz_sonic_rsrc
};
static struct resource jazz_cmos_rsrc[] = {
@@ -175,13 +175,13 @@ static struct resource jazz_cmos_rsrc[] = {
};
static struct platform_device jazz_cmos_pdev = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
- .resource = jazz_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
+ .resource = jazz_cmos_rsrc
};
static struct platform_device pcspeaker_pdev = {
- .name = "pcspkr",
+ .name = "pcspkr",
.id = -1,
};
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 43d964d36288..be2b3deeef1d 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -52,7 +52,7 @@ static bool is_avt2;
static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
.eccbytes = 36,
.eccpos = {
- 6, 7, 8, 9, 10, 11, 12, 13,
+ 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37,
@@ -210,7 +210,7 @@ static const uint32_t qi_lb60_keymap[] = {
KEY(6, 7, KEY_RIGHT), /* S57 */
KEY(7, 0, KEY_LEFTSHIFT), /* S58 */
- KEY(7, 1, KEY_LEFTALT), /* S59 */
+ KEY(7, 1, KEY_LEFTALT), /* S59 */
KEY(7, 2, KEY_QI_FN), /* S60 */
};
@@ -317,7 +317,7 @@ static struct spi_board_info qi_lb60_spi_board_info[] = {
/* Battery */
static struct jz_battery_platform_data qi_lb60_battery_pdata = {
- .gpio_charge = JZ_GPIO_PORTC(27),
+ .gpio_charge = JZ_GPIO_PORTC(27),
.gpio_charge_active_low = 1,
.info = {
.name = "battery",
@@ -344,7 +344,7 @@ static struct gpio_keys_platform_data qi_lb60_gpio_keys_data = {
};
static struct platform_device qi_lb60_gpio_keys = {
- .name = "gpio-keys",
+ .name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &qi_lb60_gpio_keys_data,
diff --git a/arch/mips/jz4740/clock-debugfs.c b/arch/mips/jz4740/clock-debugfs.c
index 330a0f2bf17b..a8acdeff267e 100644
--- a/arch/mips/jz4740/clock-debugfs.c
+++ b/arch/mips/jz4740/clock-debugfs.c
@@ -3,7 +3,7 @@
* JZ4740 SoC clock support debugfs entries
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/clock.c b/arch/mips/jz4740/clock.c
index 118a8a5562dd..484d38a0864f 100644
--- a/arch/mips/jz4740/clock.c
+++ b/arch/mips/jz4740/clock.c
@@ -3,7 +3,7 @@
* JZ4740 SoC clock support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -31,7 +31,7 @@
#define JZ_REG_CLOCK_LOW_POWER 0x04
#define JZ_REG_CLOCK_PLL 0x10
#define JZ_REG_CLOCK_GATE 0x20
-#define JZ_REG_CLOCK_SLEEP_CTRL 0x24
+#define JZ_REG_CLOCK_SLEEP_CTRL 0x24
#define JZ_REG_CLOCK_I2S 0x60
#define JZ_REG_CLOCK_LCD 0x64
#define JZ_REG_CLOCK_MMC 0x68
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c
index d7feb898692c..317ec6fffb12 100644
--- a/arch/mips/jz4740/dma.c
+++ b/arch/mips/jz4740/dma.c
@@ -3,7 +3,7 @@
* JZ4740 SoC DMA support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index e1ddb95c05e3..00b798d2fb7c 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -3,7 +3,7 @@
* JZ4740 platform GPIO support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index fc57ded326d8..2531da1d3add 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -3,7 +3,7 @@
* JZ4740 platform IRQ support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/irq.h b/arch/mips/jz4740/irq.h
index f75e39d62885..0f48720b5b63 100644
--- a/arch/mips/jz4740/irq.h
+++ b/arch/mips/jz4740/irq.h
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 6d14dcdbd908..e9348fd26a35 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -3,7 +3,7 @@
* JZ4740 platform devices
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -102,7 +102,7 @@ struct platform_device jz4740_mmc_device = {
.dma_mask = &jz4740_mmc_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
- .num_resources = ARRAY_SIZE(jz4740_mmc_resources),
+ .num_resources = ARRAY_SIZE(jz4740_mmc_resources),
.resource = jz4740_mmc_resources,
};
@@ -114,7 +114,7 @@ static struct resource jz4740_rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = JZ4740_IRQ_RTC,
+ .start = JZ4740_IRQ_RTC,
.end = JZ4740_IRQ_RTC,
.flags = IORESOURCE_IRQ,
},
@@ -144,7 +144,7 @@ static struct resource jz4740_i2c_resources[] = {
struct platform_device jz4740_i2c_device = {
.name = "jz4740-i2c",
.id = 0,
- .num_resources = ARRAY_SIZE(jz4740_i2c_resources),
+ .num_resources = ARRAY_SIZE(jz4740_i2c_resources),
.resource = jz4740_i2c_resources,
};
@@ -318,8 +318,8 @@ static struct resource jz4740_wdt_resources[] = {
};
struct platform_device jz4740_wdt_device = {
- .name = "jz4740-wdt",
- .id = -1,
+ .name = "jz4740-wdt",
+ .id = -1,
.num_resources = ARRAY_SIZE(jz4740_wdt_resources),
.resource = jz4740_wdt_resources,
};
diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c
index 6744fa723f72..d8e213010169 100644
--- a/arch/mips/jz4740/pm.c
+++ b/arch/mips/jz4740/pm.c
@@ -3,7 +3,7 @@
* JZ4740 SoC power management support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/prom.c b/arch/mips/jz4740/prom.c
index 4a70407f55bb..5a93f381590d 100644
--- a/arch/mips/jz4740/prom.c
+++ b/arch/mips/jz4740/prom.c
@@ -3,7 +3,7 @@
* JZ4740 SoC prom code
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/reset.c b/arch/mips/jz4740/reset.c
index 6c0da5afcf17..b6c6343d2834 100644
--- a/arch/mips/jz4740/reset.c
+++ b/arch/mips/jz4740/reset.c
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index d97cfbf882f5..76eafcb79c89 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -4,7 +4,7 @@
* JZ4740 setup code
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 39bb4bbf43e7..5e430ce9ac7e 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -3,7 +3,7 @@
* JZ4740 platform time support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c
index 22f11d73a17d..4992461787aa 100644
--- a/arch/mips/jz4740/timer.c
+++ b/arch/mips/jz4740/timer.c
@@ -3,7 +3,7 @@
* JZ4740 platform timer support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 007c33d73715..f81d98f6184c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -39,7 +40,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -53,7 +54,7 @@ obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
-obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
@@ -98,4 +99,35 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+#
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
+# to enable DSP assembler support here even if the MIPS Release 2 CPU we
+# are targeting does not support DSP because all code-paths making use of
+# it properly check that the running CPU *actually does* support these
+# instructions.
+#
+ifeq ($(CONFIG_CPU_MIPSR2), y)
+CFLAGS_DSP = -DHAVE_AS_DSP
+
+#
+# Check if assembler supports DSP ASE
+#
+ifeq ($(call cc-option-yn,-mdsp), y)
+CFLAGS_DSP += -mdsp
+endif
+
+#
+# Check if assembler supports DSP ASE Rev2
+#
+ifeq ($(call cc-option-yn,-mdspr2), y)
+CFLAGS_DSP += -mdspr2
+endif
+
+CFLAGS_signal.o = $(CFLAGS_DSP)
+CFLAGS_signal32.o = $(CFLAGS_DSP)
+CFLAGS_process.o = $(CFLAGS_DSP)
+CFLAGS_branch.o = $(CFLAGS_DSP)
+CFLAGS_ptrace.o = $(CFLAGS_DSP)
+endif
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 9fdd8bcdd21e..e06f777e9c49 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -48,7 +48,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
@@ -67,8 +67,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -88,7 +88,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index ff448233dab5..556a4357d7fc 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -50,7 +50,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
@@ -86,8 +86,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -107,7 +107,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index e908e81330b1..64c4fd62cf08 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -170,7 +170,7 @@ bmips_smp_entry:
/* switch to permanent stack and continue booting */
- .global bmips_secondary_reentry
+ .global bmips_secondary_reentry
bmips_secondary_reentry:
la k0, bmips_smp_boot_sp
lw sp, 0(k0)
@@ -182,7 +182,7 @@ bmips_secondary_reentry:
#endif /* CONFIG_SMP */
.align 4
- .global bmips_reset_nmi_vec_end
+ .global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:
END(bmips_reset_nmi_vec)
@@ -206,7 +206,7 @@ LEAF(bmips_smp_int_vec)
eret
.align 4
- .global bmips_smp_int_vec_end
+ .global bmips_smp_int_vec_end
bmips_smp_int_vec_end:
END(bmips_smp_int_vec)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d735d0e58f5..83ffe950f710 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -57,7 +57,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
*/
case bcond_op:
switch (insn.i_format.rt) {
- case bltz_op:
+ case bltz_op:
case bltzl_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -197,8 +197,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
- case 0: /* bc1f */
- case 2: /* bc1fl */
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 2)
@@ -208,8 +208,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc = epc;
break;
- case 1: /* bc1t */
- case 3: /* bc1tl */
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 3)
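The target arithmetic repeated in these branch cases follows the MIPS I-type encoding: the 16-bit immediate is a signed word offset taken relative to the delay-slot address (branch PC + 4). A standalone sketch of just that computation (plain C, not kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Branch target = delay-slot address + sign-extended 16-bit offset * 4. */
static uint32_t branch_target(uint32_t branch_pc, int16_t simmediate)
{
	return branch_pc + 4 + ((int32_t)simmediate << 2);
}

int main(void)
{
	/* An offset of -1 branches back onto the branch instruction itself. */
	printf("0x%08" PRIx32 "\n", branch_target(0x80001000u, -1));
	return 0;
}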
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 69bbfae183bc..15f618b40cf6 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -41,7 +41,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -144,7 +144,7 @@ void __cpuinit sb1480_clockevent_init(void)
bcm1480_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
+ action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ed648cb5a69f..ff1f01b72270 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -1,7 +1,7 @@
/*
* DS1287 clockevent driver
*
- * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -89,7 +89,7 @@ static void ds1287_event_handler(struct clock_event_device *dev)
static struct clock_event_device ds1287_clockevent = {
.name = "ds1287",
.features = CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = ds1287_set_next_event,
+ .set_next_event = ds1287_set_next_event,
.set_mode = ds1287_set_mode,
.event_handler = ds1287_event_handler,
};
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 831b47585b7c..f069460751ab 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx clockevent routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -98,7 +98,7 @@ static struct clock_event_device gt641xx_timer0_clockevent = {
.name = "gt641xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.irq = GT641XX_TIMER0_IRQ,
- .set_next_event = gt641xx_timer0_set_next_event,
+ .set_next_event = gt641xx_timer0_set_next_event,
.set_mode = gt641xx_timer0_set_mode,
.event_handler = gt641xx_timer0_event_handler,
};
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 75323925e537..07b847d77f5d 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -25,7 +25,7 @@
#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cnt;
int res;
@@ -66,7 +66,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
goto out;
/*
- * The same applies to performance counter interrupts. But with the
+ * The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
* interrupt. Being the paranoiacs we are we check anyway.
*/
@@ -119,7 +119,7 @@ int c0_compare_int_usable(void)
unsigned int cnt;
/*
- * IP7 already pending? Try to clear it by acking the timer.
+ * IP7 already pending? Try to clear it by acking the timer.
*/
if (c0_compare_int_pending()) {
cnt = read_c0_count();
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index e73439fd6850..200f2778bf36 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -39,7 +39,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -143,7 +143,7 @@ void __cpuinit sb1250_clockevent_init(void)
sb1250_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
+ action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 2e72d30b2f05..9de5ed7ef1a3 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -49,7 +49,7 @@ static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
- * into Count register. Valid timestamps will never be zero.
+ * into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce one or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index e5c30b1d0860..2ae08462e46e 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -4,7 +4,7 @@
* for more details.
*
* Based on linux/arch/mips/kernel/cevt-r4k.c,
- * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
@@ -129,7 +129,7 @@ static struct txx9_clock_event_device txx9_clock_event_device = {
CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_mode = txx9tmr_set_mode,
- .set_next_event = txx9tmr_set_next_event,
+ .set_next_event = txx9tmr_set_next_event,
},
};
@@ -139,7 +139,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
struct clock_event_device *cd = &txx9_cd->cd;
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
- __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
cd->event_handler(cd);
return IRQ_HANDLED;
}
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index d6a18644365a..de3c25ffd9f9 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -84,9 +84,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
- "dsll32 %0, %4, %5\n\t"
+ "dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
- "dsll32 %1, %4, %5\n\t"
+ "dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = -1;
+int daddiu_bug = -1;
static inline void check_daddiu(void)
{
@@ -273,7 +273,7 @@ static inline void check_daddiu(void)
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
@@ -292,7 +292,7 @@ static inline void check_daddiu(void)
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index cce3782c96c9..6bfccc227a95 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -69,12 +69,12 @@ void r4k_wait_irqoff(void)
" wait \n"
" .set pop \n");
local_irq_enable();
- __asm__(" .globl __pastwait \n"
+ __asm__(" .globl __pastwait \n"
"__pastwait: \n");
}
/*
- * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
static void rm7k_wait_irqoff(void)
@@ -201,6 +201,7 @@ void __init check_wait(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
@@ -331,6 +332,34 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
#endif
}
+static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II |
+ MIPS_CPU_ISA_III;
+ break;
+
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ case MIPS_CPU_ISA_I:
+ c->isa_level |= MIPS_CPU_ISA_I;
+ break;
+ }
+}
+
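The deliberate fall-through in set_isa() turns a single ISA identifier into a cumulative mask, so lower ISA levels are implied automatically. A hedged illustration of the effect with made-up bit values (not the real MIPS_CPU_ISA_* constants), showing only the 64-bit chain:

#include <stdio.h>

/* Illustrative bit assignments only. */
enum {
	ISA_I = 1 << 0, ISA_II = 1 << 1, ISA_III = 1 << 2, ISA_IV = 1 << 3,
	ISA_V = 1 << 4, ISA_M32R1 = 1 << 5, ISA_M32R2 = 1 << 6,
	ISA_M64R1 = 1 << 7, ISA_M64R2 = 1 << 8,
};

/* Same shape as set_isa(): each level ORs in everything it implies. */
static unsigned int cumulative_isa(unsigned int isa)
{
	unsigned int level = 0;

	switch (isa) {
	case ISA_M64R2: level |= ISA_M32R2 | ISA_M64R2;	/* fall through */
	case ISA_M64R1: level |= ISA_M32R1 | ISA_M64R1;	/* fall through */
	case ISA_V:	level |= ISA_V;			/* fall through */
	case ISA_IV:	level |= ISA_IV;		/* fall through */
	case ISA_III:	level |= ISA_I | ISA_II | ISA_III;
	}
	return level;
}

int main(void)
{
	printf("M64R2 -> 0x%x\n", cumulative_isa(ISA_M64R2));	/* 0x1ff: all levels */
	return 0;
}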
static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
@@ -348,10 +377,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
case 0:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ set_isa(c, MIPS_CPU_ISA_M32R1);
break;
case 1:
- c->isa_level = MIPS_CPU_ISA_M32R2;
+ set_isa(c, MIPS_CPU_ISA_M32R2);
break;
default:
goto unknown;
@@ -360,10 +389,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
case 2:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
- c->isa_level = MIPS_CPU_ISA_M64R1;
+ set_isa(c, MIPS_CPU_ISA_M64R1);
break;
case 1:
- c->isa_level = MIPS_CPU_ISA_M64R2;
+ set_isa(c, MIPS_CPU_ISA_M64R2);
break;
default:
goto unknown;
@@ -439,6 +468,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
return config3 & MIPS_CONF_M;
}
@@ -469,7 +502,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
ok = decode_config0(c); /* Read Config registers. */
- BUG_ON(!ok); /* Arch spec violation! */
+ BUG_ON(!ok); /* Arch spec violation! */
if (ok)
ok = decode_config1(c);
if (ok)
@@ -494,7 +527,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -514,7 +547,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -540,7 +573,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
}
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_VCE |
MIPS_CPU_LLSC;
@@ -580,14 +613,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS;
c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -595,7 +628,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -610,13 +643,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -641,7 +674,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
__cpu_name[cpu] = "R4700";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -649,7 +682,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
__cpu_name[cpu] = "R49XX";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -658,7 +691,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
__cpu_name[cpu] = "R5000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -666,7 +699,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5432:
c->cputype = CPU_R5432;
__cpu_name[cpu] = "R5432";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -674,7 +707,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
__cpu_name[cpu] = "R5500";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -682,7 +715,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
__cpu_name[cpu] = "Nevada";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -690,7 +723,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R6000:
c->cputype = CPU_R6000;
__cpu_name[cpu] = "R6000";
- c->isa_level = MIPS_CPU_ISA_II;
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -698,7 +731,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R6000A:
c->cputype = CPU_R6000A;
__cpu_name[cpu] = "R6000A";
- c->isa_level = MIPS_CPU_ISA_II;
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -706,38 +739,38 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
__cpu_name[cpu] = "RM7000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
- * Undocumented RM7000: Bit 29 in the info register of
+ * Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_RM9000:
c->cputype = CPU_RM9000;
__cpu_name[cpu] = "RM9000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
__cpu_name[cpu] = "RM8000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
@@ -746,7 +779,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
__cpu_name[cpu] = "R10000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -756,7 +789,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
__cpu_name[cpu] = "R12000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -766,7 +799,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
__cpu_name[cpu] = "R14000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -786,7 +819,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
@@ -838,10 +871,13 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
- case PRID_IMP_24KE:
c->cputype = CPU_24K;
__cpu_name[cpu] = "MIPS 24Kc";
break;
+ case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24KEc";
+ break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
__cpu_name[cpu] = "MIPS 25Kc";
@@ -858,6 +894,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_M14KC;
__cpu_name[cpu] = "MIPS M14Kc";
break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
__cpu_name[cpu] = "MIPS 1004Kc";
@@ -946,7 +986,7 @@ static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
__cpu_name[cpu] = "Philips PR4450";
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ set_isa(c, MIPS_CPU_ISA_M32R1);
break;
}
}
@@ -1053,12 +1093,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
return;
}
- c->options = (MIPS_CPU_TLB |
- MIPS_CPU_4KEX |
+ c->options = (MIPS_CPU_TLB |
+ MIPS_CPU_4KEX |
MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC |
- MIPS_CPU_WATCH |
- MIPS_CPU_EJTAG |
+ MIPS_CPU_DIVEC |
+ MIPS_CPU_WATCH |
+ MIPS_CPU_EJTAG |
MIPS_CPU_LLSC);
switch (c->processor_id & 0xff00) {
@@ -1105,12 +1145,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
}
if (c->cputype == CPU_XLP) {
- c->isa_level = MIPS_CPU_ISA_M64R2;
+ set_isa(c, MIPS_CPU_ISA_M64R2);
c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
/* This will be updated again after all threads are woken up */
c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
} else {
- c->isa_level = MIPS_CPU_ISA_M64R1;
+ set_isa(c, MIPS_CPU_ISA_M64R1);
c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
}
}
@@ -1129,7 +1169,7 @@ __cpuinit void cpu_probe(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
- c->processor_id = PRID_IMP_UNKNOWN;
+ c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index e7c98e2b78b6..3237c5235f9c 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -107,6 +107,8 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int i;
+ unsigned long rate;
+ int ret;
if (!cpu_online(policy->cpu))
return -ENODEV;
@@ -117,15 +119,22 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
- cpuclk->rate = cpu_clock_freq / 1000;
- if (!cpuclk->rate)
+ rate = cpu_clock_freq / 1000;
+ if (!rate) {
+ clk_put(cpuclk);
return -EINVAL;
+ }
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
/* clock table init */
for (i = 2;
(loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
i++)
- loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8;
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
policy->cur = loongson2_cpufreq_get(policy->cpu);
@@ -195,8 +204,8 @@ static void loongson2_cpu_wait(void)
spin_lock_irqsave(&loongson2_wait_lock, flags);
cpu_freq = LOONGSON_CHIPCFG0;
- LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
}
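The first hunk above stops writing cpuclk->rate directly and instead goes through clk_set_rate(), dropping the clock reference with clk_put() on each error path. A hedged sketch of that common clk-consumer shape (device and connection-id names are placeholders):

#include <linux/clk.h>
#include <linux/err.h>

/* Placeholder helper showing get / set-rate / put error handling. */
static int example_clk_setup(struct device *dev, unsigned long rate)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "cpu_clk");	/* "cpu_clk" is an assumed lookup name */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, rate);
	if (ret) {
		clk_put(clk);		/* release the reference on failure */
		return ret;
	}
	return 0;
}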
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 0f53c39324bb..93aa302948d7 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -59,7 +59,7 @@ static void crash_kexec_prepare_cpus(void)
#else /* !defined(CONFIG_SMP) */
static void crash_kexec_prepare_cpus(void) {}
-#endif /* !defined(CONFIG_SMP) */
+#endif /* !defined(CONFIG_SMP) */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index f96f99c794a3..468f3eba4132 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -35,7 +35,7 @@ static cycle_t bcm1480_hpt_read(struct clocksource *cs)
struct clocksource bcm1480_clocksource = {
.name = "zbbus-cycles",
- .rating = 200,
+ .rating = 200,
.read = bcm1480_hpt_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
new file mode 100644
index 000000000000..5dca24bce51b
--- /dev/null
+++ b/arch/mips/kernel/csrc-gic.c
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+ unsigned int hi, hi2, lo;
+
+ do {
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+
+static struct clocksource gic_clocksource = {
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+ unsigned int config, bits;
+
+ /* Calculate the clocksource mask. */
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
+ bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+ (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
+
+ /* Set clocksource mask. */
+ gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
+
+ /* Calculate a somewhat reasonable rating value. */
+ gic_clocksource.rating = 200 + frequency / 10000000;
+
+ clocksource_register_hz(&gic_clocksource, frequency);
+}
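gic_hpt_read() above uses the usual high/low/high re-read loop so a 64-bit value is never assembled from halves that straddle a carry out of bit 31. The same idea in isolation, as a standalone sketch with stand-in accessors:

#include <stdint.h>

/* Stand-ins for two memory-mapped 32-bit counter halves (illustrative only). */
static volatile uint32_t counter_hi, counter_lo;

static uint32_t read_hi(void) { return counter_hi; }
static uint32_t read_lo(void) { return counter_lo; }

/* Retry until the high word is stable across the low-word read. */
static uint64_t read_counter64(void)
{
	uint32_t hi, hi2, lo;

	do {
		hi  = read_hi();
		lo  = read_lo();
		hi2 = read_hi();
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}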
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 46bd7fa98d6c..0654bff9b69c 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -1,7 +1,7 @@
/*
* DEC I/O ASIC's counter clocksource
*
- * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
index 2e7c5232da8d..abd99ea911ae 100644
--- a/arch/mips/kernel/csrc-powertv.c
+++ b/arch/mips/kernel/csrc-powertv.c
@@ -45,7 +45,7 @@ unsigned int __init mips_get_pll_freq(void)
m = PLL_GET_M(pll_reg);
n = PLL_GET_N(pll_reg);
p = PLL_GET_P(pll_reg);
- pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
+ pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
/* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
fout = ((2 * n * fin) / (m * (0x01 << p)));
@@ -83,8 +83,8 @@ static void __init powertv_c0_hpt_clocksource_init(void)
/**
* struct tim_c - free running counter
- * @hi: High 16 bits of the counter
- * @lo: Low 32 bits of the counter
+ * @hi: High 16 bits of the counter
+ * @lo: Low 32 bits of the counter
*
* Lays out the structure of the free running counter in memory. This counter
* increments at a rate of 27 MHz/8 on all platforms.
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index e9606d907685..6ecb77d82063 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -44,7 +44,7 @@ static cycle_t sb1250_hpt_read(struct clocksource *cs)
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
- .rating = 200,
+ .rating = 200,
.read = sb1250_hpt_read,
.mask = CLOCKSOURCE_MASK(23),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9ae813eb782e..9e6440eaa455 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -14,8 +14,7 @@
extern void prom_putchar(char);
-static void __init
-early_console_write(struct console *con, const char *s, unsigned n)
+static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
if (*s == '\n')
@@ -25,7 +24,7 @@ early_console_write(struct console *con, const char *s, unsigned n)
}
}
-static struct console early_console __initdata = {
+static struct console early_console = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 83fa1460e294..cf5509f13dd5 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -125,21 +125,21 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* move $12, ra_address
* jalr v1
* sub sp, sp, 8
- * 1: offset = 5 instructions
+ * 1: offset = 5 instructions
* 2.2 For the Other situations
*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
- * 1: offset = 4 instructions
+ * 1: offset = 4 instructions
*/
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
@@ -228,8 +228,8 @@ int ftrace_disable_ftrace_graph_caller(void)
#ifndef KBUILD_MCOUNT_RA_ADDRESS
-#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
-#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 8a0096d62812..ecb347ce1b3d 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -160,7 +160,7 @@ LEAF(r4k_wait)
.set pop
.endm
- .align 5
+ .align 5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -362,7 +362,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
SAVE_ALL
- move a0, sp
+ move a0, sp
jal nmi_exception_handler
RESTORE_ALL
.set mips3
@@ -409,7 +409,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
string escapes and emits bogus warnings if it believes it
recognizes an unknown escape code. So make the arguments
start with an n and gas will believe \n is ok ... */
- .macro __BUILD_verbose nexception
+ .macro __BUILD_verbose nexception
LONG_L a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
PRINT("Got \nexception at %08lx\012")
@@ -442,7 +442,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro BUILD_HANDLER exception handler clear verbose
- __BUILD_HANDLER \exception \handler \clear \verbose _int
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER adel ade ade silent /* #4 */
@@ -456,7 +456,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
-#ifdef CONFIG_HARDWARE_WATCHPOINTS
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
* For watch, interrupts will be enabled after the watch
* registers are read.
@@ -482,8 +482,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
- PTR_SRL k0, _PAGE_SHIFT + 1
- PTR_SLL k0, _PAGE_SHIFT + 1
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index fcf97312f328..c61cdaed2b1d 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -133,7 +133,7 @@ EXPORT(_stext)
#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
- * kernel load address. This is needed because this platform does
+ * kernel load address. This is needed because this platform does
* not have an ELF loader yet.
*/
FEXPORT(__kernel_entry)
@@ -201,7 +201,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
#ifdef CONFIG_SMP
/*
- * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 32b397b646ee..2b91fe80c436 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -178,7 +178,7 @@ handle_real_irq:
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
smtc_im_ack_irq(irq);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 883fc6cead36..44a1f792e399 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx IRQ routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
#include <asm/gt64120.h>
-#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 14ac52c5ae86..fab40f7d2e03 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -86,7 +86,7 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
*/
void ll_msc_irq(void)
{
- unsigned int irq;
+ unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index b0662cf97ea8..26f4e4c9db1a 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a5aa43d07c8e..d1fea7a054be 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -48,7 +48,7 @@ again:
}
/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
+ * Allocate the 16 legacy interrupts for i8259 devices. This happens early
* in the kernel initialization so treating allocation failure as BUG() is
* ok.
*/
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 972263bcf403..72ef2d25cbf2 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -3,13 +3,13 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
*
* This file define the irq handler for MIPS CPU interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
@@ -113,3 +114,44 @@ void __init mips_cpu_irq_init(void)
irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
handle_percpu_irq);
}
+
+#ifdef CONFIG_IRQ_DOMAIN
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ static struct irq_chip *chip;
+
+ if (hw < 2 && cpu_has_mipsmt) {
+ /* Software interrupts are used for MT/CMT IPI */
+ chip = &mips_mt_cpu_irq_controller;
+ } else {
+ chip = &mips_cpu_irq_controller;
+ }
+
+ irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+ .map = mips_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int __init mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain;
+
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain for MIPS CPU\n");
+
+ return 0;
+}
+#endif /* CONFIG_IRQ_DOMAIN */
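With the legacy domain registered above, hardware interrupt n maps to Linux IRQ MIPS_CPU_IRQ_BASE + n, and a one-cell device-tree specifier can be translated through the generic helpers. A hedged consumer-side sketch (node, handler and name strings are placeholders):

#include <linux/interrupt.h>
#include <linux/of_irq.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int demo_hook_cpu_irq(struct device_node *np)
{
	unsigned int virq;

	/* Translate the first "interrupts" cell via the registered irq domain. */
	virq = irq_of_parse_and_map(np, 0);
	if (!virq)
		return -EINVAL;

	return request_irq(virq, demo_isr, 0, "demo", NULL);
}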
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index b0c55b50218e..ab00e490482f 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -1,12 +1,12 @@
/*
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
- * linux/arch/mips/tx4927/common/tx4927_irq.c,
- * linux/arch/mips/tx4938/common/irq.c
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
+ * ahennessy@mvista.com
+ * source@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -122,7 +122,7 @@ static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
- case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 23817a6e32b6..fcaac2f132f0 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -40,7 +40,7 @@ static struct hard_trap_info {
{ 6, SIGBUS }, /* instruction bus error */
{ 7, SIGBUS }, /* data bus error */
{ 9, SIGTRAP }, /* break */
-/* { 11, SIGILL }, */ /* CPU unusable */
+/* { 11, SIGILL }, */ /* CPU unusable */
{ 12, SIGFPE }, /* overflow */
{ 13, SIGTRAP }, /* trap */
{ 14, SIGSEGV }, /* virtual instruction cache coherency */
@@ -321,7 +321,7 @@ int kgdb_ll_trap(int cmd, const char *str,
.regs = regs,
.str = str,
.err = err,
- .trapnr = trap,
+ .trapnr = trap,
.signr = sig,
};
@@ -371,7 +371,7 @@ int kgdb_arch_init(void)
union mips_instruction insn = {
.r_format = {
.opcode = spec_op,
- .func = break_op,
+ .func = break_op,
}
};
memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 158467da9bc1..12bc4ebdf55b 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -307,7 +307,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "break 0"
- * instruction. To avoid the SMP problems that can occur when we
+ * instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
@@ -535,7 +535,7 @@ void jprobe_return_end(void);
void __kprobes jprobe_return(void)
{
- /* Assembler quirk necessitates this '0,code' business. */
+ /* Assembler quirk necessitates this '0,code' business. */
asm volatile(
"break 0,%0\n\t"
".globl jprobe_return_end\n"
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
@@ -614,11 +614,11 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 253bd8ad7446..8eeee1c860c0 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -76,7 +76,7 @@ out:
return error;
}
-#define RLIM_INFINITY32 0x7fffffff
+#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
- non-seekable files. */
+ non-seekable files. */
SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
unsigned long, unused, unsigned long, a4, unsigned long, a5)
@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
- size_t count)
+ size_t count)
{
return sys_readahead(fd, merge_64(a2, a3), count);
}
@@ -276,7 +276,7 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
- merge_64(len_a4, len_a5));
+ merge_64(len_a4, len_a5));
}
asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
@@ -286,7 +286,7 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
}
SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
- u64, a3, u64, a4, int, dfd, const char __user *, pathname)
+ u64, a3, u64, a4, int, dfd, const char __user *, pathname)
{
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index df1e3e455f9a..6e58e97fcd39 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -17,9 +17,9 @@
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_user_nocheck_asm(char *__to,
- const char *__from, long __len);
+ const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
- long __len);
+ long __len);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 61d60028b888..2b70723071c3 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -55,7 +55,7 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
return 0;
}
@@ -78,7 +78,7 @@ static int apply_r_mips_higher_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+ ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
return 0;
}
@@ -87,7 +87,7 @@ static int apply_r_mips_highest_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+ ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
return 0;
}
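The +0x8000 / +0x80008000 / +0x800080008000 biases above exist because the lower relocation halves are sign-extended when the address is reassembled, so the upper part must be rounded up whenever the half below it has its top bit set. A worked standalone example of the HI16/LO16 split (plain C):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* HI16 is the upper half rounded by the sign of the lower half;
 * LO16 is the raw low 16 bits, sign-extended when consumed. */
static uint16_t hi16(uint32_t v) { return (v + 0x8000u) >> 16; }
static int16_t  lo16(uint32_t v) { return (int16_t)(v & 0xffffu); }

int main(void)
{
	uint32_t v = 0x12348abcu;	/* low half has bit 15 set */
	uint32_t rebuilt = ((uint32_t)hi16(v) << 16) + (int32_t)lo16(v);

	assert(rebuilt == v);		/* 0x1235 << 16 plus -0x7544 */
	printf("0x%08" PRIx32 "\n", rebuilt);
	return 0;
}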
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 07ff5812ffaf..977a623d9253 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -79,7 +79,7 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -122,7 +122,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
struct mips_hi16 *l;
Elf_Addr val, vallo;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (me->arch.r_mips_hi16_list != NULL) {
@@ -165,7 +165,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -230,7 +230,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
}
/*
- * Normally the hi16 list should be deallocated at this point. A
+ * Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
* relocations not followed by a R_MIPS_LO16 relocation. In that
* case, free up the list and return an error.
@@ -261,7 +261,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_unlock_irqrestore(&dbe_lock, flags);
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 207f1341578b..0e23343eb0a9 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -30,7 +30,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 7
LEAF(resume)
@@ -69,7 +69,7 @@
1:
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
- mfc0 t0, $11,7 /* CvmMemCtl */
+ mfc0 t0, $11,7 /* CvmMemCtl */
bbit0 t0, 6, 3f /* Is user access enabled? */
/* Store the CVMSEG state */
@@ -77,8 +77,8 @@
andi t0, 0x3f
/* Multiply * (cache line size/sizeof(long)/2) */
sll t0, 7-LONGLOG-1
- li t1, -32768 /* Base address of CVMSEG */
- LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
synciobdma
2:
.set noreorder
@@ -89,13 +89,13 @@
LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
bnez t0, 2b /* Loop until we've copied it all */
- LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
.set reorder
/* Disable access to CVMSEG */
- mfc0 t0, $11,7 /* CvmMemCtl */
+ mfc0 t0, $11,7 /* CvmMemCtl */
xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
- mtc0 t0, $11,7 /* CvmMemCtl */
+ mtc0 t0, $11,7 /* CvmMemCtl */
#endif
3:
/*
@@ -133,7 +133,7 @@
dmfc0 t9, $9,7 /* CvmCtl register. */
- /* Save the COP2 CRC state */
+ /* Save the COP2 CRC state */
dmfc2 t0, 0x0201
dmfc2 t1, 0x0202
dmfc2 t2, 0x0200
@@ -149,30 +149,30 @@
sd t0, OCTEON_CP2_LLM_DAT(a0)
sd t1, OCTEON_CP2_LLM_DAT+8(a0)
-1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
/* Save the COP2 crypto state */
- /* this part is mostly common to both pass 1 and later revisions */
- dmfc2 t0, 0x0084
- dmfc2 t1, 0x0080
- dmfc2 t2, 0x0081
- dmfc2 t3, 0x0082
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
sd t0, OCTEON_CP2_3DES_IV(a0)
- dmfc2 t0, 0x0088
+ dmfc2 t0, 0x0088
sd t1, OCTEON_CP2_3DES_KEY(a0)
- dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
sd t2, OCTEON_CP2_3DES_KEY+8(a0)
- dmfc2 t2, 0x0102
+ dmfc2 t2, 0x0102
sd t3, OCTEON_CP2_3DES_KEY+16(a0)
- dmfc2 t3, 0x0103
+ dmfc2 t3, 0x0103
sd t0, OCTEON_CP2_3DES_RESULT(a0)
- dmfc2 t0, 0x0104
- sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
- dmfc2 t1, 0x0105
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
sd t2, OCTEON_CP2_AES_IV(a0)
dmfc2 t2, 0x0106
sd t3, OCTEON_CP2_AES_IV+8(a0)
- dmfc2 t3, 0x0107
+ dmfc2 t3, 0x0107
sd t0, OCTEON_CP2_AES_KEY(a0)
dmfc2 t0, 0x0110
sd t1, OCTEON_CP2_AES_KEY+8(a0)
@@ -180,7 +180,7 @@
sd t2, OCTEON_CP2_AES_KEY+16(a0)
dmfc2 t2, 0x0101
sd t3, OCTEON_CP2_AES_KEY+24(a0)
- mfc0 t3, $15,0 /* Get the processor ID register */
+ mfc0 t3, $15,0 /* Get the processor ID register */
sd t0, OCTEON_CP2_AES_KEYLEN(a0)
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
sd t1, OCTEON_CP2_AES_RESULT(a0)
@@ -188,7 +188,7 @@
/* Skip to the Pass1 version of the remainder of the COP2 state */
beq t3, t0, 2f
- /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
dmfc2 t1, 0x0240
dmfc2 t2, 0x0241
dmfc2 t3, 0x0242
@@ -214,7 +214,7 @@
sd t2, OCTEON_CP2_HSH_DATW+72(a0)
dmfc2 t2, 0x024D
sd t3, OCTEON_CP2_HSH_DATW+80(a0)
- dmfc2 t3, 0x024E
+ dmfc2 t3, 0x024E
sd t0, OCTEON_CP2_HSH_DATW+88(a0)
dmfc2 t0, 0x0250
sd t1, OCTEON_CP2_HSH_DATW+96(a0)
@@ -232,9 +232,9 @@
sd t3, OCTEON_CP2_HSH_IVW+24(a0)
dmfc2 t3, 0x0257
sd t0, OCTEON_CP2_HSH_IVW+32(a0)
- dmfc2 t0, 0x0258
+ dmfc2 t0, 0x0258
sd t1, OCTEON_CP2_HSH_IVW+40(a0)
- dmfc2 t1, 0x0259
+ dmfc2 t1, 0x0259
sd t2, OCTEON_CP2_HSH_IVW+48(a0)
dmfc2 t2, 0x025E
sd t3, OCTEON_CP2_HSH_IVW+56(a0)
@@ -247,7 +247,7 @@
sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
jr ra
-2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
dmfc2 t3, 0x0040
dmfc2 t0, 0x0041
dmfc2 t1, 0x0042
@@ -269,7 +269,7 @@
sd t3, OCTEON_CP2_HSH_IVW+8(a0)
sd t0, OCTEON_CP2_HSH_IVW+16(a0)
-3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
jr ra
END(octeon_cop2_save)
@@ -280,19 +280,19 @@
.set push
.set noreorder
LEAF(octeon_cop2_restore)
- /* First cache line was prefetched before the call */
- pref 4, 128(a0)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
dmfc0 t9, $9,7 /* CvmCtl register. */
- pref 4, 256(a0)
+ pref 4, 256(a0)
ld t0, OCTEON_CP2_CRC_IV(a0)
- pref 4, 384(a0)
+ pref 4, 384(a0)
ld t1, OCTEON_CP2_CRC_LENGTH(a0)
ld t2, OCTEON_CP2_CRC_POLY(a0)
/* Restore the COP2 CRC state */
dmtc2 t0, 0x0201
- dmtc2 t1, 0x1202
+ dmtc2 t1, 0x1202
bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
dmtc2 t2, 0x4200
@@ -310,19 +310,19 @@
ld t0, OCTEON_CP2_3DES_IV(a0)
ld t1, OCTEON_CP2_3DES_KEY(a0)
ld t2, OCTEON_CP2_3DES_KEY+8(a0)
- dmtc2 t0, 0x0084
+ dmtc2 t0, 0x0084
ld t0, OCTEON_CP2_3DES_KEY+16(a0)
- dmtc2 t1, 0x0080
+ dmtc2 t1, 0x0080
ld t1, OCTEON_CP2_3DES_RESULT(a0)
- dmtc2 t2, 0x0081
+ dmtc2 t2, 0x0081
ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
dmtc2 t0, 0x0082
ld t0, OCTEON_CP2_AES_IV(a0)
- dmtc2 t1, 0x0098
+ dmtc2 t1, 0x0098
ld t1, OCTEON_CP2_AES_IV+8(a0)
- dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
ld t2, OCTEON_CP2_AES_KEY(a0)
- dmtc2 t0, 0x0102
+ dmtc2 t0, 0x0102
ld t0, OCTEON_CP2_AES_KEY+8(a0)
dmtc2 t1, 0x0103
ld t1, OCTEON_CP2_AES_KEY+16(a0)
@@ -334,14 +334,14 @@
ld t1, OCTEON_CP2_AES_RESULT(a0)
dmtc2 t2, 0x0107
ld t2, OCTEON_CP2_AES_RESULT+8(a0)
- mfc0 t3, $15,0 /* Get the processor ID register */
+ mfc0 t3, $15,0 /* Get the processor ID register */
dmtc2 t0, 0x0110
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
dmtc2 t1, 0x0100
bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
dmtc2 t2, 0x0101
- /* this code is specific for pass 1 */
+ /* this code is specific for pass 1 */
ld t0, OCTEON_CP2_HSH_DATW(a0)
ld t1, OCTEON_CP2_HSH_DATW+8(a0)
ld t2, OCTEON_CP2_HSH_DATW+16(a0)
@@ -361,10 +361,10 @@
ld t0, OCTEON_CP2_HSH_IVW+16(a0)
dmtc2 t1, 0x0048
dmtc2 t2, 0x0049
- b done_restore /* unconditional branch */
+ b done_restore /* unconditional branch */
dmtc2 t0, 0x004A
-3: /* this is post-pass1 code */
+3: /* this is post-pass1 code */
ld t2, OCTEON_CP2_HSH_DATW(a0)
ld t0, OCTEON_CP2_HSH_DATW+8(a0)
ld t1, OCTEON_CP2_HSH_DATW+16(a0)
@@ -433,7 +433,7 @@ done_restore:
* sp is assumed to point to a struct pt_regs
*
* NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- * safely modify k0 and k1.
+ * safely modify k0 and k1.
*/
.align 7
.set push
@@ -446,14 +446,14 @@ done_restore:
/* Save the multiplier state */
v3mulu k0, $0, $0
v3mulu k1, $0, $0
- sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
v3mulu k0, $0, $0
sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
ori k1, $0, 1
v3mulu k1, k1, $0
sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
v3mulu k0, $0, $0
- sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
v3mulu k1, $0, $0
sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
jr ra
@@ -475,19 +475,19 @@ done_restore:
.set noreorder
LEAF(octeon_mult_restore)
dmfc0 k1, $9,7 /* CvmCtl register. */
- ld v0, PT_MPL(sp) /* MPL0 */
- ld v1, PT_MPL+8(sp) /* MPL1 */
- ld k0, PT_MPL+16(sp) /* MPL2 */
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
/* Normally falls through, so no time wasted here */
nop
/* Restore the multiplier state */
- ld k1, PT_MTP+16(sp) /* P2 */
+ ld k1, PT_MTP+16(sp) /* P2 */
MTM0 v0 /* MPL0 */
ld v0, PT_MTP+8(sp) /* P1 */
MTM1 v1 /* MPL1 */
- ld v1, PT_MTP(sp) /* P0 */
+ ld v1, PT_MTP(sp) /* P0 */
MTM2 k0 /* MPL2 */
MTP2 k1 /* P2 */
MTP1 v0 /* P1 */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d9c81c5a6c90..45f1ffcf1a4b 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -103,13 +103,13 @@ static struct mips_pmu mipspmu;
#define M_CONFIG1_PC (1 << 4)
-#define M_PERFCTL_EXL (1 << 0)
-#define M_PERFCTL_KERNEL (1 << 1)
-#define M_PERFCTL_SUPERVISOR (1 << 2)
-#define M_PERFCTL_USER (1 << 3)
-#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
+#define M_PERFCTL_EXL (1 << 0)
+#define M_PERFCTL_KERNEL (1 << 1)
+#define M_PERFCTL_SUPERVISOR (1 << 2)
+#define M_PERFCTL_USER (1 << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter) 0
@@ -117,13 +117,13 @@ static struct mips_pmu mipspmu;
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */
-#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
-#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
-#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
-#define M_PERFCTL_WIDE (1 << 30)
-#define M_PERFCTL_MORE (1 << 31)
-#define M_PERFCTL_TC (1 << 30)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1 << 30)
+#define M_PERFCTL_MORE (1 << 31)
+#define M_PERFCTL_TC (1 << 30)
#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
M_PERFCTL_KERNEL | \
@@ -827,7 +827,7 @@ static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
- [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
@@ -1371,7 +1371,7 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
(b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
(r) == 176 || ((b) >= 50 && (b) <= 55) || \
((b) >= 64 && (b) <= 67))
-#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif
/* 74K */
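The M_PERFCTL_* definitions above are only re-aligned by this hunk, but they show how a MIPS performance-counter control word is put together. A minimal sketch of composing them, assuming the macros from this file are in scope; build_example_perfctl() is a hypothetical helper, not part of the driver, and 0x2e is simply the cache-miss event number from the Octeon map shown above.

	/* Sketch: count one event in user and kernel mode with overflow
	 * interrupts enabled, using the bit macros defined in this hunk. */
	static unsigned int build_example_perfctl(unsigned int event)
	{
		return M_PERFCTL_EVENT(event) |
		       M_PERFCTL_USER |
		       M_PERFCTL_KERNEL |
		       M_PERFCTL_INTERRUPT_ENABLE;
	}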
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 07dff54f2ce8..135c4aadccbe 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
* Copyright (C) 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2004 Maciej W. Rozycki
+ * Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -64,6 +64,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_data[n].watch_reg_masks[i]);
seq_printf(m, "]\n");
}
+ if (cpu_has_mips_r) {
+ seq_printf(m, "isa\t\t\t:");
+ if (cpu_has_mips_1)
+ seq_printf(m, "%s", "mips1");
+ if (cpu_has_mips_2)
+ seq_printf(m, "%s", " mips2");
+ if (cpu_has_mips_3)
+ seq_printf(m, "%s", " mips3");
+ if (cpu_has_mips_4)
+ seq_printf(m, "%s", " mips4");
+ if (cpu_has_mips_5)
+ seq_printf(m, "%s", " mips5");
+ if (cpu_has_mips32r1)
+ seq_printf(m, "%s", " mips32r1");
+ if (cpu_has_mips32r2)
+ seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips64r1)
+ seq_printf(m, "%s", " mips64r1");
+ if (cpu_has_mips64r2)
+ seq_printf(m, "%s", " mips64r2");
+ seq_printf(m, "\n");
+ }
seq_printf(m, "ASEs implemented\t:");
if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
@@ -73,6 +95,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
+ if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
+ if (cpu_has_vz) seq_printf(m, "%s", " vz");
seq_printf(m, "\n");
seq_printf(m, "shadow register sets\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index a33d2ef8f273..3be4405c2d14 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -154,8 +154,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
*childregs = *regs;
- childregs->regs[7] = 0; /* Clear error flag */
- childregs->regs[2] = 0; /* Child gets zero as return value */
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 4812c6d916e4..9c6299c733a3 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -50,7 +50,7 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Read a general register set. We always use the 64-bit format, even
+ * Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
@@ -326,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
+ case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long irqflags;
@@ -520,10 +520,10 @@ static inline int audit_arch(void)
{
int arch = EM_MIPS;
#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
+ arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
- arch |= __AUDIT_ARCH_LE;
+ arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
@@ -546,7 +546,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
@@ -581,7 +581,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index a3b017815eff..9486055ba660 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -124,7 +124,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
+ case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned int irqflags;
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 61c8a0f2a60c..f31063dbdaeb 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -30,38 +30,38 @@
LEAF(_save_fp_context)
li v0, 0 # assume success
cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
+ EX(swc1 $f0,(SC_FPREGS+0)(a0))
+ EX(swc1 $f1,(SC_FPREGS+8)(a0))
+ EX(swc1 $f2,(SC_FPREGS+16)(a0))
+ EX(swc1 $f3,(SC_FPREGS+24)(a0))
+ EX(swc1 $f4,(SC_FPREGS+32)(a0))
+ EX(swc1 $f5,(SC_FPREGS+40)(a0))
+ EX(swc1 $f6,(SC_FPREGS+48)(a0))
+ EX(swc1 $f7,(SC_FPREGS+56)(a0))
+ EX(swc1 $f8,(SC_FPREGS+64)(a0))
+ EX(swc1 $f9,(SC_FPREGS+72)(a0))
+ EX(swc1 $f10,(SC_FPREGS+80)(a0))
+ EX(swc1 $f11,(SC_FPREGS+88)(a0))
+ EX(swc1 $f12,(SC_FPREGS+96)(a0))
+ EX(swc1 $f13,(SC_FPREGS+104)(a0))
+ EX(swc1 $f14,(SC_FPREGS+112)(a0))
+ EX(swc1 $f15,(SC_FPREGS+120)(a0))
+ EX(swc1 $f16,(SC_FPREGS+128)(a0))
+ EX(swc1 $f17,(SC_FPREGS+136)(a0))
+ EX(swc1 $f18,(SC_FPREGS+144)(a0))
+ EX(swc1 $f19,(SC_FPREGS+152)(a0))
+ EX(swc1 $f20,(SC_FPREGS+160)(a0))
+ EX(swc1 $f21,(SC_FPREGS+168)(a0))
+ EX(swc1 $f22,(SC_FPREGS+176)(a0))
+ EX(swc1 $f23,(SC_FPREGS+184)(a0))
+ EX(swc1 $f24,(SC_FPREGS+192)(a0))
+ EX(swc1 $f25,(SC_FPREGS+200)(a0))
+ EX(swc1 $f26,(SC_FPREGS+208)(a0))
+ EX(swc1 $f27,(SC_FPREGS+216)(a0))
+ EX(swc1 $f28,(SC_FPREGS+224)(a0))
+ EX(swc1 $f29,(SC_FPREGS+232)(a0))
+ EX(swc1 $f30,(SC_FPREGS+240)(a0))
+ EX(swc1 $f31,(SC_FPREGS+248)(a0))
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
@@ -82,38 +82,38 @@ LEAF(_save_fp_context)
LEAF(_restore_fp_context)
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lwc1 $f0,(SC_FPREGS+0)(a0))
+ EX(lwc1 $f1,(SC_FPREGS+8)(a0))
+ EX(lwc1 $f2,(SC_FPREGS+16)(a0))
+ EX(lwc1 $f3,(SC_FPREGS+24)(a0))
+ EX(lwc1 $f4,(SC_FPREGS+32)(a0))
+ EX(lwc1 $f5,(SC_FPREGS+40)(a0))
+ EX(lwc1 $f6,(SC_FPREGS+48)(a0))
+ EX(lwc1 $f7,(SC_FPREGS+56)(a0))
+ EX(lwc1 $f8,(SC_FPREGS+64)(a0))
+ EX(lwc1 $f9,(SC_FPREGS+72)(a0))
+ EX(lwc1 $f10,(SC_FPREGS+80)(a0))
+ EX(lwc1 $f11,(SC_FPREGS+88)(a0))
+ EX(lwc1 $f12,(SC_FPREGS+96)(a0))
+ EX(lwc1 $f13,(SC_FPREGS+104)(a0))
+ EX(lwc1 $f14,(SC_FPREGS+112)(a0))
+ EX(lwc1 $f15,(SC_FPREGS+120)(a0))
+ EX(lwc1 $f16,(SC_FPREGS+128)(a0))
+ EX(lwc1 $f17,(SC_FPREGS+136)(a0))
+ EX(lwc1 $f18,(SC_FPREGS+144)(a0))
+ EX(lwc1 $f19,(SC_FPREGS+152)(a0))
+ EX(lwc1 $f20,(SC_FPREGS+160)(a0))
+ EX(lwc1 $f21,(SC_FPREGS+168)(a0))
+ EX(lwc1 $f22,(SC_FPREGS+176)(a0))
+ EX(lwc1 $f23,(SC_FPREGS+184)(a0))
+ EX(lwc1 $f24,(SC_FPREGS+192)(a0))
+ EX(lwc1 $f25,(SC_FPREGS+200)(a0))
+ EX(lwc1 $f26,(SC_FPREGS+208)(a0))
+ EX(lwc1 $f27,(SC_FPREGS+216)(a0))
+ EX(lwc1 $f28,(SC_FPREGS+224)(a0))
+ EX(lwc1 $f29,(SC_FPREGS+232)(a0))
+ EX(lwc1 $f30,(SC_FPREGS+240)(a0))
+ EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
END(_restore_fp_context)
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 8d32d5a6b460..5266c6ee2b35 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -42,7 +42,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 8decdfacb448..5e51219990aa 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -40,7 +40,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 5
LEAF(resume)
@@ -53,7 +53,7 @@
* check if we need to save FPU registers
*/
- beqz a3, 1f
+ beqz a3, 1f
PTR_L t3, TASK_THREAD_INFO(a0)
/*
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 804ebb2c34a6..43d2d78d3287 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -33,7 +33,7 @@ process_entry:
b process_entry
1:
- /* indirection page, update s0 */
+ /* indirection page, update s0 */
and s3, s2, 0x2
beq s3, zero, 1f
and s0, s2, ~0x2
@@ -69,7 +69,7 @@ done:
of kexec_flag. */
bal 1f
- 1: move t1,ra;
+ 1: move t1,ra;
PTR_LA t2,1b
PTR_LA t0,kexec_flag
PTR_SUB t0,t0,t2;
@@ -158,10 +158,10 @@ arg3: PTR 0x0
*/
secondary_kexec_args:
EXPORT(secondary_kexec_args)
-s_arg0: PTR 0x0
-s_arg1: PTR 0x0
-s_arg2: PTR 0x0
-s_arg3: PTR 0x0
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
.size secondary_kexec_args,PTRSIZE*4
kexec_flag:
LONG 0x1
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b8c18dcdd2c4..93c070b41b0d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -252,12 +252,12 @@ int rtlx_release(int index)
unsigned int rtlx_read_poll(int index, int can_sleep)
{
- struct rtlx_channel *chan;
+ struct rtlx_channel *chan;
- if (rtlx == NULL)
- return 0;
+ if (rtlx == NULL)
+ return 0;
- chan = &rtlx->channel[index];
+ chan = &rtlx->channel[index];
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
@@ -399,11 +399,9 @@ static int file_release(struct inode *inode, struct file *filp)
static unsigned int file_poll(struct file *file, poll_table * wait)
{
- int minor;
+ int minor = iminor(file_inode(file));
unsigned int mask = 0;
- minor = iminor(file->f_path.dentry->d_inode);
-
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
@@ -424,7 +422,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
loff_t * ppos)
{
- int minor = iminor(file->f_path.dentry->d_inode);
+ int minor = iminor(file_inode(file));
/* data available? */
if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
@@ -437,11 +435,8 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
static ssize_t file_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
- int minor;
- struct rtlx_channel *rt;
-
- minor = iminor(file->f_path.dentry->d_inode);
- rt = &rtlx->channel[minor];
+ int minor = iminor(file_inode(file));
+ struct rtlx_channel *rt = &rtlx->channel[minor];
/* any space left... */
if (!rtlx_write_poll(minor)) {
@@ -451,8 +446,8 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
return -EAGAIN;
__wait_event_interruptible(channel_wqs[minor].rt_queue,
- rtlx_write_poll(minor),
- ret);
+ rtlx_write_poll(minor),
+ ret);
if (ret)
return ret;
}
@@ -462,11 +457,11 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
static const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
- .open = file_open,
+ .open = file_open,
.release = file_release,
.write = file_write,
- .read = file_read,
- .poll = file_poll,
+ .read = file_read,
+ .poll = file_poll,
.llseek = noop_llseek,
};
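Besides whitespace, the rtlx.c hunks replace open-coded file->f_path.dentry->d_inode lookups with the file_inode() accessor. A minimal sketch of the same pattern in an unrelated character-device handler, assuming only the generic VFS helpers iminor() and file_inode(); the function name is illustrative.

	#include <linux/fs.h>

	static int example_minor_of(struct file *file)
	{
		/* file_inode() hides the dentry indirection used by the old code. */
		return iminor(file_inode(file));
	}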
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 80ff9422215f..9ea29649fc28 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -24,7 +24,7 @@
/* Highest syscall used of any syscall flavour */
#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -54,7 +54,7 @@ stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
and t0, t1
- bnez t0, syscall_trace_entry # -> yes
+ bnez t0, syscall_trace_entry # -> yes
jalr t2 # Do The Real Thing (TM)
@@ -126,8 +126,8 @@ stackargs:
la t1, 5f # load up to 3 arguments
subu t1, t3
1: lw t5, 16(t0) # argument #5 from usp
- .set push
- .set noreorder
+ .set push
+ .set noreorder
.set nomacro
jr t1
addiu t1, 6f - 5f
@@ -205,7 +205,7 @@ illegal_syscall:
jr t2
/* Unreached */
-einval: li v0, -ENOSYS
+einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
@@ -354,7 +354,7 @@ einval: li v0, -ENOSYS
sys sys_ni_syscall 0 /* was create_module */
sys sys_init_module 5
sys sys_delete_module 1
- sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
+ sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
sys sys_quotactl 4
sys sys_getpgid 1
sys sys_fchdir 1
@@ -589,7 +589,7 @@ einval: li v0, -ENOSYS
/* We pre-compute the number of _instruction_ bytes needed to
load or store the arguments 6-8. Negative values are ignored. */
- .macro sys function, nargs
+ .macro sys function, nargs
PTR \function
LONG (\nargs << 2) - (5 << 2)
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 9444ad9ea575..36cfd4060e1f 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -25,7 +25,7 @@
#define handle_sys64 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sys64, PT_SIZE, sp)
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
/*
@@ -40,7 +40,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_64_Linux # check syscall number
- sltiu t0, t0, __NR_64_Linux_syscalls + 1
+ sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
@@ -290,7 +290,7 @@ sys_call_table:
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 5175 for putpmsg */
+ PTR sys_ni_syscall /* 5175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 3b18a8e29215..693d60b0855f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -22,7 +22,7 @@
#define handle_sysn32 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sysn32, PT_SIZE, sp)
#ifndef CONFIG_MIPS32_O32
.set noat
@@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_N32_Linux # check syscall number
- sltiu t0, t0, __NR_N32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_N32_Linux_syscalls + 1
#ifndef CONFIG_MIPS32_O32
ld t1, PT_EPC(sp) # skip syscall on return
@@ -279,7 +279,7 @@ EXPORT(sysn32_call_table)
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 6175 for putpmsg */
+ PTR sys_ni_syscall /* 6175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
@@ -402,8 +402,8 @@ EXPORT(sysn32_call_table)
PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
PTR sys_perf_event_open
PTR sys_accept4
- PTR compat_sys_recvmmsg
- PTR sys_getdents64
+ PTR compat_sys_recvmmsg
+ PTR sys_getdents64
PTR sys_fanotify_init /* 6300 */
PTR sys_fanotify_mark
PTR sys_prlimit64
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 063cd0d6ddd2..af8887f779f1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -10,7 +10,7 @@
*
* Hairy, the userspace application uses a different argument passing
* convention than the kernel, so we have to translate things from o32
- * to ABI64 calling convention. 64-bit syscalls are also processed
+ * to ABI64 calling convention. 64-bit syscalls are also processed
* here for now.
*/
#include <linux/errno.h>
@@ -24,7 +24,7 @@
#include <asm/unistd.h>
#include <asm/sysmips.h>
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -185,7 +185,7 @@ LEAF(sys32_syscall)
jr t2
/* Unreached */
-einval: li v0, -ENOSYS
+einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
@@ -284,8 +284,8 @@ sys_call_table:
PTR compat_sys_old_readdir
PTR sys_mips_mmap /* 4090 */
PTR sys_munmap
- PTR sys_truncate
- PTR sys_ftruncate
+ PTR compat_sys_truncate
+ PTR compat_sys_ftruncate
PTR sys_fchmod
PTR sys_fchown /* 4095 */
PTR sys_getpriority
@@ -329,7 +329,7 @@ sys_call_table:
PTR sys_bdflush
PTR sys_sysfs /* 4135 */
PTR sys_32_personality
- PTR sys_ni_syscall /* for afs_syscall */
+ PTR sys_ni_syscall /* for afs_syscall */
PTR sys_setfsuid
PTR sys_setfsgid
PTR sys_32_llseek /* 4140 */
@@ -352,12 +352,12 @@ sys_call_table:
PTR sys_munlockall
PTR sys_sched_setparam
PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
+ PTR sys_sched_setscheduler /* 4160 */
PTR sys_sched_getscheduler
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
- PTR compat_sys_sched_rr_get_interval /* 4165 */
+ PTR compat_sys_sched_rr_get_interval /* 4165 */
PTR compat_sys_nanosleep
PTR sys_mremap
PTR sys_accept
@@ -387,7 +387,7 @@ sys_call_table:
PTR sys_prctl
PTR sys32_rt_sigreturn
PTR compat_sys_rt_sigaction
- PTR compat_sys_rt_sigprocmask /* 4195 */
+ PTR compat_sys_rt_sigprocmask /* 4195 */
PTR compat_sys_rt_sigpending
PTR compat_sys_rt_sigtimedwait
PTR compat_sys_rt_sigqueueinfo
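The one functional change in this o32 table is routing truncate/ftruncate through the compat entry points, so the 32-bit length passed by o32 userland is interpreted as a compat type rather than as a raw 64-bit register value. A rough illustration of the idea under stated assumptions: the helper names here are hypothetical, and the real compat_sys_truncate is provided by generic compat code, not by this file.

	#include <linux/compat.h>
	#include <linux/fs.h>

	/* Hypothetical native entry point used only for this sketch. */
	extern long example_native_truncate(const char __user *path, loff_t length);

	static long example_compat_truncate(const char __user *path,
					    compat_off_t length)
	{
		/* Widen the 32-bit ABI value explicitly before the native call. */
		return example_native_truncate(path, (loff_t)length);
	}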
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c41187801ce..4c774d5d5087 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -8,7 +8,7 @@
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/ioport.h>
@@ -449,7 +449,7 @@ static void __init bootmem_init(void)
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
- * This was rather impractical because it meant plat_mem_setup had to
+ * This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
* breaking plat_setup was just renamed to plat_setup and a second platform
* initialization hook for anything else was introduced.
@@ -469,7 +469,7 @@ static int __init early_parse_mem(char *p)
if (usermem == 0) {
boot_mem_map.nr_map = 0;
usermem = 1;
- }
+ }
start = 0;
size = memparse(p, &p);
if (*p == '@')
@@ -480,34 +480,75 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
-static void __init arch_mem_init(char **cmdline_p)
+#ifdef CONFIG_PROC_VMCORE
+unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
+static int __init early_parse_elfcorehdr(char *p)
+{
+ int i;
+
+ setup_elfcorehdr = memparse(p, &p);
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long start = boot_mem_map.map[i].addr;
+ unsigned long end = (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+ if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ setup_elfcorehdr_size = end - setup_elfcorehdr;
+ break;
+ }
+ }
+ /*
+ * If we don't find it in the memory map, then we shouldn't
+ * have to worry about it, as the new kernel won't use it.
+ */
+ return 0;
+}
+early_param("elfcorehdr", early_parse_elfcorehdr);
+#endif
+
+static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
{
- phys_t init_mem, init_end, init_size;
+ phys_t size;
+ int i;
+
+ size = end - mem;
+ if (!size)
+ return;
+
+ /* Make sure it is in the boot_mem_map */
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (mem >= boot_mem_map.map[i].addr &&
+ mem < (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size))
+ return;
+ }
+ add_memory_region(mem, size, type);
+}
+static void __init arch_mem_init(char **cmdline_p)
+{
extern void plat_mem_setup(void);
/* call board setup routine */
plat_mem_setup();
- init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT;
- init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT;
- init_size = init_end - init_mem;
- if (init_size) {
- /* Make sure it is in the boot_mem_map */
- int i, found;
- found = 0;
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- if (init_mem >= boot_mem_map.map[i].addr &&
- init_mem < (boot_mem_map.map[i].addr +
- boot_mem_map.map[i].size)) {
- found = 1;
- break;
- }
- }
- if (!found)
- add_memory_region(init_mem, init_size,
- BOOT_MEM_INIT_RAM);
- }
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
@@ -537,6 +578,14 @@ static void __init arch_mem_init(char **cmdline_p)
}
bootmem_init();
+#ifdef CONFIG_PROC_VMCORE
+ if (setup_elfcorehdr && setup_elfcorehdr_size) {
+ printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
+ setup_elfcorehdr, setup_elfcorehdr_size);
+ reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
+ BOOTMEM_DEFAULT);
+ }
+#endif
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
reserve_bootmem(crashk_res.start,
@@ -571,7 +620,7 @@ static void __init mips_parse_crashkernel(void)
return;
crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
+ crashk_res.end = crash_base + crash_size - 1;
}
static void __init request_crashkernel(struct resource *res)
@@ -585,7 +634,7 @@ static void __init request_crashkernel(struct resource *res)
crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
-#else /* !defined(CONFIG_KEXEC) */
+#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}
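The new early_parse_elfcorehdr() above is a standard early_param hook: memparse() converts the "elfcorehdr=" string into a physical address, and the surrounding memory-map walk sizes the region that bootmem later reserves for kdump. A minimal standalone sketch of that parsing idiom with a hypothetical parameter name; it is not the MIPS implementation.

	#include <linux/init.h>
	#include <linux/kernel.h>

	static unsigned long example_region_addr;	/* illustrative state */

	static int __init early_parse_example(char *p)
	{
		/* memparse() understands K/M/G suffixes and advances p. */
		example_region_addr = memparse(p, &p);
		return 0;
	}
	early_param("example_region", early_parse_example);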
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 95b019d92f50..b5e88fd83277 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -412,7 +412,7 @@ give_sigsegv:
#endif
static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr, sigset_t *set,
+ struct pt_regs *regs, int signr, sigset_t *set,
siginfo_t *info)
{
struct rt_sigframe __user *frame;
@@ -425,7 +425,7 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -468,7 +468,7 @@ struct mips_abi mips_abi = {
.setup_frame = setup_frame,
.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
- .setup_rt_frame = setup_rt_frame,
+ .setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
@@ -500,7 +500,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
regs->cp0_epc -= 4;
}
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
if (sig_uses_siginfo(ka))
@@ -524,7 +524,7 @@ static void do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Whee! Actually deliver the signal. */
+ /* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, regs);
return;
}
@@ -545,7 +545,7 @@ static void do_signal(struct pt_regs *regs)
regs->cp0_epc -= 4;
break;
}
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index ad7c2be0c33d..57de8b751627 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -48,7 +48,7 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
*/
-#define __NR_O32_restart_syscall 4253
+#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
@@ -56,11 +56,11 @@ typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe32 {
@@ -302,7 +302,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
- &oact->sa_handler);
+ &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
@@ -507,7 +507,7 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -552,7 +552,7 @@ struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
.signal_return_offset =
offsetof(struct mips_vdso, o32_signal_trampoline),
- .setup_rt_frame = setup_rt_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, o32_rt_signal_trampoline),
.restart = __NR_O32_restart_syscall
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 5f4ef2ae6199..b2241bb9cac1 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -51,11 +51,11 @@ extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
struct ucontextn32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_n32 {
@@ -115,7 +115,7 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -154,7 +154,7 @@ give_sigsegv:
}
struct mips_abi mips_abi_n32 = {
- .setup_rt_frame = setup_rt_frame_n32,
+ .setup_rt_frame = setup_rt_frame_n32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, n32_rt_signal_trampoline),
.restart = __NR_N32_restart_syscall
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 06cd0c610f44..c2e5d74739b4 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -172,7 +172,7 @@ void __init cmp_smp_setup(void)
if (amon_cpu_avail(i)) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
- __cpu_logical_map[ncpu] = i;
+ __cpu_logical_map[ncpu] = i;
}
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2defa2bbdaa7..bfede063d96a 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -71,7 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
/* Record this as available CPU */
set_cpu_possible(tc, true);
__cpu_number_map[tc] = ++ncpu;
- __cpu_logical_map[ncpu] = tc;
+ __cpu_logical_map[ncpu] = tc;
}
/* Disable multi-threading with TC's */
@@ -215,7 +215,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
- (unsigned long)(gp + sizeof(struct thread_info)));
+ (unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 20938a4cb52d..76016ac0a9c8 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -65,7 +65,7 @@ FEXPORT(__smtc_ipi_vector)
1:
/*
* The IPI sender has put some information on the anticipated
- * kernel stack frame. If we were in user mode, this will be
+ * kernel stack frame. If we were in user mode, this will be
* built above the saved kernel SP. If we were already in the
* kernel, it will be built above the current CPU SP.
*
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index 145771c0ed7a..aee7c8177b5d 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -35,7 +35,7 @@ static struct proc_dir_entry *smtc_stats;
atomic_t smtc_fpu_recoveries;
static int proc_read_smtc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
int totalen = 0;
int len;
@@ -68,7 +68,7 @@ static int proc_read_smtc(char *page, char **start, off_t off,
page += len;
}
len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
+ atomic_read(&smtc_fpu_recoveries));
totalen += len;
page += len;
@@ -87,5 +87,5 @@ void init_smtc_stats(void)
atomic_set(&smtc_fpu_recoveries, 0);
smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
- proc_read_smtc, NULL);
+ proc_read_smtc, NULL);
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 1d47843d3cc0..7186222dc5bb 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -41,6 +41,7 @@
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>
+#include <asm/setup.h>
/*
* SMTC Kernel needs to manipulate low-level CPU interrupt mask
@@ -235,7 +236,7 @@ static void smtc_configure_tlb(void)
mips_ihb();
/* No need to un-Halt - that happens later anyway */
for (i=0; i < vpes; i++) {
- write_tc_c0_tcbind(i);
+ write_tc_c0_tcbind(i);
/*
* To be 100% sure we're really getting the right
* information, we exit the configuration state
@@ -286,7 +287,7 @@ static void smtc_configure_tlb(void)
/*
* Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs. Plaform code needs
+ * using the specified available VPEs and TCs. Plaform code needs
* to ensure that each MIPS MT core invokes this routine on reset,
* one at a time(!).
*
@@ -348,7 +349,7 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
{
/*
* FIXME: Multi-core SMTC hasn't been tested and the
- * maximum number of VPEs may change.
+ * maximum number of VPEs may change.
*/
cp1contexts[0] = smtc_nconf1[0] - 1;
cp1contexts[1] = smtc_nconf1[1];
@@ -761,9 +762,9 @@ void smtc_forward_irq(struct irq_data *d)
* mask has been purged of bits corresponding to nonexistent and
* offline "CPUs", and to TCs bound to VPEs other than the VPE
* connected to the physical interrupt input for the interrupt
- * in question. Otherwise we have a nasty problem with interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
* mask management. This is best handled in non-performance-critical
- * platform IRQ affinity setting code, to minimize interrupt-time
+ * platform IRQ affinity setting code, to minimize interrupt-time
* checks.
*/
@@ -899,10 +900,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
mips_ihb();
/*
- * Inspect TCStatus - if IXMT is set, we have to queue
+ * Inspect TCStatus - if IXMT is set, we have to queue
* a message. Otherwise, we set up the "interrupt"
* of the other TC
- */
+ */
tcstatus = read_tc_c0_tcstatus();
if ((tcstatus & TCSTATUS_IXMT) != 0) {
@@ -964,7 +965,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
* CU bit of Status is indicator that TC was
* already running on a kernel stack...
*/
- if (tcstatus & ST0_CU0) {
+ if (tcstatus & ST0_CU0) {
/* Note that this "- 1" is pointer arithmetic */
kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
} else {
@@ -1288,7 +1289,7 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < hook_ntcs; tc++) {
tcnoprog[tc] = 0;
clock_hang_reported[tc] = 0;
- }
+ }
for (vpe = 0; vpe < 2; vpe++)
for (im = 0; im < 8; im++)
imstuckcount[vpe][im] = 0;
@@ -1485,7 +1486,7 @@ static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done
- * by other TCs, halt them all. This code assumes that
+ * by other TCs, halt them all. This code assumes that
* a DVPE has already been done, so while their Halted
* state is theoretically architecturally unstable, in
* practice, it's not going to change while we're looking
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 7f1eca3858de..1ff43d5ac2c4 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -25,7 +25,7 @@ static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
-#define COUNTON 100
+#define COUNTON 100
#define NR_LOOPS 5
void __cpuinit synchronise_count_master(int cpu)
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b32466a1a1d2..b79d13f95bf0 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -41,9 +41,9 @@
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
- * convention. It returns results in registers $v0 / $v1 which means there
+ * convention. It returns results in registers $v0 / $v1 which means there
* is no need for it to do verify the validity of a userspace pointer
- * argument. Historically that used to be expensive in Linux. These days
+ * argument. Historically that used to be expensive in Linux. These days
* the performance advantage is negligible.
*/
asmlinkage int sysm_pipe(void)
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 99d73b72b00b..9d686bf97b0e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -5,8 +5,8 @@
*
* Common time service routines for MIPS machines.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -62,8 +62,8 @@ EXPORT_SYMBOL(perf_irq);
* time_init() - it does the following things.
*
* 1) plat_time_init() -
- * a) (optional) set up RTC routines,
- * b) (optional) calibrate and set the mips_hpt_frequency
+ * a) (optional) set up RTC routines,
+ * b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
@@ -75,7 +75,7 @@ unsigned int mips_hpt_frequency;
* This function exists in order to cause an error due to a duplicate
* definition if platform code should have its own implementation. The hook
* to use instead is plat_time_init. plat_time_init does not receive the
- * irqaction pointer argument anymore. This is because any function which
+ * irqaction pointer argument anymore. This is because any function which
* initializes an interrupt timer now takes care of its own request_irq rsp.
* setup_irq calls and each clock_event_device should use its own
* struct irqrequest.
@@ -93,7 +93,7 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4000MC:
/*
* V3.0 is documented as suffering from the mfc0 from count bug.
- * Afaik this is the last version of the R4000. Later versions
+ * Afaik this is the last version of the R4000. Later versions
* were marketed as R4400.
*/
return 1;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cf7ac5483f53..a200b5bdbb87 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -164,7 +164,7 @@ static void show_stacktrace(struct task_struct *task,
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ printk("\n ");
if (i > 39) {
printk(" ...");
break;
@@ -279,7 +279,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
- printk("Status: %08x ", (uint32_t) regs->cp0_status);
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
@@ -396,7 +396,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
raw_spin_unlock_irq(&die_lock);
oops_exit();
@@ -441,7 +441,7 @@ asmlinkage void do_be(struct pt_regs *regs)
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
- /* XXX For now. Fixme, this searches the wrong table ... */
+ /* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
@@ -518,7 +518,7 @@ static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
@@ -558,7 +558,7 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
@@ -739,7 +739,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
- own_fpu(1); /* Using the FPU again. */
+ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
@@ -966,7 +966,7 @@ int cu2_notifier_call_chain(unsigned long val, void *v)
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
- void *data)
+ void *data)
{
struct pt_regs *regs = data;
@@ -974,7 +974,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
"instruction", regs);
- /* Fall through */
+ /* Fall through */
case CU2_EXCEPTION:
force_sig(SIGILL, current);
@@ -1029,10 +1029,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/*
* Old (MIPS I and MIPS II) processors will set this code
* for COP1X opcode instructions that replaced the original
- * COP3 space. We don't limit COP1 space instructions in
+ * COP3 space. We don't limit COP1 space instructions in
* the emulator according to the CPU ISA, so we want to
* treat COP1X instructions consistently regardless of which
- * code the CPU chose. Therefore we redirect this trap to
+ * code the CPU chose. Therefore we redirect this trap to
* the FP emulator too.
*
* Then some newer FPU-less processors use this code
@@ -1044,9 +1044,9 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
+ if (used_math()) /* Using the FPU again. */
own_fpu(1);
- else { /* First time FPU user. */
+ else { /* First time FPU user. */
init_fpu();
set_used_math();
}
@@ -1114,7 +1114,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
show_regs(regs);
if (multi_match) {
- printk("Index : %0x\n", read_c0_index());
+ printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
@@ -1181,7 +1181,7 @@ asmlinkage void do_dsp(struct pt_regs *regs)
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
- * Game over - no way to handle this if it ever occurs. Most probably
+ * Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
@@ -1705,7 +1705,7 @@ void __init trap_init(void)
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
- return; /* Already done */
+ return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
@@ -1799,7 +1799,7 @@ void __init trap_init(void)
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
- * written yet. Well, anyway there is no R6000 machine on the
+ * written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
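Apart from whitespace, the traps.c hunks adapt die() to the two-argument add_taint(), which now also records whether lockdep state can still be trusted after the taint. A minimal sketch of the updated call pattern, assuming the generic kernel API; the surrounding function is illustrative only.

	#include <linux/kernel.h>

	static void example_mark_oops(void)
	{
		/* Second argument tells lockdep whether its state is still reliable. */
		add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	}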
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 9c58bdf58f23..6087a54c86a0 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -21,11 +21,11 @@
*
* For now I enable fixing of address errors by default to make life easier.
* I however intend to disable this somewhen in the future when the alignment
- * problems with user programs have been fixed. For programmers this is the
+ * problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
- * across fork(2) and execve(2) calls. If you really want to use the
+ * across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
@@ -43,34 +43,34 @@
* #include <sys/sysmips.h>
*
* struct foo {
- * unsigned char bar[8];
+ * unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
- * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
- * unsigned int *p = (unsigned int *) (x.bar + 3);
- * int i;
+ * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ * unsigned int *p = (unsigned int *) (x.bar + 3);
+ * int i;
*
- * if (argc > 1)
- * sysmips(MIPS_FIXADE, atoi(argv[1]));
+ * if (argc > 1)
+ * sysmips(MIPS_FIXADE, atoi(argv[1]));
*
- * printf("*p = %08lx\n", *p);
+ * printf("*p = %08lx\n", *p);
*
- * *p = 0xdeadface;
+ * *p = 0xdeadface;
*
- * for(i = 0; i <= 7; i++)
- * printf("%02x ", x.bar[i]);
- * printf("\n");
+ * for(i = 0; i <= 7; i++)
+ * printf("%02x ", x.bar[i]);
+ * printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in the practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
- * exception for the R6000.
- * A store crossing a page boundary might be executed only partially.
- * Undo the partial store in this case.
+ * exception for the R6000.
+ * A store crossing a page boundary might be executed only partially.
+ * Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/signal.h>
@@ -86,7 +86,7 @@
#include <asm/inst.h>
#include <asm/uaccess.h>
-#define STR(x) __STR(x)
+#define STR(x) __STR(x)
#define __STR(x) #x
enum {
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 0a4336b803e9..05826d20a792 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -22,12 +22,12 @@ PHDRS {
#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_LITTLE_ENDIAN
- jiffies = jiffies_64;
+ jiffies = jiffies_64;
#else
- jiffies = jiffies_64 + 4;
+ jiffies = jiffies_64 + 4;
#endif
#else
- jiffies = jiffies_64;
+ jiffies = jiffies_64;
#endif
SECTIONS
@@ -139,7 +139,7 @@ SECTIONS
/*
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
- * gets that alignment. .sbss should be empty, so there will be
+ * gets that alignment. .sbss should be empty, so there will be
* no holes after __init_end. */
BSS_SECTION(0, 0x10000, 0)
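The jiffies assignments above are only re-indented here, but they encode a byte-order detail: jiffies aliases the low 32 bits of jiffies_64, which sit at offset 0 on little-endian and offset 4 on big-endian 32-bit kernels. A small user-space illustration of that layout, offered purely as a demonstration of the endianness argument, not as kernel code.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t jiffies_64 = 0x1122334455667788ULL;
		uint32_t word;

		/* On big-endian the low half lives 4 bytes in; on little-endian
		 * the same offset holds the high half. */
		memcpy(&word, (unsigned char *)&jiffies_64 + 4, sizeof(word));
		printf("word at offset 4: %08x\n", word);
		return 0;
	}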
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 147cec19621d..1765bab000a0 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -254,7 +254,7 @@ static void __maybe_unused dump_mtregs(void)
val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
}
-/* Find some VPE program space */
+/* Find some VPE program space */
static void *alloc_progmem(unsigned long len)
{
void *addr;
@@ -292,7 +292,7 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
- might -- code, read-only data, read-write data, small data. Tally
+ might -- code, read-only data, read-write data, small data. Tally
sizes, and place the offsets into sh_entsize fields: high bit means it
belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
@@ -386,7 +386,7 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
if( (rel > 32768) || (rel < -32768) ) {
printk(KERN_DEBUG "VPE loader: "
- "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+ "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
return -ENOEXEC;
}
@@ -458,7 +458,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
Elf32_Addr val, vallo;
struct mips_hi16 *l, *next;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (mips_hi16_list != NULL) {
@@ -470,7 +470,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
/*
* The value for the HI16 had best be the same.
*/
- if (v != l->value) {
+ if (v != l->value) {
printk(KERN_DEBUG "VPE loader: "
"apply_r_mips_lo16/hi16: \t"
"inconsistent value information\n");
@@ -505,7 +505,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -579,7 +579,7 @@ static int apply_relocations(Elf32_Shdr *sechdrs,
res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
if( res ) {
char *r = rstrs[ELF32_R_TYPE(r_info)];
- printk(KERN_WARNING "VPE loader: .text+0x%x "
+ printk(KERN_WARNING "VPE loader: .text+0x%x "
"relocation type %s for symbol \"%s\" failed\n",
rel[i].r_offset, r ? r : "UNKNOWN",
strtab + sym->st_name);
@@ -697,18 +697,7 @@ static int vpe_run(struct vpe * v)
dmt_flag = dmt();
vpeflags = dvpe();
- if (!list_empty(&v->tc)) {
- if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- evpe(vpeflags);
- emt(dmt_flag);
- local_irq_restore(flags);
-
- printk(KERN_WARNING
- "VPE loader: TC %d is already in use.\n",
- v->tc->index);
- return -ENOEXEC;
- }
- } else {
+ if (list_empty(&v->tc)) {
evpe(vpeflags);
emt(dmt_flag);
local_irq_restore(flags);
@@ -720,6 +709,8 @@ static int vpe_run(struct vpe * v)
return -ENOEXEC;
}
+ t = list_first_entry(&v->tc, struct tc, tc);
+
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -772,7 +763,7 @@ static int vpe_run(struct vpe * v)
/* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
- | (t->index << VPECONF0_XTC_SHIFT));
+ | (t->index << VPECONF0_XTC_SHIFT));
back_to_back_c0_hazard();
@@ -926,34 +917,34 @@ static int vpe_elfload(struct vpe * v)
secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
}
- /* Fix up syms, so that st_value is a pointer to location. */
- simplify_symbols(sechdrs, symindex, strtab, secstrings,
- hdr->e_shnum, &mod);
-
- /* Now do relocations. */
- for (i = 1; i < hdr->e_shnum; i++) {
- const char *strtab = (char *)sechdrs[strindex].sh_addr;
- unsigned int info = sechdrs[i].sh_info;
-
- /* Not a valid relocation section? */
- if (info >= hdr->e_shnum)
- continue;
-
- /* Don't bother with non-allocated sections */
- if (!(sechdrs[info].sh_flags & SHF_ALLOC))
- continue;
-
- if (sechdrs[i].sh_type == SHT_REL)
- err = apply_relocations(sechdrs, strtab, symindex, i,
- &mod);
- else if (sechdrs[i].sh_type == SHT_RELA)
- err = apply_relocate_add(sechdrs, strtab, symindex, i,
- &mod);
- if (err < 0)
- return err;
-
- }
- } else {
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strtab, secstrings,
+ hdr->e_shnum, &mod);
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ if (sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocations(sechdrs, strtab, symindex, i,
+ &mod);
+ else if (sechdrs[i].sh_type == SHT_RELA)
+ err = apply_relocate_add(sechdrs, strtab, symindex, i,
+ &mod);
+ if (err < 0)
+ return err;
+
+ }
+ } else {
struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
for (i = 0; i < hdr->e_phnum; i++) {
@@ -968,16 +959,16 @@ static int vpe_elfload(struct vpe * v)
}
for (i = 0; i < hdr->e_shnum; i++) {
- /* Internal symbols and strings. */
- if (sechdrs[i].sh_type == SHT_SYMTAB) {
- symindex = i;
- strindex = sechdrs[i].sh_link;
- strtab = (char *)hdr + sechdrs[strindex].sh_offset;
-
- /* mark the symtab's address for when we try to find the
- magic symbols */
- sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
- }
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+
+ /* mark the symtab's address for when we try to find the
+ magic symbols */
+ sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+ }
}
}
@@ -1049,7 +1040,7 @@ static int getcwd(char *buff, int size)
return ret;
}
-/* checks VPE is unused and gets ready to load program */
+/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
@@ -1121,11 +1112,11 @@ static int vpe_release(struct inode *inode, struct file *filp)
if (vpe_elfload(v) >= 0) {
vpe_run(v);
} else {
- printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+ printk(KERN_WARNING "VPE loader: ELF load failed.\n");
ret = -ENOEXEC;
}
} else {
- printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+ printk(KERN_WARNING "VPE loader: only elf files are supported\n");
ret = -ENOEXEC;
}
@@ -1149,7 +1140,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
size_t ret = count;
struct vpe *v;
- if (iminor(file->f_path.dentry->d_inode) != minor)
+ if (iminor(file_inode(file)) != minor)
return -ENODEV;
v = get_vpe(tclimit);
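The vpe_run() change above drops the defensive open-coded list_entry() check and fetches the first TC with list_first_entry() once the list is known to be non-empty. A minimal sketch of that idiom with hypothetical types, assuming only the generic <linux/list.h> helpers.

	#include <linux/list.h>

	struct example_item {
		int index;
		struct list_head node;
	};

	static struct example_item *example_first(struct list_head *head)
	{
		if (list_empty(head))
			return NULL;
		/* Equivalent to list_entry(head->next, struct example_item, node). */
		return list_first_entry(head, struct example_item, node);
	}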
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index c15406968030..7726f6157d9e 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -12,7 +12,7 @@
#include <asm/watch.h>
/*
- * Install the watch registers for the current thread. A maximum of
+ * Install the watch registers for the current thread. A maximum of
* four registers are installed although the machine may have more.
*/
void mips_install_watch_registers(void)
@@ -72,7 +72,7 @@ void mips_read_watch_registers(void)
}
/*
- * Disable all watch registers. Although only four registers are
+ * Disable all watch registers. Although only four registers are
* installed, all are cleared to eliminate the possibility of endless
* looping in the watch handler.
*/
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index ce2f129b081f..3fc2e6d70c77 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -26,13 +26,15 @@
#include "prom.h"
/* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[3];
+static struct clk cpu_clk_generic[4];
-void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe)
{
cpu_clk_generic[0].rate = cpu;
cpu_clk_generic[1].rate = fpi;
cpu_clk_generic[2].rate = io;
+ cpu_clk_generic[3].rate = ppe;
}
struct clk *clk_get_cpu(void)
@@ -51,6 +53,12 @@ struct clk *clk_get_io(void)
return &cpu_clk_generic[2];
}
+struct clk *clk_get_ppe(void)
+{
+ return &cpu_clk_generic[3];
+}
+EXPORT_SYMBOL_GPL(clk_get_ppe);
+
static inline int clk_good(struct clk *clk)
{
return clk && !IS_ERR(clk);
@@ -145,9 +153,9 @@ static inline u32 get_counter_resolution(void)
u32 res;
__asm__ __volatile__(
- ".set push\n"
- ".set mips32r2\n"
- "rdhwr %0, $3\n"
+ ".set push\n"
+ ".set mips32r2\n"
+ "rdhwr %0, $3\n"
".set pop\n"
: "=&r" (res)
: /* no input */
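Beyond the indentation fixes, clk.c now carries a fourth static clock so the packet-processing engine (PPE) rate is registered next to the cpu, fpi and io rates, and clk_get_ppe() is exported for drivers. A minimal consumer sketch, assuming the accessor is declared in the Lantiq SoC headers (that declaration is not shown in this section) and with ppe_demo_init() purely illustrative:

	#include <linux/clk.h>
	#include <linux/printk.h>

	struct clk *clk_get_ppe(void);	/* exported by arch/mips/lantiq/clk.c above */

	static int ppe_demo_init(void)
	{
		struct clk *clk = clk_get_ppe();

		/* the static clocks are plain rate holders, so only the rate is of interest */
		pr_info("PPE clock: %lu Hz\n", clk_get_rate(clk));
		return 0;
	}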
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index fa670602b91b..77e4bdb1fe8c 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -27,12 +27,15 @@
#define CLOCK_167M 166666667
#define CLOCK_196_608M 196608000
#define CLOCK_200M 200000000
+#define CLOCK_222M 222000000
+#define CLOCK_240M 240000000
#define CLOCK_250M 250000000
#define CLOCK_266M 266666666
#define CLOCK_300M 300000000
#define CLOCK_333M 333333333
#define CLOCK_393M 393215332
#define CLOCK_400M 400000000
+#define CLOCK_450M 450000000
#define CLOCK_500M 500000000
#define CLOCK_600M 600000000
@@ -64,15 +67,17 @@ struct clk {
};
extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
- unsigned long io);
+ unsigned long io, unsigned long ppe);
extern unsigned long ltq_danube_cpu_hz(void);
extern unsigned long ltq_danube_fpi_hz(void);
+extern unsigned long ltq_danube_pp32_hz(void);
extern unsigned long ltq_ar9_cpu_hz(void);
extern unsigned long ltq_ar9_fpi_hz(void);
extern unsigned long ltq_vr9_cpu_hz(void);
extern unsigned long ltq_vr9_fpi_hz(void);
+extern unsigned long ltq_vr9_pp32_hz(void);
#endif
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
index 3a4520f009cf..d4c59e003708 100644
--- a/arch/mips/lantiq/dts/danube.dtsi
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -97,7 +97,7 @@
compatible = "lantiq,pci-xway";
bus-range = <0x0 0x0>;
ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
- 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+ 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
reg = <0x7000000 0x8000 /* config space */
0xE105400 0x400>; /* pci bridge */
};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
index 68c17310bc82..fac1f5b178eb 100644
--- a/arch/mips/lantiq/dts/easy50712.dts
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -103,7 +103,7 @@
lantiq,bus-clock = <33333333>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
- 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+ 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
>;
gpios-reset = <&gpio 21 0>;
req-mask = <0x1>; /* GNT1 */
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 2d4ced332b37..ff4894a833ee 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -241,9 +241,9 @@ void __init ltq_soc_init(void)
/* get our 3 static rates for cpu, fpi and io clocks */
if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
- clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
else
- clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
/* add our clock domains */
clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index a7935bf0fecb..51194875f158 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -33,17 +33,10 @@
/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C 0x0000
#define LTQ_EIU_EXIN_INIC 0x0004
+#define LTQ_EIU_EXIN_INC 0x0008
#define LTQ_EIU_EXIN_INEN 0x000C
-/* irq numbers used by the external interrupt unit (EIU) */
-#define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30)
-#define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31)
-#define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26)
-#define LTQ_EIU_IR3 INT_NUM_IM1_IRL0
-#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
-#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
-#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
-#define XWAY_EXIN_COUNT 3
+/* number of external interrupts */
#define MAX_EIU 6
/* the performance counter */
@@ -72,20 +65,19 @@
int gic_present;
#endif
-static unsigned short ltq_eiu_irq[MAX_EIU] = {
- LTQ_EIU_IR0,
- LTQ_EIU_IR1,
- LTQ_EIU_IR2,
- LTQ_EIU_IR3,
- LTQ_EIU_IR4,
- LTQ_EIU_IR5,
-};
-
static int exin_avail;
+static struct resource ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
+int ltq_eiu_get_irq(int exin)
+{
+ if (exin < exin_avail)
+ return ltq_eiu_irq[exin].start;
+ return -1;
+}
+
void ltq_disable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
@@ -128,19 +120,65 @@ void ltq_enable_irq(struct irq_data *d)
ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}
+static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < MAX_EIU; i++) {
+ if (d->hwirq == ltq_eiu_irq[i].start) {
+ int val = 0;
+ int edge = 0;
+
+ switch (type) {
+ case IRQF_TRIGGER_NONE:
+ break;
+ case IRQF_TRIGGER_RISING:
+ val = 1;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ val = 2;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+ val = 3;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ val = 5;
+ break;
+ case IRQF_TRIGGER_LOW:
+ val = 6;
+ break;
+ default:
+ pr_err("invalid type %d for irq %ld\n",
+ type, d->hwirq);
+ return -EINVAL;
+ }
+
+ if (edge)
+ irq_set_handler(d->hwirq, handle_edge_irq);
+
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
+ (val << (i * 4)), LTQ_EIU_EXIN_C);
+ }
+ }
+
+ return 0;
+}
+
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i]) {
- /* low level - we should really handle set_type */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
- (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
+ if (d->hwirq == ltq_eiu_irq[i].start) {
+ /* by default we are low level triggered */
+ ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
- LTQ_EIU_EXIN_INIC);
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
+ LTQ_EIU_EXIN_INC);
/* enable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -157,7 +195,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i].start) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -186,6 +224,7 @@ static struct irq_chip ltq_eiu_type = {
.irq_ack = ltq_ack_irq,
.irq_mask = ltq_disable_irq,
.irq_mask_ack = ltq_mask_and_ack_irq,
+ .irq_set_type = ltq_eiu_settype,
};
static void ltq_hw_irqdispatch(int module)
@@ -301,7 +340,7 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
return 0;
for (i = 0; i < exin_avail; i++)
- if (hw == ltq_eiu_irq[i])
+ if (hw == ltq_eiu_irq[i].start)
chip = &ltq_eiu_type;
irq_set_chip_and_handler(hw, chip, handle_level_irq);
@@ -323,7 +362,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
struct device_node *eiu_node;
struct resource res;
- int i;
+ int i, ret;
for (i = 0; i < MAX_IM; i++) {
if (of_address_to_resource(node, i, &res))
@@ -340,17 +379,19 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
}
/* the external interrupts are optional and xway only */
- eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
- const __be32 *count = of_get_property(node,
- "lantiq,count", NULL);
+ exin_avail = of_irq_count(eiu_node);
- if (count)
- exin_avail = *count;
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
+ ret = of_irq_to_resource_table(eiu_node,
+ ltq_eiu_irq, exin_avail);
+ if (ret != exin_avail)
+ panic("failed to load external irq resources\n");
+
if (request_mem_region(res.start, resource_size(&res),
res.name) < 0)
pr_err("Failed to request eiu memory");
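The irq.c rework drops the hard-coded EIU interrupt numbers in favour of resources read from the lantiq,eiu-xway devicetree node, and wires up a real .irq_set_type callback so the trigger mode (rising, falling, both, high or low) is programmed into EXIN_C instead of being forced to low-level. A minimal sketch of a consumer requesting external input 0 as falling-edge (demo_isr and the request label are hypothetical):

	#include <linux/interrupt.h>

	int ltq_eiu_get_irq(int exin);	/* helper added by this patch */

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_attach_exin0(void)
	{
		int irq = ltq_eiu_get_irq(0);

		if (irq < 0)
			return -ENODEV;

		/* the trigger flag reaches ltq_eiu_settype() through .irq_set_type */
		return request_irq(irq, demo_isr, IRQF_TRIGGER_FALLING,
				   "demo-exin0", NULL);
	}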
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index a3fa1a2bfaae..8e07b5f28ef1 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,7 +10,7 @@
#define _LTQ_PROM_H__
#define LTQ_SYS_TYPE_LEN 0x100
-#define LTQ_SYS_REV_LEN 0x10
+#define LTQ_SYS_REV_LEN 0x10
struct ltq_soc_info {
unsigned char *name;
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 9aa17f79a742..1ab576dc9bd1 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -53,6 +53,29 @@ unsigned long ltq_danube_cpu_hz(void)
}
}
+unsigned long ltq_danube_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_240M;
+ break;
+ case 2:
+ clk = CLOCK_222M;
+ break;
+ case 3:
+ clk = CLOCK_133M;
+ break;
+ default:
+ clk = CLOCK_266M;
+ break;
+ }
+
+ return clk;
+}
+
unsigned long ltq_ar9_sys_hz(void)
{
if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
@@ -149,3 +172,23 @@ unsigned long ltq_vr9_fpi_hz(void)
return clk;
}
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_450M;
+ break;
+ case 2:
+ clk = CLOCK_300M;
+ break;
+ default:
+ clk = CLOCK_500M;
+ break;
+ }
+
+ return clk;
+}
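The two pp32 helpers added above decode the PPE rate straight from CGU_SYS: bits 8:7 on Danube select 240, 222 or 133 MHz with 266 MHz as the fallback, and bits 17:16 on VR9 select 450 or 300 MHz with 500 MHz as the fallback. The Danube decode pulled out as a tiny standalone helper for reference (danube_pp32_rate is illustrative only; the CLOCK_* constants come from arch/mips/lantiq/clk.h shown earlier):

	#include "../clk.h"	/* CLOCK_* rate constants */

	static unsigned long danube_pp32_rate(unsigned int cgu_sys)
	{
		switch ((cgu_sys >> 7) & 3) {
		case 1:
			return CLOCK_240M;
		case 2:
			return CLOCK_222M;
		case 3:
			return CLOCK_133M;
		default:
			return CLOCK_266M;
		}
	}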
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 544dbb7fb421..1fa0f175357e 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -78,10 +78,19 @@ static struct ltq_xrx200_gphy_reset {
/* reset and boot a gphy. these phys only exist on xrx200 SoC */
int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr)
{
+ struct clk *clk;
+
if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) {
dev_err(dev, "this SoC has no GPHY\n");
return -EINVAL;
}
+
+ clk = clk_get_sys("1f203000.rcu", "gphy");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_enable(clk);
+
if (id > 1) {
dev_err(dev, "%u is an invalid gphy id\n", id);
return -EINVAL;
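xrx200_gphy_boot() now looks up and enables a "gphy" clock before the firmware address is handed to the PHY; the matching gate is registered as clkdev_add_pmu("1f203000.rcu", "gphy", 0, PMU_GPHY) in the xway/sysctrl.c hunk further down. A minimal sketch of that get-and-enable pattern with the error path spelled out (demo_enable_gphy_gate is illustrative):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int demo_enable_gphy_gate(void)
	{
		/* ids must match what clkdev_add_pmu() registered */
		struct clk *clk = clk_get_sys("1f203000.rcu", "gphy");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);	/* opens the PMU gate for the GPHY block */
		return 0;
	}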
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3925e6609acc..c24924fe087d 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -305,7 +305,7 @@ void __init ltq_soc_init(void)
/* check if all the core register ranges are available */
if (!np_pmu || !np_cgu || !np_ebu)
- panic("Failed to load core nodess from devicetree");
+ panic("Failed to load core nodes from devicetree");
if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
of_address_to_resource(np_cgu, 0, &res_cgu) ||
@@ -356,14 +356,16 @@ void __init ltq_soc_init(void)
if (of_machine_is_compatible("lantiq,ase")) {
if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
- clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_static(CLOCK_266M, CLOCK_133M,
+ CLOCK_133M, CLOCK_266M);
else
- clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_static(CLOCK_133M, CLOCK_133M,
+ CLOCK_133M, CLOCK_133M);
clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
} else if (of_machine_is_compatible("lantiq,vr9")) {
clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
- ltq_vr9_fpi_hz());
+ ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
@@ -374,12 +376,13 @@ void __init ltq_soc_init(void)
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
+ clkdev_add_pmu("1f203000.rcu", "gphy", 0, PMU_GPHY);
} else if (of_machine_is_compatible("lantiq,ar9")) {
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
- ltq_ar9_fpi_hz());
+ ltq_ar9_fpi_hz(), CLOCK_250M);
clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
} else {
clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
- ltq_danube_fpi_hz());
+ ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
}
}
diff --git a/arch/mips/lasat/Makefile b/arch/mips/lasat/Makefile
index 9cc4e4db8b99..869bd3b37c17 100644
--- a/arch/mips/lasat/Makefile
+++ b/arch/mips/lasat/Makefile
@@ -2,7 +2,7 @@
# Makefile for the LASAT specific kernel interface routines under Linux.
#
-obj-y += reset.o setup.o prom.o lasat_board.o \
+obj-y += reset.o setup.o prom.o lasat_board.o \
at93c.o interrupt.o serial.o
obj-$(CONFIG_LASAT_SYSCTL) += sysctl.o
diff --git a/arch/mips/lasat/ds1603.h b/arch/mips/lasat/ds1603.h
index 2da3704044fd..3e718b1cca02 100644
--- a/arch/mips/lasat/ds1603.h
+++ b/arch/mips/lasat/ds1603.h
@@ -25,7 +25,7 @@ void ds1603_enable(void);
void ds1603_disable(void);
void ds1603_init(struct ds_defs *);
-#define TRIMMER_DEFAULT 3
+#define TRIMMER_DEFAULT 3
#define TRIMMER_DISABLE_RTC 0
#endif
diff --git a/arch/mips/lasat/image/Makefile b/arch/mips/lasat/image/Makefile
index 460626b6d62f..dfb509d21d8e 100644
--- a/arch/mips/lasat/image/Makefile
+++ b/arch/mips/lasat/image/Makefile
@@ -28,7 +28,7 @@ $(obj)/head.o: $(obj)/head.S $(KERNEL_IMAGE)
OBJECTS = head.o kImage.o
-rom.sw: $(obj)/rom.sw
+rom.sw: $(obj)/rom.sw
rom.bin: $(obj)/rom.bin
$(obj)/rom.sw: $(obj)/rom.bin
diff --git a/arch/mips/lasat/image/head.S b/arch/mips/lasat/image/head.S
index e0ecda92c40a..41babbe43a8e 100644
--- a/arch/mips/lasat/image/head.S
+++ b/arch/mips/lasat/image/head.S
@@ -7,7 +7,7 @@
/* Magic words identifying a software image */
.word LASAT_K_MAGIC0_VAL
- .word LASAT_K_MAGIC1_VAL
+ .word LASAT_K_MAGIC1_VAL
/* Image header version */
.word 0x00000002
diff --git a/arch/mips/lasat/picvue.c b/arch/mips/lasat/picvue.c
index d3d04c392e25..7eb334892693 100644
--- a/arch/mips/lasat/picvue.c
+++ b/arch/mips/lasat/picvue.c
@@ -163,12 +163,12 @@ int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE])
}
#define FUNC_SET_CMD 0x20
-#define EIGHT_BYTE (1 << 4)
-#define FOUR_BYTE 0
-#define TWO_LINES (1 << 3)
-#define ONE_LINE 0
-#define LARGE_FONT (1 << 2)
-#define SMALL_FONT 0
+#define EIGHT_BYTE (1 << 4)
+#define FOUR_BYTE 0
+#define TWO_LINES (1 << 3)
+#define ONE_LINE 0
+#define LARGE_FONT (1 << 2)
+#define SMALL_FONT 0
static void pvc_funcset(u8 cmd)
{
@@ -177,9 +177,9 @@ static void pvc_funcset(u8 cmd)
}
#define ENTRYMODE_CMD 0x4
-#define AUTO_INC (1 << 1)
-#define AUTO_DEC 0
-#define CURSOR_FOLLOWS_DISP (1 << 0)
+#define AUTO_INC (1 << 1)
+#define AUTO_DEC 0
+#define CURSOR_FOLLOWS_DISP (1 << 0)
static void pvc_entrymode(u8 cmd)
{
@@ -188,20 +188,20 @@ static void pvc_entrymode(u8 cmd)
}
#define DISP_CNT_CMD 0x08
-#define DISP_OFF 0
-#define DISP_ON (1 << 2)
-#define CUR_ON (1 << 1)
-#define CUR_BLINK (1 << 0)
+#define DISP_OFF 0
+#define DISP_ON (1 << 2)
+#define CUR_ON (1 << 1)
+#define CUR_BLINK (1 << 0)
void pvc_dispcnt(u8 cmd)
{
pvc_write(DISP_CNT_CMD | (cmd & (DISP_ON|CUR_ON|CUR_BLINK)), MODE_INST);
}
#define MOVE_CMD 0x10
-#define DISPLAY (1 << 3)
-#define CURSOR 0
-#define RIGHT (1 << 2)
-#define LEFT 0
+#define DISPLAY (1 << 3)
+#define CURSOR 0
+#define RIGHT (1 << 2)
+#define LEFT 0
void pvc_move(u8 cmd)
{
pvc_write(MOVE_CMD | (cmd & (DISPLAY|RIGHT)), MODE_INST);
diff --git a/arch/mips/lasat/picvue.h b/arch/mips/lasat/picvue.h
index 2f0757738fdb..d0119fca3862 100644
--- a/arch/mips/lasat/picvue.h
+++ b/arch/mips/lasat/picvue.h
@@ -29,16 +29,16 @@ void pvc_dump_string(const unsigned char *str);
int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE]);
void pvc_dispcnt(u8 cmd);
-#define DISP_OFF 0
-#define DISP_ON (1 << 2)
-#define CUR_ON (1 << 1)
-#define CUR_BLINK (1 << 0)
+#define DISP_OFF 0
+#define DISP_ON (1 << 2)
+#define CUR_ON (1 << 1)
+#define CUR_BLINK (1 << 0)
void pvc_move(u8 cmd);
-#define DISPLAY (1 << 3)
-#define CURSOR 0
-#define RIGHT (1 << 2)
-#define LEFT 0
+#define DISPLAY (1 << 3)
+#define CURSOR 0
+#define RIGHT (1 << 2)
+#define LEFT 0
void pvc_clear(void);
void pvc_home(void);
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 8e388da1926f..c592bc8b8c99 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -64,7 +64,7 @@ static int pvc_line_proc_open(struct inode *inode, struct file *file)
static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- int lineno = *(int *)PDE(file->f_path.dentry->d_inode)->data;
+ int lineno = *(int *)PDE(file_inode(file))->data;
char kbuf[PVC_LINELEN];
size_t len;
diff --git a/arch/mips/lasat/serial.c b/arch/mips/lasat/serial.c
index 5bcb6e89ab78..2e5fbed81206 100644
--- a/arch/mips/lasat/serial.c
+++ b/arch/mips/lasat/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of Lasat UART platform device.
*
- * Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
+ * Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index d87ffd04cb0a..f27694fb2ad1 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -134,8 +134,8 @@ int proc_lasat_ip(ctl_table *table, int write,
} else {
ip = *(unsigned int *)(table->data);
sprintf(ipbuf, "%d.%d.%d.%d",
- (ip) & 0xff,
- (ip >> 8) & 0xff,
+ (ip) & 0xff,
+ (ip >> 8) & 0xff,
(ip >> 16) & 0xff,
(ip >> 24) & 0xff);
len = strlen(ipbuf);
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 239a9c957b02..81f1dcfdcab8 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
/**
- * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
- * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 6b876ca299ee..507147aebd41 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -67,8 +67,8 @@
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
LOAD _t1, (offset + UNIT(1))(src); \
- LOAD _t2, (offset + UNIT(2))(src); \
- LOAD _t3, (offset + UNIT(3))(src); \
+ LOAD _t2, (offset + UNIT(2))(src); \
+ LOAD _t3, (offset + UNIT(3))(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
@@ -285,7 +285,7 @@ LEAF(csum_partial)
1:
#endif
.set reorder
- /* Add the passed partial csum. */
+ /* Add the passed partial csum. */
ADDC32(sum, a2)
jr ra
.set noreorder
@@ -298,7 +298,7 @@ LEAF(csum_partial)
* csum_partial_copy_nocheck(src, dst, len, sum)
* __csum_partial_copy_user(src, dst, len, sum, errp)
*
- * See "Spec" in memcpy.S for details. Unlike __copy_user, all
+ * See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
*/
@@ -371,16 +371,16 @@ LEAF(csum_partial)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif
@@ -430,7 +430,7 @@ FEXPORT(csum_partial_copy_nocheck)
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
nop
SUB len, 8*NBYTES # subtract here for bgez loop
@@ -518,7 +518,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -532,7 +532,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
SHIFT_DISCARD_REVERT t0, t0, bits
@@ -551,7 +551,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -568,9 +568,9 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -578,13 +578,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
ADD src, src, 4*NBYTES
@@ -634,7 +634,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
#define SHIFT_INC -8
#endif
move t2, zero # partial word
- li t3, SHIFT_START # shift
+ li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
EXC( lbu t0, N(src), .Ll_exc_copy); \
@@ -642,7 +642,7 @@ EXC( lbu t0, N(src), .Ll_exc_copy); \
EXC( sb t0, N(dst), .Ls_exc); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
- beqz len, .Lcopy_bytes_done; \
+ beqz len, .Lcopy_bytes_done; \
or t2, t0
COPY_BYTE(0)
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 288f7954988d..44713af15a62 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(__delay);
* Division by multiplication: you don't have to worry about
* loss of precision.
*
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index a99c1d3fc567..32b9f21bfd85 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -63,7 +63,7 @@ static void dump_tlb(int first, int last)
tlb_read();
BARRIER();
pagemask = read_c0_pagemask();
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 65192c06781e..c5c40dad0bbf 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -156,15 +156,15 @@
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -235,7 +235,7 @@ __copy_user_common:
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
PREF( 0, 3*32(src) )
@@ -313,7 +313,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -327,7 +327,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
jr ra
@@ -343,7 +343,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -357,10 +357,10 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
PREF( 0, 3*32(src) )
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
PREF( 1, 3*32(dst) )
1:
/*
@@ -370,13 +370,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* are to the same unit (unless src is aligned, but it's not).
*/
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
@@ -388,7 +388,7 @@ EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
@@ -502,7 +502,7 @@ EXC( lb t1, 0(src), .Ll_exc)
#define SEXC(n) \
- .set reorder; /* DADDI_WAR */ \
+ .set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u: \
ADD len, len, n*NBYTES; \
jr ra; \
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 606c8a9efe3b..053d3b0b0317 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -21,8 +21,8 @@
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 9cee907975ae..91615c2ef0cf 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -30,7 +30,7 @@ static void dump_tlb(int first, int last)
"tlbr\n\t"
"nop\n\t"
".set\treorder");
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 7201b2ff08c8..bad539487503 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -23,7 +23,7 @@
/*
* Ugly special case have to check: we might get passed a user space
- * pointer which wraps into the kernel space. We don't deal with that. If
+ * pointer which wraps into the kernel space. We don't deal with that. If
* it happens at most some bytes of the exceptions handlers will be copied.
*/
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 64457162f7e0..beea03c8c0ce 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -21,9 +21,9 @@
* maximum of a1 or 0 in case of error.
*
* Note: for performance reasons we deliberately accept that a user may
- * make strlen_user and strnlen_user access the first few KSEG0
- * bytes. There's nothing secret there. On 64-bit accessing beyond
- * the maximum is a tad hairier ...
+ * make strlen_user and strnlen_user access the first few KSEG0
+ * bytes. There's nothing secret there. On 64-bit accessing beyond
+ * the maximum is a tad hairier ...
*/
LEAF(__strnlen_user_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index a6d1c77034d5..65e3dfc4e585 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/
diff --git a/arch/mips/loongson/Makefile b/arch/mips/loongson/Makefile
index 2b76cb0fb07d..0dc0055754cd 100644
--- a/arch/mips/loongson/Makefile
+++ b/arch/mips/loongson/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_MACH_LOONGSON) += common/
# Lemote Fuloong mini-PC (Loongson 2E-based)
#
-obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
#
# Lemote loongson2f family machines
diff --git a/arch/mips/loongson/common/bonito-irq.c b/arch/mips/loongson/common/bonito-irq.c
index f27d7ccca92a..cc0e4fd548e6 100644
--- a/arch/mips/loongson/common/bonito-irq.c
+++ b/arch/mips/loongson/common/bonito-irq.c
@@ -6,9 +6,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 353e1d2e41a5..72fed003a536 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -12,8 +12,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_acc.c b/arch/mips/loongson/common/cs5536/cs5536_acc.c
index b3fd5eab6548..ab4d6cc57384 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_acc.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_acc.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ehci.c b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
index 5b5cbba699b3..ec2e360267a8 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ehci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ide.c b/arch/mips/loongson/common/cs5536/cs5536_ide.c
index 681d1291a2c7..a73414d9ee51 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ide.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ide.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c
index 4d9f65abeaff..a6eb2e853d94 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_isa.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
index 5d1f48fa1a52..c639b9db0012 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
@@ -9,9 +9,9 @@
*
* Reference: AMD Geode(TM) CS5536 Companion Device Data Book
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ohci.c b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
index bdedf512baf7..f7c905e50dc4 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c
index 6dfeab11af08..81bed9d18061 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_pci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c
index a71736f00443..ced461b39069 100644
--- a/arch/mips/loongson/common/early_printk.c
+++ b/arch/mips/loongson/common/early_printk.c
@@ -4,9 +4,9 @@
* Copyright (c) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/serial_reg.h>
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index d93830ad6113..0a18fcf2d372 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -12,8 +12,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
index e8a0ffa935b4..21869908aaa4 100644
--- a/arch/mips/loongson/common/gpio.c
+++ b/arch/mips/loongson/common/gpio.c
@@ -1,7 +1,7 @@
/*
* STLS2F GPIO Support
*
- * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
* Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -123,13 +123,13 @@ static void ls2f_gpio_set_value(struct gpio_chip *chip,
}
static struct gpio_chip ls2f_chip = {
- .label = "ls2f",
- .direction_input = ls2f_gpio_direction_input,
- .get = ls2f_gpio_get_value,
- .direction_output = ls2f_gpio_direction_output,
- .set = ls2f_gpio_set_value,
- .base = 0,
- .ngpio = STLS2F_N_GPIO,
+ .label = "ls2f",
+ .direction_input = ls2f_gpio_direction_input,
+ .get = ls2f_gpio_get_value,
+ .direction_output = ls2f_gpio_direction_output,
+ .set = ls2f_gpio_set_value,
+ .base = 0,
+ .ngpio = STLS2F_N_GPIO,
};
static int __init ls2f_gpio_setup(void)
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index 19d341591254..ae7af1fd5d59 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/irq.c b/arch/mips/loongson/common/irq.c
index 5897471dedca..687003b19b45 100644
--- a/arch/mips/loongson/common/irq.c
+++ b/arch/mips/loongson/common/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/delay.h>
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 2efd5d9dee27..4becd4f9ef2e 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -4,8 +4,8 @@
*
* Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -19,15 +19,15 @@
#define MACHTYPE_LEN 50
static const char *system_types[] = {
- [MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
- [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
- [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box",
- [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
- [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
- [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
+ [MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
+ [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
+ [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box",
+ [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
+ [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
+ [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
[MACH_LEMOTE_NAS] "lemote-nas-2f",
- [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
- [MACH_LOONGSON_END] NULL,
+ [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
+ [MACH_LOONGSON_END] NULL,
};
const char *get_system_type(void)
diff --git a/arch/mips/loongson/common/mem.c b/arch/mips/loongson/common/mem.c
index 30eba6001205..8626a42f5b94 100644
--- a/arch/mips/loongson/common/mem.c
+++ b/arch/mips/loongson/common/mem.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/pci.c b/arch/mips/loongson/common/pci.c
index 31d8c5ecd16c..fa7784459721 100644
--- a/arch/mips/loongson/common/pci.c
+++ b/arch/mips/loongson/common/pci.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/pci.h>
@@ -13,25 +13,25 @@
#include <loongson.h>
static struct resource loongson_pci_mem_resource = {
- .name = "pci memory space",
- .start = LOONGSON_PCI_MEM_START,
- .end = LOONGSON_PCI_MEM_END,
- .flags = IORESOURCE_MEM,
+ .name = "pci memory space",
+ .start = LOONGSON_PCI_MEM_START,
+ .end = LOONGSON_PCI_MEM_END,
+ .flags = IORESOURCE_MEM,
};
static struct resource loongson_pci_io_resource = {
- .name = "pci io space",
- .start = LOONGSON_PCI_IO_START,
- .end = IO_SPACE_LIMIT,
- .flags = IORESOURCE_IO,
+ .name = "pci io space",
+ .start = LOONGSON_PCI_IO_START,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
};
static struct pci_controller loongson_pci_controller = {
- .pci_ops = &loongson_pci_ops,
- .io_resource = &loongson_pci_io_resource,
- .mem_resource = &loongson_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_offset = 0x00000000UL,
+ .pci_ops = &loongson_pci_ops,
+ .io_resource = &loongson_pci_io_resource,
+ .mem_resource = &loongson_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_offset = 0x00000000UL,
};
static void __init setup_pcimap(void)
@@ -42,7 +42,7 @@ static void __init setup_pcimap(void)
* we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M]
*
* pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
- * [<2G] [384M,448M] [320M,384M] [0M,64M]
+ * [<2G] [384M,448M] [320M,384M] [0M,64M]
*/
LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
diff --git a/arch/mips/loongson/common/platform.c b/arch/mips/loongson/common/platform.c
index 502b059de422..0ed38321a9a2 100644
--- a/arch/mips/loongson/common/platform.c
+++ b/arch/mips/loongson/common/platform.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 9e10d6225d9b..35c8c6468494 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -26,9 +26,9 @@ static inline void loongson_reboot(void)
func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4);
__asm__ __volatile__(
- " .set noat \n"
- " jr %[func] \n"
- " .set at \n"
+ " .set noat \n"
+ " jr %[func] \n"
+ " .set at \n"
: /* No outputs */
: [func] "r" (func));
#endif
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
index 7580873143c8..5f2b78ae97cc 100644
--- a/arch/mips/loongson/common/serial.c
+++ b/arch/mips/loongson/common/serial.c
@@ -39,15 +39,15 @@
}
static struct plat_serial8250_port uart8250_data[][2] = {
- [MACH_LOONGSON_UNKNOWN] {},
- [MACH_LEMOTE_FL2E] {PORT(4), {} },
- [MACH_LEMOTE_FL2F] {PORT(3), {} },
- [MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
- [MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
- [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
- [MACH_LEMOTE_NAS] {PORT_M(3), {} },
- [MACH_LEMOTE_LL2F] {PORT(3), {} },
- [MACH_LOONGSON_END] {},
+ [MACH_LOONGSON_UNKNOWN] {},
+ [MACH_LEMOTE_FL2E] {PORT(4), {} },
+ [MACH_LEMOTE_FL2F] {PORT(3), {} },
+ [MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
+ [MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
+ [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
+ [MACH_LEMOTE_NAS] {PORT_M(3), {} },
+ [MACH_LEMOTE_LL2F] {PORT(3), {} },
+ [MACH_LOONGSON_END] {},
};
static struct platform_device uart8250_device = {
diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c
index 27d826bc7103..8223f8acfd59 100644
--- a/arch/mips/loongson/common/setup.c
+++ b/arch/mips/loongson/common/setup.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c
index 9fdd01f6c56a..262a1f65b05e 100644
--- a/arch/mips/loongson/common/time.c
+++ b/arch/mips/loongson/common/time.c
@@ -5,9 +5,9 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/mc146818-time.h>
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
index d69ea54bc3d1..e192ad021edc 100644
--- a/arch/mips/loongson/common/uart_base.c
+++ b/arch/mips/loongson/common/uart_base.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index 3cf1fef29f0e..ef5ec8f3de5f 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
@@ -48,9 +48,9 @@ static struct irqaction cascade_irqaction = {
void __init mach_init_irq(void)
{
/* init all controller
- * 0-15 ------> i8259 interrupt
- * 16-23 ------> mips cpu interrupt
- * 32-63 ------> bonito irq
+ * 0-15 ------> i8259 interrupt
+ * 16-23 ------> mips cpu interrupt
+ * 32-63 ------> bonito irq
*/
/* most bonito irq should be level triggered */
diff --git a/arch/mips/loongson/fuloong-2e/reset.c b/arch/mips/loongson/fuloong-2e/reset.c
index bc39ec62c8c2..da4d2ae2a1f8 100644
--- a/arch/mips/loongson/fuloong-2e/reset.c
+++ b/arch/mips/loongson/fuloong-2e/reset.c
@@ -4,8 +4,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.h b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
index 1595a21b315b..5a3f1860d4d2 100644
--- a/arch/mips/loongson/lemote-2f/ec_kb3310b.h
+++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
@@ -30,141 +30,141 @@ extern sci_handler yeeloong_report_lid_status;
* 2, fill the PORT_LOW as EC register low part.
* 3, fill the PORT_DATA as EC register write data or get the data from it.
*/
-#define EC_IO_PORT_HIGH 0x0381
-#define EC_IO_PORT_LOW 0x0382
-#define EC_IO_PORT_DATA 0x0383
+#define EC_IO_PORT_HIGH 0x0381
+#define EC_IO_PORT_LOW 0x0382
+#define EC_IO_PORT_DATA 0x0383
/*
* EC delay time is 500us for register and status access
*/
-#define EC_REG_DELAY 500 /* unit : us */
-#define EC_CMD_TIMEOUT 0x1000
+#define EC_REG_DELAY 500 /* unit : us */
+#define EC_CMD_TIMEOUT 0x1000
/*
* EC access port for SCI communication
*/
-#define EC_CMD_PORT 0x66
-#define EC_STS_PORT 0x66
-#define EC_DAT_PORT 0x62
-#define CMD_INIT_IDLE_MODE 0xdd
-#define CMD_EXIT_IDLE_MODE 0xdf
-#define CMD_INIT_RESET_MODE 0xd8
-#define CMD_REBOOT_SYSTEM 0x8c
-#define CMD_GET_EVENT_NUM 0x84
-#define CMD_PROGRAM_PIECE 0xda
+#define EC_CMD_PORT 0x66
+#define EC_STS_PORT 0x66
+#define EC_DAT_PORT 0x62
+#define CMD_INIT_IDLE_MODE 0xdd
+#define CMD_EXIT_IDLE_MODE 0xdf
+#define CMD_INIT_RESET_MODE 0xd8
+#define CMD_REBOOT_SYSTEM 0x8c
+#define CMD_GET_EVENT_NUM 0x84
+#define CMD_PROGRAM_PIECE 0xda
/* temperature & fan registers */
-#define REG_TEMPERATURE_VALUE 0xF458
-#define REG_FAN_AUTO_MAN_SWITCH 0xF459
-#define BIT_FAN_AUTO 0
-#define BIT_FAN_MANUAL 1
-#define REG_FAN_CONTROL 0xF4D2
-#define BIT_FAN_CONTROL_ON (1 << 0)
-#define BIT_FAN_CONTROL_OFF (0 << 0)
-#define REG_FAN_STATUS 0xF4DA
-#define BIT_FAN_STATUS_ON (1 << 0)
-#define BIT_FAN_STATUS_OFF (0 << 0)
-#define REG_FAN_SPEED_HIGH 0xFE22
-#define REG_FAN_SPEED_LOW 0xFE23
-#define REG_FAN_SPEED_LEVEL 0xF4CC
+#define REG_TEMPERATURE_VALUE 0xF458
+#define REG_FAN_AUTO_MAN_SWITCH 0xF459
+#define BIT_FAN_AUTO 0
+#define BIT_FAN_MANUAL 1
+#define REG_FAN_CONTROL 0xF4D2
+#define BIT_FAN_CONTROL_ON (1 << 0)
+#define BIT_FAN_CONTROL_OFF (0 << 0)
+#define REG_FAN_STATUS 0xF4DA
+#define BIT_FAN_STATUS_ON (1 << 0)
+#define BIT_FAN_STATUS_OFF (0 << 0)
+#define REG_FAN_SPEED_HIGH 0xFE22
+#define REG_FAN_SPEED_LOW 0xFE23
+#define REG_FAN_SPEED_LEVEL 0xF4CC
/* fan speed divider */
-#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2)*/
+#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2)*/
/* battery registers */
-#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
-#define REG_BAT_DESIGN_CAP_LOW 0xF77E
-#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
-#define REG_BAT_FULLCHG_CAP_LOW 0xF781
-#define REG_BAT_DESIGN_VOL_HIGH 0xF782
-#define REG_BAT_DESIGN_VOL_LOW 0xF783
-#define REG_BAT_CURRENT_HIGH 0xF784
-#define REG_BAT_CURRENT_LOW 0xF785
-#define REG_BAT_VOLTAGE_HIGH 0xF786
-#define REG_BAT_VOLTAGE_LOW 0xF787
-#define REG_BAT_TEMPERATURE_HIGH 0xF788
-#define REG_BAT_TEMPERATURE_LOW 0xF789
-#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
-#define REG_BAT_RELATIVE_CAP_LOW 0xF493
-#define REG_BAT_VENDOR 0xF4C4
-#define FLAG_BAT_VENDOR_SANYO 0x01
-#define FLAG_BAT_VENDOR_SIMPLO 0x02
-#define REG_BAT_CELL_COUNT 0xF4C6
-#define FLAG_BAT_CELL_3S1P 0x03
-#define FLAG_BAT_CELL_3S2P 0x06
-#define REG_BAT_CHARGE 0xF4A2
-#define FLAG_BAT_CHARGE_DISCHARGE 0x01
-#define FLAG_BAT_CHARGE_CHARGE 0x02
-#define FLAG_BAT_CHARGE_ACPOWER 0x00
-#define REG_BAT_STATUS 0xF4B0
-#define BIT_BAT_STATUS_LOW (1 << 5)
-#define BIT_BAT_STATUS_DESTROY (1 << 2)
-#define BIT_BAT_STATUS_FULL (1 << 1)
-#define BIT_BAT_STATUS_IN (1 << 0)
-#define REG_BAT_CHARGE_STATUS 0xF4B1
-#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
-#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
-#define REG_BAT_STATE 0xF482
-#define BIT_BAT_STATE_CHARGING (1 << 1)
-#define BIT_BAT_STATE_DISCHARGING (1 << 0)
-#define REG_BAT_POWER 0xF440
-#define BIT_BAT_POWER_S3 (1 << 2)
-#define BIT_BAT_POWER_ON (1 << 1)
-#define BIT_BAT_POWER_ACIN (1 << 0)
+#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
+#define REG_BAT_DESIGN_CAP_LOW 0xF77E
+#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
+#define REG_BAT_FULLCHG_CAP_LOW 0xF781
+#define REG_BAT_DESIGN_VOL_HIGH 0xF782
+#define REG_BAT_DESIGN_VOL_LOW 0xF783
+#define REG_BAT_CURRENT_HIGH 0xF784
+#define REG_BAT_CURRENT_LOW 0xF785
+#define REG_BAT_VOLTAGE_HIGH 0xF786
+#define REG_BAT_VOLTAGE_LOW 0xF787
+#define REG_BAT_TEMPERATURE_HIGH 0xF788
+#define REG_BAT_TEMPERATURE_LOW 0xF789
+#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
+#define REG_BAT_RELATIVE_CAP_LOW 0xF493
+#define REG_BAT_VENDOR 0xF4C4
+#define FLAG_BAT_VENDOR_SANYO 0x01
+#define FLAG_BAT_VENDOR_SIMPLO 0x02
+#define REG_BAT_CELL_COUNT 0xF4C6
+#define FLAG_BAT_CELL_3S1P 0x03
+#define FLAG_BAT_CELL_3S2P 0x06
+#define REG_BAT_CHARGE 0xF4A2
+#define FLAG_BAT_CHARGE_DISCHARGE 0x01
+#define FLAG_BAT_CHARGE_CHARGE 0x02
+#define FLAG_BAT_CHARGE_ACPOWER 0x00
+#define REG_BAT_STATUS 0xF4B0
+#define BIT_BAT_STATUS_LOW (1 << 5)
+#define BIT_BAT_STATUS_DESTROY (1 << 2)
+#define BIT_BAT_STATUS_FULL (1 << 1)
+#define BIT_BAT_STATUS_IN (1 << 0)
+#define REG_BAT_CHARGE_STATUS 0xF4B1
+#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
+#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
+#define REG_BAT_STATE 0xF482
+#define BIT_BAT_STATE_CHARGING (1 << 1)
+#define BIT_BAT_STATE_DISCHARGING (1 << 0)
+#define REG_BAT_POWER 0xF440
+#define BIT_BAT_POWER_S3 (1 << 2)
+#define BIT_BAT_POWER_ON (1 << 1)
+#define BIT_BAT_POWER_ACIN (1 << 0)
/* other registers */
/* Audio: rd/wr */
-#define REG_AUDIO_VOLUME 0xF46C
-#define REG_AUDIO_MUTE 0xF4E7
-#define REG_AUDIO_BEEP 0xF4D0
+#define REG_AUDIO_VOLUME 0xF46C
+#define REG_AUDIO_MUTE 0xF4E7
+#define REG_AUDIO_BEEP 0xF4D0
/* USB port power or not: rd/wr */
-#define REG_USB0_FLAG 0xF461
-#define REG_USB1_FLAG 0xF462
-#define REG_USB2_FLAG 0xF463
-#define BIT_USB_FLAG_ON 1
-#define BIT_USB_FLAG_OFF 0
+#define REG_USB0_FLAG 0xF461
+#define REG_USB1_FLAG 0xF462
+#define REG_USB2_FLAG 0xF463
+#define BIT_USB_FLAG_ON 1
+#define BIT_USB_FLAG_OFF 0
/* LID */
-#define REG_LID_DETECT 0xF4BD
-#define BIT_LID_DETECT_ON 1
-#define BIT_LID_DETECT_OFF 0
+#define REG_LID_DETECT 0xF4BD
+#define BIT_LID_DETECT_ON 1
+#define BIT_LID_DETECT_OFF 0
/* CRT */
-#define REG_CRT_DETECT 0xF4AD
-#define BIT_CRT_DETECT_PLUG 1
-#define BIT_CRT_DETECT_UNPLUG 0
+#define REG_CRT_DETECT 0xF4AD
+#define BIT_CRT_DETECT_PLUG 1
+#define BIT_CRT_DETECT_UNPLUG 0
/* LCD backlight brightness adjust: 9 levels */
-#define REG_DISPLAY_BRIGHTNESS 0xF4F5
+#define REG_DISPLAY_BRIGHTNESS 0xF4F5
/* Black screen Status */
-#define BIT_DISPLAY_LCD_ON 1
-#define BIT_DISPLAY_LCD_OFF 0
+#define BIT_DISPLAY_LCD_ON 1
+#define BIT_DISPLAY_LCD_OFF 0
/* LCD backlight control: off/restore */
-#define REG_BACKLIGHT_CTRL 0xF7BD
-#define BIT_BACKLIGHT_ON 1
-#define BIT_BACKLIGHT_OFF 0
+#define REG_BACKLIGHT_CTRL 0xF7BD
+#define BIT_BACKLIGHT_ON 1
+#define BIT_BACKLIGHT_OFF 0
/* Reset the machine auto-clear: rd/wr */
-#define REG_RESET 0xF4EC
-#define BIT_RESET_ON 1
+#define REG_RESET 0xF4EC
+#define BIT_RESET_ON 1
/* Light the led: rd/wr */
-#define REG_LED 0xF4C8
-#define BIT_LED_RED_POWER (1 << 0)
-#define BIT_LED_ORANGE_POWER (1 << 1)
-#define BIT_LED_GREEN_CHARGE (1 << 2)
-#define BIT_LED_RED_CHARGE (1 << 3)
-#define BIT_LED_NUMLOCK (1 << 4)
+#define REG_LED 0xF4C8
+#define BIT_LED_RED_POWER (1 << 0)
+#define BIT_LED_ORANGE_POWER (1 << 1)
+#define BIT_LED_GREEN_CHARGE (1 << 2)
+#define BIT_LED_RED_CHARGE (1 << 3)
+#define BIT_LED_NUMLOCK (1 << 4)
/* Test led mode, all led on/off */
-#define REG_LED_TEST 0xF4C2
-#define BIT_LED_TEST_IN 1
-#define BIT_LED_TEST_OUT 0
+#define REG_LED_TEST 0xF4C2
+#define BIT_LED_TEST_IN 1
+#define BIT_LED_TEST_OUT 0
/* Camera on/off */
-#define REG_CAMERA_STATUS 0xF46A
-#define BIT_CAMERA_STATUS_ON 1
-#define BIT_CAMERA_STATUS_OFF 0
-#define REG_CAMERA_CONTROL 0xF7B7
-#define BIT_CAMERA_CONTROL_OFF 0
-#define BIT_CAMERA_CONTROL_ON 1
+#define REG_CAMERA_STATUS 0xF46A
+#define BIT_CAMERA_STATUS_ON 1
+#define BIT_CAMERA_STATUS_OFF 0
+#define REG_CAMERA_CONTROL 0xF7B7
+#define BIT_CAMERA_CONTROL_OFF 0
+#define BIT_CAMERA_CONTROL_ON 1
/* Wlan Status */
-#define REG_WLAN 0xF4FA
-#define BIT_WLAN_ON 1
-#define BIT_WLAN_OFF 0
-#define REG_DISPLAY_LCD 0xF79F
+#define REG_WLAN 0xF4FA
+#define BIT_WLAN_ON 1
+#define BIT_WLAN_OFF 0
+#define REG_DISPLAY_LCD 0xF79F
/* SCI Event Number from EC */
enum {
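The three-step index/data sequence described at the top of this header (high index, low index, then data) maps directly onto port I/O. A minimal sketch, assuming a hypothetical ec_reg_read() helper and no locking (a real driver would also serialize EC access):

	#include <linux/types.h>
	#include <linux/io.h>

	/* Illustrative only: read one EC register through the index/data ports. */
	static u8 ec_reg_read(u16 reg)
	{
		outb((reg >> 8) & 0xff, EC_IO_PORT_HIGH);	/* 1: high byte of the EC register index */
		outb(reg & 0xff, EC_IO_PORT_LOW);		/* 2: low byte of the EC register index */
		return inb(EC_IO_PORT_DATA);			/* 3: data byte latched by the EC */
	}

A write would mirror the same two index writes followed by an outb() to EC_IO_PORT_DATA, and wide quantities such as the battery voltage are presumably assembled as (HIGH << 8) | LOW from their register pairs.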
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 14b081841b6b..6f8682e44483 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc.
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -18,10 +18,10 @@
#include <loongson.h>
#include <machine.h>
-#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
-#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
-#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
-#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
+#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
+#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
+#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
+#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
#define LOONGSON_INT_BIT_INT0 (1 << 11)
#define LOONGSON_INT_BIT_INT1 (1 << 12)
@@ -108,9 +108,9 @@ struct irqaction cascade_irqaction = {
void __init mach_init_irq(void)
{
/* init all controller
- * 0-15 ------> i8259 interrupt
- * 16-23 ------> mips cpu interrupt
- * 32-63 ------> bonito irq
+ * 0-15 ------> i8259 interrupt
+ * 16-23 ------> mips cpu interrupt
+ * 32-63 ------> bonito irq
*/
/* setup cs5536 as high level trigger */
diff --git a/arch/mips/loongson/lemote-2f/machtype.c b/arch/mips/loongson/lemote-2f/machtype.c
index e860a2705c27..b55e6eece5e0 100644
--- a/arch/mips/loongson/lemote-2f/machtype.c
+++ b/arch/mips/loongson/lemote-2f/machtype.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -22,11 +22,11 @@ void __init mach_prom_init_machtype(void)
* machines, this will help the users a lot.
*
* If no "machtype=" passed, get machine type from "PMON_VER=".
- * PMON_VER=LM8089 Lemote 8.9'' netbook
- * LM8101 Lemote 10.1'' netbook
- * (The above two netbooks have the same kernel support)
- * LM6XXX Lemote FuLoong(2F) box series
- * LM9XXX Lemote LynLoong PC series
+ * PMON_VER=LM8089 Lemote 8.9'' netbook
+ * LM8101 Lemote 10.1'' netbook
+ * (The above two netbooks have the same kernel support)
+ * LM6XXX Lemote FuLoong(2F) box series
+ * LM9XXX Lemote LynLoong PC series
*/
if (strstr(arcs_cmdline, "PMON_VER=LM")) {
if (strstr(arcs_cmdline, "PMON_VER=LM8"))
diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c
index 36020a07e180..90962a3a1731 100644
--- a/arch/mips/loongson/lemote-2f/reset.c
+++ b/arch/mips/loongson/lemote-2f/reset.c
@@ -5,8 +5,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -90,9 +90,9 @@ void ml2f_reboot(void)
#define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
#define EC_SHUTDOWN_IO_PORT_LOW 0xff2e
#define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
-#define REG_SHUTDOWN_HIGH 0xFC
-#define REG_SHUTDOWN_LOW 0x29
-#define BIT_SHUTDOWN_ON (1 << 1)
+#define REG_SHUTDOWN_HIGH 0xFC
+#define REG_SHUTDOWN_LOW 0x29
+#define BIT_SHUTDOWN_ON (1 << 1)
static void ml2f_shutdown(void)
{
diff --git a/arch/mips/loongson1/Platform b/arch/mips/loongson1/Platform
index 99bdefe627af..11863441dea3 100644
--- a/arch/mips/loongson1/Platform
+++ b/arch/mips/loongson1/Platform
@@ -1,4 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
+cflags-$(CONFIG_CPU_LOONGSON1) += \
$(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
index 07133defa148..b4437f19c3d9 100644
--- a/arch/mips/loongson1/common/clock.c
+++ b/arch/mips/loongson1/common/clock.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/irq.c b/arch/mips/loongson1/common/irq.c
index 41bc8ffe7bba..455a7704a90f 100644
--- a/arch/mips/loongson1/common/irq.c
+++ b/arch/mips/loongson1/common/irq.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
index 69dad4cfaaf4..fdf8cb5987a4 100644
--- a/arch/mips/loongson1/common/platform.c
+++ b/arch/mips/loongson1/common/platform.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -23,7 +23,7 @@
.mapbase = LS1X_UART ## _id ## _BASE, \
.irq = LS1X_UART ## _id ## _IRQ, \
.iotype = UPIO_MEM, \
- .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
+ .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
.type = PORT_16550A, \
}
diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c
index 1f8e49f9886d..2a47af5a55c3 100644
--- a/arch/mips/loongson1/common/prom.c
+++ b/arch/mips/loongson1/common/prom.c
@@ -3,8 +3,8 @@
*
* Modified from arch/mips/pnx833x/common/prom.c.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -73,7 +73,7 @@ void __init prom_free_prom_memory(void)
#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset))
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
int timeout;
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index fb979a784eca..d4f610f9604a 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/setup.c b/arch/mips/loongson1/common/setup.c
index 62128cc27e68..62f41afee241 100644
--- a/arch/mips/loongson1/common/setup.c
+++ b/arch/mips/loongson1/common/setup.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
index 1fbd5264f667..b26b10dac70a 100644
--- a/arch/mips/loongson1/ls1b/board.c
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 96607230d9ea..121a848a3594 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -9,4 +9,3 @@ obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
-
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 47c77e7ffbf8..afb5a0bcf7a5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -169,7 +169,7 @@ static int isBranchInstr(mips_instruction * i)
/*
* In the Linux kernel, we support selection of FPR format on the
- * basis of the Status.FR bit. If an FPU is not present, the FR bit
+ * basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs so we rather look at TIF_32BIT_REGS.
* FPU emu is slow and bulky and optimizing this function offers fairly
@@ -234,7 +234,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
if (xcp->cp0_cause & CAUSEF_BD) {
/*
* The instruction to be emulated is in a branch delay slot
- * which means that we have to emulate the branch instruction
+ * which means that we have to emulate the branch instruction
* BEFORE we do the cop1 instruction.
*
* This branch could be a COP1 branch, but in that case we
@@ -1335,8 +1335,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
else {
/*
* The 'ieee754_csr' is an alias of
- * ctx->fcr31. No need to copy ctx->fcr31 to
- * ieee754_csr. But ieee754_csr.rm is ieee
+ * ctx->fcr31. No need to copy ctx->fcr31 to
+ * ieee754_csr. But ieee754_csr.rm is ieee
* library modes. (not mips rounding mode)
*/
/* convert to ieee library modes */
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
index b422fcad852a..c57c8adc42c4 100644
--- a/arch/mips/math-emu/dp_add.c
+++ b/arch/mips/math-emu/dp_add.c
@@ -153,7 +153,7 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
xe = xe;
xs = xs;
- if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm);
xe++;
}
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index a2a51b87ae8f..b874d60a942b 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -87,7 +87,7 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
if (xe > 512) { /* x > 2**-512? */
xe -= 512; /* x = x / 2**512 */
scalx += 256;
- } else if (xe < -512) { /* x < 2**-512? */
+ } else if (xe < -512) { /* x < 2**-512? */
xe += 512; /* x = x * 2**512 */
scalx -= 256;
}
@@ -108,13 +108,13 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
y.bits &= 0xffffffff00000000LL;
/* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
- /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
+ /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
z = t = ieee754dp_mul(y, y);
t.parts.bexp += 0x001;
t = ieee754dp_add(t, z);
z = ieee754dp_mul(ieee754dp_sub(x, z), y);
- /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
+ /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
t = ieee754dp_div(z, ieee754dp_add(t, x));
t.parts.bexp += 0x001;
y = ieee754dp_add(y, t);
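The two comment lines re-indented in this hunk, together with the exponent bumps in the code below them, encode one Halley refinement step for sqrt: t becomes 3*y*y, z becomes (x - y*y)*y, and y is updated by 2*z/(t + x). A plain-double sketch of the same step, ignoring the emulator's exponent-manipulation tricks (illustrative only):

	/* One Halley refinement step for sqrt(x); converges cubically. */
	static double sqrt_halley_step(double x, double y)
	{
		return y + 2.0 * y * (x - y * y) / (3.0 * y * y + x);
	}

That cubic convergence is why the preceding comment can promise a jump to "almost 56 sig. bits" from a much shorter initial estimate in a single step.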
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
index 0de098cbc77b..91e0a4b5cbc7 100644
--- a/arch/mips/math-emu/dp_sub.c
+++ b/arch/mips/math-emu/dp_sub.c
@@ -158,7 +158,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
xe = xe;
xs = xs;
- if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm); /* shift preserving sticky */
xe++;
}
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 30554e1c67b4..0015cf1989da 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -56,21 +56,21 @@
#endif
const struct ieee754dp_konst __ieee754dp_spcvals[] = {
- DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */
- DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */
+ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */
+ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */
DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */
DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */
DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */
DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */
- DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
- DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
+ DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
+ DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */
DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */
DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */
DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */
DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */
- DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
- DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
+ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
+ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */
DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */
};
@@ -84,9 +84,9 @@ const struct ieee754sp_konst __ieee754sp_spcvals[] = {
SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */
SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */
SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */
- SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */
- SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
- SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
+ SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */
+ SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
+ SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */
SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */
SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 080b5ca03fc6..068e56be8de9 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -116,7 +116,7 @@ static u64 get_rounding(int sn, u64 xm)
xm += 0x8;
break;
case IEEE754_RD: /* toward -Infinity */
- if (sn) /* ?? */
+ if (sn) /* ?? */
xm += 0x8;
break;
}
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2a7d43f4f161..4b6c6fb35304 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -56,7 +56,7 @@
#define CLPAIR(x, y) ((x)*6+(y))
-#define CLEARCX \
+#define CLEARCX \
(ieee754_csr.cx = 0)
#define SETCX(x) \
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index 271d00d6113a..15d1e36cfe64 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -117,7 +117,7 @@ static unsigned get_rounding(int sn, unsigned xm)
xm += 0x8;
break;
case IEEE754_RD: /* toward -Infinity */
- if (sn) /* ?? */
+ if (sn) /* ?? */
xm += 0x8;
break;
}
diff --git a/arch/mips/math-emu/ieee754xcpt.c b/arch/mips/math-emu/ieee754xcpt.c
index b99a693c05af..967167116ae8 100644
--- a/arch/mips/math-emu/ieee754xcpt.c
+++ b/arch/mips/math-emu/ieee754xcpt.c
@@ -25,7 +25,7 @@
* Added preprocessor hacks to map to Linux kernel diagnostics.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*************************************************************************/
#include <linux/kernel.h>
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
index 52e6c58c8de1..1c586575fe17 100644
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -1,6 +1,6 @@
/*
* Kevin D. Kissell, kevink@mips and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index ae1a327ccac0..c446e64637e2 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -148,7 +148,7 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
xe = xe;
xs = xs;
- if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
SPXSRSX1();
}
} else {
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index 2722a2570ea4..fa4675cf2aad 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -131,7 +131,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
hrm = hxm * hym; /* 16 * 16 => 32 */
{
- unsigned t = lxm * hym; /* 16 * 16 => 32 */
+ unsigned t = lxm * hym; /* 16 * 16 => 32 */
{
unsigned at = lrm + (t << 16);
hrm += at < lrm;
@@ -141,7 +141,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
}
{
- unsigned t = hxm * lym; /* 16 * 16 => 32 */
+ unsigned t = hxm * lym; /* 16 * 16 => 32 */
{
unsigned at = lrm + (t << 16);
hrm += at < lrm;
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index 886ed5bcfefb..e595c6f3d0bb 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -153,7 +153,7 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
xe = xe;
xs = xs;
- if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
SPXSRSX1(); /* shift preserving sticky */
}
} else {
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 90ceb963aaf1..1dcec30ad1c4 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -16,9 +16,9 @@ obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf4231..8557fb552863 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
* Called to flush all memory associated with a memory
* context.
*
- * @mm: Memory context to flush
+ * @mm: Memory context to flush
*/
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2cdf2e..704dc735a59d 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
+ asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0f7d788e8810..ecca559b8d7b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -160,7 +160,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
@@ -177,7 +177,7 @@ static inline void tx49_blast_icache32(void)
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +208,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -637,7 +637,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* for the cache instruction on MIPS processors and
* some processors, among them the RM5200 and RM7000
* QED processors will throw an address error for cache
- * hit ops with insufficient alignment. Solved by
+ * hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
blast_inv_scache_range(addr, addr + size);
@@ -864,7 +864,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +923,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +986,8 @@ static void __cpuinit probe_pcache(void)
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
+ c->icache.ways *
+ c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & 0x8) /* VI bit */
@@ -1006,8 +1006,8 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
+ c->dcache.ways *
+ c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1016,7 @@ static void __cpuinit probe_pcache(void)
/*
* Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
+ * #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
@@ -1057,6 +1057,7 @@ static void __cpuinit probe_pcache(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
@@ -1088,7 +1089,7 @@ static void __cpuinit probe_pcache(void)
break;
}
-#ifdef CONFIG_CPU_LOONGSON2
+#ifdef CONFIG_CPU_LOONGSON2
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1228,7 +1229,7 @@ static void __cpuinit setup_scache(void)
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
- return;
+ return;
case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23cada6d6..ba9da270289f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg; /* in r3k-tlb.c */
/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
"nop\n\t" \
"1:\n\t" \
".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
/* TX39/H core (writethru direct-map cache) */
__flush_cache_vmap = tx39__flush_cache_vmap;
__flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_all = tx39h_flush_icache_all;
__flush_cache_all = tx39h_flush_icache_all;
flush_cache_mm = (void *) tx39h_flush_icache_all;
flush_cache_range = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
_dma_cache_inv = tx39_dma_cache_inv;
shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
break;
}
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 3571090ba178..576add33bf5b 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -27,7 +27,7 @@
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
@@ -48,7 +48,7 @@
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
@@ -56,8 +56,8 @@
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
-#define CP0_CERRD_LOAD (1 << 25)
-#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
@@ -69,10 +69,10 @@
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
-static uint32_t extract_ic(unsigned short addr, int data);
-static uint32_t extract_dc(unsigned short addr, int data);
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- printk(" c0_errorepc == %08x\n", eepc);
- printk(" c0_errctl == %08x", errctl);
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
- printk(" c0_cerr_i == %08x", cerr_i);
+ printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
- printk(" c0_cerr_d == %08x", cerr_d);
+ printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
- * investigate after the fact. However, if you just stall the CPU,
+ * investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
- " .set pop \n"
+ " .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
- uint8_t p;
- int i;
+ uint8_t p;
+ int i;
p = 0;
for (i = 7; i >= 0; i--)
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index e743622fd24d..45dff5cd4b8e 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -14,17 +14,17 @@
#include <asm/stackframe.h>
/*
- * Game over. Go to the button. Press gently. Swear where allowed by
+ * Game over. Go to the button. Press gently. Swear where allowed by
* legislation.
*/
LEAF(except_vec2_generic)
.set noreorder
.set noat
- .set mips0
+ .set mips0
/*
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
- * in the cache, we may not be able to recover. As a
+ * in the cache, we may not be able to recover. As a
* first-order desperate measure, turn off KSEG0 cacheing.
*/
mfc0 k0,CP0_CONFIG
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
index 3db8553fcd34..9029092aa740 100644
--- a/arch/mips/mm/cex-oct.S
+++ b/arch/mips/mm/cex-oct.S
@@ -18,7 +18,7 @@
*/
LEAF(except_vec2_octeon)
- .set push
+ .set push
.set mips64r2
.set noreorder
.set noat
@@ -27,19 +27,19 @@
/* due to an errata we need to read the COP0 CacheErr (Dcache)
* before any cache/DRAM access */
- rdhwr k0, $0 /* get core_id */
- PTR_LA k1, cache_err_dcache
- sll k0, k0, 3
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
- dmfc0 k0, CP0_CACHEERR, 1
- sd k0, (k1)
- dmtc0 $0, CP0_CACHEERR, 1
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
- /* check whether this is a nested exception */
- mfc0 k1, CP0_STATUS
- andi k1, k1, ST0_EXL
- beqz k1, 1f
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
nop
j cache_parity_error_octeon_non_recoverable
nop
@@ -48,22 +48,22 @@
1: j handle_cache_err
nop
- .set pop
+ .set pop
END(except_vec2_octeon)
/* We need to jump to handle_cache_err so that the previous handler
* can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
- * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
LEAF(handle_cache_err)
- .set push
- .set noreorder
- .set noat
+ .set push
+ .set noreorder
+ .set noat
SAVE_ALL
KMODE
- jal cache_parity_error_octeon_recoverable
+ jal cache_parity_error_octeon_recoverable
nop
- j ret_from_exception
+ j ret_from_exception
nop
.set pop
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 89c412bc4b64..fe1d887e8d70 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -24,9 +24,9 @@
#include <asm/cacheops.h>
#include <asm/sibyte/board.h>
-#define C0_ERRCTL $26 /* CP0: Error info */
-#define C0_CERR_I $27 /* CP0: Icache error */
-#define C0_CERR_D $27,1 /* CP0: Dcache error */
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
/*
* Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
/*
* k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
* Dcache errors we can recover from will take more extensive
- * processing. For now, they are considered "unrecoverable".
+ * processing. For now, they are considered "unrecoverable".
* Note that 'DC' becoming set (outside of ERL mode) will
* cause 'IC' to clear; so if there's an Icache error, we'll
* only find out about it if we recover from this error and
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 3fab2046c8a4..f9ef83829a52 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ddcec1e1a0cd..0fead53d1c26 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
#ifdef CONFIG_KPROBES
/*
- * This is to notify the fault handler of the kprobes. The
+ * This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
}
no_context:
- /* Are we prepared to handle this kernel fault? */
+ /* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index dcfd573871c1..d4ea5c9c4a93 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
+ * Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index be9acb2b959d..67929251286c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -66,7 +66,7 @@
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since page is never written to after the initialization we
* don't have to care about aliases on other CPUs.
@@ -380,7 +380,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cacfd31e8ec9..7f840bc08abf 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
phys_t end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
+ | __WRITEABLE | flags);
address &= ~PMD_MASK;
end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
if (!p)
printk(KERN_ERR "iounmap: bad address %p\n", addr);
- kfree(p);
+ kfree(p);
}
EXPORT_SYMBOL(__ioremap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 8e666c55f4d4..a29fba55b53e 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -271,7 +271,7 @@ void __cpuinit build_clear_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
- * cache_line_size : 0;
+ * cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
@@ -417,13 +417,13 @@ void __cpuinit build_copy_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ee331bbd8f8a..e8adc0069d66 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
entry = (unsigned long)invalid_pmd_table;
#endif
- p = (unsigned long *) page;
+ p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
- p = (unsigned long *) addr;
+ p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
do {
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1eb708ef75ff..c6aaed934d53 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
}
/* XXX Check with wje if the Indy caches can differenciate between
- writeback + invalidate and just invalidate. */
+ writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8d90ff25b123..8bc67720e145 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
static void r5k_sc_enable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
static void r5k_sc_disable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 2a7c9725b2a3..493131c81a29 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -424,7 +424,7 @@ void __cpuinit tlb_init(void)
write_c0_pagegrain(pg);
}
- /* From this point on the ARC firmware is dead. */
+ /* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72c..820e6612d744 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
*
* Synthesize TLB refill handlers at runtime.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
/*
* pgtable bits are assigned dynamically depending on processor feature
* and statically based on kernel configuration. This spits out the actual
- * values the kernel is using. Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
* TLB exception handlers.
*/
static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
* From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
* 2. A timing hazard exists for the TLBP instruction.
*
- * stalling_instruction
- * TLBP
+ * stalling_instruction
+ * TLBP
*
* The JTLB is being read for the TLBP throughout the stall generated by the
* previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
* The software work-around is to not allow the instruction preceding the TLBP
* to stall - make it an NOP or some other instruction guaranteed not to stall.
*
- * Errata 2 will not be fixed. This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
@@ -581,6 +581,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -748,7 +749,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
*/
small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
- /* We can clobber tmp. It isn't used after this.*/
+ /* We can clobber tmp. It isn't used after this.*/
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
@@ -830,12 +831,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
+# ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -955,7 +956,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -965,7 +966,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#else
/*
* smp_processor_id() << 3 is stored in CONTEXT.
- */
+ */
uasm_i_mfc0(p, ptr, C0_CONTEXT);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1154,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1171,9 +1172,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_l_vmalloc_done(l, *p);
/*
- * tmp ptr
- * fall-through case = badvaddr *pgd_current
- * vmalloc case = badvaddr swapper_pg_dir
+ * tmp ptr
+ * fall-through case = badvaddr *pgd_current
+ * vmalloc case = badvaddr swapper_pg_dir
*/
if (vmalloc_branch_delay_filled)
@@ -1212,7 +1213,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
- * delay slot. It cannot issue in the same cycle and may be
+ * delay slot. It cannot issue in the same cycle and may be
* speculative and unneeded.
*/
if (use_lwx_insns())
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 39b891056227..942ff6c2eba2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -7,7 +7,7 @@
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
@@ -119,30 +119,30 @@ static struct insn insn_table[] __uasminitdata = {
{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
@@ -345,7 +345,7 @@ Ip_u2u1msbu3(op) \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
-#define I_u2u1msbdu3(op) \
+#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, d-1, c); \
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 469d9b0cee6d..1e4784458016 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -70,12 +70,12 @@ void amon_cpu_start(int cpu,
launch->sp = sp;
launch->a0 = a0;
- smp_wmb(); /* Target must see parameters before go */
+ smp_wmb(); /* Target must see parameters before go */
launch->flags |= LAUNCH_FGO;
- smp_wmb(); /* Target must see go before we poll */
+ smp_wmb(); /* Target must see go before we poll */
while ((launch->flags & LAUNCH_FGONE) == 0)
;
- smp_rmb(); /* Target will be updating flags soon */
+ smp_rmb(); /* Target will be updating flags soon */
pr_debug("launch: cpu%d gone!\n", cpu);
}
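The three barrier comments above are the producer half of a flags handshake, and the consumer side has to mirror them. A hedged sketch of that ordering (the structure layout and function are hypothetical; only the LAUNCH_* flag names and the sp/a0/flags fields come from the code above):

	struct cpulaunch_sketch {		/* hypothetical mirror of the launch area */
		unsigned long sp, a0;
		unsigned long flags;
	};

	static void secondary_wait_for_go(volatile struct cpulaunch_sketch *launch)
	{
		while (!(launch->flags & LAUNCH_FGO))
			;			/* spin until the master sets "go" */
		smp_rmb();			/* pairs with the first smp_wmb(): sp/a0 are now valid */
		launch->flags |= LAUNCH_FGONE;	/* lets the master's poll loop exit */
	}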
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
index 1871c30ed2eb..5576a306a145 100644
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ b/arch/mips/mti-malta/malta-cmdline.c
@@ -46,7 +46,7 @@ void __init prom_init_cmdline(void)
cp = &(arcs_cmdline[0]);
while(actr < prom_argc) {
- strcpy(cp, prom_argv(actr));
+ strcpy(cp, prom_argv(actr));
cp += strlen(prom_argv(actr));
*cp++ = ' ';
actr++;
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 7c8828fcb0ad..9bc58a24e80a 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -37,10 +37,10 @@ void mips_display_message(const char *str)
display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
for (i = 0; i <= 14; i=i+2) {
- if (*str)
- __raw_writel(*str++, display + i);
+ if (*str)
+ __raw_writel(*str++, display + i);
else
- __raw_writel(' ', display + i);
+ __raw_writel(' ', display + i);
}
}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 27a6cdb36e37..c2cbce9e435e 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
@@ -110,20 +110,20 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
int get_ethernet_addr(char *ethernet_addr)
{
- char *ethaddr_str;
+ char *ethaddr_str;
- ethaddr_str = prom_getenv("ethaddr");
+ ethaddr_str = prom_getenv("ethaddr");
if (!ethaddr_str) {
- printk("ethaddr not set in boot prom\n");
+ printk("ethaddr not set in boot prom\n");
return -1;
}
str2eaddr(ethernet_addr, ethaddr_str);
if (init_debug > 1) {
- int i;
+ int i;
printk("get_ethernet_addr: ");
- for (i=0; i<5; i++)
- printk("%02x:", (unsigned char)*(ethernet_addr+i));
+ for (i=0; i<5; i++)
+ printk("%02x:", (unsigned char)*(ethernet_addr+i));
printk("%02x\n", *(ethernet_addr+i));
}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 647b86383184..e364af70e6cf 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -84,10 +84,10 @@ static inline int mips_pcibios_iack(void)
/* Flush Bonito register block */
(void) BONITO_PCIMAP_CFG;
- iob(); /* sync */
+ iob(); /* sync */
irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
- iob(); /* sync */
+ iob(); /* sync */
irq &= 0xff;
BONITO_PCIMAP_CFG = 0;
break;
@@ -136,7 +136,7 @@ static void malta_ipi_irqdispatch(void)
irq = gic_get_int();
if (irq < 0)
- return; /* interrupt has already been cleared */
+ return; /* interrupt has already been cleared */
do_IRQ(MIPS_GIC_IRQ_BASE + irq);
}
@@ -149,7 +149,7 @@ static void corehi_irqdispatch(void)
struct pt_regs *regs = get_irq_regs();
printk(KERN_EMERG "CoreHI interrupt, shouldn't happen, we die here!\n");
- printk(KERN_EMERG "epc : %08lx\nStatus: %08lx\n"
+ printk(KERN_EMERG "epc : %08lx\nStatus: %08lx\n"
"Cause : %08lx\nbadVaddr : %08lx\n",
regs->cp0_epc, regs->cp0_status,
regs->cp0_cause, regs->cp0_badvaddr);
@@ -249,20 +249,20 @@ static inline unsigned int irq_ffs(unsigned int pending)
* on hardware interrupt 0 (MIPS IRQ 2)) like:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Combined hardware interrupt (hw0)
- * 3 Hardware (ignored)
- * 4 Hardware (ignored)
- * 5 Hardware (ignored)
- * 6 Hardware (ignored)
- * 7 R4k timer (what we use)
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Combined hardware interrupt (hw0)
+ * 3 Hardware (ignored)
+ * 4 Hardware (ignored)
+ * 5 Hardware (ignored)
+ * 6 Hardware (ignored)
+ * 7 R4k timer (what we use)
*
* We handle the IRQ according to _our_ priority which is:
*
- * Highest ---- R4k Timer
- * Lowest ---- Combined hardware interrupt
+ * Highest ---- R4k Timer
+ * Lowest ---- Combined hardware interrupt
*
* then we just return, if multiple IRQs are pending then we will just take
* another exception, big deal.
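The comment block above spells out the dispatch priority, but the selection itself happens in irq_ffs(), whose body is outside this hunk. Below is a minimal standalone model of the rule it describes: the highest-numbered pending Cause bit wins, so the R4k timer on IP7 beats the combined hw0 interrupt on IP2. pick_irq() is an illustrative name, not the kernel's irq_ffs().

/* Illustrative sketch only, not kernel code. */
#include <stdio.h>

static int pick_irq(unsigned int pending)	/* pending = CP0 Cause IP0..IP7 bits */
{
	int i;

	for (i = 7; i >= 0; i--)		/* highest MIPS IRQ number first */
		if (pending & (1u << i))
			return i;
	return -1;				/* nothing pending */
}

int main(void)
{
	printf("%d\n", pick_irq(0x84));		/* IP7 and IP2 pending -> 7 (timer first) */
	return 0;
}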
@@ -396,7 +396,7 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
- { X, X, X, X, 0 },
+ { X, X, X, X, 0 },
{ X, X, X, X, 0 },
{ 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
@@ -410,7 +410,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { X, X, X, X, 0 },
+ { X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
#undef X
@@ -634,7 +634,7 @@ void malta_be_init(void)
static char *tr[8] = {
"mem", "gcr", "gic", "mmio",
- "0x04", "0x05", "0x06", "0x07"
+ "0x04", "0x05", "0x06", "0x07"
};
static char *mcmd[32] = {
@@ -673,10 +673,10 @@ static char *mcmd[32] = {
};
static char *core[8] = {
- "Invalid/OK", "Invalid/Data",
+ "Invalid/OK", "Invalid/Data",
"Shared/OK", "Shared/Data",
"Modified/OK", "Modified/Data",
- "Exclusive/OK", "Exclusive/Data"
+ "Exclusive/OK", "Exclusive/Data"
};
static char *causes[32] = {
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index a96d281f9221..f3d43aa023a9 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -47,7 +47,7 @@ static char *mtypes[3] = {
};
#endif
-/* determined physical memory size, not overridden by command line args */
+/* determined physical memory size, not overridden by command line args */
unsigned long physical_memsize = 0L;
static struct prom_pmemblock * __init prom_getmdesc(void)
@@ -158,7 +158,7 @@ void __init prom_meminit(void)
size = p->size;
add_memory_region(base, size, type);
- p++;
+ p++;
}
}
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 2147cb34e705..37134ddfeaa5 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
@@ -127,7 +127,7 @@ void __init mips_pcibios_init(void)
map = map1;
}
mask = ~(start ^ end);
- /* We don't support remapping with a discontiguous mask. */
+ /* We don't support remapping with a discontiguous mask. */
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
mask != ~((mask & -mask) - 1));
gt64120_mem_resource.start = start;
@@ -144,7 +144,7 @@ void __init mips_pcibios_init(void)
map = GT_READ(GT_PCI0IOREMAP_OFS);
end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
mask = ~(start ^ end);
- /* We don't support remapping with a discontiguous mask. */
+ /* We don't support remapping with a discontiguous mask. */
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
mask != ~((mask & -mask) - 1));
gt64120_io_resource.start = map & mask;
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 74732177851c..132f8663825e 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -93,7 +93,7 @@ static struct mtd_partition malta_mtd_partitions[] = {
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
- .offset = 0x100000,
+ .offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 2e28f653f66d..200f64df2c9b 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -78,9 +78,9 @@ const char *get_system_type(void)
}
#if defined(CONFIG_MIPS_MT_SMTC)
-const char display_string[] = " SMTC LINUX ON MALTA ";
+const char display_string[] = " SMTC LINUX ON MALTA ";
#else
-const char display_string[] = " LINUX ON MALTA ";
+const char display_string[] = " LINUX ON MALTA ";
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_BLK_DEV_FD
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 1efc8c394486..becbf47506a5 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -126,7 +126,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
* to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
* that signal is brought to IP2 of both VPEs. To avoid racing
* concurrent interrupt service events, IP2 is enabled only on
- * one VPE, by convention VPE0. So long as no bits are ever
+ * one VPE, by convention VPE0. So long as no bits are ever
* cleared in the affinity mask, there will never be any
* interrupt forwarding. But as soon as a program or operator
* sets affinity for one of the related IRQs, we need to make
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 115f5bc06003..a144b89cf9ba 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -17,7 +17,6 @@
*
* Setting up the clock on the MIPS boards.
*/
-
#include <linux/types.h>
#include <linux/i8253.h>
#include <linux/init.h>
@@ -25,7 +24,6 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/timex.h>
#include <linux/mc146818rtc.h>
@@ -34,11 +32,11 @@
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
+#include <asm/gic.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
@@ -46,6 +44,7 @@
#include <asm/mips-boards/maltaint.h>
unsigned long cpu_khz;
+int gic_frequency;
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
@@ -61,44 +60,50 @@ static void mips_perf_dispatch(void)
do_IRQ(mips_cpu_perf_irq);
}
+static unsigned int freqround(unsigned int freq, unsigned int amount)
+{
+ freq += amount;
+ freq -= freq % (amount*2);
+ return freq;
+}
+
/*
- * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
+ * Estimate CPU and GIC frequencies.
*/
-static unsigned int __init estimate_cpu_frequency(void)
+static void __init estimate_frequencies(void)
{
- unsigned int prid = read_c0_prid() & 0xffff00;
- unsigned int count;
-
unsigned long flags;
- unsigned int start;
+ unsigned int count, start;
+ unsigned int giccount = 0, gicstart = 0;
local_irq_save(flags);
- /* Start counter exactly on falling edge of update flag */
+ /* Start counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
- /* Start r4k counter. */
+ /* Initialize counters. */
start = read_c0_count();
+ if (gic_present)
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
- /* Read counter exactly on falling edge of update flag */
+ /* Read counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
- count = read_c0_count() - start;
+ count = read_c0_count();
+ if (gic_present)
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
- /* restore interrupts */
local_irq_restore(flags);
- mips_hpt_frequency = count;
- if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
- (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
- count *= 2;
-
- count += 5000; /* round */
- count -= count%10000;
+ count -= start;
+ if (gic_present)
+ giccount -= gicstart;
- return count;
+ mips_hpt_frequency = count;
+ if (gic_present)
+ gic_frequency = giccount;
}
void read_persistent_clock(struct timespec *ts)
@@ -144,22 +149,34 @@ unsigned int __cpuinit get_c0_compare_int(void)
void __init plat_time_init(void)
{
- unsigned int est_freq;
-
- /* Set Data mode - binary. */
- CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);
-
- est_freq = estimate_cpu_frequency();
+ unsigned int prid = read_c0_prid() & 0xffff00;
+ unsigned int freq;
- printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
- (est_freq%1000000)*100/1000000);
+ estimate_frequencies();
- cpu_khz = est_freq / 1000;
+ freq = mips_hpt_frequency;
+ if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+ (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+ freq *= 2;
+ freq = freqround(freq, 5000);
+ pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+ cpu_khz = freq / 1000;
+
+ if (gic_present) {
+ freq = freqround(gic_frequency, 5000);
+ pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+ gic_clocksource_init(gic_frequency);
+ } else
+ init_r4k_clocksource();
- mips_scroll_message();
-#ifdef CONFIG_I8253 /* Only Malta has a PIT */
+#ifdef CONFIG_I8253
+ /* Only Malta has a PIT. */
setup_pit_timer();
#endif
+ mips_scroll_message();
+
plat_perf_setup();
}
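For reference, the freqround() helper introduced in this hunk rounds to the nearest multiple of twice its second argument, so freqround(f, 5000) snaps the measured count to the nearest 10 kHz. A standalone check; the helper body is copied from the hunk above, main() is only for illustration:

#include <stdio.h>

static unsigned int freqround(unsigned int freq, unsigned int amount)
{
	freq += amount;
	freq -= freq % (amount * 2);
	return freq;
}

int main(void)
{
	printf("%u\n", freqround(33330001, 5000));	/* 33330000 */
	printf("%u\n", freqround(99995000, 5000));	/* 100000000 */
	return 0;
}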
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 626afeac4386..10ec701ce6c7 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -5,10 +5,12 @@
# Copyright (C) 2008 Wind River Systems, Inc.
# written by Ralf Baechle <ralf@linux-mips.org>
#
+# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.

+# Steven J. Hill <sjhill@mips.com>
+#
obj-y := sead3-lcd.o sead3-cmdline.o \
sead3-display.o sead3-init.o sead3-int.o \
- sead3-mtd.o sead3-net.o \
- sead3-memory.o sead3-platform.o \
+ sead3-mtd.o sead3-net.o sead3-platform.o \
sead3-reset.o sead3-setup.o sead3-time.o
obj-y += sead3-i2c-dev.o sead3-i2c.o \
@@ -17,3 +19,7 @@ obj-y += sead3-i2c-dev.o sead3-i2c.o \
obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
obj-$(CONFIG_USB_EHCI_HCD) += sead3-ehci.o
+obj-$(CONFIG_OF) += sead3.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+ $(call if_changed,dtc)
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index a95ac5985206..322148c353ed 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -33,12 +33,12 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
static struct led_classdev sead3_pled = {
.name = "sead3::pled",
- .brightness_set = sead3_pled_set,
+ .brightness_set = sead3_pled_set,
};
static struct led_classdev sead3_fled = {
.name = "sead3::fled",
- .brightness_set = sead3_fled_set,
+ .brightness_set = sead3_fled_set,
};
#ifdef CONFIG_PM
@@ -125,4 +125,3 @@ module_exit(sead3_led_exit);
MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
MODULE_DESCRIPTION("SEAD3 LED driver");
MODULE_LICENSE("GPL");
-
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index b36739108a0f..2ddef19a9adc 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -10,8 +10,8 @@
#include <linux/serial_reg.h>
#include <linux/io.h>
-#define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */
-#define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */
+#define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */
+#define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */
#define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4))
static char console_port = 1;
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index 8308c7fc188a..e389326cfa42 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -21,7 +21,7 @@ static unsigned int max_display_count;
#define LCD_SETDDRAM 0x80
#define LCD_IR_BF 0x80
-const char display_string[] = " LINUX ON SEAD3 ";
+const char display_string[] = " LINUX ON SEAD3 ";
static void scroll_display_message(unsigned long data);
static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
diff --git a/arch/mips/mti-sead3/sead3-i2c-drv.c b/arch/mips/mti-sead3/sead3-i2c-drv.c
index 7aa2225e75b9..1f787a6a7878 100644
--- a/arch/mips/mti-sead3/sead3-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-i2c-drv.c
@@ -13,32 +13,32 @@
#include <linux/platform_device.h>
#define PIC32_I2CxCON 0x0000
-#define PIC32_I2CCON_ON (1<<15)
-#define PIC32_I2CCON_ACKDT (1<<5)
-#define PIC32_I2CCON_ACKEN (1<<4)
-#define PIC32_I2CCON_RCEN (1<<3)
-#define PIC32_I2CCON_PEN (1<<2)
-#define PIC32_I2CCON_RSEN (1<<1)
-#define PIC32_I2CCON_SEN (1<<0)
+#define PIC32_I2CCON_ON (1<<15)
+#define PIC32_I2CCON_ACKDT (1<<5)
+#define PIC32_I2CCON_ACKEN (1<<4)
+#define PIC32_I2CCON_RCEN (1<<3)
+#define PIC32_I2CCON_PEN (1<<2)
+#define PIC32_I2CCON_RSEN (1<<1)
+#define PIC32_I2CCON_SEN (1<<0)
#define PIC32_I2CxCONCLR 0x0004
#define PIC32_I2CxCONSET 0x0008
#define PIC32_I2CxSTAT 0x0010
#define PIC32_I2CxSTATCLR 0x0014
-#define PIC32_I2CSTAT_ACKSTAT (1<<15)
-#define PIC32_I2CSTAT_TRSTAT (1<<14)
-#define PIC32_I2CSTAT_BCL (1<<10)
-#define PIC32_I2CSTAT_IWCOL (1<<7)
-#define PIC32_I2CSTAT_I2COV (1<<6)
+#define PIC32_I2CSTAT_ACKSTAT (1<<15)
+#define PIC32_I2CSTAT_TRSTAT (1<<14)
+#define PIC32_I2CSTAT_BCL (1<<10)
+#define PIC32_I2CSTAT_IWCOL (1<<7)
+#define PIC32_I2CSTAT_I2COV (1<<6)
#define PIC32_I2CxBRG 0x0040
#define PIC32_I2CxTRN 0x0050
#define PIC32_I2CxRCV 0x0060
static DEFINE_SPINLOCK(pic32_bus_lock);
-static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
static void __iomem *bus_status = (void __iomem *)0xbf000060;
-#define DELAY() udelay(100)
+#define DELAY() udelay(100)
static inline unsigned int ioready(void)
{
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index a958cad6fff6..f95abaa1aa5d 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -77,7 +77,6 @@ void __init prom_init(void)
board_ejtag_handler_setup = mips_ejtag_setup;
prom_init_cmdline();
- prom_meminit();
#ifdef CONFIG_EARLY_PRINTK
if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
prom_init_early_console(0);
@@ -89,3 +88,7 @@ void __init prom_init(void)
strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
#endif
}
+
+void prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/mti-sead3/sead3-memory.c b/arch/mips/mti-sead3/sead3-memory.c
deleted file mode 100644
index da9244106f86..000000000000
--- a/arch/mips/mti-sead3/sead3-memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <asm/sections.h>
-#include <asm/mips-boards/prom.h>
-
-enum yamon_memtypes {
- yamon_dontuse,
- yamon_prom,
- yamon_free,
-};
-
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-/* determined physical memory size, not overridden by command line args */
-unsigned long physical_memsize = 0L;
-
-struct prom_pmemblock * __init prom_getmdesc(void)
-{
- char *memsize_str, *ptr;
- unsigned int memsize;
- static char cmdline[COMMAND_LINE_SIZE] __initdata;
- long val;
- int tmp;
-
- /* otherwise look in the environment */
- memsize_str = prom_getenv("memsize");
- if (!memsize_str) {
- pr_warn("memsize not set in boot prom, set to default 32Mb\n");
- physical_memsize = 0x02000000;
- } else {
- tmp = kstrtol(memsize_str, 0, &val);
- physical_memsize = (unsigned long)val;
- }
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
- word of physical memory */
- physical_memsize -= PAGE_SIZE;
-#endif
-
- /* Check the command line for a memsize directive that overrides
- the physical/default amount */
- strcpy(cmdline, arcs_cmdline);
- ptr = strstr(cmdline, "memsize=");
- if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
- ptr = strstr(ptr, " memsize=");
-
- if (ptr)
- memsize = memparse(ptr + 8, &ptr);
- else
- memsize = physical_memsize;
-
- memset(mdesc, 0, sizeof(mdesc));
-
- mdesc[0].type = yamon_dontuse;
- mdesc[0].base = 0x00000000;
- mdesc[0].size = 0x00001000;
-
- mdesc[1].type = yamon_prom;
- mdesc[1].base = 0x00001000;
- mdesc[1].size = 0x000ef000;
-
- /*
- * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
- * south bridge and PCI access always forwarded to the ISA Bus and
- * BIOSCS# is always generated.
- * This mean that this area can't be used as DMA memory for PCI
- * devices.
- */
- mdesc[2].type = yamon_dontuse;
- mdesc[2].base = 0x000f0000;
- mdesc[2].size = 0x00010000;
-
- mdesc[3].type = yamon_dontuse;
- mdesc[3].base = 0x00100000;
- mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
- mdesc[3].base;
-
- mdesc[4].type = yamon_free;
- mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
- mdesc[4].size = memsize - mdesc[4].base;
-
- return &mdesc[0];
-}
-
-static int __init prom_memtype_classify(unsigned int type)
-{
- switch (type) {
- case yamon_free:
- return BOOT_MEM_RAM;
- case yamon_prom:
- return BOOT_MEM_ROM_DATA;
- default:
- return BOOT_MEM_RESERVED;
- }
-}
-
-void __init prom_meminit(void)
-{
- struct prom_pmemblock *p;
-
- p = prom_getmdesc();
-
- while (p->size) {
- long type;
- unsigned long base, size;
-
- type = prom_memtype_classify(p->type);
- base = p->base;
- size = p->size;
-
- add_memory_region(base, size, type);
- p++;
- }
-}
-
-void __init prom_free_prom_memory(void)
-{
- unsigned long addr;
- int i;
-
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
- continue;
-
- addr = boot_mem_map.map[i].addr;
- free_init_pages("prom memory",
- addr, addr + boot_mem_map.map[i].size);
- }
-}
diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c
index 04d704df6098..dd11e7eb771c 100644
--- a/arch/mips/mti-sead3/sead3-net.c
+++ b/arch/mips/mti-sead3/sead3-net.c
@@ -19,8 +19,8 @@ static struct smsc911x_platform_config sead3_smsc911x_data = {
struct resource sead3_net_resourcess[] = {
{
- .start = 0x1f010000,
- .end = 0x1f01ffff,
+ .start = 0x1f010000,
+ .end = 0x1f01ffff,
.flags = IORESOURCE_MEM
},
{
diff --git a/arch/mips/mti-sead3/sead3-pic32-bus.c b/arch/mips/mti-sead3/sead3-pic32-bus.c
index 9f0d89bc800e..eb2bf936d102 100644
--- a/arch/mips/mti-sead3/sead3-pic32-bus.c
+++ b/arch/mips/mti-sead3/sead3-pic32-bus.c
@@ -17,16 +17,16 @@
#define PIC32_SYSRD 0x02
#define PIC32_WR 0x10
#define PIC32_SYSWR 0x20
-#define PIC32_IRQ_CLR 0x40
+#define PIC32_IRQ_CLR 0x40
#define PIC32_STATUS 0x80
-#define DELAY() udelay(100) /* FIXME: needed? */
+#define DELAY() udelay(100) /* FIXME: needed? */
/* spinlock to ensure atomic access to PIC32 */
static DEFINE_SPINLOCK(pic32_bus_lock);
/* FIXME: io_remap these */
-static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
static void __iomem *bus_status = (void __iomem *)0xbf000060;
static inline unsigned int ioready(void)
diff --git a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
index 514675ed0cde..b921e5ec507c 100644
--- a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
@@ -19,40 +19,40 @@
#define PIC32_I2CxCONCLR 0x0004
#define PIC32_I2CxCONSET 0x0008
#define PIC32_I2CxCONINV 0x000C
-#define I2CCON_ON (1<<15)
-#define I2CCON_FRZ (1<<14)
-#define I2CCON_SIDL (1<<13)
-#define I2CCON_SCLREL (1<<12)
-#define I2CCON_STRICT (1<<11)
-#define I2CCON_A10M (1<<10)
-#define I2CCON_DISSLW (1<<9)
-#define I2CCON_SMEN (1<<8)
-#define I2CCON_GCEN (1<<7)
-#define I2CCON_STREN (1<<6)
-#define I2CCON_ACKDT (1<<5)
-#define I2CCON_ACKEN (1<<4)
-#define I2CCON_RCEN (1<<3)
-#define I2CCON_PEN (1<<2)
-#define I2CCON_RSEN (1<<1)
-#define I2CCON_SEN (1<<0)
+#define I2CCON_ON (1<<15)
+#define I2CCON_FRZ (1<<14)
+#define I2CCON_SIDL (1<<13)
+#define I2CCON_SCLREL (1<<12)
+#define I2CCON_STRICT (1<<11)
+#define I2CCON_A10M (1<<10)
+#define I2CCON_DISSLW (1<<9)
+#define I2CCON_SMEN (1<<8)
+#define I2CCON_GCEN (1<<7)
+#define I2CCON_STREN (1<<6)
+#define I2CCON_ACKDT (1<<5)
+#define I2CCON_ACKEN (1<<4)
+#define I2CCON_RCEN (1<<3)
+#define I2CCON_PEN (1<<2)
+#define I2CCON_RSEN (1<<1)
+#define I2CCON_SEN (1<<0)
#define PIC32_I2CxSTAT 0x0010
#define PIC32_I2CxSTATCLR 0x0014
#define PIC32_I2CxSTATSET 0x0018
#define PIC32_I2CxSTATINV 0x001C
-#define I2CSTAT_ACKSTAT (1<<15)
-#define I2CSTAT_TRSTAT (1<<14)
-#define I2CSTAT_BCL (1<<10)
-#define I2CSTAT_GCSTAT (1<<9)
-#define I2CSTAT_ADD10 (1<<8)
-#define I2CSTAT_IWCOL (1<<7)
-#define I2CSTAT_I2COV (1<<6)
-#define I2CSTAT_DA (1<<5)
-#define I2CSTAT_P (1<<4)
-#define I2CSTAT_S (1<<3)
-#define I2CSTAT_RW (1<<2)
-#define I2CSTAT_RBF (1<<1)
-#define I2CSTAT_TBF (1<<0)
+#define I2CSTAT_ACKSTAT (1<<15)
+#define I2CSTAT_TRSTAT (1<<14)
+#define I2CSTAT_BCL (1<<10)
+#define I2CSTAT_GCSTAT (1<<9)
+#define I2CSTAT_ADD10 (1<<8)
+#define I2CSTAT_IWCOL (1<<7)
+#define I2CSTAT_I2COV (1<<6)
+#define I2CSTAT_DA (1<<5)
+#define I2CSTAT_P (1<<4)
+#define I2CSTAT_S (1<<3)
+#define I2CSTAT_RW (1<<2)
+#define I2CSTAT_RBF (1<<1)
+#define I2CSTAT_TBF (1<<0)
#define PIC32_I2CxADD 0x0020
#define PIC32_I2CxADDCLR 0x0024
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index 8ad46ad31b49..f012fd164cee 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -6,6 +6,12 @@
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/bootmem.h>
+
+#include <asm/mips-boards/generic.h>
+#include <asm/prom.h>
int coherentio; /* 0 => no DMA cache coherency (may be set by user) */
int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */
@@ -17,4 +23,25 @@ const char *get_system_type(void)
void __init plat_mem_setup(void)
{
+ /*
+ * Load the builtin devicetree. This causes the chosen node to be
+ * parsed resulting in our memory appearing
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+ /* Before we do anything, lets reserve the dt blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ unflatten_device_tree();
}
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 048e781a17a0..239e4e32757f 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -43,11 +43,11 @@ static unsigned int __init estimate_cpu_frequency(void)
local_irq_save(flags);
- orig = readl(status_reg) & 0x2; /* get original sample */
+ orig = readl(status_reg) & 0x2; /* get original sample */
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
- orig = orig ^ 0x2; /* flip the bit */
+ orig = orig ^ 0x2; /* flip the bit */
write_c0_count(0);
@@ -56,7 +56,7 @@ static unsigned int __init estimate_cpu_frequency(void)
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
- orig = orig ^ 0x2; /* flip the bit */
+ orig = orig ^ 0x2; /* flip the bit */
tick++;
}
@@ -71,7 +71,7 @@ static unsigned int __init estimate_cpu_frequency(void)
(prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
freq *= 2;
- freq += 5000; /* rounding */
+ freq += 5000; /* rounding */
freq -= freq%10000;
return freq ;
diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/mti-sead3/sead3.dts
new file mode 100644
index 000000000000..658f43787056
--- /dev/null
+++ b/arch/mips/mti-sead3/sead3.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/memreserve/ 0x00000000 0x00001000; // reserved
+/memreserve/ 0x00001000 0x000ef000; // ROM data
+/memreserve/ 0x000f0000 0x004cc000; // reserved
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mti,sead-3";
+
+ cpus {
+ cpu@0 {
+ compatible = "mti,mips14KEc", "mti,mips14Kc";
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS1,38400 rootdelay=10 root=/dev/sda3";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x08000000>;
+ };
+};
diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform
index cdfc9abbbb7b..fb8eb4c0c6ec 100644
--- a/arch/mips/netlogic/Platform
+++ b/arch/mips/netlogic/Platform
@@ -13,5 +13,5 @@ cflags-$(CONFIG_CPU_XLP) += $(call cc-option,-march=xlp,-march=mips64r2)
#
# NETLOGIC processor support
#
-platform-$(CONFIG_NLM_COMMON) += netlogic/
-load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
+platform-$(CONFIG_NLM_COMMON) += netlogic/
+load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 00dcc7a2bc5a..9f84c60bf535 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -69,7 +69,7 @@
#else
#define SMP_IRQ_MASK 0
#endif
-#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
(1ull << IRQ_FMN))
struct nlm_pic_irq {
@@ -105,21 +105,23 @@ static void xlp_pic_disable(struct irq_data *d)
static void xlp_pic_mask_ack(struct irq_data *d)
{
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- uint64_t mask = 1ull << pd->picirq;
- write_c0_eirr(mask); /* ack by writing EIRR */
+ clear_c0_eimr(pd->picirq);
+ ack_c0_eirr(pd->picirq);
}
static void xlp_pic_unmask(struct irq_data *d)
{
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- if (!pd)
- return;
+ BUG_ON(!pd);
if (pd->extra_ack)
pd->extra_ack(d);
+ /* re-enable the intr on this cpu */
+ set_c0_eimr(pd->picirq);
+
/* Ack is a single write, no need to lock */
nlm_pic_ack(pd->node->picbase, pd->irt);
}
@@ -134,32 +136,17 @@ static struct irq_chip xlp_pic = {
static void cpuintr_disable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr & ~mask);
+ clear_c0_eimr(d->irq);
}
static void cpuintr_enable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr | mask);
+ set_c0_eimr(d->irq);
}
static void cpuintr_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
-
- write_c0_eirr(mask);
-}
-
-static void cpuintr_nop(struct irq_data *d)
-{
- WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq);
+ ack_c0_eirr(d->irq);
}
/*
@@ -170,9 +157,9 @@ struct irq_chip nlm_cpu_intr = {
.name = "XLP-CPU-INTR",
.irq_enable = cpuintr_enable,
.irq_disable = cpuintr_disable,
- .irq_mask = cpuintr_nop,
- .irq_ack = cpuintr_nop,
- .irq_eoi = cpuintr_ack,
+ .irq_mask = cpuintr_disable,
+ .irq_ack = cpuintr_ack,
+ .irq_eoi = cpuintr_enable,
};
static void __init nlm_init_percpu_irqs(void)
@@ -230,7 +217,7 @@ static void nlm_init_node_irqs(int node)
nlm_setup_pic_irq(node, i, i, irt);
/* set interrupts to first cpu in node */
nlm_pic_init_irt(nodep->picbase, irt, i,
- node * NLM_CPUS_PER_NODE);
+ node * NLM_CPUS_PER_NODE, 0);
irqmask |= (1ull << i);
}
nodep->irqmask = irqmask;
@@ -265,7 +252,7 @@ asmlinkage void plat_irq_dispatch(void)
int i, node;
node = nlm_nodeid();
- eirr = read_c0_eirr() & read_c0_eimr();
+ eirr = read_c0_eirr_and_eimr();
i = __ilog2_u64(eirr);
if (i == -1)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index a080d9ee3cd7..2bb95dcfe20a 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -84,15 +84,19 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
/* IRQ_IPI_SMP_FUNCTION Handler */
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
smp_call_function_interrupt();
+ set_c0_eimr(irq);
}
/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
scheduler_ipi();
+ set_c0_eimr(irq);
}
/*
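The irq.c and smp.c hunks above replace open-coded reads and writes of the 64-bit EIMR/EIRR registers with set_c0_eimr(), clear_c0_eimr() and ack_c0_eirr() helpers defined elsewhere in the NetLogic headers. A plain userspace model of what those operations amount to; this is an illustrative sketch, not the in-tree implementation:

#include <stdio.h>
#include <stdint.h>

static uint64_t eimr = ~0ULL;	/* mask register: 1 = delivery enabled */
static uint64_t eirr;		/* request register: 1 = pending */

static void clear_eimr(int irq) { eimr &= ~(1ULL << irq); }	/* mask */
static void set_eimr(int irq)   { eimr |=  (1ULL << irq);  }	/* unmask */
static void ack_eirr(int irq)   { eirr &= ~(1ULL << irq);  }	/* ack (hw: write 1 to EIRR) */

int main(void)
{
	eirr |= 1ULL << 9;		/* pretend IRQ 9 fired */
	clear_eimr(9);			/* mask it while it is handled ... */
	ack_eirr(9);			/* ... ack the pending bit ... */
	set_eimr(9);			/* ... and re-enable it afterwards */
	printf("eimr bit9=%d eirr bit9=%d\n",
	       (int)((eimr >> 9) & 1), (int)((eirr >> 9) & 1));	/* prints: 1 0 */
	return 0;
}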
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index a0b74874bebe..026517488584 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -49,12 +49,12 @@
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
-#define CP0_EBASE $15
+#define CP0_EBASE $15
#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
-#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
+#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
/* Enable XLP features and workarounds in the LSU */
.macro xlp_config_lsu
@@ -69,6 +69,12 @@
#endif
mtcr t1, t0
+ li t0, ICU_DEFEATURE
+ mfcr t1, t0
+ ori t1, 0x1000 /* Enable Icache partitioning */
+ mtcr t1, t0
+
+
#ifdef XLP_AX_WORKAROUND
li t0, SCHED_DEFEATURE
lui t1, 0x0100 /* Disable BRU accepting ALU ops */
@@ -85,7 +91,7 @@
li t0, LSU_DEBUG_DATA0
li t1, LSU_DEBUG_ADDR
li t2, 0 /* index */
- li t3, 0x1000 /* loop count */
+ li t3, 0x1000 /* loop count */
1:
sll v0, t2, 5
mtcr zero, t0
@@ -134,7 +140,7 @@ FEXPORT(nlm_reset_entry)
and k1, k0, k1
beqz k1, 1f /* go to real reset entry */
nop
- li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
ld k0, BOOT_NMI_HANDLER(k1)
jr k0
nop
@@ -235,7 +241,7 @@ EXPORT(nlm_reset_entry_end)
FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
xlp_config_lsu
- dmtc0 sp, $4, 2 /* SP saved in UserLocal */
+ dmtc0 sp, $4, 2 /* SP saved in UserLocal */
SAVE_ALL
sync
/* find the location to which nlm_boot_siblings was relocated */
@@ -301,13 +307,13 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
*/
li t0, 0x400
mfcr t1, t0
- li t2, 6 /* XLR thread mode mask */
+ li t2, 6 /* XLR thread mode mask */
nor t3, t2, zero
and t2, t1, t2 /* t2 - current thread mode */
li v0, CKSEG1ADDR(RESET_DATA_PHYS)
lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
sll v1, 1
- beq v1, t2, 1f /* same as request value */
+ beq v1, t2, 1f /* same as request value */
nop /* nothing to do */
and t2, t1, t3 /* mask out old thread mode */
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index bd3e498157ff..5c56555380bb 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -35,17 +35,73 @@
#include <linux/init.h>
#include <asm/time.h>
+#include <asm/cpu-features.h>
+
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
unsigned int __cpuinit get_c0_compare_int(void)
{
return IRQ_TIMER;
}
+static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+ .name = "PIC",
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+ if (current_cpu_data.cputype == CPU_XLR) {
+ csrc_pic.mask = CLOCKSOURCE_MASK(32);
+ csrc_pic.read = nlm_get_pic_timer32;
+ } else {
+ csrc_pic.mask = CLOCKSOURCE_MASK(64);
+ csrc_pic.read = nlm_get_pic_timer;
+ }
+ csrc_pic.rating = 1000;
+ clocksource_register_hz(&csrc_pic, PIC_CLK_HZ);
+}
+
void __init plat_time_init(void)
{
+ nlm_init_pic_timer();
mips_hpt_frequency = nlm_get_cpu_frequency();
+ if (current_cpu_type() == CPU_XLR)
+ preset_lpj = mips_hpt_frequency / (3 * HZ);
+ else
+ preset_lpj = mips_hpt_frequency / (2 * HZ);
pr_info("MIPS counter frequency [%ld]\n",
(unsigned long)mips_hpt_frequency);
}
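The new clocksource read hooks return the bitwise NOT of the PIC timer because the timer is loaded with all-ones and counts down, while the clocksource core expects a value that counts up. A standalone illustration of that inversion; pic_timer and clocksource_read() are placeholder names:

#include <stdio.h>
#include <stdint.h>

static uint64_t pic_timer = ~0ULL;	/* down-counter, as programmed above */

static uint64_t clocksource_read(void)
{
	return ~pic_timer;		/* convert to an up-counting value */
}

int main(void)
{
	pic_timer -= 1000;		/* 1000 timer ticks elapse */
	printf("%llu\n", (unsigned long long)clocksource_read());	/* prints 1000 */
	return 0;
}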
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index e14f42308064..7628b5464fc7 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "simple-bus";
- ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
1 0 0 0x16000000 0x01000000>; // GBU chipselects
serial0: serial@30000 {
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 529e74742d9f..c68fd4026104 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -111,8 +111,8 @@ unsigned int nlm_get_core_frequency(int node, int core)
dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
pll_divf = ((rstval >> 10) & 0x7f) + 1;
pll_divr = ((rstval >> 8) & 0x3) + 1;
- ext_div = ((rstval >> 30) & 0x3) + 1;
- dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
+ ext_div = ((rstval >> 30) & 0x3) + 1;
+ dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
num = 800000000ULL * pll_divf;
denom = 3 * pll_divr * ext_div * dfs_div;
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index dbe083a93538..1d0b66c62fd1 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -52,7 +52,7 @@ static void nlm_usb_intr_en(int node, int port)
port_addr = nlm_get_usb_regbase(node, port);
val = nlm_read_usb_reg(port_addr, USB_INT_EN);
val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
- USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN |
USB_OHCI_INTERRUPT_EN | USB_OHCI_INTERRUPT2_EN;
nlm_write_usb_reg(port_addr, USB_INT_EN, val);
}
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index cb9010642ac3..abb3e08cc052 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -51,7 +51,7 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-static int xlp_wakeup_core(uint64_t sysbase, int core)
+static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
{
uint32_t coremask, value;
int count;
@@ -82,36 +82,51 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
struct nlm_soc_info *nodep;
uint64_t syspcibase;
uint32_t syscoremask;
- int core, n, cpu;
+ int core, n, cpu, count, val;
for (n = 0; n < NLM_NR_NODES; n++) {
syspcibase = nlm_get_sys_pcibase(n);
if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
break;
- /* read cores in reset from SYS and account for boot cpu */
- nlm_node_init(n);
+ /* read cores in reset from SYS */
+ if (n != 0)
+ nlm_node_init(n);
nodep = nlm_get_node(n);
syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
- if (n == 0)
+ /* The boot cpu */
+ if (n == 0) {
syscoremask |= 1;
+ nodep->coremask = 1;
+ }
for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+ /* we will be on node 0 core 0 */
+ if (n == 0 && core == 0)
+ continue;
+
/* see if the core exists */
if ((syscoremask & (1 << core)) == 0)
continue;
- /* see if at least the first thread is enabled */
+ /* see if at least the first hw thread is enabled */
cpu = (n * NLM_CORES_PER_NODE + core)
* NLM_THREADS_PER_CORE;
if (!cpumask_test_cpu(cpu, wakeup_mask))
continue;
/* wake up the core */
- if (xlp_wakeup_core(nodep->sysbase, core))
- nodep->coremask |= 1u << core;
- else
- pr_err("Failed to enable core %d\n", core);
+ if (!xlp_wakeup_core(nodep->sysbase, n, core))
+ continue;
+
+ /* core is up */
+ nodep->coremask |= 1u << core;
+
+ /* spin until the first hw thread sets its ready */
+ count = 0x20000000;
+ do {
+ val = *(volatile int *)&nlm_cpu_ready[cpu];
+ } while (val == 0 && --count > 0);
}
}
}
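The wakeup path above now polls the secondary core's ready flag with a bounded retry budget instead of assuming the core comes up. A standalone sketch of that shape; cpu_ready and wait_for_cpu() are placeholder names, not kernel symbols:

#include <stdio.h>

static volatile int cpu_ready[32];	/* set to 1 by the secondary once it is up */

static int wait_for_cpu(int cpu, long budget)
{
	while (cpu_ready[cpu] == 0 && --budget > 0)
		;				/* bounded spin so a dead core cannot hang boot */
	return cpu_ready[cpu] != 0;		/* 1 = came up, 0 = gave up */
}

int main(void)
{
	cpu_ready[1] = 1;
	printf("cpu1 %s\n", wait_for_cpu(1, 1000) ? "up" : "timed out");
	printf("cpu2 %s\n", wait_for_cpu(2, 1000) ? "up" : "timed out");
	return 0;
}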
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
index bed2cffa1008..ed3bf0e3f309 100644
--- a/arch/mips/netlogic/xlr/fmn-config.c
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -164,8 +164,8 @@ static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
int i, j;
for (i = 0; i < num_core; i++) {
- cpu[i].start_stn_id = (8 * i);
- cpu[i].end_stn_id = (8 * i + 8);
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
xlr_board_fmn_config.bucket_size[j] = 32;
@@ -216,6 +216,8 @@ void xlr_board_info_setup(void)
case PRID_IMP_NETLOGIC_XLS404B:
case PRID_IMP_NETLOGIC_XLS408B:
case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS616B:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 8, 32);
setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
index 340ab1601c42..6d3c727e0ef8 100644
--- a/arch/mips/netlogic/xlr/platform-flash.c
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -36,7 +36,7 @@ static struct mtd_partition xlr_nor_parts[] = {
{
.name = "User FS",
.offset = 0x800000,
- .size = MTDPART_SIZ_FULL,
+ .size = MTDPART_SIZ_FULL,
}
};
@@ -46,13 +46,13 @@ static struct mtd_partition xlr_nor_parts[] = {
static struct mtd_partition xlr_nand_parts[] = {
{
.name = "Root Filesystem",
- .offset = 64 * 64 * 2048,
+ .offset = 64 * 64 * 2048,
.size = 432 * 64 * 2048,
},
{
.name = "Home Filesystem",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
},
};
@@ -74,8 +74,8 @@ static struct platform_device xlr_nor_dev = {
.dev = {
.platform_data = &xlr_nor_data,
},
- .num_resources = ARRAY_SIZE(xlr_nor_res),
- .resource = xlr_nor_res,
+ .num_resources = ARRAY_SIZE(xlr_nor_res),
+ .resource = xlr_nor_res,
};
const char *xlr_part_probes[] = { "cmdlinepart", NULL };
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
index 507230eeb768..7b96a91f4773 100644
--- a/arch/mips/netlogic/xlr/platform.c
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -64,7 +64,7 @@ void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
.iotype = UPIO_MEM32, \
.flags = (UPF_SKIP_TEST | \
UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
- .uartclk = PIC_CLKS_PER_SEC, \
+ .uartclk = PIC_CLK_HZ, \
.type = PORT_16550A, \
.serial_in = nlm_xlr_uart_in, \
.serial_out = nlm_xlr_uart_out, \
@@ -162,18 +162,18 @@ int xls_platform_usb_init(void)
nlm_write_reg(usb_mmio, 50, 0x1f000000);
/* Enable ports */
- nlm_write_reg(usb_mmio, 1, 0x07000500);
+ nlm_write_reg(usb_mmio, 1, 0x07000500);
val = nlm_read_reg(gpio_mmio, 21);
if (((val >> 22) & 0x01) == 0) {
pr_info("Detected USB Device mode - Not supported!\n");
- nlm_write_reg(usb_mmio, 0, 0x01000000);
+ nlm_write_reg(usb_mmio, 0, 0x01000000);
return 0;
}
pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
/* Clear reset, host mode */
- nlm_write_reg(usb_mmio, 0, 0x02000000);
+ nlm_write_reg(usb_mmio, 0, 0x02000000);
/* Memory resource for various XLS usb ports */
usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
@@ -221,8 +221,8 @@ static struct resource i2c_resources[] = {
};
static struct platform_device nlm_xlr_i2c_1 = {
- .name = "xlr-i2cbus",
- .id = 1,
+ .name = "xlr-i2cbus",
+ .id = 1,
.num_resources = 1,
.resource = i2c_resources,
};
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index c5ce6992ac4c..e3e094100e3e 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -70,7 +70,7 @@ static void __init nlm_early_serial_setup(void)
s.iotype = UPIO_MEM32;
s.regshift = 2;
s.irq = PIC_UART_0_IRQ;
- s.uartclk = PIC_CLKS_PER_SEC;
+ s.uartclk = PIC_CLK_HZ;
s.serial_in = nlm_xlr_uart_in;
s.serial_out = nlm_xlr_uart_out;
s.mapbase = uart_base;
@@ -163,7 +163,7 @@ static void prom_add_memory(void)
{
struct nlm_boot_mem_map *bootm;
u64 start, size;
- u64 pref_backup = 512; /* avoid pref walking beyond end */
+ u64 pref_backup = 512; /* avoid pref walking beyond end */
int i;
bootm = (void *)(long)nlm_prom_info.psb_mem_map;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index e32db1ff02c7..af763e838fdd 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,10 +27,10 @@ static int op_mips_setup(void)
/* Pre-compute the values to stuff in the hardware registers. */
model->reg_setup(ctr);
- /* Configure the registers on all cpus. */
+ /* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 1);
- return 0;
+ return 0;
}
static int op_mips_create_files(struct super_block *sb, struct dentry *root)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
switch (current_cpu_type()) {
case CPU_5KC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_20KC:
case CPU_24K:
case CPU_25KF:
@@ -110,7 +111,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->create_files = op_mips_create_files;
ops->setup = op_mips_setup;
- //ops->shutdown = op_mips_shutdown;
+ //ops->shutdown = op_mips_shutdown;
ops->start = op_mips_start;
ops->stop = op_mips_stop;
ops->cpu_type = lmodel->cpu_type;
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 60d3ea602118..b249ec0bebb2 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -18,13 +18,13 @@
#define LOONGSON2_CPU_TYPE "mips/loongson2"
-#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
+#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCTRL_USER (1UL << 3)
-#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON2_PERFCTRL_USER (1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
(((event) & 0x0f) << ((idx) ? 9 : 5))
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 786254630403..1fd361462c03 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -14,25 +14,25 @@
#include "op_impl.h"
-#define M_PERFCTL_EXL (1UL << 0)
-#define M_PERFCTL_KERNEL (1UL << 1)
-#define M_PERFCTL_SUPERVISOR (1UL << 2)
-#define M_PERFCTL_USER (1UL << 3)
-#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
+#define M_PERFCTL_EXL (1UL << 0)
+#define M_PERFCTL_KERNEL (1UL << 1)
+#define M_PERFCTL_SUPERVISOR (1UL << 2)
+#define M_PERFCTL_USER (1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
-#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
-#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
-#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
-#define M_PERFCTL_WIDE (1UL << 30)
-#define M_PERFCTL_MORE (1UL << 31)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1UL << 30)
+#define M_PERFCTL_MORE (1UL << 31)
-#define M_COUNTER_OVERFLOW (1UL << 31)
+#define M_COUNTER_OVERFLOW (1UL << 31)
/* Netlogic XLR specific, count events in all threads in a core */
-#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
+#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
static int (*save_perf_irq)(void);
@@ -143,7 +143,7 @@ static struct mipsxx_register_config {
unsigned int counter[4];
} reg;
-/* Compute all of the registers in preparation for enabling profiling. */
+/* Compute all of the registers in preparation for enabling profiling. */
static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
@@ -159,7 +159,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
continue;
reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
- M_PERFCTL_INTERRUPT_ENABLE;
+ M_PERFCTL_INTERRUPT_ENABLE;
if (ctr[i].kernel)
reg.control[i] |= M_PERFCTL_KERNEL;
if (ctr[i].user)
@@ -172,7 +172,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
}
}
-/* Program all of the registers in preparation for enabling profiling. */
+/* Program all of the registers in preparation for enabling profiling. */
static void mipsxx_cpu_setup(void *args)
{
@@ -351,6 +351,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
break;
+ case CPU_M14KEC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
+ break;
+
case CPU_20KC:
op_model_mipsxx_ops.cpu_type = "mips/20K";
break;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index ce995d3d9440..2cb1d315d225 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
#
obj-$(CONFIG_LASAT) += pci-lasat.o
obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
-obj-$(CONFIG_SOC_PNX8550) += fixup-pnx8550.o ops-pnx8550.o
obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o
@@ -55,10 +54,10 @@ obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
obj-$(CONFIG_CPU_XLR) += pci-xlr.o
obj-$(CONFIG_CPU_XLP) += pci-xlp.o
ifdef CONFIG_PCI_MSI
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
endif
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index 9553b14002dd..a138e8ee5cfc 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -94,14 +94,14 @@ static void qube_raq_galileo_fixup(struct pci_dev *dev)
* --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
*
* On all machines prior to Q2, we had the STOP line disconnected
- * from Galileo to VIA on PCI. The new Galileo does not function
+ * from Galileo to VIA on PCI. The new Galileo does not function
* correctly unless we have it connected.
*
* Therefore we must set the disconnect/retry cycle values to
* something sensible when using the new Galileo.
*/
- printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
+ printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
#if 0
if (dev->revision >= 0x10) {
@@ -149,30 +149,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
qube_raq_via_board_id_fixup);
static char irq_tab_qube1[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = 0
+ [COBALT_PCICONF_ETH1] = 0
};
static char irq_tab_cobalt[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = ETH1_IRQ
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
};
static char irq_tab_raq2[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = ETH1_IRQ
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
};
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/mips/pci/fixup-emma2rh.c b/arch/mips/pci/fixup-emma2rh.c
index beaec32b02e5..19caf775c206 100644
--- a/arch/mips/pci/fixup-emma2rh.c
+++ b/arch/mips/pci/fixup-emma2rh.c
@@ -42,7 +42,7 @@
*
*/
-#define MAX_SLOT_NUM 10
+#define MAX_SLOT_NUM 10
static unsigned char irq_map[][5] __initdata = {
[3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC,
MARKEINS_PCI_IRQ_INTD, 0,},
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index 63ab4a042cd6..50da773faede 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -6,9 +6,9 @@
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
@@ -152,7 +152,7 @@ static void loongson2e_686b_func1_fixup(struct pci_dev *pdev)
/* disable read prefetch/write post buffers */
pci_write_config_byte(pdev, 0x41, 0x02);
- /* use 3/4 as fifo thresh hold */
+ /* use 3/4 as fifo thresh hold */
pci_write_config_byte(pdev, 0x43, 0x0a);
pci_write_config_byte(pdev, 0x44, 0x00);
diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c
index 190fffd08d3e..133685e215ee 100644
--- a/arch/mips/pci/fixup-ip32.c
+++ b/arch/mips/pci/fixup-ip32.c
@@ -22,13 +22,13 @@
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
static char irq_tab_mace[][5] __initdata = {
- /* Dummy INT#A INT#B INT#C INT#D */
- {0, 0, 0, 0, 0}, /* This is placeholder row - never used */
- {0, SCSI0, SCSI0, SCSI0, SCSI0},
- {0, SCSI1, SCSI1, SCSI1, SCSI1},
- {0, INTA0, INTB, INTC, INTD},
- {0, INTA1, INTC, INTD, INTB},
- {0, INTA2, INTD, INTB, INTC},
+ /* Dummy INT#A INT#B INT#C INT#D */
+ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */
+ {0, SCSI0, SCSI0, SCSI0, SCSI0},
+ {0, SCSI1, SCSI1, SCSI1, SCSI1},
+ {0, INTA0, INTB, INTC, INTD},
+ {0, INTA1, INTC, INTD, INTB},
+ {0, INTA2, INTD, INTB, INTC},
};
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
index 519daaebb5da..95ab9a1bd010 100644
--- a/arch/mips/pci/fixup-lemote2f.c
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -31,7 +31,7 @@
/* all the pci device has the PCIA pin, check the datasheet. */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
+ /* INTA INTB INTC INTD */
{0, 0, 0, 0, 0}, /* 11: Unused */
{0, 0, 0, 0, 0}, /* 12: Unused */
{0, 0, 0, 0, 0}, /* 13: Unused */
@@ -69,15 +69,15 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
case 2:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_IDE_INTR);
- return CS5536_IDE_INTR; /* for IDE */
+ return CS5536_IDE_INTR; /* for IDE */
case 3:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_ACC_INTR);
- return CS5536_ACC_INTR; /* for AUDIO */
- case 4: /* for OHCI */
- case 5: /* for EHCI */
- case 6: /* for UDC */
- case 7: /* for OTG */
+ return CS5536_ACC_INTR; /* for AUDIO */
+ case 4: /* for OHCI */
+ case 5: /* for EHCI */
+ case 6: /* for UDC */
+ case 7: /* for OTG */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_USB_INTR);
return CS5536_USB_INTR;
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 75d03f6be3bd..07ada7f8441e 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -12,7 +12,7 @@ static char pci_irq[5] = {
};
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
+ /* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */
{0, 0, 0, 0, 0 }, /* 1: Unused */
{0, 0, 0, 0, 0 }, /* 2: Unused */
@@ -23,7 +23,7 @@ static char irq_tab[][5] __initdata = {
{0, 0, 0, 0, 0 }, /* 7: Unused */
{0, 0, 0, 0, 0 }, /* 8: Unused */
{0, 0, 0, 0, 0 }, /* 9: Unused */
- {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */
+ {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */
{0, PCIB, 0, 0, 0 }, /* 11: AMD 79C973 Ethernet */
{0, PCIC, 0, 0, 0 }, /* 12: Crystal 4281 Sound */
{0, 0, 0, 0, 0 }, /* 13: Unused */
@@ -31,9 +31,9 @@ static char irq_tab[][5] __initdata = {
{0, 0, 0, 0, 0 }, /* 15: Unused */
{0, 0, 0, 0, 0 }, /* 16: Unused */
{0, 0, 0, 0, 0 }, /* 17: Bonito/SOC-it PCI Bridge*/
- {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */
- {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */
- {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */
+ {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */
+ {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */
+ {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */
{0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */
};
@@ -54,8 +54,8 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
static int piixirqmap[16] = { /* PIIX PIRQC[A:D] irq mappings */
- 0, 0, 0, 3,
- 4, 5, 6, 7,
+ 0, 0, 0, 3,
+ 4, 5, 6, 7,
0, 9, 10, 11,
12, 0, 14, 15
};
diff --git a/arch/mips/pci/fixup-pmcmsp.c b/arch/mips/pci/fixup-pmcmsp.c
index 65735b1b7665..fab405c21c2f 100644
--- a/arch/mips/pci/fixup-pmcmsp.c
+++ b/arch/mips/pci/fixup-pmcmsp.c
@@ -48,117 +48,117 @@
#if defined(CONFIG_PMC_MSP7120_GW)
/* Garibaldi Board IRQ wiring to PCI slots */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
- {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
- {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
- {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */
- {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
+ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
+ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
+ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */
+ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */
};
#elif defined(CONFIG_PMC_MSP7120_EVAL)
/* MSP7120 Eval Board IRQ wiring to PCI slots */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */
- {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */
- {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */
- {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
- {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */
+ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */
+ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */
+ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
+ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
};
#else
/* Unknown board -- don't assign any IRQs */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
- {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
- {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
- {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
- {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
+ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
+ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
+ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
+ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
};
#endif
@@ -168,14 +168,14 @@ static char irq_tab[][5] __initdata = {
* _________________________________________________________________________
*
* DESCRIPTION: Perform platform specific device initialization at
- * pci_enable_device() time.
- * None are needed for the MSP7120 PCI Controller.
+ * pci_enable_device() time.
+ * None are needed for the MSP7120 PCI Controller.
*
- * INPUTS: dev - structure describing the PCI device
+ * INPUTS: dev - structure describing the PCI device
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL
+ * RETURNS: PCIBIOS_SUCCESSFUL
*
****************************************************************************/
int pcibios_plat_dev_init(struct pci_dev *dev)
@@ -190,16 +190,16 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
*
* DESCRIPTION: Perform board supplied PCI IRQ mapping routine.
*
- * INPUTS: dev - unused
- * slot - PCI slot. Identified by which bit of the AD[] bus
- * drives the IDSEL line. AD[10] is 0, AD[31] is
- * slot 21.
- * pin - numbered using the scheme of the PCI_INTERRUPT_PIN
- * field of the config header.
+ * INPUTS: dev - unused
+ * slot - PCI slot. Identified by which bit of the AD[] bus
+ * drives the IDSEL line. AD[10] is 0, AD[31] is
+ * slot 21.
+ * pin - numbered using the scheme of the PCI_INTERRUPT_PIN
+ * field of the config header.
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: IRQ number
+ * RETURNS: IRQ number
*
****************************************************************************/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/mips/pci/fixup-pnx8550.c b/arch/mips/pci/fixup-pnx8550.c
deleted file mode 100644
index 96857ac63bf5..000000000000
--- a/arch/mips/pci/fixup-pnx8550.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Philips PNX8550 pci fixups.
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddealley.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/int.h>
-
-
-#undef DEBUG
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-extern char pnx8550_irq_tab[][5];
-
-void __init pcibios_fixup_resources(struct pci_dev *dev)
-{
- /* no need to fixup IO resources */
-}
-
-void __init pcibios_fixup(void)
-{
- /* nothing to do here */
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return pnx8550_irq_tab[slot][pin];
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c
index 5c8a79bb2661..f67ebeeb4200 100644
--- a/arch/mips/pci/fixup-sni.c
+++ b/arch/mips/pci/fixup-sni.c
@@ -41,12 +41,12 @@
* Logic CL-GD5434 VGA is device 3.
*/
static char irq_tab_rm200[8][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
- { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
- { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
@@ -58,20 +58,20 @@ static char irq_tab_rm200[8][5] __initdata = {
* The VGA card is optional for RM300 systems.
*/
static char irq_tab_rm300d[8][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 1 */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
- { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
};
static char irq_tab_rm300e[5][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 1 */
@@ -97,30 +97,30 @@ static char irq_tab_rm300e[5][5] __initdata = {
#define INTD PCIT_IRQ_INTD
static char irq_tab_pcit[13][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */
{ SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */
- { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
- { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
- { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
- { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
- { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
- { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
+ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
};
static char irq_tab_pcit_cplus[13][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
- { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
- { 0, 0, 0, 0, 0 }, /* PCI-EISA */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
- { 0, INTB, INTC, INTD, INTA }, /* fixup */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
+ { 0, 0, 0, 0, 0 }, /* PCI-EISA */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* fixup */
};
static inline int is_rm300_revd(void)
@@ -146,18 +146,18 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
return irq_tab_pcit_cplus[slot][pin];
case SNI_BRD_PCI_TOWER:
- return irq_tab_pcit[slot][pin];
+ return irq_tab_pcit[slot][pin];
case SNI_BRD_PCI_MTOWER:
- if (is_rm300_revd())
- return irq_tab_rm300d[slot][pin];
- /* fall through */
+ if (is_rm300_revd())
+ return irq_tab_rm300d[slot][pin];
+ /* fall through */
case SNI_BRD_PCI_DESKTOP:
- return irq_tab_rm200[slot][pin];
+ return irq_tab_rm200[slot][pin];
case SNI_BRD_PCI_MTOWER_CPLUS:
- return irq_tab_rm300e[slot][pin];
+ return irq_tab_rm300e[slot][pin];
}
return 0;
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
index 8084b17d4406..d0b0083fbd27 100644
--- a/arch/mips/pci/fixup-tb0219.c
+++ b/arch/mips/pci/fixup-tb0219.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
*
- * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
+ * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
* Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
index 2fe29db43725..8c5039ed75d7 100644
--- a/arch/mips/pci/fixup-tb0287.c
+++ b/arch/mips/pci/fixup-tb0287.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-wrppmc.c b/arch/mips/pci/fixup-wrppmc.c
index 3d277549d5df..29737edd121f 100644
--- a/arch/mips/pci/fixup-wrppmc.c
+++ b/arch/mips/pci/fixup-wrppmc.c
@@ -20,7 +20,7 @@
#define PCI_SLOT_MAXNR 32 /* Each PCI bus has 32 physical slots */
static char pci_irq_tab[PCI_SLOT_MAXNR][5] __initdata = {
- /* 0 INTA INTB INTC INTD */
+ /* 0 INTA INTB INTC INTD */
[0] = {0, 0, 0, 0, 0}, /* Slot 0: GT64120 PCI bridge */
[6] = {0, WRPPMC_PCI_INTA_IRQ, 0, 0, 0},
};
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 4a156629e958..6144bb337e44 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -174,8 +174,8 @@ static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
}
struct pci_ops bcm63xx_pci_ops = {
- .read = bcm63xx_pci_read,
- .write = bcm63xx_pci_write
+ .read = bcm63xx_pci_read,
+ .write = bcm63xx_pci_write
};
#ifdef CONFIG_CARDBUS
@@ -370,8 +370,8 @@ static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn,
return fake_cb_bridge_read(where, size, val);
}
- /* a configuration cycle for the device behind the cardbus
- * bridge is actually done as a type 0 cycle on the primary
+ /* a configuration cycle for the device behind the cardbus
+ * bridge is actually done as a type 0 cycle on the primary
* bus. This means that only one device can be on the cardbus
* bus */
if (fake_cb_bridge_regs.bus_assigned &&
@@ -403,8 +403,8 @@ static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn,
}
struct pci_ops bcm63xx_cb_ops = {
- .read = bcm63xx_cb_read,
- .write = bcm63xx_cb_write,
+ .read = bcm63xx_cb_read,
+ .write = bcm63xx_cb_write,
};
/*
@@ -523,6 +523,6 @@ static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
struct pci_ops bcm63xx_pcie_ops = {
- .read = bcm63xx_pcie_read,
- .write = bcm63xx_pcie_write
+ .read = bcm63xx_pcie_read,
+ .write = bcm63xx_pcie_write
};
diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c
index 1b3e03f20c54..830352e3aeda 100644
--- a/arch/mips/pci/ops-bonito64.c
+++ b/arch/mips/pci/ops-bonito64.c
@@ -26,7 +26,7 @@
#include <asm/mips-boards/bonito64.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
@@ -137,7 +137,7 @@ static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return -1;
if (size == 1)
diff --git a/arch/mips/pci/ops-gt64xxx_pci0.c b/arch/mips/pci/ops-gt64xxx_pci0.c
index 3d896c5f413f..effcbda9f528 100644
--- a/arch/mips/pci/ops-gt64xxx_pci0.c
+++ b/arch/mips/pci/ops-gt64xxx_pci0.c
@@ -23,21 +23,21 @@
#include <asm/gt64120.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -50,7 +50,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
/* Clear cause register bits */
GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
- GT_INTRCAUSE_TARABORT0_BIT));
+ GT_INTRCAUSE_TARABORT0_BIT));
/* Setup address */
GT_WRITE(GT_PCI0_CFGADDR_OFS,
@@ -87,7 +87,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
/* Clear bits */
GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
- GT_INTRCAUSE_TARABORT0_BIT));
+ GT_INTRCAUSE_TARABORT0_BIT));
return -1;
}
@@ -106,7 +106,7 @@ static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
u32 data = 0;
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
if (size == 1)
@@ -128,7 +128,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
- devfn, where, &data))
+ devfn, where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
if (size == 1)
@@ -140,7 +140,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
}
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
- where, &data))
+ where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
index 1f2afb55cc71..16e7c2526d77 100644
--- a/arch/mips/pci/ops-lantiq.c
+++ b/arch/mips/pci/ops-lantiq.c
@@ -23,7 +23,7 @@
#define LTQ_PCI_CFG_DEVNUM_SHF 11
#define LTQ_PCI_CFG_FUNNUM_SHF 8
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index afd221122d22..98254afa0287 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -24,7 +24,7 @@
#include <cs5536/cs5536.h>
#endif
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) \
diff --git a/arch/mips/pci/ops-msc.c b/arch/mips/pci/ops-msc.c
index 5d9fbb0f4670..92a8543361bb 100644
--- a/arch/mips/pci/ops-msc.c
+++ b/arch/mips/pci/ops-msc.c
@@ -1,8 +1,8 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
- * Maciej W. Rozycki <macro@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can distribute it and/or modify it
@@ -28,21 +28,21 @@
#include <asm/mips-boards/msc01_pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
static int msc_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -97,7 +97,7 @@ static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_BAD_REGISTER_NUMBER;
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
- &data))
+ &data))
return -1;
if (size == 1)
@@ -124,7 +124,7 @@ static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return -1;
if (size == 1)
diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c
index 99929cf88419..499e35c3eb35 100644
--- a/arch/mips/pci/ops-nile4.c
+++ b/arch/mips/pci/ops-nile4.c
@@ -6,7 +6,7 @@
#include <asm/lasat/lasat.h>
#include <asm/nile4.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define LO(reg) (reg / 4)
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 389bf669d56e..d0b6f8399b07 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -9,8 +9,8 @@
* Much of the code is derived from the original DDB5074 port by
* Geert Uytterhoeven <geert@sonycom.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -57,18 +57,18 @@ static void pci_proc_init(void);
* _________________________________________________________________________
*
* DESCRIPTION: Prints the count of how many times each PCI
- * interrupt has asserted. Can be invoked by the
- * /proc filesystem.
+ * interrupt has asserted. Can be invoked by the
+ * /proc filesystem.
*
- * INPUTS: page - part of STDOUT calculation
- * off - part of STDOUT calculation
- * count - part of STDOUT calculation
- * data - unused
+ * INPUTS: page - part of STDOUT calculation
+ * off - part of STDOUT calculation
+ * count - part of STDOUT calculation
+ * data - unused
*
- * OUTPUTS: start - new start location
- * eof - end of file pointer
+ * OUTPUTS: start - new start location
+ * eof - end of file pointer
*
- * RETURNS: len - STDOUT length
+ * RETURNS: len - STDOUT length
*
****************************************************************************/
static int read_msp_pci_counts(char *page, char **start, off_t off,
@@ -106,21 +106,21 @@ static int read_msp_pci_counts(char *page, char **start, off_t off,
* _________________________________________________________________________
*
* DESCRIPTION: Generates a configuration write cycle for debug purposes.
- * The IDSEL line asserted and location and data written are
- * immaterial. Just want to be able to prove that a
- * configuration write can be correctly generated on the
- * PCI bus. Intent is that this function by invocable from
- * the /proc filesystem.
+ * The IDSEL line asserted and location and data written are
+ * immaterial. Just want to be able to prove that a
+ * configuration write can be correctly generated on the
+ * PCI bus. Intent is that this function be invocable from
+ * the /proc filesystem.
*
- * INPUTS: page - part of STDOUT calculation
- * off - part of STDOUT calculation
- * count - part of STDOUT calculation
- * data - unused
+ * INPUTS: page - part of STDOUT calculation
+ * off - part of STDOUT calculation
+ * count - part of STDOUT calculation
+ * data - unused
*
- * OUTPUTS: start - new start location
- * eof - end of file pointer
+ * OUTPUTS: start - new start location
+ * eof - end of file pointer
*
- * RETURNS: len - STDOUT length
+ * RETURNS: len - STDOUT length
*
****************************************************************************/
static int gen_pci_cfg_wr(char *page, char **start, off_t off,
@@ -190,11 +190,11 @@ static int gen_pci_cfg_wr(char *page, char **start, off_t off,
*
* DESCRIPTION: Create entries in the /proc filesystem for debug access.
*
- * INPUTS: none
+ * INPUTS: none
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: none
+ * RETURNS: none
*
****************************************************************************/
static void pci_proc_init(void)
@@ -214,44 +214,44 @@ static DEFINE_SPINLOCK(bpci_lock);
* _________________________________________________________________________
*
* DESCRIPTION: Defines the address range that pciauto() will use to
- * assign to the I/O BARs of PCI devices.
- *
- * Use the start and end addresses of the MSP7120 PCI Host
- * Controller I/O space, in the form that they appear on the
- * PCI bus AFTER MSP7120 has performed address translation.
- *
- * For I/O accesses, MSP7120 ignores OATRAN and maps I/O
- * accesses into the bottom 0xFFF region of address space,
- * so that is the range to put into the pci_io_resource
- * struct.
- *
- * In MSP4200, the start address was 0x04 instead of the
- * expected 0x00. Will just assume there was a good reason
- * for this!
- *
- * NOTES: Linux, by default, will assign I/O space to the lowest
- * region of address space. Since MSP7120 and Linux,
- * by default, have no offset in between how they map, the
- * io_offset element of pci_controller struct should be set
- * to zero.
+ * assign to the I/O BARs of PCI devices.
+ *
+ * Use the start and end addresses of the MSP7120 PCI Host
+ * Controller I/O space, in the form that they appear on the
+ * PCI bus AFTER MSP7120 has performed address translation.
+ *
+ * For I/O accesses, MSP7120 ignores OATRAN and maps I/O
+ * accesses into the bottom 0xFFF region of address space,
+ * so that is the range to put into the pci_io_resource
+ * struct.
+ *
+ * In MSP4200, the start address was 0x04 instead of the
+ * expected 0x00. Will just assume there was a good reason
+ * for this!
+ *
+ * NOTES: Linux, by default, will assign I/O space to the lowest
+ * region of address space. Since MSP7120 and Linux,
+ * by default, have no offset in between how they map, the
+ * io_offset element of pci_controller struct should be set
+ * to zero.
* ELEMENTS:
- * name - String used for a meaningful name.
+ * name - String used for a meaningful name.
*
- * start - Start address of MSP7120's I/O space, as MSP7120 presents
- * the address on the PCI bus.
+ * start - Start address of MSP7120's I/O space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * end - End address of MSP7120's I/O space, as MSP7120 presents
- * the address on the PCI bus.
+ * end - End address of MSP7120's I/O space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * flags - Attributes indicating the type of resource. In this case,
- * indicate I/O space.
+ * flags - Attributes indicating the type of resource. In this case,
+ * indicate I/O space.
*
****************************************************************************/
static struct resource pci_io_resource = {
.name = "pci IO space",
.start = 0x04,
.end = 0x0FFF,
- .flags = IORESOURCE_IO /* I/O space */
+ .flags = IORESOURCE_IO /* I/O space */
};
/*****************************************************************************
@@ -260,26 +260,26 @@ static struct resource pci_io_resource = {
* _________________________________________________________________________
*
* DESCRIPTION: Defines the address range that pciauto() will use to
- * assign to the memory BARs of PCI devices.
+ * assign to the memory BARs of PCI devices.
*
- * The .start and .end values are dependent upon how address
- * translation is performed by the OATRAN regiser.
+ * The .start and .end values are dependent upon how address
+ * translation is performed by the OATRAN register.
*
- * The values to use for .start and .end are the values
- * in the form they appear on the PCI bus AFTER MSP7120 has
- * performed OATRAN address translation.
+ * The values to use for .start and .end are the values
+ * in the form they appear on the PCI bus AFTER MSP7120 has
+ * performed OATRAN address translation.
*
* ELEMENTS:
- * name - String used for a meaningful name.
+ * name - String used for a meaningful name.
*
- * start - Start address of MSP7120's memory space, as MSP7120 presents
- * the address on the PCI bus.
+ * start - Start address of MSP7120's memory space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * end - End address of MSP7120's memory space, as MSP7120 presents
- * the address on the PCI bus.
+ * end - End address of MSP7120's memory space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * flags - Attributes indicating the type of resource. In this case,
- * indicate memory space.
+ * flags - Attributes indicating the type of resource. In this case,
+ * indicate memory space.
*
****************************************************************************/
static struct resource pci_mem_resource = {
@@ -295,17 +295,17 @@ static struct resource pci_mem_resource = {
* _________________________________________________________________________
*
* DESCRIPTION: PCI status interrupt handler. Updates the count of how
- * many times each status bit has been set, then clears
- * the status bits. If the appropriate macros are defined,
- * these counts can be viewed via the /proc filesystem.
+ * many times each status bit has been set, then clears
+ * the status bits. If the appropriate macros are defined,
+ * these counts can be viewed via the /proc filesystem.
*
- * INPUTS: irq - unused
- * dev_id - unused
- * pt_regs - unused
+ * INPUTS: irq - unused
+ * dev_id - unused
+ * pt_regs - unused
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
*
****************************************************************************/
static irqreturn_t bpci_interrupt(int irq, void *dev_id)
@@ -335,41 +335,41 @@ static irqreturn_t bpci_interrupt(int irq, void *dev_id)
* _________________________________________________________________________
*
* DESCRIPTION: Performs a PCI configuration access (rd or wr), then
- * checks that the access succeeded by querying MSP7120's
- * PCI status bits.
+ * checks that the access succeeded by querying MSP7120's
+ * PCI status bits.
*
* INPUTS:
- * access_type - kind of PCI configuration cycle to perform
- * (read or write). Legal values are
- * PCI_ACCESS_WRITE and PCI_ACCESS_READ.
- *
- * bus - pointer to the bus number of the device to
- * be targeted for the configuration cycle.
- * The only element of the pci_bus structure
- * used is bus->number. This argument determines
- * if the configuration access will be Type 0 or
- * Type 1. Since MSP7120 assumes itself to be the
- * PCI Host, any non-zero bus->number generates
- * a Type 1 access.
- *
- * devfn - this is an 8-bit field. The lower three bits
- * specify the function number of the device to
- * be targeted for the configuration cycle, with
- * all three-bit combinations being legal. The
- * upper five bits specify the device number,
- * with legal values being 10 to 31.
- *
- * where - address within the Configuration Header
- * space to access.
- *
- * data - for write accesses, contains the data to
- * write.
+ * access_type - kind of PCI configuration cycle to perform
+ * (read or write). Legal values are
+ * PCI_ACCESS_WRITE and PCI_ACCESS_READ.
+ *
+ * bus - pointer to the bus number of the device to
+ * be targeted for the configuration cycle.
+ * The only element of the pci_bus structure
+ * used is bus->number. This argument determines
+ * if the configuration access will be Type 0 or
+ * Type 1. Since MSP7120 assumes itself to be the
+ * PCI Host, any non-zero bus->number generates
+ * a Type 1 access.
+ *
+ * devfn - this is an 8-bit field. The lower three bits
+ * specify the function number of the device to
+ * be targeted for the configuration cycle, with
+ * all three-bit combinations being legal. The
+ * upper five bits specify the device number,
+ * with legal values being 10 to 31.
+ *
+ * where - address within the Configuration Header
+ * space to access.
+ *
+ * data - for write accesses, contains the data to
+ * write.
*
* OUTPUTS:
- * data - for read accesses, contains the value read.
+ * data - for read accesses, contains the value read.
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - access failure
*
****************************************************************************/
int msp_pcibios_config_access(unsigned char access_type,
@@ -429,7 +429,7 @@ int msp_pcibios_config_access(unsigned char access_type,
* for this Block Copy, called Block Copy 0 Fault (BC0F) and
* Block Copy 1 Fault (BC1F). MSP4200 and MSP7120 don't have this
* dedicated Block Copy block, so these two interrupts are now
- * marked reserved. In case the Block Copy is resurrected in a
+ * marked reserved. In case the Block Copy is resurrected in a
* future design, maintain the code that treats these two interrupts
* specially.
*
@@ -439,7 +439,7 @@ int msp_pcibios_config_access(unsigned char access_type,
preg->if_status = ~(BPCI_IFSTATUS_BC0F | BPCI_IFSTATUS_BC1F);
/* Setup address that is to appear on PCI bus */
- preg->config_addr = BPCI_CFGADDR_ENABLE |
+ preg->config_addr = BPCI_CFGADDR_ENABLE |
(bus_num << BPCI_CFGADDR_BUSNUM_SHF) |
(dev_fn << BPCI_CFGADDR_FUNCTNUM_SHF) |
(where & 0xFC);
@@ -494,21 +494,21 @@ int msp_pcibios_config_access(unsigned char access_type,
* _________________________________________________________________________
*
* DESCRIPTION: Read a byte from PCI configuration address space.
- * Since the hardware can't address 8 bit chunks
- * directly, read a 32-bit chunk, then mask off extraneous
- * bits.
+ * Since the hardware can't address 8 bit chunks
+ * directly, read a 32-bit chunk, then mask off extraneous
+ * bits.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -541,22 +541,22 @@ msp_pcibios_read_config_byte(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Read a word (16 bits) from PCI configuration address space.
- * Since the hardware can't address 16 bit chunks
- * directly, read a 32-bit chunk, then mask off extraneous
- * bits.
+ * Since the hardware can't address 16 bit chunks
+ * directly, read a 32-bit chunk, then mask off extraneous
+ * bits.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -600,20 +600,20 @@ msp_pcibios_read_config_word(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Read a double word (32 bits) from PCI configuration
- * address space.
+ * address space.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -652,21 +652,21 @@ msp_pcibios_read_config_dword(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a byte to PCI configuration address space.
- * Since the hardware can't address 8 bit chunks
- * directly, a read-modify-write is performed.
+ * Since the hardware can't address 8 bit chunks
+ * directly, a read-modify-write is performed.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - write access failure
*
****************************************************************************/
static int
@@ -700,22 +700,22 @@ msp_pcibios_write_config_byte(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a word (16-bits) to PCI configuration address space.
- * Since the hardware can't address 16 bit chunks
- * directly, a read-modify-write is performed.
+ * Since the hardware can't address 16 bit chunks
+ * directly, a read-modify-write is performed.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - write access failure
*
****************************************************************************/
static int
@@ -753,21 +753,21 @@ msp_pcibios_write_config_word(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a double word (32-bits) to PCI configuration address
- * space.
+ * space.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - write access failure
*
****************************************************************************/
static int
@@ -794,22 +794,22 @@ msp_pcibios_write_config_dword(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Interface the PCI configuration read request with
- * the appropriate function, based on how many bytes
- * the read request is.
+ * the appropriate function, based on how many bytes
+ * the read request is.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * size - in units of bytes, should be 1, 2, or 4.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * size - in units of bytes, should be 1, 2, or 4.
*
- * OUTPUTS val - value read, with any extraneous bytes masked
- * to zero.
+ * OUTPUTS val - value read, with any extraneous bytes masked
+ * to zero.
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - failure
*
****************************************************************************/
int
@@ -845,22 +845,22 @@ msp_pcibios_read_config(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Interface the PCI configuration write request with
- * the appropriate function, based on how many bytes
- * the read request is.
+ * the appropriate function, based on how many bytes
+ * the write request is.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * size - in units of bytes, should be 1, 2, or 4.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * size - in units of bytes, should be 1, 2, or 4.
+ * val - value to write
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - failure
*
****************************************************************************/
int
@@ -897,11 +897,11 @@ msp_pcibios_write_config(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: structure to abstract the hardware specific PCI
- * configuration accesses.
+ * configuration accesses.
*
* ELEMENTS:
- * read - function for Linux to generate PCI Configuration reads.
- * write - function for Linux to generate PCI Configuration writes.
+ * read - function for Linux to generate PCI Configuration reads.
+ * write - function for Linux to generate PCI Configuration writes.
*
****************************************************************************/
struct pci_ops msp_pci_ops = {
@@ -917,27 +917,27 @@ struct pci_ops msp_pci_ops = {
* Describes the attributes of the MSP7120 PCI Host Controller
*
* ELEMENTS:
- * pci_ops - abstracts the hardware specific PCI configuration
- * accesses.
+ * pci_ops - abstracts the hardware specific PCI configuration
+ * accesses.
*
* mem_resource - address range pciauto() uses to assign to PCI device
- * memory BARs.
+ * memory BARs.
*
* mem_offset - offset between how MSP7120 outbound PCI memory
- * transaction addresses appear on the PCI bus and how Linux
- * wants to configure memory BARs of the PCI devices.
- * MSP7120 does nothing funky, so just set to zero.
+ * transaction addresses appear on the PCI bus and how Linux
+ * wants to configure memory BARs of the PCI devices.
+ * MSP7120 does nothing funky, so just set to zero.
*
* io_resource - address range pciauto() uses to assign to PCI device
- * I/O BARs.
+ * I/O BARs.
*
- * io_offset - offset between how MSP7120 outbound PCI I/O
- * transaction addresses appear on the PCI bus and how
- * Linux defaults to configure I/O BARs of the PCI devices.
- * MSP7120 maps outbound I/O accesses into the bottom
- * bottom 4K of PCI address space (and ignores OATRAN).
- * Since the Linux default is to configure I/O BARs to the
- * bottom 4K, no special offset is needed. Just set to zero.
+ * io_offset - offset between how MSP7120 outbound PCI I/O
+ * transaction addresses appear on the PCI bus and how
+ * Linux defaults to configure I/O BARs of the PCI devices.
+ * MSP7120 maps outbound I/O accesses into the bottom
+ * 4K of PCI address space (and ignores OATRAN).
+ * Since the Linux default is to configure I/O BARs to the
+ * bottom 4K, no special offset is needed. Just set to zero.
*
****************************************************************************/
static struct pci_controller msp_pci_controller = {
@@ -955,7 +955,7 @@ static struct pci_controller msp_pci_controller = {
* _________________________________________________________________________
*
* DESCRIPTION: Initialize the PCI Host Controller and register it with
- * Linux so Linux can seize control of the PCI bus.
+ * Linux so Linux can seize control of the PCI bus.
*
****************************************************************************/
void __init msp_pci_init(void)
@@ -979,7 +979,7 @@ void __init msp_pci_init(void)
*(unsigned long *)QFLUSH_REG_1 = 3;
/* Configure PCI Host Controller. */
- preg->if_status = ~0; /* Clear cause register bits */
+ preg->if_status = ~0; /* Clear cause register bits */
preg->config_addr = 0; /* Clear config access */
preg->oatran = MSP_PCI_OATRAN; /* PCI outbound addr translation */
preg->if_mask = 0xF8BF87C0; /* Enable all PCI status interrupts */
diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c
deleted file mode 100644
index 1e6213fa7bdb..000000000000
--- a/arch/mips/pci/ops-pnx8550.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- * Based on:
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/glb.h>
-
-static inline void clear_status(void)
-{
- unsigned long pci_stat;
-
- pci_stat = inl(PCI_BASE | PCI_GPPM_STATUS);
- outl(pci_stat, PCI_BASE | PCI_GPPM_ICLR);
-}
-
-static inline unsigned int
-calc_cfg_addr(struct pci_bus *bus, unsigned int devfn, int where)
-{
- unsigned int addr;
-
- addr = ((bus->number > 0) ? (((bus->number & 0xff) << PCI_CFG_BUS_SHIFT) | 1) : 0);
- addr |= ((devfn & 0xff) << PCI_CFG_FUNC_SHIFT) | (where & 0xfc);
-
- return addr;
-}
-
-static int
-config_access(unsigned int pci_cmd, struct pci_bus *bus, unsigned int devfn, int where, unsigned int pci_mode, unsigned int *val)
-{
- unsigned int flags;
- unsigned long loops = 0;
- unsigned long ioaddr = calc_cfg_addr(bus, devfn, where);
-
- local_irq_save(flags);
- /*Clear pending interrupt status */
- if (inl(PCI_BASE | PCI_GPPM_STATUS)) {
- clear_status();
- while (!(inl(PCI_BASE | PCI_GPPM_STATUS) == 0)) ;
- }
-
- outl(ioaddr, PCI_BASE | PCI_GPPM_ADDR);
-
- if ((pci_cmd == PCI_CMD_IOW) || (pci_cmd == PCI_CMD_CONFIG_WRITE))
- outl(*val, PCI_BASE | PCI_GPPM_WDAT);
-
- outl(INIT_PCI_CYCLE | pci_cmd | (pci_mode & PCI_BYTE_ENABLE_MASK),
- PCI_BASE | PCI_GPPM_CTRL);
-
- loops =
- ((loops_per_jiffy *
- PCI_IO_JIFFIES_TIMEOUT) >> (PCI_IO_JIFFIES_SHIFT));
- while (1) {
- if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_DONE) {
- if ((pci_cmd == PCI_CMD_IOR) ||
- (pci_cmd == PCI_CMD_CONFIG_READ))
- *val = inl(PCI_BASE | PCI_GPPM_RDAT);
- clear_status();
- local_irq_restore(flags);
- return PCIBIOS_SUCCESSFUL;
- } else if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_R_MABORT) {
- break;
- }
-
- loops--;
- if (loops == 0) {
- printk("%s : Arbiter Locked.\n", __func__);
- }
- }
-
- clear_status();
- if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_IOW)) {
- printk("%s timeout (GPPM_CTRL=%X) ioaddr %lX pci_cmd %X\n",
- __func__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr,
- pci_cmd);
- }
-
- if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_CONFIG_READ))
- *val = 0xffffffff;
- local_irq_restore(flags);
- return PCIBIOS_DEVICE_NOT_FOUND;
-}
-
-/*
- * We can't address 8 and 16 bit words directly. Instead we have to
- * read/write a 32bit word and mask/modify the data we actually want.
- */
-static int
-read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val)
-{
- unsigned int data = 0;
- int err;
-
- if (bus == NULL)
- return -1;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
- switch (where & 0x03) {
- case 0:
- *val = (unsigned char)(data & 0x000000ff);
- break;
- case 1:
- *val = (unsigned char)((data & 0x0000ff00) >> 8);
- break;
- case 2:
- *val = (unsigned char)((data & 0x00ff0000) >> 16);
- break;
- case 3:
- *val = (unsigned char)((data & 0xff000000) >> 24);
- break;
- }
-
- return err;
-}
-
-static int
-read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val)
-{
- unsigned int data = 0;
- int err;
-
- if (bus == NULL)
- return -1;
-
- if (where & 0x01)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(3 << (where & 3)), &data);
- switch (where & 0x02) {
- case 0:
- *val = (unsigned short)(data & 0x0000ffff);
- break;
- case 2:
- *val = (unsigned short)((data & 0xffff0000) >> 16);
- break;
- }
-
- return err;
-}
-
-static int
-read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val)
-{
- int err;
- if (bus == NULL)
- return -1;
-
- if (where & 0x03)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, 0, val);
-
- return err;
-}
-
-static int
-write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
-{
- unsigned int data = (unsigned int)val;
- int err;
-
- if (bus == NULL)
- return -1;
-
- switch (where & 0x03) {
- case 1:
- data = (data << 8);
- break;
- case 2:
- data = (data << 16);
- break;
- case 3:
- data = (data << 24);
- break;
- default:
- break;
- }
-
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
-
- return err;
-}
-
-static int
-write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val)
-{
- unsigned int data = (unsigned int)val;
- int err;
-
- if (bus == NULL)
- return -1;
-
- if (where & 0x01)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- switch (where & 0x02) {
- case 2:
- data = (data << 16);
- break;
- default:
- break;
- }
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(3 << (where & 3)), &data);
-
- return err;
-}
-
-static int
-write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
-{
- int err;
- if (bus == NULL)
- return -1;
-
- if (where & 0x03)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, 0, &val);
-
- return err;
-}
-
-static int config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
-{
- switch (size) {
- case 1: {
- u8 _val;
- int rc = read_config_byte(bus, devfn, where, &_val);
- *val = _val;
- return rc;
- }
- case 2: {
- u16 _val;
- int rc = read_config_word(bus, devfn, where, &_val);
- *val = _val;
- return rc;
- }
- default:
- return read_config_dword(bus, devfn, where, val);
- }
-}
-
-static int config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
-{
- switch (size) {
- case 1:
- return write_config_byte(bus, devfn, where, (u8) val);
- case 2:
- return write_config_word(bus, devfn, where, (u16) val);
- default:
- return write_config_dword(bus, devfn, where, val);
- }
-}
-
-struct pci_ops pnx8550_pci_ops = {
- config_read,
- config_write
-};
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index d1f8fa210ca1..7c7182e2350a 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -35,7 +35,7 @@
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
diff --git a/arch/mips/pci/ops-sni.c b/arch/mips/pci/ops-sni.c
index 97ed25b92edf..35daa7fe6571 100644
--- a/arch/mips/pci/ops-sni.c
+++ b/arch/mips/pci/ops-sni.c
@@ -14,8 +14,8 @@
/*
* It seems that on the RM200 only lower 3 bits of the 5 bit PCI device
- * address are decoded. We therefore manually have to reject attempts at
- * reading outside this range. Being on the paranoid side we only do this
+ * address are decoded. We therefore manually have to reject attempts at
+ * reading outside this range. Being on the paranoid side we only do this
* test for bus 0 and hope forwarding and decoding work properly for any
* subordinated busses.
*
@@ -31,8 +31,8 @@ static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
*(volatile u32 *)PCIMT_CONFIG_ADDRESS =
((busno & 0xff) << 16) |
- ((devfn & 0xff) << 8) |
- (reg & 0xfc);
+ ((devfn & 0xff) << 8) |
+ (reg & 0xfc);
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
index 0d69d6f4ea44..3d5df514d024 100644
--- a/arch/mips/pci/ops-tx4927.c
+++ b/arch/mips/pci/ops-tx4927.c
@@ -2,16 +2,16 @@
* Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
*
* Based on linux/arch/mips/pci/ops-tx4938.c,
- * linux/arch/mips/pci/fixup-rbtx4938.c,
- * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * linux/arch/mips/pci/fixup-rbtx4938.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* 2003-2005 (c) MontaVista Software, Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
index 28962a7c6606..551128c7d927 100644
--- a/arch/mips/pci/ops-vr41xx.c
+++ b/arch/mips/pci/ops-vr41xx.c
@@ -33,7 +33,7 @@
#define PCICONFAREG (void __iomem *)KSEG1ADDR(0x0f000c18)
static inline int set_pci_configuration_address(unsigned char number,
- unsigned int devfn, int where)
+ unsigned int devfn, int where)
{
if (number == 0) {
/*
@@ -59,7 +59,7 @@ static inline int set_pci_configuration_address(unsigned char number,
}
static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t *val)
+ int size, uint32_t *val)
{
uint32_t data;
@@ -87,7 +87,7 @@ static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
}
static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t val)
+ int size, uint32_t val)
{
uint32_t data;
int shift;
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index c4ea6cc55f94..38a80c83fd67 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -29,7 +29,7 @@
#define PCI_ACCESS_WRITE 1
struct alchemy_pci_context {
- struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
+ struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
void __iomem *regs; /* ctrl base */
/* tools for wired entry for config space access */
unsigned long last_elo0;
@@ -381,7 +381,7 @@ static int alchemy_pci_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
- dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
+ dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
ret = -ENODEV;
goto out1;
}
@@ -482,7 +482,7 @@ out:
static struct platform_driver alchemy_pcictl_driver = {
.probe = alchemy_pci_probe,
- .driver = {
+ .driver = {
.name = "alchemy-pci",
.owner = THIS_MODULE,
},
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 6eaa4f2d0e38..412ec025cf55 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -18,26 +18,11 @@
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR71XX_PCI_MEM_BASE 0x10000000
-#define AR71XX_PCI_MEM_SIZE 0x07000000
-
-#define AR71XX_PCI_WIN0_OFFS 0x10000000
-#define AR71XX_PCI_WIN1_OFFS 0x11000000
-#define AR71XX_PCI_WIN2_OFFS 0x12000000
-#define AR71XX_PCI_WIN3_OFFS 0x13000000
-#define AR71XX_PCI_WIN4_OFFS 0x14000000
-#define AR71XX_PCI_WIN5_OFFS 0x15000000
-#define AR71XX_PCI_WIN6_OFFS 0x16000000
-#define AR71XX_PCI_WIN7_OFFS 0x07000000
-
-#define AR71XX_PCI_CFG_BASE \
- (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
-#define AR71XX_PCI_CFG_SIZE 0x100
#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
#define AR71XX_PCI_REG_CRP_WRDATA 0x04
@@ -63,8 +48,15 @@
#define AR71XX_PCI_IRQ_COUNT 5
-static DEFINE_SPINLOCK(ar71xx_pci_lock);
-static void __iomem *ar71xx_pcicfg_base;
+struct ar71xx_pci_controller {
+ void __iomem *cfg_base;
+ spinlock_t lock;
+ int irq;
+ int irq_base;
+ struct pci_controller pci_ctrl;
+ struct resource io_res;
+ struct resource mem_res;
+};
/* Byte lane enable bits */
static const u8 ar71xx_pci_ble_table[4][4] = {
@@ -107,9 +99,18 @@ static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
return ret;
}
-static int ar71xx_pci_check_error(int quiet)
+static inline struct ar71xx_pci_controller *
+pci_bus_to_ar71xx_controller(struct pci_bus *bus)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar71xx_pci_controller, pci_ctrl);
+}
+
+static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet)
+{
+ void __iomem *base = apc->cfg_base;
u32 pci_err;
u32 ahb_err;
@@ -144,9 +145,10 @@ static int ar71xx_pci_check_error(int quiet)
return !!(ahb_err | pci_err);
}
-static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc,
+ int where, int size, u32 value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ void __iomem *base = apc->cfg_base;
u32 ad_cbe;
value = value << (8 * (where & 3));
@@ -162,7 +164,8 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 cmd)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
u32 addr;
addr = ar71xx_pci_bus_addr(bus, devfn, where);
@@ -171,13 +174,14 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
__raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
base + AR71XX_PCI_REG_CFG_CBE);
- return ar71xx_pci_check_error(1);
+ return ar71xx_pci_check_error(apc, 1);
}
static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
unsigned long flags;
u32 data;
int err;
@@ -186,7 +190,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
ret = PCIBIOS_SUCCESSFUL;
data = ~0;
- spin_lock_irqsave(&ar71xx_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_READ);
@@ -195,7 +199,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
else
data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
- spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
*value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
@@ -205,7 +209,8 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
unsigned long flags;
int err;
int ret;
@@ -213,7 +218,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
value = value << (8 * (where & 3));
ret = PCIBIOS_SUCCESSFUL;
- spin_lock_irqsave(&ar71xx_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_WRITE);
@@ -222,7 +227,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
else
__raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
- spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return ret;
}
@@ -232,45 +237,28 @@ static struct pci_ops ar71xx_pci_ops = {
.write = ar71xx_pci_write_config,
};
-static struct resource ar71xx_pci_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ar71xx_pci_mem_resource = {
- .name = "PCI memory space",
- .start = AR71XX_PCI_MEM_BASE,
- .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM
-};
-
-static struct pci_controller ar71xx_pci_controller = {
- .pci_ops = &ar71xx_pci_ops,
- .mem_resource = &ar71xx_pci_mem_resource,
- .io_resource = &ar71xx_pci_io_resource,
-};
-
static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ struct ar71xx_pci_controller *apc;
void __iomem *base = ath79_reset_base;
u32 pending;
+ apc = irq_get_handler_data(irq);
+
pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
if (pending & AR71XX_PCI_INT_DEV0)
- generic_handle_irq(ATH79_PCI_IRQ(0));
+ generic_handle_irq(apc->irq_base + 0);
else if (pending & AR71XX_PCI_INT_DEV1)
- generic_handle_irq(ATH79_PCI_IRQ(1));
+ generic_handle_irq(apc->irq_base + 1);
else if (pending & AR71XX_PCI_INT_DEV2)
- generic_handle_irq(ATH79_PCI_IRQ(2));
+ generic_handle_irq(apc->irq_base + 2);
else if (pending & AR71XX_PCI_INT_CORE)
- generic_handle_irq(ATH79_PCI_IRQ(4));
+ generic_handle_irq(apc->irq_base + 4);
else
spurious_interrupt();
@@ -278,10 +266,14 @@ static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
static void ar71xx_pci_irq_unmask(struct irq_data *d)
{
- unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
void __iomem *base = ath79_reset_base;
u32 t;
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
@@ -291,10 +283,14 @@ static void ar71xx_pci_irq_unmask(struct irq_data *d)
static void ar71xx_pci_irq_mask(struct irq_data *d)
{
- unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
void __iomem *base = ath79_reset_base;
u32 t;
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
@@ -309,7 +305,7 @@ static struct irq_chip ar71xx_pci_irq_chip = {
.irq_mask_ack = ar71xx_pci_irq_mask,
};
-static __init void ar71xx_pci_irq_init(void)
+static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc)
{
void __iomem *base = ath79_reset_base;
int i;
@@ -319,15 +315,19 @@ static __init void ar71xx_pci_irq_init(void)
BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
- for (i = ATH79_PCI_IRQ_BASE;
- i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+ apc->irq_base = ATH79_PCI_IRQ_BASE;
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) {
irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
- irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+ irq_set_handler_data(apc->irq, apc);
+ irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
}
-static __init void ar71xx_pci_reset(void)
+static void ar71xx_pci_reset(void)
{
void __iomem *ddr_base = ath79_ddr_base;
@@ -349,27 +349,83 @@ static __init void ar71xx_pci_reset(void)
mdelay(100);
}
-__init int ar71xx_pcibios_init(void)
+static int ar71xx_pci_probe(struct platform_device *pdev)
{
+ struct ar71xx_pci_controller *apc;
+ struct resource *res;
u32 t;
- ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
- if (ar71xx_pcicfg_base == NULL)
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ spin_lock_init(&apc->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!apc->cfg_base)
return -ENOMEM;
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
ar71xx_pci_reset();
/* setup COMMAND register */
t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
| PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
- ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+ ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
/* clear bus errors */
- ar71xx_pci_check_error(1);
+ ar71xx_pci_check_error(apc, 1);
+
+ ar71xx_pci_irq_init(apc);
- ar71xx_pci_irq_init();
+ apc->pci_ctrl.pci_ops = &ar71xx_pci_ops;
+ apc->pci_ctrl.mem_resource = &apc->mem_res;
+ apc->pci_ctrl.io_resource = &apc->io_res;
- register_pci_controller(&ar71xx_pci_controller);
+ register_pci_controller(&apc->pci_ctrl);
return 0;
}
+
+static struct platform_driver ar71xx_pci_driver = {
+ .probe = ar71xx_pci_probe,
+ .driver = {
+ .name = "ar71xx-pci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar71xx_pci_init(void)
+{
+ return platform_driver_register(&ar71xx_pci_driver);
+}
+
+postcore_initcall(ar71xx_pci_init);
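
The ar71xx conversion above replaces the file-scope globals (ar71xx_pcicfg_base, ar71xx_pci_lock) with a per-instance ar71xx_pci_controller that embeds the generic struct pci_controller and recovers it from bus->sysdata with container_of(). A minimal, self-contained userspace sketch of that embedded-struct/container_of pattern follows; the struct and field names here are illustrative stand-ins, not the driver's own types.

#include <stdio.h>
#include <stddef.h>

/* container_of: recover the enclosing struct from a pointer to one member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic "hose" handed out to core code (stands in for struct pci_controller). */
struct hose {
	const char *name;
};

/* Driver-private controller that embeds the generic hose as a member. */
struct my_controller {
	int irq_base;
	struct hose hose;	/* core code only ever sees a pointer to this */
};

/* Given the embedded member back from core code, recover the private struct. */
static struct my_controller *to_my_controller(struct hose *h)
{
	return container_of(h, struct my_controller, hose);
}

int main(void)
{
	struct my_controller ctrl = { .irq_base = 40, .hose = { .name = "pci0" } };
	struct hose *h = &ctrl.hose;		/* what e.g. bus->sysdata would carry */
	struct my_controller *back = to_my_controller(h);

	printf("%s irq_base=%d\n", back->hose.name, back->irq_base);
	return back == &ctrl ? 0 : 1;		/* round-trip must land on the same object */
}
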
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index c11c75be2d7e..8a0700d448fe 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -9,19 +9,13 @@
* by the Free Software Foundation.
*/
+#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR724X_PCI_CFG_BASE 0x14000000
-#define AR724X_PCI_CFG_SIZE 0x1000
-#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
-#define AR724X_PCI_CTRL_SIZE 0x100
-
-#define AR724X_PCI_MEM_BASE 0x10000000
-#define AR724X_PCI_MEM_SIZE 0x04000000
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
@@ -35,38 +29,112 @@
#define AR7240_BAR0_WAR_VALUE 0xffff
-static DEFINE_SPINLOCK(ar724x_pci_lock);
-static void __iomem *ar724x_pci_devcfg_base;
-static void __iomem *ar724x_pci_ctrl_base;
+#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \
+ PCI_COMMAND_MASTER | \
+ PCI_COMMAND_INVALIDATE | \
+ PCI_COMMAND_PARITY | \
+ PCI_COMMAND_SERR | \
+ PCI_COMMAND_FAST_BACK)
+
+struct ar724x_pci_controller {
+ void __iomem *devcfg_base;
+ void __iomem *ctrl_base;
+ void __iomem *crp_base;
+
+ int irq;
+ int irq_base;
+
+ bool link_up;
+ bool bar0_is_cached;
+ u32 bar0_value;
-static u32 ar724x_pci_bar0_value;
-static bool ar724x_pci_bar0_is_cached;
-static bool ar724x_pci_link_up;
+ spinlock_t lock;
-static inline bool ar724x_pci_check_link(void)
+ struct pci_controller pci_controller;
+ struct resource io_res;
+ struct resource mem_res;
+};
+
+static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
{
u32 reset;
- reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+ reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET);
return reset & AR724X_PCI_RESET_LINK_UP;
}
+static inline struct ar724x_pci_controller *
+pci_bus_to_ar724x_controller(struct pci_bus *bus)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar724x_pci_controller, pci_controller);
+}
+
+static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
+ int where, int size, u32 value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ WARN_ON(where & (size - 1));
+
+ if (!apc->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = apc->crp_base;
+
+ spin_lock_irqsave(&apc->lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ spin_unlock_irqrestore(&apc->lock, flags);
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+ spin_unlock_irqrestore(&apc->lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
{
+ struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
- if (!ar724x_pci_link_up)
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
- base = ar724x_pci_devcfg_base;
+ base = apc->devcfg_base;
- spin_lock_irqsave(&ar724x_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -85,17 +153,17 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
case 4:
break;
default:
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
- ar724x_pci_bar0_is_cached) {
+ apc->bar0_is_cached) {
/* use the cached value */
- *value = ar724x_pci_bar0_value;
+ *value = apc->bar0_value;
} else {
*value = data;
}
@@ -106,12 +174,14 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t value)
{
+ struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
int s;
- if (!ar724x_pci_link_up)
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
@@ -129,18 +199,18 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
* BAR0 register in order to make the device memory
* accessible.
*/
- ar724x_pci_bar0_is_cached = true;
- ar724x_pci_bar0_value = value;
+ apc->bar0_is_cached = true;
+ apc->bar0_value = value;
value = AR7240_BAR0_WAR_VALUE;
} else {
- ar724x_pci_bar0_is_cached = false;
+ apc->bar0_is_cached = false;
}
}
- base = ar724x_pci_devcfg_base;
+ base = apc->devcfg_base;
- spin_lock_irqsave(&ar724x_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -158,7 +228,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
data = value;
break;
default:
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
@@ -166,7 +236,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -176,38 +246,20 @@ static struct pci_ops ar724x_pci_ops = {
.write = ar724x_pci_write,
};
-static struct resource ar724x_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ar724x_mem_resource = {
- .name = "PCI memory space",
- .start = AR724X_PCI_MEM_BASE,
- .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct pci_controller ar724x_pci_controller = {
- .pci_ops = &ar724x_pci_ops,
- .io_resource = &ar724x_io_resource,
- .mem_resource = &ar724x_mem_resource,
-};
-
static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
u32 pending;
- base = ar724x_pci_ctrl_base;
+ apc = irq_get_handler_data(irq);
+ base = apc->ctrl_base;
pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
__raw_readl(base + AR724X_PCI_REG_INT_MASK);
if (pending & AR724X_PCI_INT_DEV0)
- generic_handle_irq(ATH79_PCI_IRQ(0));
+ generic_handle_irq(apc->irq_base + 0);
else
spurious_interrupt();
@@ -215,13 +267,17 @@ static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
static void ar724x_pci_irq_unmask(struct irq_data *d)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
+ int offset;
u32 t;
- base = ar724x_pci_ctrl_base;
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
- switch (d->irq) {
- case ATH79_PCI_IRQ(0):
+ switch (offset) {
+ case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t | AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
@@ -232,13 +288,17 @@ static void ar724x_pci_irq_unmask(struct irq_data *d)
static void ar724x_pci_irq_mask(struct irq_data *d)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
+ int offset;
u32 t;
- base = ar724x_pci_ctrl_base;
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
- switch (d->irq) {
- case ATH79_PCI_IRQ(0):
+ switch (offset) {
+ case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t & ~AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
@@ -262,53 +322,123 @@ static struct irq_chip ar724x_pci_irq_chip = {
.irq_mask_ack = ar724x_pci_irq_mask,
};
-static void __init ar724x_pci_irq_init(int irq)
+static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
+ int id)
{
void __iomem *base;
int i;
- base = ar724x_pci_ctrl_base;
+ base = apc->ctrl_base;
__raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
__raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
- BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+ apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);
- for (i = ATH79_PCI_IRQ_BASE;
- i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) {
irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
- irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+ irq_set_handler_data(apc->irq, apc);
+ irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
}
-int __init ar724x_pcibios_init(int irq)
+static int ar724x_pci_probe(struct platform_device *pdev)
{
- int ret;
+ struct ar724x_pci_controller *apc;
+ struct resource *res;
+ int id;
- ret = -ENOMEM;
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
- ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
- AR724X_PCI_CFG_SIZE);
- if (ar724x_pci_devcfg_base == NULL)
- goto err;
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
- ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
- AR724X_PCI_CTRL_SIZE);
- if (ar724x_pci_ctrl_base == NULL)
- goto err_unmap_devcfg;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
+ if (!res)
+ return -EINVAL;
- ar724x_pci_link_up = ar724x_pci_check_link();
- if (!ar724x_pci_link_up)
- pr_warn("ar724x: PCIe link is down\n");
+ apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (apc->ctrl_base == NULL)
+ return -EBUSY;
- ar724x_pci_irq_init(irq);
- register_pci_controller(&ar724x_pci_controller);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+ if (!res)
+ return -EINVAL;
- return PCIBIOS_SUCCESSFUL;
+ apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!apc->devcfg_base)
+ return -EBUSY;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
+ if (!res)
+ return -EINVAL;
-err_unmap_devcfg:
- iounmap(ar724x_pci_devcfg_base);
-err:
- return ret;
+ apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (apc->crp_base == NULL)
+ return -EBUSY;
+
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ spin_lock_init(&apc->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ apc->pci_controller.pci_ops = &ar724x_pci_ops;
+ apc->pci_controller.io_resource = &apc->io_res;
+ apc->pci_controller.mem_resource = &apc->mem_res;
+
+ apc->link_up = ar724x_pci_check_link(apc);
+ if (!apc->link_up)
+ dev_warn(&pdev->dev, "PCIe link is down\n");
+
+ ar724x_pci_irq_init(apc, id);
+
+ ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);
+
+ register_pci_controller(&apc->pci_controller);
+
+ return 0;
}
+
+static struct platform_driver ar724x_pci_driver = {
+ .probe = ar724x_pci_probe,
+ .driver = {
+ .name = "ar724x-pci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar724x_pci_init(void)
+{
+ return platform_driver_register(&ar724x_pci_driver);
+}
+
+postcore_initcall(ar724x_pci_init);
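
ar724x_pci_local_write() above performs sub-word configuration writes by reading the aligned 32-bit word, clearing the targeted byte lanes and merging in the new value before writing the word back. Below is a compact sketch of that read-modify-write merge in isolation, detached from the register access and locking; the function and variable names are illustrative only.

#include <stdio.h>
#include <stdint.h>

/*
 * Merge a 1-, 2- or 4-byte value into a 32-bit word, where "where" is the
 * byte offset of the access.  Returns the updated word, or the old word
 * unchanged for an unsupported size.
 */
static uint32_t merge_lanes(uint32_t old, int where, int size, uint32_t value)
{
	int s;

	switch (size) {
	case 1:
		s = (where & 3) * 8;			/* lane offset in bits */
		return (old & ~(0xffu << s)) | ((value & 0xffu) << s);
	case 2:
		s = (where & 2) * 8;			/* 16-bit lanes sit at bit 0 or 16 */
		return (old & ~(0xffffu << s)) | ((value & 0xffffu) << s);
	case 4:
		return value;
	default:
		return old;
	}
}

int main(void)
{
	uint32_t word = 0x11223344;

	printf("%08x\n", merge_lanes(word, 1, 1, 0xaa));	/* 1122aa44 */
	printf("%08x\n", merge_lanes(word, 2, 2, 0xbeef));	/* beef3344 */
	printf("%08x\n", merge_lanes(word, 0, 4, 0xdeadbeef));	/* deadbeef */
	return 0;
}
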
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 37b52dc3d27e..e2e69e1e9fe1 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -54,8 +54,8 @@
static void *cfg_space;
-#define PCI_BUS_ENABLED 1
-#define PCI_DEVICE_MODE 2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
static int bcm1480_bus_status;
@@ -194,7 +194,7 @@ struct pci_controller bcm1480_controller = {
.pci_ops = &bcm1480_pci_ops,
.mem_resource = &bcm1480_mem_resource,
.io_resource = &bcm1480_io_resource,
- .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
+ .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
};
@@ -227,7 +227,7 @@ static int __init bcm1480_pcibios_init(void)
PCI_COMMAND));
if (!(cmdreg & PCI_COMMAND_MASTER)) {
printk
- ("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ ("PCI: Skipping PCI probe. Bus is not initialized.\n");
iounmap(cfg_space);
return 1; /* XXX */
}
diff --git a/arch/mips/pci/pci-bcm1480ht.c b/arch/mips/pci/pci-bcm1480ht.c
index 50cc6e9e8240..1263c5e7dbe1 100644
--- a/arch/mips/pci/pci-bcm1480ht.c
+++ b/arch/mips/pci/pci-bcm1480ht.c
@@ -53,8 +53,8 @@
static void *ht_cfg_space;
-#define PCI_BUS_ENABLED 1
-#define PCI_DEVICE_MODE 2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
static int bcm1480ht_bus_status;
@@ -191,7 +191,7 @@ struct pci_controller bcm1480ht_controller = {
.io_resource = &bcm1480ht_io_resource,
.index = 1,
.get_busno = bcm1480ht_pcibios_get_busno,
- .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
+ .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
};
static int __init bcm1480ht_pcibios_init(void)
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
index c682468010c5..76f16eaed0ad 100644
--- a/arch/mips/pci/pci-bcm47xx.c
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -91,7 +91,7 @@ static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
#ifdef CONFIG_BCM47XX_SSB
- if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
return bcm47xx_pcibios_plat_dev_init_ssb(dev);
else
#endif
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index ca179b6ff39b..88e781c6b5ba 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -25,21 +25,21 @@
int bcm63xx_pci_enabled;
static struct resource bcm_pci_mem_resource = {
- .name = "bcm63xx PCI memory space",
- .start = BCM_PCI_MEM_BASE_PA,
- .end = BCM_PCI_MEM_END_PA,
- .flags = IORESOURCE_MEM
+ .name = "bcm63xx PCI memory space",
+ .start = BCM_PCI_MEM_BASE_PA,
+ .end = BCM_PCI_MEM_END_PA,
+ .flags = IORESOURCE_MEM
};
static struct resource bcm_pci_io_resource = {
- .name = "bcm63xx PCI IO space",
- .start = BCM_PCI_IO_BASE_PA,
+ .name = "bcm63xx PCI IO space",
+ .start = BCM_PCI_IO_BASE_PA,
#ifdef CONFIG_CARDBUS
- .end = BCM_PCI_IO_HALF_PA,
+ .end = BCM_PCI_IO_HALF_PA,
#else
- .end = BCM_PCI_IO_END_PA,
+ .end = BCM_PCI_IO_END_PA,
#endif
- .flags = IORESOURCE_IO
+ .flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_controller = {
@@ -55,17 +55,17 @@ struct pci_controller bcm63xx_controller = {
*/
#ifdef CONFIG_CARDBUS
static struct resource bcm_cb_mem_resource = {
- .name = "bcm63xx Cardbus memory space",
- .start = BCM_CB_MEM_BASE_PA,
- .end = BCM_CB_MEM_END_PA,
- .flags = IORESOURCE_MEM
+ .name = "bcm63xx Cardbus memory space",
+ .start = BCM_CB_MEM_BASE_PA,
+ .end = BCM_CB_MEM_END_PA,
+ .flags = IORESOURCE_MEM
};
static struct resource bcm_cb_io_resource = {
- .name = "bcm63xx Cardbus IO space",
- .start = BCM_PCI_IO_HALF_PA + 1,
- .end = BCM_PCI_IO_END_PA,
- .flags = IORESOURCE_IO
+ .name = "bcm63xx Cardbus IO space",
+ .start = BCM_PCI_IO_HALF_PA + 1,
+ .end = BCM_PCI_IO_END_PA,
+ .flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_cb_controller = {
@@ -76,17 +76,17 @@ struct pci_controller bcm63xx_cb_controller = {
#endif
static struct resource bcm_pcie_mem_resource = {
- .name = "bcm63xx PCIe memory space",
- .start = BCM_PCIE_MEM_BASE_PA,
- .end = BCM_PCIE_MEM_END_PA,
- .flags = IORESOURCE_MEM,
+ .name = "bcm63xx PCIe memory space",
+ .start = BCM_PCIE_MEM_BASE_PA,
+ .end = BCM_PCIE_MEM_END_PA,
+ .flags = IORESOURCE_MEM,
};
static struct resource bcm_pcie_io_resource = {
- .name = "bcm63xx PCIe IO space",
- .start = 0,
- .end = 0,
- .flags = 0,
+ .name = "bcm63xx PCIe IO space",
+ .start = 0,
+ .end = 0,
+ .flags = 0,
};
struct pci_controller bcm63xx_pcie_controller = {
@@ -111,7 +111,7 @@ static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
u32 tmp;
tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK;
- tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
+ tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG);
bcm_mpi_writel(val, MPI_PCICFGDATA_REG);
}
@@ -211,7 +211,7 @@ static int __init bcm63xx_register_pci(void)
* first bytes to access it from CPU.
*
* this means that no io access from CPU should happen while
- * we do a configuration cycle, but there's no way we can add
+ * we do a configuration cycle, but there's no way we can add
* a spinlock for each io access, so this is currently kind of
* broken on SMP.
*/
@@ -244,9 +244,9 @@ static int __init bcm63xx_register_pci(void)
bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
#endif
- /* setup local bus to PCI access (IO memory), we have only 1
- * IO window for both PCI and cardbus, but it cannot handle
- * both at the same time, assume standard PCI for now, if
+ /* setup local bus to PCI access (IO memory), we have only 1
+ * IO window for both PCI and cardbus, but it cannot handle
+ * both at the same time, assume standard PCI for now, if
* cardbus card has IO zone, PCI fixup will change window to
* cardbus */
val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
@@ -284,7 +284,7 @@ static int __init bcm63xx_register_pci(void)
bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
}
- /* change host bridge retry counter to infinite number of
+ /* change host bridge retry counter to infinite number of
* retry, needed for some broadcom wifi cards with Silicon
* Backplane bus where access to srom seems very slow */
val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
diff --git a/arch/mips/pci/pci-bcm63xx.h b/arch/mips/pci/pci-bcm63xx.h
index e6736d558ac7..ffab4da7bd00 100644
--- a/arch/mips/pci/pci-bcm63xx.h
+++ b/arch/mips/pci/pci-bcm63xx.h
@@ -7,7 +7,7 @@
#include <bcm63xx_dev_pci.h>
/*
- * Cardbus shares the PCI bus, but has no IDSEL, so a special id is
+ * Cardbus shares the PCI bus, but has no IDSEL, so a special id is
* reserved for it. If you have a standard PCI device at this id, you
* need to change the following definition.
*/
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 7f4f49b09b5b..6eb65e44d9e4 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -30,7 +30,7 @@
/*
* XXX: No kmalloc available when we do our crosstalk scan,
- * we should try to move it later in the boot process.
+ * we should try to move it later in the boot process.
*/
static struct bridge_controller bridges[MAX_PCI_BUSSES];
@@ -103,7 +103,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
* swap pio's to pci mem and io space (big windows)
*/
bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
- BRIDGE_CTRL_MEM_SWAP;
+ BRIDGE_CTRL_MEM_SWAP;
#ifdef CONFIG_PAGE_SIZE_4KB
bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
#else /* 16kB or larger */
@@ -123,7 +123,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
bc->pci_int[slot] = -1;
}
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
+ bridge->b_wid_tflush; /* wait until Bridge PIO complete */
bc->base = bridge;
@@ -184,7 +184,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}
/*
- * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
+ * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
* XXX This also means multiple devices might rely on conflicting bridge
* settings.
diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c
index 532b561b4442..b1e061f7fdc7 100644
--- a/arch/mips/pci/pci-ip32.c
+++ b/arch/mips/pci/pci-ip32.c
@@ -18,9 +18,9 @@
/*
* Handle errors from the bridge. This includes master and target aborts,
- * various command and address errors, and the interrupt test. This gets
- * registered on the bridge error irq. It's conceivable that some of these
- * conditions warrant a panic. Anybody care to say which ones?
+ * various command and address errors, and the interrupt test. This gets
+ * registered on the bridge error irq. It's conceivable that some of these
+ * conditions warrant a panic. Anybody care to say which ones?
*/
static irqreturn_t macepci_error(int irq, void *dev)
{
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 910fb4c20b9e..879077b01155 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -129,8 +129,16 @@ static int ltq_pci_startup(struct platform_device *pdev)
/* setup reset gpio used by pci */
reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
- if (gpio_is_valid(reset_gpio))
- devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
+ if (gpio_is_valid(reset_gpio)) {
+ int ret = devm_gpio_request(&pdev->dev,
+ reset_gpio, "pci-reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request gpio %d\n", reset_gpio);
+ return ret;
+ }
+ gpio_direction_output(reset_gpio, 1);
+ }
/* enable auto-switching between PCI and EBU */
ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
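
The pci-lantiq.c hunk above stops ignoring the return value of devm_gpio_request() and only drives the reset line once the request has succeeded. The general shape of that fix — acquire, check, report, propagate the error code — is sketched below in plain C with a stand-in acquire function; the names are illustrative, not the kernel API.

#include <stdio.h>
#include <errno.h>

/* Stand-in for a resource request that can fail (e.g. a line already claimed). */
static int request_line(int line)
{
	return line < 0 ? -EINVAL : 0;	/* treat negative ids as invalid */
}

static int setup_reset_line(int line)
{
	int ret = request_line(line);

	if (ret) {
		/* Report which resource failed and hand the error back to the caller. */
		fprintf(stderr, "failed to request line %d: %d\n", line, ret);
		return ret;
	}

	/* Only touch the line after the request succeeded. */
	printf("line %d requested, driving it high\n", line);
	return 0;
}

int main(void)
{
	setup_reset_line(21);			/* succeeds */
	return setup_reset_line(-1) ? 0 : 1;	/* expected to fail */
}
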
diff --git a/arch/mips/pci/pci-lasat.c b/arch/mips/pci/pci-lasat.c
index a98e543a514a..40d2797d2bc4 100644
--- a/arch/mips/pci/pci-lasat.c
+++ b/arch/mips/pci/pci-lasat.c
@@ -51,15 +51,15 @@ static int __init lasat_pci_setup(void)
arch_initcall(lasat_pci_setup);
-#define LASAT_IRQ_ETH1 (LASAT_IRQ_BASE + 0)
-#define LASAT_IRQ_ETH0 (LASAT_IRQ_BASE + 1)
-#define LASAT_IRQ_HDC (LASAT_IRQ_BASE + 2)
-#define LASAT_IRQ_COMP (LASAT_IRQ_BASE + 3)
-#define LASAT_IRQ_HDLC (LASAT_IRQ_BASE + 4)
-#define LASAT_IRQ_PCIA (LASAT_IRQ_BASE + 5)
-#define LASAT_IRQ_PCIB (LASAT_IRQ_BASE + 6)
-#define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7)
-#define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8)
+#define LASAT_IRQ_ETH1 (LASAT_IRQ_BASE + 0)
+#define LASAT_IRQ_ETH0 (LASAT_IRQ_BASE + 1)
+#define LASAT_IRQ_HDC (LASAT_IRQ_BASE + 2)
+#define LASAT_IRQ_COMP (LASAT_IRQ_BASE + 3)
+#define LASAT_IRQ_HDLC (LASAT_IRQ_BASE + 4)
+#define LASAT_IRQ_PCIA (LASAT_IRQ_BASE + 5)
+#define LASAT_IRQ_PCIB (LASAT_IRQ_BASE + 6)
+#define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7)
+#define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8)
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
@@ -69,13 +69,13 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
case 3:
return LASAT_IRQ_PCIA + (((slot-1) + (pin-1)) % 4);
case 4:
- return LASAT_IRQ_ETH1; /* Ethernet 1 (LAN 2) */
+ return LASAT_IRQ_ETH1; /* Ethernet 1 (LAN 2) */
case 5:
- return LASAT_IRQ_ETH0; /* Ethernet 0 (LAN 1) */
+ return LASAT_IRQ_ETH0; /* Ethernet 0 (LAN 1) */
case 6:
- return LASAT_IRQ_HDC; /* IDE controller */
+ return LASAT_IRQ_HDC; /* IDE controller */
default:
- return 0xff; /* Illegal */
+ return 0xff; /* Illegal */
}
return -1;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5b5ed76c6f47..95c2ea815cac 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -30,8 +30,8 @@
* addresses. Use PCI endian swapping 1 so no address swapping is
* necessary. The Linux io routines will endian swap the data.
*/
-#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
-#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
+#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
+#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
@@ -68,10 +68,10 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -120,8 +120,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
/* Enable the PCIe normal error reporting */
config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
- config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
- config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
+ config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
/* Find the Advanced Error Reporting capability */
@@ -226,10 +226,10 @@ const char *octeon_get_pci_interrupts(void)
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
@@ -404,8 +404,8 @@ static void octeon_pci_initialize(void)
ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
- ctl_status_2.s.bb1 = 1; /* BAR1 is big */
- ctl_status_2.s.bb0 = 1; /* BAR0 is big */
+ ctl_status_2.s.bb1 = 1; /* BAR1 is big */
+ ctl_status_2.s.bb0 = 1; /* BAR0 is big */
}
octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
@@ -446,7 +446,7 @@ static void octeon_pci_initialize(void)
* count. [1..31] and 0=32. NOTE: If the user
* programs these bits beyond the Designed Maximum
* outstanding count, then the designed maximum table
- * depth will be used instead. No additional
+ * depth will be used instead. No additional
* Deferred/Split transactions will be accepted if
* this outstanding maximum count is
* reached. Furthermore, no additional deferred/split
@@ -456,7 +456,7 @@ static void octeon_pci_initialize(void)
cfg19.s.tdomc = 4;
/*
* Master Deferred Read Request Outstanding Max Count
- * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
+ * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
* cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
* 5 2 110 6 3 111 7 3 For example, if these bits are
* programmed to 100, the core can support 2 DAC
@@ -550,7 +550,7 @@ static void octeon_pci_initialize(void)
/*
* Affects PCI performance when OCTEON services reads to its
- * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
+ * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
* 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
* PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
* these values need to be changed so they won't possibly prefetch off
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 5f3a69cebad1..b128cb973ebe 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -33,7 +33,7 @@
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/* define an unsigned array for the PCI registers */
@@ -82,11 +82,11 @@ extern struct pci_ops rc32434_pci_ops;
#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1)
#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
-#define PCI_IO1_END \
+#define PCI_IO1_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
#define PCI_IO2_START \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
-#define PCI_IO2_END \
+#define PCI_IO2_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
struct pci_controller rc32434_controller2;
diff --git a/arch/mips/pci/pci-sb1250.c b/arch/mips/pci/pci-sb1250.c
index dd97f3a83baa..cdefcc4cb8d4 100644
--- a/arch/mips/pci/pci-sb1250.c
+++ b/arch/mips/pci/pci-sb1250.c
@@ -55,9 +55,9 @@
static void *cfg_space;
-#define PCI_BUS_ENABLED 1
-#define LDT_BUS_ENABLED 2
-#define PCI_DEVICE_MODE 4
+#define PCI_BUS_ENABLED 1
+#define LDT_BUS_ENABLED 2
+#define PCI_DEVICE_MODE 4
static int sb1250_bus_status;
@@ -239,7 +239,7 @@ static int __init sb1250_pcibios_init(void)
PCI_COMMAND));
if (!(cmdreg & PCI_COMMAND_MASTER)) {
printk
- ("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ ("PCI: Skipping PCI probe. Bus is not initialized.\n");
iounmap(cfg_space);
return 0;
}
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
index 444b8d8004ad..157c7715b7c8 100644
--- a/arch/mips/pci/pci-vr41xx.c
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -69,17 +69,17 @@ static struct pci_target_address_window pci_target_window1 = {
};
static struct resource pci_mem_resource = {
- .name = "PCI Memory resources",
- .start = PCI_MEM_RESOURCE_START,
- .end = PCI_MEM_RESOURCE_END,
- .flags = IORESOURCE_MEM,
+ .name = "PCI Memory resources",
+ .start = PCI_MEM_RESOURCE_START,
+ .end = PCI_MEM_RESOURCE_END,
+ .flags = IORESOURCE_MEM,
};
static struct resource pci_io_resource = {
- .name = "PCI I/O resources",
- .start = PCI_IO_RESOURCE_START,
- .end = PCI_IO_RESOURCE_END,
- .flags = IORESOURCE_IO,
+ .name = "PCI I/O resources",
+ .start = PCI_IO_RESOURCE_START,
+ .end = PCI_IO_RESOURCE_END,
+ .flags = IORESOURCE_IO,
};
static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
@@ -97,7 +97,7 @@ static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
};
static struct pci_controller vr41xx_pci_controller = {
- .pci_ops = &vr41xx_pci_ops,
+ .pci_ops = &vr41xx_pci_ops,
.mem_resource = &pci_mem_resource,
.io_resource = &pci_io_resource,
};
@@ -148,7 +148,7 @@ static int __init vr41xx_pciu_init(void)
else if ((vtclock / 2) < pci_clock_max)
pciu_write(PCICLKSELREG, HALF_VTCLOCK);
else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 &&
- (vtclock / 3) < pci_clock_max)
+ (vtclock / 3) < pci_clock_max)
pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK);
else if ((vtclock / 4) < pci_clock_max)
pciu_write(PCICLKSELREG, QUARTER_VTCLOCK);
@@ -281,7 +281,7 @@ static int __init vr41xx_pciu_init(void)
pciu_write(PCIAPCNTREG, val);
pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
PCI_COMMAND_SERR);
/* Clear bus error */
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
index 6b1ae2eb1c06..e6b4a1b969f7 100644
--- a/arch/mips/pci/pci-vr41xx.h
+++ b/arch/mips/pci/pci-vr41xx.h
@@ -1,7 +1,7 @@
/*
* pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
*
- * Copyright (C) 2002 MontaVista Software Inc.
+ * Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c
index 140557a20488..653d2db9e0c5 100644
--- a/arch/mips/pci/pci-xlp.c
+++ b/arch/mips/pci/pci-xlp.c
@@ -46,6 +46,7 @@
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/pic.h>
@@ -55,7 +56,7 @@
static void *pci_config_base;
-#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
/* PCI ops */
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -64,8 +65,12 @@ static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
u32 data;
u32 *cfgaddr;
+ where &= ~3;
+ if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954)
+ return 0xffffffff;
+
cfgaddr = (u32 *)(pci_config_base +
- pci_cfg_addr(bus->number, devfn, where & ~3));
+ pci_cfg_addr(bus->number, devfn, where));
data = *cfgaddr;
return data;
}
@@ -135,54 +140,60 @@ struct pci_ops nlm_pci_ops = {
};
static struct resource nlm_pci_mem_resource = {
- .name = "XLP PCI MEM",
- .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
- .end = 0xdfffffffUL,
- .flags = IORESOURCE_MEM,
+ .name = "XLP PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
};
static struct resource nlm_pci_io_resource = {
- .name = "XLP IO MEM",
- .start = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
- .end = 0x17ffffffUL,
- .flags = IORESOURCE_IO,
+ .name = "XLP IO MEM",
+ .start = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
+ .end = 0x17ffffffUL,
+ .flags = IORESOURCE_IO,
};
struct pci_controller nlm_pci_controller = {
- .index = 0,
- .pci_ops = &nlm_pci_ops,
- .mem_resource = &nlm_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &nlm_pci_io_resource,
- .io_offset = 0x00000000UL,
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
};
-static int get_irq_vector(const struct pci_dev *dev)
+static struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
{
- /*
- * For XLP PCIe, there is an IRQ per Link, find out which
- * link the device is on to assign interrupts
- */
- if (dev->bus->self == NULL)
- return 0;
+ struct pci_bus *bus, *p;
- switch (dev->bus->self->devfn) {
- case 0x8:
- return PIC_PCIE_LINK_0_IRQ;
- case 0x9:
- return PIC_PCIE_LINK_1_IRQ;
- case 0xa:
- return PIC_PCIE_LINK_2_IRQ;
- case 0xb:
- return PIC_PCIE_LINK_3_IRQ;
- }
- WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
- return 0;
+ /* Find the bridge on bus 0 */
+ bus = dev->bus;
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+}
+
+static inline int nlm_pci_link_to_irq(int link)
+{
+ return PIC_PCIE_LINK_0_IRQ + link;
}
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- return get_irq_vector(dev);
+ struct pci_dev *lnkdev;
+ int lnkslot, lnkfunc;
+
+ /*
+ * For XLP PCIe, there is an IRQ per Link, find out which
+ * link the device is on to assign interrupts
+ */
+ lnkdev = xlp_get_pcie_link(dev);
+ if (lnkdev == NULL)
+ return 0;
+ lnkfunc = PCI_FUNC(lnkdev->devfn);
+ lnkslot = PCI_SLOT(lnkdev->devfn);
+ return nlm_irq_to_xirq(lnkslot / 8, nlm_pci_link_to_irq(lnkfunc));
}
/* Do platform specific device initialization at pci_enable_device() time */
@@ -191,51 +202,76 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static int xlp_enable_pci_bswap(void)
+/*
+ * If big-endian, enable hardware byteswap on the PCIe bridges.
+ * This will make both the SoC and PCIe devices behave consistently with
+ * readl/writel.
+ */
+#ifdef __BIG_ENDIAN
+static void xlp_config_pci_bswap(int node, int link)
{
- uint64_t pciebase, sysbase;
- int node, i;
+ uint64_t nbubase, lnkbase;
u32 reg;
- /* Chip-0 so node set to 0 */
- node = 0;
- sysbase = nlm_get_bridge_regbase(node);
+ nbubase = nlm_get_bridge_regbase(node);
+ lnkbase = nlm_get_pcie_base(node, link);
+
/*
* Enable byte swap in hardware. Program each link's PCIe SWAP regions
* from the link's address ranges.
*/
- for (i = 0; i < 4; i++) {
- pciebase = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, i));
- if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
- continue;
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_BASE0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_LIMIT0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_LIM,
- reg | 0xfff);
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_BASE0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_BASE, reg);
-
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_LIMIT0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
- }
- return 0;
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
}
+#else
+/* Swap configuration not needed in little-endian mode */
+static inline void xlp_config_pci_bswap(int node, int link) {}
+#endif /* __BIG_ENDIAN */
static int __init pcibios_init(void)
{
+ struct nlm_soc_info *nodep;
+ uint64_t pciebase;
+ int link, n;
+ u32 reg;
+
/* Firmware assigns PCI resources */
pci_set_flags(PCI_PROBE_ONLY);
pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20);
/* Extend IO port for memory mapped io */
- ioport_resource.start = 0;
+ ioport_resource.start = 0;
ioport_resource.end = ~0;
- xlp_enable_pci_bswap();
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ nodep = nlm_get_node(n);
+ if (!nodep->coremask)
+ continue; /* node does not exist */
+
+ for (link = 0; link < 4; link++) {
+ pciebase = nlm_get_pcie_base(n, link);
+ if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
+ continue;
+ xlp_config_pci_bswap(n, link);
+
+ /* put in intpin and irq - u-boot does not */
+ reg = nlm_read_pci_reg(pciebase, 0xf);
+ reg &= ~0x1fu;
+ reg |= (1 << 8) | nlm_pci_link_to_irq(link);
+ nlm_write_pci_reg(pciebase, 0xf, reg);
+ pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link);
+ }
+ }
+
set_io_port_base(CKSEG1);
nlm_pci_controller.io_map_base = CKSEG1;
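
The pci-xlp.c change replaces the devfn switch in get_irq_vector() with xlp_get_pcie_link(), which walks dev->bus->parent until it reaches bus 0 and then uses that bus's upstream bridge (its slot and function) to pick the per-link IRQ. A minimal sketch of that "walk up to the root-level ancestor" step, using simplified stand-in types rather than struct pci_bus:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for struct pci_bus / struct pci_dev. */
struct bus {
	int number;
	struct bus *parent;	/* NULL for the root bus */
	int bridge_devfn;	/* devfn of the bridge leading to this bus */
};

/*
 * Return the devfn of the bridge that sits on bus 0 above "b",
 * or -1 if "b" is itself the root bus (no upstream bridge).
 */
static int root_bridge_devfn(struct bus *b)
{
	struct bus *p;

	for (p = b->parent; p && p->number != 0; p = p->parent)
		b = p;

	return p ? b->bridge_devfn : -1;
}

int main(void)
{
	struct bus root = { .number = 0, .parent = NULL };
	struct bus link = { .number = 1, .parent = &root, .bridge_devfn = 0x9 };
	struct bus leaf = { .number = 2, .parent = &link, .bridge_devfn = 0x0 };

	printf("%d\n", root_bridge_devfn(&leaf));	/* 9: bridge on bus 0 */
	printf("%d\n", root_bridge_devfn(&root));	/* -1: already the root */
	return 0;
}
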
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 0c18ccc79623..4427abbd48b5 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -56,7 +56,7 @@
static void *pci_config_base;
-#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
/* PCI ops */
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -136,26 +136,26 @@ struct pci_ops nlm_pci_ops = {
};
static struct resource nlm_pci_mem_resource = {
- .name = "XLR PCI MEM",
- .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
- .end = 0xdfffffffUL,
- .flags = IORESOURCE_MEM,
+ .name = "XLR PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
};
static struct resource nlm_pci_io_resource = {
- .name = "XLR IO MEM",
- .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
- .end = 0x100fffffUL,
- .flags = IORESOURCE_IO,
+ .name = "XLR IO MEM",
+ .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
+ .end = 0x100fffffUL,
+ .flags = IORESOURCE_IO,
};
struct pci_controller nlm_pci_controller = {
- .index = 0,
- .pci_ops = &nlm_pci_ops,
- .mem_resource = &nlm_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &nlm_pci_io_resource,
- .io_offset = 0x00000000UL,
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
};
/*
@@ -259,7 +259,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
MSI_ADDR_REDIRECTION_CPU;
msg.data = MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
+ MSI_DATA_LEVEL_ASSERT |
MSI_DATA_DELIVERY_FIXED;
ret = irq_set_msi_desc(irq, desc);
@@ -344,7 +344,7 @@ static int __init pcibios_init(void)
pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
/* Extend IO port for memory mapped io */
- ioport_resource.start = 0;
+ ioport_resource.start = 0;
ioport_resource.end = ~0;
set_io_port_base(CKSEG1);
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index a1843448fad3..0872f12f268d 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -175,9 +175,20 @@ static DEFINE_MUTEX(pci_scan_mutex);
void register_pci_controller(struct pci_controller *hose)
{
- if (request_resource(&iomem_resource, hose->mem_resource) < 0)
+ struct resource *parent;
+
+ parent = hose->mem_resource->parent;
+ if (!parent)
+ parent = &iomem_resource;
+
+ if (request_resource(parent, hose->mem_resource) < 0)
goto out;
- if (request_resource(&ioport_resource, hose->io_resource) < 0) {
+
+ parent = hose->io_resource->parent;
+ if (!parent)
+ parent = &ioport_resource;
+
+ if (request_resource(parent, hose->io_resource) < 0) {
release_resource(hose->mem_resource);
goto out;
}
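
register_pci_controller() above now requests a controller's mem/io resources under whatever parent resource the caller pre-assigned (the ar71xx/ar724x probes point res->parent at the platform resource), and only falls back to the global iomem_resource/ioport_resource roots when no parent was set. The fallback itself is just a null check on the parent pointer; a compact sketch with stand-in types:

#include <stdio.h>

/* Stand-in for struct resource: only the fields this sketch needs. */
struct res {
	const char *name;
	struct res *parent;	/* NULL means "hang it off the global root" */
};

static struct res iomem_root = { .name = "iomem" };

static struct res *resource_parent(struct res *r)
{
	return r->parent ? r->parent : &iomem_root;
}

int main(void)
{
	struct res platform_window = { .name = "pci window" };
	struct res preparented = { .name = "PCI memory space", .parent = &platform_window };
	struct res orphan = { .name = "PCI memory space" };

	printf("%s -> %s\n", preparented.name, resource_parent(&preparented)->name);
	printf("%s -> %s\n", orphan.name, resource_parent(&orphan)->name);
	return 0;
}
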
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index fdb4d558c0cc..5e36c33e5543 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -43,7 +43,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 1 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t es:2; /* Endian swap = 1 */
@@ -74,7 +74,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 2 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t es:2; /* Endian swap = 1 */
@@ -85,7 +85,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 3-6 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t address:36; /* PCIe Mem address */
@@ -166,7 +166,7 @@ static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
* Read a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
- * @pcie_port: PCIe port to read from
+ * @pcie_port: PCIe port to read from
* @cfg_offset: Address to read
*
* Returns Value read
@@ -194,9 +194,9 @@ static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
* Write a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
- * @pcie_port: PCIe port to write to
+ * @pcie_port: PCIe port to write to
* @cfg_offset: Address to write
- * @val: Value to write
+ * @val: Value to write
*/
static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
uint32_t val)
@@ -222,7 +222,7 @@ static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
* @pcie_port: PCIe port to access
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns 64bit Octeon IO address
@@ -259,7 +259,7 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -281,7 +281,7 @@ static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -303,7 +303,7 @@ static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -325,7 +325,7 @@ static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -344,7 +344,7 @@ static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -363,7 +363,7 @@ static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -883,14 +883,14 @@ retry:
/* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
- npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
- npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
/* Setup Mem access SubDIDs */
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
- mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
+ mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
@@ -926,7 +926,7 @@ retry:
bar1_index.u32 = 0;
bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
- bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.ca = 1; /* Not Cached */
bar1_index.s.end_swp = 1; /* Endian Swap mode */
bar1_index.s.addr_v = 1; /* Valid entry */
@@ -1342,11 +1342,11 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
/* Setup Mem access SubDIDs */
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
- mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
- mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
- mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
- mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
- mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
/* PCIe Adddress Bits <63:34>. */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
@@ -1409,7 +1409,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
bar1_index.u64 = 0;
bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
- bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.ca = 1; /* Not Cached */
bar1_index.s.end_swp = 1; /* Endian Swap mode */
bar1_index.s.addr_v = 1; /* Valid entry */
@@ -1458,10 +1458,10 @@ static int cvmx_pcie_rc_initialize(int pcie_port)
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
@@ -1503,7 +1503,7 @@ int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
return pin - 1 + OCTEON_IRQ_PCI_INT0;
}
-static void set_cfg_read_retry(u32 retry_cnt)
+static void set_cfg_read_retry(u32 retry_cnt)
{
union cvmx_pemx_ctl_status pemx_ctl;
pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
@@ -1931,7 +1931,7 @@ static int __init octeon_pcie_setup(void)
OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
if (sriox_status_reg.s.srio) {
- srio_war15205 += 1; /* Port is SRIO */
+ srio_war15205 += 1; /* Port is SRIO */
port = 0;
}
}
@@ -2004,7 +2004,7 @@ static int __init octeon_pcie_setup(void)
OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
if (sriox_status_reg.s.srio) {
- srio_war15205 += 1; /* Port is SRIO */
+ srio_war15205 += 1; /* Port is SRIO */
port = 1;
}
}
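
The octeon_pcie_pcibios_map_irq() comment touched above spells out the MIPS PCI fixup contract: Linux passes the device, the slot it resolves to on bus 0, and the already-swizzled interrupt pin, and the platform hands back a CPU IRQ number. A generic hook of that shape — not the Octeon one, and with a hypothetical IRQ base — could read:

#include <linux/init.h>
#include <linux/pci.h>

#define PLAT_PCI_INT0	40	/* assumption: first of four PCI interrupt lines */

/* Map the swizzled INTA..INTD pin (1..4) onto consecutive CPU IRQs. */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return pin - 1 + PLAT_PCI_INT0;
}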
diff --git a/arch/mips/pmc-sierra/Platform b/arch/mips/pmc-sierra/Platform
deleted file mode 100644
index 387fda6c28c6..000000000000
--- a/arch/mips/pmc-sierra/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# PMC-Sierra MSP SOCs
-#
-platform-$(CONFIG_PMC_MSP) += pmc-sierra/msp71xx/
-cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/pmc-sierra/msp71xx \
- -mno-branch-likely
-load-$(CONFIG_PMC_MSP) += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig
index 3482b8c8640c..3482b8c8640c 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmcs-msp71xx/Kconfig
diff --git a/arch/mips/pmc-sierra/msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile
index cefba7733b73..cefba7733b73 100644
--- a/arch/mips/pmc-sierra/msp71xx/Makefile
+++ b/arch/mips/pmcs-msp71xx/Makefile
diff --git a/arch/mips/pmcs-msp71xx/Platform b/arch/mips/pmcs-msp71xx/Platform
new file mode 100644
index 000000000000..7af0734a5007
--- /dev/null
+++ b/arch/mips/pmcs-msp71xx/Platform
@@ -0,0 +1,7 @@
+#
+# PMC-Sierra MSP SOCs
+#
+platform-$(CONFIG_PMC_MSP) += pmcs-msp71xx/
+cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/mach-pmcs-msp71xx \
+ -mno-branch-likely
+load-$(CONFIG_PMC_MSP) += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio.c b/arch/mips/pmcs-msp71xx/gpio.c
index aaccbe524386..aaccbe524386 100644
--- a/arch/mips/pmc-sierra/msp71xx/gpio.c
+++ b/arch/mips/pmcs-msp71xx/gpio.c
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c b/arch/mips/pmcs-msp71xx/gpio_extended.c
index 2a99f360fae4..2a99f360fae4 100644
--- a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c
+++ b/arch/mips/pmcs-msp71xx/gpio_extended.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_elb.c b/arch/mips/pmcs-msp71xx/msp_elb.c
index 3e9641007216..3e9641007216 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_elb.c
+++ b/arch/mips/pmcs-msp71xx/msp_elb.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_eth.c b/arch/mips/pmcs-msp71xx/msp_eth.c
index c584df393de2..c584df393de2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_eth.c
+++ b/arch/mips/pmcs-msp71xx/msp_eth.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c b/arch/mips/pmcs-msp71xx/msp_hwbutton.c
index bb57ed9ea2bd..bb57ed9ea2bd 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c
+++ b/arch/mips/pmcs-msp71xx/msp_hwbutton.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
index d3c3d81757a5..9da5619c00a5 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq.c
@@ -41,9 +41,9 @@ static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); }
/*
* The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
- * hierarchical system. The first level are the direct MIPS interrupts
+ * hierarchical system. The first level are the direct MIPS interrupts
* and are assigned the interrupt range 0-7. The second level is the SLM
- * interrupt controller and is assigned the range 8-39. The third level
+ * interrupt controller and is assigned the range 8-39. The third level
* comprises the Peripherial block, the PCI block, the PCI MSI block and
* the SLP. The PCI interrupts and the SLP errors are handled by the
* relevant subsystems so the core interrupt code needs only concern
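
The comment in this hunk describes a three-level cascade: the MIPS CP0 interrupts occupy 0-7, the SLM controller sits behind them at 8-39, and the peripheral/PCI/SLP blocks hang off that. A skeletal first-level dispatcher for such a cascade, with assumed names for the IRQ base and the pending-status value, is sketched below:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/irq.h>

#define SLM_BASE_IRQ	8	/* assumption: second-level range starts at 8 */

/* Hand every pending second-level source to the generic IRQ core. */
static void slm_cascade_dispatch(u32 pending)
{
	while (pending) {
		int bit = __ffs(pending);

		do_IRQ(SLM_BASE_IRQ + bit);
		pending &= ~BIT(bit);
	}
}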
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index 2e6f7cab24c1..e49b499f66db 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -3,8 +3,8 @@
*
* This file define the irq handler for MSP CIC subsystem interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -171,7 +171,7 @@ void __init msp_cic_irq_init(void)
/* Mask/clear interrupts. */
*CIC_VPE0_MSK_REG = 0x00000000;
*CIC_VPE1_MSK_REG = 0x00000000;
- *CIC_STS_REG = 0xFFFFFFFF;
+ *CIC_STS_REG = 0xFFFFFFFF;
/*
* The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
* These inputs map to EXT_INT_POL[6:4] inside the CIC.
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmcs-msp71xx/msp_irq_per.c
index 598b6a66b970..d1fd530479d4 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_per.c
@@ -3,8 +3,8 @@
*
* This file define the irq handler for MSP PER subsystem interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmcs-msp71xx/msp_irq_slp.c
index 83a1c5eae3f8..5f66a76311c3 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_slp.c
@@ -4,8 +4,8 @@
* Copyright 2005-2006 PMC-Sierra, Inc, derived from irq_cpu.c
* Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_pci.c b/arch/mips/pmcs-msp71xx/msp_pci.c
index f764fe7748d6..428dea23c35c 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_pci.c
+++ b/arch/mips/pmcs-msp71xx/msp_pci.c
@@ -36,7 +36,7 @@ static int __init msp_pci_setup(void)
#if 0 /* Linux 2.6 initialization code to be completed */
if (getdeviceid() & DEV_ID_SINGLE_PC) {
/* If single card mode */
- slmRegs *sreg = (slmRegs *) SREG_BASE;
+ slmRegs *sreg = (slmRegs *) SREG_BASE;
sreg->single_pc_enable = SINGLE_PCCARD;
}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index db00deb59b9c..0edb89a63516 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -99,7 +99,7 @@ static inline int str2eaddr(unsigned char *ea, unsigned char *str)
}
}
- if (index == 5) {
+ if (index == 5) {
ea[index++] = num;
return 0;
} else
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(identify_revision);
char *prom_getenv(char *env_name)
{
/*
- * Return a pointer to the given environment variable. prom_envp
+ * Return a pointer to the given environment variable. prom_envp
* points to a null terminated array of pointers to variables.
* Environment variables are stored in the form of "memsize=64"
*/
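
The prom_getenv() comment above documents the PROM environment as a NULL-terminated array of "name=value" strings (e.g. "memsize=64"). A self-contained sketch of that lookup, under a hypothetical function name:

#include <stddef.h>
#include <string.h>

/* Return the value part of "name=value", or NULL if "name" is absent. */
static char *env_lookup(char **envp, const char *name)
{
	size_t n = strlen(name);

	for (; *envp; envp++) {
		if (strncmp(*envp, name, n) == 0 && (*envp)[n] == '=')
			return *envp + n + 1;
	}
	return NULL;
}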
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmcs-msp71xx/msp_serial.c
index a1c7c7da2336..d304be22b963 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_serial.c
+++ b/arch/mips/pmcs-msp71xx/msp_serial.c
@@ -90,8 +90,8 @@ static int msp_serial_handle_irq(struct uart_port *p)
void __init msp_serial_setup(void)
{
- char *s;
- char *endp;
+ char *s;
+ char *endp;
struct uart_port up;
unsigned int uartclk;
@@ -104,19 +104,19 @@ void __init msp_serial_setup(void)
ppfinit("UART clock set to %d\n", uartclk);
/* Initialize first serial port */
- up.mapbase = MSP_UART0_BASE;
- up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
- up.irq = MSP_INT_UART0;
- up.uartclk = uartclk;
- up.regshift = 2;
- up.iotype = UPIO_MEM;
- up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
- up.type = PORT_16550A;
- up.line = 0;
+ up.mapbase = MSP_UART0_BASE;
+ up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+ up.irq = MSP_INT_UART0;
+ up.uartclk = uartclk;
+ up.regshift = 2;
+ up.iotype = UPIO_MEM;
+ up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+ up.type = PORT_16550A;
+ up.line = 0;
up.serial_out = msp_serial_out;
up.serial_in = msp_serial_in;
up.handle_irq = msp_serial_handle_irq;
- up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
+ up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
if (!up.private_data) {
pr_err("failed to allocate uart private data\n");
return;
@@ -142,10 +142,10 @@ void __init msp_serial_setup(void)
return; /* No second serial port, good-bye. */
}
- up.mapbase = MSP_UART1_BASE;
- up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
- up.irq = MSP_INT_UART1;
- up.line = 1;
+ up.mapbase = MSP_UART1_BASE;
+ up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+ up.irq = MSP_INT_UART1;
+ up.line = 1;
up.private_data = (void*)UART1_STATUS_REG;
if (early_serial_setup(&up)) {
kfree(up.private_data);
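
msp_serial_setup() in the hunk above fills a struct uart_port by hand and hands it to early_serial_setup(). The same registration, compressed into one helper with an assumed base address, register-window length and clock (UPF_* are the uart_port spellings of the ASYNC_* flags used above):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

static void __init register_16550(unsigned long base, int irq,
				  unsigned int uartclk, int line)
{
	struct uart_port up = {
		.mapbase  = base,
		.membase  = ioremap_nocache(base, 0x100),	/* assumed length */
		.irq      = irq,
		.uartclk  = uartclk,
		.regshift = 2,				/* 32-bit spaced registers */
		.iotype   = UPIO_MEM,
		.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.type     = PORT_16550A,
		.line     = line,
	};

	if (early_serial_setup(&up))
		pr_err("failed to register UART at 0x%lx\n", base);
}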
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 7a834b2f8a5f..1651cfdbfe7b 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -4,8 +4,8 @@
* Copyright 2005-2007 PMC-Sierra, Inc,
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -168,7 +168,7 @@ void __init prom_init(void)
family = identify_family();
revision = identify_revision();
- switch (family) {
+ switch (family) {
case FAMILY_FPGA:
if (FPGA_IS_MSP4200(revision)) {
/* Old-style revision ID */
@@ -219,7 +219,7 @@ void __init prom_init(void)
/*
* Sub-system setup follows.
- * Setup functions can either be called here or using the
+ * Setup functions can either be called here or using the
* subsys_initcall mechanism (i.e. see msp_pci_setup). The
* order in which they are called can be changed by using the
* link order in arch/mips/pmc-sierra/msp71xx/Makefile.
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..10170580a2de 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
index c8dcc1c01e18..c8dcc1c01e18 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c
+++ b/arch/mips/pmcs-msp71xx/msp_smtc.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
index 8b42f307a7a7..8f12ecc55ace 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmcs-msp71xx/msp_time.c
@@ -45,7 +45,7 @@ static int tim_installed;
void __init plat_time_init(void)
{
- char *endp, *s;
+ char *endp, *s;
unsigned long cpu_rate = 0;
if (cpu_rate == 0) {
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c
index 9a1aef89bd4c..4dab915696e7 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_usb.c
+++ b/arch/mips/pmcs-msp71xx/msp_usb.c
@@ -40,14 +40,14 @@
#if defined(CONFIG_USB_EHCI_HCD)
static struct resource msp_usbhost0_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB0_HS_START,
- .end = MSP_USB0_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB0_HS_START,
+ .end = MSP_USB0_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB0_MAB_START,
@@ -71,8 +71,8 @@ static struct mspusb_device msp_usbhost0_device = {
.dma_mask = &msp_usbhost0_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbhost0_resources),
- .resource = msp_usbhost0_resources,
+ .num_resources = ARRAY_SIZE(msp_usbhost0_resources),
+ .resource = msp_usbhost0_resources,
},
};
@@ -121,14 +121,14 @@ static struct mspusb_device msp_usbhost1_device = {
#if defined(CONFIG_USB_GADGET)
static struct resource msp_usbdev0_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB0_HS_START,
- .end = MSP_USB0_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB0_HS_START,
+ .end = MSP_USB0_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB0_MAB_START,
@@ -153,22 +153,22 @@ static struct mspusb_device msp_usbdev0_device = {
.dma_mask = &msp_usbdev_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbdev0_resources),
- .resource = msp_usbdev0_resources,
+ .num_resources = ARRAY_SIZE(msp_usbdev0_resources),
+ .resource = msp_usbdev0_resources,
},
};
#ifdef CONFIG_MSP_HAS_DUAL_USB
static struct resource msp_usbdev1_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB1_HS_START,
- .end = MSP_USB1_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB1_HS_START,
+ .end = MSP_USB1_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB1_MAB_START,
@@ -191,8 +191,8 @@ static struct mspusb_device msp_usbdev1_device = {
.dma_mask = &msp_usbdev_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
- .resource = msp_usbdev1_resources,
+ .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
+ .resource = msp_usbdev1_resources,
},
};
@@ -211,7 +211,7 @@ static int __init msp_usb_setup(void)
/*
* Could this perhaps be integrated into the "features" env var?
* Use the features key "U", and follow with "H" for host-mode,
- * "D" for device-mode. If it works for Ethernet, why not USB...
+ * "D" for device-mode. If it works for Ethernet, why not USB...
* -- hammtrev, 2007/03/22
*/
snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
@@ -237,7 +237,7 @@ static int __init msp_usb_setup(void)
#endif
#else
ppfinit("%s: echi_hcd not supported\n", __FILE__);
-#endif /* CONFIG_USB_EHCI_HCD */
+#endif /* CONFIG_USB_EHCI_HCD */
} else {
#if defined(CONFIG_USB_GADGET)
/* get device mode structure */
@@ -251,7 +251,7 @@ static int __init msp_usb_setup(void)
#endif
#else
ppfinit("%s: usb_gadget not supported\n", __FILE__);
-#endif /* CONFIG_USB_GADGET */
+#endif /* CONFIG_USB_GADGET */
}
/* add device */
platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs));
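
msp_usb_setup() above chooses between the EHCI host and the gadget controller based on a PROM environment string, and the comment muses about folding that choice into the "features" key. Stripped of the board details, the selection reduces to a sketch like the one below; the extern names and the exact value tested for are assumptions, not the driver's real identifiers:

#include <linux/string.h>
#include <linux/platform_device.h>

extern char *prom_getenv(char *name);		/* provided by msp_prom.c */

/* Hypothetical stand-ins for the host- and device-mode controllers. */
extern struct platform_device usb_host_pdev;
extern struct platform_device usb_gadget_pdev;

/* Pick host mode unless the PROM explicitly requests device mode. */
static struct platform_device *pick_usb_mode(void)
{
	char *mode = prom_getenv("usbmode");

	if (mode && strcmp(mode, "device") == 0)
		return &usb_gadget_pdev;
	return &usb_host_pdev;
}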
diff --git a/arch/mips/pnx833x/Platform b/arch/mips/pnx833x/Platform
index 7e6ec4dbc8dd..794526caab12 100644
--- a/arch/mips/pnx833x/Platform
+++ b/arch/mips/pnx833x/Platform
@@ -1,5 +1,5 @@
# NXP STB225
platform-$(CONFIG_SOC_PNX833X) += pnx833x/
-cflags-$(CONFIG_SOC_PNX833X) += -Iarch/mips/include/asm/mach-pnx833x
+cflags-$(CONFIG_SOC_PNX833X) += -Iarch/mips/include/asm/mach-pnx833x
load-$(CONFIG_NXP_STB220) += 0xffffffff80001000
load-$(CONFIG_NXP_STB225) += 0xffffffff80001000
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index a86d5d5fceb0..a4a90596c0ad 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -35,64 +35,64 @@ static int mips_cpu_timer_irq;
static const unsigned int irq_prio[PNX833X_PIC_NUM_IRQ] =
{
0, /* unused */
- 4, /* PNX833X_PIC_I2C0_INT 1 */
- 4, /* PNX833X_PIC_I2C1_INT 2 */
- 1, /* PNX833X_PIC_UART0_INT 3 */
- 1, /* PNX833X_PIC_UART1_INT 4 */
- 6, /* PNX833X_PIC_TS_IN0_DV_INT 5 */
- 6, /* PNX833X_PIC_TS_IN0_DMA_INT 6 */
- 7, /* PNX833X_PIC_GPIO_INT 7 */
- 4, /* PNX833X_PIC_AUDIO_DEC_INT 8 */
- 5, /* PNX833X_PIC_VIDEO_DEC_INT 9 */
- 4, /* PNX833X_PIC_CONFIG_INT 10 */
- 4, /* PNX833X_PIC_AOI_INT 11 */
- 9, /* PNX833X_PIC_SYNC_INT 12 */
- 9, /* PNX8335_PIC_SATA_INT 13 */
- 4, /* PNX833X_PIC_OSD_INT 14 */
- 9, /* PNX833X_PIC_DISP1_INT 15 */
- 4, /* PNX833X_PIC_DEINTERLACER_INT 16 */
- 9, /* PNX833X_PIC_DISPLAY2_INT 17 */
- 4, /* PNX833X_PIC_VC_INT 18 */
- 4, /* PNX833X_PIC_SC_INT 19 */
- 9, /* PNX833X_PIC_IDE_INT 20 */
- 9, /* PNX833X_PIC_IDE_DMA_INT 21 */
- 6, /* PNX833X_PIC_TS_IN1_DV_INT 22 */
- 6, /* PNX833X_PIC_TS_IN1_DMA_INT 23 */
- 4, /* PNX833X_PIC_SGDX_DMA_INT 24 */
- 4, /* PNX833X_PIC_TS_OUT_INT 25 */
- 4, /* PNX833X_PIC_IR_INT 26 */
- 3, /* PNX833X_PIC_VMSP1_INT 27 */
- 3, /* PNX833X_PIC_VMSP2_INT 28 */
- 4, /* PNX833X_PIC_PIBC_INT 29 */
- 4, /* PNX833X_PIC_TS_IN0_TRD_INT 30 */
- 4, /* PNX833X_PIC_SGDX_TPD_INT 31 */
- 5, /* PNX833X_PIC_USB_INT 32 */
- 4, /* PNX833X_PIC_TS_IN1_TRD_INT 33 */
- 4, /* PNX833X_PIC_CLOCK_INT 34 */
- 4, /* PNX833X_PIC_SGDX_PARSER_INT 35 */
- 4, /* PNX833X_PIC_VMSP_DMA_INT 36 */
+ 4, /* PNX833X_PIC_I2C0_INT 1 */
+ 4, /* PNX833X_PIC_I2C1_INT 2 */
+ 1, /* PNX833X_PIC_UART0_INT 3 */
+ 1, /* PNX833X_PIC_UART1_INT 4 */
+ 6, /* PNX833X_PIC_TS_IN0_DV_INT 5 */
+ 6, /* PNX833X_PIC_TS_IN0_DMA_INT 6 */
+ 7, /* PNX833X_PIC_GPIO_INT 7 */
+ 4, /* PNX833X_PIC_AUDIO_DEC_INT 8 */
+ 5, /* PNX833X_PIC_VIDEO_DEC_INT 9 */
+ 4, /* PNX833X_PIC_CONFIG_INT 10 */
+ 4, /* PNX833X_PIC_AOI_INT 11 */
+ 9, /* PNX833X_PIC_SYNC_INT 12 */
+ 9, /* PNX8335_PIC_SATA_INT 13 */
+ 4, /* PNX833X_PIC_OSD_INT 14 */
+ 9, /* PNX833X_PIC_DISP1_INT 15 */
+ 4, /* PNX833X_PIC_DEINTERLACER_INT 16 */
+ 9, /* PNX833X_PIC_DISPLAY2_INT 17 */
+ 4, /* PNX833X_PIC_VC_INT 18 */
+ 4, /* PNX833X_PIC_SC_INT 19 */
+ 9, /* PNX833X_PIC_IDE_INT 20 */
+ 9, /* PNX833X_PIC_IDE_DMA_INT 21 */
+ 6, /* PNX833X_PIC_TS_IN1_DV_INT 22 */
+ 6, /* PNX833X_PIC_TS_IN1_DMA_INT 23 */
+ 4, /* PNX833X_PIC_SGDX_DMA_INT 24 */
+ 4, /* PNX833X_PIC_TS_OUT_INT 25 */
+ 4, /* PNX833X_PIC_IR_INT 26 */
+ 3, /* PNX833X_PIC_VMSP1_INT 27 */
+ 3, /* PNX833X_PIC_VMSP2_INT 28 */
+ 4, /* PNX833X_PIC_PIBC_INT 29 */
+ 4, /* PNX833X_PIC_TS_IN0_TRD_INT 30 */
+ 4, /* PNX833X_PIC_SGDX_TPD_INT 31 */
+ 5, /* PNX833X_PIC_USB_INT 32 */
+ 4, /* PNX833X_PIC_TS_IN1_TRD_INT 33 */
+ 4, /* PNX833X_PIC_CLOCK_INT 34 */
+ 4, /* PNX833X_PIC_SGDX_PARSER_INT 35 */
+ 4, /* PNX833X_PIC_VMSP_DMA_INT 36 */
#if defined(CONFIG_SOC_PNX8335)
- 4, /* PNX8335_PIC_MIU_INT 37 */
- 4, /* PNX8335_PIC_AVCHIP_IRQ_INT 38 */
- 9, /* PNX8335_PIC_SYNC_HD_INT 39 */
- 9, /* PNX8335_PIC_DISP_HD_INT 40 */
- 9, /* PNX8335_PIC_DISP_SCALER_INT 41 */
- 4, /* PNX8335_PIC_OSD_HD1_INT 42 */
- 4, /* PNX8335_PIC_DTL_WRITER_Y_INT 43 */
- 4, /* PNX8335_PIC_DTL_WRITER_C_INT 44 */
+ 4, /* PNX8335_PIC_MIU_INT 37 */
+ 4, /* PNX8335_PIC_AVCHIP_IRQ_INT 38 */
+ 9, /* PNX8335_PIC_SYNC_HD_INT 39 */
+ 9, /* PNX8335_PIC_DISP_HD_INT 40 */
+ 9, /* PNX8335_PIC_DISP_SCALER_INT 41 */
+ 4, /* PNX8335_PIC_OSD_HD1_INT 42 */
+ 4, /* PNX8335_PIC_DTL_WRITER_Y_INT 43 */
+ 4, /* PNX8335_PIC_DTL_WRITER_C_INT 44 */
4, /* PNX8335_PIC_DTL_EMULATOR_Y_IR_INT 45 */
4, /* PNX8335_PIC_DTL_EMULATOR_C_IR_INT 46 */
- 4, /* PNX8335_PIC_DENC_TTX_INT 47 */
- 4, /* PNX8335_PIC_MMI_SIF0_INT 48 */
- 4, /* PNX8335_PIC_MMI_SIF1_INT 49 */
- 4, /* PNX8335_PIC_MMI_CDMMU_INT 50 */
- 4, /* PNX8335_PIC_PIBCS_INT 51 */
- 12, /* PNX8335_PIC_ETHERNET_INT 52 */
- 3, /* PNX8335_PIC_VMSP1_0_INT 53 */
- 3, /* PNX8335_PIC_VMSP1_1_INT 54 */
- 4, /* PNX8335_PIC_VMSP1_DMA_INT 55 */
- 4, /* PNX8335_PIC_TDGR_DE_INT 56 */
- 4, /* PNX8335_PIC_IR1_IRQ_INT 57 */
+ 4, /* PNX8335_PIC_DENC_TTX_INT 47 */
+ 4, /* PNX8335_PIC_MMI_SIF0_INT 48 */
+ 4, /* PNX8335_PIC_MMI_SIF1_INT 49 */
+ 4, /* PNX8335_PIC_MMI_CDMMU_INT 50 */
+ 4, /* PNX8335_PIC_PIBCS_INT 51 */
+ 12, /* PNX8335_PIC_ETHERNET_INT 52 */
+ 3, /* PNX8335_PIC_VMSP1_0_INT 53 */
+ 3, /* PNX8335_PIC_VMSP1_1_INT 54 */
+ 4, /* PNX8335_PIC_VMSP1_DMA_INT 55 */
+ 4, /* PNX8335_PIC_TDGR_DE_INT 56 */
+ 4, /* PNX8335_PIC_IR1_IRQ_INT 57 */
#endif
};
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 05a1d922cd60..d22dc0d6f289 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@
#include <irq-mapping.h>
#include <pnx833x.h>
-static u64 uart_dmamask = DMA_BIT_MASK(32);
+static u64 uart_dmamask = DMA_BIT_MASK(32);
static struct resource pnx833x_uart_resources[] = {
[0] = {
@@ -69,7 +69,7 @@ static struct resource pnx833x_uart_resources[] = {
struct pnx8xxx_port pnx8xxx_ports[] = {
[0] = {
- .port = {
+ .port = {
.type = PORT_PNX8XXX,
.iotype = UPIO_MEM,
.membase = (void __iomem *)PNX833X_UART0_PORTS_START,
@@ -82,7 +82,7 @@ struct pnx8xxx_port pnx8xxx_ports[] = {
},
},
[1] = {
- .port = {
+ .port = {
.type = PORT_PNX8XXX,
.iotype = UPIO_MEM,
.membase = (void __iomem *)PNX833X_UART1_PORTS_START,
@@ -108,7 +108,7 @@ static struct platform_device pnx833x_uart_device = {
.resource = pnx833x_uart_resources,
};
-static u64 ehci_dmamask = DMA_BIT_MASK(32);
+static u64 ehci_dmamask = DMA_BIT_MASK(32);
static struct resource pnx833x_usb_ehci_resources[] = {
[0] = {
@@ -183,7 +183,7 @@ static struct platform_device pnx833x_i2c0_device = {
.dev = {
.platform_data = &pnx833x_i2c_dev[0],
},
- .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources),
+ .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources),
.resource = pnx833x_i2c0_resources,
};
@@ -193,7 +193,7 @@ static struct platform_device pnx833x_i2c1_device = {
.dev = {
.platform_data = &pnx833x_i2c_dev[1],
},
- .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources),
+ .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources),
.resource = pnx833x_i2c1_resources,
};
#endif
@@ -217,7 +217,7 @@ static struct platform_device pnx833x_ethernet_device = {
.name = "ip3902-eth",
.id = -1,
.dev = {
- .dma_mask = &ethernet_dmamask,
+ .dma_mask = &ethernet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(pnx833x_ethernet_resources),
@@ -238,8 +238,8 @@ static struct resource pnx833x_sata_resources[] = {
};
static struct platform_device pnx833x_sata_device = {
- .name = "pnx833x-sata",
- .id = -1,
+ .name = "pnx833x-sata",
+ .id = -1,
.num_resources = ARRAY_SIZE(pnx833x_sata_resources),
.resource = pnx833x_sata_resources,
};
@@ -265,7 +265,7 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
.chip_delay = 25,
},
.ctrl = {
- .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl
+ .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl
}
};
@@ -274,17 +274,17 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
* 12 bytes more seems to be the standard that allows for NAND access.
*/
static struct resource pnx833x_flash_nand_resource = {
- .start = PNX8335_NAND_BASE,
- .end = PNX8335_NAND_BASE + 12,
- .flags = IORESOURCE_MEM,
+ .start = PNX8335_NAND_BASE,
+ .end = PNX8335_NAND_BASE + 12,
+ .flags = IORESOURCE_MEM,
};
static struct platform_device pnx833x_flash_nand = {
- .name = "gen_nand",
- .id = -1,
+ .name = "gen_nand",
+ .id = -1,
.num_resources = 1,
.resource = &pnx833x_flash_nand_resource,
- .dev = {
+ .dev = {
.platform_data = &pnx833x_flash_nand_data,
},
};
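
The pnx833x hunks above are alignment fixes inside static platform_device tables; the general shape of such a table-driven registration, with a made-up device name and made-up addresses, is:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static u64 demo_dmamask = DMA_BIT_MASK(32);

static struct resource demo_resources[] = {
	DEFINE_RES_MEM(0x1be40000, 0x1000),	/* assumed register window */
	DEFINE_RES_IRQ(12),			/* assumed interrupt line */
};

static struct platform_device demo_device = {
	.name		= "demo-eth",
	.id		= -1,
	.dev = {
		.dma_mask		= &demo_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(demo_resources),
	.resource	= demo_resources,
};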
diff --git a/arch/mips/pnx833x/common/prom.c b/arch/mips/pnx833x/common/prom.c
index 29969f90a6b0..dfafdd732ca1 100644
--- a/arch/mips/pnx833x/common/prom.c
+++ b/arch/mips/pnx833x/common/prom.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/common/reset.c b/arch/mips/pnx833x/common/reset.c
index e0ea96d29fde..5cc9a9b3601c 100644
--- a/arch/mips/pnx833x/common/reset.c
+++ b/arch/mips/pnx833x/common/reset.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/common/setup.c b/arch/mips/pnx833x/common/setup.c
index e51fbc4b644d..99b4d94236cc 100644
--- a/arch/mips/pnx833x/common/setup.c
+++ b/arch/mips/pnx833x/common/setup.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/stb22x/board.c b/arch/mips/pnx833x/stb22x/board.c
index 4b328ac43050..2ac5203438d8 100644
--- a/arch/mips/pnx833x/stb22x/board.c
+++ b/arch/mips/pnx833x/stb22x/board.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx8550/Makefile b/arch/mips/pnx8550/Makefile
deleted file mode 100644
index 3f7e8561437b..000000000000
--- a/arch/mips/pnx8550/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SOC_PNX8550) += common/
-obj-$(CONFIG_PNX8550_JBS) += jbs/
-obj-$(CONFIG_PNX8550_STB810) += stb810/
diff --git a/arch/mips/pnx8550/Platform b/arch/mips/pnx8550/Platform
deleted file mode 100644
index 0e7fbde768d5..000000000000
--- a/arch/mips/pnx8550/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-platform-$(CONFIG_SOC_PNX8550) += pnx8550/
-
-cflags-$(CONFIG_SOC_PNX8550) += \
- -I$(srctree)/arch/mips/include/asm/mach-pnx8550
-
-load-$(CONFIG_PNX8550_JBS) += 0xffffffff80060000
-load-$(CONFIG_PNX8550_STB810) += 0xffffffff80060000
diff --git a/arch/mips/pnx8550/common/Makefile b/arch/mips/pnx8550/common/Makefile
deleted file mode 100644
index f8ce695dc54f..000000000000
--- a/arch/mips/pnx8550/common/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Per Hallsmark, per.hallsmark@mvista.com
-#
-# ########################################################################
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# #######################################################################
-#
-# Makefile for the PNX8550 specific kernel interface routines
-# under Linux.
-#
-
-obj-y := setup.o prom.o int.o reset.o time.o proc.o platform.o
-obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
deleted file mode 100644
index ec684b8c3f79..000000000000
--- a/arch/mips/pnx8550/common/int.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- * Ported to 2.6.
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- * Copyright (C) 2000, 2001 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- *
- * Cleaned up and bug fixing: Pete Popov, ppopov@embeddedalley.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-/* default prio for interrupts */
-/* first one is a no-no so therefore always prio 0 (disabled) */
-static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
- 0, 1, 1, 1, 1, 15, 1, 1, 1, 1, // 0 - 9
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10 - 19
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20 - 29
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30 - 39
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40 - 49
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, // 50 - 59
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60 - 69
- 1 // 70
-};
-
-static void hw0_irqdispatch(int irq)
-{
- /* find out which interrupt */
- irq = PNX8550_GIC_VECTOR_0 >> 3;
-
- if (irq == 0) {
- printk("hw0_irqdispatch: irq 0, spurious interrupt?\n");
- return;
- }
- do_IRQ(PNX8550_INT_GIC_MIN + irq);
-}
-
-
-static void timer_irqdispatch(int irq)
-{
- irq = (0x01c0 & read_c0_config7()) >> 6;
-
- if (unlikely(irq == 0)) {
- printk("timer_irqdispatch: irq 0, spurious interrupt?\n");
- return;
- }
-
- if (irq & 0x1)
- do_IRQ(PNX8550_INT_TIMER1);
- if (irq & 0x2)
- do_IRQ(PNX8550_INT_TIMER2);
- if (irq & 0x4)
- do_IRQ(PNX8550_INT_TIMER3);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-
- if (pending & STATUSF_IP2)
- hw0_irqdispatch(2);
- else if (pending & STATUSF_IP7) {
- if (read_c0_config7() & 0x01c0)
- timer_irqdispatch(7);
- } else
- spurious_interrupt();
-}
-
-static inline void modify_cp0_intmask(unsigned clr_mask, unsigned set_mask)
-{
- unsigned long status = read_c0_status();
-
- status &= ~((clr_mask & 0xFF) << 8);
- status |= (set_mask & 0xFF) << 8;
-
- write_c0_status(status);
-}
-
-static inline void mask_gic_int(unsigned int irq_nr)
-{
- /* interrupt disabled, bit 26(WE_ENABLE)=1 and bit 16(enable)=0 */
- PNX8550_GIC_REQ(irq_nr) = 1<<28; /* set priority to 0 */
-}
-
-static inline void unmask_gic_int(unsigned int irq_nr)
-{
- /* set prio mask to lower four bits and enable interrupt */
- PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr];
-}
-
-static inline void mask_irq(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq;
-
- if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
- modify_cp0_intmask(1 << irq_nr, 0);
- } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_GIC_MAX)) {
- mask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
- } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_TIMER_MAX)) {
- modify_cp0_intmask(1 << 7, 0);
- } else {
- printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
- }
-}
-
-static inline void unmask_irq(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq;
-
- if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
- modify_cp0_intmask(0, 1 << irq_nr);
- } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_GIC_MAX)) {
- unmask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
- } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_TIMER_MAX)) {
- modify_cp0_intmask(0, 1 << 7);
- } else {
- printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
- }
-}
-
-int pnx8550_set_gic_priority(int irq, int priority)
-{
- int gic_irq = irq-PNX8550_INT_GIC_MIN;
- int prev_priority = PNX8550_GIC_REQ(gic_irq) & 0xf;
-
- gic_prio[gic_irq] = priority;
- PNX8550_GIC_REQ(gic_irq) |= (0x10000000 | gic_prio[gic_irq]);
-
- return prev_priority;
-}
-
-static struct irq_chip level_irq_type = {
- .name = "PNX Level IRQ",
- .irq_mask = mask_irq,
- .irq_unmask = unmask_irq,
-};
-
-static struct irqaction gic_action = {
- .handler = no_action,
- .flags = IRQF_NO_THREAD,
- .name = "GIC",
-};
-
-static struct irqaction timer_action = {
- .handler = no_action,
- .flags = IRQF_TIMER,
- .name = "Timer",
-};
-
-void __init arch_init_irq(void)
-{
- int i;
- int configPR;
-
- for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
- /* init of GIC/IPC interrupts */
- /* should be done before cp0 since cp0 init enables the GIC int */
- for (i = PNX8550_INT_GIC_MIN; i <= PNX8550_INT_GIC_MAX; i++) {
- int gic_int_line = i - PNX8550_INT_GIC_MIN;
- if (gic_int_line == 0 )
- continue; // don't fiddle with int 0
- /*
- * enable change of TARGET, ENABLE and ACTIVE_LOW bits
- * set TARGET 0 to route through hw0 interrupt
- * set ACTIVE_LOW 0 active high (correct?)
- *
- * We really should setup an interrupt description table
- * to do this nicely.
- * Note, PCI INTA is active low on the bus, but inverted
- * in the GIC, so to us it's active high.
- */
- PNX8550_GIC_REQ(i - PNX8550_INT_GIC_MIN) = 0x1E000000;
-
- /* mask/priority is still 0 so we will not get any
- * interrupts until it is unmasked */
-
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
- }
-
- /* Priority level 0 */
- PNX8550_GIC_PRIMASK_0 = PNX8550_GIC_PRIMASK_1 = 0;
-
- /* Set int vector table address */
- PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
-
- irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
- handle_level_irq);
- setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
-
- /* init of Timer interrupts */
- for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
- /* Stop Timer 1-3 */
- configPR = read_c0_config7();
- configPR |= 0x00000038;
- write_c0_config7(configPR);
-
- irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
- handle_level_irq);
- setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
-}
-
-EXPORT_SYMBOL(pnx8550_set_gic_priority);
diff --git a/arch/mips/pnx8550/common/pci.c b/arch/mips/pnx8550/common/pci.c
deleted file mode 100644
index 98e86ddb86cc..000000000000
--- a/arch/mips/pnx8550/common/pci.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <pci.h>
-#include <glb.h>
-#include <nand.h>
-
-static struct resource pci_io_resource = {
- .start = PNX8550_PCIIO + 0x1000, /* reserve regacy I/O space */
- .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
- .name = "pci IO space",
- .flags = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
- .start = PNX8550_PCIMEM,
- .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
- .name = "pci memory space",
- .flags = IORESOURCE_MEM
-};
-
-extern struct pci_ops pnx8550_pci_ops;
-
-static struct pci_controller pnx8550_controller = {
- .pci_ops = &pnx8550_pci_ops,
- .io_map_base = PNX8550_PORT_BASE,
- .io_resource = &pci_io_resource,
- .mem_resource = &pci_mem_resource,
-};
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-static inline unsigned long get_system_mem_size(void)
-{
- /* Read IP2031_RANK0_ADDR_LO */
- unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
- /* Read IP2031_RANK1_ADDR_HI */
- unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
- return dram_r1_hi - dram_r0_lo + 1;
-}
-
-static int __init pnx8550_pci_setup(void)
-{
- int pci_mem_code;
- int mem_size = get_system_mem_size() >> 20;
-
- /* Clear the Global 2 Register, PCI Inta Output Enable Registers
- Bit 1:Enable DAC Powerdown
- -> 0:DACs are enabled and are working normally
- 1:DACs are powerdown
- Bit 0:Enable of PCI inta output
- -> 0 = Disable PCI inta output
- 1 = Enable PCI inta output
- */
- PNX8550_GLB2_ENAB_INTA_O = 0;
-
- /* Calc the PCI mem size code */
- if (mem_size >= 128)
- pci_mem_code = SIZE_128M;
- else if (mem_size >= 64)
- pci_mem_code = SIZE_64M;
- else if (mem_size >= 32)
- pci_mem_code = SIZE_32M;
- else
- pci_mem_code = SIZE_16M;
-
- /* Set PCI_XIO registers */
- outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
- outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
- outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
- outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
-
- /* Send memory transaction via PCI_BASE2 */
- outl(0x00000001, PCI_BASE | PCI_IO);
-
- /* Unlock the setup register */
- outl(0xca, PCI_BASE | PCI_UNLOCKREG);
-
- /*
- * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
- * to work, and in order for bus_to_baddr to work without any
- * hacks.
- */
- outl(0x00000000, PCI_BASE | PCI_BASE10);
-
- /*
- *These two bars are set by default or the boot code.
- * However, it's safer to set them here so we're not boot
- * code dependent.
- */
- outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */
- outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */
-
- outl(PCI_EN_TA |
- PCI_EN_PCI2MMI |
- PCI_EN_XIO |
- PCI_SETUP_BASE18_SIZE(SIZE_32M) |
- PCI_SETUP_BASE18_EN |
- PCI_SETUP_BASE14_EN |
- PCI_SETUP_BASE10_PREF |
- PCI_SETUP_BASE10_SIZE(pci_mem_code) |
- PCI_SETUP_CFGMANAGE_EN |
- PCI_SETUP_PCIARB_EN,
- PCI_BASE |
- PCI_SETUP); /* PCI_SETUP */
- outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */
-
- register_pci_controller(&pnx8550_controller);
-
- return 0;
-}
-
-arch_initcall(pnx8550_pci_setup);
diff --git a/arch/mips/pnx8550/common/platform.c b/arch/mips/pnx8550/common/platform.c
deleted file mode 100644
index 0a8faeaa7b70..000000000000
--- a/arch/mips/pnx8550/common/platform.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Platform device support for NXP PNX8550 SoCs
- *
- * Copyright 2005, Embedded Alley Solutions, Inc
- *
- * Based on arch/mips/au1000/common/platform.c
- * Platform device support for Au1x00 SoCs.
- *
- * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/platform_device.h>
-#include <linux/usb/ohci_pdriver.h>
-
-#include <int.h>
-#include <usb.h>
-#include <uart.h>
-
-static struct resource pnx8550_usb_ohci_resources[] = {
- [0] = {
- .start = PNX8550_USB_OHCI_OP_BASE,
- .end = PNX8550_USB_OHCI_OP_BASE +
- PNX8550_USB_OHCI_OP_LEN,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PNX8550_INT_USB,
- .end = PNX8550_INT_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pnx8550_uart_resources[] = {
- [0] = {
- .start = PNX8550_UART_PORT0,
- .end = PNX8550_UART_PORT0 + 0xfff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PNX8550_UART_INT(0),
- .end = PNX8550_UART_INT(0),
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = PNX8550_UART_PORT1,
- .end = PNX8550_UART_PORT1 + 0xfff,
- .flags = IORESOURCE_MEM,
- },
- [3] = {
- .start = PNX8550_UART_INT(1),
- .end = PNX8550_UART_INT(1),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct pnx8xxx_port pnx8xxx_ports[] = {
- [0] = {
- .port = {
- .type = PORT_PNX8XXX,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)PNX8550_UART_PORT0,
- .mapbase = PNX8550_UART_PORT0,
- .irq = PNX8550_UART_INT(0),
- .uartclk = 3692300,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- },
- },
- [1] = {
- .port = {
- .type = PORT_PNX8XXX,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)PNX8550_UART_PORT1,
- .mapbase = PNX8550_UART_PORT1,
- .irq = PNX8550_UART_INT(1),
- .uartclk = 3692300,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- },
- },
-};
-
-/* The dmamask must be set for OHCI to work */
-static u64 ohci_dmamask = DMA_BIT_MASK(32);
-
-static u64 uart_dmamask = DMA_BIT_MASK(32);
-
-static int pnx8550_usb_ohci_power_on(struct platform_device *pdev)
-{
- /*
- * Set register CLK48CTL to enable and 48MHz
- */
- outl(0x00000003, PCI_BASE | 0x0004770c);
-
- /*
- * Set register CLK12CTL to enable and 48MHz
- */
- outl(0x00000003, PCI_BASE | 0x00047710);
-
- udelay(100);
-
- return 0;
-}
-
-static void pnx8550_usb_ohci_power_off(struct platform_device *pdev)
-{
- udelay(10);
-}
-
-static struct usb_ohci_pdata pnx8550_usb_ohci_pdata = {
- .power_on = pnx8550_usb_ohci_power_on,
- .power_off = pnx8550_usb_ohci_power_off,
-};
-
-static struct platform_device pnx8550_usb_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &pnx8550_usb_ohci_pdata,
- },
- .num_resources = ARRAY_SIZE(pnx8550_usb_ohci_resources),
- .resource = pnx8550_usb_ohci_resources,
-};
-
-static struct platform_device pnx8550_uart_device = {
- .name = "pnx8xxx-uart",
- .id = -1,
- .dev = {
- .dma_mask = &uart_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = pnx8xxx_ports,
- },
- .num_resources = ARRAY_SIZE(pnx8550_uart_resources),
- .resource = pnx8550_uart_resources,
-};
-
-static struct platform_device *pnx8550_platform_devices[] __initdata = {
- &pnx8550_usb_ohci_device,
- &pnx8550_uart_device,
-};
-
-static int __init pnx8550_platform_init(void)
-{
- return platform_add_devices(pnx8550_platform_devices,
- ARRAY_SIZE(pnx8550_platform_devices));
-}
-
-arch_initcall(pnx8550_platform_init);
diff --git a/arch/mips/pnx8550/common/proc.c b/arch/mips/pnx8550/common/proc.c
deleted file mode 100644
index 3bba5ec828e8..000000000000
--- a/arch/mips/pnx8550/common/proc.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-
-static int pnx8550_timers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
- int len = 0;
- int configPR = read_c0_config7();
-
- if (offset==0) {
- len += sprintf(&page[len], "Timer: count, compare, tc, status\n");
- len += sprintf(&page[len], " 1: %11i, %8i, %1i, %s\n",
- read_c0_count(), read_c0_compare(),
- (configPR>>6)&0x1, ((configPR>>3)&0x1)? "off":"on");
- len += sprintf(&page[len], " 2: %11i, %8i, %1i, %s\n",
- read_c0_count2(), read_c0_compare2(),
- (configPR>>7)&0x1, ((configPR>>4)&0x1)? "off":"on");
- len += sprintf(&page[len], " 3: %11i, %8i, %1i, %s\n",
- read_c0_count3(), read_c0_compare3(),
- (configPR>>8)&0x1, ((configPR>>5)&0x1)? "off":"on");
- }
-
- return len;
-}
-
-static int pnx8550_registers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
- int len = 0;
-
- if (offset==0) {
- len += sprintf(&page[len], "config1: %#10.8x\n", read_c0_config1());
- len += sprintf(&page[len], "config2: %#10.8x\n", read_c0_config2());
- len += sprintf(&page[len], "config3: %#10.8x\n", read_c0_config3());
- len += sprintf(&page[len], "configPR: %#10.8x\n", read_c0_config7());
- len += sprintf(&page[len], "status: %#10.8x\n", read_c0_status());
- len += sprintf(&page[len], "cause: %#10.8x\n", read_c0_cause());
- len += sprintf(&page[len], "count: %#10.8x\n", read_c0_count());
- len += sprintf(&page[len], "count_2: %#10.8x\n", read_c0_count2());
- len += sprintf(&page[len], "count_3: %#10.8x\n", read_c0_count3());
- len += sprintf(&page[len], "compare: %#10.8x\n", read_c0_compare());
- len += sprintf(&page[len], "compare_2: %#10.8x\n", read_c0_compare2());
- len += sprintf(&page[len], "compare_3: %#10.8x\n", read_c0_compare3());
- }
-
- return len;
-}
-
-static struct proc_dir_entry* pnx8550_dir;
-static struct proc_dir_entry* pnx8550_timers;
-static struct proc_dir_entry* pnx8550_registers;
-
-static int pnx8550_proc_init( void )
-{
-
- // Create /proc/pnx8550
- pnx8550_dir = proc_mkdir("pnx8550", NULL);
- if (!pnx8550_dir) {
- printk(KERN_ERR "Can't create pnx8550 proc dir\n");
- return -1;
- }
-
- // Create /proc/pnx8550/timers
- pnx8550_timers = create_proc_read_entry(
- "timers",
- 0,
- pnx8550_dir,
- pnx8550_timers_read,
- NULL);
-
- if (!pnx8550_timers)
- printk(KERN_ERR "Can't create pnx8550 timers proc file\n");
-
- // Create /proc/pnx8550/registers
- pnx8550_registers = create_proc_read_entry(
- "registers",
- 0,
- pnx8550_dir,
- pnx8550_registers_read,
- NULL);
-
- if (!pnx8550_registers)
- printk(KERN_ERR "Can't create pnx8550 registers proc file\n");
-
- return 0;
-}
-
-__initcall(pnx8550_proc_init);
diff --git a/arch/mips/pnx8550/common/prom.c b/arch/mips/pnx8550/common/prom.c
deleted file mode 100644
index 49639e8120d8..000000000000
--- a/arch/mips/pnx8550/common/prom.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- *
- * Based on jmr3927/common/prom.c
- *
- * 2004 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/serial_pnx8xxx.h>
-
-#include <asm/bootinfo.h>
-#include <uart.h>
-
-/* #define DEBUG_CMDLINE */
-
-extern int prom_argc;
-extern char **prom_argv, **prom_envp;
-
-typedef struct
-{
- char *name;
-/* char *val; */
-}t_env_var;
-
-
-char * __init prom_getcmdline(void)
-{
- return &(arcs_cmdline[0]);
-}
-
-void __init prom_init_cmdline(void)
-{
- int i;
-
- arcs_cmdline[0] = '\0';
- for (i = 0; i < prom_argc; i++) {
- strcat(arcs_cmdline, prom_argv[i]);
- strcat(arcs_cmdline, " ");
- }
-}
-
-char *prom_getenv(char *envname)
-{
- /*
- * Return a pointer to the given environment variable.
- * Environment variables are stored in the form of "memsize=64".
- */
-
- t_env_var *env = (t_env_var *)prom_envp;
- int i;
-
- i = strlen(envname);
-
- while(env->name) {
- if(strncmp(envname, env->name, i) == 0) {
- return(env->name + strlen(envname) + 1);
- }
- env++;
- }
- return(NULL);
-}
-
-inline unsigned char str2hexnum(unsigned char c)
-{
- if(c >= '0' && c <= '9')
- return c - '0';
- if(c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- if(c >= 'A' && c <= 'F')
- return c - 'A' + 10;
- return 0; /* foo */
-}
-
-inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
- int i;
-
- for(i = 0; i < 6; i++) {
- unsigned char num;
-
- if((*str == '.') || (*str == ':'))
- str++;
- num = str2hexnum(*str++) << 4;
- num |= (str2hexnum(*str++));
- ea[i] = num;
- }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
- char *ethaddr_str;
-
- ethaddr_str = prom_getenv("ethaddr");
- if (!ethaddr_str) {
- printk("ethaddr not set in boot prom\n");
- return -1;
- }
- str2eaddr(ethernet_addr, ethaddr_str);
- return 0;
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-extern int pnx8550_console_port;
-
-/* used by early printk */
-void prom_putchar(char c)
-{
- if (pnx8550_console_port != -1) {
- /* Wait until FIFO not full */
- while( ((ip3106_fifo(UART_BASE, pnx8550_console_port) & PNX8XXX_UART_FIFO_TXFIFO) >> 16) >= 16)
- ;
- /* Send one char */
- ip3106_fifo(UART_BASE, pnx8550_console_port) = c;
- }
-}
-
-EXPORT_SYMBOL(get_ethernet_addr);
-EXPORT_SYMBOL(str2eaddr);
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c
deleted file mode 100644
index e7a12ff304b9..000000000000
--- a/arch/mips/pnx8550/common/reset.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * Reset the PNX8550 board.
- *
- */
-#include <linux/kernel.h>
-
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <glb.h>
-
-void pnx8550_machine_restart(char *command)
-{
- PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
-}
-
-void pnx8550_machine_halt(void)
-{
- while (1) {
- if (cpu_wait)
- cpu_wait();
- }
-}
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c
deleted file mode 100644
index fccd6b0c6d3f..000000000000
--- a/arch/mips/pnx8550/common/setup.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- * Based on Per Hallsmark, per.hallsmark@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/pm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-#include <asm/time.h>
-
-#include <glb.h>
-#include <int.h>
-#include <pci.h>
-#include <uart.h>
-#include <nand.h>
-
-extern void __init board_setup(void);
-extern void pnx8550_machine_restart(char *);
-extern void pnx8550_machine_halt(void);
-extern struct resource ioport_resource;
-extern struct resource iomem_resource;
-extern char *prom_getcmdline(void);
-
-struct resource standard_io_resources[] = {
- {
- .start = 0x00,
- .end = 0x1f,
- .name = "dma1",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0x40,
- .end = 0x5f,
- .name = "timer",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0x80,
- .end = 0x8f,
- .name = "dma page reg",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0xc0,
- .end = 0xdf,
- .name = "dma2",
- .flags = IORESOURCE_BUSY
- },
-};
-
-#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
-
-extern struct resource pci_io_resource;
-extern struct resource pci_mem_resource;
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-unsigned long get_system_mem_size(void)
-{
- /* Read IP2031_RANK0_ADDR_LO */
- unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
- /* Read IP2031_RANK1_ADDR_HI */
- unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
- return dram_r1_hi - dram_r0_lo + 1;
-}
-
-int pnx8550_console_port = -1;
-
-void __init plat_mem_setup(void)
-{
- int i;
- char* argptr;
-
- board_setup(); /* board specific setup */
-
- _machine_restart = pnx8550_machine_restart;
- _machine_halt = pnx8550_machine_halt;
- pm_power_off = pnx8550_machine_halt;
-
- /* Clear the Global 2 Register, PCI Inta Output Enable Registers
- Bit 1:Enable DAC Powerdown
- -> 0:DACs are enabled and are working normally
- 1:DACs are powerdown
- Bit 0:Enable of PCI inta output
- -> 0 = Disable PCI inta output
- 1 = Enable PCI inta output
- */
- PNX8550_GLB2_ENAB_INTA_O = 0;
-
- /* IO/MEM resources. */
- set_io_port_base(PNX8550_PORT_BASE);
- ioport_resource.start = 0;
- ioport_resource.end = ~0;
- iomem_resource.start = 0;
- iomem_resource.end = ~0;
-
- /* Request I/O space for devices on this board */
- for (i = 0; i < STANDARD_IO_RESOURCES; i++)
- request_resource(&ioport_resource, standard_io_resources + i);
-
- /* Place the Mode Control bit for GPIO pin 16 in primary function */
- /* Pin 16 is used by UART1, UA1_TX */
- outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
- (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
- PNX8550_GPIO_MC1);
-
- argptr = prom_getcmdline();
- if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
- argptr += strlen("console=ttyS");
- pnx8550_console_port = *argptr == '0' ? 0 : 1;
-
- /* We must initialize the UART (console) before early printk */
- /* Set LCR to 8-bit and BAUD to 38400 (no 5) */
- ip3106_lcr(UART_BASE, pnx8550_console_port) =
- PNX8XXX_UART_LCR_8BIT;
- ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
- }
-}
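
The removed plat_mem_setup() above picks the early-console UART by scanning the kernel command line for "console=ttyS<n>" and treating anything other than '0' as port 1. A minimal user-space sketch of that selection, with an illustrative command line (the board-specific LCR/baud programming is omitted):

#include <stdio.h>
#include <string.h>

/* Return 0 or 1 for "console=ttyS0"/"console=ttyS1", or -1 if absent. */
static int console_port(const char *cmdline)
{
	const char *p = strstr(cmdline, "console=ttyS");

	if (!p)
		return -1;
	p += strlen("console=ttyS");
	return (*p == '0') ? 0 : 1;	/* same two-port assumption as the removed code */
}

int main(void)
{
	/* Illustrative command line, not taken from the patch. */
	const char *cmdline = "root=/dev/nfs console=ttyS1,38400";

	printf("console port: %d\n", console_port(cmdline));
	return 0;
}
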
diff --git a/arch/mips/pnx8550/common/time.c b/arch/mips/pnx8550/common/time.c
deleted file mode 100644
index 831d6b369e9c..000000000000
--- a/arch/mips/pnx8550/common/time.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright 2001, 2002, 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
- *
- * Common time service routines for MIPS machines. See
- * Documents/MIPS/README.txt.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/param.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-#include <asm/time.h>
-#include <asm/hardirq.h>
-#include <asm/div64.h>
-#include <asm/debug.h>
-
-#include <int.h>
-#include <cm.h>
-
-static unsigned long cpj;
-
-static cycle_t hpt_read(struct clocksource *cs)
-{
- return read_c0_count2();
-}
-
-static struct clocksource pnx_clocksource = {
- .name = "pnx8xxx",
- .rating = 200,
- .read = hpt_read,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static irqreturn_t pnx8xxx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *c = dev_id;
-
- /* clear MATCH, signal the event */
- c->event_handler(c);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction pnx8xxx_timer_irq = {
- .handler = pnx8xxx_timer_interrupt,
- .flags = IRQF_PERCPU | IRQF_TIMER,
- .name = "pnx8xxx_timer",
-};
-
-static irqreturn_t monotonic_interrupt(int irq, void *dev_id)
-{
- /* Timer 2 clear interrupt */
- write_c0_compare2(-1);
- return IRQ_HANDLED;
-}
-
-static struct irqaction monotonic_irqaction = {
- .handler = monotonic_interrupt,
- .flags = IRQF_TIMER,
- .name = "Monotonic timer",
-};
-
-static int pnx8xxx_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- write_c0_compare(delta);
- return 0;
-}
-
-static struct clock_event_device pnx8xxx_clockevent = {
- .name = "pnx8xxx_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = pnx8xxx_set_next_event,
-};
-
-static inline void timer_ack(void)
-{
- write_c0_compare(cpj);
-}
-
-__init void plat_time_init(void)
-{
- unsigned int configPR;
- unsigned int n;
- unsigned int m;
- unsigned int p;
- unsigned int pow2p;
-
- pnx8xxx_clockevent.cpumask = cpu_none_mask;
- clockevents_register_device(&pnx8xxx_clockevent);
- clocksource_register(&pnx_clocksource);
-
- /* Timer 1 start */
- configPR = read_c0_config7();
- configPR &= ~0x00000008;
- write_c0_config7(configPR);
-
- /* Timer 2 start */
- configPR = read_c0_config7();
- configPR &= ~0x00000010;
- write_c0_config7(configPR);
-
- /* Timer 3 stop */
- configPR = read_c0_config7();
- configPR |= 0x00000020;
- write_c0_config7(configPR);
-
-
- /* PLL0 sets MIPS clock (PLL1 <=> TM1, PLL6 <=> TM2, PLL5 <=> mem) */
- /* (but only if CLK_MIPS_CTL select value [bits 3:1] is 1: FIXME) */
-
- n = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_N_MASK) >> 16;
- m = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_M_MASK) >> 8;
- p = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_P_MASK) >> 2;
- pow2p = (1 << p);
-
- db_assert(m != 0 && pow2p != 0);
-
- /*
- * Compute the frequency as in the PNX8550 User Manual 1.0, p.186
- * (a.k.a. 8-10). Divide by HZ for a timer offset that results in
- * HZ timer interrupts per second.
- */
- mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
- cpj = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
- write_c0_count(0);
- timer_ack();
-
- /* Setup Timer 2 */
- write_c0_count2(0);
- write_c0_compare2(0xffffffff);
-
- setup_irq(PNX8550_INT_TIMER1, &pnx8xxx_timer_irq);
- setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
-}
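
The removed plat_time_init() above derives the MIPS counter frequency from the PLL0 N/M/P fields as 27 MHz * N / (M * 2^P), then divides by HZ to get the per-tick compare offset. A small hosted-C sketch of that arithmetic, with placeholder field values rather than real PNX8550_CM_PLL0_CTL contents:

#include <stdio.h>

#define HZ 100	/* illustrative tick rate */

int main(void)
{
	/* Placeholder PLL0 fields; the real values come from PNX8550_CM_PLL0_CTL. */
	unsigned int n = 40, m = 3, p = 1;
	unsigned int pow2p = 1u << p;

	/* Same expression as the removed code: 27 * ((1000000 * n) / (m * 2^p)) */
	unsigned long hpt_freq = 27UL * ((1000000UL * n) / (m * pow2p));
	unsigned long cpj = (hpt_freq + HZ / 2) / HZ;	/* DIV_ROUND_CLOSEST(freq, HZ) */

	printf("counter frequency: %lu Hz\n", hpt_freq);
	printf("cycles per jiffy:  %lu\n", cpj);
	return 0;
}
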
diff --git a/arch/mips/pnx8550/jbs/Makefile b/arch/mips/pnx8550/jbs/Makefile
deleted file mode 100644
index c4dc3d53eb5c..000000000000
--- a/arch/mips/pnx8550/jbs/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP JBS Board.
-
-obj-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/jbs/board_setup.c b/arch/mips/pnx8550/jbs/board_setup.c
deleted file mode 100644
index 57dd903ca408..000000000000
--- a/arch/mips/pnx8550/jbs/board_setup.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * JBS Specific board startup routines.
- *
- * Copyright 2005, Embedded Alley Solutions, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
-
-void __init board_setup(void)
-{
- unsigned long configpr;
-
- configpr = read_c0_config7();
- configpr |= (1<<19); /* enable tlb */
- write_c0_config7(configpr);
- BARRIER;
-}
diff --git a/arch/mips/pnx8550/jbs/init.c b/arch/mips/pnx8550/jbs/init.c
deleted file mode 100644
index d59b4a4e5e8b..000000000000
--- a/arch/mips/pnx8550/jbs/init.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddedalley.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
- return "NXP PNX8550/JBS";
-}
-
-void __init prom_init(void)
-{
- unsigned long memsize;
-
- //memsize = 0x02800000; /* Trimedia uses memory above */
- memsize = 0x08000000; /* Trimedia uses memory above */
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/pnx8550/jbs/irqmap.c b/arch/mips/pnx8550/jbs/irqmap.c
deleted file mode 100644
index 7fc89842002c..000000000000
--- a/arch/mips/pnx8550/jbs/irqmap.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * NXP JBS board irqmap.
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddealley.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
- [8] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [9] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [17] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/Makefile b/arch/mips/pnx8550/stb810/Makefile
deleted file mode 100644
index cb4ff022f1fb..000000000000
--- a/arch/mips/pnx8550/stb810/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP STB810 Board.
-
-obj-y := prom_init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/stb810/board_setup.c b/arch/mips/pnx8550/stb810/board_setup.c
deleted file mode 100644
index af2a55e0b4e9..000000000000
--- a/arch/mips/pnx8550/stb810/board_setup.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * STB810 specific board startup routines.
- *
- * Based on the arch/mips/nxp/pnx8550/jbs/board_setup.c
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-void __init board_setup(void)
-{
- unsigned long configpr;
-
- configpr = read_c0_config7();
- configpr |= (1<<19); /* enable tlb */
- write_c0_config7(configpr);
-}
diff --git a/arch/mips/pnx8550/stb810/irqmap.c b/arch/mips/pnx8550/stb810/irqmap.c
deleted file mode 100644
index 8c034963ddcd..000000000000
--- a/arch/mips/pnx8550/stb810/irqmap.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * NXP STB810 board irqmap.
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
- [8] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [9] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [10] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/prom_init.c b/arch/mips/pnx8550/stb810/prom_init.c
deleted file mode 100644
index ca7f4ada0640..000000000000
--- a/arch/mips/pnx8550/stb810/prom_init.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * STB810 specific prom routines
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
- return "NXP PNX8950/STB810";
-}
-
-void __init prom_init(void)
-{
- unsigned long memsize;
-
- prom_argc = (int) fw_arg0;
- prom_argv = (char **) fw_arg1;
- prom_envp = (char **) fw_arg2;
-
- prom_init_cmdline();
-
- memsize = 0x08000000; /* Trimedia uses memory above */
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index 26a6ef19d71f..521e5963df05 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <huhb@lemote.com>
- * Wu Zhangjin <wuzhangjin@gmail.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/suspend.h>
#include <asm/fpu.h>
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 61e2558a2dcb..7e0277a1048f 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <huhb@lemote.com>
- * Wu Zhangjin <wuzhangjin@gmail.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
index 7773f3d956b0..2f539b43f56b 100644
--- a/arch/mips/powertv/asic/asic-calliope.c
+++ b/arch/mips/powertv/asic/asic-calliope.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
@@ -90,12 +90,12 @@ const struct register_map calliope_register_map __initconst = {
.usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
.usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
- .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */
+ .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */
.tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
.tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
.gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
.gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
.gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
.watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
- .front_panel = {.phys = 0x000000}, /* -not used- */
+ .front_panel = {.phys = 0x000000}, /* -not used- */
};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
index da076db7b7ed..7f8f3429b35a 100644
--- a/arch/mips/powertv/asic/asic-cronus.c
+++ b/arch/mips/powertv/asic/asic-cronus.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
index 47683b370e74..1265b49012e6 100644
--- a/arch/mips/powertv/asic/asic-gaia.c
+++ b/arch/mips/powertv/asic/asic-gaia.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn
+ * Author: David VomLehn
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
index 6ff4b10f09da..14e7de137e03 100644
--- a/arch/mips/powertv/asic/asic-zeus.c
+++ b/arch/mips/powertv/asic/asic-zeus.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index bce1872249ba..d38b095fd0d0 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -1,6 +1,6 @@
/*
*
- * Description: Defines the platform resources for Gaia-based settops.
+ * Description: Defines the platform resources for Gaia-based settops.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
@@ -90,12 +90,12 @@ struct resource asic_resource = {
/*
* Allow override of bootloader-specified model
- * Returns zero on success, a negative errno value on failure. This parameter
+ * Returns zero on success, a negative errno value on failure. This parameter
* allows overriding of the bootloader-specified model.
*/
static char __initdata cmdline[COMMAND_LINE_SIZE];
-#define FORCEFAMILY_PARAM "forcefamily"
+#define FORCEFAMILY_PARAM "forcefamily"
/*
* check_forcefamily - check for, and parse, forcefamily command line parameter
@@ -486,7 +486,7 @@ static void __init pmem_setup_resource(void)
resource->start = phys_to_dma(pmemaddr - 0x80000000);
resource->end = resource->start + pmemlen - 1;
- pr_info("persistent memory: start=0x%x end=0x%x\n",
+ pr_info("persistent memory: start=0x%x end=0x%x\n",
resource->start, resource->end);
}
}
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
index 99d82e10000b..f44cd9295cae 100644
--- a/arch/mips/powertv/asic/asic_int.c
+++ b/arch/mips/powertv/asic/asic_int.c
@@ -2,7 +2,7 @@
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009 Cisco Systems, Inc.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
@@ -64,7 +64,7 @@ static void asic_irqdispatch(void)
irq = get_int();
if (irq < 0)
- return; /* interrupt has already been cleared */
+ return; /* interrupt has already been cleared */
do_IRQ(irq);
}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
index fa9ae9584710..9344902dc586 100644
--- a/arch/mips/powertv/asic/irq_asic.c
+++ b/arch/mips/powertv/asic/irq_asic.c
@@ -5,8 +5,8 @@
* Modified from arch/mips/kernel/irq-rm7000.c:
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
index 3fc5d46687a9..98dc51650577 100644
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -153,7 +153,7 @@ struct resource non_dvr_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -260,7 +260,7 @@ struct resource non_dvr_vze_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -380,6 +380,6 @@ struct resource non_dvr_vzf_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
index c532b50521ec..7c6ce7596935 100644
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_cronus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -185,7 +185,7 @@ struct resource dvr_cronus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -241,7 +241,7 @@ struct resource non_dvr_cronus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -335,6 +335,6 @@ struct resource non_dvr_cronus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
index b5537e49e7f5..a7937ba7b4c0 100644
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -65,7 +65,7 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -169,6 +169,6 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-gaia.c b/arch/mips/powertv/asic/prealloc-gaia.c
index 8ac8c7aeb986..2303bbfe6b82 100644
--- a/arch/mips/powertv/asic/prealloc-gaia.c
+++ b/arch/mips/powertv/asic/prealloc-gaia.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn
+ * Author: David VomLehn
*/
#include <linux/init.h>
@@ -33,22 +33,22 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -56,22 +56,22 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
},
{
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
},
{
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -87,28 +87,28 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -119,16 +119,16 @@ struct resource dvr_gaia_resources[] __initdata = {
* Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*
*/
{
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -141,10 +141,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -157,10 +157,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -173,10 +173,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -189,10 +189,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x00280000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -205,10 +205,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -221,10 +221,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -237,11 +237,11 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ITFS",
- .start = 0x64180000,
+ .name = "ITFS",
+ .start = 0x64180000,
/* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x6430DFFF,
- .flags = IORESOURCE_IO,
+ .end = 0x6430DFFF,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -254,17 +254,17 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
/* (945K * 8) = (128K *3) 5 playbacks / 3 server */
- .end = 0x64AD0000 - 1,
- .flags = IORESOURCE_IO,
+ .end = 0x64AD0000 - 1,
+ .flags = IORESOURCE_IO,
},
{
- .name = "AvfsFileSys",
- .start = 0x64AD0000,
- .end = 0x64AD1000 - 1, /* 4K */
- .flags = IORESOURCE_IO,
+ .name = "AvfsFileSys",
+ .start = 0x64AD0000,
+ .end = 0x64AD1000 - 1, /* 4K */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -277,10 +277,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -290,22 +290,22 @@ struct resource dvr_gaia_resources[] __initdata = {
* NP IPC - must be video bank 2
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
},
/*
* Add other resources here
@@ -323,22 +323,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -346,22 +346,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
},
{
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
},
{
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -377,28 +377,28 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -409,16 +409,16 @@ struct resource non_dvr_gaia_resources[] __initdata = {
* Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*
*/
{
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -431,10 +431,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -447,10 +447,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -463,10 +463,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -479,10 +479,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -495,10 +495,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -511,10 +511,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -523,10 +523,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
+ .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -539,10 +539,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -555,10 +555,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -568,22 +568,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
* NP IPC - must be video bank 2
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
},
{ },
};
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
index 96480a2395c0..6e76f09c68d6 100644
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_zeus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -175,7 +175,7 @@ struct resource dvr_zeus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -299,6 +299,6 @@ struct resource non_dvr_zeus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index c6979353980b..5bd9d8f468cc 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
diff --git a/arch/mips/powertv/ioremap.c b/arch/mips/powertv/ioremap.c
index a77c6f62fe23..d060478aab03 100644
--- a/arch/mips/powertv/ioremap.c
+++ b/arch/mips/powertv/ioremap.c
@@ -19,9 +19,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn <dvomlehn@cisco.com>
+ * Author: David VomLehn <dvomlehn@cisco.com>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index fb3d29660c42..6e5f1bdc59b5 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -60,7 +60,7 @@ unsigned long ptv_memsize;
* struct low_mem_reserved - Items in low memory that are reserved
* @start: Physical address of item
* @size: Size, in bytes, of this item
- * @is_aliased: True if this is RAM aliased from another location. If false,
+ * @is_aliased: True if this is RAM aliased from another location. If false,
* it is something other than aliased RAM and the RAM in the
* unaliased address is still visible outside of low memory.
*/
diff --git a/arch/mips/powertv/powertv-usb.c b/arch/mips/powertv/powertv-usb.c
index b0e2afa89395..d845eace58e9 100644
--- a/arch/mips/powertv/powertv-usb.c
+++ b/arch/mips/powertv/powertv-usb.c
@@ -1,7 +1,7 @@
/*
* powertv-usb.c
*
- * Description: ASIC-specific USB device setup and shutdown
+ * Description: ASIC-specific USB device setup and shutdown
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
* Copyright (C) 2009 Cisco Systems, Inc.
@@ -20,8 +20,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
@@ -70,15 +70,15 @@
#define MCC2_GMII_RX2_CLOCK_SELECT (1 << 16)
#define ETHER_CLK_CONFIG (MCC2_GMII_GCLK_TO_PAD | \
- MCC2_ETHER125_0_CLOCK_SELECT | \
+ MCC2_ETHER125_0_CLOCK_SELECT | \
MCC2_RMII_0_CLOCK_SELECT | \
MCC2_GMII_TX0_CLOCK_SELECT | \
MCC2_GMII_RX0_CLOCK_SELECT | \
- MCC2_ETHER125_1_CLOCK_SELECT | \
+ MCC2_ETHER125_1_CLOCK_SELECT | \
MCC2_RMII_1_CLOCK_SELECT | \
MCC2_GMII_TX1_CLOCK_SELECT | \
MCC2_GMII_RX1_CLOCK_SELECT | \
- MCC2_ETHER125_2_CLOCK_SELECT | \
+ MCC2_ETHER125_2_CLOCK_SELECT | \
MCC2_RMII_2_CLOCK_SELECT | \
MCC2_GMII_TX2_CLOCK_SELECT | \
MCC2_GMII_RX2_CLOCK_SELECT)
@@ -98,9 +98,9 @@
#define QAM_FS_DISABLE_DIVIDE_BY_3 (1 << 5)
#define QAM_FS_ENABLE_PROGRAM (1 << 4)
-#define QAM_FS_ENABLE_OUTPUT (1 << 3)
-#define QAM_FS_SELECT_TEST_BYPASS (1 << 2)
-#define QAM_FS_DISABLE_DIGITAL_STANDBY (1 << 1)
+#define QAM_FS_ENABLE_OUTPUT (1 << 3)
+#define QAM_FS_SELECT_TEST_BYPASS (1 << 2)
+#define QAM_FS_DISABLE_DIGITAL_STANDBY (1 << 1)
#define QAM_FS_CHOOSE_FS (1 << 0)
/* Definitions for fs432x4a_ctl register */
@@ -142,14 +142,14 @@
static struct resource ehci_resources[] = {
{
.parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
},
{
- .start = irq_usbehci,
- .end = irq_usbehci,
- .flags = IORESOURCE_IRQ,
+ .start = irq_usbehci,
+ .end = irq_usbehci,
+ .flags = IORESOURCE_IRQ,
},
};
@@ -169,14 +169,14 @@ static struct platform_device ehci_device = {
static struct resource ohci_resources[] = {
{
.parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
},
{
- .start = irq_usbohci,
- .end = irq_usbohci,
- .flags = IORESOURCE_IRQ,
+ .start = irq_usbohci,
+ .end = irq_usbohci,
+ .flags = IORESOURCE_IRQ,
},
};
@@ -207,9 +207,9 @@ static DEFINE_SPINLOCK(usb_regs_lock);
*
* QAM frequency selection code, which affects the frequency at which USB
* runs. The frequency is calculated as:
- * 2^15 * ndiv * Fin
+ * 2^15 * ndiv * Fin
* Fout = ------------------------------------------------------------
- * (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
+ * (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
* where:
* Fin 54 MHz
* ndiv QAM_FS_NSDIV_54MHZ ? 8 : 16
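
The comment block above gives the QAM frequency-synthesizer output as Fout = (2^15 * ndiv * Fin) / (sdiv * (ipe * (1 + md/32) - (ipe - 2^15) * (1 + (md + 1)/32))) with Fin = 54 MHz and ndiv of 8 or 16. A small floating-point sketch that evaluates this expression with placeholder ipe/md/sdiv values (not values the driver actually programs):

#include <stdio.h>

/* Evaluate the Fout expression from the comment above, in double precision. */
static double qam_fout(double fin, double ndiv, double ipe, double md,
		       double sdiv)
{
	double two15 = 32768.0;	/* 2^15 */
	double denom = sdiv * (ipe * (1.0 + md / 32.0) -
			       (ipe - two15) * (1.0 + (md + 1.0) / 32.0));

	return (two15 * ndiv * fin) / denom;
}

int main(void)
{
	/* Placeholder parameters; ndiv depends on QAM_FS_NSDIV_54MHZ (8 or 16). */
	double fout = qam_fout(54e6, 8.0, 32768.0, 0.0, 8.0);

	printf("Fout = %.3f MHz\n", fout / 1e6);
	return 0;
}
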
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
new file mode 100644
index 000000000000..a0b0197cab0a
--- /dev/null
+++ b/arch/mips/ralink/Kconfig
@@ -0,0 +1,32 @@
+if RALINK
+
+choice
+ prompt "Ralink SoC selection"
+ default SOC_RT305X
+ help
+ Select Ralink MIPS SoC type.
+
+ config SOC_RT305X
+ bool "RT305x"
+ select USB_ARCH_HAS_HCD
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
+
+endchoice
+
+choice
+ prompt "Devicetree selection"
+ default DTB_RT_NONE
+ help
+ Select the devicetree.
+
+ config DTB_RT_NONE
+ bool "None"
+
+ config DTB_RT305X_EVAL
+ bool "RT305x eval kit"
+ depends on SOC_RT305X
+
+endchoice
+
+endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
new file mode 100644
index 000000000000..939757f0e71f
--- /dev/null
+++ b/arch/mips/ralink/Makefile
@@ -0,0 +1,15 @@
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation.#
+# Makefile for the Ralink common stuff
+#
+# Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+
+obj-y := prom.o of.o reset.o clk.o irq.o
+
+obj-$(CONFIG_SOC_RT305X) += rt305x.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-y += dts/
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
new file mode 100644
index 000000000000..6babd65765e6
--- /dev/null
+++ b/arch/mips/ralink/Platform
@@ -0,0 +1,10 @@
+#
+# Ralink SoC common stuff
+#
+core-$(CONFIG_RALINK) += arch/mips/ralink/
+cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
+
+#
+# Ralink RT305x
+#
+load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
new file mode 100644
index 000000000000..8dfa22ff300b
--- /dev/null
+++ b/arch/mips/ralink/clk.c
@@ -0,0 +1,72 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+
+#include "common.h"
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+};
+
+void ralink_clk_add(const char *dev, unsigned long rate)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ panic("failed to add clock\n");
+
+ clk->cl.dev_id = dev;
+ clk->cl.clk = clk;
+
+ clk->rate = rate;
+
+ clkdev_add(&clk->cl);
+}
+
+/*
+ * Linux clock API
+ */
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ ralink_of_remap();
+
+ ralink_clk_init();
+ clk = clk_get_sys("cpu", NULL);
+ if (IS_ERR(clk))
+ panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+ clk_put(clk);
+}
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
new file mode 100644
index 000000000000..300990313e1b
--- /dev/null
+++ b/arch/mips/ralink/common.h
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RALINK_COMMON_H__
+#define _RALINK_COMMON_H__
+
+#define RAMIPS_SYS_TYPE_LEN 32
+
+struct ralink_pinmux_grp {
+ const char *name;
+ u32 mask;
+ int gpio_first;
+ int gpio_last;
+};
+
+struct ralink_pinmux {
+ struct ralink_pinmux_grp *mode;
+ struct ralink_pinmux_grp *uart;
+ int uart_shift;
+ void (*wdt_reset)(void);
+};
+extern struct ralink_pinmux gpio_pinmux;
+
+struct ralink_soc_info {
+ unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
+ unsigned char *compatible;
+};
+extern struct ralink_soc_info soc_info;
+
+extern void ralink_of_remap(void);
+
+extern void ralink_clk_init(void);
+extern void ralink_clk_add(const char *dev, unsigned long rate);
+
+extern void prom_soc_init(struct ralink_soc_info *soc_info);
+
+__iomem void *plat_of_remap_node(const char *node);
+
+#endif /* _RALINK_COMMON_H__ */
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
new file mode 100644
index 000000000000..1a69fb300955
--- /dev/null
+++ b/arch/mips/ralink/dts/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
new file mode 100644
index 000000000000..069d0660e1dd
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -0,0 +1,106 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600 init=/init";
+ };
+
+ cpuintc: cpuintc@0 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt3052-sysc", "ralink,rt3050-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ timer@100 {
+ compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
+ reg = <0x100 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt3052-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ gpio0: gpio@600 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x600 0x34>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <24>;
+ ralink,regs = [ 00 04 08 0c
+ 20 24 28 2c
+ 30 34 ];
+ };
+
+ gpio1: gpio@638 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x638 0x24>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <16>;
+ ralink,regs = [ 00 04 08 0c
+ 10 14 18 1c
+ 20 24 ];
+ };
+
+ gpio2: gpio@660 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x660 0x24>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <12>;
+ ralink,regs = [ 00 04 08 0c
+ 10 14 18 1c
+ 20 24 ];
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
new file mode 100644
index 000000000000..148a590bc419
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -0,0 +1,52 @@
+/dts-v1/;
+
+/include/ "rt3050.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
+ model = "Ralink RT3052 evaluation board";
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ palmbus@10000000 {
+ sysc@0 {
+ ralink,pinmmux = "uartlite", "spi";
+ ralink,uartmux = "gpio";
+ ralink,wdtmux = <0>;
+ };
+ };
+
+ cfi@1f000000 {
+ compatible = "cfi-flash";
+ reg = <0x1f000000 0x800000>;
+
+ bank-width = <2>;
+ device-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+ partition@30000 {
+ label = "uboot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "calibration";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x7b0000>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
new file mode 100644
index 000000000000..c4ae47eb24ab
--- /dev/null
+++ b/arch/mips/ralink/early_printk.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include <asm/addrspace.h>
+
+#define EARLY_UART_BASE 0x10000c00
+
+#define UART_REG_RX 0x00
+#define UART_REG_TX 0x04
+#define UART_REG_IER 0x08
+#define UART_REG_IIR 0x0c
+#define UART_REG_FCR 0x10
+#define UART_REG_LCR 0x14
+#define UART_REG_MCR 0x18
+#define UART_REG_LSR 0x1c
+
+static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
+
+static inline void uart_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, uart_membase + reg);
+}
+
+static inline u32 uart_r32(unsigned reg)
+{
+ return __raw_readl(uart_membase + reg);
+}
+
+void prom_putchar(unsigned char ch)
+{
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+ uart_w32(ch, UART_REG_TX);
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+}
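
The polled prom_putchar() above is the only primitive the early console needs; anything higher level can be layered on top of it. A minimal sketch of a string writer built on it (the helper name and the CRLF handling are illustrative, not part of this patch):

/* Illustrative only: print a string through the polled early UART. */
static void early_puts(const char *s)
{
	while (*s) {
		if (*s == '\n')
			prom_putchar('\r');	/* most terminals expect CR+LF */
		prom_putchar(*s++);
	}
}
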
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
new file mode 100644
index 000000000000..6d054c5ec9ab
--- /dev/null
+++ b/arch/mips/ralink/irq.c
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+#include "common.h"
+
+/* INTC register offsets */
+#define INTC_REG_STATUS0 0x00
+#define INTC_REG_STATUS1 0x04
+#define INTC_REG_TYPE 0x20
+#define INTC_REG_RAW_STATUS 0x30
+#define INTC_REG_ENABLE 0x34
+#define INTC_REG_DISABLE 0x38
+
+#define INTC_INT_GLOBAL BIT(31)
+
+#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
+#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
+#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
+
+/* the MIPS CPU owns the first 8 Linux IRQs; the cascaded INTC starts after them */
+#define RALINK_INTC_IRQ_BASE 8
+
+/* we have 32 SoC irqs */
+#define RALINK_INTC_IRQ_COUNT 32
+
+#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
+
+static void __iomem *rt_intc_membase;
+
+static inline void rt_intc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_intc_membase + reg);
+}
+
+static inline u32 rt_intc_r32(unsigned reg)
+{
+ return __raw_readl(rt_intc_membase + reg);
+}
+
+static void ralink_intc_irq_unmask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE);
+}
+
+static void ralink_intc_irq_mask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE);
+}
+
+static struct irq_chip ralink_intc_irq_chip = {
+ .name = "INTC",
+ .irq_unmask = ralink_intc_irq_unmask,
+ .irq_mask = ralink_intc_irq_mask,
+ .irq_mask_ack = ralink_intc_irq_mask,
+};
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ u32 pending = rt_intc_r32(INTC_REG_STATUS0);
+
+ if (pending) {
+ struct irq_domain *domain = irq_get_handler_data(irq);
+ generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+ } else {
+ spurious_interrupt();
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (pending & STATUSF_IP7)
+ do_IRQ(RALINK_CPU_IRQ_COUNTER);
+
+ else if (pending & STATUSF_IP5)
+ do_IRQ(RALINK_CPU_IRQ_FE);
+
+ else if (pending & STATUSF_IP6)
+ do_IRQ(RALINK_CPU_IRQ_WIFI);
+
+ else if (pending & STATUSF_IP2)
+ do_IRQ(RALINK_CPU_IRQ_INTC);
+
+ else
+ spurious_interrupt();
+}
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = intc_map,
+};
+
+static int __init intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ struct irq_domain *domain;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic("Failed to get INTC IRQ");
+
+ if (of_address_to_resource(node, 0, &res))
+ panic("Failed to get intc memory range");
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request intc memory\n");
+
+ rt_intc_membase = ioremap_nocache(res.start,
+ resource_size(&res));
+ if (!rt_intc_membase)
+ panic("Failed to remap intc memory");
+
+ /* disable all interrupts */
+ rt_intc_w32(~0, INTC_REG_DISABLE);
+
+ /* route all INTC interrupts to MIPS HW0 interrupt */
+ rt_intc_w32(0, INTC_REG_TYPE);
+
+ domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
+ RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain");
+
+ rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
+
+ irq_set_chained_handler(irq, ralink_intc_irq_handler);
+ irq_set_handler_data(irq, domain);
+
+ cp0_perfcount_irq = irq_create_mapping(domain, 9);
+
+ return 0;
+}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
+
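
Because the INTC is registered as a legacy irq_domain starting at RALINK_INTC_IRQ_BASE, a SoC interrupt specifier <N> in the device tree resolves to Linux IRQ RALINK_INTC_IRQ_BASE + N (the uartlite's "interrupts = <12>" therefore resolves to Linux IRQ 20). A hedged sketch of how a platform driver on the palmbus would pick such an interrupt up; the driver and handler names are illustrative:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the device here */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	/* The DT "interrupts" property is translated through the INTC
	 * domain set up above, so this returns RALINK_INTC_IRQ_BASE + N. */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return request_irq(irq, example_isr, 0, "example", pdev);
}
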
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
new file mode 100644
index 000000000000..4165e70775be
--- /dev/null
+++ b/arch/mips/ralink/of.c
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+__iomem void *rt_sysc_membase;
+__iomem void *rt_memc_membase;
+
+extern struct boot_param_header __dtb_start;
+
+__iomem void *plat_of_remap_node(const char *node)
+{
+ struct resource res;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, node);
+ if (!np)
+ panic("Failed to find %s node", node);
+
+ if (of_address_to_resource(np, 0, &res))
+ panic("Failed to get resource for %s", node);
+
+ if (!request_mem_region(res.start,
+ resource_size(&res),
+ res.name))
+ panic("Failed to request resources for %s", node);
+
+ return ioremap_nocache(res.start, resource_size(&res));
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+ void *fdt_copy;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+ /* Before we do anything, let's reserve the DT blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ /* The unflattened tree references strings inside the flattened
+ * blob directly, so copy the flattened device tree from init
+ * memory to regular memory before unflattening it.
+ */
+ fdt_copy = alloc_bootmem(size);
+ memcpy(fdt_copy, initial_boot_params, size);
+ initial_boot_params = fdt_copy;
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_bootmem(base, size);
+}
+
+void __init plat_mem_setup(void)
+{
+ set_io_port_base(KSEG1);
+
+ /*
+ * Load the built-in device tree. This causes the chosen node to be
+ * parsed, resulting in our memory regions being registered.
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+static int __init plat_of_setup(void)
+{
+ static struct of_device_id of_ids[3];
+ int len = sizeof(of_ids[0].compatible);
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strncpy(of_ids[0].compatible, soc_info.compatible, len);
+ strncpy(of_ids[1].compatible, "palmbus", len);
+
+ if (of_platform_populate(NULL, of_ids, NULL, NULL))
+ panic("failed to populate DT\n");
+
+ return 0;
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
new file mode 100644
index 000000000000..9c64f029d047
--- /dev/null
+++ b/arch/mips/ralink/prom.c
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/string.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+struct ralink_soc_info soc_info;
+
+const char *get_system_type(void)
+{
+ return soc_info.sys_type;
+}
+
+static __init void prom_init_cmdline(int argc, char **argv)
+{
+ int i;
+
+ pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+ (unsigned int)fw_arg0, (unsigned int)fw_arg1,
+ (unsigned int)fw_arg2, (unsigned int)fw_arg3);
+
+ argc = fw_arg0;
+ argv = (char **) KSEG1ADDR(fw_arg1);
+
+ if (!argv) {
+ pr_debug("argv=%p is invalid, skipping\n",
+ argv);
+ return;
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *p = (char *) KSEG1ADDR(argv[i]);
+
+ if (CPHYSADDR(p) && *p) {
+ pr_debug("argv[%d]: %s\n", i, p);
+ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+ }
+ }
+}
+
+void __init prom_init(void)
+{
+ int argc = fw_arg0;
+ char **argv = (char **) KSEG1ADDR(fw_arg1);
+
+ prom_soc_init(&soc_info);
+
+ pr_info("SoC Type: %s\n", get_system_type());
+
+ prom_init_cmdline(argc, argv);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
new file mode 100644
index 000000000000..22120e512e7e
--- /dev/null
+++ b/arch/mips/ralink/reset.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/pm.h>
+#include <linux/io.h>
+
+#include <asm/reboot.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+/* Reset Control */
+#define SYSC_REG_RESET_CTRL 0x034
+#define RSTCTL_RESET_SYSTEM BIT(0)
+
+static void ralink_restart(char *command)
+{
+ local_irq_disable();
+ rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL);
+ unreachable();
+}
+
+static void ralink_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = ralink_restart;
+ _machine_halt = ralink_halt;
+ pm_power_off = ralink_halt;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
new file mode 100644
index 000000000000..0a4bbdcf59d9
--- /dev/null
+++ b/arch/mips/ralink/rt305x.c
@@ -0,0 +1,242 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt305x.h>
+
+#include "common.h"
+
+enum rt305x_soc_type rt305x_soc;
+
+struct ralink_pinmux_grp mode_mux[] = {
+ {
+ .name = "i2c",
+ .mask = RT305X_GPIO_MODE_I2C,
+ .gpio_first = RT305X_GPIO_I2C_SD,
+ .gpio_last = RT305X_GPIO_I2C_SCLK,
+ }, {
+ .name = "spi",
+ .mask = RT305X_GPIO_MODE_SPI,
+ .gpio_first = RT305X_GPIO_SPI_EN,
+ .gpio_last = RT305X_GPIO_SPI_CLK,
+ }, {
+ .name = "uartlite",
+ .mask = RT305X_GPIO_MODE_UART1,
+ .gpio_first = RT305X_GPIO_UART1_TXD,
+ .gpio_last = RT305X_GPIO_UART1_RXD,
+ }, {
+ .name = "jtag",
+ .mask = RT305X_GPIO_MODE_JTAG,
+ .gpio_first = RT305X_GPIO_JTAG_TDO,
+ .gpio_last = RT305X_GPIO_JTAG_TDI,
+ }, {
+ .name = "mdio",
+ .mask = RT305X_GPIO_MODE_MDIO,
+ .gpio_first = RT305X_GPIO_MDIO_MDC,
+ .gpio_last = RT305X_GPIO_MDIO_MDIO,
+ }, {
+ .name = "sdram",
+ .mask = RT305X_GPIO_MODE_SDRAM,
+ .gpio_first = RT305X_GPIO_SDRAM_MD16,
+ .gpio_last = RT305X_GPIO_SDRAM_MD31,
+ }, {
+ .name = "rgmii",
+ .mask = RT305X_GPIO_MODE_RGMII,
+ .gpio_first = RT305X_GPIO_GE0_TXD0,
+ .gpio_last = RT305X_GPIO_GE0_RXCLK,
+ }, {0}
+};
+
+struct ralink_pinmux_grp uart_mux[] = {
+ {
+ .name = "uartf",
+ .mask = RT305X_GPIO_MODE_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm uartf",
+ .mask = RT305X_GPIO_MODE_PCM_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm i2s",
+ .mask = RT305X_GPIO_MODE_PCM_I2S,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "i2s uartf",
+ .mask = RT305X_GPIO_MODE_I2S_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm gpio",
+ .mask = RT305X_GPIO_MODE_PCM_GPIO,
+ .gpio_first = RT305X_GPIO_10,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio uartf",
+ .mask = RT305X_GPIO_MODE_GPIO_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio i2s",
+ .mask = RT305X_GPIO_MODE_GPIO_I2S,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio",
+ .mask = RT305X_GPIO_MODE_GPIO,
+ }, {0}
+};
+
+void rt305x_wdt_reset(void)
+{
+ u32 t;
+
+ /* enable WDT reset output on pin SRAM_CS_N */
+ t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+ t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
+ RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
+ rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
+}
+
+struct ralink_pinmux gpio_pinmux = {
+ .mode = mode_mux,
+ .uart = uart_mux,
+ .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+ .wdt_reset = rt305x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+ u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+
+ if (soc_is_rt305x() || soc_is_rt3350()) {
+ t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) &
+ RT305X_SYSCFG_CPUCLK_MASK;
+ switch (t) {
+ case RT305X_SYSCFG_CPUCLK_LOW:
+ cpu_rate = 320000000;
+ break;
+ case RT305X_SYSCFG_CPUCLK_HIGH:
+ cpu_rate = 384000000;
+ break;
+ }
+ sys_rate = uart_rate = wdt_rate = cpu_rate / 3;
+ } else if (soc_is_rt3352()) {
+ t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) &
+ RT3352_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT3352_SYSCFG0_CPUCLK_LOW:
+ cpu_rate = 384000000;
+ break;
+ case RT3352_SYSCFG0_CPUCLK_HIGH:
+ cpu_rate = 400000000;
+ break;
+ }
+ sys_rate = wdt_rate = cpu_rate / 3;
+ uart_rate = 40000000;
+ } else if (soc_is_rt5350()) {
+ t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) &
+ RT5350_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT5350_SYSCFG0_CPUCLK_360:
+ cpu_rate = 360000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_320:
+ cpu_rate = 320000000;
+ sys_rate = cpu_rate / 4;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_300:
+ cpu_rate = 300000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ default:
+ BUG();
+ }
+ uart_rate = 40000000;
+ wdt_rate = sys_rate;
+ } else {
+ BUG();
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000100.timer", wdt_rate);
+ ralink_clk_add("10000500.uart", uart_rate);
+ ralink_clk_add("10000c00.uartlite", uart_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt3050-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt3050-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+ unsigned char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+ if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1) {
+ unsigned long icache_sets;
+
+ icache_sets = (read_c0_config1() >> 22) & 7;
+ if (icache_sets == 1) {
+ rt305x_soc = RT305X_SOC_RT3050;
+ name = "RT3050";
+ soc_info->compatible = "ralink,rt3050-soc";
+ } else {
+ rt305x_soc = RT305X_SOC_RT3052;
+ name = "RT3052";
+ soc_info->compatible = "ralink,rt3052-soc";
+ }
+ } else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT3350;
+ name = "RT3350";
+ soc_info->compatible = "ralink,rt3350-soc";
+ } else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT3352;
+ name = "RT3352";
+ soc_info->compatible = "ralink,rt3352-soc";
+ } else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT5350;
+ name = "RT5350";
+ soc_info->compatible = "ralink,rt5350-soc";
+ } else {
+ panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ }
+
+ id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s id:%u rev:%u",
+ name,
+ (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+ (id & CHIP_ID_REV_MASK));
+}
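
ralink_clk_init() hands the derived rates to ralink_clk_add(), whose implementation (in a clk.c outside this hunk) is expected to register fixed-rate clocks keyed by device name, so peripheral drivers obtain their rate through the normal clk API. A hedged consumer sketch, assuming the lookup is registered with a NULL connection id:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static unsigned long example_get_rate(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);
	unsigned long rate;

	if (IS_ERR(clk))
		return 0;

	/* e.g. 40000000 for "10000c00.uartlite" on RT3352/RT5350 */
	rate = clk_get_rate(clk);
	clk_put(clk);

	return rate;
}
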
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 716e9a12f0e7..3af00b2a26ee 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -215,9 +215,9 @@ static struct resource rb532_wdt_res[] = {
};
static struct platform_device rb532_wdt = {
- .name = "rc32434_wdt",
- .id = -1,
- .resource = rb532_wdt_res,
+ .name = "rc32434_wdt",
+ .id = -1,
+ .resource = rb532_wdt_res,
.num_resources = ARRAY_SIZE(rb532_wdt_res),
};
@@ -235,8 +235,8 @@ static struct plat_serial8250_port rb532_uart_res[] = {
};
static struct platform_device rb532_uart = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
.dev.platform_data = &rb532_uart_res,
};
@@ -273,7 +273,7 @@ static void __init parse_mac_addr(char *macstr)
/* NAND definitions */
-#define NAND_CHIP_DELAY 25
+#define NAND_CHIP_DELAY 25
static void __init rb532_nand_setup(void)
{
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 6ec41df3cb99..a18007613c30 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -44,10 +44,10 @@ struct rb532_gpio_chip {
static struct resource rb532_gpio_reg0_res[] = {
{
- .name = "gpio_reg0",
- .start = REGBASE + GPIOBASE,
- .end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
- .flags = IORESOURCE_MEM,
+ .name = "gpio_reg0",
+ .start = REGBASE + GPIOBASE,
+ .end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
+ .flags = IORESOURCE_MEM,
}
};
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c
index f298430cff07..3a431e802bbc 100644
--- a/arch/mips/rb532/irq.c
+++ b/arch/mips/rb532/irq.c
@@ -21,7 +21,7 @@
*
* Copyright 2002 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * stevel@mvista.com or source@mvista.com
+ * stevel@mvista.com or source@mvista.com
*/
#include <linux/bitops.h>
@@ -51,7 +51,7 @@ struct intr_group {
volatile u32 *base_addr;
};
-#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
+#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
#if (NR_IRQS < RC32434_NR_IRQS)
#error Too little irqs defined. Did you override <asm/irq.h> ?
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index 4a6057b35b9d..a0a79222ce0b 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -2,7 +2,7 @@
* Basic EISA bus support for the SGI Indigo-2.
*
* (C) 2002 Pascal Dameme <netinet@freesurf.fr>
- * and Marc Zyngier <mzyngier@freesurf.fr>
+ * and Marc Zyngier <mzyngier@freesurf.fr>
*
* This code is released under both the GPL version 2 and BSD
* licenses. Either license may be used.
@@ -40,13 +40,13 @@
/* I2 has four EISA slots. */
#define IP22_EISA_MAX_SLOTS 4
-#define EISA_MAX_IRQ 16
+#define EISA_MAX_IRQ 16
-#define EIU_MODE_REG 0x0001ffc0
-#define EIU_STAT_REG 0x0001ffc4
-#define EIU_PREMPT_REG 0x0001ffc8
-#define EIU_QUIET_REG 0x0001ffcc
-#define EIU_INTRPT_ACK 0x00010004
+#define EIU_MODE_REG 0x0001ffc0
+#define EIU_STAT_REG 0x0001ffc4
+#define EIU_PREMPT_REG 0x0001ffc8
+#define EIU_QUIET_REG 0x0001ffcc
+#define EIU_INTRPT_ACK 0x00010004
static char __init *decode_eisa_sig(unsigned long addr)
{
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index f5ebc092aed5..ab0e379dc7e0 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -15,7 +15,7 @@ static struct bus_type gio_bus_type;
static struct {
const char *name;
- __u8 id;
+ __u8 id;
} gio_name_table[] = {
{ .name = "SGI Impact", .id = 0x10 },
{ .name = "Phobos G160", .id = 0x35 },
@@ -376,15 +376,15 @@ static void ip22_check_gio(int slotno, unsigned long addr)
}
static struct bus_type gio_bus_type = {
- .name = "gio",
+ .name = "gio",
.dev_attrs = gio_dev_attrs,
- .match = gio_bus_match,
- .probe = gio_device_probe,
- .remove = gio_device_remove,
+ .match = gio_bus_match,
+ .probe = gio_device_probe,
+ .remove = gio_device_remove,
.suspend = gio_device_suspend,
- .resume = gio_device_resume,
+ .resume = gio_device_resume,
.shutdown = gio_device_shutdown,
- .uevent = gio_device_uevent,
+ .uevent = gio_device_uevent,
};
static struct resource gio_bus_resource = {
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 3f2b7633f946..3db64d51798d 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -1,12 +1,12 @@
/*
* ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
- * found on INDY and Indigo2 workstations.
+ * found on INDY and Indigo2 workstations.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
- * - Indigo2 changes
- * - Interrupt handling fixes
+ * - Indigo2 changes
+ * - Interrupt handling fixes
* Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
*/
#include <linux/types.h>
@@ -195,24 +195,24 @@ extern void indy_8254timer_irq(void);
* at all) like:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Local IRQ level zero
- * 3 Local IRQ level one
- * 4 8254 Timer zero
- * 5 8254 Timer one
- * 6 Bus Error
- * 7 R4k timer (what we use)
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Local IRQ level zero
+ * 3 Local IRQ level one
+ * 4 8254 Timer zero
+ * 5 8254 Timer one
+ * 6 Bus Error
+ * 7 R4k timer (what we use)
*
* We handle the IRQ according to _our_ priority which is:
*
- * Highest ---- R4k Timer
- * Local IRQ zero
- * Local IRQ one
- * Bus Error
- * 8254 Timer zero
- * Lowest ---- 8254 Timer one
+ * Highest ---- R4k Timer
+ * Local IRQ zero
+ * Local IRQ one
+ * Bus Error
+ * 8254 Timer zero
+ * Lowest ---- 8254 Timer one
*
* then we just return, if multiple IRQs are pending then we will just take
* another exception, big deal.
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index 75ada8a9713b..7cec0a4e527d 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -121,22 +121,22 @@ void __init sgimc_init(void)
*/
/* Step 0: Make sure we turn off the watchdog in case it's
- * still running (which might be the case after a
- * soft reboot).
+ * still running (which might be the case after a
+ * soft reboot).
*/
tmp = sgimc->cpuctrl0;
tmp &= ~SGIMC_CCTRL0_WDOG;
sgimc->cpuctrl0 = tmp;
/* Step 1: The CPU/GIO error status registers will not latch
- * up a new error status until the register has been
- * cleared by the cpu. These status registers are
- * cleared by writing any value to them.
+ * up a new error status until the register has been
+ * cleared by the cpu. These status registers are
+ * cleared by writing any value to them.
*/
sgimc->cstat = sgimc->gstat = 0;
/* Step 2: Enable all parity checking in cpu control register
- * zero.
+ * zero.
*/
/* don't touch parity settings for IP28 */
tmp = sgimc->cpuctrl0;
@@ -147,7 +147,7 @@ void __init sgimc_init(void)
sgimc->cpuctrl0 = tmp;
/* Step 3: Setup the MC write buffer depth, this is controlled
- * in cpu control register 1 in the lower 4 bits.
+ * in cpu control register 1 in the lower 4 bits.
*/
tmp = sgimc->cpuctrl1;
tmp &= ~0xf;
@@ -155,26 +155,26 @@ void __init sgimc_init(void)
sgimc->cpuctrl1 = tmp;
/* Step 4: Initialize the RPSS divider register to run as fast
- * as it can correctly operate. The register is laid
- * out as follows:
+ * as it can correctly operate. The register is laid
+ * out as follows:
*
- * ----------------------------------------
- * | RESERVED | INCREMENT | DIVIDER |
- * ----------------------------------------
- * 31 16 15 8 7 0
+ * ----------------------------------------
+ * | RESERVED | INCREMENT | DIVIDER |
+ * ----------------------------------------
+ * 31 16 15 8 7 0
*
- * DIVIDER determines how often a 'tick' happens,
- * INCREMENT determines by how the RPSS increment
- * registers value increases at each 'tick'. Thus,
- * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
+ * DIVIDER determines how often a 'tick' happens,
+ * INCREMENT determines by how the RPSS increment
+ * registers value increases at each 'tick'. Thus,
+ * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
*/
sgimc->divider = 0x101;
/* Step 5: Initialize GIO64 arbitrator configuration register.
*
* NOTE: HPC init code in sgihpc_init() must run before us because
- * we need to know Guiness vs. FullHouse and the board
- * revision on this machine. You have been warned.
+ * we need to know Guiness vs. FullHouse and the board
+ * revision on this machine. You have been warned.
*/
/* First the basic invariants across all GIO64 implementations. */
@@ -187,18 +187,18 @@ void __init sgimc_init(void)
if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC at 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp0 pipelines */
- tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
+ tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
tmp |= SGIMC_GIOPAR_RTIMEEXP0; /* exp0 is realtime */
} else {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp[01] pipelined */
tmp |= SGIMC_GIOPAR_PLINEEXP1;
- tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
}
} else {
/* Guiness specific settings. */
tmp |= SGIMC_GIOPAR_EISA64; /* MC talks to EISA at 64bits */
- tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
}
sgimc->giopar = tmp; /* poof */
diff --git a/arch/mips/sgi-ip22/ip22-nvram.c b/arch/mips/sgi-ip22/ip22-nvram.c
index 0177566475d4..e077036a676a 100644
--- a/arch/mips/sgi-ip22/ip22-nvram.c
+++ b/arch/mips/sgi-ip22/ip22-nvram.c
@@ -14,11 +14,11 @@
#define EEPROM_WRITE 0xa000 /* serial memory write */
#define EEPROM_WRALL 0x8800 /* write all registers */
#define EEPROM_WDS 0x8000 /* disable all programming */
-#define EEPROM_PRREAD 0xc000 /* read protect register */
-#define EEPROM_PREN 0x9800 /* enable protect register mode */
-#define EEPROM_PRCLEAR 0xffff /* clear protect register */
-#define EEPROM_PRWRITE 0xa000 /* write protect register */
-#define EEPROM_PRDS 0x8000 /* disable protect register, forever */
+#define EEPROM_PRREAD 0xc000 /* read protect register */
+#define EEPROM_PREN 0x9800 /* enable protect register mode */
+#define EEPROM_PRCLEAR 0xffff /* clear protect register */
+#define EEPROM_PRWRITE 0xa000 /* write protect register */
+#define EEPROM_PRDS 0x8000 /* disable protect register, forever */
#define EEPROM_EPROT 0x01 /* Protect register enable */
#define EEPROM_CSEL 0x02 /* Chip select */
@@ -27,7 +27,7 @@
#define EEPROM_DATI 0x10 /* Data in */
/* We need to use these functions early... */
-#define delay() ({ \
+#define delay() ({ \
int x; \
for (x=0; x<100000; x++) __asm__ __volatile__(""); })
@@ -35,7 +35,7 @@
__raw_writel(__raw_readl(ptr) & ~EEPROM_DATO, ptr); \
__raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \
__raw_writel(__raw_readl(ptr) & ~EEPROM_EPROT, ptr); \
- delay(); \
+ delay(); \
__raw_writel(__raw_readl(ptr) | EEPROM_CSEL, ptr); \
__raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
@@ -46,7 +46,7 @@
__raw_writel(__raw_readl(ptr) | EEPROM_EPROT, ptr); \
__raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
-#define BITS_IN_COMMAND 11
+#define BITS_IN_COMMAND 11
/*
* clock in the nvram command and the register number. For the
* national semiconductor nv ram chip the op code is 3 bits and
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 698904daf901..a14fd32b76bd 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -137,7 +137,7 @@ static int __init sgiseeq_devinit(void)
eth0_pd.hpc = hpc3c0;
eth0_pd.irq = SGI_ENET_IRQ;
-#define EADDR_NVOFS 250
+#define EADDR_NVOFS 250
for (i = 0; i < 3; i++) {
unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);
@@ -155,17 +155,17 @@ static int __init sgiseeq_devinit(void)
return 0;
sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
- SGIMC_GIOPAR_HPC264;
+ SGIMC_GIOPAR_HPC264;
hpc3c1->pbus_piocfg[0][0] = 0x3ffff;
/* interrupt/config register on Challenge S Mezz board */
hpc3c1->pbus_extregs[0][0] = 0x30;
eth1_pd.hpc = hpc3c1;
eth1_pd.irq = SGI_GIO_0_IRQ;
-#define EADDR_NVOFS 250
+#define EADDR_NVOFS 250
for (i = 0; i < 3; i++) {
unsigned short tmp = ip22_eeprom_read(&hpc3c1->eeprom,
- EADDR_NVOFS / 2 + i);
+ EADDR_NVOFS / 2 + i);
eth1_pd.mac[2 * i] = tmp >> 8;
eth1_pd.mac[2 * i + 1] = tmp & 0xff;
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 20363d29cb58..063c2dd31e72 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -101,7 +101,7 @@ static void debounce(unsigned long data)
del_timer(&debounce_timer);
if (sgint->istat1 & SGINT_ISTAT1_PWR) {
/* Interrupt still being sent. */
- debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
+ debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
add_timer(&debounce_timer);
sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR |
@@ -166,7 +166,7 @@ static irqreturn_t panel_int(int irq, void *dev_id)
}
static int panic_event(struct notifier_block *this, unsigned long event,
- void *ptr)
+ void *ptr)
{
if (machine_state & MACHINE_PANICED)
return NOTIFY_DONE;
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 0626555fd1a3..3f47346608d7 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -136,14 +136,14 @@ static void save_and_clear_buserr(void)
hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
- hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
- hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
- hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
+ hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
- hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
- hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
- hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
+ hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
for (i = 0; i < 8; ++i) {
@@ -196,11 +196,11 @@ static void print_cache_tags(void)
scb | (1 << 12)*i);
}
i = read_c0_config();
- scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
+ scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
i = ((1 << scw) - 1) & ~((1 << scb) - 1);
- printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
+ printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
scw-1, scb, i & (unsigned)cache_tags.err_addr);
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 04cebadc2b3c..692778da9e76 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -39,7 +39,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
printk("Hub has valid error information:\n");
if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
- printk("Overrun is set. Error stack may contain additional "
+ printk("Overrun is set. Error stack may contain additional "
"information.\n");
printk("Hub error address is %08lx\n",
(errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
@@ -85,7 +85,7 @@ void __init ip27_be_init(void)
board_be_handler = ip27_be_handler;
LOCAL_HUB_S(PI_ERR_INT_PEND,
- cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+ cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 984e561f0f7a..b952d5b1af86 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -31,7 +31,7 @@ static inline struct ioc3_uartregs *console_uart(void)
return &ioc3->sregs.uarta;
}
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
struct ioc3_uartregs *uart = console_uart();
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index cd0d5b06cd83..328ceb3c86ec 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -17,11 +17,11 @@
static int force_fire_and_forget = 1;
/**
- * hub_pio_map - establish a HUB PIO mapping
+ * hub_pio_map - establish a HUB PIO mapping
*
* @hub: hub to perform PIO mapping on
* @widget: widget ID to perform PIO mapping for
- * @xtalk_addr: xtalk_address that needs to be mapped
+ * @xtalk_addr: xtalk_address that needs to be mapped
* @size: size of the PIO mapping
*
**/
@@ -78,8 +78,8 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
/*
* hub_setup_prb(nasid, prbnum, credits, conveyor)
*
- * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
- * put it into conveyor belt mode with the specified number of credits.
+ * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
+ * put it into conveyor belt mode with the specified number of credits.
*/
static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
{
@@ -125,12 +125,12 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
* so we turn off access to all widgets for the duration of the function.
*
* XXX - This code should really check what kind of widget we're talking
- * to. Bridges can only handle three requests, but XG will do more.
+ * to. Bridges can only handle three requests, but XG will do more.
* How many can crossbow handle to widget 0? We're assuming 1.
*
* XXX - There is a bug in the crossbow that link reset PIOs do not
* return write responses. The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times. This
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
* only affects pio's to xbow registers, which should be rare.
**/
static void hub_set_piomode(nasid_t nasid)
@@ -167,7 +167,7 @@ static void hub_set_piomode(nasid_t nasid)
}
/*
- * hub_pio_init - PIO-related hub initialization
+ * hub_pio_init - PIO-related hub initialization
*
* @hub: hubinfo structure for our hub
*/
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 923c080f77bd..d41b1c6fb032 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -151,7 +151,7 @@ nasid_t
get_nasid(void)
{
return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
- >> NSRI_NODEID_SHFT);
+ >> NSRI_NODEID_SHFT);
}
/*
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 69a939ae65e4..2315cfeb2687 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -62,7 +62,7 @@ extern int irq_to_slot[];
* from the irq value
*/
#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)]
-#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
+#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
static inline int alloc_level(int cpu, int irq)
{
@@ -281,11 +281,11 @@ static unsigned int startup_bridge_irq(struct irq_data *d)
device |= (pin << (pin*3));
bridge->b_int_device = device;
- bridge->b_wid_tflush;
+ bridge->b_wid_tflush;
intr_connect_level(cpu, swlevel);
- return 0; /* Never anything pending. */
+ return 0; /* Never anything pending. */
}
/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index cd8fcab6b054..3505d08ff2fd 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -31,8 +31,8 @@
#include <asm/sn/sn_private.h>
-#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
-#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
+#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_COMPACT_NODES];
@@ -43,7 +43,7 @@ static int fine_mode;
static int is_fine_dirmode(void)
{
return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
- >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
+ >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}
static hubreg_t get_region(cnodeid_t cnode)
@@ -66,7 +66,7 @@ static void gen_region_mask(hubreg_t *region_mask)
}
}
-#define rou_rflag rou_flags
+#define rou_rflag rou_flags
static int router_distance;
@@ -412,7 +412,7 @@ static void __init node_mem_init(cnodeid_t node)
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
- bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
@@ -422,7 +422,7 @@ static void __init node_mem_init(cnodeid_t node)
}
/*
- * A node with nothing. We use it to avoid any special casing in
+ * A node with nothing. We use it to avoid any special casing in
* cpumask_of_node
*/
static struct node_data null_node = {
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index 005c29ed419a..a2358b44420c 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -54,7 +54,7 @@ void install_cpu_nmi_handler(int slice)
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
struct reg_struct *nr;
- int i;
+ int i;
/* Get the pointer to the current cpu's register set. */
nr = (struct reg_struct *)
@@ -86,12 +86,12 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
printk("%s\n", print_tainted());
printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
printk("ra : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
- printk("Status: %08lx ", nr->sr);
+ printk("Status: %08lx ", nr->sr);
if (nr->sr & ST0_KX)
printk("KX ");
if (nr->sr & ST0_SX)
- printk("SX ");
+ printk("SX ");
if (nr->sr & ST0_UX)
printk("UX ");
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index f347bc6b7954..ac37e54b3d5e 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -29,7 +29,7 @@ void machine_restart(char *command) __attribute__((noreturn));
void machine_halt(void) __attribute__((noreturn));
void machine_power_off(void) __attribute__((noreturn));
-#define noreturn while(1); /* Silence gcc. */
+#define noreturn while(1); /* Silence gcc. */
/* XXX How to pass the reboot command to the firmware??? */
static void ip27_machine_restart(char *command)
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 735b43bf8f82..f94638141b20 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -191,7 +191,7 @@ static void __init ip27_cpus_done(void)
}
/*
- * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
+ * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
* set sp to the kernel stack of the newly created idle process, gp to the proc
* struct so that current_thread_info() will work.
*/
@@ -219,7 +219,7 @@ static void __init ip27_smp_setup(void)
/*
* Assumption to be fixed: we're always booted on logical / physical
- * processor 0. While we're always running on logical processor 0
+ * processor 0. While we're always running on logical processor 0
* this still means this is physical processor zero; it might for
* example be disabled in the firmware.
*/
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 13cfeab50528..fff58ac176f3 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -117,8 +117,8 @@ void __cpuinit hub_rt_clock_event_init(void)
cd->name = name;
cd->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, CYCLES_PER_SEC);
- cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
@@ -153,7 +153,7 @@ static cycle_t hub_rt_read(struct clocksource *cs)
struct clocksource hub_rt_clocksource = {
.name = "HUB-RT",
- .rating = 200,
+ .rating = 200,
.read = hub_rt_read,
.mask = CLOCKSOURCE_MASK(52),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 5e871e75a8d9..a4df7d0f6f12 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -17,15 +17,15 @@
#include <asm/xtalk/xtalk.h>
-#define XBOW_WIDGET_PART_NUM 0x0
-#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
-#define BASE_XBOW_PORT 8 /* Lowest external port */
+#define XBOW_WIDGET_PART_NUM 0x0
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
+#define BASE_XBOW_PORT 8 /* Lowest external port */
extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
{
- widgetreg_t widget_id;
+ widgetreg_t widget_id;
xwidget_part_num_t partnum;
widget_id = *(volatile widgetreg_t *)
@@ -102,10 +102,10 @@ static int __cpuinit xbow_probe(nasid_t nasid)
void __cpuinit xtalk_probe_node(cnodeid_t nid)
{
- volatile u64 hubreg;
- nasid_t nasid;
+ volatile u64 hubreg;
+ nasid_t nasid;
xwidget_part_num_t partnum;
- widgetreg_t widget_id;
+ widgetreg_t widget_id;
nasid = COMPACT_TO_NASID_NODEID(nid);
hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
@@ -115,7 +115,7 @@ void __cpuinit xtalk_probe_node(cnodeid_t nid)
return;
widget_id = *(volatile widgetreg_t *)
- (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+ (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index e7d5054de8c8..e0c7d9e142fa 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -173,7 +173,7 @@ static struct irq_chip crime_edge_interrupt = {
/*
* This is for MACE PCI interrupts. We can decrease bus traffic by masking
- * as close to the source as possible. This also means we can take the
+ * as close to the source as possible. This also means we can take the
* next chunk of the CRIME register in one piece.
*/
@@ -271,11 +271,11 @@ static void disable_maceisa_irq(struct irq_data *d)
unsigned int crime_int = 0;
maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
- if (!(maceisa_mask & MACEISA_AUDIO_INT))
+ if (!(maceisa_mask & MACEISA_AUDIO_INT))
crime_int |= MACE_AUDIO_INT;
- if (!(maceisa_mask & MACEISA_MISC_INT))
+ if (!(maceisa_mask & MACEISA_MISC_INT))
crime_int |= MACE_MISC_INT;
- if (!(maceisa_mask & MACEISA_SUPERIO_INT))
+ if (!(maceisa_mask & MACEISA_SUPERIO_INT))
crime_int |= MACE_SUPERIO_INT;
crime_mask &= ~crime_int;
crime->imask = crime_mask;
diff --git a/arch/mips/sibyte/Platform b/arch/mips/sibyte/Platform
index 911dfe39c631..d03a07516f83 100644
--- a/arch/mips/sibyte/Platform
+++ b/arch/mips/sibyte/Platform
@@ -9,7 +9,7 @@ platform-$(CONFIG_SIBYTE_BCM1x80) += sibyte/
#
# Sibyte SB1250 / BCM1480 family of SOCs
#
-cflags-$(CONFIG_SIBYTE_BCM112X) += \
+cflags-$(CONFIG_SIBYTE_BCM112X) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
@@ -18,11 +18,11 @@ cflags-$(CONFIG_SIBYTE_SB1250) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
-cflags-$(CONFIG_SIBYTE_BCM1x55) += \
+cflags-$(CONFIG_SIBYTE_BCM1x55) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
-cflags-$(CONFIG_SIBYTE_BCM1x80) += \
+cflags-$(CONFIG_SIBYTE_BCM1x80) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 215713e1f3c4..09d6e16a70f1 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -283,10 +283,10 @@ void __init arch_init_irq(void)
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
(K_BCM1480_INT_MBOX_0_0 << 3)));
- }
+ }
- /* Clear the mailboxes. The firmware may leave them dirty */
+ /* Clear the mailboxes. The firmware may leave them dirty */
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
@@ -307,7 +307,7 @@ void __init arch_init_irq(void)
/*
* Note that the timer interrupts are also mapped, but this is
- * done in bcm1480_time_init(). Also, the profiling driver
+ * done in bcm1480_time_init(). Also, the profiling driver
* does its own management of IP7.
*/
@@ -325,7 +325,7 @@ static inline void dispatch_ip2(void)
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
- * check the 1480 interrupt registers to figure out what to do. Need
+ * check the 1480 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
base = A_BCM1480_IMR_MAPPER(cpu);
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 6343011e9902..588e1806a1a3 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -127,8 +127,8 @@ static __init void prom_meminit(void)
if ((initrd_pstart > addr) &&
(initrd_pstart < (addr + size))) {
add_memory_region(addr,
- initrd_pstart - addr,
- BOOT_MEM_RAM);
+ initrd_pstart - addr,
+ BOOT_MEM_RAM);
rd_flag = 1;
}
if ((initrd_pend > addr) &&
@@ -195,7 +195,7 @@ static int __init initrd_setup(char *str)
/*
*Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
- * e.g. initrd=3abfd@80010000. This is set up by the loader.
+ * e.g. initrd=3abfd@80010000. This is set up by the loader.
*/
for (tmp = str; *tmp != '@'; tmp++) {
if (!*tmp) {
@@ -244,7 +244,7 @@ void __init prom_init(void)
int *prom_vec = (int *) fw_arg3;
_machine_restart = cfe_linux_restart;
- _machine_halt = cfe_linux_halt;
+ _machine_halt = cfe_linux_halt;
pm_power_off = cfe_linux_halt;
/*
@@ -299,7 +299,7 @@ void __init prom_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
{
char *ptr;
- /* Need to find out early whether we've got an initrd. So scan
+ /* Need to find out early whether we've got an initrd. So scan
the list looking now */
for (ptr = arcs_cmdline; *ptr; ptr++) {
while (*ptr == ' ') {
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index e8c4538c5f61..2188b39a1251 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -152,7 +152,7 @@ static u64 tb_period;
static void arm_tb(void)
{
- u64 scdperfcnt;
+ u64 scdperfcnt;
u64 next = (1ULL << 40) - tb_period;
u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
@@ -257,8 +257,8 @@ static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
/*
* Requires: Already called zclk_timer_init with a value that won't
- * saturate 40 bits. No subsequent use of SCD performance counters
- * or trace buffer.
+ * saturate 40 bits. No subsequent use of SCD performance counters
+ * or trace buffer.
*/
static int sbprof_zbprof_start(struct file *filp)
@@ -288,8 +288,8 @@ static int sbprof_zbprof_start(struct file *filp)
/*
* We grab this interrupt to prevent others from trying to use
- * it, even though we don't want to service the interrupts
- * (they only feed into the trace-on-interrupt mechanism)
+ * it, even though we don't want to service the interrupts
+ * (they only feed into the trace-on-interrupt mechanism)
*/
if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
free_irq(K_INT_TRACE_FREEZE, &sbp);
@@ -298,7 +298,7 @@ static int sbprof_zbprof_start(struct file *filp)
/*
* I need the core to mask these, but the interrupt mapper to
- * pass them through. I am exploiting my knowledge that
+ * pass them through. I am exploiting my knowledge that
* cp0_status masks out IP[5]. krw
*/
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
@@ -328,7 +328,7 @@ static int sbprof_zbprof_start(struct file *filp)
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
/* Initialize Trace Event 0-7 */
- /* when interrupt */
+ /* when interrupt */
__raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
@@ -479,7 +479,7 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
return err;
}
pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
- cur_sample, cur_count);
+ cur_sample, cur_count);
size -= cur_count;
sample_left -= cur_count;
if (!sample_left) {
@@ -540,7 +540,7 @@ static const struct file_operations sbprof_tb_fops = {
.open = sbprof_tb_open,
.release = sbprof_tb_release,
.read = sbprof_tb_read,
- .unlocked_ioctl = sbprof_tb_ioctl,
+ .unlocked_ioctl = sbprof_tb_ioctl,
.compat_ioctl = sbprof_tb_ioctl,
.mmap = NULL,
.llseek = default_llseek,
diff --git a/arch/mips/sibyte/sb1250/bus_watcher.c b/arch/mips/sibyte/sb1250/bus_watcher.c
index 86e6e54dd15d..e651105b3f0b 100644
--- a/arch/mips/sibyte/sb1250/bus_watcher.c
+++ b/arch/mips/sibyte/sb1250/bus_watcher.c
@@ -71,7 +71,7 @@ static void print_summary(uint32_t status, uint32_t l2_err,
* already been destructively read out of the registers.
*
* notes: this is currently used by the cache error handler
- * should provide locking against the interrupt handler
+ * should provide locking against the interrupt handler
*/
void check_bus_watcher(void)
{
@@ -119,7 +119,7 @@ static int bw_print_buffer(char *page, struct bw_stats_struct *stats)
(int)G_SCD_BERR_RID(stats->status),
(int)G_SCD_BERR_DCODE(stats->status));
/* XXXKW indicate multiple errors between printings, or stats
- collection (or both)? */
+ collection (or both)? */
if (stats->status & M_SCD_BERR_MULTERRS)
len += sprintf(page+len, "Multiple errors observed since last check.\n");
if (stats->status_printed) {
@@ -168,7 +168,7 @@ static void create_proc_decoder(struct bw_stats_struct *stats)
* sibyte_bw_int - handle bus watcher interrupts and accumulate counts
*
* notes: possible re-entry due to multiple sources
- * should check/indicate saturation
+ * should check/indicate saturation
*/
static irqreturn_t sibyte_bw_int(int irq, void *data)
{
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 340aaf626659..fca0cdb99509 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -264,7 +264,7 @@ void __init arch_init_irq(void)
IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
(K_INT_MBOX_0 << 3)));
- /* Clear the mailboxes. The firmware may leave them dirty */
+ /* Clear the mailboxes. The firmware may leave them dirty */
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
__raw_writeq(0xffffffffffffffffULL,
@@ -277,7 +277,7 @@ void __init arch_init_irq(void)
/*
* Note that the timer interrupts are also mapped, but this is
- * done in sb1250_time_init(). Also, the profiling driver
+ * done in sb1250_time_init(). Also, the profiling driver
* does its own management of IP7.
*/
@@ -294,7 +294,7 @@ static inline void dispatch_ip2(void)
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
- * check the 1250 interrupt registers to figure out what to do. Need
+ * check the 1250 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
@@ -323,7 +323,7 @@ asmlinkage void plat_irq_dispatch(void)
if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP4)
- do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
+ do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 92da3155ce07..a14bd4cb0bc0 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -203,8 +203,8 @@ void __init sb1250_setup(void)
case K_SYS_REVISION_BCM1250_PASS1:
#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
- "and the kernel doesn't have the proper "
- "workarounds compiled in. @@@@\n");
+ "and the kernel doesn't have the proper "
+ "workarounds compiled in. @@@@\n");
bad_config = 1;
#endif
break;
@@ -213,28 +213,28 @@ void __init sb1250_setup(void)
#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
!defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
printk("@@@@ This is a BCM1250 A3-A10 board, and the "
- "kernel doesn't have the proper workarounds "
- "compiled in. @@@@\n");
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
bad_config = 1;
#endif
#ifdef CONFIG_CPU_HAS_PREFETCH
printk("@@@@ Prefetches may be enabled in this kernel, "
- "but are buggy on this board. @@@@\n");
+ "but are buggy on this board. @@@@\n");
bad_config = 1;
#endif
break;
case K_SYS_REVISION_BCM1250_PASS2_2:
#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
printk("@@@@ This is a BCM1250 B1/B2. board, and the "
- "kernel doesn't have the proper workarounds "
- "compiled in. @@@@\n");
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
bad_config = 1;
#endif
#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
!defined(CONFIG_CPU_HAS_PREFETCH)
printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
- "conservatively configured for an 'A' stepping. "
- "@@@@\n");
+ "conservatively configured for an 'A' stepping. "
+ "@@@@\n");
#endif
break;
default:
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 097335262fb3..9480c14ec66a 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -13,7 +13,7 @@
#define DRV_NAME "pata-swarm"
-#define SWARM_IDE_SHIFT 5
+#define SWARM_IDE_SHIFT 5
#define SWARM_IDE_BASE 0x1f0
#define SWARM_IDE_CTRL 0x3f6
@@ -123,7 +123,7 @@ static int __init sb1250_device_init(void)
case K_SYS_SOC_TYPE_BCM1120:
case K_SYS_SOC_TYPE_BCM1125:
case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
ret = platform_add_devices(sb1250_devs, 2);
break;
case K_SYS_SOC_TYPE_BCM1x55:
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index 4438b2195c44..178a824b28d4 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -4,8 +4,8 @@
* Copyright (C) 2002 MontaVista Software Inc.
* Author: jsun@mvista.com or jsun@junsun.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -28,15 +28,15 @@
* Register bits
*/
-#define X1241REG_SR_BAT 0x80 /* currently on battery power */
+#define X1241REG_SR_BAT 0x80 /* currently on battery power */
#define X1241REG_SR_RWEL 0x04 /* r/w latch is enabled, can write RTC */
#define X1241REG_SR_WEL 0x02 /* r/w latch is unlocked, can enable r/w now */
#define X1241REG_SR_RTCF 0x01 /* clock failed */
#define X1241REG_BL_BP2 0x80 /* block protect 2 */
#define X1241REG_BL_BP1 0x40 /* block protect 1 */
#define X1241REG_BL_BP0 0x20 /* block protect 0 */
-#define X1241REG_BL_WD1 0x10
-#define X1241REG_BL_WD0 0x08
+#define X1241REG_BL_WD1 0x10
+#define X1241REG_BL_WD0 0x08
#define X1241REG_HR_MIL 0x80 /* military time format */
/*
@@ -61,50 +61,50 @@
static int xicor_read(uint8_t addr)
{
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq((addr >> 8) & 0x7, SMB_CSR(R_SMB_CMD));
__raw_writeq(addr & 0xff, SMB_CSR(R_SMB_DATA));
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR2BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
- if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
- /* Clear error bit by writing a 1 */
- __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
- return -1;
- }
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ }
return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
}
static int xicor_write(uint8_t addr, int b)
{
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq(addr, SMB_CSR(R_SMB_CMD));
__raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA));
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
- if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
- /* Clear error bit by writing a 1 */
- __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
- return -1;
- } else {
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ } else {
return 0;
}
}
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index e05ad4da44d9..dd0ab982d77e 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -41,17 +41,17 @@ static struct platform_device a20r_serial8250_device = {
};
static struct resource a20r_ds1216_rsrc[] = {
- {
- .start = 0x1c081ffc,
- .end = 0x1c081fff,
- .flags = IORESOURCE_MEM
- }
+ {
+ .start = 0x1c081ffc,
+ .end = 0x1c081fff,
+ .flags = IORESOURCE_MEM
+ }
};
static struct platform_device a20r_ds1216_device = {
- .name = "rtc-ds1216",
- .num_resources = ARRAY_SIZE(a20r_ds1216_rsrc),
- .resource = a20r_ds1216_rsrc
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(a20r_ds1216_rsrc),
+ .resource = a20r_ds1216_rsrc
};
static struct resource snirm_82596_rsrc[] = {
@@ -76,14 +76,14 @@ static struct resource snirm_82596_rsrc[] = {
.flags = IORESOURCE_IRQ
},
{
- .flags = 0x01 /* 16bit mpu port access */
+ .flags = 0x01 /* 16bit mpu port access */
}
};
static struct platform_device snirm_82596_pdev = {
- .name = "snirm_82596",
- .num_resources = ARRAY_SIZE(snirm_82596_rsrc),
- .resource = snirm_82596_rsrc
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rsrc),
+ .resource = snirm_82596_rsrc
};
static struct resource snirm_53c710_rsrc[] = {
@@ -100,9 +100,9 @@ static struct resource snirm_53c710_rsrc[] = {
};
static struct platform_device snirm_53c710_pdev = {
- .name = "snirm_53c710",
- .num_resources = ARRAY_SIZE(snirm_53c710_rsrc),
- .resource = snirm_53c710_rsrc
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rsrc),
+ .resource = snirm_53c710_rsrc
};
static struct resource sc26xx_rsrc[] = {
@@ -171,7 +171,7 @@ static u32 a20r_ack_hwint(void)
" addiu %1, -1 \n"
" sw $1, 0(%0) \n"
" sync \n"
- ".set pop \n"
+ ".set pop \n"
:
: "Jr" (PCIMT_UCONF), "Jr" (0xbc000000));
write_c0_status(status);
@@ -236,13 +236,13 @@ static int __init snirm_a20r_setup_devinit(void)
switch (sni_brd_type) {
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- platform_device_register(&snirm_82596_pdev);
- platform_device_register(&snirm_53c710_pdev);
- platform_device_register(&sc26xx_pdev);
- platform_device_register(&a20r_serial8250_device);
- platform_device_register(&a20r_ds1216_device);
+ platform_device_register(&snirm_82596_pdev);
+ platform_device_register(&snirm_53c710_pdev);
+ platform_device_register(&sc26xx_pdev);
+ platform_device_register(&a20r_serial8250_device);
+ platform_device_register(&a20r_ds1216_device);
sni_eisa_root_init();
- break;
+ break;
}
return 0;
}
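
The sni/a20r.c hunks above only reindent static board-device tables; the underlying pattern is ordinary platform-device registration. A minimal sketch of that pattern follows (device name, addresses and the initcall are hypothetical, shown only to make the reindented tables easier to read):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource demo_rsrc[] = {
	{
		.start = 0x1c081ffc,	/* example MMIO window */
		.end   = 0x1c081fff,
		.flags = IORESOURCE_MEM
	}
};

static struct platform_device demo_pdev = {
	.name		= "demo-device",
	.num_resources	= ARRAY_SIZE(demo_rsrc),
	.resource	= demo_rsrc
};

/* Registered from board setup code, typically via an initcall. */
static int __init demo_devinit(void)
{
	return platform_device_register(&demo_pdev);
}
device_initcall(demo_devinit);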
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
index 6827feb4de96..179b5d556ad2 100644
--- a/arch/mips/sni/eisa.c
+++ b/arch/mips/sni/eisa.c
@@ -22,7 +22,7 @@ static struct platform_device eisa_root_dev = {
};
static struct eisa_root_device eisa_bus_root = {
- .dev = &eisa_root_dev.dev,
+ .dev = &eisa_root_dev.dev,
.bus_base_addr = 0,
.res = &ioport_resource,
.slots = EISA_MAX_SLOTS,
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 5a4ec75382e2..ac61b90bcc66 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -58,25 +58,25 @@ void __init arch_init_irq(void)
case SNI_BRD_10NEW:
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- sni_a20r_irq_init();
- break;
+ sni_a20r_irq_init();
+ break;
case SNI_BRD_PCI_TOWER:
- sni_pcit_irq_init();
- break;
+ sni_pcit_irq_init();
+ break;
case SNI_BRD_PCI_TOWER_CPLUS:
- sni_pcit_cplus_irq_init();
- break;
+ sni_pcit_cplus_irq_init();
+ break;
case SNI_BRD_RM200:
- sni_rm200_irq_init();
- break;
+ sni_rm200_irq_init();
+ break;
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- sni_pcimt_irq_init();
- break;
+ sni_pcimt_irq_init();
+ break;
}
}
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index cdb1417fba59..cec4b8ca1438 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -60,7 +60,7 @@ static inline void sni_pcimt_detect(void)
p += sprintf(p, "%s PCI", (csmsr & 0x80) ? "RM200" : "RM300");
if ((csmsr & 0x80) == 0)
p += sprintf(p, ", board revision %s",
- (csmsr & 0x20) ? "D" : "C");
+ (csmsr & 0x20) ? "D" : "C");
asic = csmsr & 0x80;
asic = (csmsr & 0x08) ? asic : !asic;
p += sprintf(p, ", ASIC PCI Rev %s", asic ? "1.0" : "1.1");
@@ -91,22 +91,22 @@ static struct platform_device pcimt_serial8250_device = {
};
static struct resource pcimt_cmos_rsrc[] = {
- {
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_IRQ
- }
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
};
static struct platform_device pcimt_cmos_device = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(pcimt_cmos_rsrc),
- .resource = pcimt_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcimt_cmos_rsrc),
+ .resource = pcimt_cmos_rsrc
};
@@ -191,7 +191,7 @@ static struct pci_controller sni_controller = {
.mem_offset = 0x00000000UL,
.io_resource = &sni_io_resource,
.io_offset = 0x00000000UL,
- .io_map_base = SNI_PORT_BASE
+ .io_map_base = SNI_PORT_BASE
};
static void enable_pcimt_irq(struct irq_data *d)
@@ -319,9 +319,9 @@ static int __init snirm_pcimt_setup_devinit(void)
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- platform_device_register(&pcimt_serial8250_device);
- platform_device_register(&pcimt_cmos_device);
- break;
+ platform_device_register(&pcimt_serial8250_device);
+ platform_device_register(&pcimt_cmos_device);
+ break;
}
return 0;
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c
index b5246373d16b..7cddd03d1fea 100644
--- a/arch/mips/sni/pcit.c
+++ b/arch/mips/sni/pcit.c
@@ -59,22 +59,22 @@ static struct platform_device pcit_cplus_serial8250_device = {
};
static struct resource pcit_cmos_rsrc[] = {
- {
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_IRQ
- }
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
};
static struct platform_device pcit_cmos_device = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(pcit_cmos_rsrc),
- .resource = pcit_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcit_cmos_rsrc),
+ .resource = pcit_cmos_rsrc
};
static struct platform_device pcit_pcspeaker_pdev = {
@@ -153,7 +153,7 @@ static struct pci_controller sni_pcit_controller = {
.mem_offset = 0x00000000UL,
.io_resource = &sni_io_resource,
.io_offset = 0x00000000UL,
- .io_map_base = SNI_PORT_BASE
+ .io_map_base = SNI_PORT_BASE
};
static void enable_pcit_irq(struct irq_data *d)
@@ -272,16 +272,16 @@ static int __init snirm_pcit_setup_devinit(void)
{
switch (sni_brd_type) {
case SNI_BRD_PCI_TOWER:
- platform_device_register(&pcit_serial8250_device);
- platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
platform_device_register(&pcit_pcspeaker_pdev);
- break;
+ break;
case SNI_BRD_PCI_TOWER_CPLUS:
- platform_device_register(&pcit_cplus_serial8250_device);
- platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_cplus_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
platform_device_register(&pcit_pcspeaker_pdev);
- break;
+ break;
}
return 0;
}
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 3ab5b5d25b0a..a046b302623e 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -48,17 +48,17 @@ static struct platform_device rm200_serial8250_device = {
};
static struct resource rm200_ds1216_rsrc[] = {
- {
- .start = 0x1cd41ffc,
- .end = 0x1cd41fff,
- .flags = IORESOURCE_MEM
- }
+ {
+ .start = 0x1cd41ffc,
+ .end = 0x1cd41fff,
+ .flags = IORESOURCE_MEM
+ }
};
static struct platform_device rm200_ds1216_device = {
- .name = "rtc-ds1216",
- .num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
- .resource = rm200_ds1216_rsrc
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
+ .resource = rm200_ds1216_rsrc
};
static struct resource snirm_82596_rm200_rsrc[] = {
@@ -88,9 +88,9 @@ static struct resource snirm_82596_rm200_rsrc[] = {
};
static struct platform_device snirm_82596_rm200_pdev = {
- .name = "snirm_82596",
- .num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
- .resource = snirm_82596_rm200_rsrc
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
+ .resource = snirm_82596_rm200_rsrc
};
static struct resource snirm_53c710_rm200_rsrc[] = {
@@ -107,9 +107,9 @@ static struct resource snirm_53c710_rm200_rsrc[] = {
};
static struct platform_device snirm_53c710_rm200_pdev = {
- .name = "snirm_53c710",
- .num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
- .resource = snirm_53c710_rm200_rsrc
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
+ .resource = snirm_53c710_rm200_rsrc
};
static int __init snirm_setup_devinit(void)
@@ -134,9 +134,9 @@ device_initcall(snirm_setup_devinit);
*/
static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);
-#define PIC_CMD 0x00
-#define PIC_IMR 0x01
-#define PIC_ISR PIC_CMD
+#define PIC_CMD 0x00
+#define PIC_IMR 0x01
+#define PIC_ISR PIC_CMD
#define PIC_POLL PIC_ISR
#define PIC_OCW3 PIC_ISR
@@ -421,8 +421,8 @@ void __init sni_rm200_i8259_irqs(void)
}
-#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
-#define SNI_RM200_INT_ENA_REG CKSEG1ADDR(0xbc080000)
+#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
+#define SNI_RM200_INT_ENA_REG CKSEG1ADDR(0xbc080000)
#define SNI_RM200_INT_START 24
#define SNI_RM200_INT_END 28
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 2e9c283b8e68..5b09b3544edd 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -204,23 +204,23 @@ void __init plat_mem_setup(void)
case SNI_BRD_10NEW:
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- sni_a20r_init();
- break;
+ sni_a20r_init();
+ break;
case SNI_BRD_PCI_TOWER:
case SNI_BRD_PCI_TOWER_CPLUS:
- sni_pcit_init();
+ sni_pcit_init();
break;
case SNI_BRD_RM200:
- sni_rm200_init();
- break;
+ sni_rm200_init();
+ break;
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- sni_pcimt_init();
- break;
+ sni_pcimt_init();
+ break;
}
_machine_restart = sni_machine_restart;
@@ -247,16 +247,16 @@ static void quirk_cirrus_ram_size(struct pci_dev *dev)
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
- == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
- vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
+ == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+ vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
vga_wseq(NULL, CL_SEQRF, 0x18);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
#endif
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 494c9e7847aa..cf8ec568b9df 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -10,12 +10,12 @@
#include <asm/time.h>
#include <asm-generic/rtc.h>
-#define SNI_CLOCK_TICK_RATE 3686400
-#define SNI_COUNTER2_DIV 64
-#define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
+#define SNI_CLOCK_TICK_RATE 3686400
+#define SNI_COUNTER2_DIV 64
+#define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
static void a20r_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -33,14 +33,14 @@ static void a20r_set_mode(enum clock_event_mode mode,
*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
wmb();
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
}
static struct clock_event_device a20r_clockevent_device = {
@@ -82,15 +82,15 @@ static void __init sni_a20r_timer_setup(void)
struct irqaction *action = &a20r_irqaction;
unsigned int cpu = smp_processor_id();
- cd->cpumask = cpumask_of(cpu);
+ cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
}
-#define SNI_8254_TICK_RATE 1193182UL
+#define SNI_8254_TICK_RATE 1193182UL
-#define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255)
+#define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255)
static __init unsigned long dosample(void)
{
diff --git a/arch/mips/txx9/Platform b/arch/mips/txx9/Platform
index a801abbe138b..a176d1fd5799 100644
--- a/arch/mips/txx9/Platform
+++ b/arch/mips/txx9/Platform
@@ -6,5 +6,5 @@ cflags-$(CONFIG_MACH_TX39XX) += \
cflags-$(CONFIG_MACH_TX49XX) += \
-I$(srctree)/arch/mips/include/asm/mach-tx49xx
-load-$(CONFIG_MACH_TX39XX) += 0xffffffff80050000
-load-$(CONFIG_MACH_TX49XX) += 0xffffffff80100000
+load-$(CONFIG_MACH_TX39XX) += 0xffffffff80050000
+load-$(CONFIG_MACH_TX49XX) += 0xffffffff80100000
diff --git a/arch/mips/txx9/generic/irq_tx4927.c b/arch/mips/txx9/generic/irq_tx4927.c
index 7e3ac5782da4..ed8e702d448e 100644
--- a/arch/mips/txx9/generic/irq_tx4927.c
+++ b/arch/mips/txx9/generic/irq_tx4927.c
@@ -2,7 +2,7 @@
* Common tx4927 irq handler
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c
index 6b067dbd2ae1..0d7267e81a8c 100644
--- a/arch/mips/txx9/generic/irq_tx4939.c
+++ b/arch/mips/txx9/generic/irq_tx4939.c
@@ -5,8 +5,8 @@
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
+ * ahennessy@mvista.com
+ * source@mvista.com
* Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
diff --git a/arch/mips/txx9/generic/mem_tx4927.c b/arch/mips/txx9/generic/mem_tx4927.c
index 70f9626f8227..deea2ceae8a7 100644
--- a/arch/mips/txx9/generic/mem_tx4927.c
+++ b/arch/mips/txx9/generic/mem_tx4927.c
@@ -2,7 +2,7 @@
* common tx4927 memory interface
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index ce8f8b9b930c..28713274e0cc 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -2,7 +2,7 @@
* linux/arch/mips/txx9/pci.c
*
* Based on linux/arch/mips/txx9/rbtx4927/setup.c,
- * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001-2005 MontaVista Software Inc.
@@ -107,7 +107,7 @@ int txx9_pci_mem_high __initdata;
/*
* allocate pci_controller and resources.
- * mem_base, io_base: physical address. 0 for auto assignment.
+ * mem_base, io_base: physical address. 0 for auto assignment.
* mem_size and io_size means max size on auto assignment.
* pcic must be &txx9_primary_pcic or NULL.
*/
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 560fe8991753..5524f2c7b05c 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -513,19 +513,19 @@ void __init txx9_sio_init(unsigned long baseaddr, int irq,
}
#ifdef CONFIG_EARLY_PRINTK
-static void __init null_prom_putchar(char c)
+static void null_prom_putchar(char c)
{
}
-void (*txx9_prom_putchar)(char c) __initdata = null_prom_putchar;
+void (*txx9_prom_putchar)(char c) = null_prom_putchar;
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
txx9_prom_putchar(c);
}
static void __iomem *early_txx9_sio_port;
-static void __init early_txx9_sio_putchar(char c)
+static void early_txx9_sio_putchar(char c)
{
#define TXX9_SICISR 0x0c
#define TXX9_SITFIFO 0x1c
diff --git a/arch/mips/txx9/generic/setup_tx3927.c b/arch/mips/txx9/generic/setup_tx3927.c
index 9505d58454c8..110e05c3eb8f 100644
--- a/arch/mips/txx9/generic/setup_tx3927.c
+++ b/arch/mips/txx9/generic/setup_tx3927.c
@@ -132,6 +132,6 @@ void __init tx3927_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(tx3927_romcptr->cr[ch] & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index 3418b2a90f7e..e714d6ce9a82 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -250,7 +250,7 @@ void __init tx4927_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4927_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index eb2080110239..0a3bf2dfaba1 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -329,7 +329,7 @@ void __init tx4938_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4938_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 5ff7a9584daf..729a50991780 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -301,7 +301,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
unsigned int ch_mask = 0;
__u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
- cts_mask |= ~1; /* only SIO0 have RTS/CTS */
+ cts_mask |= ~1; /* only SIO0 have RTS/CTS */
if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0)
cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */
if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2)
@@ -378,7 +378,7 @@ void __init tx4939_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4939_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/smsc_fdc37m81x.c b/arch/mips/txx9/generic/smsc_fdc37m81x.c
index 8ebc3848f3ac..f98baa6263d2 100644
--- a/arch/mips/txx9/generic/smsc_fdc37m81x.c
+++ b/arch/mips/txx9/generic/smsc_fdc37m81x.c
@@ -18,40 +18,40 @@
/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
#define SMSC_FDC37M81X_CONFIG_DATA 0x01
-#define SMSC_FDC37M81X_CONF 0x02
-#define SMSC_FDC37M81X_INDEX 0x03
-#define SMSC_FDC37M81X_DNUM 0x07
-#define SMSC_FDC37M81X_DID 0x20
-#define SMSC_FDC37M81X_DREV 0x21
-#define SMSC_FDC37M81X_PCNT 0x22
-#define SMSC_FDC37M81X_PMGT 0x23
-#define SMSC_FDC37M81X_OSC 0x24
-#define SMSC_FDC37M81X_CONFPA0 0x26
-#define SMSC_FDC37M81X_CONFPA1 0x27
-#define SMSC_FDC37M81X_TEST4 0x2B
-#define SMSC_FDC37M81X_TEST5 0x2C
-#define SMSC_FDC37M81X_TEST1 0x2D
-#define SMSC_FDC37M81X_TEST2 0x2E
-#define SMSC_FDC37M81X_TEST3 0x2F
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
/* Logical device numbers */
-#define SMSC_FDC37M81X_FDD 0x00
-#define SMSC_FDC37M81X_SERIAL1 0x04
-#define SMSC_FDC37M81X_SERIAL2 0x05
-#define SMSC_FDC37M81X_KBD 0x07
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
/* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_ACTIVE 0x30
#define SMSC_FDC37M81X_BASEADDR0 0x60
#define SMSC_FDC37M81X_BASEADDR1 0x61
-#define SMSC_FDC37M81X_INT 0x70
-#define SMSC_FDC37M81X_INT2 0x72
-#define SMSC_FDC37M81X_MODE 0xF0
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_MODE 0xF0
/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
-#define SMSC_FDC37M81X_CHIP_ID 0x4d
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
static unsigned long g_smsc_fdc37m81x_base;
diff --git a/arch/mips/txx9/rbtx4927/irq.c b/arch/mips/txx9/rbtx4927/irq.c
index 6c22c496090b..3f48292c9c6c 100644
--- a/arch/mips/txx9/rbtx4927/irq.c
+++ b/arch/mips/txx9/rbtx4927/irq.c
@@ -2,7 +2,7 @@
* Toshiba RBTX4927 specific interrupt handlers
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4927/prom.c b/arch/mips/txx9/rbtx4927/prom.c
index cc97c6a6011b..fe6d0b54763f 100644
--- a/arch/mips/txx9/rbtx4927/prom.c
+++ b/arch/mips/txx9/rbtx4927/prom.c
@@ -2,7 +2,7 @@
* rbtx4927 specific prom routines
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c
index b15adfc2d726..3c516ef625e5 100644
--- a/arch/mips/txx9/rbtx4927/setup.c
+++ b/arch/mips/txx9/rbtx4927/setup.c
@@ -2,7 +2,7 @@
* Toshiba rbtx4927 specific setup
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
index d6e70dab3bd3..c9afd05020e0 100644
--- a/arch/mips/txx9/rbtx4938/setup.c
+++ b/arch/mips/txx9/rbtx4938/setup.c
@@ -107,10 +107,10 @@ static void __init rbtx4938_pci_setup(void)
/* SPI support */
/* chip select for SPI devices */
-#define SEEPROM1_CS 7 /* PIO7 */
-#define SEEPROM2_CS 0 /* IOC */
-#define SEEPROM3_CS 1 /* IOC */
-#define SRTC_CS 2 /* IOC */
+#define SEEPROM1_CS 7 /* PIO7 */
+#define SEEPROM2_CS 0 /* IOC */
+#define SEEPROM3_CS 1 /* IOC */
+#define SRTC_CS 2 /* IOC */
#define SPI_BUSNO 0
static int __init rbtx4938_ethaddr_init(void)
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index e15641d93092..2da5f25f98bc 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -243,7 +243,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
}
static struct platform_driver rbtx4939_led_driver = {
- .driver = {
+ .driver = {
.name = "rbtx4939-led",
.owner = THIS_MODULE,
},
@@ -337,7 +337,7 @@ static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
shift = bdipsw & 3;
while (len) {
curlen = min_t(unsigned long, len,
- 0x400000 - (from & (0x400000 - 1)));
+ 0x400000 - (from & (0x400000 - 1)));
memcpy(to,
(void *)((from & ~0xc00000) |
((((from >> 22) + shift) & 3) << 22)),
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
index 6346c59c9f9d..ff7d1c66cf82 100644
--- a/arch/mips/vr41xx/common/bcu.c
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -1,7 +1,7 @@
/*
* bcu.c, Bus Control Unit routines for the NEC VR4100 series.
*
- * Copyright (C) 2002 MontaVista Software Inc.
+ * Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
@@ -176,7 +176,7 @@ static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long p
}
static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock,
- unsigned long vtclock)
+ unsigned long vtclock)
{
unsigned long tclock = 0;
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 8ba7d04a5ec5..05302bfdd114 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -217,24 +217,24 @@ static int __init vr41xx_cmu_init(void)
unsigned long start, size;
switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
+ case CPU_VR4111:
+ case CPU_VR4121:
start = CMU_TYPE1_BASE;
size = CMU_TYPE1_SIZE;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
start = CMU_TYPE2_BASE;
size = CMU_TYPE2_SIZE;
break;
- case CPU_VR4133:
+ case CPU_VR4133:
start = CMU_TYPE3_BASE;
size = CMU_TYPE3_SIZE;
- break;
+ break;
default:
panic("Unexpected CPU of NEC VR4100 series");
break;
- }
+ }
if (request_mem_region(start, size, "CMU") == NULL)
return -EBUSY;
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
index b32b3bc60441..32cc8d66b34e 100644
--- a/arch/mips/vr41xx/common/giu.c
+++ b/arch/mips/vr41xx/common/giu.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series GIU platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index a39ef3207d71..41e873bc8474 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -49,11 +49,11 @@ static unsigned char sysint1_assign[16] = {
static unsigned char sysint2_assign[16] = {
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-#define ICU1_TYPE1_BASE 0x0b000080UL
-#define ICU2_TYPE1_BASE 0x0b000200UL
+#define ICU1_TYPE1_BASE 0x0b000080UL
+#define ICU2_TYPE1_BASE 0x0b000200UL
-#define ICU1_TYPE2_BASE 0x0f000080UL
-#define ICU2_TYPE2_BASE 0x0f0000a0UL
+#define ICU1_TYPE2_BASE 0x0f000080UL
+#define ICU2_TYPE2_BASE 0x0f0000a0UL
#define ICU1_SIZE 0x20
#define ICU2_SIZE 0x1c
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 9fbf5f0d1faf..70a3f90131d8 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -74,7 +74,7 @@ static inline void software_reset(void)
change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
flush_cache_all();
write_c0_wired(0);
- __asm__("jr %0"::"r"(0xbfc00000));
+ __asm__("jr %0"::"r"(0xbfc00000));
break;
}
}
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
index 76e3e8af7c96..c1e3d200920e 100644
--- a/arch/mips/vr41xx/common/rtc.c
+++ b/arch/mips/vr41xx/common/rtc.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series RTC platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
index ff841422b638..45836a92b7a1 100644
--- a/arch/mips/vr41xx/common/type.c
+++ b/arch/mips/vr41xx/common/type.c
@@ -1,7 +1,7 @@
/*
* type.c, System type for NEC VR4100 series.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/wrppmc/Platform b/arch/mips/wrppmc/Platform
index e758645e9681..dc78b25b95fe 100644
--- a/arch/mips/wrppmc/Platform
+++ b/arch/mips/wrppmc/Platform
@@ -2,6 +2,6 @@
# Wind River PPMC Board (4KC + GT64120)
#
platform-$(CONFIG_WR_PPMC) += wrppmc/
-cflags-$(CONFIG_WR_PPMC) += \
+cflags-$(CONFIG_WR_PPMC) += \
-I$(srctree)/arch/mips/include/asm/mach-wrppmc
load-$(CONFIG_WR_PPMC) += 0xffffffff80100000
diff --git a/arch/mips/wrppmc/irq.c b/arch/mips/wrppmc/irq.c
index c6e706274db4..f237bf4d5c3a 100644
--- a/arch/mips/wrppmc/irq.c
+++ b/arch/mips/wrppmc/irq.c
@@ -4,8 +4,8 @@
* Copyright (C) 2006, Wind River System Inc.
* Author: Rongkai.Zhan, <rongkai.zhan@windriver.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/wrppmc/serial.c b/arch/mips/wrppmc/serial.c
index 6f9d0858f596..83f0f7d05187 100644
--- a/arch/mips/wrppmc/serial.c
+++ b/arch/mips/wrppmc/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of WRPPMC UART platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index ad0caea0bfea..b06c7360b1c6 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,6 +8,7 @@ config MN10300
select HAVE_ARCH_KGDB
select GENERIC_ATOMIC64
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
+ select HAVE_VIRT_TO_BUS
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index 4ebd6b3a0a1e..f592d7a9f032 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -150,9 +150,4 @@ do { \
*/
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#endif
-
#endif /* _ASM_ELF_H */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 1194fe486b01..7fa66a0e4624 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -36,7 +36,6 @@ extern void pcibios_resource_survey(void);
/* pci.c */
extern int pcibios_last_bus;
-extern struct pci_bus *pci_root_bus;
extern struct pci_ops *pci_root_ops;
extern struct irq_routing_table *pcibios_get_irq_routing_table(void);
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6911e845b8cd..1adcf024bb9a 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -24,7 +24,6 @@
unsigned int pci_probe = 1;
int pcibios_last_bus = -1;
-struct pci_bus *pci_root_bus;
struct pci_ops *pci_root_ops;
/*
@@ -377,8 +376,7 @@ static int __init pcibios_init(void)
pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset);
pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset);
- pci_root_bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL,
- &resources);
+ pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources);
pcibios_irq_init();
pcibios_fixup_irqs();
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 0ac66f67521f..014a6482ed4c 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -12,6 +12,7 @@ config OPENRISC
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -26,10 +27,6 @@ config OPENRISC
config MMU
def_bool y
-config SYMBOL_PREFIX
- string
- default ""
-
config HAVE_DMA_ATTRS
def_bool y
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
index a9e11efae14d..2c64f2228dc7 100644
--- a/arch/openrisc/include/asm/bitops.h
+++ b/arch/openrisc/include/asm/bitops.h
@@ -54,6 +54,7 @@
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
index f4aa8a542a22..d334e204bbdd 100644
--- a/arch/openrisc/include/asm/elf.h
+++ b/arch/openrisc/include/asm/elf.h
@@ -62,7 +62,4 @@ extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 33691380608e..cab746fa9e87 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -70,7 +70,6 @@ struct thread_struct {
*/
#define task_pt_regs(task) user_regs(task_thread_info(task))
-#define current_regs() user_regs(current_thread_info())
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 54afd0a129fe..d8a455ede5a7 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -201,12 +201,17 @@ EXCEPTION_ENTRY(_bus_fault_handler)
l.nop
/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler)
+ l.and r5,r5,r0
+ l.j 1f
+ l.nop
EXCEPTION_ENTRY(_data_page_fault_handler)
/* set up parameters for do_page_fault */
+ l.ori r5,r0,0x300 // exception vector
+1:
l.addi r3,r1,0 // pt_regs
/* r4 set be EXCEPTION_HANDLE */ // effective address of fault
- l.ori r5,r0,0x300 // exception vector
/*
* __PHX__: TODO
@@ -276,12 +281,17 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
l.nop
/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_itlb_miss_page_fault_handler)
+ l.and r5,r5,r0
+ l.j 1f
+ l.nop
EXCEPTION_ENTRY(_insn_page_fault_handler)
/* set up parameters for do_page_fault */
+ l.ori r5,r0,0x400 // exception vector
+1:
l.addi r3,r1,0 // pt_regs
/* r4 set be EXCEPTION_HANDLE */ // effective address of fault
- l.ori r5,r0,0x400 // exception vector
l.ori r6,r0,0x0 // !write access
/* call fault.c handler in or32/mm/fault.c */
@@ -1040,7 +1050,7 @@ ENTRY(_switch)
* we are expected to have set up the arg to schedule_tail already,
* hence we do so here unconditionally:
*/
- l.lwz r3,TI_STACK(r3) /* Load 'prev' as schedule_tail arg */
+ l.lwz r3,TI_TASK(r3) /* Load 'prev' as schedule_tail arg */
l.jr r9
l.nop
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 1088b5fca3bd..1d3c9c28ac25 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -19,6 +19,7 @@
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -291,9 +292,9 @@
/* Jump to .init code at _start which lives in the .head section
* and will be discarded after boot.
*/
- LOAD_SYMBOL_2_GPR(r4, _start)
- tophys (r3,r4) /* MMU disabled */
- l.jr r3
+ LOAD_SYMBOL_2_GPR(r15, _start)
+ tophys (r13,r15) /* MMU disabled */
+ l.jr r13
l.nop
/* ---[ 0x200: BUS exception ]------------------------------------------- */
@@ -1069,8 +1070,7 @@ d_pte_not_present:
EXCEPTION_LOAD_GPR4
EXCEPTION_LOAD_GPR5
EXCEPTION_LOAD_GPR6
- l.j _dispatch_do_dpage_fault
- l.nop
+ EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
/* ==============================================[ ITLB miss handler ]=== */
ENTRY(itlb_miss_handler)
@@ -1192,8 +1192,7 @@ i_pte_not_present:
EXCEPTION_LOAD_GPR4
EXCEPTION_LOAD_GPR5
EXCEPTION_LOAD_GPR6
- l.j _dispatch_do_ipage_fault
- l.nop
+ EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 79dea9740a3c..e7fdc50c4bf0 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -167,15 +167,26 @@ void __init paging_init(void)
unsigned long *dtlb_vector = __va(0x900);
unsigned long *itlb_vector = __va(0xa00);
+ printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+ *itlb_vector = ((unsigned long)&itlb_miss_handler -
+ (unsigned long)itlb_vector) >> 2;
+
+ /* Soft ordering constraint to ensure that dtlb_vector is
+ * the last thing updated
+ */
+ barrier();
+
printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
(unsigned long)dtlb_vector) >> 2;
- printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
- *itlb_vector = ((unsigned long)&itlb_miss_handler -
- (unsigned long)itlb_vector) >> 2;
}
+ /* Soft ordering constraint to ensure that cache invalidation and
+ * TLB flush really happen _after_ code has been modified.
+ */
+ barrier();
+
/* Invalidate instruction caches after code modification */
mtspr(SPR_ICBIR, 0x900);
mtspr(SPR_ICBIR, 0xa00);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7f9b3c53f74a..a2a47d9d6a22 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,9 @@ config PARISC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
+ select SYSCTL_ARCH_UNALIGN_ALLOW
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select TTY # Needed for pdc_cons.c
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 54d619d4cac6..5dfd248e3f1a 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -35,22 +35,17 @@
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
- struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
- addr = PAGE_ALIGN(addr);
-
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
- return addr;
- addr = vma->vm_end;
- }
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = PAGE_ALIGN(addr);
+ info.high_limit = TASK_SIZE;
+ info.align_mask = 0;
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
-#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
-
/*
* We need to know the offset to use. Old scheme was to look for
* existing mapping and use the same offset. New scheme is to use the
@@ -63,30 +58,21 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
*/
static int get_offset(struct address_space *mapping)
{
- int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
- return offset & 0x3FF000;
+ return (unsigned long) mapping >> 8;
}
static unsigned long get_shared_area(struct address_space *mapping,
unsigned long addr, unsigned long len, unsigned long pgoff)
{
- struct vm_area_struct *vma;
- int offset = mapping ? get_offset(mapping) : 0;
+ struct vm_unmapped_area_info info;
- offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
-
- addr = DCACHE_ALIGN(addr - offset) + offset;
-
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
- return addr;
- addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
- if (addr < vma->vm_end) /* handle wraparound */
- return -ENOMEM;
- }
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = PAGE_ALIGN(addr);
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & (SHMLBA - 1);
+ info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
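
The sys_parisc.c hunk above swaps two open-coded VMA walks for the generic vm_unmapped_area() helper. A minimal sketch of that pattern, assuming a 3.9-era kernel where struct vm_unmapped_area_info lives in <linux/mm.h> (the wrapper function name is hypothetical; the field values mirror the get_shared_area() conversion above):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/shm.h>	/* SHMLBA */

/* Illustrative only: find 'len' bytes of free address space at or above
 * 'addr', aligned so the mapping shares cache colour with 'pgoff'. */
static unsigned long example_get_area(unsigned long addr, unsigned long len,
				      unsigned long pgoff)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = PAGE_ALIGN(addr);
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & (SHMLBA - 1);
	info.align_offset = pgoff << PAGE_SHIFT;

	return vm_unmapped_area(&info);	/* address, or -ENOMEM on failure */
}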
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index eca69bb8ef5f..051c8b90231f 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -79,16 +79,6 @@ asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
(loff_t __user *)offset, count);
}
-
-/* lseek() needs a wrapper because 'offset' can be negative, but the top
- * half of the argument has been zeroed by syscall.S.
- */
-
-asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
-{
- return sys_lseek(fd, offset, origin);
-}
-
asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
{
union semun u;
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index fc9cab1cc2df..884b91b028f0 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -76,7 +76,7 @@
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
- ENTRY_DIFF(lseek)
+ ENTRY_COMP(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
@@ -165,8 +165,8 @@
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
- ENTRY_SAME(truncate)
- ENTRY_SAME(ftruncate)
+ ENTRY_COMP(truncate)
+ ENTRY_COMP(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 45ba99f5080b..aeb8f8f2c07a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -282,7 +282,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
show_regs(regs);
dump_stack();
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5c7470689a10..b89d7eb730a2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_GPIO
help
Generic GPIO API support
-config ARCH_NO_VIRT_TO_BUS
- def_bool PPC64
-
config PPC
bool
default y
@@ -101,6 +98,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 6abf0a163233..ac9790fc3836 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -103,8 +103,6 @@ do { \
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
(exec_stk == EXSTACK_DEFAULT) : 0)
#else
-# define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 03d7beae89a0..d1bb86074721 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -37,10 +37,8 @@
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -523,6 +521,8 @@ struct kvm_vcpu_arch {
u8 sane;
u8 cpu_type;
u8 hcall_needed;
+ u8 epr_enabled;
+ u8 epr_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 572aa7530619..44a657adf416 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -44,12 +44,11 @@ enum emulation_result {
EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_AGAIN, /* something went wrong. go again */
+ EMULATE_DO_PAPR, /* kvm_run filled with PAPR request */
};
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern char kvmppc_handlers_start[];
-extern unsigned long kvmppc_handler_len;
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
@@ -263,6 +262,15 @@ static inline void kvm_linear_init(void)
{}
#endif
+static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GEPR, epr);
+#elif defined(CONFIG_BOOKE)
+ vcpu->arch.epr = epr;
+#endif
+}
+
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7035e608f3fa..e66586122030 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -956,8 +956,6 @@
#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
#endif
-#define SPRN_SPRG_RVCPU SPRN_SPRG1
-#define SPRN_SPRG_WVCPU SPRN_SPRG1
#endif
#ifdef CONFIG_8xx
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index e07e6af5e1ff..b417de3cc2c4 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -56,6 +56,7 @@
#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_DBCR4 0x233 /* Debug Control Register 4 */
#define SPRN_MSRP 0x137 /* MSR Protect Register */
#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index d906f33441c6..535b6d8a41cc 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -22,7 +22,7 @@ SYSCALL_SPU(chmod)
SYSCALL_SPU(lchown)
SYSCALL(ni_syscall)
OLDSYS(stat)
-SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+COMPAT_SYS_SPU(lseek)
SYSCALL_SPU(getpid)
COMPAT_SYS(mount)
SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 2fba8a66fb10..16064d00adb9 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -114,7 +114,10 @@ struct kvm_regs {
/* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
#define KVM_SREGS_E_SPE (1 << 9)
-/* External Proxy (EXP) -- EPR */
+/*
+ * DEPRECATED! USE ONE_REG FOR THIS ONE!
+ * External Proxy (EXP) -- EPR
+ */
#define KVM_SREGS_EXP (1 << 10)
/* External PID (E.PD) -- EPSC/EPLC */
@@ -412,5 +415,6 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
#define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
+#define KVM_REG_PPC_EPR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 781190367292..b6c17ec9b169 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,7 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
-#ifdef CONFIG_KVM_BOOKE_HV
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e88c64331819..11f5b03a0b06 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
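
The kprobes.c hunk above tracks the 3.9 hlist API change: the iteration macros no longer take a separate struct hlist_node cursor, only a spare pointer for the _safe variants. A minimal sketch of the new calling convention (the struct and function names here are made up for illustration):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	int val;
	struct hlist_node hlist;
};

/* Illustrative only: free every item on 'head' using the post-3.9 form
 * hlist_for_each_entry_safe(pos, tmp, head, member) - note that the old
 * separate 'node' argument is gone. */
static void demo_flush(struct hlist_head *head)
{
	struct demo_item *item;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(item, tmp, head, hlist) {
		hlist_del(&item->hlist);
		kfree(item);
	}
}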
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index c8ae3714e79b..f19d0bdc3241 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -32,7 +32,7 @@
static loff_t page_map_seek( struct file *file, loff_t off, int whence)
{
loff_t new;
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
switch(whence) {
case 0:
@@ -55,13 +55,13 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
if ((vma->vm_end - vma->vm_start) > dp->size)
return -EINVAL;
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 8329190312c1..c642f0132988 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -191,7 +191,7 @@ static void free_flash_list(struct flash_block_list *f)
static int rtas_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_update_flash_t *uf;
uf = (struct rtas_update_flash_t *) dp->data;
@@ -253,7 +253,7 @@ static void get_flash_status_msg(int status, char *buf)
static ssize_t rtas_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_update_flash_t *uf;
char msg[RTAS_MSG_MAXLEN];
@@ -282,7 +282,7 @@ void rtas_block_ctor(void *ptr)
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_update_flash_t *uf;
char *p;
int next_free;
@@ -374,7 +374,7 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf)
static ssize_t manage_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_manage_flash_t *args_buf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
@@ -391,7 +391,7 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf,
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_manage_flash_t *args_buf;
const char reject_str[] = "0";
const char commit_str[] = "1";
@@ -462,7 +462,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
static ssize_t validate_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_validate_flash_t *args_buf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
@@ -477,7 +477,7 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_validate_flash_t *args_buf;
int rc;
@@ -526,7 +526,7 @@ done:
static int validate_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file_inode(file));
struct rtas_validate_flash_t *args_buf;
args_buf = (struct rtas_validate_flash_t *) dp->data;
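
The proc_powerpc.c and rtas_flash.c hunks above are a mechanical conversion to the file_inode() accessor, which replaces open-coded file->f_path.dentry->d_inode dereferences. A minimal sketch of the pattern, assuming the 3.9-era <linux/proc_fs.h> where PDE() and proc_dir_entry->data are still visible to callers (the helper itself is hypothetical):

#include <linux/fs.h>
#include <linux/proc_fs.h>

/* Illustrative only: fetch the proc_dir_entry private data behind an open
 * /proc file via file_inode() instead of chasing f_path.dentry->d_inode. */
static void *demo_pde_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct proc_dir_entry *dp = PDE(inode);

	return dp->data;
}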
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index dbc44ba5b078..d0bafc0cdf06 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -146,24 +146,6 @@ asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
(off_t __user *)offset, count);
}
-off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
-{
- /* sign extend n */
- return sys_lseek(fd, (int)offset, origin);
-}
-
-long compat_sys_truncate(const char __user * path, u32 length)
-{
- /* sign extend length */
- return sys_truncate(path, (int)length);
-}
-
-long compat_sys_ftruncate(int fd, u32 length)
-{
- /* sign extend length */
- return sys_ftruncate(fd, (int)length);
-}
-
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f9b751b29558..37cc40ef5043 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -146,7 +146,7 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
{
bust_spinlocks(0);
die_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
oops_exit();
printk("\n");
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 1e473d46322c..b772eded8c26 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,7 +10,8 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
eventfd.o)
CFLAGS_44x_tlb.o := -I.
-CFLAGS_e500_tlb.o := -I.
+CFLAGS_e500_mmu.o := -I.
+CFLAGS_e500_mmu_host.o := -I.
CFLAGS_emulate.o := -I.
common-objs-y += powerpc.o emulate.o
@@ -35,7 +36,8 @@ kvm-e500-objs := \
booke_emulate.o \
booke_interrupts.o \
e500.o \
- e500_tlb.o \
+ e500_mmu.o \
+ e500_mmu_host.o \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
@@ -45,7 +47,8 @@ kvm-e500mc-objs := \
booke_emulate.o \
bookehv_interrupts.o \
e500mc.o \
- e500_tlb.o \
+ e500_mmu.o \
+ e500_mmu_host.o \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index d31a716f7f2b..836c56975e21 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -34,6 +34,8 @@
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
#define OP_31_XOP_TLBIE 306
+/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
+#define OP_31_XOP_FAKE_SC1 308
#define OP_31_XOP_SLBMTE 402
#define OP_31_XOP_SLBIE 434
#define OP_31_XOP_SLBIA 498
@@ -170,6 +172,32 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
+#ifdef CONFIG_KVM_BOOK3S_64_PR
+ case OP_31_XOP_FAKE_SC1:
+ {
+ /* SC 1 papr hypercalls */
+ ulong cmd = kvmppc_get_gpr(vcpu, 3);
+ int i;
+
+ if ((vcpu->arch.shared->msr & MSR_PR) ||
+ !vcpu->arch.papr_enabled) {
+ emulated = EMULATE_FAIL;
+ break;
+ }
+
+ if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
+ break;
+
+ run->papr_hcall.nr = cmd;
+ for (i = 0; i < 9; ++i) {
+ ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
+ run->papr_hcall.args[i] = gpr;
+ }
+
+ emulated = EMULATE_DO_PAPR;
+ break;
+ }
+#endif
case OP_31_XOP_EIOIO:
break;
case OP_31_XOP_SLBMTE:
@@ -427,6 +455,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
+ case SPRN_MSSSR0:
break;
unprivileged:
default:
@@ -523,6 +552,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
+ case SPRN_MSSSR0:
*spr_val = 0;
break;
default:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 71d0c90b62bf..80dcc53a1aba 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1549,7 +1549,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d63714..da8b13c4b776 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
- struct hlist_node *node;
int i;
rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
invalidate_pte(vcpu, pte);
}
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_pte)
+ hlist_for_each_entry_rcu(pte, list, list_pte)
if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
invalidate_pte(vcpu, pte);
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+ hlist_for_each_entry_rcu(pte, list, list_pte_long)
if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
invalidate_pte(vcpu, pte);
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xfffffffffULL;
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+ hlist_for_each_entry_rcu(pte, list, list_vpte)
if ((pte->pte.vpage & vp_mask) == guest_vp)
invalidate_pte(vcpu, pte);
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xffffff000ULL;
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
if ((pte->pte.vpage & vp_mask) == guest_vp)
invalidate_pte(vcpu, pte);
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- struct hlist_node *node;
struct hpte_cache *pte;
int i;
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
if ((pte->pte.raddr >= pa_start) &&
(pte->pte.raddr < pa_end))
invalidate_pte(vcpu, pte);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 6702442ca818..5e93438afb06 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -762,6 +762,11 @@ program_interrupt:
run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST_NV;
break;
+ case EMULATE_DO_PAPR:
+ run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->arch.hcall_needed = 1;
+ r = RESUME_HOST_NV;
+ break;
default:
BUG();
}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 69f114015780..020923e43134 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -182,6 +182,14 @@ static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}
+static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
+ ulong esr_flags)
+{
+ vcpu->arch.queued_dear = dear_flags;
+ vcpu->arch.queued_esr = esr_flags;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
+}
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
vcpu->arch.queued_esr = esr_flags;
@@ -300,13 +308,22 @@ static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
#endif
}
+static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GEPR);
+#else
+ return vcpu->arch.epr;
+#endif
+}
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
{
int allowed = 0;
ulong msr_mask = 0;
- bool update_esr = false, update_dear = false;
+ bool update_esr = false, update_dear = false, update_epr = false;
ulong crit_raw = vcpu->arch.shared->critical;
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
bool crit;
@@ -330,9 +347,13 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
keep_irq = true;
}
+ if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled)
+ update_epr = true;
+
switch (priority) {
case BOOKE_IRQPRIO_DTLB_MISS:
case BOOKE_IRQPRIO_DATA_STORAGE:
+ case BOOKE_IRQPRIO_ALIGNMENT:
update_dear = true;
/* fall through */
case BOOKE_IRQPRIO_INST_STORAGE:
@@ -346,7 +367,6 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
case BOOKE_IRQPRIO_AP_UNAVAIL:
- case BOOKE_IRQPRIO_ALIGNMENT:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
int_class = INT_CLASS_NONCRIT;
@@ -408,6 +428,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
set_guest_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
set_guest_dear(vcpu, vcpu->arch.queued_dear);
+ if (update_epr == true)
+ kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
@@ -581,6 +603,11 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
kvmppc_core_check_exceptions(vcpu);
+ if (vcpu->requests) {
+ /* Exception delivery raised request; start over */
+ return 1;
+ }
+
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_block(vcpu);
@@ -610,6 +637,13 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
r = 0;
}
+ if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
+ vcpu->run->epr.epr = 0;
+ vcpu->arch.epr_needed = true;
+ vcpu->run->exit_reason = KVM_EXIT_EPR;
+ r = 0;
+ }
+
return r;
}
@@ -945,6 +979,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
+ case BOOKE_INTERRUPT_ALIGNMENT:
+ kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
+ vcpu->arch.fault_esr);
+ r = RESUME_GUEST;
+ break;
+
#ifdef CONFIG_KVM_BOOKE_HV
case BOOKE_INTERRUPT_HV_SYSCALL:
if (!(vcpu->arch.shared->msr & MSR_PR)) {
@@ -1388,6 +1428,11 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
&vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
break;
}
+ case KVM_REG_PPC_EPR: {
+ u32 epr = get_guest_epr(vcpu);
+ r = put_user(epr, (u32 __user *)(long)reg->addr);
+ break;
+ }
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR:
r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
@@ -1420,6 +1465,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
(u64 __user *)(long)reg->addr, sizeof(u64));
break;
}
+ case KVM_REG_PPC_EPR: {
+ u32 new_epr;
+ r = get_user(new_epr, (u32 __user *)(long)reg->addr);
+ if (!r)
+ kvmppc_set_epr(vcpu, new_epr);
+ break;
+ }
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR: {
u32 new_epcr;
@@ -1556,7 +1608,9 @@ int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
unsigned long ivor[16];
+ unsigned long *handler = kvmppc_booke_handler_addr;
unsigned long max_ivor = 0;
+ unsigned long handler_len;
int i;
/* We install our own exception handlers by hijacking IVPR. IVPR must
@@ -1589,14 +1643,16 @@ int __init kvmppc_booke_init(void)
for (i = 0; i < 16; i++) {
if (ivor[i] > max_ivor)
- max_ivor = ivor[i];
+ max_ivor = i;
+ handler_len = handler[i + 1] - handler[i];
memcpy((void *)kvmppc_booke_handlers + ivor[i],
- kvmppc_handlers_start + i * kvmppc_handler_len,
- kvmppc_handler_len);
+ (void *)handler[i], handler_len);
}
- flush_icache_range(kvmppc_booke_handlers,
- kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+
+ handler_len = handler[max_ivor + 1] - handler[max_ivor];
+ flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
+ ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
return 0;
}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index e9b88e433f64..5fd1ba693579 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -65,6 +65,7 @@
(1 << BOOKE_IRQPRIO_CRITICAL))
extern unsigned long kvmppc_booke_handlers;
+extern unsigned long kvmppc_booke_handler_addr[];
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 4685b8cf2249..27a4b2877c10 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -269,6 +269,9 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_ESR:
*spr_val = vcpu->arch.shared->esr;
break;
+ case SPRN_EPR:
+ *spr_val = vcpu->arch.epr;
+ break;
case SPRN_CSRR0:
*spr_val = vcpu->arch.csrr0;
break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index bb46b32f9813..f4bb55c96517 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -45,18 +45,21 @@
(1<<BOOKE_INTERRUPT_DEBUG))
#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
- (1<<BOOKE_INTERRUPT_DTLB_MISS))
+ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
+ (1<<BOOKE_INTERRUPT_ALIGNMENT))
#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
(1<<BOOKE_INTERRUPT_INST_STORAGE) | \
(1<<BOOKE_INTERRUPT_PROGRAM) | \
- (1<<BOOKE_INTERRUPT_DTLB_MISS))
+ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
+ (1<<BOOKE_INTERRUPT_ALIGNMENT))
.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
/* Get pointer to vcpu and record exit number. */
mtspr \scratch , r4
- mfspr r4, SPRN_SPRG_RVCPU
+ mfspr r4, SPRN_SPRG_THREAD
+ lwz r4, THREAD_KVM_VCPU(r4)
stw r3, VCPU_GPR(R3)(r4)
stw r5, VCPU_GPR(R5)(r4)
stw r6, VCPU_GPR(R6)(r4)
@@ -73,6 +76,14 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
bctr
.endm
+.macro KVM_HANDLER_ADDR ivor_nr
+ .long kvmppc_handler_\ivor_nr
+.endm
+
+.macro KVM_HANDLER_END
+ .long kvmppc_handlers_end
+.endm
+
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
@@ -93,9 +104,7 @@ KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
-
-_GLOBAL(kvmppc_handler_len)
- .long kvmppc_handler_1 - kvmppc_handler_0
+_GLOBAL(kvmppc_handlers_end)
/* Registers:
* SPRG_SCRATCH0: guest r4
@@ -402,9 +411,6 @@ lightweight_exit:
lwz r8, kvmppc_booke_handlers@l(r8)
mtspr SPRN_IVPR, r8
- /* Save vcpu pointer for the exception handlers. */
- mtspr SPRN_SPRG_WVCPU, r4
-
lwz r5, VCPU_SHARED(r4)
/* Can't switch the stack pointer until after IVPR is switched,
@@ -463,6 +469,31 @@ lightweight_exit:
lwz r4, VCPU_GPR(R4)(r4)
rfi
+ .data
+ .align 4
+ .globl kvmppc_booke_handler_addr
+kvmppc_booke_handler_addr:
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
+KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
+KVM_HANDLER_END /* Always keep this at the end */
+
#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
cmpi 0,r3,0
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index b479ed77c515..6dd4de7802bf 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -491,6 +491,9 @@ static int __init kvmppc_e500_init(void)
{
int r, i;
unsigned long ivor[3];
+ /* Process remaining handlers above the generic first 16 */
+ unsigned long *handler = &kvmppc_booke_handler_addr[16];
+ unsigned long handler_len;
unsigned long max_ivor = 0;
r = kvmppc_core_check_processor_compat();
@@ -506,15 +509,16 @@ static int __init kvmppc_e500_init(void)
ivor[1] = mfspr(SPRN_IVOR33);
ivor[2] = mfspr(SPRN_IVOR34);
for (i = 0; i < 3; i++) {
- if (ivor[i] > max_ivor)
- max_ivor = ivor[i];
+ if (ivor[i] > ivor[max_ivor])
+ max_ivor = i;
+ handler_len = handler[i + 1] - handler[i];
memcpy((void *)kvmppc_booke_handlers + ivor[i],
- kvmppc_handlers_start + (i + 16) * kvmppc_handler_len,
- kvmppc_handler_len);
+ (void *)handler[i], handler_len);
}
- flush_icache_range(kvmppc_booke_handlers,
- kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+ handler_len = handler[max_ivor + 1] - handler[max_ivor];
+ flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
+ ivor[max_ivor] + handler_len);
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c70d37ed770a..41cefd43655f 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -28,6 +28,7 @@
#define E500_TLB_VALID 1
#define E500_TLB_BITMAP 2
+#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
pfn_t pfn;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_mmu.c
index cf3f18012371..5c4475983f78 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -1,10 +1,11 @@
/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
* Scott Wood, scottwood@freescale.com
* Ashish Kalra, ashish.kalra@freescale.com
* Varun Sethi, varun.sethi@freescale.com
+ * Alexander Graf, agraf@suse.de
*
* Description:
* This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -33,10 +34,7 @@
#include "e500.h"
#include "trace.h"
#include "timing.h"
-
-#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
-
-static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+#include "e500_mmu_host.h"
static inline unsigned int gtlb0_get_next_victim(
struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -50,174 +48,6 @@ static inline unsigned int gtlb0_get_next_victim(
return victim;
}
-static inline unsigned int tlb1_max_shadow_size(void)
-{
- /* reserve one entry for magic page */
- return host_tlb_params[1].entries - tlbcam_index - 1;
-}
-
-static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
-}
-
-static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
-{
- /* Mask off reserved bits. */
- mas3 &= MAS3_ATTRIB_MASK;
-
-#ifndef CONFIG_KVM_BOOKE_HV
- if (!usermode) {
- /* Guest is in supervisor mode,
- * so we need to translate guest
- * supervisor permissions into user permissions. */
- mas3 &= ~E500_TLB_USER_PERM_MASK;
- mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
- }
- mas3 |= E500_TLB_SUPER_PERM_MASK;
-#endif
- return mas3;
-}
-
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
- return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
- return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
-/*
- * writing shadow tlb entry to host TLB
- */
-static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
- uint32_t mas0)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mtspr(SPRN_MAS0, mas0);
- mtspr(SPRN_MAS1, stlbe->mas1);
- mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
- mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
- mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
-#ifdef CONFIG_KVM_BOOKE_HV
- mtspr(SPRN_MAS8, stlbe->mas8);
-#endif
- asm volatile("isync; tlbwe" : : : "memory");
-
-#ifdef CONFIG_KVM_BOOKE_HV
- /* Must clear mas8 for other host tlbwe's */
- mtspr(SPRN_MAS8, 0);
- isync();
-#endif
- local_irq_restore(flags);
-
- trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
- stlbe->mas2, stlbe->mas7_3);
-}
-
-/*
- * Acquire a mas0 with victim hint, as if we just took a TLB miss.
- *
- * We don't care about the address we're searching for, other than that it's
- * in the right set and is not present in the TLB. Using a zero PID and a
- * userspace address means we don't have to set and then restore MAS5, or
- * calculate a proper MAS6 value.
- */
-static u32 get_host_mas0(unsigned long eaddr)
-{
- unsigned long flags;
- u32 mas0;
-
- local_irq_save(flags);
- mtspr(SPRN_MAS6, 0);
- asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
- mas0 = mfspr(SPRN_MAS0);
- local_irq_restore(flags);
-
- return mas0;
-}
-
-/* sesel is for tlb1 only */
-static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
-{
- u32 mas0;
-
- if (tlbsel == 0) {
- mas0 = get_host_mas0(stlbe->mas2);
- __write_host_tlbe(stlbe, mas0);
- } else {
- __write_host_tlbe(stlbe,
- MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
- }
-}
-
-#ifdef CONFIG_KVM_E500V2
-void kvmppc_map_magic(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct kvm_book3e_206_tlb_entry magic;
- ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
- unsigned int stid;
- pfn_t pfn;
-
- pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
- get_page(pfn_to_page(pfn));
-
- preempt_disable();
- stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
-
- magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
- MAS1_TSIZE(BOOK3E_PAGESZ_4K);
- magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
- magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
- magic.mas8 = 0;
-
- __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
- preempt_enable();
-}
-#endif
-
-static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
-{
- struct kvm_book3e_206_tlb_entry *gtlbe =
- get_entry(vcpu_e500, tlbsel, esel);
-
- if (tlbsel == 1 &&
- vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
- u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
- int hw_tlb_indx;
- unsigned long flags;
-
- local_irq_save(flags);
- while (tmp) {
- hw_tlb_indx = __ilog2_u64(tmp & -tmp);
- mtspr(SPRN_MAS0,
- MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
- mtspr(SPRN_MAS1, 0);
- asm volatile("tlbwe");
- vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
- tmp &= tmp - 1;
- }
- mb();
- vcpu_e500->g2h_tlb1_map[esel] = 0;
- vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
- local_irq_restore(flags);
-
- return;
- }
-
- /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
- kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
-}
-
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
int set_base;
@@ -296,70 +126,6 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
return -1;
}
-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- pfn_t pfn)
-{
- ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
-
- if (tlbe_is_writable(gtlbe))
- kvm_set_pfn_dirty(pfn);
-}
-
-static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-{
- if (ref->flags & E500_TLB_VALID) {
- trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
- ref->flags = 0;
- }
-}
-
-static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- if (vcpu_e500->g2h_tlb1_map)
- memset(vcpu_e500->g2h_tlb1_map, 0,
- sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
- if (vcpu_e500->h2g_tlb1_rmap)
- memset(vcpu_e500->h2g_tlb1_rmap, 0,
- sizeof(unsigned int) * host_tlb_params[1].entries);
-}
-
-static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int tlbsel = 0;
- int i;
-
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int stlbsel = 1;
- int i;
-
- kvmppc_e500_tlbil_all(vcpu_e500);
-
- for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->tlb_refs[stlbsel][i];
- kvmppc_e500_ref_release(ref);
- }
-
- clear_tlb_privs(vcpu_e500);
-}
-
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- clear_tlb_refs(vcpu_e500);
- clear_tlb1_bitmap(vcpu_e500);
-}
-
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as)
{
@@ -385,216 +151,6 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
| (as ? MAS6_SAS : 0);
}
-/* TID must be supplied by the caller */
-static inline void kvmppc_e500_setup_stlbe(
- struct kvm_vcpu *vcpu,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- int tsize, struct tlbe_ref *ref, u64 gvaddr,
- struct kvm_book3e_206_tlb_entry *stlbe)
-{
- pfn_t pfn = ref->pfn;
- u32 pr = vcpu->arch.shared->msr & MSR_PR;
-
- BUG_ON(!(ref->flags & E500_TLB_VALID));
-
- /* Force IPROT=0 for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN) |
- e500_shadow_mas2_attrib(gtlbe->mas2, pr);
- stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
- stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
-}
-
-static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
- struct tlbe_ref *ref)
-{
- struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
- unsigned long hva;
- int pfnmap = 0;
- int tsize = BOOK3E_PAGESZ_4K;
-
- /*
- * Translate guest physical to true physical, acquiring
- * a page reference if it is normal, non-reserved memory.
- *
- * gfn_to_memslot() must succeed because otherwise we wouldn't
- * have gotten this far. Eventually we should just pass the slot
- * pointer through from the first lookup.
- */
- slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
- hva = gfn_to_hva_memslot(slot, gfn);
-
- if (tlbsel == 1) {
- struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
-
- vma = find_vma(current->mm, hva);
- if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_PFNMAP)) {
- /*
- * This VMA is a physically contiguous region (e.g.
- * /dev/mem) that bypasses normal Linux page
- * management. Find the overlap between the
- * vma and the memslot.
- */
-
- unsigned long start, end;
- unsigned long slot_start, slot_end;
-
- pfnmap = 1;
-
- start = vma->vm_pgoff;
- end = start +
- ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-
- pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
-
- slot_start = pfn - (gfn - slot->base_gfn);
- slot_end = slot_start + slot->npages;
-
- if (start < slot_start)
- start = slot_start;
- if (end > slot_end)
- end = slot_end;
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-
- /*
- * Now find the largest tsize (up to what the guest
- * requested) that will cover gfn, stay within the
- * range, and for which gfn and pfn are mutually
- * aligned.
- */
-
- for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
- unsigned long gfn_start, gfn_end, tsize_pages;
- tsize_pages = 1 << (tsize - 2);
-
- gfn_start = gfn & ~(tsize_pages - 1);
- gfn_end = gfn_start + tsize_pages;
-
- if (gfn_start + pfn - gfn < start)
- continue;
- if (gfn_end + pfn - gfn > end)
- continue;
- if ((gfn & (tsize_pages - 1)) !=
- (pfn & (tsize_pages - 1)))
- continue;
-
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- pfn &= ~(tsize_pages - 1);
- break;
- }
- } else if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_HUGETLB)) {
- unsigned long psize = vma_kernel_pagesize(vma);
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * Take the largest page size that satisfies both host
- * and guest mapping
- */
- tsize = min(__ilog2(psize) - 10, tsize);
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
- }
-
- up_read(&current->mm->mmap_sem);
- }
-
- if (likely(!pfnmap)) {
- unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
- pfn = gfn_to_pfn_memslot(slot, gfn);
- if (is_error_noslot_pfn(pfn)) {
- printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
- (long)gfn);
- return;
- }
-
- /* Align guest and physical address to page map boundaries */
- pfn &= ~(tsize_pages - 1);
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- }
-
- /* Drop old ref and setup new one. */
- kvmppc_e500_ref_release(ref);
- kvmppc_e500_ref_setup(ref, gtlbe, pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-
- /* Clear i-cache for new pages */
- kvmppc_mmu_flush_icache(pfn);
-
- /* Drop refcount on page, so that mmu notifiers can clear it */
- kvm_release_pfn_clean(pfn);
-}
-
-/* XXX only map the one-one case, for now use TLB0 */
-static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int esel,
- struct kvm_book3e_206_tlb_entry *stlbe)
-{
- struct kvm_book3e_206_tlb_entry *gtlbe;
- struct tlbe_ref *ref;
-
- gtlbe = get_entry(vcpu_e500, 0, esel);
- ref = &vcpu_e500->gtlb_priv[0][esel].ref;
-
- kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
- get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, 0, stlbe, ref);
-}
-
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
-static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe, int esel)
-{
- struct tlbe_ref *ref;
- unsigned int victim;
-
- victim = vcpu_e500->host_tlb1_nv++;
-
- if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
- vcpu_e500->host_tlb1_nv = 0;
-
- ref = &vcpu_e500->tlb_refs[1][victim];
- kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
-
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
- if (vcpu_e500->h2g_tlb1_rmap[victim]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
- vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
- }
- vcpu_e500->h2g_tlb1_rmap[victim] = esel;
-
- return victim;
-}
-
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int size = vcpu_e500->gtlb_params[1].entries;
@@ -683,8 +239,8 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
- /* Invalidate all vcpu id mappings */
- kvmppc_e500_tlbil_all(vcpu_e500);
+ /* Invalidate all host shadow mappings */
+ kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
return EMULATE_DONE;
}
@@ -713,8 +269,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
}
- /* Invalidate all vcpu id mappings */
- kvmppc_e500_tlbil_all(vcpu_e500);
+ /* Invalidate all host shadow mappings */
+ kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
return EMULATE_DONE;
}
@@ -834,27 +390,11 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
return EMULATE_DONE;
}
-/* sesel is for tlb1 only */
-static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe,
- int stlbsel, int sesel)
-{
- int stid;
-
- preempt_disable();
- stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
-
- stlbe->mas1 |= MAS1_TID(stid);
- write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
- preempt_enable();
-}
-
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
- int tlbsel, esel, stlbsel, sesel;
+ struct kvm_book3e_206_tlb_entry *gtlbe;
+ int tlbsel, esel;
int recal = 0;
tlbsel = get_tlb_tlbsel(vcpu);
@@ -892,40 +432,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
- u64 eaddr;
- u64 raddr;
+ u64 eaddr = get_tlb_eaddr(gtlbe);
+ u64 raddr = get_tlb_raddr(gtlbe);
- switch (tlbsel) {
- case 0:
- /* TLB0 */
+ if (tlbsel == 0) {
gtlbe->mas1 &= ~MAS1_TSIZE(~0);
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
-
- stlbsel = 0;
- kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
- sesel = 0; /* unused */
-
- break;
-
- case 1:
- /* TLB1 */
- eaddr = get_tlb_eaddr(gtlbe);
- raddr = get_tlb_raddr(gtlbe);
-
- /* Create a 4KB mapping on the host.
- * If the guest wanted a large page,
- * only the first 4KB is mapped here and the rest
- * are mapped on the fly. */
- stlbsel = 1;
- sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
- break;
-
- default:
- BUG();
}
- write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
+ /* Premap the faulting page */
+ kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -1019,100 +535,14 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
- unsigned int index)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe_priv *priv;
- struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
- int tlbsel = tlbsel_of(index);
- int esel = esel_of(index);
- int stlbsel, sesel;
-
- gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-
- switch (tlbsel) {
- case 0:
- stlbsel = 0;
- sesel = 0; /* unused */
- priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
-
- /* Only triggers after clear_tlb_refs */
- if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
- kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
- else
- kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
- &priv->ref, eaddr, &stlbe);
- break;
-
- case 1: {
- gfn_t gfn = gpaddr >> PAGE_SHIFT;
-
- stlbsel = 1;
- sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
- gtlbe, &stlbe, esel);
- break;
- }
-
- default:
- BUG();
- break;
- }
-
- write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
-}
-
-/************* MMU Notifiers *************/
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- trace_kvm_unmap_hva(hva);
-
- /*
- * Flush all shadow tlb entries everywhere. This is slow, but
- * we are 100% sure that we catch the to be unmapped page
- */
- kvm_flush_remote_tlbs(kvm);
-
- return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- /* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
-
- return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
-{
- /* XXX could be more clever ;) */
- return 0;
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- /* XXX could be more clever ;) */
- return 0;
-}
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- /* The page will get remapped properly on its next fault */
- kvm_unmap_hva(kvm, hva);
-}
-
/*****************************************/
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int i;
- clear_tlb1_bitmap(vcpu_e500);
+ kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
kfree(vcpu_e500->g2h_tlb1_map);
-
- clear_tlb_refs(vcpu_e500);
kfree(vcpu_e500->gtlb_priv[0]);
kfree(vcpu_e500->gtlb_priv[1]);
@@ -1303,7 +733,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
kvmppc_recalc_tlb1map_range(vcpu_e500);
- clear_tlb_refs(vcpu_e500);
+ kvmppc_core_flush_tlb(vcpu);
return 0;
}
@@ -1313,37 +743,8 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
- host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
- host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
- /*
- * This should never happen on real e500 hardware, but is
- * architecturally possible -- e.g. in some weird nested
- * virtualization case.
- */
- if (host_tlb_params[0].entries == 0 ||
- host_tlb_params[1].entries == 0) {
- pr_err("%s: need to know host tlb size\n", __func__);
- return -ENODEV;
- }
-
- host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
- TLBnCFG_ASSOC_SHIFT;
- host_tlb_params[1].ways = host_tlb_params[1].entries;
-
- if (!is_power_of_2(host_tlb_params[0].entries) ||
- !is_power_of_2(host_tlb_params[0].ways) ||
- host_tlb_params[0].entries < host_tlb_params[0].ways ||
- host_tlb_params[0].ways == 0) {
- pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
- __func__, host_tlb_params[0].entries,
- host_tlb_params[0].ways);
- return -ENODEV;
- }
-
- host_tlb_params[0].sets =
- host_tlb_params[0].entries / host_tlb_params[0].ways;
- host_tlb_params[1].sets = 1;
+ if (e500_mmu_host_init(vcpu_e500))
+ goto err;
vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
@@ -1362,18 +763,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
- vcpu_e500->tlb_refs[0] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[0])
- goto err;
-
- vcpu_e500->tlb_refs[1] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[1])
- goto err;
-
vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
vcpu_e500->gtlb_params[0].entries,
GFP_KERNEL);
@@ -1392,12 +781,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->g2h_tlb1_map)
goto err;
- vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
- host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->h2g_tlb1_rmap)
- goto err;
-
/* Init TLB configuration register */
vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
@@ -1416,15 +799,11 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
err:
free_gtlb(vcpu_e500);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
free_gtlb(vcpu_e500);
- kfree(vcpu_e500->h2g_tlb1_rmap);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
+ e500_mmu_host_uninit(vcpu_e500);
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
new file mode 100644
index 000000000000..a222edfb9a9b
--- /dev/null
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, yu.liu@freescale.com
+ * Scott Wood, scottwood@freescale.com
+ * Ashish Kalra, ashish.kalra@freescale.com
+ * Varun Sethi, varun.sethi@freescale.com
+ * Alexander Graf, agraf@suse.de
+ *
+ * Description:
+ * This file is based on arch/powerpc/kvm/44x_tlb.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
+#include <asm/kvm_ppc.h>
+
+#include "e500.h"
+#include "trace.h"
+#include "timing.h"
+#include "e500_mmu_host.h"
+
+#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
+
+static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+
+static inline unsigned int tlb1_max_shadow_size(void)
+{
+ /* reserve one entry for magic page */
+ return host_tlb_params[1].entries - tlbcam_index - 1;
+}
+
+static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
+{
+ /* Mask off reserved bits. */
+ mas3 &= MAS3_ATTRIB_MASK;
+
+#ifndef CONFIG_KVM_BOOKE_HV
+ if (!usermode) {
+ /* Guest is in supervisor mode,
+ * so we need to translate guest
+ * supervisor permissions into user permissions. */
+ mas3 &= ~E500_TLB_USER_PERM_MASK;
+ mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
+ }
+ mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+ return mas3;
+}
+
+static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
+{
+#ifdef CONFIG_SMP
+ return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
+#else
+ return mas2 & MAS2_ATTRIB_MASK;
+#endif
+}
+
+/*
+ * writing shadow tlb entry to host TLB
+ */
+static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
+ uint32_t mas0)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS0, mas0);
+ mtspr(SPRN_MAS1, stlbe->mas1);
+ mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
+ mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
+ mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
+ asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ /* Must clear mas8 for other host tlbwe's */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+#endif
+ local_irq_restore(flags);
+
+ trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+ stlbe->mas2, stlbe->mas7_3);
+}
+
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that it's
+ * in the right set and is not present in the TLB. Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+ unsigned long flags;
+ u32 mas0;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS6, 0);
+ asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+ mas0 = mfspr(SPRN_MAS0);
+ local_irq_restore(flags);
+
+ return mas0;
+}
+
+/* sesel is for tlb1 only */
+static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
+{
+ u32 mas0;
+
+ if (tlbsel == 0) {
+ mas0 = get_host_mas0(stlbe->mas2);
+ __write_host_tlbe(stlbe, mas0);
+ } else {
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(sesel)));
+ }
+}
+
+/* sesel is for tlb1 only */
+static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ struct kvm_book3e_206_tlb_entry *stlbe,
+ int stlbsel, int sesel)
+{
+ int stid;
+
+ preempt_disable();
+ stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
+
+ stlbe->mas1 |= MAS1_TID(stid);
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
+ preempt_enable();
+}
+
+#ifdef CONFIG_KVM_E500V2
+/* XXX should be a hook in the gva2hpa translation */
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct kvm_book3e_206_tlb_entry magic;
+ ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+ unsigned int stid;
+ pfn_t pfn;
+
+ pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+ get_page(pfn_to_page(pfn));
+
+ preempt_disable();
+ stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+ magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
+ MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+ magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+ MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+ magic.mas8 = 0;
+
+ __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+ preempt_enable();
+}
+#endif
+
+void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+ int esel)
+{
+ struct kvm_book3e_206_tlb_entry *gtlbe =
+ get_entry(vcpu_e500, tlbsel, esel);
+ struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
+
+ /* Don't bother with unmapped entries */
+ if (!(ref->flags & E500_TLB_VALID))
+ return;
+
+ if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
+ u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+ int hw_tlb_indx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ while (tmp) {
+ hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+ mtspr(SPRN_MAS0,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+ mtspr(SPRN_MAS1, 0);
+ asm volatile("tlbwe");
+ vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+ tmp &= tmp - 1;
+ }
+ mb();
+ vcpu_e500->g2h_tlb1_map[esel] = 0;
+ ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
+ local_irq_restore(flags);
+ }
+
+ if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
+ /*
+ * TLB1 entry is backed by 4k pages. This should happen
+ * rarely and is not worth optimizing. Invalidate everything.
+ */
+ kvmppc_e500_tlbil_all(vcpu_e500);
+ ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
+ }
+
+ /* Already invalidated in between */
+ if (!(ref->flags & E500_TLB_VALID))
+ return;
+
+ /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
+
+ /* Mark the TLB as not backed by the host anymore */
+ ref->flags &= ~E500_TLB_VALID;
+}
+
+static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
+}
+
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ pfn_t pfn)
+{
+ ref->pfn = pfn;
+ ref->flags = E500_TLB_VALID;
+
+ if (tlbe_is_writable(gtlbe))
+ kvm_set_pfn_dirty(pfn);
+}
+
+static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+{
+ if (ref->flags & E500_TLB_VALID) {
+ trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
+ ref->flags = 0;
+ }
+}
+
+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ if (vcpu_e500->g2h_tlb1_map)
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+ if (vcpu_e500->h2g_tlb1_rmap)
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
+static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int tlbsel = 0;
+ int i;
+
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
+ }
+}
+
+static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int stlbsel = 1;
+ int i;
+
+ kvmppc_e500_tlbil_all(vcpu_e500);
+
+ for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->tlb_refs[stlbsel][i];
+ kvmppc_e500_ref_release(ref);
+ }
+
+ clear_tlb_privs(vcpu_e500);
+}
+
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ clear_tlb_refs(vcpu_e500);
+ clear_tlb1_bitmap(vcpu_e500);
+}
+
+/* TID must be supplied by the caller */
+static void kvmppc_e500_setup_stlbe(
+ struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ int tsize, struct tlbe_ref *ref, u64 gvaddr,
+ struct kvm_book3e_206_tlb_entry *stlbe)
+{
+ pfn_t pfn = ref->pfn;
+ u32 pr = vcpu->arch.shared->msr & MSR_PR;
+
+ BUG_ON(!(ref->flags & E500_TLB_VALID));
+
+ /* Force IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+ stlbe->mas2 = (gvaddr & MAS2_EPN) |
+ e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+ stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+ e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
+#endif
+}
+
+static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+ int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
+ struct tlbe_ref *ref)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long pfn = 0; /* silence GCC warning */
+ unsigned long hva;
+ int pfnmap = 0;
+ int tsize = BOOK3E_PAGESZ_4K;
+
+ /*
+ * Translate guest physical to true physical, acquiring
+ * a page reference if it is normal, non-reserved memory.
+ *
+ * gfn_to_memslot() must succeed because otherwise we wouldn't
+ * have gotten this far. Eventually we should just pass the slot
+ * pointer through from the first lookup.
+ */
+ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
+ hva = gfn_to_hva_memslot(slot, gfn);
+
+ if (tlbsel == 1) {
+ struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, hva);
+ if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * This VMA is a physically contiguous region (e.g.
+ * /dev/mem) that bypasses normal Linux page
+ * management. Find the overlap between the
+ * vma and the memslot.
+ */
+
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ pfnmap = 1;
+
+ start = vma->vm_pgoff;
+ end = start +
+ ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+
+ pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end, tsize_pages;
+ tsize_pages = 1 << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ } else if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_HUGETLB)) {
+ unsigned long psize = vma_kernel_pagesize(vma);
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * Take the largest page size that satisfies both host
+ * and guest mapping
+ */
+ tsize = min(__ilog2(psize) - 10, tsize);
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ if (likely(!pfnmap)) {
+ unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn)) {
+ printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
+ (long)gfn);
+ return -EINVAL;
+ }
+
+ /* Align guest and physical address to page map boundaries */
+ pfn &= ~(tsize_pages - 1);
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ }
+
+ /* Drop old ref and setup new one. */
+ kvmppc_e500_ref_release(ref);
+ kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
+
+ /* Clear i-cache for new pages */
+ kvmppc_mmu_flush_icache(pfn);
+
+ /* Drop refcount on page, so that mmu notifiers can clear it */
+ kvm_release_pfn_clean(pfn);
+
+ return 0;
+}
+
+/* XXX only map the one-one case, for now use TLB0 */
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
+ struct kvm_book3e_206_tlb_entry *stlbe)
+{
+ struct kvm_book3e_206_tlb_entry *gtlbe;
+ struct tlbe_ref *ref;
+ int stlbsel = 0;
+ int sesel = 0;
+ int r;
+
+ gtlbe = get_entry(vcpu_e500, 0, esel);
+ ref = &vcpu_e500->gtlb_priv[0][esel].ref;
+
+ r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
+ get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
+ gtlbe, 0, stlbe, ref);
+ if (r)
+ return r;
+
+ write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+
+ return 0;
+}
+
+static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct tlbe_ref *ref,
+ int esel)
+{
+ unsigned int sesel = vcpu_e500->host_tlb1_nv++;
+
+ if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
+ vcpu_e500->host_tlb1_nv = 0;
+
+ vcpu_e500->tlb_refs[1][sesel] = *ref;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+ vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
+ }
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+ return sesel;
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+/* For both one-one and one-to-many */
+static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+ struct kvm_book3e_206_tlb_entry *stlbe, int esel)
+{
+ struct tlbe_ref ref;
+ int sesel;
+ int r;
+
+ ref.flags = 0;
+ r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
+ &ref);
+ if (r)
+ return r;
+
+ /* Use TLB0 when we can only map a page with 4k */
+ if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
+ write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
+ return 0;
+ }
+
+ /* Otherwise map into TLB1 */
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+ write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
+
+ return 0;
+}
+
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
+ unsigned int index)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe_priv *priv;
+ struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+ int tlbsel = tlbsel_of(index);
+ int esel = esel_of(index);
+
+ gtlbe = get_entry(vcpu_e500, tlbsel, esel);
+
+ switch (tlbsel) {
+ case 0:
+ priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
+
+ /* Triggers after clear_tlb_refs or on initial mapping */
+ if (!(priv->ref.flags & E500_TLB_VALID)) {
+ kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+ } else {
+ kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
+ &priv->ref, eaddr, &stlbe);
+ write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
+ }
+ break;
+
+ case 1: {
+ gfn_t gfn = gpaddr >> PAGE_SHIFT;
+ kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
+ esel);
+ break;
+ }
+
+ default:
+ BUG();
+ break;
+ }
+}
+
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_unmap_hva(hva);
+
+ /*
+ * Flush all shadow tlb entries everywhere. This is slow, but
+ * we are 100% sure that we catch the to be unmapped page
+ */
+ kvm_flush_remote_tlbs(kvm);
+
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ /* kvm_unmap_hva flushes everything anyways */
+ kvm_unmap_hva(kvm, start);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ /* The page will get remapped properly on its next fault */
+ kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
+int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
+ host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ /*
+ * This should never happen on real e500 hardware, but is
+ * architecturally possible -- e.g. in some weird nested
+ * virtualization case.
+ */
+ if (host_tlb_params[0].entries == 0 ||
+ host_tlb_params[1].entries == 0) {
+ pr_err("%s: need to know host tlb size\n", __func__);
+ return -ENODEV;
+ }
+
+ host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
+ TLBnCFG_ASSOC_SHIFT;
+ host_tlb_params[1].ways = host_tlb_params[1].entries;
+
+ if (!is_power_of_2(host_tlb_params[0].entries) ||
+ !is_power_of_2(host_tlb_params[0].ways) ||
+ host_tlb_params[0].entries < host_tlb_params[0].ways ||
+ host_tlb_params[0].ways == 0) {
+ pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
+ __func__, host_tlb_params[0].entries,
+ host_tlb_params[0].ways);
+ return -ENODEV;
+ }
+
+ host_tlb_params[0].sets =
+ host_tlb_params[0].entries / host_tlb_params[0].ways;
+ host_tlb_params[1].sets = 1;
+
+ vcpu_e500->tlb_refs[0] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[0])
+ goto err;
+
+ vcpu_e500->tlb_refs[1] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[1])
+ goto err;
+
+ vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+ host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->h2g_tlb1_rmap)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
+ return -EINVAL;
+}
+
+void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->h2g_tlb1_rmap);
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
+}
diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h
new file mode 100644
index 000000000000..7624835b76c7
--- /dev/null
+++ b/arch/powerpc/kvm/e500_mmu_host.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef KVM_E500_MMU_HOST_H
+#define KVM_E500_MMU_HOST_H
+
+void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+ int esel);
+
+int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500);
+void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+#endif /* KVM_E500_MMU_HOST_H */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 9d9cddc5b346..7a73b6f72a8b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -150,8 +150,6 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_TBWL: break;
case SPRN_TBWU: break;
- case SPRN_MSSSR0: break;
-
case SPRN_DEC:
vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
@@ -202,9 +200,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PIR:
spr_val = vcpu->vcpu_id;
break;
- case SPRN_MSSSR0:
- spr_val = 0;
- break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70739a089560..934413cd3a1b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -237,7 +237,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
default:
- BUG();
+ WARN_ON(1);
+ r = RESUME_GUEST;
}
return r;
@@ -305,6 +306,7 @@ int kvm_dev_ioctl_check_extension(long ext)
#ifdef CONFIG_BOOKE
case KVM_CAP_PPC_BOOKE_SREGS:
case KVM_CAP_PPC_BOOKE_WATCHDOG:
+ case KVM_CAP_PPC_EPR:
#else
case KVM_CAP_PPC_SEGSTATE:
case KVM_CAP_PPC_HIOR:
@@ -412,7 +414,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
@@ -420,7 +422,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
kvmppc_core_commit_memory_region(kvm, mem, old);
}
@@ -720,6 +722,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
for (i = 0; i < 9; ++i)
kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
vcpu->arch.hcall_needed = 0;
+#ifdef CONFIG_BOOKE
+ } else if (vcpu->arch.epr_needed) {
+ kvmppc_set_epr(vcpu, run->epr.epr);
+ vcpu->arch.epr_needed = 0;
+#endif
}
r = kvmppc_vcpu_run(run, vcpu);
@@ -761,6 +768,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = 0;
vcpu->arch.papr_enabled = true;
break;
+ case KVM_CAP_PPC_EPR:
+ r = 0;
+ vcpu->arch.epr_enabled = cap->args[0];
+ break;
#ifdef CONFIG_BOOKE
case KVM_CAP_PPC_BOOKE_WATCHDOG:
r = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 657e3f233a64..c9500ea7be2f 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -111,7 +111,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
struct spu_context *ctx;
if (file->f_op != &spufs_context_fops)
return 0;
- ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
if (ctx->flags & SPU_CREATE_NOSCHED)
return 0;
return fd + 1;
@@ -137,7 +137,7 @@ static struct spu_context *coredump_next_context(int *fd)
return NULL;
*fd = n - 1;
file = fcheck(*fd);
- return SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+ return SPUFS_I(file_inode(file))->i_ctx;
}
int spufs_coredump_extra_notes_size(void)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 0cfece4cf6ef..68c57d38745a 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1852,7 +1852,7 @@ out:
static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (!err) {
mutex_lock(&inode->i_mutex);
@@ -2501,7 +2501,7 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int error = 0, cnt = 0;
@@ -2571,7 +2571,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
int rc;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dba1ce235da5..863184b182f4 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -199,37 +199,18 @@ static int spufs_fill_dir(struct dentry *dir,
const struct spufs_tree_descr *files, umode_t mode,
struct spu_context *ctx)
{
- struct dentry *dentry, *tmp;
- int ret;
-
while (files->name && files->name[0]) {
- ret = -ENOMEM;
- dentry = d_alloc_name(dir, files->name);
+ int ret;
+ struct dentry *dentry = d_alloc_name(dir, files->name);
if (!dentry)
- goto out;
+ return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
- goto out;
+ return ret;
files++;
}
return 0;
-out:
- /*
- * remove all children from dir. dir->inode is not set so don't
- * just simply use spufs_prune_dir() and panic afterwards :)
- * dput() looks like it will do the right thing:
- * - dec parent's ref counter
- * - remove child from parent's child list
- * - free child's inode if possible
- * - free child
- */
- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- dput(dentry);
- }
-
- shrink_dcache_parent(dir);
- return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
@@ -269,10 +250,9 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
struct inode *inode;
struct spu_context *ctx;
- ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
- goto out;
+ return -ENOSPC;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
@@ -280,40 +260,38 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
}
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
- if (!ctx)
- goto out_iput;
+ if (!ctx) {
+ iput(inode);
+ return -ENOSPC;
+ }
ctx->flags = flags;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
+
+ mutex_lock(&inode->i_mutex);
+
+ dget(dentry);
+ inc_nlink(dir);
+ inc_nlink(inode);
+
+ d_instantiate(dentry, inode);
+
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
mode, ctx);
else
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
- if (ret)
- goto out_free_ctx;
-
- if (spufs_get_sb_info(dir->i_sb)->debug)
+ if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
mode, ctx);
if (ret)
- goto out_free_ctx;
+ spufs_rmdir(dir, dentry);
- d_instantiate(dentry, inode);
- dget(dentry);
- inc_nlink(dir);
- inc_nlink(dentry->d_inode);
- goto out;
+ mutex_unlock(&inode->i_mutex);
-out_free_ctx:
- spu_forget(ctx);
- put_spu_context(ctx);
-out_iput:
- iput(inode);
-out:
return ret;
}
@@ -368,7 +346,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
return ERR_PTR(-EINVAL);
neighbor = get_spu_context(
- SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
+ SPUFS_I(file_inode(filp))->i_ctx);
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index baee994fe810..b045fdda4845 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp,
if (filp->f_op != &spufs_context_fops)
goto out;
- i = SPUFS_I(filp->f_path.dentry->d_inode);
+ i = SPUFS_I(file_inode(filp));
ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index c9311cfdfcac..cf4e7736e4f1 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -86,7 +86,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file)
rc = seq_open(file, &hcall_inst_seq_ops);
seq = file->private_data;
- seq->private = file->f_path.dentry->d_inode->i_private;
+ seq->private = file_inode(file)->i_private;
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 554457294a2b..47f3cda2a68b 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -46,16 +46,12 @@ static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
static ssize_t scanlog_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
- struct proc_dir_entry *dp;
- unsigned int *data;
+ struct proc_dir_entry *dp = PDE(file_inode(file));
+ unsigned int *data = (unsigned int *)dp->data;
int status;
unsigned long len, off;
unsigned int wait_time;
- dp = PDE(inode);
- data = (unsigned int *)dp->data;
-
if (count > RTAS_DATA_BUF_SIZE)
count = RTAS_DATA_BUF_SIZE;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f09ae7b0b4c5..4b505370a1d5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,6 +134,7 @@ config S390
select HAVE_SYSCALL_WRAPPERS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_VIRT_TO_BUS
select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 13e76dabbe8b..9fd4a40c6752 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -54,7 +54,7 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
if (*ppos != 0)
return 0;
- df = file->f_path.dentry->d_inode->i_private;
+ df = file_inode(file)->i_private;
mutex_lock(&df->lock);
if (!df->data) {
data = hypfs_dbfs_data_alloc(df);
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 06ea69bd387a..280ded8b79ba 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -119,7 +119,7 @@ static void hypfs_evict_inode(struct inode *inode)
static int hypfs_open(struct inode *inode, struct file *filp)
{
- char *data = filp->f_path.dentry->d_inode->i_private;
+ char *data = file_inode(filp)->i_private;
struct hypfs_sb_info *fs_info;
if (filp->f_mode & FMODE_WRITE) {
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 178ff966a8ba..1bfdf24b85a2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,10 +180,7 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#else /* CONFIG_64BIT */
+#ifdef CONFIG_64BIT
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7def77302d63..87c17bfb2968 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -41,6 +41,7 @@ enum interruption_class {
IRQIO_CSC,
IRQIO_PCI,
IRQIO_MSI,
+ IRQIO_VIR,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b7841546991f..16bd5d169cdb 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -20,9 +20,7 @@
#include <asm/cpu.h>
#define KVM_MAX_VCPUS 64
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 32
struct sca_entry {
atomic_t scn;
@@ -76,8 +74,11 @@ struct kvm_s390_sie_block {
__u64 epoch; /* 0x0038 */
__u8 reserved40[4]; /* 0x0040 */
#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
+#define ICTL_LPSW 0x00400000
__u32 ictl; /* 0x0048 */
__u32 eca; /* 0x004c */
__u8 icptcode; /* 0x0050 */
@@ -127,6 +128,7 @@ struct kvm_vcpu_stat {
u32 deliver_prefix_signal;
u32 deliver_restart_signal;
u32 deliver_program_int;
+ u32 deliver_io_int;
u32 exit_wait_state;
u32 instruction_stidp;
u32 instruction_spx;
@@ -187,6 +189,11 @@ struct kvm_s390_emerg_info {
__u16 code;
};
+struct kvm_s390_mchk_info {
+ __u64 cr14;
+ __u64 mcic;
+};
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -197,6 +204,7 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_mchk_info mchk;
};
};
@@ -254,6 +262,7 @@ struct kvm_arch{
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
struct gmap *gmap;
+ int css_support;
};
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index c14faf39ae36..3c98c4dc5aca 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -67,12 +67,6 @@ ENTRY(sys32_lchown16_wrapper)
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_lchown16 # branch to system call
-ENTRY(sys32_lseek_wrapper)
- llgfr %r2,%r2 # unsigned int
- lgfr %r3,%r3 # off_t
- llgfr %r4,%r4 # unsigned int
- jg sys_lseek # branch to system call
-
#sys32_getpid_wrapper # void
ENTRY(sys32_mount_wrapper)
@@ -331,16 +325,6 @@ ENTRY(sys32_munmap_wrapper)
llgfr %r3,%r3 # size_t
jg sys_munmap # branch to system call
-ENTRY(sys32_truncate_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # long
- jg sys_truncate # branch to system call
-
-ENTRY(sys32_ftruncate_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned long
- jg sys_ftruncate # branch to system call
-
ENTRY(sys32_fchmod_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # mode_t
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 09a94cd9debc..f1279dc2e1bc 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -611,7 +611,7 @@ debug_open(struct inode *inode, struct file *file)
debug_info_t *debug_info, *debug_info_snapshot;
mutex_lock(&debug_mutex);
- debug_info = file->f_path.dentry->d_inode->i_private;
+ debug_info = file_inode(file)->i_private;
/* find debug view */
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!debug_info->views[i])
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 9df824ea1667..1630f439cd2a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -81,6 +81,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
+ [IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d1c7214e157c..3388b2b2a07d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
{
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address;
unsigned long trampoline_address;
kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
orig_ret_address = 0;
correct_ret_addr = NULL;
trampoline_address = (unsigned long) &kretprobe_trampoline;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index aaac708aa110..630b935d1284 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -27,7 +27,7 @@ SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */
SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/
NI_SYSCALL /* old break syscall holder */
NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
+SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
@@ -100,8 +100,8 @@ SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */
SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
-SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
-SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 70ecfc5fe8f0..13dd63fba367 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -271,7 +271,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules();
show_regs(regs);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 22798ec33fd1..f26ff1e31bdb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -26,27 +26,20 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
- ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
u64 useraddr;
int reg, rc;
vcpu->stat.instruction_lctlg++;
- if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
- return -EOPNOTSUPP;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+ useraddr = kvm_s390_get_base_disp_rsy(vcpu);
if (useraddr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
reg = reg1;
- VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
- disp2);
+ VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
+ useraddr);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
do {
@@ -68,23 +61,19 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 useraddr;
u32 val = 0;
int reg, rc;
vcpu->stat.instruction_lctl++;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+ useraddr = kvm_s390_get_base_disp_rs(vcpu);
if (useraddr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
- disp2);
+ VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
+ useraddr);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
reg = reg1;
@@ -104,14 +93,31 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
return 0;
}
-static intercept_handler_t instruction_handlers[256] = {
+static const intercept_handler_t eb_handlers[256] = {
+ [0x2f] = handle_lctlg,
+ [0x8a] = kvm_s390_handle_priv_eb,
+};
+
+static int handle_eb(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
+ [0x82] = kvm_s390_handle_lpsw,
[0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb7] = handle_lctl,
+ [0xb9] = kvm_s390_handle_b9,
[0xe5] = kvm_s390_handle_e5,
- [0xeb] = handle_lctlg,
+ [0xeb] = handle_eb,
};
static int handle_noop(struct kvm_vcpu *vcpu)
@@ -258,6 +264,7 @@ static const intercept_handler_t intercept_funcs[] = {
[0x0C >> 2] = handle_instruction_and_prog,
[0x10 >> 2] = handle_noop,
[0x14 >> 2] = handle_noop,
+ [0x18 >> 2] = handle_noop,
[0x1C >> 2] = kvm_s390_handle_wait,
[0x20 >> 2] = handle_validity,
[0x28 >> 2] = handle_stop,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87418b50f21c..37116a77cb4b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -21,11 +21,31 @@
#include "gaccess.h"
#include "trace-s390.h"
+#define IOINT_SCHID_MASK 0x0000ffff
+#define IOINT_SSID_MASK 0x00030000
+#define IOINT_CSSID_MASK 0x03fc0000
+#define IOINT_AI_MASK 0x04000000
+
+static int is_ioint(u64 type)
+{
+ return ((type & 0xfffe0000u) != 0xfffe0000u);
+}
+
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
+static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
+}
+
+static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
+}
+
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
@@ -35,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
return 1;
}
+static u64 int_word_to_isc_bits(u32 int_word)
+{
+ u8 isc = (int_word & 0x38000000) >> 27;
+
+ return (0x80 >> isc) << 24;
+}
+
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
@@ -67,7 +94,22 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
case KVM_S390_SIGP_SET_PREFIX:
case KVM_S390_RESTART:
return 1;
+ case KVM_S390_MCHK:
+ if (psw_mchk_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
+ return 1;
+ return 0;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (psw_ioint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[6] &
+ int_word_to_isc_bits(inti->io.io_int_word))
+ return 1;
+ return 0;
default:
+ printk(KERN_WARNING "illegal interrupt type %llx\n",
+ inti->type);
BUG();
}
return 0;
@@ -93,6 +135,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
&vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
+ vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -116,6 +159,18 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
case KVM_S390_SIGP_STOP:
__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
break;
+ case KVM_S390_MCHK:
+ if (psw_mchk_disabled(vcpu))
+ vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR14;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+ break;
default:
BUG();
}
@@ -297,6 +352,73 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
exception = 1;
break;
+ case KVM_S390_MCHK:
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ inti->mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->mchk.cr14,
+ inti->mchk.mcic);
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_MCK_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ {
+ __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr;
+ __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word;
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ param0, param1);
+ rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
+ inti->io.subchannel_id);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
+ inti->io.subchannel_nr);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
+ inti->io.io_int_parm);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
+ inti->io.io_int_word);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_IO_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+ }
default:
BUG();
}
@@ -518,6 +640,61 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
}
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct kvm_s390_interrupt_info *n, *inti = NULL;
+ int deliver;
+
+ __reset_intercept_indicators(vcpu);
+ if (atomic_read(&li->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&li->lock);
+ list_for_each_entry_safe(inti, n, &li->list, list) {
+ if ((inti->type == KVM_S390_MCHK) &&
+ __interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&li->list))
+ atomic_set(&li->active, 0);
+ spin_unlock_bh(&li->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+
+ if (atomic_read(&fi->active)) {
+ do {
+ deliver = 0;
+ spin_lock(&fi->lock);
+ list_for_each_entry_safe(inti, n, &fi->list, list) {
+ if ((inti->type == KVM_S390_MCHK) &&
+ __interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock(&fi->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+}
+
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -540,12 +717,50 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
return 0;
}
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid)
+{
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti, *iter;
+
+ if ((!schid && !cr6) || (schid && cr6))
+ return NULL;
+ mutex_lock(&kvm->lock);
+ fi = &kvm->arch.float_int;
+ spin_lock(&fi->lock);
+ inti = NULL;
+ list_for_each_entry(iter, &fi->list, list) {
+ if (!is_ioint(iter->type))
+ continue;
+ if (cr6 &&
+ ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
+ continue;
+ if (schid) {
+ if (((schid & 0x00000000ffff0000) >> 16) !=
+ iter->io.subchannel_id)
+ continue;
+ if ((schid & 0x000000000000ffff) !=
+ iter->io.subchannel_nr)
+ continue;
+ }
+ inti = iter;
+ break;
+ }
+ if (inti)
+ list_del_init(&inti->list);
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock(&fi->lock);
+ mutex_unlock(&kvm->lock);
+ return inti;
+}
+
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti, *iter;
int sigcpu;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -569,6 +784,29 @@ int kvm_s390_inject_vm(struct kvm *kvm,
case KVM_S390_SIGP_STOP:
case KVM_S390_INT_EXTERNAL_CALL:
case KVM_S390_INT_EMERGENCY:
+ kfree(inti);
+ return -EINVAL;
+ case KVM_S390_MCHK:
+ VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+ s390int->parm64);
+ inti->type = s390int->type;
+ inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+ inti->mchk.mcic = s390int->parm64;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (s390int->type & IOINT_AI_MASK)
+ VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
+ else
+ VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
+ s390int->type & IOINT_CSSID_MASK,
+ s390int->type & IOINT_SSID_MASK,
+ s390int->type & IOINT_SCHID_MASK);
+ inti->type = s390int->type;
+ inti->io.subchannel_id = s390int->parm >> 16;
+ inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
+ inti->io.io_int_parm = s390int->parm64 >> 32;
+ inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
+ break;
default:
kfree(inti);
return -EINVAL;
@@ -579,7 +817,22 @@ int kvm_s390_inject_vm(struct kvm *kvm,
mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- list_add_tail(&inti->list, &fi->list);
+ if (!is_ioint(inti->type))
+ list_add_tail(&inti->list, &fi->list);
+ else {
+ u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+ /* Keep I/O interrupts sorted in isc order. */
+ list_for_each_entry(iter, &fi->list, list) {
+ if (!is_ioint(iter->type))
+ continue;
+ if (int_word_to_isc_bits(iter->io.io_int_word)
+ <= isc_bits)
+ continue;
+ break;
+ }
+ list_add_tail(&inti->list, &iter->list);
+ }
atomic_set(&fi->active, 1);
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
@@ -651,8 +904,15 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
inti->type = s390int->type;
inti->emerg.code = s390int->parm;
break;
+ case KVM_S390_MCHK:
+ VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+ s390int->parm64);
+ inti->type = s390int->type;
+ inti->mchk.mcic = s390int->parm64;
+ break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
default:
kfree(inti);
return -EINVAL;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2923781590a6..4cf35a0a79e7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -140,6 +140,8 @@ int kvm_dev_ioctl_check_extension(long ext)
#endif
case KVM_CAP_SYNC_REGS:
case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_S390_CSS_SUPPORT:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
@@ -234,6 +236,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.gmap)
goto out_nogmap;
}
+
+ kvm->arch.css_support = 0;
+
return 0;
out_nogmap:
debug_unregister(kvm->arch.dbf);
@@ -659,6 +664,7 @@ rerun_vcpu:
case KVM_EXIT_INTR:
case KVM_EXIT_S390_RESET:
case KVM_EXIT_S390_UCONTROL:
+ case KVM_EXIT_S390_TSCH:
break;
default:
BUG();
@@ -766,6 +772,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
} else
prefix = 0;
+ /*
+ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * them into the save area.
+ */
+ save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_access_regs(vcpu->run->s.regs.acrs);
+
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT;
@@ -810,6 +824,29 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_S390_CSS_SUPPORT:
+ if (!vcpu->kvm->arch.css_support) {
+ vcpu->kvm->arch.css_support = 1;
+ trace_kvm_s390_enable_css(vcpu->kvm);
+ }
+ r = 0;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -896,6 +933,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_ENABLE_CAP:
+ {
+ struct kvm_enable_cap cap;
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ break;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -930,7 +976,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
@@ -960,7 +1006,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
int rc;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index d75bc5e92c5b..4d89d64a8161 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -65,21 +65,67 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
vcpu->arch.sie_block->ihcpu = 0xffff;
}
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ u64 *address1, u64 *address2)
+{
+ u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+ u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+ u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+ u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+
+ *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+ *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+ ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+ /* The displacement is a 20-bit _SIGNED_ value */
+ if (disp2 & 0x80000)
+ disp2 += 0xfff00000;
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid);
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d768906f15c8..0ef9894606e5 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -18,23 +18,21 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
+#include <asm/ptrace.h>
+#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
u32 address = 0;
u8 tmp;
vcpu->stat.instruction_spx++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
if (operand2 & 3) {
@@ -67,15 +65,12 @@ out:
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
u32 address;
vcpu->stat.instruction_stpx++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
if (operand2 & 3) {
@@ -100,15 +95,12 @@ out:
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 useraddr;
int rc;
vcpu->stat.instruction_stap++;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+
+ useraddr = kvm_s390_get_base_disp_s(vcpu);
if (useraddr & 1) {
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -135,24 +127,96 @@ static int handle_skey(struct kvm_vcpu *vcpu)
return 0;
}
-static int handle_stsch(struct kvm_vcpu *vcpu)
+static int handle_tpi(struct kvm_vcpu *vcpu)
{
- vcpu->stat.instruction_stsch++;
- VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
- /* condition code 3 */
+ u64 addr;
+ struct kvm_s390_interrupt_info *inti;
+ int cc;
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
+ if (inti) {
+ if (addr) {
+ /*
+ * Store the two-word I/O interruption code into the
+ * provided area.
+ */
+ put_guest_u16(vcpu, addr, inti->io.subchannel_id);
+ put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
+ put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
+ } else {
+ /*
+ * Store the three-word I/O interruption code into
+ * the appropriate lowcore area.
+ */
+ put_guest_u16(vcpu, 184, inti->io.subchannel_id);
+ put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
+ put_guest_u32(vcpu, 188, inti->io.io_int_parm);
+ put_guest_u32(vcpu, 192, inti->io.io_int_word);
+ }
+ cc = 1;
+ } else
+ cc = 0;
+ kfree(inti);
+ /* Set condition code and we're done. */
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
return 0;
}
-static int handle_chsc(struct kvm_vcpu *vcpu)
+static int handle_tsch(struct kvm_vcpu *vcpu)
{
- vcpu->stat.instruction_chsc++;
- VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
- /* condition code 3 */
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
- return 0;
+ struct kvm_s390_interrupt_info *inti;
+
+ inti = kvm_s390_get_io_int(vcpu->kvm, 0,
+ vcpu->run->s.regs.gprs[1]);
+
+ /*
+ * Prepare exit to userspace.
+ * We indicate whether we dequeued a pending I/O interrupt
+ * so that userspace can re-inject it if the instruction gets
+ * a program check. While this may reorder the pending I/O
+ * interrupts, it is not a problem since their priority is
+ * preserved.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
+ vcpu->run->s390_tsch.dequeued = !!inti;
+ if (inti) {
+ vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
+ vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
+ vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
+ vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
+ }
+ vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
+ kfree(inti);
+ return -EREMOTE;
+}
+
+static int handle_io_inst(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
+
+ if (vcpu->kvm->arch.css_support) {
+ /*
+ * Most I/O instructions will be handled by userspace.
+ * Exceptions are tpi and the interrupt portion of tsch.
+ */
+ if (vcpu->arch.sie_block->ipa == 0xb236)
+ return handle_tpi(vcpu);
+ if (vcpu->arch.sie_block->ipa == 0xb235)
+ return handle_tsch(vcpu);
+ /* Handle in userspace. */
+ return -EOPNOTSUPP;
+ } else {
+ /*
+ * Set condition code 3 to stop the guest from issuing channel
+ * I/O instructions.
+ */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+ }
}
static int handle_stfl(struct kvm_vcpu *vcpu)
@@ -176,17 +240,107 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
return 0;
}
+static void handle_new_psw(struct kvm_vcpu *vcpu)
+{
+ /* Check whether the new psw is enabled for machine checks. */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
+ kvm_s390_deliver_pending_machine_checks(vcpu);
+}
+
+#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
+#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_31 0x000000007fffffffUL
+
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
+{
+ u64 addr;
+ psw_compat_t new_psw;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ if (addr & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ if (!(new_psw.mask & PSW32_MASK_BASE)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->gpsw.mask =
+ (new_psw.mask & ~PSW32_MASK_BASE) << 32;
+ vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+ (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+ ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_EA)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ handle_new_psw(vcpu);
+out:
+ return 0;
+}
+
+static int handle_lpswe(struct kvm_vcpu *vcpu)
+{
+ u64 addr;
+ psw_t new_psw;
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ if (addr & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
+ vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+ (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_BA) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
+ (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+ ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_EA)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ handle_new_psw(vcpu);
+out:
+ return 0;
+}
+
static int handle_stidp(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
int rc;
vcpu->stat.instruction_stidp++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
if (operand2 & 7) {
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -240,17 +394,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
unsigned long mem;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
if (operand2 & 0xfff && fc > 0)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -297,7 +447,7 @@ out_fail:
return 0;
}
-static intercept_handler_t priv_handlers[256] = {
+static const intercept_handler_t b2_handlers[256] = {
[0x02] = handle_stidp,
[0x10] = handle_set_prefix,
[0x11] = handle_store_prefix,
@@ -305,10 +455,25 @@ static intercept_handler_t priv_handlers[256] = {
[0x29] = handle_skey,
[0x2a] = handle_skey,
[0x2b] = handle_skey,
- [0x34] = handle_stsch,
- [0x5f] = handle_chsc,
+ [0x30] = handle_io_inst,
+ [0x31] = handle_io_inst,
+ [0x32] = handle_io_inst,
+ [0x33] = handle_io_inst,
+ [0x34] = handle_io_inst,
+ [0x35] = handle_io_inst,
+ [0x36] = handle_io_inst,
+ [0x37] = handle_io_inst,
+ [0x38] = handle_io_inst,
+ [0x39] = handle_io_inst,
+ [0x3a] = handle_io_inst,
+ [0x3b] = handle_io_inst,
+ [0x3c] = handle_io_inst,
+ [0x5f] = handle_io_inst,
+ [0x74] = handle_io_inst,
+ [0x76] = handle_io_inst,
[0x7d] = handle_stsi,
[0xb1] = handle_stfl,
+ [0xb2] = handle_lpswe,
};
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
@@ -322,7 +487,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
* state bit and (a) handle the instruction or (b) send a code 2
* program check.
* Anything else goes to userspace.*/
- handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
if (handler) {
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu,
@@ -333,19 +498,74 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_epsw(struct kvm_vcpu *vcpu)
+{
+ int reg1, reg2;
+
+ reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24;
+ reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+
+ /* This extracts the mask half of the psw. */
+ vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
+ if (reg2) {
+ vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg2] |=
+ vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
+ }
+ return 0;
+}
+
+static const intercept_handler_t b9_handlers[256] = {
+ [0x8d] = handle_epsw,
+ [0x9c] = handle_io_inst,
+};
+
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ /* This is handled just as for the B2 instructions. */
+ handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler) {
+ if ((handler != handle_epsw) &&
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+ else
+ return handler(vcpu);
+ }
+ return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t eb_handlers[256] = {
+ [0x8a] = handle_io_inst,
+};
+
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ /* All eb instructions that end up here are privileged. */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+ handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
+
static int handle_tprot(struct kvm_vcpu *vcpu)
{
- int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
- int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
- int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
- int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
- u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0;
- u64 address2 = disp2 + base2 ? vcpu->run->s.regs.gprs[base2] : 0;
+ u64 address1, address2;
struct vm_area_struct *vma;
unsigned long user_address;
vcpu->stat.instruction_tprot++;
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+
/* we only handle the Linux memory detection case:
* access key == 0
* guest DAT == off
@@ -405,7 +625,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
return 0;
}
-static intercept_handler_t x01_handlers[256] = {
+static const intercept_handler_t x01_handlers[256] = {
[0x07] = handle_sckpf,
};
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 566ddf6e8dfb..1c48ab2845e0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -137,8 +137,10 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
inti->type = KVM_S390_SIGP_STOP;
spin_lock_bh(&li->lock);
- if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
+ if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ kfree(inti);
goto out;
+ }
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
@@ -324,8 +326,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u32 parameter;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
u8 order_code;
@@ -336,9 +336,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu,
PGM_PRIVILEGED_OPERATION);
- order_code = disp2;
- if (base2)
- order_code += vcpu->run->s.regs.gprs[base2];
+ order_code = kvm_s390_get_base_disp_rs(vcpu);
if (r1 % 2)
parameter = vcpu->run->s.regs.gprs[r1];
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 90fdf85b5ff7..13f30f58a2df 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -141,13 +141,13 @@ TRACE_EVENT(kvm_s390_inject_vcpu,
* Trace point for the actual delivery of interrupts.
*/
TRACE_EVENT(kvm_s390_deliver_interrupt,
- TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1),
+ TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1),
TP_ARGS(id, type, data0, data1),
TP_STRUCT__entry(
__field(int, id)
__field(__u32, inttype)
- __field(__u32, data0)
+ __field(__u64, data0)
__field(__u64, data1)
),
@@ -159,7 +159,7 @@ TRACE_EVENT(kvm_s390_deliver_interrupt,
),
TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
- "data:%08x %016llx",
+ "data:%08llx %016llx",
__entry->id, __entry->inttype,
__print_symbolic(__entry->inttype, kvm_s390_int_type),
__entry->data0, __entry->data1)
@@ -204,6 +204,26 @@ TRACE_EVENT(kvm_s390_stop_request,
);
+/*
+ * Trace point for enabling channel I/O instruction support.
+ */
+TRACE_EVENT(kvm_s390_enable_css,
+ TP_PROTO(void *kvm),
+ TP_ARGS(kvm),
+
+ TP_STRUCT__entry(
+ __field(void *, kvm)
+ ),
+
+ TP_fast_assign(
+ __entry->kvm = kvm;
+ ),
+
+ TP_printk("enabling channel I/O support (kvm @ %p)\n",
+ __entry->kvm)
+ );
+
+
#endif /* _TRACE_KVMS390_H */
/* This part must be outside protection */
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a303c95346cb..a5d07bc2a547 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -99,7 +99,7 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
static int pci_perf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, pci_perf_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static const struct file_operations debugfs_pci_perf_fops = {
@@ -121,7 +121,7 @@ static int pci_debug_show(struct seq_file *m, void *v)
static int pci_debug_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, pci_debug_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static const struct file_operations debugfs_pci_debug_fops = {
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 90fd3482b9e2..0297931335e1 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
- struct hlist_node *entry;
struct msi_map *map;
- hlist_for_each_entry_rcu(map, entry,
+ hlist_for_each_entry_rcu(map,
&msi_hash[msi_hashfn(irq)], msi_chain)
if (map->irq == irq)
return map->msi;
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 3b1482e7afac..e569aa1fd2ba 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -12,6 +12,7 @@ config SCORE
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h
index 5d566c7a0af2..6a9421c693ca 100644
--- a/arch/score/include/asm/elf.h
+++ b/arch/score/include/asm/elf.h
@@ -52,11 +52,6 @@ typedef elf_fpreg_t elf_fpregset_t;
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_SCORE7
-#define SET_PERSONALITY(ex) \
-do { \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
-} while (0)
-
struct task_struct;
struct pt_regs;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ef6717a64bc7..5e859633ce69 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -148,9 +148,6 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
def_bool n
-config ARCH_NO_VIRT_TO_BUS
- def_bool y
-
config ARCH_HAS_DEFAULT_IDLE
def_bool y
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 1208b09e95c3..42b46e61a2d5 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 72246bc06884..dfdad72c61ca 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -38,7 +38,7 @@ void die(const char *str, struct pt_regs *regs, long err)
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
oops_exit();
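add_taint() now takes a second argument that tells lockdep whether the taint makes further lock debugging untrustworthy. In an oops path the machine state is suspect, hence LOCKDEP_NOW_UNRELIABLE here; a purely administrative taint could pass LOCKDEP_STILL_OK instead and keep lockdep running. A sketch of the two cases:

    /* dying: lock state can no longer be trusted */
    add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

    /* hypothetical benign taint: lockdep results remain meaningful */
    add_taint(TAINT_USER, LOCKDEP_STILL_OK);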
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index 620fa7ff9eec..aea14855e656 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
static ssize_t alignment_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- int *data = PDE(file->f_path.dentry->d_inode)->data;
+ int *data = PDE(file_inode(file))->data;
char mode;
if (count > 0) {
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58fb1e3f631d..289127d5241c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -146,9 +146,6 @@ config GENERIC_GPIO
help
Generic GPIO API support
-config ARCH_NO_VIRT_TO_BUS
- def_bool y
-
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index ac74a2c98e6d..a24e41fcdde1 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -128,7 +128,4 @@ typedef struct {
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index a39d1ba5a119..e72212148d2a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 9fcc6b4e93b3..54df554b82d9 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
static int __ldc_channel_exists(unsigned long id)
{
struct ldc_channel *lp;
- struct hlist_node *n;
- hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+ hlist_for_each_entry(lp, &ldc_channel_list, list) {
if (lp->id == id)
return 1;
}
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 0eaf0059aaef..88a127b9c69e 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -115,7 +115,7 @@ static void __init process_switch(char c)
break;
}
cheetah_pcache_forced_on = 1;
- add_taint(TAINT_MACHINE_CHECK);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
cheetah_enable_pcache();
break;
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index cd5dc4d411d1..b524f91dd0e5 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -726,7 +726,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-void do_signal32(sigset_t *oldset, struct pt_regs * regs)
+void do_signal32(struct pt_regs * regs)
{
struct ksignal ksig;
unsigned long orig_i0 = 0;
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 260ddcd412bf..088134834dab 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -43,8 +43,8 @@ sys_call_table32:
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
.word sys_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
- .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
+/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index a5785ea2a85d..662982946a89 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -58,7 +58,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
show_regs(regs);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
__SAVE; __SAVE; __SAVE; __SAVE;
__SAVE; __SAVE; __SAVE; __SAVE;
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index e7ecf1507d90..8d38ca97aa23 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2383,7 +2383,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
__asm__ __volatile__("flushw");
show_regs(regs);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (regs->tstate & TSTATE_PRIV) {
struct thread_info *tp = current_thread_info();
struct reg_window *rw = (struct reg_window *)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4ce6e4c390e0..ff496ab1e794 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -17,6 +17,7 @@ config TILE
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select HAVE_SYSCALL_WRAPPERS if TILEGX
+ select HAVE_VIRT_TO_BUS
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 60651df5f952..dc50b157fc83 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -9,6 +9,7 @@ config UNICORE32
select GENERIC_ATOMIC64
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select HAVE_VIRT_TO_BUS
select ARCH_HAVE_CUSTOM_GPIO_H
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 2054f0d4db13..0870b68d2ad9 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -231,7 +231,7 @@ void die(const char *str, struct pt_regs *regs, int err)
ret = __die(str, err, thread, regs);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
oops_exit();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a9383370311..a4f24f5b1218 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,6 +112,7 @@ config X86
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_REL if X86_32
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index f8fa41190c35..c205035a6b96 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -19,23 +19,28 @@
static efi_system_table_t *sys_table;
+static void efi_char16_printk(efi_char16_t *str)
+{
+ struct efi_simple_text_output_protocol *out;
+
+ out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+ efi_call_phys2(out->output_string, out, str);
+}
+
static void efi_printk(char *str)
{
char *s8;
for (s8 = str; *s8; s8++) {
- struct efi_simple_text_output_protocol *out;
efi_char16_t ch[2] = { 0 };
ch[0] = *s8;
- out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
-
if (*s8 == '\n') {
efi_char16_t nl[2] = { '\r', 0 };
- efi_call_phys2(out->output_string, out, nl);
+ efi_char16_printk(nl);
}
- efi_call_phys2(out->output_string, out, ch);
+ efi_char16_printk(ch);
}
}
@@ -709,7 +714,12 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
break;
- *p++ = *str++;
+ if (*str == '/') {
+ *p++ = '\\';
+ str++;
+ } else {
+ *p++ = *str++;
+ }
}
*p = '\0';
@@ -737,7 +747,9 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
status = efi_call_phys5(fh->open, fh, &h, filename_16,
EFI_FILE_MODE_READ, (u64)0);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to open initrd file\n");
+ efi_printk("Failed to open initrd file: ");
+ efi_char16_printk(filename_16);
+ efi_printk("\n");
goto close_handles;
}
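The eboot.c change factors the UCS-2 output into efi_char16_printk() so the error path can print the initrd file name (already a UCS-2 string, with '/' translated to the '\\' separators EFI file protocols expect) alongside the ASCII message. The underlying idea, as a sketch: the firmware's simple-text-output protocol only accepts efi_char16_t strings, so each ASCII byte is widened to a one-character UCS-2 string, with a carriage return injected before every newline:

    /* sketch only; mirrors what efi_printk() does via efi_char16_printk() */
    static void print_ascii(const char *s)
    {
            for (; *s; s++) {
                    efi_char16_t ch[2] = { (efi_char16_t)*s, 0 };

                    if (*s == '\n') {
                            efi_char16_t nl[2] = { '\r', 0 };
                            efi_char16_printk(nl);
                    }
                    efi_char16_printk(ch);
            }
    }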
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index e0ca7c9ac383..63947a8f9f0f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
+obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
aes-i586-y := aes-i586-asm_32.o aes_glue.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
@@ -52,3 +53,4 @@ ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
crc32c-intel-y := crc32c-intel_glue.o
crc32c-intel-$(CONFIG_CRYPTO_CRC32C_X86_64) += crc32c-pcl-intel-asm_64.o
+crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
index b949ec2f9af4..2849dbc59e11 100644
--- a/arch/x86/crypto/aes-i586-asm_32.S
+++ b/arch/x86/crypto/aes-i586-asm_32.S
@@ -36,6 +36,7 @@
.file "aes-i586-asm.S"
.text
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
@@ -219,14 +220,10 @@
// AES (Rijndael) Encryption Subroutine
/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
-.global aes_enc_blk
-
.extern crypto_ft_tab
.extern crypto_fl_tab
-.align 4
-
-aes_enc_blk:
+ENTRY(aes_enc_blk)
push %ebp
mov ctx(%esp),%ebp
@@ -290,18 +287,15 @@ aes_enc_blk:
mov %r0,(%ebp)
pop %ebp
ret
+ENDPROC(aes_enc_blk)
// AES (Rijndael) Decryption Subroutine
/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
-.global aes_dec_blk
-
.extern crypto_it_tab
.extern crypto_il_tab
-.align 4
-
-aes_dec_blk:
+ENTRY(aes_dec_blk)
push %ebp
mov ctx(%esp),%ebp
@@ -365,3 +359,4 @@ aes_dec_blk:
mov %r0,(%ebp)
pop %ebp
ret
+ENDPROC(aes_dec_blk)
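The crypto assembly conversions here and below all follow one pattern: open-coded .global/.type/.align directives become the ENTRY()/ENDPROC() helpers from <linux/linkage.h>, and jump targets that are not meant to be exported gain a .L prefix so the assembler keeps them local to the object file. Roughly, and treating this as an approximation of the in-tree macros rather than their exact text, the helpers expand to:

    /* from <linux/linkage.h>, simplified */
    #define ENTRY(name)             \
            .globl name;            \
            .align 4, 0x90;         \
            name:

    #define ENDPROC(name)                   \
            .type name, @function;          \
            .size name, .-name

ENDPROC also records a proper symbol type and size in the object file, which disassemblers and profilers can use.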
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
index 5b577d5a059b..910565547163 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -15,6 +15,7 @@
.text
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#define R1 %rax
@@ -49,10 +50,8 @@
#define R11 %r11
#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
- .global FUNC; \
- .type FUNC,@function; \
- .align 8; \
-FUNC: movq r1,r2; \
+ ENTRY(FUNC); \
+ movq r1,r2; \
movq r3,r4; \
leaq KEY+48(r8),r9; \
movq r10,r11; \
@@ -71,14 +70,15 @@ FUNC: movq r1,r2; \
je B192; \
leaq 32(r9),r9;
-#define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+#define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
movq r1,r2; \
movq r3,r4; \
movl r5 ## E,(r9); \
movl r6 ## E,4(r9); \
movl r7 ## E,8(r9); \
movl r8 ## E,12(r9); \
- ret;
+ ret; \
+ ENDPROC(FUNC);
#define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
movzbl r2 ## H,r5 ## E; \
@@ -133,7 +133,7 @@ FUNC: movq r1,r2; \
#define entry(FUNC,KEY,B128,B192) \
prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
-#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11)
+#define return(FUNC) epilogue(FUNC,R8,R2,R9,R7,R5,R6,R3,R4,R11)
#define encrypt_round(TAB,OFFSET) \
round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \
@@ -151,12 +151,12 @@ FUNC: movq r1,r2; \
/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_enc_blk,0,enc128,enc192)
+ entry(aes_enc_blk,0,.Le128,.Le192)
encrypt_round(crypto_ft_tab,-96)
encrypt_round(crypto_ft_tab,-80)
-enc192: encrypt_round(crypto_ft_tab,-64)
+.Le192: encrypt_round(crypto_ft_tab,-64)
encrypt_round(crypto_ft_tab,-48)
-enc128: encrypt_round(crypto_ft_tab,-32)
+.Le128: encrypt_round(crypto_ft_tab,-32)
encrypt_round(crypto_ft_tab,-16)
encrypt_round(crypto_ft_tab, 0)
encrypt_round(crypto_ft_tab, 16)
@@ -166,16 +166,16 @@ enc128: encrypt_round(crypto_ft_tab,-32)
encrypt_round(crypto_ft_tab, 80)
encrypt_round(crypto_ft_tab, 96)
encrypt_final(crypto_fl_tab,112)
- return
+ return(aes_enc_blk)
/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
- entry(aes_dec_blk,240,dec128,dec192)
+ entry(aes_dec_blk,240,.Ld128,.Ld192)
decrypt_round(crypto_it_tab,-96)
decrypt_round(crypto_it_tab,-80)
-dec192: decrypt_round(crypto_it_tab,-64)
+.Ld192: decrypt_round(crypto_it_tab,-64)
decrypt_round(crypto_it_tab,-48)
-dec128: decrypt_round(crypto_it_tab,-32)
+.Ld128: decrypt_round(crypto_it_tab,-32)
decrypt_round(crypto_it_tab,-16)
decrypt_round(crypto_it_tab, 0)
decrypt_round(crypto_it_tab, 16)
@@ -185,4 +185,4 @@ dec128: decrypt_round(crypto_it_tab,-32)
decrypt_round(crypto_it_tab, 80)
decrypt_round(crypto_it_tab, 96)
decrypt_final(crypto_il_tab,112)
- return
+ return(aes_dec_blk)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 3470624d7835..04b797767b9e 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1262,7 +1262,6 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
* poly = x^128 + x^127 + x^126 + x^121 + 1
*
*****************************************************************************/
-
ENTRY(aesni_gcm_dec)
push %r12
push %r13
@@ -1437,6 +1436,7 @@ _return_T_done_decrypt:
pop %r13
pop %r12
ret
+ENDPROC(aesni_gcm_dec)
/*****************************************************************************
@@ -1700,10 +1700,12 @@ _return_T_done_encrypt:
pop %r13
pop %r12
ret
+ENDPROC(aesni_gcm_enc)
#endif
+.align 4
_key_expansion_128:
_key_expansion_256a:
pshufd $0b11111111, %xmm1, %xmm1
@@ -1715,6 +1717,8 @@ _key_expansion_256a:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
+ENDPROC(_key_expansion_128)
+ENDPROC(_key_expansion_256a)
.align 4
_key_expansion_192a:
@@ -1739,6 +1743,7 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
+ENDPROC(_key_expansion_192a)
.align 4
_key_expansion_192b:
@@ -1758,6 +1763,7 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
+ENDPROC(_key_expansion_192b)
.align 4
_key_expansion_256b:
@@ -1770,6 +1776,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
+ENDPROC(_key_expansion_256b)
/*
* int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1882,6 +1889,7 @@ ENTRY(aesni_set_key)
popl KEYP
#endif
ret
+ENDPROC(aesni_set_key)
/*
* void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -1903,6 +1911,7 @@ ENTRY(aesni_enc)
popl KEYP
#endif
ret
+ENDPROC(aesni_enc)
/*
* _aesni_enc1: internal ABI
@@ -1960,6 +1969,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
+ENDPROC(_aesni_enc1)
/*
* _aesni_enc4: internal ABI
@@ -2068,6 +2078,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
+ENDPROC(_aesni_enc4)
/*
* void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2090,6 +2101,7 @@ ENTRY(aesni_dec)
popl KEYP
#endif
ret
+ENDPROC(aesni_dec)
/*
* _aesni_dec1: internal ABI
@@ -2147,6 +2159,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
+ENDPROC(_aesni_dec1)
/*
* _aesni_dec4: internal ABI
@@ -2255,6 +2268,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
+ENDPROC(_aesni_dec4)
/*
* void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2312,6 +2326,7 @@ ENTRY(aesni_ecb_enc)
popl LEN
#endif
ret
+ENDPROC(aesni_ecb_enc)
/*
* void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2370,6 +2385,7 @@ ENTRY(aesni_ecb_dec)
popl LEN
#endif
ret
+ENDPROC(aesni_ecb_dec)
/*
* void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2411,6 +2427,7 @@ ENTRY(aesni_cbc_enc)
popl IVP
#endif
ret
+ENDPROC(aesni_cbc_enc)
/*
* void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2501,6 +2518,7 @@ ENTRY(aesni_cbc_dec)
popl IVP
#endif
ret
+ENDPROC(aesni_cbc_dec)
#ifdef __x86_64__
.align 16
@@ -2527,6 +2545,7 @@ _aesni_inc_init:
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
ret
+ENDPROC(_aesni_inc_init)
/*
* _aesni_inc: internal ABI
@@ -2555,6 +2574,7 @@ _aesni_inc:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
ret
+ENDPROC(_aesni_inc)
/*
* void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2615,4 +2635,5 @@ ENTRY(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
ret
+ENDPROC(aesni_ctr_enc)
#endif
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
index 391d245dc086..246c67006ed0 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -20,6 +20,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "blowfish-x86_64-asm.S"
.text
@@ -116,11 +118,7 @@
bswapq RX0; \
xorq RX0, (RIO);
-.align 8
-.global __blowfish_enc_blk
-.type __blowfish_enc_blk,@function;
-
-__blowfish_enc_blk:
+ENTRY(__blowfish_enc_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -148,19 +146,16 @@ __blowfish_enc_blk:
movq %r10, RIO;
test %cl, %cl;
- jnz __enc_xor;
+ jnz .L__enc_xor;
write_block();
ret;
-__enc_xor:
+.L__enc_xor:
xor_block();
ret;
+ENDPROC(__blowfish_enc_blk)
-.align 8
-.global blowfish_dec_blk
-.type blowfish_dec_blk,@function;
-
-blowfish_dec_blk:
+ENTRY(blowfish_dec_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -189,6 +184,7 @@ blowfish_dec_blk:
movq %r11, %rbp;
ret;
+ENDPROC(blowfish_dec_blk)
/**********************************************************************
4-way blowfish, four blocks parallel
@@ -300,11 +296,7 @@ blowfish_dec_blk:
bswapq RX3; \
xorq RX3, 24(RIO);
-.align 8
-.global __blowfish_enc_blk_4way
-.type __blowfish_enc_blk_4way,@function;
-
-__blowfish_enc_blk_4way:
+ENTRY(__blowfish_enc_blk_4way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -336,7 +328,7 @@ __blowfish_enc_blk_4way:
movq %r11, RIO;
test %bpl, %bpl;
- jnz __enc_xor4;
+ jnz .L__enc_xor4;
write_block4();
@@ -344,18 +336,15 @@ __blowfish_enc_blk_4way:
popq %rbp;
ret;
-__enc_xor4:
+.L__enc_xor4:
xor_block4();
popq %rbx;
popq %rbp;
ret;
+ENDPROC(__blowfish_enc_blk_4way)
-.align 8
-.global blowfish_dec_blk_4way
-.type blowfish_dec_blk_4way,@function;
-
-blowfish_dec_blk_4way:
+ENTRY(blowfish_dec_blk_4way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -387,4 +376,4 @@ blowfish_dec_blk_4way:
popq %rbp;
ret;
-
+ENDPROC(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index 2306d2e4816f..cfc163469c71 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -15,6 +15,8 @@
* http://koti.mbnet.fi/axh/crypto/camellia-BSD-1.2.0-aesni1.tar.xz
*/
+#include <linux/linkage.h>
+
#define CAMELLIA_TABLE_BYTE_LEN 272
/* struct camellia_ctx: */
@@ -190,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
ret;
+ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
@@ -197,6 +200,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
ret;
+ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
* IN/OUT:
@@ -709,8 +713,6 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
.text
.align 8
-.type __camellia_enc_blk16,@function;
-
__camellia_enc_blk16:
/* input:
* %rdi: ctx, CTX
@@ -793,10 +795,9 @@ __camellia_enc_blk16:
%xmm15, %rax, %rcx, 24);
jmp .Lenc_done;
+ENDPROC(__camellia_enc_blk16)
.align 8
-.type __camellia_dec_blk16,@function;
-
__camellia_dec_blk16:
/* input:
* %rdi: ctx, CTX
@@ -877,12 +878,9 @@ __camellia_dec_blk16:
((key_table + (24) * 8) + 4)(CTX));
jmp .Ldec_max24;
+ENDPROC(__camellia_dec_blk16)
-.align 8
-.global camellia_ecb_enc_16way
-.type camellia_ecb_enc_16way,@function;
-
-camellia_ecb_enc_16way:
+ENTRY(camellia_ecb_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -903,12 +901,9 @@ camellia_ecb_enc_16way:
%xmm8, %rsi);
ret;
+ENDPROC(camellia_ecb_enc_16way)
-.align 8
-.global camellia_ecb_dec_16way
-.type camellia_ecb_dec_16way,@function;
-
-camellia_ecb_dec_16way:
+ENTRY(camellia_ecb_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -934,12 +929,9 @@ camellia_ecb_dec_16way:
%xmm8, %rsi);
ret;
+ENDPROC(camellia_ecb_dec_16way)
-.align 8
-.global camellia_cbc_dec_16way
-.type camellia_cbc_dec_16way,@function;
-
-camellia_cbc_dec_16way:
+ENTRY(camellia_cbc_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -986,6 +978,7 @@ camellia_cbc_dec_16way:
%xmm8, %rsi);
ret;
+ENDPROC(camellia_cbc_dec_16way)
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
@@ -993,11 +986,7 @@ camellia_cbc_dec_16way:
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
-.align 8
-.global camellia_ctr_16way
-.type camellia_ctr_16way,@function;
-
-camellia_ctr_16way:
+ENTRY(camellia_ctr_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -1100,3 +1089,4 @@ camellia_ctr_16way:
%xmm8, %rsi);
ret;
+ENDPROC(camellia_ctr_16way)
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 0b3374335fdc..310319c601ed 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -20,6 +20,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "camellia-x86_64-asm_64.S"
.text
@@ -188,10 +190,7 @@
bswapq RAB0; \
movq RAB0, 4*2(RIO);
-.global __camellia_enc_blk;
-.type __camellia_enc_blk,@function;
-
-__camellia_enc_blk:
+ENTRY(__camellia_enc_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -214,33 +213,31 @@ __camellia_enc_blk:
movl $24, RT1d; /* max */
cmpb $16, key_length(CTX);
- je __enc_done;
+ je .L__enc_done;
enc_fls(24);
enc_rounds(24);
movl $32, RT1d; /* max */
-__enc_done:
+.L__enc_done:
testb RXORbl, RXORbl;
movq RDST, RIO;
- jnz __enc_xor;
+ jnz .L__enc_xor;
enc_outunpack(mov, RT1);
movq RRBP, %rbp;
ret;
-__enc_xor:
+.L__enc_xor:
enc_outunpack(xor, RT1);
movq RRBP, %rbp;
ret;
+ENDPROC(__camellia_enc_blk)
-.global camellia_dec_blk;
-.type camellia_dec_blk,@function;
-
-camellia_dec_blk:
+ENTRY(camellia_dec_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -258,12 +255,12 @@ camellia_dec_blk:
dec_inpack(RT2);
cmpb $24, RT2bl;
- je __dec_rounds16;
+ je .L__dec_rounds16;
dec_rounds(24);
dec_fls(24);
-__dec_rounds16:
+.L__dec_rounds16:
dec_rounds(16);
dec_fls(16);
dec_rounds(8);
@@ -276,6 +273,7 @@ __dec_rounds16:
movq RRBP, %rbp;
ret;
+ENDPROC(camellia_dec_blk)
/**********************************************************************
2-way camellia
@@ -426,10 +424,7 @@ __dec_rounds16:
bswapq RAB1; \
movq RAB1, 12*2(RIO);
-.global __camellia_enc_blk_2way;
-.type __camellia_enc_blk_2way,@function;
-
-__camellia_enc_blk_2way:
+ENTRY(__camellia_enc_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -453,16 +448,16 @@ __camellia_enc_blk_2way:
movl $24, RT2d; /* max */
cmpb $16, key_length(CTX);
- je __enc2_done;
+ je .L__enc2_done;
enc_fls2(24);
enc_rounds2(24);
movl $32, RT2d; /* max */
-__enc2_done:
+.L__enc2_done:
test RXORbl, RXORbl;
movq RDST, RIO;
- jnz __enc2_xor;
+ jnz .L__enc2_xor;
enc_outunpack2(mov, RT2);
@@ -470,17 +465,15 @@ __enc2_done:
popq %rbx;
ret;
-__enc2_xor:
+.L__enc2_xor:
enc_outunpack2(xor, RT2);
movq RRBP, %rbp;
popq %rbx;
ret;
+ENDPROC(__camellia_enc_blk_2way)
-.global camellia_dec_blk_2way;
-.type camellia_dec_blk_2way,@function;
-
-camellia_dec_blk_2way:
+ENTRY(camellia_dec_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -499,12 +492,12 @@ camellia_dec_blk_2way:
dec_inpack2(RT2);
cmpb $24, RT2bl;
- je __dec2_rounds16;
+ je .L__dec2_rounds16;
dec_rounds2(24);
dec_fls2(24);
-__dec2_rounds16:
+.L__dec2_rounds16:
dec_rounds2(16);
dec_fls2(16);
dec_rounds2(8);
@@ -518,3 +511,4 @@ __dec2_rounds16:
movq RRBP, %rbp;
movq RXOR, %rbx;
ret;
+ENDPROC(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index 15b00ac7cbd3..c35fd5d6ecd2 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -23,6 +23,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "cast5-avx-x86_64-asm_64.S"
.extern cast_s1
@@ -211,8 +213,6 @@
.text
.align 16
-.type __cast5_enc_blk16,@function;
-
__cast5_enc_blk16:
/* input:
* %rdi: ctx, CTX
@@ -263,14 +263,14 @@ __cast5_enc_blk16:
movzbl rr(CTX), %eax;
testl %eax, %eax;
- jnz __skip_enc;
+ jnz .L__skip_enc;
round(RL, RR, 12, 1);
round(RR, RL, 13, 2);
round(RL, RR, 14, 3);
round(RR, RL, 15, 1);
-__skip_enc:
+.L__skip_enc:
popq %rbx;
popq %rbp;
@@ -282,10 +282,9 @@ __skip_enc:
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
+ENDPROC(__cast5_enc_blk16)
.align 16
-.type __cast5_dec_blk16,@function;
-
__cast5_dec_blk16:
/* input:
* %rdi: ctx, CTX
@@ -323,14 +322,14 @@ __cast5_dec_blk16:
movzbl rr(CTX), %eax;
testl %eax, %eax;
- jnz __skip_dec;
+ jnz .L__skip_dec;
round(RL, RR, 15, 1);
round(RR, RL, 14, 3);
round(RL, RR, 13, 2);
round(RR, RL, 12, 1);
-__dec_tail:
+.L__dec_tail:
round(RL, RR, 11, 3);
round(RR, RL, 10, 2);
round(RL, RR, 9, 1);
@@ -355,15 +354,12 @@ __dec_tail:
ret;
-__skip_dec:
+.L__skip_dec:
vpsrldq $4, RKR, RKR;
- jmp __dec_tail;
+ jmp .L__dec_tail;
+ENDPROC(__cast5_dec_blk16)
-.align 16
-.global cast5_ecb_enc_16way
-.type cast5_ecb_enc_16way,@function;
-
-cast5_ecb_enc_16way:
+ENTRY(cast5_ecb_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -393,12 +389,9 @@ cast5_ecb_enc_16way:
vmovdqu RL4, (7*4*4)(%r11);
ret;
+ENDPROC(cast5_ecb_enc_16way)
-.align 16
-.global cast5_ecb_dec_16way
-.type cast5_ecb_dec_16way,@function;
-
-cast5_ecb_dec_16way:
+ENTRY(cast5_ecb_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -428,12 +421,9 @@ cast5_ecb_dec_16way:
vmovdqu RL4, (7*4*4)(%r11);
ret;
+ENDPROC(cast5_ecb_dec_16way)
-.align 16
-.global cast5_cbc_dec_16way
-.type cast5_cbc_dec_16way,@function;
-
-cast5_cbc_dec_16way:
+ENTRY(cast5_cbc_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -480,12 +470,9 @@ cast5_cbc_dec_16way:
popq %r12;
ret;
+ENDPROC(cast5_cbc_dec_16way)
-.align 16
-.global cast5_ctr_16way
-.type cast5_ctr_16way,@function;
-
-cast5_ctr_16way:
+ENTRY(cast5_ctr_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -556,3 +543,4 @@ cast5_ctr_16way:
popq %r12;
ret;
+ENDPROC(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 2569d0da841f..f93b6105a0ce 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -23,6 +23,7 @@
*
*/
+#include <linux/linkage.h>
#include "glue_helper-asm-avx.S"
.file "cast6-avx-x86_64-asm_64.S"
@@ -250,8 +251,6 @@
.text
.align 8
-.type __cast6_enc_blk8,@function;
-
__cast6_enc_blk8:
/* input:
* %rdi: ctx, CTX
@@ -295,10 +294,9 @@ __cast6_enc_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
+ENDPROC(__cast6_enc_blk8)
.align 8
-.type __cast6_dec_blk8,@function;
-
__cast6_dec_blk8:
/* input:
* %rdi: ctx, CTX
@@ -341,12 +339,9 @@ __cast6_dec_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
+ENDPROC(__cast6_dec_blk8)
-.align 8
-.global cast6_ecb_enc_8way
-.type cast6_ecb_enc_8way,@function;
-
-cast6_ecb_enc_8way:
+ENTRY(cast6_ecb_enc_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -362,12 +357,9 @@ cast6_ecb_enc_8way:
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
+ENDPROC(cast6_ecb_enc_8way)
-.align 8
-.global cast6_ecb_dec_8way
-.type cast6_ecb_dec_8way,@function;
-
-cast6_ecb_dec_8way:
+ENTRY(cast6_ecb_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -383,12 +375,9 @@ cast6_ecb_dec_8way:
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
+ENDPROC(cast6_ecb_dec_8way)
-.align 8
-.global cast6_cbc_dec_8way
-.type cast6_cbc_dec_8way,@function;
-
-cast6_cbc_dec_8way:
+ENTRY(cast6_cbc_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -409,12 +398,9 @@ cast6_cbc_dec_8way:
popq %r12;
ret;
+ENDPROC(cast6_cbc_dec_8way)
-.align 8
-.global cast6_ctr_8way
-.type cast6_ctr_8way,@function;
-
-cast6_ctr_8way:
+ENTRY(cast6_ctr_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -437,3 +423,4 @@ cast6_ctr_8way:
popq %r12;
ret;
+ENDPROC(cast6_ctr_8way)
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
new file mode 100644
index 000000000000..c8335014a044
--- /dev/null
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -0,0 +1,246 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
+ * calculation.
+ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
+ * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
+ * at:
+ * http://www.intel.com/products/processor/manuals/
+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 2B: Instruction Set Reference, N-Z
+ *
+ * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
+ * Alexander Boyko <Alexander_Boyko@xyratex.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+
+.align 16
+/*
+ * [(x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
+ * #define CONSTANT_R1 0x154442bd4LL
+ *
+ * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
+ * #define CONSTANT_R2 0x1c6e41596LL
+ */
+.Lconstant_R2R1:
+ .octa 0x00000001c6e415960000000154442bd4
+/*
+ * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
+ * #define CONSTANT_R3 0x1751997d0LL
+ *
+ * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
+ * #define CONSTANT_R4 0x0ccaa009eLL
+ */
+.Lconstant_R4R3:
+ .octa 0x00000000ccaa009e00000001751997d0
+/*
+ * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
+ * #define CONSTANT_R5 0x163cd6124LL
+ */
+.Lconstant_R5:
+ .octa 0x00000000000000000000000163cd6124
+.Lconstant_mask32:
+ .octa 0x000000000000000000000000FFFFFFFF
+/*
+ * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
+ *
+ * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
+ * #define CONSTANT_RU 0x1F7011641LL
+ */
+.Lconstant_RUpoly:
+ .octa 0x00000001F701164100000001DB710641
+
+#define CONSTANT %xmm0
+
+#ifdef __x86_64__
+#define BUF %rdi
+#define LEN %rsi
+#define CRC %edx
+#else
+#define BUF %eax
+#define LEN %edx
+#define CRC %ecx
+#endif
+
+
+
+.text
+/**
+ * Calculate crc32
+ * BUF - buffer (16 bytes aligned)
+ * LEN - sizeof buffer (16 bytes aligned), LEN should be greater than 63
+ * CRC - initial crc32
+ * return %eax crc32
+ * uint crc32_pclmul_le_16(unsigned char const *buffer,
+ * size_t len, uint crc32)
+ */
+.globl crc32_pclmul_le_16
+.align 4, 0x90
+crc32_pclmul_le_16:/* buffer and buffer size are 16 bytes aligned */
+ movdqa (BUF), %xmm1
+ movdqa 0x10(BUF), %xmm2
+ movdqa 0x20(BUF), %xmm3
+ movdqa 0x30(BUF), %xmm4
+ movd CRC, CONSTANT
+ pxor CONSTANT, %xmm1
+ sub $0x40, LEN
+ add $0x40, BUF
+#ifndef __x86_64__
+ /* This is for position independent code(-fPIC) support for 32bit */
+ call delta
+delta:
+ pop %ecx
+#endif
+ cmp $0x40, LEN
+ jb less_64
+
+#ifdef __x86_64__
+ movdqa .Lconstant_R2R1(%rip), CONSTANT
+#else
+ movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+#endif
+
+loop_64:/* 64 bytes Full cache line folding */
+ prefetchnta 0x40(BUF)
+ movdqa %xmm1, %xmm5
+ movdqa %xmm2, %xmm6
+ movdqa %xmm3, %xmm7
+#ifdef __x86_64__
+ movdqa %xmm4, %xmm8
+#endif
+ PCLMULQDQ 00, CONSTANT, %xmm1
+ PCLMULQDQ 00, CONSTANT, %xmm2
+ PCLMULQDQ 00, CONSTANT, %xmm3
+#ifdef __x86_64__
+ PCLMULQDQ 00, CONSTANT, %xmm4
+#endif
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ PCLMULQDQ 0x11, CONSTANT, %xmm6
+ PCLMULQDQ 0x11, CONSTANT, %xmm7
+#ifdef __x86_64__
+ PCLMULQDQ 0x11, CONSTANT, %xmm8
+#endif
+ pxor %xmm5, %xmm1
+ pxor %xmm6, %xmm2
+ pxor %xmm7, %xmm3
+#ifdef __x86_64__
+ pxor %xmm8, %xmm4
+#else
+ /* xmm8 unsupported for x32 */
+ movdqa %xmm4, %xmm5
+ PCLMULQDQ 00, CONSTANT, %xmm4
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pxor %xmm5, %xmm4
+#endif
+
+ pxor (BUF), %xmm1
+ pxor 0x10(BUF), %xmm2
+ pxor 0x20(BUF), %xmm3
+ pxor 0x30(BUF), %xmm4
+
+ sub $0x40, LEN
+ add $0x40, BUF
+ cmp $0x40, LEN
+ jge loop_64
+less_64:/* Folding cache line into 128bit */
+#ifdef __x86_64__
+ movdqa .Lconstant_R4R3(%rip), CONSTANT
+#else
+ movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT
+#endif
+ prefetchnta (BUF)
+
+ movdqa %xmm1, %xmm5
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pxor %xmm5, %xmm1
+ pxor %xmm2, %xmm1
+
+ movdqa %xmm1, %xmm5
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pxor %xmm5, %xmm1
+ pxor %xmm3, %xmm1
+
+ movdqa %xmm1, %xmm5
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pxor %xmm5, %xmm1
+ pxor %xmm4, %xmm1
+
+ cmp $0x10, LEN
+ jb fold_64
+loop_16:/* Folding rest buffer into 128bit */
+ movdqa %xmm1, %xmm5
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pxor %xmm5, %xmm1
+ pxor (BUF), %xmm1
+ sub $0x10, LEN
+ add $0x10, BUF
+ cmp $0x10, LEN
+ jge loop_16
+
+fold_64:
+ /* perform the last 64 bit fold, also adds 32 zeroes
+ * to the input stream */
+ PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
+ psrldq $0x08, %xmm1
+ pxor CONSTANT, %xmm1
+
+ /* final 32-bit fold */
+ movdqa %xmm1, %xmm2
+#ifdef __x86_64__
+ movdqa .Lconstant_R5(%rip), CONSTANT
+ movdqa .Lconstant_mask32(%rip), %xmm3
+#else
+ movdqa .Lconstant_R5 - delta(%ecx), CONSTANT
+ movdqa .Lconstant_mask32 - delta(%ecx), %xmm3
+#endif
+ psrldq $0x04, %xmm2
+ pand %xmm3, %xmm1
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pxor %xmm2, %xmm1
+
+ /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
+#ifdef __x86_64__
+ movdqa .Lconstant_RUpoly(%rip), CONSTANT
+#else
+ movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT
+#endif
+ movdqa %xmm1, %xmm2
+ pand %xmm3, %xmm1
+ PCLMULQDQ 0x10, CONSTANT, %xmm1
+ pand %xmm3, %xmm1
+ PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pxor %xmm2, %xmm1
+ pextrd $0x01, %xmm1, %eax
+
+ ret
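The constant table above drives the carry-less-multiply folding, but the value ultimately produced is the ordinary bit-reflected CRC32 over polynomial 0xEDB88320, the same thing crc32_le() computes bit by bit. A plain C reference is handy for checking the assembly on small inputs (a sketch, not part of the patch; like crc32_le(), it applies no initial or final XOR itself):

    #include <stddef.h>
    #include <stdint.h>

    /* bit-reflected (LSB-first) CRC32, polynomial 0xEDB88320 */
    static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *p, size_t len)
    {
            while (len--) {
                    int i;

                    crc ^= *p++;
                    for (i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
            }
            return crc;
    }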
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
new file mode 100644
index 000000000000..9d014a74ef96
--- /dev/null
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -0,0 +1,201 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+#include <asm/i387.h>
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+#define PCLMUL_MIN_LEN 64L /* minimum size of buffer
+ * for crc32_pclmul_le_16 */
+#define SCALE_F 16L /* size of xmm register */
+#define SCALE_F_MASK (SCALE_F - 1)
+
+u32 crc32_pclmul_le_16(unsigned char const *buffer, size_t len, u32 crc32);
+
+static u32 __attribute__((pure))
+ crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
+{
+ unsigned int iquotient;
+ unsigned int iremainder;
+ unsigned int prealign;
+
+ if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
+ return crc32_le(crc, p, len);
+
+ if ((long)p & SCALE_F_MASK) {
+ /* align p to 16 byte */
+ prealign = SCALE_F - ((long)p & SCALE_F_MASK);
+
+ crc = crc32_le(crc, p, prealign);
+ len -= prealign;
+ p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
+ ~SCALE_F_MASK);
+ }
+ iquotient = len & (~SCALE_F_MASK);
+ iremainder = len & SCALE_F_MASK;
+
+ kernel_fpu_begin();
+ crc = crc32_pclmul_le_16(p, iquotient, crc);
+ kernel_fpu_end();
+
+ if (iremainder)
+ crc = crc32_le(crc, p + iquotient, iremainder);
+
+ return crc;
+}
+
+static int crc32_pclmul_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 0;
+
+ return 0;
+}
+
+static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32_pclmul_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = *mctx;
+
+ return 0;
+}
+
+static int crc32_pclmul_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = crc32_pclmul_le(*crcp, data, len);
+ return 0;
+}
+
+/* No final XOR 0xFFFFFFFF, like crc32_le */
+static int __crc32_pclmul_finup(u32 *crcp, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__le32 *)out = cpu_to_le32(crc32_pclmul_le(*crcp, data, len));
+ return 0;
+}
+
+static int crc32_pclmul_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32_pclmul_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32_pclmul_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *(__le32 *)out = cpu_to_le32p(crcp);
+ return 0;
+}
+
+static int crc32_pclmul_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32_pclmul_finup(crypto_shash_ctx(desc->tfm), data, len,
+ out);
+}
+
+static struct shash_alg alg = {
+ .setkey = crc32_pclmul_setkey,
+ .init = crc32_pclmul_init,
+ .update = crc32_pclmul_update,
+ .final = crc32_pclmul_final,
+ .finup = crc32_pclmul_finup,
+ .digest = crc32_pclmul_digest,
+ .descsize = sizeof(u32),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-pclmul",
+ .cra_priority = 200,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(u32),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_pclmul_cra_init,
+ }
+};
+
+static const struct x86_cpu_id crc32pclmul_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, crc32pclmul_cpu_id);
+
+
+static int __init crc32_pclmul_mod_init(void)
+{
+
+ if (!x86_match_cpu(crc32pclmul_cpu_id)) {
+ pr_info("PCLMULQDQ-NI instructions are not detected.\n");
+ return -ENODEV;
+ }
+ return crypto_register_shash(&alg);
+}
+
+static void __exit crc32_pclmul_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(crc32_pclmul_mod_init);
+module_exit(crc32_pclmul_mod_fini);
+
+MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("crc32");
+MODULE_ALIAS("crc32-pclmul");
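The glue wrapper only takes the SIMD path when it pays off: buffers shorter than PCLMUL_MIN_LEN + SCALE_F_MASK bytes, or contexts where the FPU is unusable, fall back to crc32_le() entirely. Otherwise the head is consumed with crc32_le() until the pointer is 16-byte aligned, the aligned middle (a multiple of 16 bytes) runs through crc32_pclmul_le_16() between kernel_fpu_begin() and kernel_fpu_end(), and the tail is finished with crc32_le() again. The split itself is plain mask arithmetic, for example:

    /* worked example of the split in crc32_pclmul_le():
     * buffer start address with (addr & 15) == 10, len == 1000 */
    unsigned int prealign   = 16 - 10;        /*   6 bytes via crc32_le()           */
    unsigned int iquotient  = 994 & ~15u;     /* 992 bytes via crc32_pclmul_le_16() */
    unsigned int iremainder = 994 & 15u;      /*   2 bytes via crc32_le()           */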
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 93c6d39237ac..cf1a7ec4cc3a 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -42,6 +42,8 @@
* SOFTWARE.
*/
+#include <linux/linkage.h>
+
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
.macro LABEL prefix n
@@ -68,8 +70,7 @@
# unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
-.global crc_pcl
-crc_pcl:
+ENTRY(crc_pcl)
#define bufp %rdi
#define bufp_dw %edi
#define bufp_w %di
@@ -323,6 +324,9 @@ JMPTBL_ENTRY %i
.noaltmacro
i=i+1
.endr
+
+ENDPROC(crc_pcl)
+
################################################################
## PCLMULQDQ tables
## Table is 128 entries x 2 quad words each
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 1eb7f90cb7b9..586f41aac361 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -94,6 +94,7 @@ __clmul_gf128mul_ble:
pxor T2, T1
pxor T1, DATA
ret
+ENDPROC(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const be128 *shash) */
ENTRY(clmul_ghash_mul)
@@ -105,6 +106,7 @@ ENTRY(clmul_ghash_mul)
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
ret
+ENDPROC(clmul_ghash_mul)
/*
* void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
@@ -131,6 +133,7 @@ ENTRY(clmul_ghash_update)
movups DATA, (%rdi)
.Lupdate_just_ret:
ret
+ENDPROC(clmul_ghash_update)
/*
* void clmul_ghash_setkey(be128 *shash, const u8 *key);
@@ -155,3 +158,4 @@ ENTRY(clmul_ghash_setkey)
pxor %xmm1, %xmm0
movups %xmm0, (%rdi)
ret
+ENDPROC(clmul_ghash_setkey)
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
index 72eb306680b2..329452b8f794 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -2,11 +2,12 @@
# D. J. Bernstein
# Public domain.
-# enter ECRYPT_encrypt_bytes
+#include <linux/linkage.h>
+
.text
-.p2align 5
-.globl ECRYPT_encrypt_bytes
-ECRYPT_encrypt_bytes:
+
+# enter salsa20_encrypt_bytes
+ENTRY(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -933,11 +934,10 @@ ECRYPT_encrypt_bytes:
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-# enter ECRYPT_keysetup
-.text
-.p2align 5
-.globl ECRYPT_keysetup
-ECRYPT_keysetup:
+ENDPROC(salsa20_encrypt_bytes)
+
+# enter salsa20_keysetup
+ENTRY(salsa20_keysetup)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -1060,11 +1060,10 @@ ECRYPT_keysetup:
# leave
add %eax,%esp
ret
-# enter ECRYPT_ivsetup
-.text
-.p2align 5
-.globl ECRYPT_ivsetup
-ECRYPT_ivsetup:
+ENDPROC(salsa20_keysetup)
+
+# enter salsa20_ivsetup
+ENTRY(salsa20_ivsetup)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -1112,3 +1111,4 @@ ECRYPT_ivsetup:
# leave
add %eax,%esp
ret
+ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
index 6214a9b09706..9279e0b2d60e 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -1,8 +1,7 @@
-# enter ECRYPT_encrypt_bytes
-.text
-.p2align 5
-.globl ECRYPT_encrypt_bytes
-ECRYPT_encrypt_bytes:
+#include <linux/linkage.h>
+
+# enter salsa20_encrypt_bytes
+ENTRY(salsa20_encrypt_bytes)
mov %rsp,%r11
and $31,%r11
add $256,%r11
@@ -802,11 +801,10 @@ ECRYPT_encrypt_bytes:
# comment:fp stack unchanged by jump
# goto bytesatleast1
jmp ._bytesatleast1
-# enter ECRYPT_keysetup
-.text
-.p2align 5
-.globl ECRYPT_keysetup
-ECRYPT_keysetup:
+ENDPROC(salsa20_encrypt_bytes)
+
+# enter salsa20_keysetup
+ENTRY(salsa20_keysetup)
mov %rsp,%r11
and $31,%r11
add $256,%r11
@@ -892,11 +890,10 @@ ECRYPT_keysetup:
mov %rdi,%rax
mov %rsi,%rdx
ret
-# enter ECRYPT_ivsetup
-.text
-.p2align 5
-.globl ECRYPT_ivsetup
-ECRYPT_ivsetup:
+ENDPROC(salsa20_keysetup)
+
+# enter salsa20_ivsetup
+ENTRY(salsa20_ivsetup)
mov %rsp,%r11
and $31,%r11
add $256,%r11
@@ -918,3 +915,4 @@ ECRYPT_ivsetup:
mov %rdi,%rax
mov %rsi,%rdx
ret
+ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index a3a3c0205c16..5e8e67739bb5 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -26,11 +26,6 @@
#define SALSA20_MIN_KEY_SIZE 16U
#define SALSA20_MAX_KEY_SIZE 32U
-// use the ECRYPT_* function names
-#define salsa20_keysetup ECRYPT_keysetup
-#define salsa20_ivsetup ECRYPT_ivsetup
-#define salsa20_encrypt_bytes ECRYPT_encrypt_bytes
-
struct salsa20_ctx
{
u32 input[16];
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 02b0e9fe997c..43c938612b74 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*
*/
+#include <linux/linkage.h>
#include "glue_helper-asm-avx.S"
.file "serpent-avx-x86_64-asm_64.S"
@@ -566,8 +567,6 @@
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
.align 8
-.type __serpent_enc_blk8_avx,@function;
-
__serpent_enc_blk8_avx:
/* input:
* %rdi: ctx, CTX
@@ -619,10 +618,9 @@ __serpent_enc_blk8_avx:
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
+ENDPROC(__serpent_enc_blk8_avx)
.align 8
-.type __serpent_dec_blk8_avx,@function;
-
__serpent_dec_blk8_avx:
/* input:
* %rdi: ctx, CTX
@@ -674,12 +672,9 @@ __serpent_dec_blk8_avx:
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
+ENDPROC(__serpent_dec_blk8_avx)
-.align 8
-.global serpent_ecb_enc_8way_avx
-.type serpent_ecb_enc_8way_avx,@function;
-
-serpent_ecb_enc_8way_avx:
+ENTRY(serpent_ecb_enc_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -693,12 +688,9 @@ serpent_ecb_enc_8way_avx:
store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
+ENDPROC(serpent_ecb_enc_8way_avx)
-.align 8
-.global serpent_ecb_dec_8way_avx
-.type serpent_ecb_dec_8way_avx,@function;
-
-serpent_ecb_dec_8way_avx:
+ENTRY(serpent_ecb_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -712,12 +704,9 @@ serpent_ecb_dec_8way_avx:
store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
ret;
+ENDPROC(serpent_ecb_dec_8way_avx)
-.align 8
-.global serpent_cbc_dec_8way_avx
-.type serpent_cbc_dec_8way_avx,@function;
-
-serpent_cbc_dec_8way_avx:
+ENTRY(serpent_cbc_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -731,12 +720,9 @@ serpent_cbc_dec_8way_avx:
store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
ret;
+ENDPROC(serpent_cbc_dec_8way_avx)
-.align 8
-.global serpent_ctr_8way_avx
-.type serpent_ctr_8way_avx,@function;
-
-serpent_ctr_8way_avx:
+ENTRY(serpent_ctr_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -752,3 +738,4 @@ serpent_ctr_8way_avx:
store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
+ENDPROC(serpent_ctr_8way_avx)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index c00053d42f99..d348f1553a79 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -24,6 +24,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "serpent-sse2-i586-asm_32.S"
.text
@@ -510,11 +512,7 @@
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
-.align 8
-.global __serpent_enc_blk_4way
-.type __serpent_enc_blk_4way,@function;
-
-__serpent_enc_blk_4way:
+ENTRY(__serpent_enc_blk_4way)
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
@@ -566,22 +564,19 @@ __serpent_enc_blk_4way:
movl arg_dst(%esp), %eax;
cmpb $0, arg_xor(%esp);
- jnz __enc_xor4;
+ jnz .L__enc_xor4;
write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
-__enc_xor4:
+.L__enc_xor4:
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
+ENDPROC(__serpent_enc_blk_4way)
-.align 8
-.global serpent_dec_blk_4way
-.type serpent_dec_blk_4way,@function;
-
-serpent_dec_blk_4way:
+ENTRY(serpent_dec_blk_4way)
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
@@ -633,3 +628,4 @@ serpent_dec_blk_4way:
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
ret;
+ENDPROC(serpent_dec_blk_4way)
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
index 3ee1ff04d3e9..acc066c7c6b2 100644
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -24,6 +24,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "serpent-sse2-x86_64-asm_64.S"
.text
@@ -632,11 +634,7 @@
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
-.align 8
-.global __serpent_enc_blk_8way
-.type __serpent_enc_blk_8way,@function;
-
-__serpent_enc_blk_8way:
+ENTRY(__serpent_enc_blk_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -687,24 +685,21 @@ __serpent_enc_blk_8way:
leaq (4*4*4)(%rsi), %rax;
testb %cl, %cl;
- jnz __enc_xor8;
+ jnz .L__enc_xor8;
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
-__enc_xor8:
+.L__enc_xor8:
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
+ENDPROC(__serpent_enc_blk_8way)
-.align 8
-.global serpent_dec_blk_8way
-.type serpent_dec_blk_8way,@function;
-
-serpent_dec_blk_8way:
+ENTRY(serpent_dec_blk_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -756,3 +751,4 @@ serpent_dec_blk_8way:
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
+ENDPROC(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 49d6987a73d9..a4109506a5e8 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -28,6 +28,8 @@
* (at your option) any later version.
*/
+#include <linux/linkage.h>
+
#define CTX %rdi // arg1
#define BUF %rsi // arg2
#define CNT %rdx // arg3
@@ -69,10 +71,8 @@
* param: function's name
*/
.macro SHA1_VECTOR_ASM name
- .global \name
- .type \name, @function
- .align 32
-\name:
+ ENTRY(\name)
+
push %rbx
push %rbp
push %r12
@@ -106,7 +106,7 @@
pop %rbx
ret
- .size \name, .-\name
+ ENDPROC(\name)
.endm
/*
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index ebac16bfa830..8d3e113b2c95 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -23,6 +23,7 @@
*
*/
+#include <linux/linkage.h>
#include "glue_helper-asm-avx.S"
.file "twofish-avx-x86_64-asm_64.S"
@@ -243,8 +244,6 @@
vpxor x3, wkey, x3;
.align 8
-.type __twofish_enc_blk8,@function;
-
__twofish_enc_blk8:
/* input:
* %rdi: ctx, CTX
@@ -284,10 +283,9 @@ __twofish_enc_blk8:
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
+ENDPROC(__twofish_enc_blk8)
.align 8
-.type __twofish_dec_blk8,@function;
-
__twofish_dec_blk8:
/* input:
* %rdi: ctx, CTX
@@ -325,12 +323,9 @@ __twofish_dec_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
ret;
+ENDPROC(__twofish_dec_blk8)
-.align 8
-.global twofish_ecb_enc_8way
-.type twofish_ecb_enc_8way,@function;
-
-twofish_ecb_enc_8way:
+ENTRY(twofish_ecb_enc_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -346,12 +341,9 @@ twofish_ecb_enc_8way:
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
ret;
+ENDPROC(twofish_ecb_enc_8way)
-.align 8
-.global twofish_ecb_dec_8way
-.type twofish_ecb_dec_8way,@function;
-
-twofish_ecb_dec_8way:
+ENTRY(twofish_ecb_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -367,12 +359,9 @@ twofish_ecb_dec_8way:
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
+ENDPROC(twofish_ecb_dec_8way)
-.align 8
-.global twofish_cbc_dec_8way
-.type twofish_cbc_dec_8way,@function;
-
-twofish_cbc_dec_8way:
+ENTRY(twofish_cbc_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -393,12 +382,9 @@ twofish_cbc_dec_8way:
popq %r12;
ret;
+ENDPROC(twofish_cbc_dec_8way)
-.align 8
-.global twofish_ctr_8way
-.type twofish_ctr_8way,@function;
-
-twofish_ctr_8way:
+ENTRY(twofish_ctr_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -421,3 +407,4 @@ twofish_ctr_8way:
popq %r12;
ret;
+ENDPROC(twofish_ctr_8way)
diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
index 658af4bb35c9..694ea4587ba7 100644
--- a/arch/x86/crypto/twofish-i586-asm_32.S
+++ b/arch/x86/crypto/twofish-i586-asm_32.S
@@ -20,6 +20,7 @@
.file "twofish-i586-asm.S"
.text
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
/* return address at 0 */
@@ -219,11 +220,7 @@
xor %esi, d ## D;\
ror $1, d ## D;
-.align 4
-.global twofish_enc_blk
-.global twofish_dec_blk
-
-twofish_enc_blk:
+ENTRY(twofish_enc_blk)
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
@@ -277,8 +274,9 @@ twofish_enc_blk:
pop %ebp
mov $1, %eax
ret
+ENDPROC(twofish_enc_blk)
-twofish_dec_blk:
+ENTRY(twofish_dec_blk)
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
@@ -333,3 +331,4 @@ twofish_dec_blk:
pop %ebp
mov $1, %eax
ret
+ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index 5b012a2c5119..1c3b7ceb36d2 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -20,6 +20,8 @@
*
*/
+#include <linux/linkage.h>
+
.file "twofish-x86_64-asm-3way.S"
.text
@@ -214,11 +216,7 @@
rorq $32, RAB2; \
outunpack3(mov, RIO, 2, RAB, 2);
-.align 8
-.global __twofish_enc_blk_3way
-.type __twofish_enc_blk_3way,@function;
-
-__twofish_enc_blk_3way:
+ENTRY(__twofish_enc_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -250,7 +248,7 @@ __twofish_enc_blk_3way:
popq %rbp; /* bool xor */
testb %bpl, %bpl;
- jnz __enc_xor3;
+ jnz .L__enc_xor3;
outunpack_enc3(mov);
@@ -262,7 +260,7 @@ __twofish_enc_blk_3way:
popq %r15;
ret;
-__enc_xor3:
+.L__enc_xor3:
outunpack_enc3(xor);
popq %rbx;
@@ -272,11 +270,9 @@ __enc_xor3:
popq %r14;
popq %r15;
ret;
+ENDPROC(__twofish_enc_blk_3way)
-.global twofish_dec_blk_3way
-.type twofish_dec_blk_3way,@function;
-
-twofish_dec_blk_3way:
+ENTRY(twofish_dec_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -313,4 +309,4 @@ twofish_dec_blk_3way:
popq %r14;
popq %r15;
ret;
-
+ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index 7bcf3fcc3668..a039d21986a2 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -20,6 +20,7 @@
.file "twofish-x86_64-asm.S"
.text
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#define a_offset 0
@@ -214,11 +215,7 @@
xor %r8d, d ## D;\
ror $1, d ## D;
-.align 8
-.global twofish_enc_blk
-.global twofish_dec_blk
-
-twofish_enc_blk:
+ENTRY(twofish_enc_blk)
pushq R1
/* %rdi contains the ctx address */
@@ -269,8 +266,9 @@ twofish_enc_blk:
popq R1
movq $1,%rax
ret
+ENDPROC(twofish_enc_blk)
-twofish_dec_blk:
+ENTRY(twofish_dec_blk)
pushq R1
/* %rdi contains the ctx address */
@@ -320,3 +318,4 @@ twofish_dec_blk:
popq R1
movq $1,%rax
ret
+ENDPROC(twofish_dec_blk)
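For reference, ENTRY()/ENDPROC() come from <linux/linkage.h>, which is why each converted assembly file now pulls in that header. A rough sketch of the generic definitions of this era (the exact ALIGN expansion is per-architecture, so treat this as approximate rather than authoritative):

	#define ENTRY(name)			\
		.globl name;			\
		ALIGN;				\
		name:

	#define END(name)			\
		.size name, .-name

	#define ENDPROC(name)			\
		.type name, @function;		\
		END(name)

The net effect of the conversions above is that every global assembly routine gets consistent alignment plus .globl/.type/.size annotations, instead of each crypto file open-coding them.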
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a703af19c281..03abf9b70011 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -271,7 +271,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
N_TRSIZE(ex) || N_DRSIZE(ex) ||
- i_size_read(bprm->file->f_path.dentry->d_inode) <
+ i_size_read(file_inode(bprm->file)) <
ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
return -ENOEXEC;
}
@@ -425,12 +425,10 @@ beyond_if:
static int load_aout_library(struct file *file)
{
- struct inode *inode;
unsigned long bss, start_addr, len, error;
int retval;
struct exec ex;
- inode = file->f_path.dentry->d_inode;
retval = -ENOEXEC;
error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
@@ -440,7 +438,7 @@ static int load_aout_library(struct file *file)
/* We come in here for the regular a.out style of shared libraries */
if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
- i_size_read(inode) <
+ i_size_read(file_inode(file)) <
ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
goto out;
}
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 592f5a9a9c0e..ad7a20cbc699 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -218,11 +218,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
-long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
-{
- return sys_lseek(fd, offset, whence);
-}
-
long sys32_kill(int pid, int sig)
{
return sys_kill(pid, sig);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 28677c55113f..60c89f30c727 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,7 +102,14 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
-#ifndef CONFIG_EFI
+#ifdef CONFIG_EFI
+
+static inline bool efi_is_native(void)
+{
+ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
*/
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 86cb51e1ca96..0525a8bdf65d 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -72,4 +72,28 @@ int ftrace_int3_handler(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers,
+ * this screws up the trace output when tracing an ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ if (is_compat_task())
+ return true;
+ return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
#endif /* _ASM_X86_FTRACE_H */
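A minimal sketch of how a generic syscall-trace path could consume this hook; the surrounding function is illustrative only, not part of this patch (only arch_trace_is_compat_syscall() and ARCH_TRACE_IGNORE_COMPAT_SYSCALLS come from the change above):

	#ifndef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
	/* Architectures without the quirk trace every syscall. */
	static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
	{
		return false;
	}
	#endif

	static void sketch_trace_sys_enter(struct pt_regs *regs, long id)
	{
		/* ia32 syscall numbers don't index the x86_64 table, so
		 * skip compat tasks instead of reporting bogus names. */
		if (arch_trace_is_compat_syscall(regs))
			return;

		/* ... look up "id" in the native syscall metadata ... */
	}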
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dc87b65e9c3a..635a74d22409 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -33,10 +33,10 @@
#define KVM_MAX_VCPUS 254
#define KVM_SOFT_MAX_VCPUS 160
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS 125
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#define KVM_MMIO_SIZE 16
@@ -219,11 +219,6 @@ struct kvm_mmu_page {
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- /*
- * One bit set per slot which has memory
- * in this shadow page.
- */
- DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
@@ -502,6 +497,13 @@ struct kvm_vcpu_arch {
u64 msr_val;
struct gfn_to_hva_cache data;
} pv_eoi;
+
+ /*
+ * Indicates whether the access faulted on its page table in the
+ * guest; it is set while fixing a page fault and is used to detect
+ * unhandleable instructions.
+ */
+ bool write_fault_to_shadow_pgtable;
};
struct kvm_lpage_info {
@@ -697,6 +699,11 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+ int (*vm_has_apicv)(struct kvm *kvm);
+ void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+ void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
@@ -991,6 +998,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 65231e173baf..695399f2d5eb 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -27,7 +27,7 @@ static inline bool kvm_check_and_clear_guest_paused(void)
*
* Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
* The hypercall number should be placed in rax and the return value will be
- * placed in rax. No other registers will be clobbered unless explicited
+ * placed in rax. No other registers will be clobbered unless explicitly
* noted by the particular hypercall.
*/
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index c28fd02f4bf7..d9e9e6c7ed32 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -14,6 +14,9 @@
struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
+#ifdef CONFIG_ACPI
+ void *acpi; /* ACPI-specific data */
+#endif
#ifdef CONFIG_X86_64
void *iommu; /* IOMMU private data */
#endif
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 747e5a38b590..fa1195dae425 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -54,7 +54,6 @@ void pcibios_set_cache_line_size(void);
/* pci-pc.c */
extern int pcibios_last_bus;
-extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
void pcibios_scan_specific_bus(int busn);
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 0218d917f509..8459efc39686 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -43,7 +43,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
asmlinkage long sys32_personality(unsigned long);
asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
-long sys32_lseek(unsigned int, int, unsigned int);
long sys32_kill(int, int);
long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
long sys32_vm86_warning(void);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d946e63ee82..2cd056e3ada3 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,7 +20,6 @@
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
-#include <asm/ftrace.h>
#include <linux/atomic.h>
struct thread_info {
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 235b49fa554b..b6fbf860e398 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -57,9 +57,12 @@
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define SECONDARY_EXEC_RDTSCP 0x00000008
+#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
+#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
+#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
@@ -97,6 +100,7 @@ enum vmcs_field {
GUEST_GS_SELECTOR = 0x0000080a,
GUEST_LDTR_SELECTOR = 0x0000080c,
GUEST_TR_SELECTOR = 0x0000080e,
+ GUEST_INTR_STATUS = 0x00000810,
HOST_ES_SELECTOR = 0x00000c00,
HOST_CS_SELECTOR = 0x00000c02,
HOST_SS_SELECTOR = 0x00000c04,
@@ -124,6 +128,14 @@ enum vmcs_field {
APIC_ACCESS_ADDR_HIGH = 0x00002015,
EPT_POINTER = 0x0000201a,
EPT_POINTER_HIGH = 0x0000201b,
+ EOI_EXIT_BITMAP0 = 0x0000201c,
+ EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
+ EOI_EXIT_BITMAP1 = 0x0000201e,
+ EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
+ EOI_EXIT_BITMAP2 = 0x00002020,
+ EOI_EXIT_BITMAP2_HIGH = 0x00002021,
+ EOI_EXIT_BITMAP3 = 0x00002022,
+ EOI_EXIT_BITMAP3_HIGH = 0x00002023,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
VMCS_LINK_POINTER = 0x00002800,
@@ -346,9 +358,9 @@ enum vmcs_field {
#define AR_RESERVD_MASK 0xfffe0f00
-#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0)
-#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1)
-#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2)
+#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
+#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
+#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
#define VMX_NR_VPIDS (1 << 16)
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index cc146d51449e..ca842f2769ef 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -16,4 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->flags);
}
+/* No need for a barrier -- XCHG is a barrier on x86. */
+#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 472b9b783019..6aef9fbc09b7 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -212,4 +212,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
+#define xen_remap(cookie, size) ioremap((cookie), (size));
+
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 979d03bce135..2871fccfee68 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -62,10 +62,12 @@
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_EOI_INDUCED 45
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
#define VMX_EXIT_REASONS \
@@ -103,7 +105,12 @@
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
- { EXIT_REASON_WBINVD, "WBINVD" }
+ { EXIT_REASON_WBINVD, "WBINVD" }, \
+ { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
+ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
+ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_INVD, "INVD" }, \
+ { EXIT_REASON_INVPCID, "INVPCID" }
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a5b4dce1b7ac..904611bf0e5a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
{
if (config_enabled(CONFIG_X86_32) && !arg)
force_enable_local_apic = 1;
- else if (!strncmp(arg, "notscdeadline", 13))
+ else if (arg && !strncmp(arg, "notscdeadline", 13))
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return 0;
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index edd77e7508b3..fa96eb0d02fb 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -219,8 +219,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
*/
WARN_ONCE(1, "WARNING: This combination of AMD"
" processors is not suitable for SMP.\n");
- if (!test_taint(TAINT_UNSAFE_SMP))
- add_taint(TAINT_UNSAFE_SMP);
+ add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
valid_k7:
;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index fc7608a89d93..7bc126346ace 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1082,7 +1082,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
/*
* Set taint even when machine check was not enabled.
*/
- add_taint(TAINT_MACHINE_CHECK);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
severity = mce_severity(&m, cfg->tolerant, NULL);
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 2d5454cd2c4f..1c044b1ccc59 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -33,7 +33,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
smp_processor_id());
}
- add_taint(TAINT_MACHINE_CHECK);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
}
/* Set up machine check reporting for processors with Intel style MCE: */
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 2d7998fb628c..e9a701aecaa1 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -15,7 +15,7 @@
static void winchip_machine_check(struct pt_regs *regs, long error_code)
{
printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
- add_taint(TAINT_MACHINE_CHECK);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
}
/* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index e9fe907cd249..fa72a39e5d46 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -542,7 +542,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
if (tmp != mask_lo) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND);
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
mask_lo = tmp;
}
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 4914e94ad6e8..529c8931fc02 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -107,6 +107,27 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
+{
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
+ INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
+ INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ EVENT_CONSTRAINT_END
+};
+
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
@@ -2095,7 +2116,7 @@ __init int intel_pmu_init(void)
intel_pmu_lbr_init_snb();
- x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 60c78917190c..1e4dbcfe6d31 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -85,7 +85,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
{
char __user *tmp = buf;
struct cpuid_regs cmd;
- int cpu = iminor(file->f_path.dentry->d_inode);
+ int cpu = iminor(file_inode(file));
u64 pos = *ppos;
ssize_t bytes = 0;
int err = 0;
@@ -116,7 +116,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
unsigned int cpu;
struct cpuinfo_x86 *c;
- cpu = iminor(file->f_path.dentry->d_inode);
+ cpu = iminor(file_inode(file));
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index ae42418bc50f..c8797d55b245 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -232,7 +232,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
bust_spinlocks(0);
die_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 48d9d4ea1020..992f442ca155 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -5,8 +5,6 @@
#include <asm/setup.h>
#include <asm/bios_ebda.h>
-#define BIOS_LOWMEM_KILOBYTES 0x413
-
/*
* The BIOS places the EBDA/XBDA at the top of conventional
* memory, and usually decreases the reported amount of
@@ -16,17 +14,30 @@
* chipset: reserve a page before VGA to prevent PCI prefetch
* into it (errata #56). Usually the page is reserved anyways,
* unless you have no PS/2 mouse plugged in.
+ *
+ * This function is deliberately very conservative. Losing
+ * memory in the bottom megabyte is rarely a problem, as long
+ * as we have enough memory to install the trampoline. Using
+ * memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem.
*/
+
+#define BIOS_LOWMEM_KILOBYTES 0x413
+#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
+#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
+
void __init reserve_ebda_region(void)
{
unsigned int lowmem, ebda_addr;
- /* To determine the position of the EBDA and the */
- /* end of conventional memory, we need to look at */
- /* the BIOS data area. In a paravirtual environment */
- /* that area is absent. We'll just have to assume */
- /* that the paravirt case can handle memory setup */
- /* correctly, without our help. */
+ /*
+ * To determine the position of the EBDA and the
+ * end of conventional memory, we need to look at
+ * the BIOS data area. In a paravirtual environment
+ * that area is absent. We'll just have to assume
+ * that the paravirt case can handle memory setup
+ * correctly, without our help.
+ */
if (paravirt_enabled())
return;
@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
/* start of EBDA area */
ebda_addr = get_bios_ebda();
- /* Fixup: bios puts an EBDA in the top 64K segment */
- /* of conventional memory, but does not adjust lowmem. */
- if ((lowmem - ebda_addr) <= 0x10000)
- lowmem = ebda_addr;
+ /*
+ * Note: some old Dells seem to need a 4k EBDA without
+ * reporting it, so just consider the memory above 0x9f000
+ * to be off limits (bugzilla 2990).
+ */
+
+ /* If the EBDA address is below 128K, assume it is bogus */
+ if (ebda_addr < INSANE_CUTOFF)
+ ebda_addr = LOWMEM_CAP;
- /* Fixup: bios does not report an EBDA at all. */
- /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
- if ((ebda_addr == 0) && (lowmem >= 0x9f000))
- lowmem = 0x9f000;
+ /* If lowmem is less than 128K, assume it is bogus */
+ if (lowmem < INSANE_CUTOFF)
+ lowmem = LOWMEM_CAP;
- /* Paranoia: should never happen, but... */
- if ((lowmem == 0) || (lowmem >= 0x100000))
- lowmem = 0x9f000;
+ /* Use the lower of the lowmem and EBDA markers as the cutoff */
+ lowmem = min(lowmem, ebda_addr);
+ lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
/* reserve all memory between lowmem and the 1MB mark */
memblock_reserve(lowmem, 0x100000 - lowmem);
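The rewritten reservation reduces to one clamp: treat an EBDA pointer or BIOS lowmem value below 128 KiB as bogus, take the lower of the two (never above 0x9f000), and reserve from there up to 1 MiB. A standalone sketch of that computation, using the constants introduced above:

	#define LOWMEM_CAP	0x9f000U	/* absolute maximum */
	#define INSANE_CUTOFF	0x20000U	/* below this = bogus */

	/* Returns the cutoff address; the caller reserves [cutoff, 1 MiB). */
	static unsigned int ebda_cutoff(unsigned int lowmem,
					unsigned int ebda_addr)
	{
		if (ebda_addr < INSANE_CUTOFF)
			ebda_addr = LOWMEM_CAP;
		if (lowmem < INSANE_CUTOFF)
			lowmem = LOWMEM_CAP;

		lowmem = lowmem < ebda_addr ? lowmem : ebda_addr;
		return lowmem < LOWMEM_CAP ? lowmem : LOWMEM_CAP;
	}

For example, a BIOS reporting 639 KiB of lowmem (0x9fc00) with the EBDA at 0x9fc00 ends up reserving [0x9f000, 0x100000), and a bogus zero EBDA pointer can no longer push the cutoff around: the reservation still starts at or below 0x9f000.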
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b7de3b25adb5..6859e9626442 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -48,7 +48,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
.globl startup_64
startup_64:
/*
- * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+ * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded an identity mapped page table
* for us. These identity mapped page tables map all of the
* kernel pages and possibly all of memory.
@@ -159,7 +159,7 @@ startup_64:
jmp 1f
ENTRY(secondary_startup_64)
/*
- * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+ * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded a mapped page table.
*
* %rsi holds a physical pointer to real_mode_data.
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
* will be the real return address, and all the rest will
* point to kretprobe_trampoline.
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 9f966dc0b9e4..0732f0089a3d 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -218,6 +218,9 @@ static void kvm_shutdown(void)
void __init kvmclock_init(void)
{
unsigned long mem;
+ int size;
+
+ size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
if (!kvm_para_available())
return;
@@ -231,16 +234,14 @@ void __init kvmclock_init(void)
printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
- mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
- PAGE_SIZE);
+ mem = memblock_alloc(size, PAGE_SIZE);
if (!mem)
return;
hv_clock = __va(mem);
if (kvm_register_clock("boot clock")) {
hv_clock = NULL;
- memblock_free(mem,
- sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
+ memblock_free(mem, size);
return;
}
pv_time_ops.sched_clock = kvm_clock_read;
@@ -275,7 +276,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
struct pvclock_vcpu_time_info *vcpu_time;
unsigned int size;
- size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+ size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
preempt_disable();
cpu = smp_processor_id();
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f84f5c57de35..60308053fdb2 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -509,3 +509,4 @@ void local_touch_nmi(void)
{
__this_cpu_write(last_nmi_rip, 0);
}
+EXPORT_SYMBOL_GPL(local_touch_nmi);
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 85c39590c1a4..2cb9470ea85b 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -185,7 +185,7 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
- __pa_symbol(i) + (idx*PAGE_SIZE),
+ __pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9c857f05cef0..84d32855f65c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1056,15 +1056,6 @@ void __init setup_arch(char **cmdline_p)
setup_bios_corruption_check();
#endif
- /*
- * In the memory hotplug case, the kernel needs info from SRAT to
- * determine which memory is hotpluggable before allocating memory
- * using memblock.
- */
- acpi_boot_table_init();
- early_acpi_boot_init();
- early_parse_srat();
-
#ifdef CONFIG_X86_32
printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
(max_pfn_mapped<<PAGE_SHIFT) - 1);
@@ -1110,6 +1101,10 @@ void __init setup_arch(char **cmdline_p)
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
+ acpi_boot_table_init();
+
+ early_acpi_boot_init();
+
initmem_init();
memblock_find_dma_reserve();
@@ -1196,8 +1191,7 @@ void __init setup_arch(char **cmdline_p)
* mismatched firmware/kernel architectures since there is no
* support for runtime services.
*/
- if (efi_enabled(EFI_BOOT) &&
- IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
+ if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
efi_unmap_memmap();
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a27e76371108..a335cc6cde72 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -24,6 +24,7 @@
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
+#include <linux/stringify.h>
#include "x86.h"
#include "tss.h"
@@ -43,7 +44,7 @@
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
-#define OpImm 12ull /* Sign extended immediate */
+#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
@@ -58,6 +59,7 @@
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
+#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
@@ -101,6 +103,7 @@
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
+#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcMask (OpMask << SrcShift)
@@ -113,6 +116,7 @@
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
+#define Escape (5<<15) /* Escape to coprocessor instruction */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
@@ -146,6 +150,8 @@
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
+#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
+#define NoWrite ((u64)1 << 45) /* No writeback */
#define X2(x...) x, x
#define X3(x...) X2(x), x
@@ -156,6 +162,27 @@
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
+#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
+#define FASTOP_SIZE 8
+
+/*
+ * fastop functions have a special calling convention:
+ *
+ * dst: [rdx]:rax (in/out)
+ * src: rbx (in/out)
+ * src2: rcx (in)
+ * flags: rflags (in/out)
+ *
+ * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
+ * different operand sizes can be reached by calculation, rather than a jump
+ * table (which would be bigger than the code).
+ *
+ * fastop functions are declared as taking a never-defined fastop parameter,
+ * so they can't be called from C directly.
+ */
+
+struct fastop;
+
struct opcode {
u64 flags : 56;
u64 intercept : 8;
@@ -164,6 +191,8 @@ struct opcode {
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
+ const struct escape *esc;
+ void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
@@ -180,6 +209,11 @@ struct gprefix {
struct opcode pfx_f3;
};
+struct escape {
+ struct opcode op[8];
+ struct opcode high[64];
+};
+
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
@@ -407,6 +441,97 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
} \
} while (0)
+static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
+
+#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
+#define FOP_RET "ret \n\t"
+
+#define FOP_START(op) \
+ extern void em_##op(struct fastop *fake); \
+ asm(".pushsection .text, \"ax\" \n\t" \
+ ".global em_" #op " \n\t" \
+ FOP_ALIGN \
+ "em_" #op ": \n\t"
+
+#define FOP_END \
+ ".popsection")
+
+#define FOPNOP() FOP_ALIGN FOP_RET
+
+#define FOP1E(op, dst) \
+ FOP_ALIGN #op " %" #dst " \n\t" FOP_RET
+
+#define FASTOP1(op) \
+ FOP_START(op) \
+ FOP1E(op##b, al) \
+ FOP1E(op##w, ax) \
+ FOP1E(op##l, eax) \
+ ON64(FOP1E(op##q, rax)) \
+ FOP_END
+
+#define FOP2E(op, dst, src) \
+ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
+
+#define FASTOP2(op) \
+ FOP_START(op) \
+ FOP2E(op##b, al, bl) \
+ FOP2E(op##w, ax, bx) \
+ FOP2E(op##l, eax, ebx) \
+ ON64(FOP2E(op##q, rax, rbx)) \
+ FOP_END
+
+/* 2 operand, word only */
+#define FASTOP2W(op) \
+ FOP_START(op) \
+ FOPNOP() \
+ FOP2E(op##w, ax, bx) \
+ FOP2E(op##l, eax, ebx) \
+ ON64(FOP2E(op##q, rax, rbx)) \
+ FOP_END
+
+/* 2 operand, src is CL */
+#define FASTOP2CL(op) \
+ FOP_START(op) \
+ FOP2E(op##b, al, cl) \
+ FOP2E(op##w, ax, cl) \
+ FOP2E(op##l, eax, cl) \
+ ON64(FOP2E(op##q, rax, cl)) \
+ FOP_END
+
+#define FOP3E(op, dst, src, src2) \
+ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
+
+/* 3-operand, word-only, src2=cl */
+#define FASTOP3WCL(op) \
+ FOP_START(op) \
+ FOPNOP() \
+ FOP3E(op##w, ax, bx, cl) \
+ FOP3E(op##l, eax, ebx, cl) \
+ ON64(FOP3E(op##q, rax, rbx, cl)) \
+ FOP_END
+
+/* Special case for SETcc - 1 instruction per cc */
+#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
+
+FOP_START(setcc)
+FOP_SETCC(seto)
+FOP_SETCC(setno)
+FOP_SETCC(setc)
+FOP_SETCC(setnc)
+FOP_SETCC(setz)
+FOP_SETCC(setnz)
+FOP_SETCC(setbe)
+FOP_SETCC(setnbe)
+FOP_SETCC(sets)
+FOP_SETCC(setns)
+FOP_SETCC(setp)
+FOP_SETCC(setnp)
+FOP_SETCC(setl)
+FOP_SETCC(setnl)
+FOP_SETCC(setle)
+FOP_SETCC(setnle)
+FOP_END;
+
#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
do { \
unsigned long _tmp; \
@@ -663,7 +788,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
ulong la;
u32 lim;
u16 sel;
- unsigned cpl, rpl;
+ unsigned cpl;
la = seg_base(ctxt, addr.seg) + addr.ea;
switch (ctxt->mode) {
@@ -697,11 +822,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
goto bad;
}
cpl = ctxt->ops->cpl(ctxt);
- if (ctxt->mode == X86EMUL_MODE_REAL)
- rpl = 0;
- else
- rpl = sel & 3;
- cpl = max(cpl, rpl);
if (!(desc.type & 8)) {
/* data segment */
if (cpl > desc.dpl)
@@ -852,39 +972,50 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
return rc;
}
-static int test_cc(unsigned int condition, unsigned int flags)
-{
- int rc = 0;
-
- switch ((condition & 15) >> 1) {
- case 0: /* o */
- rc |= (flags & EFLG_OF);
- break;
- case 1: /* b/c/nae */
- rc |= (flags & EFLG_CF);
- break;
- case 2: /* z/e */
- rc |= (flags & EFLG_ZF);
- break;
- case 3: /* be/na */
- rc |= (flags & (EFLG_CF|EFLG_ZF));
- break;
- case 4: /* s */
- rc |= (flags & EFLG_SF);
- break;
- case 5: /* p/pe */
- rc |= (flags & EFLG_PF);
- break;
- case 7: /* le/ng */
- rc |= (flags & EFLG_ZF);
- /* fall through */
- case 6: /* l/nge */
- rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
- break;
- }
-
- /* Odd condition identifiers (lsb == 1) have inverted sense. */
- return (!!rc ^ (condition & 1));
+FASTOP2(add);
+FASTOP2(or);
+FASTOP2(adc);
+FASTOP2(sbb);
+FASTOP2(and);
+FASTOP2(sub);
+FASTOP2(xor);
+FASTOP2(cmp);
+FASTOP2(test);
+
+FASTOP3WCL(shld);
+FASTOP3WCL(shrd);
+
+FASTOP2W(imul);
+
+FASTOP1(not);
+FASTOP1(neg);
+FASTOP1(inc);
+FASTOP1(dec);
+
+FASTOP2CL(rol);
+FASTOP2CL(ror);
+FASTOP2CL(rcl);
+FASTOP2CL(rcr);
+FASTOP2CL(shl);
+FASTOP2CL(shr);
+FASTOP2CL(sar);
+
+FASTOP2W(bsf);
+FASTOP2W(bsr);
+FASTOP2W(bt);
+FASTOP2W(bts);
+FASTOP2W(btr);
+FASTOP2W(btc);
+
+static u8 test_cc(unsigned int condition, unsigned long flags)
+{
+ u8 rc;
+ void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
+
+ flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
+ asm("push %[flags]; popf; call *%[fastop]"
+ : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
+ return rc;
}
static void fetch_register_operand(struct operand *op)
@@ -994,6 +1125,53 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
ctxt->ops->put_fpu(ctxt);
}
+static int em_fninit(struct x86_emulate_ctxt *ctxt)
+{
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ ctxt->ops->get_fpu(ctxt);
+ asm volatile("fninit");
+ ctxt->ops->put_fpu(ctxt);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
+{
+ u16 fcw;
+
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ ctxt->ops->get_fpu(ctxt);
+ asm volatile("fnstcw %0": "+m"(fcw));
+ ctxt->ops->put_fpu(ctxt);
+
+ /* force 2 byte destination */
+ ctxt->dst.bytes = 2;
+ ctxt->dst.val = fcw;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
+{
+ u16 fsw;
+
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ ctxt->ops->get_fpu(ctxt);
+ asm volatile("fnstsw %0": "+m"(fsw));
+ ctxt->ops->put_fpu(ctxt);
+
+ /* force 2 byte destination */
+ ctxt->dst.bytes = 2;
+ ctxt->dst.val = fsw;
+
+ return X86EMUL_CONTINUE;
+}
+
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
@@ -1534,6 +1712,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
{
int rc;
+ if (ctxt->d & NoWrite)
+ return X86EMUL_CONTINUE;
+
switch (ctxt->dst.type) {
case OP_REG:
write_register_operand(&ctxt->dst);
@@ -1918,47 +2099,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_grp2(struct x86_emulate_ctxt *ctxt)
-{
- switch (ctxt->modrm_reg) {
- case 0: /* rol */
- emulate_2op_SrcB(ctxt, "rol");
- break;
- case 1: /* ror */
- emulate_2op_SrcB(ctxt, "ror");
- break;
- case 2: /* rcl */
- emulate_2op_SrcB(ctxt, "rcl");
- break;
- case 3: /* rcr */
- emulate_2op_SrcB(ctxt, "rcr");
- break;
- case 4: /* sal/shl */
- case 6: /* sal/shl */
- emulate_2op_SrcB(ctxt, "sal");
- break;
- case 5: /* shr */
- emulate_2op_SrcB(ctxt, "shr");
- break;
- case 7: /* sar */
- emulate_2op_SrcB(ctxt, "sar");
- break;
- }
- return X86EMUL_CONTINUE;
-}
-
-static int em_not(struct x86_emulate_ctxt *ctxt)
-{
- ctxt->dst.val = ~ctxt->dst.val;
- return X86EMUL_CONTINUE;
-}
-
-static int em_neg(struct x86_emulate_ctxt *ctxt)
-{
- emulate_1op(ctxt, "neg");
- return X86EMUL_CONTINUE;
-}
-
static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
u8 ex = 0;
@@ -2000,12 +2140,6 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
int rc = X86EMUL_CONTINUE;
switch (ctxt->modrm_reg) {
- case 0: /* inc */
- emulate_1op(ctxt, "inc");
- break;
- case 1: /* dec */
- emulate_1op(ctxt, "dec");
- break;
case 2: /* call near abs */ {
long int old_eip;
old_eip = ctxt->_eip;
@@ -2075,7 +2209,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
/* Save real source value, then compare EAX against destination. */
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
- emulate_2op_SrcV(ctxt, "cmp");
+ fastop(ctxt, em_cmp);
if (ctxt->eflags & EFLG_ZF) {
/* Success: write back to memory. */
@@ -2843,7 +2977,7 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
- emulate_2op_SrcV(ctxt, "or");
+ fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
@@ -2852,6 +2986,24 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_aad(struct x86_emulate_ctxt *ctxt)
+{
+ u8 al = ctxt->dst.val & 0xff;
+ u8 ah = (ctxt->dst.val >> 8) & 0xff;
+
+ al = (al + (ah * ctxt->src.val)) & 0xff;
+
+ ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
+
+ /* Set PF, ZF, SF */
+ ctxt->src.type = OP_IMM;
+ ctxt->src.val = 0;
+ ctxt->src.bytes = 1;
+ fastop(ctxt, em_or);
+
+ return X86EMUL_CONTINUE;
+}
+
static int em_call(struct x86_emulate_ctxt *ctxt)
{
long rel = ctxt->src.val;
@@ -2900,64 +3052,6 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_add(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "add");
- return X86EMUL_CONTINUE;
-}
-
-static int em_or(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "or");
- return X86EMUL_CONTINUE;
-}
-
-static int em_adc(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "adc");
- return X86EMUL_CONTINUE;
-}
-
-static int em_sbb(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "sbb");
- return X86EMUL_CONTINUE;
-}
-
-static int em_and(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "and");
- return X86EMUL_CONTINUE;
-}
-
-static int em_sub(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "sub");
- return X86EMUL_CONTINUE;
-}
-
-static int em_xor(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "xor");
- return X86EMUL_CONTINUE;
-}
-
-static int em_cmp(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "cmp");
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return X86EMUL_CONTINUE;
-}
-
-static int em_test(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV(ctxt, "test");
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return X86EMUL_CONTINUE;
-}
-
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
@@ -2970,16 +3064,10 @@ static int em_xchg(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_imul(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "imul");
- return X86EMUL_CONTINUE;
-}
-
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
- return em_imul(ctxt);
+ return fastop(ctxt, em_imul);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
@@ -3300,47 +3388,6 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_bt(struct x86_emulate_ctxt *ctxt)
-{
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- /* only subword offset */
- ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
-
- emulate_2op_SrcV_nobyte(ctxt, "bt");
- return X86EMUL_CONTINUE;
-}
-
-static int em_bts(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "bts");
- return X86EMUL_CONTINUE;
-}
-
-static int em_btr(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "btr");
- return X86EMUL_CONTINUE;
-}
-
-static int em_btc(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "btc");
- return X86EMUL_CONTINUE;
-}
-
-static int em_bsf(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "bsf");
- return X86EMUL_CONTINUE;
-}
-
-static int em_bsr(struct x86_emulate_ctxt *ctxt)
-{
- emulate_2op_SrcV_nobyte(ctxt, "bsr");
- return X86EMUL_CONTINUE;
-}
-
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
@@ -3572,7 +3619,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
+#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
+#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
@@ -3583,12 +3632,13 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
+#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
-#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
- I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
- I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
+#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
+ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
+ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
@@ -3614,25 +3664,36 @@ static const struct opcode group7_rm7[] = {
};
static const struct opcode group1[] = {
- I(Lock, em_add),
- I(Lock | PageTable, em_or),
- I(Lock, em_adc),
- I(Lock, em_sbb),
- I(Lock | PageTable, em_and),
- I(Lock, em_sub),
- I(Lock, em_xor),
- I(0, em_cmp),
+ F(Lock, em_add),
+ F(Lock | PageTable, em_or),
+ F(Lock, em_adc),
+ F(Lock, em_sbb),
+ F(Lock | PageTable, em_and),
+ F(Lock, em_sub),
+ F(Lock, em_xor),
+ F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};
+static const struct opcode group2[] = {
+ F(DstMem | ModRM, em_rol),
+ F(DstMem | ModRM, em_ror),
+ F(DstMem | ModRM, em_rcl),
+ F(DstMem | ModRM, em_rcr),
+ F(DstMem | ModRM, em_shl),
+ F(DstMem | ModRM, em_shr),
+ F(DstMem | ModRM, em_shl),
+ F(DstMem | ModRM, em_sar),
+};
+
static const struct opcode group3[] = {
- I(DstMem | SrcImm, em_test),
- I(DstMem | SrcImm, em_test),
- I(DstMem | SrcNone | Lock, em_not),
- I(DstMem | SrcNone | Lock, em_neg),
+ F(DstMem | SrcImm | NoWrite, em_test),
+ F(DstMem | SrcImm | NoWrite, em_test),
+ F(DstMem | SrcNone | Lock, em_not),
+ F(DstMem | SrcNone | Lock, em_neg),
I(SrcMem, em_mul_ex),
I(SrcMem, em_imul_ex),
I(SrcMem, em_div_ex),
@@ -3640,14 +3701,14 @@ static const struct opcode group3[] = {
};
static const struct opcode group4[] = {
- I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
- I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
+ F(ByteOp | DstMem | SrcNone | Lock, em_inc),
+ F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
- I(DstMem | SrcNone | Lock, em_grp45),
- I(DstMem | SrcNone | Lock, em_grp45),
+ F(DstMem | SrcNone | Lock, em_inc),
+ F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | Stack, em_grp45),
I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
I(SrcMem | Stack, em_grp45),
@@ -3682,10 +3743,10 @@ static const struct group_dual group7 = { {
static const struct opcode group8[] = {
N, N, N, N,
- I(DstMem | SrcImmByte, em_bt),
- I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
- I(DstMem | SrcImmByte | Lock, em_btr),
- I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
+ F(DstMem | SrcImmByte | NoWrite, em_bt),
+ F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
+ F(DstMem | SrcImmByte | Lock, em_btr),
+ F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
@@ -3707,33 +3768,96 @@ static const struct gprefix pfx_vmovntpx = {
I(0, em_mov), N, N, N,
};
+static const struct escape escape_d9 = { {
+ N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
+}, {
+ /* 0xC0 - 0xC7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xC8 - 0xCF */
+ N, N, N, N, N, N, N, N,
+ /* 0xD0 - 0xD7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xD8 - 0xDF */
+ N, N, N, N, N, N, N, N,
+ /* 0xE0 - 0xE7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xE8 - 0xEF */
+ N, N, N, N, N, N, N, N,
+ /* 0xF0 - 0xF7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xF8 - 0xFF */
+ N, N, N, N, N, N, N, N,
+} };
+
+static const struct escape escape_db = { {
+ N, N, N, N, N, N, N, N,
+}, {
+ /* 0xC0 - 0xC7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xC8 - 0xCF */
+ N, N, N, N, N, N, N, N,
+ /* 0xD0 - 0xD7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xD8 - 0xDF */
+ N, N, N, N, N, N, N, N,
+ /* 0xE0 - 0xE7 */
+ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
+ /* 0xE8 - 0xEF */
+ N, N, N, N, N, N, N, N,
+ /* 0xF0 - 0xF7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xF8 - 0xFF */
+ N, N, N, N, N, N, N, N,
+} };
+
+static const struct escape escape_dd = { {
+ N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
+}, {
+ /* 0xC0 - 0xC7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xC8 - 0xCF */
+ N, N, N, N, N, N, N, N,
+ /* 0xD0 - 0xD7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xD8 - 0xDF */
+ N, N, N, N, N, N, N, N,
+ /* 0xE0 - 0xE7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xE8 - 0xEF */
+ N, N, N, N, N, N, N, N,
+ /* 0xF0 - 0xF7 */
+ N, N, N, N, N, N, N, N,
+ /* 0xF8 - 0xFF */
+ N, N, N, N, N, N, N, N,
+} };
+
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
- I6ALU(Lock, em_add),
+ F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
- I6ALU(Lock | PageTable, em_or),
+ F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
- I6ALU(Lock, em_adc),
+ F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
- I6ALU(Lock, em_sbb),
+ F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
- I6ALU(Lock | PageTable, em_and), N, N,
+ F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
- I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
+ F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
- I6ALU(Lock, em_xor), N, N,
+ F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
- I6ALU(0, em_cmp), N, N,
+ F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
- X16(D(DstReg)),
+ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
@@ -3757,7 +3881,7 @@ static const struct opcode opcode_table[256] = {
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
- I2bv(DstMem | SrcReg | ModRM, em_test),
+ F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
@@ -3777,18 +3901,18 @@ static const struct opcode opcode_table[256] = {
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
- I2bv(SrcSI | DstDI | String, em_cmp),
+ F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
/* 0xA8 - 0xAF */
- I2bv(DstAcc | SrcImm, em_test),
+ F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
- I2bv(SrcAcc | DstDI | String, em_cmp),
+ F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
- X8(I(DstReg | SrcImm | Mov, em_mov)),
+ X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
- D2bv(DstMem | SrcImmByte | ModRM),
+ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | Stack, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
@@ -3800,10 +3924,11 @@ static const struct opcode opcode_table[256] = {
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
- D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
- N, N, N, N,
+ G(Src2One | ByteOp, group2), G(Src2One, group2),
+ G(Src2CL | ByteOp, group2), G(Src2CL, group2),
+ N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
/* 0xD8 - 0xDF */
- N, N, N, N, N, N, N, N,
+ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte, em_loop)),
I(SrcImmByte, em_jcxz),
@@ -3870,28 +3995,29 @@ static const struct opcode twobyte_table[256] = {
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
- II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
- D(DstMem | SrcReg | Src2ImmByte | ModRM),
- D(DstMem | SrcReg | Src2CL | ModRM), N, N,
+ II(ImplicitOps, em_cpuid, cpuid),
+ F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
+ F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
+ F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
DI(ImplicitOps, rsm),
- I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
- D(DstMem | SrcReg | Src2ImmByte | ModRM),
- D(DstMem | SrcReg | Src2CL | ModRM),
- D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
+ F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
+ F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
+ F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
+ D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
- I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
+ F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
- I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
- I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
+ F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
+ F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
D2bv(DstMem | SrcReg | ModRM | Lock),
@@ -3950,6 +4076,9 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
case 4:
op->val = insn_fetch(s32, ctxt);
break;
+ case 8:
+ op->val = insn_fetch(s64, ctxt);
+ break;
}
if (!sign_extension) {
switch (op->bytes) {
@@ -4028,6 +4157,9 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
+ case OpImm64:
+ rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
+ break;
case OpMem8:
ctxt->memop.bytes = 1;
goto mem_common;
@@ -4222,6 +4354,12 @@ done_prefixes:
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
+ case Escape:
+ if (ctxt->modrm > 0xbf)
+ opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
+ else
+ opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
+ break;
default:
return EMULATION_FAILED;
}
@@ -4354,6 +4492,16 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
+static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
+{
+ ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
+ fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+ asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
+ : "+a"(ctxt->dst.val), "+b"(ctxt->src.val), [flags]"+D"(flags)
+ : "c"(ctxt->src2.val), [fastop]"S"(fop));
+ ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
+ return X86EMUL_CONTINUE;
+}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
@@ -4483,6 +4631,13 @@ special_insn:
}
if (ctxt->execute) {
+ if (ctxt->d & Fastop) {
+ void (*fop)(struct fastop *) = (void *)ctxt->execute;
+ rc = fastop(ctxt, fop);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ goto writeback;
+ }
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
@@ -4493,12 +4648,6 @@ special_insn:
goto twobyte_insn;
switch (ctxt->b) {
- case 0x40 ... 0x47: /* inc r16/r32 */
- emulate_1op(ctxt, "inc");
- break;
- case 0x48 ... 0x4f: /* dec r16/r32 */
- emulate_1op(ctxt, "dec");
- break;
case 0x63: /* movsxd */
if (ctxt->mode != X86EMUL_MODE_PROT64)
goto cannot_emulate;
@@ -4523,9 +4672,6 @@ special_insn:
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
- case 0xc0 ... 0xc1:
- rc = em_grp2(ctxt);
- break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
@@ -4536,13 +4682,6 @@ special_insn:
if (ctxt->eflags & EFLG_OF)
rc = emulate_int(ctxt, 4);
break;
- case 0xd0 ... 0xd1: /* Grp2 */
- rc = em_grp2(ctxt);
- break;
- case 0xd2 ... 0xd3: /* Grp2 */
- ctxt->src.val = reg_read(ctxt, VCPU_REGS_RCX);
- rc = em_grp2(ctxt);
- break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
jmp_rel(ctxt, ctxt->src.val);
@@ -4661,14 +4800,6 @@ twobyte_insn:
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
- case 0xa4: /* shld imm8, r, r/m */
- case 0xa5: /* shld cl, r, r/m */
- emulate_2op_cl(ctxt, "shld");
- break;
- case 0xac: /* shrd imm8, r, r/m */
- case 0xad: /* shrd cl, r, r/m */
- emulate_2op_cl(ctxt, "shrd");
- break;
case 0xae: /* clflush */
break;
case 0xb6 ... 0xb7: /* movzx */
@@ -4682,7 +4813,7 @@ twobyte_insn:
(s16) ctxt->src.val;
break;
case 0xc0 ... 0xc1: /* xadd */
- emulate_2op_SrcV(ctxt, "add");
+ fastop(ctxt, em_add);
/* Write back the register source. */
ctxt->src.val = ctxt->dst.orig_val;
write_register_operand(&ctxt->src);
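To make the fastop scheme concrete: FASTOP2(add) expands (via FOP_START/FOP2E/ON64) into four 8-byte-aligned stubs at fixed offsets inside em_add, one per operand size, roughly as sketched below; fastop() then reaches the right stub by pointer arithmetic instead of a jump table.

	/*
	 * Illustrative layout generated by FASTOP2(add) on a 64-bit build:
	 *
	 *   em_add +  0:  addb %bl,  %al  ; ret    dst.bytes == 1
	 *   em_add +  8:  addw %bx,  %ax  ; ret    dst.bytes == 2
	 *   em_add + 16:  addl %ebx, %eax ; ret    dst.bytes == 4
	 *   em_add + 24:  addq %rbx, %rax ; ret    dst.bytes == 8
	 *
	 * fastop() selects the stub with
	 *   fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;   /* 0, 8, 16 or 24 */
	 * after loading dst/src/src2 into rax/rbx/rcx and the guest flags
	 * into rflags, per the calling convention documented above.
	 */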
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11300d2fa714..c1d30b2fc9bb 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -122,7 +122,6 @@ static s64 __kpit_elapsed(struct kvm *kvm)
*/
remaining = hrtimer_get_remaining(&ps->timer);
elapsed = ps->period - ktime_to_ns(remaining);
- elapsed = mod_64(elapsed, ps->period);
return elapsed;
}
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 848206df0967..cc31f7c06d3d 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -241,6 +241,8 @@ int kvm_pic_read_irq(struct kvm *kvm)
int irq, irq2, intno;
struct kvm_pic *s = pic_irqchip(kvm);
+ s->output = 0;
+
pic_lock(s);
irq = pic_get_irq(&s->pics[0]);
if (irq >= 0) {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 7e06ba1618bd..484bc874688b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -38,49 +38,81 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
/*
+ * Check whether there is a pending interrupt from a
+ * non-APIC source, without intack.
+ */
+static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+{
+ if (kvm_apic_accept_pic_intr(v))
+ return pic_irqchip(v->kvm)->output; /* PIC */
+ else
+ return 0;
+}
+
+/*
+ * Check whether there is an injectable interrupt:
+ * when virtual interrupt delivery is enabled,
+ * interrupts from the APIC are handled by hardware,
+ * so we don't need to check for them here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
+ if (!irqchip_in_kernel(v->kvm))
+ return v->arch.interrupt.pending;
+
+ if (kvm_cpu_has_extint(v))
+ return 1;
+
+ if (kvm_apic_vid_enabled(v->kvm))
+ return 0;
+
+ return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
+}
+
+/*
* check if there is pending interrupt without
* intack.
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- struct kvm_pic *s;
-
if (!irqchip_in_kernel(v->kvm))
return v->arch.interrupt.pending;
- if (kvm_apic_has_interrupt(v) == -1) { /* LAPIC */
- if (kvm_apic_accept_pic_intr(v)) {
- s = pic_irqchip(v->kvm); /* PIC */
- return s->output;
- } else
- return 0;
- }
- return 1;
+ if (kvm_cpu_has_extint(v))
+ return 1;
+
+ return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
/*
+ * Read the pending interrupt vector (from a
+ * non-APIC source) and intack it.
+ */
+static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+{
+ if (kvm_cpu_has_extint(v))
+ return kvm_pic_read_irq(v->kvm); /* PIC */
+ return -1;
+}
+
+/*
* Read pending interrupt vector and intack.
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
- struct kvm_pic *s;
int vector;
if (!irqchip_in_kernel(v->kvm))
return v->arch.interrupt.nr;
- vector = kvm_get_apic_interrupt(v); /* APIC */
- if (vector == -1) {
- if (kvm_apic_accept_pic_intr(v)) {
- s = pic_irqchip(v->kvm);
- s->output = 0; /* PIC */
- vector = kvm_pic_read_irq(v->kvm);
- }
- }
- return vector;
+ vector = kvm_cpu_get_extint(v);
+
+ if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+ return vector; /* PIC */
+
+ return kvm_get_apic_interrupt(v); /* APIC */
}
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
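
The reworked kvm_cpu_get_interrupt() above asks the PIC (extint) path first and only falls back to the LAPIC when virtual interrupt delivery is off, since with vid enabled the APIC interrupt is injected by hardware. A minimal user-space sketch of that precedence follows; it is illustrative only, and the stubbed predicates and vector values are assumptions, not the real KVM interfaces.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the real KVM state; the values are made up for the demo. */
static int  extint_vector = -1;    /* pending PIC vector, -1 if none   */
static int  lapic_vector  = 0x31;  /* pending LAPIC vector, -1 if none */
static bool vid_enabled   = true;  /* virtual interrupt delivery       */

static int get_extint(void) { return extint_vector; }
static int get_lapic(void)  { return lapic_vector; }

/* Mirrors the new precedence: extint first, LAPIC only when vid is off. */
static int get_interrupt(void)
{
	int vector = get_extint();

	if (vid_enabled || vector != -1)
		return vector;	/* PIC vector, or leave the APIC to hardware */

	return get_lapic();	/* LAPIC, software injection path */
}

int main(void)
{
	printf("vector: %d\n", get_interrupt());	/* -1: nothing to inject by software */
	vid_enabled = false;
	printf("vector: %d\n", get_interrupt());	/* 49 (0x31) from the LAPIC */
	return 0;
}
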
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9392f527f107..02b51dd4e4ad 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -140,31 +140,56 @@ static inline int apic_enabled(struct kvm_lapic *apic)
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
-static inline int apic_x2apic_mode(struct kvm_lapic *apic)
-{
- return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
-}
-
static inline int kvm_apic_id(struct kvm_lapic *apic)
{
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ struct kvm_lapic_irq *irq,
+ u64 *eoi_exit_bitmap)
{
- u16 cid;
- ldr >>= 32 - map->ldr_bits;
- cid = (ldr >> map->cid_shift) & map->cid_mask;
+ struct kvm_lapic **dst;
+ struct kvm_apic_map *map;
+ unsigned long bitmap = 1;
+ int i;
- BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+ rcu_read_lock();
+ map = rcu_dereference(vcpu->kvm->arch.apic_map);
- return cid;
-}
+ if (unlikely(!map)) {
+ __set_bit(irq->vector, (unsigned long *)eoi_exit_bitmap);
+ goto out;
+ }
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
- ldr >>= (32 - map->ldr_bits);
- return ldr & map->lid_mask;
+ if (irq->dest_mode == 0) { /* physical mode */
+ if (irq->delivery_mode == APIC_DM_LOWEST ||
+ irq->dest_id == 0xff) {
+ __set_bit(irq->vector,
+ (unsigned long *)eoi_exit_bitmap);
+ goto out;
+ }
+ dst = &map->phys_map[irq->dest_id & 0xff];
+ } else {
+ u32 mda = irq->dest_id << (32 - map->ldr_bits);
+
+ dst = map->logical_map[apic_cluster_id(map, mda)];
+
+ bitmap = apic_logical_id(map, mda);
+ }
+
+ for_each_set_bit(i, &bitmap, 16) {
+ if (!dst[i])
+ continue;
+ if (dst[i]->vcpu == vcpu) {
+ __set_bit(irq->vector,
+ (unsigned long *)eoi_exit_bitmap);
+ break;
+ }
+ }
+
+out:
+ rcu_read_unlock();
}
static void recalculate_apic_map(struct kvm *kvm)
@@ -230,6 +255,8 @@ out:
if (old)
kfree_rcu(old, rcu);
+
+ kvm_ioapic_make_eoibitmap_request(kvm);
}
static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
@@ -345,6 +372,10 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
int result;
+ /*
+ * Note that irr_pending is just a hint. It will always be
+ * true with virtual interrupt delivery enabled.
+ */
if (!apic->irr_pending)
return -1;
@@ -461,6 +492,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
int result;
+
+ /* Note that isr_count is always 1 with vid enabled */
if (!apic->isr_count)
return -1;
if (likely(apic->highest_isr_cache != -1))
@@ -740,6 +773,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
+static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
+{
+ if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+ kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+ int trigger_mode;
+ if (apic_test_vector(vector, apic->regs + APIC_TMR))
+ trigger_mode = IOAPIC_LEVEL_TRIG;
+ else
+ trigger_mode = IOAPIC_EDGE_TRIG;
+ kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+ }
+}
+
static int apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
@@ -756,19 +802,26 @@ static int apic_set_eoi(struct kvm_lapic *apic)
apic_clear_isr(vector, apic);
apic_update_ppr(apic);
- if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
- kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
- int trigger_mode;
- if (apic_test_vector(vector, apic->regs + APIC_TMR))
- trigger_mode = IOAPIC_LEVEL_TRIG;
- else
- trigger_mode = IOAPIC_EDGE_TRIG;
- kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
- }
+ kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
return vector;
}
+/*
+ * This interface assumes a trap-like exit, which has already finished
+ * the desired side effects, including the vISR and vPPR updates.
+ */
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ trace_kvm_eoi(apic, vector);
+
+ kvm_ioapic_send_eoi(apic, vector);
+ kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
+
static void apic_send_ipi(struct kvm_lapic *apic)
{
u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
@@ -1212,6 +1265,21 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+/* emulate an APIC access in a trap-like manner */
+void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+{
+ u32 val = 0;
+
+ /* hw has done the conditional check and inst decode */
+ offset &= 0xff0;
+
+ apic_reg_read(vcpu->arch.apic, offset, 4, &val);
+
+ /* TODO: optimize to just emulate side effect w/o one more write */
+ apic_reg_write(vcpu->arch.apic, offset, val);
+}
+EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -1288,6 +1356,7 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
+ u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
if (!apic) {
@@ -1309,11 +1378,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
- if (apic_x2apic_mode(apic)) {
- u32 id = kvm_apic_id(apic);
- u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
- kvm_apic_set_ldr(apic, ldr);
+ if ((old_value ^ value) & X2APIC_ENABLE) {
+ if (value & X2APIC_ENABLE) {
+ u32 id = kvm_apic_id(apic);
+ u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+ kvm_apic_set_ldr(apic, ldr);
+ kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
+ } else
+ kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
}
+
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -1359,8 +1433,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
- apic->irr_pending = false;
- apic->isr_count = 0;
+ apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
+ apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
@@ -1575,8 +1649,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
- apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+ 1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
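
When X2APIC_ENABLE is toggled on, the code above derives the logical destination register from the APIC ID as ldr = ((id >> 4) << 16) | (1 << (id & 0xf)): the cluster number lands in the upper 16 bits and a single member bit in the lower 16. Below is a short stand-alone sketch of that encoding, not code from the patch; the sample IDs are arbitrary.

#include <stdio.h>

/* x2APIC logical destination: cluster = id >> 4 in the upper 16 bits,
 * one bit per CPU within the cluster in the lower 16 bits. */
static unsigned int x2apic_ldr(unsigned int id)
{
	return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
	/* APIC ID 0x12: cluster 1, member bit 2 */
	printf("ldr(0x12) = 0x%08x\n", x2apic_ldr(0x12));	/* 0x00010004 */
	/* APIC ID 0x21: cluster 2, member bit 1 */
	printf("ldr(0x21) = 0x%08x\n", x2apic_ldr(0x21));	/* 0x00020002 */
	return 0;
}
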
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e5ebf9f3571f..1676d34ddb4e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -64,6 +64,9 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
+void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
+
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
@@ -124,4 +127,35 @@ static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
}
+static inline int apic_x2apic_mode(struct kvm_lapic *apic)
+{
+ return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
+}
+
+static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
+{
+ return kvm_x86_ops->vm_has_apicv(kvm);
+}
+
+static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
+{
+ u16 cid;
+ ldr >>= 32 - map->ldr_bits;
+ cid = (ldr >> map->cid_shift) & map->cid_mask;
+
+ BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
+
+ return cid;
+}
+
+static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
+{
+ ldr >>= (32 - map->ldr_bits);
+ return ldr & map->lid_mask;
+}
+
+void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ struct kvm_lapic_irq *irq,
+ u64 *eoi_bitmap);
+
#endif
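
The apic_cluster_id()/apic_logical_id() helpers moved into this header undo that packing: the LDR is shifted down by (32 - ldr_bits) and then split with cid_shift/cid_mask and lid_mask. The sketch below applies the same arithmetic with x2APIC-style parameters (ldr_bits = 32, cid_shift = 16 and 16-bit masks are assumed values for the example); the struct here is a local stand-in, not the real kvm_apic_map.

#include <stdio.h>

/* Local stand-in for the kvm_apic_map fields used by the helpers. */
struct apic_map {
	unsigned int ldr_bits;   /* significant LDR bits       */
	unsigned int cid_shift;  /* position of the cluster id */
	unsigned int cid_mask;
	unsigned int lid_mask;
};

static unsigned int cluster_id(const struct apic_map *m, unsigned int ldr)
{
	ldr >>= 32 - m->ldr_bits;
	return (ldr >> m->cid_shift) & m->cid_mask;
}

static unsigned int logical_id(const struct apic_map *m, unsigned int ldr)
{
	ldr >>= 32 - m->ldr_bits;
	return ldr & m->lid_mask;
}

int main(void)
{
	struct apic_map map = { 32, 16, 0xffff, 0xffff };	/* x2APIC-style layout */
	unsigned int ldr = 0x00010004;				/* cluster 1, member bit 2 */

	printf("cluster %u, logical id 0x%x\n",
	       cluster_id(&map, ldr), logical_id(&map, ldr));
	return 0;
}
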
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01d7c2ad05f5..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -448,7 +448,8 @@ static bool __check_direct_spte_mmio_pf(u64 spte)
static bool spte_is_locklessly_modifiable(u64 spte)
{
- return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE));
+ return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
+ (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}
static bool spte_has_volatile_bits(u64 spte)
@@ -831,8 +832,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
- max_level = kvm_x86_ops->get_lpage_level() < host_level ?
- kvm_x86_ops->get_lpage_level() : host_level;
+ max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
@@ -1142,7 +1142,7 @@ spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
}
static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
- int level, bool pt_protect)
+ bool pt_protect)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1180,7 +1180,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
while (mask) {
rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
- __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false);
+ __rmap_write_protect(kvm, rmapp, false);
/* clear the first set bit */
mask &= mask - 1;
@@ -1199,7 +1199,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
for (i = PT_PAGE_TABLE_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
rmapp = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmapp, i, true);
+ write_protected |= __rmap_write_protect(kvm, rmapp, true);
}
return write_protected;
@@ -1460,28 +1460,14 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
-/*
- * Remove the sp from shadow page cache, after call it,
- * we can not find this sp from the cache, and the shadow
- * page table is still valid.
- * It should be under the protection of mmu lock.
- */
-static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
- if (!sp->role.direct)
- free_page((unsigned long)sp->gfns);
-}
-
-/*
- * Free the shadow page table and the sp, we can do it
- * out of the protection of mmu lock.
- */
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
-{
list_del(&sp->link);
free_page((unsigned long)sp->spt);
+ if (!sp->role.direct)
+ free_page((unsigned long)sp->gfns);
kmem_cache_free(mmu_page_header_cache, sp);
}
@@ -1522,7 +1508,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
sp->parent_ptes = 0;
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
@@ -1659,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
-#define for_each_gfn_sp(kvm, sp, gfn, pos) \
- hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_sp(kvm, sp, gfn) \
+ hlist_for_each_entry(sp, \
&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
if ((sp)->gfn != (gfn)) {} else
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
- hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn) \
+ hlist_for_each_entry(sp, \
&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
if ((sp)->gfn != (gfn) || (sp)->role.direct || \
(sp)->role.invalid) {} else
@@ -1721,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mmu_page *s;
- struct hlist_node *node;
LIST_HEAD(invalid_list);
bool flush = false;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
if (!s->unsync)
continue;
@@ -1863,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
union kvm_mmu_page_role role;
unsigned quadrant;
struct kvm_mmu_page *sp;
- struct hlist_node *node;
bool need_sync = false;
role = vcpu->arch.mmu.base_role;
@@ -1878,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+ for_each_gfn_sp(vcpu->kvm, sp, gfn) {
if (!need_sync && sp->unsync)
need_sync = true;
@@ -1973,9 +1956,9 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
u64 spte;
- spte = __pa(sp->spt)
- | PT_PRESENT_MASK | PT_ACCESSED_MASK
- | PT_WRITABLE_MASK | PT_USER_MASK;
+ spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+ shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
+
mmu_spte_set(sptep, spte);
}
@@ -2126,7 +2109,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
WARN_ON(!sp->role.invalid || sp->root_count);
- kvm_mmu_isolate_page(sp);
kvm_mmu_free_page(sp);
} while (!list_empty(invalid_list));
}
@@ -2144,6 +2126,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
* change the value
*/
+ spin_lock(&kvm->mmu_lock);
+
if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
!list_empty(&kvm->arch.active_mmu_pages)) {
@@ -2158,19 +2142,20 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
}
kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+ spin_unlock(&kvm->mmu_lock);
}
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_mmu_page *sp;
- struct hlist_node *node;
LIST_HEAD(invalid_list);
int r;
pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
r = 0;
spin_lock(&kvm->mmu_lock);
- for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+ for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
@@ -2183,14 +2168,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
-static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
-{
- int slot = memslot_id(kvm, gfn);
- struct kvm_mmu_page *sp = page_header(__pa(pte));
-
- __set_bit(slot, sp->slot_bitmap);
-}
-
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
@@ -2308,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mmu_page *s;
- struct hlist_node *node;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
if (s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2322,19 +2298,17 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
struct kvm_mmu_page *s;
- struct hlist_node *node;
bool need_unsync = false;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
if (!can_unsync)
return 1;
if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
- if (!need_unsync && !s->unsync) {
+ if (!s->unsync)
need_unsync = true;
- }
}
if (need_unsync)
kvm_unsync_pages(vcpu, gfn);
@@ -2342,8 +2316,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
}
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
- unsigned pte_access, int user_fault,
- int write_fault, int level,
+ unsigned pte_access, int level,
gfn_t gfn, pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
@@ -2378,20 +2351,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= (u64)pfn << PAGE_SHIFT;
- if ((pte_access & ACC_WRITE_MASK)
- || (!vcpu->arch.mmu.direct_map && write_fault
- && !is_write_protection(vcpu) && !user_fault)) {
+ if (pte_access & ACC_WRITE_MASK) {
/*
- * There are two cases:
- * - the one is other vcpu creates new sp in the window
- * between mapping_level() and acquiring mmu-lock.
- * - the another case is the new sp is created by itself
- * (page-fault path) when guest uses the target gfn as
- * its page table.
- * Both of these cases can be fixed by allowing guest to
- * retry the access, it will refault, then we can establish
- * the mapping by using small page.
+ * Another vcpu may create a new sp in the window between
+ * mapping_level() and acquiring the mmu lock. We can
+ * allow the guest to retry the access; the mapping can
+ * be fixed when the guest refaults.
*/
if (level > PT_PAGE_TABLE_LEVEL &&
has_wrprotected_page(vcpu->kvm, gfn, level))
@@ -2399,19 +2365,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
- if (!vcpu->arch.mmu.direct_map
- && !(pte_access & ACC_WRITE_MASK)) {
- spte &= ~PT_USER_MASK;
- /*
- * If we converted a user page to a kernel page,
- * so that the kernel can write to it when cr0.wp=0,
- * then we should prevent the kernel from executing it
- * if SMEP is enabled.
- */
- if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
- spte |= PT64_NX_MASK;
- }
-
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
@@ -2441,19 +2394,15 @@ done:
}
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
- unsigned pt_access, unsigned pte_access,
- int user_fault, int write_fault,
- int *emulate, int level, gfn_t gfn,
- pfn_t pfn, bool speculative,
+ unsigned pte_access, int write_fault, int *emulate,
+ int level, gfn_t gfn, pfn_t pfn, bool speculative,
bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
- pgprintk("%s: spte %llx access %x write_fault %d"
- " user_fault %d gfn %llx\n",
- __func__, *sptep, pt_access,
- write_fault, user_fault, gfn);
+ pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+ *sptep, write_fault, gfn);
if (is_rmap_spte(*sptep)) {
/*
@@ -2477,9 +2426,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
was_rmapped = 1;
}
- if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
- level, gfn, pfn, speculative, true,
- host_writable)) {
+ if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
+ true, host_writable)) {
if (write_fault)
*emulate = 1;
kvm_mmu_flush_tlb(vcpu);
@@ -2497,7 +2445,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
++vcpu->kvm->stat.lpages;
if (is_shadow_present_pte(*sptep)) {
- page_header_update_slot(vcpu->kvm, sptep, gfn);
if (!was_rmapped) {
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
@@ -2571,10 +2518,9 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
return -1;
for (i = 0; i < ret; i++, gfn++, start++)
- mmu_set_spte(vcpu, start, ACC_ALL,
- access, 0, 0, NULL,
- sp->role.level, gfn,
- page_to_pfn(pages[i]), true, true);
+ mmu_set_spte(vcpu, start, access, 0, NULL,
+ sp->role.level, gfn, page_to_pfn(pages[i]),
+ true, true);
return 0;
}
@@ -2633,11 +2579,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
- unsigned pte_access = ACC_ALL;
-
- mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
- 0, write, &emulate,
- level, gfn, pfn, prefault, map_writable);
+ mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
+ write, &emulate, level, gfn, pfn,
+ prefault, map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);
++vcpu->stat.pf_fixed;
break;
@@ -2652,11 +2596,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
iterator.level - 1,
1, ACC_ALL, iterator.sptep);
- mmu_spte_set(iterator.sptep,
- __pa(sp->spt)
- | PT_PRESENT_MASK | PT_WRITABLE_MASK
- | shadow_user_mask | shadow_x_mask
- | shadow_accessed_mask);
+ link_shadow_page(iterator.sptep, sp);
}
}
return emulate;
@@ -3719,6 +3659,7 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
else
r = paging32_init_context(vcpu, context);
+ vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
vcpu->arch.mmu.base_role.smep_andnot_wp
@@ -3885,7 +3826,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
*gpa &= ~(gpa_t)7;
*bytes = 8;
- r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
+ r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
if (r)
gentry = 0;
new = (const u8 *)&gentry;
@@ -3987,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
union kvm_mmu_page_role mask = { .word = 0 };
struct kvm_mmu_page *sp;
- struct hlist_node *node;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
int npte;
@@ -4018,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {
zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
@@ -4039,7 +3979,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
!((sp->role.word ^ vcpu->arch.mmu.base_role.word)
& mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
- if (!remote_flush && need_remote_flush(entry, *spte))
+ if (need_remote_flush(entry, *spte))
remote_flush = true;
++spte;
}
@@ -4198,26 +4138,36 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
- struct kvm_mmu_page *sp;
- bool flush = false;
+ struct kvm_memory_slot *memslot;
+ gfn_t last_gfn;
+ int i;
- list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
- int i;
- u64 *pt;
+ memslot = id_to_memslot(kvm->memslots, slot);
+ last_gfn = memslot->base_gfn + memslot->npages - 1;
- if (!test_bit(slot, sp->slot_bitmap))
- continue;
+ spin_lock(&kvm->mmu_lock);
- pt = sp->spt;
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- if (!is_shadow_present_pte(pt[i]) ||
- !is_last_spte(pt[i], sp->role.level))
- continue;
+ for (i = PT_PAGE_TABLE_LEVEL;
+ i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+ unsigned long *rmapp;
+ unsigned long last_index, index;
- spte_write_protect(kvm, &pt[i], &flush, false);
+ rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+ last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ __rmap_write_protect(kvm, rmapp, false);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ kvm_flush_remote_tlbs(kvm);
+ cond_resched_lock(&kvm->mmu_lock);
+ }
}
}
+
kvm_flush_remote_tlbs(kvm);
+ spin_unlock(&kvm->mmu_lock);
}
void kvm_mmu_zap_all(struct kvm *kvm)
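
spte_is_locklessly_modifiable() above now spells its test as (spte & mask) == mask instead of !(~spte & mask). The two forms are equivalent; the new one simply states "both writable bits are set" directly. Below is a quick stand-alone check of that equivalence, not part of the patch; the bit positions are arbitrary demo values, not the real SPTE layout.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Arbitrary demo bits standing in for SPTE_HOST_WRITEABLE/SPTE_MMU_WRITEABLE. */
#define HOST_WRITEABLE (1ull << 61)
#define MMU_WRITEABLE  (1ull << 62)
#define WRITEABLE_MASK (HOST_WRITEABLE | MMU_WRITEABLE)

static int old_form(uint64_t spte)
{
	return !(~spte & WRITEABLE_MASK);
}

static int new_form(uint64_t spte)
{
	return (spte & WRITEABLE_MASK) == WRITEABLE_MASK;
}

int main(void)
{
	uint64_t samples[] = { 0, HOST_WRITEABLE, MMU_WRITEABLE,
			       WRITEABLE_MASK, WRITEABLE_MASK | 0x5 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(old_form(samples[i]) == new_form(samples[i]));

	printf("both forms agree\n");
	return 0;
}
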
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index cd6e98333ba3..b8f6172f4174 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -195,12 +195,6 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
TP_ARGS(sp)
);
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages,
- TP_PROTO(struct kvm_mmu_page *sp),
-
- TP_ARGS(sp)
-);
-
TRACE_EVENT(
mark_mmio_spte,
TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 891eb6d93b8b..105dd5bd550e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -151,7 +151,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
pt_element_t pte;
pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
- unsigned index, pt_access, pte_access, accessed_dirty, shift;
+ unsigned index, pt_access, pte_access, accessed_dirty;
gpa_t pte_gpa;
int offset;
const int write_fault = access & PFERR_WRITE_MASK;
@@ -249,16 +249,12 @@ retry_walk:
if (!write_fault)
protect_clean_gpte(&pte_access, pte);
-
- /*
- * On a write fault, fold the dirty bit into accessed_dirty by shifting it one
- * place right.
- *
- * On a read fault, do nothing.
- */
- shift = write_fault >> ilog2(PFERR_WRITE_MASK);
- shift *= PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT;
- accessed_dirty &= pte >> shift;
+ else
+ /*
+ * On a write fault, fold the dirty bit into accessed_dirty by
+ * shifting it one place right.
+ */
+ accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);
if (unlikely(!accessed_dirty)) {
ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
@@ -330,8 +326,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* we call mmu_set_spte() with host_writable = true because
* pte_prefetch_gfn_to_pfn always gets a writable pfn.
*/
- mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+ mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
+ gfn, pfn, true, true);
return true;
}
@@ -405,7 +401,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
*/
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
- int user_fault, int write_fault, int hlevel,
+ int write_fault, int hlevel,
pfn_t pfn, bool map_writable, bool prefault)
{
struct kvm_mmu_page *sp = NULL;
@@ -413,9 +409,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
unsigned direct_access, access = gw->pt_access;
int top_level, emulate = 0;
- if (!is_present_gpte(gw->ptes[gw->level - 1]))
- return 0;
-
direct_access = gw->pte_access;
top_level = vcpu->arch.mmu.root_level;
@@ -477,9 +470,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
}
clear_sp_write_flooding_count(it.sptep);
- mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
- user_fault, write_fault, &emulate, it.level,
- gw->gfn, pfn, prefault, map_writable);
+ mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
+ it.level, gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return emulate;
@@ -491,6 +483,46 @@ out_gpte_changed:
return 0;
}
+ /*
+ * Check whether the mapped gfn can write its own page table in the
+ * current mapping.
+ *
+ * This is a helper for FNAME(page_fault). When the guest uses a large
+ * page to map a writable gfn that is in use as a page table, we should
+ * force kvm to map it with a small page, because the shadow page created
+ * while kvm builds the shadow page table stops kvm from using a large
+ * page anyway. Doing it early avoids an unnecessary #PF and emulation.
+ *
+ * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
+ * currently used as its own page table.
+ *
+ * Note: the PDPT page table is not checked for PAE 32-bit guests. That
+ * is fine because the PDPT is always shadowed, which means we can never
+ * use a large page to map the gfn that holds the PDPT.
+ */
+static bool
+FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
+ struct guest_walker *walker, int user_fault,
+ bool *write_fault_to_shadow_pgtable)
+{
+ int level;
+ gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+ bool self_changed = false;
+
+ if (!(walker->pte_access & ACC_WRITE_MASK ||
+ (!is_write_protection(vcpu) && !user_fault)))
+ return false;
+
+ for (level = walker->level; level <= walker->max_level; level++) {
+ gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
+
+ self_changed |= !(gfn & mask);
+ *write_fault_to_shadow_pgtable |= !gfn;
+ }
+
+ return self_changed;
+}
+
/*
* Page fault handler. There are several causes for a page fault:
* - there is no shadow pte for the guest pte
@@ -516,7 +548,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int level = PT_PAGE_TABLE_LEVEL;
int force_pt_level;
unsigned long mmu_seq;
- bool map_writable;
+ bool map_writable, is_self_change_mapping;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -544,8 +576,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return 0;
}
+ vcpu->arch.write_fault_to_shadow_pgtable = false;
+
+ is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
+ &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
+
if (walker.level >= PT_DIRECTORY_LEVEL)
- force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+ force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
+ || is_self_change_mapping;
else
force_pt_level = 1;
if (!force_pt_level) {
@@ -564,6 +602,26 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
walker.gfn, pfn, walker.pte_access, &r))
return r;
+ /*
+ * Do not change pte_access if the pfn is an MMIO page; otherwise
+ * we would cache the incorrect access into the MMIO spte.
+ */
+ if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
+ !is_write_protection(vcpu) && !user_fault &&
+ !is_noslot_pfn(pfn)) {
+ walker.pte_access |= ACC_WRITE_MASK;
+ walker.pte_access &= ~ACC_USER_MASK;
+
+ /*
+ * If we converted a user page to a kernel page so
+ * that the kernel can write to it when cr0.wp=0, we
+ * should prevent the kernel from executing it if
+ * SMEP is enabled.
+ */
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ walker.pte_access &= ~ACC_EXEC_MASK;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
@@ -572,7 +630,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
- r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+ r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
level, pfn, map_writable, prefault);
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
@@ -747,7 +805,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
- set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
+ set_spte(vcpu, &sp->spt[i], pte_access,
PT_PAGE_TABLE_LEVEL, gfn,
spte_to_pfn(sp->spt[i]), true, false,
host_writable);
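
The simplified walker above folds the dirty bit into accessed_dirty on a write fault by shifting the PTE right by PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT, so the dirty bit lines up with the accessed bit before the AND. On x86 those shifts are 6 and 5, so the shift is one. The single-level sketch below only demonstrates the trick and is not the real multi-level walk.

#include <stdio.h>
#include <stdint.h>

#define PT_ACCESSED_SHIFT 5
#define PT_DIRTY_SHIFT    6
#define PT_ACCESSED_MASK  (1u << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK     (1u << PT_DIRTY_SHIFT)

/* Non-zero when the walk does not have to set A/D bits itself: the
 * accessed bit is always required, the dirty bit only on a write fault. */
static uint32_t fold_accessed_dirty(uint32_t pte, int write_fault)
{
	uint32_t accessed_dirty = PT_ACCESSED_MASK;	/* ANDed along the walk */

	accessed_dirty &= pte;
	if (write_fault)
		/* shift dirty into the accessed position and AND it in */
		accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);

	return accessed_dirty;
}

int main(void)
{
	uint32_t pte_a  = PT_ACCESSED_MASK;			/* A=1, D=0 */
	uint32_t pte_ad = PT_ACCESSED_MASK | PT_DIRTY_MASK;	/* A=1, D=1 */

	printf("read fault,  A only : %d\n", !!fold_accessed_dirty(pte_a, 0));
	printf("write fault, A only : %d\n", !!fold_accessed_dirty(pte_a, 1));
	printf("write fault, A and D: %d\n", !!fold_accessed_dirty(pte_ad, 1));
	return 0;
}
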
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d29d3cd1c156..e1b1ce21bc00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3571,6 +3571,26 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
+static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+{
+ return;
+}
+
+static int svm_vm_has_apicv(struct kvm *kvm)
+{
+ return 0;
+}
+
+static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ return;
+}
+
+static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+ return;
+}
+
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4290,6 +4310,10 @@ static struct kvm_x86_ops svm_x86_ops = {
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
+ .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
+ .vm_has_apicv = svm_vm_has_apicv,
+ .load_eoi_exitmap = svm_load_eoi_exitmap,
+ .hwapic_isr_update = svm_hwapic_isr_update,
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9120ae1901e4..6667042714cc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,6 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
+static bool __read_mostly enable_apicv_reg_vid;
+
/*
* If nested=1, nested virtualization is supported, i.e., guests may use
* VMX and be a hypervisor for its own guests. If nested=0, guests may not
@@ -92,12 +94,8 @@ module_param(fasteoi, bool, S_IRUGO);
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK \
- (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE)
+#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
@@ -624,6 +622,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
+static bool guest_state_valid(struct kvm_vcpu *vcpu);
+static u32 vmx_segment_access_rights(struct kvm_segment *var);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -638,6 +638,8 @@ static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
+static unsigned long *vmx_msr_bitmap_legacy_x2apic;
+static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;
@@ -762,6 +764,24 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}
+static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+}
+
+static inline bool cpu_has_vmx_apic_register_virt(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_APIC_REGISTER_VIRT;
+}
+
+static inline bool cpu_has_vmx_virtual_intr_delivery(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+}
+
static inline bool cpu_has_vmx_flexpriority(void)
{
return cpu_has_vmx_tpr_shadow() &&
@@ -1694,7 +1714,6 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
- __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
to_vmx(vcpu)->rflags = rflags;
if (to_vmx(vcpu)->rmode.vm86_active) {
to_vmx(vcpu)->rmode.save_rflags = rflags;
@@ -1820,6 +1839,25 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
vmx->guest_msrs[from] = tmp;
}
+static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+ unsigned long *msr_bitmap;
+
+ if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+ if (is_long_mode(vcpu))
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+ else
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ } else {
+ if (is_long_mode(vcpu))
+ msr_bitmap = vmx_msr_bitmap_longmode;
+ else
+ msr_bitmap = vmx_msr_bitmap_legacy;
+ }
+
+ vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+}
+
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
@@ -1828,7 +1866,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
static void setup_msrs(struct vcpu_vmx *vmx)
{
int save_nmsrs, index;
- unsigned long *msr_bitmap;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
@@ -1860,14 +1897,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx->save_nmsrs = save_nmsrs;
- if (cpu_has_vmx_msr_bitmap()) {
- if (is_long_mode(&vmx->vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode;
- else
- msr_bitmap = vmx_msr_bitmap_legacy;
-
- vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
- }
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_set_msr_bitmap(&vmx->vcpu);
}
/*
@@ -2533,13 +2564,16 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
min2 = 0;
opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
SECONDARY_EXEC_RDTSCP |
- SECONDARY_EXEC_ENABLE_INVPCID;
+ SECONDARY_EXEC_ENABLE_INVPCID |
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -2550,6 +2584,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
+
+ if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+ _cpu_based_2nd_exec_control &= ~(
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+
if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
enabled */
@@ -2747,6 +2788,15 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_ple())
ple_gap = 0;
+ if (!cpu_has_vmx_apic_register_virt() ||
+ !cpu_has_vmx_virtual_intr_delivery())
+ enable_apicv_reg_vid = 0;
+
+ if (enable_apicv_reg_vid)
+ kvm_x86_ops->update_cr8_intercept = NULL;
+ else
+ kvm_x86_ops->hwapic_irr_update = NULL;
+
if (nested)
nested_vmx_setup_ctls_msrs();
@@ -2758,18 +2808,28 @@ static __exit void hardware_unsetup(void)
free_kvm_area();
}
-static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save)
+static bool emulation_required(struct kvm_vcpu *vcpu)
{
- const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
- struct kvm_segment tmp = *save;
+ return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
- if (!(vmcs_readl(sf->base) == tmp.base && tmp.s)) {
- tmp.base = vmcs_readl(sf->base);
- tmp.selector = vmcs_read16(sf->selector);
- tmp.dpl = tmp.selector & SELECTOR_RPL_MASK;
- tmp.s = 1;
+static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
+ struct kvm_segment *save)
+{
+ if (!emulate_invalid_guest_state) {
+ /*
+ * The CS and SS RPL should be equal during guest entry according
+ * to the VMX spec, but in reality that is not always so. Since the
+ * vcpu is in the middle of the transition from real mode to
+ * protected mode, it is safe to assume that RPL 0 is a good
+ * default value.
+ */
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
+ save->selector &= ~SELECTOR_RPL_MASK;
+ save->dpl = save->selector & SELECTOR_RPL_MASK;
+ save->s = 1;
}
- vmx_set_segment(vcpu, &tmp, seg);
+ vmx_set_segment(vcpu, save, seg);
}
static void enter_pmode(struct kvm_vcpu *vcpu)
@@ -2777,7 +2837,17 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->emulation_required = 1;
+ /*
+ * Update the real mode segment cache. It may not be up to date if a
+ * segment register was written while the vcpu was in guest mode.
+ */
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
+
vmx->rmode.vm86_active = 0;
vmx_segment_cache_clear(vmx);
@@ -2794,22 +2864,16 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
update_exception_bitmap(vcpu);
- if (emulate_invalid_guest_state)
- return;
-
- fix_pmode_dataseg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
- fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
- fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
- fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
-
- vmx_segment_cache_clear(vmx);
+ fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+ fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
- vmcs_write16(GUEST_SS_SELECTOR, 0);
- vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
-
- vmcs_write16(GUEST_CS_SELECTOR,
- vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
- vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
+ /* CPL is always 0 when CPU enters protected mode */
+ __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+ vmx->cpl = 0;
}
static gva_t rmode_tss_base(struct kvm *kvm)
@@ -2831,36 +2895,51 @@ static gva_t rmode_tss_base(struct kvm *kvm)
static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
- vmcs_write16(sf->selector, save->base >> 4);
- vmcs_write32(sf->base, save->base & 0xffff0);
- vmcs_write32(sf->limit, 0xffff);
- vmcs_write32(sf->ar_bytes, 0xf3);
- if (save->base & 0xf)
- printk_once(KERN_WARNING "kvm: segment base is not paragraph"
- " aligned when entering protected mode (seg=%d)",
- seg);
+ struct kvm_segment var = *save;
+
+ var.dpl = 0x3;
+ if (seg == VCPU_SREG_CS)
+ var.type = 0x3;
+
+ if (!emulate_invalid_guest_state) {
+ var.selector = var.base >> 4;
+ var.base = var.base & 0xffff0;
+ var.limit = 0xffff;
+ var.g = 0;
+ var.db = 0;
+ var.present = 1;
+ var.s = 1;
+ var.l = 0;
+ var.unusable = 0;
+ var.type = 0x3;
+ var.avl = 0;
+ if (save->base & 0xf)
+ printk_once(KERN_WARNING "kvm: segment base is not "
+ "paragraph aligned when entering "
+ "protected mode (seg=%d)", seg);
+ }
+
+ vmcs_write16(sf->selector, var.selector);
+ vmcs_write32(sf->base, var.base);
+ vmcs_write32(sf->limit, var.limit);
+ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}
static void enter_rmode(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_segment var;
-
- if (enable_unrestricted_guest)
- return;
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
- vmx->emulation_required = 1;
vmx->rmode.vm86_active = 1;
-
/*
* Very old userspace does not call KVM_SET_TSS_ADDR before entering
* vcpu. Call it here with phys address pointing 16M below 4G.
@@ -2888,28 +2967,13 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
update_exception_bitmap(vcpu);
- if (emulate_invalid_guest_state)
- goto continue_rmode;
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
- vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
+ fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
+ fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
- vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
-
-continue_rmode:
kvm_mmu_reset_context(vcpu);
}
@@ -3068,17 +3132,18 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
+ hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
if (enable_unrestricted_guest)
- hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
- | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
- else
- hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else {
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
- if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
- enter_pmode(vcpu);
+ if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
+ enter_pmode(vcpu);
- if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
- enter_rmode(vcpu);
+ if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
+ enter_rmode(vcpu);
+ }
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
@@ -3098,7 +3163,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0, hw_cr0);
vcpu->arch.cr0 = cr0;
- __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+
+ /* depends on vcpu->arch.cr0 being set to the new value */
+ vmx->emulation_required = emulation_required(vcpu);
}
static u64 construct_eptp(unsigned long root_hpa)
@@ -3155,6 +3222,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;
hw_cr4 |= X86_CR4_PSE;
+ /*
+ * SMEP is disabled if the CPU is in non-paging mode in
+ * hardware. However, KVM always uses paging mode to
+ * emulate guest non-paging mode with TDP.
+ * To emulate this behavior, SMEP needs to be manually
+ * disabled when the guest switches to non-paging mode.
+ */
+ hw_cr4 &= ~X86_CR4_SMEP;
} else if (!(cr4 & X86_CR4_PAE)) {
hw_cr4 &= ~X86_CR4_PAE;
}
@@ -3171,10 +3246,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 ar;
- if (vmx->rmode.vm86_active
- && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
- || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
- || seg == VCPU_SREG_GS)) {
+ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
*var = vmx->rmode.segs[seg];
if (seg == VCPU_SREG_TR
|| var->selector == vmx_read_guest_seg_selector(vmx, seg))
@@ -3187,8 +3259,6 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->limit = vmx_read_guest_seg_limit(vmx, seg);
var->selector = vmx_read_guest_seg_selector(vmx, seg);
ar = vmx_read_guest_seg_ar(vmx, seg);
- if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
- ar = 0;
var->type = ar & 15;
var->s = (ar >> 4) & 1;
var->dpl = (ar >> 5) & 3;
@@ -3211,8 +3281,10 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}
-static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (!is_protmode(vcpu))
return 0;
@@ -3220,24 +3292,9 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
&& (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
return 3;
- return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
-}
-
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
- * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
- * fail; use the cache instead.
- */
- if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) {
- return vmx->cpl;
- }
-
if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
- vmx->cpl = __vmx_get_cpl(vcpu);
+ vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
}
return vmx->cpl;
@@ -3269,28 +3326,23 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
- u32 ar;
vmx_segment_cache_clear(vmx);
+ if (seg == VCPU_SREG_CS)
+ __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
- if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
- vmcs_write16(sf->selector, var->selector);
- vmx->rmode.segs[VCPU_SREG_TR] = *var;
- return;
+ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
+ vmx->rmode.segs[seg] = *var;
+ if (seg == VCPU_SREG_TR)
+ vmcs_write16(sf->selector, var->selector);
+ else if (var->s)
+ fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
+ goto out;
}
+
vmcs_writel(sf->base, var->base);
vmcs_write32(sf->limit, var->limit);
vmcs_write16(sf->selector, var->selector);
- if (vmx->rmode.vm86_active && var->s) {
- vmx->rmode.segs[seg] = *var;
- /*
- * Hack real-mode segments into vm86 compatibility.
- */
- if (var->base == 0xffff0000 && var->selector == 0xf000)
- vmcs_writel(sf->base, 0xf0000);
- ar = 0xf3;
- } else
- ar = vmx_segment_access_rights(var);
/*
* Fix the "Accessed" bit in AR field of segment registers for older
@@ -3304,42 +3356,12 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
* kvm hack.
*/
if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
- ar |= 0x1; /* Accessed */
+ var->type |= 0x1; /* Accessed */
- vmcs_write32(sf->ar_bytes, ar);
- __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
- /*
- * Fix segments for real mode guest in hosts that don't have
- * "unrestricted_mode" or it was disabled.
- * This is done to allow migration of the guests from hosts with
- * unrestricted guest like Westmere to older host that don't have
- * unrestricted guest like Nehelem.
- */
- if (vmx->rmode.vm86_active) {
- switch (seg) {
- case VCPU_SREG_CS:
- vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
- vmcs_write32(GUEST_CS_LIMIT, 0xffff);
- if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
- vmcs_writel(GUEST_CS_BASE, 0xf0000);
- vmcs_write16(GUEST_CS_SELECTOR,
- vmcs_readl(GUEST_CS_BASE) >> 4);
- break;
- case VCPU_SREG_ES:
- case VCPU_SREG_DS:
- case VCPU_SREG_GS:
- case VCPU_SREG_FS:
- fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
- break;
- case VCPU_SREG_SS:
- vmcs_write16(GUEST_SS_SELECTOR,
- vmcs_readl(GUEST_SS_BASE) >> 4);
- vmcs_write32(GUEST_SS_LIMIT, 0xffff);
- vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
- break;
- }
- }
+out:
+ vmx->emulation_required |= emulation_required(vcpu);
}
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -3380,13 +3402,16 @@ static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
u32 ar;
vmx_get_segment(vcpu, &var, seg);
+ var.dpl = 0x3;
+ if (seg == VCPU_SREG_CS)
+ var.type = 0x3;
ar = vmx_segment_access_rights(&var);
if (var.base != (var.selector << 4))
return false;
- if (var.limit < 0xffff)
+ if (var.limit != 0xffff)
return false;
- if (((ar | (3 << AR_DPL_SHIFT)) & ~(AR_G_MASK | AR_DB_MASK)) != 0xf3)
+ if (ar != 0xf3)
return false;
return true;
@@ -3521,6 +3546,9 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
*/
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
+ if (enable_unrestricted_guest)
+ return true;
+
/* real mode guest state checks */
if (!is_protmode(vcpu)) {
if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
@@ -3644,12 +3672,9 @@ static void seg_setup(int seg)
vmcs_write16(sf->selector, 0);
vmcs_writel(sf->base, 0);
vmcs_write32(sf->limit, 0xffff);
- if (enable_unrestricted_guest) {
- ar = 0x93;
- if (seg == VCPU_SREG_CS)
- ar |= 0x08; /* code segment */
- } else
- ar = 0xf3;
+ ar = 0x93;
+ if (seg == VCPU_SREG_CS)
+ ar |= 0x08; /* code segment */
vmcs_write32(sf->ar_bytes, ar);
}
@@ -3667,7 +3692,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
kvm_userspace_mem.flags = 0;
kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+ r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
if (r)
goto out;
@@ -3697,7 +3722,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
kvm_userspace_mem.guest_phys_addr =
kvm->arch.ept_identity_map_addr;
kvm_userspace_mem.memory_size = PAGE_SIZE;
- r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+ r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
if (r)
goto out;
@@ -3739,7 +3764,10 @@ static void free_vpid(struct vcpu_vmx *vmx)
spin_unlock(&vmx_vpid_lock);
}
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);
@@ -3752,20 +3780,93 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
*/
if (msr <= 0x1fff) {
- __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
- __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
+ if (type & MSR_TYPE_R)
+ /* read-low */
+ __clear_bit(msr, msr_bitmap + 0x000 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-low */
+ __clear_bit(msr, msr_bitmap + 0x800 / f);
+
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
msr &= 0x1fff;
- __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
- __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
+ if (type & MSR_TYPE_R)
+ /* read-high */
+ __clear_bit(msr, msr_bitmap + 0x400 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-high */
+ __clear_bit(msr, msr_bitmap + 0xc00 / f);
+
+ }
+}
+
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R)
+ /* read-low */
+ __set_bit(msr, msr_bitmap + 0x000 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-low */
+ __set_bit(msr, msr_bitmap + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R)
+ /* read-high */
+ __set_bit(msr, msr_bitmap + 0x400 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-high */
+ __set_bit(msr, msr_bitmap + 0xc00 / f);
+
}
}
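
The read/write split introduced above indexes directly into the fixed layout of the VMX MSR bitmap page; a sketch of that layout and of the byte offset that controls a given access (helper name hypothetical):

#include <linux/types.h>

/*
 * The MSR bitmap is one 4 KiB page holding four 1 KiB bitmaps, one bit
 * per MSR (a clear bit means "do not intercept"):
 *   0x000-0x3ff  reads,  MSRs 0x00000000-0x00001fff
 *   0x400-0x7ff  reads,  MSRs 0xc0000000-0xc0001fff
 *   0x800-0xbff  writes, MSRs 0x00000000-0x00001fff
 *   0xc00-0xfff  writes, MSRs 0xc0000000-0xc0001fff
 */
static unsigned int msr_bitmap_byte_offset(u32 msr, bool write)
{
	unsigned int base = write ? 0x800 : 0x000;

	if (msr >= 0xc0000000) {
		base += 0x400;
		msr &= 0x1fff;
	}
	return base + msr / 8;
}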
static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
{
if (!longmode_only)
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
+ msr, MSR_TYPE_R | MSR_TYPE_W);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
+ msr, MSR_TYPE_R | MSR_TYPE_W);
+}
+
+static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+{
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_R);
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+{
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_R);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+{
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_W);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_W);
}
/*
@@ -3844,6 +3945,11 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
return exec_control;
}
+static int vmx_vm_has_apicv(struct kvm *kvm)
+{
+ return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
+}
+
static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
@@ -3861,6 +3967,10 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (!ple_gap)
exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+ exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
return exec_control;
}
@@ -3905,6 +4015,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmx_secondary_exec_control(vmx));
}
+ if (enable_apicv_reg_vid) {
+ vmcs_write64(EOI_EXIT_BITMAP0, 0);
+ vmcs_write64(EOI_EXIT_BITMAP1, 0);
+ vmcs_write64(EOI_EXIT_BITMAP2, 0);
+ vmcs_write64(EOI_EXIT_BITMAP3, 0);
+
+ vmcs_write16(GUEST_INTR_STATUS, 0);
+ }
+
if (ple_gap) {
vmcs_write32(PLE_GAP, ple_gap);
vmcs_write32(PLE_WINDOW, ple_window);
@@ -3990,14 +4109,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx_segment_cache_clear(vmx);
seg_setup(VCPU_SREG_CS);
- /*
- * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
- * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
- */
- if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
+ if (kvm_vcpu_is_bsp(&vmx->vcpu))
vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
- vmcs_writel(GUEST_CS_BASE, 0x000f0000);
- } else {
+ else {
vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
}
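
The AP path above follows the real-mode rule that a segment base is always selector * 16: with the selector set to sipi_vector << 8, the base comes out as sipi_vector << 12. A one-line sketch of that relation (hypothetical helper):

#include <linux/types.h>

/* Real-mode rule: segment base = selector * 16. */
static unsigned long rmode_cs_base(u16 cs_selector)
{
	return (unsigned long)cs_selector << 4;
}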
@@ -4073,9 +4187,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
ret = 0;
- /* HACK: Don't enable emulation on guest boot/reset */
- vmx->emulation_required = 0;
-
return ret;
}
@@ -4251,7 +4362,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
.flags = 0,
};
- ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+ ret = kvm_set_memory_region(kvm, &tss_mem, false);
if (ret)
return ret;
kvm->arch.tss_addr = addr;
@@ -4261,28 +4372,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
return 0;
}
-static int handle_rmode_exception(struct kvm_vcpu *vcpu,
- int vec, u32 err_code)
+static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
- /*
- * Instruction with address size override prefix opcode 0x67
- * Cause the #SS fault with 0 error code in VM86 mode.
- */
- if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
- if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
- return 1;
- /*
- * Forward all other exceptions that are valid in real mode.
- * FIXME: Breaks guest debugging in real mode, needs to be fixed with
- * the required debugging infrastructure rework.
- */
switch (vec) {
- case DB_VECTOR:
- if (vcpu->guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
- return 0;
- kvm_queue_exception(vcpu, vec);
- return 1;
case BP_VECTOR:
/*
* Update instruction length as we may reinject the exception
@@ -4291,7 +4383,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
- return 0;
+ return false;
+ /* fall through */
+ case DB_VECTOR:
+ if (vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+ return false;
/* fall through */
case DE_VECTOR:
case OF_VECTOR:
@@ -4301,10 +4398,37 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
case SS_VECTOR:
case GP_VECTOR:
case MF_VECTOR:
- kvm_queue_exception(vcpu, vec);
- return 1;
+ return true;
+ break;
}
- return 0;
+ return false;
+}
+
+static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+ int vec, u32 err_code)
+{
+ /*
+	 * Instructions with the address-size override prefix (opcode 0x67)
+	 * cause the #SS fault with error code 0 in VM86 mode.
+ */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
+ if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+ if (vcpu->arch.halt_request) {
+ vcpu->arch.halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
+ return 1;
+ }
+ return 0;
+ }
+
+ /*
+ * Forward all other exceptions that are valid in real mode.
+ * FIXME: Breaks guest debugging in real mode, needs to be fixed with
+ * the required debugging infrastructure rework.
+ */
+ kvm_queue_exception(vcpu, vec);
+ return 1;
}
/*
@@ -4392,17 +4516,11 @@ static int handle_exception(struct kvm_vcpu *vcpu)
return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
}
- if (vmx->rmode.vm86_active &&
- handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code)) {
- if (vcpu->arch.halt_request) {
- vcpu->arch.halt_request = 0;
- return kvm_emulate_halt(vcpu);
- }
- return 1;
- }
-
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+
+ if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
+ return handle_rmode_exception(vcpu, ex_no, error_code);
+
switch (ex_no) {
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
@@ -4820,6 +4938,26 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
+static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int vector = exit_qualification & 0xff;
+
+ /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
+ kvm_apic_set_eoi_accelerated(vcpu, vector);
+ return 1;
+}
+
+static int handle_apic_write(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 offset = exit_qualification & 0xfff;
+
+ /* APIC-write VM exit is trap-like and thus no need to adjust IP */
+ kvm_apic_write_nodecode(vcpu, offset);
+ return 1;
+}
+
static int handle_task_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5065,7 +5203,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
schedule();
}
- vmx->emulation_required = !guest_state_valid(vcpu);
+ vmx->emulation_required = emulation_required(vcpu);
out:
return ret;
}
@@ -5754,6 +5892,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
+ [EXIT_REASON_APIC_WRITE] = handle_apic_write,
+ [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
[EXIT_REASON_WBINVD] = handle_wbinvd,
[EXIT_REASON_XSETBV] = handle_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
@@ -5780,7 +5920,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
gpa_t bitmap;
- if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 1;
/*
@@ -6008,7 +6148,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 vectoring_info = vmx->idt_vectoring_info;
/* If guest state is invalid, start emulating */
- if (vmx->emulation_required && emulate_invalid_guest_state)
+ if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
/*
@@ -6103,6 +6243,85 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
vmcs_write32(TPR_THRESHOLD, irr);
}
+static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+{
+ u32 sec_exec_control;
+
+ /*
+	 * There is no point in enabling virtualized x2APIC mode without
+	 * enabling apicv.
+ */
+ if (!cpu_has_vmx_virtualize_x2apic_mode() ||
+ !vmx_vm_has_apicv(vcpu->kvm))
+ return;
+
+ if (!vm_need_tpr_shadow(vcpu->kvm))
+ return;
+
+ sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+ if (set) {
+ sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ } else {
+ sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ }
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+ vmx_set_msr_bitmap(vcpu);
+}
+
+static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+ u16 status;
+ u8 old;
+
+ if (!vmx_vm_has_apicv(kvm))
+ return;
+
+ if (isr == -1)
+ isr = 0;
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ old = status >> 8;
+ if (isr != old) {
+ status &= 0xff;
+ status |= isr << 8;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+}
+
+static void vmx_set_rvi(int vector)
+{
+ u16 status;
+ u8 old;
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ old = (u8)status & 0xff;
+ if ((u8)vector != old) {
+ status &= ~0xff;
+ status |= (u8)vector;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+}
+
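
Both helpers above treat GUEST_INTR_STATUS as a 16-bit field carrying RVI (requesting virtual interrupt) in bits 7:0 and SVI (the in-service vector) in bits 15:8. Packing the two halves, as a sketch with hypothetical names:

#include <linux/types.h>

/* RVI lives in the low byte, SVI in the high byte. */
static u16 pack_guest_intr_status(u8 rvi, u8 svi)
{
	return ((u16)svi << 8) | rvi;
}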
+static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+ if (max_irr == -1)
+ return;
+
+ vmx_set_rvi(max_irr);
+}
+
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
+ vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
+ vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+}
+
static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -6291,7 +6510,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
- if (vmx->emulation_required && emulate_invalid_guest_state)
+ if (vmx->emulation_required)
return;
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -7366,6 +7585,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
+ .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+ .vm_has_apicv = vmx_vm_has_apicv,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
@@ -7398,7 +7622,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
static int __init vmx_init(void)
{
- int r, i;
+ int r, i, msr;
rdmsrl_safe(MSR_EFER, &host_efer);
@@ -7419,11 +7643,19 @@ static int __init vmx_init(void)
if (!vmx_msr_bitmap_legacy)
goto out1;
+ vmx_msr_bitmap_legacy_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy_x2apic)
+ goto out2;
vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode)
- goto out2;
+ goto out3;
+ vmx_msr_bitmap_longmode_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode_x2apic)
+ goto out4;
/*
* Allow direct access to the PC debug port (it is often used for I/O
@@ -7455,6 +7687,28 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv_reg_vid) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+		/* According to the SDM, in x2apic mode the whole id reg is
+		 * used, but KVM only uses the highest eight bits, so reads
+		 * of it need to be intercepted. */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
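
The specific indices picked above follow the architectural rule that an x2APIC MSR is the xAPIC MMIO register offset shifted right by four, based at 0x800: 0x802 is the APIC ID (offset 0x20), 0x808 the TPR (0x80), 0x80b the EOI register (0xb0), 0x839 the timer current count (0x390), and 0x83f the x2APIC-only SELF IPI register. A sketch of the mapping (hypothetical helper):

#include <linux/types.h>

/* x2APIC MSR index for a given xAPIC MMIO register offset. */
static u32 x2apic_msr_for(u32 apic_reg_offset)
{
	return 0x800 + (apic_reg_offset >> 4);
}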
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
@@ -7468,8 +7722,10 @@ static int __init vmx_init(void)
return 0;
-out3:
+out4:
free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
out2:
free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
@@ -7481,6 +7737,8 @@ out:
static void __exit vmx_exit(void)
{
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
free_page((unsigned long)vmx_msr_bitmap_legacy);
free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 37040079cd6b..f71500af1f81 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -872,8 +872,6 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
kvm_x86_ops->set_efer(vcpu, efer);
- vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
@@ -2522,7 +2520,7 @@ int kvm_dev_ioctl_check_extension(long ext)
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
- r = KVM_MEMORY_SLOTS;
+ r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
@@ -3274,12 +3272,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
return -EINVAL;
mutex_lock(&kvm->slots_lock);
- spin_lock(&kvm->mmu_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
- spin_unlock(&kvm->mmu_lock);
mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -3439,7 +3435,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
@@ -4495,8 +4491,10 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
*selector = var.selector;
- if (var.unusable)
+ if (var.unusable) {
+ memset(desc, 0, sizeof(*desc));
return false;
+ }
if (var.g)
var.limit >>= 12;
@@ -4757,26 +4755,26 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
return r;
}
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+ bool write_fault_to_shadow_pgtable)
{
- gpa_t gpa;
+ gpa_t gpa = cr2;
pfn_t pfn;
- if (tdp_enabled)
- return false;
-
- /*
- * if emulation was due to access to shadowed page table
- * and it failed try to unshadow page and re-enter the
- * guest to let CPU execute the instruction.
- */
- if (kvm_mmu_unprotect_page_virt(vcpu, gva))
- return true;
-
- gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
+ if (!vcpu->arch.mmu.direct_map) {
+ /*
+ * Write permission should be allowed since only
+		 * Write permission should be allowed since only
+		 * write accesses need to be emulated.
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
- if (gpa == UNMAPPED_GVA)
- return true; /* let cpu generate fault */
+ /*
+	 * If the mapping is invalid in the guest, let the cpu retry
+	 * it to generate the fault.
+ */
+ if (gpa == UNMAPPED_GVA)
+ return true;
+ }
/*
* Do not retry the unhandleable instruction if it faults on the
@@ -4785,12 +4783,43 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
- if (!is_error_noslot_pfn(pfn)) {
- kvm_release_pfn_clean(pfn);
+
+ /*
+	 * If the instruction failed on the error pfn, it cannot be fixed;
+	 * report the error to userspace.
+ */
+ if (is_error_noslot_pfn(pfn))
+ return false;
+
+ kvm_release_pfn_clean(pfn);
+
+ /* The instructions are well-emulated on direct mmu. */
+ if (vcpu->arch.mmu.direct_map) {
+ unsigned int indirect_shadow_pages;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (indirect_shadow_pages)
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+
return true;
}
- return false;
+ /*
+ * if emulation was due to access to shadowed page table
+ * and it failed try to unshadow page and re-enter the
+ * guest to let CPU execute the instruction.
+ */
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+
+ /*
+	 * If the access faults on its page table, it cannot be fixed by
+	 * unprotecting the shadow page, and it should be reported to
+	 * userspace.
+ */
+ return !write_fault_to_shadow_pgtable;
}
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
@@ -4832,7 +4861,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
- kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
@@ -4849,7 +4878,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
+ bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+ /*
+ * Clear write_fault_to_shadow_pgtable here to ensure it is
+ * never reused.
+ */
+ vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
@@ -4868,7 +4903,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
- if (reexecute_instruction(vcpu, cr2))
+ if (reexecute_instruction(vcpu, cr2,
+ write_fault_to_spt))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
@@ -4898,7 +4934,7 @@ restart:
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
- if (reexecute_instruction(vcpu, cr2))
+ if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
@@ -5541,7 +5577,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
- } else if (kvm_cpu_has_interrupt(vcpu)) {
+ } else if (kvm_cpu_has_injectable_intr(vcpu)) {
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
@@ -5609,6 +5645,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
#endif
}
+static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+{
+ u64 eoi_exit_bitmap[4];
+
+ memset(eoi_exit_bitmap, 0, 32);
+
+ kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
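
update_eoi_exitmap() zeroes 32 bytes because the EOI-exit bitmap is 256 bits, one per interrupt vector, split across the four EOI_EXIT_BITMAPn fields that vmx_load_eoi_exitmap() writes. Marking a single vector in such a bitmap, as a sketch (hypothetical helper):

#include <linux/types.h>

/* One bit per vector, 4 x 64 bits = 256 vectors. */
static void eoi_exitmap_set_vector(u64 *eoi_exit_bitmap, u8 vector)
{
	eoi_exit_bitmap[vector >> 6] |= 1ULL << (vector & 0x3f);
}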
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
@@ -5662,6 +5708,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
+ if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
+ update_eoi_exitmap(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -5670,10 +5718,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* enable NMI/IRQ window open exits if needed */
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
- else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+ else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
+ /*
+ * Update architecture specific hints for APIC
+ * virtual interrupt delivery.
+ */
+ if (kvm_x86_ops->hwapic_irr_update)
+ kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_lapic_find_highest_irr(vcpu));
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
@@ -6853,48 +6908,43 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
int npages = memslot->npages;
- int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
- /* Prevent internal slot pages from being moved by fork()/COW. */
- if (memslot->id >= KVM_MEMORY_SLOTS)
- map_flags = MAP_SHARED | MAP_ANONYMOUS;
-
- /*To keep backward compatibility with older userspace,
- *x86 needs to handle !user_alloc case.
+ /*
+	 * Only private memory slots need to be mapped here since the
+	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
- if (!user_alloc) {
- if (npages && !old.npages) {
- unsigned long userspace_addr;
+ if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+ unsigned long userspace_addr;
- userspace_addr = vm_mmap(NULL, 0,
- npages * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- map_flags,
- 0);
+ /*
+ * MAP_SHARED to prevent internal slot pages from being moved
+ * by fork()/COW.
+ */
+ userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0);
- if (IS_ERR((void *)userspace_addr))
- return PTR_ERR((void *)userspace_addr);
+ if (IS_ERR((void *)userspace_addr))
+ return PTR_ERR((void *)userspace_addr);
- memslot->userspace_addr = userspace_addr;
- }
+ memslot->userspace_addr = userspace_addr;
}
-
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
- if (!user_alloc && !old.user_alloc && old.npages && !npages) {
+ if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
int ret;
ret = vm_munmap(old.userspace_addr,
@@ -6908,11 +6958,15 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
- spin_lock(&kvm->mmu_lock);
if (nr_mmu_pages)
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
- kvm_mmu_slot_remove_write_access(kvm, mem->slot);
- spin_unlock(&kvm->mmu_lock);
+ /*
+ * Write protect all pages for dirty logging.
+ * Existing largepage mappings are destroyed here and new ones will
+ * not be created until the end of the logging.
+ */
+ if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_slot_remove_write_access(kvm, mem->slot);
/*
* If memory slot is created, or moved, we need to clear all
* mmio sptes.
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fb674fd3fc22..2b97525246d4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
if (pmd_large(*pmd))
return spurious_fault_check(error_code, (pte_t *) pmd);
- /*
- * Note: don't use pte_present() here, since it returns true
- * if the _PAGE_PROTNONE bit is set. However, this aliases the
- * _PAGE_GLOBAL bit, which for kernel pages give false positives
- * when CONFIG_DEBUG_PAGEALLOC is used.
- */
pte = pte_offset_kernel(pmd, address);
- if (!(pte_flags(*pte) & _PAGE_PRESENT))
+ if (!pte_present(*pte))
return 0;
ret = spurious_fault_check(error_code, pte);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index dfd30259eb89..72fe01e9e414 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -97,8 +97,7 @@ void numa_set_node(int cpu, int node)
#endif
per_cpu(x86_cpu_to_node_map, cpu) = node;
- if (node != NUMA_NO_NODE)
- set_cpu_numa_node(cpu, node);
+ set_cpu_numa_node(cpu, node);
}
void numa_clear_node(int cpu)
@@ -213,9 +212,10 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node\n", nd_size);
+ pr_err("Cannot find %zu bytes in node %d\n",
+ nd_size, nid);
return;
}
nd = __va(nd_pa);
@@ -560,12 +560,10 @@ static int __init numa_init(int (*init_func)(void))
for (i = 0; i < MAX_LOCAL_APIC; i++)
set_apicid_to_node(i, NUMA_NO_NODE);
- /*
- * Do not clear numa_nodes_parsed or zero numa_meminfo here, because
- * SRAT was parsed earlier in early_parse_srat().
- */
+ nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance();
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ca1f1c2bb7be..091934e1d0d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -473,6 +473,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
/*
+ * Set the PSE and GLOBAL flags only if the PRESENT flag is
+ * set otherwise pmd_present/pmd_huge will return true even on
+ * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
+ * for the ancient hardware that doesn't support it.
+ */
+ if (pgprot_val(new_prot) & _PAGE_PRESENT)
+ pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ else
+ pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+
+ new_prot = canon_pgprot(new_prot);
+
+ /*
* old_pte points to the large page base address. So we need
* to add the offset of the virtual address:
*/
@@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* The address is aligned and the number of pages
* covers the full page.
*/
- new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+ new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
__set_pmd_pte(kpte, address, new_pte);
cpa->flags |= CPA_FLUSHTLB;
do_split = 0;
@@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
#ifdef CONFIG_X86_64
if (level == PG_LEVEL_1G) {
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
- pgprot_val(ref_prot) |= _PAGE_PSE;
+ /*
+ * Set the PSE flags only if the PRESENT flag is set
+ * otherwise pmd_present/pmd_huge will return true
+ * even on a non present pmd.
+ */
+ if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+ pgprot_val(ref_prot) |= _PAGE_PSE;
+ else
+ pgprot_val(ref_prot) &= ~_PAGE_PSE;
}
#endif
/*
+ * Set the GLOBAL flags only if the PRESENT flag is set
+ * otherwise pmd/pte_present will return true even on a non
+ * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
+ * for the ancient hardware that doesn't support it.
+ */
+ if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+ pgprot_val(ref_prot) |= _PAGE_GLOBAL;
+ else
+ pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
+
+ /*
* Get the target pfn from the original entry:
*/
pfn = pte_pfn(*kpte);
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
- set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+ set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
PFN_DOWN(__pa(address)) + 1))
@@ -685,6 +717,18 @@ repeat:
new_prot = static_protections(new_prot, address, pfn);
/*
+ * Set the GLOBAL flags only if the PRESENT flag is
+ * set otherwise pte_present will return true even on
+ * a non present pte. The canon_pgprot will clear
+ * _PAGE_GLOBAL for the ancient hardware that doesn't
+ * support it.
+ */
+ if (pgprot_val(new_prot) & _PAGE_PRESENT)
+ pgprot_val(new_prot) |= _PAGE_GLOBAL;
+ else
+ pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
+
+ /*
* We need to keep the pfn from the existing PTE,
* after all we're only going to change it's attributes
* not the memory it points to
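
The three hunks above all apply the same rule: _PAGE_PSE and _PAGE_GLOBAL only carry their meaning when _PAGE_PRESENT is set, because on a non-present entry they confuse pmd_present()/pmd_huge() (the _PAGE_PROTNONE bit shares the _PAGE_GLOBAL bit, as the spurious_fault() comment removed earlier in this diff notes). The recurring pattern, factored into a sketch with a hypothetical helper name:

#include <asm/pgtable_types.h>

/* Set @flags only when the entry is present; clear them otherwise. */
static pgprotval_t prot_gate_on_present(pgprotval_t prot, pgprotval_t flags)
{
	if (prot & _PAGE_PRESENT)
		return prot | flags;
	return prot & ~flags;
}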
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 79836d01f789..cdd0da9dd530 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,126 +141,11 @@ static inline int save_add_info(void) {return 1;}
static inline int save_add_info(void) {return 0;}
#endif
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-static void __init
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
- int overlap, i;
- unsigned long start_pfn, end_pfn;
-
- start_pfn = PFN_DOWN(start);
- end_pfn = PFN_UP(end);
-
- /*
- * For movablemem_map=acpi:
- *
- * SRAT: |_____| |_____| |_________| |_________| ......
- * node id: 0 1 1 2
- * hotpluggable: n y y n
- * movablemem_map: |_____| |_________|
- *
- * Using movablemem_map, we can prevent memblock from allocating memory
- * on ZONE_MOVABLE at boot time.
- *
- * Before parsing SRAT, memblock has already reserve some memory ranges
- * for other purposes, such as for kernel image. We cannot prevent
- * kernel from using these memory, so we need to exclude these memory
- * even if it is hotpluggable.
- * Furthermore, to ensure the kernel has enough memory to boot, we make
- * all the memory on the node which the kernel resides in
- * un-hotpluggable.
- */
- if (hotpluggable && movablemem_map.acpi) {
- /* Exclude ranges reserved by memblock. */
- struct memblock_type *rgn = &memblock.reserved;
-
- for (i = 0; i < rgn->cnt; i++) {
- if (end <= rgn->regions[i].base ||
- start >= rgn->regions[i].base +
- rgn->regions[i].size)
- continue;
-
- /*
- * If the memory range overlaps the memory reserved by
- * memblock, then the kernel resides in this node.
- */
- node_set(node, movablemem_map.numa_nodes_kernel);
-
- goto out;
- }
-
- /*
- * If the kernel resides in this node, then the whole node
- * should not be hotpluggable.
- */
- if (node_isset(node, movablemem_map.numa_nodes_kernel))
- goto out;
-
- insert_movablemem_map(start_pfn, end_pfn);
-
- /*
- * numa_nodes_hotplug nodemask represents which nodes are put
- * into movablemem_map.map[].
- */
- node_set(node, movablemem_map.numa_nodes_hotplug);
- goto out;
- }
-
- /*
- * For movablemem_map=nn[KMG]@ss[KMG]:
- *
- * SRAT: |_____| |_____| |_________| |_________| ......
- * node id: 0 1 1 2
- * user specified: |__| |___|
- * movablemem_map: |___| |_________| |______| ......
- *
- * Using movablemem_map, we can prevent memblock from allocating memory
- * on ZONE_MOVABLE at boot time.
- *
- * NOTE: In this case, SRAT info will be ingored.
- */
- overlap = movablemem_map_overlap(start_pfn, end_pfn);
- if (overlap >= 0) {
- /*
- * If part of this range is in movablemem_map, we need to
- * add the range after it to extend the range to the end
- * of the node, because from the min address specified to
- * the end of the node will be ZONE_MOVABLE.
- */
- start_pfn = max(start_pfn,
- movablemem_map.map[overlap].start_pfn);
- insert_movablemem_map(start_pfn, end_pfn);
-
- /*
- * Set the nodemask, so that if the address range on one node
- * is not continuse, we can add the subsequent ranges on the
- * same node into movablemem_map.
- */
- node_set(node, movablemem_map.numa_nodes_hotplug);
- } else {
- if (node_isset(node, movablemem_map.numa_nodes_hotplug))
- /*
- * Insert the range if we already have movable ranges
- * on the same node.
- */
- insert_movablemem_map(start_pfn, end_pfn);
- }
-out:
- return;
-}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
- u32 hotpluggable;
int node, pxm;
if (srat_disabled())
@@ -269,8 +154,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
goto out_err_bad_srat;
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
goto out_err;
- hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
- if (hotpluggable && !save_add_info())
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
goto out_err;
start = ma->base_address;
@@ -290,12 +174,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
node_set(node, numa_nodes_parsed);
- printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+ printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
- (unsigned long long) start, (unsigned long long) end - 1,
- hotpluggable ? "Hot Pluggable": "");
-
- handle_movablemem(node, start, end, hotpluggable);
+ (unsigned long long) start, (unsigned long long) end - 1);
return 0;
out_err_bad_srat:
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 53ea60458e01..3e724256dbee 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -521,6 +521,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
sd = &info->sd;
sd->domain = domain;
sd->node = node;
+ sd->acpi = device->handle;
/*
* Maybe the desired pci bus has been already scanned. In such case
* it is unnecessary to scan the pci bus with the given domain,busnum.
@@ -592,6 +593,14 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return bus;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_sysdata *sd = bridge->bus->sysdata;
+
+ ACPI_HANDLE_SET(&bridge->dev, sd->acpi);
+ return 0;
+}
+
int __init pci_acpi_init(void)
{
struct pci_dev *dev = NULL;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index ccd0ab3ab899..901177d75ff5 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -34,7 +34,6 @@ int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
-struct pci_bus *pci_root_bus;
const struct pci_raw_ops *__read_mostly raw_pci_ops;
const struct pci_raw_ops *__read_mostly raw_pci_ext_ops;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index dd8ca6f7223b..94919e307f8e 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -51,6 +51,7 @@ struct pcibios_fwaddrmap {
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
+static bool pcibios_fw_addr_done;
/* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
@@ -72,6 +73,9 @@ pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
unsigned long flags;
struct pcibios_fwaddrmap *map;
+ if (pcibios_fw_addr_done)
+ return;
+
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
map = pcibios_fwaddrmap_lookup(dev);
if (!map) {
@@ -97,6 +101,9 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
struct pcibios_fwaddrmap *map;
resource_size_t fw_addr = 0;
+ if (pcibios_fw_addr_done)
+ return 0;
+
spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
map = pcibios_fwaddrmap_lookup(dev);
if (map)
@@ -106,7 +113,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
return fw_addr;
}
-static void pcibios_fw_addr_list_del(void)
+static void __init pcibios_fw_addr_list_del(void)
{
unsigned long flags;
struct pcibios_fwaddrmap *entry, *next;
@@ -118,6 +125,7 @@ static void pcibios_fw_addr_list_del(void)
kfree(entry);
}
spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
+ pcibios_fw_addr_done = true;
}
static int
@@ -193,46 +201,46 @@ EXPORT_SYMBOL(pcibios_align_resource);
* as well.
*/
-static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
+static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
{
- struct pci_bus *bus;
- struct pci_dev *dev;
int idx;
struct resource *r;
- /* Depth-First Search on bus tree */
- list_for_each_entry(bus, bus_list, node) {
- if ((dev = bus->self)) {
- for (idx = PCI_BRIDGE_RESOURCES;
- idx < PCI_NUM_RESOURCES; idx++) {
- r = &dev->resource[idx];
- if (!r->flags)
- continue;
- if (!r->start ||
- pci_claim_resource(dev, idx) < 0) {
- /*
- * Something is wrong with the region.
- * Invalidate the resource to prevent
- * child resource allocations in this
- * range.
- */
- r->start = r->end = 0;
- r->flags = 0;
- }
- }
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ r = &dev->resource[idx];
+ if (!r->flags)
+ continue;
+ if (!r->start || pci_claim_resource(dev, idx) < 0) {
+ /*
+ * Something is wrong with the region.
+ * Invalidate the resource to prevent
+ * child resource allocations in this
+ * range.
+ */
+ r->start = r->end = 0;
+ r->flags = 0;
}
- pcibios_allocate_bus_resources(&bus->children);
}
}
+static void pcibios_allocate_bus_resources(struct pci_bus *bus)
+{
+ struct pci_bus *child;
+
+ /* Depth-First Search on bus tree */
+ if (bus->self)
+ pcibios_allocate_bridge_resources(bus->self);
+ list_for_each_entry(child, &bus->children, node)
+ pcibios_allocate_bus_resources(child);
+}
+
struct pci_check_idx_range {
int start;
int end;
};
-static void __init pcibios_allocate_resources(int pass)
+static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
{
- struct pci_dev *dev = NULL;
int idx, disabled, i;
u16 command;
struct resource *r;
@@ -244,14 +252,13 @@ static void __init pcibios_allocate_resources(int pass)
#endif
};
- for_each_pci_dev(dev) {
- pci_read_config_word(dev, PCI_COMMAND, &command);
- for (i = 0; i < ARRAY_SIZE(idx_range); i++)
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ for (i = 0; i < ARRAY_SIZE(idx_range); i++)
for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
r = &dev->resource[idx];
- if (r->parent) /* Already allocated */
+ if (r->parent) /* Already allocated */
continue;
- if (!r->start) /* Address not assigned at all */
+ if (!r->start) /* Address not assigned at all */
continue;
if (r->flags & IORESOURCE_IO)
disabled = !(command & PCI_COMMAND_IO);
@@ -270,44 +277,74 @@ static void __init pcibios_allocate_resources(int pass)
}
}
}
- if (!pass) {
- r = &dev->resource[PCI_ROM_RESOURCE];
- if (r->flags & IORESOURCE_ROM_ENABLE) {
- /* Turn the ROM off, leave the resource region,
- * but keep it unregistered. */
- u32 reg;
- dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
- r->flags &= ~IORESOURCE_ROM_ENABLE;
- pci_read_config_dword(dev,
- dev->rom_base_reg, &reg);
- pci_write_config_dword(dev, dev->rom_base_reg,
+ if (!pass) {
+ r = &dev->resource[PCI_ROM_RESOURCE];
+ if (r->flags & IORESOURCE_ROM_ENABLE) {
+ /* Turn the ROM off, leave the resource region,
+ * but keep it unregistered. */
+ u32 reg;
+ dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
+ r->flags &= ~IORESOURCE_ROM_ENABLE;
+ pci_read_config_dword(dev, dev->rom_base_reg, &reg);
+ pci_write_config_dword(dev, dev->rom_base_reg,
reg & ~PCI_ROM_ADDRESS_ENABLE);
- }
}
}
}
-static int __init pcibios_assign_resources(void)
+static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pcibios_allocate_dev_resources(dev, pass);
+
+ child = dev->subordinate;
+ if (child)
+ pcibios_allocate_resources(child, pass);
+ }
+}
+
+static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
{
- struct pci_dev *dev = NULL;
struct resource *r;
- if (!(pci_probe & PCI_ASSIGN_ROMS)) {
- /*
- * Try to use BIOS settings for ROMs, otherwise let
- * pci_assign_unassigned_resources() allocate the new
- * addresses.
- */
- for_each_pci_dev(dev) {
- r = &dev->resource[PCI_ROM_RESOURCE];
- if (!r->flags || !r->start)
- continue;
- if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
- r->end -= r->start;
- r->start = 0;
- }
- }
+ /*
+ * Try to use BIOS settings for ROMs, otherwise let
+ * pci_assign_unassigned_resources() allocate the new
+ * addresses.
+ */
+ r = &dev->resource[PCI_ROM_RESOURCE];
+ if (!r->flags || !r->start)
+ return;
+
+ if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
+ r->end -= r->start;
+ r->start = 0;
}
+}
+static void pcibios_allocate_rom_resources(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pcibios_allocate_dev_rom_resource(dev);
+
+ child = dev->subordinate;
+ if (child)
+ pcibios_allocate_rom_resources(child);
+ }
+}
+
+static int __init pcibios_assign_resources(void)
+{
+ struct pci_bus *bus;
+
+ if (!(pci_probe & PCI_ASSIGN_ROMS))
+ list_for_each_entry(bus, &pci_root_buses, node)
+ pcibios_allocate_rom_resources(bus);
pci_assign_unassigned_resources();
pcibios_fw_addr_list_del();
@@ -315,12 +352,32 @@ static int __init pcibios_assign_resources(void)
return 0;
}
+void pcibios_resource_survey_bus(struct pci_bus *bus)
+{
+ dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");
+
+ pcibios_allocate_bus_resources(bus);
+
+ pcibios_allocate_resources(bus, 0);
+ pcibios_allocate_resources(bus, 1);
+
+ if (!(pci_probe & PCI_ASSIGN_ROMS))
+ pcibios_allocate_rom_resources(bus);
+}
+
void __init pcibios_resource_survey(void)
{
+ struct pci_bus *bus;
+
DBG("PCI: Allocating resources\n");
- pcibios_allocate_bus_resources(&pci_root_buses);
- pcibios_allocate_resources(0);
- pcibios_allocate_resources(1);
+
+ list_for_each_entry(bus, &pci_root_buses, node)
+ pcibios_allocate_bus_resources(bus);
+
+ list_for_each_entry(bus, &pci_root_buses, node)
+ pcibios_allocate_resources(bus, 0);
+ list_for_each_entry(bus, &pci_root_buses, node)
+ pcibios_allocate_resources(bus, 1);
e820_reserve_resources_late();
/*
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index 4a2ab9cb3659..4db96fb1c232 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -30,7 +30,7 @@ int __init pci_legacy_init(void)
}
printk("PCI: Probing PCI hardware\n");
- pci_root_bus = pcibios_scan_root(0);
+ pcibios_scan_root(0);
return 0;
}
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index b96b14c250b6..72c229f9ebcf 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -152,7 +152,7 @@ int __init pci_numaq_init(void)
raw_pci_ops = &pci_direct_conf1_mq;
- pci_root_bus = pcibios_scan_root(0);
+ pcibios_scan_root(0);
if (num_online_nodes() > 1)
for_each_online_node(quad) {
if (quad == 0)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 70b2a3a305d6..5f2ecaf3f9d8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -69,11 +69,6 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
-static inline bool efi_is_native(void)
-{
- return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
-}
-
unsigned long x86_efi_facility;
/*
@@ -85,9 +80,10 @@ int efi_enabled(int facility)
}
EXPORT_SYMBOL(efi_enabled);
+static bool __initdata disable_runtime = false;
static int __init setup_noefi(char *arg)
{
- clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ disable_runtime = true;
return 0;
}
early_param("noefi", setup_noefi);
@@ -734,7 +730,7 @@ void __init efi_init(void)
if (!efi_is_native())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
else {
- if (efi_runtime_init())
+ if (disable_runtime || efi_runtime_init())
return;
set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
}
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index f2fe78ff22cc..e6d55f0064df 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -25,7 +25,7 @@
16 i386 lchown sys_lchown16
17 i386 break
18 i386 oldstat sys_stat
-19 i386 lseek sys_lseek sys32_lseek
+19 i386 lseek sys_lseek compat_sys_lseek
20 i386 getpid sys_getpid
21 i386 mount sys_mount compat_sys_mount
22 i386 umount sys_oldumount
@@ -98,8 +98,8 @@
89 i386 readdir sys_old_readdir compat_sys_old_readdir
90 i386 mmap sys_old_mmap sys32_mmap
91 i386 munmap sys_munmap
-92 i386 truncate sys_truncate
-93 i386 ftruncate sys_ftruncate
+92 i386 truncate sys_truncate compat_sys_truncate
+93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
94 i386 fchmod sys_fchmod
95 i386 fchown sys_fchown16
96 i386 getpriority sys_getpriority
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 34bc4cee8887..09ea61d2e02f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_table(cpu);
ctxt->flags = VGCF_IN_KERNEL;
- ctxt->user_regs.ds = __USER_DS;
- ctxt->user_regs.es = __USER_DS;
ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
ctxt->user_regs.fs = __KERNEL_PERCPU;
@@ -310,35 +308,41 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
- ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
- xen_copy_trap_info(ctxt->trap_ctxt);
+ {
+ ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+ ctxt->user_regs.ds = __USER_DS;
+ ctxt->user_regs.es = __USER_DS;
- ctxt->ldt_ents = 0;
+ xen_copy_trap_info(ctxt->trap_ctxt);
- BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+ ctxt->ldt_ents = 0;
- gdt_mfn = arbitrary_virt_to_mfn(gdt);
- make_lowmem_page_readonly(gdt);
- make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+ BUG_ON((unsigned long)gdt & ~PAGE_MASK);
- ctxt->gdt_frames[0] = gdt_mfn;
- ctxt->gdt_ents = GDT_ENTRIES;
+ gdt_mfn = arbitrary_virt_to_mfn(gdt);
+ make_lowmem_page_readonly(gdt);
+ make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
- ctxt->user_regs.cs = __KERNEL_CS;
- ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+ ctxt->gdt_frames[0] = gdt_mfn;
+ ctxt->gdt_ents = GDT_ENTRIES;
- ctxt->kernel_ss = __KERNEL_DS;
- ctxt->kernel_sp = idle->thread.sp0;
+ ctxt->kernel_ss = __KERNEL_DS;
+ ctxt->kernel_sp = idle->thread.sp0;
#ifdef CONFIG_X86_32
- ctxt->event_callback_cs = __KERNEL_CS;
- ctxt->failsafe_callback_cs = __KERNEL_CS;
+ ctxt->event_callback_cs = __KERNEL_CS;
+ ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
- ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
- ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+ ctxt->event_callback_eip =
+ (unsigned long)xen_hypervisor_callback;
+ ctxt->failsafe_callback_eip =
+ (unsigned long)xen_failsafe_callback;
+ }
+ ctxt->user_regs.cs = __KERNEL_CS;
+ ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 83e866d714ce..f7a080ef0354 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
if (per_cpu(lock_spinners, cpu) == xl) {
ADD_STATS(released_slow_kicked, 1);
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
- break;
}
}
}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index cb557be492b1..35876ffac11d 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,13 +9,16 @@ config XTENSA
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
+ select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_PCI_IOMAP
+ select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_OPTIONAL_GPIOLIB
select CLONE_BACKWARDS
select IRQ_DOMAIN
+ select HAVE_OPROFILE
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
@@ -31,7 +34,7 @@ config GENERIC_HWEIGHT
def_bool y
config GENERIC_GPIO
- def_bool y
+ bool
config ARCH_HAS_ILOG2_U32
def_bool n
@@ -71,6 +74,12 @@ config XTENSA_VARIANT_DC232B
help
This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
+config XTENSA_VARIANT_DC233C
+ bool "dc233c - Diamond 233L Standard Core Rev.C (LE)"
+ select MMU
+ help
+ This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
+
config XTENSA_VARIANT_S6000
bool "s6000 - Stretch software configurable processor"
select VARIANT_IRQ_SWITCH
@@ -197,6 +206,42 @@ config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
+config BLK_DEV_SIMDISK
+ tristate "Host file-based simulated block device support"
+ default n
+ depends on XTENSA_PLATFORM_ISS
+ help
+ Create block devices that map to files in the host file system.
+	  Device binding to a host file may be changed at runtime via the
+	  proc interface, provided the device is not in use.
+
+config BLK_DEV_SIMDISK_COUNT
+ int "Number of host file-based simulated block devices"
+ range 1 10
+ depends on BLK_DEV_SIMDISK
+ default 2
+ help
+ This is the default minimal number of created block devices.
+	  The kernel/module parameter 'simdisk_count' may be used to change
+	  this value at runtime. More file names (but no more than 10) may
+	  be specified as parameters, and simdisk_count grows accordingly.
+
+config SIMDISK0_FILENAME
+ string "Host filename for the first simulated device"
+ depends on BLK_DEV_SIMDISK = y
+ default ""
+ help
+ Attach a first simdisk to a host file. Conventionally, this file
+ contains a root file system.
+
+config SIMDISK1_FILENAME
+ string "Host filename for the second simulated device"
+ depends on BLK_DEV_SIMDISK = y && BLK_DEV_SIMDISK_COUNT != 1
+ default ""
+ help
+	  Another simulated disk in a host file, for buildroot-independent
+	  storage.
+
source "mm/Kconfig"
source "drivers/pcmcia/Kconfig"
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 0aa72702f179..136224b74d4f 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -15,6 +15,7 @@
variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b
+variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c
variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
@@ -86,9 +87,10 @@ core-y += arch/xtensa/kernel/ arch/xtensa/mm/
core-y += $(buildvar) $(buildplf)
libs-y += arch/xtensa/lib/ $(LIBGCC)
+drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
ifneq ($(CONFIG_BUILTIN_DTB),"")
-core-$(CONFIG_OF) += arch/xtensa/boot/
+core-$(CONFIG_OF) += arch/xtensa/boot/dts/
endif
boot := arch/xtensa/boot
@@ -101,7 +103,7 @@ zImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
%.dtb:
- $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
define archhelp
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 818647e815d7..64ffc4b53df6 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -25,18 +25,6 @@ bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
-
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
-endif
-
-# Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts FORCE
- $(call if_changed_dep,dtc)
-
-clean-files := *.dtb.S
-
zImage Image: $(bootdir-y)
$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
new file mode 100644
index 000000000000..5f711bba8307
--- /dev/null
+++ b/arch/xtensa/boot/dts/Makefile
@@ -0,0 +1,15 @@
+#
+# arch/xtensa/boot/dts/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+clean-files := *.dtb.S
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index c3f289174c10..e7fb447bce8e 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_ATOMIC_H
@@ -24,11 +24,11 @@
/*
* This Xtensa implementation assumes that the right mechanism
- * for exclusion is for locking interrupts to level 1.
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
*
* Locking interrupts looks like this:
*
- * rsil a15, 1
+ * rsil a15, LOCKLEVEL
* <code>
* wsr a15, PS
* rsync
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index aed7ad68ca46..0593de689b56 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -12,6 +12,7 @@
#define _XTENSA_CHECKSUM_H
#include <linux/in6.h>
+#include <asm/uaccess.h>
#include <variant/core.h>
/*
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 264d5fa450d8..eacb25a41718 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -84,7 +84,8 @@ typedef struct {
elf_greg_t sar;
elf_greg_t windowstart;
elf_greg_t windowbase;
- elf_greg_t reserved[8+48];
+ elf_greg_t threadptr;
+ elf_greg_t reserved[7+48];
elf_greg_t a[64];
} xtensa_gregset_t;
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index c90ea5bfa1b4..d7546c94da52 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
#include <asm-generic/pgtable.h>
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index e5fb6b0abdf4..7e409a5b0ec5 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_PROCESSOR_H
@@ -68,7 +68,7 @@
/* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts.
*/
-#define LOCKLEVEL 1
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 682b1deac1f2..81f31bc9dde0 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -38,6 +38,7 @@ struct pt_regs {
unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */
unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
@@ -48,7 +49,7 @@ struct pt_regs {
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
- unsigned long areg[16]; /* 128 (64) */
+ unsigned long areg[16];
};
#include <variant/core.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 76096a4e5b8d..b24de6717020 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -88,6 +88,7 @@
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
#define PS_INTLEVEL_MASK 0x0000000F
/* DBREAKCn register fields. */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 405a8c49ff2c..8d5d9dfadb09 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -74,7 +74,7 @@ static inline int strcmp(const char *__cs, const char *__ct)
"beqz %2, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
- "sub %2, %3, %2"
+ "sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct));
@@ -99,7 +99,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"beqz %3, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
- "sub %2, %3, %2"
+ "sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct), "r" (__cs+__n));
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 175b3d5e1b01..9e85ce8bd8dd 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_TIMEX_H
@@ -19,13 +19,13 @@
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
+#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 54f70440185e..b5464ef3cf66 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -20,4 +20,28 @@
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+static inline void spill_registers(void)
+{
+ unsigned int a0, ps;
+
+ __asm__ __volatile__ (
+ "movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
+ "mov a12, a0\n\t"
+ "rsr a13, sar\n\t"
+ "xsr a14, ps\n\t"
+ "movi a0, _spill_registers\n\t"
+ "rsync\n\t"
+ "callx0 a0\n\t"
+ "mov a0, a12\n\t"
+ "wsr a13, sar\n\t"
+ "wsr a14, ps\n\t"
+ : : "a" (&a0), "a" (&ps)
+#if defined(CONFIG_FRAME_POINTER)
+ : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
+#else
+ : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+#endif
+ "memory");
+}
+
#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
index dacf716dd3e0..586756ee267a 100644
--- a/arch/xtensa/include/uapi/asm/signal.h
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -102,16 +102,7 @@ typedef struct {
#ifndef __ASSEMBLY__
-#define SIG_BLOCK 0 /* for blocking signals */
-#define SIG_UNBLOCK 1 /* for unblocking signals */
-#define SIG_SETMASK 2 /* for setting the signal mask */
-
-/* Type of a signal handler. */
-typedef void (*__sighandler_t)(int);
-
-#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
-#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+#include <asm-generic/signal-defs.h>
#ifndef __KERNEL__
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 19fac3f543a2..51940fec6990 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -728,8 +728,13 @@ __SYSCALL(330, sys_prlimit64, 4)
#define __NR_kcmp 331
__SYSCALL(331, sys_kcmp, 5)
+#define __NR_finit_module 332
+__SYSCALL(332, sys_finit_module, 3)
-#define __NR_syscall_count 332
+#define __NR_accept4 333
+__SYSCALL(333, sys_accept4, 4)
+
+#define __NR_syscall_count 334
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 0701fad170db..1915c7c889ba 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -42,6 +42,7 @@ int main(void)
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+ DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 3777fec85e7c..63845f950792 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2007 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -130,6 +130,11 @@ _user_exception:
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
+#if XCHAL_HAVE_THREADPTR
+ rur a2, threadptr
+ s32i a2, a1, PT_THREADPTR
+#endif
+
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
@@ -349,15 +354,16 @@ common_exception:
* so we can allow exceptions and interrupts (*) again.
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
*
- * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
- * (interrupts disabled) and if this exception is not an interrupt.
+ * (*) We only allow interrupts of higher priority than the current IRQ
*/
rsr a3, ps
addi a0, a0, -4
movi a2, 1
- extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
- moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ # a3 = PS.INTLEVEL
+ movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority
+ moveqz a3, a2, a0 # a3 = IRQ level iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, exccause
@@ -398,7 +404,7 @@ common_exception:
callx4 a4
/* Jump here for exception exit */
-
+ .global common_exception_return
common_exception_return:
/* Jump if we are returning from kernel exceptions. */
@@ -509,6 +515,11 @@ user_exception_exit:
* (if we have restored WSBITS-1 frames).
*/
+#if XCHAL_HAVE_THREADPTR
+ l32i a3, a1, PT_THREADPTR
+ wur a3, threadptr
+#endif
+
2: j common_exception_exit
/* This is the kernel exception exit.
@@ -641,19 +652,51 @@ common_exception_exit:
l32i a0, a1, PT_DEPC
l32i a3, a1, PT_AREG3
+ _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ wsr a0, depc
l32i a2, a1, PT_AREG2
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfde
+1:
/* Restore a0...a3 and return */
+ rsr a0, ps
+ extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ movi a0, 2f
+ slli a2, a2, 4
+ add a0, a2, a0
+ l32i a2, a1, PT_AREG2
+ jx a0
+
+ .macro irq_exit_level level
+ .align 16
+ .if XCHAL_EXCM_LEVEL >= \level
+ l32i a0, a1, PT_PC
+ wsr a0, epc\level
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
- rfe
+ rfi \level
+ .endif
+ .endm
-1: wsr a0, depc
+ .align 16
+2:
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
- rfde
+ rfe
+
+ .align 16
+ /* no rfi for level-1 irq, handled by rfe above */
+ nop
+
+ irq_exit_level 2
+ irq_exit_level 3
+ irq_exit_level 4
+ irq_exit_level 5
+ irq_exit_level 6
ENDPROC(kernel_exception)
@@ -753,7 +796,7 @@ ENTRY(unrecoverable_exception)
wsr a1, windowbase
rsync
- movi a1, (1 << PS_WOE_BIT) | 1
+ movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
wsr a1, ps
rsync
@@ -1474,7 +1517,7 @@ ENTRY(_spill_registers)
l32i a1, a3, EXC_TABLE_KSTK
wsr a3, excsave1
- movi a4, (1 << PS_WOE_BIT) | 1
+ movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
wsr a4, ps
rsync
@@ -1922,7 +1965,7 @@ ENTRY(_switch_to)
s32i a6, a3, EXC_TABLE_FIXUP
s32i a7, a3, EXC_TABLE_KSTK
- /* restore context of the task that 'next' addresses */
+ /* restore context of the task 'next' */
l32i a0, a13, THREAD_RA # restore return address
l32i a1, a13, THREAD_SP # restore stack pointer
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 91d9095284de..df88f98737f4 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -128,14 +128,14 @@ ENTRY(_startup)
wsr a0, cpenable
#endif
- /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+ /* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
*
* Note: PS.EXCM must be cleared before using any loop
* instructions; otherwise, they are silently disabled, and
* at most one iteration of the loop is executed.
*/
- movi a1, 1
+ movi a1, LOCKLEVEL
wsr a1, ps
rsync
@@ -211,7 +211,8 @@ ENTRY(_startup)
movi a1, init_thread_union
addi a1, a1, KERNEL_STACK_SIZE
- movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
+ movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+ # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
wsr a2, ps # (enable reg-windows; progmode stack)
rsync
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 0dd5784416d3..5cd82e9f601c 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -259,9 +259,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
-// FIXME: we need to set THREADPTR in thread_info...
+
+ /* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
- childregs->areg[2] = childregs->areg[6];
+ childregs->threadptr = childregs->areg[5];
} else {
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_kernel_thread, 1);
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 61fb2e9e9035..562fac664751 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -53,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
xtensa_gregset_t __user *gregset = uregs;
- unsigned long wm = regs->wmask;
unsigned long wb = regs->windowbase;
- int live, i;
+ int i;
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
return -EIO;
@@ -67,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
__put_user(regs->lcount, &gregset->lcount);
__put_user(regs->windowstart, &gregset->windowstart);
__put_user(regs->windowbase, &gregset->windowbase);
+ __put_user(regs->threadptr, &gregset->threadptr);
- live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
-
- for (i = 0; i < live; i++)
- __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
- for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
- __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+ for (i = 0; i < XCHAL_NUM_AREGS; i++)
+ __put_user(regs->areg[i],
+ gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
return 0;
}
@@ -84,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
xtensa_gregset_t *gregset = uregs;
const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
unsigned long ps;
- unsigned long wb;
+ unsigned long wb, ws;
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
return -EIO;
@@ -94,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
__get_user(regs->lbeg, &gregset->lbeg);
__get_user(regs->lend, &gregset->lend);
__get_user(regs->lcount, &gregset->lcount);
- __get_user(regs->windowstart, &gregset->windowstart);
+ __get_user(ws, &gregset->windowstart);
__get_user(wb, &gregset->windowbase);
+ __get_user(regs->threadptr, &gregset->threadptr);
regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
if (wb >= XCHAL_NUM_AREGS / 4)
return -EFAULT;
- regs->windowbase = wb;
+ if (wb != regs->windowbase || ws != regs->windowstart) {
+ unsigned long rotws, wmask;
+
+ rotws = (((ws | (ws << WSBITS)) >> wb) &
+ ((1 << WSBITS) - 1)) & ~1;
+ wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+ (rotws & 0xF) | 1;
+ regs->windowbase = wb;
+ regs->windowstart = ws;
+ regs->wmask = wmask;
+ }
if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
- gregset->a, wb * 16))
+ gregset->a, wb * 16))
return -EFAULT;
- if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+ if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+ (WSBITS - wb) * 16))
return -EFAULT;
return 0;
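
For reference, the windowstart/wmask recomputation above can be exercised with a small host-side sketch; this is not part of the patch, and WSBITS == 8 (i.e. XCHAL_NUM_AREGS == 32) is only an assumed example configuration.

/* Host-side sketch, assuming WSBITS == 8: mirrors the rotws/wmask
 * computation added to ptrace_setregs() so the packing can be checked
 * with concrete numbers.
 */
#include <stdio.h>
#include <strings.h>		/* POSIX ffs() */

#define WSBITS 8

static unsigned long wmask_of(unsigned long ws, unsigned long wb)
{
	/* rotate windowstart so the frame at windowbase ends up at bit 0 */
	unsigned long rotws = (((ws | (ws << WSBITS)) >> wb) &
			       ((1 << WSBITS) - 1)) & ~1;

	return ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
	       (rotws & 0xF) | 1;
}

int main(void)
{
	/* windowbase 2, windowstart with frames marked at slots 2, 3 and 5 */
	printf("wmask = 0x%lx\n", wmask_of(0x2c, 2));	/* prints 0x7b */
	return 0;
}
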
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 24c1a57abb40..6dd25ecde3f5 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -328,6 +328,27 @@ extern char _UserExceptionVector_literal_start;
extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start;
extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
+
#ifdef CONFIG_S32C1I_SELFTEST
@@ -482,6 +503,27 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
__pa(&_DoubleExceptionVector_text_end), 0);
+#if XCHAL_EXCM_LEVEL >= 2
+ mem_reserve(__pa(&_Level2InterruptVector_text_start),
+ __pa(&_Level2InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ mem_reserve(__pa(&_Level3InterruptVector_text_start),
+ __pa(&_Level3InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ mem_reserve(__pa(&_Level4InterruptVector_text_start),
+ __pa(&_Level4InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ mem_reserve(__pa(&_Level5InterruptVector_text_start),
+ __pa(&_Level5InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ mem_reserve(__pa(&_Level6InterruptVector_text_start),
+ __pa(&_Level6InterruptVector_text_end), 0);
+#endif
+
bootmem_init();
#ifdef CONFIG_OF
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index d7590dddd084..718eca1850bd 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -337,7 +337,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe *frame;
int err = 0;
int signal;
- unsigned long sp, ra;
+ unsigned long sp, ra, tp;
sp = regs->areg[1];
@@ -391,7 +391,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Return context not modified until this point.
*/
- /* Set up registers for signal handler */
+ /* Set up registers for signal handler; preserve the threadptr */
+ tp = regs->threadptr;
start_thread(regs, (unsigned long) ka->sa.sa_handler,
(unsigned long) frame);
@@ -402,6 +403,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->areg[6] = (unsigned long) signal;
regs->areg[7] = (unsigned long) &frame->info;
regs->areg[8] = (unsigned long) &frame->uc;
+ regs->threadptr = tp;
/* Set access mode to USER_DS. Nomenclature is outdated, but
* functionality is used in uaccess.h
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 54fa8425cee2..5d3f7a119ed1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
#include <uapi/asm/unistd.h>
};
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
{
return sys_fadvise64_64(fd, offset, len, advice);
}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vmm;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vmm || addr + len <= vmm->vm_start)
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
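
As a rough illustration (not part of the patch; the SHMLBA and PAGE_SHIFT values below are assumed, not taken from this tree), COLOUR_ALIGN() rounds an address up to an SHMLBA boundary and then adds the low "colour" bits of the page offset, so a shared mapping lands on the same cache colour as its backing file offset:

/* Illustrative-only userspace sketch of the COLOUR_ALIGN() macro above,
 * assuming SHMLBA == 0x20000 and PAGE_SHIFT == 12.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     0x20000UL

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x2000345UL;	/* arbitrary hint address */
	unsigned long pgoff = 3;		/* file offset of 3 pages */

	/* prints 0x2023000: rounded to 0x2020000, plus colour 0x3000 */
	printf("aligned: 0x%lx\n", COLOUR_ALIGN(addr, pgoff));
	return 0;
}
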
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 01e0111bf787..923db5c15278 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -37,6 +37,7 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
+#include <asm/traps.h>
#ifdef CONFIG_KGDB
extern int gdb_enter;
@@ -193,28 +194,49 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
}
/*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
+ * PS.INTLEVEL is the current IRQ priority level.
*/
-unsigned long ignored_level1_interrupts;
extern void do_IRQ(int, struct pt_regs *);
-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
{
- unsigned long intread = get_sr (interrupt);
- unsigned long intenable = get_sr (intenable);
- int i, mask;
-
- /* Handle all interrupts (no priorities).
- * (Clear the interrupt before processing, in case it's
- * edge-triggered or software-generated)
- */
+ static const unsigned int_level_mask[] = {
+ 0,
+ XCHAL_INTLEVEL1_MASK,
+ XCHAL_INTLEVEL2_MASK,
+ XCHAL_INTLEVEL3_MASK,
+ XCHAL_INTLEVEL4_MASK,
+ XCHAL_INTLEVEL5_MASK,
+ XCHAL_INTLEVEL6_MASK,
+ XCHAL_INTLEVEL7_MASK,
+ };
+ unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
+
+ if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
+ return;
- for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
- if (mask & (intread & intenable)) {
- set_sr (mask, intclear);
- do_IRQ (i,regs);
+ for (;;) {
+ unsigned intread = get_sr(interrupt);
+ unsigned intenable = get_sr(intenable);
+ unsigned int_at_level = intread & intenable &
+ int_level_mask[level];
+
+ if (!int_at_level)
+ return;
+
+ /*
+ * Clear the interrupt before processing, in case it's
+ * edge-triggered or software-generated
+ */
+ while (int_at_level) {
+ unsigned i = __ffs(int_at_level);
+ unsigned mask = 1 << i;
+
+ int_at_level ^= mask;
+ set_sr(mask, intclear);
+ do_IRQ(i, regs);
}
}
}
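
The reworked loop services only the interrupts whose level matches PS.INTLEVEL, lowest interrupt number first, clearing each one before dispatching it. A minimal host-side model of that selection follows; the pending/enable values are made up, and the level-1 mask is borrowed from the dc233c variant header further down purely as an example.

/* Host-side sketch of the dispatch loop in do_interrupt() above. */
#include <stdio.h>

static void do_IRQ(unsigned irq) { printf("IRQ %u\n", irq); }

static void dispatch(unsigned pending, unsigned enabled, unsigned level_mask)
{
	unsigned at_level = pending & enabled & level_mask;

	while (at_level) {
		unsigned irq  = __builtin_ctz(at_level);	/* like __ffs() */
		unsigned mask = 1u << irq;

		at_level &= ~mask;	/* models the "intclear" write */
		do_IRQ(irq);
	}
}

int main(void)
{
	/* pending bits 1, 6 and 8; only 1 and 6 are level-1 here */
	dispatch(0x00000142, 0xffffffff, 0x001F80FF);
	return 0;
}
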
@@ -392,26 +414,6 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
return sp;
}
-static inline void spill_registers(void)
-{
- unsigned int a0, ps;
-
- __asm__ __volatile__ (
- "movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
- "mov a12, a0\n\t"
- "rsr a13, sar\n\t"
- "xsr a14, ps\n\t"
- "movi a0, _spill_registers\n\t"
- "rsync\n\t"
- "callx0 a0\n\t"
- "mov a0, a12\n\t"
- "wsr a13, sar\n\t"
- "wsr a14, ps\n\t"
- :: "a" (&a0), "a" (&ps)
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
- "memory");
-}
-
void show_trace(struct task_struct *task, unsigned long *sp)
{
unsigned long a0, a1, pc;
@@ -524,7 +526,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 68df35f66ce3..82109b42e240 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -10,7 +10,7 @@
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -366,6 +366,41 @@ ENTRY(_DebugInterruptVector)
ENDPROC(_DebugInterruptVector)
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space. With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the duration of IRQ handling.
+ */
+
+ .macro irq_entry_level level
+
+ .if XCHAL_EXCM_LEVEL >= \level
+ .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+ wsr a0, epc1
+ rsr a0, epc\level
+ xsr a0, epc1
+ # branch to user or kernel vector
+ j _SimulateUserKernelVectorException
+ .endif
+
+ .endm
+
+ irq_entry_level 2
+ irq_entry_level 3
+ irq_entry_level 4
+ irq_entry_level 5
+ irq_entry_level 6
+
+
/* Window overflow and underflow handlers.
* The handlers must be 64 bytes apart, first starting with the underflow
* handlers underflow-4 to underflow-12, then the overflow handlers
@@ -396,6 +431,26 @@ ENTRY_ALIGN64(_WindowOverflow4)
ENDPROC(_WindowOverflow4)
+#if XCHAL_EXCM_LEVEL >= 2
+ /* Not a window vector - but a convenient location
+ * (where we know there's space) for continuation of
+ * medium priority interrupt dispatch code.
+ * On entry here, a0 contains PS, and EPC2 contains saved a0:
+ */
+ .align 4
+_SimulateUserKernelVectorException:
+ wsr a0, excsave2
+ movi a0, 4 # LEVEL1_INTERRUPT cause
+ wsr a0, exccause
+ rsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
+ rsr a0, excsave2 # restore a0
+ j _KernelExceptionVector # simulate kernel vector exception
+1: rsr a0, excsave2 # restore a0
+ j _UserExceptionVector # simulate user vector exception
+#endif
+
+
/* 4-Register Window Underflow Vector (Handler) */
ENTRY_ALIGN64(_WindowUnderflow4)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 255154f820b7..14695240536d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -134,6 +134,26 @@ SECTIONS
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+ RELOCATE_ENTRY(_Level2InterruptVector_text,
+ .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ RELOCATE_ENTRY(_Level3InterruptVector_text,
+ .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ RELOCATE_ENTRY(_Level4InterruptVector_text,
+ .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ RELOCATE_ENTRY(_Level5InterruptVector_text,
+ .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ RELOCATE_ENTRY(_Level6InterruptVector_text,
+ .Level6InterruptVector.text);
+#endif
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
@@ -177,11 +197,53 @@ SECTIONS
XCHAL_DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
+#undef LAST
+#define LAST .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR (_Level2InterruptVector_text,
+ .Level2InterruptVector.text,
+ XCHAL_INTLEVEL2_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR (_Level3InterruptVector_text,
+ .Level3InterruptVector.text,
+ XCHAL_INTLEVEL3_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR (_Level4InterruptVector_text,
+ .Level4InterruptVector.text,
+ XCHAL_INTLEVEL4_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR (_Level5InterruptVector_text,
+ .Level5InterruptVector.text,
+ XCHAL_INTLEVEL5_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR (_Level6InterruptVector_text,
+ .Level6InterruptVector.text,
+ XCHAL_INTLEVEL6_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level6InterruptVector.text
+#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
XCHAL_KERNEL_VECTOR_VADDR - 4,
- SIZEOF(.DebugInterruptVector.text),
- .DebugInterruptVector.text)
+ SIZEOF(LAST), LAST)
+#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
XCHAL_KERNEL_VECTOR_VADDR,
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
new file mode 100644
index 000000000000..69ffbe80f184
--- /dev/null
+++ b/arch/xtensa/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
new file mode 100644
index 000000000000..66f32ee2c982
--- /dev/null
+++ b/arch/xtensa/oprofile/backtrace.c
@@ -0,0 +1,171 @@
+/**
+ * @file backtrace.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+/* A struct that maps to the part of the frame containing the a0 and
+ * a1 registers.
+ */
+struct frame_start {
+ unsigned long a0;
+ unsigned long a1;
+};
+
+static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
+{
+ unsigned long windowstart = regs->windowstart;
+ unsigned long windowbase = regs->windowbase;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+ unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
+ int index;
+
+ /* First add the current PC to the trace. */
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+
+ /* Two steps:
+ *
+ * 1. Look through the register window for the
+ * previous PCs in the call trace.
+ *
+ * 2. Look on the stack.
+ */
+
+ /* Step 1. */
+ /* Rotate WINDOWSTART to move the bit corresponding to
+ * the current window to the bit #0.
+ */
+ windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+ /* Look for bits that are set, they correspond to
+ * valid windows.
+ */
+ for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+ if (windowstart & (1 << index)) {
+ /* Read a0 and a1 from the
+ * corresponding position in AREGs.
+ */
+ a0 = regs->areg[index * 4];
+ a1 = regs->areg[index * 4 + 1];
+ /* Get the PC from a0 and a1. */
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Add the PC to the trace. */
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ }
+
+ /* Step 2. */
+ /* We are done with the register window, we need to
+ * look through the stack.
+ */
+ if (depth > 0) {
+ /* Start from the a1 register. */
+ /* a1 = regs->areg[1]; */
+ while (a0 != 0 && depth--) {
+
+ struct frame_start frame_start;
+ /* Get the location for a1, a0 for the
+ * previous frame from the current a1.
+ */
+ unsigned long *psp = (unsigned long *)a1;
+ psp -= 4;
+
+ /* Check if the region is OK to access. */
+ if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
+ return;
+ /* Copy a1, a0 from user space stack frame. */
+ if (__copy_from_user_inatomic(&frame_start, psp,
+ sizeof(frame_start)))
+ return;
+
+ a0 = frame_start.a0;
+ a1 = frame_start.a1;
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ }
+ }
+}
+
+static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
+{
+ unsigned long pc = regs->pc;
+ unsigned long *psp;
+ unsigned long sp_start, sp_end;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+
+ sp_start = a1 & ~(THREAD_SIZE-1);
+ sp_end = sp_start + THREAD_SIZE;
+
+ /* Spill the register window to the stack first. */
+ spill_registers();
+
+ /* Read the stack frames one by one and create the PC
+ * from the a0 and a1 registers saved there.
+ */
+ while (a1 > sp_start && a1 < sp_end && depth--) {
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Add the PC to the trace. */
+ if (kernel_text_address(pc))
+ oprofile_add_trace(pc);
+
+ if (pc == (unsigned long) &common_exception_return) {
+ regs = (struct pt_regs *)a1;
+ if (user_mode(regs)) {
+ pc = regs->pc;
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ return xtensa_backtrace_user(regs, depth);
+ }
+ a0 = regs->areg[0];
+ a1 = regs->areg[1];
+ continue;
+ }
+
+ psp = (unsigned long *)a1;
+
+ a0 = *(psp - 4);
+ a1 = *(psp - 3);
+
+ if (a1 <= (unsigned long)psp)
+ return;
+
+ }
+ return;
+}
+
+void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ if (user_mode(regs))
+ xtensa_backtrace_user(regs, depth);
+ else
+ xtensa_backtrace_kernel(regs, depth);
+}
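
One step of the kernel stack walk above reads the previous frame's a0/a1 pair from four and three words below the current a1 (the a0 = *(psp - 4); a1 = *(psp - 3) lines). A self-contained sketch of that single step, using a fake frame with made-up values:

/* Host-side sketch: mimic one step of the frame walk in
 * xtensa_backtrace_kernel(), reading the saved a0/a1 pair from the
 * words just below the current a1.
 */
#include <stdio.h>

struct fake_frame {
	unsigned long save[4];		/* made-up spill area: a0, a1, ... */
	unsigned long locals[4];
};

int main(void)
{
	struct fake_frame caller = { { 0x40001234, 0x7fff0100, 0, 0 }, { 0 } };
	unsigned long a1 = (unsigned long)&caller + sizeof(caller.save);
	unsigned long *psp = (unsigned long *)a1;

	unsigned long prev_a0 = *(psp - 4);	/* 0x40001234 */
	unsigned long prev_a1 = *(psp - 3);	/* 0x7fff0100 */

	printf("prev a0 = 0x%lx, prev a1 = 0x%lx\n", prev_a0, prev_a1);
	return 0;
}
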
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
new file mode 100644
index 000000000000..a67eea379766
--- /dev/null
+++ b/arch/xtensa/oprofile/init.c
@@ -0,0 +1,26 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+
+extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ ops->backtrace = xtensa_backtrace;
+ return -ENODEV;
+}
+
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/xtensa/platforms/iss/Makefile b/arch/xtensa/platforms/iss/Makefile
index b7d1a5c0ff7f..d2369b799c50 100644
--- a/arch/xtensa/platforms/iss/Makefile
+++ b/arch/xtensa/platforms/iss/Makefile
@@ -6,3 +6,4 @@
obj-y = console.o setup.o
obj-$(CONFIG_NET) += network.o
+obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
new file mode 100644
index 000000000000..f58ffc3b68a8
--- /dev/null
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -0,0 +1,375 @@
+/*
+ * arch/xtensa/platforms/iss/simdisk.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2013 Tensilica Inc.
+ * Authors Victor Prupis
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <platform/simcall.h>
+
+#define SIMDISK_MAJOR 240
+#define SECTOR_SHIFT 9
+#define SIMDISK_MINORS 1
+#define MAX_SIMDISK_COUNT 10
+
+struct simdisk {
+ const char *filename;
+ spinlock_t lock;
+ struct request_queue *queue;
+ struct gendisk *gd;
+ struct proc_dir_entry *procfile;
+ int users;
+ unsigned long size;
+ int fd;
+};
+
+
+static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
+module_param(simdisk_count, int, S_IRUGO);
+MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");
+
+static int n_files;
+static const char *filename[MAX_SIMDISK_COUNT] = {
+#ifdef CONFIG_SIMDISK0_FILENAME
+ CONFIG_SIMDISK0_FILENAME,
+#ifdef CONFIG_SIMDISK1_FILENAME
+ CONFIG_SIMDISK1_FILENAME,
+#endif
+#endif
+};
+
+static int simdisk_param_set_filename(const char *val,
+ const struct kernel_param *kp)
+{
+ if (n_files < ARRAY_SIZE(filename))
+ filename[n_files++] = val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static const struct kernel_param_ops simdisk_param_ops_filename = {
+ .set = simdisk_param_set_filename,
+};
+module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
+MODULE_PARM_DESC(filename, "Backing storage filename.");
+
+static int simdisk_major = SIMDISK_MAJOR;
+
+static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
+ unsigned long nsect, char *buffer, int write)
+{
+ unsigned long offset = sector << SECTOR_SHIFT;
+ unsigned long nbytes = nsect << SECTOR_SHIFT;
+
+ if (offset > dev->size || dev->size - offset < nbytes) {
+ pr_notice("Beyond-end %s (%ld %ld)\n",
+ write ? "write" : "read", offset, nbytes);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ while (nbytes > 0) {
+ unsigned long io;
+
+ __simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
+ if (write)
+ io = simc_write(dev->fd, buffer, nbytes);
+ else
+ io = simc_read(dev->fd, buffer, nbytes);
+ if (io == -1) {
+ pr_err("SIMDISK: IO error %d\n", errno);
+ break;
+ }
+ buffer += io;
+ offset += io;
+ nbytes -= io;
+ }
+ spin_unlock(&dev->lock);
+}
+
+static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
+{
+ int i;
+ struct bio_vec *bvec;
+ sector_t sector = bio->bi_sector;
+
+ bio_for_each_segment(bvec, bio, i) {
+ char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
+ unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+
+ simdisk_transfer(dev, sector, len, buffer,
+ bio_data_dir(bio) == WRITE);
+ sector += len;
+ __bio_kunmap_atomic(bio, KM_USER0);
+ }
+ return 0;
+}
+
+static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct simdisk *dev = q->queuedata;
+ int status = simdisk_xfer_bio(dev, bio);
+ bio_endio(bio, status);
+}
+
+
+static int simdisk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct simdisk *dev = bdev->bd_disk->private_data;
+
+ spin_lock(&dev->lock);
+ if (!dev->users)
+ check_disk_change(bdev);
+ ++dev->users;
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static int simdisk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct simdisk *dev = disk->private_data;
+ spin_lock(&dev->lock);
+ --dev->users;
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static const struct block_device_operations simdisk_ops = {
+ .owner = THIS_MODULE,
+ .open = simdisk_open,
+ .release = simdisk_release,
+};
+
+static struct simdisk *sddev;
+static struct proc_dir_entry *simdisk_procdir;
+
+static int simdisk_attach(struct simdisk *dev, const char *filename)
+{
+ int err = 0;
+
+ filename = kstrdup(filename, GFP_KERNEL);
+ if (filename == NULL)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+
+ if (dev->fd != -1) {
+ err = -EBUSY;
+ goto out;
+ }
+ dev->fd = simc_open(filename, O_RDWR, 0);
+ if (dev->fd == -1) {
+ pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
+ err = -ENODEV;
+ goto out;
+ }
+ dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
+ set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
+ dev->filename = filename;
+ pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
+out:
+ if (err)
+ kfree(filename);
+ spin_unlock(&dev->lock);
+
+ return err;
+}
+
+static int simdisk_detach(struct simdisk *dev)
+{
+ int err = 0;
+
+ spin_lock(&dev->lock);
+
+ if (dev->users != 0) {
+ err = -EBUSY;
+ } else if (dev->fd != -1) {
+ if (simc_close(dev->fd)) {
+ pr_err("SIMDISK: error closing %s: %d\n",
+ dev->filename, errno);
+ err = -EIO;
+ } else {
+ pr_info("SIMDISK: %s detached from %s\n",
+ dev->gd->disk_name, dev->filename);
+ dev->fd = -1;
+ kfree(dev->filename);
+ dev->filename = NULL;
+ }
+ }
+ spin_unlock(&dev->lock);
+ return err;
+}
+
+static int proc_read_simdisk(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ struct simdisk *dev = (struct simdisk *) data;
+ len = sprintf(page, "%s\n", dev->filename ? dev->filename : "");
+ return len;
+}
+
+static int proc_write_simdisk(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char *tmp = kmalloc(count + 1, GFP_KERNEL);
+ struct simdisk *dev = (struct simdisk *) data;
+ int err;
+
+ if (tmp == NULL)
+ return -ENOMEM;
+ if (copy_from_user(tmp, buffer, count)) {
+ err = -EFAULT;
+ goto out_free;
+ }
+
+ err = simdisk_detach(dev);
+ if (err != 0)
+ goto out_free;
+
+ if (count > 0 && tmp[count - 1] == '\n')
+ tmp[count - 1] = 0;
+ else
+ tmp[count] = 0;
+
+ if (tmp[0])
+ err = simdisk_attach(dev, tmp);
+
+ if (err == 0)
+ err = count;
+out_free:
+ kfree(tmp);
+ return err;
+}
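
The procfs write handler above first detaches the current backing file and, if a non-empty name was written, attaches the new one. A hypothetical userspace usage sketch; the /proc/simdisk/0 path is inferred from simdisk_setup() creating one numbered entry per unit under the "simdisk" directory:

/* Hypothetical usage sketch for the simdisk procfs interface. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/simdisk/0";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* An empty write detaches; a filename attaches that image. */
	fputs("/tmp/disk.img\n", f);
	fclose(f);
	return 0;
}
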
+
+static int __init simdisk_setup(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ dev->fd = -1;
+ dev->filename = NULL;
+ spin_lock_init(&dev->lock);
+ dev->users = 0;
+
+ dev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dev->queue == NULL) {
+ pr_err("blk_alloc_queue failed\n");
+ goto out_alloc_queue;
+ }
+
+ blk_queue_make_request(dev->queue, simdisk_make_request);
+ dev->queue->queuedata = dev;
+
+ dev->gd = alloc_disk(SIMDISK_MINORS);
+ if (dev->gd == NULL) {
+ pr_err("alloc_disk failed\n");
+ goto out_alloc_disk;
+ }
+ dev->gd->major = simdisk_major;
+ dev->gd->first_minor = which;
+ dev->gd->fops = &simdisk_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
+ set_capacity(dev->gd, 0);
+ add_disk(dev->gd);
+
+ dev->procfile = create_proc_entry(tmp, 0644, procdir);
+ dev->procfile->data = dev;
+ dev->procfile->read_proc = proc_read_simdisk;
+ dev->procfile->write_proc = proc_write_simdisk;
+ return 0;
+
+out_alloc_disk:
+ blk_cleanup_queue(dev->queue);
+ dev->queue = NULL;
+out_alloc_queue:
+ simc_close(dev->fd);
+ return -EIO;
+}
+
+static int __init simdisk_init(void)
+{
+ int i;
+
+ if (register_blkdev(simdisk_major, "simdisk") < 0) {
+ pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
+ return -EIO;
+ }
+ pr_info("SIMDISK: major: %d\n", simdisk_major);
+
+ if (n_files > simdisk_count)
+ simdisk_count = n_files;
+ if (simdisk_count > MAX_SIMDISK_COUNT)
+ simdisk_count = MAX_SIMDISK_COUNT;
+
+ sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
+ GFP_KERNEL);
+ if (sddev == NULL)
+ goto out_unregister;
+
+ simdisk_procdir = proc_mkdir("simdisk", 0);
+ if (simdisk_procdir == NULL)
+ goto out_free_unregister;
+
+ for (i = 0; i < simdisk_count; ++i) {
+ if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
+ if (filename[i] != NULL && filename[i][0] != 0 &&
+ (n_files == 0 || i < n_files))
+ simdisk_attach(sddev + i, filename[i]);
+ }
+ }
+
+ return 0;
+
+out_free_unregister:
+ kfree(sddev);
+out_unregister:
+ unregister_blkdev(simdisk_major, "simdisk");
+ return -ENOMEM;
+}
+module_init(simdisk_init);
+
+static void simdisk_teardown(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ simdisk_detach(dev);
+ if (dev->gd)
+ del_gendisk(dev->gd);
+ if (dev->queue)
+ blk_cleanup_queue(dev->queue);
+ remove_proc_entry(tmp, procdir);
+}
+
+static void __exit simdisk_exit(void)
+{
+ int i;
+
+ for (i = 0; i < simdisk_count; ++i)
+ simdisk_teardown(sddev + i, i, simdisk_procdir);
+ remove_proc_entry("simdisk", 0);
+ kfree(sddev);
+ unregister_blkdev(simdisk_major, "simdisk");
+}
+module_exit(simdisk_exit);
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 4b9951a4569d..9d888a2a5755 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -100,7 +100,7 @@ static void __init update_clock_frequency(struct device_node *node)
}
*(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
- prom_update_property(node, newfreq);
+ of_update_property(node, newfreq);
}
#define MAC_LEN 6
@@ -128,7 +128,7 @@ static void __init update_local_mac(struct device_node *node)
memcpy(newmac->value, macaddr, MAC_LEN);
((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f;
- prom_update_property(node, newmac);
+ of_update_property(node, newmac);
}
static int __init machine_setup(void)
diff --git a/arch/xtensa/variants/dc233c/include/variant/core.h b/arch/xtensa/variants/dc233c/include/variant/core.h
new file mode 100644
index 000000000000..3a2e53b94930
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/core.h
@@ -0,0 +1,475 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie-asm.h b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
new file mode 100644
index 000000000000..5dbd981ea424
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
@@ -0,0 +1,193 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
+
+
+
+ /*
+ * Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr \at1, ACCLO // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, ACCHI // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ rsr \at1, M0 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, M1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr \at1, M2 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr \at1, M3 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr \at1, SCOMPARE1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_store
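To make the selection parameters above concrete, here is a rough sketch (not part of this header) of how a caller could compose a select mask; the values simply restate the XTHAL_SAS_* defines above instead of including this file, and MY_NCP_SELECT is a hypothetical name:

	#define XTHAL_SAS_OPT	0x0002	/* mirrors the defines above */
	#define XTHAL_SAS_CC	0x0008
	#define XTHAL_SAS_CALR	0x0010
	#define XTHAL_SAS3(optie, ccuse, abi) \
		(((optie) & 0x0003) | ((ccuse) & 0x000C) | ((abi) & 0x0070))

	/* optional, compiler-used, caller-saved state only (the ACCLO/ACCHI
	 * group in the macros above); evaluates to 0x001A and can be passed
	 * as select= to xchal_ncp_store / xchal_ncp_load */
	#define MY_NCP_SELECT	XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_CALR)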
+
+ /*
+ * Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, ACCLO // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, ACCHI // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, M0 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, M1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr \at1, M2 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr \at1, M3 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr \at1, SCOMPARE1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie.h b/arch/xtensa/variants/dc233c/include/variant/tie.h
new file mode 100644
index 000000000000..815e52bc3d00
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie.h
@@ -0,0 +1,150 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 32
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 8
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0)
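As a rough illustration of the XCHAL_SA_REG recipe described in the comment above (assuming this header is already included), the list can be expanded into an ordinary C table; the table name below is hypothetical:

	#define XCHAL_SA_REG(s, ccused, abikind, kind, opt, name, galign, align, \
			     asize, dbnum, base, regnum, bitsz, gapsz, reset, ...) \
		#name,

	static const char *const ncp_sa_reg_names[] = {
		XCHAL_NCP_SA_LIST(0)	/* "threadptr", "acclo", ..., "scompare1" */
	};

	#undef XCHAL_SA_REG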
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
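For illustration only (not part of this header): on a little-endian configuration, op0 is the low nibble of an instruction's first byte, so the table above can back a simple length decoder; the helper below is a hypothetical sketch under that assumption:

	static const unsigned char xtensa_op0_length[16] = {
		XCHAL_OP0_FORMAT_LENGTHS	/* 2- or 3-byte format per op0 value */
	};

	static inline unsigned int xtensa_insn_length(unsigned char first_byte)
	{
		return xtensa_op0_length[first_byte & 0x0f];	/* op0 = low nibble (LE) */
	}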
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/block/Kconfig b/block/Kconfig
index 4a85ccf8d4cf..a7e40a7c8214 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,7 +4,6 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
- select PERCPU_RWSEM
help
Provide block layer support for the kernel.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b8858fb0cafa..b2b9837f9dd3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -26,11 +26,32 @@
static DEFINE_MUTEX(blkcg_pol_mutex);
-struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
+ .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q, bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_cgrp: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
+ * a subtree.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg) \
+ cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
+ if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+ (p_blkg)->q, false)))
+
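A hedged usage sketch of the iterator (the function name is made up; the pattern matches how the recursive stat helpers added later in this file use it): walk the subtree under RCU and count online descendants.

	static int blkg_count_online_descendants(struct blkcg_gq *p_blkg)
	{
		struct blkcg_gq *d_blkg;
		struct cgroup *pos_cgrp;
		int nr = 0;

		rcu_read_lock();
		blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)
			if (d_blkg->online)
				nr++;
		rcu_read_unlock();

		return nr;
	}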
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -112,9 +133,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->pd[i] = pd;
pd->blkg = blkg;
+ pd->plid = i;
/* invoke per-policy init */
- if (blkcg_policy_enabled(blkg->q, pol))
+ if (pol->pd_init_fn)
pol->pd_init_fn(blkg);
}
@@ -125,8 +147,19 @@ err_free:
return NULL;
}
+/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is internal version and shouldn't be used by policy
+ * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state. If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and lookup hint is updated on success.
+ */
static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
+ struct request_queue *q, bool update_hint)
{
struct blkcg_gq *blkg;
@@ -135,14 +168,19 @@ static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
return blkg;
/*
- * Hint didn't match. Look up from the radix tree. Note that we
- * may not be holding queue_lock and thus are not sure whether
- * @blkg from blkg_tree has already been removed or not, so we
- * can't update hint to the lookup result. Leave it to the caller.
+ * Hint didn't match. Look up from the radix tree. Note that the
+ * hint can only be updated under queue_lock as otherwise @blkg
+ * could have already been removed from blkg_tree. The caller is
+ * responsible for grabbing queue_lock if @update_hint.
*/
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
- if (blkg && blkg->q == q)
+ if (blkg && blkg->q == q) {
+ if (update_hint) {
+ lockdep_assert_held(q->queue_lock);
+ rcu_assign_pointer(blkcg->blkg_hint, blkg);
+ }
return blkg;
+ }
return NULL;
}
@@ -162,7 +200,7 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
if (unlikely(blk_queue_bypass(q)))
return NULL;
- return __blkg_lookup(blkcg, q);
+ return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
@@ -170,75 +208,129 @@ EXPORT_SYMBOL_GPL(blkg_lookup);
* If @new_blkg is %NULL, this function tries to allocate a new one as
* necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
*/
-static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q,
- struct blkcg_gq *new_blkg)
+static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
+ struct request_queue *q,
+ struct blkcg_gq *new_blkg)
{
struct blkcg_gq *blkg;
- int ret;
+ int i, ret;
WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
- /* lookup and update hint on success, see __blkg_lookup() for details */
- blkg = __blkg_lookup(blkcg, q);
- if (blkg) {
- rcu_assign_pointer(blkcg->blkg_hint, blkg);
- goto out_free;
- }
-
/* blkg holds a reference to blkcg */
if (!css_tryget(&blkcg->css)) {
- blkg = ERR_PTR(-EINVAL);
- goto out_free;
+ ret = -EINVAL;
+ goto err_free_blkg;
}
/* allocate */
if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
if (unlikely(!new_blkg)) {
- blkg = ERR_PTR(-ENOMEM);
- goto out_put;
+ ret = -ENOMEM;
+ goto err_put_css;
}
}
blkg = new_blkg;
- /* insert */
+ /* link parent and insert */
+ if (blkcg_parent(blkcg)) {
+ blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
+ if (WARN_ON_ONCE(!blkg->parent)) {
+ ret = -EINVAL;
+ goto err_put_css;
+ }
+ blkg_get(blkg->parent);
+ }
+
spin_lock(&blkcg->lock);
ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
if (likely(!ret)) {
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
list_add(&blkg->q_node, &q->blkg_list);
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_online_fn)
+ pol->pd_online_fn(blkg);
+ }
}
+ blkg->online = true;
spin_unlock(&blkcg->lock);
if (!ret)
return blkg;
- blkg = ERR_PTR(ret);
-out_put:
+ /* @blkg is fully initialized but insertion failed, use the usual release path */
+ blkg_put(blkg);
+ return ERR_PTR(ret);
+
+err_put_css:
css_put(&blkcg->css);
-out_free:
+err_free_blkg:
blkg_free(new_blkg);
- return blkg;
+ return ERR_PTR(ret);
}
+/**
+ * blkg_lookup_create - lookup blkg, try to create one if not there
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
+ * create one. blkg creation is performed recursively from blkcg_root such
+ * that all non-root blkg's have access to the parent blkg. This function
+ * should be called under RCU read lock and @q->queue_lock.
+ *
+ * Returns pointer to the looked up or created blkg on success, ERR_PTR()
+ * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
+ * dead and bypassing, returns ERR_PTR(-EBUSY).
+ */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q)
{
+ struct blkcg_gq *blkg;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
+
/*
* This could be the first entry point of blkcg implementation and
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
- return __blkg_lookup_create(blkcg, q, NULL);
+
+ blkg = __blkg_lookup(blkcg, q, true);
+ if (blkg)
+ return blkg;
+
+ /*
+ * Create blkgs walking down from blkcg_root to @blkcg, so that all
+ * non-root blkgs have access to their parents.
+ */
+ while (true) {
+ struct blkcg *pos = blkcg;
+ struct blkcg *parent = blkcg_parent(blkcg);
+
+ while (parent && !__blkg_lookup(parent, q, false)) {
+ pos = parent;
+ parent = blkcg_parent(parent);
+ }
+
+ blkg = blkg_create(pos, q, NULL);
+ if (pos == blkcg || IS_ERR(blkg))
+ return blkg;
+ }
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
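A minimal caller sketch, assuming a policy hot path that has already resolved @blkcg (the function name is hypothetical); it follows the locking rules stated in the comment above and pins the result before dropping the locks:

	static struct blkcg_gq *my_get_blkg(struct blkcg *blkcg,
					    struct request_queue *q)
	{
		struct blkcg_gq *blkg;

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_create(blkcg, q);
		if (!IS_ERR(blkg))
			blkg_get(blkg);		/* pin before dropping the locks */

		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		return blkg;	/* ERR_PTR(-EBUSY/-EINVAL) while bypassing/dying */
	}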
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
+ int i;
lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
@@ -247,6 +339,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
WARN_ON_ONCE(list_empty(&blkg->q_node));
WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg);
+ }
+ blkg->online = false;
+
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
list_del_init(&blkg->q_node);
hlist_del_init_rcu(&blkg->blkcg_node);
@@ -301,8 +401,10 @@ static void blkg_rcu_free(struct rcu_head *rcu_head)
void __blkg_release(struct blkcg_gq *blkg)
{
- /* release the extra blkcg reference this blkg has been holding */
+ /* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
/*
* A group is freed in rcu manner. But having an rcu lock does not
@@ -357,7 +459,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
{
struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
struct blkcg_gq *blkg;
- struct hlist_node *n;
int i;
mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +469,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
* stat updates. This is a debug feature which shouldn't exist
* anyway. If you get hit by a race, retry.
*/
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -402,8 +503,9 @@ static const char *blkg_dev_name(struct blkcg_gq *blkg)
*
* This function invokes @prfill on each blkg of @blkcg if pd for the
* policy specified by @pol exists. @prfill is invoked with @sf, the
- * policy data and @data. If @show_total is %true, the sum of the return
- * values from @prfill is printed with "Total" label at the end.
+ * policy data, @data, and the matching queue lock held. If @show_total
+ * is %true, the sum of the return values from @prfill is printed with
+ * "Total" label at the end.
*
* This is to be used to construct print functions for
* cftype->read_seq_string method.
@@ -415,14 +517,16 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
bool show_total)
{
struct blkcg_gq *blkg;
- struct hlist_node *n;
u64 total = 0;
- spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ spin_lock_irq(blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(&blkcg->lock);
+ spin_unlock_irq(blkg->q->queue_lock);
+ }
+ rcu_read_unlock();
if (show_total)
seq_printf(sf, "Total %llu\n", (unsigned long long)total);
@@ -481,6 +585,7 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
return v;
}
+EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
/**
* blkg_prfill_stat - prfill callback for blkg_stat
@@ -514,6 +619,82 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/**
+ * blkg_stat_recursive_sum - collect hierarchical blkg_stat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_stat specified by @off from @pd and all its online
+ * descendants and return the sum. The caller must be holding the queue
+ * lock for online tests.
+ */
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+ struct blkcg_policy *pol = blkcg_policy[pd->plid];
+ struct blkcg_gq *pos_blkg;
+ struct cgroup *pos_cgrp;
+ u64 sum;
+
+ lockdep_assert_held(pd->blkg->q->queue_lock);
+
+ sum = blkg_stat_read((void *)pd + off);
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+ struct blkg_stat *stat = (void *)pos_pd + off;
+
+ if (pos_blkg->online)
+ sum += blkg_stat_read(stat);
+ }
+ rcu_read_unlock();
+
+ return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
+
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_rwstat specified by @off from @pd and all its online
+ * descendants and return the sum. The caller must be holding the queue
+ * lock for online tests.
+ */
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off)
+{
+ struct blkcg_policy *pol = blkcg_policy[pd->plid];
+ struct blkcg_gq *pos_blkg;
+ struct cgroup *pos_cgrp;
+ struct blkg_rwstat sum;
+ int i;
+
+ lockdep_assert_held(pd->blkg->q->queue_lock);
+
+ sum = blkg_rwstat_read((void *)pd + off);
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+ struct blkg_rwstat *rwstat = (void *)pos_pd + off;
+ struct blkg_rwstat tmp;
+
+ if (!pos_blkg->online)
+ continue;
+
+ tmp = blkg_rwstat_read(rwstat);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum.cnt[i] += tmp.cnt[i];
+ }
+ rcu_read_unlock();
+
+ return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
+
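For context, a hedged sketch of how these two recursive helpers are meant to be consumed (cfq-iosched wires up equivalents later in this series; the function names here are illustrative). Each is a prfill callback for blkcg_print_blkgs(), printing a stat summed over the whole online subtree:

	static u64 my_prfill_stat_recursive(struct seq_file *sf,
					    struct blkg_policy_data *pd, int off)
	{
		u64 sum = blkg_stat_recursive_sum(pd, off);

		return __blkg_prfill_u64(sf, pd, sum);
	}

	static u64 my_prfill_rwstat_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd, int off)
	{
		struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);

		return __blkg_prfill_rwstat(sf, pd, &sum);
	}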
+/**
* blkg_conf_prep - parse and prepare for per-blkg config update
* @blkcg: target block cgroup
* @pol: target policy
@@ -658,6 +839,7 @@ static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
return ERR_PTR(-ENOMEM);
blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
spin_lock_init(&blkcg->lock);
@@ -777,7 +959,7 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
LIST_HEAD(pds);
- struct blkcg_gq *blkg;
+ struct blkcg_gq *blkg, *new_blkg;
struct blkg_policy_data *pd, *n;
int cnt = 0, ret;
bool preloaded;
@@ -786,19 +968,27 @@ int blkcg_activate_policy(struct request_queue *q,
return 0;
/* preallocations for root blkg */
- blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
- if (!blkg)
+ new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+ if (!new_blkg)
return -ENOMEM;
preloaded = !radix_tree_preload(GFP_KERNEL);
blk_queue_bypass_start(q);
- /* make sure the root blkg exists and count the existing blkgs */
+ /*
+ * Make sure the root blkg exists and count the existing blkgs. As
+ * @q is bypassing at this point, blkg_lookup_create() can't be
+ * used. Open code it.
+ */
spin_lock_irq(q->queue_lock);
rcu_read_lock();
- blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
+ blkg = __blkg_lookup(&blkcg_root, q, false);
+ if (blkg)
+ blkg_free(new_blkg);
+ else
+ blkg = blkg_create(&blkcg_root, q, new_blkg);
rcu_read_unlock();
if (preloaded)
@@ -846,6 +1036,7 @@ int blkcg_activate_policy(struct request_queue *q,
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
+ pd->plid = pol->plid;
pol->pd_init_fn(blkg);
spin_unlock(&blkg->blkcg->lock);
@@ -892,6 +1083,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
/* grab blkcg lock too while removing @pd from @blkg */
spin_lock(&blkg->blkcg->lock);
+ if (pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg);
if (pol->pd_exit_fn)
pol->pd_exit_fn(blkg);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 24597309e23d..f2b292925ccd 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -54,6 +54,7 @@ struct blkcg {
/* TODO: per-policy storage in blkcg */
unsigned int cfq_weight; /* belongs to cfq */
+ unsigned int cfq_leaf_weight;
};
struct blkg_stat {
@@ -80,8 +81,9 @@ struct blkg_rwstat {
* beginning and pd_size can't be smaller than pd.
*/
struct blkg_policy_data {
- /* the blkg this per-policy data belongs to */
+ /* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
+ int plid;
/* used during policy activation */
struct list_head alloc_node;
@@ -94,17 +96,27 @@ struct blkcg_gq {
struct list_head q_node;
struct hlist_node blkcg_node;
struct blkcg *blkcg;
+
+ /* all non-root blkcg_gq's are guaranteed to have access to parent */
+ struct blkcg_gq *parent;
+
/* request allocation list for this blkcg-q pair */
struct request_list rl;
+
/* reference count */
int refcnt;
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
};
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
@@ -117,6 +129,8 @@ struct blkcg_policy {
/* operations */
blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_exit_pd_fn *pd_exit_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
@@ -150,6 +164,10 @@ u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off);
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off);
+
struct blkg_conf_ctx {
struct gendisk *disk;
struct blkcg_gq *blkg;
@@ -181,6 +199,19 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
}
/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg. Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+ struct cgroup *pcg = blkcg->css.cgroup->parent;
+
+ return pcg ? cgroup_to_blkcg(pcg) : NULL;
+}
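A tiny hedged sketch of the new helper in use (hypothetical function): compute how deep @blkcg sits in the hierarchy by walking parents until the root is reached.

	static int blkcg_depth(struct blkcg *blkcg)
	{
		int depth = 0;

		while ((blkcg = blkcg_parent(blkcg)))
			depth++;

		return depth;
	}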
+
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -387,6 +418,18 @@ static inline void blkg_stat_reset(struct blkg_stat *stat)
}
/**
+ * blkg_stat_merge - merge a blkg_stat into another
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count to @to.
+ */
+static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+{
+ blkg_stat_add(to, blkg_stat_read(from));
+}
+
+/**
* blkg_rwstat_add - add a value to a blkg_rwstat
* @rwstat: target blkg_rwstat
* @rw: mask of REQ_{WRITE|SYNC}
@@ -434,14 +477,14 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
}
/**
- * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
* @rwstat: blkg_rwstat to read
*
* Return the total count of @rwstat regardless of the IO direction. This
* function can be called without synchronization and takes care of u64
* atomicity.
*/
-static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
@@ -457,6 +500,25 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}
+/**
+ * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's counts to @to.
+ */
+static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ struct blkg_rwstat v = blkg_rwstat_read(from);
+ int i;
+
+ u64_stats_update_begin(&to->syncp);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ to->cnt[i] += v.cnt[i];
+ u64_stats_update_end(&to->syncp);
+}
+
#else /* CONFIG_BLK_CGROUP */
struct cgroup;
diff --git a/block/blk-core.c b/block/blk-core.c
index 277134cb5d32..074b758efc42 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,7 +39,6 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
@@ -1348,7 +1347,7 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
if (!ll_back_merge_fn(q, req, bio))
return false;
- trace_block_bio_backmerge(q, bio);
+ trace_block_bio_backmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
@@ -1370,7 +1369,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
if (!ll_front_merge_fn(q, req, bio))
return false;
- trace_block_bio_frontmerge(q, bio);
+ trace_block_bio_frontmerge(q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
@@ -1553,13 +1552,6 @@ get_rq:
if (list_empty(&plug->list))
trace_block_plug(q);
else {
- if (!plug->should_sort) {
- struct request *__rq;
-
- __rq = list_entry_rq(plug->list.prev);
- if (__rq->q != q)
- plug->should_sort = 1;
- }
if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
@@ -2890,7 +2882,6 @@ void blk_start_plug(struct blk_plug *plug)
plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->cb_list);
- plug->should_sort = 0;
/*
* If this is a nested plug, don't actually assign it. It will be
@@ -2992,10 +2983,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
list_splice_init(&plug->list, &list);
- if (plug->should_sort) {
- list_sort(NULL, &list, plug_rq_cmp);
- plug->should_sort = 0;
- }
+ list_sort(NULL, &list, plug_rq_cmp);
q = NULL;
depth = 0;
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c88202f973d9..e70621396129 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -121,9 +121,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
- while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
+ while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
else
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (rq->errors)
err = -EIO;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 720ad607ff91..db8f1b507857 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -436,7 +436,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
bio_get(bio);
submit_bio(WRITE_FLUSH, bio);
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
/*
* The driver must store the error location in ->bi_sector, if
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7bb..9c4bb8266bc8 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
*/
void put_io_context_active(struct io_context *ioc)
{
- struct hlist_node *n;
unsigned long flags;
struct io_cq *icq;
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
*/
retry:
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+ hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/blk-lib.c b/block/blk-lib.c
index b3a1f2b70b31..d6f50d572565 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -126,7 +126,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
ret = -EIO;
@@ -200,7 +200,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
ret = -ENOTSUPP;
@@ -262,7 +262,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
- wait_for_completion(&wait);
+ wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
/* One of bios in the batch was completed with error.*/
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 788147797a79..6206a934eb8c 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -497,6 +497,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
return res;
}
+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+{
+ struct request_queue *q = container_of(rcu_head, struct request_queue,
+ rcu_head);
+ kmem_cache_free(blk_requestq_cachep, q);
+}
+
/**
* blk_release_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging to the request queue to be released
@@ -538,7 +545,7 @@ static void blk_release_queue(struct kobject *kobj)
bdi_destroy(&q->backing_dev_info);
ida_simple_remove(&blk_queue_ida, q->id);
- kmem_cache_free(blk_requestq_cachep, q);
+ call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static const struct sysfs_ops queue_sysfs_ops = {
diff --git a/block/blk.h b/block/blk.h
index 47fdfdd41520..e837b8f619b7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -61,7 +61,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);
diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3bacee..420a5a9f1b23 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
- struct hlist_node *entry;
mutex_lock(&bsg_mutex);
- hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+ hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
atomic_inc(&bd->ref_count);
goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
{
struct bsg_class_device *bcd;
dev_t dev;
- int ret, minor;
+ int ret;
struct device *class_dev = NULL;
const char *devname;
@@ -1017,23 +1016,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
mutex_lock(&bsg_mutex);
- ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
- if (ret < 0)
+ ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
+ ret = -EINVAL;
+ }
goto unlock;
-
- if (minor >= BSG_MAX_DEVS) {
- printk(KERN_ERR "bsg: too many bsg devices\n");
- ret = -EINVAL;
- goto remove_idr;
}
- bcd->minor = minor;
+ bcd->minor = ret;
bcd->queue = q;
bcd->parent = get_device(parent);
bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
device_unregister(class_dev);
put_dev:
put_device(parent);
-remove_idr:
- idr_remove(&bsg_minor_idr, minor);
+ idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
mutex_unlock(&bsg_mutex);
return ret;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e62e9205b80a..4f0ade74cfd0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -85,7 +85,6 @@ struct cfq_rb_root {
struct rb_root rb;
struct rb_node *left;
unsigned count;
- unsigned total_weight;
u64 min_vdisktime;
struct cfq_ttime ttime;
};
@@ -155,7 +154,7 @@ struct cfq_queue {
* First index in the service_trees.
* IDLE is handled separately, so it has negative index
*/
-enum wl_prio_t {
+enum wl_class_t {
BE_WORKLOAD = 0,
RT_WORKLOAD = 1,
IDLE_WORKLOAD = 2,
@@ -223,10 +222,45 @@ struct cfq_group {
/* group service_tree key */
u64 vdisktime;
+
+ /*
+ * The number of active cfqgs and sum of their weights under this
+ * cfqg. This covers this cfqg's leaf_weight and all children's
+ * weights, but does not cover weights of further descendants.
+ *
+ * If a cfqg is on the service tree, it's active. An active cfqg
+ * also activates its parent and contributes to the children_weight
+ * of the parent.
+ */
+ int nr_active;
+ unsigned int children_weight;
+
+ /*
+ * vfraction is the fraction of vdisktime that the tasks in this
+ * cfqg are entitled to. This is determined by compounding the
+ * ratios walking up from this cfqg to the root.
+ *
+ * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
+ * vfractions on a service tree is approximately 1. The sum may
+ * deviate a bit due to rounding errors and fluctuations caused by
+ * cfqgs entering and leaving the service tree.
+ */
+ unsigned int vfraction;
+
+ /*
+ * There are two weights - (internal) weight is the weight of this
+ * cfqg against the sibling cfqgs. leaf_weight is the weight of
+ * this cfqg against the child cfqgs. For the root cfqg, both
+ * weights are kept in sync for backward compatibility.
+ */
unsigned int weight;
unsigned int new_weight;
unsigned int dev_weight;
+ unsigned int leaf_weight;
+ unsigned int new_leaf_weight;
+ unsigned int dev_leaf_weight;
+
/* number of cfqq currently on this group */
int nr_cfqq;
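A worked example with purely illustrative numbers: suppose a cfqg has leaf_weight 500 and one active child of weight 500, so its children_weight is 1000; the tasks attached directly to it get 500/1000 of its share. If that cfqg's own weight is 500 and its parent's children_weight is 1500, the compounded vfraction for those tasks is (500/1000) * (500/1500) = 1/6 of the device time, stored in fixed point with CFQ_SERVICE_SHIFT.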
@@ -248,14 +282,15 @@ struct cfq_group {
struct cfq_rb_root service_trees[2][3];
struct cfq_rb_root service_tree_idle;
- unsigned long saved_workload_slice;
- enum wl_type_t saved_workload;
- enum wl_prio_t saved_serving_prio;
+ unsigned long saved_wl_slice;
+ enum wl_type_t saved_wl_type;
+ enum wl_class_t saved_wl_class;
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
- struct cfqg_stats stats;
+ struct cfqg_stats stats; /* stats for this cfqg */
+ struct cfqg_stats dead_stats; /* stats pushed from dead children */
};
struct cfq_io_cq {
@@ -280,8 +315,8 @@ struct cfq_data {
/*
* The priority currently being served
*/
- enum wl_prio_t serving_prio;
- enum wl_type_t serving_type;
+ enum wl_class_t serving_wl_class;
+ enum wl_type_t serving_wl_type;
unsigned long workload_expires;
struct cfq_group *serving_group;
@@ -353,17 +388,17 @@ struct cfq_data {
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
-static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
- enum wl_prio_t prio,
+static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
+ enum wl_class_t class,
enum wl_type_t type)
{
if (!cfqg)
return NULL;
- if (prio == IDLE_WORKLOAD)
+ if (class == IDLE_WORKLOAD)
return &cfqg->service_tree_idle;
- return &cfqg->service_trees[prio][type];
+ return &cfqg->service_trees[class][type];
}
enum cfqq_state_flags {
@@ -502,7 +537,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
struct cfqg_stats *stats = &cfqg->stats;
- if (blkg_rwstat_sum(&stats->queued))
+ if (blkg_rwstat_total(&stats->queued))
return;
/*
@@ -546,7 +581,7 @@ static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
struct cfqg_stats *stats = &cfqg->stats;
blkg_stat_add(&stats->avg_queue_size_sum,
- blkg_rwstat_sum(&stats->queued));
+ blkg_rwstat_total(&stats->queued));
blkg_stat_add(&stats->avg_queue_size_samples, 1);
cfqg_stats_update_group_wait_time(stats);
}
@@ -572,6 +607,13 @@ static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
+{
+ struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
+
+ return pblkg ? blkg_to_cfqg(pblkg) : NULL;
+}
+
static inline void cfqg_get(struct cfq_group *cfqg)
{
return blkg_get(cfqg_to_blkg(cfqg));
@@ -586,8 +628,9 @@ static inline void cfqg_put(struct cfq_group *cfqg)
char __pbuf[128]; \
\
blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
__pbuf, ##args); \
} while (0)
@@ -646,11 +689,9 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
io_start_time - start_time);
}
-static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+/* @stats = 0 */
+static void cfqg_stats_reset(struct cfqg_stats *stats)
{
- struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- struct cfqg_stats *stats = &cfqg->stats;
-
/* queued stats shouldn't be cleared */
blkg_rwstat_reset(&stats->service_bytes);
blkg_rwstat_reset(&stats->serviced);
@@ -669,13 +710,58 @@ static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
#endif
}
+/* @to += @from */
+static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+{
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
+ blkg_rwstat_merge(&to->serviced, &from->serviced);
+ blkg_rwstat_merge(&to->merged, &from->merged);
+ blkg_rwstat_merge(&to->service_time, &from->service_time);
+ blkg_rwstat_merge(&to->wait_time, &from->wait_time);
+ blkg_stat_merge(&to->time, &from->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
+ blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+ blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+ blkg_stat_merge(&to->dequeue, &from->dequeue);
+ blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_merge(&to->idle_time, &from->idle_time);
+ blkg_stat_merge(&to->empty_time, &from->empty_time);
+#endif
+}
+
+/*
+ * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * recursive stats can still account for the amount used by this cfqg after
+ * it's gone.
+ */
+static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
+{
+ struct cfq_group *parent = cfqg_parent(cfqg);
+
+ lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
+
+ if (unlikely(!parent))
+ return;
+
+ cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
+ cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+ cfqg_stats_reset(&cfqg->stats);
+ cfqg_stats_reset(&cfqg->dead_stats);
+}
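As a hedged sketch of where the transfer above is expected to run (cfq registers an equivalent pd_offline_fn later in this series; the name below is illustrative):

	static void my_cfq_pd_offline(struct blkcg_gq *blkg)
	{
		/* fold this group's stats into its parent's dead_stats */
		cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
	}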
+
#else /* CONFIG_CFQ_GROUP_IOSCHED */
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
+ ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
@@ -732,7 +818,7 @@ static inline bool iops_mode(struct cfq_data *cfqd)
return false;
}
-static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
return IDLE_WORKLOAD;
@@ -751,23 +837,23 @@ static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
return SYNC_WORKLOAD;
}
-static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
- if (wl == IDLE_WORKLOAD)
+ if (wl_class == IDLE_WORKLOAD)
return cfqg->service_tree_idle.count;
- return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
- + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
- + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+ return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
+ cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
+ cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}
static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
- return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
- + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+ return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
@@ -847,13 +933,27 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
-static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
+/**
+ * cfqg_scale_charge - scale disk time charge according to cfqg weight
+ * @charge: disk time being charged
+ * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
+ *
+ * Scale @charge according to @vfraction, which is in range (0, 1]. The
+ * scaling is inversely proportional.
+ *
+ * scaled = charge / vfraction
+ *
+ * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
+ */
+static inline u64 cfqg_scale_charge(unsigned long charge,
+ unsigned int vfraction)
{
- u64 d = delta << CFQ_SERVICE_SHIFT;
+ u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
- d = d * CFQ_WEIGHT_DEFAULT;
- do_div(d, cfqg->weight);
- return d;
+ /* charge / vfraction */
+ c <<= CFQ_SERVICE_SHIFT;
+ do_div(c, vfraction);
+ return c;
}
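Continuing the illustrative numbers from the cfq_group comment earlier: with vfraction = 1/6 (in fixed point, (1 << CFQ_SERVICE_SHIFT) / 6), a charge of 8 time units scales to 8 / (1/6) = 48 units, returned as 48 << CFQ_SERVICE_SHIFT. A group entitled to a small fraction of the device is therefore charged proportionally more vdisktime for the same wall-clock usage.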
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
@@ -909,9 +1009,7 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
- return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+ return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}
static inline unsigned
@@ -1178,20 +1276,61 @@ static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
cfqg->new_weight = 0;
}
+
+ if (cfqg->new_leaf_weight) {
+ cfqg->leaf_weight = cfqg->new_leaf_weight;
+ cfqg->new_leaf_weight = 0;
+ }
}
static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
+ unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
+ struct cfq_group *pos = cfqg;
+ struct cfq_group *parent;
+ bool propagate;
+
+ /* add to the service tree */
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
cfq_update_group_weight(cfqg);
__cfq_group_service_tree_add(st, cfqg);
- st->total_weight += cfqg->weight;
+
+ /*
+ * Activate @cfqg and calculate the portion of vfraction @cfqg is
+ * entitled to. vfraction is calculated by walking the tree
+ * towards the root calculating the fraction it has at each level.
+ * The compounded ratio is how much vfraction @cfqg owns.
+ *
+ * Start with the proportion the tasks in this cfqg have against the
+ * active children cfqgs - its leaf_weight against children_weight.
+ */
+ propagate = !pos->nr_active++;
+ pos->children_weight += pos->leaf_weight;
+ vfr = vfr * pos->leaf_weight / pos->children_weight;
+
+ /*
+ * Compound ->weight walking up the tree. Both activation and
+ * vfraction calculation are done in the same loop. Propagation
+ * stops once an already activated node is met. vfraction
+ * calculation should always continue to the root.
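+ *
+ * E.g. with all weights and leaf_weights equal: if the root group has
+ * active tasks of its own plus an active child cfqg A, and A has
+ * active tasks plus an active child cfqg B, then B ends up with
+ * vfraction 1 * 1/2 * 1/2 = 1/4 of the device, A's own tasks get 1/4
+ * and the root group's own tasks get 1/2.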
+ */
+ while ((parent = cfqg_parent(pos))) {
+ if (propagate) {
+ propagate = !parent->nr_active++;
+ parent->children_weight += pos->weight;
+ }
+ vfr = vfr * pos->weight / parent->children_weight;
+ pos = parent;
+ }
+
+ cfqg->vfraction = max_t(unsigned, vfr, 1);
}
static void
@@ -1222,7 +1361,32 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
- st->total_weight -= cfqg->weight;
+ struct cfq_group *pos = cfqg;
+ bool propagate;
+
+ /*
+ * Undo activation from cfq_group_service_tree_add(). Deactivate
+ * @cfqg and propagate deactivation upwards.
+ */
+ propagate = !--pos->nr_active;
+ pos->children_weight -= pos->leaf_weight;
+
+ while (propagate) {
+ struct cfq_group *parent = cfqg_parent(pos);
+
+ /* @pos has 0 nr_active at this point */
+ WARN_ON_ONCE(pos->children_weight);
+ pos->vfraction = 0;
+
+ if (!parent)
+ break;
+
+ propagate = !--parent->nr_active;
+ parent->children_weight -= pos->weight;
+ pos = parent;
+ }
+
+ /* remove from the service tree */
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
}
@@ -1241,7 +1405,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
- cfqg->saved_workload_slice = 0;
+ cfqg->saved_wl_slice = 0;
cfqg_stats_update_dequeue(cfqg);
}
@@ -1284,6 +1448,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
unsigned int used_sl, charge, unaccounted_sl = 0;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
+ unsigned int vfr;
BUG_ON(nr_sync < 0);
used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
@@ -1293,20 +1458,25 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
charge = cfqq->allocated_slice;
- /* Can't update vdisktime while group is on service tree */
+ /*
+ * Can't update vdisktime while on service tree and cfqg->vfraction
+ * is valid only while on it. Cache vfr, leave the service tree,
+ * update vdisktime and go back on. The re-addition to the tree
+ * will also update the weights as necessary.
+ */
+ vfr = cfqg->vfraction;
cfq_group_service_tree_del(st, cfqg);
- cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
- /* If a new weight was requested, update now, off tree */
+ cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
if (time_after(cfqd->workload_expires, jiffies)) {
- cfqg->saved_workload_slice = cfqd->workload_expires
+ cfqg->saved_wl_slice = cfqd->workload_expires
- jiffies;
- cfqg->saved_workload = cfqd->serving_type;
- cfqg->saved_serving_prio = cfqd->serving_prio;
+ cfqg->saved_wl_type = cfqd->serving_wl_type;
+ cfqg->saved_wl_class = cfqd->serving_wl_class;
} else
- cfqg->saved_workload_slice = 0;
+ cfqg->saved_wl_slice = 0;
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
@@ -1344,6 +1514,52 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
cfq_init_cfqg_base(cfqg);
cfqg->weight = blkg->blkcg->cfq_weight;
+ cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+}
+
+static void cfq_pd_offline(struct blkcg_gq *blkg)
+{
+ /*
+ * @blkg is going offline and will be ignored by
+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
+ * that they don't get lost. If IOs complete after this point, the
+ * stats for them will be lost. Oh well...
+ */
+ cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+}
+
+/* offset delta from cfqg->stats to cfqg->dead_stats */
+static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
+ offsetof(struct cfq_group, stats);
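+/*
+ * e.g. for off == offsetof(struct cfq_group, stats.time),
+ * off + dead_stats_off_delta == offsetof(struct cfq_group, dead_stats.time)
+ */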
+
+/* to be used by recursive prfill, sums live and dead stats recursively */
+static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+ u64 sum = 0;
+
+ sum += blkg_stat_recursive_sum(pd, off);
+ sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
+ return sum;
+}
+
+/* to be used by recursive prfill, sums live and dead rwstats recursively */
+static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
+ int off)
+{
+ struct blkg_rwstat a, b;
+
+ a = blkg_rwstat_recursive_sum(pd, off);
+ b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
+ blkg_rwstat_merge(&a, &b);
+ return a;
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ cfqg_stats_reset(&cfqg->stats);
+ cfqg_stats_reset(&cfqg->dead_stats);
}
/*
@@ -1400,6 +1616,26 @@ static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
+static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+
+ if (!cfqg->dev_leaf_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
+}
+
+static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+ struct cftype *cft,
+ struct seq_file *sf)
+{
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+ cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
+ false);
+ return 0;
+}
+
static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *sf)
{
@@ -1407,8 +1643,16 @@ static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ seq_printf(sf, "%u\n",
+ cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+ return 0;
+}
+
+static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf, bool is_leaf_weight)
{
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
struct blkg_conf_ctx ctx;
@@ -1422,8 +1666,13 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
ret = -EINVAL;
cfqg = blkg_to_cfqg(ctx.blkg);
if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
- cfqg->dev_weight = ctx.v;
- cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+ if (!is_leaf_weight) {
+ cfqg->dev_weight = ctx.v;
+ cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
+ } else {
+ cfqg->dev_leaf_weight = ctx.v;
+ cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
+ }
ret = 0;
}
@@ -1431,29 +1680,63 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
return ret;
}
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ return __cfqg_set_weight_device(cgrp, cft, buf, false);
+}
+
+static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ return __cfqg_set_weight_device(cgrp, cft, buf, true);
+}
+
+static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
+ bool is_leaf_weight)
{
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
struct blkcg_gq *blkg;
- struct hlist_node *n;
if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
return -EINVAL;
spin_lock_irq(&blkcg->lock);
- blkcg->cfq_weight = (unsigned int)val;
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ if (!is_leaf_weight)
+ blkcg->cfq_weight = val;
+ else
+ blkcg->cfq_leaf_weight = val;
+
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- if (cfqg && !cfqg->dev_weight)
- cfqg->new_weight = blkcg->cfq_weight;
+ if (!cfqg)
+ continue;
+
+ if (!is_leaf_weight) {
+ if (!cfqg->dev_weight)
+ cfqg->new_weight = blkcg->cfq_weight;
+ } else {
+ if (!cfqg->dev_leaf_weight)
+ cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+ }
}
spin_unlock_irq(&blkcg->lock);
return 0;
}
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+ return __cfq_set_weight(cgrp, cft, val, false);
+}
+
+static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+ return __cfq_set_weight(cgrp, cft, val, true);
+}
+
static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *sf)
{
@@ -1474,6 +1757,42 @@ static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
+static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
+
+ return __blkg_prfill_u64(sf, pd, sum);
+}
+
+static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
+
+ return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
+ &blkcg_policy_cfq, cft->private, false);
+ return 0;
+}
+
+static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
+ &blkcg_policy_cfq, cft->private, true);
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
@@ -1503,17 +1822,49 @@ static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
#endif /* CONFIG_DEBUG_BLK_CGROUP */
static struct cftype cfq_blkcg_files[] = {
+ /* on root, weight is mapped to leaf_weight */
+ {
+ .name = "weight_device",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .read_seq_string = cfqg_print_leaf_weight_device,
+ .write_string = cfqg_set_leaf_weight_device,
+ .max_write_len = 256,
+ },
+ {
+ .name = "weight",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .read_seq_string = cfq_print_leaf_weight,
+ .write_u64 = cfq_set_leaf_weight,
+ },
+
+ /* no such mapping necessary for !roots */
{
.name = "weight_device",
+ .flags = CFTYPE_NOT_ON_ROOT,
.read_seq_string = cfqg_print_weight_device,
.write_string = cfqg_set_weight_device,
.max_write_len = 256,
},
{
.name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT,
.read_seq_string = cfq_print_weight,
.write_u64 = cfq_set_weight,
},
+
+ {
+ .name = "leaf_weight_device",
+ .read_seq_string = cfqg_print_leaf_weight_device,
+ .write_string = cfqg_set_leaf_weight_device,
+ .max_write_len = 256,
+ },
+ {
+ .name = "leaf_weight",
+ .read_seq_string = cfq_print_leaf_weight,
+ .write_u64 = cfq_set_leaf_weight,
+ },
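+
+ /*
+ * With the "blkio." prefix added by the cgroup core, the entries
+ * above show up as blkio.weight[_device] and
+ * blkio.leaf_weight[_device]; e.g. "echo 250 > blkio.leaf_weight"
+ * lowers the weight of this cgroup's own tasks relative to its
+ * child cgroups.
+ */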
+
+ /* statistics, covers only the tasks in the cfqg */
{
.name = "time",
.private = offsetof(struct cfq_group, stats.time),
@@ -1554,6 +1905,48 @@ static struct cftype cfq_blkcg_files[] = {
.private = offsetof(struct cfq_group, stats.queued),
.read_seq_string = cfqg_print_rwstat,
},
+
+ /* the same statistics which cover the cfqg and its descendants */
+ {
+ .name = "time_recursive",
+ .private = offsetof(struct cfq_group, stats.time),
+ .read_seq_string = cfqg_print_stat_recursive,
+ },
+ {
+ .name = "sectors_recursive",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .read_seq_string = cfqg_print_stat_recursive,
+ },
+ {
+ .name = "io_service_bytes_recursive",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_serviced_recursive",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_service_time_recursive",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_wait_time_recursive",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_merged_recursive",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
+ {
+ .name = "io_queued_recursive",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .read_seq_string = cfqg_print_rwstat_recursive,
+ },
#ifdef CONFIG_DEBUG_BLK_CGROUP
{
.name = "avg_queue_size",
@@ -1612,15 +2005,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
unsigned long rb_key;
- struct cfq_rb_root *service_tree;
+ struct cfq_rb_root *st;
int left;
int new_cfqq = 1;
- service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
- cfqq_type(cfqq));
+ st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
- parent = rb_last(&service_tree->rb);
+ parent = rb_last(&st->rb);
if (parent && parent != &cfqq->rb_node) {
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
rb_key += __cfqq->rb_key;
@@ -1638,7 +2030,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->slice_resid = 0;
} else {
rb_key = -HZ;
- __cfqq = cfq_rb_first(service_tree);
+ __cfqq = cfq_rb_first(st);
rb_key += __cfqq ? __cfqq->rb_key : jiffies;
}
@@ -1647,8 +2039,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* same position, nothing more to do
*/
- if (rb_key == cfqq->rb_key &&
- cfqq->service_tree == service_tree)
+ if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
return;
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -1657,11 +2048,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
left = 1;
parent = NULL;
- cfqq->service_tree = service_tree;
- p = &service_tree->rb.rb_node;
+ cfqq->service_tree = st;
+ p = &st->rb.rb_node;
while (*p) {
- struct rb_node **n;
-
parent = *p;
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
@@ -1669,22 +2058,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* sort by key, that represents service time.
*/
if (time_before(rb_key, __cfqq->rb_key))
- n = &(*p)->rb_left;
+ p = &parent->rb_left;
else {
- n = &(*p)->rb_right;
+ p = &parent->rb_right;
left = 0;
}
-
- p = n;
}
if (left)
- service_tree->left = &cfqq->rb_node;
+ st->left = &cfqq->rb_node;
cfqq->rb_key = rb_key;
rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color(&cfqq->rb_node, &service_tree->rb);
- service_tree->count++;
+ rb_insert_color(&cfqq->rb_node, &st->rb);
+ st->count++;
if (add_front || !new_cfqq)
return;
cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
@@ -2030,8 +2417,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
- cfqd->serving_prio, cfqd->serving_type);
+ cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
+ cfqd->serving_wl_class, cfqd->serving_wl_type);
cfqg_stats_update_avg_queue_size(cfqq->cfqg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
@@ -2117,19 +2504,18 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
*/
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
- struct cfq_rb_root *service_tree =
- service_tree_for(cfqd->serving_group, cfqd->serving_prio,
- cfqd->serving_type);
+ struct cfq_rb_root *st = st_for(cfqd->serving_group,
+ cfqd->serving_wl_class, cfqd->serving_wl_type);
if (!cfqd->rq_queued)
return NULL;
/* There is nothing to dispatch */
- if (!service_tree)
+ if (!st)
return NULL;
- if (RB_EMPTY_ROOT(&service_tree->rb))
+ if (RB_EMPTY_ROOT(&st->rb))
return NULL;
- return cfq_rb_first(service_tree);
+ return cfq_rb_first(st);
}
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
@@ -2285,17 +2671,17 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- enum wl_prio_t prio = cfqq_prio(cfqq);
- struct cfq_rb_root *service_tree = cfqq->service_tree;
+ enum wl_class_t wl_class = cfqq_class(cfqq);
+ struct cfq_rb_root *st = cfqq->service_tree;
- BUG_ON(!service_tree);
- BUG_ON(!service_tree->count);
+ BUG_ON(!st);
+ BUG_ON(!st->count);
if (!cfqd->cfq_slice_idle)
return false;
/* We never do for idle class queues. */
- if (prio == IDLE_WORKLOAD)
+ if (wl_class == IDLE_WORKLOAD)
return false;
/* We do for queues that were marked with idle window flag. */
@@ -2307,11 +2693,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* Otherwise, we do only if they are the last ones
* in their service tree.
*/
- if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
- !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+ if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
+ !cfq_io_thinktime_big(cfqd, &st->ttime, false))
return true;
- cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
- service_tree->count);
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
return false;
}
@@ -2494,8 +2879,8 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
}
}
-static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
- struct cfq_group *cfqg, enum wl_prio_t prio)
+static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
+ struct cfq_group *cfqg, enum wl_class_t wl_class)
{
struct cfq_queue *queue;
int i;
@@ -2505,7 +2890,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
for (i = 0; i <= SYNC_WORKLOAD; ++i) {
/* select the one with lowest rb_key */
- queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+ queue = cfq_rb_first(st_for(cfqg, wl_class, i));
if (queue &&
(!key_valid || time_before(queue->rb_key, lowest_key))) {
lowest_key = queue->rb_key;
@@ -2517,26 +2902,27 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
return cur_best;
}
-static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static void
+choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
unsigned slice;
unsigned count;
struct cfq_rb_root *st;
unsigned group_slice;
- enum wl_prio_t original_prio = cfqd->serving_prio;
+ enum wl_class_t original_class = cfqd->serving_wl_class;
/* Choose next priority. RT > BE > IDLE */
if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
- cfqd->serving_prio = RT_WORKLOAD;
+ cfqd->serving_wl_class = RT_WORKLOAD;
else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
- cfqd->serving_prio = BE_WORKLOAD;
+ cfqd->serving_wl_class = BE_WORKLOAD;
else {
- cfqd->serving_prio = IDLE_WORKLOAD;
+ cfqd->serving_wl_class = IDLE_WORKLOAD;
cfqd->workload_expires = jiffies + 1;
return;
}
- if (original_prio != cfqd->serving_prio)
+ if (original_class != cfqd->serving_wl_class)
goto new_workload;
/*
@@ -2544,7 +2930,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
* (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
* expiration time
*/
- st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
count = st->count;
/*
@@ -2555,9 +2941,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
new_workload:
/* otherwise select new workload type */
- cfqd->serving_type =
- cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
- st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+ cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
+ cfqd->serving_wl_class);
+ st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
count = st->count;
/*
@@ -2568,10 +2954,11 @@ new_workload:
group_slice = cfq_group_slice(cfqd, cfqg);
slice = group_slice * count /
- max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
- cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+ max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
+ cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
+ cfqg));
- if (cfqd->serving_type == ASYNC_WORKLOAD) {
+ if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
unsigned int tmp;
/*
@@ -2617,14 +3004,14 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
cfqd->serving_group = cfqg;
/* Restore the workload type data */
- if (cfqg->saved_workload_slice) {
- cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
- cfqd->serving_type = cfqg->saved_workload;
- cfqd->serving_prio = cfqg->saved_serving_prio;
+ if (cfqg->saved_wl_slice) {
+ cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+ cfqd->serving_wl_type = cfqg->saved_wl_type;
+ cfqd->serving_wl_class = cfqg->saved_wl_class;
} else
cfqd->workload_expires = jiffies - 1;
- choose_service_tree(cfqd, cfqg);
+ choose_wl_class_and_type(cfqd, cfqg);
}
/*
@@ -3206,6 +3593,8 @@ retry:
spin_lock_irq(cfqd->queue->queue_lock);
if (new_cfqq)
goto retry;
+ else
+ return &cfqd->oom_cfqq;
} else {
cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -3403,7 +3792,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
return true;
/* Allow preemption only if we are idling on sync-noidle tree */
- if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+ if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
new_cfqq->service_tree->count == 2 &&
RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3455,7 +3844,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* doesn't happen
*/
if (old_type != cfqq_type(cfqq))
- cfqq->cfqg->saved_workload_slice = 0;
+ cfqq->cfqg->saved_wl_slice = 0;
/*
* Put the new queue at the front of the of the current list,
@@ -3637,16 +4026,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
if (sync) {
- struct cfq_rb_root *service_tree;
+ struct cfq_rb_root *st;
RQ_CIC(rq)->ttime.last_end_request = now;
if (cfq_cfqq_on_rr(cfqq))
- service_tree = cfqq->service_tree;
+ st = cfqq->service_tree;
else
- service_tree = service_tree_for(cfqq->cfqg,
- cfqq_prio(cfqq), cfqq_type(cfqq));
- service_tree->ttime.last_end_request = now;
+ st = st_for(cfqq->cfqg, cfqq_class(cfqq),
+ cfqq_type(cfqq));
+
+ st->ttime.last_end_request = now;
if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
cfqd->last_delayed_sync = now;
}
@@ -3993,6 +4383,7 @@ static int cfq_init_queue(struct request_queue *q)
cfq_init_cfqg_base(cfqd->root_group);
#endif
cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+ cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
/*
* Not strictly needed (since RB_ROOT just clears the node and we
@@ -4177,6 +4568,7 @@ static struct blkcg_policy blkcg_policy_cfq = {
.cftypes = cfq_blkcg_files,
.pd_init_fn = cfq_pd_init,
+ .pd_offline_fn = cfq_pd_offline,
.pd_reset_stats_fn = cfq_pd_reset_stats,
};
#endif
diff --git a/block/elevator.c b/block/elevator.c
index 603b2c178740..a0ffdd943c98 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -46,11 +46,6 @@ static LIST_HEAD(elv_list);
/*
* Merge hash stuff.
*/
-static const int elv_hash_shift = 6;
-#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
-#define ELV_HASH_FN(sec) \
- (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
-#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
@@ -158,7 +153,6 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
struct elevator_queue *eq;
- int i;
eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
if (unlikely(!eq))
@@ -167,14 +161,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
-
- eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
- GFP_KERNEL, q->node);
- if (!eq->hash)
- goto err;
-
- for (i = 0; i < ELV_HASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&eq->hash[i]);
+ hash_init(eq->hash);
return eq;
err:
@@ -189,7 +176,6 @@ static void elevator_release(struct kobject *kobj)
e = container_of(kobj, struct elevator_queue, kobj);
elevator_put(e->type);
- kfree(e->hash);
kfree(e);
}
@@ -261,7 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
- hlist_del_init(&rq->hash);
+ hash_del(&rq->hash);
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -275,7 +261,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
- hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+ hash_add(e->hash, &rq->hash, rq_hash_key(rq));
}
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -287,11 +273,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
- struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
- struct hlist_node *entry, *next;
+ struct hlist_node *next;
struct request *rq;
- hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+ hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
BUG_ON(!ELV_ON_HASH(rq));
if (unlikely(!rq_mergeable(rq))) {
diff --git a/block/genhd.c b/block/genhd.c
index 5f73c2435fde..3c001fba80c7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT (1 << MINORBITS)
+#define NR_EXT_DEVT (1 << MINORBITS)
/* For extended devt allocation. ext_devt_mutex prevents look up
* results from going away underneath its user.
@@ -411,7 +411,7 @@ static int blk_mangle_minor(int minor)
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
struct gendisk *disk = part_to_disk(part);
- int idx, rc;
+ int idx;
/* in consecutive minor range? */
if (part->partno < disk->minors) {
@@ -420,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
}
/* allocate ext devt */
- do {
- if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
- return -ENOMEM;
- rc = idr_get_new(&ext_devt_idr, part, &idx);
- } while (rc == -EAGAIN);
-
- if (rc)
- return rc;
-
- if (idx > MAX_EXT_DEVT) {
- idr_remove(&ext_devt_idr, idx);
- return -EBUSY;
- }
+ mutex_lock(&ext_devt_mutex);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+ mutex_unlock(&ext_devt_mutex);
+ if (idx < 0)
+ return idx == -ENOSPC ? -EBUSY : idx;
*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
return 0;
@@ -655,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_exit(&piter);
invalidate_partition(disk, 0);
- blk_free_devt(disk_to_dev(disk)->devt);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
@@ -674,6 +665,7 @@ void del_gendisk(struct gendisk *disk)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
+ blk_free_devt(disk_to_dev(disk)->devt);
}
EXPORT_SYMBOL(del_gendisk);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index f1d14519cc04..789cdea05893 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
if (!part)
return;
- blk_free_devt(part_devt(part));
rcu_assign_pointer(ptbl->part[partno], NULL);
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ blk_free_devt(part_devt(part));
hd_struct_put(part);
}
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
int p, highest, res;
rescan:
if (state && !IS_ERR(state)) {
- kfree(state);
+ free_partitions(state);
state = NULL;
}
@@ -525,7 +525,7 @@ rescan:
md_autodetect_dev(part_to_dev(part)->devt);
#endif
}
- kfree(state);
+ free_partitions(state);
return 0;
}
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c976..19ba207ea7d1 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
NULL
};
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+ struct parsed_partitions *state;
+ int nr;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ nr = disk_max_parts(hd);
+ state->parts = vzalloc(nr * sizeof(state->parts[0]));
+ if (!state->parts) {
+ kfree(state);
+ return NULL;
+ }
+
+ state->limit = nr;
+
+ return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+ vfree(state->parts);
+ kfree(state);
+}
+
struct parsed_partitions *
check_partition(struct gendisk *hd, struct block_device *bdev)
{
struct parsed_partitions *state;
int i, res, err;
- state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ state = allocate_partitions(hd);
if (!state)
return NULL;
state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
if (!state->pp_buf) {
- kfree(state);
+ free_partitions(state);
return NULL;
}
state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
- state->limit = disk_max_parts(hd);
i = res = err = 0;
while (!res && check_part[i]) {
- memset(&state->parts, 0, sizeof(state->parts));
+ memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
res = check_part[i++](state);
if (res < 0) {
/* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
printk(KERN_INFO "%s", state->pp_buf);
free_page((unsigned long)state->pp_buf);
- kfree(state);
+ free_partitions(state);
return ERR_PTR(res);
}
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 52b100311ec3..eade17ea910b 100644
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
int flags;
bool has_info;
struct partition_meta_info info;
- } parts[DISK_MAX_PARTS];
+ } *parts;
int next;
int limit;
bool access_beyond_eod;
char *pp_buf;
};
+void free_partitions(struct parsed_partitions *state);
+
struct parsed_partitions *
check_partition(struct gendisk *, struct block_device *);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index b62fb88b8711..ff5804e2f1d2 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
goto fail;
}
- /* Check the GUID Partition Table header size */
+ /* Check that the GUID Partition Table header size is not too big */
if (le32_to_cpu((*gpt)->header_size) >
bdev_logical_block_size(state->bdev)) {
- pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+ pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
le32_to_cpu((*gpt)->header_size),
bdev_logical_block_size(state->bdev));
goto fail;
}
+ /* Check that the GUID Partition Table header size is not too small */
+ if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+ pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+ le32_to_cpu((*gpt)->header_size),
+ sizeof(gpt_header));
+ goto fail;
+ }
+
/* Check the GUID Partition Table CRC */
origcrc = le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c5..76d8ba6379a9 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
put_dev_sector(sect);
return 0;
}
+
+ if (blocks_in_map >= state->limit)
+ blocks_in_map = state->limit - 1;
+
strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
for (slot = 1; slot <= blocks_in_map; ++slot) {
int pos = slot * secsize;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 8752a5d26565..7681cd295ab8 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -455,14 +455,19 @@ int msdos_partition(struct parsed_partitions *state)
data = read_part_sector(state, 0, &sect);
if (!data)
return -1;
- if (!msdos_magic_present(data + 510)) {
+
+ /*
+ * Note order! (some AIX disks, e.g. unbootable kind,
+ * have no MSDOS 55aa)
+ */
+ if (aix_magic_present(state, data)) {
put_dev_sector(sect);
+ strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}
- if (aix_magic_present(state, data)) {
+ if (!msdos_magic_present(data + 510)) {
put_dev_sector(sect);
- strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 0880a14834aa..05c0ce52f96d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -353,6 +353,27 @@ config CRYPTO_CRC32C_SPARC64
CRC32c CRC algorithm implemented using sparc64 crypto instructions,
when available.
+config CRYPTO_CRC32
+ tristate "CRC32 CRC algorithm"
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC-32-IEEE 802.3 cyclic redundancy-check algorithm.
+ Shash crypto API wrappers for the crc32_le function.
+
+config CRYPTO_CRC32_PCLMUL
+ tristate "CRC32 PCLMULQDQ hardware acceleration"
+ depends on X86
+ select CRYPTO_HASH
+ select CRC32
+ help
+ On Intel Westmere, AMD Bulldozer and later processors with SSE4.2
+ and PCLMULQDQ support, CRC32 can be computed using the hardware
+ accelerated PCLMULQDQ instruction. This option creates the
+ 'crc32-pclmul' module, which lets any user of the CRC-32-IEEE
+ 802.3 checksum gain better performance compared with the table
+ implementation.
+
config CRYPTO_GHASH
tristate "GHASH digest algorithm"
select CRYPTO_GF128MUL
diff --git a/crypto/Makefile b/crypto/Makefile
index d59dec749804..be1a1bebbb86 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_842) += 842.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 533de9550a82..7d4a8d28277e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
- alg->cra_ablkcipher.geniv ?: "<default>");
+ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
+ sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
- alg->cra_ablkcipher.geniv ?: "<built-in>");
+ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
+ sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
diff --git a/crypto/aead.c b/crypto/aead.c
index 0b8121ebec07..547491e35c63 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
- aead->geniv ?: "<built-in>");
+ strncpy(raead.type, "aead", sizeof(raead.type));
+ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
+ strncpy(raead.type, "nivaead", sizeof(raead.type));
+ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
@@ -282,18 +281,16 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
algt->mask)
return ERR_PTR(-EINVAL);
name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(name);
if (IS_ERR(name))
- return ERR_PTR(err);
+ return ERR_CAST(name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3887856c2dd6..793a27f2493e 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c3b9bfeeb7ff..6149a6e09643 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
void crypto_unregister_template(struct crypto_template *tmpl)
{
struct crypto_instance *inst;
- struct hlist_node *p, *n;
+ struct hlist_node *n;
struct hlist_head *list;
LIST_HEAD(users);
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
list_del_init(&tmpl->list);
list = &tmpl->instances;
- hlist_for_each_entry(inst, p, list, list) {
+ hlist_for_each_entry(inst, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
BUG_ON(err);
}
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
up_write(&crypto_alg_sem);
- hlist_for_each_entry_safe(inst, p, n, list, list) {
+ hlist_for_each_entry_safe(inst, n, list, list) {
BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
tmpl->free(inst);
}
@@ -749,12 +749,10 @@ struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
u32 type, u32 mask)
{
const char *name;
- int err;
name = crypto_attr_alg_name(rta);
- err = PTR_ERR(name);
if (IS_ERR(name))
- return ERR_PTR(err);
+ return ERR_CAST(name);
return crypto_find_alg(name, frontend, type, mask);
}
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 361b5e8239bc..9e62feffb374 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
len, dma_prep_flags);
+ if (!tx) {
+ dma_unmap_page(device->dev, dma_dest, len,
+ DMA_FROM_DEVICE);
+ dma_unmap_page(device->dev, dma_src, len,
+ DMA_TO_DEVICE);
+ }
}
if (tx) {
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 58e4a8752aee..05a4d1e00148 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -25,6 +25,7 @@
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 842120979374..7be34248b450 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
+ if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+ panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
}
@@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
- panic("DMA_ERROR waiting for transaction\n");
+ if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+ panic("%s: DMA error waiting for transaction\n",
+ __func__);
async_tx_ack(*tx);
*tx = NULL;
}
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 154cc84381c2..8ade0a0481c6 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor);
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
- char *a = page_address(p) + offset;
- return ((*(u32 *) a) == 0 &&
- memcmp(a, a + 4, len - 4) == 0);
+ return !memchr_inv(page_address(p) + offset, 0, len);
}
static inline struct dma_chan *
diff --git a/crypto/authenc.c b/crypto/authenc.c
index d0583a4489e6..ffce19de05cf 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -592,9 +592,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 136b68b9d8d4..ab53762fc309 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -715,9 +715,8 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index a8d85a1d670e..a79e7e9ab86e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
- alg->cra_blkcipher.geniv ?: "<default>");
+ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
+ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
+ sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
@@ -588,18 +588,16 @@ struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
algt->mask)
return ERR_PTR(-EINVAL);
name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(name);
if (IS_ERR(name))
- return ERR_PTR(err);
+ return ERR_CAST(name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 32fe1bb5decb..499c91717d93 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -484,18 +484,16 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
- err = PTR_ERR(cipher);
if (IS_ERR(cipher))
- return ERR_PTR(err);
+ return ERR_CAST(cipher);
err = -EINVAL;
if (cipher->cra_blocksize != 16)
@@ -573,15 +571,13 @@ out_put_cipher:
static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
{
- int err;
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(cipher_name);
if (IS_ERR(cipher_name))
- return ERR_PTR(err);
+ return ERR_CAST(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -612,20 +608,17 @@ static struct crypto_template crypto_ccm_tmpl = {
static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
{
- int err;
const char *ctr_name;
const char *cipher_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(ctr_name);
if (IS_ERR(ctr_name))
- return ERR_PTR(err);
+ return ERR_CAST(ctr_name);
cipher_name = crypto_attr_alg_name(tb[2]);
- err = PTR_ERR(cipher_name);
if (IS_ERR(cipher_name))
- return ERR_PTR(err);
+ return ERR_CAST(cipher_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -760,17 +753,15 @@ static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
ccm_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(ccm_name);
if (IS_ERR(ccm_name))
- return ERR_PTR(err);
+ return ERR_CAST(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index ba200b07449d..834d8dd3d4fc 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -291,9 +291,8 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
err = crypto_get_default_rng();
if (err)
diff --git a/crypto/crc32.c b/crypto/crc32.c
new file mode 100644
index 000000000000..9d1c41569898
--- /dev/null
+++ b/crypto/crc32.c
@@ -0,0 +1,158 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ */
+
+/*
+ * These are crypto API shash wrappers for crc32_le.
+ */
+
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_le(crc, p, len);
+}
+
+/* No default init with ~0; the seed defaults to 0 and is set via setkey() */
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 0;
+
+ return 0;
+}
+
+
+/*
+ * Setting the seed allows arbitrary accumulators and a flexible XOR policy.
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
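+ *
+ * E.g. to get the conventional CRC-32 (IEEE 802.3) value of a buffer,
+ * seed the transform with ~0 via setkey() and XOR the final digest
+ * value with ~0; this wrapper applies neither the initial inversion
+ * nor the final XOR itself.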
+ */
+static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = *mctx;
+
+ return 0;
+}
+
+static int crc32_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *crcp = __crc32_le(*crcp, data, len);
+ return 0;
+}
+
+/* No final XOR with 0xFFFFFFFF, same as crc32_le */
+static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__le32 *)out = cpu_to_le32(__crc32_le(*crcp, data, len));
+ return 0;
+}
+
+static int crc32_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ *(__le32 *)out = cpu_to_le32p(crcp);
+ return 0;
+}
+
+static int crc32_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
+ out);
+}
+static struct shash_alg alg = {
+ .setkey = crc32_setkey,
+ .init = crc32_init,
+ .update = crc32_update,
+ .final = crc32_final,
+ .finup = crc32_finup,
+ .digest = crc32_digest,
+ .descsize = sizeof(u32),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-table",
+ .cra_priority = 100,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(u32),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_cra_init,
+ }
+};
+
+static int __init crc32_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit crc32_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(crc32_mod_init);
+module_exit(crc32_mod_fini);
+
+MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
+MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
+MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 35d700a97d79..dfd511fb39ee 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -30,6 +30,8 @@
#include "internal.h"
+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
+
static DEFINE_MUTEX(crypto_cfg_mutex);
/* The crypto netlink socket */
@@ -75,7 +77,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
+ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -94,8 +96,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
-
+ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rcomp))
goto nla_put_failure;
@@ -108,12 +109,14 @@ nla_put_failure:
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
- sizeof(ualg->cru_driver_name));
- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
- CRYPTO_MAX_ALG_NAME);
-
+ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
+ sizeof(ualg->cru_driver_name));
+ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
+ sizeof(ualg->cru_module_name));
+
+ ualg->cru_type = 0;
+ ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
@@ -122,8 +125,7 @@ static int crypto_report_one(struct crypto_alg *alg,
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
-
+ strncpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl))
goto nla_put_failure;
@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct crypto_dump_info info;
int err;
- if (!p->cru_driver_name)
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
+ if (!p->cru_driver_name[0])
return -EINVAL;
alg = crypto_alg_match(p, 1);
@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
LIST_HEAD(list);
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
if (priority && !strlen(p->cru_driver_name))
return -EINVAL;
@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
if (strlen(p->cru_driver_name))
exact = 1;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 1f2997cbfdd4..f2b94f27bb2c 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -343,17 +343,15 @@ static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask)
return ERR_PTR(-EINVAL);
cipher_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(cipher_name);
if (IS_ERR(cipher_name))
- return ERR_PTR(err);
+ return ERR_CAST(cipher_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/cts.c b/crypto/cts.c
index ccf9c5de3958..042223f8e733 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -282,9 +282,8 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
- err = PTR_ERR(alg);
if (IS_ERR(alg))
- return ERR_PTR(err);
+ return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
if (!is_power_of_2(alg->cra_blocksize))
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 1a252639ef91..137ad1ec5438 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -701,9 +701,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
@@ -711,9 +710,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
- err = PTR_ERR(ghash_alg);
if (IS_ERR(ghash_alg))
- return ERR_PTR(err);
+ return ERR_CAST(ghash_alg);
err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -787,15 +785,13 @@ out_put_ghash:
static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
{
- int err;
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(cipher_name);
if (IS_ERR(cipher_name))
- return ERR_PTR(err);
+ return ERR_CAST(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
@@ -826,20 +822,17 @@ static struct crypto_template crypto_gcm_tmpl = {
static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
{
- int err;
const char *ctr_name;
const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(ctr_name);
if (IS_ERR(ctr_name))
- return ERR_PTR(err);
+ return ERR_CAST(ctr_name);
ghash_name = crypto_attr_alg_name(tb[2]);
- err = PTR_ERR(ghash_name);
if (IS_ERR(ghash_name))
- return ERR_PTR(err);
+ return ERR_CAST(ghash_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
@@ -971,17 +964,15 @@ static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
ccm_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(ccm_name);
if (IS_ERR(ccm_name))
- return ERR_PTR(err);
+ return ERR_CAST(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -1222,17 +1213,15 @@ static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
ccm_name = crypto_attr_alg_name(tb[1]);
- err = PTR_ERR(ccm_name);
if (IS_ERR(ccm_name))
- return ERR_PTR(err);
+ return ERR_CAST(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index 04e083ff5373..7140fe70c7af 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
-
+ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp))
goto nla_put_failure;
diff --git a/crypto/rng.c b/crypto/rng.c
index f3b7894dec00..e0a25c2456de 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
+ strncpy(rrng.type, "rng", sizeof(rrng.type));
rrng.seedsize = alg->cra_rng.seedsize;
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 4c4491229417..f2cba4ed6f25 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -305,9 +305,8 @@ static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
int err;
algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
if (IS_ERR(algt))
- return ERR_PTR(err);
+ return ERR_CAST(algt);
err = crypto_get_default_rng();
if (err)
diff --git a/crypto/shash.c b/crypto/shash.c
index f426330f1017..929058a68561 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
+ strncpy(rhash.type, "shash", sizeof(rhash.type));
+
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
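
The snprintf-to-strncpy conversions in these report helpers bound the copy by the size of the destination field rather than by CRYPTO_MAX_ALG_NAME, and strncpy() NUL-pads the unused tail of the fixed-size field, presumably so that no uninitialized bytes are copied out through the subsequent nla_put(). A minimal illustration, taking the struct layout as given from the surrounding code:

	struct crypto_report_hash rhash;

	/* Copy is bounded by the destination; strncpy() NUL-pads the rest
	 * of rhash.type, so the field carries no uninitialized bytes when
	 * the whole struct is handed to nla_put(). */
	strncpy(rhash.type, "shash", sizeof(rhash.type));
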
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index edf4a0818773..efd8b20e13dc 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2269,6 +2269,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecb(fcrypt)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = fcrypt_pcbc_enc_tv_template,
+ .count = 1
+ },
+ .dec = {
+ .vecs = fcrypt_pcbc_dec_tv_template,
+ .count = 1
+ }
+ }
+ }
+ }, {
.alg = "ecb(khazad)",
.test = alg_test_skcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b5721e0b979c..3db1b7591559 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25084,38 +25084,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = {
static struct comp_testvec lzo_comp_tv_template[] = {
{
.inlen = 70,
- .outlen = 46,
+ .outlen = 57,
.input = "Join us now and share the software "
"Join us now and share the software ",
.output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
- "\x3d\x88\x00\x11\x00\x00",
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x70\x01\x32\x88\x00\x0c\x65"
+ "\x20\x74\x68\x65\x20\x73\x6f\x66"
+ "\x74\x77\x61\x72\x65\x20\x11\x00"
+ "\x00",
}, {
.inlen = 159,
- .outlen = 133,
+ .outlen = 131,
.input = "This document describes a compression method based on the LZO "
"compression algorithm. This document defines the application of "
"the LZO algorithm used in UBIFS.",
- .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
+ .output = "\x00\x2c\x54\x68\x69\x73\x20\x64"
"\x6f\x63\x75\x6d\x65\x6e\x74\x20"
"\x64\x65\x73\x63\x72\x69\x62\x65"
"\x73\x20\x61\x20\x63\x6f\x6d\x70"
"\x72\x65\x73\x73\x69\x6f\x6e\x20"
"\x6d\x65\x74\x68\x6f\x64\x20\x62"
"\x61\x73\x65\x64\x20\x6f\x6e\x20"
- "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
- "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
- "\x69\x74\x68\x6d\x2e\x20\x20\x54"
- "\x68\x69\x73\x2a\x54\x01\x02\x66"
- "\x69\x6e\x65\x73\x94\x06\x05\x61"
- "\x70\x70\x6c\x69\x63\x61\x74\x76"
- "\x0a\x6f\x66\x88\x02\x60\x09\x27"
- "\xf0\x00\x0c\x20\x75\x73\x65\x64"
- "\x20\x69\x6e\x20\x55\x42\x49\x46"
- "\x53\x2e\x11\x00\x00",
+ "\x74\x68\x65\x20\x4c\x5a\x4f\x20"
+ "\x2a\x8c\x00\x09\x61\x6c\x67\x6f"
+ "\x72\x69\x74\x68\x6d\x2e\x20\x20"
+ "\x2e\x54\x01\x03\x66\x69\x6e\x65"
+ "\x73\x20\x74\x06\x05\x61\x70\x70"
+ "\x6c\x69\x63\x61\x74\x76\x0a\x6f"
+ "\x66\x88\x02\x60\x09\x27\xf0\x00"
+ "\x0c\x20\x75\x73\x65\x64\x20\x69"
+ "\x6e\x20\x55\x42\x49\x46\x53\x2e"
+ "\x11\x00\x00",
},
};
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1a4ed64586a7..92ed9692c47e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,7 +266,8 @@ config ACPI_CUSTOM_DSDT
default ACPI_CUSTOM_DSDT_FILE != ""
config ACPI_INITRD_TABLE_OVERRIDE
- bool "ACPI tables can be passed via uncompressed cpio in initrd"
+ bool "ACPI tables override via initrd"
+ depends on BLK_DEV_INITRD && X86
default n
help
This option provides functionality to override arbitrary ACPI tables
@@ -306,7 +307,7 @@ config ACPI_DEBUG_FUNC_TRACE
is about half of the penalty and is rarely useful.
config ACPI_PCI_SLOT
- tristate "PCI slot detection driver"
+ bool "PCI slot detection driver"
depends on SYSFS
default n
help
@@ -315,9 +316,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
- To compile this driver as a module, choose M here:
- the module will be called pci_slot.
-
config X86_PM_TIMER
bool "Power Management Timer Support" if EXPERT
depends on X86
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 7ae2750bb457..d668a8ae602b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -48,8 +48,8 @@
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>
-#include <acpi/apei.h>
-#include <acpi/hed.h>
+
+#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>
@@ -84,42 +84,6 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
-/*
- * One struct ghes is created for each generic hardware error source.
- * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler.
- *
- * estatus: memory buffer for error status block, allocated during
- * HEST parsing.
- */
-#define GHES_TO_CLEAR 0x0001
-#define GHES_EXITING 0x0002
-
-struct ghes {
- struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
- u64 buffer_paddr;
- unsigned long flags;
- union {
- struct list_head list;
- struct timer_list timer;
- unsigned int irq;
- };
-};
-
-struct ghes_estatus_node {
- struct llist_node llnode;
- struct acpi_hest_generic *generic;
-};
-
-struct ghes_estatus_cache {
- u32 estatus_len;
- atomic_t count;
- struct acpi_hest_generic *generic;
- unsigned long long time_in;
- struct rcu_head rcu;
-};
-
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
@@ -333,13 +297,6 @@ static void ghes_fini(struct ghes *ghes)
apei_unmap_generic_address(&ghes->generic->error_status_address);
}
-enum {
- GHES_SEV_NO = 0x0,
- GHES_SEV_CORRECTED = 0x1,
- GHES_SEV_RECOVERABLE = 0x2,
- GHES_SEV_PANIC = 0x3,
-};
-
static inline int ghes_severity(int severity)
{
switch (severity) {
@@ -452,7 +409,8 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
+static void ghes_do_proc(struct ghes *ghes,
+ const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
@@ -464,6 +422,8 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err;
mem_err = (struct cper_sec_mem_err *)(gdata+1);
+ ghes_edac_report_mem_error(ghes, sev, mem_err);
+
#ifdef CONFIG_X86_MCE
apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
mem_err);
@@ -682,7 +642,7 @@ static int ghes_proc(struct ghes *ghes)
if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
ghes_estatus_cache_add(ghes->generic, ghes->estatus);
}
- ghes_do_proc(ghes->estatus);
+ ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -775,7 +735,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = apei_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- ghes_do_proc(estatus);
+ ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
@@ -864,6 +824,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
node_len);
if (estatus_node) {
+ estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
memcpy(estatus, ghes->estatus, len);
@@ -942,6 +903,11 @@ static int ghes_probe(struct platform_device *ghes_dev)
ghes = NULL;
goto err;
}
+
+ rc = ghes_edac_register(ghes, &ghes_dev->dev);
+ if (rc < 0)
+ goto err;
+
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
ghes->timer.function = ghes_poll_func;
@@ -954,13 +920,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
- goto err;
+ goto err_edac_unreg;
}
if (request_irq(ghes->irq, ghes_irq_func,
0, "GHES IRQ", ghes)) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
- goto err;
+ goto err_edac_unreg;
}
break;
case ACPI_HEST_NOTIFY_SCI:
@@ -986,6 +952,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
platform_set_drvdata(ghes_dev, ghes);
return 0;
+err_edac_unreg:
+ ghes_edac_unregister(ghes);
err:
if (ghes) {
ghes_fini(ghes);
@@ -1038,6 +1006,9 @@ static int ghes_remove(struct platform_device *ghes_dev)
}
ghes_fini(ghes);
+
+ ghes_edac_unregister(ghes);
+
kfree(ghes);
platform_set_drvdata(ghes_dev, NULL);
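
The ghes_edac_register()/ghes_edac_report_mem_error()/ghes_edac_unregister() calls added above feed GHES memory-error records into an EDAC reporting path. For configurations without that EDAC glue one would expect inline stubs so ghes.c still builds; a sketch of such declarations follows (the header location and the CONFIG_EDAC_GHES guard are assumptions, only the call signatures are taken from the hunks above):

	/* Sketch of <acpi/ghes.h> declarations matching the calls above;
	 * the config symbol name is assumed, not shown in this diff. */
	#ifdef CONFIG_EDAC_GHES
	int ghes_edac_register(struct ghes *ghes, struct device *dev);
	void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
					struct cper_sec_mem_err *mem_err);
	void ghes_edac_unregister(struct ghes *ghes);
	#else
	static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
	{
		return 0;
	}
	static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
						      struct cper_sec_mem_err *mem_err)
	{
	}
	static inline void ghes_edac_unregister(struct ghes *ghes)
	{
	}
	#endif
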
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 7f00cf38098f..f5ef5d54e4ac 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -89,7 +89,7 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
struct acpi_hest_header *hest_hdr;
int i, rc, len;
- if (hest_disable)
+ if (hest_disable || !hest_tab)
return -EINVAL;
hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
@@ -216,9 +216,6 @@ void __init acpi_hest_init(void)
return;
}
- if (acpi_disabled)
- goto err;
-
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 6adfc706a1de..12b62f2cdb3f 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -66,7 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
return count;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 79092328cf06..3c94a732b4b3 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -25,8 +25,14 @@
int init_acpi_device_notify(void);
int acpi_scan_init(void);
+#ifdef CONFIG_ACPI_PCI_SLOT
+void acpi_pci_slot_init(void);
+#else
+static inline void acpi_pci_slot_init(void) { }
+#endif
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
+void acpi_pci_root_hp_init(void);
void acpi_platform_init(void);
int acpi_sysfs_init(void);
void acpi_csrt_init(void);
@@ -65,7 +71,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
int acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
-int acpi_power_min_system_level(struct list_head *list);
+int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 59844ee149be..33e609f63585 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -282,10 +282,10 @@ acpi_table_parse_srat(enum acpi_srat_type id,
handler, max_entries);
}
-static int srat_mem_cnt;
-
-void __init early_parse_srat(void)
+int __init acpi_numa_init(void)
{
+ int cnt = 0;
+
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -295,24 +295,21 @@ void __init early_parse_srat(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
- srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_processor_affinity, 0);
+ cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
}
-}
-int __init acpi_numa_init(void)
-{
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- if (srat_mem_cnt < 0)
- return srat_mem_cnt;
+ if (cnt < 0)
+ return cnt;
else if (!parsed_numa_memblks)
return -ENOENT;
return 0;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 908b02d5da1b..586e7e993d3d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -84,8 +84,7 @@ static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
-struct workqueue_struct *kacpi_hotplug_wq;
-EXPORT_SYMBOL(kacpi_hotplug_wq);
+static struct workqueue_struct *kacpi_hotplug_wq;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -661,7 +660,7 @@ static void acpi_table_taint(struct acpi_table_header *table)
pr_warn(PREFIX
"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
@@ -1778,3 +1777,24 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
{
__acpi_os_prepare_sleep = func;
}
+
+void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
+ void (*func)(struct work_struct *work))
+{
+ struct acpi_hp_work *hp_work;
+ int ret;
+
+ hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
+ if (!hp_work)
+ return;
+
+ hp_work->handle = handle;
+ hp_work->type = type;
+ hp_work->context = context;
+
+ INIT_WORK(&hp_work->work, func);
+ ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
+ if (!ret)
+ kfree(hp_work);
+}
+EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
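
alloc_acpi_hp_work() defers an ACPI notification to process context on the (now static) kacpi_hotplug_wq workqueue instead of handling it inside the notify callback. A minimal caller sketch, mirroring the root-bridge hotplug handler added later in this series; the example_* names are illustrative only, and struct acpi_hp_work is assumed to come from <acpi/acpi_bus.h>:

	static void _example_hotplug_work(struct work_struct *work)
	{
		struct acpi_hp_work *hp_work =
			container_of(work, struct acpi_hp_work, work);

		/* hp_work->handle, ->type and ->context carry the original
		 * notification; do the heavy lifting here in process context. */
		kfree(hp_work);		/* allocated in alloc_acpi_hp_work() */
	}

	static void example_notify_handler(acpi_handle handle, u32 event,
					   void *context)
	{
		/* Defer to the shared hotplug workqueue. */
		alloc_acpi_hp_work(handle, event, context, _example_hotplug_work);
	}
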
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 68a921d03247..41c5e1b799ef 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -53,9 +53,6 @@ struct acpi_prt_entry {
u32 index; /* GSI, or link _CRS index */
};
-static LIST_HEAD(acpi_prt_list);
-static DEFINE_SPINLOCK(acpi_prt_lock);
-
static inline char pin_name(int pin)
{
return 'A' + pin - 1;
@@ -65,28 +62,6 @@ static inline char pin_name(int pin)
PCI IRQ Routing Table (PRT) Support
-------------------------------------------------------------------------- */
-static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
- int pin)
-{
- struct acpi_prt_entry *entry;
- int segment = pci_domain_nr(dev->bus);
- int bus = dev->bus->number;
- int device = PCI_SLOT(dev->devfn);
-
- spin_lock(&acpi_prt_lock);
- list_for_each_entry(entry, &acpi_prt_list, list) {
- if ((segment == entry->id.segment)
- && (bus == entry->id.bus)
- && (device == entry->id.device)
- && (pin == entry->pin)) {
- spin_unlock(&acpi_prt_lock);
- return entry;
- }
- }
- spin_unlock(&acpi_prt_lock);
- return NULL;
-}
-
/* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */
static const struct dmi_system_id medion_md9580[] = {
{
@@ -184,11 +159,19 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
- struct acpi_pci_routing_table *prt)
+static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
+ int pin, struct acpi_pci_routing_table *prt,
+ struct acpi_prt_entry **entry_ptr)
{
+ int segment = pci_domain_nr(dev->bus);
+ int bus = dev->bus->number;
+ int device = PCI_SLOT(dev->devfn);
struct acpi_prt_entry *entry;
+ if (((prt->address >> 16) & 0xffff) != device ||
+ prt->pin + 1 != pin)
+ return -ENODEV;
+
entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -237,43 +220,37 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
entry->id.device, pin_name(entry->pin),
prt->source, entry->index));
- spin_lock(&acpi_prt_lock);
- list_add_tail(&entry->list, &acpi_prt_list);
- spin_unlock(&acpi_prt_lock);
+ *entry_ptr = entry;
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+static int acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
+ int pin, struct acpi_prt_entry **entry_ptr)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_pci_routing_table *entry;
+ acpi_handle handle = NULL;
- /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n",
- (char *) buffer.pointer);
-
- kfree(buffer.pointer);
+ if (dev->bus->bridge)
+ handle = ACPI_HANDLE(dev->bus->bridge);
- buffer.length = ACPI_ALLOCATE_BUFFER;
- buffer.pointer = NULL;
+ if (!handle)
+ return -ENODEV;
+ /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
status = acpi_get_irq_routing_table(handle, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
- acpi_format_exception(status)));
kfree(buffer.pointer);
return -ENODEV;
}
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ if (!acpi_pci_irq_check_entry(handle, dev, pin,
+ entry, entry_ptr))
+ break;
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -282,23 +259,6 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
return 0;
}
-void acpi_pci_irq_del_prt(int segment, int bus)
-{
- struct acpi_prt_entry *entry, *tmp;
-
- printk(KERN_DEBUG
- "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- segment, bus);
- spin_lock(&acpi_prt_lock);
- list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (segment == entry->id.segment && bus == entry->id.bus) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- spin_unlock(&acpi_prt_lock);
-}
-
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
@@ -359,12 +319,13 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
- struct acpi_prt_entry *entry;
+ struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
u8 bridge_pin, orig_pin = pin;
+ int ret;
- entry = acpi_pci_irq_find_prt_entry(dev, pin);
- if (entry) {
+ ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry);
+ if (!ret && entry) {
#ifdef CONFIG_X86_IO_APIC
acpi_reroute_boot_interrupt(dev, entry);
#endif /* CONFIG_X86_IO_APIC */
@@ -373,7 +334,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
return entry;
}
- /*
+ /*
* Attempt to derive an IRQ for this device from a parent bridge's
* PCI interrupt routing entry (eg. yenta bridge and add-in card bridge).
*/
@@ -393,8 +354,8 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
pin = bridge_pin;
}
- entry = acpi_pci_irq_find_prt_entry(bridge, pin);
- if (entry) {
+ ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry);
+ if (!ret && entry) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Derived GSI for %s INT %c from %s\n",
pci_name(dev), pin_name(orig_pin),
@@ -470,6 +431,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
}
+
return 0;
}
@@ -477,6 +439,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
if (rc < 0) {
dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
pin_name(pin));
+ kfree(entry);
return rc;
}
dev->irq = rc;
@@ -491,6 +454,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
(polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ kfree(entry);
return 0;
}
@@ -513,6 +477,8 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
else
gsi = entry->index;
+ kfree(entry);
+
/*
* TBD: It might be worth clearing dev->irq by magic constant
* (e.g. PCI_UNDEFINED_IRQ).
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b3cc69c5caf1..0ac546d5e53f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -103,24 +103,6 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
-acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
-{
- struct acpi_pci_root *root;
- acpi_handle handle = NULL;
-
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) &&
- (root->secondary.start == (u16) bus)) {
- handle = root->device->handle;
- break;
- }
- mutex_unlock(&acpi_pci_root_lock);
- return handle;
-}
-
-EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle - the ACPI CA node in question.
@@ -431,7 +413,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- acpi_handle handle;
struct acpi_pci_driver *driver;
u32 flags, base_flags;
bool is_osc_granted = false;
@@ -486,16 +467,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
- /*
- * PCI Routing Table
- * -----------------
- * Evaluate and parse _PRT, if exists.
- */
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->segment,
- root->secondary.start);
-
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
/*
@@ -597,8 +568,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- if (system_state != SYSTEM_BOOTING)
+ if (system_state != SYSTEM_BOOTING) {
+ pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_bus_resources(root->bus);
+ }
mutex_lock(&acpi_pci_root_lock);
list_for_each_entry(driver, &acpi_pci_drivers, node)
@@ -618,7 +591,6 @@ out_del_root:
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
end:
kfree(root);
return result;
@@ -626,8 +598,6 @@ end:
static void acpi_pci_root_remove(struct acpi_device *device)
{
- acpi_status status;
- acpi_handle handle;
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
@@ -642,10 +612,6 @@ static void acpi_pci_root_remove(struct acpi_device *device)
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status))
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
-
pci_remove_root_bus(root->bus);
mutex_lock(&acpi_pci_root_lock);
@@ -663,3 +629,133 @@ void __init acpi_pci_root_init(void)
acpi_scan_add_handler(&pci_root_handler);
}
}
+/* Support root bridge hotplug */
+
+static void handle_root_bridge_insertion(acpi_handle handle)
+{
+ struct acpi_device *device;
+
+ if (!acpi_bus_get_device(handle, &device)) {
+ printk(KERN_DEBUG "acpi device exists...\n");
+ return;
+ }
+
+ if (acpi_bus_scan(handle))
+ printk(KERN_ERR "cannot add bridge to acpi list\n");
+}
+
+static void handle_root_bridge_removal(struct acpi_device *device)
+{
+ struct acpi_eject_event *ej_event;
+
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ /* Inform firmware that the hot-remove operation failed */
+ (void) acpi_evaluate_hotplug_ost(device->handle,
+ ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+ NULL);
+ return;
+ }
+
+ ej_event->device = device;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+
+ acpi_bus_hot_remove_device(ej_event);
+}
+
+static void _handle_hotplug_event_root(struct work_struct *work)
+{
+ struct acpi_pci_root *root;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+ struct acpi_hp_work *hp_work;
+ acpi_handle handle;
+ u32 type;
+
+ hp_work = container_of(work, struct acpi_hp_work, work);
+ handle = hp_work->handle;
+ type = hp_work->type;
+
+ root = acpi_pci_find_root(handle);
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ /* bus enumerate */
+ printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (!root)
+ handle_root_bridge_insertion(handle);
+
+ break;
+
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ /* device check */
+ printk(KERN_DEBUG "%s: Device check notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (!root)
+ handle_root_bridge_insertion(handle);
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ /* request device eject */
+ printk(KERN_DEBUG "%s: Device eject notify on %s\n", __func__,
+ (char *)buffer.pointer);
+ if (root)
+ handle_root_bridge_removal(root->device);
+ break;
+ default:
+ printk(KERN_WARNING "notify_handler: unknown event type 0x%x for %s\n",
+ type, (char *)buffer.pointer);
+ break;
+ }
+
+ kfree(hp_work); /* allocated in alloc_acpi_hp_work() */
+ kfree(buffer.pointer);
+}
+
+static void handle_hotplug_event_root(acpi_handle handle, u32 type,
+ void *context)
+{
+ alloc_acpi_hp_work(handle, type, context,
+ _handle_hotplug_event_root);
+}
+
+static acpi_status __init
+find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ char objname[64];
+ struct acpi_buffer buffer = { .length = sizeof(objname),
+ .pointer = objname };
+ int *count = (int *)context;
+
+ if (!acpi_is_root_bridge(handle))
+ return AE_OK;
+
+ (*count)++;
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event_root, NULL);
+ if (ACPI_FAILURE(status))
+ printk(KERN_DEBUG "acpi root: %s notify handler is not installed, exit status: %u\n",
+ objname, (unsigned int)status);
+ else
+ printk(KERN_DEBUG "acpi root: %s notify handler is installed\n",
+ objname);
+
+ return AE_OK;
+}
+
+void __init acpi_pci_root_hp_init(void)
+{
+ int num = 0;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
+
+ printk(KERN_DEBUG "Found %d acpi root devices\n", num);
+}
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 2c630c006c2f..cd1434eb1de8 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -329,19 +329,8 @@ static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
{}
};
-static int __init
-acpi_pci_slot_init(void)
+void __init acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
acpi_pci_register_driver(&acpi_pci_slot_driver);
- return 0;
}
-
-static void __exit
-acpi_pci_slot_exit(void)
-{
- acpi_pci_unregister_driver(&acpi_pci_slot_driver);
-}
-
-module_init(acpi_pci_slot_init);
-module_exit(acpi_pci_slot_exit);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b820528a5fa3..34f5ef11d427 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -73,6 +73,7 @@ struct acpi_power_resource {
u32 system_level;
u32 order;
unsigned int ref_count;
+ bool wakeup_enabled;
struct mutex resource_lock;
};
@@ -272,11 +273,9 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
return 0;
}
-static int acpi_power_on(struct acpi_power_resource *resource)
+static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
{
- int result = 0;;
-
- mutex_lock(&resource->resource_lock);
+ int result = 0;
if (resource->ref_count++) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -293,9 +292,16 @@ static int acpi_power_on(struct acpi_power_resource *resource)
schedule_work(&dep->work);
}
}
+ return result;
+}
- mutex_unlock(&resource->resource_lock);
+static int acpi_power_on(struct acpi_power_resource *resource)
+{
+ int result;
+ mutex_lock(&resource->resource_lock);
+ result = acpi_power_on_unlocked(resource);
+ mutex_unlock(&resource->resource_lock);
return result;
}
@@ -313,17 +319,15 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
return 0;
}
-static int acpi_power_off(struct acpi_power_resource *resource)
+static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
{
int result = 0;
- mutex_lock(&resource->resource_lock);
-
if (!resource->ref_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Power resource [%s] already off",
resource->name));
- goto unlock;
+ return 0;
}
if (--resource->ref_count) {
@@ -335,10 +339,16 @@ static int acpi_power_off(struct acpi_power_resource *resource)
if (result)
resource->ref_count++;
}
+ return result;
+}
- unlock:
- mutex_unlock(&resource->resource_lock);
+static int acpi_power_off(struct acpi_power_resource *resource)
+{
+ int result;
+ mutex_lock(&resource->resource_lock);
+ result = acpi_power_off_unlocked(resource);
+ mutex_unlock(&resource->resource_lock);
return result;
}
@@ -521,18 +531,35 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
}
}
-int acpi_power_min_system_level(struct list_head *list)
+int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
{
struct acpi_power_resource_entry *entry;
int system_level = 5;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
+ acpi_handle handle = resource->device.handle;
+ int result;
+ int state;
+ mutex_lock(&resource->resource_lock);
+
+ result = acpi_power_get_state(handle, &state);
+ if (result) {
+ mutex_unlock(&resource->resource_lock);
+ return result;
+ }
+ if (state == ACPI_POWER_RESOURCE_STATE_ON) {
+ resource->ref_count++;
+ resource->wakeup_enabled = true;
+ }
if (system_level > resource->system_level)
system_level = resource->system_level;
+
+ mutex_unlock(&resource->resource_lock);
}
- return system_level;
+ *system_level_p = system_level;
+ return 0;
}
/* --------------------------------------------------------------------------
@@ -610,6 +637,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
+ struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -620,17 +648,31 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- err = acpi_power_on_list(&dev->wakeup.resources);
- if (err) {
- dev_err(&dev->dev, "Cannot turn wakeup power resources on\n");
- dev->wakeup.flags.valid = 0;
- } else {
- /*
- * Passing 3 as the third argument below means the device may be
- * put into arbitrary power state afterward.
- */
- err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
+ list_for_each_entry(entry, &dev->wakeup.resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
+
+ mutex_lock(&resource->resource_lock);
+
+ if (!resource->wakeup_enabled) {
+ err = acpi_power_on_unlocked(resource);
+ if (!err)
+ resource->wakeup_enabled = true;
+ }
+
+ mutex_unlock(&resource->resource_lock);
+
+ if (err) {
+ dev_err(&dev->dev,
+ "Cannot turn wakeup power resources on\n");
+ dev->wakeup.flags.valid = 0;
+ goto out;
+ }
}
+ /*
+ * Passing 3 as the third argument below means the device may be
+ * put into arbitrary power state afterward.
+ */
+ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
if (err)
dev->wakeup.prepare_count = 0;
@@ -647,6 +689,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
+ struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -668,10 +711,25 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (err)
goto out;
- err = acpi_power_off_list(&dev->wakeup.resources);
- if (err) {
- dev_err(&dev->dev, "Cannot turn wakeup power resources off\n");
- dev->wakeup.flags.valid = 0;
+ list_for_each_entry(entry, &dev->wakeup.resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
+
+ mutex_lock(&resource->resource_lock);
+
+ if (resource->wakeup_enabled) {
+ err = acpi_power_off_unlocked(resource);
+ if (!err)
+ resource->wakeup_enabled = false;
+ }
+
+ mutex_unlock(&resource->resource_lock);
+
+ if (err) {
+ dev_err(&dev->dev,
+ "Cannot turn wakeup power resources off\n");
+ dev->wakeup.flags.valid = 0;
+ break;
+ }
}
out:
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index daee7497efd3..5e7e991717d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1002,7 +1002,14 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
if (!list_empty(&wakeup->resources)) {
int sleep_state;
- sleep_state = acpi_power_min_system_level(&wakeup->resources);
+ err = acpi_power_wakeup_list_init(&wakeup->resources,
+ &sleep_state);
+ if (err) {
+ acpi_handle_warn(handle, "Retrieving current states "
+ "of wakeup power resources failed\n");
+ acpi_power_resources_list_free(&wakeup->resources);
+ goto out;
+ }
if (sleep_state < wakeup->sleep_state) {
acpi_handle_warn(handle, "Overriding _PRW sleep state "
"(S%d) by S%d from power resources\n",
@@ -1783,6 +1790,7 @@ int __init acpi_scan_init(void)
acpi_platform_init();
acpi_csrt_init();
acpi_container_init();
+ acpi_pci_slot_init();
mutex_lock(&acpi_scan_lock);
/*
@@ -1804,6 +1812,8 @@ int __init acpi_scan_init(void)
acpi_update_all_gpes();
+ acpi_pci_root_hp_init();
+
out:
mutex_unlock(&acpi_scan_lock);
return result;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4cfb7200260d..3e751b74615e 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -58,6 +58,19 @@ config ATA_ACPI
You can disable this at kernel boot time by using the
option libata.noacpi=1
+config SATA_ZPODD
+ bool "SATA Zero Power ODD Support"
+ depends on ATA_ACPI
+ default n
+ help
+ This option adds support for SATA ZPODD (Zero Power ODD). It
+ requires support from both the ODD and the platform; when enabled,
+ the ODD is automatically powered on/off once certain conditions
+ are satisfied. This does not affect the user's experience of the
+ ODD; power is only saved while the ODD is not in use (i.e. no
+ disc inside).
+
+ If unsure, say N.
+
config SATA_PMP
bool "SATA Port Multiplier support"
default y
@@ -247,6 +260,14 @@ config SATA_PROMISE
If unsure, say N.
+config SATA_RCAR
+ tristate "Renesas R-Car SATA support"
+ depends on ARCH_SHMOBILE && ARCH_R8A7779
+ help
+ This option enables support for Renesas R-Car Serial ATA.
+
+ If unsure, say N.
+
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 9329dafba91b..c04d0fd038a3 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
+obj-$(CONFIG_SATA_RCAR) += sata_rcar.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
@@ -107,3 +108,4 @@ libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o
libata-$(CONFIG_ATA_SFF) += libata-sff.o
libata-$(CONFIG_SATA_PMP) += libata-pmp.o
libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
+libata-$(CONFIG_SATA_ZPODD) += libata-zpodd.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 495aeed26779..a99112cfd8b1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -265,6 +265,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 174eca609b42..d2ba439cfe54 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -317,6 +317,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (DH89xxCC) */
{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 6fc67f7efb22..0ea1018280bd 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include <scsi/scsi_device.h>
#include "libata.h"
@@ -835,50 +836,95 @@ void ata_acpi_on_resume(struct ata_port *ap)
}
}
-/**
- * ata_acpi_set_state - set the port power state
- * @ap: target ATA port
- * @state: state, on/off
- *
- * This function executes the _PS0/_PS3 ACPI method to set the power state.
- * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
- */
-void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
+{
+ int d_max_in = ACPI_STATE_D3_COLD;
+ if (!runtime)
+ goto out;
+
+ /*
+ * For ATAPI, runtime D3 cold is only allowed
+ * for ZPODD in zero power ready state
+ */
+ if (dev->class == ATA_DEV_ATAPI &&
+ !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
+ d_max_in = ACPI_STATE_D3_HOT;
+
+out:
+ return acpi_pm_device_sleep_state(&dev->sdev->sdev_gendev,
+ NULL, d_max_in);
+}
+
+static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
{
+ bool runtime = PMSG_IS_AUTO(state);
struct ata_device *dev;
acpi_handle handle;
int acpi_state;
- /* channel first and then drives for power on and vica versa
- for power off */
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event == PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D0);
-
ata_for_each_dev(dev, &ap->link, ENABLED) {
handle = ata_dev_acpi_handle(dev);
if (!handle)
continue;
- if (state.event != PM_EVENT_ON) {
- acpi_state = acpi_pm_device_sleep_state(
- &dev->sdev->sdev_gendev, NULL, ACPI_STATE_D3);
- if (acpi_state > 0)
- acpi_bus_set_power(handle, acpi_state);
- /* TBD: need to check if it's runtime pm request */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, true);
+ if (!(state.event & PM_EVENT_RESUME)) {
+ acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
+ if (acpi_state == ACPI_STATE_D0)
+ continue;
+ if (runtime && zpodd_dev_enabled(dev) &&
+ acpi_state == ACPI_STATE_D3_COLD)
+ zpodd_enable_run_wake(dev);
+ acpi_bus_set_power(handle, acpi_state);
} else {
- /* Ditto */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, false);
+ if (runtime && zpodd_dev_enabled(dev))
+ zpodd_disable_run_wake(dev);
acpi_bus_set_power(handle, ACPI_STATE_D0);
}
}
+}
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event != PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D3);
+/* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */
+static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ struct ata_device *dev;
+ acpi_handle port_handle;
+
+ port_handle = ata_ap_acpi_handle(ap);
+ if (!port_handle)
+ return;
+
+ /* channel first and then drives for power on, and vice versa
+ for power off */
+ if (state.event & PM_EVENT_RESUME)
+ acpi_bus_set_power(port_handle, ACPI_STATE_D0);
+
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ acpi_handle dev_handle = ata_dev_acpi_handle(dev);
+ if (!dev_handle)
+ continue;
+
+ acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
+ ACPI_STATE_D0 : ACPI_STATE_D3);
+ }
+
+ if (!(state.event & PM_EVENT_RESUME))
+ acpi_bus_set_power(port_handle, ACPI_STATE_D3);
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function sets a proper ACPI D state for the device on
+ * system and runtime PM operations.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ if (ap->flags & ATA_FLAG_ACPI_SATA)
+ sata_acpi_set_state(ap, state);
+ else
+ pata_acpi_set_state(ap, state);
}
/**
@@ -974,57 +1020,6 @@ void ata_acpi_on_disable(struct ata_device *dev)
ata_acpi_clear_gtf(dev);
}
-static void ata_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
-{
- struct ata_device *ata_dev = context;
-
- if (event == ACPI_NOTIFY_DEVICE_WAKE && ata_dev &&
- pm_runtime_suspended(&ata_dev->sdev->sdev_gendev))
- scsi_autopm_get_device(ata_dev->sdev);
-}
-
-static void ata_acpi_add_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev, dev);
- device_set_run_wake(&dev->sdev->sdev_gendev, true);
- }
-}
-
-static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- device_set_run_wake(&dev->sdev->sdev_gendev, false);
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev);
- }
-}
-
static void ata_acpi_register_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
@@ -1047,13 +1042,13 @@ static void ata_acpi_unregister_power_resource(struct ata_device *dev)
void ata_acpi_bind(struct ata_device *dev)
{
- ata_acpi_add_pm_notifier(dev);
ata_acpi_register_power_resource(dev);
+ if (zpodd_dev_enabled(dev))
+ dev_pm_qos_expose_flags(&dev->sdev->sdev_gendev, 0);
}
void ata_acpi_unbind(struct ata_device *dev)
{
- ata_acpi_remove_pm_notifier(dev);
ata_acpi_unregister_power_resource(dev);
}
@@ -1095,9 +1090,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
acpi_handle *handle)
{
struct ata_device *ata_dev;
- acpi_status status;
- struct acpi_device *acpi_dev;
- struct acpi_device_power_state *states;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
@@ -1114,21 +1106,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
if (!*handle)
return -ENODEV;
- status = acpi_bus_get_device(*handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return 0;
-
- /*
- * If firmware has _PS3 or _PR3 for this device,
- * and this ata ODD device support device attention,
- * it means this device can be powered off
- */
- states = acpi_dev->power.states;
- if ((states[ACPI_STATE_D3_HOT].flags.valid ||
- states[ACPI_STATE_D3_COLD].flags.explicit_set) &&
- ata_dev->flags & ATA_DFLAG_DA)
- sdev->can_power_off = 1;
-
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 46cd3f4c6aaa..497adea1f0d6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2400,8 +2400,10 @@ int ata_dev_configure(struct ata_device *dev)
dma_dir_string = ", DMADIR";
}
- if (ata_id_has_da(dev->id))
+ if (ata_id_has_da(dev->id)) {
dev->flags |= ATA_DFLAG_DA;
+ zpodd_init(dev);
+ }
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
@@ -5331,9 +5333,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
{
- unsigned int ehi_flags = ATA_EHI_QUIET;
- int rc;
-
/*
* On some hardware, device fails to respond after spun down
* for suspend. As the device won't be used before being
@@ -5342,11 +5341,9 @@ static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int
*
* http://thread.gmane.org/gmane.linux.ide/46764
*/
- if (mesg.event == PM_EVENT_SUSPEND)
- ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
-
- rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
- return rc;
+ unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
+ ATA_EHI_NO_RECOVERY;
+ return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
}
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
@@ -5367,40 +5364,38 @@ static int ata_port_suspend(struct device *dev)
static int ata_port_do_freeze(struct device *dev)
{
if (pm_runtime_suspended(dev))
- pm_runtime_resume(dev);
+ return 0;
return ata_port_suspend_common(dev, PMSG_FREEZE);
}
static int ata_port_poweroff(struct device *dev)
{
- if (pm_runtime_suspended(dev))
- return 0;
-
return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
-static int __ata_port_resume_common(struct ata_port *ap, int *async)
+static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
+ int *async)
{
int rc;
- rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
return rc;
}
-static int ata_port_resume_common(struct device *dev)
+static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
{
struct ata_port *ap = to_ata_port(dev);
- return __ata_port_resume_common(ap, NULL);
+ return __ata_port_resume_common(ap, mesg, NULL);
}
static int ata_port_resume(struct device *dev)
{
int rc;
- rc = ata_port_resume_common(dev);
+ rc = ata_port_resume_common(dev, PMSG_RESUME);
if (!rc) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -5410,11 +5405,40 @@ static int ata_port_resume(struct device *dev)
return rc;
}
+/*
+ * For ODDs, the upper layer polls for media change every few seconds,
+ * which makes the device enter and leave suspend state that often. Since
+ * each suspend triggers a hard/soft reset, the gain from runtime suspend
+ * is very small and the ODD may malfunction after being reset repeatedly.
+ * So the idle callback here will not proceed to suspend if a non-ZPODD
+ * capable ODD is attached to the port.
+ */
static int ata_port_runtime_idle(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+ struct ata_link *link;
+ struct ata_device *adev;
+
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(adev, link, ENABLED)
+ if (adev->class == ATA_DEV_ATAPI &&
+ !zpodd_dev_enabled(adev))
+ return -EBUSY;
+ }
+
return pm_runtime_suspend(dev);
}
+static int ata_port_runtime_suspend(struct device *dev)
+{
+ return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int ata_port_runtime_resume(struct device *dev)
+{
+ return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+}
+
static const struct dev_pm_ops ata_port_pm_ops = {
.suspend = ata_port_suspend,
.resume = ata_port_resume,
@@ -5423,8 +5447,8 @@ static const struct dev_pm_ops ata_port_pm_ops = {
.poweroff = ata_port_poweroff,
.restore = ata_port_resume,
- .runtime_suspend = ata_port_suspend,
- .runtime_resume = ata_port_resume_common,
+ .runtime_suspend = ata_port_runtime_suspend,
+ .runtime_resume = ata_port_runtime_resume,
.runtime_idle = ata_port_runtime_idle,
};
@@ -5441,7 +5465,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
- return __ata_port_resume_common(ap, async);
+ return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
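
The new runtime_suspend/runtime_resume callbacks pass PMSG_AUTO_SUSPEND and PMSG_AUTO_RESUME down instead of reusing the system-sleep messages, so lower layers (such as the ACPI code earlier in this diff) can tell runtime PM apart from system sleep. A minimal sketch of that test; the helper name is illustrative only:

	#include <linux/pm.h>

	/* Illustrative helper: distinguish runtime PM from system sleep once
	 * the original pm_message_t is forwarded, as the changes above do. */
	static bool example_is_runtime_pm(pm_message_t mesg)
	{
		return PMSG_IS_AUTO(mesg);	/* true for PMSG_AUTO_SUSPEND/RESUME */
	}
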
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bcf4437214f5..f9476fb3ac43 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1591,7 +1591,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
-static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
struct ata_taskfile tf;
@@ -1624,7 +1624,7 @@ static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
* RETURNS:
* 0 on success, AC_ERR_* mask on failure
*/
-static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] =
@@ -3857,6 +3857,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = atapi_eh_clear_ua(dev);
if (rc)
goto rest_fail;
+ if (zpodd_dev_enabled(dev))
+ zpodd_post_poweron(dev);
}
}
@@ -4022,11 +4024,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
unsigned long flags;
int rc = 0;
+ struct ata_device *dev;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event == PM_EVENT_ON) {
+ ap->pm_mesg.event & PM_EVENT_RESUME) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4034,6 +4037,18 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ /*
+ * If we have a ZPODD attached, check its zero
+ * power ready status before the port is frozen.
+ * Only needed for runtime suspend.
+ */
+ if (PMSG_IS_AUTO(ap->pm_mesg)) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (zpodd_dev_enabled(dev))
+ zpodd_on_suspend(dev);
+ }
+ }
+
/* tell ACPI we're suspending */
rc = ata_acpi_on_suspend(ap);
if (rc)
@@ -4045,7 +4060,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
if (ap->ops->port_suspend)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
- ata_acpi_set_state(ap, PMSG_SUSPEND);
+ ata_acpi_set_state(ap, ap->pm_mesg);
out:
/* report result */
spin_lock_irqsave(ap->lock, flags);
@@ -4085,7 +4100,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event != PM_EVENT_ON) {
+ !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4104,7 +4119,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_for_each_dev(dev, link, ALL)
ata_ering_clear(&dev->ering);
- ata_acpi_set_state(ap, PMSG_ON);
+ ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
rc = ap->ops->port_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7c337e754dab..318b41358187 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -933,7 +933,11 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* block specified for the ATA pass through commands. Regardless
* of whether the command errored or not, return a sense
* block. Copy all controller registers into the sense
- * block. Clear sense key, ASC & ASCQ if there is no error.
+ * block. If there was no error, the request came from an ATA
+ * passthrough command, so we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+ *
*
* LOCKING:
* None.
@@ -959,6 +963,10 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
+ } else {
+ sb[1] = RECOVERED_ERROR;
+ sb[2] = 0;
+ sb[3] = 0x1D;
}
/*
@@ -1733,10 +1741,12 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to, a check condition
- * is generated and the ATA register values are returned
+ * generate because the user forced us to [CK_COND = 1], a check
+ * condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, SK, ASC and ASCQ will all be zero.
+ * was no error, we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense)) {
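For reference, a consumer of this sense data can tell the "sense returned only because CK_COND was set" case apart from a genuine failure by checking exactly the bytes written in the else branch above. A minimal sketch, assuming the caller already holds the raw sense buffer (the helper name is illustrative, not part of the patch):

	/* Hypothetical helper: true when the sense block merely carries the
	 * ATA register snapshot for a successful pass-through command. */
	static bool passthru_sense_is_informational(const u8 *sb)
	{
		return (sb[1] & 0x0f) == RECOVERED_ERROR &&	/* sense key 0x1 */
		       sb[2] == 0x00 && sb[3] == 0x1d;		/* ATA PASS-THROUGH
								   INFORMATION AVAILABLE */
	}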
@@ -3755,6 +3765,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
ata_acpi_unbind(dev);
/* clearing dev->sdev is protected by host lock */
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
new file mode 100644
index 000000000000..90b159b740b3
--- /dev/null
+++ b/drivers/ata/libata-zpodd.c
@@ -0,0 +1,299 @@
+#include <linux/libata.h>
+#include <linux/cdrom.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <scsi/scsi_device.h>
+
+#include "libata.h"
+
+static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */
+module_param(zpodd_poweroff_delay, int, 0644);
+MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds");
+
+enum odd_mech_type {
+ ODD_MECH_TYPE_SLOT,
+ ODD_MECH_TYPE_DRAWER,
+ ODD_MECH_TYPE_UNSUPPORTED,
+};
+
+struct zpodd {
+ enum odd_mech_type mech_type; /* init during probe, RO afterwards */
+ struct ata_device *dev;
+
+ /* The following fields are synchronized by PM core. */
+ bool from_notify; /* resumed as a result of
+ * acpi wake notification */
+ bool zp_ready; /* ZP ready state */
+ unsigned long last_ready; /* last ZP ready timestamp */
+ bool zp_sampled; /* ZP ready state sampled */
+ bool powered_off; /* ODD is powered off
+ * during suspend */
+};
+
+static int eject_tray(struct ata_device *dev)
+{
+ struct ata_taskfile tf = {};
+ const char cdb[] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+}
+
+/* Per the spec, only slot type and drawer type ODD can be supported */
+static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+{
+ char buf[16];
+ unsigned int ret;
+ struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct ata_taskfile tf = {};
+
+ char cdb[] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+ 0, sizeof(buf),
+ 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_PIO;
+ tf.lbam = sizeof(buf);
+
+ ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+ buf, sizeof(buf), 0);
+ if (ret)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (be16_to_cpu(desc->feature_code) != 3)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_SLOT;
+ else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_DRAWER;
+ else
+ return ODD_MECH_TYPE_UNSUPPORTED;
+}
+
+static bool odd_can_poweroff(struct ata_device *ata_dev)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_device *acpi_dev;
+
+ handle = ata_dev_acpi_handle(ata_dev);
+ if (!handle)
+ return false;
+
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return acpi_device_can_poweroff(acpi_dev);
+}
+
+/* Test if ODD is zero power ready by sense code */
+static bool zpready(struct ata_device *dev)
+{
+ u8 sense_key, *sense_buf;
+ unsigned int ret, asc, ascq, add_len;
+ struct zpodd *zpodd = dev->zpodd;
+
+ ret = atapi_eh_tur(dev, &sense_key);
+
+ if (!ret || sense_key != NOT_READY)
+ return false;
+
+ sense_buf = dev->link->ap->sector_buf;
+ ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
+ if (ret)
+ return false;
+
+ /* sense valid */
+ if ((sense_buf[0] & 0x7f) != 0x70)
+ return false;
+
+ add_len = sense_buf[7];
+ /* has asc and ascq */
+ if (add_len < 6)
+ return false;
+
+ asc = sense_buf[12];
+ ascq = sense_buf[13];
+
+ if (zpodd->mech_type == ODD_MECH_TYPE_SLOT)
+ /* no media inside */
+ return asc == 0x3a;
+ else
+ /* no media inside and door closed */
+ return asc == 0x3a && ascq == 0x01;
+}
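The parsing above follows the fixed-format sense layout: byte 0 carries the response code (0x70, with bit 7 being the VALID bit, hence the 0x7f mask), byte 7 the additional sense length, and bytes 12/13 the ASC/ASCQ pair; ASC 0x3a means "medium not present" and ASCQ 0x01 qualifies it as "tray closed". A condensed sketch of the final decision, with a hypothetical helper name:

	/* Illustrative only: the slot/drawer distinction made at the end of
	 * zpready(), given the ASC/ASCQ bytes of a fixed-format sense block. */
	static bool no_media_for_mech(u8 asc, u8 ascq, bool drawer)
	{
		if (drawer)
			return asc == 0x3a && ascq == 0x01;	/* no media, door closed */
		return asc == 0x3a;				/* no media */
	}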
+
+/*
+ * Update the zpodd->zp_ready field. This field will only be set
+ * if the ODD has stayed in ZP ready state for zpodd_poweroff_delay
+ * time, and will be used to decide if power off is allowed. If it
+ * is set, it will be cleared during resume from powered off state.
+ */
+void zpodd_on_suspend(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ unsigned long expires;
+
+ if (!zpready(dev)) {
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+ return;
+ }
+
+ if (!zpodd->zp_sampled) {
+ zpodd->zp_sampled = true;
+ zpodd->last_ready = jiffies;
+ return;
+ }
+
+ expires = zpodd->last_ready +
+ msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ if (time_before(jiffies, expires))
+ return;
+
+ zpodd->zp_ready = true;
+}
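The function above is a simple persistence check: the first suspend attempt that finds the ODD zero-power ready only records a timestamp (zp_sampled/last_ready), and zp_ready is asserted only once a later attempt sees the state has held for zpodd_poweroff_delay seconds. The same pattern in isolation, with hypothetical names and the jiffies helpers used above:

	#include <linux/jiffies.h>

	/* Sketch: report true only once `ready_now` has been seen at successive
	 * calls spanning at least `delay_secs` seconds. */
	static bool ready_for_long_enough(bool ready_now, bool *sampled,
					  unsigned long *first_seen,
					  unsigned int delay_secs)
	{
		if (!ready_now) {
			*sampled = false;
			return false;
		}
		if (!*sampled) {
			*sampled = true;
			*first_seen = jiffies;
			return false;
		}
		return time_after_eq(jiffies, *first_seen +
					      msecs_to_jiffies(delay_secs * 1000));
	}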
+
+bool zpodd_zpready(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ return zpodd->zp_ready;
+}
+
+/*
+ * Enable runtime wake capability through ACPI and set the powered_off flag;
+ * this flag will be used during resume to decide which operations need
+ * to be taken.
+ *
+ * Also, media poll needs to be silenced, so that it doesn't bring the ODD
+ * back to full power state every few seconds.
+ */
+void zpodd_enable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ sdev_disable_disk_events(dev->sdev);
+
+ zpodd->powered_off = true;
+ device_set_run_wake(&dev->sdev->sdev_gendev, true);
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, true);
+}
+
+/* Disable runtime wake capability if it is enabled */
+void zpodd_disable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (zpodd->powered_off) {
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, false);
+ device_set_run_wake(&dev->sdev->sdev_gendev, false);
+ }
+}
+
+/*
+ * Post power on processing after the ODD has been recovered. If the
+ * ODD wasn't powered off during suspend, this does nothing.
+ *
+ * For a drawer type ODD, if it is powered on because the user pressed
+ * the eject button, the tray needs to be ejected. This can only be done
+ * after the ODD has been recovered, i.e. the link is initialized and the
+ * device is able to process a NON_DATA PIO command, as ejecting requires
+ * sending a command for the ODD to process.
+ *
+ * The from_notify flag, set in the wake notification handler
+ * zpodd_wake_dev, indicates whether power on was due to the user's action.
+ *
+ * For both types of ODD, several fields need to be reset.
+ */
+void zpodd_post_poweron(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (!zpodd->powered_off)
+ return;
+
+ zpodd->powered_off = false;
+
+ if (zpodd->from_notify) {
+ zpodd->from_notify = false;
+ if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER)
+ eject_tray(dev);
+ }
+
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+
+ sdev_enable_disk_events(dev->sdev);
+}
+
+static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context)
+{
+ struct ata_device *ata_dev = context;
+ struct zpodd *zpodd = ata_dev->zpodd;
+ struct device *dev = &ata_dev->sdev->sdev_gendev;
+
+ if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) {
+ zpodd->from_notify = true;
+ pm_runtime_resume(dev);
+ }
+}
+
+static void ata_acpi_add_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = ata_dev_acpi_handle(dev);
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ zpodd_wake_dev, dev);
+}
+
+static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->sdev->sdev_gendev);
+ acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
+}
+
+void zpodd_init(struct ata_device *dev)
+{
+ enum odd_mech_type mech_type;
+ struct zpodd *zpodd;
+
+ if (dev->zpodd)
+ return;
+
+ if (!odd_can_poweroff(dev))
+ return;
+
+ mech_type = zpodd_get_mech_type(dev);
+ if (mech_type == ODD_MECH_TYPE_UNSUPPORTED)
+ return;
+
+ zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL);
+ if (!zpodd)
+ return;
+
+ zpodd->mech_type = mech_type;
+
+ ata_acpi_add_pm_notifier(dev);
+ zpodd->dev = dev;
+ dev->zpodd = zpodd;
+}
+
+void zpodd_exit(struct ata_device *dev)
+{
+ ata_acpi_remove_pm_notifier(dev);
+ kfree(dev->zpodd);
+ dev->zpodd = NULL;
+}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 7148a58020b9..c949dd311b2e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -182,6 +182,9 @@ extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
+extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
+ u8 *sense_buf, u8 dfl_sense_key);
/* libata-pmp.c */
#ifdef CONFIG_SATA_PMP
@@ -230,4 +233,28 @@ static inline void ata_sff_exit(void)
{ }
#endif /* CONFIG_ATA_SFF */
+/* libata-zpodd.c */
+#ifdef CONFIG_SATA_ZPODD
+void zpodd_init(struct ata_device *dev);
+void zpodd_exit(struct ata_device *dev);
+static inline bool zpodd_dev_enabled(struct ata_device *dev)
+{
+ return dev->zpodd != NULL;
+}
+void zpodd_on_suspend(struct ata_device *dev);
+bool zpodd_zpready(struct ata_device *dev);
+void zpodd_enable_run_wake(struct ata_device *dev);
+void zpodd_disable_run_wake(struct ata_device *dev);
+void zpodd_post_poweron(struct ata_device *dev);
+#else /* CONFIG_SATA_ZPODD */
+static inline void zpodd_init(struct ata_device *dev) {}
+static inline void zpodd_exit(struct ata_device *dev) {}
+static inline bool zpodd_dev_enabled(struct ata_device *dev) { return false; }
+static inline void zpodd_on_suspend(struct ata_device *dev) {}
+static inline bool zpodd_zpready(struct ata_device *dev) { return false; }
+static inline void zpodd_enable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_disable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_post_poweron(struct ata_device *dev) {}
+#endif /* CONFIG_SATA_ZPODD */
+
#endif /* __LIBATA_H__ */
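The CONFIG_SATA_ZPODD=n stubs keep the call sites added in libata-eh.c and libata-scsi.c unconditional: zpodd_dev_enabled() folds to a constant false and the other hooks to empty inlines, so the compiler drops the branches entirely. A small sketch of a caller that builds either way (the function name is illustrative):

	static void example_post_resume(struct ata_device *dev)
	{
		/* With CONFIG_SATA_ZPODD=n this whole body is eliminated. */
		if (zpodd_dev_enabled(dev))
			zpodd_post_poweron(dev);
	}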
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 63ffb002ec67..70b0e01372b3 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -512,7 +512,7 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
return -ENOMEM;
}
- info->clk = clk_get(&pdev->dev, "cfcon");
+ info->clk = devm_clk_get(&pdev->dev, "cfcon");
if (IS_ERR(info->clk)) {
dev_err(dev, "failed to get access to cf controller clock\n");
ret = PTR_ERR(info->clk);
@@ -589,7 +589,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
stop_clk:
clk_disable(info->clk);
- clk_put(info->clk);
return ret;
}
@@ -601,7 +600,6 @@ static int __exit pata_s3c_remove(struct platform_device *pdev)
ata_host_detach(host);
clk_disable(info->clk);
- clk_put(info->clk);
return 0;
}
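The pata_samsung_cf change is a straight conversion to the device-managed clock API: devm_clk_get() ties the clock reference to the device's lifetime, so the explicit clk_put() calls in the error and remove paths can be dropped. A hedged sketch of the resulting probe-side pattern (names are illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* Released automatically when probe fails or the device is unbound. */
		clk = devm_clk_get(&pdev->dev, "cfcon");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_enable(clk);
	}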
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
new file mode 100644
index 000000000000..caf33f620c35
--- /dev/null
+++ b/drivers/ata/sata_rcar.c
@@ -0,0 +1,910 @@
+/*
+ * Renesas R-Car SATA driver
+ *
+ * Author: Vladimir Barinov <source@cogentembedded.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "sata_rcar"
+
+/* SH-Navi2G/ATAPI-ATA compatible task registers */
+#define DATA_REG 0x100
+#define SDEVCON_REG 0x138
+
+/* SH-Navi2G/ATAPI module compatible control registers */
+#define ATAPI_CONTROL1_REG 0x180
+#define ATAPI_STATUS_REG 0x184
+#define ATAPI_INT_ENABLE_REG 0x188
+#define ATAPI_DTB_ADR_REG 0x198
+#define ATAPI_DMA_START_ADR_REG 0x19C
+#define ATAPI_DMA_TRANS_CNT_REG 0x1A0
+#define ATAPI_CONTROL2_REG 0x1A4
+#define ATAPI_SIG_ST_REG 0x1B0
+#define ATAPI_BYTE_SWAP_REG 0x1BC
+
+/* ATAPI control 1 register (ATAPI_CONTROL1) bits */
+#define ATAPI_CONTROL1_ISM BIT(16)
+#define ATAPI_CONTROL1_DTA32M BIT(11)
+#define ATAPI_CONTROL1_RESET BIT(7)
+#define ATAPI_CONTROL1_DESE BIT(3)
+#define ATAPI_CONTROL1_RW BIT(2)
+#define ATAPI_CONTROL1_STOP BIT(1)
+#define ATAPI_CONTROL1_START BIT(0)
+
+/* ATAPI status register (ATAPI_STATUS) bits */
+#define ATAPI_STATUS_SATAINT BIT(11)
+#define ATAPI_STATUS_DNEND BIT(6)
+#define ATAPI_STATUS_DEVTRM BIT(5)
+#define ATAPI_STATUS_DEVINT BIT(4)
+#define ATAPI_STATUS_ERR BIT(2)
+#define ATAPI_STATUS_NEND BIT(1)
+#define ATAPI_STATUS_ACT BIT(0)
+
+/* Interrupt enable register (ATAPI_INT_ENABLE) bits */
+#define ATAPI_INT_ENABLE_SATAINT BIT(11)
+#define ATAPI_INT_ENABLE_DNEND BIT(6)
+#define ATAPI_INT_ENABLE_DEVTRM BIT(5)
+#define ATAPI_INT_ENABLE_DEVINT BIT(4)
+#define ATAPI_INT_ENABLE_ERR BIT(2)
+#define ATAPI_INT_ENABLE_NEND BIT(1)
+#define ATAPI_INT_ENABLE_ACT BIT(0)
+
+/* Access control registers for physical layer control register */
+#define SATAPHYADDR_REG 0x200
+#define SATAPHYWDATA_REG 0x204
+#define SATAPHYACCEN_REG 0x208
+#define SATAPHYRESET_REG 0x20C
+#define SATAPHYRDATA_REG 0x210
+#define SATAPHYACK_REG 0x214
+
+/* Physical layer control address command register (SATAPHYADDR) bits */
+#define SATAPHYADDR_PHYRATEMODE BIT(10)
+#define SATAPHYADDR_PHYCMD_READ BIT(9)
+#define SATAPHYADDR_PHYCMD_WRITE BIT(8)
+
+/* Physical layer control enable register (SATAPHYACCEN) bits */
+#define SATAPHYACCEN_PHYLANE BIT(0)
+
+/* Physical layer control reset register (SATAPHYRESET) bits */
+#define SATAPHYRESET_PHYRST BIT(1)
+#define SATAPHYRESET_PHYSRES BIT(0)
+
+/* Physical layer control acknowledge register (SATAPHYACK) bits */
+#define SATAPHYACK_PHYACK BIT(0)
+
+/* Serial-ATA HOST control registers */
+#define BISTCONF_REG 0x102C
+#define SDATA_REG 0x1100
+#define SSDEVCON_REG 0x1204
+
+#define SCRSSTS_REG 0x1400
+#define SCRSERR_REG 0x1404
+#define SCRSCON_REG 0x1408
+#define SCRSACT_REG 0x140C
+
+#define SATAINTSTAT_REG 0x1508
+#define SATAINTMASK_REG 0x150C
+
+/* SATA INT status register (SATAINTSTAT) bits */
+#define SATAINTSTAT_SERR BIT(3)
+#define SATAINTSTAT_ATA BIT(0)
+
+/* SATA INT mask register (SATAINTMASK) bits */
+#define SATAINTMASK_SERRMSK BIT(3)
+#define SATAINTMASK_ERRMSK BIT(2)
+#define SATAINTMASK_ERRCRTMSK BIT(1)
+#define SATAINTMASK_ATAMSK BIT(0)
+
+#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \
+ SATAINTMASK_ATAMSK)
+
+/* Physical Layer Control Registers */
+#define SATAPCTLR1_REG 0x43
+#define SATAPCTLR2_REG 0x52
+#define SATAPCTLR3_REG 0x5A
+#define SATAPCTLR4_REG 0x60
+
+/* Descriptor table word 0 bit (when DTA32M = 1) */
+#define SATA_RCAR_DTEND BIT(0)
+
+struct sata_rcar_priv {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static void sata_rcar_phy_initialize(struct sata_rcar_priv *priv)
+{
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+ /* reset */
+ iowrite32(SATAPHYRESET_PHYRST, priv->base + SATAPHYRESET_REG);
+ udelay(10);
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+}
+
+static void sata_rcar_phy_write(struct sata_rcar_priv *priv, u16 reg, u32 val,
+ int group)
+{
+ int timeout;
+
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+ /* lane 1 */
+ iowrite32(SATAPHYACCEN_PHYLANE, priv->base + SATAPHYACCEN_REG);
+ /* write phy register value */
+ iowrite32(val, priv->base + SATAPHYWDATA_REG);
+ /* set register group */
+ if (group)
+ reg |= SATAPHYADDR_PHYRATEMODE;
+ /* write command */
+ iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, priv->base + SATAPHYADDR_REG);
+ /* wait for ack */
+ for (timeout = 0; timeout < 100; timeout++) {
+ val = ioread32(priv->base + SATAPHYACK_REG);
+ if (val & SATAPHYACK_PHYACK)
+ break;
+ }
+ if (timeout >= 100)
+ pr_err("%s timeout\n", __func__);
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+}
+
+static void sata_rcar_freeze(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ ata_sff_freeze(ap);
+}
+
+static void sata_rcar_thaw(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* ack */
+ iowrite32(~SATA_RCAR_INT_MASK, priv->base + SATAINTSTAT_REG);
+
+ ata_sff_thaw(ap);
+
+ /* unmask */
+ iowrite32(0x7ff & ~SATA_RCAR_INT_MASK, priv->base + SATAINTMASK_REG);
+}
+
+static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
+{
+ u16 *ptr = buffer;
+
+ while (count--) {
+ u16 data = ioread32(reg);
+
+ *ptr++ = data;
+ }
+}
+
+static void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count)
+{
+ const u16 *ptr = buffer;
+
+ while (count--)
+ iowrite32(*ptr++, reg);
+}
+
+static u8 sata_rcar_check_status(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.status_addr);
+}
+
+static u8 sata_rcar_check_altstatus(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.altstatus_addr);
+}
+
+static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ iowrite32(ctl, ap->ioaddr.ctl_addr);
+}
+
+static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
+{
+ iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+ ata_sff_pause(ap); /* needed; also flushes, for mmio */
+}
+
+static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ sata_rcar_dev_select(ap, device);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ iowrite32(0xaa, ioaddr->nsect_addr);
+ iowrite32(0x55, ioaddr->lbal_addr);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ nsect = ioread32(ioaddr->nsect_addr);
+ lbal = ioread32(ioaddr->lbal_addr);
+
+ if (nsect == 0x55 && lbal == 0xaa)
+ return 1; /* found a device */
+
+ return 0; /* nothing found */
+}
+
+static int sata_rcar_wait_after_reset(struct ata_link *link,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+
+ ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+ return ata_sff_wait_ready(link, deadline);
+}
+
+static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+
+ /* wait for the port to become ready */
+ return sata_rcar_wait_after_reset(&ap->link, deadline);
+}
+
+static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0 is present */
+ if (sata_rcar_ata_devchk(ap, 0))
+ devmask |= 1 << 0;
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ rc = sata_rcar_bus_softreset(ap, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
+
+ DPRINTK("classes[0]=%u\n", classes[0]);
+ return 0;
+}
+
+static void sata_rcar_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ iowrite32(tf->hob_feature, ioaddr->feature_addr);
+ iowrite32(tf->hob_nsect, ioaddr->nsect_addr);
+ iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
+ iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
+ iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ iowrite32(tf->feature, ioaddr->feature_addr);
+ iowrite32(tf->nsect, ioaddr->nsect_addr);
+ iowrite32(tf->lbal, ioaddr->lbal_addr);
+ iowrite32(tf->lbam, ioaddr->lbam_addr);
+ iowrite32(tf->lbah, ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ iowrite32(tf->device, ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = sata_rcar_check_status(ap);
+ tf->feature = ioread32(ioaddr->error_addr);
+ tf->nsect = ioread32(ioaddr->nsect_addr);
+ tf->lbal = ioread32(ioaddr->lbal_addr);
+ tf->lbam = ioread32(ioaddr->lbam_addr);
+ tf->lbah = ioread32(ioaddr->lbah_addr);
+ tf->device = ioread32(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ioread32(ioaddr->error_addr);
+ tf->hob_nsect = ioread32(ioaddr->nsect_addr);
+ tf->hob_lbal = ioread32(ioaddr->lbal_addr);
+ tf->hob_lbam = ioread32(ioaddr->lbam_addr);
+ tf->hob_lbah = ioread32(ioaddr->lbah_addr);
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+static void sata_rcar_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ iowrite32(tf->command, ap->ioaddr.command_addr);
+ ata_sff_pause(ap);
+}
+
+static unsigned int sata_rcar_data_xfer(struct ata_device *dev,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ sata_rcar_ioread16_rep(data_addr, buf, words);
+ else
+ sata_rcar_iowrite16_rep(data_addr, buf, words);
+
+ /* Transfer trailing byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned char pad[2] = { };
+
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ /*
+ * Use io*16_rep() accessors here as well to avoid pointlessly
+ * swapping bytes to and from on the big endian machines...
+ */
+ if (rw == READ) {
+ sata_rcar_ioread16_rep(data_addr, pad, 1);
+ *buf = pad[0];
+ } else {
+ pad[0] = *buf;
+ sata_rcar_iowrite16_rep(data_addr, pad, 1);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) &&
+ count < 65536; count += 2)
+ ioread32(ap->ioaddr.data_addr);
+
+ /* Can become DEBUG later */
+ if (count)
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg,
+ u32 *val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg,
+ u32 val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, sg_len, len;
+
+ /*
+ * Note: h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32)sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ /* H/w transfer count is only 29 bits long, let's be careful */
+ while (sg_len) {
+ len = sg_len;
+ if (len > 0x1ffffffe)
+ len = 0x1ffffffe;
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /* end-of-table flag */
+ prd[pi - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+}
+
+static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ sata_rcar_bmdma_fill_sg(qc);
+}
+
+static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->bmdma_prd_dma, priv->base + ATAPI_DTB_ADR_REG);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ }
+ if (!rw)
+ dmactl |= ATAPI_CONTROL1_RW;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* start host DMA transaction */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl |= ATAPI_CONTROL1_START;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+}
+
+static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 dmactl;
+
+ /* force termination of DMA transfer if active */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+ }
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+
+static u8 sata_rcar_bmdma_status(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 status;
+ u8 host_stat = 0;
+
+ status = ioread32(priv->base + ATAPI_STATUS_REG);
+ if (status & ATAPI_STATUS_DEVINT)
+ host_stat |= ATA_DMA_INTR;
+ if (status & ATAPI_STATUS_ACT)
+ host_stat |= ATA_DMA_ACTIVE;
+
+ return host_stat;
+}
+
+static struct scsi_host_template sata_rcar_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sata_rcar_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .freeze = sata_rcar_freeze,
+ .thaw = sata_rcar_thaw,
+ .softreset = sata_rcar_softreset,
+
+ .scr_read = sata_rcar_scr_read,
+ .scr_write = sata_rcar_scr_write,
+
+ .sff_dev_select = sata_rcar_dev_select,
+ .sff_set_devctl = sata_rcar_set_devctl,
+ .sff_check_status = sata_rcar_check_status,
+ .sff_check_altstatus = sata_rcar_check_altstatus,
+ .sff_tf_load = sata_rcar_tf_load,
+ .sff_tf_read = sata_rcar_tf_read,
+ .sff_exec_command = sata_rcar_exec_command,
+ .sff_data_xfer = sata_rcar_data_xfer,
+ .sff_drain_fifo = sata_rcar_drain_fifo,
+
+ .qc_prep = sata_rcar_qc_prep,
+
+ .bmdma_setup = sata_rcar_bmdma_setup,
+ .bmdma_start = sata_rcar_bmdma_start,
+ .bmdma_stop = sata_rcar_bmdma_stop,
+ .bmdma_status = sata_rcar_bmdma_status,
+};
+
+static int sata_rcar_serr_interrupt(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ int freeze = 0;
+ int handled = 0;
+ u32 serror;
+
+ serror = ioread32(priv->base + SCRSERR_REG);
+ if (!serror)
+ return 0;
+
+ DPRINTK("SError @host_intr: 0x%x\n", serror);
+
+ /* first, analyze and record host port events */
+ ata_ehi_clear_desc(ehi);
+
+ if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
+ /* Setup a soft-reset EH action */
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "%s", "hotplug");
+
+ freeze = serror & SERR_COMM_WAKE ? 0 : 1;
+ handled = 1;
+ }
+
+ /* freeze or abort */
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+
+ return handled;
+}
+
+static int sata_rcar_ata_interrupt(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ int handled = 0;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc)
+ handled |= ata_bmdma_port_intr(ap, qc);
+
+ return handled;
+}
+
+static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct sata_rcar_priv *priv = host->private_data;
+ struct ata_port *ap;
+ unsigned int handled = 0;
+ u32 sataintstat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+ if (!sataintstat)
+ goto done;
+ /* ack */
+ iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
+ priv->base + SATAINTSTAT_REG);
+
+ ap = host->ports[0];
+
+ if (sataintstat & SATAINTSTAT_ATA)
+ handled |= sata_rcar_ata_interrupt(ap);
+
+ if (sataintstat & SATAINTSTAT_SERR)
+ handled |= sata_rcar_serr_interrupt(ap);
+
+done:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void sata_rcar_setup_port(struct ata_host *host)
+{
+ struct ata_port *ap = host->ports[0];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ap->ops = &sata_rcar_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->udma_mask = ATA_UDMA6;
+ ap->flags |= ATA_FLAG_SATA;
+
+ ioaddr->cmd_addr = priv->base + SDATA_REG;
+ ioaddr->ctl_addr = priv->base + SSDEVCON_REG;
+ ioaddr->scr_addr = priv->base + SCRSSTS_REG;
+ ioaddr->altstatus_addr = ioaddr->ctl_addr;
+
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+ struct sata_rcar_priv *priv = host->private_data;
+ u32 val;
+
+ /* reset and setup phy */
+ sata_rcar_phy_initialize(priv);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+
+ /* SATA-IP reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ISM mode, PRD mode, DTEND flag at bit 0 */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_ISM;
+ val |= ATAPI_CONTROL1_DESE;
+ val |= ATAPI_CONTROL1_DTA32M;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* Release the SATA-IP from the reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val &= ~ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+}
+
+static int sata_rcar_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct sata_rcar_priv *priv;
+ struct resource *mem;
+ int irq;
+ int ret = 0;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get access to sata clock\n");
+ return PTR_ERR(priv->clk);
+ }
+ clk_enable(priv->clk);
+
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host) {
+ dev_err(&pdev->dev, "ata_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ host->private_data = priv;
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, mem);
+ if (!priv->base) {
+ ret = -EADDRNOTAVAIL;
+ goto cleanup;
+ }
+
+ /* setup port */
+ sata_rcar_setup_port(host);
+
+ /* initialize host controller */
+ sata_rcar_init_controller(host);
+
+ ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0,
+ &sata_rcar_sht);
+ if (!ret)
+ return 0;
+
+cleanup:
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int sata_rcar_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sata_rcar_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+ int ret;
+
+ ret = ata_host_suspend(host, PMSG_SUSPEND);
+ if (!ret) {
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+ }
+
+ return ret;
+}
+
+static int sata_rcar_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ clk_enable(priv->clk);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sata_rcar_pm_ops = {
+ .suspend = sata_rcar_suspend,
+ .resume = sata_rcar_resume,
+};
+#endif
+
+static struct of_device_id sata_rcar_match[] = {
+ { .compatible = "renesas,rcar-sata", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sata_rcar_match);
+
+static struct platform_driver sata_rcar_driver = {
+ .probe = sata_rcar_probe,
+ .remove = sata_rcar_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sata_rcar_match,
+#ifdef CONFIG_PM
+ .pm = &sata_rcar_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(sata_rcar_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vladimir Barinov");
+MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver");
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b22d71cac54c..0e3f8f9dcd29 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
struct atm_cirange ci;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
int i;
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev != dev)
continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == dev &&
vcc->vci == vci && vcc->vpi == vpi &&
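The ATM hunks here and below track the 3.9 hlist iterator rework: sk_for_each() (like hlist_for_each_entry()) no longer takes a separate struct hlist_node cursor, so the now-unused node variable is removed at every call site while the loop bodies stay untouched. A compact sketch of the new form, modelled on the find_vcc() routines in these drivers:

	/* Sketch: the same lookup as find_vcc() above, written against the
	 * two-argument sk_for_each(). */
	static struct atm_vcc *find_vcc_example(struct hlist_head *head,
						short vpi, int vci)
	{
		struct atm_vcc *vcc;
		struct sock *s;

		sk_for_each(s, head) {	/* was: sk_for_each(s, node, head) */
			vcc = atm_sk(s);
			if (vcc->vpi == vpi && vcc->vci == vci)
				return vcc;
		}
		return NULL;
	}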
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c1eb6fa8ac35..b1955ba40d63 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
- struct hlist_node *node;
struct sock *s;
static const char *signal[] = { "LOST","unknown","okay" };
struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
struct eni_vcc *eni_vcc;
int length;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 72b6960fa95f..d6891267f5bb 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
{
struct hlist_head *head;
struct atm_vcc *vcc;
- struct hlist_node *node;
struct sock *s;
short vpi;
int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
vci = cid & ((1 << he_dev->vcibits) - 1);
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == he_dev->atm_dev &&
vcc->vci == vci && vcc->vpi == vpi &&
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index ed1d2b7f923b..6587dc295eb0 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -251,7 +251,6 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
if (card->scd2vc[j] != NULL)
free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
}
- idr_remove_all(&card->idr);
idr_destroy(&card->idr);
pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
@@ -950,11 +949,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
{
struct sk_buff *handle1, *handle2;
- u32 id1 = 0, id2 = 0;
+ int id1, id2;
u32 addr1, addr2;
u32 stat;
unsigned long flags;
- int err;
/* *BARF* */
handle2 = NULL;
@@ -1027,23 +1025,12 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
card->lbfqc += 2;
}
- do {
- if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
- printk(KERN_ERR
- "nicstar%d: no free memory for idr\n",
- card->index);
- goto out;
- }
-
- if (!id1)
- err = idr_get_new_above(&card->idr, handle1, 0, &id1);
-
- if (!id2 && err == 0)
- err = idr_get_new_above(&card->idr, handle2, 0, &id2);
-
- } while (err == -EAGAIN);
+ id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
+ if (id1 < 0)
+ goto out;
- if (err)
+ id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
+ if (id2 < 0)
goto out;
spin_lock_irqsave(&card->res_lock, flags);
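The idr conversions in this series all follow the same recipe: the idr_pre_get()/idr_get_new_above() retry loop becomes a single idr_alloc(idr, ptr, start, end, gfp) call, which returns the allocated id or a negative errno (-ENOMEM on allocation failure, -ENOSPC when nothing is free in [start, end)); end == 0 means "no upper bound", and [i, i + 1) requests exactly id i, as the drbd and loop hunks below do. A minimal sketch of both uses (names are illustrative):

	static int example_store(struct idr *idr, void *obj, int want)
	{
		int id;

		if (want >= 0) {
			/* insist on exactly `want`: range is [want, want + 1) */
			id = idr_alloc(idr, obj, want, want + 1, GFP_KERNEL);
			if (id == -ENOSPC)
				return -EEXIST;	/* already taken */
		} else {
			/* any free id; end == 0 means "no upper bound" */
			id = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
		}
		return id;	/* allocated id, or -ENOMEM/-ENOSPC */
	}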
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 0474a89170b9..32784d18d1f7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
{
struct hlist_head *head;
struct atm_vcc *vcc = NULL;
- struct hlist_node *node;
struct sock *s;
read_lock(&vcc_sklist_lock);
head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
if (vcc->dev == dev && vcc->vci == vci &&
vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 17cf7cad601e..01fc5b07f951 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -302,7 +302,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (dentry->d_inode) {
struct kstat stat;
- err = vfs_getattr(parent.mnt, dentry, &stat);
+ struct path p = {.mnt = parent.mnt, .dentry = dentry};
+ err = vfs_getattr(&p, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
struct iattr newattrs;
/*
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index ff5b745c4705..2a7cb0df176b 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -39,6 +39,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dmabuf = file->private_data;
+ BUG_ON(dmabuf->vmapping_counter);
+
dmabuf->ops->release(dmabuf);
kfree(dmabuf);
return 0;
@@ -445,6 +447,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
+ struct file *oldfile;
+ int ret;
+
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -458,14 +463,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- if (vma->vm_file)
- fput(vma->vm_file);
-
- vma->vm_file = get_file(dmabuf->file);
-
+ get_file(dmabuf->file);
+ oldfile = vma->vm_file;
+ vma->vm_file = dmabuf->file;
vma->vm_pgoff = pgoff;
- return dmabuf->ops->mmap(dmabuf, vma);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ if (ret) {
+ /* restore old parameters on failure */
+ vma->vm_file = oldfile;
+ fput(dmabuf->file);
+ } else {
+ if (oldfile)
+ fput(oldfile);
+ }
+ return ret;
+
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
@@ -481,12 +494,34 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
+ void *ptr;
+
if (WARN_ON(!dmabuf))
return NULL;
- if (dmabuf->ops->vmap)
- return dmabuf->ops->vmap(dmabuf);
- return NULL;
+ if (!dmabuf->ops->vmap)
+ return NULL;
+
+ mutex_lock(&dmabuf->lock);
+ if (dmabuf->vmapping_counter) {
+ dmabuf->vmapping_counter++;
+ BUG_ON(!dmabuf->vmap_ptr);
+ ptr = dmabuf->vmap_ptr;
+ goto out_unlock;
+ }
+
+ BUG_ON(dmabuf->vmap_ptr);
+
+ ptr = dmabuf->ops->vmap(dmabuf);
+ if (IS_ERR_OR_NULL(ptr))
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
+ dmabuf->vmapping_counter = 1;
+
+out_unlock:
+ mutex_unlock(&dmabuf->lock);
+ return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
@@ -500,7 +535,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
if (WARN_ON(!dmabuf))
return;
- if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
+ BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(dmabuf->vmapping_counter == 0);
+ BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+ mutex_lock(&dmabuf->lock);
+ if (--dmabuf->vmapping_counter == 0) {
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+ dmabuf->vmap_ptr = NULL;
+ }
+ mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
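With this change dma_buf_vmap()/dma_buf_vunmap() are reference counted: the first vmap asks the exporter for a mapping and caches it in vmap_ptr, nested calls just bump vmapping_counter and return the cached pointer, and the exporter's vunmap is only invoked when the counter falls back to zero. A hedged usage sketch from an importer's side (error handling trimmed, cache-coherency bracketing omitted):

	static int example_cpu_clear(struct dma_buf *buf)
	{
		void *vaddr = dma_buf_vmap(buf);	/* counted; may nest */

		if (IS_ERR_OR_NULL(vaddr))
			return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

		memset(vaddr, 0, buf->size);		/* CPU access via the mapping */

		dma_buf_vunmap(buf, vaddr);		/* real unmap only at count 0 */
		return 0;
	}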
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4a223fedcd73..4b1f9265887f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -279,7 +279,7 @@ MODULE_PARM_DESC(path, "customized firmware image search path with a higher prio
static noinline_for_stack long fw_file_size(struct file *file)
{
struct kstat st;
- if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
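Both this hunk and the devtmpfs one above adapt to the new vfs_getattr() prototype, which takes a struct path rather than a separate vfsmount/dentry pair; callers holding a struct file pass &file->f_path, others build a struct path on the stack. A small sketch, assuming the caller holds an open struct file:

	static loff_t example_file_size(struct file *file)
	{
		struct kstat st;

		if (vfs_getattr(&file->f_path, &st))
			return -1;
		return S_ISREG(st.mode) ? st.size : -1;
	}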
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 78d5f20c5f5b..81d6f605c92e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -279,7 +279,7 @@ static ssize_t regmap_map_write_file(struct file *file,
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER);
+ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
regmap_write(map, reg, value);
return buf_size;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 9a13e889837e..5b5ee79ff236 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6547,7 +6547,7 @@ static ssize_t dac960_user_command_proc_write(struct file *file,
const char __user *Buffer,
size_t Count, loff_t *pos)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
+ DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file_inode(file))->data;
unsigned char CommandBuffer[80];
int Length;
if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -7054,6 +7054,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
else
ErrorCode = 0;
}
+ break;
default:
ErrorCode = -ENOTTY;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 824e09c4d0d7..5dc0daed8fac 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -63,19 +63,6 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
-config BLK_DEV_XD
- tristate "XT hard disk support"
- depends on ISA && ISA_DMA_API
- select CHECK_SIGNATURE
- help
- Very old 8 bit hard disk controllers used in the IBM XT computer
- will be supported if you say Y here.
-
- To compile this driver as a module, choose M here: the
- module will be called xd.
-
- It's pretty unlikely that you have one of these: say N.
-
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
@@ -544,4 +531,14 @@ config BLK_DEV_RBD
If unsure, say N.
+config BLK_DEV_RSXX
+ tristate "RamSam PCIe Flash SSD Device Driver"
+ depends on PCI
+ help
+ Device driver for IBM's high speed PCIe SSD
+ storage devices: RamSan-70 and RamSan-80.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rsxx.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 17e82df3df74..a3b40232c6ab 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
@@ -41,4 +40,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8c13eeb83c53..e98da675f0c1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2660,25 +2660,24 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
mdev->read_requests = RB_ROOT;
mdev->write_requests = RB_ROOT;
- if (!idr_pre_get(&minors, GFP_KERNEL))
- goto out_no_minor_idr;
- if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+ minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
+ if (minor_got < 0) {
+ if (minor_got == -ENOSPC) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
+ }
goto out_no_minor_idr;
- if (minor_got != minor) {
- err = ERR_MINOR_EXISTS;
- drbd_msg_put_info("requested minor exists already");
- goto out_idr_remove_minor;
}
- if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
- goto out_idr_remove_minor;
- if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+ vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
+ if (vnr_got < 0) {
+ if (vnr_got == -ENOSPC) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ }
goto out_idr_remove_minor;
- if (vnr_got != vnr) {
- err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
- goto out_idr_remove_vol;
}
+
add_disk(disk);
kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
@@ -2689,8 +2688,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
return NO_ERROR;
-out_idr_remove_vol:
- idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
idr_remove(&minors, minor_got);
synchronize_rcu();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ae1251270624..747bb2af69dc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -162,12 +162,13 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
- loff_t size, loopsize;
+ loff_t loopsize;
/* Compute loopsize in bytes */
- size = i_size_read(file->f_mapping->host);
- loopsize = size - offset;
- /* offset is beyond i_size, wierd but possible */
+ loopsize = i_size_read(file->f_mapping->host);
+ if (offset > 0)
+ loopsize -= offset;
+ /* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
@@ -190,6 +191,7 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
+ struct block_device *bdev = lo->lo_device;
if (unlikely((loff_t)x != size))
return -EFBIG;
@@ -198,6 +200,9 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
set_capacity(lo->lo_disk, x);
+ bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
return 0;
}
@@ -1091,10 +1096,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
return err;
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit) {
+ lo->lo_sizelimit != info->lo_sizelimit)
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
return -EFBIG;
- }
+
loop_config_discard(lo);
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
@@ -1139,7 +1144,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
+ error = vfs_getattr(&file->f_path, &stat);
if (error)
return error;
memset(info, 0, sizeof(*info));
@@ -1271,28 +1276,10 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
{
- int err;
- sector_t sec;
- loff_t sz;
-
- err = -ENXIO;
if (unlikely(lo->lo_state != Lo_bound))
- goto out;
- err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
- if (unlikely(err))
- goto out;
- sec = get_capacity(lo->lo_disk);
- /* the width of sector_t may be narrow for bit-shift */
- sz = sec;
- sz <<= 9;
- mutex_lock(&bdev->bd_mutex);
- bd_set_size(bdev, sz);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- mutex_unlock(&bdev->bd_mutex);
+ return -ENXIO;
- out:
- return err;
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1624,30 +1611,17 @@ static int loop_add(struct loop_device **l, int i)
if (!lo)
goto out;
- if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
- goto out_free_dev;
-
+ /* allocate id; if @i >= 0, we're requesting that specific id */
if (i >= 0) {
- int m;
-
- /* create specific i in the index */
- err = idr_get_new_above(&loop_index_idr, lo, i, &m);
- if (err >= 0 && i != m) {
- idr_remove(&loop_index_idr, m);
+ err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+ if (err == -ENOSPC)
err = -EEXIST;
- }
- } else if (i == -1) {
- int m;
-
- /* get next free nr */
- err = idr_get_new(&loop_index_idr, lo, &m);
- if (err >= 0)
- i = m;
} else {
- err = -EINVAL;
+ err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
if (err < 0)
goto out_free_dev;
+ i = err;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
@@ -1858,11 +1832,15 @@ static int __init loop_init(void)
max_part = (1UL << part_shift) - 1;
}
- if ((1UL << part_shift) > DISK_MAX_PARTS)
- return -EINVAL;
+ if ((1UL << part_shift) > DISK_MAX_PARTS) {
+ err = -EINVAL;
+ goto misc_out;
+ }
- if (max_loop > 1UL << (MINORBITS - part_shift))
- return -EINVAL;
+ if (max_loop > 1UL << (MINORBITS - part_shift)) {
+ err = -EINVAL;
+ goto misc_out;
+ }
/*
* If max_loop is specified, create that many devices upfront.
@@ -1880,8 +1858,10 @@ static int __init loop_init(void)
range = 1UL << MINORBITS;
}
- if (register_blkdev(LOOP_MAJOR, "loop"))
- return -EIO;
+ if (register_blkdev(LOOP_MAJOR, "loop")) {
+ err = -EIO;
+ goto misc_out;
+ }
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
@@ -1894,6 +1874,10 @@ static int __init loop_init(void)
printk(KERN_INFO "loop: module loaded\n");
return 0;
+
+misc_out:
+ misc_deregister(&loop_misc);
+ return err;
}
static int loop_exit_cb(int id, void *ptr, void *data)
@@ -1911,7 +1895,6 @@ static void __exit loop_exit(void)
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
- idr_remove_all(&loop_index_idr);
idr_destroy(&loop_index_idr);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 0ba837fc62a8..1fca1f996b45 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3fd100990453..11cc9522cdd4 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -88,6 +88,8 @@ static int instance;
static int mtip_major;
static struct dentry *dfs_parent;
+static u32 cpu_use[NR_CPUS];
+
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -296,16 +298,17 @@ static int hba_reset_nosleep(struct driver_data *dd)
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
- atomic_set(&port->commands[tag].active, 1);
+ int group = tag >> 5;
- spin_lock(&port->cmd_issue_lock);
+ atomic_set(&port->commands[tag].active, 1);
+ /* guard SACT and CI registers */
+ spin_lock(&port->cmd_issue_lock[group]);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
-
- spin_unlock(&port->cmd_issue_lock);
+ spin_unlock(&port->cmd_issue_lock[group]);
/* Set the command's timeout value.*/
port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -964,56 +967,56 @@ handle_tfe_exit:
/*
* Handle a set device bits interrupt
*/
-static inline void mtip_process_sdbf(struct driver_data *dd)
+static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
+ u32 completed)
{
- struct mtip_port *port = dd->port;
- int group, tag, bit;
- u32 completed;
+ struct driver_data *dd = port->dd;
+ int tag, bit;
struct mtip_cmd *command;
- /* walk all bits in all slot groups */
- for (group = 0; group < dd->slot_groups; group++) {
- completed = readl(port->completed[group]);
- if (!completed)
- continue;
+ if (!completed) {
+ WARN_ON_ONCE(!completed);
+ return;
+ }
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
- /* clear completed status register in the hardware.*/
- writel(completed, port->completed[group]);
+ /* Process completed commands. */
+ for (bit = 0; (bit < 32) && completed; bit++) {
+ if (completed & 0x01) {
+ tag = (group << 5) | bit;
- /* Process completed commands. */
- for (bit = 0;
- (bit < 32) && completed;
- bit++, completed >>= 1) {
- if (completed & 0x01) {
- tag = (group << 5) | bit;
+ /* skip internal command slot. */
+ if (unlikely(tag == MTIP_TAG_INTERNAL))
+ continue;
- /* skip internal command slot. */
- if (unlikely(tag == MTIP_TAG_INTERNAL))
- continue;
+ command = &port->commands[tag];
+ /* make internal callback */
+ if (likely(command->comp_func)) {
+ command->comp_func(
+ port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "Null completion "
+ "for tag %d",
+ tag);
- command = &port->commands[tag];
- /* make internal callback */
- if (likely(command->comp_func)) {
- command->comp_func(
- port,
- tag,
- command->comp_data,
- 0);
- } else {
- dev_warn(&dd->pdev->dev,
- "Null completion "
- "for tag %d",
- tag);
-
- if (mtip_check_surprise_removal(
- dd->pdev)) {
- mtip_command_cleanup(dd);
- return;
- }
+ if (mtip_check_surprise_removal(
+ dd->pdev)) {
+ mtip_command_cleanup(dd);
+ return;
}
}
}
+ completed >>= 1;
}
+
+ /* If last, re-enable interrupts */
+ if (atomic_dec_return(&dd->irq_workers_active) == 0)
+ writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}
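/*
 * Illustrative note, not from the patch itself: the tag/group arithmetic
 * used above and in mtip_issue_ncq_command(). Each slot group covers 32
 * command tags, so:
 *
 *	group = tag >> 5;		e.g. tag 69 -> group 2
 *	bit   = tag & 0x1f;		e.g. tag 69 -> bit 5
 *	tag   = (group << 5) | bit;	(2 << 5) | 5 == 69
 *
 * This is why dd->work[], the per-group cmd_issue_lock[] and the eight
 * DEFINE_HANDLER() instances below are all sized per slot group, and why
 * mtip_workq_sdbfx() walks exactly 32 bits of 'completed'.
 */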
/*
@@ -1072,6 +1075,8 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
struct mtip_port *port = dd->port;
u32 hba_stat, port_stat;
int rv = IRQ_NONE;
+ int do_irq_enable = 1, i, workers;
+ struct mtip_work *twork;
hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
if (hba_stat) {
@@ -1082,8 +1087,42 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
writel(port_stat, port->mmio + PORT_IRQ_STAT);
/* Demux port status */
- if (likely(port_stat & PORT_IRQ_SDB_FIS))
- mtip_process_sdbf(dd);
+ if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
+ do_irq_enable = 0;
+ WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
+
+ /* Start at 1: group zero is always local? */
+ for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
+ i++) {
+ twork = &dd->work[i];
+ twork->completed = readl(port->completed[i]);
+ if (twork->completed)
+ workers++;
+ }
+
+ atomic_set(&dd->irq_workers_active, workers);
+ if (workers) {
+ for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
+ twork = &dd->work[i];
+ if (twork->completed)
+ queue_work_on(
+ twork->cpu_binding,
+ dd->isr_workq,
+ &twork->work);
+ }
+
+ if (likely(dd->work[0].completed))
+ mtip_workq_sdbfx(port, 0,
+ dd->work[0].completed);
+
+ } else {
+ /*
+ * Chip quirk: SDB interrupt but nothing
+ * to complete
+ */
+ do_irq_enable = 1;
+ }
+ }
if (unlikely(port_stat & PORT_IRQ_ERR)) {
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
@@ -1103,21 +1142,13 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
}
/* acknowledge interrupt */
- writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+ if (unlikely(do_irq_enable))
+ writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
return rv;
}
/*
- * Wrapper for mtip_handle_irq
- * (ignores return code)
- */
-static void mtip_tasklet(unsigned long data)
-{
- mtip_handle_irq((struct driver_data *) data);
-}
-
-/*
* HBA interrupt subroutine.
*
* @irq IRQ number.
@@ -1130,8 +1161,8 @@ static void mtip_tasklet(unsigned long data)
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
struct driver_data *dd = instance;
- tasklet_schedule(&dd->tasklet);
- return IRQ_HANDLED;
+
+ return mtip_handle_irq(dd);
}
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
@@ -1489,6 +1520,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+ /* Demux ID.DRAT & ID.RZAT to determine trim support */
+ if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
+ port->dd->trim_supp = true;
+ else
+ port->dd->trim_supp = false;
+
/* Set the identify buffer as valid. */
port->identify_valid = 1;
@@ -1676,6 +1713,81 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
}
/*
+ * Trim unused sectors
+ *
+ * @dd pointer to driver_data structure
+ * @lba starting lba
+ * @len # of 512b sectors to trim
+ *
+ * return value
+ * -ENOMEM Out of dma memory
+ * -EINVAL Invalid parameters passed in, trim not supported
+ * -EIO Error submitting trim request to hw
+ */
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+{
+ int i, rv = 0;
+ u64 tlba, tlen, sect_left;
+ struct mtip_trim_entry *buf;
+ dma_addr_t dma_addr;
+ struct host_to_dev_fis fis;
+
+ if (!len || dd->trim_supp == false)
+ return -EINVAL;
+
+ /* Trim request too big */
+ WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
+
+ /* Trim request not aligned on 4k boundary */
+ WARN_ON(len % 8 != 0);
+
+ /* Warn if vu_trim structure is too big */
+ WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
+
+ /* Allocate a DMA buffer for the trim structure */
+ buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memset(buf, 0, ATA_SECT_SIZE);
+
+ for (i = 0, sect_left = len, tlba = lba;
+ i < MTIP_MAX_TRIM_ENTRIES && sect_left;
+ i++) {
+ tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
+ MTIP_MAX_TRIM_ENTRY_LEN :
+ sect_left);
+ buf[i].lba = __force_bit2int cpu_to_le32(tlba);
+ buf[i].range = __force_bit2int cpu_to_le16(tlen);
+ tlba += tlen;
+ sect_left -= tlen;
+ }
+ WARN_ON(sect_left != 0);
+
+ /* Build the fis */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = 0xfb;
+ fis.features = 0x60;
+ fis.sect_count = 1;
+ fis.device = ATA_DEVICE_OBS;
+
+ if (mtip_exec_internal_command(dd->port,
+ &fis,
+ 5,
+ dma_addr,
+ ATA_SECT_SIZE,
+ 0,
+ GFP_KERNEL,
+ MTIP_TRIM_TIMEOUT_MS) < 0)
+ rv = -EIO;
+
+ dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
+ return rv;
+}
+
+/*
* Get the drive capacity.
*
* @dd Pointer to the device data structure.
@@ -3005,20 +3117,24 @@ static int mtip_hw_init(struct driver_data *dd)
hba_setup(dd);
- tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
-
- dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+ dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
+ dd->numa_node);
if (!dd->port) {
dev_err(&dd->pdev->dev,
"Memory allocation: port structure\n");
return -ENOMEM;
}
+ /* Continue workqueue setup */
+ for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+ dd->work[i].port = dd->port;
+
/* Counting semaphore to track command slot usage */
sema_init(&dd->port->cmd_slot, num_command_slots - 1);
/* Spinlock to prevent concurrent issue */
- spin_lock_init(&dd->port->cmd_issue_lock);
+ for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+ spin_lock_init(&dd->port->cmd_issue_lock[i]);
/* Set the port mmio base address. */
dd->port->mmio = dd->mmio + PORT_OFFSET;
@@ -3165,6 +3281,7 @@ static int mtip_hw_init(struct driver_data *dd)
"Unable to allocate IRQ %d\n", dd->pdev->irq);
goto out2;
}
+ irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
/* Enable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
@@ -3241,7 +3358,8 @@ out3:
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
- /*Release the IRQ. */
+ /* Release the IRQ. */
+ irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
out2:
@@ -3291,11 +3409,9 @@ static int mtip_hw_exit(struct driver_data *dd)
del_timer_sync(&dd->port->cmd_timer);
/* Release the IRQ. */
+ irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
- /* Stop the bottom half tasklet. */
- tasklet_kill(&dd->tasklet);
-
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
@@ -3641,6 +3757,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
}
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_sectors(bio)));
+ return;
+ }
+
if (unlikely(!bio_has_data(bio))) {
blk_queue_flush(queue, 0);
bio_endio(bio, 0);
@@ -3711,7 +3833,7 @@ static int mtip_block_initialize(struct driver_data *dd)
goto protocol_init_error;
}
- dd->disk = alloc_disk(MTIP_MAX_MINORS);
+ dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
if (dd->disk == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate gendisk structure\n");
@@ -3755,7 +3877,7 @@ static int mtip_block_initialize(struct driver_data *dd)
skip_create_disk:
/* Allocate the request queue. */
- dd->queue = blk_alloc_queue(GFP_KERNEL);
+ dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
if (dd->queue == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
@@ -3783,6 +3905,15 @@ skip_create_disk:
*/
blk_queue_flush(dd->queue, 0);
+ /* Signal trim support */
+ if (dd->trim_supp == true) {
+ set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
+ dd->queue->limits.discard_granularity = 4096;
+ blk_queue_max_discard_sectors(dd->queue,
+ MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
+ dd->queue->limits.discard_zeroes_data = 0;
+ }
+
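/*
 * Illustrative arithmetic, not from the patch itself: with the constants
 * added in mtip32xx.h below (MTIP_MAX_TRIM_ENTRIES == 8,
 * MTIP_MAX_TRIM_ENTRY_LEN == 0xfff8 == 65528 sectors per entry), the
 * largest discard the block layer will hand to mtip_send_trim() is
 *
 *	8 * 65528 = 524224 sectors of 512 B, i.e. just under 256 MiB,
 *
 * matching the limit passed to blk_queue_max_discard_sectors() above.
 */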
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
@@ -3813,9 +3944,8 @@ skip_create_disk:
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
-
- dd->mtip_svc_handler = kthread_run(mtip_service_thread,
- dd, thd_name);
+ dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
+ dd, dd->numa_node, thd_name);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
@@ -3823,7 +3953,7 @@ start_service_thread:
rv = -EFAULT;
goto kthread_run_error;
}
-
+ wake_up_process(dd->mtip_svc_handler);
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
rv = wait_for_rebuild;
@@ -3963,6 +4093,56 @@ static int mtip_block_resume(struct driver_data *dd)
return 0;
}
+static void drop_cpu(int cpu)
+{
+ cpu_use[cpu]--;
+}
+
+static int get_least_used_cpu_on_node(int node)
+{
+ int cpu, least_used_cpu, least_cnt;
+ const struct cpumask *node_mask;
+
+ node_mask = cpumask_of_node(node);
+ least_used_cpu = cpumask_first(node_mask);
+ least_cnt = cpu_use[least_used_cpu];
+ cpu = least_used_cpu;
+
+ for_each_cpu(cpu, node_mask) {
+ if (cpu_use[cpu] < least_cnt) {
+ least_used_cpu = cpu;
+ least_cnt = cpu_use[cpu];
+ }
+ }
+ cpu_use[least_used_cpu]++;
+ return least_used_cpu;
+}
+
+/* Helper for selecting a node in round robin mode */
+static inline int mtip_get_next_rr_node(void)
+{
+ static int next_node = -1;
+
+ if (next_node == -1) {
+ next_node = first_online_node;
+ return next_node;
+ }
+
+ next_node = next_online_node(next_node);
+ if (next_node == MAX_NUMNODES)
+ next_node = first_online_node;
+ return next_node;
+}
+
+static DEFINE_HANDLER(0);
+static DEFINE_HANDLER(1);
+static DEFINE_HANDLER(2);
+static DEFINE_HANDLER(3);
+static DEFINE_HANDLER(4);
+static DEFINE_HANDLER(5);
+static DEFINE_HANDLER(6);
+static DEFINE_HANDLER(7);
+
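/*
 * Illustrative expansion, not from the patch itself: DEFINE_HANDLER() is
 * added in the mtip32xx.h hunk below; DEFINE_HANDLER(0), for example,
 * expands to
 *
 *	void mtip_workq_sdbf0(struct work_struct *work)
 *	{
 *		struct mtip_work *w = (struct mtip_work *) work;
 *		mtip_workq_sdbfx(w->port, 0, w->completed);
 *	}
 *
 * and these eight functions are the ones wired up with INIT_WORK() in
 * mtip_pci_probe() further down.
 */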
/*
* Called for each supported PCI device detected.
*
@@ -3977,9 +4157,25 @@ static int mtip_pci_probe(struct pci_dev *pdev,
{
int rv = 0;
struct driver_data *dd = NULL;
+ char cpu_list[256];
+ const struct cpumask *node_mask;
+ int cpu, i = 0, j = 0;
+ int my_node = NUMA_NO_NODE;
/* Allocate memory for this devices private data. */
- dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+ my_node = pcibus_to_node(pdev->bus);
+ if (my_node != NUMA_NO_NODE) {
+ if (!node_online(my_node))
+ my_node = mtip_get_next_rr_node();
+ } else {
+ dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
+ my_node = mtip_get_next_rr_node();
+ }
+ dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
+ my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
+ cpu_to_node(smp_processor_id()), smp_processor_id());
+
+ dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
dev_err(&pdev->dev,
"Unable to allocate memory for driver data\n");
@@ -4016,19 +4212,82 @@ static int mtip_pci_probe(struct pci_dev *pdev,
}
}
- pci_set_master(pdev);
+ /* Copy the info we may need later into the private data structure. */
+ dd->major = mtip_major;
+ dd->instance = instance;
+ dd->pdev = pdev;
+ dd->numa_node = my_node;
+ memset(dd->workq_name, 0, 32);
+ snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
+
+ dd->isr_workq = create_workqueue(dd->workq_name);
+ if (!dd->isr_workq) {
+ dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+ goto block_initialize_err;
+ }
+
+ memset(cpu_list, 0, sizeof(cpu_list));
+
+ node_mask = cpumask_of_node(dd->numa_node);
+ if (!cpumask_empty(node_mask)) {
+ for_each_cpu(cpu, node_mask)
+ {
+ snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
+ j = strlen(cpu_list);
+ }
+
+ dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
+ dd->numa_node,
+ topology_physical_package_id(cpumask_first(node_mask)),
+ nr_cpus_node(dd->numa_node),
+ cpu_list);
+ } else
+ dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
+
+ dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
+ cpu_to_node(dd->isr_binding), dd->isr_binding);
+
+ /* first worker context always runs in ISR */
+ dd->work[0].cpu_binding = dd->isr_binding;
+ dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+ dd->work[3].cpu_binding = dd->work[0].cpu_binding;
+ dd->work[4].cpu_binding = dd->work[1].cpu_binding;
+ dd->work[5].cpu_binding = dd->work[2].cpu_binding;
+ dd->work[6].cpu_binding = dd->work[2].cpu_binding;
+ dd->work[7].cpu_binding = dd->work[1].cpu_binding;
+
+ /* Log the bindings */
+ for_each_present_cpu(cpu) {
+ memset(cpu_list, 0, sizeof(cpu_list));
+ for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
+ if (dd->work[i].cpu_binding == cpu) {
+ snprintf(&cpu_list[j], 256 - j, "%d ", i);
+ j = strlen(cpu_list);
+ }
+ }
+ if (j)
+ dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
+ }
+
+ INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
+ INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
+ INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
+ INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
+ INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
+ INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
+ INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
+ INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
+
+ pci_set_master(pdev);
if (pci_enable_msi(pdev)) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
goto block_initialize_err;
}
- /* Copy the info we may need later into the private data structure. */
- dd->major = mtip_major;
- dd->instance = instance;
- dd->pdev = pdev;
-
/* Initialize the block layer. */
rv = mtip_block_initialize(dd);
if (rv < 0) {
@@ -4048,7 +4307,13 @@ static int mtip_pci_probe(struct pci_dev *pdev,
block_initialize_err:
pci_disable_msi(pdev);
-
+ if (dd->isr_workq) {
+ flush_workqueue(dd->isr_workq);
+ destroy_workqueue(dd->isr_workq);
+ drop_cpu(dd->work[0].cpu_binding);
+ drop_cpu(dd->work[1].cpu_binding);
+ drop_cpu(dd->work[2].cpu_binding);
+ }
setmask_err:
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
@@ -4089,6 +4354,14 @@ static void mtip_pci_remove(struct pci_dev *pdev)
/* Clean up the block layer. */
mtip_block_remove(dd);
+ if (dd->isr_workq) {
+ flush_workqueue(dd->isr_workq);
+ destroy_workqueue(dd->isr_workq);
+ drop_cpu(dd->work[0].cpu_binding);
+ drop_cpu(dd->work[1].cpu_binding);
+ drop_cpu(dd->work[2].cpu_binding);
+ }
+
pci_disable_msi(pdev);
kfree(dd);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b1742640556a..3bffff5f670c 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -164,6 +164,35 @@ struct smart_attr {
u8 res[3];
} __packed;
+struct mtip_work {
+ struct work_struct work;
+ void *port;
+ int cpu_binding;
+ u32 completed;
+} ____cacheline_aligned_in_smp;
+
+#define DEFINE_HANDLER(group) \
+ void mtip_workq_sdbf##group(struct work_struct *work) \
+ { \
+ struct mtip_work *w = (struct mtip_work *) work; \
+ mtip_workq_sdbfx(w->port, group, w->completed); \
+ }
+
+#define MTIP_TRIM_TIMEOUT_MS 240000
+#define MTIP_MAX_TRIM_ENTRIES 8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+
+struct mtip_trim_entry {
+ u32 lba; /* starting lba of region */
+ u16 rsvd; /* unused */
+ u16 range; /* # of 512b blocks to trim */
+} __packed;
+
+struct mtip_trim {
+ /* Array of regions to trim */
+ struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
+} __packed;
+
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
/*
@@ -424,7 +453,7 @@ struct mtip_port {
*/
struct semaphore cmd_slot;
/* Spinlock for working around command-issue bug. */
- spinlock_t cmd_issue_lock;
+ spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
};
/*
@@ -447,9 +476,6 @@ struct driver_data {
struct mtip_port *port; /* Pointer to the port data structure. */
- /* Tasklet used to process the bottom half of the ISR. */
- struct tasklet_struct tasklet;
-
unsigned product_type; /* magic value declaring the product type */
unsigned slot_groups; /* number of slot groups the product supports */
@@ -461,6 +487,20 @@ struct driver_data {
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
struct dentry *dfs_node;
+
+ bool trim_supp; /* flag indicating trim support */
+
+ int numa_node; /* NUMA support */
+
+ char workq_name[32];
+
+ struct workqueue_struct *isr_workq;
+
+ struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
+ atomic_t irq_workers_active;
+
+ int isr_binding;
};
#endif
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 043ddcca4abf..7fecc784be01 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -98,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd)
case NBD_CMD_READ: return "read";
case NBD_CMD_WRITE: return "write";
case NBD_CMD_DISC: return "disconnect";
+ case NBD_CMD_FLUSH: return "flush";
case NBD_CMD_TRIM: return "trim/discard";
}
return "invalid";
@@ -244,8 +245,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
- request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
- request.len = htonl(size);
+
+ if (nbd_cmd(req) == NBD_CMD_FLUSH) {
+ /* Other values are reserved for FLUSH requests. */
+ request.from = 0;
+ request.len = 0;
+ } else {
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
+ request.len = htonl(size);
+ }
memcpy(request.handle, &req, sizeof(req));
dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
@@ -482,6 +490,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
}
}
+ if (req->cmd_flags & REQ_FLUSH) {
+ BUG_ON(unlikely(blk_rq_sectors(req)));
+ nbd_cmd(req) = NBD_CMD_FLUSH;
+ }
+
req->errors = 0;
mutex_lock(&nbd->tx_lock);
@@ -551,6 +564,7 @@ static int nbd_thread(void *data)
*/
static void do_nbd_request(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
struct request *req;
@@ -595,12 +609,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
struct request sreq;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
+ if (!nbd->sock)
+ return -EINVAL;
+ mutex_unlock(&nbd->tx_lock);
+ fsync_bdev(bdev);
+ mutex_lock(&nbd->tx_lock);
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
+
+ /* Check again after getting mutex back. */
if (!nbd->sock)
return -EINVAL;
+
nbd_send_req(nbd, &sreq);
return 0;
}
@@ -614,6 +636,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_clear_que(nbd);
BUG_ON(!list_empty(&nbd->queue_head));
BUG_ON(!list_empty(&nbd->waiting_queue));
+ kill_bdev(bdev);
if (file)
fput(file);
return 0;
@@ -625,7 +648,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return -EBUSY;
file = fget(arg);
if (file) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
if (S_ISSOCK(inode->i_mode)) {
nbd->file = file;
nbd->sock = SOCKET_I(inode);
@@ -681,9 +704,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
mutex_unlock(&nbd->tx_lock);
+ if (nbd->flags & NBD_FLAG_READ_ONLY)
+ set_device_ro(bdev, true);
if (nbd->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
nbd->disk->queue);
+ if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+ blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ else
+ blk_queue_flush(nbd->disk->queue, 0);
thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
if (IS_ERR(thread)) {
@@ -702,9 +731,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->file = NULL;
nbd_clear_que(nbd);
dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
+ kill_bdev(bdev);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ set_device_ro(bdev, false);
if (file)
fput(file);
+ nbd->flags = 0;
nbd->bytesize = 0;
bdev->bd_inode->i_size = 0;
set_capacity(nbd->disk, 0);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 89576a0b3f2e..6c81a4c040b9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -52,9 +52,12 @@
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
-/* It might be useful to have this defined elsewhere too */
+/* It might be useful to have these defined elsewhere */
-#define U64_MAX ((u64) (~0ULL))
+#define U8_MAX ((u8) (~0U))
+#define U16_MAX ((u16) (~0U))
+#define U32_MAX ((u32) (~0U))
+#define U64_MAX ((u64) (~0ULL))
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
@@ -66,7 +69,6 @@
(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
-#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
@@ -93,8 +95,6 @@
#define DEV_NAME_LEN 32
#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
-#define RBD_READ_ONLY_DEFAULT false
-
/*
* block device image metadata (in-memory version)
*/
@@ -119,16 +119,33 @@ struct rbd_image_header {
* An rbd image specification.
*
* The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
- * identify an image.
+ * identify an image. Each rbd_dev structure includes a pointer to
+ * an rbd_spec structure that encapsulates this identity.
+ *
+ * Each of the id's in an rbd_spec has an associated name. For a
+ * user-mapped image, the names are supplied and the id's associated
+ * with them are looked up. For a layered image, a parent image is
+ * defined by the tuple, and the names are looked up.
+ *
+ * An rbd_dev structure contains a parent_spec pointer which is
+ * non-null if the image it represents is a child in a layered
+ * image. This pointer will refer to the rbd_spec structure used
+ * by the parent rbd_dev for its own identity (i.e., the structure
+ * is shared between the parent and child).
+ *
+ * Since these structures are populated once, during the discovery
+ * phase of image construction, they are effectively immutable so
+ * we make no effort to synchronize access to them.
+ *
+ * Note that code herein does not assume the image name is known (it
+ * could be a null pointer).
*/
struct rbd_spec {
u64 pool_id;
char *pool_name;
char *image_id;
- size_t image_id_len;
char *image_name;
- size_t image_name_len;
u64 snap_id;
char *snap_name;
@@ -136,10 +153,6 @@ struct rbd_spec {
struct kref kref;
};
-struct rbd_options {
- bool read_only;
-};
-
/*
* an instance of the client. multiple devices may share an rbd client.
*/
@@ -149,37 +162,76 @@ struct rbd_client {
struct list_head node;
};
-/*
- * a request completion status
- */
-struct rbd_req_status {
- int done;
- int rc;
- u64 bytes;
+struct rbd_img_request;
+typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
+
+#define BAD_WHICH U32_MAX /* Good which or bad which, which? */
+
+struct rbd_obj_request;
+typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
+
+enum obj_request_type {
+ OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
-/*
- * a collection of requests
- */
-struct rbd_req_coll {
- int total;
- int num_done;
+struct rbd_obj_request {
+ const char *object_name;
+ u64 offset; /* object start byte */
+ u64 length; /* bytes from offset */
+
+ struct rbd_img_request *img_request;
+ struct list_head links; /* img_request->obj_requests */
+ u32 which; /* posn image request list */
+
+ enum obj_request_type type;
+ union {
+ struct bio *bio_list;
+ struct {
+ struct page **pages;
+ u32 page_count;
+ };
+ };
+
+ struct ceph_osd_request *osd_req;
+
+ u64 xferred; /* bytes transferred */
+ u64 version;
+ int result;
+ atomic_t done;
+
+ rbd_obj_callback_t callback;
+ struct completion completion;
+
struct kref kref;
- struct rbd_req_status status[0];
};
-/*
- * a single io request
- */
-struct rbd_request {
- struct request *rq; /* blk layer request */
- struct bio *bio; /* cloned bio */
- struct page **pages; /* list of used pages */
- u64 len;
- int coll_index;
- struct rbd_req_coll *coll;
+struct rbd_img_request {
+ struct request *rq;
+ struct rbd_device *rbd_dev;
+ u64 offset; /* starting image byte offset */
+ u64 length; /* byte count from offset */
+ bool write_request; /* false for read */
+ union {
+ struct ceph_snap_context *snapc; /* for writes */
+ u64 snap_id; /* for reads */
+ };
+ spinlock_t completion_lock;/* protects next_completion */
+ u32 next_completion;
+ rbd_img_callback_t callback;
+
+ u32 obj_request_count;
+ struct list_head obj_requests; /* rbd_obj_request structs */
+
+ struct kref kref;
};
+#define for_each_obj_request(ireq, oreq) \
+ list_for_each_entry(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_from(ireq, oreq) \
+ list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_safe(ireq, oreq, n) \
+ list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
+
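/*
 * Illustrative usage, not from the patch itself: the iteration helpers
 * above are used later in this diff; rbd_img_request_submit(), for
 * instance, walks an image request's object requests with
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request) {
 *		obj_request->callback = rbd_img_obj_callback;
 *		ret = rbd_obj_request_submit(osdc, obj_request);
 *	}
 *
 * while the _safe (reverse) variant is reserved for paths that delete
 * entries as they go, such as rbd_img_request_destroy().
 */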
struct rbd_snap {
struct device dev;
const char *name;
@@ -209,16 +261,18 @@ struct rbd_device {
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
- spinlock_t lock; /* queue lock */
+ spinlock_t lock; /* queue, flags, open_count */
struct rbd_image_header header;
- bool exists;
+ unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
char *header_name;
+ struct ceph_file_layout layout;
+
struct ceph_osd_event *watch_event;
- struct ceph_osd_request *watch_request;
+ struct rbd_obj_request *watch_request;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -235,7 +289,19 @@ struct rbd_device {
/* sysfs related */
struct device dev;
- unsigned long open_count;
+ unsigned long open_count; /* protected by lock */
+};
+
+/*
+ * Flag bits for rbd_dev->flags. If atomicity is required,
+ * rbd_dev->lock is used to protect access.
+ *
+ * Currently, only the "removing" flag (which is coupled with the
+ * "open_count" field) requires atomic access.
+ */
+enum rbd_dev_flags {
+ RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
+ RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
};
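/*
 * Illustrative usage, not from the patch itself: these flag bits are
 * accessed with the standard bitops; where a flag must be checked
 * together with open_count, rbd_dev->lock is taken as well, as in
 * rbd_open() below:
 *
 *	spin_lock_irq(&rbd_dev->lock);
 *	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
 *		removing = true;
 *	else
 *		rbd_dev->open_count++;
 *	spin_unlock_irq(&rbd_dev->lock);
 */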
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
@@ -277,6 +343,33 @@ static struct device rbd_root_dev = {
.release = rbd_root_dev_release,
};
+static __printf(2, 3)
+void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (!rbd_dev)
+ printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
+ else if (rbd_dev->disk)
+ printk(KERN_WARNING "%s: %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
+ else if (rbd_dev->spec && rbd_dev->spec->image_name)
+ printk(KERN_WARNING "%s: image %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
+ else if (rbd_dev->spec && rbd_dev->spec->image_id)
+ printk(KERN_WARNING "%s: id %s: %pV\n",
+ RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
+ else /* punt */
+ printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
+ RBD_DRV_NAME, rbd_dev, &vaf);
+ va_end(args);
+}
+
#ifdef RBD_DEBUG
#define rbd_assert(expr) \
if (unlikely(!(expr))) { \
@@ -296,14 +389,23 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
+ bool removing = false;
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
+ spin_lock_irq(&rbd_dev->lock);
+ if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
+ removing = true;
+ else
+ rbd_dev->open_count++;
+ spin_unlock_irq(&rbd_dev->lock);
+ if (removing)
+ return -ENOENT;
+
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
(void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
- rbd_dev->open_count++;
mutex_unlock(&ctl_mutex);
return 0;
@@ -312,10 +414,14 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
+ unsigned long open_count_before;
+
+ spin_lock_irq(&rbd_dev->lock);
+ open_count_before = rbd_dev->open_count--;
+ spin_unlock_irq(&rbd_dev->lock);
+ rbd_assert(open_count_before > 0);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbd_assert(rbd_dev->open_count > 0);
- rbd_dev->open_count--;
put_device(&rbd_dev->dev);
mutex_unlock(&ctl_mutex);
@@ -337,7 +443,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret = -ENOMEM;
- dout("rbd_client_create\n");
+ dout("%s:\n", __func__);
rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
if (!rbdc)
goto out_opt;
@@ -361,8 +467,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
spin_unlock(&rbd_client_list_lock);
mutex_unlock(&ctl_mutex);
+ dout("%s: rbdc %p\n", __func__, rbdc);
- dout("rbd_client_create created %p\n", rbdc);
return rbdc;
out_err:
@@ -373,6 +479,8 @@ out_mutex:
out_opt:
if (ceph_opts)
ceph_destroy_options(ceph_opts);
+ dout("%s: error %d\n", __func__, ret);
+
return ERR_PTR(ret);
}
@@ -426,6 +534,12 @@ static match_table_t rbd_opts_tokens = {
{-1, NULL}
};
+struct rbd_options {
+ bool read_only;
+};
+
+#define RBD_READ_ONLY_DEFAULT false
+
static int parse_rbd_opts_token(char *c, void *private)
{
struct rbd_options *rbd_opts = private;
@@ -493,7 +607,7 @@ static void rbd_client_release(struct kref *kref)
{
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
- dout("rbd_release_client %p\n", rbdc);
+ dout("%s: rbdc %p\n", __func__, rbdc);
spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
spin_unlock(&rbd_client_list_lock);
@@ -512,18 +626,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
-/*
- * Destroy requests collection
- */
-static void rbd_coll_release(struct kref *kref)
-{
- struct rbd_req_coll *coll =
- container_of(kref, struct rbd_req_coll, kref);
-
- dout("rbd_coll_release %p\n", coll);
- kfree(coll);
-}
-
static bool rbd_image_format_valid(u32 image_format)
{
return image_format == 1 || image_format == 2;
@@ -707,7 +809,8 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
goto done;
rbd_dev->mapping.read_only = true;
}
- rbd_dev->exists = true;
+ set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+
done:
return ret;
}
@@ -724,7 +827,7 @@ static void rbd_header_free(struct rbd_image_header *header)
header->snapc = NULL;
}
-static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
+static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
char *name;
u64 segment;
@@ -767,23 +870,6 @@ static u64 rbd_segment_length(struct rbd_device *rbd_dev,
return length;
}
-static int rbd_get_num_segments(struct rbd_image_header *header,
- u64 ofs, u64 len)
-{
- u64 start_seg;
- u64 end_seg;
-
- if (!len)
- return 0;
- if (len - 1 > U64_MAX - ofs)
- return -ERANGE;
-
- start_seg = ofs >> header->obj_order;
- end_seg = (ofs + len - 1) >> header->obj_order;
-
- return end_seg - start_seg + 1;
-}
-
/*
* returns the size of an object in the image
*/
@@ -949,8 +1035,10 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
unsigned int bi_size;
struct bio *bio;
- if (!bi)
+ if (!bi) {
+ rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
+ }
bi_size = min_t(unsigned int, bi->bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
@@ -976,399 +1064,721 @@ out_err:
return NULL;
}
-/*
- * helpers for osd request op vectors.
- */
-static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
- int opcode, u32 payload_len)
+static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
- struct ceph_osd_req_op *ops;
+ dout("%s: obj %p (was %d)\n", __func__, obj_request,
+ atomic_read(&obj_request->kref.refcount));
+ kref_get(&obj_request->kref);
+}
+
+static void rbd_obj_request_destroy(struct kref *kref);
+static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request != NULL);
+ dout("%s: obj %p (was %d)\n", __func__, obj_request,
+ atomic_read(&obj_request->kref.refcount));
+ kref_put(&obj_request->kref, rbd_obj_request_destroy);
+}
+
+static void rbd_img_request_get(struct rbd_img_request *img_request)
+{
+ dout("%s: img %p (was %d)\n", __func__, img_request,
+ atomic_read(&img_request->kref.refcount));
+ kref_get(&img_request->kref);
+}
+
+static void rbd_img_request_destroy(struct kref *kref);
+static void rbd_img_request_put(struct rbd_img_request *img_request)
+{
+ rbd_assert(img_request != NULL);
+ dout("%s: img %p (was %d)\n", __func__, img_request,
+ atomic_read(&img_request->kref.refcount));
+ kref_put(&img_request->kref, rbd_img_request_destroy);
+}
+
+static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
+ struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request->img_request == NULL);
+
+ rbd_obj_request_get(obj_request);
+ obj_request->img_request = img_request;
+ obj_request->which = img_request->obj_request_count;
+ rbd_assert(obj_request->which != BAD_WHICH);
+ img_request->obj_request_count++;
+ list_add_tail(&obj_request->links, &img_request->obj_requests);
+ dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+ obj_request->which);
+}
- ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
- if (!ops)
+static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
+ struct rbd_obj_request *obj_request)
+{
+ rbd_assert(obj_request->which != BAD_WHICH);
+
+ dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+ obj_request->which);
+ list_del(&obj_request->links);
+ rbd_assert(img_request->obj_request_count > 0);
+ img_request->obj_request_count--;
+ rbd_assert(obj_request->which == img_request->obj_request_count);
+ obj_request->which = BAD_WHICH;
+ rbd_assert(obj_request->img_request == img_request);
+ obj_request->img_request = NULL;
+ obj_request->callback = NULL;
+ rbd_obj_request_put(obj_request);
+}
+
+static bool obj_request_type_valid(enum obj_request_type type)
+{
+ switch (type) {
+ case OBJ_REQUEST_NODATA:
+ case OBJ_REQUEST_BIO:
+ case OBJ_REQUEST_PAGES:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
+{
+ struct ceph_osd_req_op *op;
+ va_list args;
+ size_t size;
+
+ op = kzalloc(sizeof (*op), GFP_NOIO);
+ if (!op)
return NULL;
+ op->op = opcode;
+ va_start(args, opcode);
+ switch (opcode) {
+ case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_WRITE:
+ /* rbd_osd_req_op_create(READ, offset, length) */
+ /* rbd_osd_req_op_create(WRITE, offset, length) */
+ op->extent.offset = va_arg(args, u64);
+ op->extent.length = va_arg(args, u64);
+ if (opcode == CEPH_OSD_OP_WRITE)
+ op->payload_len = op->extent.length;
+ break;
+ case CEPH_OSD_OP_STAT:
+ break;
+ case CEPH_OSD_OP_CALL:
+ /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
+ op->cls.class_name = va_arg(args, char *);
+ size = strlen(op->cls.class_name);
+ rbd_assert(size <= (size_t) U8_MAX);
+ op->cls.class_len = size;
+ op->payload_len = size;
+
+ op->cls.method_name = va_arg(args, char *);
+ size = strlen(op->cls.method_name);
+ rbd_assert(size <= (size_t) U8_MAX);
+ op->cls.method_len = size;
+ op->payload_len += size;
+
+ op->cls.argc = 0;
+ op->cls.indata = va_arg(args, void *);
+ size = va_arg(args, size_t);
+ rbd_assert(size <= (size_t) U32_MAX);
+ op->cls.indata_len = (u32) size;
+ op->payload_len += size;
+ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
+ /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
+ op->watch.cookie = va_arg(args, u64);
+ op->watch.ver = va_arg(args, u64);
+ op->watch.ver = cpu_to_le64(op->watch.ver);
+ if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
+ op->watch.flag = (u8) 1;
+ break;
+ default:
+ rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
+ kfree(op);
+ op = NULL;
+ break;
+ }
+ va_end(args);
- ops[0].op = opcode;
+ return op;
+}
- /*
- * op extent offset and length will be set later on
- * in calc_raw_layout()
- */
- ops[0].payload_len = payload_len;
+static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
+{
+ kfree(op);
+}
+
+static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
+ struct rbd_obj_request *obj_request)
+{
+ dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
- return ops;
+ return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
-static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
+static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
- kfree(ops);
+ dout("%s: img %p\n", __func__, img_request);
+ if (img_request->callback)
+ img_request->callback(img_request);
+ else
+ rbd_img_request_put(img_request);
}
-static void rbd_coll_end_req_index(struct request *rq,
- struct rbd_req_coll *coll,
- int index,
- int ret, u64 len)
+/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
+
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
- struct request_queue *q;
- int min, max, i;
+ dout("%s: obj %p\n", __func__, obj_request);
- dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
- coll, index, ret, (unsigned long long) len);
+ return wait_for_completion_interruptible(&obj_request->completion);
+}
- if (!rq)
- return;
+static void obj_request_done_init(struct rbd_obj_request *obj_request)
+{
+ atomic_set(&obj_request->done, 0);
+ smp_wmb();
+}
- if (!coll) {
- blk_end_request(rq, ret, len);
- return;
+static void obj_request_done_set(struct rbd_obj_request *obj_request)
+{
+ int done;
+
+ done = atomic_inc_return(&obj_request->done);
+ if (done > 1) {
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = img_request ? img_request->rbd_dev : NULL;
+ rbd_warn(rbd_dev, "obj_request %p was already done\n",
+ obj_request);
}
+}
- q = rq->q;
-
- spin_lock_irq(q->queue_lock);
- coll->status[index].done = 1;
- coll->status[index].rc = ret;
- coll->status[index].bytes = len;
- max = min = coll->num_done;
- while (max < coll->total && coll->status[max].done)
- max++;
-
- for (i = min; i<max; i++) {
- __blk_end_request(rq, coll->status[i].rc,
- coll->status[i].bytes);
- coll->num_done++;
- kref_put(&coll->kref, rbd_coll_release);
+static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+{
+ smp_mb();
+ return atomic_read(&obj_request->done) != 0;
+}
+
+static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p cb %p\n", __func__, obj_request,
+ obj_request->callback);
+ if (obj_request->callback)
+ obj_request->callback(obj_request);
+ else
+ complete_all(&obj_request->completion);
+}
+
+static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+ obj_request_done_set(obj_request);
+}
+
+static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
+ obj_request->result, obj_request->xferred, obj_request->length);
+ /*
+ * ENOENT means a hole in the object. We zero-fill the
+ * entire length of the request. A short read also implies
+ * zero-fill to the end of the request. Either way we
+ * update the xferred count to indicate the whole request
+ * was satisfied.
+ */
+ if (obj_request->result == -ENOENT) {
+ zero_bio_chain(obj_request->bio_list, 0);
+ obj_request->result = 0;
+ obj_request->xferred = obj_request->length;
+ } else if (obj_request->xferred < obj_request->length &&
+ !obj_request->result) {
+ zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+ obj_request->xferred = obj_request->length;
}
- spin_unlock_irq(q->queue_lock);
+ obj_request_done_set(obj_request);
}
-static void rbd_coll_end_req(struct rbd_request *req,
- int ret, u64 len)
+static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
- rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
+ dout("%s: obj %p result %d %llu\n", __func__, obj_request,
+ obj_request->result, obj_request->length);
+ /*
+ * There is no such thing as a successful short write.
+ * Our xferred value is the number of bytes transferred
+ * back. Set it to our originally-requested length.
+ */
+ obj_request->xferred = obj_request->length;
+ obj_request_done_set(obj_request);
}
/*
- * Send ceph osd request
+ * For a simple stat call there's nothing to do. We'll do more if
+ * this is part of a write sequence for a layered image.
*/
-static int rbd_do_request(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 snapid,
- const char *object_name, u64 ofs, u64 len,
- struct bio *bio,
- struct page **pages,
- int num_pages,
- int flags,
- struct ceph_osd_req_op *ops,
- struct rbd_req_coll *coll,
- int coll_index,
- void (*rbd_cb)(struct ceph_osd_request *req,
- struct ceph_msg *msg),
- struct ceph_osd_request **linger_req,
- u64 *ver)
-{
- struct ceph_osd_request *req;
- struct ceph_file_layout *layout;
- int ret;
- u64 bno;
- struct timespec mtime = CURRENT_TIME;
- struct rbd_request *req_data;
- struct ceph_osd_request_head *reqhead;
- struct ceph_osd_client *osdc;
+static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+ obj_request_done_set(obj_request);
+}
- req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
- if (!req_data) {
- if (coll)
- rbd_coll_end_req_index(rq, coll, coll_index,
- -ENOMEM, len);
- return -ENOMEM;
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ struct ceph_msg *msg)
+{
+ struct rbd_obj_request *obj_request = osd_req->r_priv;
+ u16 opcode;
+
+ dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+ rbd_assert(osd_req == obj_request->osd_req);
+ rbd_assert(!!obj_request->img_request ^
+ (obj_request->which == BAD_WHICH));
+
+ if (osd_req->r_result < 0)
+ obj_request->result = osd_req->r_result;
+ obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
+
+ WARN_ON(osd_req->r_num_ops != 1); /* For now */
+
+ /*
+ * We support a 64-bit length, but ultimately it has to be
+ * passed to blk_end_request(), which takes an unsigned int.
+ */
+ obj_request->xferred = osd_req->r_reply_op_len[0];
+ rbd_assert(obj_request->xferred < (u64) UINT_MAX);
+ opcode = osd_req->r_request_ops[0].op;
+ switch (opcode) {
+ case CEPH_OSD_OP_READ:
+ rbd_osd_read_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_WRITE:
+ rbd_osd_write_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_STAT:
+ rbd_osd_stat_callback(obj_request);
+ break;
+ case CEPH_OSD_OP_CALL:
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_WATCH:
+ rbd_osd_trivial_callback(obj_request);
+ break;
+ default:
+ rbd_warn(NULL, "%s: unsupported op %hu\n",
+ obj_request->object_name, (unsigned short) opcode);
+ break;
}
- if (coll) {
- req_data->coll = coll;
- req_data->coll_index = coll_index;
+ if (obj_request_done_test(obj_request))
+ rbd_obj_request_complete(obj_request);
+}
+
+static struct ceph_osd_request *rbd_osd_req_create(
+ struct rbd_device *rbd_dev,
+ bool write_request,
+ struct rbd_obj_request *obj_request,
+ struct ceph_osd_req_op *op)
+{
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct ceph_snap_context *snapc = NULL;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *osd_req;
+ struct timespec now;
+ struct timespec *mtime;
+ u64 snap_id = CEPH_NOSNAP;
+ u64 offset = obj_request->offset;
+ u64 length = obj_request->length;
+
+ if (img_request) {
+ rbd_assert(img_request->write_request == write_request);
+ if (img_request->write_request)
+ snapc = img_request->snapc;
+ else
+ snap_id = img_request->snap_id;
}
- dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
- object_name, (unsigned long long) ofs,
- (unsigned long long) len, coll, coll_index);
+ /* Allocate and initialize the request, for the single op */
osdc = &rbd_dev->rbd_client->client->osdc;
- req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
- false, GFP_NOIO, pages, bio);
- if (!req) {
- ret = -ENOMEM;
- goto done_pages;
+ osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
+ if (!osd_req)
+ return NULL; /* ENOMEM */
+
+ rbd_assert(obj_request_type_valid(obj_request->type));
+ switch (obj_request->type) {
+ case OBJ_REQUEST_NODATA:
+ break; /* Nothing to do */
+ case OBJ_REQUEST_BIO:
+ rbd_assert(obj_request->bio_list != NULL);
+ osd_req->r_bio = obj_request->bio_list;
+ break;
+ case OBJ_REQUEST_PAGES:
+ osd_req->r_pages = obj_request->pages;
+ osd_req->r_num_pages = obj_request->page_count;
+ osd_req->r_page_alignment = offset & ~PAGE_MASK;
+ break;
}
- req->r_callback = rbd_cb;
+ if (write_request) {
+ osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ now = CURRENT_TIME;
+ mtime = &now;
+ } else {
+ osd_req->r_flags = CEPH_OSD_FLAG_READ;
+ mtime = NULL; /* not needed for reads */
+ offset = 0; /* These are not used... */
+ length = 0; /* ...for osd read requests */
+ }
- req_data->rq = rq;
- req_data->bio = bio;
- req_data->pages = pages;
- req_data->len = len;
+ osd_req->r_callback = rbd_osd_req_callback;
+ osd_req->r_priv = obj_request;
- req->r_priv = req_data;
+ osd_req->r_oid_len = strlen(obj_request->object_name);
+ rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
+ memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
- reqhead = req->r_request->front.iov_base;
- reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
+ osd_req->r_file_layout = rbd_dev->layout; /* struct */
- strncpy(req->r_oid, object_name, sizeof(req->r_oid));
- req->r_oid_len = strlen(req->r_oid);
+ /* osd_req will get its own reference to snapc (if non-null) */
- layout = &req->r_file_layout;
- memset(layout, 0, sizeof(*layout));
- layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_stripe_count = cpu_to_le32(1);
- layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
- ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
- req, ops);
- rbd_assert(ret == 0);
+ ceph_osdc_build_request(osd_req, offset, length, 1, op,
+ snapc, snap_id, mtime);
- ceph_osdc_build_request(req, ofs, &len,
- ops,
- snapc,
- &mtime,
- req->r_oid, req->r_oid_len);
+ return osd_req;
+}
- if (linger_req) {
- ceph_osdc_set_request_linger(osdc, req);
- *linger_req = req;
- }
+static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
+{
+ ceph_osdc_put_request(osd_req);
+}
- ret = ceph_osdc_start_request(osdc, req, false);
- if (ret < 0)
- goto done_err;
-
- if (!rbd_cb) {
- ret = ceph_osdc_wait_request(osdc, req);
- if (ver)
- *ver = le64_to_cpu(req->r_reassert_version.version);
- dout("reassert_ver=%llu\n",
- (unsigned long long)
- le64_to_cpu(req->r_reassert_version.version));
- ceph_osdc_put_request(req);
+/* object_name is assumed to be a non-null pointer and NUL-terminated */
+
+static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
+ u64 offset, u64 length,
+ enum obj_request_type type)
+{
+ struct rbd_obj_request *obj_request;
+ size_t size;
+ char *name;
+
+ rbd_assert(obj_request_type_valid(type));
+
+ size = strlen(object_name) + 1;
+ obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
+ if (!obj_request)
+ return NULL;
+
+ name = (char *)(obj_request + 1);
+ obj_request->object_name = memcpy(name, object_name, size);
+ obj_request->offset = offset;
+ obj_request->length = length;
+ obj_request->which = BAD_WHICH;
+ obj_request->type = type;
+ INIT_LIST_HEAD(&obj_request->links);
+ obj_request_done_init(obj_request);
+ init_completion(&obj_request->completion);
+ kref_init(&obj_request->kref);
+
+ dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
+ offset, length, (int)type, obj_request);
+
+ return obj_request;
+}
+
+static void rbd_obj_request_destroy(struct kref *kref)
+{
+ struct rbd_obj_request *obj_request;
+
+ obj_request = container_of(kref, struct rbd_obj_request, kref);
+
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ rbd_assert(obj_request->img_request == NULL);
+ rbd_assert(obj_request->which == BAD_WHICH);
+
+ if (obj_request->osd_req)
+ rbd_osd_req_destroy(obj_request->osd_req);
+
+ rbd_assert(obj_request_type_valid(obj_request->type));
+ switch (obj_request->type) {
+ case OBJ_REQUEST_NODATA:
+ break; /* Nothing to do */
+ case OBJ_REQUEST_BIO:
+ if (obj_request->bio_list)
+ bio_chain_put(obj_request->bio_list);
+ break;
+ case OBJ_REQUEST_PAGES:
+ if (obj_request->pages)
+ ceph_release_page_vector(obj_request->pages,
+ obj_request->page_count);
+ break;
}
- return ret;
-done_err:
- bio_chain_put(req_data->bio);
- ceph_osdc_put_request(req);
-done_pages:
- rbd_coll_end_req(req_data, ret, len);
- kfree(req_data);
- return ret;
+ kfree(obj_request);
}
/*
- * Ceph osd op callback
+ * Caller is responsible for filling in the list of object requests
+ * that comprises the image request, and the Linux request pointer
+ * (if there is one).
*/
-static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
-{
- struct rbd_request *req_data = req->r_priv;
- struct ceph_osd_reply_head *replyhead;
- struct ceph_osd_op *op;
- __s32 rc;
- u64 bytes;
- int read_op;
-
- /* parse reply */
- replyhead = msg->front.iov_base;
- WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
- op = (void *)(replyhead + 1);
- rc = le32_to_cpu(replyhead->result);
- bytes = le64_to_cpu(op->extent.length);
- read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
-
- dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
- (unsigned long long) bytes, read_op, (int) rc);
-
- if (rc == -ENOENT && read_op) {
- zero_bio_chain(req_data->bio, 0);
- rc = 0;
- } else if (rc == 0 && read_op && bytes < req_data->len) {
- zero_bio_chain(req_data->bio, bytes);
- bytes = req_data->len;
- }
+static struct rbd_img_request *rbd_img_request_create(
+ struct rbd_device *rbd_dev,
+ u64 offset, u64 length,
+ bool write_request)
+{
+ struct rbd_img_request *img_request;
+ struct ceph_snap_context *snapc = NULL;
- rbd_coll_end_req(req_data, rc, bytes);
+ img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
+ if (!img_request)
+ return NULL;
- if (req_data->bio)
- bio_chain_put(req_data->bio);
+ if (write_request) {
+ down_read(&rbd_dev->header_rwsem);
+ snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ up_read(&rbd_dev->header_rwsem);
+ if (WARN_ON(!snapc)) {
+ kfree(img_request);
+ return NULL; /* Shouldn't happen */
+ }
+ }
- ceph_osdc_put_request(req);
- kfree(req_data);
+ img_request->rq = NULL;
+ img_request->rbd_dev = rbd_dev;
+ img_request->offset = offset;
+ img_request->length = length;
+ img_request->write_request = write_request;
+ if (write_request)
+ img_request->snapc = snapc;
+ else
+ img_request->snap_id = rbd_dev->spec->snap_id;
+ spin_lock_init(&img_request->completion_lock);
+ img_request->next_completion = 0;
+ img_request->callback = NULL;
+ img_request->obj_request_count = 0;
+ INIT_LIST_HEAD(&img_request->obj_requests);
+ kref_init(&img_request->kref);
+
+ rbd_img_request_get(img_request); /* Avoid a warning */
+ rbd_img_request_put(img_request); /* TEMPORARY */
+
+ dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
+ write_request ? "write" : "read", offset, length,
+ img_request);
+
+ return img_request;
}
-static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void rbd_img_request_destroy(struct kref *kref)
{
- ceph_osdc_put_request(req);
+ struct rbd_img_request *img_request;
+ struct rbd_obj_request *obj_request;
+ struct rbd_obj_request *next_obj_request;
+
+ img_request = container_of(kref, struct rbd_img_request, kref);
+
+ dout("%s: img %p\n", __func__, img_request);
+
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+ rbd_img_obj_request_del(img_request, obj_request);
+ rbd_assert(img_request->obj_request_count == 0);
+
+ if (img_request->write_request)
+ ceph_put_snap_context(img_request->snapc);
+
+ kfree(img_request);
}
-/*
- * Do a synchronous ceph osd operation
- */
-static int rbd_req_sync_op(struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 snapid,
- int flags,
- struct ceph_osd_req_op *ops,
- const char *object_name,
- u64 ofs, u64 inbound_size,
- char *inbound,
- struct ceph_osd_request **linger_req,
- u64 *ver)
+static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
+ struct bio *bio_list)
{
- int ret;
- struct page **pages;
- int num_pages;
-
- rbd_assert(ops != NULL);
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ struct rbd_obj_request *obj_request = NULL;
+ struct rbd_obj_request *next_obj_request;
+ unsigned int bio_offset;
+ u64 image_offset;
+ u64 resid;
+ u16 opcode;
+
+ dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
+
+ opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
+ : CEPH_OSD_OP_READ;
+ bio_offset = 0;
+ image_offset = img_request->offset;
+ rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ resid = img_request->length;
+ rbd_assert(resid > 0);
+ while (resid) {
+ const char *object_name;
+ unsigned int clone_size;
+ struct ceph_osd_req_op *op;
+ u64 offset;
+ u64 length;
+
+ object_name = rbd_segment_name(rbd_dev, image_offset);
+ if (!object_name)
+ goto out_unwind;
+ offset = rbd_segment_offset(rbd_dev, image_offset);
+ length = rbd_segment_length(rbd_dev, image_offset, resid);
+ obj_request = rbd_obj_request_create(object_name,
+ offset, length,
+ OBJ_REQUEST_BIO);
+ kfree(object_name); /* object request has its own copy */
+ if (!obj_request)
+ goto out_unwind;
+
+ rbd_assert(length <= (u64) UINT_MAX);
+ clone_size = (unsigned int) length;
+ obj_request->bio_list = bio_chain_clone_range(&bio_list,
+ &bio_offset, clone_size,
+ GFP_ATOMIC);
+ if (!obj_request->bio_list)
+ goto out_partial;
- num_pages = calc_pages_for(ofs, inbound_size);
- pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ /*
+ * Build up the op to use in building the osd
+ * request. Note that the contents of the op are
+ * copied by rbd_osd_req_create().
+ */
+ op = rbd_osd_req_op_create(opcode, offset, length);
+ if (!op)
+ goto out_partial;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev,
+ img_request->write_request,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out_partial;
+ /* status and version are initially zero-filled */
+
+ rbd_img_obj_request_add(img_request, obj_request);
+
+ image_offset += length;
+ resid -= length;
+ }
- ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
- object_name, ofs, inbound_size, NULL,
- pages, num_pages,
- flags,
- ops,
- NULL, 0,
- NULL,
- linger_req, ver);
- if (ret < 0)
- goto done;
+ return 0;
- if ((flags & CEPH_OSD_FLAG_READ) && inbound)
- ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);
+out_partial:
+ rbd_obj_request_put(obj_request);
+out_unwind:
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+ rbd_obj_request_put(obj_request);
-done:
- ceph_release_page_vector(pages, num_pages);
- return ret;
+ return -ENOMEM;
}
-/*
- * Do an asynchronous ceph osd operation
- */
-static int rbd_do_op(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- char *seg_name;
- u64 seg_ofs;
- u64 seg_len;
- int ret;
- struct ceph_osd_req_op *ops;
- u32 payload_len;
- int opcode;
- int flags;
- u64 snapid;
-
- seg_name = rbd_segment_name(rbd_dev, ofs);
- if (!seg_name)
- return -ENOMEM;
- seg_len = rbd_segment_length(rbd_dev, ofs, len);
- seg_ofs = rbd_segment_offset(rbd_dev, ofs);
-
- if (rq_data_dir(rq) == WRITE) {
- opcode = CEPH_OSD_OP_WRITE;
- flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
- snapid = CEPH_NOSNAP;
- payload_len = seg_len;
- } else {
- opcode = CEPH_OSD_OP_READ;
- flags = CEPH_OSD_FLAG_READ;
- snapc = NULL;
- snapid = rbd_dev->spec->snap_id;
- payload_len = 0;
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+{
+ struct rbd_img_request *img_request;
+ u32 which = obj_request->which;
+ bool more = true;
+
+ img_request = obj_request->img_request;
+
+ dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+ rbd_assert(img_request != NULL);
+ rbd_assert(img_request->rq != NULL);
+ rbd_assert(img_request->obj_request_count > 0);
+ rbd_assert(which != BAD_WHICH);
+ rbd_assert(which < img_request->obj_request_count);
+ rbd_assert(which >= img_request->next_completion);
+
+ spin_lock_irq(&img_request->completion_lock);
+ if (which != img_request->next_completion)
+ goto out;
+
+ for_each_obj_request_from(img_request, obj_request) {
+ unsigned int xferred;
+ int result;
+
+ rbd_assert(more);
+ rbd_assert(which < img_request->obj_request_count);
+
+ if (!obj_request_done_test(obj_request))
+ break;
+
+ rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
+ xferred = (unsigned int) obj_request->xferred;
+ result = (int) obj_request->result;
+ if (result)
+ rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
+ img_request->write_request ? "write" : "read",
+ result, xferred);
+
+ more = blk_end_request(img_request->rq, result, xferred);
+ which++;
}
- ret = -ENOMEM;
- ops = rbd_create_rw_ops(1, opcode, payload_len);
- if (!ops)
- goto done;
+ rbd_assert(more ^ (which == img_request->obj_request_count));
+ img_request->next_completion = which;
+out:
+ spin_unlock_irq(&img_request->completion_lock);
- /* we've taken care of segment sizes earlier when we
- cloned the bios. We should never have a segment
- truncated at this point */
- rbd_assert(seg_len == len);
-
- ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
- seg_name, seg_ofs, seg_len,
- bio,
- NULL, 0,
- flags,
- ops,
- coll, coll_index,
- rbd_req_cb, 0, NULL);
-
- rbd_destroy_ops(ops);
-done:
- kfree(seg_name);
- return ret;
+ if (!more)
+ rbd_img_request_complete(img_request);
}
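
A toy model of the completion scan above may help: object requests can finish in any order, but blk_end_request() is only called for the longest fully-completed prefix, so next_completion only advances past contiguous done entries. The data below is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Advance "next" over the contiguous run of completed requests. */
static unsigned int advance(const bool *done, unsigned int next,
			    unsigned int count)
{
	while (next < count && done[next])
		next++;
	return next;
}

int main(void)
{
	bool done[5] = { true, true, false, true, false };

	/* Requests 0 and 1 are reported; 3 must wait until 2 completes. */
	printf("next_completion = %u\n", advance(done, 0, 5));	/* prints 2 */
	return 0;
}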
-/*
- * Request sync osd read
- */
-static int rbd_req_sync_read(struct rbd_device *rbd_dev,
- u64 snapid,
- const char *object_name,
- u64 ofs, u64 len,
- char *buf,
- u64 *ver)
-{
- struct ceph_osd_req_op *ops;
- int ret;
+static int rbd_img_request_submit(struct rbd_img_request *img_request)
+{
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_obj_request *obj_request;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
- if (!ops)
- return -ENOMEM;
+ dout("%s: img %p\n", __func__, img_request);
+ for_each_obj_request(img_request, obj_request) {
+ int ret;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- snapid,
- CEPH_OSD_FLAG_READ,
- ops, object_name, ofs, len, buf, NULL, ver);
- rbd_destroy_ops(ops);
+ obj_request->callback = rbd_img_obj_callback;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ return ret;
+ /*
+ * The image request has its own reference to each
+ * of its object requests, so we can safely drop the
+ * initial one here.
+ */
+ rbd_obj_request_put(obj_request);
+ }
- return ret;
+ return 0;
}
-/*
- * Request sync osd watch
- */
-static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
- u64 ver,
- u64 notify_id)
+static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
+ u64 ver, u64 notify_id)
{
- struct ceph_osd_req_op *ops;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_req_op *op;
+ struct ceph_osd_client *osdc;
int ret;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
- if (!ops)
+ obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+ OBJ_REQUEST_NODATA);
+ if (!obj_request)
return -ENOMEM;
- ops[0].watch.ver = cpu_to_le64(ver);
- ops[0].watch.cookie = notify_id;
- ops[0].watch.flag = 0;
+ ret = -ENOMEM;
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
- ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
- rbd_dev->header_name, 0, 0, NULL,
- NULL, 0,
- CEPH_OSD_FLAG_READ,
- ops,
- NULL, 0,
- rbd_simple_req_cb, 0, NULL);
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ obj_request->callback = rbd_obj_request_put;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+out:
+ if (ret)
+ rbd_obj_request_put(obj_request);
- rbd_destroy_ops(ops);
return ret;
}
@@ -1381,95 +1791,103 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (!rbd_dev)
return;
- dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
+ dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long) notify_id,
(unsigned int) opcode);
rc = rbd_dev_refresh(rbd_dev, &hver);
if (rc)
- pr_warning(RBD_DRV_NAME "%d got notification but failed to "
- " update snaps: %d\n", rbd_dev->major, rc);
+ rbd_warn(rbd_dev, "got notification but failed to "
+ " update snaps: %d\n", rc);
- rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
+ rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}
/*
- * Request sync osd watch
+ * Request sync osd watch/unwatch. The value of "start" determines
+ * whether a watch request is being initiated or torn down.
*/
-static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
- struct ceph_osd_req_op *ops;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_req_op *op;
int ret;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
- if (!ops)
- return -ENOMEM;
+ rbd_assert(start ^ !!rbd_dev->watch_event);
+ rbd_assert(start ^ !!rbd_dev->watch_request);
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
- (void *)rbd_dev, &rbd_dev->watch_event);
- if (ret < 0)
- goto fail;
+ if (start) {
+ ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
+ &rbd_dev->watch_event);
+ if (ret < 0)
+ return ret;
+ rbd_assert(rbd_dev->watch_event != NULL);
+ }
- ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
- ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
- ops[0].watch.flag = 1;
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+ OBJ_REQUEST_NODATA);
+ if (!obj_request)
+ goto out_cancel;
+
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
+ rbd_dev->watch_event->cookie,
+ rbd_dev->header.obj_version, start);
+ if (!op)
+ goto out_cancel;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out_cancel;
+
+ if (start)
+ ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
+ else
+ ceph_osdc_unregister_linger_request(osdc,
+ rbd_dev->watch_request->osd_req);
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out_cancel;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out_cancel;
+ ret = obj_request->result;
+ if (ret)
+ goto out_cancel;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- rbd_dev->header_name,
- 0, 0, NULL,
- &rbd_dev->watch_request, NULL);
+ /*
+ * A watch request is set to linger, so the underlying osd
+ * request won't go away until we unregister it. We retain
+ * a pointer to the object request during that time (in
+ * rbd_dev->watch_request), so we'll keep a reference to
+ * it. We'll drop that reference (below) after we've
+ * unregistered it.
+ */
+ if (start) {
+ rbd_dev->watch_request = obj_request;
- if (ret < 0)
- goto fail_event;
+ return 0;
+ }
- rbd_destroy_ops(ops);
- return 0;
+ /* We have successfully torn down the watch request */
-fail_event:
+ rbd_obj_request_put(rbd_dev->watch_request);
+ rbd_dev->watch_request = NULL;
+out_cancel:
+ /* Cancel the event if we're tearing down, or on error */
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
-fail:
- rbd_destroy_ops(ops);
- return ret;
-}
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
-/*
- * Request sync osd unwatch
- */
-static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
-{
- struct ceph_osd_req_op *ops;
- int ret;
-
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
- if (!ops)
- return -ENOMEM;
-
- ops[0].watch.ver = 0;
- ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
- ops[0].watch.flag = 0;
-
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- rbd_dev->header_name,
- 0, 0, NULL, NULL, NULL);
-
-
- rbd_destroy_ops(ops);
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
return ret;
}
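
The paired assertions at the top of rbd_dev_header_watch_sync() use the terse "start ^ !!pointer" idiom; a small stand-alone check of what it enforces, using stand-in variables rather than driver state:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	int start;
	void *watch_event;

	/* Starting a watch: no watch state may exist yet. */
	start = 1; watch_event = NULL;   assert(start ^ !!watch_event);
	/* Tearing one down: the watch state must already exist. */
	start = 0; watch_event = &start; assert(start ^ !!watch_event);
	/* The two mismatched combinations would trip the assertion. */
	return 0;
}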
/*
* Synchronous osd object method call
*/
-static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
+static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
@@ -1477,169 +1895,154 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
size_t outbound_size,
char *inbound,
size_t inbound_size,
- int flags,
- u64 *ver)
+ u64 *version)
{
- struct ceph_osd_req_op *ops;
- int class_name_len = strlen(class_name);
- int method_name_len = strlen(method_name);
- int payload_size;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_req_op *op;
+ struct page **pages;
+ u32 page_count;
int ret;
/*
- * Any input parameters required by the method we're calling
- * will be sent along with the class and method names as
- * part of the message payload. That data and its size are
- * supplied via the indata and indata_len fields (named from
- * the perspective of the server side) in the OSD request
- * operation.
+ * Method calls are ultimately read operations but they
+ * don't involve object data (so no offset or length).
+ * The result should be placed into the inbound buffer
+ * provided. They also supply outbound data--parameters for
+ * the object method. Currently if this is present it will
+ * be a snapshot id.
*/
- payload_size = class_name_len + method_name_len + outbound_size;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
- if (!ops)
- return -ENOMEM;
+ page_count = (u32) calc_pages_for(0, inbound_size);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- ops[0].cls.class_name = class_name;
- ops[0].cls.class_len = (__u8) class_name_len;
- ops[0].cls.method_name = method_name;
- ops[0].cls.method_len = (__u8) method_name_len;
- ops[0].cls.argc = 0;
- ops[0].cls.indata = outbound;
- ops[0].cls.indata_len = outbound_size;
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, 0, 0,
+ OBJ_REQUEST_PAGES);
+ if (!obj_request)
+ goto out;
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- flags, ops,
- object_name, 0, inbound_size, inbound,
- NULL, ver);
+ obj_request->pages = pages;
+ obj_request->page_count = page_count;
- rbd_destroy_ops(ops);
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
+ method_name, outbound, outbound_size);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
- dout("cls_exec returned %d\n", ret);
- return ret;
-}
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out;
-static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
-{
- struct rbd_req_coll *coll =
- kzalloc(sizeof(struct rbd_req_coll) +
- sizeof(struct rbd_req_status) * num_reqs,
- GFP_ATOMIC);
+ ret = obj_request->result;
+ if (ret < 0)
+ goto out;
+ ret = 0;
+ ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
+ if (version)
+ *version = obj_request->version;
+out:
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
+ else
+ ceph_release_page_vector(pages, page_count);
- if (!coll)
- return NULL;
- coll->total = num_reqs;
- kref_init(&coll->kref);
- return coll;
+ return ret;
}
-/*
- * block device queue callback
- */
-static void rbd_rq_fn(struct request_queue *q)
+static void rbd_request_fn(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
struct rbd_device *rbd_dev = q->queuedata;
+ bool read_only = rbd_dev->mapping.read_only;
struct request *rq;
+ int result;
while ((rq = blk_fetch_request(q))) {
- struct bio *bio;
- bool do_write;
- unsigned int size;
- u64 ofs;
- int num_segs, cur_seg = 0;
- struct rbd_req_coll *coll;
- struct ceph_snap_context *snapc;
- unsigned int bio_offset;
-
- dout("fetched request\n");
-
- /* filter out block requests we don't understand */
- if ((rq->cmd_type != REQ_TYPE_FS)) {
- __blk_end_request_all(rq, 0);
- continue;
- }
+ bool write_request = rq_data_dir(rq) == WRITE;
+ struct rbd_img_request *img_request;
+ u64 offset;
+ u64 length;
+
+ /* Ignore any non-FS requests that filter through. */
- /* deduce our operation (read, write) */
- do_write = (rq_data_dir(rq) == WRITE);
- if (do_write && rbd_dev->mapping.read_only) {
- __blk_end_request_all(rq, -EROFS);
+ if (rq->cmd_type != REQ_TYPE_FS) {
+ dout("%s: non-fs request type %d\n", __func__,
+ (int) rq->cmd_type);
+ __blk_end_request_all(rq, 0);
continue;
}
- spin_unlock_irq(q->queue_lock);
+ /* Ignore/skip any zero-length requests */
- down_read(&rbd_dev->header_rwsem);
+ offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
+ length = (u64) blk_rq_bytes(rq);
- if (!rbd_dev->exists) {
- rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
- up_read(&rbd_dev->header_rwsem);
- dout("request for non-existent snapshot");
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, -ENXIO);
+ if (!length) {
+ dout("%s: zero-length request\n", __func__);
+ __blk_end_request_all(rq, 0);
continue;
}
- snapc = ceph_get_snap_context(rbd_dev->header.snapc);
-
- up_read(&rbd_dev->header_rwsem);
-
- size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * SECTOR_SIZE;
- bio = rq->bio;
+ spin_unlock_irq(q->queue_lock);
- dout("%s 0x%x bytes at 0x%llx\n",
- do_write ? "write" : "read",
- size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
+ /* Disallow writes to a read-only device */
- num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
- if (num_segs <= 0) {
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, num_segs);
- ceph_put_snap_context(snapc);
- continue;
+ if (write_request) {
+ result = -EROFS;
+ if (read_only)
+ goto end_request;
+ rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
}
- coll = rbd_alloc_coll(num_segs);
- if (!coll) {
- spin_lock_irq(q->queue_lock);
- __blk_end_request_all(rq, -ENOMEM);
- ceph_put_snap_context(snapc);
- continue;
- }
-
- bio_offset = 0;
- do {
- u64 limit = rbd_segment_length(rbd_dev, ofs, size);
- unsigned int chain_size;
- struct bio *bio_chain;
-
- BUG_ON(limit > (u64) UINT_MAX);
- chain_size = (unsigned int) limit;
- dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- kref_get(&coll->kref);
+ /*
+ * Quit early if the mapped snapshot no longer
+ * exists. It's still possible the snapshot will
+ * have disappeared by the time our request arrives
+ * at the osd, but there's no sense in sending it if
+ * we already know.
+ */
+ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
+ dout("request for non-existent snapshot");
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
+ result = -ENXIO;
+ goto end_request;
+ }
- /* Pass a cloned bio chain via an osd request */
+ result = -EINVAL;
+ if (WARN_ON(offset && length > U64_MAX - offset + 1))
+ goto end_request; /* Shouldn't happen */
- bio_chain = bio_chain_clone_range(&bio,
- &bio_offset, chain_size,
- GFP_ATOMIC);
- if (bio_chain)
- (void) rbd_do_op(rq, rbd_dev, snapc,
- ofs, chain_size,
- bio_chain, coll, cur_seg);
- else
- rbd_coll_end_req_index(rq, coll, cur_seg,
- -ENOMEM, chain_size);
- size -= chain_size;
- ofs += chain_size;
+ result = -ENOMEM;
+ img_request = rbd_img_request_create(rbd_dev, offset, length,
+ write_request);
+ if (!img_request)
+ goto end_request;
- cur_seg++;
- } while (size > 0);
- kref_put(&coll->kref, rbd_coll_release);
+ img_request->rq = rq;
+ result = rbd_img_request_fill_bio(img_request, rq->bio);
+ if (!result)
+ result = rbd_img_request_submit(img_request);
+ if (result)
+ rbd_img_request_put(img_request);
+end_request:
spin_lock_irq(q->queue_lock);
-
- ceph_put_snap_context(snapc);
+ if (result < 0) {
+ rbd_warn(rbd_dev, "obj_request %s result %d\n",
+ write_request ? "write" : "read", result);
+ __blk_end_request_all(rq, result);
+ }
}
}
@@ -1703,6 +2106,71 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
put_disk(disk);
}
+static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+ const char *object_name,
+ u64 offset, u64 length,
+ char *buf, u64 *version)
+
+{
+ struct ceph_osd_req_op *op;
+ struct rbd_obj_request *obj_request;
+ struct ceph_osd_client *osdc;
+ struct page **pages = NULL;
+ u32 page_count;
+ size_t size;
+ int ret;
+
+ page_count = (u32) calc_pages_for(offset, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, offset, length,
+ OBJ_REQUEST_PAGES);
+ if (!obj_request)
+ goto out;
+
+ obj_request->pages = pages;
+ obj_request->page_count = page_count;
+
+ op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
+ if (!op)
+ goto out;
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+ obj_request, op);
+ rbd_osd_req_op_destroy(op);
+ if (!obj_request->osd_req)
+ goto out;
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out;
+
+ ret = obj_request->result;
+ if (ret < 0)
+ goto out;
+
+ rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
+ size = (size_t) obj_request->xferred;
+ ceph_copy_from_page_vector(pages, buf, 0, size);
+ rbd_assert(size <= (size_t) INT_MAX);
+ ret = (int) size;
+ if (version)
+ *version = obj_request->version;
+out:
+ if (obj_request)
+ rbd_obj_request_put(obj_request);
+ else
+ ceph_release_page_vector(pages, page_count);
+
+ return ret;
+}
+
/*
* Read the complete header for the given rbd device.
*
@@ -1741,24 +2209,20 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
if (!ondisk)
return ERR_PTR(-ENOMEM);
- ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
- rbd_dev->header_name,
+ ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
0, size,
(char *) ondisk, version);
-
if (ret < 0)
goto out_err;
if (WARN_ON((size_t) ret < size)) {
ret = -ENXIO;
- pr_warning("short header read for image %s"
- " (want %zd got %d)\n",
- rbd_dev->spec->image_name, size, ret);
+ rbd_warn(rbd_dev, "short header read (want %zd got %d)",
+ size, ret);
goto out_err;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
- pr_warning("invalid header for image %s\n",
- rbd_dev->spec->image_name);
+ rbd_warn(rbd_dev, "invalid header");
goto out_err;
}
@@ -1895,8 +2359,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
- /* init rq */
- q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
+ q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
@@ -2233,7 +2696,7 @@ static void rbd_spec_free(struct kref *kref)
kfree(spec);
}
-struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@@ -2243,6 +2706,7 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
spin_lock_init(&rbd_dev->lock);
+ rbd_dev->flags = 0;
INIT_LIST_HEAD(&rbd_dev->node);
INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
@@ -2250,6 +2714,13 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->spec = spec;
rbd_dev->rbd_client = rbdc;
+ /* Initialize the layout used for all rbd requests */
+
+ rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
+ rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+
return rbd_dev;
}
@@ -2360,12 +2831,11 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_size",
(char *) &snapid, sizeof (snapid),
- (char *) &size_buf, sizeof (size_buf),
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ (char *) &size_buf, sizeof (size_buf), NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
@@ -2396,15 +2866,13 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_object_prefix",
NULL, 0,
- reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = 0; /* rbd_req_sync_exec() can return positive */
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2435,12 +2903,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 incompat;
int ret;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_features",
(char *) &snapid, sizeof (snapid),
(char *) &features_buf, sizeof (features_buf),
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
@@ -2474,7 +2942,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
void *end;
char *image_id;
u64 overlap;
- size_t len = 0;
int ret;
parent_spec = rbd_spec_alloc();
@@ -2492,12 +2959,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
snapid = cpu_to_le64(CEPH_NOSNAP);
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
(char *) &snapid, sizeof (snapid),
- (char *) reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ (char *) reply_buf, size, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out_err;
@@ -2508,13 +2974,18 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (parent_spec->pool_id == CEPH_NOPOOL)
goto out; /* No parent? No problem. */
- image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ /* The ceph file layout needs to fit pool id in 32 bits */
+
+ ret = -EIO;
+ if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
+ goto out;
+
+ image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
ret = PTR_ERR(image_id);
goto out_err;
}
parent_spec->image_id = image_id;
- parent_spec->image_id_len = len;
ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
ceph_decode_64_safe(&p, end, overlap, out_err);
@@ -2544,26 +3015,25 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
rbd_assert(!rbd_dev->spec->image_name);
- image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+ len = strlen(rbd_dev->spec->image_id);
+ image_id_size = sizeof (__le32) + len;
image_id = kmalloc(image_id_size, GFP_KERNEL);
if (!image_id)
return NULL;
p = image_id;
end = (char *) image_id + image_id_size;
- ceph_encode_string(&p, end, rbd_dev->spec->image_id,
- (u32) rbd_dev->spec->image_id_len);
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
goto out;
- ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+ ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
"rbd", "dir_get_name",
image_id, image_id_size,
- (char *) reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
+ (char *) reply_buf, size, NULL);
if (ret < 0)
goto out;
p = reply_buf;
@@ -2602,8 +3072,11 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
osdc = &rbd_dev->rbd_client->client->osdc;
name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
- if (!name)
- return -EIO; /* pool id too large (>= 2^31) */
+ if (!name) {
+ rbd_warn(rbd_dev, "there is no pool with id %llu",
+ rbd_dev->spec->pool_id); /* Really a BUG() */
+ return -EIO;
+ }
rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
if (!rbd_dev->spec->pool_name)
@@ -2612,19 +3085,17 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
/* Fetch the image name; tolerate failure here */
name = rbd_dev_image_name(rbd_dev);
- if (name) {
- rbd_dev->spec->image_name_len = strlen(name);
+ if (name)
rbd_dev->spec->image_name = (char *) name;
- } else {
- pr_warning(RBD_DRV_NAME "%d "
- "unable to get image name for image id %s\n",
- rbd_dev->major, rbd_dev->spec->image_id);
- }
+ else
+ rbd_warn(rbd_dev, "unable to get image name");
/* Look up the snapshot name. */
name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
if (!name) {
+ rbd_warn(rbd_dev, "no snapshot with id %llu",
+ rbd_dev->spec->snap_id); /* Really a BUG() */
ret = -EIO;
goto out_err;
}
@@ -2665,12 +3136,11 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapcontext",
NULL, 0,
- reply_buf, size,
- CEPH_OSD_FLAG_READ, ver);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, size, ver);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
@@ -2735,12 +3205,11 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
return ERR_PTR(-ENOMEM);
snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapshot_name",
(char *) &snap_id, sizeof (snap_id),
- reply_buf, size,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ reply_buf, size, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
@@ -2766,7 +3235,7 @@ out:
static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
u64 *snap_size, u64 *snap_features)
{
- __le64 snap_id;
+ u64 snap_id;
u8 order;
int ret;
@@ -2865,10 +3334,17 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
struct list_head *next = links->next;
- /* Existing snapshot not in the new snap context */
-
+ /*
+ * A previously-existing snapshot is not in
+ * the new snap context.
+ *
+ * If the now missing snapshot is the one the
+ * image is mapped to, clear its exists flag
+ * so we can avoid sending any more requests
+ * to it.
+ */
if (rbd_dev->spec->snap_id == snap->id)
- rbd_dev->exists = false;
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_remove_snap_dev(snap);
dout("%ssnap id %llu has been removed\n",
rbd_dev->spec->snap_id == snap->id ?
@@ -2942,7 +3418,7 @@ static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
struct rbd_snap *snap;
int ret = 0;
- dout("%s called\n", __func__);
+ dout("%s:\n", __func__);
if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
return -EIO;
@@ -2983,22 +3459,6 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
device_unregister(&rbd_dev->dev);
}
-static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
-{
- int ret, rc;
-
- do {
- ret = rbd_req_sync_watch(rbd_dev);
- if (ret == -ERANGE) {
- rc = rbd_dev_refresh(rbd_dev, NULL);
- if (rc < 0)
- return rc;
- }
- } while (ret == -ERANGE);
-
- return ret;
-}
-
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
/*
@@ -3138,11 +3598,9 @@ static inline char *dup_token(const char **buf, size_t *lenp)
size_t len;
len = next_token(buf);
- dup = kmalloc(len + 1, GFP_KERNEL);
+ dup = kmemdup(*buf, len + 1, GFP_KERNEL);
if (!dup)
return NULL;
-
- memcpy(dup, *buf, len);
*(dup + len) = '\0';
*buf += len;
@@ -3210,8 +3668,10 @@ static int rbd_add_parse_args(const char *buf,
/* The first four tokens are required */
len = next_token(&buf);
- if (!len)
- return -EINVAL; /* Missing monitor address(es) */
+ if (!len) {
+ rbd_warn(NULL, "no monitor address(es) provided");
+ return -EINVAL;
+ }
mon_addrs = buf;
mon_addrs_size = len + 1;
buf += len;
@@ -3220,8 +3680,10 @@ static int rbd_add_parse_args(const char *buf,
options = dup_token(&buf, NULL);
if (!options)
return -ENOMEM;
- if (!*options)
- goto out_err; /* Missing options */
+ if (!*options) {
+ rbd_warn(NULL, "no options provided");
+ goto out_err;
+ }
spec = rbd_spec_alloc();
if (!spec)
@@ -3230,14 +3692,18 @@ static int rbd_add_parse_args(const char *buf,
spec->pool_name = dup_token(&buf, NULL);
if (!spec->pool_name)
goto out_mem;
- if (!*spec->pool_name)
- goto out_err; /* Missing pool name */
+ if (!*spec->pool_name) {
+ rbd_warn(NULL, "no pool name provided");
+ goto out_err;
+ }
- spec->image_name = dup_token(&buf, &spec->image_name_len);
+ spec->image_name = dup_token(&buf, NULL);
if (!spec->image_name)
goto out_mem;
- if (!*spec->image_name)
- goto out_err; /* Missing image name */
+ if (!*spec->image_name) {
+ rbd_warn(NULL, "no image name provided");
+ goto out_err;
+ }
/*
* Snapshot name is optional; default is to use "-"
@@ -3251,10 +3717,9 @@ static int rbd_add_parse_args(const char *buf,
ret = -ENAMETOOLONG;
goto out_err;
}
- spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+ spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!spec->snap_name)
goto out_mem;
- memcpy(spec->snap_name, buf, len);
*(spec->snap_name + len) = '\0';
/* Initialize all rbd options to the defaults */
@@ -3323,7 +3788,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
- size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
+ size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
@@ -3339,21 +3804,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
goto out;
}
- ret = rbd_req_sync_exec(rbd_dev, object_name,
+ ret = rbd_obj_method_sync(rbd_dev, object_name,
"rbd", "get_id",
NULL, 0,
- response, RBD_IMAGE_ID_LEN_MAX,
- CEPH_OSD_FLAG_READ, NULL);
- dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ response, RBD_IMAGE_ID_LEN_MAX, NULL);
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
- ret = 0; /* rbd_req_sync_exec() can return positive */
p = response;
rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
p + RBD_IMAGE_ID_LEN_MAX,
- &rbd_dev->spec->image_id_len,
- GFP_NOIO);
+ NULL, GFP_NOIO);
if (IS_ERR(rbd_dev->spec->image_id)) {
ret = PTR_ERR(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -3377,11 +3839,10 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
if (!rbd_dev->spec->image_id)
return -ENOMEM;
- rbd_dev->spec->image_id_len = 0;
/* Record the header object name for this rbd image. */
- size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
+ size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name) {
ret = -ENOMEM;
@@ -3427,7 +3888,7 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
* Image id was filled in by the caller. Record the header
* object name for this rbd image.
*/
- size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
+ size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
@@ -3542,7 +4003,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
if (ret)
goto err_out_bus;
- ret = rbd_init_watch_dev(rbd_dev);
+ ret = rbd_dev_header_watch_sync(rbd_dev, 1);
if (ret)
goto err_out_bus;
@@ -3638,6 +4099,13 @@ static ssize_t rbd_add(struct bus_type *bus,
goto err_out_client;
spec->pool_id = (u64) rc;
+ /* The ceph file layout needs to fit pool id in 32 bits */
+
+ if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
+ rc = -EIO;
+ goto err_out_client;
+ }
+
rbd_dev = rbd_dev_create(rbdc, spec);
if (!rbd_dev)
goto err_out_client;
@@ -3691,15 +4159,8 @@ static void rbd_dev_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- if (rbd_dev->watch_request) {
- struct ceph_client *client = rbd_dev->rbd_client->client;
-
- ceph_osdc_unregister_linger_request(&client->osdc,
- rbd_dev->watch_request);
- }
if (rbd_dev->watch_event)
- rbd_req_sync_unwatch(rbd_dev);
-
+ rbd_dev_header_watch_sync(rbd_dev, 0);
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
@@ -3743,10 +4204,14 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- if (rbd_dev->open_count) {
+ spin_lock_irq(&rbd_dev->lock);
+ if (rbd_dev->open_count)
ret = -EBUSY;
+ else
+ set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+ spin_unlock_irq(&rbd_dev->lock);
+ if (ret < 0)
goto done;
- }
rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
@@ -3782,10 +4247,15 @@ static void rbd_sysfs_cleanup(void)
device_unregister(&rbd_root_dev);
}
-int __init rbd_init(void)
+static int __init rbd_init(void)
{
int rc;
+ if (!libceph_compatible(NULL)) {
+ rbd_warn(NULL, "libceph incompatibility (quitting)");
+
+ return -EINVAL;
+ }
rc = rbd_sysfs_init();
if (rc)
return rc;
@@ -3793,7 +4263,7 @@ int __init rbd_init(void)
return 0;
}
-void __exit rbd_exit(void)
+static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
}
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
new file mode 100644
index 000000000000..f35cd0b71f7b
--- /dev/null
+++ b/drivers/block/rsxx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
+rsxx-y := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
new file mode 100644
index 000000000000..a295e7e9ee41
--- /dev/null
+++ b/drivers/block/rsxx/config.c
@@ -0,0 +1,213 @@
+/*
+* Filename: config.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include <linux/swab.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+static void initialize_config(void *config)
+{
+ struct rsxx_card_cfg *cfg = config;
+
+ cfg->hdr.version = RSXX_CFG_VERSION;
+
+ cfg->data.block_size = RSXX_HW_BLK_SIZE;
+ cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
+ cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
+ cfg->data.cache_order = (-1);
+ cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
+ cfg->data.intr_coal.count = 0;
+ cfg->data.intr_coal.latency = 0;
+}
+
+static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
+{
+ /*
+ * Return the complement of the CRC to ensure compatibility
+ * (i.e. this is how early rsxx drivers did it.)
+ */
+
+ return ~crc32(~0, &cfg->data, sizeof(cfg->data));
+}
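
For reference, the "seed with ~0, complement the result" idiom used by config_data_crc32() is the conventional CRC-32 finalization. A self-contained bitwise sketch (illustrative only; the driver uses the kernel's crc32() library):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal bitwise CRC-32 (reflected, polynomial 0xEDB88320) with no
 * built-in pre/post conditioning, mirroring the kernel's crc32(). */
static uint32_t crc32_raw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char data[] = "123456789";
	uint32_t crc = ~crc32_raw(~0u, data, sizeof(data) - 1);

	printf("0x%08x\n", crc);	/* standard check value 0xcbf43926 */
	return 0;
}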
+
+
+/*----------------- Config Byte Swap Functions -------------------*/
+static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
+{
+ hdr->version = be32_to_cpu((__force __be32) hdr->version);
+ hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
+}
+
+static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
+{
+ hdr->version = (__force u32) cpu_to_be32(hdr->version);
+ hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
+}
+
+static void config_data_swab(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = swab32(data[i]);
+}
+
+static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = le32_to_cpu((__force __le32) data[i]);
+}
+
+static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
+{
+ u32 *data = (u32 *) &cfg->data;
+ int i;
+
+ for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+ data[i] = (__force u32) cpu_to_le32(data[i]);
+}
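
Taken together, the helpers above mean the CRC is always computed over the little-endian image of the config data while the card stores the big-endian image, whatever the host byte order. A small user-space illustration (htole32()/htobe32() are glibc helpers used only for this demonstration):

#include <endian.h>	/* htole32()/htobe32(), glibc-specific */
#include <stdint.h>
#include <stdio.h>

/* Print the in-memory byte sequence of a 32-bit image. */
static void dump(const char *what, uint32_t image)
{
	const uint8_t *b = (const uint8_t *) &image;

	printf("%s: %02x %02x %02x %02x\n", what, b[0], b[1], b[2], b[3]);
}

int main(void)
{
	uint32_t word = 0x11223344;	/* a config word in CPU byte order */

	dump("CRC input (LE)", htole32(word));	/* 44 33 22 11 */
	dump("stored    (BE)", htobe32(word));	/* 11 22 33 44 */
	return 0;
}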
+
+
+/*----------------- Config Operations ------------------*/
+static int rsxx_save_config(struct rsxx_cardinfo *card)
+{
+ struct rsxx_card_cfg cfg;
+ int st;
+
+ memcpy(&cfg, &card->config, sizeof(cfg));
+
+ if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
+ dev_err(CARD_TO_DEV(card),
+ "Cannot save config with invalid version %d\n",
+ cfg.hdr.version);
+ return -EINVAL;
+ }
+
+ /* Convert data to little endian for the CRC calculation. */
+ config_data_cpu_to_le(&cfg);
+
+ cfg.hdr.crc = config_data_crc32(&cfg);
+
+ /*
+ * Swap the data from little endian to big endian so it can be
+ * stored.
+ */
+ config_data_swab(&cfg);
+ config_hdr_cpu_to_be(&cfg.hdr);
+
+ st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
+ if (st)
+ return st;
+
+ return 0;
+}
+
+int rsxx_load_config(struct rsxx_cardinfo *card)
+{
+ int st;
+ u32 crc;
+
+ st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
+ &card->config, 1);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed reading card config.\n");
+ return st;
+ }
+
+ config_hdr_be_to_cpu(&card->config.hdr);
+
+ if (card->config.hdr.version == RSXX_CFG_VERSION) {
+ /*
+ * We calculate the CRC with the data in little endian, because
+ * early drivers did not take big endian CPUs into account.
+ * The data is always stored in big endian, so we need to byte
+ * swap it before calculating the CRC.
+ */
+
+ config_data_swab(&card->config);
+
+ /* Check the CRC */
+ crc = config_data_crc32(&card->config);
+ if (crc != card->config.hdr.crc) {
+ dev_err(CARD_TO_DEV(card),
+ "Config corruption detected!\n");
+ dev_info(CARD_TO_DEV(card),
+ "CRC (sb x%08x is x%08x)\n",
+ card->config.hdr.crc, crc);
+ return -EIO;
+ }
+
+ /* Convert the data to CPU byteorder */
+ config_data_le_to_cpu(&card->config);
+
+ } else if (card->config.hdr.version != 0) {
+ dev_err(CARD_TO_DEV(card),
+ "Invalid config version %d.\n",
+ card->config.hdr.version);
+ /*
+ * Config version changes require special handling from the
+ * user
+ */
+ return -EINVAL;
+ } else {
+ dev_info(CARD_TO_DEV(card),
+ "Initializing card configuration.\n");
+ initialize_config(card);
+ st = rsxx_save_config(card);
+ if (st)
+ return st;
+ }
+
+ card->config_valid = 1;
+
+ dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
+ card->config.hdr.version);
+ dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
+ card->config.hdr.crc);
+ dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
+ card->config.data.block_size);
+ dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
+ card->config.data.stripe_size);
+ dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
+ card->config.data.vendor_id);
+ dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
+ card->config.data.cache_order);
+ dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
+ card->config.data.intr_coal.mode);
+ dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
+ card->config.data.intr_coal.count);
+ dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
+ card->config.data.intr_coal.latency);
+
+ return 0;
+}
+
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
new file mode 100644
index 000000000000..e5162487686a
--- /dev/null
+++ b/drivers/block/rsxx/core.c
@@ -0,0 +1,649 @@
+/*
+* Filename: core.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <linux/genhd.h>
+#include <linux/idr.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+#define NO_LEGACY 0
+
+MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
+MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+static unsigned int force_legacy = NO_LEGACY;
+module_param(force_legacy, uint, 0444);
+MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
+
+static DEFINE_IDA(rsxx_disk_ida);
+static DEFINE_SPINLOCK(rsxx_ida_lock);
+
+/*----------------- Interrupt Control & Handling -------------------*/
+static void __enable_intr(unsigned int *mask, unsigned int intr)
+{
+ *mask |= intr;
+}
+
+static void __disable_intr(unsigned int *mask, unsigned int intr)
+{
+ *mask &= ~intr;
+}
+
+/*
+ * NOTE: Disabling the IER will disable the hardware interrupt.
+ * Disabling the ISR will disable the software handling of the ISR bit.
+ *
+ * Enable/Disable interrupt functions assume the card->irq_lock
+ * is held by the caller.
+ */
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+ if (unlikely(card->halt))
+ return;
+
+ __enable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+ __disable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr)
+{
+ if (unlikely(card->halt))
+ return;
+
+ __enable_intr(&card->isr_mask, intr);
+ __enable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr)
+{
+ __disable_intr(&card->isr_mask, intr);
+ __disable_intr(&card->ier_mask, intr);
+ iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+static irqreturn_t rsxx_isr(int irq, void *pdata)
+{
+ struct rsxx_cardinfo *card = pdata;
+ unsigned int isr;
+ int handled = 0;
+ int reread_isr;
+ int i;
+
+ spin_lock(&card->irq_lock);
+
+ do {
+ reread_isr = 0;
+
+ isr = ioread32(card->regmap + ISR);
+ if (isr == 0xffffffff) {
+ /*
+ * A few systems seem to have an intermittent issue
+ * where PCI reads return all Fs, but retrying the read
+ * a little later will return as expected.
+ */
+ dev_info(CARD_TO_DEV(card),
+ "ISR = 0xFFFFFFFF, retrying later\n");
+ break;
+ }
+
+ isr &= card->isr_mask;
+ if (!isr)
+ break;
+
+ for (i = 0; i < card->n_targets; i++) {
+ if (isr & CR_INTR_DMA(i)) {
+ if (card->ier_mask & CR_INTR_DMA(i)) {
+ rsxx_disable_ier(card, CR_INTR_DMA(i));
+ reread_isr = 1;
+ }
+ queue_work(card->ctrl[i].done_wq,
+ &card->ctrl[i].dma_done_work);
+ handled++;
+ }
+ }
+
+ if (isr & CR_INTR_CREG) {
+ schedule_work(&card->creg_ctrl.done_work);
+ handled++;
+ }
+
+ if (isr & CR_INTR_EVENT) {
+ schedule_work(&card->event_work);
+ rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+ handled++;
+ }
+ } while (reread_isr);
+
+ spin_unlock(&card->irq_lock);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*----------------- Card Event Handler -------------------*/
+static char *rsxx_card_state_to_str(unsigned int state)
+{
+ static char *state_strings[] = {
+ "Unknown", "Shutdown", "Starting", "Formatting",
+ "Uninitialized", "Good", "Shutting Down",
+ "Fault", "Read Only Fault", "dStroying"
+ };
+
+ return state_strings[ffs(state)];
+}
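
The lookup above works because the card states are evidently one-bit flags: ffs() returns the 1-based position of the lowest set bit, and 0 for a zero state, which selects "Unknown". A user-space sketch with a made-up state value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	static const char *names[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};
	/* Pretend CARD_STATE_GOOD is bit 4, as the table order implies. */
	unsigned int state = 1u << 4;

	printf("%s\n", names[ffs(state)]);	/* prints "Good" */
	return 0;
}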
+
+static void card_state_change(struct rsxx_cardinfo *card,
+ unsigned int new_state)
+{
+ int st;
+
+ dev_info(CARD_TO_DEV(card),
+ "card state change detected.(%s -> %s)\n",
+ rsxx_card_state_to_str(card->state),
+ rsxx_card_state_to_str(new_state));
+
+ card->state = new_state;
+
+ /* Don't attach DMA interfaces if the card has an invalid config */
+ if (!card->config_valid)
+ return;
+
+ switch (new_state) {
+ case CARD_STATE_RD_ONLY_FAULT:
+ dev_crit(CARD_TO_DEV(card),
+ "Hardware has entered read-only mode!\n");
+ /*
+ * Fall through so the DMA devices can be attached and
+ * the user can attempt to pull off their data.
+ */
+ case CARD_STATE_GOOD:
+ st = rsxx_get_card_size8(card, &card->size8);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed attaching DMA devices\n");
+
+ if (card->config_valid)
+ set_capacity(card->gendisk, card->size8 >> 9);
+ break;
+
+ case CARD_STATE_FAULT:
+ dev_crit(CARD_TO_DEV(card),
+ "Hardware Fault reported!\n");
+ /* Fall through. */
+
+ /* Everything else, detach DMA interface if it's attached. */
+ case CARD_STATE_SHUTDOWN:
+ case CARD_STATE_STARTING:
+ case CARD_STATE_FORMATTING:
+ case CARD_STATE_UNINITIALIZED:
+ case CARD_STATE_SHUTTING_DOWN:
+ /*
+ * dStroy is a term coined by marketing to represent the low level
+ * secure erase.
+ */
+ case CARD_STATE_DSTROYING:
+ set_capacity(card->gendisk, 0);
+ break;
+ }
+}
+
+static void card_event_handler(struct work_struct *work)
+{
+ struct rsxx_cardinfo *card;
+ unsigned int state;
+ unsigned long flags;
+ int st;
+
+ card = container_of(work, struct rsxx_cardinfo, event_work);
+
+ if (unlikely(card->halt))
+ return;
+
+ /*
+ * Enable the interrupt now to avoid any weird race conditions where a
+ * state change might occur while rsxx_get_card_state() is
+ * processing a returned creg cmd.
+ */
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ st = rsxx_get_card_state(card, &state);
+ if (st) {
+ dev_info(CARD_TO_DEV(card),
+ "Failed reading state after event.\n");
+ return;
+ }
+
+ if (card->state != state)
+ card_state_change(card, state);
+
+ if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
+ rsxx_read_hw_log(card);
+}
+
+/*----------------- Card Operations -------------------*/
+static int card_shutdown(struct rsxx_cardinfo *card)
+{
+ unsigned int state;
+ signed long start;
+ const int timeout = msecs_to_jiffies(120000);
+ int st;
+
+ /* We can't issue a shutdown if the card is in a transition state */
+ start = jiffies;
+ do {
+ st = rsxx_get_card_state(card, &state);
+ if (st)
+ return st;
+ } while (state == CARD_STATE_STARTING &&
+ (jiffies - start < timeout));
+
+ if (state == CARD_STATE_STARTING)
+ return -ETIMEDOUT;
+
+ /* Only issue a shutdown if we need to */
+ if ((state != CARD_STATE_SHUTTING_DOWN) &&
+ (state != CARD_STATE_SHUTDOWN)) {
+ st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
+ if (st)
+ return st;
+ }
+
+ start = jiffies;
+ do {
+ st = rsxx_get_card_state(card, &state);
+ if (st)
+ return st;
+ } while (state != CARD_STATE_SHUTDOWN &&
+ (jiffies - start < timeout));
+
+ if (state != CARD_STATE_SHUTDOWN)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/*----------------- Driver Initialization & Setup -------------------*/
+/* Returns: 0 if the driver is compatible with the device
+ -1 if the driver is NOT compatible with the device */
+static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
+{
+ unsigned char pci_rev;
+
+ pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+ if (pci_rev > RS70_PCI_REV_SUPPORTED)
+ return -1;
+ return 0;
+}
+
+static int rsxx_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct rsxx_cardinfo *card;
+ int st;
+
+ dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = dev;
+ pci_set_drvdata(dev, card);
+
+ do {
+ if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
+ st = -ENOMEM;
+ goto failed_ida_get;
+ }
+
+ spin_lock(&rsxx_ida_lock);
+ st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
+ spin_unlock(&rsxx_ida_lock);
+ } while (st == -EAGAIN);
+
+ if (st)
+ goto failed_ida_get;
+
+ st = pci_enable_device(dev);
+ if (st)
+ goto failed_enable;
+
+ pci_set_master(dev);
+ pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+
+ st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "No usable DMA configuration,aborting\n");
+ goto failed_dma_mask;
+ }
+
+ st = pci_request_regions(dev, DRIVER_NAME);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed to request memory region\n");
+ goto failed_request_regions;
+ }
+
+ if (pci_resource_len(dev, 0) == 0) {
+ dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
+ st = -ENOMEM;
+ goto failed_iomap;
+ }
+
+ card->regmap = pci_iomap(dev, 0, 0);
+ if (!card->regmap) {
+ dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
+ st = -ENOMEM;
+ goto failed_iomap;
+ }
+
+ spin_lock_init(&card->irq_lock);
+ card->halt = 0;
+
+ spin_lock_irq(&card->irq_lock);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irq(&card->irq_lock);
+
+ if (!force_legacy) {
+ st = pci_enable_msi(dev);
+ if (st)
+ dev_warn(CARD_TO_DEV(card),
+ "Failed to enable MSI\n");
+ }
+
+ st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
+ DRIVER_NAME, card);
+ if (st) {
+ dev_err(CARD_TO_DEV(card),
+ "Failed requesting IRQ%d\n", dev->irq);
+ goto failed_irq;
+ }
+
+ /************* Setup Processor Command Interface *************/
+ rsxx_creg_setup(card);
+
+ spin_lock_irq(&card->irq_lock);
+ rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
+ spin_unlock_irq(&card->irq_lock);
+
+ st = rsxx_compatibility_check(card);
+ if (st) {
+ dev_warn(CARD_TO_DEV(card),
+ "Incompatible driver detected. Please update the driver.\n");
+ st = -EINVAL;
+ goto failed_compatibility_check;
+ }
+
+ /************* Load Card Config *************/
+ st = rsxx_load_config(card);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed loading card config\n");
+
+ /************* Setup DMA Engine *************/
+ st = rsxx_get_num_targets(card, &card->n_targets);
+ if (st)
+ dev_info(CARD_TO_DEV(card),
+ "Failed reading the number of DMA targets\n");
+
+ card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
+ if (!card->ctrl) {
+ st = -ENOMEM;
+ goto failed_dma_setup;
+ }
+
+ st = rsxx_dma_setup(card);
+ if (st) {
+ dev_info(CARD_TO_DEV(card),
+ "Failed to setup DMA engine\n");
+ goto failed_dma_setup;
+ }
+
+ /************* Setup Card Event Handler *************/
+ INIT_WORK(&card->event_work, card_event_handler);
+
+ st = rsxx_setup_dev(card);
+ if (st)
+ goto failed_create_dev;
+
+ rsxx_get_card_state(card, &card->state);
+
+ dev_info(CARD_TO_DEV(card),
+ "card state: %s\n",
+ rsxx_card_state_to_str(card->state));
+
+ /*
+ * Now that the DMA engine and devices have been set up,
+ * we can enable the event interrupt (it kicks off actions in
+ * those layers, so we couldn't enable it right away).
+ */
+ spin_lock_irq(&card->irq_lock);
+ rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irq(&card->irq_lock);
+
+ if (card->state == CARD_STATE_SHUTDOWN) {
+ st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
+ if (st)
+ dev_crit(CARD_TO_DEV(card),
+ "Failed issuing card startup\n");
+ } else if (card->state == CARD_STATE_GOOD ||
+ card->state == CARD_STATE_RD_ONLY_FAULT) {
+ st = rsxx_get_card_size8(card, &card->size8);
+ if (st)
+ card->size8 = 0;
+ }
+
+ rsxx_attach_dev(card);
+
+ return 0;
+
+failed_create_dev:
+ rsxx_dma_destroy(card);
+failed_dma_setup:
+failed_compatibility_check:
+ spin_lock_irq(&card->irq_lock);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irq(&card->irq_lock);
+ free_irq(dev->irq, card);
+ if (!force_legacy)
+ pci_disable_msi(dev);
+failed_irq:
+ pci_iounmap(dev, card->regmap);
+failed_iomap:
+ pci_release_regions(dev);
+failed_request_regions:
+failed_dma_mask:
+ pci_disable_device(dev);
+failed_enable:
+ spin_lock(&rsxx_ida_lock);
+ ida_remove(&rsxx_disk_ida, card->disk_id);
+ spin_unlock(&rsxx_ida_lock);
+failed_ida_get:
+ kfree(card);
+
+ return st;
+}
+
+static void rsxx_pci_remove(struct pci_dev *dev)
+{
+ struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+ unsigned long flags;
+ int st;
+ int i;
+
+ if (!card)
+ return;
+
+ dev_info(CARD_TO_DEV(card),
+ "Removing PCI-Flash SSD.\n");
+
+ rsxx_detach_dev(card);
+
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ st = card_shutdown(card);
+ if (st)
+ dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
+
+ /* Sync outstanding event handlers. */
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ /* Prevent work_structs from re-queuing themselves. */
+ card->halt = 1;
+
+ cancel_work_sync(&card->event_work);
+
+ rsxx_destroy_dev(card);
+ rsxx_dma_destroy(card);
+
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ free_irq(dev->irq, card);
+
+ if (!force_legacy)
+ pci_disable_msi(dev);
+
+ rsxx_creg_destroy(card);
+
+ pci_iounmap(dev, card->regmap);
+
+ pci_disable_device(dev);
+ pci_release_regions(dev);
+
+ kfree(card);
+}
+
+static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ /* We don't support suspend at this time. */
+ return -ENOSYS;
+}
+
+static void rsxx_pci_shutdown(struct pci_dev *dev)
+{
+ struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+ unsigned long flags;
+ int i;
+
+ if (!card)
+ return;
+
+ dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
+
+ rsxx_detach_dev(card);
+
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ card_shutdown(card);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
+
+static struct pci_driver rsxx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = rsxx_pci_ids,
+ .probe = rsxx_pci_probe,
+ .remove = rsxx_pci_remove,
+ .suspend = rsxx_pci_suspend,
+ .shutdown = rsxx_pci_shutdown,
+};
+
+static int __init rsxx_core_init(void)
+{
+ int st;
+
+ st = rsxx_dev_init();
+ if (st)
+ return st;
+
+ st = rsxx_dma_init();
+ if (st)
+ goto dma_init_failed;
+
+ st = rsxx_creg_init();
+ if (st)
+ goto creg_init_failed;
+
+ return pci_register_driver(&rsxx_pci_driver);
+
+creg_init_failed:
+ rsxx_dma_cleanup();
+dma_init_failed:
+ rsxx_dev_cleanup();
+
+ return st;
+}
+
+static void __exit rsxx_core_cleanup(void)
+{
+ pci_unregister_driver(&rsxx_pci_driver);
+ rsxx_creg_cleanup();
+ rsxx_dma_cleanup();
+ rsxx_dev_cleanup();
+}
+
+module_init(rsxx_core_init);
+module_exit(rsxx_core_cleanup);
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
new file mode 100644
index 000000000000..80bbe639fccd
--- /dev/null
+++ b/drivers/block/rsxx/cregs.c
@@ -0,0 +1,758 @@
+/*
+* Filename: cregs.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+#include "rsxx_priv.h"
+
+#define CREG_TIMEOUT_MSEC 10000
+
+typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st);
+
+struct creg_cmd {
+ struct list_head list;
+ creg_cmd_cb cb;
+ void *cb_private;
+ unsigned int op;
+ unsigned int addr;
+ int cnt8;
+ void *buf;
+ unsigned int stream;
+ unsigned int status;
+};
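+
+/*
+ * Command lifecycle (descriptive note): commands are queued by
+ * creg_queue_cmd() and issued one at a time by creg_kick_queue(). Completions
+ * are handled by the done_work handler (creg_cmd_done), which pops the active
+ * command and invokes its callback. The cmd_timer acts as a watchdog; if it
+ * fires, creg_cmd_timed_out() fails the active command with -ETIMEDOUT.
+ */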
+
+static struct kmem_cache *creg_cmd_pool;
+
+
+/*------------ Private Functions --------------*/
+
+#if defined(__LITTLE_ENDIAN)
+#define LITTLE_ENDIAN 1
+#elif defined(__BIG_ENDIAN)
+#define LITTLE_ENDIAN 0
+#else
+#error Unknown endianness! Aborting...
+#endif
+
+static void copy_to_creg_data(struct rsxx_cardinfo *card,
+ int cnt8,
+ void *buf,
+ unsigned int stream)
+{
+ int i = 0;
+ u32 *data = buf;
+
+ for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+ /*
+ * Firmware implementation makes it necessary to byte swap on
+ * little endian processors.
+ */
+ if (LITTLE_ENDIAN && stream)
+ iowrite32be(data[i], card->regmap + CREG_DATA(i));
+ else
+ iowrite32(data[i], card->regmap + CREG_DATA(i));
+ }
+}
+
+
+static void copy_from_creg_data(struct rsxx_cardinfo *card,
+ int cnt8,
+ void *buf,
+ unsigned int stream)
+{
+ int i = 0;
+ u32 *data = buf;
+
+ for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+ /*
+ * Firmware implementation makes it necessary to byte swap on
+ * little endian processors.
+ */
+ if (LITTLE_ENDIAN && stream)
+ data[i] = ioread32be(card->regmap + CREG_DATA(i));
+ else
+ data[i] = ioread32(card->regmap + CREG_DATA(i));
+ }
+}
+
+static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd;
+
+ /*
+ * Spin lock is needed because this can be called in atomic/interrupt
+ * context.
+ */
+ spin_lock_bh(&card->creg_ctrl.lock);
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ spin_unlock_bh(&card->creg_ctrl.lock);
+
+ return cmd;
+}
+
+static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
+{
+ iowrite32(cmd->addr, card->regmap + CREG_ADD);
+ iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
+
+ if (cmd->op == CREG_OP_WRITE) {
+ if (cmd->buf)
+ copy_to_creg_data(card, cmd->cnt8,
+ cmd->buf, cmd->stream);
+ }
+
+ /*
+ * Data copy must complete before initiating the command. This is
+ * needed for weakly ordered processors (e.g. PowerPC), so that all
+ * necessary registers are written before we kick the hardware.
+ */
+ wmb();
+
+ /* Setting the valid bit will kick off the command. */
+ iowrite32(cmd->op, card->regmap + CREG_CMD);
+}
+
+static void creg_kick_queue(struct rsxx_cardinfo *card)
+{
+ if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
+ return;
+
+ card->creg_ctrl.active = 1;
+ card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
+ struct creg_cmd, list);
+ list_del(&card->creg_ctrl.active_cmd->list);
+ card->creg_ctrl.q_depth--;
+
+ /*
+ * We have to set the timer before we push the new command. Otherwise,
+ * a race could occur if the timer was not cancelled and expired after
+ * the new command was pushed but before it was issued to the hardware.
+ */
+ mod_timer(&card->creg_ctrl.cmd_timer,
+ jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
+
+ creg_issue_cmd(card, card->creg_ctrl.active_cmd);
+}
+
+static int creg_queue_cmd(struct rsxx_cardinfo *card,
+ unsigned int op,
+ unsigned int addr,
+ unsigned int cnt8,
+ void *buf,
+ int stream,
+ creg_cmd_cb callback,
+ void *cb_private)
+{
+ struct creg_cmd *cmd;
+
+ /* Don't queue stuff up if we're halted. */
+ if (unlikely(card->halt))
+ return -EINVAL;
+
+ if (card->creg_ctrl.reset)
+ return -EAGAIN;
+
+ if (cnt8 > MAX_CREG_DATA8)
+ return -EINVAL;
+
+ cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->list);
+
+ cmd->op = op;
+ cmd->addr = addr;
+ cmd->cnt8 = cnt8;
+ cmd->buf = buf;
+ cmd->stream = stream;
+ cmd->cb = callback;
+ cmd->cb_private = cb_private;
+ cmd->status = 0;
+
+ spin_lock(&card->creg_ctrl.lock);
+ list_add_tail(&cmd->list, &card->creg_ctrl.queue);
+ card->creg_ctrl.q_depth++;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+
+ return 0;
+}
+
+static void creg_cmd_timed_out(unsigned long data)
+{
+ struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct creg_cmd *cmd;
+
+ cmd = pop_active_cmd(card);
+ if (cmd == NULL) {
+ card->creg_ctrl.creg_stats.creg_timeout++;
+ dev_warn(CARD_TO_DEV(card),
+ "No active command associated with timeout!\n");
+ return;
+ }
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ETIMEDOUT);
+
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+
+ spin_lock(&card->creg_ctrl.lock);
+ card->creg_ctrl.active = 0;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+}
+
+
+static void creg_cmd_done(struct work_struct *work)
+{
+ struct rsxx_cardinfo *card;
+ struct creg_cmd *cmd;
+ int st = 0;
+
+ card = container_of(work, struct rsxx_cardinfo,
+ creg_ctrl.done_work);
+
+ /*
+ * If the timer could not be cancelled for some reason,
+ * race to pop the active command.
+ */
+ if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
+ card->creg_ctrl.creg_stats.failed_cancel_timer++;
+
+ cmd = pop_active_cmd(card);
+ if (cmd == NULL) {
+ dev_err(CARD_TO_DEV(card),
+ "Spurious creg interrupt!\n");
+ return;
+ }
+
+ card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
+ cmd->status = card->creg_ctrl.creg_stats.stat;
+ if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
+ dev_err(CARD_TO_DEV(card),
+ "Invalid status on creg command\n");
+ /*
+ * At this point we're probably reading garbage from HW. Don't
+ * do anything else that could mess up the system and let
+ * the sync function return an error.
+ */
+ st = -EIO;
+ goto creg_done;
+ } else if (cmd->status & CREG_STAT_ERROR) {
+ st = -EIO;
+ }
+
+ if (cmd->op == CREG_OP_READ) {
+ unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
+
+ /* Paranoid Sanity Checks */
+ if (!cmd->buf) {
+ dev_err(CARD_TO_DEV(card),
+ "Buffer not given for read.\n");
+ st = -EIO;
+ goto creg_done;
+ }
+ if (cnt8 != cmd->cnt8) {
+ dev_err(CARD_TO_DEV(card),
+ "count mismatch\n");
+ st = -EIO;
+ goto creg_done;
+ }
+
+ copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+ }
+
+creg_done:
+ if (cmd->cb)
+ cmd->cb(card, cmd, st);
+
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+ spin_lock(&card->creg_ctrl.lock);
+ card->creg_ctrl.active = 0;
+ creg_kick_queue(card);
+ spin_unlock(&card->creg_ctrl.lock);
+}
+
+static void creg_reset(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd = NULL;
+ struct creg_cmd *tmp;
+ unsigned long flags;
+
+ /*
+ * mutex_trylock is used here because if reset_lock is taken then a
+ * reset is already happening. So, we can just go ahead and return.
+ */
+ if (!mutex_trylock(&card->creg_ctrl.reset_lock))
+ return;
+
+ card->creg_ctrl.reset = 1;
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ dev_warn(CARD_TO_DEV(card),
+ "Resetting creg interface for recovery\n");
+
+ /* Cancel outstanding commands */
+ spin_lock(&card->creg_ctrl.lock);
+ list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+ list_del(&cmd->list);
+ card->creg_ctrl.q_depth--;
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+ }
+
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ if (cmd) {
+ if (timer_pending(&card->creg_ctrl.cmd_timer))
+ del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+
+ card->creg_ctrl.active = 0;
+ }
+ spin_unlock(&card->creg_ctrl.lock);
+
+ card->creg_ctrl.reset = 0;
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+
+ mutex_unlock(&card->creg_ctrl.reset_lock);
+}
+
+/* Used for synchronous accesses */
+struct creg_completion {
+ struct completion *cmd_done;
+ int st;
+ u32 creg_status;
+};
+
+static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st)
+{
+ struct creg_completion *cmd_completion;
+
+ cmd_completion = cmd->cb_private;
+ BUG_ON(!cmd_completion);
+
+ cmd_completion->st = st;
+ cmd_completion->creg_status = cmd->status;
+ complete(cmd_completion->cmd_done);
+}
+
+static int __issue_creg_rw(struct rsxx_cardinfo *card,
+ unsigned int op,
+ unsigned int addr,
+ unsigned int cnt8,
+ void *buf,
+ int stream,
+ unsigned int *hw_stat)
+{
+ DECLARE_COMPLETION_ONSTACK(cmd_done);
+ struct creg_completion completion;
+ unsigned long timeout;
+ int st;
+
+ completion.cmd_done = &cmd_done;
+ completion.st = 0;
+ completion.creg_status = 0;
+
+ st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
+ &completion);
+ if (st)
+ return st;
+
+ /*
+ * This timeout is necessary for unresponsive hardware. The additional
+ * 20 seconds is used to guarantee that each creg request has time to
+ * complete.
+ */
+ timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
+ card->creg_ctrl.q_depth) + 20000);
+
+ /*
+ * The creg interface is guaranteed to complete. It has a timeout
+ * mechanism that will kick in if hardware does not respond.
+ */
+ st = wait_for_completion_timeout(completion.cmd_done, timeout);
+ if (st == 0) {
+ /*
+ * This is really bad, because the kernel timer did not
+ * expire and notify us of a timeout!
+ */
+ dev_crit(CARD_TO_DEV(card),
+ "cregs timer failed\n");
+ creg_reset(card);
+ return -EIO;
+ }
+
+ *hw_stat = completion.creg_status;
+
+ if (completion.st) {
+ dev_warn(CARD_TO_DEV(card),
+ "creg command failed(%d x%08x)\n",
+ completion.st, addr);
+ return completion.st;
+ }
+
+ return 0;
+}
+
+static int issue_creg_rw(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int stream,
+ int read)
+{
+ unsigned int hw_stat;
+ unsigned int xfer;
+ unsigned int op;
+ int st;
+
+ op = read ? CREG_OP_READ : CREG_OP_WRITE;
+
+ do {
+ xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
+
+ st = __issue_creg_rw(card, op, addr, xfer,
+ data, stream, &hw_stat);
+ if (st)
+ return st;
+
+ data = (char *)data + xfer;
+ addr += xfer;
+ size8 -= xfer;
+ } while (size8);
+
+ return 0;
+}
+
+/* ---------------------------- Public API ---------------------------------- */
+int rsxx_creg_write(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream)
+{
+ return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
+}
+
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream)
+{
+ return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
+}
+
+int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
+{
+ return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
+ sizeof(*state), state, 0);
+}
+
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
+{
+ unsigned int size;
+ int st;
+
+ st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
+ sizeof(size), &size, 0);
+ if (st)
+ return st;
+
+ *size8 = (u64)size * RSXX_HW_BLK_SIZE;
+ return 0;
+}
+
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+ unsigned int *n_targets)
+{
+ return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
+ sizeof(*n_targets), n_targets, 0);
+}
+
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+ u32 *capabilities)
+{
+ return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
+ sizeof(*capabilities), capabilities, 0);
+}
+
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
+{
+ return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
+ sizeof(cmd), &cmd, 0);
+}
+
+
+/*----------------- HW Log Functions -------------------*/
+static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
+{
+ static char level;
+
+ /*
+ * New messages start with "<#>", where # is the log level. Messages
+ * that extend past the log buffer will use the previous level.
+ */
+ if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
+ level = str[1];
+ str += 3; /* Skip past the log level. */
+ len -= 3;
+ }
+
+ switch (level) {
+ case '0':
+ dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '1':
+ dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '2':
+ dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '3':
+ dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '4':
+ dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '5':
+ dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '6':
+ dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ case '7':
+ dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ default:
+ dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+ break;
+ }
+}
+
+/*
+ * The substrncpy function copies at most count bytes of the src string
+ * (including the terminating '\0' character, if reached) into the dest
+ * pointer. Returns the number of bytes copied to dest.
+ */
+static int substrncpy(char *dest, const char *src, int count)
+{
+ int max_cnt = count;
+
+ while (count) {
+ count--;
+ *dest = *src;
+ if (*dest == '\0')
+ break;
+ src++;
+ dest++;
+ }
+ return max_cnt - count;
+}
+
+
+static void read_hw_log_done(struct rsxx_cardinfo *card,
+ struct creg_cmd *cmd,
+ int st)
+{
+ char *buf;
+ char *log_str;
+ int cnt;
+ int len;
+ int off;
+
+ buf = cmd->buf;
+ off = 0;
+
+ /* Failed getting the log message */
+ if (st)
+ return;
+
+ while (off < cmd->cnt8) {
+ log_str = &card->log.buf[card->log.buf_len];
+ cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
+ len = substrncpy(log_str, &buf[off], cnt);
+
+ off += len;
+ card->log.buf_len += len;
+
+ /*
+ * Flush the log if we've hit the end of a message or if we've
+ * run out of buffer space.
+ */
+ if ((log_str[len - 1] == '\0') ||
+ (card->log.buf_len == LOG_BUF_SIZE8)) {
+ if (card->log.buf_len != 1) /* Don't log blank lines. */
+ hw_log_msg(card, card->log.buf,
+ card->log.buf_len);
+ card->log.buf_len = 0;
+ }
+
+ }
+
+ if (cmd->status & CREG_STAT_LOG_PENDING)
+ rsxx_read_hw_log(card);
+}
+
+int rsxx_read_hw_log(struct rsxx_cardinfo *card)
+{
+ int st;
+
+ st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
+ sizeof(card->log.tmp), card->log.tmp,
+ 1, read_hw_log_done, NULL);
+ if (st)
+ dev_err(CARD_TO_DEV(card),
+ "Failed getting log text\n");
+
+ return st;
+}
+
+/*-------------- IOCTL REG Access ------------------*/
+static int issue_reg_cmd(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access *cmd,
+ int read)
+{
+ unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
+
+ return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
+ cmd->stream, &cmd->stat);
+}
+
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access __user *ucmd,
+ int read)
+{
+ struct rsxx_reg_access cmd;
+ int st;
+
+ st = copy_from_user(&cmd, ucmd, sizeof(cmd));
+ if (st)
+ return -EFAULT;
+
+ if (cmd.cnt > RSXX_MAX_REG_CNT)
+ return -EFAULT;
+
+ st = issue_reg_cmd(card, &cmd, read);
+ if (st)
+ return st;
+
+ st = put_user(cmd.stat, &ucmd->stat);
+ if (st)
+ return -EFAULT;
+
+ if (read) {
+ st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
+ if (st)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*------------ Initialization & Setup --------------*/
+int rsxx_creg_setup(struct rsxx_cardinfo *card)
+{
+ card->creg_ctrl.active_cmd = NULL;
+
+ INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
+ mutex_init(&card->creg_ctrl.reset_lock);
+ INIT_LIST_HEAD(&card->creg_ctrl.queue);
+ spin_lock_init(&card->creg_ctrl.lock);
+ setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
+ (unsigned long) card);
+
+ return 0;
+}
+
+void rsxx_creg_destroy(struct rsxx_cardinfo *card)
+{
+ struct creg_cmd *cmd;
+ struct creg_cmd *tmp;
+ int cnt = 0;
+
+ /* Cancel outstanding commands */
+ spin_lock(&card->creg_ctrl.lock);
+ list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+ list_del(&cmd->list);
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ kmem_cache_free(creg_cmd_pool, cmd);
+ cnt++;
+ }
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Canceled %d queue creg commands\n", cnt);
+
+ cmd = card->creg_ctrl.active_cmd;
+ card->creg_ctrl.active_cmd = NULL;
+ if (cmd) {
+ if (timer_pending(&card->creg_ctrl.cmd_timer))
+ del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+ if (cmd->cb)
+ cmd->cb(card, cmd, -ECANCELED);
+ dev_info(CARD_TO_DEV(card),
+ "Canceled active creg command\n");
+ kmem_cache_free(creg_cmd_pool, cmd);
+ }
+ spin_unlock(&card->creg_ctrl.lock);
+
+ cancel_work_sync(&card->creg_ctrl.done_work);
+}
+
+
+int rsxx_creg_init(void)
+{
+ creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
+ if (!creg_cmd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rsxx_creg_cleanup(void)
+{
+ kmem_cache_destroy(creg_cmd_pool);
+}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
new file mode 100644
index 000000000000..4346d17d2949
--- /dev/null
+++ b/drivers/block/rsxx/dev.c
@@ -0,0 +1,367 @@
+/*
+* Filename: dev.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include <linux/fs.h>
+
+#include "rsxx_priv.h"
+
+static unsigned int blkdev_minors = 64;
+module_param(blkdev_minors, uint, 0444);
+MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)");
+
+/*
+ * For now I'm making this tweakable in case any applications hit this limit.
+ * If you see a "bio too big" error in the log you will need to raise this
+ * value.
+ */
+static unsigned int blkdev_max_hw_sectors = 1024;
+module_param(blkdev_max_hw_sectors, uint, 0444);
+MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
+
+static unsigned int enable_blkdev = 1;
+module_param(enable_blkdev, uint, 0444);
+MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
+
+
+struct rsxx_bio_meta {
+ struct bio *bio;
+ atomic_t pending_dmas;
+ atomic_t error;
+ unsigned long start_time;
+};
+
+static struct kmem_cache *bio_meta_pool;
+
+/*----------------- Block Device Operations -----------------*/
+static int rsxx_blkdev_ioctl(struct block_device *bdev,
+ fmode_t mode,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case RSXX_GETREG:
+ return rsxx_reg_access(card, (void __user *)arg, 1);
+ case RSXX_SETREG:
+ return rsxx_reg_access(card, (void __user *)arg, 0);
+ }
+
+ return -ENOTTY;
+}
+
+static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+ u64 blocks = card->size8 >> 9;
+
+ /*
+ * get geometry: Fake it. I haven't found any drivers that set
+ * geo->start, so we won't either.
+ */
+ if (card->size8) {
+ geo->heads = 64;
+ geo->sectors = 16;
+ do_div(blocks, (geo->heads * geo->sectors));
+ geo->cylinders = blocks;
+ } else {
+ geo->heads = 0;
+ geo->sectors = 0;
+ geo->cylinders = 0;
+ }
+ return 0;
+}
+
+static const struct block_device_operations rsxx_fops = {
+ .owner = THIS_MODULE,
+ .getgeo = rsxx_getgeo,
+ .ioctl = rsxx_blkdev_ioctl,
+};
+
+static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
+{
+ struct hd_struct *part0 = &card->gendisk->part0;
+ int rw = bio_data_dir(bio);
+ int cpu;
+
+ cpu = part_stat_lock();
+
+ part_round_stats(cpu, part0);
+ part_inc_in_flight(part0, rw);
+
+ part_stat_unlock();
+}
+
+static void disk_stats_complete(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ unsigned long start_time)
+{
+ struct hd_struct *part0 = &card->gendisk->part0;
+ unsigned long duration = jiffies - start_time;
+ int rw = bio_data_dir(bio);
+ int cpu;
+
+ cpu = part_stat_lock();
+
+ part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
+ part_stat_inc(cpu, part0, ios[rw]);
+ part_stat_add(cpu, part0, ticks[rw], duration);
+
+ part_round_stats(cpu, part0);
+ part_dec_in_flight(part0, rw);
+
+ part_stat_unlock();
+}
+
+static void bio_dma_done_cb(struct rsxx_cardinfo *card,
+ void *cb_data,
+ unsigned int error)
+{
+ struct rsxx_bio_meta *meta = cb_data;
+
+ if (error)
+ atomic_set(&meta->error, 1);
+
+ if (atomic_dec_and_test(&meta->pending_dmas)) {
+ disk_stats_complete(card, meta->bio, meta->start_time);
+
+ bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
+ kmem_cache_free(bio_meta_pool, meta);
+ }
+}
+
+static void rsxx_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct rsxx_cardinfo *card = q->queuedata;
+ struct rsxx_bio_meta *bio_meta;
+ int st = -EINVAL;
+
+ might_sleep();
+
+ if (unlikely(card->halt)) {
+ st = -EFAULT;
+ goto req_err;
+ }
+
+ if (unlikely(card->dma_fault)) {
+ st = -EFAULT;
+ goto req_err;
+ }
+
+ if (bio->bi_size == 0) {
+ dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
+ goto req_err;
+ }
+
+ bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
+ if (!bio_meta) {
+ st = -ENOMEM;
+ goto req_err;
+ }
+
+ bio_meta->bio = bio;
+ atomic_set(&bio_meta->error, 0);
+ atomic_set(&bio_meta->pending_dmas, 0);
+ bio_meta->start_time = jiffies;
+
+ disk_stats_start(card, bio);
+
+ dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
+ bio_data_dir(bio) ? 'W' : 'R', bio_meta,
+ (u64)bio->bi_sector << 9, bio->bi_size);
+
+ st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
+ bio_dma_done_cb, bio_meta);
+ if (st)
+ goto queue_err;
+
+ return;
+
+queue_err:
+ kmem_cache_free(bio_meta_pool, bio_meta);
+req_err:
+ bio_endio(bio, st);
+}
+
+/*----------------- Device Setup -------------------*/
+static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
+{
+ unsigned char pci_rev;
+
+ pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+ return (pci_rev >= RSXX_DISCARD_SUPPORT);
+}
+
+static unsigned short rsxx_get_logical_block_size(
+ struct rsxx_cardinfo *card)
+{
+ u32 capabilities = 0;
+ int st;
+
+ st = rsxx_get_card_capabilities(card, &capabilities);
+ if (st)
+ dev_warn(CARD_TO_DEV(card),
+ "Failed reading card capabilities register\n");
+
+ /* Earlier firmware did not have support for 512 byte accesses */
+ if (capabilities & CARD_CAP_SUBPAGE_WRITES)
+ return 512;
+ else
+ return RSXX_HW_BLK_SIZE;
+}
+
+int rsxx_attach_dev(struct rsxx_cardinfo *card)
+{
+ mutex_lock(&card->dev_lock);
+
+ /* The block device requires the stripe size from the config. */
+ if (enable_blkdev) {
+ if (card->config_valid)
+ set_capacity(card->gendisk, card->size8 >> 9);
+ else
+ set_capacity(card->gendisk, 0);
+ add_disk(card->gendisk);
+
+ card->bdev_attached = 1;
+ }
+
+ mutex_unlock(&card->dev_lock);
+
+ return 0;
+}
+
+void rsxx_detach_dev(struct rsxx_cardinfo *card)
+{
+ mutex_lock(&card->dev_lock);
+
+ if (card->bdev_attached) {
+ del_gendisk(card->gendisk);
+ card->bdev_attached = 0;
+ }
+
+ mutex_unlock(&card->dev_lock);
+}
+
+int rsxx_setup_dev(struct rsxx_cardinfo *card)
+{
+ unsigned short blk_size;
+
+ mutex_init(&card->dev_lock);
+
+ if (!enable_blkdev)
+ return 0;
+
+ card->major = register_blkdev(0, DRIVER_NAME);
+ if (card->major < 0) {
+ dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
+ return -ENOMEM;
+ }
+
+ card->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!card->queue) {
+ dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
+ unregister_blkdev(card->major, DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ card->gendisk = alloc_disk(blkdev_minors);
+ if (!card->gendisk) {
+ dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
+ blk_cleanup_queue(card->queue);
+ unregister_blkdev(card->major, DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ blk_size = rsxx_get_logical_block_size(card);
+
+ blk_queue_make_request(card->queue, rsxx_make_request);
+ blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
+ blk_queue_dma_alignment(card->queue, blk_size - 1);
+ blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
+ blk_queue_logical_block_size(card->queue, blk_size);
+ blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+ if (rsxx_discard_supported(card)) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+ blk_queue_max_discard_sectors(card->queue,
+ RSXX_HW_BLK_SIZE >> 9);
+ card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
+ card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
+ card->queue->limits.discard_zeroes_data = 1;
+ }
+
+ card->queue->queuedata = card;
+
+ snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
+ "rsxx%d", card->disk_id);
+ card->gendisk->driverfs_dev = &card->dev->dev;
+ card->gendisk->major = card->major;
+ card->gendisk->first_minor = 0;
+ card->gendisk->fops = &rsxx_fops;
+ card->gendisk->private_data = card;
+ card->gendisk->queue = card->queue;
+
+ return 0;
+}
+
+void rsxx_destroy_dev(struct rsxx_cardinfo *card)
+{
+ if (!enable_blkdev)
+ return;
+
+ put_disk(card->gendisk);
+ card->gendisk = NULL;
+
+ blk_cleanup_queue(card->queue);
+ unregister_blkdev(card->major, DRIVER_NAME);
+}
+
+int rsxx_dev_init(void)
+{
+ bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
+ if (!bio_meta_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rsxx_dev_cleanup(void)
+{
+ kmem_cache_destroy(bio_meta_pool);
+}
+
+
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
new file mode 100644
index 000000000000..63176e67662f
--- /dev/null
+++ b/drivers/block/rsxx/dma.c
@@ -0,0 +1,998 @@
+/*
+* Filename: dma.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/slab.h>
+#include "rsxx_priv.h"
+
+struct rsxx_dma {
+ struct list_head list;
+ u8 cmd;
+ unsigned int laddr; /* Logical address on the RamSan */
+ struct {
+ u32 off;
+ u32 cnt;
+ } sub_page;
+ dma_addr_t dma_addr;
+ struct page *page;
+ unsigned int pg_off; /* Page Offset */
+ rsxx_dma_cb cb;
+ void *cb_data;
+};
+
+/* This timeout is used to detect a stalled DMA channel */
+#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
+
+struct hw_status {
+ u8 status;
+ u8 tag;
+ __le16 count;
+ __le32 _rsvd2;
+ __le64 _rsvd3;
+} __packed;
+
+enum rsxx_dma_status {
+ DMA_SW_ERR = 0x1,
+ DMA_HW_FAULT = 0x2,
+ DMA_CANCELLED = 0x4,
+};
+
+struct hw_cmd {
+ u8 command;
+ u8 tag;
+ u8 _rsvd;
+ u8 sub_page; /* Bit[0:2]: 512byte offset */
+ /* Bit[4:6]: 512byte count */
+ __le32 device_addr;
+ __le64 host_addr;
+} __packed;
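+
+/*
+ * Illustrative sub_page encoding (derived from the bit layout above): a
+ * 1024-byte transfer starting 512 bytes into a 4K block is encoded as
+ * sub_page = (2 << 4) | 1 = 0x21 (count = 2 sectors, offset = 1 sector).
+ */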
+
+enum rsxx_hw_cmd {
+ HW_CMD_BLK_DISCARD = 0x70,
+ HW_CMD_BLK_WRITE = 0x80,
+ HW_CMD_BLK_READ = 0xC0,
+ HW_CMD_BLK_RECON_READ = 0xE0,
+};
+
+enum rsxx_hw_status {
+ HW_STATUS_CRC = 0x01,
+ HW_STATUS_HARD_ERR = 0x02,
+ HW_STATUS_SOFT_ERR = 0x04,
+ HW_STATUS_FAULT = 0x08,
+};
+
+#define STATUS_BUFFER_SIZE8 4096
+#define COMMAND_BUFFER_SIZE8 4096
+
+static struct kmem_cache *rsxx_dma_pool;
+
+struct dma_tracker {
+ int next_tag;
+ struct rsxx_dma *dma;
+};
+
+#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
+ (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
+
+struct dma_tracker_list {
+ spinlock_t lock;
+ int head;
+ struct dma_tracker list[0];
+};
+
+
+/*----------------- Misc Utility Functions -------------------*/
+static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
+{
+ unsigned long long tgt_addr8;
+
+ tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
+ card->_stripe.upper_mask) |
+ ((addr8) & card->_stripe.lower_mask);
+ do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
+ return tgt_addr8;
+}
+
+static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
+{
+ unsigned int tgt;
+
+ tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
+
+ return tgt;
+}
+
+static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+{
+ /* Reset all DMA Command/Status Queues */
+ iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
+}
+
+static unsigned int get_dma_size(struct rsxx_dma *dma)
+{
+ if (dma->sub_page.cnt)
+ return dma->sub_page.cnt << 9;
+ else
+ return RSXX_HW_BLK_SIZE;
+}
+
+
+/*----------------- DMA Tracker -------------------*/
+static void set_tracker_dma(struct dma_tracker_list *trackers,
+ int tag,
+ struct rsxx_dma *dma)
+{
+ trackers->list[tag].dma = dma;
+}
+
+static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
+ int tag)
+{
+ return trackers->list[tag].dma;
+}
+
+static int pop_tracker(struct dma_tracker_list *trackers)
+{
+ int tag;
+
+ spin_lock(&trackers->lock);
+ tag = trackers->head;
+ if (tag != -1) {
+ trackers->head = trackers->list[tag].next_tag;
+ trackers->list[tag].next_tag = -1;
+ }
+ spin_unlock(&trackers->lock);
+
+ return tag;
+}
+
+static void push_tracker(struct dma_tracker_list *trackers, int tag)
+{
+ spin_lock(&trackers->lock);
+ trackers->list[tag].next_tag = trackers->head;
+ trackers->head = tag;
+ trackers->list[tag].dma = NULL;
+ spin_unlock(&trackers->lock);
+}
+
+
+/*----------------- Interrupt Coalescing -------------*/
+/*
+ * Interrupt Coalescing Register Format:
+ * Interrupt Timer (64ns units) [15:0]
+ * Interrupt Count [24:16]
+ * Reserved [31:25]
+ */
+#define INTR_COAL_LATENCY_MASK (0x0000ffff)
+
+#define INTR_COAL_COUNT_SHIFT 16
+#define INTR_COAL_COUNT_BITS 9
+#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
+ INTR_COAL_COUNT_SHIFT)
+#define INTR_COAL_LATENCY_UNITS_NS 64
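+
+/*
+ * Worked example (illustrative values): count = 8 and latency = 640ns give
+ * latency_units = 640 / 64 = 10, so the register value computed below is
+ * (8 << 16) | 10 = 0x0008000a.
+ */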
+
+
+static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
+{
+ u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
+
+ if (mode == RSXX_INTR_COAL_DISABLED)
+ return 0;
+
+ return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
+ (latency_units & INTR_COAL_LATENCY_MASK);
+
+}
+
+static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
+{
+ int i;
+ u32 q_depth = 0;
+ u32 intr_coal;
+
+ if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+ return;
+
+ for (i = 0; i < card->n_targets; i++)
+ q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
+
+ intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+ q_depth / 2,
+ card->config.data.intr_coal.latency);
+ iowrite32(intr_coal, card->regmap + INTR_COAL);
+}
+
+/*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+ struct rsxx_dma *dma,
+ unsigned int status)
+{
+ if (status & DMA_SW_ERR)
+ printk_ratelimited(KERN_ERR
+ "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+ if (status & DMA_HW_FAULT)
+ printk_ratelimited(KERN_ERR
+ "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+ if (status & DMA_CANCELLED)
+ printk_ratelimited(KERN_ERR
+ "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
+ dma->cmd, dma->laddr);
+
+ if (dma->dma_addr)
+ pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+
+ if (dma->cb)
+ dma->cb(card, dma->cb_data, status ? 1 : 0);
+
+ kmem_cache_free(rsxx_dma_pool, dma);
+}
+
+static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
+ struct rsxx_dma *dma)
+{
+ /*
+ * Requeued DMAs go to the front of the queue so they are issued
+ * first.
+ */
+ spin_lock(&ctrl->queue_lock);
+ list_add(&dma->list, &ctrl->queue);
+ spin_unlock(&ctrl->queue_lock);
+}
+
+static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
+ struct rsxx_dma *dma,
+ u8 hw_st)
+{
+ unsigned int status = 0;
+ int requeue_cmd = 0;
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
+ dma->cmd, dma->laddr, hw_st);
+
+ if (hw_st & HW_STATUS_CRC)
+ ctrl->stats.crc_errors++;
+ if (hw_st & HW_STATUS_HARD_ERR)
+ ctrl->stats.hard_errors++;
+ if (hw_st & HW_STATUS_SOFT_ERR)
+ ctrl->stats.soft_errors++;
+
+ switch (dma->cmd) {
+ case HW_CMD_BLK_READ:
+ if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+ if (ctrl->card->scrub_hard) {
+ dma->cmd = HW_CMD_BLK_RECON_READ;
+ requeue_cmd = 1;
+ ctrl->stats.reads_retried++;
+ } else {
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+ } else if (hw_st & HW_STATUS_FAULT) {
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+
+ break;
+ case HW_CMD_BLK_RECON_READ:
+ if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+ /* Data could not be reconstructed. */
+ status |= DMA_HW_FAULT;
+ ctrl->stats.reads_failed++;
+ }
+
+ break;
+ case HW_CMD_BLK_WRITE:
+ status |= DMA_HW_FAULT;
+ ctrl->stats.writes_failed++;
+
+ break;
+ case HW_CMD_BLK_DISCARD:
+ status |= DMA_HW_FAULT;
+ ctrl->stats.discards_failed++;
+
+ break;
+ default:
+ dev_err(CARD_TO_DEV(ctrl->card),
+ "Unknown command in DMA!(cmd: x%02x "
+ "laddr x%08x st: x%02x\n",
+ dma->cmd, dma->laddr, hw_st);
+ status |= DMA_SW_ERR;
+
+ break;
+ }
+
+ if (requeue_cmd)
+ rsxx_requeue_dma(ctrl, dma);
+ else
+ rsxx_complete_dma(ctrl->card, dma, status);
+}
+
+static void dma_engine_stalled(unsigned long data)
+{
+ struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+
+ if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+ return;
+
+ if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
+ /*
+ * The dma engine was stalled because the SW_CMD_IDX write
+ * was lost. Issue it again to recover.
+ */
+ dev_warn(CARD_TO_DEV(ctrl->card),
+ "SW_CMD_IDX write was lost, re-writing...\n");
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+ } else {
+ dev_warn(CARD_TO_DEV(ctrl->card),
+ "DMA channel %d has stalled, faulting interface.\n",
+ ctrl->id);
+ ctrl->card->dma_fault = 1;
+ }
+}
+
+static void rsxx_issue_dmas(struct work_struct *work)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ int tag;
+ int cmds_pending = 0;
+ struct hw_cmd *hw_cmd_buf;
+
+ ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
+ hw_cmd_buf = ctrl->cmd.buf;
+
+ if (unlikely(ctrl->card->halt))
+ return;
+
+ while (1) {
+ spin_lock(&ctrl->queue_lock);
+ if (list_empty(&ctrl->queue)) {
+ spin_unlock(&ctrl->queue_lock);
+ break;
+ }
+ spin_unlock(&ctrl->queue_lock);
+
+ tag = pop_tracker(ctrl->trackers);
+ if (tag == -1)
+ break;
+
+ spin_lock(&ctrl->queue_lock);
+ dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
+ list_del(&dma->list);
+ ctrl->stats.sw_q_depth--;
+ spin_unlock(&ctrl->queue_lock);
+
+ /*
+ * This will catch any DMAs that slipped in right before the
+ * fault, but were queued after all the other DMAs were
+ * cancelled.
+ */
+ if (unlikely(ctrl->card->dma_fault)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+ continue;
+ }
+
+ set_tracker_dma(ctrl->trackers, tag, dma);
+ hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
+ hw_cmd_buf[ctrl->cmd.idx].tag = tag;
+ hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
+ hw_cmd_buf[ctrl->cmd.idx].sub_page =
+ ((dma->sub_page.cnt & 0x7) << 4) |
+ (dma->sub_page.off & 0x7);
+
+ hw_cmd_buf[ctrl->cmd.idx].device_addr =
+ cpu_to_le32(dma->laddr);
+
+ hw_cmd_buf[ctrl->cmd.idx].host_addr =
+ cpu_to_le64(dma->dma_addr);
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Issue DMA%d(laddr %d tag %d) to idx %d\n",
+ ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
+
+ ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
+ cmds_pending++;
+
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ ctrl->stats.writes_issued++;
+ else if (dma->cmd == HW_CMD_BLK_DISCARD)
+ ctrl->stats.discards_issued++;
+ else
+ ctrl->stats.reads_issued++;
+ }
+
+ /* Let HW know we've queued commands. */
+ if (cmds_pending) {
+ /*
+ * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
+ * (which is in PCI-consistent system-memory) from the loop
+ * above make it into the coherency domain before the
+ * following PIO "trigger" updating the cmd.idx. A WMB is
+ * sufficient. We need not explicitly CPU cache-flush since
+ * the memory is a PCI-consistent (i.e. coherent) mapping.
+ */
+ wmb();
+
+ atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+ }
+}
+
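+/*
+ * Completion handling (descriptive note): the hardware stamps each status
+ * entry with an incrementing 16-bit count. The loop below treats the entry at
+ * status.idx as valid only while its count field matches the driver's e_cnt
+ * sequence counter; e_cnt and status.idx advance together as entries are
+ * consumed.
+ */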
+static void rsxx_dma_done(struct work_struct *work)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ unsigned long flags;
+ u16 count;
+ u8 status;
+ u8 tag;
+ struct hw_status *hw_st_buf;
+
+ ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+ hw_st_buf = ctrl->status.buf;
+
+ if (unlikely(ctrl->card->halt) ||
+ unlikely(ctrl->card->dma_fault))
+ return;
+
+ count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+
+ while (count == ctrl->e_cnt) {
+ /*
+ * The read memory-barrier is necessary to keep aggressive
+ * processors/optimizers (such as the PPC Apple G5) from
+ * reordering the following status-buffer tag & status read
+ * *before* the count read on subsequent iterations of the
+ * loop!
+ */
+ rmb();
+
+ status = hw_st_buf[ctrl->status.idx].status;
+ tag = hw_st_buf[ctrl->status.idx].tag;
+
+ dma = get_tracker_dma(ctrl->trackers, tag);
+ if (dma == NULL) {
+ spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+ rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
+ spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+ dev_err(CARD_TO_DEV(ctrl->card),
+ "No tracker for tag %d "
+ "(idx %d id %d)\n",
+ tag, ctrl->status.idx, ctrl->id);
+ return;
+ }
+
+ dev_dbg(CARD_TO_DEV(ctrl->card),
+ "Completing DMA%d"
+ "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
+ ctrl->id, dma->laddr, tag, status, count,
+ ctrl->status.idx);
+
+ atomic_dec(&ctrl->stats.hw_q_depth);
+
+ mod_timer(&ctrl->activity_timer,
+ jiffies + DMA_ACTIVITY_TIMEOUT);
+
+ if (status)
+ rsxx_handle_dma_error(ctrl, dma, status);
+ else
+ rsxx_complete_dma(ctrl->card, dma, 0);
+
+ push_tracker(ctrl->trackers, tag);
+
+ ctrl->status.idx = (ctrl->status.idx + 1) &
+ RSXX_CS_IDX_MASK;
+ ctrl->e_cnt++;
+
+ count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+ }
+
+ dma_intr_coal_auto_tune(ctrl->card);
+
+ if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+ del_timer_sync(&ctrl->activity_timer);
+
+ spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+ rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
+ spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+ spin_lock(&ctrl->queue_lock);
+ if (ctrl->stats.sw_q_depth)
+ queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
+ spin_unlock(&ctrl->queue_lock);
+}
+
+static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
+ struct list_head *q)
+{
+ struct rsxx_dma *dma;
+ struct rsxx_dma *tmp;
+ int cnt = 0;
+
+ list_for_each_entry_safe(dma, tmp, q, list) {
+ list_del(&dma->list);
+
+ if (dma->dma_addr)
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ (dma->cmd == HW_CMD_BLK_WRITE) ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ kmem_cache_free(rsxx_dma_pool, dma);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+ struct list_head *q,
+ unsigned int laddr,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct rsxx_dma *dma;
+
+ dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->cmd = HW_CMD_BLK_DISCARD;
+ dma->laddr = laddr;
+ dma->dma_addr = 0;
+ dma->sub_page.off = 0;
+ dma->sub_page.cnt = 0;
+ dma->page = NULL;
+ dma->pg_off = 0;
+ dma->cb = cb;
+ dma->cb_data = cb_data;
+
+ dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
+
+ list_add_tail(&dma->list, q);
+
+ return 0;
+}
+
+static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+ struct list_head *q,
+ int dir,
+ unsigned int dma_off,
+ unsigned int dma_len,
+ unsigned int laddr,
+ struct page *page,
+ unsigned int pg_off,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct rsxx_dma *dma;
+
+ dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
+ dir ? PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ if (!dma->dma_addr) {
+ kmem_cache_free(rsxx_dma_pool, dma);
+ return -ENOMEM;
+ }
+
+ dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
+ dma->laddr = laddr;
+ dma->sub_page.off = (dma_off >> 9);
+ dma->sub_page.cnt = (dma_len >> 9);
+ dma->page = page;
+ dma->pg_off = pg_off;
+ dma->cb = cb;
+ dma->cb_data = cb_data;
+
+ dev_dbg(CARD_TO_DEV(card),
+ "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
+ dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
+ dma->sub_page.cnt, dma->page, dma->pg_off);
+
+ /* Queue the DMA */
+ list_add_tail(&dma->list, q);
+
+ return 0;
+}
+
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ atomic_t *n_dmas,
+ rsxx_dma_cb cb,
+ void *cb_data)
+{
+ struct list_head dma_list[RSXX_MAX_TARGETS];
+ struct bio_vec *bvec;
+ unsigned long long addr8;
+ unsigned int laddr;
+ unsigned int bv_len;
+ unsigned int bv_off;
+ unsigned int dma_off;
+ unsigned int dma_len;
+ int dma_cnt[RSXX_MAX_TARGETS];
+ int tgt;
+ int st;
+ int i;
+
+ addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ atomic_set(n_dmas, 0);
+
+ for (i = 0; i < card->n_targets; i++) {
+ INIT_LIST_HEAD(&dma_list[i]);
+ dma_cnt[i] = 0;
+ }
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ bv_len = bio->bi_size;
+
+ while (bv_len > 0) {
+ tgt = rsxx_get_dma_tgt(card, addr8);
+ laddr = rsxx_addr8_to_laddr(addr8, card);
+
+ st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
+ cb, cb_data);
+ if (st)
+ goto bvec_err;
+
+ dma_cnt[tgt]++;
+ atomic_inc(n_dmas);
+ addr8 += RSXX_HW_BLK_SIZE;
+ bv_len -= RSXX_HW_BLK_SIZE;
+ }
+ } else {
+ bio_for_each_segment(bvec, bio, i) {
+ bv_len = bvec->bv_len;
+ bv_off = bvec->bv_offset;
+
+ while (bv_len > 0) {
+ tgt = rsxx_get_dma_tgt(card, addr8);
+ laddr = rsxx_addr8_to_laddr(addr8, card);
+ dma_off = addr8 & RSXX_HW_BLK_MASK;
+ dma_len = min(bv_len,
+ RSXX_HW_BLK_SIZE - dma_off);
+
+ st = rsxx_queue_dma(card, &dma_list[tgt],
+ bio_data_dir(bio),
+ dma_off, dma_len,
+ laddr, bvec->bv_page,
+ bv_off, cb, cb_data);
+ if (st)
+ goto bvec_err;
+
+ dma_cnt[tgt]++;
+ atomic_inc(n_dmas);
+ addr8 += dma_len;
+ bv_off += dma_len;
+ bv_len -= dma_len;
+ }
+ }
+ }
+
+ for (i = 0; i < card->n_targets; i++) {
+ if (!list_empty(&dma_list[i])) {
+ spin_lock(&card->ctrl[i].queue_lock);
+ card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
+ list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
+ spin_unlock(&card->ctrl[i].queue_lock);
+
+ queue_work(card->ctrl[i].issue_wq,
+ &card->ctrl[i].issue_dma_work);
+ }
+ }
+
+ return 0;
+
+bvec_err:
+ for (i = 0; i < card->n_targets; i++)
+ rsxx_cleanup_dma_queue(card, &dma_list[i]);
+
+ return st;
+}
+
+
+/*----------------- DMA Engine Initialization & Setup -------------------*/
+static int rsxx_dma_ctrl_init(struct pci_dev *dev,
+ struct rsxx_dma_ctrl *ctrl)
+{
+ int i;
+
+ memset(&ctrl->stats, 0, sizeof(ctrl->stats));
+
+ ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+ &ctrl->status.dma_addr);
+ ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+ &ctrl->cmd.dma_addr);
+ if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+ return -ENOMEM;
+
+ ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
+ if (!ctrl->trackers)
+ return -ENOMEM;
+
+ ctrl->trackers->head = 0;
+ for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+ ctrl->trackers->list[i].next_tag = i + 1;
+ ctrl->trackers->list[i].dma = NULL;
+ }
+ ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
+ spin_lock_init(&ctrl->trackers->lock);
+
+ spin_lock_init(&ctrl->queue_lock);
+ INIT_LIST_HEAD(&ctrl->queue);
+
+ setup_timer(&ctrl->activity_timer, dma_engine_stalled,
+ (unsigned long)ctrl);
+
+ ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
+ if (!ctrl->issue_wq)
+ return -ENOMEM;
+
+ ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
+ if (!ctrl->done_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
+ INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+
+ memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+ iowrite32(lower_32_bits(ctrl->status.dma_addr),
+ ctrl->regmap + SB_ADD_LO);
+ iowrite32(upper_32_bits(ctrl->status.dma_addr),
+ ctrl->regmap + SB_ADD_HI);
+
+ memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+ iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+ iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+ ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+ if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+ dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+ ctrl->status.idx);
+ return -EINVAL;
+ }
+ iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+ iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+ ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+ if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+ dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+ ctrl->status.idx);
+ return -EINVAL;
+ }
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+ iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+ wmb();
+
+ return 0;
+}
+
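+/*
+ * Worked example (illustrative values): with stripe_size8 = 4096 and
+ * n_targets = 8, the setup below yields lower_mask = 0xfff,
+ * upper_mask = ~0xfff, upper_shift = 3, target_mask = 0x7 and
+ * target_shift = 12. A byte address then maps to target (addr8 >> 12) & 0x7,
+ * and rsxx_addr8_to_laddr() folds out the target bits before dividing by
+ * RSXX_HW_BLK_SIZE to get the logical block address.
+ */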
+static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
+ unsigned int stripe_size8)
+{
+ if (!is_power_of_2(stripe_size8)) {
+ dev_err(CARD_TO_DEV(card),
+ "stripe_size is NOT a power of 2!\n");
+ return -EINVAL;
+ }
+
+ card->_stripe.lower_mask = stripe_size8 - 1;
+
+ card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
+ card->_stripe.upper_shift = ffs(card->n_targets) - 1;
+
+ card->_stripe.target_mask = card->n_targets - 1;
+ card->_stripe.target_shift = ffs(stripe_size8) - 1;
+
+ dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
+ card->_stripe.lower_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
+ card->_stripe.upper_shift);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
+ card->_stripe.upper_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
+ card->_stripe.target_mask);
+ dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
+ card->_stripe.target_shift);
+
+ return 0;
+}
+
+static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+{
+ u32 intr_coal;
+
+ intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+ card->config.data.intr_coal.count,
+ card->config.data.intr_coal.latency);
+ iowrite32(intr_coal, card->regmap + INTR_COAL);
+
+ return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
+}
+
+int rsxx_dma_setup(struct rsxx_cardinfo *card)
+{
+ unsigned long flags;
+ int st;
+ int i;
+
+ dev_info(CARD_TO_DEV(card),
+ "Initializing %d DMA targets\n",
+ card->n_targets);
+
+ /* Regmap is divided up into 4K chunks, one for each DMA channel. */
+ for (i = 0; i < card->n_targets; i++)
+ card->ctrl[i].regmap = card->regmap + (i * 4096);
+
+ card->dma_fault = 0;
+
+ /* Reset the DMA queues */
+ rsxx_dma_queue_reset(card);
+
+ /************* Setup DMA Control *************/
+ for (i = 0; i < card->n_targets; i++) {
+ st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
+ if (st)
+ goto failed_dma_setup;
+
+ card->ctrl[i].card = card;
+ card->ctrl[i].id = i;
+ }
+
+ card->scrub_hard = 1;
+
+ if (card->config_valid)
+ rsxx_dma_configure(card);
+
+ /* Enable the interrupts after all setup has completed. */
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_irqsave(&card->irq_lock, flags);
+ rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
+ spin_unlock_irqrestore(&card->irq_lock, flags);
+ }
+
+ return 0;
+
+failed_dma_setup:
+ for (i = 0; i < card->n_targets; i++) {
+ struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
+
+ if (ctrl->issue_wq) {
+ destroy_workqueue(ctrl->issue_wq);
+ ctrl->issue_wq = NULL;
+ }
+
+ if (ctrl->done_wq) {
+ destroy_workqueue(ctrl->done_wq);
+ ctrl->done_wq = NULL;
+ }
+
+ if (ctrl->trackers)
+ vfree(ctrl->trackers);
+
+ if (ctrl->status.buf)
+ pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf,
+ ctrl->status.dma_addr);
+ if (ctrl->cmd.buf)
+ pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ }
+
+ return st;
+}
+
+
+void rsxx_dma_destroy(struct rsxx_cardinfo *card)
+{
+ struct rsxx_dma_ctrl *ctrl;
+ struct rsxx_dma *dma;
+ int i, j;
+ int cnt = 0;
+
+ for (i = 0; i < card->n_targets; i++) {
+ ctrl = &card->ctrl[i];
+
+ if (ctrl->issue_wq) {
+ destroy_workqueue(ctrl->issue_wq);
+ ctrl->issue_wq = NULL;
+ }
+
+ if (ctrl->done_wq) {
+ destroy_workqueue(ctrl->done_wq);
+ ctrl->done_wq = NULL;
+ }
+
+ if (timer_pending(&ctrl->activity_timer))
+ del_timer_sync(&ctrl->activity_timer);
+
+ /* Clean up the DMA queue */
+ spin_lock(&ctrl->queue_lock);
+ cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
+ spin_unlock(&ctrl->queue_lock);
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Freed %d queued DMAs on channel %d\n",
+ cnt, i);
+
+ /* Clean up issued DMAs */
+ for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+ dma = get_tracker_dma(ctrl->trackers, j);
+ if (dma) {
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ (dma->cmd == HW_CMD_BLK_WRITE) ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ kmem_cache_free(rsxx_dma_pool, dma);
+ cnt++;
+ }
+ }
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Freed %d pending DMAs on channel %d\n",
+ cnt, i);
+
+ vfree(ctrl->trackers);
+
+ pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf, ctrl->status.dma_addr);
+ pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ }
+}
+
+
+int rsxx_dma_init(void)
+{
+ rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
+ if (!rsxx_dma_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+
+void rsxx_dma_cleanup(void)
+{
+ kmem_cache_destroy(rsxx_dma_pool);
+}
+
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
new file mode 100644
index 000000000000..2e50b65902b7
--- /dev/null
+++ b/drivers/block/rsxx/rsxx.h
@@ -0,0 +1,45 @@
+/*
+* Filename: rsxx.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_H__
+#define __RSXX_H__
+
+/*----------------- IOCTL Definitions -------------------*/
+
+struct rsxx_reg_access {
+ __u32 addr;
+ __u32 cnt;
+ __u32 stat;
+ __u32 stream;
+ __u32 data[8];
+};
+
+#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
+
+#define RSXX_IOC_MAGIC 'r'
+
+#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
+#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
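+
+/*
+ * Illustrative user-space usage (device node name and register address are
+ * hypothetical, not dictated by this header):
+ *
+ *   struct rsxx_reg_access reg = { .addr = 0x10, .cnt = 4, .stream = 0 };
+ *   int fd = open("/dev/rsxx0", O_RDWR);
+ *   if (fd >= 0 && ioctl(fd, RSXX_GETREG, &reg) == 0)
+ *           printf("stat x%08x data x%08x\n", reg.stat, reg.data[0]);
+ */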
+
+#endif /* __RSXX_H__ */
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
new file mode 100644
index 000000000000..c025fe5fdb70
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -0,0 +1,72 @@
+/*
+* Filename: rsxx_cfg.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_CFG_H__
+#define __RSXX_CFG_H__
+
+/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
+#include <linux/types.h>
+
+/*
+ * The card config version must match the driver's expected version. If it does
+ * not, the DMA interfaces will not be attached and the user will need to
+ * initialize/upgrade the card configuration using the card config utility.
+ */
+#define RSXX_CFG_VERSION 4
+
+struct card_cfg_hdr {
+ __u32 version;
+ __u32 crc;
+};
+
+struct card_cfg_data {
+ __u32 block_size;
+ __u32 stripe_size;
+ __u32 vendor_id;
+ __u32 cache_order;
+ struct {
+ __u32 mode; /* Disabled, manual, auto-tune... */
+ __u32 count; /* Number of intr to coalesce */
+ __u32 latency;/* Max wait time (in ns) */
+ } intr_coal;
+};
+
+struct rsxx_card_cfg {
+ struct card_cfg_hdr hdr;
+ struct card_cfg_data data;
+};
+
+/* Vendor ID Values */
+#define RSXX_VENDOR_ID_TMS_IBM 0
+#define RSXX_VENDOR_ID_DSI 1
+#define RSXX_VENDOR_COUNT 2
+
+/* Interrupt Coalescing Values */
+#define RSXX_INTR_COAL_DISABLED 0
+#define RSXX_INTR_COAL_EXPLICIT 1
+#define RSXX_INTR_COAL_AUTO_TUNE 2
+
+
+#endif /* __RSXX_CFG_H__ */
+
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
new file mode 100644
index 000000000000..a1ac907d8f4c
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -0,0 +1,399 @@
+/*
+* Filename: rsxx_priv.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_PRIV_H__
+#define __RSXX_PRIV_H__
+
+#include <linux/version.h>
+#include <linux/semaphore.h>
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/bio.h>
+#include <linux/vmalloc.h>
+#include <linux/timer.h>
+#include <linux/ioctl.h>
+
+#include "rsxx.h"
+#include "rsxx_cfg.h"
+
+struct proc_cmd;
+
+#define PCI_VENDOR_ID_TMS_IBM 0x15B6
+#define PCI_DEVICE_ID_RS70_FLASH 0x0019
+#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
+#define PCI_DEVICE_ID_RS80_FLASH 0x001C
+#define PCI_DEVICE_ID_RS81_FLASH 0x001E
+
+#define RS70_PCI_REV_SUPPORTED 4
+
+#define DRIVER_NAME "rsxx"
+#define DRIVER_VERSION "3.7"
+
+/* Block size is 4096 */
+#define RSXX_HW_BLK_SHIFT 12
+#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
+#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
+
+#define MAX_CREG_DATA8 32
+#define LOG_BUF_SIZE8 128
+
+#define RSXX_MAX_OUTSTANDING_CMDS 255
+#define RSXX_CS_IDX_MASK 0xff
+
+#define RSXX_MAX_TARGETS 8
+
+struct dma_tracker_list;
+
+/* DMA Command/Status Buffer structure */
+struct rsxx_cs_buffer {
+ dma_addr_t dma_addr;
+ void *buf;
+ u32 idx;
+};
+
+struct rsxx_dma_stats {
+ u32 crc_errors;
+ u32 hard_errors;
+ u32 soft_errors;
+ u32 writes_issued;
+ u32 writes_failed;
+ u32 reads_issued;
+ u32 reads_failed;
+ u32 reads_retried;
+ u32 discards_issued;
+ u32 discards_failed;
+ u32 done_rescheduled;
+ u32 issue_rescheduled;
+ u32 sw_q_depth; /* Number of DMAs on the SW queue. */
+ atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
+};
+
+struct rsxx_dma_ctrl {
+ struct rsxx_cardinfo *card;
+ int id;
+ void __iomem *regmap;
+ struct rsxx_cs_buffer status;
+ struct rsxx_cs_buffer cmd;
+ u16 e_cnt;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct workqueue_struct *issue_wq;
+ struct work_struct issue_dma_work;
+ struct workqueue_struct *done_wq;
+ struct work_struct dma_done_work;
+ struct timer_list activity_timer;
+ struct dma_tracker_list *trackers;
+ struct rsxx_dma_stats stats;
+};
+
+struct rsxx_cardinfo {
+ struct pci_dev *dev;
+ unsigned int halt;
+
+ void __iomem *regmap;
+ spinlock_t irq_lock;
+ unsigned int isr_mask;
+ unsigned int ier_mask;
+
+ struct rsxx_card_cfg config;
+ int config_valid;
+
+ /* Embedded CPU Communication */
+ struct {
+ spinlock_t lock;
+ bool active;
+ struct creg_cmd *active_cmd;
+ struct work_struct done_work;
+ struct list_head queue;
+ unsigned int q_depth;
+ /* Cache the creg status to prevent ioreads */
+ struct {
+ u32 stat;
+ u32 failed_cancel_timer;
+ u32 creg_timeout;
+ } creg_stats;
+ struct timer_list cmd_timer;
+ struct mutex reset_lock;
+ int reset;
+ } creg_ctrl;
+
+ struct {
+ char tmp[MAX_CREG_DATA8];
+ char buf[LOG_BUF_SIZE8]; /* terminated */
+ int buf_len;
+ } log;
+
+ struct work_struct event_work;
+ unsigned int state;
+ u64 size8;
+
+ /* Lock the device attach/detach function */
+ struct mutex dev_lock;
+
+ /* Block Device Variables */
+ bool bdev_attached;
+ int disk_id;
+ int major;
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+ struct {
+ /* Used to convert a byte address to a device address. */
+ u64 lower_mask;
+ u64 upper_shift;
+ u64 upper_mask;
+ u64 target_mask;
+ u64 target_shift;
+ } _stripe;
+ unsigned int dma_fault;
+
+ int scrub_hard;
+
+ int n_targets;
+ struct rsxx_dma_ctrl *ctrl;
+};
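+
+/*
+ * Illustrative sketch (not part of the original patch): the _stripe masks
+ * and shifts above suggest that a byte address is split into a target index
+ * plus a per-target device address, roughly as below. The exact bit layout
+ * is an assumption; the authoritative conversion lives in the DMA code.
+ */
+static inline unsigned int rsxx_addr8_to_target(struct rsxx_cardinfo *card,
+ u64 addr8)
+{
+ return (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
+}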
+
+enum rsxx_pci_regmap {
+ HWID = 0x00, /* Hardware Identification Register */
+ SCRATCH = 0x04, /* Scratch/Debug Register */
+ RESET = 0x08, /* Reset Register */
+ ISR = 0x10, /* Interrupt Status Register */
+ IER = 0x14, /* Interrupt Enable Register */
+ IPR = 0x18, /* Interrupt Poll Register */
+ CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
+ CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
+ HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
+ SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
+ SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
+ SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
+ HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
+ SW_STATUS_CNT = 0x3C, /* Deprecated */
+ CREG_CMD = 0x40, /* CPU Command Register */
+ CREG_ADD = 0x44, /* CPU Address Register */
+ CREG_CNT = 0x48, /* CPU Count Register */
+ CREG_STAT = 0x4C, /* CPU Status Register */
+ CREG_DATA0 = 0x50, /* CPU Data Registers */
+ CREG_DATA1 = 0x54,
+ CREG_DATA2 = 0x58,
+ CREG_DATA3 = 0x5C,
+ CREG_DATA4 = 0x60,
+ CREG_DATA5 = 0x64,
+ CREG_DATA6 = 0x68,
+ CREG_DATA7 = 0x6c,
+ INTR_COAL = 0x70, /* Interrupt Coalescing Register */
+ HW_ERROR = 0x74, /* Card Error Register */
+ PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
+ PCI_DEBUG1 = 0x7C,
+ PCI_DEBUG2 = 0x80,
+ PCI_DEBUG3 = 0x84,
+ PCI_DEBUG4 = 0x88,
+ PCI_DEBUG5 = 0x8C,
+ PCI_DEBUG6 = 0x90,
+ PCI_DEBUG7 = 0x94,
+ PCI_POWER_THROTTLE = 0x98,
+ PERF_CTRL = 0x9c,
+ PERF_TIMER_LO = 0xa0,
+ PERF_TIMER_HI = 0xa4,
+ PERF_RD512_LO = 0xa8,
+ PERF_RD512_HI = 0xac,
+ PERF_WR512_LO = 0xb0,
+ PERF_WR512_HI = 0xb4,
+};
+
+enum rsxx_intr {
+ CR_INTR_DMA0 = 0x00000001,
+ CR_INTR_CREG = 0x00000002,
+ CR_INTR_DMA1 = 0x00000004,
+ CR_INTR_EVENT = 0x00000008,
+ CR_INTR_DMA2 = 0x00000010,
+ CR_INTR_DMA3 = 0x00000020,
+ CR_INTR_DMA4 = 0x00000040,
+ CR_INTR_DMA5 = 0x00000080,
+ CR_INTR_DMA6 = 0x00000100,
+ CR_INTR_DMA7 = 0x00000200,
+ CR_INTR_DMA_ALL = 0x000003f5,
+ CR_INTR_ALL = 0xffffffff,
+};
+
+static inline int CR_INTR_DMA(int N)
+{
+ static const unsigned int _CR_INTR_DMA[] = {
+ CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
+ CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
+ };
+ return _CR_INTR_DMA[N];
+}
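+
+/*
+ * Illustrative sketch (not part of the original patch): CR_INTR_DMA() maps a
+ * channel number to its ISR/IER bit, so a mask covering the first n DMA
+ * channels (n bounded by RSXX_MAX_TARGETS) can be built like this.
+ */
+static inline unsigned int rsxx_dma_intr_mask(int n_channels)
+{
+ unsigned int mask = 0;
+ int i;
+
+ for (i = 0; i < n_channels; i++)
+ mask |= CR_INTR_DMA(i);
+ return mask;
+}
+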
+enum rsxx_pci_reset {
+ DMA_QUEUE_RESET = 0x00000001,
+};
+
+enum rsxx_pci_revision {
+ RSXX_DISCARD_SUPPORT = 2,
+};
+
+enum rsxx_creg_cmd {
+ CREG_CMD_TAG_MASK = 0x0000FF00,
+ CREG_OP_WRITE = 0x000000C0,
+ CREG_OP_READ = 0x000000E0,
+};
+
+enum rsxx_creg_addr {
+ CREG_ADD_CARD_CMD = 0x80001000,
+ CREG_ADD_CARD_STATE = 0x80001004,
+ CREG_ADD_CARD_SIZE = 0x8000100c,
+ CREG_ADD_CAPABILITIES = 0x80001050,
+ CREG_ADD_LOG = 0x80002000,
+ CREG_ADD_NUM_TARGETS = 0x80003000,
+ CREG_ADD_CONFIG = 0xB0000000,
+};
+
+enum rsxx_creg_card_cmd {
+ CARD_CMD_STARTUP = 1,
+ CARD_CMD_SHUTDOWN = 2,
+ CARD_CMD_LOW_LEVEL_FORMAT = 3,
+ CARD_CMD_FPGA_RECONFIG_BR = 4,
+ CARD_CMD_FPGA_RECONFIG_MAIN = 5,
+ CARD_CMD_BACKUP = 6,
+ CARD_CMD_RESET = 7,
+ CARD_CMD_deprecated = 8,
+ CARD_CMD_UNINITIALIZE = 9,
+ CARD_CMD_DSTROY_EMERGENCY = 10,
+ CARD_CMD_DSTROY_NORMAL = 11,
+ CARD_CMD_DSTROY_EXTENDED = 12,
+ CARD_CMD_DSTROY_ABORT = 13,
+};
+
+enum rsxx_card_state {
+ CARD_STATE_SHUTDOWN = 0x00000001,
+ CARD_STATE_STARTING = 0x00000002,
+ CARD_STATE_FORMATTING = 0x00000004,
+ CARD_STATE_UNINITIALIZED = 0x00000008,
+ CARD_STATE_GOOD = 0x00000010,
+ CARD_STATE_SHUTTING_DOWN = 0x00000020,
+ CARD_STATE_FAULT = 0x00000040,
+ CARD_STATE_RD_ONLY_FAULT = 0x00000080,
+ CARD_STATE_DSTROYING = 0x00000100,
+};
+
+enum rsxx_led {
+ LED_DEFAULT = 0x0,
+ LED_IDENTIFY = 0x1,
+ LED_SOAK = 0x2,
+};
+
+enum rsxx_creg_flash_lock {
+ CREG_FLASH_LOCK = 1,
+ CREG_FLASH_UNLOCK = 2,
+};
+
+enum rsxx_card_capabilities {
+ CARD_CAP_SUBPAGE_WRITES = 0x00000080,
+};
+
+enum rsxx_creg_stat {
+ CREG_STAT_STATUS_MASK = 0x00000003,
+ CREG_STAT_SUCCESS = 0x1,
+ CREG_STAT_ERROR = 0x2,
+ CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
+ CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
+ CREG_STAT_TAG_MASK = 0x0000ff00,
+};
+
+static inline unsigned int CREG_DATA(int N)
+{
+ return CREG_DATA0 + (N << 2);
+}
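+
+/*
+ * Illustrative sketch (not part of the original patch): CREG_DATA(n) is the
+ * MMIO offset of the n-th CPU data register, so a size8-byte transfer spans
+ * CREG_DATA(0) through CREG_DATA((size8 - 1) / 4). Writing one 32-bit word
+ * might look like this; iowrite32() is assumed to be visible via the PCI
+ * headers already included above.
+ */
+static inline void rsxx_creg_write_word(struct rsxx_cardinfo *card, int n,
+ u32 val)
+{
+ iowrite32(val, card->regmap + CREG_DATA(n));
+}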
+
+/*----------------- Convenient Log Wrappers -------------------*/
+#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
+
+/***** config.c *****/
+int rsxx_load_config(struct rsxx_cardinfo *card);
+
+/***** core.c *****/
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr);
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+ unsigned int intr);
+
+/***** dev.c *****/
+int rsxx_attach_dev(struct rsxx_cardinfo *card);
+void rsxx_detach_dev(struct rsxx_cardinfo *card);
+int rsxx_setup_dev(struct rsxx_cardinfo *card);
+void rsxx_destroy_dev(struct rsxx_cardinfo *card);
+int rsxx_dev_init(void);
+void rsxx_dev_cleanup(void);
+
+/***** dma.c ****/
+typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
+ void *cb_data,
+ unsigned int status);
+int rsxx_dma_setup(struct rsxx_cardinfo *card);
+void rsxx_dma_destroy(struct rsxx_cardinfo *card);
+int rsxx_dma_init(void);
+void rsxx_dma_cleanup(void);
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+ struct bio *bio,
+ atomic_t *n_dmas,
+ rsxx_dma_cb cb,
+ void *cb_data);
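+
+/*
+ * Illustrative sketch (not part of the original patch), kept compiled out:
+ * how the rsxx_dma_queue_bio() interface above fits together with a
+ * completion callback. The function names and the "decrement a counter per
+ * completed DMA" convention are hypothetical.
+ */
+#if 0
+static void example_dma_done(struct rsxx_cardinfo *card, void *cb_data,
+ unsigned int status)
+{
+ atomic_t *dmas_left = cb_data;
+
+ if (status)
+ dev_err(CARD_TO_DEV(card), "DMA completed with status 0x%x\n",
+ status);
+ atomic_dec(dmas_left);
+}
+
+static int example_submit(struct rsxx_cardinfo *card, struct bio *bio,
+ atomic_t *dmas_left)
+{
+ return rsxx_dma_queue_bio(card, bio, dmas_left, example_dma_done,
+ dmas_left);
+}
+#endif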
+
+/***** cregs.c *****/
+int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream);
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+ u32 addr,
+ unsigned int size8,
+ void *data,
+ int byte_stream);
+int rsxx_read_hw_log(struct rsxx_cardinfo *card);
+int rsxx_get_card_state(struct rsxx_cardinfo *card,
+ unsigned int *state);
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+ unsigned int *n_targets);
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+ u32 *capabilities);
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
+int rsxx_creg_setup(struct rsxx_cardinfo *card);
+void rsxx_creg_destroy(struct rsxx_cardinfo *card);
+int rsxx_creg_init(void);
+void rsxx_creg_cleanup(void);
+
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+ struct rsxx_reg_access __user *ucmd,
+ int read);
+
+
+
+#endif /* __RSXX_PRIV_H__ */
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 57763c54363a..758f2ac878cf 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1090,10 +1090,13 @@ static const struct block_device_operations floppy_fops = {
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
- struct swim3 __iomem *sw = fs->swim3;
+ struct swim3 __iomem *sw;
if (!fs)
return;
+
+ sw = fs->swim3;
+
if (mb_state != MB_FD)
return;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
deleted file mode 100644
index ff540520bada..000000000000
--- a/drivers/block/xd.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * This file contains the driver for an XT hard disk controller
- * (at least the DTC 5150X) for Linux.
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- *
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler,
- * kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and
- * Wim Van Dorst.
- *
- * Revised: 04/04/94 by Risto Kankkunen
- * Moved the detection code from xd_init() to xd_geninit() as it needed
- * interrupts enabled and Linus didn't want to enable them in that first
- * phase. xd_geninit() is the place to do these kinds of things anyway,
- * he says.
- *
- * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
- *
- * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
- * Fixed some problems with disk initialization and module initiation.
- * Added support for manual geometry setting (except Seagate controllers)
- * in form:
- * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
- * Recovered DMA access. Abridged messages. Added support for DTC5051CX,
- * WD1002-27X & XEBEC controllers. Driver uses now some jumper settings.
- * Extended ioctl() support.
- *
- * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/blkpg.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/dma.h>
-
-#include "xd.h"
-
-static DEFINE_MUTEX(xd_mutex);
-static void __init do_xd_setup (int *integers);
-#ifdef MODULE
-static int xd[5] = { -1,-1,-1,-1, };
-#endif
-
-#define XD_DONT_USE_DMA 0 /* Initial value. may be overriden using
- "nodma" module option */
-#define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */
-
-/* Above may need to be increased if a problem with the 2nd drive detection
- (ST11M controller) or resetting a controller (WD) appears */
-
-static XD_INFO xd_info[XD_MAXDRIVES];
-
-/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
- signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
- few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
- command. Run DEBUG, and then you can examine your BIOS signature with:
-
- d xxxx:0000
-
- where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
- be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
- in the table are, in order:
-
- offset ; this is the offset (in bytes) from the start of your ROM where the signature starts
- signature ; this is the actual text of the signature
- xd_?_init_controller ; this is the controller init routine used by your controller
- xd_?_init_drive ; this is the drive init routine used by your controller
-
- The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
- made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
- best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
- may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
-
- NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
- should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
-
-#include <asm/page.h>
-#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
-#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-static char *xd_dma_buffer;
-
-static XD_SIGNATURE xd_sigs[] __initdata = {
- { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
- { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
- { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
- { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
- { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
- { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
- { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
- { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
- { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
- { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
- { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
-};
-
-static unsigned int xd_bases[] __initdata =
-{
- 0xC8000, 0xCA000, 0xCC000,
- 0xCE000, 0xD0000, 0xD2000,
- 0xD4000, 0xD6000, 0xD8000,
- 0xDA000, 0xDC000, 0xDE000,
- 0xE0000
-};
-
-static DEFINE_SPINLOCK(xd_lock);
-
-static struct gendisk *xd_gendisk[2];
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct block_device_operations xd_fops = {
- .owner = THIS_MODULE,
- .ioctl = xd_ioctl,
- .getgeo = xd_getgeo,
-};
-static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
-static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
-static u_char xd_override __initdata = 0, xd_type __initdata = 0;
-static u_short xd_iobase = 0x320;
-static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };
-
-static volatile int xdc_busy;
-static struct timer_list xd_watchdog_int;
-
-static volatile u_char xd_error;
-static bool nodma = XD_DONT_USE_DMA;
-
-static struct request_queue *xd_queue;
-
-/* xd_init: register the block device number and set up pointer tables */
-static int __init xd_init(void)
-{
- u_char i,controller;
- unsigned int address;
- int err;
-
-#ifdef MODULE
- {
- u_char count = 0;
- for (i = 4; i > 0; i--)
- if (((xd[i] = xd[i-1]) >= 0) && !count)
- count = i;
- if ((xd[0] = count))
- do_xd_setup(xd);
- }
-#endif
-
- init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
-
- err = -EBUSY;
- if (register_blkdev(XT_DISK_MAJOR, "xd"))
- goto out1;
-
- err = -ENOMEM;
- xd_queue = blk_init_queue(do_xd_request, &xd_lock);
- if (!xd_queue)
- goto out1a;
-
- if (xd_detect(&controller,&address)) {
-
- printk("Detected a%s controller (type %d) at address %06x\n",
- xd_sigs[controller].name,controller,address);
- if (!request_region(xd_iobase,4,"xd")) {
- printk("xd: Ports at 0x%x are not available\n",
- xd_iobase);
- goto out2;
- }
- if (controller)
- xd_sigs[controller].init_controller(address);
- xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
-
- printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
- xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
- }
-
- /*
- * With the drive detected, xd_maxsectors should now be known.
- * If xd_maxsectors is 0, nothing was detected and we fall through
- * to return -ENODEV
- */
- if (!xd_dma_buffer && xd_maxsectors) {
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- printk(KERN_ERR "xd: Out of memory.\n");
- goto out3;
- }
- }
-
- err = -ENODEV;
- if (!xd_drives)
- goto out3;
-
- for (i = 0; i < xd_drives; i++) {
- XD_INFO *p = &xd_info[i];
- struct gendisk *disk = alloc_disk(64);
- if (!disk)
- goto Enomem;
- p->unit = i;
- disk->major = XT_DISK_MAJOR;
- disk->first_minor = i<<6;
- sprintf(disk->disk_name, "xd%c", i+'a');
- disk->fops = &xd_fops;
- disk->private_data = p;
- disk->queue = xd_queue;
- set_capacity(disk, p->heads * p->cylinders * p->sectors);
- printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
- p->cylinders, p->heads, p->sectors);
- xd_gendisk[i] = disk;
- }
-
- err = -EBUSY;
- if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
- printk("xd: unable to get IRQ%d\n",xd_irq);
- goto out4;
- }
-
- if (request_dma(xd_dma,"xd")) {
- printk("xd: unable to get DMA%d\n",xd_dma);
- goto out5;
- }
-
- /* xd_maxsectors depends on controller - so set after detection */
- blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
-
- for (i = 0; i < xd_drives; i++)
- add_disk(xd_gendisk[i]);
-
- return 0;
-
-out5:
- free_irq(xd_irq, NULL);
-out4:
- for (i = 0; i < xd_drives; i++)
- put_disk(xd_gendisk[i]);
-out3:
- if (xd_maxsectors)
- release_region(xd_iobase,4);
-
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
-out2:
- blk_cleanup_queue(xd_queue);
-out1a:
- unregister_blkdev(XT_DISK_MAJOR, "xd");
-out1:
- return err;
-Enomem:
- err = -ENOMEM;
- while (i--)
- put_disk(xd_gendisk[i]);
- goto out3;
-}
-
-/* xd_detect: scan the possible BIOS ROM locations for the signature strings */
-static u_char __init xd_detect (u_char *controller, unsigned int *address)
-{
- int i, j;
-
- if (xd_override)
- {
- *controller = xd_type;
- *address = 0;
- return(1);
- }
-
- for (i = 0; i < ARRAY_SIZE(xd_bases); i++) {
- void __iomem *p = ioremap(xd_bases[i], 0x2000);
- if (!p)
- continue;
- for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) {
- const char *s = xd_sigs[j].string;
- if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
- *controller = j;
- xd_type = j;
- *address = xd_bases[i];
- iounmap(p);
- return 1;
- }
- }
- iounmap(p);
- }
- return 0;
-}
-
-/* do_xd_request: handle an incoming request */
-static void do_xd_request (struct request_queue * q)
-{
- struct request *req;
-
- if (xdc_busy)
- return;
-
- req = blk_fetch_request(q);
- while (req) {
- unsigned block = blk_rq_pos(req);
- unsigned count = blk_rq_cur_sectors(req);
- XD_INFO *disk = req->rq_disk->private_data;
- int res = -EIO;
- int retry;
-
- if (req->cmd_type != REQ_TYPE_FS)
- goto done;
- if (block + count > get_capacity(req->rq_disk))
- goto done;
- for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
- res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
- block, count);
- done:
- /* wrap up, 0 = success, -errno = fail */
- if (!__blk_end_request_cur(req, res))
- req = blk_fetch_request(q);
- }
-}
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- XD_INFO *p = bdev->bd_disk->private_data;
-
- geo->heads = p->heads;
- geo->sectors = p->sectors;
- geo->cylinders = p->cylinders;
- return 0;
-}
-
-/* xd_ioctl: handle device ioctl's */
-static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
-{
- switch (cmd) {
- case HDIO_SET_DMA:
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- if (xdc_busy) return -EBUSY;
- nodma = !arg;
- if (nodma && xd_dma_buffer) {
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
- xd_dma_buffer = NULL;
- } else if (!nodma && !xd_dma_buffer) {
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- nodma = XD_DONT_USE_DMA;
- return -ENOMEM;
- }
- }
- return 0;
- case HDIO_GET_DMA:
- return put_user(!nodma, (long __user *) arg);
- case HDIO_GET_MULTCOUNT:
- return put_user(xd_maxsectors, (long __user *) arg);
- default:
- return -EINVAL;
- }
-}
-
-static int xd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- mutex_lock(&xd_mutex);
- ret = xd_locked_ioctl(bdev, mode, cmd, param);
- mutex_unlock(&xd_mutex);
-
- return ret;
-}
-
-/* xd_readwrite: handle a read/write request */
-static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
-{
- int drive = p->unit;
- u_char cmdblk[6],sense[4];
- u_short track,cylinder;
- u_char head,sector,control,mode = PIO_MODE,temp;
- char **real_buffer;
- register int i;
-
-#ifdef DEBUG_READWRITE
- printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
-#endif /* DEBUG_READWRITE */
-
- spin_unlock_irq(&xd_lock);
-
- control = p->control;
- if (!xd_dma_buffer)
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- while (count) {
- temp = count < xd_maxsectors ? count : xd_maxsectors;
-
- track = block / p->sectors;
- head = track % p->heads;
- cylinder = track / p->heads;
- sector = block % p->sectors;
-
-#ifdef DEBUG_READWRITE
- printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
-#endif /* DEBUG_READWRITE */
-
- if (xd_dma_buffer) {
- mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
- real_buffer = &xd_dma_buffer;
- for (i=0; i < (temp * 0x200); i++)
- xd_dma_buffer[i] = buffer[i];
- }
- else
- real_buffer = &buffer;
-
- xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
-
- switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
- case 1:
- printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
- xd_recalibrate(drive);
- spin_lock_irq(&xd_lock);
- return -EIO;
- case 2:
- if (sense[0] & 0x30) {
- printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
- switch ((sense[0] & 0x30) >> 4) {
- case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 2: printk("command error, code = 0x%X",sense[0] & 0x0F);
- break;
- case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F);
- break;
- }
- }
- if (sense[0] & 0x80)
- printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F);
- /* reported drive number = (sense[1] & 0xE0) >> 5 */
- else
- printk(" - no valid disk address\n");
- spin_lock_irq(&xd_lock);
- return -EIO;
- }
- if (xd_dma_buffer)
- for (i=0; i < (temp * 0x200); i++)
- buffer[i] = xd_dma_buffer[i];
-
- count -= temp, buffer += temp * 0x200, block += temp;
- }
- spin_lock_irq(&xd_lock);
- return 0;
-}
-
-/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
-static void xd_recalibrate (u_char drive)
-{
- u_char cmdblk[6];
-
- xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0);
- if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8))
- printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive);
-}
-
-/* xd_interrupt_handler: interrupt service routine */
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id)
-{
- if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */
-#ifdef DEBUG_OTHER
- printk("xd_interrupt_handler: interrupt detected\n");
-#endif /* DEBUG_OTHER */
- outb(0,XD_CONTROL); /* acknowledge interrupt */
- wake_up(&xd_wait_int); /* and wake up sleeping processes */
- return IRQ_HANDLED;
- }
- else
- printk("xd: unexpected interrupt\n");
- return IRQ_NONE;
-}
-
-/* xd_setup_dma: set up the DMA controller for a data transfer */
-static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
-{
- unsigned long f;
-
- if (nodma)
- return (PIO_MODE);
- if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) {
-#ifdef DEBUG_OTHER
- printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
-#endif /* DEBUG_OTHER */
- return (PIO_MODE);
- }
-
- f=claim_dma_lock();
- disable_dma(xd_dma);
- clear_dma_ff(xd_dma);
- set_dma_mode(xd_dma,mode);
- set_dma_addr(xd_dma, (unsigned long) buffer);
- set_dma_count(xd_dma,count);
-
- release_dma_lock(f);
-
- return (DMA_MODE); /* use DMA and INT */
-}
-
-/* xd_build: put stuff into an array in a format suitable for the controller */
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control)
-{
- cmdblk[0] = command;
- cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F);
- cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F);
- cmdblk[3] = cylinder & 0xFF;
- cmdblk[4] = count;
- cmdblk[5] = control;
-
- return (cmdblk);
-}
-
-static void xd_watchdog (unsigned long unused)
-{
- xd_error = 1;
- wake_up(&xd_wait_int);
-}
-
-/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
-{
- u_long expiry = jiffies + timeout;
- int success;
-
- xdc_busy = 1;
- while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
- schedule_timeout_uninterruptible(1);
- xdc_busy = 0;
- return (success);
-}
-
-static inline u_int xd_wait_for_IRQ (void)
-{
- unsigned long flags;
- xd_watchdog_int.expires = jiffies + 8 * HZ;
- add_timer(&xd_watchdog_int);
-
- flags=claim_dma_lock();
- enable_dma(xd_dma);
- release_dma_lock(flags);
-
- sleep_on(&xd_wait_int);
- del_timer(&xd_watchdog_int);
- xdc_busy = 0;
-
- flags=claim_dma_lock();
- disable_dma(xd_dma);
- release_dma_lock(flags);
-
- if (xd_error) {
- printk("xd: missed IRQ - command aborted\n");
- xd_error = 0;
- return (1);
- }
- return (0);
-}
-
-/* xd_command: handle all data transfers necessary for a single command */
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
-{
- u_char cmdblk[6],csb,complete = 0;
-
-#ifdef DEBUG_COMMAND
- printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
-#endif /* DEBUG_COMMAND */
-
- outb(0,XD_SELECT);
- outb(mode,XD_CONTROL);
-
- if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
- return (1);
-
- while (!complete) {
- if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
- return (1);
-
- switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
- case 0:
- if (mode == DMA_MODE) {
- if (xd_wait_for_IRQ())
- return (1);
- } else
- outb(outdata ? *outdata++ : 0,XD_DATA);
- break;
- case STAT_INPUT:
- if (mode == DMA_MODE) {
- if (xd_wait_for_IRQ())
- return (1);
- } else
- if (indata)
- *indata++ = inb(XD_DATA);
- else
- inb(XD_DATA);
- break;
- case STAT_COMMAND:
- outb(command ? *command++ : 0,XD_DATA);
- break;
- case STAT_COMMAND | STAT_INPUT:
- complete = 1;
- break;
- }
- }
- csb = inb(XD_DATA);
-
- if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout)) /* wait until deselected */
- return (1);
-
- if (csb & CSB_ERROR) { /* read sense data if error */
- xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
- if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
- printk("xd: warning! sense command failed!\n");
- }
-
-#ifdef DEBUG_COMMAND
- printk("xd_command: completed with csb = 0x%X\n",csb);
-#endif /* DEBUG_COMMAND */
-
- return (csb & CSB_ERROR);
-}
-
-static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
-{
- u_char cmdblk[6],i,count = 0;
-
- for (i = 0; i < XD_MAXDRIVES; i++) {
- xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
- if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
- msleep_interruptible(XD_INIT_DISK_DELAY);
-
- init_drive(count);
- count++;
-
- msleep_interruptible(XD_INIT_DISK_DELAY);
- }
- }
- return (count);
-}
-
-static void __init xd_manual_geo_set (u_char drive)
-{
- xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
- xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
- xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
-}
-
-static void __init xd_dtc_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xCA000: xd_iobase = 0x324;
- case 0xD0000: /*5150CX*/
- case 0xD8000: break; /*5150CX & 5150XL*/
- default: printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-
-static void __init xd_dtc5150cx_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS chip may be removed */
- static u_short geometry_table[][4] = {
- {0x200,8,0x200,0x100},
- {0x267,2,0x267,0x267},
- {0x264,4,0x264,0x80},
- {0x132,4,0x132,0x0},
- {0x132,2,0x80, 0x132},
- {0x177,8,0x177,0x0},
- {0x132,8,0x84, 0x0},
- {}, /* not used */
- {0x132,6,0x80, 0x100},
- {0x200,6,0x100,0x100},
- {0x264,2,0x264,0x80},
- {0x280,4,0x280,0x100},
- {0x2B9,3,0x2B9,0x2B9},
- {0x2B9,5,0x2B9,0x2B9},
- {0x280,6,0x280,0x100},
- {0x132,4,0x132,0x0}};
- u_char n;
-
- n = inb(XD_JUMPER);
- n = (drive ? n : (n >> 2)) & 0x33;
- n = (n | (n >> 2)) & 0x0F;
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else
- if (n != 7) {
- xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
- xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
- xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
- xd_info[drive].ecc = 0x0B; /* ecc length */
-#endif /* 0 */
- }
- else {
- printk("xd%c: undetermined drive geometry\n",'a'+drive);
- return;
- }
- xd_info[drive].control = 5; /* control byte */
- xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
- xd_recalibrate(drive);
-}
-
-static void __init xd_dtc_init_drive (u_char drive)
-{
- u_char cmdblk[6],buf[64];
-
- xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x0A]; /* heads */
- xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
-#if 0
- xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */
- xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */
- xd_info[drive].ecc = buf[0x0F]; /* ecc length */
-#endif /* 0 */
- xd_info[drive].control = 0; /* control byte */
-
- xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
- xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
- if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
- printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
- }
- else
- printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
-}
-
-static void __init xd_wd_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xCA000: xd_iobase = 0x324; break;
- case 0xCC000: xd_iobase = 0x328; break;
- case 0xCE000: xd_iobase = 0x32C; break;
- case 0xD0000: xd_iobase = 0x328; break; /* ? */
- case 0xD8000: xd_iobase = 0x32C; break; /* ? */
- default: printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */
-
- outb(0,XD_RESET); /* reset the controller */
-
- msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_wd_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS may be disabled */
- static u_short geometry_table[][4] = {
- {0x264,4,0x1C2,0x1C2}, /* common part */
- {0x132,4,0x099,0x0},
- {0x267,2,0x1C2,0x1C2},
- {0x267,4,0x1C2,0x1C2},
-
- {0x334,6,0x335,0x335}, /* 1004 series RLL */
- {0x30E,4,0x30F,0x3DC},
- {0x30E,2,0x30F,0x30F},
- {0x267,4,0x268,0x268},
-
- {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */
- {0x3DB,7,0x3DC,0x3DC},
- {0x264,4,0x265,0x265},
- {0x267,4,0x268,0x268}};
-
- u_char cmdblk[6],buf[0x200];
- u_char n = 0,rll,jumper_state,use_jumper_geo;
- u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
-
- jumper_state = ~(inb(0x322));
- if (jumper_state & 0x40)
- xd_irq = 9;
- rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
- xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x1AF]; /* heads */
- xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
-#if 0
- xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */
- xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */
- xd_info[drive].ecc = buf[0x1B4]; /* ecc length */
-#endif /* 0 */
- xd_info[drive].control = buf[0x1B5]; /* control byte */
- use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
- if (xd_geo[3*drive]) {
- xd_manual_geo_set(drive);
- xd_info[drive].control = rll ? 7 : 5;
- }
- else if (use_jumper_geo) {
- n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
- xd_info[drive].cylinders = geometry_table[n][0];
- xd_info[drive].heads = (u_char)(geometry_table[n][1]);
- xd_info[drive].control = rll ? 7 : 5;
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2];
- xd_info[drive].wprecomp = geometry_table[n][3];
- xd_info[drive].ecc = 0x0B;
-#endif /* 0 */
- }
- if (!wd_1002) {
- if (use_jumper_geo)
- xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
- geometry_table[n][2],geometry_table[n][3],0x0B);
- else
- xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
- ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
- }
- /* 1002 based RLL controller requests converted addressing, but reports physical
- (physical 26 sec., logical 17 sec.)
- 1004 based ???? */
- if (rll & wd_1002) {
- if ((xd_info[drive].cylinders *= 26,
- xd_info[drive].cylinders /= 17) > 1023)
- xd_info[drive].cylinders = 1023; /* 1024 ? */
-#if 0
- xd_info[drive].rwrite *= 26;
- xd_info[drive].rwrite /= 17;
- xd_info[drive].wprecomp *= 26
- xd_info[drive].wprecomp /= 17;
-#endif /* 0 */
- }
- }
- else
- printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);
-
-}
-
-static void __init xd_seagate_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xD0000: xd_iobase = 0x324; break;
- case 0xD8000: xd_iobase = 0x328; break;
- case 0xE0000: xd_iobase = 0x32C; break;
- default: printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
- xd_maxsectors = 0x40;
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-static void __init xd_seagate_init_drive (u_char drive)
-{
- u_char cmdblk[6],buf[0x200];
-
- xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
- if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
- xd_info[drive].heads = buf[0x04]; /* heads */
- xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03]; /* cylinders */
- xd_info[drive].sectors = buf[0x05]; /* sectors */
- xd_info[drive].control = 0; /* control byte */
- }
- else
- printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
-}
-
-/* Omti support courtesy Dirk Melchers */
-static void __init xd_omti_init_controller (unsigned int address)
-{
- switch (address) {
- case 0x00000:
- case 0xC8000: break; /*initial: 0x320 */
- case 0xD0000: xd_iobase = 0x324; break;
- case 0xD8000: xd_iobase = 0x328; break;
- case 0xE0000: xd_iobase = 0x32C; break;
- default: printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
-
- xd_maxsectors = 0x40;
-
- outb(0,XD_RESET); /* reset the controller */
-}
-
-static void __init xd_omti_init_drive (u_char drive)
-{
- /* gets infos from drive */
- xd_override_init_drive(drive);
-
- /* set other parameters, Hardcoded, not that nice :-) */
- xd_info[drive].control = 2;
-}
-
-/* Xebec support (AK) */
-static void __init xd_xebec_init_controller (unsigned int address)
-{
-/* iobase may be set manually in range 0x300 - 0x33C
- irq may be set manually to 2(9),3,4,5,6,7
- dma may be set manually to 1,2,3
- (How to detect them ???)
-BIOS address may be set manually in range 0x0 - 0xF8000
-If you need non-standard settings use the xd=... command */
-
- switch (address) {
- case 0x00000:
- case 0xC8000: /* initially: xd_iobase==0x320 */
- case 0xD0000:
- case 0xD2000:
- case 0xD4000:
- case 0xD6000:
- case 0xD8000:
- case 0xDA000:
- case 0xDC000:
- case 0xDE000:
- case 0xE0000: break;
- default: printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
- break;
- }
-
- xd_maxsectors = 0x01;
- outb(0,XD_RESET); /* reset the controller */
-
- msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_xebec_init_drive (u_char drive)
-{
- /* values from controller's BIOS - BIOS chip may be removed */
- static u_short geometry_table[][5] = {
- {0x132,4,0x080,0x080,0x7},
- {0x132,4,0x080,0x080,0x17},
- {0x264,2,0x100,0x100,0x7},
- {0x264,2,0x100,0x100,0x17},
- {0x132,8,0x080,0x080,0x7},
- {0x132,8,0x080,0x080,0x17},
- {0x264,4,0x100,0x100,0x6},
- {0x264,4,0x100,0x100,0x17},
- {0x2BC,5,0x2BC,0x12C,0x6},
- {0x3A5,4,0x3A5,0x3A5,0x7},
- {0x26C,6,0x26C,0x26C,0x7},
- {0x200,8,0x200,0x100,0x17},
- {0x400,5,0x400,0x400,0x7},
- {0x400,6,0x400,0x400,0x7},
- {0x264,8,0x264,0x200,0x17},
- {0x33E,7,0x33E,0x200,0x7}};
- u_char n;
-
- n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry
- is assumed for BOTH drives */
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else {
- xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
- xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
- xd_info[drive].sectors = 17; /* sectors */
-#if 0
- xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
- xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
- xd_info[drive].ecc = 0x0B; /* ecc length */
-#endif /* 0 */
- }
- xd_info[drive].control = geometry_table[n][4]; /* control byte */
- xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
- xd_recalibrate(drive);
-}
-
-/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
- etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
-static void __init xd_override_init_drive (u_char drive)
-{
- u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
- u_char cmdblk[6],i;
-
- if (xd_geo[3*drive])
- xd_manual_geo_set(drive);
- else {
- for (i = 0; i < 3; i++) {
- while (min[i] != max[i] - 1) {
- test[i] = (min[i] + max[i]) / 2;
- xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
- if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
- min[i] = test[i];
- else
- max[i] = test[i];
- }
- test[i] = min[i];
- }
- xd_info[drive].heads = (u_char) min[0] + 1;
- xd_info[drive].cylinders = (u_short) min[1] + 1;
- xd_info[drive].sectors = (u_char) min[2] + 1;
- }
- xd_info[drive].control = 0;
-}
-
-/* xd_setup: initialise controller from command line parameters */
-static void __init do_xd_setup (int *integers)
-{
- switch (integers[0]) {
- case 4: if (integers[4] < 0)
- nodma = 1;
- else if (integers[4] < 8)
- xd_dma = integers[4];
- case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
- xd_iobase = integers[3];
- case 2: if ((integers[2] > 0) && (integers[2] < 16))
- xd_irq = integers[2];
- case 1: xd_override = 1;
- if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs)))
- xd_type = integers[1];
- case 0: break;
- default:printk("xd: too many parameters for xd\n");
- }
- xd_maxsectors = 0x01;
-}
-
-/* xd_setparam: set the drive characteristics */
-static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
-{
- u_char cmdblk[14];
-
- xd_build(cmdblk,command,drive,0,0,0,0,0);
- cmdblk[6] = (u_char) (cylinders >> 8) & 0x03;
- cmdblk[7] = (u_char) (cylinders & 0xFF);
- cmdblk[8] = heads & 0x1F;
- cmdblk[9] = (u_char) (rwrite >> 8) & 0x03;
- cmdblk[10] = (u_char) (rwrite & 0xFF);
- cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03;
- cmdblk[12] = (u_char) (wprecomp & 0xFF);
- cmdblk[13] = ecc;
-
- /* Some controllers require geometry info as data, not command */
-
- if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2))
- printk("xd: error setting characteristics for xd%c\n", 'a'+drive);
-}
-
-
-#ifdef MODULE
-
-module_param_array(xd, int, NULL, 0);
-module_param_array(xd_geo, int, NULL, 0);
-module_param(nodma, bool, 0);
-
-MODULE_LICENSE("GPL");
-
-void cleanup_module(void)
-{
- int i;
- unregister_blkdev(XT_DISK_MAJOR, "xd");
- for (i = 0; i < xd_drives; i++) {
- del_gendisk(xd_gendisk[i]);
- put_disk(xd_gendisk[i]);
- }
- blk_cleanup_queue(xd_queue);
- release_region(xd_iobase,4);
- if (xd_drives) {
- free_irq(xd_irq, NULL);
- free_dma(xd_dma);
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
- }
-}
-#else
-
-static int __init xd_setup (char *str)
-{
- int ints[5];
- get_options (str, ARRAY_SIZE (ints), ints);
- do_xd_setup (ints);
- return 1;
-}
-
-/* xd_manual_geo_init: initialise drive geometry from command line parameters
- (used only for WD drives) */
-static int __init xd_manual_geo_init (char *str)
-{
- int i, integers[1 + 3*XD_MAXDRIVES];
-
- get_options (str, ARRAY_SIZE (integers), integers);
- if (integers[0]%3 != 0) {
- printk("xd: incorrect number of parameters for xd_geo\n");
- return 1;
- }
- for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
- xd_geo[i] = integers[i+1];
- return 1;
-}
-
-__setup ("xd=", xd_setup);
-__setup ("xd_geo=", xd_manual_geo_init);
-
-#endif /* MODULE */
-
-module_init(xd_init);
-MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
deleted file mode 100644
index 37cacef16e93..000000000000
--- a/drivers/block/xd.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef _LINUX_XD_H
-#define _LINUX_XD_H
-
-/*
- * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X).
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- *
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst.
- */
-
-#include <linux/interrupt.h>
-
-/* XT hard disk controller registers */
-#define XD_DATA (xd_iobase + 0x00) /* data RW register */
-#define XD_RESET (xd_iobase + 0x01) /* reset WO register */
-#define XD_STATUS (xd_iobase + 0x01) /* status RO register */
-#define XD_SELECT (xd_iobase + 0x02) /* select WO register */
-#define XD_JUMPER (xd_iobase + 0x02) /* jumper RO register */
-#define XD_CONTROL (xd_iobase + 0x03) /* DMAE/INTE WO register */
-#define XD_RESERVED (xd_iobase + 0x03) /* reserved */
-
-/* XT hard disk controller commands (incomplete list) */
-#define CMD_TESTREADY 0x00 /* test drive ready */
-#define CMD_RECALIBRATE 0x01 /* recalibrate drive */
-#define CMD_SENSE 0x03 /* request sense */
-#define CMD_FORMATDRV 0x04 /* format drive */
-#define CMD_VERIFY 0x05 /* read verify */
-#define CMD_FORMATTRK 0x06 /* format track */
-#define CMD_FORMATBAD 0x07 /* format bad track */
-#define CMD_READ 0x08 /* read */
-#define CMD_WRITE 0x0A /* write */
-#define CMD_SEEK 0x0B /* seek */
-
-/* Controller specific commands */
-#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */
-#define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */
-#define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */
-#define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */
-#define CMD_DTCREMAPTRK 0x11 /* assign alternate track (DTC 5150X only?) */
-#define CMD_DTCGETPARAM 0xFB /* get drive parameters (DTC 5150X only?) */
-#define CMD_DTCSETSTEP 0xFC /* set step rate (DTC 5150X only?) */
-#define CMD_DTCSETGEOM 0xFE /* set geometry data (DTC 5150X only?) */
-#define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */
-#define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */
-#define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */
-#define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) */
-
-/* Bits for command status byte */
-#define CSB_ERROR 0x02 /* error */
-#define CSB_LUN 0x20 /* logical Unit Number */
-
-/* XT hard disk controller status bits */
-#define STAT_READY 0x01 /* controller is ready */
-#define STAT_INPUT 0x02 /* data flowing from controller to host */
-#define STAT_COMMAND 0x04 /* controller in command phase */
-#define STAT_SELECT 0x08 /* controller is selected */
-#define STAT_REQUEST 0x10 /* controller requesting data */
-#define STAT_INTERRUPT 0x20 /* controller requesting interrupt */
-
-/* XT hard disk controller control bits */
-#define PIO_MODE 0x00 /* control bits to set for PIO */
-#define DMA_MODE 0x03 /* control bits to set for DMA & interrupt */
-
-#define XD_MAXDRIVES 2 /* maximum 2 drives */
-#define XD_TIMEOUT HZ /* 1 second timeout */
-#define XD_RETRIES 4 /* maximum 4 retries */
-
-#undef DEBUG /* define for debugging output */
-
-#ifdef DEBUG
- #define DEBUG_STARTUP /* debug driver initialisation */
- #define DEBUG_OVERRIDE /* debug override geometry detection */
- #define DEBUG_READWRITE /* debug each read/write command */
- #define DEBUG_OTHER /* debug misc. interrupt/DMA stuff */
- #define DEBUG_COMMAND /* debug each controller command */
-#endif /* DEBUG */
-
-/* this structure defines the XT drives and their types */
-typedef struct {
- u_char heads;
- u_short cylinders;
- u_char sectors;
- u_char control;
- int unit;
-} XD_INFO;
-
-/* this structure defines a ROM BIOS signature */
-typedef struct {
- unsigned int offset;
- const char *string;
- void (*init_controller)(unsigned int address);
- void (*init_drive)(u_char drive);
- const char *name;
-} XD_SIGNATURE;
-
-#ifndef MODULE
-static int xd_manual_geo_init (char *command);
-#endif /* MODULE */
-static u_char xd_detect (u_char *controller, unsigned int *address);
-static u_char xd_initdrives (void (*init_drive)(u_char drive));
-
-static void do_xd_request (struct request_queue * q);
-static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg);
-static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
-static void xd_recalibrate (u_char drive);
-
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id);
-static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
-static void xd_watchdog (unsigned long unused);
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
-
-/* card specific setup and geometry gathering code */
-static void xd_dtc_init_controller (unsigned int address);
-static void xd_dtc5150cx_init_drive (u_char drive);
-static void xd_dtc_init_drive (u_char drive);
-static void xd_wd_init_controller (unsigned int address);
-static void xd_wd_init_drive (u_char drive);
-static void xd_seagate_init_controller (unsigned int address);
-static void xd_seagate_init_drive (u_char drive);
-static void xd_omti_init_controller (unsigned int address);
-static void xd_omti_init_drive (u_char drive);
-static void xd_xebec_init_controller (unsigned int address);
-static void xd_xebec_init_drive (u_char drive);
-static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
-static void xd_override_init_drive (u_char drive);
-
-#endif /* _LINUX_XD_H */
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5ac841ff6cc7..de1f319f7bd7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -46,6 +46,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
#include "common.h"
/*
@@ -239,6 +240,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap);
BUG_ON(ret);
+ free_xenballooned_pages(segs_to_unmap, pages);
segs_to_unmap = 0;
}
@@ -527,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
GFP_KERNEL);
if (!persistent_gnt)
return -ENOMEM;
- persistent_gnt->page = alloc_page(GFP_KERNEL);
- if (!persistent_gnt->page) {
+ if (alloc_xenballooned_pages(1, &persistent_gnt->page,
+ false)) {
kfree(persistent_gnt);
return -ENOMEM;
}
@@ -879,7 +881,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
goto fail_response;
}
- preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 63980722db41..5e237f630c47 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
be->blkif = NULL;
}
+ kfree(be->mode);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
return 0;
@@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
= container_of(watch, struct backend_info, backend_watch);
struct xenbus_device *dev = be->dev;
int cdrom = 0;
+ unsigned long handle;
char *device_type;
DPRINTK("");
@@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
return;
}
- if ((be->major || be->minor) &&
- ((be->major != major) || (be->minor != minor))) {
- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
- be->major, be->minor, major, minor);
+ if (be->major | be->minor) {
+ if (be->major != major || be->minor != minor)
+ pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+ be->major, be->minor, major, minor);
return;
}
@@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
kfree(device_type);
}
- if (be->major == 0 && be->minor == 0) {
- /* Front end dir is a number, which is used as the handle. */
-
- char *p = strrchr(dev->otherend, '/') + 1;
- long handle;
- err = strict_strtoul(p, 0, &handle);
- if (err)
- return;
+ /* Front end dir is a number, which is used as the handle. */
+ err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+ if (err)
+ return;
- be->major = major;
- be->minor = minor;
+ be->major = major;
+ be->minor = minor;
- err = xen_vbd_create(be->blkif, handle, major, minor,
- (NULL == strchr(be->mode, 'w')), cdrom);
- if (err) {
- be->major = 0;
- be->minor = 0;
- xenbus_dev_fatal(dev, err, "creating vbd structure");
- return;
- }
+ err = xen_vbd_create(be->blkif, handle, major, minor,
+ !strchr(be->mode, 'w'), cdrom);
+ if (err)
+ xenbus_dev_fatal(dev, err, "creating vbd structure");
+ else {
err = xenvbd_sysfs_addif(dev);
if (err) {
xen_vbd_free(&be->blkif->vbd);
- be->major = 0;
- be->minor = 0;
xenbus_dev_fatal(dev, err, "creating sysfs entries");
- return;
}
+ }
+ if (err) {
+ kfree(be->mode);
+ be->mode = NULL;
+ be->major = 0;
+ be->minor = 0;
+ } else {
/* We're potentially connected now */
xen_update_blkif_status(be->blkif);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 11043c18ac5a..c3dae2e0f290 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -791,7 +791,7 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
- struct grant *persistent_gnt;
+ struct grant *persistent_gnt, *tmp;
struct llist_node *n;
/* Prevent new requests being issued until we fix things up. */
@@ -805,10 +805,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
- llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
+ persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
+ while (persistent_gnt) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
- kfree(persistent_gnt);
+ tmp = persistent_gnt;
+ n = persistent_gnt->node.next;
+ if (n)
+ persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
+ else
+ persistent_gnt = NULL;
+ kfree(tmp);
}
info->persistent_gnts_c = 0;
}
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..b8e2014cb9cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
};
static struct _intel_private {
- struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
+ phys_addr_t scratch_page_dma;
int refcount;
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+ phys_addr_t gma_bus_addr;
+ /* Size of memory reserved for graphics by the BIOS */
+ unsigned int stolen_size;
+ /* Total number of gtt entries. */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
- intel_private.base.scratch_page_dma = dma_addr;
+ intel_private.scratch_page_dma = dma_addr;
} else
- intel_private.base.scratch_page_dma = page_to_phys(page);
+ intel_private.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
- return intel_private.base.gtt_mappable_entries;
+ return intel_private.gtt_mappable_entries;
}
}
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
intel_gtt_teardown_scratch_page();
}
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+#endif
+ return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+ if (INTEL_GTT_GEN <= 2)
+ return false;
+
+ if (INTEL_GTT_GEN >= 6)
+ return false;
+
+ /* Reports of major corruption with ILK vt'd enabled */
+ if (needs_ilk_vtd_wa())
+ return false;
+
+ return true;
+}
+
static int intel_gtt_init(void)
{
u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
if (ret != 0)
return ret;
- intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
- intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+ intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.gtt_total_entries = intel_gtt_total_entries();
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
- intel_private.base.gtt_total_entries * 4,
- intel_private.base.gtt_mappable_entries * 4);
+ intel_private.gtt_total_entries * 4,
+ intel_private.gtt_mappable_entries * 4);
- gtt_map_size = intel_private.base.gtt_total_entries * 4;
+ gtt_map_size = intel_private.gtt_total_entries * 4;
intel_private.gtt = NULL;
- if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
+ if (intel_gtt_can_wc())
intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
- intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
- intel_private.base.stolen_size = intel_gtt_stolen_size();
+ intel_private.stolen_size = intel_gtt_stolen_size();
- intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+ intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
- intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
return 0;
}
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
unsigned int aper_size;
int i;
- aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
- / MB(1);
+ aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
for (i = 0; i < num_sizes; i++) {
if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
- agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
return 0;
}
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
if (intel_private.clear_fake_agp) {
- int start = intel_private.base.stolen_size / PAGE_SIZE;
- int end = intel_private.base.gtt_mappable_entries;
+ int start = intel_private.stolen_size / PAGE_SIZE;
+ int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (mem->page_count == 0)
goto out;
- if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+ if (pg_start + mem->page_count > intel_private.gtt_total_entries)
goto out_err;
if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
struct sg_table st;
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
- intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
- if (intel_private.base.do_idle_maps)
- return -ENODEV;
-
intel_gtt_clear_range(pg_start, mem->page_count);
- if (intel_private.base.needs_dmar) {
+ if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- const unsigned short gpu_devid = intel_private.pcidev->device;
-
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
-}
-
static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
break;
}
- if (needs_idle_maps())
- intel_private.base.do_idle_maps = 1;
-
intel_i9xx_setup_flush();
return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, unsigned long *mappable_end)
{
- return &intel_private.base;
+ *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+ *stolen_size = intel_private.stolen_size;
+ *mappable_base = intel_private.gma_bus_addr;
+ *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 052797b32bd3..01a5ca7425d7 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -181,7 +181,7 @@ static int dsp56k_upload(u_char __user *bin, int len)
static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int dev = iminor(inode) & 0x0f;
switch(dev)
@@ -244,7 +244,7 @@ static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int dev = iminor(inode) & 0x0f;
switch(dev)
@@ -306,7 +306,7 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co
static long dsp56k_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+ int dev = iminor(file_inode(file)) & 0x0f;
void __user *argp = (void __user *)arg;
switch(dev)
@@ -408,7 +408,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
#if 0
static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
{
- int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+ int dev = iminor(file_inode(file)) & 0x0f;
switch(dev)
{
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 85156dd0caee..65a8d96c0e93 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -125,7 +125,7 @@ static char dtlk_write_tts(char);
static ssize_t dtlk_read(struct file *file, char __user *buf,
size_t count, loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
char ch;
int i = 0, retries;
@@ -177,7 +177,7 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
}
#endif
- if (iminor(file->f_path.dentry->d_inode) != DTLK_MINOR)
+ if (iminor(file_inode(file)) != DTLK_MINOR)
return -EINVAL;
while (1) {
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index b65c10395959..10fd71ccf587 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -154,18 +154,7 @@ static struct virtio_driver virtio_rng_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_rng_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_rng_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_rng_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio random number driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1c7fdcd22a98..0ac9b45a585e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1208,6 +1208,16 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = 1;
+#endif
+static bool si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool si_trypci = 1;
+#endif
static bool si_trydefaults = 1;
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
@@ -1238,6 +1248,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
" Documentation/IPMI.txt in the kernel sources for the"
" gory details.");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via platform"
+ " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via pci");
+#endif
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
" default scan of the KCS and SMIC interface at the standard"
@@ -3371,13 +3400,15 @@ static int init_ipmi_si(void)
return 0;
initialized = 1;
- rv = platform_driver_register(&ipmi_driver);
- if (rv) {
- printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
- return rv;
+ if (si_tryplatform) {
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to register "
+ "driver: %d\n", rv);
+ return rv;
+ }
}
-
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
@@ -3400,24 +3431,31 @@ static int init_ipmi_si(void)
return 0;
#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
- else
- pci_registered = 1;
+ if (si_trypci) {
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register "
+ "PCI driver: %d\n", rv);
+ else
+ pci_registered = 1;
+ }
#endif
#ifdef CONFIG_ACPI
- pnp_register_driver(&ipmi_pnp_driver);
- pnp_registered = 1;
+ if (si_tryacpi) {
+ pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
+ }
#endif
#ifdef CONFIG_DMI
- dmi_find_bmc();
+ if (si_trydmi)
+ dmi_find_bmc();
#endif
#ifdef CONFIG_ACPI
- spmi_find_bmc();
+ if (si_tryacpi)
+ spmi_find_bmc();
#endif
/* We prefer devices with interrupts, but in the case of a machine
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index a741e418b456..dafd9ac6428f 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -294,7 +294,7 @@ static int lp_wait_ready(int minor, int nonblock)
static ssize_t lp_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct parport *port = lp_table[minor].dev->port;
char *kbuf = lp_table[minor].lp_buffer;
ssize_t retv = 0;
@@ -413,7 +413,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
DEFINE_WAIT(wait);
- unsigned int minor=iminor(file->f_path.dentry->d_inode);
+ unsigned int minor=iminor(file_inode(file));
struct parport *port = lp_table[minor].dev->port;
ssize_t retval = 0;
char *kbuf = lp_table[minor].lp_buffer;
@@ -679,7 +679,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
struct timeval par_timeout;
int ret;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
@@ -707,7 +707,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
struct timeval par_timeout;
int ret;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6f6e92a3102d..2c644afbcdd4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,7 +708,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
@@ -725,7 +725,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
default:
ret = -EINVAL;
}
- mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 522136d40843..190d4423653f 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -183,19 +183,12 @@ static const struct file_operations misc_fops = {
int misc_register(struct miscdevice * misc)
{
- struct miscdevice *c;
dev_t dev;
int err = 0;
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == misc->minor) {
- mutex_unlock(&misc_mtx);
- return -EBUSY;
- }
- }
if (misc->minor == MISC_DYNAMIC_MINOR) {
int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
@@ -205,6 +198,15 @@ int misc_register(struct miscdevice * misc)
}
misc->minor = DYNAMIC_MINORS - i - 1;
set_bit(i, misc_minors);
+ } else {
+ struct miscdevice *c;
+
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == misc->minor) {
+ mutex_unlock(&misc_mtx);
+ return -EBUSY;
+ }
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
index 808d44e9a32a..b07b119ae57f 100644
--- a/drivers/char/nsc_gpio.c
+++ b/drivers/char/nsc_gpio.c
@@ -41,7 +41,7 @@ void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
ssize_t nsc_gpio_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
- unsigned m = iminor(file->f_path.dentry->d_inode);
+ unsigned m = iminor(file_inode(file));
struct nsc_gpio_ops *amp = file->private_data;
struct device *dev = amp->dev;
size_t i;
@@ -104,7 +104,7 @@ ssize_t nsc_gpio_write(struct file *file, const char __user *data,
ssize_t nsc_gpio_read(struct file *file, char __user * buf,
size_t len, loff_t * ppos)
{
- unsigned m = iminor(file->f_path.dentry->d_inode);
+ unsigned m = iminor(file_inode(file));
int value;
struct nsc_gpio_ops *amp = file->private_data;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index a7584860e9a7..c115217c79ae 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1400,7 +1400,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pcmcia_device *link;
int size;
int rc;
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 1cd49241e60e..ae0b42b66e55 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -107,7 +107,7 @@ static inline void pp_enable_irq (struct pp_struct *pp)
static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char * kbuffer;
ssize_t bytes_read = 0;
@@ -189,7 +189,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
static ssize_t pp_write (struct file * file, const char __user * buf,
size_t count, loff_t * ppos)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
char * kbuffer;
ssize_t bytes_written = 0;
@@ -324,7 +324,7 @@ static enum ieee1284_phase init_phase (int mode)
static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
struct parport * port;
void __user *argp = (void __user *)arg;
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 588063ac9517..8cafa9ccd43f 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -312,7 +312,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err;
mutex_lock(&inode->i_mutex);
err = ps3flash_writeback(ps3flash_dev);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..f3223aac4df1 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -80,7 +80,7 @@ static int raw_open(struct inode *inode, struct file *filp)
filp->f_flags |= O_DIRECT;
filp->f_mapping = bdev->bd_inode->i_mapping;
if (++raw_devices[minor].inuse == 1)
- filp->f_path.dentry->d_inode->i_mapping =
+ file_inode(filp)->i_mapping =
bdev->bd_inode->i_mapping;
filp->private_data = bdev;
mutex_unlock(&raw_mutex);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 6386a98e43c1..bf2349dbbf7f 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -938,7 +938,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
}
if (ret > 0) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
inode->i_atime = current_fs_time(inode->i_sb);
}
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 34c63f85104d..47b9fdfcf083 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -164,7 +164,7 @@ static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t le
unsigned int minor;
char value;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
switch (minor) {
case 0:
value = get_led();
@@ -200,7 +200,7 @@ static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
int retval = 0;
char c;
- minor = iminor(file->f_path.dentry->d_inode);
+ minor = iminor(file_inode(file));
switch (minor) {
case 0:
type = TYPE_LED;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ee4dbeafb377..e905d5f53051 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -61,9 +61,6 @@ struct ports_driver_data {
/* List of all the devices we're handling */
struct list_head portdevs;
- /* Number of devices this driver is handling */
- unsigned int index;
-
/*
* This is used to keep track of the number of hvc consoles
* spawned by this driver. This number is given as the first
@@ -169,9 +166,6 @@ struct ports_device {
/* Array of per-port IO virtqueues */
struct virtqueue **in_vqs, **out_vqs;
- /* Used for numbering devices for sysfs and debugfs */
- unsigned int drv_index;
-
/* Major number for this device. Ports will be created as minors. */
int chr_major;
};
@@ -1415,7 +1409,7 @@ static int add_port(struct ports_device *portdev, u32 id)
}
port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
devt, port, "vport%up%u",
- port->portdev->drv_index, id);
+ port->portdev->vdev->index, id);
if (IS_ERR(port->dev)) {
err = PTR_ERR(port->dev);
dev_err(&port->portdev->vdev->dev,
@@ -1442,7 +1436,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* rproc_serial does not want the console port, only
* the generic port implementation.
*/
- port->host_connected = true;
+ port->host_connected = port->guest_connected = true;
else if (!use_multiport(port->portdev)) {
/*
* If we're not using multiport support,
@@ -1470,7 +1464,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* inspect a port's state at any time
*/
sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
+ port->portdev->vdev->index, id);
port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
pdrvdata.debugfs_dir,
port,
@@ -1958,16 +1952,12 @@ static int virtcons_probe(struct virtio_device *vdev)
portdev->vdev = vdev;
vdev->priv = portdev;
- spin_lock_irq(&pdrvdata_lock);
- portdev->drv_index = pdrvdata.index++;
- spin_unlock_irq(&pdrvdata_lock);
-
portdev->chr_major = register_chrdev(0, "virtio-portsdev",
&portdev_fops);
if (portdev->chr_major < 0) {
dev_err(&vdev->dev,
"Error %d registering chrdev for device %u\n",
- portdev->chr_major, portdev->drv_index);
+ portdev->chr_major, vdev->index);
err = portdev->chr_major;
goto free;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fabbfe1a9253..ed87b2405806 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
int level)
{
struct clk *child;
- struct hlist_node *tmp;
if (!c)
return;
clk_summary_show_one(s, c, level);
- hlist_for_each_entry(child, tmp, &c->children, child_node)
+ hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk *c;
- struct hlist_node *tmp;
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
mutex_lock(&prepare_lock);
- hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
- hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
- struct hlist_node *tmp;
if (!c)
return;
clk_dump_one(s, c, level);
- hlist_for_each_entry(child, tmp, &c->children, child_node) {
+ hlist_for_each_entry(child, &c->children, child_node) {
seq_printf(s, ",");
clk_dump_subtree(s, child, level + 1);
}
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
static int clk_dump(struct seq_file *s, void *data)
{
struct clk *c;
- struct hlist_node *tmp;
bool first_node = true;
seq_printf(s, "{");
mutex_lock(&prepare_lock);
- hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+ hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
seq_printf(s, ",");
first_node = false;
clk_dump_subtree(s, c, 0);
}
- hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+ hlist_for_each_entry(c, &clk_orphan_list, child_node) {
seq_printf(s, ",");
clk_dump_subtree(s, c, 0);
}
@@ -222,7 +218,6 @@ out:
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
struct clk *child;
- struct hlist_node *tmp;
int ret = -EINVAL;;
if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
if (ret)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_debug_create_subtree(child, clk->dentry);
ret = 0;
@@ -299,7 +294,6 @@ out:
static int __init clk_debug_init(void)
{
struct clk *clk;
- struct hlist_node *tmp;
struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
mutex_lock(&prepare_lock);
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_debug_create_subtree(clk, orphandir);
inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
static void clk_disable_unused_subtree(struct clk *clk)
{
struct clk *child;
- struct hlist_node *tmp;
unsigned long flags;
if (!clk)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
static int clk_disable_unused(void)
{
struct clk *clk;
- struct hlist_node *tmp;
mutex_lock(&prepare_lock);
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
struct clk *child;
struct clk *ret;
- struct hlist_node *tmp;
if (!strcmp(clk->name, name))
return clk;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_lookup_subtree(name, child);
if (ret)
return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
{
struct clk *root_clk;
struct clk *ret;
- struct hlist_node *tmp;
if (!name)
return NULL;
/* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
/* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
- struct hlist_node *tmp;
struct clk *child;
old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
if (clk->notifier_count && msg)
__clk_notify(clk, msg, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
__clk_recalc_rates(child, msg);
}
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
- struct hlist_node *tmp;
struct clk *child;
unsigned long new_rate;
int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
if (ret == NOTIFY_BAD)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
if (ret == NOTIFY_BAD)
break;
@@ -908,11 +896,10 @@ out:
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
struct clk *child;
- struct hlist_node *tmp;
clk->new_rate = new_rate;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
@@ -983,7 +970,6 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
- struct hlist_node *tmp;
struct clk *child, *fail_clk = NULL;
int ret = NOTIFY_DONE;
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
fail_clk = clk;
}
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
clk = clk_propagate_rate_change(child, event);
if (clk)
fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
struct clk *child;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
- struct hlist_node *tmp;
old_rate = clk->rate;
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_change_rate(child);
}
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
{
int i, ret = 0;
struct clk *orphan;
- struct hlist_node *tmp, *tmp2;
+ struct hlist_node *tmp2;
if (!clk)
return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* walk the list of orphan clocks and reparent any that are children of
* this clock
*/
- hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 435e54d55bbd..071f6eadfea2 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -240,6 +240,7 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
/* Timer 1 is used for events, register irq and clockevents */
setup_irq(irq, &nmdk_timer_irq);
nmdk_clkevt.cpumask = cpumask_of(0);
+ nmdk_clkevt.irq = irq;
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index a4605fd7e303..47a673070d70 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -27,8 +27,10 @@
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <asm/sched_clock.h>
+#include <asm/sched_clock.h>
+#include <asm/localtimer.h>
+#include <linux/percpu.h>
/*
* Timer block registers.
*/
@@ -49,6 +51,7 @@
#define TIMER1_RELOAD_OFF 0x0018
#define TIMER1_VAL_OFF 0x001c
+#define LCL_TIMER_EVENTS_STATUS 0x0028
/* Global timers are connected to the coherency fabric clock, and the
below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT 5
@@ -57,14 +60,17 @@
/*
* SoC-specific data.
*/
-static void __iomem *timer_base;
-static int timer_irq;
+static void __iomem *timer_base, *local_base;
+static unsigned int timer_clk;
+static bool timer25Mhz = true;
/*
* Number of timer ticks per jiffy.
*/
static u32 ticks_per_jiffy;
+static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+
static u32 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -78,24 +84,23 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
u32 u;
-
/*
* Clear clockevent timer interrupt.
*/
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
/*
* Setup new clockevent timer value.
*/
- writel(delta, timer_base + TIMER1_VAL_OFF);
+ writel(delta, local_base + TIMER0_VAL_OFF);
/*
* Enable the timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
- TIMER1_DIV(TIMER_DIVIDER_SHIFT));
- writel(u, timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ writel(u, local_base + TIMER_CTRL_OFF);
return 0;
}
@@ -107,37 +112,38 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
u32 u;
if (mode == CLOCK_EVT_MODE_PERIODIC) {
+
/*
* Setup timer to fire at 1/HZ intervals.
*/
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
/*
* Enable timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
- TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
- timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
+ local_base + TIMER_CTRL_OFF);
} else {
/*
* Disable timer.
*/
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
/*
* ACK pending timer interrupt.
*/
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
-
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
}
}
static struct clock_event_device armada_370_xp_clkevt = {
- .name = "armada_370_xp_tick",
+ .name = "armada_370_xp_per_cpu_tick",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.shift = 32,
.rating = 300,
@@ -150,32 +156,78 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
/*
* ACK timer interrupt and call event handler.
*/
+ struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
- writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
- armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
-static struct irqaction armada_370_xp_timer_irq = {
- .name = "armada_370_xp_tick",
- .flags = IRQF_DISABLED | IRQF_TIMER,
- .handler = armada_370_xp_timer_interrupt
+/*
+ * Setup the local clock events for a CPU.
+ */
+static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt)
+{
+ u32 u;
+ int cpu = smp_processor_id();
+
+ /* Use existing clock_event for cpu 0 */
+ if (!smp_processor_id())
+ return 0;
+
+ u = readl(local_base + TIMER_CTRL_OFF);
+ if (timer25Mhz)
+ writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+ else
+ writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+
+ evt->name = armada_370_xp_clkevt.name;
+ evt->irq = armada_370_xp_clkevt.irq;
+ evt->features = armada_370_xp_clkevt.features;
+ evt->shift = armada_370_xp_clkevt.shift;
+	evt->rating		= armada_370_xp_clkevt.rating;
+	evt->set_next_event	= armada_370_xp_clkevt_next_event;
+	evt->set_mode		= armada_370_xp_clkevt_mode;
+ evt->cpumask = cpumask_of(cpu);
+
+ *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
+
+ clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
+ enable_percpu_irq(evt->irq, 0);
+
+ return 0;
+}
+
+static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+{
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ disable_percpu_irq(evt->irq);
+}
+
+static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = {
+ .setup = armada_370_xp_timer_setup,
+ .stop = armada_370_xp_timer_stop,
};
void __init armada_370_xp_timer_init(void)
{
u32 u;
struct device_node *np;
- unsigned int timer_clk;
+ int res;
+
np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
+ local_base = of_iomap(np, 1);
if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
/* The fixed 25MHz timer is available so let's use it */
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u | TIMER0_25MHZ,
+ local_base + TIMER_CTRL_OFF);
u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
+ writel(u | TIMER0_25MHZ,
timer_base + TIMER_CTRL_OFF);
timer_clk = 25000000;
} else {
@@ -183,15 +235,23 @@ void __init armada_370_xp_timer_init(void)
struct clk *clk = of_clk_get(np, 0);
WARN_ON(IS_ERR(clk));
rate = clk_get_rate(clk);
+ u = readl(local_base + TIMER_CTRL_OFF);
+ writel(u & ~(TIMER0_25MHZ),
+ local_base + TIMER_CTRL_OFF);
+
u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
+ writel(u & ~(TIMER0_25MHZ),
timer_base + TIMER_CTRL_OFF);
+
timer_clk = rate / TIMER_DIVIDER;
+ timer25Mhz = false;
}
- /* We use timer 0 as clocksource, and timer 1 for
- clockevents */
- timer_irq = irq_of_parse_and_map(np, 1);
+ /*
+	 * We use timer 0 as clocksource, and private (local) timer 0
+ * for clockevents
+ */
+ armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
@@ -216,12 +276,26 @@ void __init armada_370_xp_timer_init(void)
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
- /*
- * Setup clockevent timer (interrupt-driven).
- */
- setup_irq(timer_irq, &armada_370_xp_timer_irq);
+ /* Register the clockevent on the private timer of CPU 0 */
armada_370_xp_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&armada_370_xp_clkevt,
timer_clk, 1, 0xfffffffe);
-}
+ percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+
+
+ /*
+ * Setup clockevent timer (interrupt-driven).
+ */
+ *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
+ res = request_percpu_irq(armada_370_xp_clkevt.irq,
+ armada_370_xp_timer_interrupt,
+ armada_370_xp_clkevt.name,
+ percpu_armada_370_xp_evt);
+ if (!res) {
+ enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
+#ifdef CONFIG_LOCAL_TIMERS
+ local_timer_register(&armada_370_xp_local_timer_ops);
+#endif
+ }
+}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d6b6ef350cb6..54e336de373b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -245,7 +245,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
arm_reg = devm_regulator_get(cpu_dev, "arm");
pu_reg = devm_regulator_get(cpu_dev, "pu");
soc_reg = devm_regulator_get(cpu_dev, "soc");
- if (!arm_reg || !pu_reg || !soc_reg) {
+ if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_node;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index c9d9d5c16f94..6f22ba51f969 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -332,7 +332,7 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
return -EINVAL;
dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_in_sg)
+ if (!dd->nb_out_sg)
return -EINVAL;
dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a22f1a9f895f..827913d7d33a 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -694,7 +694,7 @@ out_error_dma:
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
free_dma(crc->dma_ch);
out_error_irq:
- free_irq(crc->irq, crc->dev);
+ free_irq(crc->irq, crc);
out_error_unmap:
iounmap((void *)crc->regs);
out_error_free_mem:
@@ -720,10 +720,10 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev)
crypto_unregister_ahash(&algs);
tasklet_kill(&crc->done_task);
- iounmap((void *)crc->regs);
free_dma(crc->dma_ch);
if (crc->irq > 0)
- free_irq(crc->irq, crc->dev);
+ free_irq(crc->irq, crc);
+ iounmap((void *)crc->regs);
kfree(crc);
return 0;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e66e8ee5a9af..6aa425fe0ed5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -5,6 +5,7 @@
*
* Copyright (c) 2010 Nokia Corporation
* Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
@@ -19,28 +20,39 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-#include <linux/omap-dma.h>
+#define DST_MAXBURST 4
+#define DMA_MIN (DST_MAXBURST * sizeof(u32))
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
number. For example 7:0 */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-#define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04))
-#define AES_REG_IV(x) (0x20 + ((x) * 0x04))
+#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
+ ((x ^ 0x01) * 0x04))
+#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
-#define AES_REG_CTRL 0x30
-#define AES_REG_CTRL_CTR_WIDTH (1 << 7)
+#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
+#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
+#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
#define AES_REG_CTRL_CTR (1 << 6)
#define AES_REG_CTRL_CBC (1 << 5)
#define AES_REG_CTRL_KEY_SIZE (3 << 3)
@@ -48,14 +60,11 @@
#define AES_REG_CTRL_INPUT_READY (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
-#define AES_REG_DATA 0x34
-#define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04))
+#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
-#define AES_REG_REV 0x44
-#define AES_REG_REV_MAJOR 0xF0
-#define AES_REG_REV_MINOR 0x0F
+#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
-#define AES_REG_MASK 0x48
+#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE (1 << 6)
#define AES_REG_MASK_START (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
@@ -63,8 +72,7 @@
#define AES_REG_MASK_SOFTRESET (1 << 1)
#define AES_REG_AUTOIDLE (1 << 0)
-#define AES_REG_SYSSTATUS 0x4C
-#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)
+#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
#define DEFAULT_TIMEOUT (5*HZ)
@@ -72,6 +80,7 @@
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_GIV BIT(2)
+#define FLAGS_CTR BIT(3)
#define FLAGS_INIT BIT(4)
#define FLAGS_FAST BIT(5)
@@ -92,11 +101,39 @@ struct omap_aes_reqctx {
#define OMAP_AES_QUEUE_LENGTH 1
#define OMAP_AES_CACHE_SIZE 0
+struct omap_aes_algs_info {
+ struct crypto_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_aes_pdata {
+ struct omap_aes_algs_info *algs_info;
+ unsigned int algs_info_size;
+
+ void (*trigger)(struct omap_aes_dev *dd, int length);
+
+ u32 key_ofs;
+ u32 iv_ofs;
+ u32 ctrl_ofs;
+ u32 data_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+
+ u32 dma_enable_in;
+ u32 dma_enable_out;
+ u32 dma_start;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
struct omap_aes_dev {
struct list_head list;
unsigned long phys_base;
void __iomem *io_base;
- struct clk *iclk;
struct omap_aes_ctx *ctx;
struct device *dev;
unsigned long flags;
@@ -111,20 +148,24 @@ struct omap_aes_dev {
struct ablkcipher_request *req;
size_t total;
struct scatterlist *in_sg;
+ struct scatterlist in_sgl;
size_t in_offset;
struct scatterlist *out_sg;
+ struct scatterlist out_sgl;
size_t out_offset;
size_t buflen;
void *buf_in;
size_t dma_size;
int dma_in;
- int dma_lch_in;
+ struct dma_chan *dma_lch_in;
dma_addr_t dma_addr_in;
void *buf_out;
int dma_out;
- int dma_lch_out;
+ struct dma_chan *dma_lch_out;
dma_addr_t dma_addr_out;
+
+ const struct omap_aes_pdata *pdata;
};
/* keep registered devices data here */
@@ -160,19 +201,6 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
omap_aes_write(dd, offset, *value);
}
-static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
-{
- unsigned long timeout = jiffies + DEFAULT_TIMEOUT;
-
- while (!(omap_aes_read(dd, offset) & bit)) {
- if (time_is_before_jiffies(timeout)) {
- dev_err(dd->dev, "omap-aes timeout\n");
- return -ETIMEDOUT;
- }
- }
- return 0;
-}
-
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
/*
@@ -180,23 +208,9 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
* It may be long delays between requests.
* Device might go to off mode to save power.
*/
- clk_enable(dd->iclk);
+ pm_runtime_get_sync(dd->dev);
if (!(dd->flags & FLAGS_INIT)) {
- /* is it necessary to reset before every operation? */
- omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
- AES_REG_MASK_SOFTRESET);
- /*
- * prevent OCP bus error (SRESP) in case an access to the module
- * is performed while the module is coming out of soft reset
- */
- __asm__ __volatile__("nop");
- __asm__ __volatile__("nop");
-
- if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
- AES_REG_SYSSTATUS_RESETDONE))
- return -ETIMEDOUT;
-
dd->flags |= FLAGS_INIT;
dd->err = 0;
}
@@ -208,59 +222,75 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
- u32 val, mask;
+ u32 val, mask = 0;
err = omap_aes_hw_init(dd);
if (err)
return err;
- val = 0;
- if (dd->dma_lch_out >= 0)
- val |= AES_REG_MASK_DMA_OUT_EN;
- if (dd->dma_lch_in >= 0)
- val |= AES_REG_MASK_DMA_IN_EN;
-
- mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;
-
- omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
-
key32 = dd->ctx->keylen / sizeof(u32);
/* it seems a key should always be set even if it has not changed */
for (i = 0; i < key32; i++) {
- omap_aes_write(dd, AES_REG_KEY(i),
+ omap_aes_write(dd, AES_REG_KEY(dd, i),
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & FLAGS_CBC) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
+ if (dd->flags & FLAGS_CTR) {
+ val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+ mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
+ }
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
- mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
+ mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
AES_REG_CTRL_KEY_SIZE;
- omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
- /* IN */
- omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
+ return 0;
+}
- omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
+{
+ u32 mask, val;
- /* OUT */
- omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
+ val = dd->pdata->dma_start;
- omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+ if (dd->dma_lch_out != NULL)
+ val |= dd->pdata->dma_enable_out;
+ if (dd->dma_lch_in != NULL)
+ val |= dd->pdata->dma_enable_in;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
- return 0;
+}
+
+static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
+{
+ omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
+ omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
+
+ omap_aes_dma_trigger_omap2(dd, length);
+}
+
+static void omap_aes_dma_stop(struct omap_aes_dev *dd)
+{
+ u32 mask;
+
+ mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+ dd->pdata->dma_start;
+
+ omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -284,18 +314,10 @@ static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
return dd;
}
-static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
+static void omap_aes_dma_out_callback(void *data)
{
struct omap_aes_dev *dd = data;
- if (ch_status != OMAP_DMA_BLOCK_IRQ) {
- pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
- dd->err = -EIO;
- dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
- } else if (lch == dd->dma_lch_in) {
- return;
- }
-
/* dma_lch_out - completed */
tasklet_schedule(&dd->done_task);
}
@@ -303,9 +325,10 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
int err = -ENOMEM;
+ dma_cap_mask_t mask;
- dd->dma_lch_out = -1;
- dd->dma_lch_in = -1;
+ dd->dma_lch_out = NULL;
+ dd->dma_lch_in = NULL;
dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
@@ -334,23 +357,31 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
goto err_map_out;
}
- err = omap_request_dma(dd->dma_in, "omap-aes-rx",
- omap_aes_dma_callback, dd, &dd->dma_lch_in);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dd->dma_lch_in = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_in,
+ dd->dev, "rx");
+ if (!dd->dma_lch_in) {
+ dev_err(dd->dev, "Unable to request in DMA channel\n");
goto err_dma_in;
}
- err = omap_request_dma(dd->dma_out, "omap-aes-tx",
- omap_aes_dma_callback, dd, &dd->dma_lch_out);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
+
+ dd->dma_lch_out = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn,
+ &dd->dma_out,
+ dd->dev, "tx");
+ if (!dd->dma_lch_out) {
+ dev_err(dd->dev, "Unable to request out DMA channel\n");
goto err_dma_out;
}
return 0;
err_dma_out:
- omap_free_dma(dd->dma_lch_in);
+ dma_release_channel(dd->dma_lch_in);
err_dma_in:
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
@@ -367,8 +398,8 @@ err_alloc:
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
- omap_free_dma(dd->dma_lch_out);
- omap_free_dma(dd->dma_lch_in);
+ dma_release_channel(dd->dma_lch_out);
+ dma_release_channel(dd->dma_lch_in);
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
@@ -426,12 +457,15 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
return off;
}
-static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
+ struct scatterlist *in_sg, struct scatterlist *out_sg)
{
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
struct omap_aes_dev *dd = ctx->dd;
- int len32;
+ struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_slave_config cfg;
+ dma_addr_t dma_addr_in = sg_dma_address(in_sg);
+ int ret, length = sg_dma_len(in_sg);
pr_debug("len: %d\n", length);
@@ -441,30 +475,61 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_sync_single_for_device(dd->dev, dma_addr_in, length,
DMA_TO_DEVICE);
- len32 = DIV_ROUND_UP(length, sizeof(u32));
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+ cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = DST_MAXBURST;
+ cfg.dst_maxburst = DST_MAXBURST;
/* IN */
- omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
- len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
- OMAP_DMA_DST_SYNC);
+ ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_in) {
+ dev_err(dd->dev, "IN prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr_in, 0, 0);
+ /* No callback necessary */
+ tx_in->callback_param = dd;
/* OUT */
- omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
- len32, 1, OMAP_DMA_SYNC_PACKET,
- dd->dma_out, OMAP_DMA_SRC_SYNC);
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr_out, 0, 0);
+ tx_out->callback = omap_aes_dma_out_callback;
+ tx_out->callback_param = dd;
- omap_start_dma(dd->dma_lch_in);
- omap_start_dma(dd->dma_lch_out);
+ dmaengine_submit(tx_in);
+ dmaengine_submit(tx_out);
- /* start DMA or disable idle mode */
- omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
- AES_REG_MASK_START);
+ dma_async_issue_pending(dd->dma_lch_in);
+ dma_async_issue_pending(dd->dma_lch_out);
+
+ /* start DMA */
+ dd->pdata->trigger(dd, length);
return 0;
}
@@ -476,6 +541,8 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
+ struct scatterlist *in_sg, *out_sg;
+ int len32;
pr_debug("total: %d\n", dd->total);
@@ -514,6 +581,9 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
addr_in = sg_dma_address(dd->in_sg);
addr_out = sg_dma_address(dd->out_sg);
+ in_sg = dd->in_sg;
+ out_sg = dd->out_sg;
+
dd->flags |= FLAGS_FAST;
} else {
@@ -521,6 +591,27 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
dd->buflen, dd->total, 0);
+ len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
+
+ /*
+ * The data going into the AES module has been copied
+ * to a local buffer and the data coming out will go
+ * into a local buffer so set up local SG entries for
+ * both.
+ */
+ sg_init_table(&dd->in_sgl, 1);
+ dd->in_sgl.offset = dd->in_offset;
+ sg_dma_len(&dd->in_sgl) = len32;
+ sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
+
+ sg_init_table(&dd->out_sgl, 1);
+ dd->out_sgl.offset = dd->out_offset;
+ sg_dma_len(&dd->out_sgl) = len32;
+ sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
+
+ in_sg = &dd->in_sgl;
+ out_sg = &dd->out_sgl;
+
addr_in = dd->dma_addr_in;
addr_out = dd->dma_addr_out;
@@ -530,7 +621,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->total -= count;
- err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
if (err) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
@@ -545,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- clk_disable(dd->iclk);
+ pm_runtime_put_sync(dd->dev);
dd->flags &= ~FLAGS_BUSY;
req->base.complete(&req->base, err);
@@ -558,10 +649,10 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
pr_debug("total: %d\n", dd->total);
- omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
+ omap_aes_dma_stop(dd);
- omap_stop_dma(dd->dma_lch_in);
- omap_stop_dma(dd->dma_lch_out);
+ dmaengine_terminate_all(dd->dma_lch_in);
+ dmaengine_terminate_all(dd->dma_lch_out);
if (dd->flags & FLAGS_FAST) {
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
@@ -734,6 +825,16 @@ static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
return omap_aes_crypt(req, FLAGS_CBC);
}
+static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
+}
+
+static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return omap_aes_crypt(req, FLAGS_CTR);
+}
+
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
pr_debug("enter\n");
@@ -750,7 +851,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs[] = {
+static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-omap",
@@ -798,11 +899,213 @@ static struct crypto_alg algs[] = {
}
};
+static struct crypto_alg algs_ctr[] = {
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_aes_cra_init,
+ .cra_exit = omap_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .geniv = "eseqiv",
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ }
+},
+};
+
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
+ {
+ .algs_list = algs_ecb_cbc,
+ .size = ARRAY_SIZE(algs_ecb_cbc),
+ },
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
+ .trigger = omap_aes_dma_trigger_omap2,
+ .key_ofs = 0x1c,
+ .iv_ofs = 0x20,
+ .ctrl_ofs = 0x30,
+ .data_ofs = 0x34,
+ .rev_ofs = 0x44,
+ .mask_ofs = 0x48,
+ .dma_enable_in = BIT(2),
+ .dma_enable_out = BIT(3),
+ .dma_start = BIT(5),
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
+ {
+ .algs_list = algs_ecb_cbc,
+ .size = ARRAY_SIZE(algs_ecb_cbc),
+ },
+ {
+ .algs_list = algs_ctr,
+ .size = ARRAY_SIZE(algs_ctr),
+ },
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+ .trigger = omap_aes_dma_trigger_omap2,
+ .key_ofs = 0x1c,
+ .iv_ofs = 0x20,
+ .ctrl_ofs = 0x30,
+ .data_ofs = 0x34,
+ .rev_ofs = 0x44,
+ .mask_ofs = 0x48,
+ .dma_enable_in = BIT(2),
+ .dma_enable_out = BIT(3),
+ .dma_start = BIT(5),
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
+ .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
+ .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+ .trigger = omap_aes_dma_trigger_omap4,
+ .key_ofs = 0x3c,
+ .iv_ofs = 0x40,
+ .ctrl_ofs = 0x50,
+ .data_ofs = 0x60,
+ .rev_ofs = 0x80,
+ .mask_ofs = 0x84,
+ .dma_enable_in = BIT(5),
+ .dma_enable_out = BIT(6),
+ .major_mask = 0x0700,
+ .major_shift = 8,
+ .minor_mask = 0x003f,
+ .minor_shift = 0,
+};
+
+static const struct of_device_id omap_aes_of_match[] = {
+ {
+ .compatible = "ti,omap2-aes",
+ .data = &omap_aes_pdata_omap2,
+ },
+ {
+ .compatible = "ti,omap3-aes",
+ .data = &omap_aes_pdata_omap3,
+ },
+ {
+ .compatible = "ti,omap4-aes",
+ .data = &omap_aes_pdata_omap4,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_aes_of_match);
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+ struct device *dev, struct resource *res)
+{
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ int err = 0;
+
+ match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = of_address_to_resource(node, 0, res);
+ if (err < 0) {
+ dev_err(dev, "can't translate OF node address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->dma_out = -1; /* Dummy value that's unused */
+ dd->dma_in = -1; /* Dummy value that's unused */
+
+ dd->pdata = match->data;
+
+err:
+ return err;
+}
+#else
+static const struct of_device_id omap_aes_of_match[] = {
+ {},
+};
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+ struct device *dev, struct resource *res)
+{
+ return -EINVAL;
+}
+#endif
+
+static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
+ struct platform_device *pdev, struct resource *res)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int err = 0;
+
+ /* Get the base address */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ memcpy(res, r, sizeof(*res));
+
+ /* Get the DMA out channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(dev, "no DMA out resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_out = r->start;
+
+ /* Get the DMA in channel */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r) {
+ dev_err(dev, "no DMA in resource info\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dd->dma_in = r->start;
+
+ /* Only OMAP2/3 can be non-DT */
+ dd->pdata = &omap_aes_pdata_omap2;
+
+err:
+ return err;
+}
+
static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct resource *res;
+ struct crypto_alg *algp;
+ struct resource res;
int err = -ENOMEM, i, j;
u32 reg;
@@ -817,49 +1120,31 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_lock_init(&dd->lock);
crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
- /* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "invalid resource type\n");
- err = -ENODEV;
- goto err_res;
- }
- dd->phys_base = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res)
- dev_info(dev, "no DMA info\n");
- else
- dd->dma_out = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!res)
- dev_info(dev, "no DMA info\n");
- else
- dd->dma_in = res->start;
-
- /* Initializing the clock */
- dd->iclk = clk_get(dev, "ick");
- if (IS_ERR(dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
- err = PTR_ERR(dd->iclk);
+ err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
+ omap_aes_get_res_pdev(dd, pdev, &res);
+ if (err)
goto err_res;
- }
- dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ dd->io_base = devm_request_and_ioremap(dev, &res);
if (!dd->io_base) {
dev_err(dev, "can't ioremap\n");
err = -ENOMEM;
- goto err_io;
+ goto err_res;
}
+ dd->phys_base = res.start;
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ omap_aes_dma_stop(dd);
+
+ reg = omap_aes_read(dd, AES_REG_REV(dd));
+
+ pm_runtime_put_sync(dev);
- clk_enable(dd->iclk);
- reg = omap_aes_read(dd, AES_REG_REV);
dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
- (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
- clk_disable(dd->iclk);
+ (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
+ (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
@@ -873,26 +1158,32 @@ static int omap_aes_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- pr_debug("i: %d\n", i);
- err = crypto_register_alg(&algs[i]);
- if (err)
- goto err_algs;
- }
+ for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ algp = &dd->pdata->algs_info[i].algs_list[j];
+
+ pr_debug("reg alg: %s\n", algp->cra_name);
+ INIT_LIST_HEAD(&algp->cra_list);
+
+ err = crypto_register_alg(algp);
+ if (err)
+ goto err_algs;
- pr_info("probe() done\n");
+ dd->pdata->algs_info[i].registered++;
+ }
+ }
return 0;
err_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
omap_aes_dma_cleanup(dd);
err_dma:
tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
- iounmap(dd->io_base);
-err_io:
- clk_put(dd->iclk);
+ pm_runtime_disable(dev);
err_res:
kfree(dd);
dd = NULL;
@@ -904,7 +1195,7 @@ err_data:
static int omap_aes_remove(struct platform_device *pdev)
{
struct omap_aes_dev *dd = platform_get_drvdata(pdev);
- int i;
+ int i, j;
if (!dd)
return -ENODEV;
@@ -913,33 +1204,52 @@ static int omap_aes_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&list_lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_alg(
+ &dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
- iounmap(dd->io_base);
- clk_put(dd->iclk);
+ pm_runtime_disable(dd->dev);
kfree(dd);
dd = NULL;
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int omap_aes_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int omap_aes_resume(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_aes_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
+};
+
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
.remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
.owner = THIS_MODULE,
+ .pm = &omap_aes_pm_ops,
+ .of_match_table = omap_aes_of_match,
},
};
static int __init omap_aes_mod_init(void)
{
- pr_info("loading %s driver\n", "omap-aes");
-
return platform_driver_register(&omap_aes_driver);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9e6947bc296f..3d1611f5aecf 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -5,6 +5,7 @@
*
* Copyright (c) 2010 Nokia Corporation
* Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
@@ -22,12 +23,18 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -37,19 +44,17 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/omap-dma.h>
-
-#ifdef CONFIG_ARCH_OMAP1
-#include <mach/irqs.h>
-#endif
-
-#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
-#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
-
#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
-#define SHA_REG_DIGCNT 0x14
+#define DST_MAXBURST 16
+#define DMA_MIN (DST_MAXBURST * sizeof(u32))
+
+#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
+#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
+#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
+
+#define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04))
#define SHA_REG_CTRL 0x18
#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
@@ -59,19 +64,42 @@
#define SHA_REG_CTRL_INPUT_READY (1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
-#define SHA_REG_REV 0x5C
-#define SHA_REG_REV_MAJOR 0xF0
-#define SHA_REG_REV_MINOR 0x0F
+#define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)
-#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN (1 << 3)
#define SHA_REG_MASK_IT_EN (1 << 2)
#define SHA_REG_MASK_SOFTRESET (1 << 1)
#define SHA_REG_AUTOIDLE (1 << 0)
-#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+#define SHA_REG_MODE 0x44
+#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
+#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
+#define SHA_REG_MODE_CLOSE_HASH (1 << 4)
+#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
+#define SHA_REG_MODE_ALGO_MASK (3 << 1)
+#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
+#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
+
+#define SHA_REG_LENGTH 0x48
+
+#define SHA_REG_IRQSTATUS 0x118
+#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
+#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
+#define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
+#define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)
+
+#define SHA_REG_IRQENA 0x11C
+#define SHA_REG_IRQENA_CTX_RDY (1 << 3)
+#define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
+#define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
+#define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)
+
#define DEFAULT_TIMEOUT_INTERVAL HZ
/* mostly device flags */
@@ -82,20 +110,33 @@
#define FLAGS_INIT 4
#define FLAGS_CPU 5
#define FLAGS_DMA_READY 6
+#define FLAGS_AUTO_XOR 7
+#define FLAGS_BE32_SHA1 8
/* context flags */
#define FLAGS_FINUP 16
#define FLAGS_SG 17
-#define FLAGS_SHA1 18
-#define FLAGS_HMAC 19
-#define FLAGS_ERROR 20
-#define OP_UPDATE 1
-#define OP_FINAL 2
+#define FLAGS_MODE_SHIFT 18
+#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \
+ << (FLAGS_MODE_SHIFT - 1))
+#define FLAGS_HMAC 20
+#define FLAGS_ERROR 21
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
#define OMAP_ALIGN_MASK (sizeof(u32)-1)
#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
-#define BUFLEN PAGE_SIZE
+#define BUFLEN PAGE_SIZE
struct omap_sham_dev;
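The algorithm selection now lives in the request flags, pre-shifted so it can be written straight into the mode register. A short worked example of the arithmetic (illustrative, using only the values defined above):

/*
 *   SHA_REG_MODE_ALGO_SHA1_160 = (1 << 1), FLAGS_MODE_SHIFT = 18
 *   FLAGS_MODE_SHA1 = (1 << 1) << (18 - 1) = BIT(18)
 *   FLAGS_MODE_MASK = (3 << 1) << (18 - 1) = BIT(18) | BIT(19)
 * so the hardware encoding is recovered later with
 *   (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1) = (1 << 1)
 */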
@@ -104,7 +145,7 @@ struct omap_sham_reqctx {
unsigned long flags;
unsigned long op;
- u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
+ u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
size_t bufcnt;
size_t buflen;
@@ -112,6 +153,7 @@ struct omap_sham_reqctx {
/* walk state */
struct scatterlist *sg;
+ struct scatterlist sgl;
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
@@ -120,8 +162,8 @@ struct omap_sham_reqctx {
struct omap_sham_hmac_ctx {
struct crypto_shash *shash;
- u8 ipad[SHA1_MD5_BLOCK_SIZE];
- u8 opad[SHA1_MD5_BLOCK_SIZE];
+ u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
+ u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
};
struct omap_sham_ctx {
@@ -137,22 +179,56 @@ struct omap_sham_ctx {
#define OMAP_SHAM_QUEUE_LENGTH 1
+struct omap_sham_algs_info {
+ struct ahash_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_sham_pdata {
+ struct omap_sham_algs_info *algs_info;
+ unsigned int algs_info_size;
+ unsigned long flags;
+ int digest_size;
+
+ void (*copy_hash)(struct ahash_request *req, int out);
+ void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
+ int final, int dma);
+ void (*trigger)(struct omap_sham_dev *dd, size_t length);
+ int (*poll_irq)(struct omap_sham_dev *dd);
+ irqreturn_t (*intr_hdlr)(int irq, void *dev_id);
+
+ u32 odigest_ofs;
+ u32 idigest_ofs;
+ u32 din_ofs;
+ u32 digcnt_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+ u32 sysstatus_ofs;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
struct omap_sham_dev {
struct list_head list;
unsigned long phys_base;
struct device *dev;
void __iomem *io_base;
int irq;
- struct clk *iclk;
spinlock_t lock;
int err;
- int dma;
- int dma_lch;
+ unsigned int dma;
+ struct dma_chan *dma_lch;
struct tasklet_struct done_task;
unsigned long flags;
struct crypto_queue queue;
struct ahash_request *req;
+
+ const struct omap_sham_pdata *pdata;
};
struct omap_sham_drv {
@@ -200,21 +276,44 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
return 0;
}
-static void omap_sham_copy_hash(struct ahash_request *req, int out)
+static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
u32 *hash = (u32 *)ctx->digest;
int i;
- /* MD5 is almost unused. So copy sha1 size to reduce code */
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
if (out)
- hash[i] = omap_sham_read(ctx->dd,
- SHA_REG_DIGEST(i));
+ hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
else
- omap_sham_write(ctx->dd,
- SHA_REG_DIGEST(i), hash[i]);
+ omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
+ }
+}
+
+static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ int i;
+
+ if (ctx->flags & BIT(FLAGS_HMAC)) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ u32 *opad = (u32 *)bctx->opad;
+
+ for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
+ if (out)
+ opad[i] = omap_sham_read(dd,
+ SHA_REG_ODIGEST(i));
+ else
+ omap_sham_write(dd, SHA_REG_ODIGEST(i),
+ opad[i]);
+ }
}
+
+ omap_sham_copy_hash_omap2(req, out);
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
@@ -222,34 +321,44 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
u32 *in = (u32 *)ctx->digest;
u32 *hash = (u32 *)req->result;
- int i;
+ int i, d, big_endian = 0;
if (!hash)
return;
- if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
- /* SHA1 results are in big endian */
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ switch (ctx->flags & FLAGS_MODE_MASK) {
+ case FLAGS_MODE_MD5:
+ d = MD5_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA1:
+ /* OMAP2 SHA1 is big endian */
+ if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
+ big_endian = 1;
+ d = SHA1_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA224:
+ d = SHA224_DIGEST_SIZE / sizeof(u32);
+ break;
+ case FLAGS_MODE_SHA256:
+ d = SHA256_DIGEST_SIZE / sizeof(u32);
+ break;
+ default:
+ d = 0;
+ }
+
+ if (big_endian)
+ for (i = 0; i < d; i++)
hash[i] = be32_to_cpu(in[i]);
- } else {
- /* MD5 results are in little endian */
- for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ else
+ for (i = 0; i < d; i++)
hash[i] = le32_to_cpu(in[i]);
- }
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
- clk_enable(dd->iclk);
+ pm_runtime_get_sync(dd->dev);
if (!test_bit(FLAGS_INIT, &dd->flags)) {
- omap_sham_write_mask(dd, SHA_REG_MASK,
- SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
-
- if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
- SHA_REG_SYSSTATUS_RESETDONE))
- return -ETIMEDOUT;
-
set_bit(FLAGS_INIT, &dd->flags);
dd->err = 0;
}
@@ -257,23 +366,23 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
return 0;
}
-static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
int final, int dma)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val = length << 5, mask;
if (likely(ctx->digcnt))
- omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
- omap_sham_write_mask(dd, SHA_REG_MASK,
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd),
SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
/*
* Setting ALGO_CONST only for the first iteration
* and CLOSE_HASH only for the last one.
*/
- if (ctx->flags & BIT(FLAGS_SHA1))
+ if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
val |= SHA_REG_CTRL_ALGO;
if (!ctx->digcnt)
val |= SHA_REG_CTRL_ALGO_CONST;
@@ -286,6 +395,81 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
+static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
+{
+}
+
+static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
+{
+ return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
+}
+
+static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ omap_sham_write(dd, offset, *value);
+}
+
+static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val, mask;
+
+ /*
+ * Setting ALGO_CONST only for the first iteration and
+ * CLOSE_HASH only for the last one. Note that flags mode bits
+ * correspond to algorithm encoding in mode register.
+ */
+ val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
+ if (!ctx->digcnt) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ val |= SHA_REG_MODE_ALGO_CONSTANT;
+
+ if (ctx->flags & BIT(FLAGS_HMAC)) {
+ val |= SHA_REG_MODE_HMAC_KEY_PROC;
+ omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
+ (u32 *)bctx->ipad,
+ SHA1_BLOCK_SIZE / sizeof(u32));
+ ctx->digcnt += SHA1_BLOCK_SIZE;
+ }
+ }
+
+ if (final) {
+ val |= SHA_REG_MODE_CLOSE_HASH;
+
+ if (ctx->flags & BIT(FLAGS_HMAC))
+ val |= SHA_REG_MODE_HMAC_OUTER_HASH;
+ }
+
+ mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
+ SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
+ SHA_REG_MODE_HMAC_KEY_PROC;
+
+ dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
+ omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
+ omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd),
+ SHA_REG_MASK_IT_EN |
+ (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+}
+
+static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
+{
+ omap_sham_write(dd, SHA_REG_LENGTH, length);
+}
+
+static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
+{
+ return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
+ SHA_REG_IRQSTATUS_INPUT_RDY);
+}
+
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
size_t length, int final)
{
@@ -296,12 +480,13 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- omap_sham_write_ctrl(dd, length, final, 0);
+ dd->pdata->write_ctrl(dd, length, final, 0);
+ dd->pdata->trigger(dd, length);
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt += length;
- if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ if (dd->pdata->poll_irq(dd))
return -ETIMEDOUT;
if (final)
@@ -312,30 +497,73 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
len32 = DIV_ROUND_UP(length, sizeof(u32));
for (count = 0; count < len32; count++)
- omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+ omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);
return -EINPROGRESS;
}
+static void omap_sham_dma_callback(void *param)
+{
+ struct omap_sham_dev *dd = param;
+
+ set_bit(FLAGS_DMA_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
+}
+
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
- size_t length, int final)
+ size_t length, int final, int is_sg)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- int len32;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config cfg;
+ int len32, ret;
dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- len32 = DIV_ROUND_UP(length, sizeof(u32));
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = DST_MAXBURST;
+
+ ret = dmaengine_slave_config(dd->dma_lch, &cfg);
+ if (ret) {
+ pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
+ return ret;
+ }
+
+ len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
+
+ if (is_sg) {
+ /*
+ * The SG entry passed in may not have the 'length' member
+ * set correctly, so use a local SG entry (sgl) with the
+ * proper value for 'length' instead. If this is not done,
+ * the dmaengine may try to DMA the incorrect amount of data.
+ */
+ sg_init_table(&ctx->sgl, 1);
+ ctx->sgl.page_link = ctx->sg->page_link;
+ ctx->sgl.offset = ctx->sg->offset;
+ sg_dma_len(&ctx->sgl) = len32;
+ sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
+
+ tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ } else {
+ tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
- omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
- 1, OMAP_DMA_SYNC_PACKET, dd->dma,
- OMAP_DMA_DST_SYNC_PREFETCH);
+ if (!tx) {
+ dev_err(dd->dev, "prep_slave_sg/single() failed\n");
+ return -EINVAL;
+ }
- omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
+ tx->callback = omap_sham_dma_callback;
+ tx->callback_param = dd;
- omap_sham_write_ctrl(dd, length, final, 1);
+ dd->pdata->write_ctrl(dd, length, final, 1);
ctx->digcnt += length;
@@ -344,7 +572,10 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
- omap_start_dma(dd->dma_lch);
+ dmaengine_submit(tx);
+ dma_async_issue_pending(dd->dma_lch);
+
+ dd->pdata->trigger(dd, length);
return -EINPROGRESS;
}
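The length handed to the engine is rounded up to a whole number of DMA frames before the descriptor is prepared. A quick worked example of that rounding (illustrative):

/*
 * With DST_MAXBURST = 16, DMA_MIN = 16 * sizeof(u32) = 64 bytes, so
 * DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN rounds e.g. a 100-byte request
 * up to 128 bytes -- exactly two frames for the engine.
 */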
@@ -391,6 +622,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
struct omap_sham_reqctx *ctx,
size_t length, int final)
{
+ int ret;
+
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
@@ -400,8 +633,12 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
ctx->flags &= ~BIT(FLAGS_SG);
- /* next call does not fail... so no unmap in the case of error */
- return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+ ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
+ if (ret != -EINPROGRESS)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ return ret;
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
@@ -436,6 +673,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
unsigned int length, final, tail;
struct scatterlist *sg;
+ int ret;
if (!ctx->total)
return 0;
@@ -443,6 +681,15 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
if (ctx->bufcnt || ctx->offset)
return omap_sham_update_dma_slow(dd);
+ /*
+ * Don't use the sg interface when the transfer size is smaller
+ * than one DMA frame (DST_MAXBURST 32-bit words). Otherwise,
+ * the dmaengine infrastructure will calculate that it needs
+ * to transfer zero frames, which ultimately fails.
+ */
+ if (ctx->total < (DST_MAXBURST * sizeof(u32)))
+ return omap_sham_update_dma_slow(dd);
+
dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
ctx->digcnt, ctx->bufcnt, ctx->total);
@@ -480,8 +727,11 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
- /* next call does not fail... so no unmap in the case of error */
- return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
+ ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
+ if (ret != -EINPROGRESS)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return ret;
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -500,7 +750,8 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- omap_stop_dma(dd->dma_lch);
+ dmaengine_terminate_all(dd->dma_lch);
+
if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
if (ctx->sg->length == ctx->offset) {
@@ -542,18 +793,33 @@ static int omap_sham_init(struct ahash_request *req)
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
- if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
- ctx->flags |= BIT(FLAGS_SHA1);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_MD5;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA1;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->flags |= FLAGS_MODE_SHA256;
+ break;
+ }
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buflen = BUFLEN;
if (tctx->flags & BIT(FLAGS_HMAC)) {
- struct omap_sham_hmac_ctx *bctx = tctx->base;
+ if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ }
- memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
- ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
ctx->flags |= BIT(FLAGS_HMAC);
}
@@ -587,7 +853,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
- if (ctx->bufcnt <= 64)
+ if (ctx->bufcnt <= DMA_MIN)
/* faster to handle last block with cpu */
use_dma = 0;
@@ -630,7 +896,8 @@ static int omap_sham_finish(struct ahash_request *req)
if (ctx->digcnt) {
omap_sham_copy_ready_hash(req);
- if (ctx->flags & BIT(FLAGS_HMAC))
+ if ((ctx->flags & BIT(FLAGS_HMAC)) &&
+ !test_bit(FLAGS_AUTO_XOR, &dd->flags))
err = omap_sham_finish_hmac(req);
}
@@ -645,7 +912,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
struct omap_sham_dev *dd = ctx->dd;
if (!err) {
- omap_sham_copy_hash(req, 1);
+ dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
@@ -655,7 +922,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
/* atomic operation is not needed here */
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- clk_disable(dd->iclk);
+
+ pm_runtime_put_sync(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -702,19 +970,9 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
if (err)
goto err1;
- omap_set_dma_dest_params(dd->dma_lch, 0,
- OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
- omap_set_dma_dest_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_16);
-
- omap_set_dma_src_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_4);
-
if (ctx->digcnt)
/* request has changed - restore hash */
- omap_sham_copy_hash(req, 0);
+ dd->pdata->copy_hash(req, 0);
if (ctx->op == OP_UPDATE) {
err = omap_sham_update_req(dd);
@@ -853,7 +1111,21 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
+ struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
+
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
@@ -870,11 +1142,14 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
}
memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= 0x36;
- bctx->opad[i] ^= 0x5c;
+ if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
}
return err;
@@ -924,6 +1199,16 @@ static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
return omap_sham_cra_init_alg(tfm, "sha1");
}
+static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha224");
+}
+
+static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha256");
+}
+
static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
return omap_sham_cra_init_alg(tfm, "md5");
@@ -942,7 +1227,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
}
}
-static struct ahash_alg algs[] = {
+static struct ahash_alg algs_sha1_md5[] = {
{
.init = omap_sham_init,
.update = omap_sham_update,
@@ -1041,6 +1326,102 @@ static struct ahash_alg algs[] = {
}
};
+/* OMAP4 supports additional algorithms beyond those available on OMAP2 */
+static struct ahash_alg algs_sha224_sha256[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "omap-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "omap-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "omap-hmac-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = OMAP_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha224_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "omap-hmac-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = OMAP_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha256_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+};
+
static void omap_sham_done_task(unsigned long data)
{
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
@@ -1079,7 +1460,19 @@ finish:
omap_sham_finish_req(dd->req, err);
}
-static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
+{
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ dev_warn(dd->dev, "Interrupt when no active requests.\n");
+ } else {
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
struct omap_sham_dev *dd = dev_id;
@@ -1091,61 +1484,188 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
SHA_REG_CTRL_OUTPUT_READY);
omap_sham_read(dd, SHA_REG_CTRL);
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- dev_warn(dd->dev, "Interrupt when no active requests.\n");
- return IRQ_HANDLED;
- }
+ return omap_sham_irq_common(dd);
+}
- set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
- return IRQ_HANDLED;
+ omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
+
+ return omap_sham_irq_common(dd);
}
-static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
+ {
+ .algs_list = algs_sha1_md5,
+ .size = ARRAY_SIZE(algs_sha1_md5),
+ },
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
+ .algs_info = omap_sham_algs_info_omap2,
+ .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
+ .flags = BIT(FLAGS_BE32_SHA1),
+ .digest_size = SHA1_DIGEST_SIZE,
+ .copy_hash = omap_sham_copy_hash_omap2,
+ .write_ctrl = omap_sham_write_ctrl_omap2,
+ .trigger = omap_sham_trigger_omap2,
+ .poll_irq = omap_sham_poll_irq_omap2,
+ .intr_hdlr = omap_sham_irq_omap2,
+ .idigest_ofs = 0x00,
+ .din_ofs = 0x1c,
+ .digcnt_ofs = 0x14,
+ .rev_ofs = 0x5c,
+ .mask_ofs = 0x60,
+ .sysstatus_ofs = 0x64,
+ .major_mask = 0xf0,
+ .major_shift = 4,
+ .minor_mask = 0x0f,
+ .minor_shift = 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
+ {
+ .algs_list = algs_sha1_md5,
+ .size = ARRAY_SIZE(algs_sha1_md5),
+ },
+ {
+ .algs_list = algs_sha224_sha256,
+ .size = ARRAY_SIZE(algs_sha224_sha256),
+ },
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
+ .algs_info = omap_sham_algs_info_omap4,
+ .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
+ .flags = BIT(FLAGS_AUTO_XOR),
+ .digest_size = SHA256_DIGEST_SIZE,
+ .copy_hash = omap_sham_copy_hash_omap4,
+ .write_ctrl = omap_sham_write_ctrl_omap4,
+ .trigger = omap_sham_trigger_omap4,
+ .poll_irq = omap_sham_poll_irq_omap4,
+ .intr_hdlr = omap_sham_irq_omap4,
+ .idigest_ofs = 0x020,
+ .din_ofs = 0x080,
+ .digcnt_ofs = 0x040,
+ .rev_ofs = 0x100,
+ .mask_ofs = 0x110,
+ .sysstatus_ofs = 0x114,
+ .major_mask = 0x0700,
+ .major_shift = 8,
+ .minor_mask = 0x003f,
+ .minor_shift = 0,
+};
+
+static const struct of_device_id omap_sham_of_match[] = {
+ {
+ .compatible = "ti,omap2-sham",
+ .data = &omap_sham_pdata_omap2,
+ },
+ {
+ .compatible = "ti,omap4-sham",
+ .data = &omap_sham_pdata_omap4,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sham_of_match);
+
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+ struct device *dev, struct resource *res)
{
- struct omap_sham_dev *dd = data;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ int err = 0;
- if (ch_status != OMAP_DMA_BLOCK_IRQ) {
- pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
- dd->err = -EIO;
- clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
+ match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ err = -EINVAL;
+ goto err;
}
- set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ err = of_address_to_resource(node, 0, res);
+ if (err < 0) {
+ dev_err(dev, "can't translate OF node address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->irq = of_irq_to_resource(node, 0, NULL);
+ if (!dd->irq) {
+ dev_err(dev, "can't translate OF irq value\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dd->dma = -1; /* Dummy value that's unused */
+ dd->pdata = match->data;
+
+err:
+ return err;
}
+#else
+static const struct of_device_id omap_sham_of_match[] = {
+ {},
+};
-static int omap_sham_dma_init(struct omap_sham_dev *dd)
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+ struct device *dev, struct resource *res)
{
- int err;
+ return -EINVAL;
+}
+#endif
- dd->dma_lch = -1;
+static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
+ struct platform_device *pdev, struct resource *res)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int err = 0;
- err = omap_request_dma(dd->dma, dev_name(dd->dev),
- omap_sham_dma_callback, dd, &dd->dma_lch);
- if (err) {
- dev_err(dd->dev, "Unable to request DMA channel\n");
- return err;
+ /* Get the base address */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto err;
}
+ memcpy(res, r, sizeof(*res));
- return 0;
-}
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto err;
+ }
-static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
-{
- if (dd->dma_lch >= 0) {
- omap_free_dma(dd->dma_lch);
- dd->dma_lch = -1;
+ /* Get the DMA */
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto err;
}
+ dd->dma = r->start;
+
+ /* Only OMAP2/3 can be non-DT */
+ dd->pdata = &omap_sham_pdata_omap2;
+
+err:
+ return err;
}
static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
struct device *dev = &pdev->dev;
- struct resource *res;
+ struct resource res;
+ dma_cap_mask_t mask;
int err, i, j;
+ u32 rev;
dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
if (dd == NULL) {
@@ -1161,89 +1681,75 @@ static int omap_sham_probe(struct platform_device *pdev)
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
- dd->irq = -1;
-
- /* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
- goto res_err;
- }
- dd->phys_base = res->start;
-
- /* Get the DMA */
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res) {
- dev_err(dev, "no DMA resource info\n");
- err = -ENODEV;
+ err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
+ omap_sham_get_res_pdev(dd, pdev, &res);
+ if (err)
goto res_err;
- }
- dd->dma = res->start;
- /* Get the IRQ */
- dd->irq = platform_get_irq(pdev, 0);
- if (dd->irq < 0) {
- dev_err(dev, "no IRQ resource info\n");
- err = dd->irq;
+ dd->io_base = devm_request_and_ioremap(dev, &res);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
goto res_err;
}
+ dd->phys_base = res.start;
- err = request_irq(dd->irq, omap_sham_irq,
- IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
+ dev_name(dev), dd);
if (err) {
dev_err(dev, "unable to request irq.\n");
goto res_err;
}
- err = omap_sham_dma_init(dd);
- if (err)
- goto dma_err;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- /* Initializing the clock */
- dd->iclk = clk_get(dev, "ick");
- if (IS_ERR(dd->iclk)) {
- dev_err(dev, "clock intialization failed.\n");
- err = PTR_ERR(dd->iclk);
- goto clk_err;
+ dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &dd->dma, dev, "rx");
+ if (!dd->dma_lch) {
+ dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
+ dd->dma);
+ err = -ENXIO;
+ goto dma_err;
}
- dd->io_base = ioremap(dd->phys_base, SZ_4K);
- if (!dd->io_base) {
- dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
- goto io_err;
- }
+ dd->flags |= dd->pdata->flags;
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ rev = omap_sham_read(dd, SHA_REG_REV(dd));
+ pm_runtime_put_sync(&pdev->dev);
- clk_enable(dd->iclk);
dev_info(dev, "hw accel on OMAP rev %u.%u\n",
- (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
- omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
- clk_disable(dd->iclk);
+ (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
+ (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
spin_lock(&sham.lock);
list_add_tail(&dd->list, &sham.dev_list);
spin_unlock(&sham.lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- err = crypto_register_ahash(&algs[i]);
- if (err)
- goto err_algs;
+ for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ err = crypto_register_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
+ if (err)
+ goto err_algs;
+
+ dd->pdata->algs_info[i].registered++;
+ }
}
return 0;
err_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_ahash(&algs[j]);
- iounmap(dd->io_base);
-io_err:
- clk_put(dd->iclk);
-clk_err:
- omap_sham_dma_cleanup(dd);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
+ pm_runtime_disable(dev);
+ dma_release_channel(dd->dma_lch);
dma_err:
- if (dd->irq >= 0)
- free_irq(dd->irq, dd);
+ free_irq(dd->irq, dd);
res_err:
kfree(dd);
dd = NULL;
@@ -1256,7 +1762,7 @@ data_err:
static int omap_sham_remove(struct platform_device *pdev)
{
static struct omap_sham_dev *dd;
- int i;
+ int i, j;
dd = platform_get_drvdata(pdev);
if (!dd)
@@ -1264,33 +1770,51 @@ static int omap_sham_remove(struct platform_device *pdev)
spin_lock(&sham.lock);
list_del(&dd->list);
spin_unlock(&sham.lock);
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_ahash(&algs[i]);
+ for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ crypto_unregister_ahash(
+ &dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
- iounmap(dd->io_base);
- clk_put(dd->iclk);
- omap_sham_dma_cleanup(dd);
- if (dd->irq >= 0)
- free_irq(dd->irq, dd);
+ pm_runtime_disable(&pdev->dev);
+ dma_release_channel(dd->dma_lch);
+ free_irq(dd->irq, dd);
kfree(dd);
dd = NULL;
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int omap_sham_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int omap_sham_resume(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_sham_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
+};
+
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
.remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
.owner = THIS_MODULE,
+ .pm = &omap_sham_pm_ops,
+ .of_match_table = omap_sham_of_match,
},
};
static int __init omap_sham_mod_init(void)
{
- pr_info("loading %s driver\n", "omap-sham");
-
return platform_driver_register(&omap_sham_driver);
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 49ad8cbade69..4b314326f48a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -580,7 +580,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
resource_size(res), pdev->name))
return -EBUSY;
- pdata->clk = clk_get(dev, "secss");
+ pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
dev_err(dev, "failed to find secss clock source\n");
return -ENOENT;
@@ -645,7 +645,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
err_irq:
clk_disable(pdata->clk);
- clk_put(pdata->clk);
s5p_dev = NULL;
platform_set_drvdata(pdev, NULL);
@@ -667,7 +666,6 @@ static int s5p_aes_remove(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
clk_disable(pdata->clk);
- clk_put(pdata->clk);
s5p_dev = NULL;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bc6f5faa1e9e..819dfda88236 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
raw_spin_lock_irqsave(&dca_lock, flags);
+ if (list_empty(&dca_domains)) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
list_del(&dca->node);
pci_rc = dca_pci_rc_from_dev(dev);
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 591b6597c00a..126cf295b198 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -53,22 +53,19 @@ void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
{
struct device *cd;
- int err = 0;
+ int ret;
-idr_try_again:
- if (!idr_pre_get(&dca_idr, GFP_KERNEL))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
spin_lock(&dca_idr_lock);
- err = idr_get_new(&dca_idr, dca, &dca->id);
+
+ ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ dca->id = ret;
+
spin_unlock(&dca_idr_lock);
- switch (err) {
- case 0:
- break;
- case -EAGAIN:
- goto idr_try_again;
- default:
- return err;
- }
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
if (IS_ERR(cd)) {
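The hunk above is the common conversion from the old idr_pre_get()/idr_get_new() retry loop to idr_preload()/idr_alloc(). A minimal sketch of that pattern in isolation (my_idr, my_lock and obj are assumptions; start = end = 0 asks for any free id, and <linux/idr.h> is assumed to be included):

static int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; preallocate outside the lock */
	spin_lock(my_lock);
	id = idr_alloc(my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(my_lock);
	idr_preload_end();

	return id;			/* >= 0 on success, negative errno on failure */
}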
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 40179e749f08..80b69971cf28 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
- depends on ARM_AMBA && EXPERIMENTAL
+ depends on ARM_AMBA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -215,8 +214,8 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config SIRF_DMA
- tristate "CSR SiRFprimaII DMA support"
- depends on ARCH_PRIMA2
+ tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+ depends on ARCH_SIRF
select DMA_ENGINE
help
Enable support for the CSR SiRFprimaII DMA engine.
@@ -328,6 +327,10 @@ config DMA_ENGINE
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_OF
+ def_bool y
+ depends on OF
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 642d96736cf5..488e3ff85b52 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_OF) += of-dma.o
+
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc5791476b..8bad254a498d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,7 +83,7 @@
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
#include "dmaengine.h"
#include "virt-dma.h"
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_dma_chan *plchan)
{
LIST_HEAD(head);
- struct pl08x_txd *txd;
vchan_get_all_descriptors(&plchan->vc, &head);
-
- while (!list_empty(&head)) {
- txd = list_first_entry(&head, struct pl08x_txd, vd.node);
- list_del(&txd->vd.node);
- pl08x_desc_free(&txd->vd);
- }
+ vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13a02f4425b0..6e13f262139a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -778,7 +778,7 @@ err:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
- goto err_out;
return 0;
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
return NULL;
}
+ if (unlikely(!is_slave_direction(direction)))
+ goto err_out;
+
if (sconfig->direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
/* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(reg_width, buf_addr,
- period_len, direction))
+ if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
goto err_out;
/* build cyclic linked list */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 116e4adffb08..0eb3c1388667 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
- " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
- lli->saddr, lli->daddr,
- lli->ctrla, lli->ctrlb, lli->dscr);
+ dev_crit(chan2dev(&atchan->chan_common),
+ " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
+ lli->saddr, lli->daddr,
+ lli->ctrla, lli->ctrlb, lli->dscr);
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a2f079aca550..797940e532ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2355,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- /* FIXME: should be conditional on ret != DMA_SUCCESS? */
+ if (ret == DMA_SUCCESS)
+ return ret;
+
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
if (ret == DMA_IN_PROGRESS && cohc->stopped)
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 3e96610e18e2..702112d547c8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
dma_addr_t phy;
if (len == 0)
- goto err;
+ return NULL;
spin_lock(&pool->lock);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44c70a4..b2728d6ba2fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,7 @@
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
- } while (status == DMA_IN_PROGRESS);
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
return status;
}
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+{
+ /* If device-tree is present get slave info from here */
+ if (dev->of_node)
+ return of_dma_request_slave_channel(dev->of_node, name);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
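A hypothetical caller-side sketch of the new helper (pdev and the "rx" channel name are assumptions; the lookup only succeeds when the device has an of_node with a matching DT DMA binding):

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		dev_warn(&pdev->dev, "no slave DMA channel, falling back to PIO\n");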
@@ -667,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
{
int rc;
- idr_retry:
- if (!idr_pre_get(&dma_idr, GFP_KERNEL))
- return -ENOMEM;
mutex_lock(&dma_list_mutex);
- rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
- mutex_unlock(&dma_list_mutex);
- if (rc == -EAGAIN)
- goto idr_retry;
- else if (rc != 0)
- return rc;
- return 0;
+ rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
+ if (rc >= 0)
+ device->dev_id = rc;
+
+ mutex_unlock(&dma_list_mutex);
+ return rc < 0 ? rc : 0;
}
/**
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d7fba7..a2c8904b63ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+ unsigned int val = min(x, y);
+
+ return val % 2 ? val : val - 1;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
+ struct dma_device *dev;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
smp_rmb();
chan = thread->chan;
+ dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
- src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
- src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
- struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
@@ -526,7 +536,9 @@ err_srcs:
thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ if (ret)
+ dmaengine_terminate_all(chan);
+
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
}
/* terminate all transfers on specified channels */
- dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(dtc->chan);
kfree(dtc);
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..51c3ea2ed41a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
/*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
@@ -9,11 +8,13 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
return slave ? slave->src_master : 1;
}
+#define SRC_MASTER 0
+#define DST_MASTER 1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_slave *dws = chan->private;
+ unsigned int m;
+
+ if (master == SRC_MASTER)
+ m = dwc_get_sms(dws);
+ else
+ m = dwc_get_dms(dws);
+
+ return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_slave *__slave = (_chan->private); \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- int _dms = dwc_get_dms(__slave); \
- int _sms = dwc_get_sms(__slave); \
- u8 _smsize = __slave ? _sconfig->src_maxburst : \
+ bool _is_slave = is_slave_direction(_dwc->direction); \
+ int _dms = dwc_get_master(_chan, DST_MASTER); \
+ int _sms = dwc_get_master(_chan, SRC_MASTER); \
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
- u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
*/
#define NR_DESCS_PER_CHANNEL 64
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+ return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan)
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
- return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+ return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
return ret;
}
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
- struct dw_desc *child;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- child->txd.phys, sizeof(child->lli),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- desc->txd.phys, sizeof(desc->lli),
- DMA_TO_DEVICE);
-}
-
/*
* Move a descriptor, including any children, to the free list.
* `desc' must not be on any lists.
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
if (desc) {
struct dw_desc *child;
- dwc_sync_desc_for_cpu(dwc, desc);
-
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfghi = dws->cfg_hi;
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
- if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ if (dwc->direction == DMA_MEM_TO_DEV)
cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
- else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ else if (dwc->direction == DMA_DEV_TO_MEM)
cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
}
@@ -223,7 +225,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
channel_readl(dwc, CTL_LO));
}
-
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, ctllo);
channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
channel_set_bit(dw, CH_EN, dwc->mask);
+
+ /* Move pointer to next descriptor */
+ dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- dwc->tx_list = &first->tx_list;
- dwc->tx_node_active = first->tx_list.next;
+ dwc->residue = first->total_len;
+ dwc->tx_node_active = &first->tx_list;
+ /* Submit first block */
dwc_do_single_block(dwc, first);
return;
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
param = txd->callback_param;
}
- dwc_sync_desc_for_cpu(dwc, desc);
-
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
async_tx_ack(&child->txd);
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!dwc->chan.private) {
+ if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
}
}
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback_required && callback)
+ if (callback)
callback(param);
}
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, desc, true);
}
+/* Returns how many bytes were already received from source */
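+/*
+ * Assumed decoding: BLOCK_TS in CTL_HI counts source-width items already
+ * transferred, and CTL_LO bits 6:4 hold SRC_TR_WIDTH, so scaling by
+ * 1 << width converts that item count to bytes.
+ */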
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+ u32 ctlhi = channel_readl(dwc, CTL_HI);
+ u32 ctllo = channel_readl(dwc, CTL_LO);
+
+ return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
dma_addr_t llp;
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ struct list_head *head, *active = dwc->tx_node_active;
+
+ /*
+ * We must be inside the first active descriptor here;
+ * otherwise something is really wrong.
+ */
+ desc = dwc_first_active(dwc);
+
+ head = &desc->tx_list;
+ if (active != head) {
+ /* Update desc to reflect last sent one */
+ if (active != head->next)
+ desc = to_dw_desc(active->prev);
+
+ dwc->residue -= desc->len;
+
+ child = to_dw_desc(active);
+
+ /* Submit next block */
+ dwc_do_single_block(dwc, child);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* We are done here */
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ }
+
+ dwc->residue = 0;
+
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_complete_all(dw, dwc);
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
if (list_empty(&dwc->active_list)) {
+ dwc->residue = 0;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+ /* initial residue value */
+ dwc->residue = desc->total_len;
+
/* check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- list_for_each_entry(child, &desc->tx_list, desc_node)
+ dwc->residue -= desc->len;
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
if (child->lli.llp == llp) {
/* Currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
+ dwc->residue -= child->len;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+ dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
/*
- * KERN_CRITICAL may seem harsh, but since this only happens
+ * WARN may seem harsh, but since this only happens
* when someone submits a bad physical address in a
* descriptor, we should consider ourselves lucky that the
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- "Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " cookie: %d\n", bad_desc->txd.cookie);
+ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+ " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if (status_xfer & (1 << i)) {
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
- if (dwc->tx_node_active != dwc->tx_list) {
- struct dw_desc *desc =
- list_entry(dwc->tx_node_active,
- struct dw_desc,
- desc_node);
-
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- /* move pointer to next descriptor */
- dwc->tx_node_active =
- dwc->tx_node_active->next;
-
- dwc_do_single_block(dwc, desc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- continue;
- } else {
- /* we are done here */
- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
- }
- }
- spin_unlock_irqrestore(&dwc->lock, flags);
-
+ else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
- }
}
/*
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
@@ -730,8 +758,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
- dwc->dw->data_width[dwc_get_dms(dws)]);
+ dwc->direction = DMA_MEM_TO_MEM;
+
+ data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+ dwc_get_data_width(chan, DST_MASTER));
src_width = dst_width = min_t(unsigned int, data_width,
dwc_fast_fls(src | dest | len));
@@ -756,32 +786,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.dar = dest + offset;
desc->lli.ctllo = ctllo;
desc->lli.ctlhi = xfer_count;
+ desc->len = xfer_count << src_width;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
prev = desc;
}
-
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
first->txd.flags = flags;
- first->len = len;
+ first->total_len = len;
return &first->txd;
@@ -796,7 +819,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
@@ -811,9 +833,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- if (unlikely(!dws || !sg_len))
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
return NULL;
+ dwc->direction = direction;
+
prev = first = NULL;
switch (direction) {
@@ -828,7 +852,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+ data_width = dwc_get_data_width(chan, SRC_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -861,15 +885,12 @@ slave_sg_todev_fill_desc:
}
desc->lli.ctlhi = dlen >> mem_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -891,7 +912,7 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+ data_width = dwc_get_data_width(chan, DST_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -923,15 +944,12 @@ slave_sg_fromdev_fill_desc:
len = 0;
}
desc->lli.ctlhi = dlen >> reg_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -951,11 +969,7 @@ slave_sg_fromdev_fill_desc:
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
- first->len = total_len;
+ first->total_len = total_len;
return &first->txd;
@@ -985,11 +999,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- /* Check if it is chan is configured for slave transfers */
- if (!chan->private)
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(sconfig->direction))
return -EINVAL;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+ dwc->direction = sconfig->direction;
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -997,6 +1012,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
return 0;
}
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
+
+ dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+ dwc->paused = false;
+}
+
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -1004,18 +1039,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
unsigned long flags;
- u32 cfglo;
LIST_HEAD(list);
if (cmd == DMA_PAUSE) {
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
- cpu_relax();
+ dwc_chan_pause(dwc);
- dwc->paused = true;
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_RESUME) {
if (!dwc->paused)
@@ -1023,9 +1053,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
@@ -1035,7 +1063,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
dwc_chan_disable(dw, dwc);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
/* active_list entries will end up before queued entries */
list_splice_init(&dwc->queue, &list);
@@ -1055,6 +1083,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return 0;
}
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+ unsigned long flags;
+ u32 residue;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ residue = dwc->residue;
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+ residue -= dwc_get_sent(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return residue;
+}
+
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
@@ -1071,7 +1114,7 @@ dwc_tx_status(struct dma_chan *chan,
}
if (ret != DMA_SUCCESS)
- dma_set_residue(txstate, dwc_first_active(dwc)->len);
+ dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused)
return DMA_PAUSED;
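For reference, a client observes the residue that dwc_get_residue() now tracks through the standard status query; a minimal sketch with illustrative names:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_debug("transfer pending, %u bytes left\n", state.residue);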
@@ -1114,22 +1157,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ dma_addr_t phys;
+
spin_unlock_irqrestore(&dwc->lock, flags);
- desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
- if (!desc) {
- dev_info(chan2dev(chan),
- "only allocated %d descriptors\n", i);
- spin_lock_irqsave(&dwc->lock, flags);
- break;
- }
+ desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+ if (!desc)
+ goto err_desc_alloc;
+
+ memset(desc, 0, sizeof(struct dw_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
- desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
- sizeof(desc->lli), DMA_TO_DEVICE);
+ desc->txd.phys = phys;
+
dwc_desc_put(dwc, desc);
spin_lock_irqsave(&dwc->lock, flags);
@@ -1141,6 +1184,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
return i;
+
+err_desc_alloc:
+ dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+ return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
@@ -1172,14 +1220,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
- dma_unmap_single(chan2parent(chan), desc->txd.phys,
- sizeof(desc->lli), DMA_TO_DEVICE);
- kfree(desc);
+ dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
}
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
+bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ static struct dw_dma *last_dw;
+ static char *last_bus_id;
+ int i = -1;
+
+ /*
+ * The dmaengine framework calls this routine for every channel of every
+ * DMA controller until true is returned. If the 'param' bus_id is not
+ * registered with a given controller (dw), there is no need to run this
+ * function for the remaining channels of that dw.
+ *
+ * This is done by saving the parameters of the last failure: if dw and
+ * param are the same, i.e. we are retrying on the same dw with a
+ * different channel, return false.
+ */
+ if ((last_dw == dw) && (last_bus_id == param))
+ return false;
+ /*
+ * Return true:
+ * - If dw_dma's platform data is not filled with slave info, then all
+ * dma controllers are fine for transfer.
+ * - Or if param is NULL
+ */
+ if (!dw->sd || !param)
+ return true;
+
+ while (++i < dw->sd_count) {
+ if (!strcmp(dw->sd[i].bus_id, param)) {
+ chan->private = &dw->sd[i];
+ last_dw = NULL;
+ last_bus_id = NULL;
+
+ return true;
+ }
+ }
+
+ last_dw = dw;
+ last_bus_id = param;
+ return false;
+}
+EXPORT_SYMBOL(dw_dma_generic_filter);
+
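A hedged example of requesting a channel through this filter with the generic dmaengine API; the "uart-tx" bus_id is purely illustrative and would have to match an entry in the controller's slave info:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, dw_dma_generic_filter, "uart-tx");
	if (!chan)
		return -ENODEV;	/* no DesignWare channel with that bus_id */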
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
@@ -1299,6 +1389,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
retval = ERR_PTR(-EINVAL);
+ if (unlikely(!is_slave_direction(direction)))
+ goto out_err;
+
+ dwc->direction = direction;
+
if (direction == DMA_MEM_TO_DEV)
reg_width = __ffs(sconfig->dst_addr_width);
else
@@ -1313,8 +1408,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
- goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1372,20 +1465,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
desc->lli.ctlhi = (period_len >> reg_width);
cdesc->desc[i] = desc;
- if (last) {
+ if (last)
last->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- last->txd.phys, sizeof(last->lli),
- DMA_TO_DEVICE);
- }
last = desc;
}
/* lets make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
- dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
- sizeof(last->lli), DMA_TO_DEVICE);
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
"period %zu periods %d\n", (unsigned long long)buf_addr,
@@ -1463,6 +1550,91 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *sn, *cn, *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ struct dw_dma_slave *sd;
+ u32 tmp, arr[4];
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
+ return NULL;
+
+ if (of_property_read_bool(np, "is_private"))
+ pdata->is_private = true;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32(np, "nr_masters", &tmp)) {
+ if (tmp > 4)
+ return NULL;
+
+ pdata->nr_masters = tmp;
+ }
+
+ if (!of_property_read_u32_array(np, "data_width", arr,
+ pdata->nr_masters))
+ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+
+ /* parse slave data */
+ sn = of_find_node_by_name(np, "slave_info");
+ if (!sn)
+ return pdata;
+
+ /* calculate number of slaves */
+ tmp = of_get_child_count(sn);
+ if (!tmp)
+ return NULL;
+
+ sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
+ if (!sd)
+ return NULL;
+
+ pdata->sd = sd;
+ pdata->sd_count = tmp;
+
+ for_each_child_of_node(sn, cn) {
+ sd->dma_dev = &pdev->dev;
+ of_property_read_string(cn, "bus_id", &sd->bus_id);
+ of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
+ of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
+ if (!of_property_read_u32(cn, "src_master", &tmp))
+ sd->src_master = tmp;
+
+ if (!of_property_read_u32(cn, "dst_master", &tmp))
+ sd->dst_master = tmp;
+ sd++;
+ }
+
+ return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
static int dw_probe(struct platform_device *pdev)
{
struct dw_dma_platform_data *pdata;
@@ -1478,10 +1650,6 @@ static int dw_probe(struct platform_device *pdev)
int err;
int i;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
-
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
return -EINVAL;
@@ -1494,9 +1662,33 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ /* Apply default dma_mask if needed */
+ if (!pdev->dev.dma_mask) {
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
dw_params = dma_read_byaddr(regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+ dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata)
+ pdata = dw_dma_parse_dt(pdev);
+
+ if (!pdata && autocfg) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Fill platform data with the default values */
+ pdata->is_private = true;
+ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+ pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
+
if (autocfg)
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
else
@@ -1513,6 +1705,8 @@ static int dw_probe(struct platform_device *pdev)
clk_prepare_enable(dw->clk);
dw->regs = regs;
+ dw->sd = pdata->sd;
+ dw->sd_count = pdata->sd_count;
/* get hardware configuration parameters */
if (autocfg) {
@@ -1544,6 +1738,14 @@ static int dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dw);
+ /* create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+ sizeof(struct dw_desc), 4, 0);
+ if (!dw->desc_pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
INIT_LIST_HEAD(&dw->dma.channels);
@@ -1575,7 +1777,7 @@ static int dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
- dwc->dw = dw;
+ dwc->direction = DMA_TRANS_NONE;
/* hardware configuration */
if (autocfg) {
@@ -1584,6 +1786,9 @@ static int dw_probe(struct platform_device *pdev)
dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
DWC_PARAMS);
+ dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+ dwc_params);
+
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
* up to 0x0a for 4095. */
@@ -1627,8 +1832,8 @@ static int dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
- printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), nr_channels);
+ dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+ nr_channels);
dma_async_device_register(&dw->dma);
@@ -1658,7 +1863,7 @@ static void dw_shutdown(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
}
@@ -1667,7 +1872,7 @@ static int dw_suspend_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
return 0;
@@ -1680,6 +1885,7 @@ static int dw_resume_noirq(struct device *dev)
clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
return 0;
}
@@ -1700,7 +1906,13 @@ static const struct of_device_id dw_dma_id_table[] = {
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
+static const struct platform_device_id dw_dma_ids[] = {
+ { "INTL9C60", 0 },
+ { }
+};
+
static struct platform_driver dw_driver = {
+ .probe = dw_probe,
.remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
@@ -1708,11 +1920,12 @@ static struct platform_driver dw_driver = {
.pm = &dw_dev_pm_ops,
.of_match_table = of_match_ptr(dw_dma_id_table),
},
+ .id_table = dw_dma_ids,
};
static int __init dw_init(void)
{
- return platform_driver_probe(&dw_driver, dw_probe);
+ return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 88965597b7d0..88dd8eb31957 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#define DW_DMA_MAX_NR_CHANNELS 8
@@ -184,15 +185,15 @@ enum dw_dmac_flags {
};
struct dw_dma_chan {
- struct dma_chan chan;
- void __iomem *ch_regs;
- u8 mask;
- u8 priority;
- bool paused;
- bool initialized;
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ enum dma_transfer_direction direction;
+ bool paused;
+ bool initialized;
/* software emulation of the LLP transfers */
- struct list_head *tx_list;
struct list_head *tx_node_active;
spinlock_t lock;
@@ -202,6 +203,7 @@ struct dw_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ u32 residue;
struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -212,9 +214,6 @@ struct dw_dma_chan {
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
-
- /* backlink to dw_dma */
- struct dw_dma *dw;
};
static inline struct dw_dma_chan_regs __iomem *
@@ -236,9 +235,14 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
+ struct dma_pool *desc_pool;
struct tasklet_struct tasklet;
struct clk *clk;
+ /* slave information */
+ struct dw_dma_slave *sd;
+ unsigned int sd_count;
+
u8 all_chan_mask;
/* hardware configuration */
@@ -293,8 +297,11 @@ struct dw_desc {
struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
+ size_t total_len;
};
+#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
+
static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
{
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index f424298f1ac5..cd7e3280fadd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -69,9 +69,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
- dma_addr_t addr;
- int addr_width;
- int maxburst;
+ struct dma_slave_config cfg;
};
struct edma_cc {
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan)
return 0;
}
-
static int edma_slave_config(struct edma_chan *echan,
- struct dma_slave_config *config)
+ struct dma_slave_config *cfg)
{
- if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
- if (config->direction == DMA_MEM_TO_DEV) {
- if (config->dst_addr)
- echan->addr = config->dst_addr;
- if (config->dst_addr_width)
- echan->addr_width = config->dst_addr_width;
- if (config->dst_maxburst)
- echan->maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- if (config->src_addr)
- echan->addr = config->src_addr;
- if (config->src_addr_width)
- echan->addr_width = config->src_addr_width;
- if (config->src_maxburst)
- echan->maxburst = config->src_maxburst;
- }
+ memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
return 0;
}
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
+ dma_addr_t dev_addr;
+ enum dma_slave_buswidth dev_width;
+ u32 burst;
struct scatterlist *sg;
int i;
int acnt, bcnt, ccnt, src, dst, cidx;
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
- if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ if (direction == DMA_DEV_TO_MEM) {
+ dev_addr = echan->cfg.src_addr;
+ dev_width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ dev_addr = echan->cfg.dst_addr;
+ dev_width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else {
+ dev_err(dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
dev_err(dev, "Undefined slave buswidth\n");
return NULL;
}
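Since the channel now keeps the whole dma_slave_config, a client-side sketch of the expected setup sequence (the address, width and burst values are placeholders):

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,		/* placeholder */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);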
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
}
- acnt = echan->addr_width;
+ acnt = dev_width;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
* buffer transfers using only one PaRAM set.
*/
- if (echan->maxburst == 1) {
+ if (burst == 1) {
edesc->absync = false;
ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
*/
} else {
edesc->absync = true;
- bcnt = echan->maxburst;
+ bcnt = burst;
ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (direction == DMA_MEM_TO_DEV) {
src = sg_dma_address(sg);
- dst = echan->addr;
+ dst = dev_addr;
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
} else {
- src = echan->addr;
+ src = dev_addr;
dst = sg_dma_address(sg);
src_bidx = 0;
src_cidx = 0;
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
- .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
- .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -641,6 +638,8 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
+ pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+ pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -650,6 +649,8 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
+ pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+ pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index bcfde400904f..f2bf8c0c4675 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_MEM_TO_DEV &&
- data->direction != DMA_DEV_TO_MEM)
+ if (!is_slave_direction(data->direction))
return -EINVAL;
break;
default:
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a68a8ba87e6..1879a5942bfc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+ flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
dev_err(dev, "Self-test prep failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
async_tx_ack(tx);
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test setup failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
dma->device_issue_pending(dma_chan);
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
+unmap_dma:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 087935f1565f..53a4cbb78f47 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
#define IOAT_KOBJ_INIT_FAIL 3
#define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
+ #define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 82d4e306c32e..b925e1b1d139 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete)) {
- __cleanup(ioat, phys_complete);
- } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat2_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat2_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
- if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+ if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
/* make descriptor updates visible before advancing ioat->head,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3e9d66920eb3..e8336cce360b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat3_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
unsigned long tmo;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
+ u8 op = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test xor */
+ op = IOAT_OP_XOR;
+
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test xor setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
+ op = IOAT_OP_XOR_VAL;
+
/* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
if (xor_val_result != 0) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
/* test memset */
+ op = IOAT_OP_FILL;
+
dma_addr = dma_map_page(dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test memset prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test memset setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
xor_val_result = 0;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test 2nd zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
if (xor_val_result != SUM_CHECK_P_RESULT) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ goto free_resources;
+dma_unmap:
+ if (op == IOAT_OP_XOR) {
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_XOR_VAL) {
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_FILL)
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3, and clear any
- * pending errors
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ /* clear any pending errors */
err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
if (err) {
dev_err(&pdev->dev, "channel error register unreachable\n");
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev)
}
}
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+ if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
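For context, a minimal sketch of the map/prep/unmap convention the reworked self-test follows, illustrated with a plain memcpy and assumed names (skip_unmap_sketch, chan, dev, src, dst are placeholders, not the ioat code itself): the caller maps the pages, passes the skip-unmap flags so the engine leaves the mappings alone, and unmaps explicitly once the result has been checked.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int skip_unmap_sketch(struct dma_device *dma, struct dma_chan *chan,
			     struct device *dev, struct page *src,
			     struct page *dst)
{
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dst;

	dma_src = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	dma_dst = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* the engine must not unmap for us: the caller owns the mappings */
	tx = dma->device_prep_dma_memcpy(chan, dma_dst, dma_src, PAGE_SIZE,
					 DMA_PREP_INTERRUPT |
					 DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);

	/* ... submit, issue_pending, poll device_tx_status ... */

	/* explicit unmap, mirroring the new dma_unmap: label above */
	dma_unmap_page(dev, dma_src, PAGE_SIZE, DMA_TO_DEVICE);
	dma_unmap_page(dev, dma_dst, PAGE_SIZE, DMA_FROM_DEVICE);

	return tx ? 0 : -ENODEV;
}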
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index d2ff3fda0b18..7cb74c62c719 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -35,6 +35,17 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 4f686c527ab6..71c7ecd80fac 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
-
static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eacb8be99812..7dafb9f3785f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data)
struct iop_adma_chan *chan = data;
unsigned long status = iop_chan_get_status(chan);
- dev_printk(KERN_ERR, chan->device->common.dev,
+ dev_err(chan->device->common.dev,
"error ( %s%s%s%s%s%s%s)\n",
iop_is_err_int_parity(status, chan) ? "int_parity " : "",
iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
@@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 0) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 1) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_iop_chan;
}
- dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
- "( %s%s%s%s%s%s%s)\n",
- dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
- dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
goto out;
@@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
- "failed to allocate null descriptor\n");
+ dev_err(iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
@@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ dev_err(iop_chan->device->common.dev,
"failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 65855373cee6..8c61d17a86bf 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
+ if (!is_slave_direction(direction)) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a5ee37d5320f..2e284a4438bc 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
struct ipu_irq_bank {
unsigned int control;
unsigned int status;
- spinlock_t lock;
struct ipu *ipu;
};
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dc7466563507..c26699f9c4df 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -618,10 +618,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
else if (maxburst == 32)
chan->dcmd |= DCMD_BURST32;
- if (cfg) {
- chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
- }
+ chan->dir = cfg->direction;
+ chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
break;
default:
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e17fad03cb80..d64ae14f2706 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
break;
default:
dev_err(mv_chan_to_devp(chan),
- "error: unsupported operation %d.\n",
+ "error: unsupported operation %d\n",
type);
BUG();
return;
@@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_err(mv_chan_to_devp(chan),
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_err(mv_chan_to_devp(chan),
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_err(mv_chan_to_devp(chan),
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
@@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
}
dev_err(mv_chan_to_devp(chan),
- "error on chan %d. intr cause 0x%08x.\n",
+ "error on chan %d. intr cause 0x%08x\n",
chan->idx, intr_cause);
mv_dump_xor_regs(chan);
@@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
dev_err(dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
+ i, ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR: "
- "( %s%s%s%s)\n",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
return mv_chan;
@@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
- dev_notice(&pdev->dev, "Marvell XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
if (!xordev)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 9f02e794b12b..8f6d30d37c45 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -109,7 +109,7 @@ struct mxs_dma_chan {
struct dma_chan chan;
struct dma_async_tx_descriptor desc;
struct tasklet_struct tasklet;
- int chan_irq;
+ unsigned int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
struct mxs_dma_ccw *ccw;
struct scatterlist *sg;
- int i, j;
+ u32 i, j;
u32 *pio;
bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
@@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int num_periods = buf_len / period_len;
- int i = 0, buf = 0;
+ u32 num_periods = buf_len / period_len;
+ u32 i = 0, buf = 0;
if (mxs_chan->status == DMA_IN_PROGRESS)
return NULL;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
new file mode 100644
index 000000000000..69d04d28b1ef
--- /dev/null
+++ b/drivers/dma/of-dma.c
@@ -0,0 +1,267 @@
+/*
+ * Device tree helpers for DMA request / controller
+ *
+ * Based on of_gpio.c
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+static LIST_HEAD(of_dma_list);
+static DEFINE_SPINLOCK(of_dma_lock);
+
+/**
+ * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Finds a DMA controller with a matching device node and number of dma cells
+ * in a list of registered DMA controllers. If a match is found, the use_count
+ * variable is increased and a valid pointer to the stored DMA data is returned.
+ * A NULL pointer is returned if no match is found.
+ */
+static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if ((ofdma->of_node == dma_spec->np) &&
+ (ofdma->of_dma_nbcells == dma_spec->args_count)) {
+ ofdma->use_count++;
+ spin_unlock(&of_dma_lock);
+ return ofdma;
+ }
+
+ spin_unlock(&of_dma_lock);
+
+ pr_debug("%s: can't find DMA controller %s\n", __func__,
+ dma_spec->np->full_name);
+
+ return NULL;
+}
+
+/**
+ * of_dma_put_controller - Decrement use count for a registered DMA controller
+ * @of_dma: pointer to DMA controller data
+ *
+ * Decrements the use_count variable in the DMA data structure. This function
+ * should be called only when a valid pointer is returned from
+ * of_dma_get_controller() and no further accesses to data referenced by that
+ * pointer are needed.
+ */
+static void of_dma_put_controller(struct of_dma *ofdma)
+{
+ spin_lock(&of_dma_lock);
+ ofdma->use_count--;
+ spin_unlock(&of_dma_lock);
+}
+
+/**
+ * of_dma_controller_register - Register a DMA controller to DT DMA helpers
+ * @np: device node of DMA controller
+ * @of_dma_xlate: translation function which converts a phandle
+ * arguments list into a dma_chan structure
+ * @data: pointer to controller-specific data to be used by
+ * translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with an appropriate of_dma_controller_free()
+ * call.
+ */
+int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ struct of_dma *ofdma;
+ int nbcells;
+
+ if (!np || !of_dma_xlate) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return -EINVAL;
+ }
+
+ ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
+ if (!ofdma)
+ return -ENOMEM;
+
+ nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
+ if (!nbcells) {
+ pr_err("%s: #dma-cells property is missing or invalid\n",
+ __func__);
+ kfree(ofdma);
+ return -EINVAL;
+ }
+
+ ofdma->of_node = np;
+ ofdma->of_dma_nbcells = nbcells;
+ ofdma->of_dma_xlate = of_dma_xlate;
+ ofdma->of_dma_data = data;
+ ofdma->use_count = 0;
+
+ /* Now queue the of_dma controller structure in the list */
+ spin_lock(&of_dma_lock);
+ list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
+ spin_unlock(&of_dma_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_register);
+
+/**
+ * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
+ * @np: device node of DMA controller
+ *
+ * Memory allocated by of_dma_controller_register() is freed here.
+ */
+int of_dma_controller_free(struct device_node *np)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if (ofdma->of_node == np) {
+ if (ofdma->use_count) {
+ spin_unlock(&of_dma_lock);
+ return -EBUSY;
+ }
+
+ list_del(&ofdma->of_dma_controllers);
+ spin_unlock(&of_dma_lock);
+ kfree(ofdma);
+ return 0;
+ }
+
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_free);
+
+/**
+ * of_dma_match_channel - Check if a DMA specifier matches name
+ * @np: device node to look for DMA channels
+ * @name: channel name to be matched
+ * @index: index of DMA specifier in list of DMA specifiers
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Check if the DMA specifier pointed to by the index in a list of DMA
+ * specifiers matches the name provided. Returns 0 if the name matches and
+ * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
+ */
+static int of_dma_match_channel(struct device_node *np, char *name, int index,
+ struct of_phandle_args *dma_spec)
+{
+ const char *s;
+
+ if (of_property_read_string_index(np, "dma-names", index, &s))
+ return -ENODEV;
+
+ if (strcmp(name, s))
+ return -ENODEV;
+
+ if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+ dma_spec))
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * of_dma_request_slave_channel - Get the DMA slave channel
+ * @np: device node to get DMA request from
+ * @name: name of desired channel
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ char *name)
+{
+ struct of_phandle_args dma_spec;
+ struct of_dma *ofdma;
+ struct dma_chan *chan;
+ int count, i;
+
+ if (!np || !name) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return NULL;
+ }
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0) {
+ pr_err("%s: dma-names property missing or empty\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (of_dma_match_channel(np, name, i, &dma_spec))
+ continue;
+
+ ofdma = of_dma_get_controller(&dma_spec);
+
+ if (!ofdma)
+ continue;
+
+ chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+
+ of_dma_put_controller(ofdma);
+
+ of_node_put(dma_spec.np);
+
+ if (chan)
+ return chan;
+ }
+
+ return NULL;
+}
+
+/**
+ * of_dma_simple_xlate - Simple DMA engine translation function
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @of_dma: pointer to DMA controller data
+ *
+ * A simple translation function for devices that use a 32-bit value for the
+ * filter_param when calling the DMA engine dma_request_channel() function.
+ * Note that this translation function requires that #dma-cells is equal to 1
+ * and the argument of the dma specifier is the 32-bit filter_param. Returns
+ * pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+}
+EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
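A minimal provider/consumer sketch built on the helpers above; the my_* names are assumptions for illustration, not part of the patch.

#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

/* assumed driver-private filter; a real one matches against *(u32 *)param */
static bool my_filter_fn(struct dma_chan *chan, void *param)
{
	return true;
}

static struct of_dma_filter_info my_filter_info;

/* provider: the controller node is expected to carry #dma-cells = <1> */
static int my_dmac_register_dt(struct platform_device *pdev)
{
	dma_cap_zero(my_filter_info.dma_cap);
	dma_cap_set(DMA_SLAVE, my_filter_info.dma_cap);
	my_filter_info.filter_fn = my_filter_fn;

	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_simple_xlate,
					  &my_filter_info);
}

/* consumer: resolve the entry named "rx" from the node's dmas/dma-names */
static struct dma_chan *my_client_get_rx_chan(struct device *dev)
{
	return of_dma_request_slave_channel(dev->of_node, "rx");
}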
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 3f2617255ef2..d01faeb0f27c 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = {
#endif
};
-static int __init pch_dma_init(void)
-{
- return pci_register_driver(&pch_dma_driver);
-}
-
-static void __exit pch_dma_exit(void)
-{
- pci_unregister_driver(&pch_dma_driver);
-}
-
-module_init(pch_dma_init);
-module_exit(pch_dma_exit);
+module_pci_driver(pch_dma_driver);
MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 80680eee0171..718153122759 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -25,6 +25,7 @@
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -606,6 +607,11 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+struct dma_pl330_filter_args {
+ struct dma_pl330_dmac *pdmac;
+ unsigned int chan_id;
+};
+
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -2352,6 +2358,16 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+static bool pl330_dt_filter(struct dma_chan *chan, void *param)
+{
+ struct dma_pl330_filter_args *fargs = param;
+
+ if (chan->device != &fargs->pdmac->ddma)
+ return false;
+
+ return (chan->chan_id == fargs->chan_id);
+}
+
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2359,25 +2375,35 @@ bool pl330_filter(struct dma_chan *chan, void *param)
if (chan->device->dev->driver != &pl330_driver.drv)
return false;
-#ifdef CONFIG_OF
- if (chan->device->dev->of_node) {
- const __be32 *prop_value;
- phandle phandle;
- struct device_node *node;
-
- prop_value = ((struct property *)param)->value;
- phandle = be32_to_cpup(prop_value++);
- node = of_find_node_by_phandle(phandle);
- return ((chan->private == node) &&
- (chan->chan_id == be32_to_cpup(prop_value)));
- }
-#endif
-
peri_id = chan->private;
return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
+static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+ struct dma_pl330_filter_args fargs;
+ dma_cap_mask_t cap;
+
+ if (!pdmac)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ fargs.pdmac = pdmac;
+ fargs.chan_id = dma_spec->args[0];
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+ dma_cap_set(DMA_CYCLIC, cap);
+
+ return dma_request_channel(cap, pl330_dt_filter, &fargs);
+}
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2866,7 +2892,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = adev->dev.platform_data;
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
+ pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -2878,13 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- request_mem_region(res->start, resource_size(res), "dma-pl330");
-
- pi->base = ioremap(res->start, resource_size(res));
- if (!pi->base) {
- ret = -ENXIO;
- goto probe_err1;
- }
+ pi->base = devm_request_and_ioremap(&adev->dev, res);
+ if (!pi->base)
+ return -ENXIO;
amba_set_drvdata(adev, pdmac);
@@ -2892,11 +2914,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
- goto probe_err2;
+ return ret;
ret = pl330_add(pi);
if (ret)
- goto probe_err3;
+ goto probe_err1;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -2918,7 +2940,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!pdmac->peripherals) {
ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
- goto probe_err4;
+ goto probe_err2;
}
for (i = 0; i < num_chan; i++) {
@@ -2962,7 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err4;
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -2973,17 +2995,20 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
+
return 0;
-probe_err4:
- pl330_del(pi);
-probe_err3:
- free_irq(irq, pi);
probe_err2:
- iounmap(pi->base);
+ pl330_del(pi);
probe_err1:
- release_mem_region(res->start, resource_size(res));
- kfree(pdmac);
+ free_irq(irq, pi);
return ret;
}
@@ -2993,12 +3018,13 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- struct resource *res;
int irq;
if (!pdmac)
return 0;
+ of_dma_controller_free(adev->dev.of_node);
+
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
@@ -3020,13 +3046,6 @@ static int pl330_remove(struct amba_device *adev)
irq = adev->irq[0];
free_irq(irq, pi);
- iounmap(pi->base);
-
- res = &adev->res;
- release_mem_region(res->start, resource_size(res));
-
- kfree(pdmac);
-
return 0;
}
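The probe error-path simplification above comes from switching to managed allocations; a sketch of the devm_* pattern for a hypothetical AMBA driver (struct foo and foo_probe are assumed names):

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/slab.h>

struct foo {
	void __iomem *base;
};

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct foo *f;

	/* freed automatically when the device is unbound */
	f = devm_kzalloc(&adev->dev, sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	/* request_mem_region + ioremap in one call, both auto-released,
	 * so the old probe_err unwind labels can simply return */
	f->base = devm_request_and_ioremap(&adev->dev, &adev->res);
	if (!f->base)
		return -ENXIO;

	amba_set_drvdata(adev, f);
	return 0;
}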
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index f4cd946d259d..4acb85a10250 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -638,9 +638,6 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long flags;
int ret;
- if (!chan)
- return -EINVAL;
-
switch (cmd) {
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&schan->chan_lock, flags);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 3315e4be9b85..b70709b030d8 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
shdma_chan);
const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
if (!cfg)
- return -ENODEV;
+ return -ENXIO;
if (!try)
sh_chan->config = cfg;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 94674a96c646..1d627e2391f4 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -32,7 +32,9 @@
#define SIRFSOC_DMA_CH_VALID 0x140
#define SIRFSOC_DMA_CH_INT 0x144
#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_INT_EN_CLR 0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C
#define SIRFSOC_DMA_MODE_CTRL_BIT 4
#define SIRFSOC_DMA_DIR_CTRL_BIT 5
@@ -76,6 +78,7 @@ struct sirfsoc_dma {
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
void __iomem *base;
int irq;
+ bool is_marco;
};
#define DRV_NAME "sirfsoc_dma"
@@ -288,17 +291,67 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
int cid = schan->chan.chan_id;
unsigned long flags;
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+ spin_lock_irqsave(&schan->lock, flags);
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
- & ~((1 << cid) | 1 << (cid + 16)),
+ if (!sdma->is_marco) {
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ } else {
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+ }
+
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- spin_lock_irqsave(&schan->lock, flags);
list_splice_tail_init(&schan->active, &schan->free);
list_splice_tail_init(&schan->queued, &schan->free);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ | ((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
spin_unlock_irqrestore(&schan->lock, flags);
return 0;
@@ -311,6 +364,10 @@ static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
switch (cmd) {
+ case DMA_PAUSE:
+ return sirfsoc_dma_pause_chan(schan);
+ case DMA_RESUME:
+ return sirfsoc_dma_resume_chan(schan);
case DMA_TERMINATE_ALL:
return sirfsoc_dma_terminate_all(schan);
case DMA_SLAVE_CONFIG:
@@ -568,6 +625,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
return -ENOMEM;
}
+ if (of_device_is_compatible(dn, "sirf,marco-dmac"))
+ sdma->is_marco = true;
+
if (of_property_read_u32(dn, "cell-index", &id)) {
dev_err(dev, "Fail to get DMAC index\n");
return -ENODEV;
@@ -668,6 +728,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
+ { .compatible = "sirf,marco-dmac", },
{},
};
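A minimal sketch of how a client could exercise the new DMA_PAUSE/DMA_RESUME handling through the standard dmaengine wrappers (the caller is assumed, not part of the patch):

#include <linux/dmaengine.h>

static void pause_then_resume(struct dma_chan *chan)
{
	/* dmaengine_pause()/dmaengine_resume() call device_control() with
	 * DMA_PAUSE / DMA_RESUME, which sirfsoc_dma_control() now accepts */
	if (dmaengine_pause(chan))
		pr_warn("channel does not support pause\n");

	/* ... the loop control bits stay clear until resumed ... */

	dmaengine_resume(chan);
}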
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 23c5573e62dd..1734feec47b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -53,6 +53,8 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
+#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = {
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
-/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
-static u32 d40_backup_regs_v3[] = {
+/*
+ * Since the 9540 and 8540 have the same HW revision,
+ * use v4a for 9540 or earlier
+ * use v4b for 8540 or later
+ * HW revision:
+ * DB8500ed has revision 0
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
+ * TODO: Check if all these registers have to be saved/restored on dma40 v4a
+ */
+static u32 d40_backup_regs_v4a[] = {
D40_DREG_PSEG1,
D40_DREG_PSEG2,
D40_DREG_PSEG3,
@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = {
D40_DREG_RCEG4,
};
-#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
+
+static u32 d40_backup_regs_v4b[] = {
+ D40_DREG_CPSEG1,
+ D40_DREG_CPSEG2,
+ D40_DREG_CPSEG3,
+ D40_DREG_CPSEG4,
+ D40_DREG_CPSEG5,
+ D40_DREG_CPCEG1,
+ D40_DREG_CPCEG2,
+ D40_DREG_CPCEG3,
+ D40_DREG_CPCEG4,
+ D40_DREG_CPCEG5,
+ D40_DREG_CRSEG1,
+ D40_DREG_CRSEG2,
+ D40_DREG_CRSEG3,
+ D40_DREG_CRSEG4,
+ D40_DREG_CRSEG5,
+ D40_DREG_CRCEG1,
+ D40_DREG_CRCEG2,
+ D40_DREG_CRCEG3,
+ D40_DREG_CRCEG4,
+ D40_DREG_CRCEG5,
+};
+
+#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
@@ -134,6 +172,102 @@ static u32 d40_backup_regs_chan[] = {
};
/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: start delta in the lookup_log_chans in d40_base. If equals to
+ * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+
+static struct d40_interrupt_lookup il_v4a[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+};
+
+static struct d40_interrupt_lookup il_v4b[] = {
+ {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
+ {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
+ {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
+ {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
+ {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
+ {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
+ {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
+ {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
+ {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
+ {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
+ {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
+ {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+};
+static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
+};
+
+/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
* @base: Pointer to memory area when the pre_alloc_lli's are not large
@@ -221,6 +355,7 @@ struct d40_lcla_pool {
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
* event line number.
+ * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
*/
struct d40_phy_res {
spinlock_t lock;
@@ -228,6 +363,7 @@ struct d40_phy_res {
int num;
u32 allocated_src;
u32 allocated_dst;
+ bool use_soft_lli;
};
struct d40_base;
@@ -248,6 +384,7 @@ struct d40_base;
* @client: Client-owned descriptor list.
* @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
+ * @done: Completed jobs
* @queue: Queued jobs.
* @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
@@ -273,6 +410,7 @@ struct d40_chan {
struct list_head client;
struct list_head pending_queue;
struct list_head active;
+ struct list_head done;
struct list_head queue;
struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
@@ -289,6 +427,38 @@ struct d40_chan {
};
/**
+ * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
+ * controller
+ *
+ * @backup: the pointer to the register address array for backup
+ * @backup_size: the size of the register address array for backup
+ * @realtime_en: the realtime enable register
+ * @realtime_clear: the realtime clear register
+ * @high_prio_en: the high priority enable register
+ * @high_prio_clear: the high priority clear register
+ * @interrupt_en: the interrupt enable register
+ * @interrupt_clear: the interrupt clear register
+ * @il: the pointer to struct d40_interrupt_lookup
+ * @il_size: the size of d40_interrupt_lookup array
+ * @init_reg: the pointer to the struct d40_reg_val
+ * @init_reg_size: the size of d40_reg_val array
+ */
+struct d40_gen_dmac {
+ u32 *backup;
+ u32 backup_size;
+ u32 realtime_en;
+ u32 realtime_clear;
+ u32 high_prio_en;
+ u32 high_prio_clear;
+ u32 interrupt_en;
+ u32 interrupt_clear;
+ struct d40_interrupt_lookup *il;
+ u32 il_size;
+ struct d40_reg_val *init_reg;
+ u32 init_reg_size;
+};
+
+/**
* struct d40_base - The big global struct, one for each probe'd instance.
*
* @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
@@ -326,11 +496,13 @@ struct d40_chan {
* @desc_slab: cache for descriptors.
* @reg_val_backup: Here the values of some hardware registers are stored
* before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
+ * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
+ * later.
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @initialized: true if the dma has been initialized
+ * @gen_dmac: the struct of generic register values used to describe the u8500/8540
+ * DMA controller
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -344,6 +516,7 @@ struct d40_base {
int irq;
int num_phy_chans;
int num_log_chans;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -361,37 +534,11 @@ struct d40_base {
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ];
- u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
bool initialized;
-};
-
-/**
- * struct d40_interrupt_lookup - lookup table for interrupt handler
- *
- * @src: Interrupt mask register.
- * @clr: Interrupt clear register.
- * @is_error: true if this is an error interrupt.
- * @offset: start delta in the lookup_log_chans in d40_base. If equals to
- * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
- */
-struct d40_interrupt_lookup {
- u32 src;
- u32 clr;
- bool is_error;
- int offset;
-};
-
-/**
- * struct d40_reg_val - simple lookup struct
- *
- * @reg: The register.
- * @val: The value that belongs to the register in reg.
- */
-struct d40_reg_val {
- unsigned int reg;
- unsigned int val;
+ struct d40_gen_dmac gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c,
unsigned long flags;
int i;
int ret = -EINVAL;
- int p;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
- p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
-
/*
* Allocate both src and dst at the same time, therefore the half
* start on 1 since 0 can't be used since zero is used as end marker.
*/
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (!d40c->base->lcla_pool.alloc_map[p + i]) {
- d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (!d40c->base->lcla_pool.alloc_map[idx]) {
+ d40c->base->lcla_pool.alloc_map[idx] = d40d;
d40d->lcla_alloc++;
ret = i;
break;
@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
+ d40c->base->lcla_pool.alloc_map[idx] = NULL;
d40d->lcla_alloc--;
if (d40d->lcla_alloc == 0) {
ret = 0;
@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
+static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->done);
+}
+
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
struct d40_lcla_pool *pool = &chan->base->lcla_pool;
@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* can't link back to the one in LCPA space
*/
if (linkback || (lli_len - lli_current > 1)) {
- curr_lcla = d40_lcla_alloc_one(chan, desc);
+ /*
+ * If the channel is expected to use only soft_lli, don't
+ * allocate an lcla. This is to avoid a HW issue that exists
+ * in some controllers during a peripheral-to-memory transfer
+ * that uses linked lists.
+ */
+ if (!(chan->phy_chan->use_soft_lli &&
+ chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+
first_lcla = curr_lcla;
}
@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
+static struct d40_desc *d40_first_done(struct d40_chan *d40c)
+{
+ if (list_empty(&d40c->done))
+ return NULL;
+
+ return list_first_entry(&d40c->done, struct d40_desc, node);
+}
+
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
save);
/* Save/Restore registers only existing on dma40 v3 and later */
- if (base->rev >= 3)
- dma40_backup(base->virtbase, base->reg_val_backup_v3,
- d40_backup_regs_v3,
- ARRAY_SIZE(d40_backup_regs_v3),
- save);
+ if (base->gen_dmac.backup)
+ dma40_backup(base->virtbase, base->reg_val_backup_v4,
+ base->gen_dmac.backup,
+ base->gen_dmac.backup_size,
+ save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c)
struct d40_desc *d40d;
struct d40_desc *_d;
+ /* Release completed descriptors */
+ while ((d40d = d40_first_done(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
@@ -1396,6 +1570,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
d40c->busy = false;
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
+
+ d40_desc_remove(d40d);
+ d40_desc_done(d40c, d40d);
}
d40c->pending_tx++;
@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags);
- /* Get first active entry from list */
- d40d = d40_first_active_get(d40c);
- if (d40d == NULL)
- goto err;
+ /* Get first entry from the done list */
+ d40d = d40_first_done(d40c);
+ if (d40d == NULL) {
+ /* Check if we have reached here for cyclic job */
+ d40d = d40_first_active_get(d40c);
+ if (d40d == NULL || !d40d->cyclic)
+ goto err;
+ }
if (!d40d->cyclic)
dma_cookie_complete(&d40d->txd);
@@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data)
if (async_tx_test_ack(&d40d->txd)) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
- d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
- }
+ } else if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
}
}
@@ -1469,53 +1648,51 @@ err:
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
- static const struct d40_interrupt_lookup il[] = {
- {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
- {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
- {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
- {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
- {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
- {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
- {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
- {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
- {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
- {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
- };
-
int i;
- u32 regs[ARRAY_SIZE(il)];
u32 idx;
u32 row;
long chan = -1;
struct d40_chan *d40c;
unsigned long flags;
struct d40_base *base = data;
+ u32 regs[base->gen_dmac.il_size];
+ struct d40_interrupt_lookup *il = base->gen_dmac.il;
+ u32 il_size = base->gen_dmac.il_size;
spin_lock_irqsave(&base->interrupt_lock, flags);
/* Read interrupt status of both logical and physical channels */
- for (i = 0; i < ARRAY_SIZE(il); i++)
+ for (i = 0; i < il_size; i++)
regs[i] = readl(base->virtbase + il[i].src);
for (;;) {
chan = find_next_bit((unsigned long *)regs,
- BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+ BITS_PER_LONG * il_size, chan + 1);
/* No more set bits found? */
- if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ if (chan == BITS_PER_LONG * il_size)
break;
row = chan / BITS_PER_LONG;
idx = chan & (BITS_PER_LONG - 1);
- /* ACK interrupt */
- writel(1 << idx, base->virtbase + il[row].clr);
-
if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx];
else
d40c = base->lookup_log_chans[il[row].offset + idx];
+
+ if (!d40c) {
+ /*
+ * No error because this can happen if something else
+ * in the system is using the channel.
+ */
+ continue;
+ }
+
+ /* ACK interrupt */
+ writel(1 << idx, base->virtbase + il[row].clr);
+
spin_lock(&d40c->lock);
if (!il[row].is_error)
@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
int i;
int j;
int log_num;
+ int num_phy_chans;
bool is_src;
bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res;
+ num_phy_chans = d40c->base->num_phy_chans;
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
dev_type = d40c->dma_cfg.src_dev_type;
@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (!is_log) {
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
/* Find physical half channel */
- for (i = 0; i < d40c->base->num_phy_chans; i++) {
-
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
if (d40_alloc_mask_set(&phys[i], is_src,
0, is_log,
first_phy_user))
goto found_phy;
+ } else {
+ for (i = 0; i < num_phy_chans; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log,
+ first_phy_user))
+ goto found_phy;
+ }
}
} else
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
@@ -1954,7 +2140,6 @@ _exit:
}
-
static u32 stedma40_residue(struct dma_chan *chan)
{
struct d40_chan *d40c =
@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
return ret < 0 ? ret : 0;
}
-
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
unsigned int sg_len, unsigned long dma_flags)
@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
goto err;
}
-
desc->lli_current = 0;
desc->txd.flags = dma_flags;
desc->txd.tx_submit = d40_tx_submit;
@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL;
}
-
spin_lock_irqsave(&chan->lock, flags);
desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
bool realtime = d40c->dma_cfg.realtime;
bool highprio = d40c->dma_cfg.high_priority;
- u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
- u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
u32 bit = 1 << event;
+ u32 prioreg;
+ struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
+
+ rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
+ /*
+ * Due to a hardware bug, in some cases a logical channel triggered by
+ * a high priority destination event line can generate extra packet
+ * transactions.
+ *
+ * The workaround is to not set the high priority level for the
+ * destination event lines that trigger logical channels.
+ */
+ if (!src && chan_is_logical(d40c))
+ highprio = false;
+
+ prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
/* Destination event lines are stored in the upper halfword */
if (!src)
@@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+ d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
else
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.dst_dev_type *
- D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+ d40c->dma_cfg.dst_dev_type *
+ D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan)
return;
}
-
spin_lock_irqsave(&d40c->lock, flags);
err = d40_free_dma(d40c);
@@ -2330,14 +2526,12 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
-static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_transfer_direction direction,
- unsigned long dma_flags,
- void *context)
+static struct dma_async_tx_descriptor *
+d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long dma_flags, void *context)
{
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+ if (!is_slave_direction(direction))
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (src_maxburst > 16) {
+ src_maxburst = 16;
+ dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+ } else if (dst_maxburst > 16) {
+ dst_maxburst = 16;
+ src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+ }
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->log_num = D40_PHY_CHAN;
+ INIT_LIST_HEAD(&d40c->done);
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue);
@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
int ret = 0;
- if (!pm_runtime_suspended(dev))
- return -EBUSY;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base)
num_phy_chans_avail--;
}
+ /* Mark soft_lli channels */
+ for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
+ int chan = base->plat_data->soft_lli_chans[i];
+
+ base->phy_res[chan].use_soft_lli = true;
+ }
+
dev_info(base->dev, "%d of %d physical DMA channels available\n",
num_phy_chans_avail, base->num_phy_chans);
@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
* ? has revision 1
* DB8500v1 has revision 2
* DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
*/
rev = AMBA_REV_BITS(pid);
+ plat_data = pdev->dev.platform_data;
+
/* The number of physical channels on this HW */
- num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+ if (plat_data->num_of_phy_chans)
+ num_phy_chans = plat_data->num_of_phy_chans;
+ else
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
- dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- rev, res->start);
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
+ rev, res->start, num_phy_chans);
if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported",
@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- plat_data = pdev->dev.platform_data;
-
/* Count the number of logical channels in use */
for (i = 0; i < plat_data->dev_len; i++)
if (plat_data->dev_rx[i] != 0)
@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans];
+ if (base->plat_data->num_of_phy_chans == 14) {
+ base->gen_dmac.backup = d40_backup_regs_v4b;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
+ base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
+ base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
+ base->gen_dmac.il = il_v4b;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
+ base->gen_dmac.init_reg = dma_init_reg_v4b;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
+ } else {
+ if (base->rev >= 3) {
+ base->gen_dmac.backup = d40_backup_regs_v4a;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
+ }
+ base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
+ base->gen_dmac.realtime_en = D40_DREG_RSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
+ base->gen_dmac.il = il_v4a;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
+ base->gen_dmac.init_reg = dma_init_reg_v4a;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
+ }
+
base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
GFP_KERNEL);
if (!base->phy_res)
@@ -3093,31 +3336,15 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static struct d40_reg_val dma_init_reg[] = {
- /* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
-
- /* Interrupts on all logical channels */
- { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
- };
int i;
u32 prmseo[2] = {0, 0};
u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
u32 pcmis = 0;
u32 pcicr = 0;
+ struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
+ u32 reg_size = base->gen_dmac.init_reg_size;
- for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ for (i = 0; i < reg_size; i++)
writel(dma_init_reg[i].val,
base->virtbase + dma_init_reg[i].reg);
@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base)
writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
/* Write which interrupt to enable */
- writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+ writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
/* Write which interrupt to clear */
- writel(pcicr, base->virtbase + D40_DREG_PCICR);
+ writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
+ /* These are __initdata and cannot be accessed after init */
+ base->gen_dmac.init_reg = NULL;
+ base->gen_dmac.init_reg_size = 0;
}
static int __init d40_lcla_allocate(struct d40_base *base)
@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err)
goto failure;
+ base->dev->dma_parms = &base->dma_parms;
+ err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+ if (err) {
+ d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ goto failure;
+ }
+
d40_hw_init(base);
dev_info(base->dev, "initialized\n");
@@ -3397,7 +3634,7 @@ failure:
release_mem_region(base->phy_start,
base->phy_size);
if (base->clk) {
- clk_disable(base->clk);
+ clk_disable_unprepare(base->clk);
clk_put(base->clk);
}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 851ad56e8409..7180e0d41722 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
-
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
if (cfg->dst_info.big_endian)
@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
return lli;
- err:
+err:
return NULL;
}
@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
static void d40_log_fill_lli(struct d40_log_lli *lli,
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 6d47373f3f58..fdde8ef77542 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -125,7 +125,7 @@
#define D40_DREG_GCC 0x000
#define D40_DREG_GCC_ENA 0x1
/* This assumes that there are only 4 event groups */
-#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_ENABLE_ALL 0x3ff01
#define D40_DREG_GCC_EVTGRP_POS 8
#define D40_DREG_GCC_SRC 0
#define D40_DREG_GCC_DST 1
@@ -148,14 +148,31 @@
#define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024
+
+#define D40_DREG_SSEG1 0x030
+#define D40_DREG_SSEG2 0x034
+#define D40_DREG_SSEG3 0x038
+#define D40_DREG_SSEG4 0x03C
+
+#define D40_DREG_SCEG1 0x040
+#define D40_DREG_SCEG2 0x044
+#define D40_DREG_SCEG3 0x048
+#define D40_DREG_SCEG4 0x04C
+
#define D40_DREG_ACTIVE 0x050
#define D40_DREG_ACTIVO 0x054
-#define D40_DREG_FSEB1 0x058
-#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_CIDMOD 0x058
+#define D40_DREG_TCIDV 0x05C
#define D40_DREG_PCMIS 0x060
#define D40_DREG_PCICR 0x064
#define D40_DREG_PCTIS 0x068
#define D40_DREG_PCEIS 0x06C
+
+#define D40_DREG_SPCMIS 0x070
+#define D40_DREG_SPCICR 0x074
+#define D40_DREG_SPCTIS 0x078
+#define D40_DREG_SPCEIS 0x07C
+
#define D40_DREG_LCMIS0 0x080
#define D40_DREG_LCMIS1 0x084
#define D40_DREG_LCMIS2 0x088
@@ -172,6 +189,33 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+
+#define D40_DREG_SLCMIS1 0x0C0
+#define D40_DREG_SLCMIS2 0x0C4
+#define D40_DREG_SLCMIS3 0x0C8
+#define D40_DREG_SLCMIS4 0x0CC
+
+#define D40_DREG_SLCICR1 0x0D0
+#define D40_DREG_SLCICR2 0x0D4
+#define D40_DREG_SLCICR3 0x0D8
+#define D40_DREG_SLCICR4 0x0DC
+
+#define D40_DREG_SLCTIS1 0x0E0
+#define D40_DREG_SLCTIS2 0x0E4
+#define D40_DREG_SLCTIS3 0x0E8
+#define D40_DREG_SLCTIS4 0x0EC
+
+#define D40_DREG_SLCEIS1 0x0F0
+#define D40_DREG_SLCEIS2 0x0F4
+#define D40_DREG_SLCEIS3 0x0F8
+#define D40_DREG_SLCEIS4 0x0FC
+
+#define D40_DREG_FSESS1 0x100
+#define D40_DREG_FSESS2 0x104
+
+#define D40_DREG_FSEBS1 0x108
+#define D40_DREG_FSEBS2 0x10C
+
#define D40_DREG_PSEG1 0x110
#define D40_DREG_PSEG2 0x114
#define D40_DREG_PSEG3 0x118
@@ -188,6 +232,86 @@
#define D40_DREG_RCEG2 0x144
#define D40_DREG_RCEG3 0x148
#define D40_DREG_RCEG4 0x14C
+
+#define D40_DREG_PREFOT 0x15C
+#define D40_DREG_EXTCFG 0x160
+
+#define D40_DREG_CPSEG1 0x200
+#define D40_DREG_CPSEG2 0x204
+#define D40_DREG_CPSEG3 0x208
+#define D40_DREG_CPSEG4 0x20C
+#define D40_DREG_CPSEG5 0x210
+
+#define D40_DREG_CPCEG1 0x220
+#define D40_DREG_CPCEG2 0x224
+#define D40_DREG_CPCEG3 0x228
+#define D40_DREG_CPCEG4 0x22C
+#define D40_DREG_CPCEG5 0x230
+
+#define D40_DREG_CRSEG1 0x240
+#define D40_DREG_CRSEG2 0x244
+#define D40_DREG_CRSEG3 0x248
+#define D40_DREG_CRSEG4 0x24C
+#define D40_DREG_CRSEG5 0x250
+
+#define D40_DREG_CRCEG1 0x260
+#define D40_DREG_CRCEG2 0x264
+#define D40_DREG_CRCEG3 0x268
+#define D40_DREG_CRCEG4 0x26C
+#define D40_DREG_CRCEG5 0x270
+
+#define D40_DREG_CFSESS1 0x280
+#define D40_DREG_CFSESS2 0x284
+#define D40_DREG_CFSESS3 0x288
+
+#define D40_DREG_CFSEBS1 0x290
+#define D40_DREG_CFSEBS2 0x294
+#define D40_DREG_CFSEBS3 0x298
+
+#define D40_DREG_CLCMIS1 0x300
+#define D40_DREG_CLCMIS2 0x304
+#define D40_DREG_CLCMIS3 0x308
+#define D40_DREG_CLCMIS4 0x30C
+#define D40_DREG_CLCMIS5 0x310
+
+#define D40_DREG_CLCICR1 0x320
+#define D40_DREG_CLCICR2 0x324
+#define D40_DREG_CLCICR3 0x328
+#define D40_DREG_CLCICR4 0x32C
+#define D40_DREG_CLCICR5 0x330
+
+#define D40_DREG_CLCTIS1 0x340
+#define D40_DREG_CLCTIS2 0x344
+#define D40_DREG_CLCTIS3 0x348
+#define D40_DREG_CLCTIS4 0x34C
+#define D40_DREG_CLCTIS5 0x350
+
+#define D40_DREG_CLCEIS1 0x360
+#define D40_DREG_CLCEIS2 0x364
+#define D40_DREG_CLCEIS3 0x368
+#define D40_DREG_CLCEIS4 0x36C
+#define D40_DREG_CLCEIS5 0x370
+
+#define D40_DREG_CPCMIS 0x380
+#define D40_DREG_CPCICR 0x384
+#define D40_DREG_CPCTIS 0x388
+#define D40_DREG_CPCEIS 0x38C
+
+#define D40_DREG_SCCIDA1 0xE80
+#define D40_DREG_SCCIDA2 0xE90
+#define D40_DREG_SCCIDA3 0xEA0
+#define D40_DREG_SCCIDA4 0xEB0
+#define D40_DREG_SCCIDA5 0xEC0
+
+#define D40_DREG_SCCIDB1 0xE84
+#define D40_DREG_SCCIDB2 0xE94
+#define D40_DREG_SCCIDB3 0xEA4
+#define D40_DREG_SCCIDB4 0xEB4
+#define D40_DREG_SCCIDB5 0xEC4
+
+#define D40_DREG_PRSCCIDA 0xF80
+#define D40_DREG_PRSCCIDB 0xF84
+
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f6c018f1b453..fcee27eae1f6 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -63,6 +63,9 @@
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
+#define TEGRA_APBDMA_CHAN_CSRE 0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
@@ -113,10 +116,12 @@ struct tegra_dma;
* tegra_dma_chip_data Tegra chip specific DMA data
* @nr_channels: Number of channels available in the controller.
* @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @support_channel_pause: Support channel-wise pause of DMA.
*/
struct tegra_dma_chip_data {
int nr_channels;
int max_dma_count;
+ bool support_channel_pause;
};
/* DMA channel registers */
@@ -355,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
spin_unlock(&tdma->global_lock);
}
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ } else {
+ tegra_dma_global_pause(tdc, wait_for_burst_complete);
+ }
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+ } else {
+ tegra_dma_global_resume(tdc);
+ }
+}
+
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
u32 csr;
@@ -410,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
* If there is already an IEC status then the interrupt handler needs to
* load the new configuration.
*/
- tegra_dma_global_pause(tdc, false);
+ tegra_dma_pause(tdc, false);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
/*
@@ -420,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
"Skipping new configuration as interrupt is pending\n");
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
return;
}
@@ -431,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
@@ -692,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
goto skip_dma_stop;
/* Pause DMA before checking the queue status */
- tegra_dma_global_pause(tdc, true);
+ tegra_dma_pause(tdc, true);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
@@ -710,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
sgreq->dma_desc->bytes_transferred +=
get_current_xferred_count(tdc, sgreq, status);
}
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
skip_dma_stop:
tegra_dma_abort_all(tdc);
@@ -738,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_SUCCESS) {
- dma_set_residue(txstate, 0);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -1180,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.nr_channels = 16,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
#if defined(CONFIG_OF)
@@ -1187,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
+/* Tegra114 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
+ .nr_channels = 32,
+ .max_dma_count = 1024UL * 64,
+ .support_channel_pause = true,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
.compatible = "nvidia,tegra30-apbdma",
.data = &tegra30_dma_chip_data,
}, {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index acb709bfac0f..e443f2c1dfd1 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -80,6 +80,29 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_GHES
+ bool "Output ACPI APEI/GHES BIOS detected errors via EDAC"
+ depends on ACPI_APEI_GHES && (EDAC_MM_EDAC=y)
+ default y
+ help
+ Not all machines support hardware-driven error reporting. Some of
+ them provide a BIOS-driven error reporting mechanism via ACPI, using
+ the APEI/GHES driver. When this option is enabled, the error reports
+ provided by GHES are sent to userspace via the EDAC API.
+
+ If a GHES BIOS is detected, enabling this option disables the
+ hardware-driven mechanisms and the system enters "Firmware First"
+ mode.
+
+ Note that keeping both GHES and a hardware-driven error mechanism
+ active does not work well, as the BIOS will race with the OS while
+ reading the error registers. So, if you do not want to use the
+ "Firmware First" GHES error mechanism, disable GHES either at
+ compile time or by passing the "ghes.disable=1" kernel parameter
+ at boot time.
+
+ If in doubt, say 'Y'.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h"
depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 5608a9ba61b7..4154ed6a02c6 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -16,6 +16,7 @@ ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
endif
+obj-$(CONFIG_EDAC_GHES) += ghes_edac.o
obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
edac_mce_amd-y := mce_amd.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 23bb99fa44f1..3c2625e7980d 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -453,6 +453,11 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
+
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e);
+
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
const u16 error_count,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d1e9eb191f2b..cdb81aa73ab7 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -42,6 +42,12 @@
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
+/*
+ * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
+ * apei/ghes and i7core_edac) being used at the same time.
+ */
+static void const *edac_mc_owner;
+
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len)
{
@@ -441,13 +447,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
mci->op_state = OP_ALLOC;
- /* at this point, the root kobj is valid, and in order to
- * 'free' the object, then the function:
- * edac_mc_unregister_sysfs_main_kobj() must be called
- * which will perform kobj unregistration and the actual free
- * will occur during the kobject callback operation
- */
-
return mci;
error:
@@ -666,9 +665,9 @@ fail1:
return 1;
}
-static void del_mc_from_global_list(struct mem_ctl_info *mci)
+static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
- atomic_dec(&edac_handlers);
+ int handlers = atomic_dec_return(&edac_handlers);
list_del_rcu(&mci->link);
/* these are for safe removal of devices from global list while
@@ -676,6 +675,8 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
*/
synchronize_rcu();
INIT_LIST_HEAD(&mci->link);
+
+ return handlers;
}
/**
@@ -719,6 +720,7 @@ EXPORT_SYMBOL(edac_mc_find);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
+ int ret = -EINVAL;
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
@@ -749,6 +751,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
#endif
mutex_lock(&mem_ctls_mutex);
+ if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
+ ret = -EPERM;
+ goto fail0;
+ }
+
if (add_mc_to_global_list(mci))
goto fail0;
@@ -775,6 +782,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
+ edac_mc_owner = mci->mod_name;
+
mutex_unlock(&mem_ctls_mutex);
return 0;
@@ -783,7 +792,7 @@ fail1:
fail0:
mutex_unlock(&mem_ctls_mutex);
- return 1;
+ return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
@@ -809,7 +818,8 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
return NULL;
}
- del_mc_from_global_list(mci);
+ if (!del_mc_from_global_list(mci))
+ edac_mc_owner = NULL;
mutex_unlock(&mem_ctls_mutex);
/* flush workq processes */
@@ -907,6 +917,7 @@ const char *edac_layer_name[] = {
[EDAC_MC_LAYER_CHANNEL] = "channel",
[EDAC_MC_LAYER_SLOT] = "slot",
[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+ [EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
@@ -1054,7 +1065,46 @@ static void edac_ue_error(struct mem_ctl_info *mci,
edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
-#define OTHER_LABEL " or "
+/**
+ * edac_raw_mc_handle_error - reports a memory event to userspace without doing
+ * anything to discover the error location
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @e: error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error comes directly from the
+ * BIOS, like in the case of the APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e)
+{
+ char detail[80];
+ int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
+ e->page_frame_number, e->offset_in_page,
+ e->grain, e->syndrome);
+ edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+ detail, e->other_detail, e->enable_per_layer_report,
+ e->page_frame_number, e->offset_in_page, e->grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%ld",
+ e->page_frame_number, e->offset_in_page, e->grain);
+
+ edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+ detail, e->other_detail, e->enable_per_layer_report);
+ }
+
+}
+EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
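For context, a minimal sketch of the intended calling pattern, mirroring what the ghes_edac.c driver further down in this series does; the helper name and the message string are hypothetical, and the descriptor is the one pre-allocated inside struct mem_ctl_info:

static void example_fw_first_report(struct mem_ctl_info *mci,
				    unsigned long pfn, unsigned long offset)
{
	struct edac_raw_error_desc *e = &mci->error_desc;

	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	e->top_layer = e->mid_layer = e->low_layer = -1;	/* location unknown */
	e->page_frame_number = pfn;
	e->offset_in_page = offset;
	e->msg = "example error";				/* hypothetical message */
	e->other_detail = "";
	strcpy(e->label, "unknown label");

	edac_raw_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, e);
}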
/**
* edac_mc_handle_error - reports a memory event to userspace
@@ -1086,19 +1136,27 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *msg,
const char *other_detail)
{
- /* FIXME: too much for stack: move it to some pre-alocated area */
- char detail[80], location[80];
- char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
char *p;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
- int i;
- long grain;
- bool enable_per_layer_report = false;
+ int i, n_labels = 0;
u8 grain_bits;
+ struct edac_raw_error_desc *e = &mci->error_desc;
edac_dbg(3, "MC%d\n", mci->mc_idx);
+ /* Fills the error report buffer */
+ memset(e, 0, sizeof (*e));
+ e->error_count = error_count;
+ e->top_layer = top_layer;
+ e->mid_layer = mid_layer;
+ e->low_layer = low_layer;
+ e->page_frame_number = page_frame_number;
+ e->offset_in_page = offset_in_page;
+ e->syndrome = syndrome;
+ e->msg = msg;
+ e->other_detail = other_detail;
+
/*
* Check if the event report is consistent and if the memory
* location is known. If it is known, enable_per_layer_report will be
@@ -1121,7 +1179,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
pos[i] = -1;
}
if (pos[i] >= 0)
- enable_per_layer_report = true;
+ e->enable_per_layer_report = true;
}
/*
@@ -1135,8 +1193,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* where each memory belongs to a separate channel within the same
* branch.
*/
- grain = 0;
- p = label;
+ p = e->label;
*p = '\0';
for (i = 0; i < mci->tot_dimms; i++) {
@@ -1150,8 +1207,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
continue;
/* get the max grain, over the error match range */
- if (dimm->grain > grain)
- grain = dimm->grain;
+ if (dimm->grain > e->grain)
+ e->grain = dimm->grain;
/*
* If the error is memory-controller wide, there's no need to
@@ -1159,8 +1216,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* channel/memory controller/... may be affected.
* Also, don't show errors for empty DIMM slots.
*/
- if (enable_per_layer_report && dimm->nr_pages) {
- if (p != label) {
+ if (e->enable_per_layer_report && dimm->nr_pages) {
+ if (n_labels >= EDAC_MAX_LABELS) {
+ e->enable_per_layer_report = false;
+ break;
+ }
+ n_labels++;
+ if (p != e->label) {
strcpy(p, OTHER_LABEL);
p += strlen(OTHER_LABEL);
}
@@ -1187,12 +1249,12 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
}
- if (!enable_per_layer_report) {
- strcpy(label, "any memory");
+ if (!e->enable_per_layer_report) {
+ strcpy(e->label, "any memory");
} else {
edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
- if (p == label)
- strcpy(label, "unknown memory");
+ if (p == e->label)
+ strcpy(e->label, "unknown memory");
if (type == HW_EVENT_ERR_CORRECTED) {
if (row >= 0) {
mci->csrows[row]->ce_count += error_count;
@@ -1205,7 +1267,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
/* Fill the RAM location data */
- p = location;
+ p = e->location;
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
@@ -1215,32 +1277,16 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
edac_layer_name[mci->layers[i].type],
pos[i]);
}
- if (p > location)
+ if (p > e->location)
*(p - 1) = '\0';
/* Report the error via the trace interface */
- grain_bits = fls_long(grain) + 1;
- trace_mc_event(type, msg, label, error_count,
- mci->mc_idx, top_layer, mid_layer, low_layer,
- PAGES_TO_MiB(page_frame_number) | offset_in_page,
- grain_bits, syndrome, other_detail);
+ grain_bits = fls_long(e->grain) + 1;
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+ PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+ grain_bits, e->syndrome, e->other_detail);
- /* Memory type dependent details about the error */
- if (type == HW_EVENT_ERR_CORRECTED) {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
- page_frame_number, offset_in_page,
- grain, syndrome);
- edac_ce_error(mci, error_count, pos, msg, location, label,
- detail, other_detail, enable_per_layer_report,
- page_frame_number, offset_in_page, grain);
- } else {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld",
- page_frame_number, offset_in_page, grain);
-
- edac_ue_error(mci, error_count, pos, msg, location, label,
- detail, other_detail, enable_per_layer_report);
- }
+ edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0ca1ca71157f..4f4b6137d74e 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,7 +7,7 @@
*
* Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
*
- * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
+ * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
* The entire API were re-written, and ported to use struct device
*
*/
@@ -429,8 +429,12 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0)
+ if (err < 0) {
+ edac_dbg(1,
+ "failure: create csrow objects for csrow %d\n",
+ i);
goto error;
+ }
}
return 0;
@@ -677,9 +681,6 @@ static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
unsigned long bandwidth = 0;
int new_bw = 0;
- if (!mci->set_sdram_scrub_rate)
- return -ENODEV;
-
if (strict_strtoul(data, 10, &bandwidth) < 0)
return -EINVAL;
@@ -703,9 +704,6 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
int bandwidth = 0;
- if (!mci->get_sdram_scrub_rate)
- return -ENODEV;
-
bandwidth = mci->get_sdram_scrub_rate(mci);
if (bandwidth < 0) {
edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
@@ -866,8 +864,7 @@ DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
- mci_sdram_scrub_rate_store);
+DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
static struct attribute *mci_attrs[] = {
&dev_attr_reset_counters.attr,
@@ -878,7 +875,6 @@ static struct attribute *mci_attrs[] = {
&dev_attr_ce_noinfo_count.attr,
&dev_attr_ue_count.attr,
&dev_attr_ce_count.attr,
- &dev_attr_sdram_scrub_rate.attr,
&dev_attr_max_location.attr,
NULL
};
@@ -1007,11 +1003,28 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
bus_unregister(&mci->bus);
kfree(mci->bus.name);
return err;
}
+ if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
+ if (mci->get_sdram_scrub_rate) {
+ dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+ dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+ }
+ if (mci->set_sdram_scrub_rate) {
+ dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+ dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+ }
+ err = device_create_file(&mci->dev,
+ &dev_attr_sdram_scrub_rate);
+ if (err) {
+ edac_dbg(1, "failure: create sdram_scrub_rate\n");
+ goto fail2;
+ }
+ }
/*
* Create the dimm/rank devices
*/
@@ -1056,6 +1069,7 @@ fail:
continue;
device_unregister(&dimm->dev);
}
+fail2:
device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 12c951a2c33d..a66941fea5a4 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -146,7 +146,7 @@ static void __exit edac_exit(void)
/*
* Inform the kernel of our entry and exit points
*/
-module_init(edac_init);
+subsys_initcall(edac_init);
module_exit(edac_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 0056c4dae9d5..e8658e451762 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -429,8 +429,8 @@ static void edac_pci_main_kobj_teardown(void)
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
edac_dbg(0, "called kobject_put on main kobj\n");
kobject_put(edac_pci_top_main_kobj);
+ edac_put_sysfs_subsys();
}
- edac_put_sysfs_subsys();
}
/*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
new file mode 100644
index 000000000000..bb534670ec02
--- /dev/null
+++ b/drivers/edac/ghes_edac.c
@@ -0,0 +1,537 @@
+/*
+ * GHES/EDAC Linux driver
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2013 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <acpi/ghes.h>
+#include <linux/edac.h>
+#include <linux/dmi.h>
+#include "edac_core.h"
+#include <ras/ras_event.h>
+
+#define GHES_EDAC_REVISION " Ver: 1.0.0"
+
+struct ghes_edac_pvt {
+ struct list_head list;
+ struct ghes *ghes;
+ struct mem_ctl_info *mci;
+
+ /* Buffers for the error handling routine */
+ char detail_location[240];
+ char other_detail[160];
+ char msg[80];
+};
+
+static LIST_HEAD(ghes_reglist);
+static DEFINE_MUTEX(ghes_edac_lock);
+static int ghes_edac_mc_num;
+
+
+/* Memory Device - Type 17 of SMBIOS spec */
+struct memdev_dmi_entry {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u16 phys_mem_array_handle;
+ u16 mem_err_info_handle;
+ u16 total_width;
+ u16 data_width;
+ u16 size;
+ u8 form_factor;
+ u8 device_set;
+ u8 device_locator;
+ u8 bank_locator;
+ u8 memory_type;
+ u16 type_detail;
+ u16 speed;
+ u8 manufacturer;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 attributes;
+ u32 extended_size;
+ u16 conf_mem_clk_speed;
+} __attribute__((__packed__));
+
+struct ghes_edac_dimm_fill {
+ struct mem_ctl_info *mci;
+ unsigned count;
+};
+
+char *memory_type[] = {
+ [MEM_EMPTY] = "EMPTY",
+ [MEM_RESERVED] = "RESERVED",
+ [MEM_UNKNOWN] = "UNKNOWN",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "SDR",
+ [MEM_RDR] = "RDR",
+ [MEM_DDR] = "DDR",
+ [MEM_RDDR] = "RDDR",
+ [MEM_RMBS] = "RMBS",
+ [MEM_DDR2] = "DDR2",
+ [MEM_FB_DDR2] = "FB_DDR2",
+ [MEM_RDDR2] = "RDDR2",
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "DDR3",
+ [MEM_RDDR3] = "RDDR3",
+};
+
+static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
+{
+ int *num_dimm = arg;
+
+ if (dh->type == DMI_ENTRY_MEM_DEVICE)
+ (*num_dimm)++;
+}
+
+static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
+{
+ struct ghes_edac_dimm_fill *dimm_fill = arg;
+ struct mem_ctl_info *mci = dimm_fill->mci;
+
+ if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+ struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers,
+ dimm_fill->count, 0, 0);
+
+ if (entry->size == 0xffff) {
+ pr_info("Can't get DIMM%i size\n",
+ dimm_fill->count);
+ dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
+ } else if (entry->size == 0x7fff) {
+ dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
+ } else {
+ if (entry->size & 1 << 15)
+ dimm->nr_pages = MiB_TO_PAGES((entry->size &
+ 0x7fff) << 10);
+ else
+ dimm->nr_pages = MiB_TO_PAGES(entry->size);
+ }
+
+ switch (entry->memory_type) {
+ case 0x12:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR;
+ else
+ dimm->mtype = MEM_DDR;
+ break;
+ case 0x13:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR2;
+ else
+ dimm->mtype = MEM_DDR2;
+ break;
+ case 0x14:
+ dimm->mtype = MEM_FB_DDR2;
+ break;
+ case 0x18:
+ if (entry->type_detail & 1 << 13)
+ dimm->mtype = MEM_RDDR3;
+ else
+ dimm->mtype = MEM_DDR3;
+ break;
+ default:
+ if (entry->type_detail & 1 << 6)
+ dimm->mtype = MEM_RMBS;
+ else if ((entry->type_detail & ((1 << 7) | (1 << 13)))
+ == ((1 << 7) | (1 << 13)))
+ dimm->mtype = MEM_RDR;
+ else if (entry->type_detail & 1 << 7)
+ dimm->mtype = MEM_SDR;
+ else if (entry->type_detail & 1 << 9)
+ dimm->mtype = MEM_EDO;
+ else
+ dimm->mtype = MEM_UNKNOWN;
+ }
+
+ /*
+ * Actually, we can only detect if the memory has bits for
+ * checksum or not
+ */
+ if (entry->total_width == entry->data_width)
+ dimm->edac_mode = EDAC_NONE;
+ else
+ dimm->edac_mode = EDAC_SECDED;
+
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->grain = 128; /* Likely, worst case */
+
+ /*
+ * FIXME: It shouldn't be hard to also fill the DIMM labels
+ */
+
+ if (dimm->nr_pages) {
+ edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
+ dimm_fill->count, memory_type[dimm->mtype],
+ PAGES_TO_MiB(dimm->nr_pages),
+ (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
+ edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
+ entry->memory_type, entry->type_detail,
+ entry->total_width, entry->data_width);
+ }
+
+ dimm_fill->count++;
+ }
+}
+
+void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+ struct cper_sec_mem_err *mem_err)
+{
+ enum hw_event_mc_err_type type;
+ struct edac_raw_error_desc *e;
+ struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt = NULL;
+ char *p;
+ u8 grain_bits;
+
+ list_for_each_entry(pvt, &ghes_reglist, list) {
+ if (ghes == pvt->ghes)
+ break;
+ }
+ if (!pvt) {
+ pr_err("Internal error: Can't find EDAC structure\n");
+ return;
+ }
+ mci = pvt->mci;
+ e = &mci->error_desc;
+
+ /* Cleans the error report buffer */
+ memset(e, 0, sizeof (*e));
+ e->error_count = 1;
+ strcpy(e->label, "unknown label");
+ e->msg = pvt->msg;
+ e->other_detail = pvt->other_detail;
+ e->top_layer = -1;
+ e->mid_layer = -1;
+ e->low_layer = -1;
+ *pvt->other_detail = '\0';
+ *pvt->msg = '\0';
+
+ switch (sev) {
+ case GHES_SEV_CORRECTED:
+ type = HW_EVENT_ERR_CORRECTED;
+ break;
+ case GHES_SEV_RECOVERABLE:
+ type = HW_EVENT_ERR_UNCORRECTED;
+ break;
+ case GHES_SEV_PANIC:
+ type = HW_EVENT_ERR_FATAL;
+ break;
+ default:
+ case GHES_SEV_NO:
+ type = HW_EVENT_ERR_INFO;
+ }
+
+ edac_dbg(1, "error validation_bits: 0x%08llx\n",
+ (long long)mem_err->validation_bits);
+
+ /* Error type, mapped on e->msg */
+ if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ p = pvt->msg;
+ switch (mem_err->error_type) {
+ case 0:
+ p += sprintf(p, "Unknown");
+ break;
+ case 1:
+ p += sprintf(p, "No error");
+ break;
+ case 2:
+ p += sprintf(p, "Single-bit ECC");
+ break;
+ case 3:
+ p += sprintf(p, "Multi-bit ECC");
+ break;
+ case 4:
+ p += sprintf(p, "Single-symbol ChipKill ECC");
+ break;
+ case 5:
+ p += sprintf(p, "Multi-symbol ChipKill ECC");
+ break;
+ case 6:
+ p += sprintf(p, "Master abort");
+ break;
+ case 7:
+ p += sprintf(p, "Target abort");
+ break;
+ case 8:
+ p += sprintf(p, "Parity Error");
+ break;
+ case 9:
+ p += sprintf(p, "Watchdog timeout");
+ break;
+ case 10:
+ p += sprintf(p, "Invalid address");
+ break;
+ case 11:
+ p += sprintf(p, "Mirror Broken");
+ break;
+ case 12:
+ p += sprintf(p, "Memory Sparing");
+ break;
+ case 13:
+ p += sprintf(p, "Scrub corrected error");
+ break;
+ case 14:
+ p += sprintf(p, "Scrub uncorrected error");
+ break;
+ case 15:
+ p += sprintf(p, "Physical Memory Map-out event");
+ break;
+ default:
+ p += sprintf(p, "reserved error (%d)",
+ mem_err->error_type);
+ }
+ } else {
+ strcpy(pvt->msg, "unknown error");
+ }
+
+ /* Error address */
+ if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
+ e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
+ }
+
+ /* Error grain */
+ if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) {
+ e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ }
+
+ /* Memory error location, mapped on e->location */
+ p = e->location;
+ if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
+ p += sprintf(p, "node:%d ", mem_err->node);
+ if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
+ p += sprintf(p, "card:%d ", mem_err->card);
+ if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
+ p += sprintf(p, "module:%d ", mem_err->module);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
+ p += sprintf(p, "bank:%d ", mem_err->bank);
+ if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
+ p += sprintf(p, "row:%d ", mem_err->row);
+ if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
+ p += sprintf(p, "col:%d ", mem_err->column);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+ p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+ if (p > e->location)
+ *(p - 1) = '\0';
+
+ /* All other fields are mapped on e->other_detail */
+ p = pvt->other_detail;
+ if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
+ u64 status = mem_err->error_status;
+
+ p += sprintf(p, "status(0x%016llx): ", (long long)status);
+ switch ((status >> 8) & 0xff) {
+ case 1:
+ p += sprintf(p, "Error detected internal to the component ");
+ break;
+ case 16:
+ p += sprintf(p, "Error detected in the bus ");
+ break;
+ case 4:
+ p += sprintf(p, "Storage error in DRAM memory ");
+ break;
+ case 5:
+ p += sprintf(p, "Storage error in TLB ");
+ break;
+ case 6:
+ p += sprintf(p, "Storage error in cache ");
+ break;
+ case 7:
+ p += sprintf(p, "Error in one or more functional units ");
+ break;
+ case 8:
+ p += sprintf(p, "component failed self test ");
+ break;
+ case 9:
+ p += sprintf(p, "Overflow or undervalue of internal queue ");
+ break;
+ case 17:
+ p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
+ break;
+ case 18:
+ p += sprintf(p, "Improper access error ");
+ break;
+ case 19:
+ p += sprintf(p, "Access to a memory address which is not mapped to any component ");
+ break;
+ case 20:
+ p += sprintf(p, "Loss of Lockstep ");
+ break;
+ case 21:
+ p += sprintf(p, "Response not associated with a request ");
+ break;
+ case 22:
+ p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
+ break;
+ case 23:
+ p += sprintf(p, "Detection of a PATH_ERROR ");
+ break;
+ case 25:
+ p += sprintf(p, "Bus operation timeout ");
+ break;
+ case 26:
+ p += sprintf(p, "A read was issued to data that has been poisoned ");
+ break;
+ default:
+ p += sprintf(p, "reserved ");
+ break;
+ }
+ }
+ if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+ p += sprintf(p, "requestorID: 0x%016llx ",
+ (long long)mem_err->requestor_id);
+ if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+ p += sprintf(p, "responderID: 0x%016llx ",
+ (long long)mem_err->responder_id);
+ if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
+ p += sprintf(p, "targetID: 0x%016llx ",
+ (long long)mem_err->responder_id);
+ if (p > pvt->other_detail)
+ *(p - 1) = '\0';
+
+ /* Generate the trace event */
+ grain_bits = fls_long(e->grain);
+ sprintf(pvt->detail_location, "APEI location: %s %s",
+ e->location, e->other_detail);
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+ PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+ grain_bits, e->syndrome, pvt->detail_location);
+
+ /* Report the error via EDAC API */
+ edac_raw_mc_handle_error(type, mci, e);
+}
+EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+ bool fake = false;
+ int rc, num_dimm = 0;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ struct ghes_edac_pvt *pvt;
+ struct ghes_edac_dimm_fill dimm_fill;
+
+ /* Get the number of DIMMs */
+ dmi_walk(ghes_edac_count_dimms, &num_dimm);
+
+ /* Check if we've got a bogus BIOS */
+ if (num_dimm == 0) {
+ fake = true;
+ num_dimm = 1;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = num_dimm;
+ layers[0].is_virt_csrow = true;
+
+ /*
+ * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
+ * to avoid duplicated memory controller numbers
+ */
+ mutex_lock(&ghes_edac_lock);
+ mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+ if (!mci) {
+ pr_info("Can't allocate memory for EDAC data\n");
+ mutex_unlock(&ghes_edac_lock);
+ return -ENOMEM;
+ }
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+ list_add_tail(&pvt->list, &ghes_reglist);
+ pvt->ghes = ghes;
+ pvt->mci = mci;
+ mci->pdev = dev;
+
+ mci->mtype_cap = MEM_FLAG_EMPTY;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "ghes_edac.c";
+ mci->mod_ver = GHES_EDAC_REVISION;
+ mci->ctl_name = "ghes_edac";
+ mci->dev_name = "ghes";
+
+ if (!ghes_edac_mc_num) {
+ if (!fake) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n",
+ num_dimm);
+ } else {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ }
+ }
+
+ if (!fake) {
+ /*
+ * Fill DIMM info from DMI for the memory controller #0
+ *
+ * Keep it blank for the other memory controllers, as
+ * there's no reliable way to properly credit each DIMM to
+ * the memory controller, as different BIOSes fill the
+ * DMI bank location fields in different ways
+ */
+ if (!ghes_edac_mc_num) {
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
+ }
+ } else {
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, 0, 0, 0);
+
+ dimm->nr_pages = 1;
+ dimm->grain = 128;
+ dimm->mtype = MEM_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_SECDED;
+ }
+
+ rc = edac_mc_add_mc(mci);
+ if (rc < 0) {
+ pr_info("Can't register at EDAC core\n");
+ edac_mc_free(mci);
+ mutex_unlock(&ghes_edac_lock);
+ return -ENODEV;
+ }
+
+ ghes_edac_mc_num++;
+ mutex_unlock(&ghes_edac_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ghes_edac_register);
+
+void ghes_edac_unregister(struct ghes *ghes)
+{
+ struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt, *tmp;
+
+ list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
+ if (ghes == pvt->ghes) {
+ mci = pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
+ list_del(&pvt->list);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ghes_edac_unregister);
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 4e8337602e78..aa44c1718f50 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -106,16 +106,26 @@ static int nr_channels;
static int how_many_channels(struct pci_dev *pdev)
{
+ int n_channels;
+
unsigned char capid0_8b; /* 8th byte of CAPID0 */
pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
edac_dbg(0, "In single channel mode\n");
- return 1;
+ n_channels = 1;
} else {
edac_dbg(0, "In dual channel mode\n");
- return 2;
+ n_channels = 2;
}
+
+ if (capid0_8b & 0x10) /* check if both channels are filled */
+ edac_dbg(0, "2 DIMMS per channel disabled\n");
+ else
+ edac_dbg(0, "2 DIMMS per channel enabled\n");
+
+ return n_channels;
}
static unsigned long eccerrlog_syndrome(u64 log)
@@ -290,6 +300,8 @@ static void i3200_get_drbs(void __iomem *window,
for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+
+ edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]);
}
}
@@ -311,6 +323,9 @@ static unsigned long drb_to_nr_pages(
int n;
n = drbs[channel][rank];
+ if (!n)
+ return 0;
+
if (rank > 0)
n -= drbs[channel][rank - 1];
if (stacked && (channel == 1) &&
@@ -377,19 +392,19 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < I3200_DIMMS; i++) {
unsigned long nr_pages;
- struct csrow_info *csrow = mci->csrows[i];
- nr_pages = drb_to_nr_pages(drbs, stacked,
- i / I3200_RANKS_PER_CHANNEL,
- i % I3200_RANKS_PER_CHANNEL);
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, i, j, 0);
- if (nr_pages == 0)
- continue;
+ nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
+ if (nr_pages == 0)
+ continue;
- for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j]->dimm;
+ edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+ stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
dimm->grain = nr_pages << PAGE_SHIFT;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d6955b2cc99f..1b635178cc44 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -27,6 +27,7 @@
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>
+#include <linux/debugfs.h>
#include "edac_core.h"
@@ -68,6 +69,14 @@
I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
+#define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */
+#define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */
+#define I5100_MEMXEINJMSK0_EINJEN (1 << 27)
+#define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */
+#define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */
+
+/* Device 19, Function 0 */
+#define I5100_DINJ0 0x9a
/* device 21 and 22, func 0 */
#define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
@@ -338,13 +347,26 @@ struct i5100_priv {
unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
+ struct pci_dev *einj; /* device 19 func 0 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
struct delayed_work i5100_scrubbing;
int scrub_enable;
+
+ /* Error injection */
+ u8 inject_channel;
+ u8 inject_hlinesel;
+ u8 inject_deviceptr1;
+ u8 inject_deviceptr2;
+ u16 inject_eccmask1;
+ u16 inject_eccmask2;
+
+ struct dentry *debugfs;
};
+static struct dentry *i5100_debugfs;
+
/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
int chan, int rank)
@@ -863,13 +885,126 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
}
}
+/****************************************************************************
+ * Error injection routines
+ ****************************************************************************/
+
+static void i5100_do_inject(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 mask0;
+ u16 mask1;
+
+ /* MEM[1:0]EINJMSK0
+ * 31 - ADDRMATCHEN
+ * 29:28 - HLINESEL
+ * 00 Reserved
+ * 01 Lower half of cache line
+ * 10 Upper half of cache line
+ * 11 Both upper and lower parts of cache line
+ * 27 - EINJEN
+ * 25:19 - XORMASK1 for deviceptr1
+ * 9:5 - SEC2RAM for deviceptr2
+ * 4:0 - FIR2RAM for deviceptr1
+ */
+ mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
+ I5100_MEMXEINJMSK0_EINJEN |
+ ((priv->inject_eccmask1 & 0xffff) << 10) |
+ ((priv->inject_deviceptr2 & 0x1f) << 5) |
+ (priv->inject_deviceptr1 & 0x1f);
+
+ /* MEM[1:0]EINJMSK1
+ * 15:0 - XORMASK2 for deviceptr2
+ */
+ mask1 = priv->inject_eccmask2;
+
+ if (priv->inject_channel == 0) {
+ pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
+ pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
+ } else {
+ pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
+ pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
+ }
+
+ /* Error Injection Response Function
+ * Intel 5100 Memory Controller Hub Chipset (318378) datasheet
+ * hints at this register but carries no data about it. All
+ * data regarding device 19 is based on experimentation and the
+ * Intel 7300 Chipset Memory Controller Hub (318082) datasheet
+ * which appears to be accurate for the i5100 in this area.
+ *
+ * The injection code doesn't work without setting this register.
+ * The register needs to be flipped off then on, else the hardware
+ * will only perform the first injection.
+ *
+ * Stop condition bits 7:4
+ * 1010 - Stop after one injection
+ * 1011 - Never stop injecting faults
+ *
+ * Start condition bits 3:0
+ * 1010 - Never start
+ * 1011 - Start immediately
+ */
+ pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
+ pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+static ssize_t inject_enable_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+
+ i5100_do_inject(mci);
+
+ return count;
+}
+
+static const struct file_operations i5100_inject_enable_fops = {
+ .open = simple_open,
+ .write = inject_enable_write,
+ .llseek = generic_file_llseek,
+};
+
+static int i5100_setup_debugfs(struct mem_ctl_info *mci)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+
+ if (!i5100_debugfs)
+ return -ENODEV;
+
+ priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+
+ if (!priv->debugfs)
+ return -ENOMEM;
+
+ debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_channel);
+ debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_hlinesel);
+ debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_deviceptr1);
+ debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_deviceptr2);
+ debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_eccmask1);
+ debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
+ &priv->inject_eccmask2);
+ debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
+ &mci->dev, &i5100_inject_enable_fops);
+
+ return 0;
+
+}
+
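A brief usage sketch for the injection knobs created above, assuming the usual debugfs mount point:

/*
 * The files appear under /sys/kernel/debug/i5100_edac/<mci bus name>/.
 * Write inject_channel, inject_hlinesel, inject_deviceptr1/2 and
 * inject_eccmask1/2 first, then write any value to inject_enable to
 * trigger i5100_do_inject() once.
 */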
static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
struct i5100_priv *priv;
- struct pci_dev *ch0mm, *ch1mm;
+ struct pci_dev *ch0mm, *ch1mm, *einj;
int ret = 0;
u32 dw;
int ranksperch;
@@ -941,6 +1076,22 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto bail_disable_ch1;
}
+
+ /* device 19, func 0, Error injection */
+ einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_5100_19, 0);
+ if (!einj) {
+ ret = -ENODEV;
+ goto bail_einj;
+ }
+
+ rc = pci_enable_device(einj);
+ if (rc < 0) {
+ ret = rc;
+ goto bail_disable_einj;
+ }
+
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
@@ -948,6 +1099,7 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ priv->einj = einj;
INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
@@ -975,6 +1127,13 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
+ priv->inject_channel = 0;
+ priv->inject_hlinesel = 0;
+ priv->inject_deviceptr1 = 0;
+ priv->inject_deviceptr2 = 0;
+ priv->inject_eccmask1 = 0;
+ priv->inject_eccmask2 = 0;
+
i5100_init_csrows(mci);
/* this strange construction seems to be in every driver, dunno why */
@@ -992,6 +1151,8 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto bail_scrub;
}
+ i5100_setup_debugfs(mci);
+
return ret;
bail_scrub:
@@ -999,6 +1160,12 @@ bail_scrub:
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
+bail_disable_einj:
+ pci_disable_device(einj);
+
+bail_einj:
+ pci_dev_put(einj);
+
bail_disable_ch1:
pci_disable_device(ch1mm);
@@ -1030,14 +1197,18 @@ static void i5100_remove_one(struct pci_dev *pdev)
priv = mci->pvt_info;
+ debugfs_remove_recursive(priv->debugfs);
+
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
+ pci_disable_device(priv->einj);
pci_dev_put(priv->ch0mm);
pci_dev_put(priv->ch1mm);
+ pci_dev_put(priv->einj);
edac_mc_free(mci);
}
@@ -1060,13 +1231,16 @@ static int __init i5100_init(void)
{
int pci_rc;
- pci_rc = pci_register_driver(&i5100_driver);
+ i5100_debugfs = debugfs_create_dir("i5100_edac", NULL);
+ pci_rc = pci_register_driver(&i5100_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
static void __exit i5100_exit(void)
{
+ debugfs_remove(i5100_debugfs);
+
pci_unregister_driver(&i5100_driver);
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e213d030b0dd..0ec3e95a12cd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -420,21 +420,21 @@ static inline int numdimms(u32 dimms)
static inline int numrank(u32 rank)
{
- static int ranks[4] = { 1, 2, 4, -EINVAL };
+ static const int ranks[] = { 1, 2, 4, -EINVAL };
return ranks[rank & 0x3];
}
static inline int numbank(u32 bank)
{
- static int banks[4] = { 4, 8, 16, -EINVAL };
+ static const int banks[] = { 4, 8, 16, -EINVAL };
return banks[bank & 0x3];
}
static inline int numrow(u32 row)
{
- static int rows[8] = {
+ static const int rows[] = {
1 << 12, 1 << 13, 1 << 14, 1 << 15,
1 << 16, -EINVAL, -EINVAL, -EINVAL,
};
@@ -444,7 +444,7 @@ static inline int numrow(u32 row)
static inline int numcol(u32 col)
{
- static int cols[8] = {
+ static const int cols[] = {
1 << 10, 1 << 11, 1 << 12, -EINVAL,
};
return cols[col & 0x3];
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index da7e2986e3d5..57244f995614 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -639,7 +639,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (1 + pvt->tohm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f8d22872d753..27ac423ab25e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -487,27 +487,28 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
static int add_client_resource(struct client *client,
struct client_resource *resource, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret;
- retry:
- if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
- return -ENOMEM;
-
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&client->lock, flags);
+
if (client->in_shutdown)
ret = -ECANCELED;
else
- ret = idr_get_new(&client->resource_idr, resource,
- &resource->handle);
+ ret = idr_alloc(&client->resource_idr, resource, 0, 0,
+ GFP_NOWAIT);
if (ret >= 0) {
+ resource->handle = ret;
client_get(client);
schedule_if_iso_resource(resource);
}
- spin_unlock_irqrestore(&client->lock, flags);
- if (ret == -EAGAIN)
- goto retry;
+ spin_unlock_irqrestore(&client->lock, flags);
+ if (preload)
+ idr_preload_end();
return ret < 0 ? ret : 0;
}
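The hunk above converts the driver to the idr_preload()/idr_alloc() API. A generic sketch of that pattern, not firewire-specific (the function name is illustrative, and unlike the patch it does not skip preloading for non-blocking allocations):

/* Sketch of the idr_preload()/idr_alloc() pattern used above. */
static int assign_id(struct idr *idr, void *ptr, spinlock_t *lock, gfp_t gfp)
{
	int id;

	idr_preload(gfp);				/* preallocate outside the lock */
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);	/* 0, 0 = any id >= 0 */
	spin_unlock(lock);
	idr_preload_end();

	return id;					/* >= 0 on success, -errno on failure */
}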
@@ -1779,7 +1780,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
idr_for_each(&client->resource_idr, shutdown_resource, client);
- idr_remove_all(&client->resource_idr);
idr_destroy(&client->resource_idr);
list_for_each_entry_safe(event, next_event, &client->event_list, link)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3873d535b28d..03ce7d980c6a 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -1017,12 +1017,11 @@ static void fw_device_init(struct work_struct *work)
fw_device_get(device);
down_write(&fw_device_rwsem);
- ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
- idr_get_new(&fw_device_idr, device, &minor) :
- -ENOMEM;
+ minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
+ GFP_KERNEL);
up_write(&fw_device_rwsem);
- if (ret < 0)
+ if (minor < 0)
goto error;
device->device.bus = &fw_bus_type;
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index fed08b661711..7320bf891706 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -79,6 +79,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pstore.h>
+#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/ramfs.h>
@@ -908,6 +909,48 @@ static struct inode *efivarfs_get_inode(struct super_block *sb,
return inode;
}
+/*
+ * Return true if 'str' is a valid efivarfs filename of the form,
+ *
+ * VariableName-12345678-1234-1234-1234-1234567891bc
+ */
+static bool efivarfs_valid_name(const char *str, int len)
+{
+ static const char dashes[GUID_LEN] = {
+ [8] = 1, [13] = 1, [18] = 1, [23] = 1
+ };
+ const char *s = str + len - GUID_LEN;
+ int i;
+
+ /*
+ * We need a GUID, plus at least one letter for the variable name,
+ * plus the '-' separator
+ */
+ if (len < GUID_LEN + 2)
+ return false;
+
+ /* GUID should be right after the first '-' */
+ if (s - 1 != strchr(str, '-'))
+ return false;
+
+ /*
+ * Validate that 's' is of the correct format, e.g.
+ *
+ * 12345678-1234-1234-1234-123456789abc
+ */
+ for (i = 0; i < GUID_LEN; i++) {
+ if (dashes[i]) {
+ if (*s++ != '-')
+ return false;
+ } else {
+ if (!isxdigit(*s++))
+ return false;
+ }
+ }
+
+ return true;
+}
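As an illustration of the check, a sketch with a hypothetical helper, using the standard EFI global-variable GUID for the well-formed case:

static void efivarfs_valid_name_example(void)
{
	const char *good = "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	const char *bad  = "Boot0000-8be4df61";	/* GUID portion incomplete */

	WARN_ON(!efivarfs_valid_name(good, strlen(good)));	/* accepted */
	WARN_ON(efivarfs_valid_name(bad, strlen(bad)));		/* rejected: too short */
}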
+
static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
{
guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
@@ -936,11 +979,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
struct efivar_entry *var;
int namelen, i = 0, err = 0;
- /*
- * We need a GUID, plus at least one letter for the variable name,
- * plus the '-' separator
- */
- if (dentry->d_name.len < GUID_LEN + 2)
+ if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
@@ -1012,6 +1051,84 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
return -EINVAL;
};
+/*
+ * Compare two efivarfs file names.
+ *
+ * An efivarfs filename is composed of two parts,
+ *
+ * 1. A case-sensitive variable name
+ * 2. A case-insensitive GUID
+ *
+ * So we need to perform a case-sensitive match on part 1 and a
+ * case-insensitive match on part 2.
+ */
+static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
+ const struct dentry *dentry, const struct inode *inode,
+ unsigned int len, const char *str,
+ const struct qstr *name)
+{
+ int guid = len - GUID_LEN;
+
+ if (name->len != len)
+ return 1;
+
+ /* Case-sensitive compare for the variable name */
+ if (memcmp(str, name->name, guid))
+ return 1;
+
+ /* Case-insensitive compare for the GUID */
+ return strncasecmp(name->name + guid, str + guid, GUID_LEN);
+}
+
+static int efivarfs_d_hash(const struct dentry *dentry,
+ const struct inode *inode, struct qstr *qstr)
+{
+ unsigned long hash = init_name_hash();
+ const unsigned char *s = qstr->name;
+ unsigned int len = qstr->len;
+
+ if (!efivarfs_valid_name(s, len))
+ return -EINVAL;
+
+ while (len-- > GUID_LEN)
+ hash = partial_name_hash(*s++, hash);
+
+ /* GUID is case-insensitive. */
+ while (len--)
+ hash = partial_name_hash(tolower(*s++), hash);
+
+ qstr->hash = end_name_hash(hash);
+ return 0;
+}
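
To make the split concrete: the last GUID_LEN characters of the name are matched case-insensitively, everything before them case-sensitively. A small userspace sketch of that comparison, again assuming GUID_LEN is 36 and using made-up names:

/* gcc -o dcompare dcompare.c */
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp */

#define GUID_LEN 36

/* Returns 0 when the two efivarfs-style names refer to the same variable. */
static int name_compare(const char *a, const char *b)
{
	size_t la = strlen(a), lb = strlen(b);
	size_t guid;

	if (la != lb || la < GUID_LEN)
		return 1;
	guid = la - GUID_LEN;

	if (memcmp(a, b, guid))		/* variable name part: exact match */
		return 1;
	return strncasecmp(a + guid, b + guid, GUID_LEN);	/* GUID: any case */
}

int main(void)
{
	const char *x = "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	const char *y = "Boot0000-8BE4DF61-93CA-11D2-AA0D-00E098032B8C";
	const char *z = "boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";

	printf("x vs y: %s\n", name_compare(x, y) ? "different" : "same");
	printf("x vs z: %s\n", name_compare(x, z) ? "different" : "same");
	return 0;
}

x and y differ only in GUID case and compare as the same variable; x and z differ in the variable-name case and do not.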
+
+/*
+ * Retaining negative dentries for an in-memory filesystem just wastes
+ * memory and lookup time: arrange for them to be deleted immediately.
+ */
+static int efivarfs_delete_dentry(const struct dentry *dentry)
+{
+ return 1;
+}
+
+static struct dentry_operations efivarfs_d_ops = {
+ .d_compare = efivarfs_d_compare,
+ .d_hash = efivarfs_d_hash,
+ .d_delete = efivarfs_delete_dentry,
+};
+
+static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
+{
+ struct qstr q;
+
+ q.name = name;
+ q.len = strlen(name);
+
+ if (efivarfs_d_hash(NULL, NULL, &q))
+ return NULL;
+
+ return d_alloc(parent, &q);
+}
+
static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode = NULL;
@@ -1027,6 +1144,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = EFIVARFS_MAGIC;
sb->s_op = &efivarfs_ops;
+ sb->s_d_op = &efivarfs_d_ops;
sb->s_time_gran = 1;
inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
@@ -1067,7 +1185,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
if (!inode)
goto fail_name;
- dentry = d_alloc_name(root, name);
+ dentry = efivarfs_alloc_dentry(root, name);
if (!dentry)
goto fail_inode;
@@ -1084,7 +1202,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
mutex_lock(&inode->i_mutex);
inode->i_private = entry;
- i_size_write(inode, size+4);
+ i_size_write(inode, size + sizeof(entry->var.Attributes));
mutex_unlock(&inode->i_mutex);
d_add(dentry, inode);
}
@@ -1117,8 +1235,20 @@ static struct file_system_type efivarfs_type = {
.kill_sb = efivarfs_kill_sb,
};
+/*
+ * Handle negative dentry.
+ */
+static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ if (dentry->d_name.len > NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+ d_add(dentry, NULL);
+ return NULL;
+}
+
static const struct inode_operations efivarfs_dir_inode_operations = {
- .lookup = simple_lookup,
+ .lookup = efivarfs_lookup,
.unlink = efivarfs_unlink,
.create = efivarfs_create,
};
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b89d250f56e7..93aaadf99f28 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -30,6 +30,9 @@ config ARCH_REQUIRE_GPIOLIB
Selecting this from the architecture code will cause the gpiolib
code to always get built in.
+config GPIO_DEVRES
+ def_bool y
+ depends on HAS_IOMEM
menuconfig GPIOLIB
@@ -298,6 +301,14 @@ config GPIO_GE_FPGA
and write pin state) for GPIO implemented in a number of GE single
board computers.
+config GPIO_LYNXPOINT
+ bool "Intel Lynxpoint GPIO support"
+ depends on ACPI
+ select IRQ_DOMAIN
+ help
+	  Driver for GPIO functionality on the Intel Lynxpoint PCH chipset.
+ Requires ACPI device enumeration code to set up a platform device.
+
comment "I2C GPIO expanders:"
config GPIO_ARIZONA
@@ -657,6 +668,13 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
+config GPIO_PALMAS
+ bool "TI PALMAS series PMICs GPIO"
+ depends on MFD_PALMAS
+ help
+ Select this option to enable GPIO driver for the TI PALMAS
+ series chip family.
+
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 45a388c21d04..22e07bc9fcb5 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,8 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
+obj-$(CONFIG_GPIO_DEVRES) += devres.o
+obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -68,6 +70,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
+obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index bdc8302e711a..deca78f99316 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -299,8 +299,9 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_set_type = em_gio_irq_set_type;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
- p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
pdata->number_of_pins,
+ pdata->irq_base,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index e77b2b3e94af..634c3d37f7b5 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -71,10 +71,12 @@ struct lnw_gpio {
struct irq_domain *domain;
};
+#define to_lnw_priv(chip) container_of(chip, struct lnw_gpio, chip)
+
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
void __iomem *ptr;
@@ -86,7 +88,7 @@ static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 16;
void __iomem *ptr;
@@ -130,7 +132,7 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
u32 value;
unsigned long flags;
@@ -153,7 +155,7 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
unsigned long flags;
@@ -176,7 +178,7 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ struct lnw_gpio *lnw = to_lnw_priv(chip);
return irq_create_mapping(lnw->domain, offset);
}
@@ -234,6 +236,8 @@ static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -299,17 +303,6 @@ static const struct irq_domain_ops lnw_gpio_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-#ifdef CONFIG_PM
-static int lnw_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int lnw_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int lnw_gpio_runtime_idle(struct device *dev)
{
int err = pm_schedule_suspend(dev, 500);
@@ -320,16 +313,8 @@ static int lnw_gpio_runtime_idle(struct device *dev)
return -EBUSY;
}
-#else
-#define lnw_gpio_runtime_suspend NULL
-#define lnw_gpio_runtime_resume NULL
-#define lnw_gpio_runtime_idle NULL
-#endif
-
static const struct dev_pm_ops lnw_gpio_pm_ops = {
- .runtime_suspend = lnw_gpio_runtime_suspend,
- .runtime_resume = lnw_gpio_runtime_resume,
- .runtime_idle = lnw_gpio_runtime_idle,
+ SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
};
static int lnw_gpio_probe(struct pci_dev *pdev,
@@ -349,7 +334,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
retval = pci_request_regions(pdev, "langwell_gpio");
if (retval) {
dev_err(&pdev->dev, "error requesting resources\n");
- goto err2;
+ goto err_pci_req_region;
}
/* get the gpio_base from bar1 */
start = pci_resource_start(pdev, 1);
@@ -358,7 +343,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
if (!base) {
dev_err(&pdev->dev, "error mapping bar1\n");
retval = -EFAULT;
- goto err3;
+ goto err_ioremap;
}
gpio_base = *((u32 *)base + 1);
/* release the IO mapping, since we already get the info from bar1 */
@@ -370,21 +355,21 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
if (!base) {
dev_err(&pdev->dev, "error mapping bar0\n");
retval = -EFAULT;
- goto err3;
+ goto err_ioremap;
}
- lnw = devm_kzalloc(&pdev->dev, sizeof(struct lnw_gpio), GFP_KERNEL);
+ lnw = devm_kzalloc(&pdev->dev, sizeof(*lnw), GFP_KERNEL);
if (!lnw) {
dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
retval = -ENOMEM;
- goto err3;
+ goto err_ioremap;
}
lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
&lnw_gpio_irq_ops, lnw);
if (!lnw->domain) {
retval = -ENOMEM;
- goto err3;
+ goto err_ioremap;
}
lnw->reg_base = base;
@@ -403,7 +388,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
retval = gpiochip_add(&lnw->chip);
if (retval) {
dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
- goto err3;
+ goto err_ioremap;
}
lnw_irq_init_hw(lnw);
@@ -418,9 +403,9 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
return 0;
-err3:
+err_ioremap:
pci_release_regions(pdev);
-err2:
+err_pci_req_region:
pci_disable_device(pdev);
return retval;
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
new file mode 100644
index 000000000000..3472b05ac512
--- /dev/null
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -0,0 +1,469 @@
+/*
+ * GPIO controller driver for Intel Lynxpoint PCH chipset.
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* LynxPoint chipset has support for 94 gpio pins */
+
+#define LP_NUM_GPIO 94
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED	0x00 /* Bitmap, set by BIOS, 0: pin reserved for ACPI */
+#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT 0x80
+#define LP_INT_ENABLE 0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1 0x100
+#define LP_CONFIG2 0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT BIT(31)
+#define IN_LVL_BIT BIT(30)
+#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_BIT BIT(0) /* 0: Native, 1: GPIO */
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT BIT(2) /* disable input sensing */
+#define GPIWP_BIT (BIT(0) | BIT(1)) /* weak pull options */
+
+struct lp_gpio {
+ struct gpio_chip chip;
+ struct irq_domain *domain;
+ struct platform_device *pdev;
+ spinlock_t lock;
+ unsigned long reg_base;
+};
+
+/*
+ * Lynxpoint gpios are controlled through both bitmapped registers and
+ * per-gpio specific registers. The bitmapped registers are in chunks of
+ * 3 x 32bit registers to cover all 94 gpios.
+ *
+ * Per-gpio specific registers consist of two 32bit registers per gpio
+ * (LP_CONFIG1 and LP_CONFIG2); with 94 gpios there's a total of
+ * 188 config registers.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 64-94
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP_INT_ENABLE[94:64] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ */
+
+static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
+ int reg)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ int reg_offset;
+
+ if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+ /* per gpio specific config registers */
+ reg_offset = offset * 8;
+ else
+ /* bitmapped registers */
+ reg_offset = (offset / 32) * 4;
+
+ return lg->reg_base + reg + reg_offset;
+}
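
The address arithmetic above is easy to check by hand: per-gpio config registers are spaced 8 bytes apart on top of the register's base offset, while bitmapped registers advance by 4 bytes for every group of 32 pins. A standalone sketch of the same computation; the base address 0x1000 is made up for illustration:

/* gcc -o lpreg lpreg.c */
#include <stdio.h>

#define LP_ACPI_OWNED	0x00
#define LP_INT_STAT	0x80
#define LP_CONFIG1	0x100
#define LP_CONFIG2	0x104

static unsigned long lp_gpio_reg(unsigned long reg_base, unsigned offset, int reg)
{
	int reg_offset;

	if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
		reg_offset = offset * 8;	/* two 32-bit regs per pin */
	else
		reg_offset = (offset / 32) * 4;	/* one 32-bit reg per 32 pins */

	return reg_base + reg + reg_offset;
}

int main(void)
{
	unsigned long base = 0x1000;	/* illustrative I/O base */

	printf("CONFIG1 of gpio 0:    0x%lx\n", lp_gpio_reg(base, 0, LP_CONFIG1));
	printf("CONFIG1 of gpio 5:    0x%lx\n", lp_gpio_reg(base, 5, LP_CONFIG1));   /* 0x1128 */
	printf("INT_STAT for gpio 70: 0x%lx\n", lp_gpio_reg(base, 70, LP_INT_STAT)); /* 0x1088 */
	return 0;
}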
+
+static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
+ unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
+
+	pm_runtime_get(&lg->pdev->dev); /* should we pm_runtime_put() if this fails? */
+
+ /* Fail if BIOS reserved pin for ACPI use */
+ if (!(inl(acpi_use) & BIT(offset % 32))) {
+ dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
+ return -EBUSY;
+ }
+ /* Fail if pin is in alternate function mode (not GPIO mode) */
+ if (!(inl(reg) & USE_SEL_BIT))
+ return -ENODEV;
+
+ /* enable input sensing */
+ outl(inl(conf2) & ~GPINDIS_BIT, conf2);
+
+
+ return 0;
+}
+
+static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
+
+ /* disable input sensing */
+ outl(inl(conf2) | GPINDIS_BIT, conf2);
+
+ pm_runtime_put(&lg->pdev->dev);
+}
+
+static int lp_irq_type(struct irq_data *d, unsigned type)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 value;
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+
+ if (hwirq >= lg->chip.ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ value = inl(reg);
+
+ /* set both TRIG_SEL and INV bits to 0 for rising edge */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+ /* TRIG_SEL bit 0, INV bit 1 for falling edge */
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 0 for level low */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 1 for level high */
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+ outl(value, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
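
Summarizing the cases handled above, the two configuration bits encode the trigger as:

    rising edge    TRIG_SEL = 0, INT_INV = 0
    falling edge   TRIG_SEL = 0, INT_INV = 1
    level low      TRIG_SEL = 1, INT_INV = 0
    level high     TRIG_SEL = 1, INT_INV = 1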
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ return inl(reg) & IN_LVL_BIT;
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+
+ if (value)
+ outl(inl(reg) | OUT_LVL_BIT, reg);
+ else
+ outl(inl(reg) & ~OUT_LVL_BIT, reg);
+
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) | DIR_BIT, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ lp_gpio_set(chip, offset, value);
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) & ~DIR_BIT, reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
+ return irq_create_mapping(lg->domain, offset);
+}
+
+static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ u32 base, pin, mask;
+ unsigned long reg, pending;
+ unsigned virq;
+
+ /* check from GPIO controller which pin triggered the interrupt */
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+
+ while ((pending = inl(reg))) {
+ pin = __ffs(pending);
+ mask = BIT(pin);
+ /* Clear before handling so we don't lose an edge */
+ outl(mask, reg);
+ virq = irq_find_mapping(lg->domain, base + pin);
+ generic_handle_irq(virq);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
+
+static void lp_irq_enable(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) | BIT(hwirq % 32), reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+ struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ u32 hwirq = irqd_to_hwirq(d);
+ unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lg->lock, flags);
+ outl(inl(reg) & ~BIT(hwirq % 32), reg);
+ spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static struct irq_chip lp_irqchip = {
+ .name = "LP-GPIO",
+ .irq_mask = lp_irq_mask,
+ .irq_unmask = lp_irq_unmask,
+ .irq_enable = lp_irq_enable,
+ .irq_disable = lp_irq_disable,
+ .irq_set_type = lp_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+{
+ unsigned long reg;
+ unsigned base;
+
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ /* disable gpio pin interrupts */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+ outl(0, reg);
+ /* Clear interrupt status register */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ outl(0xffffffff, reg);
+ }
+}
+
+static int lp_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lp_gpio *lg = d->host_data;
+
+ irq_set_chip_and_handler_name(virq, &lp_irqchip, handle_simple_irq,
+ "demux");
+ irq_set_chip_data(virq, lg);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lp_gpio_irq_ops = {
+ .map = lp_gpio_irq_map,
+};
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+ struct lp_gpio *lg;
+ struct gpio_chip *gc;
+ struct resource *io_rc, *irq_rc;
+ struct device *dev = &pdev->dev;
+ unsigned long reg_len;
+ unsigned hwirq;
+ int ret = -ENODEV;
+
+ lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
+ if (!lg) {
+ dev_err(dev, "can't allocate lp_gpio chip data\n");
+ return -ENOMEM;
+ }
+
+ lg->pdev = pdev;
+ platform_set_drvdata(pdev, lg);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ lg->reg_base = io_rc->start;
+ reg_len = resource_size(io_rc);
+
+ if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
+ dev_err(dev, "failed requesting IO region 0x%x\n",
+ (unsigned int)lg->reg_base);
+ return -EBUSY;
+ }
+
+ spin_lock_init(&lg->lock);
+
+ gc = &lg->chip;
+ gc->label = dev_name(dev);
+ gc->owner = THIS_MODULE;
+ gc->request = lp_gpio_request;
+ gc->free = lp_gpio_free;
+ gc->direction_input = lp_gpio_direction_input;
+ gc->direction_output = lp_gpio_direction_output;
+ gc->get = lp_gpio_get;
+ gc->set = lp_gpio_set;
+ gc->base = -1;
+ gc->ngpio = LP_NUM_GPIO;
+ gc->can_sleep = 0;
+ gc->dev = dev;
+
+ /* set up interrupts */
+ if (irq_rc && irq_rc->start) {
+ hwirq = irq_rc->start;
+ gc->to_irq = lp_gpio_to_irq;
+
+ lg->domain = irq_domain_add_linear(NULL, LP_NUM_GPIO,
+ &lp_gpio_irq_ops, lg);
+ if (!lg->domain)
+ return -ENXIO;
+
+ lp_gpio_irq_init_hw(lg);
+
+ irq_set_handler_data(hwirq, lg);
+ irq_set_chained_handler(hwirq, lp_gpio_irq_handler);
+ }
+
+ ret = gpiochip_add(gc);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+ .runtime_suspend = lp_gpio_runtime_suspend,
+ .runtime_resume = lp_gpio_runtime_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ { "INT33C7", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+ struct lp_gpio *lg = platform_get_drvdata(pdev);
+ int err;
+ err = gpiochip_remove(&lg->chip);
+ if (err)
+ dev_warn(&pdev->dev, "failed to remove gpio_chip.\n");
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver lp_gpio_driver = {
+ .probe = lp_gpio_probe,
+ .remove = lp_gpio_remove,
+ .driver = {
+ .name = "lp_gpio",
+ .owner = THIS_MODULE,
+ .pm = &lp_gpio_pm_ops,
+ .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+ },
+};
+
+static int __init lp_gpio_init(void)
+{
+ return platform_driver_register(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 9ae29cc0d17f..a0b33a216d4a 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -292,7 +292,6 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 45d97c46831a..25000b0f8453 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -66,6 +66,7 @@ struct mxs_gpio_port {
struct irq_domain *domain;
struct bgpio_chip bgc;
enum mxs_gpio_id devid;
+ u32 both_edges;
};
static inline int is_imx23_gpio(struct mxs_gpio_port *port)
@@ -82,13 +83,23 @@ static inline int is_imx28_gpio(struct mxs_gpio_port *port)
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
{
+ u32 val;
u32 pin_mask = 1 << d->hwirq;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxs_gpio_port *port = gc->private;
void __iomem *pin_addr;
int edge;
+ port->both_edges &= ~pin_mask;
switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = gpio_get_value(port->bgc.gc.base + d->hwirq);
+ if (val)
+ edge = GPIO_INT_FALL_EDGE;
+ else
+ edge = GPIO_INT_RISE_EDGE;
+ port->both_edges |= pin_mask;
+ break;
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
break;
@@ -125,6 +136,23 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio)
+{
+ u32 bit, val, edge;
+ void __iomem *pin_addr;
+
+ bit = 1 << gpio;
+
+ pin_addr = port->base + PINCTRL_IRQPOL(port);
+ val = readl(pin_addr);
+ edge = val & bit;
+
+ if (edge)
+ writel(bit, pin_addr + MXS_CLR);
+ else
+ writel(bit, pin_addr + MXS_SET);
+}
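
The both_edges support added here emulates IRQ_TYPE_EDGE_BOTH on hardware that can only latch one polarity at a time: the set_type hook samples the current line level and arms the opposite edge, and the handler flips the polarity after each event so the next transition is caught as well. A minimal host-side simulation of that idea; no kernel APIs are used and all names are illustrative:

/* gcc -o bothedge bothedge.c */
#include <stdio.h>
#include <stdbool.h>

/* Simulated polarity register: true = watch for rising, false = falling. */
static bool watch_rising;

static void arm_for_level(int level)
{
	/* If the line is currently high, the next event is a falling edge. */
	watch_rising = (level == 0);
}

static void on_transition(int new_level)
{
	bool fired = (new_level && watch_rising) || (!new_level && !watch_rising);

	printf("line -> %d : %s\n", new_level, fired ? "irq" : "missed");
	if (fired)
		arm_for_level(new_level);	/* flip the edge, like mxs_flip_edge() */
}

int main(void)
{
	int levels[] = { 1, 0, 1, 0, 1 };
	int i;

	arm_for_level(0);			/* line starts low: wait for rising */
	for (i = 0; i < 5; i++)
		on_transition(levels[i]);
	return 0;
}

Every simulated transition fires, because the armed polarity is flipped after each event.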
+
/* MXS has one interrupt *per* gpio port */
static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
@@ -138,6 +166,9 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
+ if (port->both_edges & (1 << irqoffset))
+ mxs_flip_edge(port, irqoffset);
+
generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
irq_stat &= ~(1 << irqoffset);
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f1fbedb2a6f9..159f5c57eb45 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1476,19 +1476,19 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
-const static struct omap_gpio_platform_data omap2_pdata = {
+static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = false,
};
-const static struct omap_gpio_platform_data omap3_pdata = {
+static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
};
-const static struct omap_gpio_platform_data omap4_pdata = {
+static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
new file mode 100644
index 000000000000..e3a4e56f5a42
--- /dev/null
+++ b/drivers/gpio/gpio-palmas.c
@@ -0,0 +1,184 @@
+/*
+ * TI Palmas series PMICs' GPIO driver.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/palmas.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct palmas_gpio {
+ struct gpio_chip gpio_chip;
+ struct palmas *palmas;
+};
+
+static inline struct palmas_gpio *to_palmas_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct palmas_gpio, gpio_chip);
+}
+
+static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ unsigned int val;
+ int ret;
+
+ ret = palmas_read(palmas, PALMAS_GPIO_BASE, PALMAS_GPIO_DATA_IN, &val);
+ if (ret < 0) {
+ dev_err(gc->dev, "GPIO_DATA_IN read failed, err = %d\n", ret);
+ return ret;
+ }
+ return !!(val & BIT(offset));
+}
+
+static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ if (value)
+ ret = palmas_write(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_SET_DATA_OUT, BIT(offset));
+ else
+ ret = palmas_write(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_CLEAR_DATA_OUT, BIT(offset));
+ if (ret < 0)
+ dev_err(gc->dev, "%s write failed, err = %d\n",
+ (value) ? "GPIO_SET_DATA_OUT" : "GPIO_CLEAR_DATA_OUT",
+ ret);
+}
+
+static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ /* Set the initial value */
+ palmas_gpio_set(gc, offset, value);
+
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_DATA_DIR, BIT(offset), BIT(offset));
+ if (ret < 0)
+ dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+ int ret;
+
+ ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE,
+ PALMAS_GPIO_DATA_DIR, BIT(offset), 0);
+ if (ret < 0)
+ dev_err(gc->dev, "GPIO_DATA_DIR write failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct palmas_gpio *pg = to_palmas_gpio(gc);
+ struct palmas *palmas = pg->palmas;
+
+ return palmas_irq_get_virq(palmas, PALMAS_GPIO_0_IRQ + offset);
+}
+
+static int palmas_gpio_probe(struct platform_device *pdev)
+{
+ struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+ struct palmas_platform_data *palmas_pdata;
+ struct palmas_gpio *palmas_gpio;
+ int ret;
+
+ palmas_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*palmas_gpio), GFP_KERNEL);
+ if (!palmas_gpio) {
+ dev_err(&pdev->dev, "Could not allocate palmas_gpio\n");
+ return -ENOMEM;
+ }
+
+ palmas_gpio->palmas = palmas;
+ palmas_gpio->gpio_chip.owner = THIS_MODULE;
+ palmas_gpio->gpio_chip.label = dev_name(&pdev->dev);
+ palmas_gpio->gpio_chip.ngpio = 8;
+ palmas_gpio->gpio_chip.can_sleep = 1;
+ palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
+ palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
+ palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
+ palmas_gpio->gpio_chip.set = palmas_gpio_set;
+ palmas_gpio->gpio_chip.get = palmas_gpio_get;
+ palmas_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ palmas_gpio->gpio_chip.of_node = palmas->dev->of_node;
+#endif
+ palmas_pdata = dev_get_platdata(palmas->dev);
+ if (palmas_pdata && palmas_pdata->gpio_base)
+ palmas_gpio->gpio_chip.base = palmas_pdata->gpio_base;
+ else
+ palmas_gpio->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&palmas_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, palmas_gpio);
+ return ret;
+}
+
+static int palmas_gpio_remove(struct platform_device *pdev)
+{
+ struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&palmas_gpio->gpio_chip);
+}
+
+static struct platform_driver palmas_gpio_driver = {
+ .driver.name = "palmas-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = palmas_gpio_probe,
+ .remove = palmas_gpio_remove,
+};
+
+static int __init palmas_gpio_init(void)
+{
+ return platform_driver_register(&palmas_gpio_driver);
+}
+subsys_initcall(palmas_gpio_init);
+
+static void __exit palmas_gpio_exit(void)
+{
+ platform_driver_unregister(&palmas_gpio_driver);
+}
+module_exit(palmas_gpio_exit);
+
+MODULE_ALIAS("platform:palmas-gpio");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("GPIO driver for TI Palmas series PMICs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index cc102d25ee24..24059462c87f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -46,6 +46,7 @@
#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
+ { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9536", 4 | PCA953X_TYPE, },
@@ -71,19 +72,23 @@ static const struct i2c_device_id pca953x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
+#define MAX_BANK 5
+#define BANK_SZ 8
+
+#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
+
struct pca953x_chip {
unsigned gpio_start;
- u32 reg_output;
- u32 reg_direction;
+ u8 reg_output[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
- u32 irq_mask;
- u32 irq_stat;
- u32 irq_trig_raise;
- u32 irq_trig_fall;
- int irq_base;
+ u8 irq_mask[MAX_BANK];
+ u8 irq_stat[MAX_BANK];
+ u8 irq_trig_raise[MAX_BANK];
+ u8 irq_trig_fall[MAX_BANK];
struct irq_domain *domain;
#endif
@@ -93,33 +98,69 @@ struct pca953x_chip {
int chip_type;
};
-static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
+static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
+ int off)
+{
+ int ret;
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int offset = off / BANK_SZ;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ (reg << bank_shift) + offset);
+ *val = ret;
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed reading register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val,
+ int off)
+{
+ int ret = 0;
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ int offset = off / BANK_SZ;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << bank_shift) + offset, val);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed writing register\n");
+ return ret;
+ }
+
+ return 0;
+}
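
With the register cache split into 8-bit banks, a pin's bank is off / BANK_SZ and its bit is off % BANK_SZ, while the I2C register address becomes (reg << bank_shift) + bank, with bank_shift = fls((ngpio - 1) / BANK_SZ). A standalone check of that arithmetic for a 40-pin and a 16-pin part; fls() is re-implemented for userspace and the register index passed in is only illustrative:

/* gcc -o banks banks.c */
#include <stdio.h>

#define BANK_SZ 8

/* Userspace stand-in for the kernel's fls(): index of highest set bit, 1-based. */
static int fls_u(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static void show(const char *name, int ngpio, int reg, int off)
{
	int bank_shift = fls_u((ngpio - 1) / BANK_SZ);
	int bank = off / BANK_SZ;

	printf("%s: ngpio=%d bank_shift=%d -> i2c reg 0x%x, pin %d is bank %d bit %d\n",
	       name, ngpio, bank_shift, (reg << bank_shift) + bank,
	       off, bank, off % BANK_SZ);
}

int main(void)
{
	/* reg index 1 is illustrative, standing in for an "output" register */
	show("40-pin part (e.g. pca9505)", 40, 1, 33);
	show("16-pin part (e.g. pca9555)", 16, 1, 9);
	return 0;
}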
+
+static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
{
int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
- ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else if (chip->gpio_chip.ngpio == 24) {
- cpu_to_le32s(&val);
+ ret = i2c_smbus_write_byte_data(chip->client, reg, *val);
+ else if (chip->gpio_chip.ngpio >= 24) {
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
ret = i2c_smbus_write_i2c_block_data(chip->client,
- (reg << 2) | REG_ADDR_AI,
- 3,
- (u8 *) &val);
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
}
else {
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, val);
+ reg << 1, (u16) *val);
break;
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
- val & 0xff);
+ val[0]);
if (ret < 0)
break;
ret = i2c_smbus_write_byte_data(chip->client,
(reg << 1) + 1,
- (val & 0xff00) >> 8);
+ val[1]);
break;
}
}
@@ -132,26 +173,24 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
return 0;
}
-static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
+static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
{
int ret;
if (chip->gpio_chip.ngpio <= 8) {
ret = i2c_smbus_read_byte_data(chip->client, reg);
*val = ret;
- }
- else if (chip->gpio_chip.ngpio == 24) {
- *val = 0;
+ } else if (chip->gpio_chip.ngpio >= 24) {
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
ret = i2c_smbus_read_i2c_block_data(chip->client,
- (reg << 2) | REG_ADDR_AI,
- 3,
- (u8 *) val);
- le32_to_cpus(val);
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
} else {
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
- *val = ret;
+ val[0] = (u16)ret & 0xFF;
+ val[1] = (u16)ret >> 8;
}
-
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
@@ -163,13 +202,13 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- reg_val = chip->reg_direction | (1u << off);
+ reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -179,11 +218,11 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
offset = PCA957X_CFG;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_direction = reg_val;
+ chip->reg_direction[off / BANK_SZ] = reg_val;
ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
@@ -194,7 +233,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip;
- uint reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -202,9 +241,11 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
mutex_lock(&chip->i2c_lock);
/* set output level */
if (val)
- reg_val = chip->reg_output | (1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ | (1u << (off % BANK_SZ));
else
- reg_val = chip->reg_output & ~(1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -214,14 +255,14 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
offset = PCA957X_OUT;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_output = reg_val;
+ chip->reg_output[off / BANK_SZ] = reg_val;
/* then direction */
- reg_val = chip->reg_direction & ~(1u << off);
+ reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
offset = PCA953X_DIRECTION;
@@ -230,11 +271,11 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
offset = PCA957X_CFG;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_direction = reg_val;
+ chip->reg_direction[off / BANK_SZ] = reg_val;
ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
@@ -258,7 +299,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &reg_val);
+ ret = pca953x_read_single(chip, offset, &reg_val, off);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -274,16 +315,18 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
- u32 reg_val;
+ u8 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
if (val)
- reg_val = chip->reg_output | (1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ | (1u << (off % BANK_SZ));
else
- reg_val = chip->reg_output & ~(1u << off);
+ reg_val = chip->reg_output[off / BANK_SZ]
+ & ~(1u << (off % BANK_SZ));
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -293,11 +336,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
offset = PCA957X_OUT;
break;
}
- ret = pca953x_write_reg(chip, offset, reg_val);
+ ret = pca953x_write_single(chip, offset, reg_val, off);
if (ret)
goto exit;
- chip->reg_output = reg_val;
+ chip->reg_output[off / BANK_SZ] = reg_val;
exit:
mutex_unlock(&chip->i2c_lock);
}
@@ -328,21 +371,21 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
struct pca953x_chip *chip;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- return chip->irq_base + off;
+ return irq_create_mapping(chip->domain, off);
}
static void pca953x_irq_mask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask &= ~(1 << d->hwirq);
+ chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ));
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask |= 1 << d->hwirq;
+ chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ);
}
static void pca953x_irq_bus_lock(struct irq_data *d)
@@ -355,17 +398,20 @@ static void pca953x_irq_bus_lock(struct irq_data *d)
static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- u32 new_irqs;
- u32 level;
+ u8 new_irqs;
+ int level, i;
/* Look for any newly setup interrupt */
- new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
- new_irqs &= ~chip->reg_direction;
-
- while (new_irqs) {
- level = __ffs(new_irqs);
- pca953x_gpio_direction_input(&chip->gpio_chip, level);
- new_irqs &= ~(1 << level);
+ for (i = 0; i < NBANK(chip); i++) {
+ new_irqs = chip->irq_trig_fall[i] | chip->irq_trig_raise[i];
+ new_irqs &= ~chip->reg_direction[i];
+
+ while (new_irqs) {
+ level = __ffs(new_irqs);
+ pca953x_gpio_direction_input(&chip->gpio_chip,
+ level + (BANK_SZ * i));
+ new_irqs &= ~(1 << level);
+ }
}
mutex_unlock(&chip->irq_lock);
@@ -374,7 +420,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << d->hwirq;
+ int bank_nb = d->hwirq / BANK_SZ;
+ u8 mask = 1 << (d->hwirq % BANK_SZ);
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -383,14 +430,14 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
}
if (type & IRQ_TYPE_EDGE_FALLING)
- chip->irq_trig_fall |= mask;
+ chip->irq_trig_fall[bank_nb] |= mask;
else
- chip->irq_trig_fall &= ~mask;
+ chip->irq_trig_fall[bank_nb] &= ~mask;
if (type & IRQ_TYPE_EDGE_RISING)
- chip->irq_trig_raise |= mask;
+ chip->irq_trig_raise[bank_nb] |= mask;
else
- chip->irq_trig_raise &= ~mask;
+ chip->irq_trig_raise[bank_nb] &= ~mask;
return 0;
}
@@ -404,13 +451,13 @@ static struct irq_chip pca953x_irq_chip = {
.irq_set_type = pca953x_irq_set_type,
};
-static u32 pca953x_irq_pending(struct pca953x_chip *chip)
+static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
{
- u32 cur_stat;
- u32 old_stat;
- u32 pending;
- u32 trigger;
- int ret, offset = 0;
+ u8 cur_stat[MAX_BANK];
+ u8 old_stat[MAX_BANK];
+ u8 pendings = 0;
+ u8 trigger[MAX_BANK], triggers = 0;
+ int ret, i, offset = 0;
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -420,60 +467,88 @@ static u32 pca953x_irq_pending(struct pca953x_chip *chip)
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &cur_stat);
+ ret = pca953x_read_regs(chip, offset, cur_stat);
if (ret)
return 0;
/* Remove output pins from the equation */
- cur_stat &= chip->reg_direction;
+ for (i = 0; i < NBANK(chip); i++)
+ cur_stat[i] &= chip->reg_direction[i];
+
+ memcpy(old_stat, chip->irq_stat, NBANK(chip));
- old_stat = chip->irq_stat;
- trigger = (cur_stat ^ old_stat) & chip->irq_mask;
+ for (i = 0; i < NBANK(chip); i++) {
+ trigger[i] = (cur_stat[i] ^ old_stat[i]) & chip->irq_mask[i];
+ triggers += trigger[i];
+ }
- if (!trigger)
+ if (!triggers)
return 0;
- chip->irq_stat = cur_stat;
+ memcpy(chip->irq_stat, cur_stat, NBANK(chip));
- pending = (old_stat & chip->irq_trig_fall) |
- (cur_stat & chip->irq_trig_raise);
- pending &= trigger;
+ for (i = 0; i < NBANK(chip); i++) {
+ pending[i] = (old_stat[i] & chip->irq_trig_fall[i]) |
+ (cur_stat[i] & chip->irq_trig_raise[i]);
+ pending[i] &= trigger[i];
+ pendings += pending[i];
+ }
- return pending;
+ return pendings;
}
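
The pending computation above detects edges by comparing the previously cached input state with the fresh read: a bit that was high and is now low counts as a falling edge, and vice versa, restricted to lines whose interrupt is unmasked and which actually changed. A one-bank worked example with made-up register values:

/* gcc -o pending pending.c */
#include <stdio.h>

int main(void)
{
	unsigned char old_stat   = 0x0f;	/* pins 0-3 were high        */
	unsigned char cur_stat   = 0x33;	/* pins 0,1,4,5 are high now */
	unsigned char irq_mask   = 0xff;	/* all lines unmasked        */
	unsigned char trig_fall  = 0x0c;	/* falling edge on pins 2,3  */
	unsigned char trig_raise = 0x30;	/* rising edge on pins 4,5   */

	unsigned char trigger = (cur_stat ^ old_stat) & irq_mask;
	unsigned char pending = ((old_stat & trig_fall) | (cur_stat & trig_raise))
				& trigger;

	printf("trigger=0x%02x pending=0x%02x\n", trigger, pending);	/* 0x3c 0x3c */
	return 0;
}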
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
- u32 pending;
- u32 level;
-
- pending = pca953x_irq_pending(chip);
+ u8 pending[MAX_BANK];
+ u8 level;
+ int i;
- if (!pending)
+ if (!pca953x_irq_pending(chip, pending))
return IRQ_HANDLED;
- do {
- level = __ffs(pending);
- handle_nested_irq(irq_find_mapping(chip->domain, level));
-
- pending &= ~(1 << level);
- } while (pending);
+ for (i = 0; i < NBANK(chip); i++) {
+ while (pending[i]) {
+ level = __ffs(pending[i]);
+ handle_nested_irq(irq_find_mapping(chip->domain,
+ level + (BANK_SZ * i)));
+ pending[i] &= ~(1 << level);
+ }
+ }
return IRQ_HANDLED;
}
+static int pca953x_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip(irq, &pca953x_irq_chip);
+ irq_set_nested_thread(irq, true);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+
+ return 0;
+}
+
+static const struct irq_domain_ops pca953x_irq_simple_ops = {
+ .map = pca953x_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id,
int irq_base)
{
struct i2c_client *client = chip->client;
- int ret, offset = 0;
- u32 temporary;
+ int ret, i, offset = 0;
if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
- int lvl;
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -483,49 +558,29 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &temporary);
- chip->irq_stat = temporary;
+ ret = pca953x_read_regs(chip, offset, chip->irq_stat);
if (ret)
- goto out_failed;
+ return ret;
/*
* There is no way to know which GPIO line generated the
* interrupt. We have to rely on the previous read for
* this purpose.
*/
- chip->irq_stat &= chip->reg_direction;
+ for (i = 0; i < NBANK(chip); i++)
+ chip->irq_stat[i] &= chip->reg_direction[i];
mutex_init(&chip->irq_lock);
- chip->irq_base = irq_alloc_descs(-1, irq_base, chip->gpio_chip.ngpio, -1);
- if (chip->irq_base < 0)
- goto out_failed;
-
- chip->domain = irq_domain_add_legacy(client->dev.of_node,
+ chip->domain = irq_domain_add_simple(client->dev.of_node,
chip->gpio_chip.ngpio,
- chip->irq_base,
- 0,
- &irq_domain_simple_ops,
+ irq_base,
+ &pca953x_irq_simple_ops,
NULL);
- if (!chip->domain) {
- ret = -ENODEV;
- goto out_irqdesc_free;
- }
+ if (!chip->domain)
+ return -ENODEV;
- for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
- int irq = lvl + chip->irq_base;
-
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip_data(irq, chip);
- irq_set_chip(irq, &pca953x_irq_chip);
- irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- }
-
- ret = request_threaded_irq(client->irq,
+ ret = devm_request_threaded_irq(&client->dev,
+ client->irq,
NULL,
pca953x_irq_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
@@ -533,28 +588,15 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
client->irq);
- goto out_irqdesc_free;
+ return ret;
}
chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
}
return 0;
-
-out_irqdesc_free:
- irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
-out_failed:
- chip->irq_base = -1;
- return ret;
}
-static void pca953x_irq_teardown(struct pca953x_chip *chip)
-{
- if (chip->irq_base != -1) {
- irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
- free_irq(chip->client->irq, chip);
- }
-}
#else /* CONFIG_GPIO_PCA953X_IRQ */
static int pca953x_irq_setup(struct pca953x_chip *chip,
const struct i2c_device_id *id,
@@ -567,10 +609,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return 0;
}
-
-static void pca953x_irq_teardown(struct pca953x_chip *chip)
-{
-}
#endif
/*
@@ -619,18 +657,24 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
+ u8 val[MAX_BANK];
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ ret = pca953x_read_regs(chip, PCA953X_OUTPUT, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
- &chip->reg_direction);
+ ret = pca953x_read_regs(chip, PCA953X_DIRECTION,
+ chip->reg_direction);
if (ret)
goto out;
/* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (invert)
+ memset(val, 0xFF, NBANK(chip));
+ else
+ memset(val, 0, NBANK(chip));
+
+ ret = pca953x_write_regs(chip, PCA953X_INVERT, val);
out:
return ret;
}
@@ -638,28 +682,36 @@ out:
static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
- u32 val = 0;
+ u8 val[MAX_BANK];
	/* Leave every port in a proper state; that could save power */
- pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
- pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
- pca953x_write_reg(chip, PCA957X_OUT, 0x0);
-
- ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_PUPD, val);
+ memset(val, 0xFF, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_CFG, val);
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_OUT, val);
+
+ ret = pca953x_read_regs(chip, PCA957X_IN, val);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ ret = pca953x_read_regs(chip, PCA957X_OUT, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ ret = pca953x_read_regs(chip, PCA957X_CFG, chip->reg_direction);
if (ret)
goto out;
/* set platform specific polarity inversion */
- pca953x_write_reg(chip, PCA957X_INVRT, invert);
+ if (invert)
+ memset(val, 0xFF, NBANK(chip));
+ else
+ memset(val, 0, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_INVRT, val);
	/* Enable registers 6 and 7 to control pull-up and pull-down */
- pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+ memset(val, 0x02, NBANK(chip));
+ pca953x_write_regs(chip, PCA957X_BKEN, val);
return 0;
out:
@@ -675,7 +727,8 @@ static int pca953x_probe(struct i2c_client *client,
int ret;
u32 invert = 0;
- chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev,
+ sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@@ -710,15 +763,15 @@ static int pca953x_probe(struct i2c_client *client,
else
ret = device_pca957x_init(chip, invert);
if (ret)
- goto out_failed;
+ return ret;
ret = pca953x_irq_setup(chip, id, irq_base);
if (ret)
- goto out_failed;
+ return ret;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
- goto out_failed_irq;
+ return ret;
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -729,12 +782,6 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
-
-out_failed_irq:
- pca953x_irq_teardown(chip);
-out_failed:
- kfree(chip);
- return ret;
}
static int pca953x_remove(struct i2c_client *client)
@@ -760,12 +807,11 @@ static int pca953x_remove(struct i2c_client *client)
return ret;
}
- pca953x_irq_teardown(chip);
- kfree(chip);
return 0;
}
static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca9505", },
{ .compatible = "nxp,pca9534", },
{ .compatible = "nxp,pca9535", },
{ .compatible = "nxp,pca9536", },
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index c1720de18a4f..b820869ca93c 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -365,7 +365,7 @@ static int __init pl061_gpio_init(void)
{
return amba_driver_register(&pl061_gpio_driver);
}
-subsys_initcall(pl061_gpio_init);
+module_init(pl061_gpio_init);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("PL061 GPIO driver");
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 8325f580c0f1..9cc108d2b770 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -642,12 +642,7 @@ static struct platform_driver pxa_gpio_driver = {
.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
-
-static int __init pxa_gpio_init(void)
-{
- return platform_driver_register(&pxa_gpio_driver);
-}
-postcore_initcall(pxa_gpio_init);
+module_platform_driver(pxa_gpio_driver);
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 9572aa137e6f..4d330e36da1d 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -37,7 +37,6 @@
#include <linux/i2c/twl.h>
-
/*
* The GPIO "subchip" supports 18 GPIOs which can be configured as
* inputs or outputs, with pullups or pulldowns on each pin. Each
@@ -49,11 +48,6 @@
* There are also two LED pins used sometimes as output-only GPIOs.
*/
-
-static struct gpio_chip twl_gpiochip;
-static int twl4030_gpio_base;
-static int twl4030_gpio_irq_base;
-
/* genirq interfaces are not available to modules */
#ifdef MODULE
#define is_module() true
@@ -69,14 +63,24 @@ static int twl4030_gpio_irq_base;
/* Mask for GPIO registers when aggregated into a 32-bit integer */
#define GPIO_32_MASK 0x0003ffff
-/* Data structures */
-static DEFINE_MUTEX(gpio_lock);
+struct gpio_twl4030_priv {
+ struct gpio_chip gpio_chip;
+ struct mutex mutex;
+ int irq_base;
-/* store usage of each GPIO. - each bit represents one GPIO */
-static unsigned int gpio_usage_count;
+ /* Bitfields for state caching */
+ unsigned int usage_count;
+ unsigned int direction;
+ unsigned int out_state;
+};
/*----------------------------------------------------------------------*/
+static inline struct gpio_twl4030_priv *to_gpio_twl4030(struct gpio_chip *chip)
+{
+ return container_of(chip, struct gpio_twl4030_priv, gpio_chip);
+}
+
/*
* To configure TWL4030 GPIO module registers
*/
@@ -126,7 +130,7 @@ static inline int gpio_twl4030_read(u8 address)
/*----------------------------------------------------------------------*/
-static u8 cached_leden; /* protected by gpio_lock */
+static u8 cached_leden;
/* The LED lines are open drain outputs ... a FET pulls to GND, so an
* external pullup is needed. We could also expose the integrated PWM
@@ -140,14 +144,12 @@ static void twl4030_led_set_value(int led, int value)
if (led)
mask <<= 1;
- mutex_lock(&gpio_lock);
if (value)
cached_leden &= ~mask;
else
cached_leden |= mask;
status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
TWL4030_LED_LEDEN_REG);
- mutex_unlock(&gpio_lock);
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
@@ -158,7 +160,6 @@ static int twl4030_set_gpio_direction(int gpio, int is_input)
u8 base = REG_GPIODATADIR1 + d_bnk;
int ret = 0;
- mutex_lock(&gpio_lock);
ret = gpio_twl4030_read(base);
if (ret >= 0) {
if (is_input)
@@ -168,7 +169,6 @@ static int twl4030_set_gpio_direction(int gpio, int is_input)
ret = gpio_twl4030_write(base, reg);
}
- mutex_unlock(&gpio_lock);
return ret;
}
@@ -193,10 +193,6 @@ static int twl4030_get_gpio_datain(int gpio)
u8 base = 0;
int ret = 0;
- if (unlikely((gpio >= TWL4030_GPIO_MAX)
- || !(gpio_usage_count & BIT(gpio))))
- return -EPERM;
-
base = REG_GPIODATAIN1 + d_bnk;
ret = gpio_twl4030_read(base);
if (ret > 0)
@@ -209,9 +205,10 @@ static int twl4030_get_gpio_datain(int gpio)
static int twl_request(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
int status = 0;
- mutex_lock(&gpio_lock);
+ mutex_lock(&priv->mutex);
/* Support the two LED outputs as output-only GPIOs. */
if (offset >= TWL4030_GPIO_MAX) {
@@ -252,7 +249,7 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
}
/* on first use, turn GPIO module "on" */
- if (!gpio_usage_count) {
+ if (!priv->usage_count) {
struct twl4030_gpio_platform_data *pdata;
u8 value = MASK_GPIO_CTRL_GPIO_ON;
@@ -266,79 +263,120 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
status = gpio_twl4030_write(REG_GPIO_CTRL, value);
}
+done:
if (!status)
- gpio_usage_count |= (0x1 << offset);
+ priv->usage_count |= BIT(offset);
-done:
- mutex_unlock(&gpio_lock);
+ mutex_unlock(&priv->mutex);
return status;
}
static void twl_free(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
if (offset >= TWL4030_GPIO_MAX) {
twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
- return;
+ goto out;
}
- mutex_lock(&gpio_lock);
-
- gpio_usage_count &= ~BIT(offset);
+ priv->usage_count &= ~BIT(offset);
/* on last use, switch off GPIO module */
- if (!gpio_usage_count)
+ if (!priv->usage_count)
gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
- mutex_unlock(&gpio_lock);
+out:
+ mutex_unlock(&priv->mutex);
}
static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
{
- return (offset < TWL4030_GPIO_MAX)
- ? twl4030_set_gpio_direction(offset, 1)
- : -EINVAL;
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret;
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX)
+ ret = twl4030_set_gpio_direction(offset, 1);
+ else
+ ret = -EINVAL;
+
+ if (!ret)
+ priv->direction &= ~BIT(offset);
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int twl_get(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+ int ret;
int status = 0;
- if (offset < TWL4030_GPIO_MAX)
- status = twl4030_get_gpio_datain(offset);
- else if (offset == TWL4030_GPIO_MAX)
- status = cached_leden & LEDEN_LEDAON;
+ mutex_lock(&priv->mutex);
+ if (!(priv->usage_count & BIT(offset))) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (priv->direction & BIT(offset))
+ status = priv->out_state & BIT(offset);
else
- status = cached_leden & LEDEN_LEDBON;
- return (status < 0) ? 0 : status;
+ status = twl4030_get_gpio_datain(offset);
+
+ ret = (status <= 0) ? 0 : 1;
+out:
+ mutex_unlock(&priv->mutex);
+ return ret;
}
-static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
{
- if (offset < TWL4030_GPIO_MAX) {
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX)
twl4030_set_gpio_dataout(offset, value);
- return twl4030_set_gpio_direction(offset, 0);
- } else {
+ else
twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
- return 0;
- }
+
+ if (value)
+ priv->out_state |= BIT(offset);
+ else
+ priv->out_state &= ~BIT(offset);
+
+ mutex_unlock(&priv->mutex);
}
-static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
twl4030_set_gpio_dataout(offset, value);
- else
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+
+ priv->direction |= BIT(offset);
+ mutex_unlock(&priv->mutex);
+
+ twl_set(chip, offset, value);
+
+ return 0;
}
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
- ? (twl4030_gpio_irq_base + offset)
+ struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
+
+ return (priv->irq_base && (offset < TWL4030_GPIO_MAX))
+ ? (priv->irq_base + offset)
: -EINVAL;
}
-static struct gpio_chip twl_gpiochip = {
+static struct gpio_chip template_chip = {
.label = "twl4030",
.owner = THIS_MODULE,
.request = twl_request,
@@ -424,8 +462,14 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
struct device_node *node = pdev->dev.of_node;
+ struct gpio_twl4030_priv *priv;
int ret, irq_base;
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_twl4030_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
/* maybe setup IRQs */
if (is_module()) {
dev_err(&pdev->dev, "can't dispatch IRQs from modules\n");
@@ -445,12 +489,15 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- twl4030_gpio_irq_base = irq_base;
+ priv->irq_base = irq_base;
no_irqs:
- twl_gpiochip.base = -1;
- twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
- twl_gpiochip.dev = &pdev->dev;
+ priv->gpio_chip = template_chip;
+ priv->gpio_chip.base = -1;
+ priv->gpio_chip.ngpio = TWL4030_GPIO_MAX;
+ priv->gpio_chip.dev = &pdev->dev;
+
+ mutex_init(&priv->mutex);
if (node)
pdata = of_gpio_twl4030(&pdev->dev);
@@ -481,23 +528,23 @@ no_irqs:
* is (still) clear if use_leds is set.
*/
if (pdata->use_leds)
- twl_gpiochip.ngpio += 2;
+ priv->gpio_chip.ngpio += 2;
- ret = gpiochip_add(&twl_gpiochip);
+ ret = gpiochip_add(&priv->gpio_chip);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
- twl_gpiochip.ngpio = 0;
+ priv->gpio_chip.ngpio = 0;
gpio_twl4030_remove(pdev);
goto out;
}
- twl4030_gpio_base = twl_gpiochip.base;
+ platform_set_drvdata(pdev, priv);
if (pdata && pdata->setup) {
int status;
- status = pdata->setup(&pdev->dev,
- twl4030_gpio_base, TWL4030_GPIO_MAX);
+ status = pdata->setup(&pdev->dev, priv->gpio_chip.base,
+ TWL4030_GPIO_MAX);
if (status)
dev_dbg(&pdev->dev, "setup --> %d\n", status);
}
@@ -510,18 +557,19 @@ out:
static int gpio_twl4030_remove(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
int status;
if (pdata && pdata->teardown) {
- status = pdata->teardown(&pdev->dev,
- twl4030_gpio_base, TWL4030_GPIO_MAX);
+ status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
+ TWL4030_GPIO_MAX);
if (status) {
dev_dbg(&pdev->dev, "teardown --> %d\n", status);
return status;
}
}
- status = gpiochip_remove(&twl_gpiochip);
+ status = gpiochip_remove(&priv->gpio_chip);
if (status < 0)
return status;
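
The twl4030 hunks above replace the file-scope gpio_lock, gpio_usage_count and twl4030_gpio_* globals with a per-instance gpio_twl4030_priv that embeds the gpio_chip and recovers it in every callback through container_of(). A minimal userspace sketch of that embed-and-recover pattern; the names demo_chip, demo_priv and to_priv are hypothetical and not part of the patch:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_chip {                      /* stand-in for struct gpio_chip */
        const char *label;
};

struct demo_priv {                      /* stand-in for struct gpio_twl4030_priv */
        struct demo_chip chip;          /* embedded by value, not a pointer */
        unsigned int out_state;
};

/* Callbacks only see the embedded chip; recover the private struct from it. */
static struct demo_priv *to_priv(struct demo_chip *chip)
{
        return container_of(chip, struct demo_priv, chip);
}

int main(void)
{
        struct demo_priv priv = { .chip = { .label = "twl4030" }, .out_state = 0x5 };

        /* Prints 0x5: the same object is reachable through either pointer. */
        printf("out_state via chip pointer: 0x%x\n", to_priv(&priv.chip)->out_state);
        return 0;
}
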
diff --git a/drivers/gpio/gpio-vt8500.c b/drivers/gpio/gpio-vt8500.c
index b53320a16fc8..81683ca35ac1 100644
--- a/drivers/gpio/gpio-vt8500.c
+++ b/drivers/gpio/gpio-vt8500.c
@@ -73,19 +73,20 @@ struct vt8500_gpio_data {
static struct vt8500_gpio_data vt8500_data = {
.num_banks = 7,
.banks = {
+ VT8500_BANK(NO_REG, 0x3C, 0x5C, 0x7C, 9),
VT8500_BANK(0x00, 0x20, 0x40, 0x60, 26),
VT8500_BANK(0x04, 0x24, 0x44, 0x64, 28),
VT8500_BANK(0x08, 0x28, 0x48, 0x68, 31),
VT8500_BANK(0x0C, 0x2C, 0x4C, 0x6C, 19),
VT8500_BANK(0x10, 0x30, 0x50, 0x70, 19),
VT8500_BANK(0x14, 0x34, 0x54, 0x74, 23),
- VT8500_BANK(NO_REG, 0x3C, 0x5C, 0x7C, 9),
},
};
static struct vt8500_gpio_data wm8505_data = {
.num_banks = 10,
.banks = {
+ VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
VT8500_BANK(0x40, 0x68, 0x90, 0xB8, 8),
VT8500_BANK(0x44, 0x6C, 0x94, 0xBC, 32),
VT8500_BANK(0x48, 0x70, 0x98, 0xC0, 6),
@@ -95,7 +96,6 @@ static struct vt8500_gpio_data wm8505_data = {
VT8500_BANK(0x58, 0x80, 0xA8, 0xD0, 5),
VT8500_BANK(0x5C, 0x84, 0xAC, 0xD4, 12),
VT8500_BANK(0x60, 0x88, 0xB0, 0xD8, 16),
- VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
},
};
@@ -127,6 +127,12 @@ struct vt8500_gpio_chip {
void __iomem *base;
};
+struct vt8500_data {
+ struct vt8500_gpio_chip *chip;
+ void __iomem *iobase;
+ int num_banks;
+};
+
#define to_vt8500(__chip) container_of(__chip, struct vt8500_gpio_chip, chip)
@@ -224,19 +230,32 @@ static int vt8500_of_xlate(struct gpio_chip *gc,
static int vt8500_add_chips(struct platform_device *pdev, void __iomem *base,
const struct vt8500_gpio_data *data)
{
+ struct vt8500_data *priv;
struct vt8500_gpio_chip *vtchip;
struct gpio_chip *chip;
int i;
int pin_cnt = 0;
- vtchip = devm_kzalloc(&pdev->dev,
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_data), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ priv->chip = devm_kzalloc(&pdev->dev,
sizeof(struct vt8500_gpio_chip) * data->num_banks,
GFP_KERNEL);
- if (!vtchip) {
- pr_err("%s: failed to allocate chip memory\n", __func__);
+ if (!priv->chip) {
+ dev_err(&pdev->dev, "failed to allocate chip memory\n");
return -ENOMEM;
}
+ priv->iobase = base;
+ priv->num_banks = data->num_banks;
+ platform_set_drvdata(pdev, priv);
+
+ vtchip = priv->chip;
+
for (i = 0; i < data->num_banks; i++) {
vtchip[i].base = base;
vtchip[i].regs = &data->banks[i];
@@ -273,36 +292,54 @@ static struct of_device_id vt8500_gpio_dt_ids[] = {
static int vt8500_gpio_probe(struct platform_device *pdev)
{
+ int ret;
void __iomem *gpio_base;
- struct device_node *np;
+ struct resource *res;
const struct of_device_id *of_id =
of_match_device(vt8500_gpio_dt_ids, &pdev->dev);
if (!of_id) {
- dev_err(&pdev->dev, "Failed to find gpio controller\n");
+ dev_err(&pdev->dev, "No matching driver data\n");
return -ENODEV;
}
- np = pdev->dev.of_node;
- if (!np) {
- dev_err(&pdev->dev, "Missing GPIO description in devicetree\n");
- return -EFAULT;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get IO resource\n");
+ return -ENODEV;
}
- gpio_base = of_iomap(np, 0);
+ gpio_base = devm_request_and_ioremap(&pdev->dev, res);
if (!gpio_base) {
dev_err(&pdev->dev, "Unable to map GPIO registers\n");
- of_node_put(np);
return -ENOMEM;
}
- vt8500_add_chips(pdev, gpio_base, of_id->data);
+ ret = vt8500_add_chips(pdev, gpio_base, of_id->data);
+
+ return ret;
+}
+
+static int vt8500_gpio_remove(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ struct vt8500_data *priv = platform_get_drvdata(pdev);
+ struct vt8500_gpio_chip *vtchip = priv->chip;
+
+ for (i = 0; i < priv->num_banks; i++) {
+ ret = gpiochip_remove(&vtchip[i].chip);
+ if (ret)
+ dev_warn(&pdev->dev, "gpiochip_remove returned %d\n",
+ ret);
+ }
return 0;
}
static struct platform_driver vt8500_gpio_driver = {
.probe = vt8500_gpio_probe,
+ .remove = vt8500_gpio_remove,
.driver = {
.name = "vt8500-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index cbad6e908d30..a063eb04b6ce 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/acpi_gpio.h>
#include <linux/acpi.h>
+#include <linux/interrupt.h>
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
@@ -52,3 +53,89 @@ int acpi_get_gpio(char *path, int pin)
return chip->base + pin;
}
EXPORT_SYMBOL_GPL(acpi_get_gpio);
+
+
+static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+{
+ acpi_handle handle = data;
+
+ acpi_evaluate_object(handle, NULL, NULL, NULL);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
+ * @chip: gpio chip
+ *
+ * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
+ * handled by ACPI event methods which need to be called from the GPIO
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
+ * gpio pins have acpi event methods and assigns interrupt handlers that call
+ * the acpi event methods for those pins.
+ *
+ * Interrupts are automatically freed on driver detach
+ */
+
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_resource *res;
+ acpi_handle handle, ev_handle;
+ acpi_status status;
+ unsigned int pin;
+ int irq, ret;
+ char ev_name[5];
+
+ if (!chip->dev || !chip->to_irq)
+ return;
+
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_event_resources(handle, &buf);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* If a gpio interrupt has an acpi event handler method, then
+ * set up an interrupt handler that calls the acpi event handler
+ */
+
+ for (res = buf.pointer;
+ res && (res->type != ACPI_RESOURCE_TYPE_END_TAG);
+ res = ACPI_NEXT_RESOURCE(res)) {
+
+ if (res->type != ACPI_RESOURCE_TYPE_GPIO ||
+ res->data.gpio.connection_type !=
+ ACPI_RESOURCE_GPIO_TYPE_INT)
+ continue;
+
+ pin = res->data.gpio.pin_table[0];
+ if (pin > chip->ngpio)
+ continue;
+
+ sprintf(ev_name, "_%c%02X",
+ res->data.gpio.triggering ? 'E' : 'L', pin);
+
+ status = acpi_get_handle(handle, ev_name, &ev_handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ irq = chip->to_irq(chip, pin);
+ if (irq < 0)
+ continue;
+
+ /* Assume BIOS sets the triggering, so no flags */
+ ret = devm_request_threaded_irq(chip->dev, irq, NULL,
+ acpi_gpio_irq_handler,
+ 0,
+ "GPIO-signaled-ACPI-event",
+ ev_handle);
+ if (ret)
+ dev_err(chip->dev,
+ "Failed to request IRQ %d ACPI event handler\n",
+ irq);
+ }
+}
+EXPORT_SYMBOL(acpi_gpiochip_request_interrupts);
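
The new acpi_gpiochip_request_interrupts() above walks the chip's ACPI event resources, builds the _Exx/_Lxx method name for each GPIO-signaled event pin, and installs a threaded IRQ handler that evaluates that method. A hedged sketch of how a controller driver might opt in after registering its chip; demo_chip, demo_gpio_probe and the assumption that the declaration lives in <linux/acpi_gpio.h> are mine, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/acpi_gpio.h>            /* assumed home of the new declaration */

static struct gpio_chip demo_chip;      /* hypothetical; label/ngpio set elsewhere */

static int demo_gpio_probe(struct platform_device *pdev)
{
        int ret;

        /* The helper needs both of these before it can do anything useful. */
        demo_chip.dev = &pdev->dev;     /* so ACPI_HANDLE(chip->dev) resolves */
        /* demo_chip.to_irq must also be set by the driver (pin -> Linux IRQ) */

        ret = gpiochip_add(&demo_chip);
        if (ret < 0)
                return ret;

        /* Best effort: returns void and logs a per-pin error on failure. */
        acpi_gpiochip_request_interrupts(&demo_chip);
        return 0;
}
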
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5359ca78130f..fff9786cdc64 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3,6 +3,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <linux/list.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/debugfs.h>
@@ -52,14 +53,13 @@ struct gpio_desc {
/* flag symbols are bit numbers */
#define FLAG_REQUESTED 0
#define FLAG_IS_OUT 1
-#define FLAG_RESERVED 2
-#define FLAG_EXPORT 3 /* protected by sysfs_lock */
-#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
-#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
-#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
-#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
-#define FLAG_OPEN_DRAIN 8 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 9 /* Gpio is open source type */
+#define FLAG_EXPORT 2 /* protected by sysfs_lock */
+#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
+#define FLAG_TRIG_FALL 4 /* trigger on falling edge */
+#define FLAG_TRIG_RISE 5 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 6 /* sysfs value has active low */
+#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
+#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define ID_SHIFT 16 /* add new flags before this one */
@@ -72,10 +72,36 @@ struct gpio_desc {
};
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
+#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
+
+static LIST_HEAD(gpio_chips);
+
#ifdef CONFIG_GPIO_SYSFS
static DEFINE_IDR(dirent_idr);
#endif
+/*
+ * Internal gpiod_* API using descriptors instead of the integer namespace.
+ * Most of this should eventually go public.
+ */
+static int gpiod_request(struct gpio_desc *desc, const char *label);
+static void gpiod_free(struct gpio_desc *desc);
+static int gpiod_direction_input(struct gpio_desc *desc);
+static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+static int gpiod_get_value(struct gpio_desc *desc);
+static void gpiod_set_value(struct gpio_desc *desc, int value);
+static int gpiod_cansleep(struct gpio_desc *desc);
+static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+static int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
+static void gpiod_unexport(struct gpio_desc *desc);
+
+
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
#ifdef CONFIG_DEBUG_FS
@@ -83,6 +109,36 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
#endif
}
+/*
+ * Return the GPIO number of the passed descriptor relative to its chip
+ */
+static int gpio_chip_hwgpio(const struct gpio_desc *desc)
+{
+ return desc - &desc->chip->desc[0];
+}
+
+/**
+ * Convert a GPIO number to its descriptor
+ */
+static struct gpio_desc *gpio_to_desc(unsigned gpio)
+{
+ if (WARN(!gpio_is_valid(gpio), "invalid GPIO %d\n", gpio))
+ return NULL;
+ else
+ return &gpio_desc[gpio];
+}
+
+/**
+ * Convert a GPIO descriptor to the integer namespace.
+ * This should disappear in the future but is needed since we still
+ * use GPIO numbers for error messages and sysfs nodes
+ */
+static int desc_to_gpio(const struct gpio_desc *desc)
+{
+ return desc->chip->base + gpio_chip_hwgpio(desc);
+}
+
+
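/*
 * Illustrative aside (not part of the patch): the helpers above pin down the
 * descriptor <-> integer mapping -- the hardware offset is the descriptor's
 * index in its chip's desc[] array, and the global number is chip->base plus
 * that offset.  A standalone sketch of the same pointer arithmetic, with
 * made-up types and values:
 */
#include <stdio.h>

struct demo_desc;
struct demo_chip { int base; int ngpio; struct demo_desc *desc; };
struct demo_desc { struct demo_chip *chip; };

int main(void)
{
        static struct demo_desc descs[8];
        static struct demo_chip c = { .base = 160, .ngpio = 8, .desc = descs };
        struct demo_desc *d;
        int i, hwgpio, number;

        for (i = 0; i < c.ngpio; i++)
                descs[i].chip = &c;

        d = &descs[3];
        hwgpio = (int)(d - &d->chip->desc[0]);  /* offset within the chip: 3 */
        number = d->chip->base + hwgpio;        /* global number: 163 */

        printf("hwgpio=%d gpio=%d\n", hwgpio, number);
        return 0;
}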
/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
* when setting direction, and otherwise illegal. Until board setup code
* and drivers use explicit requests everywhere (which won't happen when
@@ -94,10 +150,10 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
* only "legal" in the sense that (old) code using it won't break yet,
* but instead only triggers a WARN() stack dump.
*/
-static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
+static int gpio_ensure_requested(struct gpio_desc *desc)
{
const struct gpio_chip *chip = desc->chip;
- const int gpio = chip->base + offset;
+ const int gpio = desc_to_gpio(desc);
if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
"autorequest GPIO-%d\n", gpio)) {
@@ -116,95 +172,54 @@ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
+static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+{
+ return desc->chip;
+}
+
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
- return gpio_desc[gpio].chip;
+ return gpiod_to_chip(gpio_to_desc(gpio));
}
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
{
- int i;
- int spare = 0;
- int base = -ENOSPC;
-
- for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) {
- struct gpio_desc *desc = &gpio_desc[i];
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip;
+ int base = ARCH_NR_GPIOS - ngpio;
- if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) {
- spare++;
- if (spare == ngpio) {
- base = i;
- break;
- }
- } else {
- spare = 0;
- if (chip)
- i -= chip->ngpio - 1;
- }
+ list_for_each_entry_reverse(chip, &gpio_chips, list) {
+ /* found a free space? */
+ if (chip->base + chip->ngpio <= base)
+ break;
+ else
+ /* nope, check the space right before the chip */
+ base = chip->base - ngpio;
}
- if (gpio_is_valid(base))
+ if (gpio_is_valid(base)) {
pr_debug("%s: found new base at %d\n", __func__, base);
- return base;
-}
-
-/**
- * gpiochip_reserve() - reserve range of gpios to use with platform code only
- * @start: starting gpio number
- * @ngpio: number of gpios to reserve
- * Context: platform init, potentially before irqs or kmalloc will work
- *
- * Returns a negative errno if any gpio within the range is already reserved
- * or registered, else returns zero as a success code. Use this function
- * to mark a range of gpios as unavailable for dynamic gpio number allocation,
- * for example because its driver support is not yet loaded.
- */
-int __init gpiochip_reserve(int start, int ngpio)
-{
- int ret = 0;
- unsigned long flags;
- int i;
-
- if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1))
- return -EINVAL;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- for (i = start; i < start + ngpio; i++) {
- struct gpio_desc *desc = &gpio_desc[i];
-
- if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) {
- ret = -EBUSY;
- goto err;
- }
-
- set_bit(FLAG_RESERVED, &desc->flags);
+ return base;
+ } else {
+ pr_err("%s: cannot find free range\n", __func__);
+ return -ENOSPC;
}
-
- pr_debug("%s: reserved gpios from %d to %d\n",
- __func__, start, start + ngpio - 1);
-err:
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
}
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
-static int gpio_get_direction(unsigned gpio)
+static int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
+ unsigned offset;
int status = -EINVAL;
- chip = gpio_to_chip(gpio);
- gpio -= chip->base;
+ chip = gpiod_to_chip(desc);
+ offset = gpio_chip_hwgpio(desc);
if (!chip->get_direction)
return status;
- status = chip->get_direction(chip, gpio);
+ status = chip->get_direction(chip, offset);
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
@@ -248,19 +263,19 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
- else
- gpio_get_direction(gpio);
+ } else {
+ gpiod_get_direction(desc);
status = sprintf(buf, "%s\n",
test_bit(FLAG_IS_OUT, &desc->flags)
? "out" : "in");
+ }
mutex_unlock(&sysfs_lock);
return status;
@@ -269,8 +284,7 @@ static ssize_t gpio_direction_show(struct device *dev,
static ssize_t gpio_direction_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -278,11 +292,11 @@ static ssize_t gpio_direction_store(struct device *dev,
if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
else if (sysfs_streq(buf, "high"))
- status = gpio_direction_output(gpio, 1);
+ status = gpiod_direction_output(desc, 1);
else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
- status = gpio_direction_output(gpio, 0);
+ status = gpiod_direction_output(desc, 0);
else if (sysfs_streq(buf, "in"))
- status = gpio_direction_input(gpio);
+ status = gpiod_direction_input(desc);
else
status = -EINVAL;
@@ -296,8 +310,7 @@ static /* const */ DEVICE_ATTR(direction, 0644,
static ssize_t gpio_value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -307,7 +320,7 @@ static ssize_t gpio_value_show(struct device *dev,
} else {
int value;
- value = !!gpio_get_value_cansleep(gpio);
+ value = !!gpiod_get_value_cansleep(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
@@ -321,8 +334,7 @@ static ssize_t gpio_value_show(struct device *dev,
static ssize_t gpio_value_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- unsigned gpio = desc - gpio_desc;
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -338,7 +350,7 @@ static ssize_t gpio_value_store(struct device *dev,
if (status == 0) {
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- gpio_set_value_cansleep(gpio, value != 0);
+ gpiod_set_value_cansleep(desc, value != 0);
status = size;
}
}
@@ -368,7 +380,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
return 0;
- irq = gpio_to_irq(desc - gpio_desc);
+ irq = gpiod_to_irq(desc);
if (irq < 0)
return -EIO;
@@ -399,15 +411,10 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
goto err_out;
}
- do {
- ret = -ENOMEM;
- if (idr_pre_get(&dirent_idr, GFP_KERNEL))
- ret = idr_get_new_above(&dirent_idr, value_sd,
- 1, &id);
- } while (ret == -EAGAIN);
-
- if (ret)
+ ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
+ if (ret < 0)
goto free_sd;
+ id = ret;
desc->flags &= GPIO_FLAGS_MASK;
desc->flags |= (unsigned long)id << ID_SHIFT;
@@ -638,29 +645,32 @@ static ssize_t export_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- int status;
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
status = strict_strtol(buf, 0, &gpio);
if (status < 0)
goto done;
+ desc = gpio_to_desc(gpio);
+
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- status = gpio_request(gpio, "sysfs");
+ status = gpiod_request(desc, "sysfs");
if (status < 0) {
if (status == -EPROBE_DEFER)
status = -ENODEV;
goto done;
}
- status = gpio_export(gpio, true);
+ status = gpiod_export(desc, true);
if (status < 0)
- gpio_free(gpio);
+ gpiod_free(desc);
else
- set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags);
+ set_bit(FLAG_SYSFS, &desc->flags);
done:
if (status)
@@ -672,8 +682,9 @@ static ssize_t unexport_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- int status;
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
status = strict_strtol(buf, 0, &gpio);
if (status < 0)
@@ -681,17 +692,18 @@ static ssize_t unexport_store(struct class *class,
status = -EINVAL;
+ desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) {
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
status = 0;
- gpio_free(gpio);
+ gpiod_free(desc);
}
done:
if (status)
@@ -728,13 +740,13 @@ static struct class gpio_class = {
*
* Returns zero on success, else an error.
*/
-int gpio_export(unsigned gpio, bool direction_may_change)
+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
unsigned long flags;
- struct gpio_desc *desc;
int status;
const char *ioname = NULL;
struct device *dev;
+ int offset;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -742,20 +754,19 @@ int gpio_export(unsigned gpio, bool direction_may_change)
return -ENOENT;
}
- if (!gpio_is_valid(gpio)) {
- pr_debug("%s: gpio %d is not valid\n", __func__, gpio);
+ if (!desc) {
+ pr_debug("%s: invalid gpio descriptor\n", __func__);
return -EINVAL;
}
mutex_lock(&sysfs_lock);
spin_lock_irqsave(&gpio_lock, flags);
- desc = &gpio_desc[gpio];
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags)) {
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio %d unavailable (requested=%d, exported=%d)\n",
- __func__, gpio,
+ __func__, desc_to_gpio(desc),
test_bit(FLAG_REQUESTED, &desc->flags),
test_bit(FLAG_EXPORT, &desc->flags));
status = -EPERM;
@@ -766,11 +777,13 @@ int gpio_export(unsigned gpio, bool direction_may_change)
direction_may_change = false;
spin_unlock_irqrestore(&gpio_lock, flags);
- if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
- ioname = desc->chip->names[gpio - desc->chip->base];
+ offset = gpio_chip_hwgpio(desc);
+ if (desc->chip->names && desc->chip->names[offset])
+ ioname = desc->chip->names[offset];
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u", gpio);
+ desc, ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
@@ -786,7 +799,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
goto fail_unregister_device;
}
- if (gpio_to_irq(gpio) >= 0 && (direction_may_change ||
+ if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
!test_bit(FLAG_IS_OUT, &desc->flags))) {
status = device_create_file(dev, &dev_attr_edge);
if (status)
@@ -801,9 +814,15 @@ fail_unregister_device:
device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
+
+int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+}
EXPORT_SYMBOL_GPL(gpio_export);
static int match_export(struct device *dev, const void *data)
@@ -822,18 +841,16 @@ static int match_export(struct device *dev, const void *data)
*
* Returns zero on success, else an error.
*/
-int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
+static int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
{
- struct gpio_desc *desc;
int status = -EINVAL;
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
struct device *tdev;
@@ -850,12 +867,17 @@ int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
-EXPORT_SYMBOL_GPL(gpio_export_link);
+int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
+{
+ return gpiod_export_link(dev, name, gpio_to_desc(gpio));
+}
+EXPORT_SYMBOL_GPL(gpio_export_link);
/**
* gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
@@ -869,19 +891,16 @@ EXPORT_SYMBOL_GPL(gpio_export_link);
*
* Returns zero on success, else an error.
*/
-int gpio_sysfs_set_active_low(unsigned gpio, int value)
+static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
{
- struct gpio_desc *desc;
struct device *dev = NULL;
int status = -EINVAL;
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto done;
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev == NULL) {
@@ -897,10 +916,16 @@ unlock:
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
return status;
}
+
+int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
+}
EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
/**
@@ -909,21 +934,18 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
*
* This is implicit on gpio_free().
*/
-void gpio_unexport(unsigned gpio)
+static void gpiod_unexport(struct gpio_desc *desc)
{
- struct gpio_desc *desc;
int status = 0;
struct device *dev = NULL;
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
status = -EINVAL;
goto done;
}
mutex_lock(&sysfs_lock);
- desc = &gpio_desc[gpio];
-
if (test_bit(FLAG_EXPORT, &desc->flags)) {
dev = class_find_device(&gpio_class, NULL, desc, match_export);
@@ -935,13 +957,20 @@ void gpio_unexport(unsigned gpio)
}
mutex_unlock(&sysfs_lock);
+
if (dev) {
device_unregister(dev);
put_device(dev);
}
done:
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
+ status);
+}
+
+void gpio_unexport(unsigned gpio)
+{
+ gpiod_unexport(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(gpio_unexport);
@@ -975,9 +1004,9 @@ static int gpiochip_export(struct gpio_chip *chip)
unsigned gpio;
spin_lock_irqsave(&gpio_lock, flags);
- gpio = chip->base;
- while (gpio_desc[gpio].chip == chip)
- gpio_desc[gpio++].chip = NULL;
+ gpio = 0;
+ while (gpio < chip->ngpio)
+ chip->desc[gpio++].chip = NULL;
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: chip %s status %d\n", __func__,
@@ -1012,7 +1041,7 @@ static int __init gpiolib_sysfs_init(void)
{
int status;
unsigned long flags;
- unsigned gpio;
+ struct gpio_chip *chip;
status = class_register(&gpio_class);
if (status < 0)
@@ -1025,10 +1054,7 @@ static int __init gpiolib_sysfs_init(void)
* registered, and so arch_initcall() can always gpio_export().
*/
spin_lock_irqsave(&gpio_lock, flags);
- for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
- struct gpio_chip *chip;
-
- chip = gpio_desc[gpio].chip;
+ list_for_each_entry(chip, &gpio_chips, list) {
if (!chip || chip->exported)
continue;
@@ -1053,8 +1079,66 @@ static inline void gpiochip_unexport(struct gpio_chip *chip)
{
}
+static inline int gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+{
+ return -ENOSYS;
+}
+
+static inline void gpiod_unexport(struct gpio_desc *desc)
+{
+}
+
#endif /* CONFIG_GPIO_SYSFS */
+/*
+ * Add a new chip to the global chips list, keeping the list of chips sorted
+ * by base order.
+ *
+ * Return -EBUSY if the new chip overlaps with some other chip's integer
+ * space.
+ */
+static int gpiochip_add_to_list(struct gpio_chip *chip)
+{
+ struct list_head *pos = &gpio_chips;
+ struct gpio_chip *_chip;
+ int err = 0;
+
+ /* find where to insert our chip */
+ list_for_each(pos, &gpio_chips) {
+ _chip = list_entry(pos, struct gpio_chip, list);
+ /* shall we insert before _chip? */
+ if (_chip->base >= chip->base + chip->ngpio)
+ break;
+ }
+
+ /* are we stepping on the chip right before? */
+ if (pos != &gpio_chips && pos->prev != &gpio_chips) {
+ _chip = list_entry(pos->prev, struct gpio_chip, list);
+ if (_chip->base + _chip->ngpio > chip->base) {
+ dev_err(chip->dev,
+ "GPIO integer space overlap, cannot add chip\n");
+ err = -EBUSY;
+ }
+ }
+
+ if (!err)
+ list_add_tail(&chip->list, pos);
+
+ return err;
+}
+
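/*
 * Illustrative aside (not part of the patch): gpiochip_add_to_list() above
 * keeps the list sorted by base, so it only has to compare the new chip with
 * the neighbour just before the insertion point.  The underlying test is a
 * plain half-open interval overlap check; standalone sketch with made-up
 * numbers:
 */
#include <stdbool.h>
#include <stdio.h>

struct range { int base; int ngpio; };

static bool ranges_overlap(struct range a, struct range b)
{
        return a.base < b.base + b.ngpio && b.base < a.base + a.ngpio;
}

int main(void)
{
        struct range existing = { .base = 224, .ngpio = 32 };  /* 224..255 */
        struct range fits     = { .base = 192, .ngpio = 32 };  /* 192..223 */
        struct range clashes  = { .base = 250, .ngpio = 16 };  /* 250..265 */

        printf("fits:    %d\n", ranges_overlap(existing, fits));    /* 0 */
        printf("clashes: %d\n", ranges_overlap(existing, clashes)); /* 1 */
        return 0;
}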
/**
* gpiochip_add() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
@@ -1096,16 +1180,14 @@ int gpiochip_add(struct gpio_chip *chip)
chip->base = base;
}
- /* these GPIO numbers must not be managed by another gpio_chip */
- for (id = base; id < base + chip->ngpio; id++) {
- if (gpio_desc[id].chip != NULL) {
- status = -EBUSY;
- break;
- }
- }
+ status = gpiochip_add_to_list(chip);
+
if (status == 0) {
- for (id = base; id < base + chip->ngpio; id++) {
- gpio_desc[id].chip = chip;
+ chip->desc = &gpio_desc[chip->base];
+
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &chip->desc[id];
+ desc->chip = chip;
/* REVISIT: most hardware initializes GPIOs as
* inputs (often with pullups enabled) so power
@@ -1114,7 +1196,7 @@ int gpiochip_add(struct gpio_chip *chip)
* and in case chip->get_direction is not set,
* we may expose the wrong direction in sysfs.
*/
- gpio_desc[id].flags = !chip->direction_input
+ desc->flags = !chip->direction_input
? (1 << FLAG_IS_OUT)
: 0;
}
@@ -1167,15 +1249,17 @@ int gpiochip_remove(struct gpio_chip *chip)
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
- for (id = chip->base; id < chip->base + chip->ngpio; id++) {
- if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
+ for (id = 0; id < chip->ngpio; id++) {
+ if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) {
status = -EBUSY;
break;
}
}
if (status == 0) {
- for (id = chip->base; id < chip->base + chip->ngpio; id++)
- gpio_desc[id].chip = NULL;
+ for (id = 0; id < chip->ngpio; id++)
+ chip->desc[id].chip = NULL;
+
+ list_del(&chip->list);
}
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1202,20 +1286,17 @@ struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
void *data))
{
- struct gpio_chip *chip = NULL;
+ struct gpio_chip *chip;
unsigned long flags;
- int i;
spin_lock_irqsave(&gpio_lock, flags);
- for (i = 0; i < ARCH_NR_GPIOS; i++) {
- if (!gpio_desc[i].chip)
- continue;
-
- if (match(gpio_desc[i].chip, data)) {
- chip = gpio_desc[i].chip;
+ list_for_each_entry(chip, &gpio_chips, list)
+ if (match(chip, data))
break;
- }
- }
+
+ /* No match? */
+ if (&chip->list == &gpio_chips)
+ chip = NULL;
spin_unlock_irqrestore(&gpio_lock, flags);
return chip;
@@ -1297,20 +1378,18 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
-int gpio_request(unsigned gpio, const char *label)
+static int gpiod_request(struct gpio_desc *desc, const char *label)
{
- struct gpio_desc *desc;
struct gpio_chip *chip;
int status = -EPROBE_DEFER;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
status = -EINVAL;
goto done;
}
- desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip == NULL)
goto done;
@@ -1334,7 +1413,7 @@ int gpio_request(unsigned gpio, const char *label)
if (chip->request) {
/* chip->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- status = chip->request(chip, gpio - chip->base);
+ status = chip->request(chip, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
if (status < 0) {
@@ -1347,42 +1426,46 @@ int gpio_request(unsigned gpio, const char *label)
if (chip->get_direction) {
/* chip->get_direction may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- gpio_get_direction(gpio);
+ gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
}
done:
if (status)
- pr_debug("gpio_request: gpio-%d (%s) status %d\n",
- gpio, label ? : "?", status);
+ pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
+ desc ? desc_to_gpio(desc) : -1,
+ label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ return gpiod_request(gpio_to_desc(gpio), label);
+}
EXPORT_SYMBOL_GPL(gpio_request);
-void gpio_free(unsigned gpio)
+static void gpiod_free(struct gpio_desc *desc)
{
unsigned long flags;
- struct gpio_desc *desc;
struct gpio_chip *chip;
might_sleep();
- if (!gpio_is_valid(gpio)) {
+ if (!desc) {
WARN_ON(extra_checks);
return;
}
- gpio_unexport(gpio);
+ gpiod_unexport(desc);
spin_lock_irqsave(&gpio_lock, flags);
- desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
if (chip->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
might_sleep_if(chip->can_sleep);
- chip->free(chip, gpio - chip->base);
+ chip->free(chip, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
}
desc_set_label(desc, NULL);
@@ -1396,6 +1479,11 @@ void gpio_free(unsigned gpio)
spin_unlock_irqrestore(&gpio_lock, flags);
}
+
+void gpio_free(unsigned gpio)
+{
+ gpiod_free(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_free);
/**
@@ -1406,29 +1494,32 @@ EXPORT_SYMBOL_GPL(gpio_free);
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
+ struct gpio_desc *desc;
int err;
- err = gpio_request(gpio, label);
+ desc = gpio_to_desc(gpio);
+
+ err = gpiod_request(desc, label);
if (err)
return err;
if (flags & GPIOF_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags);
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (flags & GPIOF_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags);
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
if (flags & GPIOF_DIR_IN)
- err = gpio_direction_input(gpio);
+ err = gpiod_direction_input(desc);
else
- err = gpio_direction_output(gpio,
+ err = gpiod_direction_output(desc,
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
goto free_gpio;
if (flags & GPIOF_EXPORT) {
- err = gpio_export(gpio, flags & GPIOF_EXPORT_CHANGEABLE);
+ err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
if (err)
goto free_gpio;
}
@@ -1436,7 +1527,7 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
return 0;
free_gpio:
- gpio_free(gpio);
+ gpiod_free(desc);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
@@ -1491,14 +1582,17 @@ EXPORT_SYMBOL_GPL(gpio_free_array);
*/
const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
{
- unsigned gpio = chip->base + offset;
+ struct gpio_desc *desc;
- if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip)
+ if (!GPIO_OFFSET_VALID(chip, offset))
return NULL;
- if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
+
+ desc = &chip->desc[offset];
+
+ if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
#ifdef CONFIG_DEBUG_FS
- return gpio_desc[gpio].label;
+ return desc->label;
#else
return "?";
#endif
@@ -1515,24 +1609,21 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* rely on gpio_request() having been called beforehand.
*/
-int gpio_direction_input(unsigned gpio)
+static int gpiod_direction_input(struct gpio_desc *desc)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1542,11 +1633,12 @@ int gpio_direction_input(unsigned gpio)
might_sleep_if(chip->can_sleep);
+ offset = gpio_chip_hwgpio(desc);
if (status) {
- status = chip->request(chip, gpio);
+ status = chip->request(chip, offset);
if (status < 0) {
pr_debug("GPIO-%d: chip request fail, %d\n",
- chip->base + gpio, status);
+ desc_to_gpio(desc), status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1554,48 +1646,54 @@ int gpio_direction_input(unsigned gpio)
}
}
- status = chip->direction_input(chip, gpio);
+ status = chip->direction_input(chip, offset);
if (status == 0)
clear_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_direction(chip->base + gpio, 1, status);
+ trace_gpio_direction(desc_to_gpio(desc), 1, status);
lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_direction_input);
-int gpio_direction_output(unsigned gpio, int value)
+static int gpiod_direction_output(struct gpio_desc *desc, int value)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- return gpio_direction_input(gpio);
+ return gpiod_direction_input(desc);
/* Open source pin should not be driven to 0 */
if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- return gpio_direction_input(gpio);
+ return gpiod_direction_input(desc);
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1605,11 +1703,12 @@ int gpio_direction_output(unsigned gpio, int value)
might_sleep_if(chip->can_sleep);
+ offset = gpio_chip_hwgpio(desc);
if (status) {
- status = chip->request(chip, gpio);
+ status = chip->request(chip, offset);
if (status < 0) {
pr_debug("GPIO-%d: chip request fail, %d\n",
- chip->base + gpio, status);
+ desc_to_gpio(desc), status);
/* and it's not available to anyone else ...
* gpio_request() is the fully clean solution.
*/
@@ -1617,20 +1716,29 @@ int gpio_direction_output(unsigned gpio, int value)
}
}
- status = chip->direction_output(chip, gpio, value);
+ status = chip->direction_output(chip, offset, value);
if (status == 0)
set_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_value(chip->base + gpio, 0, value);
- trace_gpio_direction(chip->base + gpio, 0, status);
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ trace_gpio_direction(desc_to_gpio(desc), 0, status);
lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ return gpiod_direction_output(gpio_to_desc(gpio), value);
+}
EXPORT_SYMBOL_GPL(gpio_direction_output);
/**
@@ -1638,24 +1746,22 @@ EXPORT_SYMBOL_GPL(gpio_direction_output);
* @gpio: the gpio to set debounce time
* @debounce: debounce time is microseconds
*/
-int gpio_set_debounce(unsigned gpio, unsigned debounce)
+static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
unsigned long flags;
struct gpio_chip *chip;
- struct gpio_desc *desc = &gpio_desc[gpio];
int status = -EINVAL;
+ int offset;
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;
- gpio -= chip->base;
- if (gpio >= chip->ngpio)
- goto fail;
- status = gpio_ensure_requested(desc, gpio);
+
+ status = gpio_ensure_requested(desc);
if (status < 0)
goto fail;
@@ -1665,16 +1771,26 @@ int gpio_set_debounce(unsigned gpio, unsigned debounce)
might_sleep_if(chip->can_sleep);
- return chip->set_debounce(chip, gpio, debounce);
+ offset = gpio_chip_hwgpio(desc);
+ return chip->set_debounce(chip, offset, debounce);
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
+ if (status) {
+ int gpio = -1;
+ if (desc)
+ gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
+ }
return status;
}
+
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+}
EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
@@ -1708,18 +1824,25 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
-int __gpio_get_value(unsigned gpio)
+static int gpiod_get_value(struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
+ int offset;
- chip = gpio_to_chip(gpio);
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */
WARN_ON(chip->can_sleep);
- value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
- trace_gpio_value(gpio, 1, value);
+ value = chip->get ? chip->get(chip, offset) : 0;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
+
+int __gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_value(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(__gpio_get_value);
/*
@@ -1728,23 +1851,25 @@ EXPORT_SYMBOL_GPL(__gpio_get_value);
* @chip: Gpio chip.
* @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void _gpio_set_open_drain_value(unsigned gpio,
- struct gpio_chip *chip, int value)
+static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
{
int err = 0;
+ struct gpio_chip *chip = desc->chip;
+ int offset = gpio_chip_hwgpio(desc);
+
if (value) {
- err = chip->direction_input(chip, gpio - chip->base);
+ err = chip->direction_input(chip, offset);
if (!err)
- clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ clear_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_output(chip, gpio - chip->base, 0);
+ err = chip->direction_output(chip, offset, 0);
if (!err)
- set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ set_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(gpio, value, err);
+ trace_gpio_direction(desc_to_gpio(desc), value, err);
if (err < 0)
pr_err("%s: Error in set_value for open drain gpio%d err %d\n",
- __func__, gpio, err);
+ __func__, desc_to_gpio(desc), err);
}
/*
@@ -1753,26 +1878,27 @@ static void _gpio_set_open_drain_value(unsigned gpio,
* @chip: Gpio chip.
* @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void _gpio_set_open_source_value(unsigned gpio,
- struct gpio_chip *chip, int value)
+static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
{
int err = 0;
+ struct gpio_chip *chip = desc->chip;
+ int offset = gpio_chip_hwgpio(desc);
+
if (value) {
- err = chip->direction_output(chip, gpio - chip->base, 1);
+ err = chip->direction_output(chip, offset, 1);
if (!err)
- set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_input(chip, gpio - chip->base);
+ err = chip->direction_input(chip, offset);
if (!err)
- clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
+ clear_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(gpio, !value, err);
+ trace_gpio_direction(desc_to_gpio(desc), !value, err);
if (err < 0)
pr_err("%s: Error in set_value for open source gpio%d err %d\n",
- __func__, gpio, err);
+ __func__, desc_to_gpio(desc), err);
}
-
/**
* __gpio_set_value() - assign a gpio's value
* @gpio: gpio whose value will be assigned
@@ -1782,20 +1908,25 @@ static void _gpio_set_open_source_value(unsigned gpio,
* This is used directly or indirectly to implement gpio_set_value().
* It invokes the associated gpio_chip.set() method.
*/
-void __gpio_set_value(unsigned gpio, int value)
+static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
- chip = gpio_to_chip(gpio);
+ chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);
- trace_gpio_value(gpio, 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
- _gpio_set_open_drain_value(gpio, chip, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
- _gpio_set_open_source_value(gpio, chip, value);
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ _gpio_set_open_drain_value(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ _gpio_set_open_source_value(desc, value);
else
- chip->set(chip, gpio - chip->base, value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
+}
+
+void __gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_value(gpio_to_desc(gpio), value);
}
EXPORT_SYMBOL_GPL(__gpio_set_value);
@@ -1807,14 +1938,15 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if reading or writing the GPIO value can sleep.
*/
-int __gpio_cansleep(unsigned gpio)
+static int gpiod_cansleep(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
-
/* only call this on GPIOs that are valid! */
- chip = gpio_to_chip(gpio);
+ return desc->chip->can_sleep;
+}
- return chip->can_sleep;
+int __gpio_cansleep(unsigned gpio)
+{
+ return gpiod_cansleep(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(__gpio_cansleep);
@@ -1827,50 +1959,67 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
-int __gpio_to_irq(unsigned gpio)
+static int gpiod_to_irq(struct gpio_desc *desc)
{
struct gpio_chip *chip;
+ int offset;
- chip = gpio_to_chip(gpio);
- return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO;
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+ return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
}
-EXPORT_SYMBOL_GPL(__gpio_to_irq);
+int __gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
+EXPORT_SYMBOL_GPL(__gpio_to_irq);
/* There's no value in making it easy to inline GPIO calls that may sleep.
* Common examples include ones connected to I2C or SPI chips.
*/
-int gpio_get_value_cansleep(unsigned gpio)
+static int gpiod_get_value_cansleep(struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
+ int offset;
might_sleep_if(extra_checks);
- chip = gpio_to_chip(gpio);
- value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
- trace_gpio_value(gpio, 1, value);
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+ value = chip->get ? chip->get(chip, offset) : 0;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
+
+int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_value_cansleep(gpio_to_desc(gpio));
+}
EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
-void gpio_set_value_cansleep(unsigned gpio, int value)
+static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
might_sleep_if(extra_checks);
- chip = gpio_to_chip(gpio);
- trace_gpio_value(gpio, 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
- _gpio_set_open_drain_value(gpio, chip, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
- _gpio_set_open_source_value(gpio, chip, value);
+ chip = desc->chip;
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ _gpio_set_open_drain_value(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ _gpio_set_open_source_value(desc, value);
else
- chip->set(chip, gpio - chip->base, value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
}
-EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
+void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ return gpiod_set_value_cansleep(gpio_to_desc(gpio), value);
+}
+EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
#ifdef CONFIG_DEBUG_FS
@@ -1878,14 +2027,14 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
unsigned i;
unsigned gpio = chip->base;
- struct gpio_desc *gdesc = &gpio_desc[gpio];
+ struct gpio_desc *gdesc = &chip->desc[0];
int is_out;
for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
continue;
- gpio_get_direction(gpio);
+ gpiod_get_direction(gdesc);
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
gpio, gdesc->label,
@@ -1899,46 +2048,35 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
+ unsigned long flags;
struct gpio_chip *chip = NULL;
- unsigned int gpio;
- void *ret = NULL;
- loff_t index = 0;
-
- /* REVISIT this isn't locked against gpio_chip removal ... */
+ loff_t index = *pos;
- for (gpio = 0; gpio_is_valid(gpio); gpio++) {
- if (gpio_desc[gpio].chip == chip)
- continue;
-
- chip = gpio_desc[gpio].chip;
- if (!chip)
- continue;
+ s->private = "";
- if (index++ >= *pos) {
- ret = chip;
- break;
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_for_each_entry(chip, &gpio_chips, list)
+ if (index-- == 0) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return chip;
}
- }
-
- s->private = "";
+ spin_unlock_irqrestore(&gpio_lock, flags);
- return ret;
+ return NULL;
}
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
+ unsigned long flags;
struct gpio_chip *chip = v;
- unsigned int gpio;
void *ret = NULL;
- /* skip GPIOs provided by the current chip */
- for (gpio = chip->base + chip->ngpio; gpio_is_valid(gpio); gpio++) {
- chip = gpio_desc[gpio].chip;
- if (chip) {
- ret = chip;
- break;
- }
- }
+ spin_lock_irqsave(&gpio_lock, flags);
+ if (list_is_last(&chip->list, &gpio_chips))
+ ret = NULL;
+ else
+ ret = list_entry(chip->list.next, struct gpio_chip, list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
s->private = "\n";
++*pos;
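
Throughout the gpiolib.c diff above, the exported integer-based calls survive as thin wrappers that translate the number with gpio_to_desc() and delegate to the new gpiod_* helpers, so callers of the legacy API are unaffected. A hedged consumer-side sketch; the GPIO number and label are invented:

#include <linux/gpio.h>

#define DEMO_GPIO       42      /* hypothetical global GPIO number */

static int demo_claim_gpio(void)
{
        int err;

        /* request + configure as output-low + export to sysfs in one call */
        err = gpio_request_one(DEMO_GPIO,
                               GPIOF_OUT_INIT_LOW | GPIOF_EXPORT, "demo");
        if (err)
                return err;

        gpio_set_value_cansleep(DEMO_GPIO, 1);  /* may sleep on I2C/SPI expanders */
        gpio_free(DEMO_GPIO);                   /* unexport is implicit in free */
        return 0;
}
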
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 983201b450f1..1e82882da9de 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ select HDMI
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
@@ -69,6 +70,8 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+source "drivers/gpu/drm/i2c/Kconfig"
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -96,6 +99,7 @@ config DRM_RADEON
select DRM_TTM
select POWER_SUPPLY
select HWMON
+ select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -212,3 +216,7 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
+
+source "drivers/gpu/drm/tilcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81cfcbc..0d59b24f8d23 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,4 +50,6 @@ obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 2d2c2f8d6dc6..df0d0a08097a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,9 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_post_gpu(dev);
drm_mode_config_reset(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
console_lock();
ast_fbdev_set_suspend(dev, 0);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5ccf984f063a..528429252f0f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,8 @@ struct ast_private {
struct drm_gem_object *cursor_cache;
uint64_t cursor_cache_gpu_addr;
+ /* Access to this cache is protected by the crtc->mutex of the only crtc
+ * we have. */
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d9ec77959dff..34931fe7d2c5 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -40,6 +40,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include "ast_drv.h"
static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -145,9 +146,10 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
return ret;
}
-static int astfb_create(struct ast_fbdev *afbdev,
+static int astfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;
@@ -248,26 +250,10 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static int ast_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = astfb_create(afbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
- .fb_probe = ast_find_or_create_single,
+ .fb_probe = astfb_create,
};
static void ast_fbdev_destroy(struct drm_device *dev,
@@ -290,6 +276,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&afbdev->helper);
vfree(afbdev->sysram);
+ drm_framebuffer_unregister_private(&afb->base);
drm_framebuffer_cleanup(&afb->base);
}
@@ -313,6 +300,10 @@ int ast_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&afbdev->helper, 32);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f668e6cc0f7a..f60fd7bd1183 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -246,16 +246,8 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- return -EINVAL;
-}
-
static const struct drm_framebuffer_funcs ast_fb_funcs = {
.destroy = ast_user_framebuffer_destroy,
- .create_handle = ast_user_framebuffer_create_handle,
};
@@ -266,13 +258,13 @@ int ast_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
- ast_fb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 6c6b4c87d309..e25afccaf85b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
return ret;
}
-static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
@@ -219,23 +221,6 @@ out_iounmap:
return ret;
}
-static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = cirrusfb_create(gfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
@@ -258,6 +243,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_unregister_private(&gfb->base);
drm_framebuffer_cleanup(&gfb->base);
return 0;
@@ -266,7 +252,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
- .fb_probe = cirrus_fb_find_or_create_single,
+ .fb_probe = cirrusfb_create,
};
int cirrus_fbdev_init(struct cirrus_device *cdev)
@@ -290,6 +276,9 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(cdev->dev);
drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 6a9b12e88d46..35cbae827771 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -23,16 +23,8 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
.destroy = cirrus_user_framebuffer_destroy,
- .create_handle = cirrus_user_framebuffer_create_handle,
};
int cirrus_framebuffer_init(struct drm_device *dev,
@@ -42,13 +34,13 @@ int cirrus_framebuffer_init(struct drm_device *dev,
{
int ret;
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 45adf97e678f..725968d38976 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
*/
static int drm_ctxbitmap_next(struct drm_device * dev)
{
- int new_id;
int ret;
-again:
- if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, NULL,
- DRM_RESERVED_CONTEXTS, &new_id);
+ ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+ GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
-
- return new_id;
+ return ret;
}
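
The conversion above replaces the old idr_pre_get()/idr_get_new_above() retry dance with a single idr_alloc() call. A minimal sketch of the new allocation pattern, outside the context of this patch and using hypothetical example_idr/example_lock names:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

/* example_idr and example_lock are hypothetical, for illustration only. */
static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

static int example_alloc_id(void *ptr)
{
	int id;

	mutex_lock(&example_lock);
	/*
	 * idr_alloc() allocates its own memory and returns either the new
	 * id (>= 1 here) or a negative errno, so the old idr_pre_get() /
	 * -EAGAIN retry loop is no longer needed.
	 */
	id = idr_alloc(&example_idr, ptr, 1, 0, GFP_KERNEL);
	mutex_unlock(&example_lock);

	return id;
}
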
/**
@@ -118,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
- idr_remove_all(&dev->ctx_idr);
+ idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f2d667b8bee2..792c3e3795ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,54 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ mutex_unlock(&crtc->mutex);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
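
A driver is expected to bracket whole-device modesets with these helpers where fine-grained locking is not needed, as the ast_drm_thaw() hunk earlier in this patch already does. A minimal hypothetical sketch of such a resume path (foo_resume_modeset is illustrative, not part of the patch):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static void foo_resume_modeset(struct drm_device *dev)
{
	/* Takes mode_config.mutex plus every crtc->mutex. */
	drm_modeset_lock_all(dev);

	drm_helper_resume_force_mode(dev);

	/* Drops all of the locks taken above. */
	drm_modeset_unlock_all(dev);
}
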
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
@@ -203,12 +251,10 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
}
/**
- * drm_mode_object_get - allocate a new identifier
+ * drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
- * @ptr: object pointer, used to generate unique ID
- * @type: object type
- *
- * LOCKING:
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
@@ -220,35 +266,27 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
static int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- int new_id = 0;
int ret;
-again:
- if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Ran out memory getting a mode number\n");
- return -ENOMEM;
- }
-
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
- obj->id = new_id;
- obj->type = obj_type;
- return 0;
+ return ret < 0 ? ret : 0;
}
/**
- * drm_mode_object_put - free an identifer
+ * drm_mode_object_put - free a modeset identifier
* @dev: DRM device
- * @id: ID to free
- *
- * LOCKING:
- * Caller must hold DRM mode_config lock.
+ * @object: object to free
*
* Free @id from @dev's unique identifier pool.
*/
@@ -260,11 +298,24 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment.
+ */
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
@@ -278,13 +329,18 @@ EXPORT_SYMBOL(drm_mode_object_find);
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
- *
- * LOCKING:
- * Caller must hold mode config lock.
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
* RETURNS:
* Zero on success, error code on failure.
*/
@@ -293,16 +349,23 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ mutex_lock(&dev->mode_config.fb_lock);
kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
- return ret;
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
- fb->dev = dev;
- fb->funcs = funcs;
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
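
The reordered ast and cirrus framebuffer_init hunks above follow the IMPORTANT note: all framebuffer state has to be filled in before drm_framebuffer_init() publishes the object. A hypothetical driver-side sketch of that ordering (the foo_* names are illustrative):

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

struct foo_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

static void foo_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	drm_framebuffer_cleanup(fb);
	kfree(fb);	/* frees the embedding foo_framebuffer */
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy = foo_user_framebuffer_destroy,
};

static int foo_framebuffer_init(struct drm_device *dev,
				struct foo_framebuffer *foo_fb,
				struct drm_mode_fb_cmd2 *mode_cmd,
				struct drm_gem_object *obj)
{
	/*
	 * Fill in all fb state *before* drm_framebuffer_init(): once the
	 * fb is in the idr, other threads may look it up concurrently.
	 */
	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
	foo_fb->obj = obj;

	return drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
}
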
@@ -315,23 +378,63 @@ static void drm_framebuffer_free(struct kref *kref)
fb->funcs->destroy(fb);
}
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ kref_get(&fb->refcount);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
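
drm_framebuffer_lookup() returns with an extra reference held, so every successful lookup must eventually be balanced by drm_framebuffer_unreference(). A minimal hypothetical user (foo_get_fb_width is illustrative):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int foo_get_fb_width(struct drm_device *dev, uint32_t fb_id,
			    unsigned int *width)
{
	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* grabs a reference */
	if (!fb)
		return -ENOENT;

	*width = fb->width;

	drm_framebuffer_unreference(fb);		/* balances the lookup */
	return 0;
}
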
/**
* drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * This function decrements the fb's refcount and frees it if it drops to zero.
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- struct drm_device *dev = fb->dev;
DRM_DEBUG("FB ID: %d\n", fb->base.id);
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
/**
* drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
@@ -340,29 +443,74 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_reference);
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
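
The ast and cirrus fbdev-destroy hunks earlier in this patch show the intended teardown order for such driver-private framebuffers; condensed into a hypothetical sketch:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void foo_fbdev_framebuffer_fini(struct drm_framebuffer *fb)
{
	/*
	 * Drop the idr reference first. The caller still holds its own
	 * reference, so the fb cannot be destroyed from here (which would
	 * invert the locking).
	 */
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);
	/* ...then free the driver's backing storage and wrapper struct. */
}
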
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the driver's ->destroy callback.
*
- * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
- * it, setting it to NULL.
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
- /*
- * This could be moved to drm_framebuffer_remove(), but for
- * debugging is nice to keep around the list of fb's that are
- * no longer associated w/ a drm_file but are not unreferenced
- * yet. (i915 and omapdrm have debugfs files which will show
- * this.)
- */
- drm_mode_object_put(dev, &fb->base);
+
+ mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
@@ -370,11 +518,13 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Scans all the CRTCs and planes in @dev's mode_config. If they're
- * using @fb, removes it, setting it to NULL.
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
@@ -384,33 +534,53 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
struct drm_mode_set set;
int ret;
- /* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb) {
- /* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = crtc->funcs->set_config(&set);
- if (ret)
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+ * in this manner.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
}
- }
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- if (plane->fb == fb) {
- /* should turn off the crtc */
- ret = plane->funcs->disable_plane(plane);
- if (ret)
- DRM_ERROR("failed to disable plane with busy fb\n");
- /* disconnect the plane from the fb and crtc: */
- plane->fb = NULL;
- plane->crtc = NULL;
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
}
+ drm_modeset_unlock_all(dev);
}
- list_del(&fb->filp_head);
-
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -421,9 +591,6 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
- * LOCKING:
- * Takes mode_config lock.
- *
 * Inits a new object created as base part of a driver crtc object.
*
* RETURNS:
@@ -438,7 +605,9 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
crtc->funcs = funcs;
crtc->invert_dimensions = false;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ mutex_init(&crtc->mutex);
+ mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -450,7 +619,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
dev->mode_config.num_crtc++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -460,9 +629,6 @@ EXPORT_SYMBOL(drm_crtc_init);
* drm_crtc_cleanup - Cleans up the core crtc usage.
* @crtc: CRTC to cleanup
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Cleanup @crtc. Removes from drm modesetting space
* does NOT free object, caller does that.
*/
@@ -484,9 +650,6 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
* @connector: connector the new mode
* @mode: mode data
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
@@ -501,9 +664,6 @@ EXPORT_SYMBOL(drm_mode_probed_add);
* @connector: connector list to modify
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
@@ -519,10 +679,7 @@ EXPORT_SYMBOL(drm_mode_remove);
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
- * @name: user visible name of the connector
- *
- * LOCKING:
- * Takes mode config lock.
+ * @connector_type: user visible type of the connector
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
@@ -537,7 +694,7 @@ int drm_connector_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
@@ -567,7 +724,7 @@ int drm_connector_init(struct drm_device *dev,
dev->mode_config.dpms_property, 0);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -577,9 +734,6 @@ EXPORT_SYMBOL(drm_connector_init);
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
- * LOCKING:
- * Takes mode config lock.
- *
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
@@ -596,11 +750,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
- mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -622,7 +774,7 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
@@ -636,7 +788,7 @@ int drm_encoder_init(struct drm_device *dev,
dev->mode_config.num_encoder++;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -645,11 +797,11 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -661,7 +813,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
{
int ret;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
@@ -695,7 +847,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -705,7 +857,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
@@ -713,7 +865,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
list_del(&plane->head);
dev->mode_config.num_plane--;
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -721,9 +873,6 @@ EXPORT_SYMBOL(drm_plane_cleanup);
* drm_mode_create - create a new display mode
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold DRM mode_config lock.
- *
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
@@ -751,9 +900,6 @@ EXPORT_SYMBOL(drm_mode_create);
* @dev: DRM device
* @mode: mode to remove
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
@@ -978,16 +1124,19 @@ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
- * LOCKING:
- * None, should happen single threaded at init time.
- *
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible, which is no
+ * problem since this should happen single-threaded at init time. It is the
+ * driver's problem to ensure this guarantee.
+ *
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
@@ -997,9 +1146,9 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
/* Just to be sure */
dev->mode_config.num_fb = 0;
@@ -1057,12 +1206,13 @@ EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
- * LOCKING:
- * Caller must hold mode config lock.
- *
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
@@ -1089,6 +1239,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_property_destroy(dev, property);
}
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. On the contrary, it
+ * would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_remove(fb);
}
@@ -1102,7 +1261,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
- idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1112,9 +1270,6 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
@@ -1151,9 +1306,6 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
- * LOCKING:
- * None.
- *
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
@@ -1188,13 +1340,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
/**
* drm_mode_getresources - get graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
@@ -1228,8 +1376,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&file_priv->fbs_lock);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
@@ -1237,6 +1385,23 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each(lh, &file_priv->fbs)
fb_count++;
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
@@ -1260,21 +1425,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
-
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
@@ -1370,19 +1520,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_connectors, card_res->count_encoders);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_getcrtc - get CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a CRTC configuration structure to return to the user.
*
@@ -1402,7 +1548,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1430,19 +1576,15 @@ int drm_mode_getcrtc(struct drm_device *dev,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
* drm_mode_getconnector - get connector configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Construct a connector configuration structure to return to the user.
*
@@ -1575,6 +1717,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
mutex_unlock(&dev->mode_config.mutex);
+
return ret;
}
@@ -1589,7 +1732,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
@@ -1608,7 +1751,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->possible_clones = encoder->possible_clones;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1618,9 +1761,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
 * Return a plane count and set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
@@ -1635,7 +1775,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
config = &dev->mode_config;
/*
@@ -1657,7 +1797,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
plane_resp->count_planes = config->num_plane;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1667,9 +1807,6 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
- * LOCKING:
- * Takes mode config lock.
- *
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
@@ -1685,7 +1822,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -1725,7 +1862,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
plane_resp->count_format_types = plane->format_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1733,10 +1870,7 @@ out:
 * drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
 * @data: ioctl data
- * @file_prive: DRM file info
- *
- * LOCKING:
- * Takes mode config lock.
+ * @file_priv: DRM file info
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
@@ -1748,7 +1882,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
@@ -1756,8 +1890,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
@@ -1767,16 +1899,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
+ drm_modeset_unlock_all(dev);
goto out;
}
@@ -1790,15 +1924,13 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
crtc = obj_to_crtc(obj);
- obj = drm_mode_object_find(dev, plane_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
- fb = obj_to_fb(obj);
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
@@ -1844,31 +1976,62 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
+ drm_modeset_lock_all(dev);
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
+ old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
+ fb = NULL;
}
+ drm_modeset_unlock_all(dev);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
return ret;
}
/**
- * drm_mode_setcrtc - set CRTC configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
*
- * LOCKING:
- * Takes mode config lock.
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb, *old_fb;
+ int ret;
+
+ old_fb = crtc->fb;
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_set_config_internal);
+
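
Internal modesets are now supposed to go through this helper instead of calling ->set_config directly, so the framebuffer reference counts stay balanced; the drm_framebuffer_remove() hunk above already uses it when disabling a crtc. A hypothetical sketch of shutting a crtc down through the helper:

#include <linux/string.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int foo_shutdown_crtc(struct drm_crtc *crtc)
{
	struct drm_mode_set set;

	/*
	 * A set with fb == NULL turns the crtc off; the helper then drops
	 * the reference on the old crtc->fb (and would take one on a new fb).
	 */
	memset(&set, 0, sizeof(set));
	set.crtc = crtc;
	set.fb = NULL;

	return drm_mode_set_config_internal(&set);
}
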
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Build a new CRTC configuration based on user request.
*
@@ -1899,7 +2062,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1921,16 +2084,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
fb = crtc->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
} else {
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
goto out;
}
- fb = obj_to_fb(obj);
}
mode = drm_mode_create(dev);
@@ -2027,12 +2190,15 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
kfree(connector_set);
drm_mode_destroy(dev, mode);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -2050,15 +2216,14 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
ret = -ENXIO;
@@ -2078,7 +2243,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
}
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
@@ -2089,7 +2255,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
switch (bpp) {
case 8:
- fmt = DRM_FORMAT_RGB332;
+ fmt = DRM_FORMAT_C8;
break;
case 16:
if (depth == 15)
@@ -2120,13 +2286,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_mode_addfb - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request.
*
@@ -2161,24 +2323,19 @@ int drm_mode_addfb(struct drm_device *dev,
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
-
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -2304,13 +2461,9 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format.
*
@@ -2350,33 +2503,28 @@ int drm_mode_addfb2(struct drm_device *dev,
if (ret)
return ret;
- mutex_lock(&dev->mode_config.mutex);
-
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- ret = PTR_ERR(fb);
- goto out;
+ drm_modeset_unlock_all(dev);
+ return PTR_ERR(fb);
}
+ mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Remove the FB specified by the user.
*
@@ -2388,50 +2536,49 @@ out:
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
- int ret = 0;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
- /* TODO check that we really get a framebuffer back. */
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ mutex_lock(&file_priv->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
+ if (!found)
+ goto fail_lookup;
- if (!found) {
- ret = -EINVAL;
- goto out;
- }
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
drm_framebuffer_remove(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return -EINVAL;
}
/**
* drm_mode_getfb - get FB info
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Lookup the FB given its ID and return info about it.
*
@@ -2444,30 +2591,28 @@ int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
r->height = fb->height;
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
- fb->funcs->create_handle(fb, file_priv, &r->handle);
+ if (fb->funcs->create_handle)
+ ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+ else
+ ret = -ENODEV;
+
+ drm_framebuffer_unreference(fb);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -2477,7 +2622,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
@@ -2486,13 +2630,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- ret = -EINVAL;
- goto out_err1;
- }
- fb = obj_to_fb(obj);
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -2530,27 +2670,26 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
+ drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
+ drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
- goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(fb);
+
return ret;
}
/**
* drm_fb_release - remove and free the FBs on this file
- * @filp: file * from the ioctl
- *
- * LOCKING:
- * Takes mode config lock.
+ * @priv: drm file for the ioctl
*
* Destroy all the FBs associated with @filp.
*
@@ -2564,11 +2703,20 @@ void drm_fb_release(struct drm_file *priv)
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&priv->fbs_lock);
}
/**
@@ -2660,10 +2808,9 @@ EXPORT_SYMBOL(drm_mode_detachmode_crtc);
/**
 * drm_fb_attachmode - Attach a user mode to a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
 * This attaches a user-specified mode to a connector.
* Called by the user via ioctl.
@@ -2684,7 +2831,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2708,17 +2855,16 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
drm_mode_attachmode(dev, connector, mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
/**
 * drm_fb_detachmode - Detach a user-specified mode from a connector
- * @inode: inode from the ioctl
- * @filp: file * from the ioctl
- * @cmd: cmd from ioctl
- * @arg: arg from ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
*
* Called by the user via ioctl.
*
@@ -2738,7 +2884,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
@@ -2755,7 +2901,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
ret = drm_mode_detachmode(dev, connector, &mode);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3001,7 +3147,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
@@ -3079,7 +3225,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = blob_count;
}
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3130,7 +3276,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
@@ -3148,7 +3294,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
out_resp->length = blob->length;
done:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3290,7 +3436,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -3327,7 +3473,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
arg->count_props = props_count;
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3344,7 +3490,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
@@ -3382,7 +3528,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3444,7 +3590,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3485,7 +3631,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3503,7 +3649,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
@@ -3536,7 +3682,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
goto out;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3546,7 +3692,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int hdisplay, vdisplay;
@@ -3556,12 +3702,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
- goto out;
+ return -EINVAL;
crtc = obj_to_crtc(obj);
+ mutex_lock(&crtc->mutex);
if (crtc->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -3574,10 +3720,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (crtc->funcs->page_flip == NULL)
goto out;
- obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj)
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb)
goto out;
- fb = obj_to_fb(obj);
hdisplay = crtc->mode.hdisplay;
vdisplay = crtc->mode.vdisplay;
@@ -3623,6 +3768,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
+ old_fb = crtc->fb;
ret = crtc->funcs->page_flip(crtc, fb, e);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3631,10 +3777,27 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+ * field to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
}
out:
- mutex_unlock(&dev->mode_config.mutex);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ mutex_unlock(&crtc->mutex);
+
return ret;
}
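
The new WARN_ON(crtc->fb != fb) encodes the contract that a driver's ->page_flip hook updates crtc->fb itself on success, so that the core unreferences only the old framebuffer. A hypothetical driver skeleton (foo_queue_flip stands in for the hardware-specific part):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Hardware-specific flip work; hypothetical, not part of this patch. */
static int foo_queue_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			  struct drm_pending_vblank_event *event);

static int foo_crtc_page_flip(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb,
			      struct drm_pending_vblank_event *event)
{
	int ret;

	ret = foo_queue_flip(crtc, fb, event);
	if (ret)
		return ret;

	/*
	 * On success the core expects crtc->fb to point at the new fb and
	 * will then unreference only the old one.
	 */
	crtc->fb = fb;
	return 0;
}
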
@@ -3702,6 +3865,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be174cab105a..25f91cd23e60 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -297,7 +297,6 @@ static void __exit drm_core_exit(void)
unregister_chrdev(DRM_MAJOR, "drm");
- idr_remove_all(&drm_minors_idr);
idr_destroy(&drm_minors_idr);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a3770fbd770..c194f4e680ad 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,11 +29,11 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
-#include "drm_edid_modes.h"
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -87,9 +87,6 @@ static struct edid_quirk {
int product_id;
u32 quirks;
} edid_quirk_list[] = {
- /* ASUS VW222S */
- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
-
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
@@ -130,6 +127,746 @@ static struct edid_quirk {
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w;
+ short h;
+ short r;
+ short rb;
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 5 - 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 6 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 11 - 2880x480i@60Hz */
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 12 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 13 - 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 14 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 15 - 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 16 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 17 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 18 - 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 19 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 20 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 21 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 26 - 2880x576i@50Hz */
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 27 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 28 - 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 29 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 30 - 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 31 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 32 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 33 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 34 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 35 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 36 - 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 37 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 38 - 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 39 - 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 40 - 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 41 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 42 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 43 - 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 44 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 47 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 48 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 49 - 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 50 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 53 - 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 54 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 57 - 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 58 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+	/* 59 - 1440x480i@240Hz */
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 61 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 62 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 63 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 64 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@@ -357,10 +1094,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
break;
}
}
- if (i == 4)
+
+ if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
+
+ connector->bad_edid_counter++;
+ }
}
if (valid_extensions != block[0x7e]) {
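
For context on the warning above: an EDID block is judged valid essentially by an 8-bit checksum, i.e. the 128 bytes of the block must sum to 0 modulo 256, and the base block must additionally begin with the fixed 8-byte header. A self-contained sketch of that check (a simplified stand-in for the kernel's drm_edid_block_valid(), which is somewhat more forgiving about slightly damaged headers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

static const uint8_t edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* A block is valid when its 128 bytes sum to 0 (mod 256); the base
 * block must also start with the fixed EDID header. */
static bool edid_block_valid(const uint8_t *block, bool is_base_block)
{
	uint8_t sum = 0;
	size_t i;

	if (is_base_block &&
	    memcmp(block, edid_header, sizeof(edid_header)) != 0)
		return false;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];

	return sum == 0;
}

int main(void)
{
	uint8_t block[EDID_LENGTH] = { 0 };
	uint8_t sum = 0;
	size_t i;

	memcpy(block, edid_header, sizeof(edid_header));
	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += block[i];
	block[EDID_LENGTH - 1] = (uint8_t)(0x100 - sum);	/* checksum byte */

	printf("valid: %d\n", edid_block_valid(block, true));	/* prints 1 */
	return 0;
}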
@@ -541,7 +1282,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
{
int i;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
@@ -1082,7 +1823,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < drm_num_dmt_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
@@ -1117,7 +1858,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
@@ -1146,7 +1887,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
- for (i = 0; i < num_extra_modes; i++) {
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
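
The conversions above (and the matching drm_match_cea_mode()/do_cea_modes() hunks further down) replace the hand-maintained drm_num_dmt_modes, num_extra_modes and drm_num_cea_modes counters with ARRAY_SIZE() on the tables themselves, so the counts can no longer drift out of sync when entries are added. The idiom, shown standalone; the kernel's macro lives in <linux/kernel.h> and adds a compile-time must-be-array check, the plain definition below is the usual equivalent:

#include <stdio.h>

/* Same expansion as the kernel's ARRAY_SIZE(), minus the type check. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct minimode {
	short w, h, r, rb;
};

static const struct minimode extra_modes[] = {
	{ 1024,  576, 60, 0 },
	{ 1366,  768, 60, 0 },
	{ 1920, 1080, 60, 0 },
};

int main(void)
{
	size_t i;

	/* No separate num_extra_modes constant to keep in sync. */
	for (i = 0; i < ARRAY_SIZE(extra_modes); i++)
		printf("%dx%d@%d\n", extra_modes[i].w, extra_modes[i].h,
		       extra_modes[i].r);
	return 0;
}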
@@ -1483,9 +2224,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
/**
* Search EDID for CEA extension block.
@@ -1513,16 +2256,19 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
-/*
- * Looks for a CEA mode matching given drm_display_mode.
- * Returns its CEA Video ID code, or 0 if not found.
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
*/
-u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
- for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
if (drm_mode_equal(to_match, cea_mode))
@@ -1542,7 +2288,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
- if (cea_mode < drm_num_cea_modes) {
+ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
@@ -1902,6 +2648,37 @@ end:
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
+/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
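
The new drm_rgb_quant_range_selectable() above walks the CEA extension's data-block collection: each data block starts with a byte whose top three bits are the tag and whose low five bits are the payload length (which is all cea_db_tag() and cea_db_payload_len() decode), and a tag-7 block with a two-byte payload is treated as the Video Capability Data Block, whose second payload byte carries the QS bit. A standalone sketch of that walk over a raw byte collection; the sample bytes are made up for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VIDEO_CAPABILITY_BLOCK	0x07
#define EDID_CEA_VCDB_QS	(1 << 6)

/* First byte of a CEA data block: tag in bits 7:5, payload length in 4:0. */
static uint8_t db_tag(const uint8_t *db) { return db[0] >> 5; }
static uint8_t db_len(const uint8_t *db) { return db[0] & 0x1f; }

/* Scan a data-block collection for a Video Capability Data Block and
 * report whether its quantization-range-selectable (QS) bit is set. */
static bool rgb_quant_range_selectable(const uint8_t *coll, size_t size)
{
	size_t i = 0;

	while (i < size && i + 1 + db_len(&coll[i]) <= size) {
		if (db_tag(&coll[i]) == VIDEO_CAPABILITY_BLOCK &&
		    db_len(&coll[i]) == 2)
			/* payload: extended tag byte, then capabilities */
			return coll[i + 2] & EDID_CEA_VCDB_QS;
		i += 1 + db_len(&coll[i]);
	}
	return false;
}

int main(void)
{
	/* Made-up collection: a video data block with three VICs,
	 * then a VCDB whose capability byte has QS (bit 6) set. */
	const uint8_t coll[] = { 0x43, 0x04, 0x13, 0x1f, 0xe2, 0x00, 0x40 };

	printf("QS selectable: %d\n",
	       (int)rgb_quant_range_selectable(coll, sizeof(coll)));
	return 0;
}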
@@ -2020,7 +2797,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
- num_modes += add_inferred_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -2081,20 +2859,33 @@ int drm_add_modes_noedid(struct drm_connector *connector,
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
- * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
- * @mode: mode
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
*
- * RETURNS:
- * The VIC number, 0 in case it's not a CEA-861 mode.
+ * Returns 0 on success or a negative error code on failure.
*/
-uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode)
{
- uint8_t i;
+ int err;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ err = hdmi_avi_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->video_code = drm_match_cea_mode(mode);
+ if (!frame->video_code)
+ return 0;
- for (i = 0; i < drm_num_cea_modes; i++)
- if (drm_mode_equal(mode, &edid_cea_modes[i]))
- return i + 1;
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
return 0;
}
-EXPORT_SYMBOL(drm_mode_cea_vic);
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
deleted file mode 100644
index 5dbf7d2557b4..000000000000
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
- * Copyright 2010 Red Hat, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <drm/drmP.h>
-#include <drm/drm_edid.h>
-
-/*
- * Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@120Hz RB */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
- 880, 960, 0, 600, 603, 607, 636, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@120Hz RB */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
- 1104, 1184, 0, 768, 771, 775, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 790, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@120Hz RB */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
- 1360, 1440, 0, 768, 771, 778, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 823, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@120Hz RB */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
- 1360, 1440, 0, 800, 803, 809, 847, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@120Hz RB */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
- 1360, 1440, 0, 960, 963, 967, 1017, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@120Hz RB */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
- 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@120Hz RB */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
- 1440, 1520, 0, 768, 771, 776, 813, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1400x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1400x1050@120Hz RB */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
- 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 926, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@120Hz RB */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
- 1520, 1600, 0, 900, 903, 909, 953, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@120Hz RB */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
- 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@120Hz RB */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
- 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@120Hz RB */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
- 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1856x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@120Hz RB */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
- 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@120Hz RB */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
- 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@120Hz RB */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
- 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@120Hz RB */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
- 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
-static const struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-struct minimode {
- short w;
- short h;
- short r;
- short rb;
-};
-
-static const struct minimode est3_modes[] = {
- /* byte 6 */
- { 640, 350, 85, 0 },
- { 640, 400, 85, 0 },
- { 720, 400, 85, 0 },
- { 640, 480, 85, 0 },
- { 848, 480, 60, 0 },
- { 800, 600, 85, 0 },
- { 1024, 768, 85, 0 },
- { 1152, 864, 75, 0 },
- /* byte 7 */
- { 1280, 768, 60, 1 },
- { 1280, 768, 60, 0 },
- { 1280, 768, 75, 0 },
- { 1280, 768, 85, 0 },
- { 1280, 960, 60, 0 },
- { 1280, 960, 85, 0 },
- { 1280, 1024, 60, 0 },
- { 1280, 1024, 85, 0 },
- /* byte 8 */
- { 1360, 768, 60, 0 },
- { 1440, 900, 60, 1 },
- { 1440, 900, 60, 0 },
- { 1440, 900, 75, 0 },
- { 1440, 900, 85, 0 },
- { 1400, 1050, 60, 1 },
- { 1400, 1050, 60, 0 },
- { 1400, 1050, 75, 0 },
- /* byte 9 */
- { 1400, 1050, 85, 0 },
- { 1680, 1050, 60, 1 },
- { 1680, 1050, 60, 0 },
- { 1680, 1050, 75, 0 },
- { 1680, 1050, 85, 0 },
- { 1600, 1200, 60, 0 },
- { 1600, 1200, 65, 0 },
- { 1600, 1200, 70, 0 },
- /* byte 10 */
- { 1600, 1200, 75, 0 },
- { 1600, 1200, 85, 0 },
- { 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
- { 1856, 1392, 60, 0 },
- { 1856, 1392, 75, 0 },
- { 1920, 1200, 60, 1 },
- { 1920, 1200, 60, 0 },
- /* byte 11 */
- { 1920, 1200, 75, 0 },
- { 1920, 1200, 85, 0 },
- { 1920, 1440, 60, 0 },
- { 1920, 1440, 75, 0 },
-};
-static const int num_est3_modes = ARRAY_SIZE(est3_modes);
-
-static const struct minimode extra_modes[] = {
- { 1024, 576, 60, 0 },
- { 1366, 768, 60, 0 },
- { 1600, 900, 60, 0 },
- { 1680, 945, 60, 0 },
- { 1920, 1080, 60, 0 },
- { 2048, 1152, 60, 0 },
- { 2048, 1536, 60, 0 },
-};
-static const int num_extra_modes = ARRAY_SIZE(extra_modes);
-
-/*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
- */
-static const struct drm_display_mode edid_cea_modes[] = {
- /* 1 - 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3 - 720x480@60Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 4 - 1280x720@60Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 5 - 1920x1080i@60Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 6 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 7 - 1440x480i@60Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 8 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 9 - 1440x240@60Hz */
- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
- 1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 10 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 11 - 2880x480i@60Hz */
- { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 12 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 13 - 2880x240@60Hz */
- { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
- 3204, 3432, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 14 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 15 - 1440x480@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
- 1596, 1716, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 16 - 1920x1080@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 17 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 18 - 720x576@50Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 19 - 1280x720@50Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 20 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 21 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 22 - 1440x576i@50Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 23 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 24 - 1440x288@50Hz */
- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
- 1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 25 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 26 - 2880x576i@50Hz */
- { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 27 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 28 - 2880x288@50Hz */
- { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
- 3180, 3456, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 29 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 30 - 1440x576@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1592, 1728, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 31 - 1920x1080@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 32 - 1920x1080@24Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
- 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 33 - 1920x1080@25Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 34 - 1920x1080@30Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 35 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 36 - 2880x480@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
- 3192, 3432, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 37 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 38 - 2880x576@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
- 3184, 3456, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 39 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
- 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 40 - 1920x1080i@100Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 41 - 1280x720@100Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
- 1760, 1980, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 42 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 43 - 720x576@100Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 44 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 45 - 1440x576i@100Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK) },
- /* 46 - 1920x1080i@120Hz */
- { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 47 - 1280x720@120Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
- 1430, 1650, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 48 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 49 - 720x480@120Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 50 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 51 - 1440x480i@120Hz */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 52 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 53 - 720x576@200Hz */
- { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
- 796, 864, 0, 576, 581, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 54 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 55 - 1440x576i@200Hz */
- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
- 1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 56 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 57 - 720x480@240Hz */
- { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
- 798, 858, 0, 480, 489, 495, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 58 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 59 - 1440x480i@240 */
- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
- 1602, 1716, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
- /* 60 - 1280x720@24Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 61 - 1280x720@25Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
- 3740, 3960, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 62 - 1280x720@30Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
- 3080, 3300, 0, 720, 725, 730, 750, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 63 - 1920x1080@120Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
- 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 64 - 1920x1080@100Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 63e733408b6d..48c52f7df4e6 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -123,3 +123,66 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
module_put(module);
}
EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+ get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
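
For context, these wrappers let a driver using an i2c slave encoder fill its drm_encoder_helper_funcs table directly instead of open-coding the slave_funcs dispatch. A minimal sketch of such a table follows; the struct name is illustrative and not part of this patch:

#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>

/* Hypothetical helper-funcs table wired straight to the new wrappers. */
static const struct drm_encoder_helper_funcs example_encoder_helper_funcs = {
        .dpms           = drm_i2c_encoder_dpms,
        .save           = drm_i2c_encoder_save,
        .restore        = drm_i2c_encoder_restore,
        .mode_fixup     = drm_i2c_encoder_mode_fixup,
        .prepare        = drm_i2c_encoder_prepare,
        .commit         = drm_i2c_encoder_commit,
        .mode_set       = drm_i2c_encoder_mode_set,
        .detect         = drm_i2c_encoder_detect,
};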
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fd9d0af4d536..0b5af7d0edb1 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -85,6 +85,11 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
if (!fb_cma)
return ERR_PTR(-ENOMEM);
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
if (ret) {
dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
@@ -92,11 +97,6 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
return fb_cma;
}
@@ -180,6 +180,59 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+#ifdef CONFIG_DEBUG_FS
+/**
+ * drm_fb_cma_describe() - Helper to dump information about a single
+ * CMA framebuffer object
+ */
+void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+ (char *)&fb->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ drm_gem_cma_describe(fb_cma->obj[i], m);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ * in debugfs.
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ drm_fb_cma_describe(fb, m);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = sys_fillrect,
@@ -266,6 +319,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_drm_fb_cma_destroy:
+ drm_framebuffer_unregister_private(fb);
drm_fb_cma_destroy(fb);
err_framebuffer_release:
framebuffer_release(fbi);
@@ -274,23 +328,8 @@ err_drm_gem_cma_free_object:
return ret;
}
-static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- if (!helper->fb) {
- ret = drm_fbdev_cma_create(helper, sizes);
- if (ret < 0)
- return ret;
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_probe,
+ .fb_probe = drm_fbdev_cma_create,
};
/**
@@ -332,6 +371,9 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, preferred_bpp);
if (ret < 0) {
dev_err(dev->dev, "Failed to set inital hw configuration.\n");
@@ -370,8 +412,10 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
framebuffer_release(info);
}
- if (fbdev_cma->fb)
+ if (fbdev_cma->fb) {
+ drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+ }
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -386,8 +430,13 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma)
+ if (fbdev_cma) {
+ struct drm_device *dev = fbdev_cma->fb_helper.dev;
+
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+ drm_modeset_unlock_all(dev);
+ }
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
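
As a hedged sketch of how a CMA-based driver might consume the updated helpers (all driver-side names below are illustrative): restore the fbdev mode from ->lastclose, which now takes and drops the modeset locks internally, and expose framebuffers through the new debugfs helper.

#include <drm/drmP.h>
#include <drm/drm_fb_cma_helper.h>

struct example_private {
        struct drm_fbdev_cma *fbdev;    /* returned by drm_fbdev_cma_init() */
};

static void example_lastclose(struct drm_device *dev)
{
        struct example_private *priv = dev->dev_private;

        /* Grabs and drops all modeset locks internally. */
        drm_fbdev_cma_restore_mode(priv->fbdev);
}

#ifdef CONFIG_DEBUG_FS
/* Lists all CMA framebuffers via the new debugfs helper. */
static struct drm_info_list example_debugfs_list[] = {
        { "fb", drm_fb_cma_debugfs_show, 0 },
};
#endif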
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 954d175bd7fa..59d6b9bf204b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
 * mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code about updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
*/
-/* simple single crtc case helper function */
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -163,6 +190,10 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
+/**
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -208,6 +239,10 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
return NULL;
}
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -239,13 +274,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from the driver's ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
bool error = false;
int i, ret;
+
+ drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- ret = mode_set->crtc->funcs->set_config(mode_set);
+ ret = drm_mode_set_config_internal(mode_set);
if (ret)
error = true;
}
@@ -253,6 +299,10 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
@@ -272,7 +322,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
return error;
}
-int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
/*
@@ -285,30 +335,36 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
-EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
-/**
- * drm_fb_helper_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void drm_fb_helper_restore(void)
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
- bool ret;
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+ return true;
}
-EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- drm_fb_helper_restore();
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
@@ -335,9 +391,22 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -352,9 +421,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
dev->mode_config.dpms_property, dpms_mode);
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
@@ -398,6 +472,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
@@ -526,6 +618,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
return 0;
}
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -565,6 +662,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -657,13 +759,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
-/* this will let fbcon do the mode init */
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
- struct drm_crtc *crtc;
int ret;
int i;
@@ -672,16 +780,15 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
@@ -691,6 +798,11 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -701,7 +813,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -711,22 +828,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- ret = crtc->funcs->set_config(modeset);
+ ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- int new_fb = 0;
+ int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
@@ -804,27 +926,30 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
/* push down into drivers */
- new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
- if (new_fb < 0)
- return new_fb;
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
info = fb_helper->fbdev;
- /* set the fb pointer */
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
for (i = 0; i < fb_helper->crtc_count; i++)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- if (new_fb) {
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
- } else {
- drm_fb_helper_set_par(info);
- }
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
@@ -834,13 +959,25 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- if (new_fb)
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information, useful for non-accelerated
+ * fbdev emulation. Drivers that support acceleration methods which impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -861,6 +998,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
+ */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1284,6 +1435,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
+ modeset->fb = NULL;
}
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -1300,9 +1452,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
}
}
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
out:
kfree(crtcs);
kfree(modes);
@@ -1310,18 +1474,23 @@ out:
}
/**
- * drm_helper_initial_config - setup a sane initial connector configuration
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
- * LOCKING:
- * Called at init time by the driver to set up the @fb_helper initial
- * configuration, must take the mode config lock.
- *
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to set up simple default
+ * values for the fbdev info structure.
+ *
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
@@ -1330,9 +1499,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(fb_helper->dev);
-
drm_fb_helper_parse_command_line(fb_helper);
count = drm_fb_helper_probe_connector_modes(fb_helper,
@@ -1355,12 +1521,17 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
- * LOCKING:
- * Called at runtime, must take mode config lock.
- *
* Scan the connectors attached to the fb_helper and try to put together a
 * setup after notification of a change in output configuration.
*
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
@@ -1369,23 +1540,14 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
- int bound = 0, crtcs_bound = 0;
- struct drm_crtc *crtc;
if (!fb_helper->fb)
return 0;
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb)
- crtcs_bound++;
- if (crtc->fb == fb_helper->fb)
- bound++;
- }
-
- if (bound < crtcs_bound) {
+ drm_modeset_lock_all(dev);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1397,9 +1559,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_setup_crtcs(fb_helper);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ drm_fb_helper_set_par(fb_helper->fbdev);
+
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
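
The three-step init sequence spelled out in the new kerneldoc above, as a compact sketch; error handling is trimmed and all names other than the drm_fb_helper_* calls are illustrative, not taken from this patch:

#include <linux/slab.h>
#include <drm/drm_fb_helper.h>

struct example_fbdev {
        struct drm_fb_helper helper;
        /* driver-private framebuffer state would live here */
};

/* Implemented by the driver; allocates the backing fb and fbdev info. */
static int example_fb_probe(struct drm_fb_helper *helper,
                            struct drm_fb_helper_surface_size *sizes);

static struct drm_fb_helper_funcs example_fb_helper_funcs = {
        .fb_probe = example_fb_probe,
};

static int example_fbdev_init(struct drm_device *dev)
{
        struct example_fbdev *fbdev;
        int ret;

        fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
        if (!fbdev)
                return -ENOMEM;

        /* ->funcs (with ->fb_probe) must be set before initial_config. */
        fbdev->helper.funcs = &example_fb_helper_funcs;

        /* Step 1: allocate the helper structures. */
        ret = drm_fb_helper_init(dev, &fbdev->helper,
                                 dev->mode_config.num_crtc,
                                 dev->mode_config.num_connector);
        if (ret) {
                kfree(fbdev);
                return ret;
        }

        /* Step 2: attach connectors (drivers may do something fancier here). */
        drm_fb_helper_single_add_all_connectors(&fbdev->helper);

        /* Step 3: probe modes, call ->fb_probe and register the fbdev. */
        drm_fb_helper_initial_config(&fbdev->helper, 32);

        return 0;
}

Teardown would call drm_fb_helper_fini() and release the driver's framebuffer, mirroring what drm_fbdev_cma_fini() does above.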
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 133b4132983e..13fdcd10a605 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -276,6 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 24efae464e2c..af779ae19ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
int ret;
/*
- * Get the user-visible handle using idr.
+ * Get the user-visible handle using idr. Preload and perform
+ * allocation under our spinlock.
*/
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- /* do the allocation under our spinlock */
+ idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+
+ ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ idr_preload_end();
+ if (ret < 0)
return ret;
+ *handlep = ret;
drm_gem_object_handle_reference(obj);
@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
if (!obj->name) {
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
+ ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+ obj->name = ret;
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
+ idr_preload_end();
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ if (ret < 0)
goto err;
+ ret = 0;
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
} else {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
+ idr_preload_end();
ret = 0;
}
@@ -561,8 +555,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);
-
- idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
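
The idr conversion above follows the new preload/alloc pattern that replaces idr_pre_get()/idr_get_new_above(); in isolation it looks like the sketch below (the idr and lock are illustrative parameters, not names from this patch):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Allocate an id >= 1 for 'ptr' while holding a spinlock. The GFP_KERNEL
 * allocation happens up front in idr_preload(), so only GFP_NOWAIT is
 * needed inside the critical section and no retry loop is required. */
static int example_alloc_handle(struct idr *idr, spinlock_t *lock, void *ptr)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(lock);
        ret = idr_alloc(idr, ptr, 1, 0, GFP_NOWAIT); /* end == 0: no upper bound */
        spin_unlock(lock);
        idr_preload_end();

        return ret;     /* negative errno on failure, otherwise the new id */
}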
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1aa8fee1e865..0a7e011509bd 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -249,3 +249,24 @@ int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
return drm_gem_handle_delete(file_priv, handle);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+{
+ struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_device *dev = obj->dev;
+ uint64_t off = 0;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (obj->map_list.map)
+ off = (uint64_t)obj->map_list.hash.key;
+
+ seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
+ obj->name, obj->refcount.refcount.counter,
+ off, cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+ seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 80254547a3f8..7e4bae760e27 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head)
+ hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
- return list;
+ return &entry->head;
if (entry->key > key)
break;
}
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
- return list;
+ return &entry->head;
if (entry->key > key)
break;
}
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
- struct hlist_node *list, *parent;
+ struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
- parent = list;
+ parent = &entry->head;
}
if (parent) {
hlist_add_after_rcu(parent, &item->head);
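
The drm_hashtab changes track the hlist_for_each_entry() API change in which the separate struct hlist_node cursor was dropped; the before/after shape, on an illustrative structure:

#include <linux/list.h>

struct example_item {
        unsigned long key;
        struct hlist_node head;
};

static struct example_item *example_lookup(struct hlist_head *bucket,
                                           unsigned long key)
{
        struct example_item *entry;

        /* Old form was hlist_for_each_entry(entry, pos, bucket, head) with a
         * struct hlist_node *pos cursor; the entry pointer is the cursor now. */
        hlist_for_each_entry(entry, bucket, head) {
                if (entry->key == key)
                        return entry;
        }
        return NULL;
}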
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 19c01ca3cc76..a6a8643a6a77 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -505,6 +505,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Valid dotclock? */
if (dotclock > 0) {
+ int frame_size;
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
@@ -512,7 +513,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
- framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ frame_size = crtc->hwmode.crtc_htotal *
+ crtc->hwmode.crtc_vtotal;
+ framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
+ dotclock);
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
@@ -863,6 +867,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
now = get_drm_timestamp();
}
+ e->pipe = crtc;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1218,8 +1223,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret;
unsigned int flags, seq, crtc, high_crtc;
- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
- return -EINVAL;
+ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+ return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
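
For a sense of what the framedur_ns change above buys, take the 1920x1080@60 CEA timings from the table earlier in this patch (htotal 2200, vtotal 1125, 148.5 MHz dot clock) as a worked example: the old expression vtotal * linedur_ns evaluates to 1125 * floor(2200 * 10^9 / 148500000) = 1125 * 14814 = 16665750 ns, roughly 0.9 us short of the exact 16666666.67 ns frame because the per-line truncation is multiplied by vtotal. Dividing htotal * vtotal * 10^9 by the dot clock in one step gives 16666666 ns, within 1 ns of the exact value.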
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2aa331499f81..db1e2d6f90d7 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
- return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
- struct drm_mm_node *next_node =
- list_entry(hole_node->node_list.next, struct drm_mm_node,
- node_list);
-
- return next_node->start;
-}
-
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
- BUG_ON(!hole_node->hole_follows || node->allocated);
+ BUG_ON(node->allocated);
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
}
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic)
+{
+ struct drm_mm_node *hole, *node;
+ unsigned long end = start + size;
+ unsigned long hole_start;
+ unsigned long hole_end;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > start || hole_end < end)
+ continue;
+
+ node = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ node->start = start;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list);
+
+ if (start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ }
+
+ return node;
+ }
+
+ WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
@@ -253,7 +284,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(node->start + node->size > end);
node->hole_follows = 0;
- if (node->start + node->size < hole_end) {
+ if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
@@ -327,12 +358,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
if (node->hole_follows) {
- BUG_ON(drm_mm_hole_node_start(node)
- == drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
- BUG_ON(drm_mm_hole_node_start(node)
- != drm_mm_hole_node_end(node));
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
+
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
@@ -390,6 +422,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -397,17 +431,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
- BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -434,6 +464,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
@@ -441,13 +473,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
- unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
- start : drm_mm_hole_node_start(entry);
- unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
- end : drm_mm_hole_node_end(entry);
-
- BUG_ON(!entry->hole_follows);
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
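
The new drm_mm_create_block() above reserves a caller-chosen range rather than searching for a free one. A hedged sketch of a caller (function and variable names are illustrative), e.g. to carve out an address range already claimed by a firmware framebuffer:

#include <linux/errno.h>
#include <drm/drm_mm.h>

static int example_reserve_range(struct drm_mm *mm, unsigned long start,
                                 unsigned long size,
                                 struct drm_mm_node **node_out)
{
        struct drm_mm_node *node;

        /* Returns NULL (and WARNs) if no hole fully covers [start, start + size). */
        node = drm_mm_create_block(mm, start, size, false);
        if (!node)
                return -ENOSPC;

        *node_out = node;
        return 0;
}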
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d8da30e90db5..04fa6f1808d1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -35,6 +35,8 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
/**
* drm_mode_debug_printmodeline - debug print a mode
@@ -504,6 +506,74 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
}
EXPORT_SYMBOL(drm_gtf_mode);
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
+{
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_set_name(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes, start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
+#endif
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
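
A hedged usage sketch for the new helper, assuming a connector's get_modes() hook with drm_dev, connector and a panel device_node np already in scope (none of these names come from the patch itself):

	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_create(drm_dev);
	if (!mode)
		return 0;

	ret = of_get_drm_display_mode(np, mode, 0);
	if (ret) {
		drm_mode_destroy(drm_dev, mode);
		return 0;
	}

	drm_mode_probed_add(connector, mode);
	return 1;	/* one mode reported */
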
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 754bc96e10c7..bd719e936e13 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -439,78 +439,67 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return 0;
}
-#else
-
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
-#endif
-
-EXPORT_SYMBOL(drm_pci_init);
-
-/*@}*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(pdriver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_pci_exit);
-
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
- int pos;
- u32 lnkcap = 0, lnkcap2 = 0;
+ u32 lnkcap, lnkcap2;
*mask = 0;
if (!dev->pdev)
return -EINVAL;
- if (!pci_is_pcie(dev->pdev))
- return -EINVAL;
-
root = dev->pdev->bus->self;
- pos = pci_pcie_cap(root);
- if (!pos)
- return -EINVAL;
-
/* we've been informed that VIA and ServerWorks bridges don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
- pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
- lnkcap &= PCI_EXP_LNKCAP_SLS;
- lnkcap2 &= 0xfe;
-
- if (lnkcap2) { /* PCIE GEN 3.0 */
+ if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
- } else {
- if (lnkcap & 1)
+ } else { /* pre-r3.0 */
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & 2)
- *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ return -1;
+}
+
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
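
For reference, a sketch of how a driver might consume the speed mask computed above (the surrounding variables are assumptions, not part of this patch):

	u32 mask;
	int ret;

	ret = drm_pcie_get_speed_cap_mask(dev, &mask);
	if (ret)
		return;			/* no PCIe caps available */

	if (mask & DRM_PCIE_SPEED_80)
		/* request a gen3 link */;
	else if (mask & DRM_PCIE_SPEED_50)
		/* fall back to gen2 */;
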
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f125738f44e..366910ddcfcb 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -53,7 +53,8 @@
* Self-importing: if userspace is using PRIME as a replacement for flink
* then it will get a fd->handle request for a GEM object that it created.
* Drivers should detect this situation and return back the gem object
- * from the dma-buf private.
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
*/
struct drm_prime_member {
@@ -62,6 +63,137 @@ struct drm_prime_member {
uint32_t handle;
};
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!IS_ERR_OR_NULL(sgt))
+ dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ mutex_unlock(&obj->dev->struct_mutex);
+ return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ if (obj->export_dma_buf == dma_buf) {
+ /* drop the reference on the export fd holds */
+ obj->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+}
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .kmap = drm_gem_dmabuf_kmap,
+ .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+ .kunmap = drm_gem_dmabuf_kunmap,
+ .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
+ * five lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * driver's scatter/gather table
+ */
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ if (dev->driver->gem_prime_pin) {
+ int ret = dev->driver->gem_prime_pin(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+ 0600);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!dev->driver->gem_prime_import_sg_table)
+ return ERR_PTR(-EINVAL);
+
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own gem increases
+ * the refcount on the gem itself instead of the f_count of the dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ dma_buf_put(dma_buf);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
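
To illustrate the DOC comment above, a sketch of how a driver could wire the new helpers into its struct drm_driver; the foo_* callbacks are hypothetical driver functions, not defined by this patch:

	static struct drm_driver foo_driver = {
		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
		.gem_prime_export	   = drm_gem_prime_export,
		.gem_prime_import	   = drm_gem_prime_import,
		.gem_prime_pin		   = foo_gem_prime_pin,		/* optional */
		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
		.gem_prime_vmap		   = foo_gem_prime_vmap,
		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
		/* ... the usual fops, ioctls and driver callbacks ... */
	};
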
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 200e104f1fa0..7d30802a018f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
static int drm_minor_get_id(struct drm_device *dev, int type)
{
- int new_id;
int ret;
int base = 0, limit = 63;
@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
limit = base + 255;
}
-again:
- if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&drm_minors_idr, NULL,
- base, &new_id);
+ ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ret;
- if (new_id >= limit) {
- idr_remove(&drm_minors_idr, new_id);
- return -EINVAL;
- }
- return new_id;
+ return ret == -ENOSPC ? -EINVAL : ret;
}
struct drm_master *drm_master_create(struct drm_minor *minor)
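
The conversion above follows the new single-call idr allocation API, which removes the idr_pre_get()/retry dance. A general sketch of the pattern (names are placeholders):

	int id;

	mutex_lock(&lock);
	id = idr_alloc(&some_idr, ptr, start, end, GFP_KERNEL);
	mutex_unlock(&lock);
	if (id < 0)
		return id;	/* -ENOMEM, or -ENOSPC when the range is full */
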
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 3cec30611417..34a156f0c336 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
- dev->dev = &usbdev->dev;
+ dev->dev = &interface->dev;
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 294c0513f587..0e04f4ea441f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -99,6 +99,10 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* This fb should have only one gem object. */
+ if (WARN_ON(exynos_fb->buf_cnt != 1))
+ return -EINVAL;
+
return drm_gem_handle_create(file_priv,
&exynos_fb->exynos_gem_obj[0]->base, handle);
}
@@ -217,23 +221,25 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_fb *exynos_fb;
int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- return ERR_PTR(-ENOENT);
- }
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
@@ -241,43 +247,44 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *exynos_gem_obj;
- int ret;
-
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- kfree(exynos_fb);
- return ERR_PTR(-ENOENT);
+ ret = -ENOENT;
+ exynos_fb->buf_cnt = i;
+ goto err_unreference;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
+ exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ goto err_unreference;
}
-
- exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- for (i = 0; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *gem_obj;
-
- gem_obj = exynos_fb->exynos_gem_obj[i];
- drm_gem_object_unreference_unlocked(&gem_obj->base);
- }
-
- kfree(exynos_fb);
- return ERR_PTR(ret);
+ DRM_ERROR("failed to init framebuffer.\n");
+ goto err_unreference;
}
return &exynos_fb->fb;
+
+err_unreference:
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ if (gem_obj)
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+err_free:
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 71f867340a88..68f0045f86b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -226,36 +226,8 @@ out:
return ret;
}
-static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * with !helper->fb, it means that this funcion is called first time
- * and after that, the helper->fb would be used as clone mode.
- */
- if (!helper->fb) {
- ret = exynos_drm_fbdev_create(helper, sizes);
- if (ret < 0) {
- DRM_ERROR("failed to create fbdev.\n");
- return ret;
- }
-
- /*
- * fb_helper expects a value more than 1 if succeed
- * because register_framebuffer() should be called.
- */
- ret = 1;
- }
-
- return ret;
-}
-
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
- .fb_probe = exynos_drm_fbdev_probe,
+ .fb_probe = exynos_drm_fbdev_create,
};
int exynos_drm_fbdev_init(struct drm_device *dev)
@@ -295,6 +267,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
}
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_ERROR("failed to set up hw configuration.\n");
@@ -326,8 +301,10 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
/* release linux framebuffer */
@@ -374,5 +351,7 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
+ drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fb2f81b8063d..3b0da0378acf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
+#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -429,7 +430,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->pages = pages;
- sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
@@ -1239,6 +1240,14 @@ static int g2d_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_g2d_match[] = {
+ { .compatible = "samsung,exynos5250-g2d" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
+#endif
+
struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = g2d_remove,
@@ -1246,5 +1255,6 @@ struct platform_driver g2d_driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
+ .of_match_table = of_match_ptr(exynos_g2d_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 473180776528..67e17ce112b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -329,17 +329,11 @@ static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
{
struct drm_file *file_priv;
- mutex_lock(&drm_dev->struct_mutex);
-
/* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
- if (file_priv->filp == filp) {
- mutex_unlock(&drm_dev->struct_mutex);
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
+ if (file_priv->filp == filp)
return file_priv;
- }
- }
- mutex_unlock(&drm_dev->struct_mutex);
WARN_ON(1);
return ERR_PTR(-EFAULT);
@@ -400,9 +394,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
*/
drm_gem_object_reference(obj);
- mutex_lock(&drm_dev->struct_mutex);
drm_vm_open_locked(drm_dev, vma);
- mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -432,6 +424,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
/*
+ * We have to use the gem object and its fops for the specific mmapper,
+ * but vm_mmap() can deliver only filp. So we have to change
+ * filp->f_op and filp->private_data temporarily, then restore
+ * them again. It is therefore important to hold the lock until the
+ * settings are restored, to prevent others from misusing filp->f_op
+ * or filp->private_data.
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /*
* Set the specific mmapper's fops. It will be restored by
* exynos_drm_gem_mmap_buffer to dev->driver->fops.
* This is used to call specific mapper temporarily.
@@ -448,13 +450,20 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
if (IS_ERR((void *)addr)) {
- file_priv->filp->private_data = file_priv;
+ /* check filp->f_op, filp->private_data are restored */
+ if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
+ file_priv->filp->f_op = fops_get(dev->driver->fops);
+ file_priv->filp->private_data = file_priv;
+ }
+ mutex_unlock(&dev->struct_mutex);
return PTR_ERR((void *)addr);
}
+ mutex_unlock(&dev->struct_mutex);
+
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 28644539b305..7c27df03c9ff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -124,9 +124,21 @@ static struct edid *drm_hdmi_get_edid(struct device *dev,
static int drm_hdmi_check_timing(struct device *dev, void *timing)
{
struct drm_hdmi_context *ctx = to_context(dev);
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /*
+ * Both the mixer and the hdmi block should be able to handle the
+ * requested mode. If either of them fails, reject the mode as bad.
+ */
+
+ if (mixer_ops && mixer_ops->check_timing)
+ ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);
+
+ if (ret)
+ return ret;
+
if (hdmi_ops && hdmi_ops->check_timing)
return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index d80516fc9ed7..b7faa3662307 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -32,7 +32,7 @@ struct exynos_hdmi_ops {
bool (*is_connected)(void *ctx);
struct edid *(*get_edid)(void *ctx,
struct drm_connector *connector);
- int (*check_timing)(void *ctx, void *timing);
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
int (*power_on)(void *ctx, int mode);
/* manager */
@@ -58,6 +58,9 @@ struct exynos_mixer_ops {
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
+
+ /* display */
+ int (*check_timing)(void *ctx, struct fb_videomode *timing);
};
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 53b7deea8ab7..598e60f57d4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,7 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x4
+#define EXYNOS_DEV_ADDR_ORDER 0x0
#ifdef CONFIG_DRM_EXYNOS_IOMMU
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 1a556354e92f..1adce07ecb5b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -137,21 +137,15 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
DRM_DEBUG_KMS("%s\n", __func__);
-again:
- /* ensure there is space available to allocate a handle */
- if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("failed to get idr.\n");
- return -ENOMEM;
- }
-
/* do the allocation under our mutexlock */
mutex_lock(lock);
- ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
mutex_unlock(lock);
- if (ret == -EAGAIN)
- goto again;
+ if (ret < 0)
+ return ret;
- return ret;
+ *idp = ret;
+ return 0;
}
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -1786,8 +1780,6 @@ err_iommu:
drm_iommu_detach_device(drm_dev, ippdrv->dev);
err_idr:
- idr_remove_all(&ctx->ipp_idr);
- idr_remove_all(&ctx->prop_idr);
idr_destroy(&ctx->ipp_idr);
idr_destroy(&ctx->prop_idr);
return ret;
@@ -1965,8 +1957,6 @@ static int ipp_remove(struct platform_device *pdev)
exynos_drm_subdrv_unregister(&ctx->subdrv);
/* remove,destroy ipp idr */
- idr_remove_all(&ctx->ipp_idr);
- idr_remove_all(&ctx->prop_idr);
idr_destroy(&ctx->ipp_idr);
idr_destroy(&ctx->prop_idr);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 233247505ff8..2c5f266154ad 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -87,6 +87,73 @@ struct hdmi_resources {
int regul_count;
};
+struct hdmi_tg_regs {
+ u8 cmd[1];
+ u8 h_fsz[2];
+ u8 hact_st[2];
+ u8 hact_sz[2];
+ u8 v_fsz[2];
+ u8 vsync[2];
+ u8 vsync2[2];
+ u8 vact_st[2];
+ u8 vact_sz[2];
+ u8 field_chg[2];
+ u8 vact_st2[2];
+ u8 vact_st3[2];
+ u8 vact_st4[2];
+ u8 vsync_top_hdmi[2];
+ u8 vsync_bot_hdmi[2];
+ u8 field_top_hdmi[2];
+ u8 field_bot_hdmi[2];
+ u8 tg_3d[1];
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v2_blank[2];
+ u8 v1_blank[2];
+ u8 v_line[2];
+ u8 h_line[2];
+ u8 hsync_pol[1];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f0[2];
+ u8 v_blank_f1[2];
+ u8 h_sync_start[2];
+ u8 h_sync_end[2];
+ u8 v_sync_line_bef_2[2];
+ u8 v_sync_line_bef_1[2];
+ u8 v_sync_line_aft_2[2];
+ u8 v_sync_line_aft_1[2];
+ u8 v_sync_line_aft_pxl_2[2];
+ u8 v_sync_line_aft_pxl_1[2];
+ u8 v_blank_f2[2]; /* for 3D mode */
+ u8 v_blank_f3[2]; /* for 3D mode */
+ u8 v_blank_f4[2]; /* for 3D mode */
+ u8 v_blank_f5[2]; /* for 3D mode */
+ u8 v_sync_line_aft_3[2];
+ u8 v_sync_line_aft_4[2];
+ u8 v_sync_line_aft_5[2];
+ u8 v_sync_line_aft_6[2];
+ u8 v_sync_line_aft_pxl_3[2];
+ u8 v_sync_line_aft_pxl_4[2];
+ u8 v_sync_line_aft_pxl_5[2];
+ u8 v_sync_line_aft_pxl_6[2];
+ u8 vact_space_1[2];
+ u8 vact_space_2[2];
+ u8 vact_space_3[2];
+ u8 vact_space_4[2];
+ u8 vact_space_5[2];
+ u8 vact_space_6[2];
+};
+
+struct hdmi_v14_conf {
+ int pixel_clock;
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+ int cea_video_id;
+};
+
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -104,6 +171,7 @@ struct hdmi_context {
/* current hdmiphy conf index */
int cur_conf;
+ struct hdmi_v14_conf mode_conf;
struct hdmi_resources res;
@@ -392,586 +460,132 @@ static const struct hdmi_v13_conf hdmi_v13_confs[] = {
};
/* HDMI Version 1.4 */
-static const u8 hdmiphy_conf27_027[32] = {
- 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
- 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
- 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_176[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
- 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf74_25[32] = {
- 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
-};
-
-static const u8 hdmiphy_conf148_5[32] = {
- 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
- 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
-};
-
-struct hdmi_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vact_st3_l;
- u8 vact_st3_h;
- u8 vact_st4_l;
- u8 vact_st4_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
- u8 tg_3d;
-};
-
-struct hdmi_core_regs {
- u8 h_blank[2];
- u8 v2_blank[2];
- u8 v1_blank[2];
- u8 v_line[2];
- u8 h_line[2];
- u8 hsync_pol[1];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f0[2];
- u8 v_blank_f1[2];
- u8 h_sync_start[2];
- u8 h_sync_end[2];
- u8 v_sync_line_bef_2[2];
- u8 v_sync_line_bef_1[2];
- u8 v_sync_line_aft_2[2];
- u8 v_sync_line_aft_1[2];
- u8 v_sync_line_aft_pxl_2[2];
- u8 v_sync_line_aft_pxl_1[2];
- u8 v_blank_f2[2]; /* for 3D mode */
- u8 v_blank_f3[2]; /* for 3D mode */
- u8 v_blank_f4[2]; /* for 3D mode */
- u8 v_blank_f5[2]; /* for 3D mode */
- u8 v_sync_line_aft_3[2];
- u8 v_sync_line_aft_4[2];
- u8 v_sync_line_aft_5[2];
- u8 v_sync_line_aft_6[2];
- u8 v_sync_line_aft_pxl_3[2];
- u8 v_sync_line_aft_pxl_4[2];
- u8 v_sync_line_aft_pxl_5[2];
- u8 v_sync_line_aft_pxl_6[2];
- u8 vact_space_1[2];
- u8 vact_space_2[2];
- u8 vact_space_3[2];
- u8 vact_space_4[2];
- u8 vact_space_5[2];
- u8 vact_space_6[2];
-};
-
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf {
- int width;
- int height;
- int vrefresh;
- bool interlace;
- int cea_video_id;
- const u8 *hdmiphy_data;
- const struct hdmi_preset_conf *conf;
-};
-
-static const struct hdmi_preset_conf hdmi_conf_480p60 = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v2_blank = {0x0d, 0x02},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x0d, 0x02},
- .h_line = {0x5a, 0x03},
- .hsync_pol = {0x01},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x00},
- .h_sync_end = {0x4c, 0x00},
- .v_sync_line_bef_2 = {0x0f, 0x00},
- .v_sync_line_bef_1 = {0x09, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
- },
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
};
-static const struct hdmi_preset_conf hdmi_conf_720p50 = {
- .core = {
- .h_blank = {0xbc, 0x02},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0xbc, 0x07},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0xb6, 0x01},
- .h_sync_end = {0xde, 0x01},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0xbc, 0x07, /* h_fsz */
- 0xbc, 0x02, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+/* list of all required phy config settings */
+static const struct hdmiphy_config hdmiphy_v14_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v2_blank = {0xee, 0x02},
- .v1_blank = {0x1e, 0x00},
- .v_line = {0xee, 0x02},
- .h_line = {0x72, 0x06},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x6c, 0x00},
- .h_sync_end = {0x94, 0x00},
- .v_sync_line_bef_2 = {0x0a, 0x00},
- .v_sync_line_bef_1 = {0x05, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
+ 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x72, 0x01, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0x38, 0x07},
- .v_sync_line_aft_pxl_1 = {0x38, 0x07},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x32, 0x02},
- .v1_blank = {0x16, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x01},
- .v_blank_f0 = {0x49, 0x02},
- .v_blank_f1 = {0x65, 0x04},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x07, 0x00},
- .v_sync_line_bef_1 = {0x02, 0x00},
- .v_sync_line_aft_2 = {0x39, 0x02},
- .v_sync_line_aft_1 = {0x34, 0x02},
- .v_sync_line_aft_pxl_2 = {0xa4, 0x04},
- .v_sync_line_aft_pxl_1 = {0xa4, 0x04},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
+ 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x16, 0x00, 0x1c, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
+ 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
+ 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x50, 0x0a},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x0e, 0x02},
- .h_sync_end = {0x3a, 0x02},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- .vact_space_1 = {0xff, 0xff},
- .vact_space_2 = {0xff, 0xff},
- .vact_space_3 = {0xff, 0xff},
- .vact_space_4 = {0xff, 0xff},
- .vact_space_5 = {0xff, 0xff},
- .vact_space_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
+ 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x50, 0x0a, /* h_fsz */
- 0xd0, 0x02, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
-};
-
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v2_blank = {0x65, 0x04},
- .v1_blank = {0x2d, 0x00},
- .v_line = {0x65, 0x04},
- .h_line = {0x98, 0x08},
- .hsync_pol = {0x00},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f0 = {0xff, 0xff},
- .v_blank_f1 = {0xff, 0xff},
- .h_sync_start = {0x56, 0x00},
- .h_sync_end = {0x82, 0x00},
- .v_sync_line_bef_2 = {0x09, 0x00},
- .v_sync_line_bef_1 = {0x04, 0x00},
- .v_sync_line_aft_2 = {0xff, 0xff},
- .v_sync_line_aft_1 = {0xff, 0xff},
- .v_sync_line_aft_pxl_2 = {0xff, 0xff},
- .v_sync_line_aft_pxl_1 = {0xff, 0xff},
- .v_blank_f2 = {0xff, 0xff},
- .v_blank_f3 = {0xff, 0xff},
- .v_blank_f4 = {0xff, 0xff},
- .v_blank_f5 = {0xff, 0xff},
- .v_sync_line_aft_3 = {0xff, 0xff},
- .v_sync_line_aft_4 = {0xff, 0xff},
- .v_sync_line_aft_5 = {0xff, 0xff},
- .v_sync_line_aft_6 = {0xff, 0xff},
- .v_sync_line_aft_pxl_3 = {0xff, 0xff},
- .v_sync_line_aft_pxl_4 = {0xff, 0xff},
- .v_sync_line_aft_pxl_5 = {0xff, 0xff},
- .v_sync_line_aft_pxl_6 = {0xff, 0xff},
- /* other don't care */
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
},
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x00, 0x00, /* vact_st3 */
- 0x00, 0x00, /* vact_st4 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- 0x00, /* 3d FP */
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
},
};
-static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
-};
-
struct hdmi_infoframe {
enum HDMI_PACKET_TYPE type;
u8 ver;
@@ -1275,31 +889,6 @@ static int hdmi_v13_conf_index(struct drm_display_mode *mode)
return -EINVAL;
}
-static int hdmi_v14_conf_index(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
- if (hdmi_confs[i].width == mode->hdisplay &&
- hdmi_confs[i].height == mode->vdisplay &&
- hdmi_confs[i].vrefresh == mode->vrefresh &&
- hdmi_confs[i].interlace ==
- ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
- true : false))
- return i;
-
- return -EINVAL;
-}
-
-static int hdmi_conf_index(struct hdmi_context *hdata,
- struct drm_display_mode *mode)
-{
- if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_conf_index(mode);
-
- return hdmi_v14_conf_index(mode);
-}
-
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
@@ -1357,7 +946,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
if (hdata->type == HDMI_TYPE13)
vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
else
- vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+ vic = hdata->mode_conf.cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
@@ -1434,44 +1023,51 @@ static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
return -EINVAL;
}
+static int hdmi_v14_find_phy_conf(int pixel_clock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
+ if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
+ return i;
+ }
+
+ DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ return -EINVAL;
+}
+
static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
{
int i;
- DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
check_timing->xres, check_timing->yres,
- check_timing->refresh, (check_timing->vmode &
- FB_VMODE_INTERLACED) ? true : false);
+ check_timing->refresh, check_timing->pixclock,
+ (check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false);
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++)
- if (hdmi_confs[i].width == check_timing->xres &&
- hdmi_confs[i].height == check_timing->yres &&
- hdmi_confs[i].vrefresh == check_timing->refresh &&
- hdmi_confs[i].interlace ==
- ((check_timing->vmode & FB_VMODE_INTERLACED) ?
- true : false))
- return 0;
-
- /* TODO */
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
+ if (hdmiphy_v14_configs[i].pixel_clock ==
+ check_timing->pixclock)
+ return 0;
return -EINVAL;
}
-static int hdmi_check_timing(void *ctx, void *timing)
+static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
{
struct hdmi_context *hdata = ctx;
- struct fb_videomode *check_timing = timing;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
- check_timing->yres, check_timing->refresh,
- check_timing->vmode);
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres,
+ timing->yres, timing->refresh,
+ timing->vmode);
if (hdata->type == HDMI_TYPE13)
- return hdmi_v13_check_timing(check_timing);
+ return hdmi_v13_check_timing(timing);
else
- return hdmi_v14_check_timing(check_timing);
+ return hdmi_v14_check_timing(timing);
}
static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1795,9 +1391,8 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
{
- const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf;
- const struct hdmi_core_regs *core = &conf->core;
- const struct hdmi_tg_regs *tg = &conf->tg;
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
int tries;
/* setting core registers */
@@ -1900,39 +1495,39 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
- hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -2029,10 +1624,17 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->type == HDMI_TYPE13) {
hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
- else
- hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
+ } else {
+ i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
+ if (i < 0) {
+ DRM_ERROR("failed to find hdmiphy conf\n");
+ return;
+ }
+
+ hdmiphy_data = hdmiphy_v14_configs[i].conf;
+ }
memcpy(buffer, hdmiphy_data, 32);
ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -2100,7 +1702,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(adjusted_mode);
else
- index = hdmi_v14_conf_index(adjusted_mode);
+ index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
/* just return if user desired mode exists. */
if (index >= 0)
@@ -2114,7 +1716,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(m);
else
- index = hdmi_v14_conf_index(m);
+ index = hdmi_v14_find_phy_conf(m->clock * 1000);
if (index >= 0) {
struct drm_mode_object base;
@@ -2123,6 +1725,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+ DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+
/* preserve display mode header while copying. */
head = adjusted_mode->head;
base = adjusted_mode->base;
@@ -2134,6 +1739,122 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
}
}
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+{
+ int i;
+ BUG_ON(num_bytes > 4);
+ for (i = 0; i < num_bytes; i++)
+ reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
+
+static void hdmi_v14_mode_set(struct hdmi_context *hdata,
+ struct drm_display_mode *m)
+{
+ struct hdmi_core_regs *core = &hdata->mode_conf.core;
+ struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
+
+ hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
+
+ hdata->mode_conf.pixel_clock = m->clock * 1000;
+ hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(core->v_line, 2, m->vtotal);
+ hdmi_set_reg(core->h_line, 2, m->htotal);
+ hdmi_set_reg(core->hsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_set_reg(core->vsync_pol, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_set_reg(core->int_pro_mode, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+ /*
+ * Quirk requirement for exynos 5 HDMI IP design,
+ * 2 pixels less than the actual calculation for hsync_start
+ * and end.
+ */
+
+ /* The following values and calculations differ between interlaced and progressive modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
+ hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
+ hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+ hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+ hdmi_set_reg(tg->vact_st3, 2, 0x0);
+ hdmi_set_reg(tg->vact_st4, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_set_reg(core->v_sync_line_bef_2, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_set_reg(core->v_sync_line_bef_1, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_set_reg(core->v2_blank, 2, m->vtotal);
+ hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
+ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+ hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+ hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+ hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
+ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
+ }
+
+ /* Following values & calculations are same irrespective of mode type */
+ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
+ hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
+ hdmi_set_reg(core->vact_space_1, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_2, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_3, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_4, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_5, 2, 0xffff);
+ hdmi_set_reg(core->vact_space_6, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
+ hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
+ hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
+
+ /* Timing generator registers */
+ hdmi_set_reg(tg->cmd, 1, 0x0);
+ hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+ hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+ hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+ hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+ hdmi_set_reg(tg->vsync, 2, 0x1);
+ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+ hdmi_set_reg(tg->tg_3d, 1, 0x0);
+
+}
+
static void hdmi_mode_set(void *ctx, void *mode)
{
struct hdmi_context *hdata = ctx;
@@ -2141,11 +1862,15 @@ static void hdmi_mode_set(void *ctx, void *mode)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- conf_idx = hdmi_conf_index(hdata, mode);
- if (conf_idx >= 0)
- hdata->cur_conf = conf_idx;
- else
- DRM_DEBUG_KMS("not supported mode\n");
+ if (hdata->type == HDMI_TYPE13) {
+ conf_idx = hdmi_v13_conf_index(mode);
+ if (conf_idx >= 0)
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+ } else {
+ hdmi_v14_mode_set(hdata, mode);
+ }
}
static void hdmi_get_max_resol(void *ctx, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c414584bfbae..e919aba29b3d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -284,13 +284,13 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
MXR_CFG_SCAN_PROGRASSIVE);
 	/* choosing between proper HD and SD mode */
- if (height == 480)
+ if (height <= 480)
val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (height == 576)
+ else if (height <= 576)
val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (height == 720)
+ else if (height <= 720)
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (height == 1080)
+ else if (height <= 1080)
val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
else
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
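This hunk relaxes the scan-mode selection from exact CEA heights to ranges, so in-between vertical resolutions still pick the nearest standard class instead of falling through to the default. A standalone sketch of the resulting classification, with illustrative labels standing in for the MXR_CFG_* register bits:

#include <stdio.h>

/* Illustrative labels only; the driver ORs MXR_CFG_* register bits instead. */
static const char *scan_class(unsigned int height)
{
	if (height <= 480)
		return "NTSC SD";
	else if (height <= 576)
		return "PAL SD";
	else if (height <= 720)
		return "HD 720";
	else if (height <= 1080)
		return "HD 1080";
	return "HD 720 (fallback)";
}

int main(void)
{
	unsigned int heights[] = { 480, 540, 600, 720, 768, 1080, 1200 };

	for (unsigned int i = 0; i < sizeof(heights) / sizeof(heights[0]); i++)
		printf("%4u -> %s\n", heights[i], scan_class(heights[i]));
	return 0;
}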
@@ -818,6 +818,29 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
+int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ u32 w, h;
+
+ w = timing->xres;
+ h = timing->yres;
+
+ DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ __func__, timing->xres, timing->yres,
+ timing->refresh, (timing->vmode &
+ FB_VMODE_INTERLACED) ? true : false);
+
+ if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
+ return 0;
+
+ if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return 0;
+
+ return -EINVAL;
+}
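mixer_check_timing() only accepts timings whose active size lands in one of three windows (roughly SD, 720p-class and 1080p-class). A userspace mirror of that check, handy for sanity-testing a mode list; the window bounds are copied from the function above:

#include <stdio.h>

/* Mirrors the three resolution windows accepted by mixer_check_timing(). */
static int timing_ok(unsigned int w, unsigned int h)
{
	return (w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
	       (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
	       (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080);
}

int main(void)
{
	printf("720x480:   %s\n", timing_ok(720, 480) ? "ok" : "rejected");
	printf("1280x720:  %s\n", timing_ok(1280, 720) ? "ok" : "rejected");
	printf("1920x1080: %s\n", timing_ok(1920, 1080) ? "ok" : "rejected");
	printf("1024x768:  %s\n", timing_ok(1024, 768) ? "ok" : "rejected"); /* rejected */
	return 0;
}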
static void mixer_wait_for_vblank(void *ctx)
{
struct mixer_context *mixer_ctx = ctx;
@@ -955,6 +978,9 @@ static struct exynos_mixer_ops mixer_ops = {
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
+
+ /* display */
+ .check_timing = mixer_check_timing,
};
static irqreturn_t mixer_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index afded54dbb10..2590cac84257 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -260,13 +260,13 @@ static int psb_framebuffer_init(struct drm_device *dev,
default:
return -EINVAL;
}
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
- fb->gtt = gt;
return 0;
}
@@ -545,9 +545,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- int new_fb = 0;
int bytespp;
- int ret;
bytespp = sizes->surface_bpp / 8;
if (bytespp == 3) /* no 24bit packed */
@@ -562,13 +560,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
sizes->surface_depth = 16;
}
- if (!helper->fb) {
- ret = psbfb_create(psb_fbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
+ return psbfb_create(psb_fbdev, sizes);
}
static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
@@ -590,6 +582,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
framebuffer_release(info);
}
drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
@@ -615,6 +608,10 @@ int psb_fbdev_init(struct drm_device *dev)
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
return 0;
}
@@ -668,30 +665,6 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
- struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = dev_priv->fbdev;
- struct drm_crtc *crtc;
- int reset = 0;
-
- /* Should never get stolen memory for a user fb */
- WARN_ON(r->stolen);
-
- /* Check if we are erroneously live */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- if (crtc->fb == fb)
- reset = 1;
-
- if (reset)
- /*
- * Now force a sane response before we permit the DRM CRTC
- * layer to do stupid things like blank the display. Instead
- * we reset this framebuffer as if the user had forced a reset.
- * We must do this before the cleanup so that the DRM layer
- * doesn't get a chance to stick its oar in where it isn't
- * wanted.
- */
- drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
/* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 04a371aceb34..054e26e769ec 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -202,7 +202,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
WARN_ON(gt->pages);
/* This is the shared memory object that backs the GEM resource */
- inode = gt->gem.filp->f_path.dentry->d_inode;
+ inode = file_inode(gt->gem.filp);
mapping = inode->i_mapping;
gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index b58c4701c4e8..f6f534b4197e 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -194,7 +194,7 @@ static int psb_save_display_registers(struct drm_device *dev)
regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Save crtc and output state */
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->save(crtc);
@@ -204,7 +204,7 @@ static int psb_save_display_registers(struct drm_device *dev)
if (connector->funcs->save)
connector->funcs->save(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
@@ -234,7 +234,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (drm_helper_crtc_in_use(crtc))
crtc->funcs->restore(crtc);
@@ -243,7 +243,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
if (connector->funcs->restore)
connector->funcs->restore(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index dd1fbfa7e467..111e3df9c5de 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -149,6 +149,16 @@ static struct drm_ioctl_desc psb_ioctls[] = {
static void psb_lastclose(struct drm_device *dev)
{
+ int ret;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+ drm_modeset_lock_all(dev);
+ ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+ drm_modeset_unlock_all(dev);
+
return;
}
@@ -476,7 +486,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
case PSB_MODE_OPERATION_MODE_VALID:
umode = &arg->mode;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
@@ -525,7 +535,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
if (mode)
drm_mode_destroy(dev, mode);
mode_op_out:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
default:
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 8033526bb53b..9edb1902a096 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -85,14 +85,14 @@ struct psb_intel_limit_t {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
-#define I9XX_M2_MIN 5
-#define I9XX_M2_MAX 9
+#define I9XX_M1_MIN 8
+#define I9XX_M1_MAX 18
+#define I9XX_M2_MIN 3
+#define I9XX_M2_MAX 7
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 000000000000..4d341db462a2
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
+menu "I2C encoder or helper chips"
+ depends on DRM && DRM_KMS_HELPER && I2C
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ default m if DRM_NOUVEAU
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
+endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 92862563e7ee..43aa33baebed 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -5,3 +5,6 @@ obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
sil164-y := sil164_drv.o
obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b865d0728e28..51fa32392029 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -364,7 +364,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 000000000000..e68b58a1aaf9
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_edid.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct tda998x_priv {
+ struct i2c_client *cec;
+ uint16_t rev;
+ uint8_t current_page;
+ int dpms;
+};
+
+#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+
+/* The TDA9988 series of devices uses a paged register scheme. To simplify
+ * things we encode the page # in the upper bits of the register #. To read/
+ * write a given register, we need to make sure the CURPAGE register is set
+ * appropriately, which implies reads/writes are not atomic. Fun!
+ */
+
+#define REG(page, addr) (((page) << 8) | (addr))
+#define REG2ADDR(reg) ((reg) & 0xff)
+#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
+
+#define REG_CURPAGE 0xff /* write */
+
+
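The REG()/REG2PAGE()/REG2ADDR() macros split a 16-bit software register number into an 8-bit page and an 8-bit address; set_page() later writes the page to CURPAGE before any access. A standalone sketch of the encoding, with the macro bodies copied from the driver and REG_PLL_SERIAL_2 as the sample register:

#include <stdint.h>
#include <stdio.h>

#define REG(page, addr)  (((page) << 8) | (addr))
#define REG2ADDR(reg)    ((reg) & 0xff)
#define REG2PAGE(reg)    (((reg) >> 8) & 0xff)

int main(void)
{
	uint16_t reg = REG(0x02, 0x01); /* REG_PLL_SERIAL_2 */

	/* Before touching it, the driver writes REG2PAGE(reg) to CURPAGE (0xff)
	 * if it differs from the cached current_page, then uses REG2ADDR(reg). */
	printf("reg=0x%04x page=0x%02x addr=0x%02x\n",
	       reg, REG2PAGE(reg), REG2ADDR(reg));
	return 0;
}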
+/* Page 00h: General Control */
+#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
+#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
+# define MAIN_CNTRL0_SR (1 << 0)
+# define MAIN_CNTRL0_DECS (1 << 1)
+# define MAIN_CNTRL0_DEHS (1 << 2)
+# define MAIN_CNTRL0_CECS (1 << 3)
+# define MAIN_CNTRL0_CEHS (1 << 4)
+# define MAIN_CNTRL0_SCALER (1 << 7)
+#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
+#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
+# define SOFTRESET_AUDIO (1 << 0)
+# define SOFTRESET_I2C_MASTER (1 << 1)
+#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
+#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
+#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
+# define I2C_MASTER_DIS_MM (1 << 0)
+# define I2C_MASTER_DIS_FILT (1 << 1)
+# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
+#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
+#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
+# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
+#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
+#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
+#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
+#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
+# define VIP_CNTRL_0_MIRR_A (1 << 7)
+# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
+# define VIP_CNTRL_0_MIRR_B (1 << 3)
+# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
+# define VIP_CNTRL_1_MIRR_C (1 << 7)
+# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
+# define VIP_CNTRL_1_MIRR_D (1 << 3)
+# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
+# define VIP_CNTRL_2_MIRR_E (1 << 7)
+# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
+# define VIP_CNTRL_2_MIRR_F (1 << 3)
+# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
+#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
+# define VIP_CNTRL_3_X_TGL (1 << 0)
+# define VIP_CNTRL_3_H_TGL (1 << 1)
+# define VIP_CNTRL_3_V_TGL (1 << 2)
+# define VIP_CNTRL_3_EMB (1 << 3)
+# define VIP_CNTRL_3_SYNC_DE (1 << 4)
+# define VIP_CNTRL_3_SYNC_HS (1 << 5)
+# define VIP_CNTRL_3_DE_INT (1 << 6)
+# define VIP_CNTRL_3_EDGE (1 << 7)
+#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
+# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
+# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
+# define VIP_CNTRL_4_CCIR656 (1 << 4)
+# define VIP_CNTRL_4_656_ALT (1 << 5)
+# define VIP_CNTRL_4_TST_656 (1 << 6)
+# define VIP_CNTRL_4_TST_PAT (1 << 7)
+#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
+# define VIP_CNTRL_5_CKCASE (1 << 0)
+# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
+# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
+# define MAT_CONTRL_MAT_BP (1 << 2)
+#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
+#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
+#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
+#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
+#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
+#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
+#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
+#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
+#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
+#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
+#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
+#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
+#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
+#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
+#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
+#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
+#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
+#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
+#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
+#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
+#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
+#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
+#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
+#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
+#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
+#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
+#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
+#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
+#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
+#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
+#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
+# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
+# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
+#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
+# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
+# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
+# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
+# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
+# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
+# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
+#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
+#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
+# define HVF_CNTRL_0_SM (1 << 7)
+# define HVF_CNTRL_0_RWB (1 << 6)
+# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
+# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
+#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
+# define HVF_CNTRL_1_FOR (1 << 0)
+# define HVF_CNTRL_1_YUVBLK (1 << 1)
+# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
+# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
+# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
+#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+
+
+/* Page 02h: PLL settings */
+#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
+# define PLL_SERIAL_1_SRL_FDN (1 << 0)
+# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
+# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
+#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
+# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
+#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
+# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
+# define PLL_SERIAL_3_SRL_DE (1 << 2)
+# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
+#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
+#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
+#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
+#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
+#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
+#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
+#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
+#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
+#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
+# define SEL_CLK_SEL_CLK1 (1 << 0)
+# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
+# define SEL_CLK_ENA_SC_CLK (1 << 3)
+#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
+
+
+/* Page 09h: EDID Control */
+#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
+/* next 127 successive registers are the EDID block */
+#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
+#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
+#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
+#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
+#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
+
+
+/* Page 10h: information frames and packets */
+
+
+/* Page 11h: audio settings and content info packets */
+#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
+# define AIP_CNTRL_0_RST_FIFO (1 << 0)
+# define AIP_CNTRL_0_SWAP (1 << 1)
+# define AIP_CNTRL_0_LAYOUT (1 << 2)
+# define AIP_CNTRL_0_ACR_MAN (1 << 5)
+# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
+# define ENC_CNTRL_RST_ENC (1 << 0)
+# define ENC_CNTRL_RST_SEL (1 << 1)
+# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+
+
+/* Page 12h: HDCP and OTP */
+#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX33 REG(0x12, 0xb8) /* read/write */
+# define TX33_HDMI (1 << 1)
+
+
+/* Page 13h: Gamut related metadata packets */
+
+
+
+/* CEC registers: (not paged)
+ */
+#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
+# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
+# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
+# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
+# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
+#define REG_CEC_RXSHPDLEV 0xfe /* read */
+# define CEC_RXSHPDLEV_RXSENS (1 << 0)
+# define CEC_RXSHPDLEV_HPD (1 << 1)
+
+#define REG_CEC_ENAMODS 0xff /* read/write */
+# define CEC_ENAMODS_DIS_FRO (1 << 6)
+# define CEC_ENAMODS_DIS_CCLK (1 << 5)
+# define CEC_ENAMODS_EN_RXSENS (1 << 2)
+# define CEC_ENAMODS_EN_HDMI (1 << 1)
+# define CEC_ENAMODS_EN_CEC (1 << 0)
+
+
+/* Device versions: */
+#define TDA9989N2 0x0101
+#define TDA19989 0x0201
+#define TDA19989N2 0x0202
+#define TDA19988 0x0301
+
+static void
+cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+}
+
+static uint8_t
+cec_read(struct drm_encoder *encoder, uint8_t addr)
+{
+ struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+ return 0;
+}
+
+static void
+set_page(struct drm_encoder *encoder, uint16_t reg)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ if (REG2PAGE(reg) != priv->current_page) {
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {
+ REG_CURPAGE, REG2PAGE(reg)
+ };
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+
+ priv->current_page = REG2PAGE(reg);
+ }
+}
+
+static int
+reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t addr = REG2ADDR(reg);
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, buf, cnt);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+ return ret;
+}
+
+static uint8_t
+reg_read(struct drm_encoder *encoder, uint16_t reg)
+{
+ uint8_t val = 0;
+ reg_read_range(encoder, reg, &val, sizeof(val));
+ return val;
+}
+
+static void
+reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+ int ret;
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) | val);
+}
+
+static void
+reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+ reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+}
+
+static void
+tda998x_reset(struct drm_encoder *encoder)
+{
+ /* reset audio and i2c master: */
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+ msleep(50);
+
+ /* reset transmitter: */
+ reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+ reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+
+ /* PLL registers common configuration */
+ reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+ reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
+ reg_write(encoder, REG_SERIALIZER, 0x00);
+ reg_write(encoder, REG_BUFFER_OUT, 0x00);
+ reg_write(encoder, REG_PLL_SCG1, 0x00);
+ reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+ reg_write(encoder, REG_PLL_SCGN1, 0xfa);
+ reg_write(encoder, REG_PLL_SCGN2, 0x00);
+ reg_write(encoder, REG_PLL_SCGR1, 0x5b);
+ reg_write(encoder, REG_PLL_SCGR2, 0x00);
+ reg_write(encoder, REG_PLL_SCG2, 0x10);
+}
+
+/* DRM encoder functions */
+
+static void
+tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+}
+
+static void
+tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ /* we only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == priv->dpms)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* enable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0xff);
+ reg_write(encoder, REG_ENA_VP_0, 0xff);
+ reg_write(encoder, REG_ENA_VP_1, 0xff);
+ reg_write(encoder, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(encoder, REG_VIP_CNTRL_0,
+ VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
+ reg_write(encoder, REG_VIP_CNTRL_1,
+ VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
+ reg_write(encoder, REG_VIP_CNTRL_2,
+ VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* disable audio and video ports */
+ reg_write(encoder, REG_ENA_AP, 0x00);
+ reg_write(encoder, REG_ENA_VP_0, 0x00);
+ reg_write(encoder, REG_ENA_VP_1, 0x00);
+ reg_write(encoder, REG_ENA_VP_2, 0x00);
+ break;
+ }
+
+ priv->dpms = mode;
+}
+
+static void
+tda998x_encoder_save(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static void
+tda998x_encoder_restore(struct drm_encoder *encoder)
+{
+ DBG("");
+}
+
+static bool
+tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+tda998x_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ uint16_t hs_start, hs_end, line_start, line_end;
+ uint16_t vwin_start, vwin_end, de_start, de_end;
+ uint16_t ref_pix, ref_line, pix_start2;
+ uint8_t reg, div, rep;
+
+ hs_start = mode->hsync_start - mode->hdisplay;
+ hs_end = mode->hsync_end - mode->hdisplay;
+ line_start = 1;
+ line_end = 1 + mode->vsync_end - mode->vsync_start;
+ vwin_start = mode->vtotal - mode->vsync_start;
+ vwin_end = vwin_start + mode->vdisplay;
+ de_start = mode->htotal - mode->hdisplay;
+ de_end = mode->htotal;
+
+ pix_start2 = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ pix_start2 = (mode->htotal / 2) + hs_start;
+
+ /* TODO how is this value calculated? It is 2 for all common
+	 * formats in the tables in the out-of-tree NXP driver (assuming
+ * I've properly deciphered their byzantine table system)
+ */
+ ref_line = 2;
+
+	/* this might change for other color formats from the CRTC: */
+ ref_pix = 3 + hs_start;
+
+ div = 148500 / mode->clock;
+
+ DBG("clock=%d, div=%u", mode->clock, div);
+ DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
+ hs_start, hs_end, line_start, line_end);
+ DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
+ vwin_start, vwin_end, de_start, de_end);
+ DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
+ ref_line, ref_pix, pix_start2);
+
+ /* mute the audio FIFO: */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+
+ /* set HDMI HDCP mode off: */
+ reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_clear(encoder, REG_TX33, TX33_HDMI);
+
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
+ /* no pre-filter or interpolator: */
+ reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+ HVF_CNTRL_0_INTPOL(0));
+ reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+ reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+ VIP_CNTRL_4_BLC(0));
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
+
+ reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+ reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
+ reg_write(encoder, REG_SERIALIZER, 0);
+ reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+
+ /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
+ rep = 0;
+ reg_write(encoder, REG_RPT_CNTRL, 0);
+ reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+ SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+
+ reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+ PLL_SERIAL_2_SRL_PR(rep));
+
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
+
+ /* set color matrix bypass flag: */
+ reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+
+ /* set BIAS tmds value: */
+ reg_write(encoder, REG_ANA_GENERAL, 0x09);
+
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+
+ reg_write(encoder, REG_VIP_CNTRL_3, 0);
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+
+ reg_write(encoder, REG_VIDFORMAT, 0x00);
+ reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
+ reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
+ reg_write16(encoder, REG_DE_START_MSB, de_start);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+
+ if (priv->rev == TDA19988) {
+ /* let incoming pixels fill the active space (if any) */
+ reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ }
+
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+
+ reg = TBG_CNTRL_1_VHX_EXT_DE |
+ TBG_CNTRL_1_VHX_EXT_HS |
+ TBG_CNTRL_1_VHX_EXT_VS |
+ TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
+ TBG_CNTRL_1_VH_TGL_2;
+ if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
+ reg |= TBG_CNTRL_1_VH_TGL_0;
+ reg_set(encoder, REG_TBG_CNTRL_1, reg);
+
+ /* must be last register set: */
+ reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+}
+
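The div value fed into PLL_SERIAL_2_SRL_NOSC() above is simply 148500 / mode->clock, with the clock in kHz as DRM display modes store it. How that integer division comes out for a few common pixel clocks; the clock list is illustrative:

#include <stdio.h>

int main(void)
{
	/* mode->clock is in kHz in DRM display modes. */
	int clocks[] = { 148500 /* 1080p60 */, 74250 /* 720p60 */, 25175 /* 640x480 */ };

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("clock=%d kHz -> div=%d\n", clocks[i], 148500 / clocks[i]);
	return 0;
}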
+static enum drm_connector_status
+tda998x_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int
+read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+{
+ uint8_t offset, segptr;
+ int ret, i;
+
+ /* enable EDID read irq: */
+ reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(encoder, REG_DDC_ADDR, 0xa0);
+ reg_write(encoder, REG_DDC_OFFS, offset);
+ reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(encoder, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ reg_write(encoder, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(encoder, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ for (i = 100; i > 0; i--) {
+ uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
+ if (val & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ msleep(1);
+ }
+
+ if (i == 0)
+ return -ETIMEDOUT;
+
+ ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+ if (ret != EDID_LENGTH) {
+ dev_err(encoder->dev->dev, "failed to read edid block %d: %d",
+ blk, ret);
+ return ret;
+ }
+
+ reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ return 0;
+}
+
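read_edid_block() turns a linear EDID block number into the DDC segment pointer and byte offset programmed into REG_DDC_SEGM and REG_DDC_OFFS: two 128-byte blocks per segment, with odd blocks in the upper half. A standalone sketch of that mapping:

#include <stdio.h>

int main(void)
{
	for (int blk = 0; blk < 4; blk++) {
		int offset = (blk & 1) ? 128 : 0; /* lower or upper half of the segment */
		int segptr = blk / 2;             /* two 128-byte blocks per segment */
		printf("block %d -> segment %d, offset %d\n", blk, segptr, offset);
	}
	return 0;
}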
+static uint8_t *
+do_get_edid(struct drm_encoder *encoder)
+{
+ int j = 0, valid_extensions = 0;
+ uint8_t *block, *new;
+ bool print_bad_edid = drm_debug & DRM_UT_KMS;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ if (read_edid_block(encoder, block, 0))
+ goto fail;
+
+ if (!drm_edid_block_valid(block, 0, print_bad_edid))
+ goto fail;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
+ if (read_edid_block(encoder, ext_block, j))
+ goto fail;
+
+ if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
+ goto fail;
+
+ valid_extensions++;
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto fail;
+ block = new;
+ }
+
+ return block;
+
+fail:
+ dev_warn(encoder->dev->dev, "failed to read EDID\n");
+ kfree(block);
+ return NULL;
+}
+
+static int
+tda998x_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct edid *edid = (struct edid *)do_get_edid(encoder);
+ int n = 0;
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return n;
+}
+
+static int
+tda998x_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ DBG("");
+ return 0;
+}
+
+static int
+tda998x_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DBG("");
+ return 0;
+}
+
+static void
+tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ drm_i2c_encoder_destroy(encoder);
+ kfree(priv);
+}
+
+static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
+ .set_config = tda998x_encoder_set_config,
+ .destroy = tda998x_encoder_destroy,
+ .dpms = tda998x_encoder_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_mode_valid,
+ .mode_set = tda998x_encoder_mode_set,
+ .detect = tda998x_encoder_detect,
+ .get_modes = tda998x_encoder_get_modes,
+ .create_resources = tda998x_encoder_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return 0;
+}
+
+static int
+tda998x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int
+tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct drm_encoder *encoder = &encoder_slave->base;
+ struct tda998x_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->current_page = 0;
+ priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ priv->dpms = DRM_MODE_DPMS_OFF;
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_funcs;
+
+ /* wake up the device: */
+ cec_write(encoder, REG_CEC_ENAMODS,
+ CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
+
+ tda998x_reset(encoder);
+
+ /* read version: */
+ priv->rev = reg_read(encoder, REG_VERSION_LSB) |
+ reg_read(encoder, REG_VERSION_MSB) << 8;
+
+ /* mask off feature bits: */
+ priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */
+
+ switch (priv->rev) {
+ case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
+ case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
+ case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
+ case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
+ default:
+ DBG("found unsupported device: %04x", priv->rev);
+ goto fail;
+ }
+
+ /* after reset, enable DDC: */
+ reg_write(encoder, REG_DDC_DISABLE, 0x00);
+
+ /* set clock on DDC channel: */
+ reg_write(encoder, REG_TX3, 39);
+
+ /* if necessary, disable multi-master: */
+ if (priv->rev == TDA19989)
+ reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+
+ cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+ CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+
+ return 0;
+
+fail:
+ /* if encoder_init fails, the encoder slave is never registered,
+ * so cleanup here:
+ */
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
+ kfree(priv);
+ encoder_slave->slave_priv = NULL;
+ encoder_slave->slave_funcs = NULL;
+ return -ENXIO;
+}
+
+static struct i2c_device_id tda998x_ids[] = {
+ { "tda998x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tda998x_ids);
+
+static struct drm_i2c_encoder_driver tda998x_driver = {
+ .i2c_driver = {
+ .probe = tda998x_probe,
+ .remove = tda998x_remove,
+ .driver = {
+ .name = "tda998x",
+ },
+ .id_table = tda998x_ids,
+ },
+ .encoder_init = tda998x_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+tda998x_init(void)
+{
+ DBG("");
+ return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
+}
+
+static void __exit
+tda998x_exit(void)
+{
+ DBG("");
+ drm_i2c_encoder_unregister(&tda998x_driver);
+}
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
+MODULE_LICENSE("GPL");
+
+module_init(tda998x_init);
+module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242b..91f3ac6cef35 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_tiling.o \
i915_sysfs.o \
i915_trace_points.o \
+ i915_ums.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32158d21c632..aae31489c893 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->gtt_space != NULL)
seq_printf(m, " (gtt offset: %08x, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->stolen)
+ seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
if (obj->pin_mappable)
@@ -257,8 +259,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
count, size);
- seq_printf(m, "%zu [%zu] gtt total\n",
- dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+ seq_printf(m, "%zu [%lu] gtt total\n",
+ dev_priv->gtt.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.start);
mutex_unlock(&dev->struct_mutex);
@@ -388,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
struct intel_ring_buffer *ring)
{
if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %d\n",
+ seq_printf(m, "Current sequence (%s): %u\n",
ring->name, ring->get_seqno(ring, false));
}
}
@@ -545,11 +548,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- const volatile u32 __iomem *hws;
+ const u32 *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+ hws = ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -609,7 +612,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -691,7 +694,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
- seq_printf(m, "Kernel: " UTS_RELEASE);
+ seq_printf(m, "Kernel: " UTS_RELEASE "\n");
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
@@ -816,11 +819,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error_priv->error = dev_priv->first_error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
return single_open(file, i915_error_state, error_priv);
}
@@ -846,6 +849,77 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+static ssize_t
+i915_next_seqno_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf),
+ "next_seqno : 0x%x\n",
+ dev_priv->next_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_next_seqno_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ char buf[20];
+ u32 val = 1;
+ int ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_set_seqno(dev, val);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_next_seqno_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_next_seqno_read,
+ .write = i915_next_seqno_write,
+ .llseek = default_llseek,
+};
+
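The i915_next_seqno file added here (and registered in debugfs further down in this patch) is a plain text interface: a read reports the next seqno and a write hands the parsed value to i915_gem_set_seqno(). A minimal userspace sketch, assuming the usual debugfs mount point and DRM minor 0; the path itself is an assumption, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: debugfs mounted at /sys/kernel/debug, card is DRM minor 0. */
	const char *path = "/sys/kernel/debug/dri/0/i915_next_seqno";
	char buf[80];
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	ssize_t n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf); /* e.g. "next_seqno : 0x..." */
	}

	/* The written string is parsed by kstrtouint() in i915_next_seqno_write(). */
	(void)write(fd, "0x10", strlen("0x10"));

	close(fd);
	return 0;
}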
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -888,7 +962,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat;
+ u32 rpstat, cagf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -907,6 +981,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ if (IS_HASWELL(dev))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ cagf *= GT_FREQUENCY_MULTIPLIER;
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -919,8 +998,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1372,28 +1450,31 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
+ mutex_unlock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
if (&fb->base == ifbdev->helper.fb)
continue;
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
- fb->base.bits_per_pixel);
+ fb->base.bits_per_pixel,
+ atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
-
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -1403,7 +1484,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct intel_ring_buffer *ring;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
@@ -1421,6 +1503,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_printf(m, "\n");
}
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->default_context) {
+ seq_printf(m, "HW default context %s ring ", ring->name);
+ describe_obj(m, ring->default_context->obj);
+ seq_printf(m, "\n");
+ }
+ }
+
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -1556,7 +1646,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
if (ret)
return ret;
@@ -1585,7 +1675,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
@@ -1603,7 +1693,7 @@ i915_wedged_read(struct file *filp,
len = snprintf(buf, sizeof(buf),
"wedged : %d\n",
- atomic_read(&dev_priv->mm.wedged));
+ atomic_read(&dev_priv->gpu_error.reset_counter));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1658,7 +1748,7 @@ i915_ring_stop_read(struct file *filp,
int len;
len = snprintf(buf, sizeof(buf),
- "0x%08x\n", dev_priv->stop_rings);
+ "0x%08x\n", dev_priv->gpu_error.stop_rings);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1694,7 +1784,7 @@ i915_ring_stop_write(struct file *filp,
if (ret)
return ret;
- dev_priv->stop_rings = val;
+ dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
return cnt;
@@ -1708,6 +1798,102 @@ static const struct file_operations i915_ring_stop_fops = {
.llseek = default_llseek,
};
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE)
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ char buf[20];
+ int val = 0, ret;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+ /* No need to check and wait for gpu resets, only libdrm auto-restarts
+ * on ioctls on -EAGAIN. */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (val & DROP_ACTIVE) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto unlock;
+ }
+
+ if (val & (DROP_RETIRE | DROP_ACTIVE))
+ i915_gem_retire_requests(dev);
+
+ if (val & DROP_BOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+ if (val & DROP_UNBOUND) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0) {
+ ret = i915_gem_object_put_pages(obj);
+ if (ret)
+ goto unlock;
+ }
+ }
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_drop_caches_read,
+ .write = i915_drop_caches_write,
+ .llseek = default_llseek,
+};
+
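The DROP_* bits accepted by the new i915_gem_drop_caches file form a simple mask, and reading the file reports DROP_ALL. A short sketch that reproduces the mask values; the defines are copied from the patch:

#include <stdio.h>

#define DROP_UNBOUND 0x1
#define DROP_BOUND   0x2
#define DROP_RETIRE  0x4
#define DROP_ACTIVE  0x8
#define DROP_ALL (DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE)

int main(void)
{
	/* Reading the debugfs file reports DROP_ALL as the accepted mask. */
	printf("DROP_ALL = 0x%08x\n", DROP_ALL);                      /* 0x0000000f */
	/* Writing e.g. DROP_RETIRE | DROP_ACTIVE idles the GPU and retires requests. */
	printf("retire+active = 0x%x\n", DROP_RETIRE | DROP_ACTIVE);  /* 0xc */
	return 0;
}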
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@@ -2105,11 +2291,23 @@ int i915_debugfs_init(struct drm_minor *minor)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_gem_drop_caches",
+ &i915_drop_caches_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_error_state",
&i915_error_state_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_next_seqno",
+ &i915_next_seqno_fops);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -2129,10 +2327,14 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+ 1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105d..4fa6beb14c77 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+ ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@@ -1297,19 +1303,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem_stolen;
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_gem_stolen;
-
- intel_modeset_gem_init(dev);
+ goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
- ret = drm_irq_install(dev);
- if (ret)
- goto cleanup_gem;
+ intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1325,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = intel_fbdev_init(dev);
if (ret)
- goto cleanup_irq;
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev);
+
+ /*
+ * Some ports require correctly set-up hpd registers for detection to
+ * work properly (otherwise we get a ghost connected connector status),
+ * e.g. VGA on gm45. Hence we can only set up the initial fbdev config
+ * after hpd irqs are fully enabled. Ideally we would scan for the
+ * initial config only once hotplug handling is enabled, but due to the
+ * screwed-up locking around kms/fbdev init we can't protect the fbdev
+ * initial config scanning against hotplug events. Hence do this first
+ * and ignore the tiny window where we will lose hotplug notifications.
+ */
+ intel_fbdev_initial_config(dev);
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ dev_priv->enable_hotplug_processing = true;
drm_kms_helper_poll_init(dev);
@@ -1326,13 +1352,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
-cleanup_irq:
- drm_irq_uninstall(dev);
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+ drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@@ -1400,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
- ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
- ap->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ ap->ranges[0].base = dev_priv->gtt.mappable_base;
+ ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1516,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}
- aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+ aperture_size = dev_priv->gtt.mappable_end;
- dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable =
+ io_mapping_create_wc(dev_priv->gtt.mappable_base,
aperture_size);
- if (dev_priv->mm.gtt_mapping == NULL) {
+ if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
}
- i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
@@ -1580,11 +1605,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->error_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1614,9 +1640,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_opregion_init(dev);
acpi_video_register();
- setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- (unsigned long) dev);
-
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
@@ -1635,15 +1658,15 @@ out_gem_unload:
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
+ dev_priv->gtt.mappable_base,
aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- i915_gem_gtt_fini(dev);
+ dev_priv->gtt.gtt_remove(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1673,11 +1696,11 @@ int i915_driver_unload(struct drm_device *dev)
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- io_mapping_free(dev_priv->mm.gtt_mapping);
+ io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
- dev_priv->mm.gtt_base_addr,
- dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+ dev_priv->gtt.mappable_base,
+ dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
@@ -1703,8 +1726,8 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
- del_timer_sync(&dev_priv->hangcheck_timer);
- cancel_work_sync(&dev_priv->error_work);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
@@ -1723,9 +1746,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
- drm_mm_takedown(&dev_priv->mm.stolen);
-
- intel_cleanup_overlay(dev);
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
@@ -1738,6 +1758,10 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+
+ if (dev_priv->slab)
+ kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1f..c5b8c81b9440 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,13 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* ignore lid events during suspend */
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_SUSPENDED;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_set_power_well(dev, true);
+
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
@@ -492,9 +501,6 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_opregion_fini(dev);
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
console_lock();
intel_fbdev_set_suspend(dev, 1);
console_unlock();
@@ -565,12 +571,11 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
+ intel_hpd_init(dev);
}
intel_opregion_init(dev);
- dev_priv->modeset_on_lid = 0;
-
/*
* The console lock can be pretty contented on resume due
* to all the printk activity. Try to keep it out of the hot
@@ -583,6 +588,9 @@ static int __i915_drm_thaw(struct drm_device *dev)
schedule_work(&dev_priv->console_resume_work);
}
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_DONE;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return error;
}
@@ -778,9 +786,9 @@ int intel_gpu_reset(struct drm_device *dev)
}
/* Also reset the gpu hangman. */
- if (dev_priv->stop_rings) {
+ if (dev_priv->gpu_error.stop_rings) {
DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->stop_rings = 0;
+ dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -819,12 +827,12 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5)
+ if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
else
ret = intel_gpu_reset(dev);
- dev_priv->last_gpu_reset = get_seconds();
+ dev_priv->gpu_error.last_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@@ -870,6 +878,7 @@ int i915_reset(struct drm_device *dev)
drm_irq_uninstall(dev);
drm_irq_install(dev);
+ intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1113,102 +1122,6 @@ MODULE_LICENSE("GPL and additional rights");
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
- /*
- * This should make it easier to transition modules over to the
- * new register block scheme, since we can do it incrementally.
- */
- if (reg >= VLV_DISPLAY_BASE)
- return false;
-
- if (reg >= RENDER_RING_BASE &&
- reg < RENDER_RING_BASE + 0xff)
- return false;
- if (reg >= GEN6_BSD_RING_BASE &&
- reg < GEN6_BSD_RING_BASE + 0xff)
- return false;
- if (reg >= BLT_RING_BASE &&
- reg < BLT_RING_BASE + 0xff)
- return false;
-
- if (reg == PGTBL_ER)
- return false;
-
- if (reg >= IPEIR_I965 &&
- reg < HWSTAM)
- return false;
-
- if (reg == MI_MODE)
- return false;
-
- if (reg == GFX_MODE_GEN7)
- return false;
-
- if (reg == RENDER_HWS_PGA_GEN7 ||
- reg == BSD_HWS_PGA_GEN7 ||
- reg == BLT_HWS_PGA_GEN7)
- return false;
-
- if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
- reg == GEN6_BSD_RNCID)
- return false;
-
- if (reg == GEN6_BLITTER_ECOSKPD)
- return false;
-
- if (reg >= 0x4000c &&
- reg <= 0x4002c)
- return false;
-
- if (reg >= 0x4f000 &&
- reg <= 0x4f08f)
- return false;
-
- if (reg >= 0x4f100 &&
- reg <= 0x4f11f)
- return false;
-
- if (reg >= VLV_MASTER_IER &&
- reg <= GEN6_PMIER)
- return false;
-
- if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
- reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
- return false;
-
- if (reg >= VLV_IIR_RW &&
- reg <= VLV_ISR)
- return false;
-
- if (reg == FORCEWAKE_VLV ||
- reg == FORCEWAKE_ACK_VLV)
- return false;
-
- if (reg == GEN6_GDRST)
- return false;
-
- switch (reg) {
- case _3D_CHICKEN3:
- case IVB_CHICKEN3:
- case GEN7_COMMON_SLICE_CHICKEN1:
- case GEN7_L3CNTLREG1:
- case GEN7_L3_CHICKEN_MODE_REGISTER:
- case GEN7_ROW_CHICKEN2:
- case GEN7_L3SQCREG4:
- case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
- case GEN7_HALF_SLICE_CHICKEN1:
- case GEN6_MBCTL:
- case GEN6_UCGCTL2:
- return false;
- default:
- break;
- }
-
- return true;
-}
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -1232,8 +1145,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@@ -1260,11 +1171,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
- if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
- write##y(val, dev_priv->regs + reg + 0x180000); \
- } else { \
- write##y(val, dev_priv->regs + reg); \
- } \
+ write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12ab3bdea54d..e95337c97459 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
+#include <uapi/drm/i915_drm.h>
+
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/pm_qos.h>
/* General customization:
*/
@@ -83,7 +86,12 @@ enum port {
};
#define port_name(p) ((p) + 'A')
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
@@ -101,6 +109,19 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n);
+
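/*
 * Illustrative sketch, not part of this patch: how a caller might use the
 * intel_link_compute_m_n() helper declared above for a DP or FDI link. The
 * numeric parameters below are made-up example values.
 */
static void example_compute_link_m_n(void)
{
	struct intel_link_m_n m_n;

	/* 24 bpp, 4 lanes, 148500 kHz pixel clock, 270000 kHz link clock */
	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);

	/* m_n.gmch_m/gmch_n and m_n.link_m/link_n are now ready to be
	 * programmed into the data/link M/N registers. */
}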
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
@@ -279,6 +300,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -318,6 +340,7 @@ struct drm_i915_gt_funcs {
DEV_INFO_FLAG(has_llc)
struct intel_device_info {
+ u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@@ -345,6 +368,50 @@ struct intel_device_info {
u8 has_llc:1;
};
+enum i915_cache_level {
+ I915_CACHE_NONE = 0,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ unsigned long start; /* Start offset of used GTT */
+ size_t total; /* Total size GTT can map */
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+ phys_addr_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+
+ bool do_idle_maps;
+ dma_addr_t scratch_page_dma;
+ struct page *scratch_page;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ unsigned long *mappable_end);
+ void (*gtt_remove)(struct drm_device *dev);
+ void (*gtt_clear_range)(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*gtt_insert_entries)(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+
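/*
 * Illustrative sketch, not part of this patch: clearing the whole global GTT
 * through the new i915_gtt function pointers, using the gtt_total_entries()
 * macro above. Assumes the GTT has already been probed; error handling and
 * locking are omitted.
 */
static void example_clear_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Scrub every PTE, from entry 0 up to the end of the GTT. */
	dev_priv->gtt.gtt_clear_range(dev, 0,
				      gtt_total_entries(dev_priv->gtt));
}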
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
@@ -354,6 +421,16 @@ struct i915_hw_ppgtt {
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
+
+ /* pte functions, mirroring the interface of the global gtt. */
+ void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@@ -580,6 +657,9 @@ struct intel_gen6_power_mgmt {
struct mutex hw_lock;
};
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
@@ -620,8 +700,162 @@ struct intel_l3_parity {
struct work_struct error_work;
};
+struct i915_gem_mm {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ int gtt_mtrr;
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ u32 object_count;
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct work;
+
+ unsigned long last_reset;
+
+ /**
+ * State variable and reset counter controlling the reset flow
+ *
+ * Upper bits are for the reset counter. This counter is used by the
+ * wait_seqno code to notice, free of any races, that a reset event
+ * happened and that it needs to restart the entire ioctl (since most
+ * likely the seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ *
+ * Lowest bit controls the reset state machine: Set means a reset is in
+ * progress. This state will (presuming we don't have any bugs) decay
+ * into either unset (successful reset) or the special WEDGED value (hw
+ * terminally sour). All waiters on the reset_queue will be woken when
+ * that happens.
+ */
+ atomic_t reset_counter;
+
+ /**
+ * Special values/flags for reset_counter
+ *
+ * Note that the code relies on
+ * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+ * being true.
+ */
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED 0xffffffff
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for a pending gpu reset to settle.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* For gpu hang simulation. */
+ unsigned int stop_rings;
+};
+
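/*
 * Illustrative sketch, not part of this patch: the usage pattern
 * reset_counter is designed for, mirroring the nonblocking wait paths
 * reworked in i915_gem.c later in this diff. The counter is sampled while
 * the lock protecting the seqno (here dev->struct_mutex) is still held;
 * __wait_seqno() then bails out with -EAGAIN if a reset happened in between,
 * so the ioctl can be restarted.
 */
static int example_nonblocking_wait(struct drm_device *dev,
				    struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter;
	int ret;

	/* Called with dev->struct_mutex held. */
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	mutex_lock(&dev->struct_mutex);

	return ret;
}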
+enum modeset_restore {
+ MODESET_ON_LID_OPEN,
+ MODESET_DONE,
+ MODESET_SUSPENDED,
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ struct kmem_cache *slab;
const struct intel_device_info *info;
@@ -636,10 +870,11 @@ typedef struct drm_i915_private {
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
+ spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
@@ -649,9 +884,11 @@ typedef struct drm_i915_private {
*/
uint32_t gpio_mmio_base;
+ wait_queue_head_t gmbus_wait_queue;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
+ uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -661,31 +898,24 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* DPIO indirect register protection */
- spinlock_t dpio_lock;
+ struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
- u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
+ bool enable_hotplug_processing;
int num_pipe;
int num_pch_pll;
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -696,7 +926,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
+ unsigned int sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -713,7 +943,6 @@ typedef struct drm_i915_private {
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -734,11 +963,6 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -750,115 +974,12 @@ typedef struct drm_i915_private {
unsigned long quirks;
- /* Register state */
- bool modeset_on_lid;
+ enum modeset_restore modeset_restore;
+ struct mutex modeset_restore_lock;
- struct {
- /** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
- /** Memory allocator for GTT stolen memory */
- struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head bound_list;
- /**
- * List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
- */
- struct list_head unbound_list;
-
- /** Usable portion of the GTT for GEM */
- unsigned long gtt_start;
- unsigned long gtt_mappable_end;
- unsigned long gtt_end;
-
- struct io_mapping *gtt_mapping;
- phys_addr_t gtt_base_addr;
- int gtt_mtrr;
-
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
-
- struct shrinker inactive_shrinker;
- bool shrinker_no_lock_stealing;
-
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * Are we in a non-interruptible section of code like
- * modesetting?
- */
- bool interruptible;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occurring and makes
- * every pending request fail
- */
- atomic_t wedged;
-
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
-
- /* storage for physical objects */
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
- /* accounting, useful for userland debugging */
- size_t gtt_total;
- size_t mappable_gtt_total;
- size_t object_memory;
- u32 object_count;
- } mm;
+ struct i915_gtt gtt;
+
+ struct i915_gem_mm mm;
/* Kernel Modesetting */
@@ -900,7 +1021,7 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
- unsigned long last_gpu_reset;
+ struct i915_gpu_error gpu_error;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@@ -919,7 +1040,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
- bool fdi_rx_polarity_reversed;
+ u32 fdi_rx_config;
struct i915_suspend_saved_registers regfile;
@@ -940,11 +1061,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-enum i915_cache_level {
- I915_CACHE_NONE = 0,
- I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -971,6 +1088,8 @@ struct drm_i915_gem_object {
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
struct list_head gtt_list;
/** This object's place on the active/inactive lists */
@@ -1096,13 +1215,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
-
- /**
- * Number of crtcs where this object is currently the fb, but
- * will be page flipped away on the next vblank. When it
- * reaches 0, dev_priv->pending_flip_queue will be woken up.
- */
- atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
@@ -1141,7 +1253,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
- struct spinlock lock;
+ spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
@@ -1227,6 +1339,8 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_DDI(dev) (IS_HASWELL(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -1323,6 +1437,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
@@ -1391,18 +1506,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1454,8 +1573,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1481,8 +1600,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+ return unlikely(atomic_read(&error->reset_counter)
+ & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
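/*
 * Illustrative sketch, not part of this patch: how the two helpers above map
 * onto error codes, mirroring the reworked i915_gem_check_wedge() in
 * i915_gem.c below (which additionally returns -EIO for callers that cannot
 * handle -EAGAIN).
 */
static inline int example_check_reset(struct i915_gpu_error *error)
{
	if (!i915_reset_in_progress(error))
		return 0;
	if (i915_terminally_wedged(error))
		return -EIO;	/* the reset failed, the GPU is gone */
	return -EAGAIN;		/* a reset is pending, restart the ioctl */
}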
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1523,9 +1652,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -1548,7 +1678,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
@@ -1562,12 +1691,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
@@ -1585,9 +1712,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -1613,9 +1753,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1672,6 +1812,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1744,5 +1885,19 @@ __i915_write(64, q)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+ if (HAS_PCH_SPLIT(dev))
+ return CPU_VGACNTRL;
+ else if (IS_VALLEYVIEW(dev))
+ return VLV_VGACNTRL;
+ else
+ return VGACNTRL;
+}
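/*
 * Illustrative sketch, not part of this patch: typical use of the
 * i915_vgacntrl_reg() helper above when disabling the legacy VGA plane.
 * VGA_DISP_DISABLE is assumed to be the existing bit definition from
 * i915_reg.h.
 */
static inline void example_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}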
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8febea6daa08..0e207e6e0df8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
}
static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct completion *x = &dev_priv->error_completion;
- unsigned long flags;
int ret;
- if (!atomic_read(&dev_priv->mm.wedged))
+#define EXIT_COND (!i915_reset_in_progress(error))
+ if (EXIT_COND)
return 0;
+ /* GPU is already declared terminally dead, give up. */
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
- ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ ret = wait_event_interruptible_timeout(error->reset_queue,
+ EXIT_COND,
+ 10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
}
+#undef EXIT_COND
- if (atomic_read(&dev_priv->mm.wedged)) {
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- }
return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(dev);
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
@@ -149,6 +145,7 @@ int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +160,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
mutex_lock(&dev->struct_mutex);
- i915_gem_init_global_gtt(dev, args->gtt_start,
- args->gtt_end, args->gtt_end);
+ i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -186,12 +184,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
}
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ kmem_cache_free(dev_priv->slab, obj);
+}
+
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
@@ -215,7 +225,7 @@ i915_gem_create(struct drm_file *file,
if (ret) {
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- kfree(obj);
+ i915_gem_object_free(obj);
return ret;
}
@@ -259,14 +269,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj->tiling_mode != I915_TILING_NONE;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +409,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
loff_t offset;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
struct scatterlist *sg;
@@ -469,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
if (ret == 0)
goto next_page;
- hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -502,12 +502,6 @@ next_page:
out:
i915_gem_object_unpin_pages(obj);
- if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- }
-
return ret;
}
@@ -641,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
@@ -838,12 +832,13 @@ out:
i915_gem_object_unpin_pages(obj);
if (hit_slowpath) {
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /*
+ * Fixup: Flush cpu caches in case we didn't flush the dirty
+ * cachelines in-line while writing and the object moved
+ * out of the cpu write domain while we've dropped the lock.
+ */
+ if (!needs_clflush_after &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
i915_gem_chipset_flush(dev);
}
@@ -940,26 +935,17 @@ unlock:
}
int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
+ if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
+ /* Recovery complete, but the reset failed ... */
+ if (i915_terminally_wedged(error))
return -EIO;
return -EAGAIN;
@@ -990,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
+ * @reset_counter: reset sequence associated with the given seqno
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an SMP-safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before the seqno, and an appropriate
+ * smp_rmb must be inserted between the two reads.
+ *
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ unsigned reset_counter,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1026,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
+ i915_reset_in_progress(&dev_priv->gpu_error) || \
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1036,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ /* We need to check whether any gpu reset happened in between
+ * the caller grabbing the seqno and now ... */
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ end = -EAGAIN;
+
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
+ * gone. */
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
@@ -1082,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv, interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
@@ -1090,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno, interruptible, NULL);
+ return __wait_seqno(ring, seqno,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
+ interruptible, NULL);
}
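/*
 * Illustrative sketch, not part of this patch: the read ordering that the
 * __wait_seqno() kerneldoc above demands for lockless callers - sample
 * reset_counter first, then the seqno, with an smp_rmb() in between.
 * obj->last_read_seqno is assumed here purely for illustration.
 */
static int example_lockless_wait(struct intel_ring_buffer *ring,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	unsigned reset_counter;
	u32 seqno;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	smp_rmb();	/* order the counter read before the seqno read */
	seqno = obj->last_read_seqno;

	return __wait_seqno(ring, seqno, reset_counter, true, NULL);
}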
/**
@@ -1137,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
+ unsigned reset_counter;
u32 seqno;
int ret;
@@ -1147,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (seqno == 0)
return 0;
- ret = i915_gem_check_wedge(dev_priv, true);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
@@ -1155,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests_ring(ring);
@@ -1344,6 +1351,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
@@ -1359,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1374,7 +1387,7 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (!atomic_read(&dev_priv->mm.wedged))
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
@@ -1432,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
-static uint32_t
+uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
@@ -1460,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
*/
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced)
{
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 ||
+ if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
@@ -1480,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
-/**
- * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
- * unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
- *
- * Return the required GTT alignment for an object, only taking into account
- * unfenced tiled surface requirements.
- */
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size,
- int tiling_mode)
-{
- /*
- * Minimum alignment is 4k (GTT page size) for sane hw.
- */
- if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- tiling_mode == I915_TILING_NONE)
- return 4096;
-
- /* Previous hardware however needs to be aligned to a power-of-two
- * tile height. The simplest method for determining this is to reuse
- * the power-of-tile object size.
- */
- return i915_gem_get_gtt_size(dev, size, tiling_mode);
-}
-
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1571,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
@@ -1635,7 +1618,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
- inode = obj->base.filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->base.filp);
shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
@@ -1689,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
-static int
+int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -1800,7 +1783,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*
* Fail silently without starting the shrinker
*/
- mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping = file_inode(obj->base.filp)->i_mapping;
gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
gfp &= ~(__GFP_IO | __GFP_WAIT);
@@ -1862,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (obj->pages)
return 0;
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to obtain a purgeable object\n");
+ return -EINVAL;
+ }
+
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj);
@@ -1918,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- if (obj->pin_count) /* are we a framebuffer? */
- intel_mark_fb_idle(obj);
-
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_del_init(&obj->ring_list);
@@ -1940,30 +1925,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
}
static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
- /* The hardware uses various monotonic 32-bit counters, if we
- * detect that they will wraparound we need to idle the GPU
- * and reset those counters.
- */
- ret = 0;
+ /* Carefully retire all requests without writing to the rings */
for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
- ret |= ring->sync_seqno[j] != 0;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
}
- if (ret == 0)
- return ret;
-
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
i915_gem_retire_requests(dev);
+
+ /* Finally reset hw state */
for_each_ring(ring, dev_priv, i) {
+ intel_ring_init_seqno(ring, seqno);
+
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
@@ -1971,6 +1950,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
return 0;
}
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+ /* The HWS page needs to be set to a value less than what we
+ * will inject into the ring
+ */
+ ret = i915_gem_init_seqno(dev, seqno - 1);
+ if (ret)
+ return ret;
+
+ /* Carefully set the last_seqno value so that wrap
+ * detection still works
+ */
+ dev_priv->next_seqno = seqno;
+ dev_priv->last_seqno = seqno - 1;
+ if (dev_priv->last_seqno == 0)
+ dev_priv->last_seqno--;
+
+ return 0;
+}
+
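/*
 * Illustrative sketch, not part of this patch: how the new i915_next_seqno
 * debugfs write handler is expected to drive i915_gem_set_seqno() - take
 * struct_mutex, force the next seqno, drop the lock. The function name and
 * locking shown here are assumptions for illustration.
 */
static int example_set_next_seqno(struct drm_device *dev, u32 val)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);	/* rejects val == 0 */
	mutex_unlock(&dev->struct_mutex);

	return ret;
}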
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
@@ -1978,14 +1983,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_handle_seqno_wrap(dev);
+ int ret = i915_gem_init_seqno(dev, 0);
if (ret)
return ret;
dev_priv->next_seqno = 1;
}
- *seqno = dev_priv->next_seqno++;
+ *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
return 0;
}
@@ -2052,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
@@ -2317,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
@@ -2361,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, true, timeout);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
if (timeout) {
WARN_ON(!timespec_valid(timeout));
args->timeout_ns = timespec_to_ns(timeout);
@@ -2427,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
- /* Act a barrier for all accesses through the GTT */
- mb();
-
/* Force a pagefault for domain tracking on next user access */
i915_gem_release_mmap(obj);
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
+ /* Wait for any direct GTT access to complete */
+ mb();
+
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -2454,7 +2462,7 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
- int ret = 0;
+ int ret;
if (obj->gtt_space == NULL)
return 0;
@@ -2521,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev)
return 0;
}
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint64_t val;
-
- if (obj) {
- u32 size = obj->gtt_space->size;
-
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
-
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
uint64_t val;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
if (obj) {
u32 size = obj->gtt_space->size;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
- I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
- POSTING_READ(FENCE_REG_965_0 + reg * 8);
+ fence_reg += reg * 8;
+ I915_WRITE64(fence_reg, val);
+ POSTING_READ(fence_reg);
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2645,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
switch (INTEL_INFO(dev)->gen) {
case 7:
- case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
- default: break;
+ default: BUG();
}
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
}
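
A hedged summary of the barrier policy now centralised here (the same ordering previously spread over i915_gem_object_flush_fence() and i915_gem_object_finish_gtt()):

	/* Sketch of the contract enforced by i915_gem_write_fence():
	 *
	 *   if (outgoing object was readable through the GTT)
	 *           mb();   // drain CPU access via the old fence first
	 *   write the fence register (i830/i915/i965 paths above)
	 *   if (incoming object will be readable through the GTT)
	 *           mb();   // fence visible before any direct GTT access
	 *
	 * so callers such as i915_gem_object_wait_fence() no longer need
	 * their own barriers.
	 */
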
static inline int fence_number(struct drm_i915_private *dev_priv,
@@ -2686,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
@@ -2696,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
obj->last_fenced_seqno = 0;
}
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
- mb();
-
obj->fenced_gpu_access = false;
return 0;
}
@@ -2712,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
@@ -2786,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
- ret = i915_gem_object_flush_fence(obj);
+ ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
@@ -2807,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
- ret = i915_gem_object_flush_fence(old);
+ ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
@@ -2830,7 +2837,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
- * crossing memory domains and dieing.
+ * crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;
@@ -2908,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
bool mappable, fenceable;
int ret;
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_ERROR("Attempting to bind a purgeable object\n");
- return -EINVAL;
- }
-
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, true);
unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
+ i915_gem_get_gtt_alignment(dev,
obj->base.size,
- obj->tiling_mode);
+ obj->tiling_mode, false);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
@@ -2938,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
- (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2959,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
@@ -2999,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
(node->start & (fence_alignment - 1)) == 0;
mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3019,6 +3021,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return;
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ */
+ if (obj->stolen)
+ return;
+
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
@@ -3107,6 +3116,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_cpu_write_domain(obj);
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
+
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3413,11 +3429,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
+ unsigned reset_counter;
u32 seqno = 0;
int ret;
- if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -3427,12 +3449,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ring = request->ring;
seqno = request->seqno;
}
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
spin_unlock(&file_priv->mm.lock);
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true, NULL);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3706,14 +3729,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
- u32 mask;
+ gfp_t mask;
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- kfree(obj);
+ i915_gem_object_free(obj);
return NULL;
}
@@ -3724,7 +3747,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mask |= __GFP_DMA32;
}
- mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping = file_inode(obj->base.filp)->i_mapping;
mapping_set_gfp_mask(mapping, mask);
i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -3785,6 +3808,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
+ i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -3795,7 +3819,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_info_remove_obj(dev_priv, obj->base.size);
kfree(obj->bit_17);
- kfree(obj);
+ i915_gem_object_free(obj);
}
int
@@ -3829,7 +3853,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer_sync(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -3848,7 +3872,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
u32 misccpctl;
int i;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
if (!dev_priv->l3_parity.remap_info)
@@ -3891,8 +3915,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else
+ else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else
+ BUG();
}
static bool
@@ -3911,22 +3937,11 @@ intel_enable_blt(struct drm_device *dev)
return true;
}
-int
-i915_gem_init_hw(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
-
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
-
- i915_gem_l3_remap(dev);
-
- i915_gem_init_swizzling(dev);
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3943,76 +3958,59 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_bsd_ring;
}
- dev_priv->next_seqno = 1;
-
- /*
- * XXX: There was some w/a described somewhere suggesting loading
- * contexts before PPGTT.
- */
- i915_gem_context_init(dev);
- i915_gem_init_ppgtt(dev);
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_blt_ring;
return 0;
+cleanup_blt_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
return ret;
}
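
A quick worked number for the seqno chosen above (the motivation stated here is an assumption, not taken from the commit message):

	/* (u32)~0 - 0x1000 = 0xffffffff - 0x00001000 = 0xffffefff, i.e. the
	 * rings start only 4096 requests short of the 32-bit seqno wrap,
	 * presumably so that wrap handling is exercised shortly after every
	 * driver load rather than once every ~4 billion requests.
	 */
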
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
+int
+i915_gem_init_hw(struct drm_device *dev)
{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
- return true;
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ i915_gem_l3_remap(dev);
+
+ i915_gem_init_swizzling(dev);
+
+ ret = i915_gem_init_rings(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
+ i915_gem_init_ppgtt(dev);
+
+ return 0;
}
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long gtt_size, mappable_size;
int ret;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
- i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_init_global_gtt(dev, 0, mappable_size,
- gtt_size);
- }
-
+ i915_gem_init_global_gtt(dev);
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
@@ -4047,9 +4045,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_read(&dev_priv->mm.wedged)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4113,8 +4111,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
void
i915_gem_load(struct drm_device *dev)
{
- int i;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->slab =
+ kmem_cache_create("i915_gem_object",
+ sizeof(struct drm_i915_gem_object), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4127,7 +4131,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- init_completion(&dev_priv->error_completion);
+ init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4228,7 +4232,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr;
int i;
int page_count;
@@ -4264,7 +4268,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
int id,
int align)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3f06bcad551..94d873a6cffb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -126,13 +126,8 @@ static int get_context_size(struct drm_device *dev)
static void do_destroy(struct i915_hw_context *ctx)
{
- struct drm_device *dev = ctx->obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
- else
- BUG_ON(ctx != dev_priv->ring[RCS].default_context);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
@@ -144,7 +139,7 @@ create_hw_context(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
- int ret, id;
+ int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@@ -169,22 +164,11 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
-again:
- if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
- ret = -ENOMEM;
- DRM_DEBUG_DRIVER("idr allocation failed\n");
- goto err_out;
- }
-
- ret = idr_get_new_above(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID + 1, &id);
- if (ret == 0)
- ctx->id = id;
-
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
+ ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+ GFP_KERNEL);
+ if (ret < 0)
goto err_out;
+ ctx->id = ret;
return ctx;
@@ -242,7 +226,6 @@ err_destroy:
void i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ctx_size;
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
@@ -254,11 +237,9 @@ void i915_gem_context_init(struct drm_device *dev)
dev_priv->ring[RCS].default_context)
return;
- ctx_size = get_context_size(dev);
- dev_priv->hw_context_size = get_context_size(dev);
- dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
+ dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
- if (ctx_size <= 0 || ctx_size > (1<<20)) {
+ if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}
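
The context-id allocation above moves from the idr_pre_get()/idr_get_new_above() retry loop to idr_alloc(). A minimal sketch of the new pattern with a hypothetical helper (kernel context assumed):

	#include <linux/idr.h>
	#include <linux/gfp.h>

	/* idr_alloc() returns the allocated id (>= start) or a negative errno,
	 * so no pre-allocation or -EAGAIN retry is needed; end == 0 means
	 * "no upper bound", matching the call in create_hw_context() above.
	 */
	static int example_assign_ctx_id(struct idr *idr, void *ctx, int first_id)
	{
		int id = idr_alloc(idr, ctx, first_id, 0, GFP_KERNEL);

		if (id < 0)
			return id;	/* -ENOMEM, or -ENOSPC when exhausted */
		return id;
	}
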
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index abeaafef6d7e..6a5af6828624 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
- kfree(obj);
+ i915_gem_object_free(obj);
goto fail_detach;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 776a3225184c..c86d5d9356fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level,
- 0, dev_priv->mm.gtt_mappable_end);
+ 0, dev_priv->gtt.mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 26d08bb58218..2f2daebd0eef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,61 +34,133 @@
#include <linux/dma_remapping.h>
struct eb_objects {
+ struct list_head objects;
int and;
- struct hlist_head buckets[0];
+ union {
+ struct drm_i915_gem_object *lut[0];
+ struct hlist_head buckets[0];
+ };
};
static struct eb_objects *
-eb_create(int size)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
{
- struct eb_objects *eb;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
- BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
- while (count > size)
- count >>= 1;
- eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
- GFP_KERNEL);
- if (eb == NULL)
- return eb;
-
- eb->and = count - 1;
+ struct eb_objects *eb = NULL;
+
+ if (args->flags & I915_EXEC_HANDLE_LUT) {
+ int size = args->buffer_count;
+ size *= sizeof(struct drm_i915_gem_object *);
+ size += sizeof(struct eb_objects);
+ eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ }
+
+ if (eb == NULL) {
+ int size = args->buffer_count;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+ while (count > 2*size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_TEMPORARY);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ } else
+ eb->and = -args->buffer_count;
+
+ INIT_LIST_HEAD(&eb->objects);
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
- memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+ if (eb->and >= 0)
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
-static void
-eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+static int
+eb_lookup_objects(struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file)
{
- hlist_add_head(&obj->exec_node,
- &eb->buckets[obj->exec_handle & eb->and]);
+ int i;
+
+ spin_lock(&file->table_lock);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+ if (obj == NULL) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ return -ENOENT;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ spin_unlock(&file->table_lock);
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ return -EINVAL;
+ }
+
+ drm_gem_object_reference(&obj->base);
+ list_add_tail(&obj->exec_list, &eb->objects);
+
+ obj->exec_entry = &exec[i];
+ if (eb->and < 0) {
+ eb->lut[i] = obj;
+ } else {
+ uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+ obj->exec_handle = handle;
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[handle & eb->and]);
+ }
+ }
+ spin_unlock(&file->table_lock);
+
+ return 0;
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
- struct hlist_head *head;
- struct hlist_node *node;
- struct drm_i915_gem_object *obj;
+ if (eb->and < 0) {
+ if (handle >= -eb->and)
+ return NULL;
+ return eb->lut[handle];
+ } else {
+ struct hlist_head *head;
+ struct hlist_node *node;
- head = &eb->buckets[handle & eb->and];
- hlist_for_each(node, head) {
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
- }
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ struct drm_i915_gem_object *obj;
- return NULL;
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+ return NULL;
+ }
}
static void
eb_destroy(struct eb_objects *eb)
{
+ while (!list_empty(&eb->objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&eb->objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
kfree(eb);
}
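
The reworked eb_objects overloads the sign of eb->and to pick the lookup scheme. An illustrative helper (not driver code) showing the idea:

	/* eb->and < 0  : I915_EXEC_HANDLE_LUT was set, handles are indices
	 *                0..buffer_count-1 into lut[] and "and" holds
	 *                -buffer_count.
	 * eb->and >= 0 : handles are ordinary GEM handles, hashed into
	 *                buckets[] with "and" as the power-of-two mask.
	 */
	static void *example_lookup(int lut_and, void **lut, unsigned long handle)
	{
		if (lut_and < 0) {
			if (handle >= (unsigned long)-lut_and)
				return NULL;	/* index out of range */
			return lut[handle];
		}
		/* otherwise fall through to the hash-bucket walk shown in
		 * eb_get_object() above */
		return NULL;
	}
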
@@ -150,17 +222,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_DEBUG("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- return ret;
- }
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -220,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
@@ -299,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct eb_objects *eb,
- struct list_head *objects)
+ struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -313,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
@@ -335,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@@ -376,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- entry->offset = obj->gtt_offset;
+ if (entry->offset != obj->gtt_offset) {
+ entry->offset = obj->gtt_offset;
+ *need_reloc = true;
+ }
+
+ if (entry->flags & EXEC_OBJECT_WRITE) {
+ obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+ }
+
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+ !obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
return 0;
}
@@ -402,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct list_head *objects)
+ struct list_head *objects,
+ bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
@@ -430,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
else
list_move_tail(&obj->exec_list, &ordered_objects);
- obj->base.pending_read_domains = 0;
+ obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
@@ -470,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -480,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@@ -500,21 +575,22 @@ err: /* Decrement pin count for bound objects */
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct list_head *objects,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- int count)
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
+ bool need_relocs;
int *reloc_offset;
int i, total, ret;
+ int count = args->buffer_count;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(objects)) {
- obj = list_first_entry(objects,
+ while (!list_empty(&eb->objects)) {
+ obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
@@ -582,27 +658,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- for (i = 0; i < count; i++) {
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- ret = -ENOENT;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
- ret = i915_gem_execbuffer_reserve(ring, file, objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
@@ -623,44 +688,11 @@ err:
}
static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
- u32 plane, flip_mask;
- int ret;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
-
- return 0;
-}
-
-static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
- uint32_t flips = 0;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@@ -671,18 +703,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- if (obj->base.pending_write_domain)
- flips |= atomic_read(&obj->pending_flip);
-
flush_domains |= obj->base.write_domain;
}
- if (flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
- if (ret)
- return ret;
- }
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
i915_gem_chipset_flush(ring->dev);
@@ -698,6 +721,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
+ if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+ return false;
+
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
@@ -711,6 +737,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
+ if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+ return -EINVAL;
+
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
@@ -718,9 +747,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
@@ -742,8 +768,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
- obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
+ if (obj->base.write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+ obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring);
@@ -802,21 +830,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 mask;
- u32 flags;
+ u32 mask, flags;
int ret, mode, i;
+ bool need_relocs;
- if (!i915_gem_check_execbuffer(args)) {
- DRM_DEBUG("execbuf with invalid offset/length\n");
+ if (!i915_gem_check_execbuffer(args))
return -EINVAL;
- }
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
@@ -937,7 +962,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args->buffer_count);
+ eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -945,51 +970,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- INIT_LIST_HEAD(&objects);
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
- obj = to_intel_bo(drm_gem_object_lookup(dev, file,
- exec[i].handle));
- if (&obj->base == NULL) {
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- exec[i].handle, i);
- /* prevent error path from reading uninitialized data */
- ret = -ENOENT;
- goto err;
- }
-
- if (!list_empty(&obj->exec_list)) {
- DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
- obj, exec[i].handle, i);
- ret = -EINVAL;
- goto err;
- }
-
- list_add_tail(&obj->exec_list, &objects);
- obj->exec_handle = exec[i].handle;
- obj->exec_entry = &exec[i];
- eb_add_object(eb, obj);
- }
+ ret = eb_lookup_objects(eb, exec, args, file);
+ if (ret)
+ goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(objects.prev,
+ batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (need_relocs)
+ ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
- &objects, eb,
- exec,
- args->buffer_count);
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1011,7 +1013,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret)
goto err;
@@ -1065,20 +1067,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
- while (!list_empty(&objects)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
- }
mutex_unlock(&dev->struct_mutex);
@@ -1187,7 +1180,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
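
The exec-list allocation above keeps the usual "cheap kmalloc first, vmalloc fallback" pattern, now tagged GFP_TEMPORARY. A minimal sketch assuming kernel context (drm_malloc_ab() is roughly this plus an overflow check on the multiplication):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *example_alloc_exec_list(size_t count, size_t size)
	{
		/* Try contiguous pages quietly, without heavy reclaim... */
		void *p = kmalloc(count * size,
				  GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);

		/* ...and fall back to vmalloc for large or fragmented lists. */
		if (p == NULL)
			p = vmalloc(count * size);
		return p;
	}
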
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a7..926a1e2dd234 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-static inline gtt_pte_t pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
}
 /* PPGTT support for Sandybridge/Gen6 and later */
-static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = gen6_pte_encode(ppgtt->dev,
+ ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+ struct sg_table *pages,
+ unsigned first_entry,
+ enum i915_cache_level cache_level)
{
+ gtt_pte_t *pt_vaddr;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j, m, segment_len;
+ dma_addr_t page_addr;
+ struct scatterlist *sg;
+
+ /* init sg walking */
+ sg = pages->sgl;
+ i = 0;
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+
+ while (i < pages->nents) {
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+ page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
+ cache_level);
+
+ /* grab the next page */
+ if (++m == segment_len) {
+ if (++i == pages->nents)
+ break;
+
+ sg = sg_next(sg);
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+ }
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ int i;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(ppgtt->dev->pdev,
+ ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+ kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ret;
+ first_pd_entry_in_global_pt =
+ gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
- ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->clear_range = gen6_ppgtt_clear_range;
+ ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
- goto err_ppgtt;
+ return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
goto err_pt_alloc;
}
- if (dev_priv->mm.gtt->needs_dmar) {
- ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
- *ppgtt->num_pd_entries,
- GFP_KERNEL);
- if (!ppgtt->pt_dma_addr)
- goto err_pt_alloc;
+ ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
- 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- pt_addr)) {
- ret = -EIO;
- goto err_pd_pin;
+ if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
- }
- ppgtt->pt_dma_addr[i] = pt_addr;
}
+ ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+ ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
- i915_ppgtt_clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
- dev_priv->mm.aliasing_ppgtt = ppgtt;
-
return 0;
err_pd_pin:
@@ -186,94 +241,57 @@ err_pt_alloc:
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
-err_ppgtt:
- kfree(ppgtt);
return ret;
}
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i;
+ struct i915_hw_ppgtt *ppgtt;
+ int ret;
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
- return;
+ return -ENOMEM;
- if (ppgtt->pt_dma_addr) {
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
- 4096, PCI_DMA_BIDIRECTIONAL);
- }
+ ppgtt->dev = dev;
- kfree(ppgtt->pt_dma_addr);
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
- kfree(ppgtt);
+ ret = gen6_ppgtt_init(ppgtt);
+ if (ret)
+ kfree(ppgtt);
+ else
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return ret;
}
-static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- const struct sg_table *pages,
- unsigned first_entry,
- enum i915_cache_level cache_level)
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
- gtt_pte_t *pt_vaddr;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned i, j, m, segment_len;
- dma_addr_t page_addr;
- struct scatterlist *sg;
-
- /* init sg walking */
- sg = pages->sgl;
- i = 0;
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
-
- while (i < pages->nents) {
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
- page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
- cache_level);
-
- /* grab the next page */
- if (++m == segment_len) {
- if (++i == pages->nents)
- break;
-
- sg = sg_next(sg);
- segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
- m = 0;
- }
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- kunmap_atomic(pt_vaddr);
+ if (!ppgtt)
+ return;
- first_pte = 0;
- act_pd++;
- }
+ ppgtt->cleanup(ppgtt);
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->insert_entries(ppgtt, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- i915_ppgtt_clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -282,7 +300,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
+ gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
return;
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
+ pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+ if (unlikely(dev_priv->gtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+ if (unlikely(dev_priv->gtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
-
-static void i915_ggtt_clear_range(struct drm_device *dev,
- unsigned first_entry,
- unsigned num_entries)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- gtt_pte_t scratch_pte;
- gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- int i;
-
- if (INTEL_INFO(dev)->gen < 6) {
- intel_gtt_clear_range(first_entry, num_entries);
- return;
- }
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
- readl(gtt_base);
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
- (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+ dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+ dev_priv->gtt.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
@@ -423,16 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level level)
{
- struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
- const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ gtt_pte_t __iomem *gtt_entries =
+ (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
@@ -441,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ iowrite32(gen6_pte_encode(dev, addr, level),
+ &gtt_entries[i]);
i++;
}
}
- BUG_ON(i > max_entries);
- BUG_ON(i != obj->base.size / PAGE_SIZE);
-
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
@@ -456,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1])
+ != gen6_pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -466,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
+ I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+ struct sg_table *st,
+ unsigned int pg_start,
+ enum i915_cache_level cache_level)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(st, pg_start, flags);
+
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned int first_entry,
+ unsigned int num_entries)
+{
+ intel_gtt_clear_range(first_entry, num_entries);
+}
+
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- if (INTEL_INFO(dev)->gen < 6) {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- flags);
- } else {
- gen6_ggtt_bind_object(obj, cache_level);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- i915_ggtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -524,27 +567,101 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
-
-void i915_gem_init_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mm_node *entry;
+ struct drm_i915_gem_object *obj;
+ unsigned long hole_start, hole_end;
- /* Substract the guard page ... */
+ BUG_ON(mappable_end > end);
+
+ /* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ /* Mark any preallocated objects as occupied */
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+ DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+ obj->gtt_offset, obj->base.size);
+
+ BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_offset,
+ obj->base.size,
+ false);
+ obj->has_global_gtt_mapping = 1;
+ }
+
+ dev_priv->gtt.start = start;
+ dev_priv->gtt.total = end - start;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+ hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+ (hole_end-hole_start) / PAGE_SIZE);
+ }
- /* ... but ensure that we clear the entire range. */
- i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ /* And finally clear the reserved guard page */
+ dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+
+ gtt_size = dev_priv->gtt.total;
+ mappable_size = dev_priv->gtt.mappable_end;
+
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ int ret;
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (!ret)
+ return;
+
+ DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+ gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
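
For scale, the aperture shrink above is modest; assuming I915_PPGTT_PD_ENTRIES is 512 (the gen6 value), the arithmetic is:

	/* 512 page-directory entries * 4096 bytes (PAGE_SIZE) = 2 MiB taken
	 * from the top of the global GTT for the aliasing PPGTT, which is why
	 * gtt_size is reduced by I915_PPGTT_PD_ENTRIES * PAGE_SIZE before
	 * i915_gem_setup_global_gtt() and restored if PPGTT setup fails.
	 */
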
static int setup_scratch_page(struct drm_device *dev)
@@ -567,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->mm.gtt->scratch_page = page;
- dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+ dev_priv->gtt.scratch_page = page;
+ dev_priv->gtt.scratch_page_dma = dma_addr;
return 0;
}
@@ -576,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ set_pages_wb(dev_priv->gtt.scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->mm.gtt->scratch_page);
- __free_page(dev_priv->mm.gtt->scratch_page);
+ put_page(dev_priv->gtt.scratch_page);
+ __free_page(dev_priv->gtt.scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -590,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
-static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
-static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@@ -606,103 +723,127 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
return stolen_decoder[snb_gmch_ctl] << 20;
}
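
A worked example for the gen7 decoder above (the table index is the GMS field extracted from SNB_GMCH_CTRL):

	/* stolen_decoder[8] = 128, so a decoded GMS field of 8 yields
	 * 128 << 20 bytes = 128 MiB of stolen memory; indices 0-4 decode
	 * to 0.
	 */
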
-int i915_gem_gtt_init(struct drm_device *dev)
+static int gen6_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
+ unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
- /* On modern platforms we need not worry ourself with the legacy
- * hostbridge query stuff. Skip it entirely
- */
- if (INTEL_INFO(dev)->gen < 6) {
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
+ *mappable_base = pci_resource_start(dev->pdev, 2);
+ *mappable_end = pci_resource_len(dev->pdev, 2);
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- intel_gmch_remove();
- return -ENODEV;
- }
- return 0;
+ /* 64/512MB is the current min/max we actually know of, but this is just
+ * a coarse sanity check.
+ */
+ if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%lx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
}
- dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
- if (!dev_priv->mm.gtt)
- return -ENOMEM;
-
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-#ifdef CONFIG_INTEL_IOMMU
- dev_priv->mm.gtt->needs_dmar = 1;
-#endif
+ if (IS_GEN7(dev))
+ *stolen = gen7_get_stolen_size(snb_gmch_ctl);
+ else
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+ *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
- dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
-
- /* i9xx_setup */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- dev_priv->mm.gtt->gtt_total_entries =
- gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
- if (INTEL_INFO(dev)->gen < 7)
- dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- else
- dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
-
- dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
- /* 64/512MB is the current min/max we actually know of, but this is just a
- * coarse sanity check.
- */
- if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
- dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
- DRM_ERROR("Unknown GMADR entries (%d)\n",
- dev_priv->mm.gtt->gtt_mappable_entries);
- ret = -ENXIO;
- goto err_out;
+ dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+ if (!dev_priv->gtt.gsm) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ return -ENOMEM;
}
ret = setup_scratch_page(dev);
- if (ret) {
+ if (ret)
DRM_ERROR("Scratch setup failed\n");
- goto err_out;
- }
- dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
- dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
- if (!dev_priv->mm.gtt->gtt) {
- DRM_ERROR("Failed to map the gtt page table\n");
- teardown_scratch_page(dev);
- ret = -ENOMEM;
- goto err_out;
+ dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+ return ret;
+}
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->gtt.gsm);
+ teardown_scratch_page(dev_priv->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen,
+ phys_addr_t *mappable_base,
+ unsigned long *mappable_end)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
}
- /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
- DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
- DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
- DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+ intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+ dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
return 0;
+}
-err_out:
- kfree(dev_priv->mm.gtt);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- return ret;
+static void i915_gmch_remove(struct drm_device *dev)
+{
+ intel_gmch_remove();
}
-void i915_gem_gtt_fini(struct drm_device *dev)
+int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->mm.gtt->gtt);
- teardown_scratch_page(dev);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- kfree(dev_priv->mm.gtt);
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ unsigned long gtt_size;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ dev_priv->gtt.gtt_probe = i915_gmch_probe;
+ dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ } else {
+ dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+ dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+ }
+
+ ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
+ &dev_priv->gtt.stolen_size,
+ &gtt->mappable_base,
+ &gtt->mappable_end);
+ if (ret)
+ return ret;
+
+ gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_INFO("Memory usable by graphics device = %zdM\n",
+ dev_priv->gtt.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+ dev_priv->gtt.mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
+ dev_priv->gtt.stolen_size >> 20);
+
+ return 0;
}
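
The rewritten i915_gem_gtt_init() above picks one of two probe/remove backends by hardware generation and stores them as function pointers in the gtt structure. A minimal, self-contained C sketch of that dispatch pattern follows; the types, names and sizes are invented stand-ins for illustration, not the driver's real structures.

/* gtt_probe_sketch.c -- illustrative only; values and names are made up. */
#include <stddef.h>
#include <stdio.h>

struct gtt_sketch {
	size_t total;                          /* size of the global GTT */
	size_t stolen;                         /* BIOS-reserved stolen memory */
	int  (*probe)(struct gtt_sketch *gtt); /* chosen once, by "generation" */
	void (*remove)(struct gtt_sketch *gtt);
};

static int legacy_probe(struct gtt_sketch *gtt)
{
	gtt->total  = 256u << 20;   /* pretend the legacy path reports 256 MiB */
	gtt->stolen = 32u  << 20;
	return 0;
}

static void legacy_remove(struct gtt_sketch *gtt) { (void)gtt; }

static int gen6_probe(struct gtt_sketch *gtt)
{
	gtt->total  = 1024u << 20;  /* pretend the GGTT covers 1 GiB */
	gtt->stolen = 64u   << 20;
	return 0;
}

static void gen6_remove(struct gtt_sketch *gtt) { (void)gtt; }

static int gtt_init(struct gtt_sketch *gtt, int gen)
{
	/* Same shape as the driver: pick the backend once, then call it. */
	if (gen <= 5) {
		gtt->probe  = legacy_probe;
		gtt->remove = legacy_remove;
	} else {
		gtt->probe  = gen6_probe;
		gtt->remove = gen6_remove;
	}
	return gtt->probe(gtt);
}

int main(void)
{
	struct gtt_sketch gtt = { 0 };

	if (gtt_init(&gtt, 7) == 0)
		printf("GTT total = %zuM, stolen = %zuM\n",
		       gtt.total >> 20, gtt.stolen >> 20);
	gtt.remove(&gtt);
	return 0;
}

Keeping the backend choice behind two pointers lets the common init path print the totals and size the page tables without caring whether the numbers came from intel_gtt_get() or from the gen6 PCI config reads.
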
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083b126f..69d97cbac13c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,85 +42,73 @@
* for is a boon.
*/
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
-#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
*/
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+ * Note that there is also an MCHBAR mirror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(pdev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
+#if 0
+ } else if (IS_GEN3(dev)) {
u8 val;
+ /* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
+ }
- return base + offset;
+ return base;
}
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
- DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
+static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size <<= 1, 4096, 0);
+ if (!compressed_fb)
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb)
goto err_fb;
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
+ dev_priv->compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + compressed_fb->start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
}
+ dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
+ return 0;
-err_llb:
- drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
+ return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return -ENODEV;
+
+ if (size < dev_priv->cfb_size)
+ return 0;
+
+ /* Release any current block */
+ i915_gem_stolen_cleanup_compression(dev);
+
+ return i915_setup_compression(dev, size);
}
-static void i915_cleanup_compression(struct drm_device *dev)
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->cfb_size == 0)
+ return;
+
+ if (dev_priv->compressed_fb)
+ drm_mm_put_block(dev_priv->compressed_fb);
+
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
+
+ dev_priv->cfb_size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_gem_stolen_cleanup_compression(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
+
+ return 0;
+}
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+ u32 offset, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st;
+ struct scatterlist *sg;
+
+ DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+ BUG_ON(offset > dev_priv->gtt.stolen_size - size);
- /* Leave 1M for line length buffer & misc. */
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return NULL;
}
- return 0;
+ sg = st->sgl;
+ sg->offset = offset;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_len(sg) = size;
+
+ return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ BUG();
+ return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ /* Should only be called during free */
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .get_pages = i915_gem_object_get_pages_stolen,
+ .put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+ struct drm_mm_node *stolen)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
+
+ if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+ goto cleanup;
+
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+ obj->pages = i915_pages_create_for_stolen(dev,
+ stolen->start, stolen->size);
+ if (obj->pages == NULL)
+ goto cleanup;
+
+ obj->has_dma_mapping = true;
+ obj->pages_pin_count = 1;
+ obj->stolen = stolen;
+
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+
+cleanup:
+ i915_gem_object_free(obj);
+ return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (dev_priv->mm.stolen_base == 0)
+ return NULL;
+
+ DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+ if (size == 0)
+ return NULL;
+
+ stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (stolen)
+ stolen = drm_mm_get_block(stolen, size, 4096);
+ if (stolen == NULL)
+ return NULL;
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj)
+ return obj;
+
+ drm_mm_put_block(stolen);
+ return NULL;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_put_block(obj->stolen);
+ obj->stolen = NULL;
+ }
}
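
The stolen-memory objects added above have no struct page backing; i915_pages_create_for_stolen() hides that by describing the contiguous range with a single-entry scatterlist whose DMA address points straight at the stolen base. Below is a kernel-flavoured sketch of that pattern; the helper name is invented and the snippet only compiles in-tree, but the scatterlist calls are the real API as used in the hunk above.

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Wrap a physically contiguous range [base + offset, base + offset + size)
 * in a one-entry sg_table, the way the stolen-memory objects do.
 * Sketch only: error handling is minimal and the name is hypothetical.
 */
static struct sg_table *contig_range_to_sg(dma_addr_t base, u32 offset, u32 size)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	/* No struct page here: only the DMA view of the single segment. */
	st->sgl->offset = offset;
	st->sgl->length = size;
	sg_dma_address(st->sgl) = base + offset;
	sg_dma_len(st->sgl) = size;

	return st;
}

With obj->has_dma_mapping set and pages_pin_count held at 1, the rest of the GEM code can treat such an object like any other, even though get_pages can never legitimately be called on it (hence the BUG() stub).
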
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3dfa..abcba2f5a788 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return false;
}
- /*
- * Previous chips need to be aligned to the size of the smallest
- * fence register that can contain the object.
- */
- if (INTEL_INFO(obj->base.dev)->gen == 3)
- size = 1024*1024;
- else
- size = 512*1024;
-
- while (size < obj->base.size)
- size <<= 1;
-
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
return false;
@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(dev,
- obj->base.size,
- args->tiling_mode);
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
@@ -396,6 +385,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
+
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
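
The set_tiling change above preallocates obj->bit_17, one bit per page, so that put-pages never has to allocate memory while recording swizzle state. The sizing idiom is the usual bitmap-in-longs calculation; here is a small self-contained sketch with userspace stand-ins (not the driver's types or helpers).

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One bit per page: enough longs to cover obj_size >> page_shift pages. */
static unsigned long *alloc_page_bitmap(size_t obj_size, unsigned page_shift)
{
	size_t pages = obj_size >> page_shift;
	size_t longs = BITS_TO_LONGS(pages);

	return calloc(longs, sizeof(unsigned long));
}
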
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fe843389c7b4..2cd97d1cc920 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -355,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
+ dev_priv->gpu_error.hangcheck_count = 0;
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- bool blc_event;
atomic_inc(&dev_priv->irq_received);
@@ -590,8 +604,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_READ(PORT_HOTPLUG_STAT);
}
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
+ if (pch_iir & SDE_AUX_MASK)
+ dp_aux_irq_handler(dev);
+
if (pch_iir & SDE_GMBUS)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
SDE_AUDIO_POWER_SHIFT_CPT);
if (pch_iir & SDE_AUX_MASK_CPT)
- DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+ dp_aux_irq_handler(dev);
if (pch_iir & SDE_GMBUS_CPT)
- DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+ gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
- (!IS_GEN6(dev) || pm_iir == 0))
+ if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
goto done;
ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
}
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -838,23 +862,60 @@ done:
*/
static void i915_error_work_func(struct work_struct *work)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- error_work);
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+ gpu_error);
struct drm_device *dev = dev_priv->dev;
+ struct intel_ring_buffer *ring;
char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
+ int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
- if (atomic_read(&dev_priv->mm.wedged)) {
+ /*
+ * Note that there's only one work item which does gpu resets, so we
+ * need not worry about concurrent gpu resets potentially incrementing
+ * error->reset_counter twice. We only need to take care of another
+ * racing irq/hangcheck declaring the gpu dead for a second time. A
+ * quick check for that is good enough: schedule_work ensures the
+ * correct ordering between hang detection and this work item, and since
+ * the reset in-progress bit is only ever set by code outside of this
+ * work we don't need to worry about any other races.
+ */
+ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+ reset_event);
+
+ ret = i915_reset(dev);
+
+ if (ret == 0) {
+ /*
+ * After all the gem state is reset, increment the reset
+ * counter and wake up everyone waiting for the reset to
+ * complete.
+ *
+ * Since unlock operations are a one-sided barrier only,
+ * we need to insert a barrier here to order any seqno
+ * updates before the counter increment.
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+ kobject_uevent_env(&dev->primary->kdev.kobj,
+ KOBJ_CHANGE, reset_done_event);
+ } else {
+ atomic_set(&error->reset_counter, I915_WEDGED);
}
- complete_all(&dev_priv->error_completion);
+
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
@@ -915,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
@@ -924,10 +985,18 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
@@ -1074,6 +1143,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
+ default:
+ BUG();
}
}
@@ -1222,9 +1293,9 @@ static void i915_capture_error_state(struct drm_device *dev)
unsigned long flags;
int i, pipe;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
@@ -1235,7 +1306,8 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ DRM_INFO("capturing error event; look for more information in"
+ "/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
kref_init(&error->ref);
@@ -1318,12 +1390,12 @@ static void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error == NULL) {
- dev_priv->first_error = error;
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
error = NULL;
}
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
i915_error_state_free(&error->ref);
@@ -1335,10 +1407,10 @@ void i915_destroy_error_state(struct drm_device *dev)
struct drm_i915_error_state *error;
unsigned long flags;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- error = dev_priv->first_error;
- dev_priv->first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1459,17 +1531,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
- INIT_COMPLETION(dev_priv->error_completion);
- atomic_set(&dev_priv->mm.wedged, 1);
+ atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ &dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so they don't hang
+ * Wakeup waiting processes so that the reset work item
+ * doesn't deadlock trying to grab various locks.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->error_work);
+ queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1700,7 +1773,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hangcheck_count++ > 1) {
+ if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@@ -1759,25 +1832,29 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
return;
}
i915_get_extra_instdone(dev, instdone);
- if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+ if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd)) == 0 &&
+ memcmp(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
- dev_priv->hangcheck_count = 0;
+ dev_priv->gpu_error.hangcheck_count = 0;
- memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+ memcpy(dev_priv->gpu_error.last_acthd, acthd,
+ sizeof(acthd));
+ memcpy(dev_priv->gpu_error.prev_instdone, instdone,
+ sizeof(instdone));
}
repeat:
/* Reset timer case chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer,
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -1847,7 +1924,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
* This register is the same on all known PCH chips.
*/
-static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+static void ibx_enable_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
@@ -1860,14 +1937,36 @@ static void ironlake_enable_pch_hotplug(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 mask;
+
+ if (HAS_PCH_IBX(dev))
+ mask = SDE_HOTPLUG_MASK |
+ SDE_GMBUS |
+ SDE_AUX_MASK;
+ else
+ mask = SDE_HOTPLUG_MASK_CPT |
+ SDE_GMBUS_CPT |
+ SDE_AUX_MASK_CPT;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, ~mask);
+ I915_WRITE(SDEIER, mask);
+ POSTING_READ(SDEIER);
+
+ ibx_enable_hotplug(dev);
+}
+
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1895,27 +1994,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- if (HAS_PCH_CPT(dev)) {
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- } else {
- hotplug_mask = (SDE_CRT_HOTPLUG |
- SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG |
- SDE_PORTD_HOTPLUG |
- SDE_AUX_MASK);
- }
-
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1935,9 +2014,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB;
+ DE_PLANEA_FLIP_DONE_IVB |
+ DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
- u32 hotplug_mask;
dev_priv->irq_mask = ~display_mask;
@@ -1961,18 +2040,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
- hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
- SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT |
- SDE_PORTD_HOTPLUG_CPT);
- dev_priv->pch_irq_mask = ~hotplug_mask;
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
- I915_WRITE(SDEIER, hotplug_mask);
- POSTING_READ(SDEIER);
-
- ironlake_enable_pch_hotplug(dev);
+ ibx_irq_postinstall(dev);
return 0;
}
@@ -1981,7 +2049,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
@@ -2010,6 +2077,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2018,6 +2088,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2038,13 +2109,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static void valleyview_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
/* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2055,8 +2135,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2286,6 +2364,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
@@ -2296,15 +2377,25 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2318,10 +2409,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2481,7 +2568,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
@@ -2502,6 +2588,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/*
* Enable some error detection, note the instruction error mask
@@ -2522,14 +2609,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static void i965_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
+
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
+ hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
@@ -2556,10 +2656,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
- intel_opregion_enable_asle(dev);
-
- return 0;
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2655,6 +2751,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
@@ -2706,10 +2805,16 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+ i915_hangcheck_elapsed,
+ (unsigned long) dev);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2730,7 +2835,8 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
- } else if (IS_IVYBRIDGE(dev)) {
+ dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2738,14 +2844,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (IS_HASWELL(dev)) {
- /* Share interrupts handling with IVB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2764,13 +2862,23 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
}
+
+void intel_hpd_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+}
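
The error-handling rework above folds the old wedged flag and completion into gpu_error.reset_counter: setting the low bit declares a reset in flight, a plain increment on success clears that bit and publishes a new epoch behind a memory barrier, and an all-ones sentinel marks the GPU terminally wedged. A self-contained C11 sketch of that encoding follows; the names are invented and the barrier is shown with atomic_thread_fence() rather than the kernel's smp_mb__before_atomic_inc().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RESET_IN_PROGRESS_FLAG 1u
#define WEDGED                 0xffffffffu

static atomic_uint reset_counter;   /* even: idle, odd: reset pending */

static bool reset_in_progress(void)
{
	return atomic_load(&reset_counter) & RESET_IN_PROGRESS_FLAG;
}

static bool terminally_wedged(void)
{
	return atomic_load(&reset_counter) == WEDGED;
}

/* Hang detection: mark a reset as pending by setting the low bit. */
static void declare_hang(void)
{
	atomic_fetch_or(&reset_counter, RESET_IN_PROGRESS_FLAG);
}

/* Reset worker: on success the increment clears the low bit and bumps the
 * epoch; the release fence orders prior state restoration before the publish.
 * On failure the counter is pinned at the wedged sentinel.
 */
static void finish_reset(bool ok)
{
	if (ok) {
		atomic_thread_fence(memory_order_release);
		atomic_fetch_add(&reset_counter, 1);
	} else {
		atomic_store(&reset_counter, WEDGED);
	}
}

int main(void)
{
	declare_hang();
	printf("in progress: %d\n", reset_in_progress());
	finish_reset(true);
	printf("in progress: %d, wedged: %d\n",
	       reset_in_progress(), terminally_wedged());
	return 0;
}

Waiters sample the counter before sleeping and compare it afterwards, so a completed reset (new even value) and a terminal wedge are both detected without a separate completion object, which is why the work item only has to wake the ring queues and reset_queue.
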
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 59afb7eb6db6..527b664d3434 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -141,8 +141,15 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
+/*
+ * SR01 is the only VGA register touched on non-UMS setups.
+ * VLV doesn't do UMS, so the sequencer index/data registers
+ * are the only VGA registers which need to include
+ * display_mmio_offset.
+ */
+#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
+#define SR01 1
+#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
@@ -301,6 +308,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
@@ -335,17 +343,19 @@
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
+ *
+ * DPIO is VLV only.
*/
-#define DPIO_PKT 0x2100
+#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
-#define DPIO_DATA 0x2104
-#define DPIO_REG 0x2108
-#define DPIO_CTL 0x2110
+#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
+#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
@@ -556,13 +566,13 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
-#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
-#define VLV_IIR_RW 0x182084
-#define VLV_IER 0x1820a0
-#define VLV_IIR 0x1820a4
-#define VLV_IMR 0x1820a8
-#define VLV_ISR 0x1820ac
+#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -735,6 +745,7 @@
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
@@ -921,8 +932,8 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define _DPLL_A 0x06014
-#define _DPLL_B 0x06018
+#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
@@ -943,23 +954,6 @@
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
-
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
-
-/* Scratch pad debug 0 reg:
- */
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -998,7 +992,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -1035,7 +1029,7 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
@@ -1178,15 +1172,15 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
-#define FW_BLC_SELF_VLV 0x6500
+#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
/*
* Palette regs
*/
-#define _PALETTE_A 0x0a000
-#define _PALETTE_B 0x0a800
+#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1242,6 +1236,10 @@
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define MCH_SSKPD_WM0_MASK 0x3f
+#define MCH_SSKPD_WM0_VAL 0xc
/* Clocking configuration register */
#define CLKCFG 0x10c00
@@ -1551,26 +1549,26 @@
*/
/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _PIPEASRC 0x6001c
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
+#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
+#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@@ -1631,13 +1629,10 @@
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN 0x61110
-#define HDMIB_HOTPLUG_INT_EN (1 << 29)
-#define DPB_HOTPLUG_INT_EN (1 << 29)
-#define HDMIC_HOTPLUG_INT_EN (1 << 28)
-#define DPC_HOTPLUG_INT_EN (1 << 28)
-#define HDMID_HOTPLUG_INT_EN (1 << 27)
-#define DPD_HOTPLUG_INT_EN (1 << 27)
+#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -1658,21 +1653,14 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT 0x61114
+#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
-#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (3 << 21)
-#define DPC_HOTPLUG_INT_STATUS (3 << 19)
-#define DPB_HOTPLUG_INT_STATUS (3 << 17)
-/* HDMI bits are shared with the DP bits */
-#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
-#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
-#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
-#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -1877,7 +1865,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
-#define PFIT_CONTROL 0x61230
+#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -1895,9 +1883,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS 0x61234
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -1909,7 +1895,7 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS 0x61238
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
@@ -2639,10 +2625,10 @@
/* Display & cursor control */
/* Pipe A */
-#define _PIPEADSL 0x70000
+#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
-#define _PIPEACONF 0x70008
+#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2671,18 +2657,19 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
-#define PIPECONF_BPP_MASK (0x000000e0)
-#define PIPECONF_BPP_8 (0<<5)
-#define PIPECONF_BPP_10 (1<<5)
-#define PIPECONF_BPP_6 (2<<5)
-#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2)
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define _PIPEASTAT 0x70024
+#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2693,7 +2680,7 @@
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
@@ -2703,7 +2690,7 @@
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
@@ -2719,11 +2706,6 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
-#define PIPE_8BPC (0 << 5)
-#define PIPE_10BPC (1 << 5)
-#define PIPE_6BPC (2 << 5)
-#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -2732,7 +2714,7 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
-#define VLV_DPFLIPSTAT 0x70028
+#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
@@ -2746,7 +2728,7 @@
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define DPINVGTT 0x7002c /* VLV only */
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -2774,7 +2756,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-#define DSPFW1 0x70034
+#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@@ -2782,11 +2764,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
-#define DSPFW2 0x70038
+#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
-#define DSPFW3 0x7003c
+#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@@ -2798,13 +2780,13 @@
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
-#define VLV_DDL1 0x70050
+#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
-#define VLV_DDL2 0x70054
+#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
@@ -2948,10 +2930,10 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define _PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define _PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
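The comment fragment above describes how the split frame counter is meant to be read: sample the high half twice around the low half and retry if it changed between the two reads. A minimal sketch of that loop, assuming I915_READ and the PIPEAFRAMEHIGH/PIPEAFRAMEPIXEL pair shown here:

/* Illustrative sketch only; the driver's vblank code does the real work. */
static u32 read_frame_count_sketch(struct drm_i915_private *dev_priv)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);	/* the pair was torn, retry */

	return ((high1 >> PIPE_FRAME_HIGH_SHIFT) << 8) |
	       (low >> PIPE_FRAME_LOW_SHIFT);
}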
@@ -2962,11 +2944,12 @@
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR 0x70080
+#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -2983,16 +2966,16 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define _CURABASE 0x70084
-#define _CURAPOS 0x70088
+#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR 0x700c0
-#define _CURBBASE 0x700c4
-#define _CURBPOS 0x700c8
+#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
@@ -3007,7 +2990,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
-#define _DSPACNTR 0x70180
+#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3028,6 +3011,7 @@
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
@@ -3040,14 +3024,14 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define _DSPAADDR 0x70184
-#define _DSPASTRIDE 0x70188
-#define _DSPAPOS 0x7018C /* reserved */
-#define _DSPASIZE 0x70190
-#define _DSPASURF 0x7019C /* 965+ only */
-#define _DSPATILEOFF 0x701A4 /* 965+ only */
-#define _DSPAOFFSET 0x701A4 /* HSW */
-#define _DSPASURFLIVE 0x701AC
+#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3068,44 +3052,44 @@
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
-#define SWF00 0x71410
-#define SWF01 0x71414
-#define SWF02 0x71418
-#define SWF03 0x7141c
-#define SWF04 0x71420
-#define SWF05 0x71424
-#define SWF06 0x71428
-#define SWF10 0x70410
-#define SWF11 0x70414
-#define SWF14 0x71420
-#define SWF30 0x72414
-#define SWF31 0x72418
-#define SWF32 0x7241c
+#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
/* Pipe B */
-#define _PIPEBDSL 0x71000
-#define _PIPEBCONF 0x71008
-#define _PIPEBSTAT 0x71024
-#define _PIPEBFRAMEHIGH 0x71040
-#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define _DSPBCNTR 0x71180
+#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR 0x71184
-#define _DSPBSTRIDE 0x71188
-#define _DSPBPOS 0x7118C
-#define _DSPBSIZE 0x71190
-#define _DSPBSURF 0x7119C
-#define _DSPBTILEOFF 0x711A4
-#define _DSPBOFFSET 0x711A4
-#define _DSPBSURFLIVE 0x711AC
+#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3116,6 +3100,7 @@
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
@@ -3183,7 +3168,7 @@
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_PIPE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
@@ -3254,6 +3239,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
+#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+
/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -3294,41 +3281,41 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define _PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define _PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
-#define _PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
-#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0
-#define _PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
-#define _PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0
-#define _PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
-#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are the same, starting from 0x61000 */
-#define _PIPEB_DATA_M1 0x61030
-#define _PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
-#define _PIPEB_DATA_M2 0x61038
-#define _PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
-#define _PIPEB_LINK_M1 0x61040
-#define _PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
-#define _PIPEB_LINK_M2 0x61048
-#define _PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@@ -3581,27 +3568,30 @@
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
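With the long-pulse values corrected into a two-bit field, hotplug status is decoded against the new *_HOTPLUG_STATUS_MASK rather than tested as single bits. A sketch for port D, assuming the usual PCH_PORT_HOTPLUG status register and I915_READ:

/* Illustrative sketch only. */
static bool port_d_long_pulse_sketch(struct drm_i915_private *dev_priv)
{
	u32 stat = I915_READ(PCH_PORT_HOTPLUG) & PORTD_HOTPLUG_STATUS_MASK;

	/* 0 = no detect, 1 = short pulse (e.g. DP IRQ_HPD), 2 = long pulse */
	return stat == PORTD_HOTPLUG_LONG_DETECT;
}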
@@ -3722,13 +3712,13 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60200
-#define VLV_VIDEO_DIP_DATA_A 0x60208
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-#define VLV_VIDEO_DIP_CTL_B 0x61170
-#define VLV_VIDEO_DIP_DATA_B 0x61174
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@@ -3820,8 +3810,6 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
@@ -3927,7 +3915,7 @@
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
@@ -4020,17 +4008,17 @@
#define LVDS_DETECTED (1 << 1)
/* vlv has 2 sets of panel control regs. */
-#define PIPEA_PP_STATUS 0x61200
-#define PIPEA_PP_CONTROL 0x61204
-#define PIPEA_PP_ON_DELAYS 0x61208
-#define PIPEA_PP_OFF_DELAYS 0x6120c
-#define PIPEA_PP_DIVISOR 0x61210
-
-#define PIPEB_PP_STATUS 0x61300
-#define PIPEB_PP_CONTROL 0x61304
-#define PIPEB_PP_ON_DELAYS 0x61308
-#define PIPEB_PP_OFF_DELAYS 0x6130c
-#define PIPEB_PP_DIVISOR 0x61310
+#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -4211,7 +4199,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
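The new HSW_CAGF_* pair places the current-frequency field one bit lower than on GEN6, so reading it would presumably look like this (sketch, assuming I915_READ and the GEN6_RPSTAT1 register above):

/* Illustrative sketch only. */
static u32 current_gpu_freq_field_sketch(struct drm_i915_private *dev_priv,
					 bool is_haswell)
{
	u32 rpstat = I915_READ(GEN6_RPSTAT1);

	if (is_haswell)
		return (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		return (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
}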
@@ -4280,8 +4270,8 @@
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
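The corrected RC6 VID macros are now proper inverses of each other: 450 mV encodes to (450 - 245) / 5 = 41, and decoding 41 gives 41 * 5 + 245 = 450 mV again, which the old definitions did not guarantee. A quick compile-time check of that round trip:

/* Illustrative check only. */
static inline void rc6_vid_roundtrip_check(void)
{
	BUILD_BUG_ON(GEN6_ENCODE_RC6_VID(450) != 41);
	BUILD_BUG_ON(GEN6_DECODE_RC6_VID(41) != 450);
}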
@@ -4322,7 +4312,7 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define G4X_AUD_VID_DID 0x62020
+#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -4438,10 +4428,10 @@
#define AUDIO_CP_READY_C (1<<9)
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
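The renamed power-well registers make their ownership explicit (BIOS, driver, KVMr, debug). A minimal sketch of how the driver-owned well would presumably be requested and confirmed, assuming I915_READ/I915_WRITE and the wait_for() poll helper from intel_drv.h:

/* Illustrative sketch only. */
static void hsw_request_power_well_sketch(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);

	/* hardware reports the outcome in the state bit */
	if (wait_for(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE, 20))
		DRM_ERROR("timeout enabling power well\n");
}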
@@ -4524,6 +4514,7 @@
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
@@ -4657,4 +4648,51 @@
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
+#define _PIPE_A_CSC_COEFF_BY 0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
+#define _PIPE_A_CSC_COEFF_BU 0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
+#define _PIPE_A_CSC_COEFF_BV 0x49024
+#define _PIPE_A_CSC_MODE 0x49028
+#define _PIPE_A_CSC_PREOFF_HI 0x49030
+#define _PIPE_A_CSC_PREOFF_ME 0x49034
+#define _PIPE_A_CSC_PREOFF_LO 0x49038
+#define _PIPE_A_CSC_POSTOFF_HI 0x49040
+#define _PIPE_A_CSC_POSTOFF_ME 0x49044
+#define _PIPE_A_CSC_POSTOFF_LO 0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
+#define _PIPE_B_CSC_COEFF_BY 0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
+#define _PIPE_B_CSC_COEFF_BU 0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
+#define _PIPE_B_CSC_COEFF_BV 0x49124
+#define _PIPE_B_CSC_MODE 0x49128
+#define _PIPE_B_CSC_PREOFF_HI 0x49130
+#define _PIPE_B_CSC_PREOFF_ME 0x49134
+#define _PIPE_B_CSC_PREOFF_LO 0x49138
+#define _PIPE_B_CSC_POSTOFF_HI 0x49140
+#define _PIPE_B_CSC_POSTOFF_ME 0x49144
+#define _PIPE_B_CSC_POSTOFF_LO 0x49148
+
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
#endif /* _I915_REG_H_ */
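For context, a rough sketch of how the new per-pipe CSC registers might be programmed for a pass-through setup. Coefficient values are omitted since their fixed-point encoding is hardware specific; I915_WRITE and the pipe index are assumed from the driver:

/* Illustrative sketch only. */
static void pipe_csc_passthrough_sketch(struct drm_i915_private *dev_priv,
					enum pipe pipe)
{
	/* no pre/post offsets, CSC applied ahead of the gamma LUT */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
	I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), 0);
	I915_WRITE(PIPE_CSC_MODE(pipe), CSC_POSITION_BEFORE_GAMMA);
}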
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39de..2135f21ea458 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,67 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpll_reg;
-
- /* On IVB, 3rd pipe shares PLL with another one */
- if (pipe > 1)
- return false;
-
- if (HAS_PCH_SPLIT(dev))
- dpll_reg = _PCH_DPLL(pipe);
- else
- dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
- return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
- if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
- else
- array = dev_priv->regfile.save_palette_b;
-
- for (i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
@@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
+ /* VGA state */
+ I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ udelay(150);
+
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
@@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev)
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
-static void i915_save_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
- } else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
- } else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
- }
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
- if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
- }
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
- return;
-}
-
-static void i915_restore_modeset_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_a_reg, fpa0_reg, fpa1_reg;
- int dpll_b_reg, fpb0_reg, fpb1_reg;
- int i;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
- break;
- }
-
-
- if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = _PCH_DPLL_A;
- dpll_b_reg = _PCH_DPLL_B;
- fpa0_reg = _PCH_FPA0;
- fpb0_reg = _PCH_FPB0;
- fpa1_reg = _PCH_FPA1;
- fpb1_reg = _PCH_FPB1;
- } else {
- dpll_a_reg = _DPLL_A;
- dpll_b_reg = _DPLL_B;
- fpa0_reg = _FPA0;
- fpb0_reg = _FPB0;
- fpa1_reg = _FPA1;
- fpb1_reg = _FPB1;
- }
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
- }
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- }
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
- /* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
- POSTING_READ(_DPLL_A_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
- I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
- }
-
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
- I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- }
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
- /* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
- POSTING_READ(_DPLL_B_MD);
- }
- udelay(150);
-
- /* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
- I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
- }
-
- /* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
- if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
- }
-
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
- I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
- /* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
- return;
-}
-
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
- i915_save_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
@@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: regfile.save TV & SDVO state */
- }
-
/* Only save FBC state on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
@@ -690,16 +248,8 @@ static void i915_save_display(struct drm_device *dev)
}
}
- /* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
- else
- dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
-
- i915_save_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
@@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ if (INTEL_INFO(dev)->gen <= 4)
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
- }
-
- /* This is only meaningful in non-KMS mode */
- /* Don't restore them in KMS mode */
- i915_restore_modeset_reg(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_display_reg(dev);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
@@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
- }
-
/* only restore FBC info on platforms that support FBC */
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
@@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
- /* VGA state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- else
- I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
-
- i915_restore_vga(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_vga(dev);
+ else
+ i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 000000000000..985a09716237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = _PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+
+ return;
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+ return;
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7eb..969d08c72d10 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = 0;
- save_adpa = adpa = I915_READ(PCH_ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(PCH_ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, save_adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ POSTING_READ(crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
- save_adpa = adpa = I915_READ(ADPA);
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(ADPA, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(ADPA, save_adpa);
+ I915_WRITE(crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(ADPA);
+ adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
- adpa = I915_READ(PCH_ADPA);
+ adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
+ I915_WRITE(crt->adpa_reg, adpa);
+ POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
@@ -684,7 +685,6 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -800,10 +800,14 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
/*
- * TODO: find a proper way to discover whether we need to set the
- * polarity reversal bit or not, instead of relying on the BIOS.
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
*/
- if (HAS_PCH_LPT(dev))
- dev_priv->fdi_rx_polarity_reversed =
- !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4bad0f724019..d64af5aa4a1c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
{
int port;
- if (IS_HASWELL(dev)) {
- for (port = PORT_A; port < PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port, false);
+ if (!HAS_DDI(dev))
+ return;
- /* DDI E is the suggested one to work in FDI mode, so program is as such by
- * default. It will have to be re-programmed in case a digital DP output
- * will be detected on it
- */
- intel_prepare_ddi_buffers(dev, PORT_E, true);
- }
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested one to work in FDI mode, so program it as such
+ * by default. It will have to be re-programmed in case a digital DP
+ * output is detected on it
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
}
static const long hsw_ddi_buf_ctl_values[] = {
@@ -178,10 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
- ((intel_crtc->fdi_lanes - 1) << 19);
- if (dev_priv->fdi_rx_polarity_reversed)
- rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -203,7 +203,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
- /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal; the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so there is no need to
+ * set the port reversal bit. */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
@@ -675,10 +678,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
+ intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
- intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP = intel_dig_port->port_reversal |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
@@ -985,7 +992,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
- temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ /* Can only use the always-on power well for eDP when
+ * not using the panel fitter, and when not using motion
+ * blur mitigation (which we don't support). */
+ if (dev_priv->pch_pf_size)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@@ -1069,7 +1082,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
- cpu_transcoder = pipe;
+ cpu_transcoder = (enum transcoder) pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1285,34 +1298,58 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
+
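+ /* If an ELD was written for this pipe, re-assert the per-pipe audio
+ * output enable and ELD valid bits now that the port is up. */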
+ if (intel_crtc->eld_vld) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
}
+
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1452,11 +1489,11 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -1497,6 +1534,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_dig_port->port = port;
+ intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
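+ /* Preserve the port reversal setting already in DDI_BUF_CTL,
+ * presumably left there by the BIOS. */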
if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
else
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da1ad9c80bb5..a05ac2c91ba2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -154,8 +154,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
@@ -168,8 +168,8 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 7, .max = 98 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
- unsigned long flags;
- u32 val = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(DPIO_REG, reg);
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
- goto out_unlock;
+ return 0;
}
- val = I915_READ(DPIO_DATA);
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return val;
+ return I915_READ(DPIO_DATA);
}
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
- unsigned long flags;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
- goto out_unlock;
+ return;
}
I915_WRITE(DPIO_DATA, val);
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static void vlv_init_dpio(struct drm_device *dev)
@@ -472,61 +463,14 @@ static void vlv_init_dpio(struct drm_device *dev)
POSTING_READ(DPIO_CTL);
}
-static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_dual_link_lvds[] = {
- {
- .callback = intel_dual_link_lvds_callback,
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
- },
- },
- { } /* terminating entry */
-};
-
-static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
- unsigned int reg)
-{
- unsigned int val;
-
- /* use the module option value if specified */
- if (i915_lvds_channel_mode > 0)
- return i915_lvds_channel_mode == 2;
-
- if (dmi_check_system(intel_dual_link_lvds))
- return true;
-
- if (dev_priv->lvds_val)
- val = dev_priv->lvds_val;
- else {
- /* BIOS should set the proper LVDS register value at boot, but
- * in reality, it doesn't set the value when the lid is closed;
- * we need to check "the value to be set" in VBT when LVDS
- * register is uninitialized.
- */
- val = I915_READ(reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
- val = dev_priv->bios_lvds_val;
- dev_priv->lvds_val = val;
- }
- return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
-}
-
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ if (intel_is_dual_link_lvds(dev)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -550,11 +494,10 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -686,19 +629,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (I915_READ(LVDS)) != 0) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
*/
- if (is_dual_link_lvds(dev_priv, LVDS))
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -751,7 +691,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
@@ -766,8 +705,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -1047,6 +985,51 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+/**
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ switch(port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ return true;
+ }
+ } else {
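+ /* CPT/PPT south displays use a different SDEISR hotplug bit layout */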
+ switch(port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
@@ -1125,8 +1108,8 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- if (IS_HASWELL(dev_priv->dev)) {
- /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ if (HAS_DDI(dev_priv->dev)) {
+ /* DDI does not have a specific FDI_TX register */
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1170,7 +1153,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_DDI(dev_priv->dev))
return;
reg = FDI_TX_CTL(pipe);
@@ -1231,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
- cur_state = !!(val & PIPECONF_ENABLE);
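+ /* Registers in a powered-down well can't be trusted, so report the
+ * pipe as off instead of reading PIPECONF. */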
+ if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
+ !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
+ cur_state = false;
+ } else {
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ }
+
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
@@ -1509,13 +1498,14 @@ static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1530,24 +1520,21 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
- goto out_unlock;
+ return;
}
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
- unsigned long flags;
u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
- spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- goto out_unlock;
+ return 0;
}
I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1561,14 +1548,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
- goto out_unlock;
+ return 0;
}
- value = I915_READ(SBI_DATA);
-
-out_unlock:
- spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
- return value;
+ return I915_READ(SBI_DATA);
}
/**
@@ -1700,8 +1683,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
*/
- val &= ~PIPE_BPC_MASK;
- val |= pipeconf_val & PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
@@ -1728,7 +1711,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
/* Workaround: set timing override bit. */
@@ -1816,11 +1799,11 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- enum transcoder pch_transcoder;
+ enum pipe pch_transcoder;
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev))
+ if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
@@ -1836,7 +1819,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
- assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
@@ -2017,18 +2001,29 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
{
- int tile_rows, tiles;
+ if (tiling_mode != I915_TILING_NONE) {
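+ /* X-major tiles are 512 bytes wide and 8 rows tall (4096 bytes),
+ * so step in whole tiles and leave the intra-tile remainder in x/y. */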
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
- tile_rows = *y / 8;
- *y %= 8;
- tiles = *x / (512/bpp);
- *x %= 512/bpp;
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
- return tile_rows * pitch * 8 + tiles * 4096;
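+ /* Linear surfaces: page-align the base offset and fold the
+ * remainder back into x. */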
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
}
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2105,9 +2100,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2198,9 +2193,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2242,10 +2237,6 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
- atomic_read(&obj->pending_flip) == 0);
-
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
@@ -2350,43 +2341,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
- if (clock < 200000) {
- u32 temp;
- dpa_ctl |= DP_PLL_FREQ_160MHZ;
- /* workaround for 160Mhz:
- 1) program 0x4600c bits 15:0 = 0x8124
- 2) program 0x46010 bit 0 = 1
- 3) program 0x46034 bit 24 = 1
- 4) program 0x64000 bit 14 = 1
- */
- temp = I915_READ(0x4600c);
- temp &= 0xffff0000;
- I915_WRITE(0x4600c, temp | 0x8124);
-
- temp = I915_READ(0x46010);
- I915_WRITE(0x46010, temp | 1);
-
- temp = I915_READ(0x46034);
- I915_WRITE(0x46034, temp | (1 << 24));
- } else {
- dpa_ctl |= DP_PLL_FREQ_270MHZ;
- }
- I915_WRITE(DP_A, dpa_ctl);
-
- POSTING_READ(DP_A);
- udelay(500);
-}
-
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2815,7 +2769,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
@@ -2828,18 +2782,14 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
POSTING_READ(reg);
udelay(200);
- /* On Haswell, the PLL configuration for ports and pipes is handled
- * separately, as part of DDI setup */
- if (!IS_HASWELL(dev)) {
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
- }
+ POSTING_READ(reg);
+ udelay(100);
}
}
@@ -2889,7 +2839,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
@@ -2918,7 +2868,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp);
POSTING_READ(reg);
@@ -2929,10 +2879,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
- if (atomic_read(&dev_priv->mm.wedged))
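+ /* A reset in progress, or a reset since the flip was queued, means
+ * the flip will never complete. */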
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -2950,6 +2902,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (crtc->fb == NULL)
return;
+ WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
@@ -2992,6 +2946,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
@@ -3066,6 +3022,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
udelay(24);
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -3146,7 +3104,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev) &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3623,7 +3581,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
- intel_crtc->cpu_transcoder = intel_crtc->pipe;
+ intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
intel_ddi_put_crtc_pll(crtc);
}
@@ -3664,6 +3622,11 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
intel_enable_pll(dev_priv, pipe);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
@@ -3686,6 +3649,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 pctl;
if (!intel_crtc->active)
@@ -3705,6 +3669,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable the panel fitter if it is on this pipe. */
+ pctl = I915_READ(PFIT_CONTROL);
+ if ((pctl & PFIT_ENABLE) &&
+ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
intel_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
@@ -3767,19 +3738,17 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, enable);
}
-static void intel_crtc_noop(struct drm_crtc *crtc)
-{
-}
-
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
+ intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -3817,10 +3786,6 @@ void intel_modeset_disable(struct drm_device *dev)
}
}
-void intel_encoder_noop(struct drm_encoder *encoder)
-{
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -4012,16 +3977,8 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-struct fdi_m_n {
- u32 tu;
- u32 gmch_m;
- u32 gmch_n;
- u32 link_m;
- u32 link_n;
-};
-
static void
-fdi_reduce_ratio(u32 *num, u32 *den)
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
@@ -4029,20 +3986,18 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-static void
-ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
- int link_clock, struct fdi_m_n *m_n)
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n)
{
- m_n->tu = 64; /* default size */
-
- /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->tu = 64;
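+ /* gmch M/N is the ratio of payload (bpp * pixel clock) to link
+ * capacity (link clock * lanes * 8 bits); link M/N is the pixel to
+ * link clock ratio. */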
m_n->gmch_m = bits_per_pixel * pixel_clock;
m_n->gmch_n = link_clock * nlanes * 8;
- fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
- fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -4289,51 +4244,6 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
-static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 temp;
-
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock->p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(LVDS, temp);
-}
-
static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4349,6 +4259,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bool is_sdvo;
u32 temp;
+ mutex_lock(&dev_priv->dpio_lock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4432,6 +4344,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4443,6 +4357,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
bool is_sdvo;
@@ -4511,12 +4426,9 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -4555,6 +4467,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
@@ -4588,12 +4501,9 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- intel_update_lvds(crtc, clock, adjusted_mode);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
I915_WRITE(DPLL(pipe), dpll);
@@ -4783,10 +4693,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
/* default to 8bpc */
- pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
@@ -4794,7 +4704,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
+ pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
}
@@ -4981,6 +4891,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
if (!has_vga)
return;
+ mutex_lock(&dev_priv->dpio_lock);
+
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
@@ -5123,6 +5035,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -5177,19 +5091,19 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val = I915_READ(PIPECONF(pipe));
- val &= ~PIPE_BPC_MASK;
+ val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->bpp) {
case 18:
- val |= PIPE_6BPC;
+ val |= PIPECONF_6BPC;
break;
case 24:
- val |= PIPE_8BPC;
+ val |= PIPECONF_8BPC;
break;
case 30:
- val |= PIPE_10BPC;
+ val |= PIPECONF_10BPC;
break;
case 36:
- val |= PIPE_12BPC;
+ val |= PIPECONF_12BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
@@ -5206,10 +5120,80 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
else
val |= PIPECONF_PROGRESSIVE;
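+ /* Limited range RGB restricts output to the 16-235 levels expected
+ * by TV/CE sinks. */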
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+ else
+ val &= ~PIPECONF_COLOR_RANGE_SELECT;
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint16_t coeff = 0x7800; /* 1.0 */
+
+ /*
+ * TODO: Check what kind of values actually come out of the pipe
+ * with these coeff/postoff values and adjust to get the best
+ * accuracy. Perhaps we even need to take the bpc value into
+ * consideration.
+ */
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
+
+ /*
+ * GY/GU and RY/RU should be the other way around according
+ * to BSpec, but reality doesn't agree. Just set them up in
+ * a way that results in the correct picture.
+ */
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
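+ /* The post-offset adds the 16/255 black level used by limited range
+ * RGB, presumably in the CSC's fixed-point post-offset format. */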
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ postoff = (16 * (1 << 13) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
@@ -5400,7 +5384,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct intel_encoder *intel_encoder, *edp_encoder = NULL;
- struct fdi_m_n m_n = {0};
+ struct intel_link_m_n m_n = {0};
int target_clock, pixel_multiplier, lane, link_bw;
bool is_dp = false, is_cpu_edp = false;
@@ -5452,8 +5436,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
- &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@ -5506,7 +5489,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
- (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ intel_is_dual_link_lvds(dev))
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
@@ -5581,7 +5564,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither, fdi_config_ok;
@@ -5645,54 +5627,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else
intel_put_pch_pll(intel_crtc);
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- /* For non-DP output, clear any trans DP clock recovery setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
@@ -5727,9 +5667,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
@@ -5747,6 +5684,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
return fdi_config_ok ? ret : -EINVAL;
}
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = false;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+
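+ /* The power well may stay off only if every active output sits on
+ * pipe A / the eDP transcoder and the panel fitter is idle. */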
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ if (crtc->pipe != PIPE_A && crtc->base.enabled)
+ enable = true;
+ /* XXX: Should check for the eDP transcoder here, but thanks to the
+ * init sequence that's not yet available. Just in case, desktop eDP
+ * on PORT D might be possible on Haswell, too. */
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->connectors_active)
+ enable = true;
+ }
+
+ /* Even the eDP panel fitter is outside the always-on well. */
+ if (dev_priv->pch_pf_size)
+ enable = true;
+
+ intel_set_power_well(dev, enable);
+}
+
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5759,20 +5725,13 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ bool is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
- u32 temp;
int ret;
bool dither;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5786,11 +5745,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_cpu_edp)
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
- else
- intel_crtc->cpu_transcoder = pipe;
-
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@@ -5806,147 +5760,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock,
- &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
- }
-
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
-
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
- fp);
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its
- * own on pre-Haswell/LPT generation */
- if (!is_cpu_edp) {
- struct intel_pch_pll *pll;
-
- pll = intel_get_pch_pll(intel_crtc, dpll, fp);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
- pipe);
- return -EINVAL;
- }
- } else
- intel_put_pch_pll(intel_crtc);
-
- /* The LVDS pin pair needs to be on before the DPLLs are
- * enabled. This is an exception to the general rule that
- * mode_set doesn't turn things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether
- * we're going to set the DPLLs for dual-channel mode or
- * not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP |
- LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode
- * (LVDS_A3_POWER_UP) appropriately here, but we need to
- * look more thoroughly into how panels behave in the
- * two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
- }
-
- if (is_dp && !is_cpu_edp) {
+ if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- /* For non-DP output, clear any trans DP clock recovery
- * setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
- }
intel_crtc->lowfreq_avail = false;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- if (intel_crtc->pch_pll) {
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(intel_crtc->pch_pll->pll_reg);
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
- }
-
- if (intel_crtc->pch_pll) {
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
- intel_crtc->lowfreq_avail = true;
- } else {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- }
- }
- }
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
if (!is_dp || is_cpu_edp)
ironlake_set_m_n(crtc, mode, adjusted_mode);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
haswell_set_pipeconf(crtc, adjusted_mode, dither);
+ intel_set_pipe_csc(crtc, adjusted_mode);
+
/* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, fb);
@@ -5972,6 +5811,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
+ if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
@@ -6068,6 +5912,7 @@ static void haswell_write_eld(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -6109,6 +5954,7 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+ intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -6344,6 +6190,8 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
+ if (IS_HASWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -6700,6 +6548,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
if (encoder->crtc) {
crtc = encoder->crtc;
+ mutex_lock(&crtc->mutex);
+
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -6729,6 +6579,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
return false;
}
+ mutex_lock(&crtc->mutex);
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -6756,13 +6607,15 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ mutex_unlock(&crtc->mutex);
return false;
}
- if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
+ mutex_unlock(&crtc->mutex);
return false;
}
@@ -6777,27 +6630,31 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- struct drm_crtc *crtc = encoder->crtc;
-
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
- if (old->release_fb)
- old->release_fb->funcs->destroy(old->release_fb);
+ if (old->release_fb) {
+ drm_framebuffer_unregister_private(old->release_fb);
+ drm_framebuffer_unreference(old->release_fb);
+ }
+ mutex_unlock(&crtc->mutex);
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ mutex_unlock(&crtc->mutex);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -6993,11 +6850,6 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
-}
-
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
if (!i915_powersave)
@@ -7007,12 +6859,11 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
if (!crtc->fb)
continue;
- if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_increase_pllclock(crtc);
+ intel_decrease_pllclock(crtc);
}
}
-void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -7025,7 +6876,7 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
continue;
if (to_intel_framebuffer(crtc->fb)->obj == obj)
- intel_decrease_pllclock(crtc);
+ intel_increase_pllclock(crtc);
}
}
@@ -7109,9 +6960,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
- atomic_clear_mask(1 << intel_crtc->plane,
- &obj->pending_flip.counter);
- wake_up(&dev_priv->pending_flip_queue);
+ wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
@@ -7474,11 +7323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->enable_stall_check = true;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
+ intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -7494,7 +7340,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
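The flip path above stops tracking per-plane pending_flip bits and instead records the GPU reset counter at queue time. This hunk does not show how the snapshot is consumed later; as a rough sketch (the helper below is hypothetical, only the reset_counter field comes from the patch), a queued flip can be declared stale once the counter has moved on:

/* Hypothetical consumer of intel_crtc->reset_counter: treat the flip
 * as stale if a GPU reset happened after it was queued, so cleanup is
 * not stalled waiting for a flip-done interrupt that will never come. */
static bool example_flip_is_stale(struct intel_crtc *intel_crtc,
				  struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) != intel_crtc->reset_counter;
}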
@@ -7514,7 +7359,6 @@ free_work:
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_noop,
};
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
@@ -7904,16 +7748,21 @@ intel_modeset_check_state(struct drm_device *dev)
}
}
-bool intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
- bool ret = true;
+ int ret = 0;
+
+ saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ if (!saved_mode)
+ return -ENOMEM;
+ saved_hwmode = saved_mode + 1;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
@@ -7924,8 +7773,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
- saved_hwmode = crtc->hwmode;
- saved_mode = crtc->mode;
+ *saved_hwmode = crtc->hwmode;
+ *saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
@@ -7936,7 +7785,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
if (modeset_pipes) {
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
if (IS_ERR(adjusted_mode)) {
- return false;
+ ret = PTR_ERR(adjusted_mode);
+ goto out;
}
}
@@ -7962,11 +7812,11 @@ bool intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = !intel_crtc_mode_set(&intel_crtc->base,
- mode, adjusted_mode,
- x, y, fb);
- if (!ret)
- goto done;
+ ret = intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (ret)
+ goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7987,16 +7837,23 @@ bool intel_set_mode(struct drm_crtc *crtc,
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
- if (!ret && crtc->enabled) {
- crtc->hwmode = saved_hwmode;
- crtc->mode = saved_mode;
+ if (ret && crtc->enabled) {
+ crtc->hwmode = *saved_hwmode;
+ crtc->mode = *saved_mode;
} else {
intel_modeset_check_state(dev);
}
+out:
+ kfree(saved_mode);
return ret;
}
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
#undef for_each_intel_crtc_masked
static void intel_set_config_free(struct intel_set_config *config)
@@ -8109,7 +7966,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct intel_encoder *encoder;
int count, ro;
- /* The upper layers ensure that we either disabl a crtc or have a list
+ /* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
@@ -8211,14 +8068,9 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
- if (!set->mode)
- set->fb = NULL;
-
- /* The fb helper likes to play gross jokes with ->mode_set_config.
- * Unfortunately the crtc helper doesn't do much at all for this case,
- * so we have to cope with this madness until the fb helper is fixed up. */
- if (set->fb && set->num_connectors == 0)
- return 0;
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
@@ -8262,11 +8114,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
drm_mode_debug_printmodeline(set->mode);
}
- if (!intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d]\n",
- set->crtc->base.id);
- ret = -EINVAL;
+ ret = intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb);
+ if (ret) {
+ DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
goto fail;
}
} else if (config->fb_changed) {
@@ -8283,8 +8135,8 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
- !intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
out_config:
@@ -8303,7 +8155,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
static void intel_cpu_pll_init(struct drm_device *dev)
{
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
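From here on the patch swaps explicit IS_HASWELL() checks for HAS_DDI(), keying the DDI code paths off a device capability rather than a platform check. The macro itself is defined outside this diff; its assumed shape is roughly:

/* Assumed definition (lives in i915_drv.h, not shown in this diff):
 * a per-device feature flag set for platforms with DDI ports, so that
 * future DDI-based parts pick up these paths without new platform
 * checks. */
#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)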
@@ -8439,11 +8291,10 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(IS_HASWELL(dev) &&
- (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
int found;
/* Haswell uses DDI functions to detect digital outputs */
@@ -8490,23 +8341,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- int found;
-
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
- if (I915_READ(SDVOB) & PORT_DETECTED) {
- /* SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, SDVOB, true);
- if (!found)
- intel_hdmi_init(dev, SDVOB, PORT_B);
- if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
- if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -8666,14 +8512,15 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
- intel_fb->obj = obj;
return 0;
}
@@ -8703,7 +8550,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -8765,8 +8612,9 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
- } else
- dev_priv->display.update_wm = NULL;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
+ }
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -8888,6 +8736,18 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -8916,12 +8776,7 @@ static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
@@ -8936,10 +8791,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
- */
- intel_init_power_wells(dev);
+ intel_init_power_well(dev);
intel_prepare_ddi(dev);
@@ -8981,7 +8833,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
+ dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -8999,6 +8851,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
}
static void
@@ -9180,20 +9035,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-static void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ i915_disable_vga(dev);
}
}
@@ -9209,7 +9058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_connector *connector;
- if (IS_HASWELL(dev)) {
+ if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
if (tmp & TRANS_DDI_FUNC_ENABLE) {
@@ -9250,7 +9099,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
crtc->active ? "enabled" : "disabled");
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9301,9 +9150,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
if (force_restore) {
for_each_pipe(pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- intel_set_mode(&crtc->base, &crtc->base.mode,
- crtc->base.x, crtc->base.y, crtc->base.fb);
+ intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
}
i915_redisable_vga(dev);
@@ -9367,6 +9214,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
flush_scheduled_work();
drm_mode_config_cleanup(dev);
+
+ intel_cleanup_overlay(dev);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb3715b4b09d..f61cb7998c72 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
return max_link_bw;
}
-static int
-intel_dp_link_clock(uint8_t link_bw)
-{
- if (link_bw == DP_LINK_BW_2_7)
- return 270000;
- else
- return 162000;
-}
-
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
bool adjust_mode)
{
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_link_clock =
+ drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
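The local intel_dp_link_clock() helper removed above is replaced by the shared drm_dp_bw_code_to_link_rate(). For reference, a minimal sketch of the mapping that helper performs (the authoritative version lives in the DRM DP helpers; the HBR2 case is included here on the assumption that the shared helper knows about 5.4 GHz links):

/* Sketch: DPCD link-bandwidth code -> link rate in kHz. */
static int example_bw_code_to_link_rate(u8 link_bw)
{
	switch (link_bw) {
	default:
	case DP_LINK_BW_1_62:
		return 162000;
	case DP_LINK_BW_2_7:
		return 270000;
	case DP_LINK_BW_5_4:
		return 540000;
	}
}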
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+ uint32_t status;
+ bool done;
+
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ if (has_aux_irq)
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ else
+ done = wait_for_atomic(C, 10) == 0;
+ if (!done)
+ DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+ has_aux_irq);
+#undef C
+
+ return status;
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
- int i;
- int recv_bytes;
+ int i, ret, recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
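The AUX transfer is now bracketed with a cpu wakeup-latency constraint via pm_qos. Only the update calls appear in this function; the surrounding request lifecycle, sketched below, is assumed to be handled at driver load/unload elsewhere in the series (dev_priv->pm_qos is the request object):

#include <linux/pm_qos.h>

/* Driver load: register a latency request with no constraint yet. */
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
		   PM_QOS_DEFAULT_VALUE);

/* Around the latency-sensitive transfer: request 0us wakeup latency
 * (keeps the cpu out of deep C-states), then drop the constraint. */
pm_qos_update_request(&dev_priv->pm_qos, 0);
/* ... AUX read/write ... */
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

/* Driver unload. */
pm_qos_remove_request(&dev_priv->pm_qos);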
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = I915_READ(ch_ctl);
+ status = I915_READ_NOTRACE(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if (try == 3) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- for (;;) {
- status = I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- udelay(100);
- }
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
/* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
/* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
- return recv_bytes;
+ ret = recv_bytes;
+out:
+ pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ return ret;
}
/* Write data to the aux channel in native mode */
@@ -718,16 +763,35 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+ if (intel_dp->color_range_auto) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ else
+ intel_dp->color_range = 0;
+ }
+
+ if (intel_dp->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+ int link_bw_clock =
+ drm_dp_bw_code_to_link_rate(bws[clock]);
+ int link_avail = intel_dp_max_data_rate(link_bw_clock,
+ lane_count);
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ adjusted_mode->clock = link_bw_clock;
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +806,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
}
-struct intel_dp_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-static void
-intel_reduce_ratio(uint32_t *num, uint32_t *den)
-{
- while (*num > 0xffffff || *den > 0xffffff) {
- *num >>= 1;
- *den >>= 1;
- }
-}
-
-static void
-intel_dp_compute_m_n(int bpp,
- int nlanes,
- int pixel_clock,
- int link_clock,
- struct intel_dp_m_n *m_n)
-{
- m_n->tu = 64;
- m_n->gmch_m = (pixel_clock * bpp) >> 3;
- m_n->gmch_n = link_clock * nlanes;
- intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- m_n->link_m = pixel_clock;
- m_n->link_n = link_clock;
- intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
-}
-
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
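The removed intel_dp_compute_m_n()/intel_reduce_ratio() pair is superseded by a shared intel_link_compute_m_n() operating on struct intel_link_m_n (used in the hunk below). A sketch of the computation, mirroring the removed code: data M/N is (pixel_clock * bpp / 8) : (link_clock * nlanes) and link M/N is pixel_clock : link_clock, each shifted down until both values fit the 24-bit hardware fields. The field names follow the removed struct and are assumed unchanged; the real shared helper lives outside this diff.

static void example_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void example_compute_m_n(int bpp, int nlanes,
				int pixel_clock, int link_clock,
				struct intel_link_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	example_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	example_reduce_ratio(&m_n->link_m, &m_n->link_n);
}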
@@ -785,7 +816,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
- struct intel_dp_m_n m_n;
+ struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
@@ -808,8 +839,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
+ intel_link_compute_m_n(intel_crtc->bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +882,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
}
}
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (clock < 200000) {
+ /* For a long time we've carried around an ILK-DevA w/a for the
+ * 160MHz clock. If we're really unlucky, it's still required.
+ */
+ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ }
+
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ udelay(500);
+}
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -926,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
- intel_dp->DP |= intel_dp->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -950,6 +1008,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1057,6 +1118,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
@@ -1543,7 +1606,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set)
+intel_gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -1641,7 +1704,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
-intel_dp_signal_levels_hsw(uint8_t train_set)
+intel_hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1736,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
}
}
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t signal_levels, mask;
+ uint8_t train_set = intel_dp->train_set[0];
+
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_hsw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_gen7_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ } else {
+ signal_levels = intel_gen4_signal_levels(train_set);
+ mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+ }
+
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ *DP = (*DP & ~mask) | signal_levels;
+}
+
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
@@ -1696,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), temp);
- if (wait_for((I915_READ(DP_TP_STATUS(port)) &
- DP_TP_STATUS_IDLE_DONE), 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ if (port != PORT_A) {
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ }
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
@@ -1791,7 +1886,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
@@ -1809,24 +1904,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
- uint32_t signal_levels;
-
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(
- intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
- signal_levels);
+
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1961,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1892,8 +1970,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
cr_tries = 0;
channel_eq = false;
for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
@@ -1902,19 +1978,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
- signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
- } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- } else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- }
+ intel_dp_set_signal_levels(intel_dp, &DP);
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2028,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
/*
@@ -1981,7 +2047,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2064,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
POSTING_READ(intel_dp->output_reg);
- msleep(17);
+ /* We don't really know why we're doing this */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2085,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- if (crtc == NULL) {
- /* We can arrive here never having been attached
- * to a CRTC, for instance, due to inheriting
- * random state from the BIOS.
- *
- * If the pipe is not running, play safe and
- * wait for the clocks to stabilise before
- * continuing.
- */
+ if (WARN_ON(crtc == NULL)) {
+ /* We should never try to disable a port without a crtc
+ * attached. For paranoia keep the code around for a
+ * bit. */
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
- intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2104,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
@@ -2206,6 +2274,8 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
@@ -2216,6 +2286,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ return connector_status_disconnected;
+
return intel_dp_detect_dpcd(intel_dp);
}
@@ -2224,17 +2297,18 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
- switch (intel_dp->output_reg) {
- case DP_B:
- bit = DPB_HOTPLUG_LIVE_STATUS;
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS;
break;
- case DP_C:
- bit = DPC_HOTPLUG_LIVE_STATUS;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS;
break;
- case DP_D:
- bit = DPD_HOTPLUG_LIVE_STATUS;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
@@ -2290,13 +2364,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
return intel_ddc_get_modes(connector, adapter);
}
-
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
@@ -2306,7 +2373,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
- char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2315,10 +2381,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
-
if (status != connector_status_connected)
return status;
@@ -2419,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_dp->color_range)
- return 0;
-
- intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_dp->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -2445,11 +2518,8 @@ intel_dp_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_encoder->base.crtc) {
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_encoder->base.crtc)
+ intel_crtc_restore_mode(intel_encoder->base.crtc);
return 0;
}
@@ -2491,7 +2561,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -2566,6 +2635,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_dp->color_range_auto = true;
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
@@ -2755,7 +2825,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2767,15 +2837,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
name = "DPDDC-A";
break;
case PORT_B:
- dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case PORT_C:
- dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case PORT_D:
- dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0d..07ebac6fe8ca 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -109,6 +109,11 @@
* timings in the mode to prevent the crtc fixup from overwriting them.
* Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+/*
+ * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
+ * to be used.
+ */
+#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -153,6 +158,7 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
@@ -205,6 +211,7 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
+ bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -228,6 +235,9 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
+
+ /* reset counter value when the last flip was submitted */
+ unsigned int reset_counter;
};
struct intel_plane {
@@ -283,6 +293,9 @@ struct cxsr_latency {
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
+#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -337,9 +350,11 @@ struct intel_hdmi {
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
+ bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
+ bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
@@ -356,6 +371,7 @@ struct intel_dp {
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
+ bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -377,6 +393,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
+ u32 port_reversal;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
@@ -439,10 +456,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
-extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,12 +519,12 @@ struct intel_set_config {
bool mode_changed;
};
-extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
@@ -546,6 +563,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +609,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
@@ -627,9 +648,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
-extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch);
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -648,7 +670,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
-extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_init_power_well(struct drm_device *dev);
+extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5b..00e70dbe82da 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4ee..981bdce3634e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -57,9 +57,10 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct intel_fbdev *ifbdev,
+static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
@@ -83,7 +84,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_object_create_stolen(dev, size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -133,14 +136,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ /* If the object is shmemfs backed, it will have given us zeroed pages.
+ * If the object is stolen however, it will be full of whatever
+ * garbage was left in there.
+ */
+ if (ifbdev->ifb.obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
@@ -173,26 +182,10 @@ out:
return ret;
}
-static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = intelfb_create(ifbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
- .fb_probe = intel_fb_find_or_create_single,
+ .fb_probe = intelfb_create,
};
static void intel_fbdev_destroy(struct drm_device *dev,
@@ -212,6 +205,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
+ drm_framebuffer_unregister_private(&ifb->base);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference_unlocked(&ifb->obj->base);
@@ -241,10 +235,18 @@ int intel_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
- drm_fb_helper_initial_config(&ifbdev->helper, 32);
+
return 0;
}
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Due to peculiar init order wrt hpd handling this is separate. */
+ drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+}
+
void intel_fbdev_fini(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -280,7 +282,7 @@ void intel_fb_restore_mode(struct drm_device *dev)
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
@@ -288,7 +290,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
/* Be sure to shut off any planes that may be active */
list_for_each_entry(plane, &config->plane_list, head)
- plane->funcs->disable_plane(plane);
+ if (plane->enabled)
+ plane->funcs->disable_plane(plane);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d93..fa8ec4a26041 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
- enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
@@ -340,7 +341,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
- avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+ if (intel_hdmi->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
}
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case SDVOC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
return;
}
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
+ switch (intel_dig_port->port) {
+ case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
- case HDMIC:
+ case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
- case HDMID:
+ case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
@@ -766,46 +776,38 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- return true;
-}
-
-static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
-{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- switch (intel_hdmi->sdvox_reg) {
- case SDVOB:
- bit = HDMIB_HOTPLUG_LIVE_STATUS;
- break;
- case SDVOC:
- bit = HDMIC_HOTPLUG_LIVE_STATUS;
- break;
- default:
- bit = 0;
- break;
+ if (intel_hdmi->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_hdmi->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_hdmi->color_range = 0;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ if (intel_hdmi->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
+ return true;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
- if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
- return status;
-
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
+ intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
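The automatic range selection added to intel_hdmi_mode_fixup() above follows the CEA-861-E default encoding parameters: CEA modes other than VIC 1 (640x480) default to limited 16-235 RGB, everything else to full range. Restated as a tiny helper (hypothetical name, not part of the patch):

/* CEA-861-E 5.1 default quantization range for an HDMI sink;
 * drm_match_cea_mode() returns the CEA VIC, or 0 for non-CEA modes. */
static bool example_use_limited_range(const struct drm_display_mode *mode)
{
	return drm_match_cea_mode(mode) > 1;
}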
@@ -817,6 +819,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
@@ -902,21 +906,29 @@ intel_hdmi_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_hdmi->color_range)
- return 0;
-
- intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_hdmi->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
return -EINVAL;
done:
- if (intel_dig_port->base.base.crtc) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_dig_port->base.base.crtc)
+ intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
return 0;
}
@@ -931,7 +943,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -957,6 +968,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+ intel_hdmi->color_range_auto = true;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -980,15 +992,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
break;
case PORT_A:
/* Internal port only for eDP. */
@@ -1013,7 +1025,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
+ if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b812..acf8aec9ada7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+ u32 gmbus2_status,
+ u32 gmbus4_irq_en)
+{
+ int i;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus2 = 0;
+ DEFINE_WAIT(wait);
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves. */
+ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+ for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+ prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+ if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+ break;
+
+ schedule_timeout(1);
+ }
+ finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ if (gmbus2 & gmbus2_status)
+ return 0;
+ return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ return wait_for(C, 10);
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (ret)
+ return 0;
+ else
+ return -ETIMEDOUT;
+#undef C
+}
+
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
while (len) {
int ret;
u32 val, loop = 0;
- u32 gmbus2;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
val = I915_READ(GMBUS3 + reg_offset);
do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
- u32 gmbus2;
val = loop = 0;
do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
if (ret)
- return -ETIMEDOUT;
- if (gmbus2 & GMBUS_SATOER)
- return -ENXIO;
+ return ret;
}
return 0;
}
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u32 gmbus2;
-
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (ret == -ENXIO)
goto clear_err;
- ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
- 50);
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
+ if (ret == -ENXIO)
+ goto clear_err;
if (ret)
goto timeout;
- if (gmbus2 & GMBUS_SATOER)
- goto clear_err;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10)) {
+ if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -465,10 +515,13 @@ int intel_setup_gmbus(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
+ init_waitqueue_head(&dev_priv->gmbus_wait_queue);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
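
The hunks above add the wait queue and the GMBUS4 interrupt enables, but the interrupt-side change that wakes gmbus_wait_queue is not part of this section. A minimal sketch of what such a handler could look like, assuming the caller has already identified a GMBUS interrupt:

/* Hypothetical helper (assumed, not shown in this diff): called from the
 * display interrupt path once a GMBUS interrupt has been decoded, it
 * simply wakes every waiter parked in gmbus_wait_hw_status() or
 * gmbus_wait_idle() so they can re-read GMBUS2. */
static void gmbus_irq_wake_example(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}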
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74258ad..3d1d97488cc9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,8 @@ struct intel_lvds_encoder {
u32 pfit_control;
u32 pfit_pgm_ratios;
- bool pfit_dirty;
+ bool is_dual_link;
+ u32 reg;
struct intel_lvds_connector *attached_connector;
};
@@ -71,15 +72,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lvds_reg, tmp;
-
- if (HAS_PCH_SPLIT(dev)) {
- lvds_reg = PCH_LVDS;
- } else {
- lvds_reg = LVDS;
- }
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp;
- tmp = I915_READ(lvds_reg);
+ tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
return false;
@@ -92,6 +88,91 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
return true;
}
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *fixed_mode =
+ lvds_encoder->attached_connector->base.panel.fixed_mode;
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(lvds_encoder->reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+
+ /* Set the dithering flag on LVDS as needed, note that there is no
+ * special lvds dither control bit on pch-split platforms, dithering is
+ * only controlled through the PIPECONF reg. */
+ if (INTEL_INFO(dev)->gen == 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ I915_WRITE(lvds_encoder->reg, temp);
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
+ return;
+
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ enc->pfit_control,
+ enc->pfit_pgm_ratios);
+
+ I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, enc->pfit_control);
+}
+
/**
* Sets the power state for the panel.
*/
@@ -101,38 +182,20 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-
- if (lvds_encoder->pfit_dirty) {
- /*
- * Enable automatic panel scaling so that non-native modes
- * fill the screen. The panel fitter should only be
- * adjusted whilst the pipe is disabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- lvds_encoder->pfit_control,
- lvds_encoder->pfit_pgm_ratios);
-
- I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
- lvds_encoder->pfit_dirty = false;
- }
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- POSTING_READ(lvds_reg);
+ POSTING_READ(lvds_encoder->reg);
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
@@ -144,15 +207,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg, stat_reg;
+ u32 ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
- lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
@@ -162,13 +223,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (lvds_encoder->pfit_control) {
- I915_WRITE(PFIT_CONTROL, 0);
- lvds_encoder->pfit_dirty = true;
- }
-
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_encoder->reg);
}
static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -406,7 +462,6 @@ out:
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
- lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -492,13 +547,14 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
};
/*
- * Lid events. Note the use of 'modeset_on_lid':
- * - we set it on lid close, and reset it on open
+ * Lid events. Note the use of 'modeset':
+ * - we set it to MODESET_ON_LID_OPEN on lid close,
+ * and set it to MODESET_DONE on open
* - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly
- * set/reset)
- * - the suspend/resume paths will also set it to
- * zero, since they restore the mode ("lid open").
+ * duplicate events where it was already properly set)
+ * - the suspend/resume paths will set it to
+ * MODESET_SUSPENDED and ignore the lid open event,
+ * because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
@@ -512,6 +568,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+ goto exit;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.
@@ -520,21 +579,24 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
- return NOTIFY_OK;
+ goto exit;
if (!acpi_lid_open()) {
- dev_priv->modeset_on_lid = 1;
- return NOTIFY_OK;
+ /* do modeset on next lid open event */
+ dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+ goto exit;
}
- if (!dev_priv->modeset_on_lid)
- return NOTIFY_OK;
-
- dev_priv->modeset_on_lid = 0;
+ if (dev_priv->modeset_restore == MODESET_DONE)
+ goto exit;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
+
+ dev_priv->modeset_restore = MODESET_DONE;
+exit:
+ mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
@@ -591,8 +653,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
}
}
@@ -602,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -895,6 +955,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
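
Extending this quirk list follows the usual DMI pattern; a hypothetical additional entry (vendor and product strings made up purely for illustration) would look like the sketch below. The same effect can also be forced at load time through the i915_lvds_channel_mode option checked in compute_is_dual_link_lvds() further down (its user-visible module parameter is defined in i915_drv.c, not shown here).

/* Hypothetical extra quirk table entry -- the strings are invented; a real
 * entry would match a machine known to need dual-link LVDS despite what
 * the LVDS register and VBT report. */
static const struct dmi_system_id example_dual_link_lvds[] = {
	{
		.callback = intel_dual_link_lvds_callback,
		.ident = "Example vendor dual-link panel",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook1,1"),
		},
	},
	{ } /* terminating entry */
};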
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_lvds_encoder *lvds_encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->type == INTEL_OUTPUT_LVDS) {
+ lvds_encoder = to_lvds_encoder(&encoder->base);
+
+ return lvds_encoder->is_dual_link;
+ }
+ }
+
+ return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_device *dev = lvds_encoder->base.base.dev;
+ unsigned int val;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(lvds_encoder->reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
@@ -980,6 +1100,8 @@ bool intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
+ intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1001,6 +1123,12 @@ bool intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->reg = PCH_LVDS;
+ } else {
+ lvds_encoder->reg = LVDS;
+ }
+
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base,
@@ -1101,6 +1229,10 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
/*
* Unlock registers and just
* leave them unlocked
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adce..0e860f39933d 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,7 +28,6 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
-#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -101,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector)
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
- { 0, "Full" },
- { 1, "Limited 16:235" },
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
void
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934c..4d338740f2cb 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev)
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
if (acpi_is_video_device(acpi_dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a0..67a2501d519d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
@@ -1045,13 +1045,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1075,7 +1075,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_free;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
@@ -1157,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
kfree(params);
@@ -1165,7 +1165,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1241,7 +1241,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
return -ENODEV;
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
@@ -1307,7 +1307,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
- if (!reg_bo)
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
goto out_free;
overlay->reg_bo = reg_bo;
@@ -1432,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_obj->handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a7..a3730e0289e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,6 +321,9 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -356,12 +359,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
}
set_level:
- /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
- * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
- * registers are set.
+ /* Check the current backlight level and try to set it again if it's zero.
+ * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
+ * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
*/
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+ if (!intel_panel_get_backlight(dev))
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3280cffe50f4..61fee7fcdc2c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -447,12 +447,6 @@ void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -486,6 +480,14 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
+ if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
@@ -533,6 +535,7 @@ out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
+ i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2286,7 +2289,6 @@ err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -3581,6 +3583,19 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(MCH_SSKPD);
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+ DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+ DRM_INFO("This can cause pipe underruns and display issues.\n");
+ DRM_INFO("Please upgrade your BIOS to fix this.\n");
+ }
+}
+
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3673,6 +3688,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3684,6 +3701,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
+ /* WaVSRefCountFullforceMissDisable */
+ if (IS_HASWELL(dev_priv->dev))
+ reg &= ~GEN7_FF_VS_REF_CNT_FFME;
+
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -3854,6 +3875,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -4047,35 +4070,57 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
-/* Starting with Haswell, we have different power wells for
- * different parts of the GPU. This attempts to enable them all.
- */
-void intel_init_power_wells(struct drm_device *dev)
+void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long power_wells[] = {
- HSW_PWR_WELL_CTL1,
- HSW_PWR_WELL_CTL2,
- HSW_PWR_WELL_CTL4
- };
- int i;
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
if (!IS_HASWELL(dev))
return;
- mutex_lock(&dev->struct_mutex);
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE;
- for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
- int well = I915_READ(power_wells[i]);
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
- if ((well & HSW_PWR_WELL_STATE) == 0) {
- I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
- DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ }
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+void intel_init_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ /* For now, we need the power well to be always enabled. */
+ intel_set_power_well(dev, true);
+
+ /* We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now. */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
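
To make the "only disabled when no requester asks for it" rule in the comment above concrete, the check below is an illustration only (not part of the patch): it covers the two request registers that appear in this hunk, while the remaining requesters mentioned in the comment use the same enable bit in their own registers.

/* Illustrative sketch: the well may only power down once no requester has
 * its enable bit set.  Only HSW_PWR_WELL_BIOS and HSW_PWR_WELL_DRIVER are
 * visible in this hunk; the other request registers follow the same
 * pattern but are not shown here. */
static bool hsw_power_well_requested_example(struct drm_i915_private *dev_priv)
{
	return (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) ||
	       (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE);
}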
/* Set up chip specific power management-related functions */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 42ff97d667d2..1d5d613eb6be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -318,6 +318,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -331,7 +332,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -467,6 +468,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (pc->cpu_page == NULL)
goto err_unpin;
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ ring->name, pc->gtt_offset);
+
pc->obj = obj;
ring->private = pc;
return 0;
@@ -613,6 +617,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
return 0;
}
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->last_seqno < seqno;
+}
+
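
The wrap test relies on the fact that, right after a seqno wrap, the most recently assigned seqno is numerically small while any pre-wrap semaphore target is still large. A standalone illustration with made-up values:

#include <stdbool.h>
#include <stdint.h>

/* Standalone illustration (values invented): after a wrap the last seqno
 * handed out is tiny, so a pre-wrap wait target compares as "in the
 * future" and gen6_ring_sync() replaces the semaphore wait with no-ops. */
static bool seqno_wrapped_example(void)
{
	uint32_t last_seqno = 0x00000010;   /* reassigned after the wrap */
	uint32_t wait_seqno = 0xfffffff0;   /* target issued before the wrap */

	return last_seqno < wait_seqno;     /* true -> skip the wait */
}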
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -643,11 +654,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
if (ret)
return ret;
- intel_ring_emit(waiter,
- dw1 | signaller->semaphore_register[waiter->id]);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, 0);
- intel_ring_emit(waiter, MI_NOOP);
+ /* If seqno wrap happened, omit the wait with no-ops */
+ if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ intel_ring_emit(waiter,
+ dw1 |
+ signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ } else {
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ }
intel_ring_advance(waiter);
return 0;
@@ -728,6 +748,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
@@ -735,6 +761,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
return pc->cpu_page[0];
}
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct pipe_control *pc = ring->private;
+ pc->cpu_page[0] = seqno;
+}
+
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
@@ -1164,7 +1197,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
return ret;
}
- obj = i915_gem_alloc_object(dev, ring->size);
+ obj = NULL;
+ if (!HAS_LLC(dev))
+ obj = i915_gem_object_create_stolen(dev, ring->size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
@@ -1182,7 +1219,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1348,7 +1385,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
msleep(1);
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(jiffies, end));
@@ -1410,14 +1448,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+ int bytes)
+{
+ int ret;
+
+ if (unlikely(ring->tail + bytes > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < bytes)) {
+ ret = ring_wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ring->space -= bytes;
+ return 0;
+}
+
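
__intel_ring_begin() now carries the wrap and wait-for-space handling, so every caller keeps the familiar begin/emit/advance shape. A minimal, hypothetical caller (not from this patch) for reference:

/* Hypothetical caller: reserve two dwords, emit them, advance the tail.
 * intel_ring_begin/intel_ring_emit/intel_ring_advance and MI_NOOP are the
 * existing driver primitives used throughout this file. */
static int emit_two_noops_example(struct intel_ring_buffer *ring)
{
	int ret = intel_ring_begin(ring, 2);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}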
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int n = 4*num_dwords;
int ret;
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1426,20 +1485,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
- if (unlikely(ring->tail + n > ring->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
- }
+ return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
- if (unlikely(ring->space < n)) {
- ret = ring_wait_for_space(ring, n);
- if (unlikely(ret))
- return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ BUG_ON(ring->outstanding_lazy_request);
+
+ if (INTEL_INFO(ring->dev)->gen >= 6) {
+ I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
}
- ring->space -= n;
- return 0;
+ ring->set_seqno(ring, seqno);
}
void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1447,7 +1507,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
ring->tail &= ring->size - 1;
- if (dev_priv->stop_rings & intel_ring_flag(ring))
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
@@ -1604,6 +1664,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1614,6 +1675,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1624,6 +1686,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1695,6 +1758,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
@@ -1755,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
@@ -1770,6 +1835,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
@@ -1799,6 +1865,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd05725..d66208c2c48b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring_buffer {
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
+ void (*set_seqno)(struct intel_ring_buffer *ring,
+ u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ring->status_page.page_addr[reg];
}
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+ int reg, u32 value)
+{
+ ring->status_page.page_addr[reg] = value;
+}
+
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36d..d07a8cdf998e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -103,6 +103,7 @@ struct intel_sdvo {
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
+ bool color_range_auto;
/**
* This is set if we're going to treat the device as TV-out.
@@ -125,6 +126,7 @@ struct intel_sdvo {
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
+ bool rgb_quant_range_selectable;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ if (intel_sdvo->rgb_quant_range_selectable) {
+ if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ if (intel_sdvo->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_sdvo->has_hdmi_monitor &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ else
+ intel_sdvo->color_range = 0;
+ }
+
+ if (intel_sdvo->color_range)
+ adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+
return true;
}
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_sdvo->is_hdmi)
+ if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ intel_sdvo->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
+ intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_sdvo->color_range)
- return 0;
-
- intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_sdvo->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
goto done;
}
@@ -1997,11 +2033,8 @@ set_value:
done:
- if (intel_sdvo->base.base.crtc) {
- struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
- }
+ if (intel_sdvo->base.base.crtc)
+ intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
return 0;
#undef CHECK_PROPERTY
@@ -2010,7 +2043,6 @@ done:
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
@@ -2200,13 +2232,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
}
static void
-intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_sdvo->color_range_auto = true;
+ }
}
static bool
@@ -2254,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d7b060e0a231..1b6eb76beb7c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -50,6 +50,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
sprctl = I915_READ(SPRCTL(pipe));
@@ -89,6 +90,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
+ if (IS_HASWELL(dev))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -103,27 +107,23 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- if (!dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = true;
+ dev_priv->sprite_scaling_enabled |= 1 << pipe;
+
+ if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else {
- if (dev_priv->sprite_scaling_enabled) {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- intel_update_watermarks(dev);
- }
- }
+ } else
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -141,6 +141,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
static void
@@ -150,6 +154,7 @@ ivb_disable_plane(struct drm_plane *plane)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
+ bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
@@ -159,8 +164,11 @@ ivb_disable_plane(struct drm_plane *plane)
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
- dev_priv->sprite_scaling_enabled = false;
- intel_update_watermarks(dev);
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
}
static int
@@ -287,8 +295,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- pixel_size, fb->pitches[0]);
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
@@ -593,7 +601,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -606,7 +614,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
ret = intel_plane->update_colorkey(plane, set);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -622,7 +630,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
@@ -635,7 +643,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
intel_plane->get_colorkey(plane, get);
out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c1278..d808421c1c80 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- intel_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_crtc_restore_mode(crtc);
out:
return ret;
}
@@ -1488,7 +1487,6 @@ out:
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
.mode_fixup = intel_tv_mode_fixup,
.mode_set = intel_tv_mode_set,
- .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 2f486481d79a..d2253f639481 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
@@ -120,9 +121,10 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
return ret;
}
-static int mgag200fb_create(struct mga_fbdev *mfbdev,
+static int mgag200fb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
@@ -209,23 +211,6 @@ out:
return ret;
}
-static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size
- *sizes)
-{
- struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = mgag200fb_create(mfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
@@ -247,6 +232,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
}
drm_fb_helper_fini(&mfbdev->helper);
vfree(mfbdev->sysram);
+ drm_framebuffer_unregister_private(&mfb->base);
drm_framebuffer_cleanup(&mfb->base);
return 0;
@@ -255,7 +241,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
- .fb_probe = mga_fb_find_or_create_single,
+ .fb_probe = mgag200fb_create,
};
int mgag200_fbdev_init(struct mga_device *mdev)
@@ -277,6 +263,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return ret;
}
drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(mdev->dev);
+
drm_fb_helper_initial_config(&mfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 70dd3c5529d4..64297c72464f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -23,16 +23,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(fb);
}
-static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs mga_fb_funcs = {
.destroy = mga_user_framebuffer_destroy,
- .create_handle = mga_user_framebuffer_create_handle,
};
int mgag200_framebuffer_init(struct drm_device *dev,
@@ -40,13 +32,15 @@ int mgag200_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
}
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
- gfb->obj = obj;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 8a55beeb8bdc..a7ff6d5a34b9 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,8 +11,9 @@ config DRM_NOUVEAU
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
- select ACPI_WMI if ACPI
- select MXM_WMI if ACPI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ select MXM_WMI if ACPI && X86
select POWER_SUPPLY
help
Choose this option for open-source nVidia support.
@@ -52,26 +53,3 @@ config DRM_NOUVEAU_BACKLIGHT
help
Say Y here if you want to control the backlight of your display
(e.g. a laptop panel).
-
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ab25752a0b1e..90f9140eeefd 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
@@ -40,6 +41,11 @@ nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nvc0.o
nouveau-y += core/subdev/clock/nv04.o
nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
@@ -85,9 +91,16 @@ nouveau-y += core/subdev/gpio/base.o
nouveau-y += core/subdev/gpio/nv10.o
nouveau-y += core/subdev/gpio/nv50.o
nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
nouveau-y += core/subdev/i2c/aux.o
nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
nouveau-y += core/subdev/instmem/base.o
@@ -106,10 +119,15 @@ nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
nouveau-y += core/subdev/therm/nv40.o
nouveau-y += core/subdev/therm/nv50.o
-nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
nouveau-y += core/subdev/timer/base.o
nouveau-y += core/subdev/timer/nv04.o
nouveau-y += core/subdev/vm/base.o
@@ -132,6 +150,7 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/base.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
nouveau-y += core/engine/disp/nv84.o
@@ -141,11 +160,13 @@ nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
nouveau-y += core/engine/disp/hdanvd0.o
nouveau-y += core/engine/disp/hdminv84.o
nouveau-y += core/engine/disp/hdminva3.o
nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
nouveau-y += core/engine/disp/sornv50.o
nouveau-y += core/engine/disp/sornv94.o
nouveau-y += core/engine/disp/sornvd0.o
@@ -194,7 +215,8 @@ nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
-nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
@@ -216,7 +238,10 @@ nouveau-y += nouveau_mem.o
# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+ifdef CONFIG_X86
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 8bbb58f94a19..295c22165eac 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -99,3 +99,13 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend)
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
}
+
+const char *
+nouveau_client_name(void *obj)
+{
+ const char *client_name = "unknown";
+ struct nouveau_client *client = nouveau_client(obj);
+ if (client)
+ client_name = client->name;
+ return client_name;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index 7cc7133d82de..dd434790ccc4 100644
--- a/drivers/gpu/drm/nouveau/core/core/enum.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -40,14 +40,15 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value)
return NULL;
}
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
en = nouveau_enum_find(en, value);
if (en)
- printk("%s", en->name);
+ pr_cont("%s", en->name);
else
- printk("(unknown enum 0x%08x)", value);
+ pr_cont("(unknown enum 0x%08x)", value);
+ return en;
}
void
@@ -55,7 +56,7 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
while (bf->name) {
if (value & bf->mask) {
- printk(" %s", bf->name);
+ pr_cont(" %s", bf->name);
value &= ~bf->mask;
}
@@ -63,5 +64,5 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
}
if (value)
- printk(" (unknown bits 0x%08x)", value);
+ pr_cont(" (unknown bits 0x%08x)", value);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 000000000000..6d01e0f0fc8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+static void
+nouveau_event_put_locked(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ if (!--event->index[index].refs)
+ event->disable(event, index);
+ list_del(&handler->head);
+}
+
+void
+nouveau_event_put(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr)
+ nouveau_event_put_locked(event, index, handler);
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_event *event, int index,
+ struct nouveau_eventh *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event->lock, flags);
+ if (index < event->index_nr) {
+ list_add(&handler->head, &event->index[index].list);
+ if (!event->index[index].refs++)
+ event->enable(event, index);
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+ struct nouveau_eventh *handler, *temp;
+ unsigned long flags;
+
+ if (index >= event->index_nr)
+ return;
+
+ spin_lock_irqsave(&event->lock, flags);
+ list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
+ if (handler->func(handler, index) == NVKM_EVENT_DROP) {
+ nouveau_event_put_locked(event, index, handler);
+ }
+ }
+ spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+ struct nouveau_event *event = *pevent;
+ if (event) {
+ kfree(event);
+ *pevent = NULL;
+ }
+}
+
+int
+nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+{
+ struct nouveau_event *event;
+ int i;
+
+ event = *pevent = kzalloc(sizeof(*event) + index_nr *
+ sizeof(event->index[0]), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ spin_lock_init(&event->lock);
+ for (i = 0; i < index_nr; i++)
+ INIT_LIST_HEAD(&event->index[i].list);
+ event->index_nr = index_nr;
+ return 0;
+}
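
The new event code keeps one handler list and one reference count per index, and only touches the hardware on the 0->1 and 1->0 transitions of that count. Below is a standalone userspace sketch of that refcount-gated enable/disable pattern; all names are illustrative rather than the driver's API, and the real code additionally serialises everything with event->lock.

/* Standalone sketch of the pattern used by nouveau_event_get()/put() above:
 * the enable/disable callbacks fire only on the first and last listener. */
#include <stdio.h>
#include <stddef.h>

struct handler {
	struct handler *next;
	void (*func)(int index);
};

struct event_slot {
	int refs;
	struct handler *list;
};

static void hw_enable(int index)  { printf("enable source %d\n", index); }
static void hw_disable(int index) { printf("disable source %d\n", index); }

static void event_get(struct event_slot *slot, int index, struct handler *h)
{
	h->next = slot->list;
	slot->list = h;
	if (!slot->refs++)		/* first listener: turn the source on */
		hw_enable(index);
}

static void event_put(struct event_slot *slot, int index, struct handler *h)
{
	struct handler **p;

	for (p = &slot->list; *p; p = &(*p)->next) {
		if (*p == h) {
			*p = h->next;
			break;
		}
	}
	if (!--slot->refs)		/* last listener: turn the source off */
		hw_disable(index);
}

static void on_vblank(int index) { printf("vblank on %d\n", index); }

int main(void)
{
	struct event_slot slot = { 0, NULL };
	struct handler h = { NULL, on_vblank };

	event_get(&slot, 0, &h);	/* prints "enable source 0" */
	slot.list->func(0);		/* simulated trigger */
	event_put(&slot, 0, &h);	/* prints "disable source 0" */
	return 0;
}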
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 283248c7b050..d6dc2a65ccd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/falcon.h>
#include <core/class.h>
#include <core/enum.h>
@@ -100,8 +101,9 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ mthd, data);
nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index b97490512723..5bc021f471f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -126,10 +127,11 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
chid = pfifo->chid(pfifo, engctx);
if (stat) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
- printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, mthd, data);
+ pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ mthd, data);
}
nv_wr32(priv, 0x102130, stat);
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 21986f3bf0c8..8bf8955051d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
@@ -102,8 +103,9 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000040) {
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, mthd, data);
nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 000000000000..7a5cae42834f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ nouveau_event_destroy(&disp->vblank);
+ nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int heads,
+ const char *intname, const char *extname,
+ int length, void **pobject)
+{
+ struct nouveau_disp *disp;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true,
+ intname, extname, length, pobject);
+ disp = *pobject;
+ if (ret)
+ return ret;
+
+ return nouveau_event_create(heads, &disp->vblank);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 000000000000..fa27b02ff829
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include "dport.h"
+
+#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
+ dp->outp->hasht, dp->outp->hashm, ##args)
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+ const struct nouveau_dp_func *func;
+ struct nouveau_disp *disp;
+ struct dcb_output *outp;
+ struct nvbios_dpout info;
+ u8 version;
+ struct nouveau_i2c_port *aux;
+ int head;
+ u8 dpcd[4];
+ int link_nr;
+ u32 link_bw;
+ u8 stat[6];
+ u8 conf[4];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+ struct nouveau_disp *disp = dp->disp;
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+ u32 lnkcmp;
+ u8 sink[2];
+
+ DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+ /* set desired link configuration on the sink */
+ sink[0] = dp->link_bw / 27000;
+ sink[1] = dp->link_nr;
+ if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+ nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+
+ /* set desired link configuration on the source */
+ if ((lnkcmp = dp->info.lnkcmp)) {
+ if (dp->version < 0x30) {
+ while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+ lnkcmp += 4;
+ init.offset = nv_ro16(bios, lnkcmp + 2);
+ } else {
+ while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+ lnkcmp += 3;
+ init.offset = nv_ro16(bios, lnkcmp + 1);
+ }
+
+ nvbios_exec(&init);
+ }
+
+ return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+ dp->link_nr, dp->link_bw / 27000,
+ dp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+ u8 sink_tp;
+
+ DBG("training pattern %d\n", pattern);
+ dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+
+ nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+ sink_tp |= pattern;
+ nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp)
+{
+ int i;
+
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpre = (lane & 0x0c) >> 2;
+ u8 lvsw = (lane & 0x03) >> 0;
+
+ dp->conf[i] = (lpre << 3) | lvsw;
+ if (lvsw == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
+ if (lpre == 3)
+ dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+
+ DBG("config lane %d %02x\n", i, dp->conf[i]);
+ dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+ }
+
+ return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, u32 delay)
+{
+ int ret;
+
+ udelay(delay);
+
+ ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+ if (ret)
+ return ret;
+
+ DBG("status %*ph\n", 6, dp->stat);
+ return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+ bool cr_done = false, abort = false;
+ int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 1);
+
+ do {
+ if (dp_link_train_commit(dp) ||
+ dp_link_train_update(dp, 100))
+ break;
+
+ cr_done = true;
+ for (i = 0; i < dp->link_nr; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+ cr_done = false;
+ if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+ abort = true;
+ break;
+ }
+ }
+
+ if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+ voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+ tries = 0;
+ }
+ } while (!cr_done && !abort && ++tries < 5);
+
+ return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+ bool eq_done, cr_done = true;
+ int tries = 0, i;
+
+ dp_set_training_pattern(dp, 2);
+
+ do {
+ if (dp_link_train_update(dp, 400))
+ break;
+
+ eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+ for (i = 0; i < dp->link_nr && eq_done; i++) {
+ u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+ cr_done = false;
+ if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+ !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+ eq_done = false;
+ }
+
+ if (dp_link_train_commit(dp))
+ break;
+ } while (!eq_done && cr_done && ++tries <= 5);
+
+ return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* set desired spread */
+ if (spread)
+ init.offset = dp->info.script[2];
+ else
+ init.offset = dp->info.script[3];
+ nvbios_exec(&init);
+
+ /* pre-train script */
+ init.offset = dp->info.script[0];
+ nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+ struct nvbios_init init = {
+ .subdev = nv_subdev(dp->disp),
+ .bios = nouveau_bios(dp->disp),
+ .outp = dp->outp,
+ .crtc = dp->head,
+ .execute = 1,
+ };
+
+ /* post-train script */
+ init.offset = dp->info.script[1],
+ nvbios_exec(&init);
+}
+
+int
+nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
+ struct dcb_output *outp, int head, u32 datarate)
+{
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ struct dp_state _dp = {
+ .disp = disp,
+ .func = func,
+ .outp = outp,
+ .head = head,
+ }, *dp = &_dp;
+ const u32 bw_list[] = { 270000, 162000, 0 };
+ const u32 *link_bw = bw_list;
+ u8 hdr, cnt, len;
+ u32 data;
+ int ret;
+
+ /* find the bios displayport data relevant to this output */
+ data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
+ &hdr, &cnt, &len, &dp->info);
+ if (!data) {
+ ERR("bios data not found\n");
+ return -EINVAL;
+ }
+
+ /* acquire the aux channel and fetch some info about the display */
+ if (outp->location)
+ dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+ else
+ dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
+ if (!dp->aux) {
+ ERR("no aux channel?!\n");
+ return -ENODEV;
+ }
+
+ ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
+ if (ret) {
+ ERR("failed to read DPCD\n");
+ return ret;
+ }
+
+ /* adjust required bandwidth for 8B/10B coding overhead */
+ datarate = (datarate / 8) * 10;
+
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+
+ /* start off at highest link rate supported by encoder and display */
+ while (*link_bw > (dp->dpcd[1] * 27000))
+ link_bw++;
+
+ while (link_bw[0]) {
+ /* find minimum required lane count at this link rate */
+ dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
+ while ((dp->link_nr >> 1) * link_bw[0] > datarate)
+ dp->link_nr >>= 1;
+
+ /* drop link rate to minimum with this lane count */
+ while ((link_bw[1] * dp->link_nr) > datarate)
+ link_bw++;
+ dp->link_bw = link_bw[0];
+
+ /* program selected link configuration */
+ ret = dp_set_link_config(dp);
+ if (ret == 0) {
+ /* attempt to train the link at this configuration */
+ memset(dp->stat, 0x00, sizeof(dp->stat));
+ if (!dp_link_train_cr(dp) &&
+ !dp_link_train_eq(dp))
+ break;
+ } else
+ if (ret >= 1) {
+ /* dp_set_link_config() handled training */
+ break;
+ }
+
+ /* retry at lower rate */
+ link_bw++;
+ }
+
+ /* finish link training */
+ dp_set_training_pattern(dp, 0);
+
+ /* execute post-train script from vbios */
+ dp_link_train_fini(dp);
+ return true;
+}
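
The lane/rate selection in nouveau_dp_train() above is plain arithmetic: the required data rate is first inflated by 10/8 for 8b/10b coding, the lane count is halved while half the lanes would still carry the stream, and the link rate is dropped to the lowest table entry that still suffices. The following is a standalone illustration of that calculation, not driver code; the example value and units follow the driver's internal convention.

/* Standalone illustration of the bandwidth arithmetic in nouveau_dp_train(). */
#include <stdio.h>

int main(void)
{
	const unsigned int bw_list[] = { 270000, 162000, 0 };
	const unsigned int *link_bw = bw_list;
	unsigned int link_nr = 4;			/* sink's max lane count */
	unsigned int datarate = 148500 * 24 / 8;	/* example: 148.5MHz, 24bpp */

	/* account for 8b/10b coding overhead, as the driver does */
	datarate = datarate / 8 * 10;

	/* minimum lane count that still carries the stream at this rate */
	while ((link_nr >> 1) * link_bw[0] > datarate)
		link_nr >>= 1;

	/* lowest link rate that still suffices with that lane count */
	while (link_bw[1] * link_nr > datarate)
		link_bw++;

	printf("%u lanes at %u per lane\n", link_nr, link_bw[0]);
	return 0;
}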
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 000000000000..0e1bbd18ff6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,78 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00 0x00000
+#define DPCD_RC00_DPCD_REV 0xff
+#define DPCD_RC01 0x00001
+#define DPCD_RC01_MAX_LINK_RATE 0xff
+#define DPCD_RC02 0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
+#define DPCD_RC02_MAX_LANE_COUNT 0x1f
+#define DPCD_RC03 0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD 0x01
+
+/* DPCD Link Configuration */
+#define DPCD_LC00 0x00100
+#define DPCD_LC00_LINK_BW_SET 0xff
+#define DPCD_LC01 0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
+#define DPCD_LC01_LANE_COUNT_SET 0x1f
+#define DPCD_LC02 0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET 0x03
+#define DPCD_LC03(l) ((l) + 0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
+#define DPCD_LC03_MAX_SWING_REACHED 0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02 0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED 0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS02_LANE1_CR_DONE 0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED 0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS02_LANE0_CR_DONE 0x01
+#define DPCD_LS03 0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED 0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE 0x20
+#define DPCD_LS03_LANE3_CR_DONE 0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED 0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE 0x02
+#define DPCD_LS03_LANE2_CR_DONE 0x01
+#define DPCD_LS04 0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED 0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED 0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE 0x01
+#define DPCD_LS06 0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS 0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING 0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS 0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING 0x03
+#define DPCD_LS07 0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS 0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
+
+struct nouveau_disp;
+struct dcb_output;
+
+struct nouveau_dp_func {
+ int (*pattern)(struct nouveau_disp *, struct dcb_output *,
+ int head, int pattern);
+ int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int link_nr, int link_bw, bool enh_frame);
+ int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+ int lane, int swing, int preem);
+};
+
+extern const struct nouveau_dp_func nv94_sor_dp_func;
+extern const struct nouveau_dp_func nvd0_sor_dp_func;
+extern const struct nouveau_dp_func nv50_pior_dp_func;
+
+int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
+ struct dcb_output *, int, u32);
+
+#endif
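
The LANE0_*/LANE1_* (and LANE2_*/LANE3_*) masks above reflect that DPCD packs two lanes' status into each byte, four bits per lane, which is why the training code indexes them as (stat[i >> 1] >> ((i & 1) * 4)) & 0xf. A small standalone demo of that unpacking follows; the helper name and macros are illustrative only.

/* Unpacking the 4-bit per-lane status nibbles described by DPCD_LS02/LS03. */
#include <stdio.h>

#define LANE_CR_DONE		0x01
#define LANE_CHANNEL_EQ_DONE	0x02
#define LANE_SYMBOL_LOCKED	0x04

static unsigned char dp_lane_status(const unsigned char *stat, int lane)
{
	return (stat[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
}

int main(void)
{
	/* stat[0] = DPCD_LS02 (lanes 0/1), stat[1] = DPCD_LS03 (lanes 2/3) */
	unsigned char stat[2] = { 0x77, 0x17 };
	int lane;

	for (lane = 0; lane < 4; lane++) {
		unsigned char s = dp_lane_status(stat, lane);
		printf("lane %d: cr=%d eq=%d lock=%d\n", lane,
		       !!(s & LANE_CR_DONE),
		       !!(s & LANE_CHANNEL_EQ_DONE),
		       !!(s & LANE_SYMBOL_LOCKED));
	}
	return 0;
}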
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 1c919f2af89f..05e903f08a36 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,21 +24,33 @@
#include <engine/disp.h>
+#include <core/event.h>
+#include <core/class.h>
+
struct nv04_disp_priv {
struct nouveau_disp base;
};
static struct nouveau_oclass
nv04_disp_sclass[] = {
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs },
{},
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
+}
+
static void
-nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+nv04_disp_vblank_disable(struct nouveau_event *event, int head)
{
- struct nouveau_disp *disp = &priv->base;
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
}
static void
@@ -49,25 +61,25 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 crtc1 = nv_rd32(priv, 0x602100);
if (crtc0 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nv04_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
}
static int
nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv04_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -75,6 +87,9 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv04_disp_sclass;
nv_subdev(priv)->intr = nv04_disp_intr;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv04_disp_vblank_enable;
+ priv->base.vblank->disable = nv04_disp_vblank_disable;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index ca1a7d76a95b..5fa13267bd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -27,7 +27,6 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/bios.h>
@@ -37,7 +36,6 @@
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include "nv50.h"
@@ -335,7 +333,7 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -374,7 +372,7 @@ nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head > 1)
+ if (size < sizeof(*args) || args->head > 1)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -543,6 +541,18 @@ nv50_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (1 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x61002c, (1 << head), (0 << head));
+}
+
static int
nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -559,6 +569,9 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+ priv->base.vblank->disable = nv50_disp_base_vblank_disable;
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -613,7 +626,7 @@ nv50_disp_base_init(struct nouveau_object *object)
nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
}
- /* ... EXT caps */
+ /* ... PIOR caps */
for (i = 0; i < 3; i++) {
tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
@@ -665,6 +678,9 @@ nv50_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -756,50 +772,6 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv)
}
}
-static void
-nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010,
- lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
- bar->flush(bar);
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
- } else {
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
- }
- }
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
- }
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
-
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
-}
-
static u16
exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -811,8 +783,8 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (outp < 4) {
type = DCB_OUTPUT_ANALOG;
mask = 0;
- } else {
- outp -= 4;
+ } else
+ if (outp < 8) {
switch (ctrl & 0x00000f00) {
case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -824,6 +796,17 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
return 0x0000;
}
+ outp -= 4;
+ } else {
+ outp = outp - 8;
+ type = 0x0010;
+ mask = 0;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type |= priv->pior.type[outp]; break;
+ default:
+ nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
}
mask = 0x00c0 & (mask << 6);
@@ -834,6 +817,10 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
if (!data)
return 0x0000;
+ /* off-chip encoders require matching the exact encoder type */
+ if (dcb->location != 0)
+ type |= dcb->extdev << 8;
+
return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
}
@@ -848,9 +835,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
u32 ctrl = 0x00000000;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -865,6 +854,13 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
return false;
i--;
@@ -894,13 +890,15 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
struct nvbios_outp info1;
struct nvbios_ocfg info2;
u8 ver, hdr, cnt, len;
- u16 data, conf;
u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
int i;
+ /* DAC */
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+ /* SOR */
if (!(ctrl & (1 << head))) {
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
@@ -915,34 +913,46 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
}
}
+ /* PIOR */
+ if (!(ctrl & (1 << head))) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+ i += 8;
+ }
+
if (!(ctrl & (1 << head)))
- return 0x0000;
+ return conf;
i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)
- return 0x0000;
-
- switch (outp->type) {
- case DCB_OUTPUT_TMDS:
- conf = (ctrl & 0x00000f00) >> 8;
- if (pclk >= 165000)
- conf |= 0x0100;
- break;
- case DCB_OUTPUT_LVDS:
- conf = priv->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
+ return conf;
+
+ if (outp->location == 0) {
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+ } else {
conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
- default:
- conf = 0x00ff;
- break;
+ pclk = pclk / 2;
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
@@ -954,32 +964,37 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000060) >> 5) - 1;
- if (head >= 0) {
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 1);
- }
+ exec_script(priv, head, 1);
+}
- nv_wr32(priv, 0x610024, 0x00000010);
- nv_wr32(priv, 0x610030, 0x80000000);
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
}
static void
-nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
- struct dcb_output *outp, u32 pclk)
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
{
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
@@ -1085,53 +1100,54 @@ nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
}
static void
-nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
{
struct dcb_output outp;
- u32 addr, mask, data;
- int head;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 hval, hreg = 0x614200 + (head * 0x800);
+ u32 oval, oreg;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
- /* finish detaching encoder? */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0)
- exec_script(priv, head, 2);
-
- /* check whether a vpll change is required */
- head = ffs((super & 0x00000600) >> 9) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, datarate);
}
- nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
- }
-
- /* (re)attach the relevant OR to the head */
- head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
- if (conf) {
- if (outp.type == DCB_OUTPUT_ANALOG) {
- addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
- mask = 0xffffffff;
- data = 0x00000000;
- } else {
- if (outp.type == DCB_OUTPUT_DP)
- nv50_disp_intr_unk20_dp(priv, &outp, pclk);
- addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
- mask = 0x00000707;
- data = (conf & 0x0100) ? 0x0101 : 0x0000;
- }
-
- nv_mask(priv, addr, mask, data);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
+
+ if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
+ oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000000;
+ hval = 0x00000000;
+ } else
+ if (!outp.location) {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
+ oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ hval = 0x00000000;
+ } else {
+ oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
+ oval = 0x00000001;
+ hval = 0x00000001;
}
- }
- nv_wr32(priv, 0x610024, 0x00000020);
- nv_wr32(priv, 0x610030, 0x80000000);
+ nv_mask(priv, hreg, 0x0000000f, hval);
+ nv_mask(priv, oreg, 0x00000707, oval);
+ }
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -1143,7 +1159,7 @@ nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
* programmed for DisplayPort.
*/
static void
-nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
{
struct nouveau_bios *bios = nouveau_bios(priv);
const int link = !(outp->sorconf.link & 1);
@@ -1157,35 +1173,79 @@ nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
}
static void
-nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
{
- int head = ffs((super & 0x00000180) >> 7) - 1;
- if (head >= 0) {
- struct dcb_output outp;
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
- if (outp.type == DCB_OUTPUT_TMDS)
- nv50_disp_intr_unk40_tmds(priv, &outp);
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
+ if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_0_tmds(priv, &outp);
+ else
+ if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp.or) - 1) * 0x08;
+ u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
+ u32 datarate;
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30 / 8; break;
+ case 5: datarate = pclk * 24 / 8; break;
+ case 2:
+ default:
+ datarate = pclk * 18 / 8;
+ break;
+ }
+
+ nouveau_dp_train(&priv->base, priv->pior.dp,
+ &outp, head, datarate);
}
}
-
- nv_wr32(priv, 0x610024, 0x00000040);
- nv_wr32(priv, 0x610030, 0x80000000);
}
-static void
-nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
u32 super = nv_rd32(priv, 0x610030);
+ int head;
- nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
- if (intr1 & 0x00000010)
- nv50_disp_intr_unk10(priv, super);
- if (intr1 & 0x00000020)
- nv50_disp_intr_unk20(priv, super);
- if (intr1 & 0x00000040)
- nv50_disp_intr_unk40(priv, super);
+ if (priv->super & 0x00000010) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000020 << head)))
+ continue;
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk10_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000020) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000200 << head)))
+ continue;
+ nv50_disp_intr_unk20_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk20_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000040) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(super & (0x00000080 << head)))
+ continue;
+ nv50_disp_intr_unk40_0(priv, head);
+ }
+ }
+
+ nv_wr32(priv, 0x610030, 0x80000000);
}
void
@@ -1201,19 +1261,21 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nv50_disp_intr_vblank(priv, 0);
+ nouveau_event_trigger(priv->base.vblank, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nv50_disp_intr_vblank(priv, 1);
+ nouveau_event_trigger(priv->base.vblank, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
if (intr1 & 0x00000070) {
- nv50_disp_intr_super(priv, intr1);
+ priv->super = (intr1 & 0x00000070);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x610024, priv->super);
intr1 &= ~0x00000070;
}
}
@@ -1226,7 +1288,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -1235,16 +1297,17 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv50_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv50_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
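
With this change the supervisor interrupts are no longer processed inside nv50_disp_intr(): the handler latches the pending bits in priv->super and defers the work, and the worker recovers the priv structure from the embedded work_struct. Below is a standalone sketch of that container_of recovery step; work_struct is stubbed here, whereas the driver defers via INIT_WORK()/schedule_work().

/* Standalone illustration of recovering the enclosing object from an
 * embedded work item, as nv50_disp_intr_supervisor() does above. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stub, not the kernel type */

struct disp_priv {
	unsigned int super;		/* latched interrupt bits */
	struct work_struct supervisor;	/* embedded work item */
};

static void supervisor_work(struct work_struct *work)
{
	struct disp_priv *priv = container_of(work, struct disp_priv, supervisor);
	printf("supervisor 0x%08x\n", priv->super);
}

int main(void)
{
	struct disp_priv priv = { .super = 0x20 };
	/* the real driver schedules this from the interrupt handler;
	 * call it directly here */
	supervisor_work(&priv.supervisor);
	return 0;
}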
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index a6bb931450f1..1ae6ceb56704 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -3,16 +3,22 @@
#include <core/parent.h>
#include <core/namedb.h>
+#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <engine/dmaobj.h>
#include <engine/disp.h>
-struct dcb_output;
+#include "dport.h"
struct nv50_disp_priv {
struct nouveau_disp base;
struct nouveau_oclass *sclass;
+
+ struct work_struct supervisor;
+ u32 super;
+
struct {
int nr;
} head;
@@ -26,23 +32,15 @@ struct nv50_disp_priv {
int (*power)(struct nv50_disp_priv *, int sor, u32 data);
int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
- int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
- u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
- int head, u16 type, u16 mask, u32 data,
- struct dcb_output *);
- int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
- int lane, u16 type, u16 mask, u32 data,
- struct dcb_output *);
u32 lvdsconf;
+ const struct nouveau_dp_func *dp;
} sor;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ u8 type[3];
+ const struct nouveau_dp_func *dp;
+ } pior;
};
#define DAC_MTHD(n) (n), (n) + 0x03
@@ -81,6 +79,11 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
struct dcb_output *);
+#define PIOR_MTHD(n) (n), (n) + 0x03
+
+int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+
struct nv50_disp_base {
struct nouveau_parent base;
struct nouveau_ramht *ramht;
@@ -124,6 +127,7 @@ extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
extern struct nouveau_omthds nv84_disp_base_omthds[];
@@ -137,6 +141,7 @@ extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index fc84eacdfbec..d8c74c0883a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -46,6 +46,9 @@ nv84_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -63,7 +66,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -72,17 +75,18 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv84_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv84_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index ba9dfd4669a2..a66f949c1f84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -44,14 +44,11 @@ nv94_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -69,7 +66,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -78,22 +75,19 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv94_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nv94_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
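
Each chipset constructor now simply points priv->sor.dp (and priv->pior.dp) at a const function table rather than wiring up five separate dp_* hooks. A standalone sketch of that ops-table pattern follows, with simplified signatures that are illustrative only.

/* Minimal ops-table example mirroring the nouveau_dp_func rework. */
#include <stdio.h>

struct dp_func {
	int (*pattern)(int head, int pattern);
	int (*lnk_ctl)(int head, int link_nr, int link_bw, int enh);
};

static int nv94_pattern(int head, int pattern)
{ printf("nv94: head %d pattern %d\n", head, pattern); return 0; }
static int nv94_lnk_ctl(int head, int nr, int bw, int enh)
{ printf("nv94: head %d, %d lanes at bw index %d (enh=%d)\n", head, nr, bw, enh); return 0; }

static const struct dp_func nv94_dp_func = {
	.pattern = nv94_pattern,
	.lnk_ctl = nv94_lnk_ctl,
};

int main(void)
{
	const struct dp_func *dp = &nv94_dp_func;	/* chosen once in the ctor */
	dp->pattern(0, 1);
	dp->lnk_ctl(0, 4, 162000 / 27000, 1);
	return 0;
}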
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 5d63902cdeda..6cf8eefac368 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -53,7 +53,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -62,17 +62,18 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva0_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 2;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index e9192ca389fa..b75413169eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -45,14 +45,11 @@ nva3_disp_base_omthds[] = {
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
{},
};
@@ -70,7 +67,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_disp_priv *priv;
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
"display", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -79,23 +76,20 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nva3_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
priv->sclass = nva3_disp_sclass;
priv->head.nr = 2;
priv->dac.nr = 3;
priv->sor.nr = 4;
+ priv->pior.nr = 3;
priv->dac.power = nv50_dac_power;
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nva3_hda_eld;
priv->sor.hdmi = nva3_hdmi_ctrl;
- priv->sor.dp_train = nv94_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nv94_sor_dp_func;
+ priv->pior.power = nv50_pior_power;
+ priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 9e38ebff5fb3..788dd34ccb54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -27,12 +27,10 @@
#include <core/handle.h>
#include <core/class.h>
-#include <engine/software.h>
#include <engine/disp.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/bar.h>
#include <subdev/clock.h>
#include <subdev/bios.h>
@@ -230,7 +228,7 @@ nvd0_disp_sync_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -270,7 +268,7 @@ nvd0_disp_ovly_ctor(struct nouveau_object *parent,
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*data) || args->head >= priv->head.nr)
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
return -EINVAL;
ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -443,6 +441,18 @@ nvd0_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static void
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+ nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
static int
nvd0_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -459,6 +469,10 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ priv->base.vblank->priv = priv;
+ priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
+ priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
+
return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
}
@@ -609,13 +623,24 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
}
static bool
-exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+exec_script(struct nv50_disp_priv *priv, int head, int id)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info;
struct dcb_output dcb;
u8 ver, hdr, cnt, len;
+ u32 ctrl = 0x00000000;
u16 data;
+ int outp;
+
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
+ return false;
data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@@ -635,21 +660,31 @@ exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
}
static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
- u32 ctrl, int id, u32 pclk)
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+ u32 pclk, struct dcb_output *dcb)
{
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_outp info1;
struct nvbios_ocfg info2;
- struct dcb_output dcb;
u8 ver, hdr, cnt, len;
- u16 data, conf;
+ u32 ctrl = 0x00000000;
+ u32 data, conf = ~0;
+ int outp;
- data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
- if (data == 0x0000)
+ for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+ ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (outp == 8)
return false;
- switch (dcb.type) {
+ data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return conf;
+
+ switch (dcb->type) {
case DCB_OUTPUT_TMDS:
conf = (ctrl & 0x00000f00) >> 8;
if (pclk >= 165000)
@@ -668,46 +703,52 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
}
data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
- if (data) {
+ if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
.offset = data,
- .outp = &dcb,
+ .outp = dcb,
.crtc = head,
.execute = 1,
};
- if (nvbios_exec(&init))
- return 0x0000;
- return conf;
+ nvbios_exec(&init);
}
}
- return 0x0000;
+ return conf;
}
static void
-nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
- int i;
+ exec_script(priv, head, 1);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 1);
- }
+static void
+nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+{
+ exec_script(priv, head, 2);
+}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+static void
+nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+{
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ if (pclk)
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
}
static void
-nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+ struct dcb_output *outp)
{
+ const int or = ffs(outp->or) - 1;
const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
@@ -750,105 +791,102 @@ nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
}
static void
-nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
- u32 pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
- if (mcc & (1 << head))
- exec_script(priv, head, i, mcc, 2);
- }
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+ if (conf != ~0) {
+ u32 addr, data;
+
+ if (outp.type == DCB_OUTPUT_DP) {
+ u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30 / 8; break;
+ case 5: pclk = pclk * 24 / 8; break;
+ case 2:
+ default:
+ pclk = pclk * 18 / 8;
+ break;
+ }
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- struct nouveau_clock *clk = nouveau_clock(priv);
- clk->pll_set(clk, PLL_VPLL0 + head, pclk);
- }
+ nouveau_dp_train(&priv->base, priv->sor.dp,
+ &outp, head, pclk);
+ }
- nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+ exec_clkcmp(priv, head, 0, pclk, &outp);
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
- if (mcp & (1 << head)) {
- if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
- u32 addr, mask, data = 0x00000000;
- if (i < 4) {
- addr = 0x612280 + ((i - 0) * 0x800);
- mask = 0xffffffff;
- } else {
- switch (mcp & 0x00000f00) {
- case 0x00000800:
- case 0x00000900:
- nvd0_display_unk2_calc_tu(priv, head, i - 4);
- break;
- default:
- break;
- }
-
- addr = 0x612300 + ((i - 4) * 0x800);
- mask = 0x00000707;
- if (cfg & 0x00000100)
- data = 0x00000101;
- }
- nv_mask(priv, addr, mask, data);
- }
- break;
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
+ addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
}
- }
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
+ nv_mask(priv, addr, 0x00000707, data);
+ }
}
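
For DP outputs the handler scales the pixel clock before handing it to link training. Reading the switch above, bits 9:6 of 0x660404 appear to select the pipe depth (6 and 5 presumably meaning 30 and 24 bits per pixel, anything else 18 bpp), so the value passed to nouveau_dp_train() becomes pclk * bpp / 8. A small restatement of that arithmetic; the bpp interpretation is an inference, not something stated in the patch:

    /* Sketch: pixel clock -> bandwidth figure for DP link training,
     * mirroring the switch in nvd0_disp_intr_unk2_2(). */
    static u32 dp_bandwidth(u32 pclk, u32 sync)
    {
    	switch ((sync & 0x000003c0) >> 6) {
    	case 6:  return pclk * 30 / 8;	/* assumed 30 bpp */
    	case 5:  return pclk * 24 / 8;	/* assumed 24 bpp */
    	case 2:
    	default: return pclk * 18 / 8;	/* assumed 18 bpp */
    	}
    }
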
static void
-nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
- int pclk, i;
-
- pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ exec_clkcmp(priv, head, 1, pclk, &outp);
+}
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
- if (mcp & (1 << head))
- exec_clkcmp(priv, head, i, mcp, 1, pclk);
+void
+nvd0_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp_priv *priv =
+ container_of(work, struct nv50_disp_priv, supervisor);
+ u32 mask[4];
+ int head;
+
+ nv_debug(priv, "supervisor %08x\n", priv->super);
+ for (head = 0; head < priv->head.nr; head++) {
+ mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
+ nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
}
- nv_wr32(priv, 0x6101d4, 0x00000000);
- nv_wr32(priv, 0x6109d4, 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
- struct nouveau_bar *bar = nouveau_bar(priv);
- struct nouveau_disp *disp = &priv->base;
- struct nouveau_software_chan *chan, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
- if (chan->vblank.crtc != crtc)
- continue;
-
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
-
- list_del(&chan->vblank.head);
- if (disp->vblank.put)
- disp->vblank.put(disp->vblank.data, crtc);
+ if (priv->super & 0x00000001) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk1_0(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000002) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_0(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00010000))
+ continue;
+ nvd0_disp_intr_unk2_1(priv, head);
+ }
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk2_2(priv, head);
+ }
+ } else
+ if (priv->super & 0x00000004) {
+ for (head = 0; head < priv->head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvd0_disp_intr_unk4_0(priv, head);
+ }
}
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
- if (disp->vblank.notify)
- disp->vblank.notify(disp->vblank.data, crtc);
+ for (head = 0; head < priv->head.nr; head++)
+ nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
}
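
The per-stage handlers no longer run from interrupt context: nvd0_disp_intr() (next hunk) latches the stage bits into priv->super and schedules a work item, and nvd0_disp_intr_supervisor() does the slow work (BIOS scripts, PLL programming) in process context before acking through 0x6101d4/0x6101d0. A generic sketch of that top-half/bottom-half split, with hypothetical demo_* names:

    #include <linux/workqueue.h>

    /* Hypothetical state; the real driver keeps 'super' and 'supervisor'
     * in struct nv50_disp_priv. */
    struct demo_disp {
    	u32 super;			/* latched supervisor stage bits */
    	struct work_struct supervisor;	/* bottom half */
    };

    static void demo_supervisor(struct work_struct *work)
    {
    	struct demo_disp *disp = container_of(work, struct demo_disp, supervisor);
    	/* slow work runs here, then the hardware is acked */
    	(void)disp;
    }

    static void demo_isr(struct demo_disp *disp, u32 stat)
    {
    	if (stat & 0x00000007) {
    		disp->super = stat & 0x00000007;   /* remember which stage fired */
    		schedule_work(&disp->supervisor);  /* defer to process context */
    	}
    }

    /* constructor side: INIT_WORK(&disp->supervisor, demo_supervisor); */
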
void
@@ -884,27 +922,11 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (intr & 0x00100000) {
u32 stat = nv_rd32(priv, 0x6100ac);
- u32 mask = 0, crtc = ~0;
-
- while (!mask && ++crtc < priv->head.nr)
- mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
-
- if (stat & 0x00000001) {
- nv_wr32(priv, 0x6100ac, 0x00000001);
- nvd0_display_unk1_handler(priv, crtc, mask);
- stat &= ~0x00000001;
- }
-
- if (stat & 0x00000002) {
- nv_wr32(priv, 0x6100ac, 0x00000002);
- nvd0_display_unk2_handler(priv, crtc, mask);
- stat &= ~0x00000002;
- }
-
- if (stat & 0x00000004) {
- nv_wr32(priv, 0x6100ac, 0x00000004);
- nvd0_display_unk4_handler(priv, crtc, mask);
- stat &= ~0x00000004;
+ if (stat & 0x00000007) {
+ priv->super = (stat & 0x00000007);
+ schedule_work(&priv->supervisor);
+ nv_wr32(priv, 0x6100ac, priv->super);
+ stat &= ~0x00000007;
}
if (stat) {
@@ -920,7 +942,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nvd0_disp_intr_vblank(priv, i);
+ nouveau_event_trigger(priv->base.vblank, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -933,10 +955,11 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -944,8 +967,9 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nvd0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nvd0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -953,14 +977,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 259537c4587e..20725b363d58 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -51,10 +51,11 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
int ret;
- ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
- "display", &priv);
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -62,8 +63,9 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nve0_disp_base_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
priv->sclass = nve0_disp_sclass;
- priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->head.nr = heads;
priv->dac.nr = 3;
priv->sor.nr = 4;
priv->dac.power = nv50_dac_power;
@@ -71,14 +73,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp_train = nvd0_sor_dp_train;
- priv->sor.dp_train_init = nv94_sor_dp_train_init;
- priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
- priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
- priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
-
- INIT_LIST_HEAD(&priv->base.vblank.list);
- spin_lock_init(&priv->base.vblank.lock);
+ priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
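
Both constructors now hang a single const function table off priv->sor.dp instead of five separate dp_train/dp_lnkctl/dp_drvctl hooks; the nv94, nvd0 and PIOR instances of that table appear later in this patch. A reduced sketch of its shape, showing only the three members visible here:

    /* Sketch: one ops table per encoder family instead of per-op fields.
     * The real struct nouveau_dp_func lives in the display core headers. */
    struct demo_dp_func {
    	int (*pattern)(struct nouveau_disp *, struct dcb_output *,
    		       int head, int pattern);
    	int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *,
    		       int head, int link_nr, int link_bw, bool enh);
    	int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *,
    		       int head, int lane, int swing, int preem);
    };

    /* usage, as in the constructors above: priv->sor.dp = &nvd0_sor_dp_func; */
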
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
new file mode 100644
index 000000000000..2c8ce351b52d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+#include <subdev/i2c.h>
+
+#include "nv50.h"
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+static struct nouveau_i2c_port *
+nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+}
+
+static int
+nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->pattern)
+ ret = port->func->pattern(port, pattern);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane_nr, int link_bw, bool enh)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port && port->func->lnk_ctl)
+ ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
+
+ return ret;
+}
+
+static int
+nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int vsw, int pre)
+{
+ struct nouveau_i2c_port *port;
+ int ret = -EINVAL;
+
+ port = nv50_pior_dp_find(disp, outp);
+ if (port) {
+ if (port->func->drv_ctl)
+ ret = port->func->drv_ctl(port, lane, vsw, pre);
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+const struct nouveau_dp_func
+nv50_pior_dp_func = {
+ .pattern = nv50_pior_dp_pattern,
+ .lnk_ctl = nv50_pior_dp_lnk_ctl,
+ .drv_ctl = nv50_pior_dp_drv_ctl,
+};
+
+/******************************************************************************
+ * General PIOR handling
+ *****************************************************************************/
+int
+nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+ nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
+ const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
+ mthd &= ~NV50_DISP_PIOR_MTHD_OR;
+ switch (mthd) {
+ case NV50_DISP_PIOR_PWR:
+ ret = priv->pior.power(priv, or, data[0]);
+ priv->pior.type[or] = type;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
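
The new PIOR code drives DisplayPort through an external encoder's aux channel, so each hook first locates the i2c/aux port for the DCB entry and then calls an optional per-port callback. A missing callback is treated as success for pattern/drv_ctl but as -EINVAL for lnk_ctl, presumably because some external encoders handle those steps internally. A reduced sketch of that optional-hook pattern (mirrors nv50_pior_dp_pattern() above):

    static int call_optional_pattern(struct nouveau_i2c_port *port, int pattern)
    {
    	if (!port)
    		return -EINVAL;			/* no aux port at all */
    	if (!port->func->pattern)
    		return 0;			/* encoder needs no help: succeed */
    	return port->func->pattern(port, pattern);
    }
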
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 39b6b67732d0..ab1e918469a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -79,31 +79,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
ret = 0;
break;
- case NV94_DISP_SOR_DP_TRAIN:
- switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
- case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
- ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
- ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
- ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
- break;
- default:
- break;
- }
- break;
- case NV94_DISP_SOR_DP_LNKCTL:
- ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
- break;
- case NV94_DISP_SOR_DP_DRVCTL(0):
- case NV94_DISP_SOR_DP_DRVCTL(1):
- case NV94_DISP_SOR_DP_DRVCTL(2):
- case NV94_DISP_SOR_DP_DRVCTL(3):
- ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
- type, mask, data, &outp);
- break;
default:
BUG_ON(1);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index f6edd009762e..7ec4ee83fb64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -33,124 +33,53 @@
#include "nv50.h"
static inline u32
-nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+nv94_sor_soff(struct dcb_output *outp)
{
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv94[] = { 16, 8, 0, 24 };
- if (nv_device(priv)->chipset == 0xaf)
- return nvaf[lane];
- return nv94[lane];
+ return (ffs(outp->or) - 1) * 0x800;
}
-int
-nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_loff(struct dcb_output *outp)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
- init.offset = info.script[2];
- else
- init.offset = info.script[3];
- nvbios_exec(&init);
-
- init.offset = info.script[0];
- nvbios_exec(&init);
- }
-
- return 0;
+ return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
}
-int
-nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nvbios_dpout info;
- u8 ver, hdr, cnt, len;
- u16 outp;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = info.script[1],
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- nvbios_exec(&init);
- }
-
- return 0;
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
}
-int
-nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
-int
-nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nv94_sor_soff(outp);
+ const u32 loff = nv94_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- /* -> 10Khz units */
- link_bw *= 2700;
-
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (link_bw < nv_ro16(bios, info.lnkcmp))
- info.lnkcmp += 4;
- init.offset = nv_ro16(bios, info.lnkcmp + 2);
-
- nvbios_exec(&init);
- }
-
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
- if (link_bw > 16200)
+ if (link_bw > 0x06)
clksor |= 0x00040000;
for (i = 0; i < link_nr; i++)
@@ -162,24 +91,25 @@ nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nv94_sor_loff(outp);
u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -188,3 +118,10 @@ nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
return 0;
}
+
+const struct nouveau_dp_func
+nv94_sor_dp_func = {
+ .pattern = nv94_sor_dp_pattern,
+ .lnk_ctl = nv94_sor_dp_lnk_ctl,
+ .drv_ctl = nv94_sor_dp_drv_ctl,
+};
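
The rewritten SOR hooks derive their register offsets from the DCB entry instead of taking or/link parameters: soff selects the SOR block, and loff adds 0x80 when bit 0 of sorconf.link is clear (i.e. when the second link is in use). The link_bw comparison also appears to change units: it is now the raw DP bandwidth code (0x06 for 1.62 Gb/s, 0x0a for 2.7 Gb/s) rather than a value in 10 kHz units, hence "link_bw > 0x06" replacing "link_bw > 16200". A worked example of the offset math, assuming a DCB entry for SOR2 on link B:

    /*   outp->or = 0x4, outp->sorconf.link = 0x2
     *
     *   soff = (ffs(0x4) - 1) * 0x800     = 2 * 0x800        = 0x1000
     *   loff = soff + !(0x2 & 1) * 0x80   = 0x1000 + 0x80    = 0x1080
     *
     * so 0x61c10c + loff addresses the training-pattern field of SOR2, link B.
     */
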
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index c37ce7e29f5d..9e1d435d7282 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -33,59 +33,49 @@
#include "nv50.h"
static inline u32
+nvd0_sor_soff(struct dcb_output *outp)
+{
+ return (ffs(outp->or) - 1) * 0x800;
+}
+
+static inline u32
+nvd0_sor_loff(struct dcb_output *outp)
+{
+ return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+}
+
+static inline u32
nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
{
static const u8 nvd0[] = { 16, 8, 0, 24 };
return nvd0[lane];
}
-int
-nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
- u16 type, u16 mask, u32 data, struct dcb_output *info)
+static int
+nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int pattern)
{
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
- nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
-int
-nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int link_nr, int link_bw, bool enh_frame)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
- const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 soff = nvd0_sor_soff(outp);
+ const u32 loff = nvd0_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 outp, lane = 0;
- u8 ver, hdr, cnt, len;
- struct nvbios_dpout info;
+ u32 lane = 0;
int i;
- outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
- if (outp && info.lnkcmp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = 0x0000,
- .outp = dcbo,
- .crtc = head,
- .execute = 1,
- };
-
- while (nv_ro08(bios, info.lnkcmp) < link_bw)
- info.lnkcmp += 3;
- init.offset = nv_ro16(bios, info.lnkcmp + 1);
-
- nvbios_exec(&init);
- }
-
clksor |= link_bw << 18;
dpctrl |= ((1 << link_nr) - 1) << 16;
- if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ if (enh_frame)
dpctrl |= 0x00004000;
for (i = 0; i < link_nr; i++)
@@ -97,24 +87,25 @@ nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
return 0;
}
-int
-nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
- u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+static int
+nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+ int head, int lane, int swing, int preem)
{
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
- const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ struct nouveau_bios *bios = nouveau_bios(disp);
+ struct nv50_disp_priv *priv = (void *)disp;
+ const u32 loff = nvd0_sor_loff(outp);
u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
u8 ver, hdr, cnt, len;
- struct nvbios_dpout outp;
+ struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
@@ -124,3 +115,10 @@ nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
return 0;
}
+
+const struct nouveau_dp_func
+nvd0_sor_dp_func = {
+ .pattern = nvd0_sor_dp_pattern,
+ .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .drv_ctl = nvd0_sor_dp_drv_ctl,
+};
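
nvd0_sor_dp_pattern() above programs the same training pattern into all four lanes with one masked write: multiplying a value below 16 by 0x01010101 replicates it into every byte, and the 0x0f0f0f0f mask keeps only the low nibble of each byte. A tiny standalone illustration of that trick:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t pattern = 0x2;			/* training pattern 2 */
    	uint32_t val  = 0x01010101 * pattern;	/* 0x02020202 */
    	uint32_t mask = 0x0f0f0f0f;		/* one nibble per lane */

    	/* nv_mask(priv, 0x61c110 + loff, mask, val) merges this into the
    	 * register; here we just show the value being built. */
    	printf("val=0x%08x masked=0x%08x\n", val, val & mask);
    	return 0;
    }
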
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index c2b9db335816..7341ebe131fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/object.h>
#include <core/handle.h>
+#include <core/event.h>
#include <core/class.h>
#include <engine/dmaobj.h>
@@ -146,10 +148,25 @@ nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
return -1;
}
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
+{
+ struct nouveau_fifo_chan *chan = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (chid >= fifo->min && chid <= fifo->max)
+ chan = (void *)fifo->channel[chid];
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ return nouveau_client_name(chan);
+}
+
void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
+ nouveau_event_destroy(&priv->uevent);
nouveau_engine_destroy(&priv->base);
}
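
nouveau_client_name_for_fifo_chid() exists so that interrupt paths, which only know a raw channel id, can still print the owning client: the lookup takes the fifo spinlock, bounds-checks the id against fifo->min/max, and falls back to nouveau_client_name(NULL) when the channel is gone. Typical call-site shape, as used by the nv04/nvc0/nve0 hunks later in this patch:

    nv_error(priv, "CACHE_ERROR - ch %d [%s] mthd 0x%04x data 0x%08x\n",
    	 chid, nouveau_client_name_for_fifo_chid(&priv->base, chid),
    	 mthd & 0x1ffc, data);
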
@@ -174,6 +191,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
+ ret = nouveau_event_create(1, &priv->uevent);
+ if (ret)
+ return ret;
+
priv->chid = nouveau_fifo_chid;
spin_lock_init(&priv->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index a47a8548f9e0..f877bd524a92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -28,6 +28,7 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
@@ -398,6 +399,98 @@ out:
return handled;
}
+static void
+nv04_fifo_cache_error(struct nouveau_device *device,
+ struct nv04_fifo_priv *priv, u32 chid, u32 get)
+{
+ u32 mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
+ * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
+ * show that it wraps around to the start at GET=0x800.. No clue as to
+ * why..
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ const char *client_name =
+ nouveau_client_name_for_fifo_chid(&priv->base, chid);
+ nv_error(priv,
+ "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+}
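
The comment above describes PFIFO's puller GET pointer wrapping at 0x800 even though the register can reach 0xffc; the ptr computation masks the offset into that 2 KiB window and converts it to a 32-bit slot index. A quick worked example (my reading of the code, not text from the patch):

    /*   get = 0x0804
     *   get & 0x7ff        = 0x004    wrap into the 0x800-byte window
     *   (get & 0x7ff) >> 2 = 1        second METHOD/DATA slot
     * so the handler reads NV04_PFIFO_CACHE1_METHOD(1) / _DATA(1).
     */
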
+
+static void
+nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
+ u32 chid)
+{
+ const char *client_name;
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state), push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(priv, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put)
+ nv_wr32(priv, 0x003334, ib_put);
+ } else {
+ nv_error(priv,
+ "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+ chid, client_name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(priv, 0x003244, dma_put);
+ }
+
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
+
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
@@ -416,96 +509,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (device->card_type < NV_40) {
- mthd = nv_rd32(priv,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(priv,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_error(priv, "CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(priv, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-
+ nv04_fifo_cache_error(device, priv, chid, get);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(priv, 0x003244);
- u32 dma_put = nv_rd32(priv, 0x003240);
- u32 push = nv_rd32(priv, 0x003220);
- u32 state = nv_rd32(priv, 0x003228);
-
- if (device->card_type == NV_50) {
- u32 ho_get = nv_rd32(priv, 0x003328);
- u32 ho_put = nv_rd32(priv, 0x003320);
- u32 ib_get = nv_rd32(priv, 0x003334);
- u32 ib_put = nv_rd32(priv, 0x003330);
-
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(priv, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(priv, 0x003244, dma_put);
- nv_wr32(priv, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(priv, 0x003334, ib_put);
- }
- } else {
- nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
- chid, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
-
- if (dma_get != dma_put)
- nv_wr32(priv, 0x003244, dma_put);
- }
-
- nv_wr32(priv, 0x003228, 0x00000000);
- nv_wr32(priv, 0x003220, 0x00000001);
- nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ nv04_fifo_dma_pusher(device, priv, chid);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
@@ -528,6 +537,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
+
+ if (status & 0x40000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x40000000);
+ status &= ~0x40000000;
+ }
}
if (status) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index bd096364f680..840af6172788 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -129,7 +129,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
/* do the kickoff... */
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
ret = -EBUSY;
}
@@ -480,7 +481,7 @@ nv50_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002044, 0x01003fff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 1eb1c512f503..094000e87871 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -26,6 +26,7 @@
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
@@ -100,7 +101,8 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
nv_wr32(priv, 0x002520, save);
if (!done) {
- nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] unload timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -378,6 +380,20 @@ nv84_fifo_cclass = {
* PFIFO engine
******************************************************************************/
+static void
+nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+}
+
+static void
+nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nv84_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+}
+
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -401,6 +417,10 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nv84_fifo_uevent_enable;
+ priv->base.uevent->disable = nv84_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
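
The FIFO engines now expose a "uevent" (apparently the non-stall interrupt used for user-event/fence signalling) through the nouveau_event object created in nouveau_fifo_create_(). The enable/disable callbacks flip a single bit in the PFIFO interrupt-enable mask (0x40000000 on nv84, 0x80000000 on nvc0/nve0), and the init routines leave that bit cleared (0xbfffffff / 0x3fffffff) so the interrupt only fires while somebody is listening; the ISR then calls nouveau_event_trigger(). A generic sketch of wiring such a pair of callbacks, with hypothetical demo_* names:

    /* Sketch: gate a hardware interrupt behind the event's enable/disable
     * hooks; demo_mask() stands in for nv_mask(). */
    static void demo_uevent_enable(struct nouveau_event *event, int index)
    {
    	struct demo_fifo *fifo = event->priv;
    	demo_mask(fifo, 0x002140, 0x40000000, 0x40000000);	/* unmask */
    }

    static void demo_uevent_disable(struct nouveau_event *event, int index)
    {
    	struct demo_fifo *fifo = event->priv;
    	demo_mask(fifo, 0x002140, 0x40000000, 0x00000000);	/* mask */
    }

    /* constructor:
     *	fifo->base.uevent->enable  = demo_uevent_enable;
     *	fifo->base.uevent->disable = demo_uevent_disable;
     *	fifo->base.uevent->priv    = fifo;
     * interrupt handler:
     *	if (stat & 0x40000000)
     *		nouveau_event_trigger(fifo->base.uevent, 0);
     */
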
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b4365dde1859..4f226afb5591 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -149,7 +150,8 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -333,17 +335,17 @@ nvc0_fifo_cclass = {
******************************************************************************/
static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
- { 0x00, "PGRAPH" },
+ { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
{ 0x03, "PEEPHOLE" },
{ 0x04, "BAR1" },
{ 0x05, "BAR3" },
- { 0x07, "PFIFO" },
- { 0x10, "PBSP" },
- { 0x11, "PPPP" },
+ { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
+ { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
{ 0x13, "PCOUNTER" },
- { 0x14, "PVP" },
- { 0x15, "PCOPY0" },
- { 0x16, "PCOPY1" },
+ { 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
+ { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
{ 0x17, "PDAEMON" },
{}
};
@@ -402,6 +404,9 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
switch (unit) {
case 3: /* PEEPHOLE */
@@ -420,16 +425,26 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
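
The fault-unit enum entries gain a data2 field naming the engine behind each unit, which lets the fault handler map the faulting instance back to an engine context and, from there, to a client name. The get/put pairing is the important part; a reduced sketch of the path added above, with error handling trimmed:

    const struct nouveau_enum *en;
    struct nouveau_object *engctx = NULL;

    en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
    if (en && en->data2) {
    	struct nouveau_engine *engine = nouveau_engine(priv, en->data2);
    	if (engine)
    		engctx = nouveau_engctx_get(engine, inst);	/* takes a ref */
    }

    pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
    	nouveau_client_name(engctx));
    nouveau_engctx_put(engctx);	/* drops the ref; a NULL engctx is tolerated */
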
@@ -484,10 +499,12 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -501,12 +518,34 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
+ if (stat & 0x00000001) {
+ u32 intr = nv_rd32(priv, 0x00252c);
+ nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
if (stat & 0x00000100) {
- nv_warn(priv, "unknown status 0x00000100\n");
+ u32 intr = nv_rd32(priv, 0x00254c);
+ nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
+ if (stat & 0x00010000) {
+ u32 intr = nv_rd32(priv, 0x00256c);
+ nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x01000000) {
+ u32 intr = nv_rd32(priv, 0x00258c);
+ nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
+ nv_wr32(priv, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
@@ -536,11 +575,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x40000000) {
- nv_warn(priv, "unknown status 0x40000000\n");
- nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ u32 intr0 = nv_rd32(priv, 0x0025a4);
+ u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000);
+ nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
+ intr0, intr1);
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -548,6 +596,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nvc0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -581,6 +643,10 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nvc0_fifo_uevent_enable;
+ priv->base.uevent->disable = nvc0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
nv_engine(priv)->cclass = &nvc0_fifo_cclass;
@@ -639,7 +705,8 @@ nvc0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
+ nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index c930da99c2c1..4419e40d88e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -27,6 +27,7 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
+#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
@@ -184,7 +185,8 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nouveau_client_name(chan));
if (suspend)
return -EBUSY;
}
@@ -412,20 +414,34 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
+ const struct nouveau_enum *en;
+ struct nouveau_engine *engine;
+ struct nouveau_object *engctx = NULL;
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ pr_cont("] from ");
+ en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
if (stat & 0x00000040) {
- printk("/");
+ pr_cont("/");
nouveau_enum_print(nve0_fifo_fault_hubclient, client);
} else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
}
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
+
+ if (en && en->data2) {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine)
+ engctx = nouveau_engctx_get(engine, inst);
+
+ }
+
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -480,10 +496,12 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
- printk("\n");
- nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
- "data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid,
+ nouveau_client_name_for_fifo_chid(&priv->base, chid),
+ subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -537,6 +555,12 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
stat &= ~0x40000000;
}
+ if (stat & 0x80000000) {
+ nouveau_event_trigger(priv->base.uevent, 0);
+ nv_wr32(priv, 0x002100, 0x80000000);
+ stat &= ~0x80000000;
+ }
+
if (stat) {
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
@@ -544,6 +568,20 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
}
+static void
+nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+ struct nve0_fifo_priv *priv = event->priv;
+ nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -567,6 +605,10 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.uevent->enable = nve0_fifo_uevent_enable;
+ priv->base.uevent->disable = nve0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
nv_engine(priv)->cclass = &nve0_fifo_cclass;
@@ -617,7 +659,7 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x002a00, 0xffffffff);
nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index e30a9c5ff1fc..ad13dcdd15f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1297,16 +1298,17 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv04_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 5c0f843ea249..23c143aaa556 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,6 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -1193,16 +1194,17 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(chan), subc, class, mthd,
+ data);
}
nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 5b20401bf911..0607b9801748 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,3 +1,4 @@
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -224,15 +225,17 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, nouveau_client_name(engctx), subc, class, mthd,
+ data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 0b36dd3deebd..17049d5c723d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
@@ -321,16 +322,17 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
- printk(" nsource:");
+ pr_cont(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
+ pr_cont(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 4, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index b1c3d835b4c2..f2b1a7a124f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -24,6 +24,7 @@
#include <core/os.h>
#include <core/class.h>
+#include <core/client.h>
#include <core/handle.h>
#include <core/engctx.h>
#include <core/enum.h>
@@ -418,7 +419,7 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
nv_error(priv, "TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_enum_print(nv50_mp_exec_error_names, status);
- printk(" at %06x warp %d, opcode %08x %08x\n",
+ pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
@@ -532,7 +533,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
static int
nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
- int chid, u64 inst)
+ int chid, u64 inst, struct nouveau_object *engctx)
{
u32 status = nv_rd32(priv, 0x400108);
u32 ustatus;
@@ -565,12 +566,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x%08x "
- "400808 0x%08x 400848 0x%08x\n",
- chid, inst, subc, class, mthd, datah,
- datal, addr, r848);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, datah, datal, addr, r848);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -591,11 +591,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
nv_error(priv, "TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv, "ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x 40084c 0x%08x\n",
- chid, inst, subc, class, mthd,
- data, addr);
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
+ chid, inst,
+ nouveau_client_name(engctx), subc,
+ class, mthd, data, addr);
} else
if (display) {
nv_error(priv, "no stuck command?\n");
@@ -623,7 +623,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_M2MF");
nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
@@ -644,7 +644,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_VFETCH");
nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
@@ -661,7 +661,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_STRMOUT");
nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
@@ -682,7 +682,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
if (display) {
nv_error(priv, "TRAP_CCACHE");
nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
- printk("\n");
+ pr_cont("\n");
nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
@@ -774,11 +774,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 ecode = nv_rd32(priv, 0x400110);
nv_error(priv, "DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
- printk("\n");
+ pr_cont("\n");
}
if (stat & 0x00200000) {
- if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
+ if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
+ engctx))
show &= ~0x00200000;
}
@@ -786,12 +787,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_error(priv, "");
+ nv_error(priv, "%s", "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
- printk("\n");
- nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, class, mthd, data);
+ pr_cont("\n");
+ nv_error(priv,
+ "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
@@ -907,9 +909,8 @@ nv50_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400828, 0x00000000);
nv_wr32(priv, 0x40082c, 0x00000000);
nv_wr32(priv, 0x400830, 0x00000000);
- nv_wr32(priv, 0x400724, 0x00000000);
nv_wr32(priv, 0x40032c, 0x00000000);
- nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+ nv_wr32(priv, 0x400330, 0x00000000);
/* some unknown zcull magic */
switch (nv_device(priv)->chipset & 0xf0) {
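
Two mechanical changes recur throughout the PGRAPH interrupt printers: printk() continuations become pr_cont(), and nv_error(priv, "") becomes nv_error(priv, "%s", "") — the latter, presumably, to sidestep gcc's zero-length-format-string warning while still emitting only the message prefix before the bitfield decoder appends to the line. Illustration (the warning rationale is an inference, not stated in the patch):

    /* prefix-only error line, then build the rest of it with pr_cont() */
    nv_error(priv, "%s", "");	/* avoids the zero-length format warning */
    nouveau_bitfield_print(nv50_graph_intr_name, show);
    pr_cont(" nsource:");
    nouveau_bitfield_print(nv04_graph_nsource, nsource);
    pr_cont("\n");
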
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 45aff5f5085a..0de0dd724aff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -433,10 +433,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx),
+ subc, class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -444,9 +444,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -454,15 +455,16 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, nouveau_client_name(engctx), subc,
+ class, mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
+ nouveau_client_name(engctx));
nvc0_graph_trap_intr(priv);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
@@ -611,10 +613,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
static void
nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
+ kfree(fuc->data);
+ fuc->data = NULL;
}
void
@@ -622,8 +622,7 @@ nvc0_graph_dtor(struct nouveau_object *object)
{
struct nvc0_graph_priv *priv = (void *)object;
- if (priv->data)
- kfree(priv->data);
+ kfree(priv->data);
nvc0_graph_dtor_fw(&priv->fuc409c);
nvc0_graph_dtor_fw(&priv->fuc409d);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 9f82e9702b46..61cec0f6ff1c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -78,15 +78,16 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
}
static void
-nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
+ struct nouveau_object *engctx)
{
u32 trap = nv_rd32(priv, 0x400108);
int rop;
if (trap & 0x00000001) {
u32 stat = nv_rd32(priv, 0x404000);
- nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x404000, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000001);
trap &= ~0x00000001;
@@ -94,8 +95,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
if (trap & 0x00000010) {
u32 stat = nv_rd32(priv, 0x405840);
- nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
- chid, inst, stat);
+ nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), stat);
nv_wr32(priv, 0x405840, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000010);
trap &= ~0x00000010;
@@ -105,8 +106,10 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
for (rop = 0; rop < priv->rop_nr; rop++) {
u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
- nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
- rop, chid, inst, statz, statc);
+ nv_error(priv,
+ "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n",
+ rop, chid, inst, nouveau_client_name(engctx),
+ statz, statc);
nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
}
@@ -115,8 +118,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
}
if (trap) {
- nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
- chid, inst, trap);
+ nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), trap);
nv_wr32(priv, 0x400108, trap);
}
}
@@ -145,10 +148,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00000010) {
handle = nouveau_handle_get_class(engctx, class);
if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc,
+ class, mthd, data);
}
nouveau_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
@@ -156,9 +159,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x00000020) {
- nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ nv_error(priv,
+ "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
@@ -166,15 +170,15 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
if (stat & 0x00100000) {
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, nouveau_client_name(engctx), subc, class,
+ mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nve0_graph_trap_isr(priv, chid, inst);
+ nve0_graph_trap_isr(priv, chid, inst, engctx);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 9fd86375f4c4..49ecbb859b25 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
@@ -231,8 +232,10 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, stat, type, mthd, data);
+ nv_error(priv,
+ "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ chid, inst << 4, nouveau_client_name(engctx), stat,
+ type, mthd, data);
}
nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b0e7e1c01ce6..c48e74953771 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -28,6 +28,9 @@
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -90,18 +93,11 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
-
if (crtc > 1)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -136,6 +132,29 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
+nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+ }
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nv50_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -150,6 +169,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nv50_software_vblsem_release;
return 0;
}
@@ -170,8 +190,8 @@ nv50_software_cclass = {
static int
nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_software_priv *priv;
int ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 282a1cd1bc2f..a523eaad47e3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -25,6 +25,9 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
@@ -72,18 +75,12 @@ nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
struct nouveau_disp *disp = nouveau_disp(object);
- unsigned long flags;
u32 crtc = *(u32 *)args;
if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
return -EINVAL;
- disp->vblank.get(disp->vblank.data, crtc);
-
- spin_lock_irqsave(&disp->vblank.lock, flags);
- list_add(&chan->base.vblank.head, &disp->vblank.list);
- chan->base.vblank.crtc = crtc;
- spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
return 0;
}
@@ -118,6 +115,23 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
+nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_software_chan *chan =
+ container_of(event, struct nouveau_software_chan, vblank.event);
+ struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+ struct nouveau_bar *bar = nouveau_bar(priv);
+
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+
+ return NVKM_EVENT_DROP;
+}
+
+static int
nvc0_software_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,6 +146,7 @@ nvc0_software_context_ctor(struct nouveau_object *parent,
return ret;
chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ chan->base.vblank.event.func = nvc0_software_vblsem_release;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 47c4b3a5bd3a..92d3ab11d962 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -154,6 +154,14 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 0046: NV04_DISP
+ */
+
+#define NV04_DISP_CLASS 0x00000046
+
+struct nv04_display_class {
+};
+
/* 5070: NV50_DISP
* 8270: NV84_DISP
* 8370: NVA0_DISP
@@ -190,25 +198,6 @@ struct nve0_channel_ind_class {
#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
-#define NV94_DISP_SOR_DP_TRAIN 0x00016000
-#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
-#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
-#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
-#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
-#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
-#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
-#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
-#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
-#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
-#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
#define NV50_DISP_DAC_MTHD 0x00020000
#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
@@ -230,6 +219,23 @@ struct nve0_channel_ind_class {
#define NV50_DISP_DAC_LOAD 0x0002000c
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+#define NV50_DISP_PIOR_MTHD 0x00030000
+#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_PIOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_PIOR_PWR 0x00030000
+#define NV50_DISP_PIOR_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
+#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
+#define NV50_DISP_PIOR_DP_PWR 0x00036000
+#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
+
struct nv50_display_class {
};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 63acc0346ff2..c66eac513803 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -7,7 +7,7 @@ struct nouveau_client {
struct nouveau_namedb base;
struct nouveau_handle *root;
struct nouveau_object *device;
- char name[16];
+ char name[32];
u32 debug;
struct nouveau_vm *vm;
};
@@ -41,5 +41,6 @@ int nouveau_client_create_(const char *name, u64 device, const char *cfg,
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
+const char *nouveau_client_name(void *obj);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index e58b6f0984c1..d351a4e5819c 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -26,6 +26,7 @@ enum nv_subdev_type {
*/
NVDEV_SUBDEV_MXM,
NVDEV_SUBDEV_MC,
+ NVDEV_SUBDEV_BUS,
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
NVDEV_SUBDEV_LTCG,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
index e7b1e181943b..4fc62bb8c1f0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/enum.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -5,12 +5,13 @@ struct nouveau_enum {
u32 value;
const char *name;
const void *data;
+ u32 data2;
};
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *, u32 value);
-void
+const struct nouveau_enum *
nouveau_enum_print(const struct nouveau_enum *en, u32 value);
struct nouveau_bitfield {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
new file mode 100644
index 000000000000..9e094408f14e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_EVENT_H__
+#define __NVKM_EVENT_H__
+
+/* return codes from event handlers */
+#define NVKM_EVENT_DROP 0
+#define NVKM_EVENT_KEEP 1
+
+struct nouveau_eventh {
+ struct list_head head;
+ int (*func)(struct nouveau_eventh *, int index);
+};
+
+struct nouveau_event {
+ spinlock_t lock;
+
+ void *priv;
+ void (*enable)(struct nouveau_event *, int index);
+ void (*disable)(struct nouveau_event *, int index);
+
+ int index_nr;
+ struct {
+ struct list_head list;
+ int refs;
+ } index[];
+};
+
+int nouveau_event_create(int index_nr, struct nouveau_event **);
+void nouveau_event_destroy(struct nouveau_event **);
+void nouveau_event_trigger(struct nouveau_event *, int index);
+
+void nouveau_event_get(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_event *, int index,
+ struct nouveau_eventh *);
+
+#endif
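Illustrative aside (not part of the patch): the new core/event.h above replaces the open-coded vblank notifier lists, and the software engine changes later in this series show the intended usage pattern. Below is a minimal C sketch of that pattern; example_chan and example_vblank_handler are hypothetical names invented for the example.

#include <core/event.h>

/* hypothetical consumer embedding a handler, mirroring nouveau_software_chan */
struct example_chan {
	struct nouveau_eventh vblank;
};

static int
example_vblank_handler(struct nouveau_eventh *event, int head)
{
	struct example_chan *chan =
		container_of(event, struct example_chan, vblank);

	/* react to the vblank on 'head' using 'chan' ... */
	(void)chan;
	return NVKM_EVENT_DROP;	/* one-shot: the core drops the handler after it fires */
}

static void
example_arm_vblank(struct nouveau_event *vblank, int head,
		   struct example_chan *chan)
{
	chan->vblank.func = example_vblank_handler;
	nouveau_event_get(vblank, head, &chan->vblank);	/* arm on that index */
}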
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 106bb19fdd9a..62e68baef087 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -136,7 +136,7 @@ static inline u8
nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
- nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
return data;
}
@@ -144,7 +144,7 @@ static inline u16
nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
- nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
return data;
}
@@ -152,28 +152,28 @@ static inline u32
nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
- nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
return data;
}
static inline void
nv_wo08(void *obj, u64 addr, u8 data)
{
- nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
+ nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
nv_wo16(void *obj, u64 addr, u16 data)
{
- nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
+ nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
nv_wo32(void *obj, u64 addr, u32 data)
{
- nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
+ nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 1d629664f32d..febed2ea5c80 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,7 +15,8 @@ struct nouveau_object;
#define NV_PRINTK_TRACE KERN_DEBUG
#define NV_PRINTK_SPAM KERN_DEBUG
-void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(4, 5)
+nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_printk(o,l,f,a...) do { \
if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 46948285f3e7..28da6772c095 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -4,18 +4,11 @@
#include <core/object.h>
#include <core/engine.h>
#include <core/device.h>
+#include <core/event.h>
struct nouveau_disp {
struct nouveau_engine base;
-
- struct {
- struct list_head list;
- spinlock_t lock;
- void (*notify)(void *, int);
- void (*get)(void *, int);
- void (*put)(void *, int);
- void *data;
- } vblank;
+ struct nouveau_event *vblank;
};
static inline struct nouveau_disp *
@@ -24,16 +17,22 @@ nouveau_disp(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
}
-#define nouveau_disp_create(p,e,c,i,x,d) \
- nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
-#define nouveau_disp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
+#define nouveau_disp_create(p,e,c,h,i,x,d) \
+ nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
+ sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_dtor(nv_object(disp)); \
+})
#define nouveau_disp_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_disp_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
-#define _nouveau_disp_dtor _nouveau_engine_dtor
+int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int heads,
+ const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
#define _nouveau_disp_init _nouveau_engine_init
#define _nouveau_disp_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index f18846c8c6fe..b46c197709f3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,6 +65,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
+ struct nouveau_event *uevent;
+
struct nouveau_object **channel;
spinlock_t lock;
u16 min;
@@ -92,6 +94,8 @@ int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int min, int max,
int size, void **);
void nouveau_fifo_destroy(struct nouveau_fifo *);
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
#define _nouveau_fifo_init _nouveau_engine_init
#define _nouveau_fifo_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index c945691c8564..45799487e573 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,17 +3,17 @@
#include <core/engine.h>
#include <core/engctx.h>
+#include <core/event.h>
struct nouveau_software_chan {
struct nouveau_engctx base;
struct {
- struct list_head head;
+ struct nouveau_eventh event;
u32 channel;
u32 ctxdma;
u64 offset;
u32 value;
- u32 crtc;
} vblank;
int (*flip)(void *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index b79025da581e..123270e9813a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -16,6 +16,8 @@ enum dcb_output_type {
struct dcb_output {
int index; /* may not be raw dcb index if merging has happened */
+ u16 hasht;
+ u16 hashm;
enum dcb_output_type type;
uint8_t i2c_index;
uint8_t heads;
@@ -25,6 +27,7 @@ struct dcb_output {
uint8_t or;
uint8_t link;
bool duallink_possible;
+ uint8_t extdev;
union {
struct sor_conf {
int link;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index e6563b5cb08e..96d3364f6db3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -1,17 +1,22 @@
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
-struct nouveau_bios;
-
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
DCB_GPIO_TVDAC0 = 0x0c,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_PWM_FAN = 0x09,
+ DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
DCB_GPIO_UNUSED = 0xff
};
+#define DCB_GPIO_LOG_DIR 0x02
+#define DCB_GPIO_LOG_DIR_OUT 0x00
+#define DCB_GPIO_LOG_DIR_IN 0x02
+#define DCB_GPIO_LOG_VAL 0x01
+#define DCB_GPIO_LOG_VAL_LO 0x00
+#define DCB_GPIO_LOG_VAL_HI 0x01
+
struct dcb_gpio_func {
u8 func;
u8 line;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 5079bedfd985..10b57a19a7de 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -15,7 +15,7 @@ struct dcb_i2c_entry {
enum dcb_i2c_type type;
u8 drive;
u8 sense;
- u32 data;
+ u8 share;
};
u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index a2c4296fc5f6..083541dbe9c8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -23,11 +23,27 @@ struct nvbios_therm_sensor {
struct nvbios_therm_threshold thrs_shutdown;
};
+/* no vbios has more than 6 */
+#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
+struct nouveau_therm_trip_point {
+ int fan_duty;
+ int temp;
+ int hysteresis;
+};
+
struct nvbios_therm_fan {
u16 pwm_freq;
u8 min_duty;
u8 max_duty;
+
+ u16 bump_period;
+ u16 slow_down_period;
+
+ struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
+ u8 nr_fan_trip;
+ u8 linear_min_temp;
+ u8 linear_max_temp;
};
enum nvbios_therm_domain {
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
new file mode 100644
index 000000000000..360baab52e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
@@ -0,0 +1,19 @@
+#ifndef __NVBIOS_XPIO_H__
+#define __NVBIOS_XPIO_H__
+
+#define NVBIOS_XPIO_FLAG_AUX 0x10
+#define NVBIOS_XPIO_FLAG_AUX0 0x00
+#define NVBIOS_XPIO_FLAG_AUX1 0x10
+
+struct nvbios_xpio {
+ u8 type;
+ u8 addr;
+ u8 flags;
+};
+
+u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
new file mode 100644
index 000000000000..7d88ec4a6d06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -0,0 +1,41 @@
+#ifndef __NOUVEAU_BUS_H__
+#define __NOUVEAU_BUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bus_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nouveau_bus {
+ struct nouveau_subdev base;
+};
+
+static inline struct nouveau_bus *
+nouveau_bus(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
+}
+
+#define nouveau_bus_create(p, e, o, d) \
+ nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
+ sizeof(**d), (void **)d)
+#define nouveau_bus_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_bus_init(p) \
+ nouveau_subdev_init(&(p)->base)
+#define nouveau_bus_fini(p, s) \
+ nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_bus_dtor _nouveau_subdev_dtor
+#define _nouveau_bus_init _nouveau_subdev_init
+#define _nouveau_bus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_bus_oclass;
+extern struct nouveau_oclass nv31_bus_oclass;
+extern struct nouveau_oclass nv50_bus_oclass;
+extern struct nouveau_oclass nvc0_bus_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index b75e8f18e52c..c85b9f1579ad 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -3,6 +3,7 @@
#include <core/subdev.h>
#include <core/device.h>
+#include <core/event.h>
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
@@ -10,28 +11,18 @@
struct nouveau_gpio {
struct nouveau_subdev base;
+ struct nouveau_event *events;
+
/* hardware interfaces */
void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
- void (*irq_enable)(struct nouveau_gpio *, int line, bool);
/* software interfaces */
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
struct dcb_gpio_func *);
int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
- int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
-
- /* interrupt handling */
- struct list_head isr;
- spinlock_t lock;
-
- void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
- int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
- void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
};
static inline struct nouveau_gpio *
@@ -40,25 +31,23 @@ nouveau_gpio(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
}
-#define nouveau_gpio_create(p,e,o,d) \
- nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+#define nouveau_gpio_create(p,e,o,l,d) \
+ nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_dtor(nv_object(gpio)); \
+})
#define nouveau_gpio_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
-int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nouveau_gpio_init(struct nouveau_gpio *);
+int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int nouveau_gpio_init(struct nouveau_gpio *);
extern struct nouveau_oclass nv10_gpio_oclass;
extern struct nouveau_oclass nv50_gpio_oclass;
extern struct nouveau_oclass nvd0_gpio_oclass;
-
-void nv50_gpio_dtor(struct nouveau_object *);
-int nv50_gpio_init(struct nouveau_object *);
-int nv50_gpio_fini(struct nouveau_object *, bool);
-void nv50_gpio_intr(struct nouveau_subdev *);
-void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
+extern struct nouveau_oclass nve0_gpio_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index b93ab01e3785..888384c0bed8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -10,23 +10,59 @@
#define NV_I2C_PORT(n) (0x00 + (n))
#define NV_I2C_DEFAULT(n) (0x80 + (n))
+#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
+#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
+#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+
struct nouveau_i2c_port {
+ struct nouveau_object base;
struct i2c_adapter adapter;
- struct nouveau_i2c *i2c;
- struct i2c_algo_bit_data bit;
+
struct list_head head;
u8 index;
- u8 type;
- u32 dcb;
- u32 drive;
- u32 sense;
- u32 state;
+
+ const struct nouveau_i2c_func *func;
+};
+
+struct nouveau_i2c_func {
+ void (*acquire)(struct nouveau_i2c_port *);
+ void (*release)(struct nouveau_i2c_port *);
+
+ void (*drive_scl)(struct nouveau_i2c_port *, int);
+ void (*drive_sda)(struct nouveau_i2c_port *, int);
+ int (*sense_scl)(struct nouveau_i2c_port *);
+ int (*sense_sda)(struct nouveau_i2c_port *);
+
+ int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+ int (*pattern)(struct nouveau_i2c_port *, int pattern);
+ int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
+ int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
+#define nouveau_i2c_port_create(p,e,o,i,a,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
+ sizeof(**d), (void **)d)
+#define nouveau_i2c_port_destroy(p) ({ \
+ struct nouveau_i2c_port *port = (p); \
+ _nouveau_i2c_port_dtor(nv_object(port)); \
+})
+#define nouveau_i2c_port_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_i2c_port_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u8,
+ const struct i2c_algorithm *, int, void **);
+void _nouveau_i2c_port_dtor(struct nouveau_object *);
+#define _nouveau_i2c_port_init nouveau_object_init
+#define _nouveau_i2c_port_fini nouveau_object_fini
+
struct nouveau_i2c {
struct nouveau_subdev base;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
+ struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
@@ -40,21 +76,76 @@ nouveau_i2c(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
}
-extern struct nouveau_oclass nouveau_i2c_oclass;
+#define nouveau_i2c_create(p,e,o,s,d) \
+ nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
+#define nouveau_i2c_destroy(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_dtor(nv_object(i2c)); \
+})
+#define nouveau_i2c_init(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_init(nv_object(i2c)); \
+})
+#define nouveau_i2c_fini(p,s) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_fini(nv_object(i2c), (s)); \
+})
-void nouveau_i2c_drive_scl(void *, int);
-void nouveau_i2c_drive_sda(void *, int);
-int nouveau_i2c_sense_scl(void *);
-int nouveau_i2c_sense_sda(void *);
+int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct nouveau_oclass *,
+ int, void **);
+void _nouveau_i2c_dtor(struct nouveau_object *);
+int _nouveau_i2c_init(struct nouveau_object *);
+int _nouveau_i2c_fini(struct nouveau_object *, bool);
-int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
-int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
-bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
-
-int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+extern struct nouveau_oclass nv04_i2c_oclass;
+extern struct nouveau_oclass nv4e_i2c_oclass;
+extern struct nouveau_oclass nv50_i2c_oclass;
+extern struct nouveau_oclass nv94_i2c_oclass;
+extern struct nouveau_oclass nvd0_i2c_oclass;
+extern struct nouveau_oclass nouveau_anx9805_sclass[];
extern const struct i2c_algorithm nouveau_i2c_bit_algo;
extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+static inline int
+nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+{
+ u8 val;
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 2);
+ if (ret != 2)
+ return -EIO;
+
+ return val;
+}
+
+static inline int
+nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 2, .buf = buf },
+ };
+
+ int ret = i2c_transfer(&port->adapter, msgs, 1);
+ if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static inline bool
+nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+{
+ return nv_rdi2cr(port, addr, 0) >= 0;
+}
+
+int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+
#endif
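Illustrative aside (not part of the patch): nv_rdi2cr(), nv_wri2cr() and nv_probe_i2c() are now static inlines built on i2c_transfer(), as shown above. A minimal usage sketch follows; the port index NV_I2C_DEFAULT(0) and the 7-bit address 0x50 (a common DDC/EDID address) are assumptions made for the example.

#include <subdev/i2c.h>

static int
example_read_edid_byte(struct nouveau_i2c *i2c)
{
	/* assumed port/address; adjust for the output actually being probed */
	struct nouveau_i2c_port *port = i2c->find(i2c, NV_I2C_DEFAULT(0));

	if (!port || !nv_probe_i2c(port, 0x50))
		return -ENODEV;
	return nv_rdi2cr(port, 0x50, 0x00);	/* first byte of the EDID block */
}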
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index faee569fd458..6b17b614629f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,10 +4,10 @@
#include <core/device.h>
#include <core/subdev.h>
-enum nouveau_therm_fan_mode {
- FAN_CONTROL_NONE = 0,
- FAN_CONTROL_MANUAL = 1,
- FAN_CONTROL_NR,
+enum nouveau_therm_mode {
+ NOUVEAU_THERM_CTRL_NONE = 0,
+ NOUVEAU_THERM_CTRL_MANUAL = 1,
+ NOUVEAU_THERM_CTRL_AUTO = 2,
};
enum nouveau_therm_attr_type {
@@ -28,6 +28,11 @@ enum nouveau_therm_attr_type {
struct nouveau_therm {
struct nouveau_subdev base;
+ int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
+ int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
+ int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
+ int (*pwm_clock)(struct nouveau_therm *);
+
int (*fan_get)(struct nouveau_therm *);
int (*fan_set)(struct nouveau_therm *, int);
int (*fan_sense)(struct nouveau_therm *);
@@ -46,13 +51,29 @@ nouveau_therm(void *obj)
}
#define nouveau_therm_create(p,e,o,d) \
- nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
-#define nouveau_therm_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
+ nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_therm_destroy(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_dtor(nv_object(therm)); \
+})
+#define nouveau_therm_init(p) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_init(nv_object(therm)); \
+})
+#define nouveau_therm_fini(p,s) ({ \
+ struct nouveau_therm *therm = (p); \
+ _nouveau_therm_fini(nv_object(therm), (s)); \
+})
-#define _nouveau_therm_dtor _nouveau_subdev_dtor
+int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_therm_dtor(struct nouveau_object *);
+int _nouveau_therm_init(struct nouveau_object *);
+int _nouveau_therm_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv40_therm_oclass;
extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nva3_therm_oclass;
+extern struct nouveau_oclass nvd0_therm_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index c24ec8ab3db4..e465d158d352 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -10,6 +10,14 @@ struct nouveau_alarm {
void (*func)(struct nouveau_alarm *);
};
+static inline void
+nouveau_alarm_init(struct nouveau_alarm *alarm,
+ void (*func)(struct nouveau_alarm *))
+{
+ INIT_LIST_HEAD(&alarm->head);
+ alarm->func = func;
+}
+
bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
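Illustrative aside (not part of the patch): nouveau_alarm_init() above only fills in the list head and the callback; actually scheduling the alarm goes through the timer subdev and is not shown in this hunk. A minimal sketch of initialising one, with hypothetical names:

#include <subdev/timer.h>

static void
example_alarm_cb(struct nouveau_alarm *alarm)
{
	/* runs when the timer subdev fires the alarm */
}

static struct nouveau_alarm example_alarm;

static void
example_setup_alarm(void)
{
	nouveau_alarm_init(&example_alarm, example_alarm_cb);
	/* scheduling the alarm (not shown here) is done via the timer subdev */
}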
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index cfe3b9cad156..eb496033b55c 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/reboot.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index f621f69fa1a2..e816f06637a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -172,7 +172,7 @@ out:
nv_wr32(bios, pcireg, access);
}
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 0fd87df99dd6..2d9b9d7a7992 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,18 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return (outp->extdev << 8) | (outp->location << 4) | outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
u16
dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
struct dcb_output *outp)
@@ -135,34 +147,28 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
case DCB_OUTPUT_DP:
outp->link = (conf & 0x00000030) >> 4;
outp->sorconf.link = outp->link; /*XXX*/
+ outp->extdev = 0x00;
+ if (outp->location != 0)
+ outp->extdev = (conf & 0x0000ff00) >> 8;
break;
default:
break;
}
}
+
+ outp->hasht = dcb_outp_hasht(outp);
+ outp->hashm = dcb_outp_hashm(outp);
}
return dcb;
}
-static inline u16
-dcb_outp_hasht(struct dcb_output *outp)
-{
- return outp->type;
-}
-
-static inline u16
-dcb_outp_hashm(struct dcb_output *outp)
-{
- return (outp->heads << 8) | (outp->link << 6) | outp->or;
-}
-
u16
dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
u8 *ver, u8 *len, struct dcb_output *outp)
{
u16 dcb, idx = 0;
while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
- if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
if ((dcb_outp_hashm(outp) & mask) == mask)
break;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index 5afb568b2d69..b2a676e53580 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -48,7 +48,7 @@ extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return extdev + *hdr;
}
-u16
+static u16
nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c84e93fa6d95..172a4f999990 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
u16
dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
@@ -60,8 +61,14 @@ dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u16
dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u8 hdr, cnt;
- u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
+ u16 gpio;
+
+ if (!idx--)
+ gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
+ else
+ gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
+
if (gpio && ent < cnt)
return gpio + hdr + (ent * *len);
return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index ad577db83766..cfb9288c6d28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -70,12 +70,12 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
- info->data = nv_ro32(bios, ent + 0);
- info->type = nv_ro08(bios, ent + 3);
+ info->type = nv_ro08(bios, ent + 3);
+ info->share = DCB_I2C_UNUSED;
if (ver < 0x30) {
info->type &= 0x07;
if (info->type == 0x07)
- info->type = 0xff;
+ info->type = DCB_I2C_UNUSED;
}
switch (info->type) {
@@ -88,7 +88,11 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
return 0;
case DCB_I2C_NVIO_BIT:
case DCB_I2C_NVIO_AUX:
- info->drive = nv_ro08(bios, ent + 0);
+ info->drive = nv_ro08(bios, ent + 0) & 0x0f;
+ if (nv_ro08(bios, ent + 1) & 0x01) {
+ info->share = nv_ro08(bios, ent + 1) >> 1;
+ info->share &= 0x0f;
+ }
return 0;
case DCB_I2C_UNUSED:
return 0;
@@ -121,7 +125,8 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
if (!info->sense) info->sense = 0x36;
}
- info->type = DCB_I2C_NV04_BIT;
+ info->type = DCB_I2C_NV04_BIT;
+ info->share = DCB_I2C_UNUSED;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 690ed438b2ad..2cc1e6a5eb6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -231,6 +231,11 @@ init_i2c(struct nvbios_init *init, int index)
return NULL;
}
+ if (index == -2 && init->outp->location) {
+ index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
+ return i2c->find_type(i2c, index);
+ }
+
index = init->outp->i2c_index;
}
@@ -258,7 +263,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
static int
init_rdauxr(struct nvbios_init *init, u32 addr)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
u8 data;
if (port && init_exec(init)) {
@@ -274,7 +279,7 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
- struct nouveau_i2c_port *port = init_i2c(init, -1);
+ struct nouveau_i2c_port *port = init_i2c(init, -2);
if (port && init_exec(init))
return nv_wraux(port, addr, &data, 1);
return -ENODEV;
@@ -1816,7 +1821,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
u8 i, j;
trace("RAM_RESTRICT_ZM_REG_GROUP\t"
- "R[%08x] 0x%02x 0x%02x\n", addr, incr, num);
+ "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
init->offset += 7;
for (i = 0; i < num; i++) {
@@ -1849,7 +1854,7 @@ init_copy_zm_reg(struct nvbios_init *init)
u32 sreg = nv_ro32(bios, init->offset + 1);
u32 dreg = nv_ro32(bios, init->offset + 5);
- trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg);
+ trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
init->offset += 9;
init_wr32(init, dreg, init_rd32(init, sreg));
@@ -1866,7 +1871,7 @@ init_zm_reg_group(struct nvbios_init *init)
u32 addr = nv_ro32(bios, init->offset + 1);
u8 count = nv_ro08(bios, init->offset + 5);
- trace("ZM_REG_GROUP\tR[0x%06x] =\n");
+ trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
init->offset += 6;
while (count--) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 862a08a2ae27..22a20573ed1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -55,7 +55,7 @@ therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nv_ro08(bios, therm + 1);
}
-u16
+static u16
nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
@@ -155,10 +155,15 @@ int
nvbios_therm_fan_parse(struct nouveau_bios *bios,
struct nvbios_therm_fan *fan)
{
+ struct nouveau_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
u16 entry;
+ uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
+ 75, 0, 85, 0, 100, 0, 100, 0 };
+
i = 0;
+ fan->nr_fan_trip = 0;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
s16 value = nv_ro16(bios, entry + 1);
@@ -167,9 +172,30 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
fan->min_duty = value & 0xff;
fan->max_duty = (value & 0xff00) >> 8;
break;
+ case 0x24:
+ fan->nr_fan_trip++;
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->hysteresis = value & 0xf;
+ cur_trip->temp = (value & 0xff0) >> 4;
+ cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
+ break;
+ case 0x25:
+ cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+ cur_trip->fan_duty = value;
+ break;
case 0x26:
fan->pwm_freq = value;
break;
+ case 0x3b:
+ fan->bump_period = value;
+ break;
+ case 0x3c:
+ fan->slow_down_period = value;
+ break;
+ case 0x46:
+ fan->linear_min_temp = nv_ro08(bios, entry + 1);
+ fan->linear_max_temp = nv_ro08(bios, entry + 2);
+ break;
}
}
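Illustrative aside (not part of the patch): the parser above fills nvbios_therm_fan with up to NOUVEAU_TEMP_FAN_TRIP_MAX trip points. The sketch below shows how such a table could be walked to pick a duty for a given temperature; it is only a reading aid, assumes the entries are stored in increasing temperature order, and is not the driver's actual fan-control logic.

#include <subdev/bios/therm.h>

static int
example_duty_for_temp(const struct nvbios_therm_fan *fan, int temp)
{
	int i, duty = fan->min_duty;

	/* take the duty of the hottest trip point already reached */
	for (i = 0; i < fan->nr_fan_trip; i++) {
		if (temp >= fan->trip[i].temp)
			duty = fan->trip[i].fan_duty;
	}
	if (duty > fan->max_duty)
		duty = fan->max_duty;
	return duty;
}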
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
new file mode 100644
index 000000000000..e9b8e5d30a7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
+
+static u16
+dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
+ if (data && *ver >= 0x40 && *hdr >= 0x06) {
+ u16 xpio = nv_ro16(bios, data + 0x04);
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+ if (xpio) {
+ *ver = nv_ro08(bios, data + 0x00);
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ return xpio;
+ }
+ }
+ return 0x0000;
+}
+
+u16
+dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_xpio *info)
+{
+ u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
+ if (data && *len >= 6) {
+ info->type = nv_ro08(bios, data + 0x04);
+ info->addr = nv_ro08(bios, data + 0x05);
+ info->flags = nv_ro08(bios, data + 0x06);
+ }
+ return 0x0000;
+}
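Illustrative aside (not part of the patch): a minimal caller of dcb_xpio_parse(), matching the prototype added in subdev/bios/xpio.h earlier in this series; the nv_info() dump is just one way of consuming the result and the function name is hypothetical.

#include <subdev/bios.h>
#include <subdev/bios/xpio.h>

static void
example_dump_xpio(struct nouveau_bios *bios, u8 idx)
{
	struct nvbios_xpio xpio = {};
	u8 ver, hdr, cnt, len;

	dcb_xpio_parse(bios, idx, &ver, &hdr, &cnt, &len, &xpio);
	nv_info(bios, "XPIO %d: type 0x%02x addr 0x%02x flags 0x%02x\n",
		idx, xpio.type, xpio.addr, xpio.flags);
}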
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
new file mode 100644
index 000000000000..8c7f8057a185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv04_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000001) {
+ nv_error(pbus, "BUS ERROR\n");
+ stat &= ~0x00000001;
+ nv_wr32(pbus, 0x001100, 0x00000001);
+ }
+
+ if (stat & 0x00000110) {
+ subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00000110;
+ nv_wr32(pbus, 0x001100, 0x00000110);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv04_bus_intr;
+ return 0;
+}
+
+static int
+nv04_bus_init(struct nouveau_object *object)
+{
+ struct nv04_bus_priv *priv = (void *)object;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00000111);
+
+ return nouveau_bus_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv04_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
new file mode 100644
index 000000000000..34132aef34e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv31_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv31_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+
+ if (gpio) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ }
+
+ if (stat & 0x00000008) { /* NV41- */
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00070000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00070000;
+ nv_wr32(pbus, 0x001100, 0x00070000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nv31_bus_init(struct nouveau_object *object)
+{
+ struct nv31_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00070008);
+ return 0;
+}
+
+static int
+nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv31_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv31_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv31_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x31),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv31_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv31_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
new file mode 100644
index 000000000000..f5b2117fa8c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv50_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nv50_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x00000008) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
+
+ stat &= ~0x00000008;
+ nv_wr32(pbus, 0x001100, 0x00000008);
+ }
+
+ if (stat & 0x00010000) {
+ subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+ if (subdev && subdev->intr)
+ subdev->intr(subdev);
+ stat &= ~0x00010000;
+ nv_wr32(pbus, 0x001100, 0x00010000);
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0);
+ }
+}
+
+static int
+nv50_bus_init(struct nouveau_object *object)
+{
+ struct nv50_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x00010008);
+ return 0;
+}
+
+static int
+nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nv50_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nv50_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
new file mode 100644
index 000000000000..b192d6246363
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ * Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nvc0_bus_priv {
+ struct nouveau_bus base;
+};
+
+static void
+nvc0_bus_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_bus *pbus = nouveau_bus(subdev);
+ u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+ if (stat & 0x0000000e) {
+ u32 addr = nv_rd32(pbus, 0x009084);
+ u32 data = nv_rd32(pbus, 0x009088);
+
+ nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc),
+ (stat & 0x00000002) ? "!ENGINE " : "",
+ (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000008) ? "TIMEOUT " : "");
+
+ nv_wr32(pbus, 0x009084, 0x00000000);
+ nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+ stat &= ~0x0000000e;
+ }
+
+ if (stat) {
+ nv_error(pbus, "unknown intr 0x%08x\n", stat);
+ nv_mask(pbus, 0x001140, stat, 0x00000000);
+ }
+}
+
+static int
+nvc0_bus_init(struct nouveau_object *object)
+{
+ struct nvc0_bus_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bus_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x001100, 0xffffffff);
+ nv_wr32(priv, 0x001140, 0x0000000e);
+ return 0;
+}
+
+static int
+nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bus_priv *priv;
+ int ret;
+
+ ret = nouveau_bus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = nvc0_bus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bus_oclass = {
+ .handle = NV_SUBDEV(BUS, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bus_ctor,
+ .dtor = _nouveau_bus_dtor,
+ .init = nvc0_bus_init,
+ .fini = _nouveau_bus_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index f8a7ed4166cf..3937ced5c753 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -66,6 +66,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
@@ -103,8 +104,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
struct nouveau_device *device;
struct nouveau_devobj *devobj;
struct nv_device_class *args = data;
- u64 disable, boot0, strap;
- u64 mmio_base, mmio_size;
+ u32 boot0, strap;
+ u64 disable, mmio_base, mmio_size;
void __iomem *map;
int ret, i, c;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 8626d0d6cbbc..473c5c03d3c9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
#include <subdev/devinit.h>
@@ -46,10 +47,11 @@ nv04_identify(struct nouveau_device *device)
case 0x04:
device->cname = "NV04";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -63,10 +65,11 @@ nv04_identify(struct nouveau_device *device)
case 0x05:
device->cname = "NV05";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index 9c40b0fb23f6..d0774f5bebe1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -48,10 +49,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV10";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -64,10 +66,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV15";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -82,10 +85,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV16";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -100,10 +104,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -118,10 +123,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV11";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -136,10 +142,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV17";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -154,10 +161,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "nForce2";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -172,10 +180,11 @@ nv10_identify(struct nouveau_device *device)
device->cname = "NV18";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 74f88f48e1c2..ab920e0dc45b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV20";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV25";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV28";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -103,10 +107,11 @@ nv20_identify(struct nouveau_device *device)
device->cname = "NV2A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0ac1b2c4f61d..5f2110261b04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV30";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV35";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV31";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -104,10 +108,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV36";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -123,10 +128,11 @@ nv30_identify(struct nouveau_device *device)
device->cname = "NV34";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 41d59689a021..f3d55efe9ac9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -24,6 +24,8 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/vm.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -50,11 +52,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV40";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -70,11 +73,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV41";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -90,11 +94,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV42";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -110,11 +115,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV43";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -130,11 +136,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV45";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -150,11 +157,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G70";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -170,11 +178,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G71";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -190,11 +199,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -210,11 +220,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -230,11 +241,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "G72";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -250,11 +262,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "NV44A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -270,11 +283,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C61";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -290,11 +304,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C51";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -310,11 +325,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -330,11 +346,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C67";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -350,11 +367,12 @@ nv40_identify(struct nouveau_device *device)
device->cname = "C68";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 6ccfd8585ba2..5ed2fa51ddc2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G80";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -79,12 +81,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G84";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -104,12 +107,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G86";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -129,12 +133,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G92";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -154,12 +159,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G94";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -179,12 +185,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G96";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -204,12 +211,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G98";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -229,12 +237,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "G200";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -254,12 +263,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP77/MCP78";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -279,12 +289,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP79/MCP7A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -304,12 +315,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT215";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -330,12 +342,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT216";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -355,12 +368,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "GT218";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -380,12 +394,13 @@ nv50_identify(struct nouveau_device *device)
device->cname = "MCP89";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index f0461685a422..4393eb4d6564 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -57,12 +58,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF100";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -85,12 +87,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -113,12 +116,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -141,12 +145,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF114";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -169,12 +174,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF116";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -197,12 +203,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF108";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -225,12 +232,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -253,12 +261,13 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF119";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -282,4 +291,4 @@ nvc0_identify(struct nouveau_device *device)
}
return 0;
-}
+ }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 03a652876e73..5c12391619fd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -24,6 +24,7 @@
#include <subdev/device.h>
#include <subdev/bios.h>
+#include <subdev/bus.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/clock.h>
@@ -56,13 +57,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe4:
device->cname = "GK104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -84,13 +86,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe7:
device->cname = "GK107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
@@ -112,13 +115,14 @@ nve0_identify(struct nouveau_device *device)
case 0xe6:
device->cname = "GK106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index ae7249b09797..4a8577838417 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -78,12 +78,13 @@ nv50_devinit_init(struct nouveau_object *object)
if (ret)
return ret;
- /* if we ran the init tables, execute first script pointer for each
- * display table output entry that has a matching dcb entry.
+ /* if we ran the init tables, we have to execute the first script
+ * pointer of each dcb entry's display encoder table in order
+ * to properly initialise each encoder.
*/
- while (priv->base.post && ver) {
- u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
- if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+ if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
+ &ver, &hdr, &cnt, &len, &info)) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
@@ -95,7 +96,8 @@ nv50_devinit_init(struct nouveau_object *object)
nvbios_exec(&init);
}
- };
+ i++;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index eac236ed19b2..0772ec978165 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -22,8 +22,10 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
+#include <core/client.h>
#include <core/enum.h>
+#include <core/engctx.h>
+#include <core/object.h>
#include <subdev/fb.h>
#include <subdev/bios.h>
@@ -303,17 +305,18 @@ static const struct nouveau_enum vm_client[] = {
};
static const struct nouveau_enum vm_engine[] = {
- { 0x00000000, "PGRAPH", NULL },
- { 0x00000001, "PVP", NULL },
+ { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+ { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
{ 0x00000004, "PEEPHOLE", NULL },
- { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
{ 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PPPP", NULL },
- { 0x00000009, "PBSP", NULL },
- { 0x0000000a, "PCRYPT", NULL },
+ { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
+ { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
+ { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
{ 0x0000000b, "PCOUNTER", NULL },
{ 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
{ 0x0000000e, "PDAEMON", NULL },
{}
};
@@ -335,8 +338,10 @@ static void
nv50_fb_intr(struct nouveau_subdev *subdev)
{
struct nouveau_device *device = nv_device(subdev);
+ struct nouveau_engine *engine;
struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
+ struct nouveau_object *engctx = NULL;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
int i;
@@ -367,36 +372,55 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
}
chan = (trap[2] << 16) | trap[1];
- nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
+ en = nouveau_enum_find(vm_engine, st0);
+
+ if (en && en->data2) {
+ const struct nouveau_enum *orig_en = en;
+ while (en->name && en->value == st0 && en->data2) {
+ engine = nouveau_engine(subdev, en->data2);
+ if (engine) {
+ engctx = nouveau_engctx_get(engine, chan);
+ if (engctx)
+ break;
+ }
+ en++;
+ }
+ if (!engctx)
+ en = orig_en;
+ }
+
+ nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
(trap[5] & 0x00000100) ? "read" : "write",
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
+ nouveau_client_name(engctx));
+
+ nouveau_engctx_put(engctx);
- en = nouveau_enum_find(vm_engine, st0);
if (en)
- printk("%s/", en->name);
+ pr_cont("%s/", en->name);
else
- printk("%02x/", st0);
+ pr_cont("%02x/", st0);
cl = nouveau_enum_find(vm_client, st2);
if (cl)
- printk("%s/", cl->name);
+ pr_cont("%s/", cl->name);
else
- printk("%02x/", st2);
+ pr_cont("%02x/", st2);
if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
else cl = NULL;
if (cl)
- printk("%s", cl->name);
+ pr_cont("%s", cl->name);
else
- printk("%02x", st3);
+ pr_cont("%02x", st3);
- printk(" reason: ");
+ pr_cont(" reason: ");
en = nouveau_enum_find(vm_fault, st1);
if (en)
- printk("%s\n", en->name);
+ pr_cont("%s\n", en->name);
else
- printk("0x%08x\n", st1);
+ pr_cont("0x%08x\n", st1);
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 9fb0f9b92d49..d422acc9af15 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -102,135 +102,19 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
return ret;
}
-static int
-nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
-{
- struct dcb_gpio_func func;
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- if (idx == 0 && gpio->irq_enable)
- gpio->irq_enable(gpio, func.line, on);
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-struct gpio_isr {
- struct nouveau_gpio *gpio;
- struct list_head head;
- struct work_struct work;
- int idx;
- struct dcb_gpio_func func;
- void (*handler)(void *, int);
- void *data;
- bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
- struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
- struct nouveau_gpio *gpio = isr->gpio;
- unsigned long flags;
- int state;
-
- state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
- isr->func.line);
- if (state >= 0)
- isr->handler(isr->data, state);
-
- spin_lock_irqsave(&gpio->lock, flags);
- isr->inhibit = false;
- spin_unlock_irqrestore(&gpio->lock, flags);
-}
-
-static void
-nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
-{
- struct gpio_isr *isr;
-
- if (idx != 0)
- return;
-
- spin_lock(&gpio->lock);
- list_for_each_entry(isr, &gpio->isr, head) {
- if (line_mask & (1 << isr->func.line)) {
- if (isr->inhibit)
- continue;
- isr->inhibit = true;
- schedule_work(&isr->work);
- }
- }
- spin_unlock(&gpio->lock);
-}
-
-static int
-nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct gpio_isr *isr;
- unsigned long flags;
- int ret;
-
- isr = kzalloc(sizeof(*isr), GFP_KERNEL);
- if (!isr)
- return -ENOMEM;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
- if (ret) {
- kfree(isr);
- return ret;
- }
-
- INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
- isr->gpio = gpio;
- isr->handler = handler;
- isr->data = data;
- isr->idx = idx;
-
- spin_lock_irqsave(&gpio->lock, flags);
- list_add(&isr->head, &gpio->isr);
- spin_unlock_irqrestore(&gpio->lock, flags);
- return 0;
-}
-
-static void
-nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
+void
+_nouveau_gpio_dtor(struct nouveau_object *object)
{
- struct gpio_isr *isr, *tmp;
- struct dcb_gpio_func func;
- unsigned long flags;
- LIST_HEAD(tofree);
- int ret;
-
- ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
- if (ret == 0) {
- spin_lock_irqsave(&gpio->lock, flags);
- list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
- if (memcmp(&isr->func, &func, sizeof(func)) ||
- isr->idx != idx ||
- isr->handler != handler || isr->data != data)
- continue;
- list_move_tail(&isr->head, &tofree);
- }
- spin_unlock_irqrestore(&gpio->lock, flags);
-
- list_for_each_entry_safe(isr, tmp, &tofree, head) {
- flush_work(&isr->work);
- kfree(isr);
- }
- }
+ struct nouveau_gpio *gpio = (void *)object;
+ nouveau_event_destroy(&gpio->events);
+ nouveau_subdev_destroy(&gpio->base);
}
int
nouveau_gpio_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass, int lines,
+ int length, void **pobject)
{
struct nouveau_gpio *gpio;
int ret;
@@ -241,15 +125,13 @@ nouveau_gpio_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ ret = nouveau_event_create(lines, &gpio->events);
+ if (ret)
+ return ret;
+
gpio->find = nouveau_gpio_find;
gpio->set = nouveau_gpio_set;
gpio->get = nouveau_gpio_get;
- gpio->irq = nouveau_gpio_irq;
- gpio->isr_run = nouveau_gpio_isr_run;
- gpio->isr_add = nouveau_gpio_isr_add;
- gpio->isr_del = nouveau_gpio_isr_del;
- INIT_LIST_HEAD(&gpio->isr);
- spin_lock_init(&gpio->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 168d16a9a8e9..76d5d5465ddd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -24,7 +24,7 @@
*
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv10_gpio_priv {
struct nouveau_gpio base;
@@ -83,27 +83,36 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
static void
-nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 mask = 0x00010001 << line;
-
- nv_wr32(gpio, 0x001104, mask);
- nv_mask(gpio, 0x001144, mask, on ? mask : 0);
-}
-
-static void
nv10_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv10_gpio_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x001104);
u32 hi = (intr & 0x0000ffff) >> 0;
u32 lo = (intr & 0xffff0000) >> 16;
+ int i;
- priv->base.isr_run(&priv->base, 0, hi | lo);
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0x001104, intr);
}
+static void
+nv10_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
+}
+
+static void
+nv10_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+ nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
+}
+
static int
nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -112,14 +121,16 @@ nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv10_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.drive = nv10_gpio_drive;
priv->base.sense = nv10_gpio_sense;
- priv->base.irq_enable = nv10_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv10_gpio_intr_enable;
+ priv->base.events->disable = nv10_gpio_intr_disable;
nv_subdev(priv)->intr = nv10_gpio_intr;
return 0;
}
@@ -141,8 +152,6 @@ nv10_gpio_init(struct nouveau_object *object)
if (ret)
return ret;
- nv_wr32(priv, 0x001140, 0x00000000);
- nv_wr32(priv, 0x001100, 0xffffffff);
nv_wr32(priv, 0x001144, 0x00000000);
nv_wr32(priv, 0x001104, 0xffffffff);
return 0;
@@ -152,7 +161,6 @@ static int
nv10_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv10_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0x001140, 0x00000000);
nv_wr32(priv, 0x001144, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf13a1200f26..bf489dcf46e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nv50_gpio_priv {
struct nouveau_gpio base;
@@ -95,21 +95,12 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
}
void
-nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
- u32 reg = line < 16 ? 0xe050 : 0xe070;
- u32 mask = 0x00010001 << (line & 0xf);
-
- nv_wr32(gpio, reg + 4, mask);
- nv_mask(gpio, reg + 0, mask, on ? mask : 0);
-}
-
-void
nv50_gpio_intr(struct nouveau_subdev *subdev)
{
struct nv50_gpio_priv *priv = (void *)subdev;
u32 intr0, intr1 = 0;
u32 hi, lo;
+ int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
if (nv_device(priv)->chipset >= 0x90)
@@ -117,13 +108,35 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- priv->base.isr_run(&priv->base, 0, hi | lo);
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
nv_wr32(priv, 0xe054, intr0);
if (nv_device(priv)->chipset >= 0x90)
nv_wr32(priv, 0xe074, intr1);
}
+void
+nv50_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nv50_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xe050 : 0xe070;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x04, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
static int
nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,7 +145,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass,
+ nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -140,7 +155,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nv50_gpio_reset;
priv->base.drive = nv50_gpio_drive;
priv->base.sense = nv50_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 83e8b8f16e6a..010431e3acec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -22,13 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
+#include "priv.h"
struct nvd0_gpio_priv {
struct nouveau_gpio base;
};
-static void
+void
nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
@@ -57,7 +57,7 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
}
-static int
+int
nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
u32 data = ((dir ^ 1) << 13) | (out << 12);
@@ -66,7 +66,7 @@ nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
return 0;
}
-static int
+int
nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
{
return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
@@ -80,7 +80,7 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvd0_gpio_priv *priv;
int ret;
- ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -88,7 +88,9 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reset = nvd0_gpio_reset;
priv->base.drive = nvd0_gpio_drive;
priv->base.sense = nvd0_gpio_sense;
- priv->base.irq_enable = nv50_gpio_irq_enable;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nv50_gpio_intr_enable;
+ priv->base.events->disable = nv50_gpio_intr_disable;
nv_subdev(priv)->intr = nv50_gpio_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
new file mode 100644
index 000000000000..16b8c5bf5efa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nve0_gpio_priv {
+ struct nouveau_gpio base;
+};
+
+void
+nve0_gpio_intr(struct nouveau_subdev *subdev)
+{
+ struct nve0_gpio_priv *priv = (void *)subdev;
+ u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
+ u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
+ u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ int i;
+
+ for (i = 0; (hi | lo) && i < 32; i++) {
+ if ((hi | lo) & (1 << i))
+ nouveau_event_trigger(priv->base.events, i);
+ }
+
+ nv_wr32(priv, 0xdc00, intr0);
+ nv_wr32(priv, 0xdc88, intr1);
+}
+
+void
+nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nve0_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+ const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+ const u32 mask = 0x00010001 << (line & 0xf);
+ nv_wr32(event->priv, addr + 0x08, mask);
+ nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
+int
+nve0_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nv_wr32(priv, 0xdc08, 0x00000000);
+ nv_wr32(priv, 0xdc88, 0x00000000);
+ return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+int
+nve0_gpio_init(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_gpio_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0xdc00, 0xffffffff);
+ nv_wr32(priv, 0xdc80, 0xffffffff);
+ return 0;
+}
+
+void
+nve0_gpio_dtor(struct nouveau_object *object)
+{
+ struct nve0_gpio_priv *priv = (void *)object;
+ nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_gpio_priv *priv;
+ int ret;
+
+ ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.reset = nvd0_gpio_reset;
+ priv->base.drive = nvd0_gpio_drive;
+ priv->base.sense = nvd0_gpio_sense;
+ priv->base.events->priv = priv;
+ priv->base.events->enable = nve0_gpio_intr_enable;
+ priv->base.events->disable = nve0_gpio_intr_disable;
+ nv_subdev(priv)->intr = nve0_gpio_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_gpio_oclass = {
+ .handle = NV_SUBDEV(GPIO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_gpio_ctor,
+ .dtor = nv50_gpio_dtor,
+ .init = nve0_gpio_init,
+ .fini = nve0_gpio_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
new file mode 100644
index 000000000000..2ee1c895c782
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_GPIO_H__
+#define __NVKM_GPIO_H__
+
+#include <subdev/gpio.h>
+
+void nv50_gpio_dtor(struct nouveau_object *);
+int nv50_gpio_init(struct nouveau_object *);
+int nv50_gpio_fini(struct nouveau_object *, bool);
+void nv50_gpio_intr(struct nouveau_subdev *);
+void nv50_gpio_intr_enable(struct nouveau_event *, int line);
+void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+
+void nvd0_gpio_reset(struct nouveau_gpio *, u8);
+int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
+int nvd0_gpio_sense(struct nouveau_gpio *, int);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
new file mode 100644
index 000000000000..dec94e9d776a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/i2c.h>
+
+struct anx9805_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+};
+
+static int
+anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ u8 tmp, i;
+
+ nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
+ nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+ nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
+ nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
+ mdelay(5);
+ if (i++ == 100) {
+ nv_error(port, "link training timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (tmp & 0x70) {
+ nv_error(port, "link training failed: 0x%02x\n", tmp);
+ return -EIO;
+ }
+
+ return 1;
+}
+
+static int
+anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct anx9805_i2c_port *chan = (void *)port;
+ struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+ int i, ret = -ETIMEDOUT;
+ u8 tmp;
+
+ tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
+ nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+
+ nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+ for (i = 0; !(type & 1) && i < size; i++)
+ nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
+ nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
+ nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
+ nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
+ nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
+ nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+
+ i = 0;
+ while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+ mdelay(5);
+ if (i++ == 32)
+ goto done;
+ }
+
+ if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+ ret = -EIO;
+ goto done;
+ }
+
+ for (i = 0; (type & 1) && i < size; i++)
+ data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+ ret = 0;
+done:
+ nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+ return ret;
+}
+
+static const struct nouveau_i2c_func
+anx9805_aux_func = {
+ .aux = anx9805_aux,
+ .lnk_ctl = anx9805_train,
+};
+
+static int
+anx9805_aux_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *chan;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ chan->addr = 0x38;
+ chan->ctrl = 0x39;
+ break;
+ case 0x0e:
+ chan->addr = 0x3c;
+ chan->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ chan->base.func = &anx9805_aux_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_aux_ofuncs = {
+ .ctor = anx9805_aux_chan_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+static int
+anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct anx9805_i2c_port *port = adap->algo_data;
+ struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
+ struct i2c_msg *msg = msgs;
+ int ret = -ETIMEDOUT;
+ int i, j, cnt = num;
+ u8 seg = 0x00, off = 0x00, tmp;
+
+ tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
+ nv_wri2cr(mast, port->ctrl, 0x07, tmp);
+ nv_wri2cr(mast, port->addr, 0x43, 0x05);
+ mdelay(5);
+
+ while (cnt--) {
+ if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+ nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
+ nv_wri2cr(mast, port->addr, 0x41, seg);
+ nv_wri2cr(mast, port->addr, 0x42, off);
+ nv_wri2cr(mast, port->addr, 0x44, msg->len);
+ nv_wri2cr(mast, port->addr, 0x45, 0x00);
+ nv_wri2cr(mast, port->addr, 0x43, 0x01);
+ for (i = 0; i < msg->len; i++) {
+ j = 0;
+ while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+ mdelay(5);
+ if (j++ == 32)
+ goto done;
+ }
+ msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+ }
+ } else
+ if (!(msg->flags & I2C_M_RD)) {
+ if (msg->addr == 0x50 && msg->len == 0x01) {
+ off = msg->buf[0];
+ } else
+ if (msg->addr == 0x30 && msg->len == 0x01) {
+ seg = msg->buf[0];
+ } else
+ goto done;
+ } else {
+ goto done;
+ }
+ msg++;
+ }
+
+ ret = num;
+done:
+ nv_wri2cr(mast, port->addr, 0x43, 0x00);
+ return ret;
+}
+
+static u32
+anx9805_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+anx9805_i2c_algo = {
+ .master_xfer = anx9805_xfer,
+ .functionality = anx9805_func
+};
+
+static const struct nouveau_i2c_func
+anx9805_i2c_func = {
+};
+
+static int
+anx9805_ddc_port_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c_port *mast = (void *)parent;
+ struct anx9805_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &anx9805_i2c_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ switch ((oclass->handle & 0xff00) >> 8) {
+ case 0x0d:
+ port->addr = 0x3d;
+ port->ctrl = 0x39;
+ break;
+ case 0x0e:
+ port->addr = 0x3f;
+ port->ctrl = 0x3b;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (mast->adapter.algo == &i2c_bit_algo) {
+ struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+ algo->udelay = max(algo->udelay, 40);
+ }
+
+ port->base.func = &anx9805_i2c_func;
+ return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_ddc_ofuncs = {
+ .ctor = anx9805_ddc_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+};
+
+struct nouveau_oclass
+nouveau_anx9805_sclass[] = {
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
+ { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index dc27e794a851..5de074ad170b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -24,151 +24,40 @@
#include <subdev/i2c.h>
-/******************************************************************************
- * aux channel util functions
- *****************************************************************************/
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nouveau_i2c *aux, int ch)
-{
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nouveau_i2c *aux, int ch)
-{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(aux, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
-
- return 0;
-}
-
-static int
-auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
-{
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ret, i;
-
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
- ret = auxch_init(aux, ch);
- if (ret)
- goto out;
-
- stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
- }
- }
-
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
- /* retry transaction a number of times on failure... */
- ret = -EREMOTEIO;
- for (retries = 0; retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- goto out;
- }
- } while (ctrl & 0x00010000);
-
- /* read status, and check if transaction completed ok */
- stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
- if (!(stat & 0x000f0f00)) {
- ret = 0;
- break;
- }
-
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
-
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
- }
-
-out:
- auxch_fini(aux, ch);
- return ret;
-}
-
int
-nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 9, addr, data, size);
+ }
+ return -ENODEV;
}
int
-nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
- return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
+ if (port->func->aux) {
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return port->func->aux(port, 8, addr, data, size);
+ }
+ return -ENODEV;
}
static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
+ if (!port->func->aux)
+ return -ENODEV;
+ if ( port->func->acquire)
+ port->func->acquire(port);
+
while (mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
@@ -185,8 +74,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
- msg->addr, ptr, cnt);
+ ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index dbfc2abf0cfe..a114a0ed7e98 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Red Hat Inc.
+ * Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,64 +22,136 @@
* Authors: Ben Skeggs
*/
-#include "core/option.h"
+#include <core/option.h>
-#include "subdev/i2c.h"
-#include "subdev/vga.h"
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
-int
-nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+/******************************************************************************
+ * interface to linux i2c bit-banging algorithm
+ *****************************************************************************/
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+#define CSTMSEL true
+#else
+#define CSTMSEL false
+#endif
+
+static int
+nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
{
- u8 val;
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
- };
+ struct i2c_algo_bit_data *bit = adap->algo_data;
+ struct nouveau_i2c_port *port = bit->data;
+ if (port->func->acquire)
+ port->func->acquire(port);
+ return 0;
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static void
+nouveau_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_scl(port, state);
+}
- return val;
+static void
+nouveau_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_port *port = data;
+ port->func->drive_sda(port, state);
}
-int
-nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+static int
+nouveau_i2c_getscl(void *data)
{
- struct i2c_msg msgs[] = {
- { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = addr, .flags = 0, .len = 1, .buf = &val },
- };
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_scl(port);
+}
- int ret = i2c_transfer(&port->adapter, msgs, 2);
- if (ret != 2)
- return -EIO;
+static int
+nouveau_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_port *port = data;
+ return port->func->sense_sda(port);
+}
- return 0;
+/******************************************************************************
+ * base i2c "port" class implementation
+ *****************************************************************************/
+
+void
+_nouveau_i2c_port_dtor(struct nouveau_object *object)
+{
+ struct nouveau_i2c_port *port = (void *)object;
+ i2c_del_adapter(&port->adapter);
+ nouveau_object_destroy(&port->base);
}
-bool
-nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+int
+nouveau_i2c_port_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u8 index,
+ const struct i2c_algorithm *algo,
+ int size, void **pobject)
{
- u8 buf[] = { 0 };
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = 0,
- .len = 1,
- .buf = buf,
- },
- {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_i2c *i2c = (void *)engine;
+ struct nouveau_i2c_port *port;
+ int ret;
- return i2c_transfer(&port->adapter, msgs, 2) == 2;
+ ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ port = *pobject;
+ if (ret)
+ return ret;
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", device->name, index);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &device->pdev->dev;
+ port->index = index;
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ if ( algo == &nouveau_i2c_bit_algo &&
+ !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
+ struct i2c_algo_bit_data *bit;
+
+ bit = kzalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return -ENOMEM;
+
+ bit->udelay = 10;
+ bit->timeout = usecs_to_jiffies(2200);
+ bit->data = port;
+ bit->pre_xfer = nouveau_i2c_pre_xfer;
+ bit->setsda = nouveau_i2c_setsda;
+ bit->setscl = nouveau_i2c_setscl;
+ bit->getsda = nouveau_i2c_getsda;
+ bit->getscl = nouveau_i2c_getscl;
+
+ port->adapter.algo_data = bit;
+ ret = i2c_bit_add_bus(&port->adapter);
+ } else {
+ port->adapter.algo_data = port;
+ port->adapter.algo = algo;
+ ret = i2c_add_adapter(&port->adapter);
+ }
+
+ /* drop port's i2c subdev refcount, i2c handles this itself */
+ if (ret == 0) {
+ list_add_tail(&port->head, &i2c->ports);
+ atomic_dec(&engine->refcount);
+ }
+
+ return ret;
}
+/******************************************************************************
+ * base i2c subdev class implementation
+ *****************************************************************************/
+
static struct nouveau_i2c_port *
nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
{
@@ -103,29 +175,23 @@ nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
list_for_each_entry(port, &i2c->ports, head) {
if (port->index == index)
- break;
+ return port;
}
- if (&port->head == &i2c->ports)
- return NULL;
+ return NULL;
+}
- if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
- u32 reg = 0x00e500, val;
- if (port->type == 6) {
- reg += port->drive * 0x50;
- val = 0x2002;
- } else {
- reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
- val = 0xe001;
- }
+static struct nouveau_i2c_port *
+nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
+{
+ struct nouveau_i2c_port *port;
- /* nfi, but neither auxch or i2c work if it's 1 */
- nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
- /* nfi, but switches auxch vs normal i2c */
- nv_mask(i2c, reg + 0x00, 0x0000f003, val);
+ list_for_each_entry(port, &i2c->ports, head) {
+ if (nv_hclass(port) == type)
+ return port;
}
- return port;
+ return NULL;
}
static int
@@ -155,109 +221,86 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
return -ENODEV;
}
-void
-nouveau_i2c_drive_scl(void *data, int state)
+int
+_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
- struct nouveau_i2c_port *port = data;
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
+ if (ret && suspend)
+ goto fail;
}
-}
-
-void
-nouveau_i2c_drive_sda(void *data, int state)
-{
- struct nouveau_i2c_port *port = data;
- if (port->type == DCB_I2C_NV04_BIT) {
- u8 val = nv_rdvgac(port->i2c, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(port->i2c, port->drive, 4 | port->state);
+ return nouveau_subdev_fini(&i2c->base, suspend);
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->init(nv_object(port));
}
+
+ return ret;
}
int
-nouveau_i2c_sense_scl(void *data)
+_nouveau_i2c_init(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x01);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x10);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port;
+ int ret;
+
+ ret = nouveau_subdev_init(&i2c->base);
+ if (ret == 0) {
+ list_for_each_entry(port, &i2c->ports, head) {
+ ret = nv_ofuncs(port)->init(nv_object(port));
+ if (ret)
+ goto fail;
+ }
}
- return 0;
+ return ret;
+fail:
+ list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+ nv_ofuncs(port)->fini(nv_object(port), false);
+ }
+
+ return ret;
}
-int
-nouveau_i2c_sense_sda(void *data)
+void
+_nouveau_i2c_dtor(struct nouveau_object *object)
{
- struct nouveau_i2c_port *port = data;
- struct nouveau_device *device = nv_device(port->i2c);
-
- if (port->type == DCB_I2C_NV04_BIT) {
- return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
- } else
- if (port->type == DCB_I2C_NV4E_BIT) {
- return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
- } else
- if (port->type == DCB_I2C_NVIO_BIT) {
- if (device->card_type < NV_D0)
- return !!(nv_rd32(port->i2c, port->sense) & 0x02);
- else
- return !!(nv_rd32(port->i2c, port->sense) & 0x20);
+ struct nouveau_i2c *i2c = (void *)object;
+ struct nouveau_i2c_port *port, *temp;
+
+ list_for_each_entry_safe(port, temp, &i2c->ports, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&port);
}
- return 0;
+ nouveau_subdev_destroy(&i2c->base);
}
-static const u32 nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
+static struct nouveau_oclass *
+nouveau_i2c_extdev_sclass[] = {
+ nouveau_anx9805_sclass,
};
-static int
-nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nouveau_i2c_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nouveau_oclass *sclass,
+ int length, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nouveau_bios *bios = nouveau_bios(parent);
- struct nouveau_i2c_port *port;
struct nouveau_i2c *i2c;
+ struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i = -1;
+ int ret, i, j, index = -1;
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
ret = nouveau_subdev_create(parent, engine, oclass, 0,
"I2C", "i2c", &i2c);
@@ -266,142 +309,60 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
i2c->find = nouveau_i2c_find;
+ i2c->find_type = nouveau_i2c_find_type;
i2c->identify = nouveau_i2c_identify;
INIT_LIST_HEAD(&i2c->ports);
- while (!dcb_i2c_parse(bios, ++i, &info)) {
+ while (!dcb_i2c_parse(bios, ++index, &info)) {
if (info.type == DCB_I2C_UNUSED)
continue;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- nv_error(i2c, "failed port memory alloc at %d\n", i);
- break;
- }
-
- port->type = info.type;
- switch (port->type) {
- case DCB_I2C_NV04_BIT:
- port->drive = info.drive;
- port->sense = info.sense;
- break;
- case DCB_I2C_NV4E_BIT:
- port->drive = 0x600800 + info.drive;
- port->sense = port->drive;
- break;
- case DCB_I2C_NVIO_BIT:
- port->drive = info.drive & 0x0f;
- if (device->card_type < NV_D0) {
- if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
- break;
- port->drive = nv50_i2c_port[port->drive];
- port->sense = port->drive;
- } else {
- port->drive = 0x00d014 + (port->drive * 0x20);
- port->sense = port->drive;
+ oclass = sclass;
+ do {
+ ret = -EINVAL;
+ if (oclass->handle == info.type) {
+ ret = nouveau_object_ctor(*pobject, *pobject,
+ oclass, &info,
+ index, &object);
}
+ } while (ret && (++oclass)->handle);
+ }
+
+ /* in addition to the busses specified in the i2c table, there
+ * may be ddc/aux channels hiding behind external tmds/dp/etc
+ * transmitters.
+ */
+ index = ((index + 0x0f) / 0x10) * 0x10;
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
+ if (!outp.location || !outp.extdev)
+ continue;
+
+ switch (outp.type) {
+ case DCB_OUTPUT_TMDS:
+ info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
break;
- case DCB_I2C_NVIO_AUX:
- port->drive = info.drive & 0x0f;
- port->sense = port->drive;
- port->adapter.algo = &nouveau_i2c_aux_algo;
+ case DCB_OUTPUT_DP:
+ info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
break;
default:
- break;
- }
-
- if (!port->adapter.algo && !port->drive) {
- nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
- i, port->type, port->drive, port->sense);
- kfree(port);
continue;
}
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", device->name, i);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &device->pdev->dev;
- port->i2c = i2c;
- port->index = i;
- port->dcb = info.data;
- i2c_set_adapdata(&port->adapter, i2c);
-
- if (port->adapter.algo != &nouveau_i2c_aux_algo) {
- nouveau_i2c_drive_scl(port, 0);
- nouveau_i2c_drive_sda(port, 1);
- nouveau_i2c_drive_scl(port, 1);
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
- if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
-#else
- if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
-#endif
- port->adapter.algo = &nouveau_i2c_bit_algo;
- ret = i2c_add_adapter(&port->adapter);
- } else {
- port->adapter.algo_data = &port->bit;
- port->bit.udelay = 10;
- port->bit.timeout = usecs_to_jiffies(2200);
- port->bit.data = port;
- port->bit.setsda = nouveau_i2c_drive_sda;
- port->bit.setscl = nouveau_i2c_drive_scl;
- port->bit.getsda = nouveau_i2c_sense_sda;
- port->bit.getscl = nouveau_i2c_sense_scl;
- ret = i2c_bit_add_bus(&port->adapter);
- }
- } else {
- port->adapter.algo = &nouveau_i2c_aux_algo;
- ret = i2c_add_adapter(&port->adapter);
- }
-
- if (ret) {
- nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
- kfree(port);
- continue;
+ ret = -ENODEV;
+ j = -1;
+ while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
+ parent = nv_object(i2c->find(i2c, outp.i2c_index));
+ oclass = nouveau_i2c_extdev_sclass[j];
+ do {
+ if (oclass->handle != info.type)
+ continue;
+ ret = nouveau_object_ctor(parent, *pobject,
+ oclass, NULL,
+ index++, &object);
+ } while (ret && (++oclass)->handle);
}
-
- list_add_tail(&port->head, &i2c->ports);
}
return 0;
}
-
-static void
-nouveau_i2c_dtor(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- struct nouveau_i2c_port *port, *temp;
-
- list_for_each_entry_safe(port, temp, &i2c->ports, head) {
- i2c_del_adapter(&port->adapter);
- list_del(&port->head);
- kfree(port);
- }
-
- nouveau_subdev_destroy(&i2c->base);
-}
-
-static int
-nouveau_i2c_init(struct nouveau_object *object)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_init(&i2c->base);
-}
-
-static int
-nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_i2c *i2c = (void *)object;
- return nouveau_subdev_fini(&i2c->base, suspend);
-}
-
-struct nouveau_oclass
-nouveau_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nouveau_i2c_ctor,
- .dtor = nouveau_i2c_dtor,
- .init = nouveau_i2c_init,
- .fini = nouveau_i2c_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index 1c4c9a5c8e2e..a6e72d3b06b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -32,25 +32,25 @@
static inline void
i2c_drive_scl(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_scl(port, state);
+ port->func->drive_scl(port, state);
}
static inline void
i2c_drive_sda(struct nouveau_i2c_port *port, int state)
{
- nouveau_i2c_drive_sda(port, state);
+ port->func->drive_sda(port, state);
}
static inline int
i2c_sense_scl(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_scl(port);
+ return port->func->sense_scl(port);
}
static inline int
i2c_sense_sda(struct nouveau_i2c_port *port)
{
- return nouveau_i2c_sense_sda(port);
+ return port->func->sense_sda(port);
}
static void
@@ -77,9 +77,8 @@ i2c_start(struct nouveau_i2c_port *port)
{
int ret = 0;
- port->state = i2c_sense_scl(port);
- port->state |= i2c_sense_sda(port) << 1;
- if (port->state != 3) {
+ if (!i2c_sense_scl(port) ||
+ !i2c_sense_sda(port)) {
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 1);
if (!i2c_raise_scl(port))
@@ -184,10 +183,13 @@ i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
+ struct nouveau_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
+ if (port->func->acquire)
+ port->func->acquire(port);
+
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
new file mode 100644
index 000000000000..2ad18840fe63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv04_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv04_i2c_port {
+ struct nouveau_i2c_port base;
+ u8 drive;
+ u8 sense;
+};
+
+static void
+nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ u8 val = nv_rdvgac(priv, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
+}
+
+static int
+nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv04_i2c_port *port = (void *)base;
+ return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
+}
+
+static const struct nouveau_i2c_func
+nv04_i2c_func = {
+ .drive_scl = nv04_i2c_drive_scl,
+ .drive_sda = nv04_i2c_drive_sda,
+ .sense_scl = nv04_i2c_sense_scl,
+ .sense_sda = nv04_i2c_sense_sda,
+};
+
+static int
+nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv04_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv04_i2c_func;
+ port->drive = info->drive;
+ port->sense = info->sense;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
new file mode 100644
index 000000000000..f501ae25dbb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv4e_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv4e_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+};
+
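+/* NV4E ports live in MMIO at 0x600800 plus the DCB drive offset: SCL is
+ * driven via bit 5 and sensed on bit 18, SDA is driven via bit 4 and sensed
+ * on bit 19; bit 0 is always asserted on writes.
+ */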
+static void
+nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv4e_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00080000);
+}
+
+static const struct nouveau_i2c_func
+nv4e_i2c_func = {
+ .drive_scl = nv4e_i2c_drive_scl,
+ .drive_sda = nv4e_i2c_drive_sda,
+ .sense_scl = nv4e_i2c_sense_scl,
+ .sense_sda = nv4e_i2c_sense_sda,
+};
+
+static int
+nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv4e_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv4e_i2c_func;
+ port->addr = 0x600800 + info->drive;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv4e_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv4e_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
new file mode 100644
index 000000000000..378dfa324e5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
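+/* the NV50 bit-bang ports keep a software shadow of the output latch in
+ * port->state (SCL = bit 0, SDA = bit 1) and write the whole value back on
+ * every change, so no read-modify-write of the register is needed.
+ */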
+void
+nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+void
+nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(priv, port->addr, port->state);
+}
+
+int
+nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000001);
+}
+
+int
+nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000002);
+}
+
+static const struct nouveau_i2c_func
+nv50_i2c_func = {
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
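+/* MMIO offsets of the NV50 bit-bang I2C ports, indexed by the DCB entry's
+ * drive field */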
+const u32 nv50_i2c_addr[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
+
+static int
+nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv50_i2c_func;
+ port->state = 0x00000007;
+ port->addr = nv50_i2c_addr[info->drive];
+ return 0;
+}
+
+int
+nv50_i2c_port_init(struct nouveau_object *object)
+{
+ struct nv50_i2c_priv *priv = (void *)object->engine;
+ struct nv50_i2c_port *port = (void *)object;
+ nv_wr32(priv, port->addr, port->state);
+ return nouveau_i2c_port_init(&port->base);
+}
+
+static struct nouveau_oclass
+nv50_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
new file mode 100644
index 000000000000..4e5ba48ebf5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
+#ifndef __NV50_I2C_H__
+#define __NV50_I2C_H__
+
+#include <subdev/i2c.h>
+
+struct nv50_i2c_priv {
+ struct nouveau_i2c base;
+};
+
+struct nv50_i2c_port {
+ struct nouveau_i2c_port base;
+ u32 addr;
+ u32 ctrl;
+ u32 data;
+ u32 state;
+};
+
+extern const u32 nv50_i2c_addr[];
+extern const int nv50_i2c_addr_nr;
+int nv50_i2c_port_init(struct nouveau_object *);
+int nv50_i2c_sense_scl(struct nouveau_i2c_port *);
+int nv50_i2c_sense_sda(struct nouveau_i2c_port *);
+void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
+void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);
+
+int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv94_i2c_acquire(struct nouveau_i2c_port *);
+void nv94_i2c_release(struct nouveau_i2c_port *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
new file mode 100644
index 000000000000..61b771670bfe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+ const u32 unksel = 1; /* not sure which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(aux, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+int
+nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nv50_i2c_port *port = (void *)base;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ch = port->addr;
+ int ret, i;
+
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+ ret = auxch_init(aux, ch);
+ if (ret)
+ goto out;
+
+ stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
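+ /* even transaction types are writes: preload the outgoing
+ * bytes into the 16-byte AUX data window */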
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
+
+ /* retry transaction a number of times on failure... */
+ ret = -EREMOTEIO;
+ for (retries = 0; retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
+ if (!(stat & 0x000f0f00)) {
+ ret = 0;
+ break;
+ }
+
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ auxch_fini(aux, ch);
+ return ret;
+}
+
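+/* a port whose DCB entry marks it as shared has a pad control register;
+ * acquire() reprograms that pad for the port's protocol before use, and
+ * release() is currently a no-op.
+ */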
+void
+nv94_i2c_acquire(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ if (port->ctrl) {
+ nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
+ nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
+ }
+}
+
+void
+nv94_i2c_release(struct nouveau_i2c_port *base)
+{
+}
+
+static const struct nouveau_i2c_func
+nv94_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nv50_i2c_sense_scl,
+ .sense_sda = nv50_i2c_sense_sda,
+};
+
+static int
+nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ if (info->drive >= nv50_i2c_addr_nr)
+ return -EINVAL;
+
+ port->base.func = &nv94_i2c_func;
+ port->state = 7;
+ port->addr = nv50_i2c_addr[info->drive];
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static const struct nouveau_i2c_func
+nv94_aux_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .aux = nv94_aux,
+};
+
+int
+nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nv94_aux_func;
+ port->addr = info->drive;
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->drive * 0x50);
+ port->data = 0x00002002;
+ }
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv94_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0x94),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
new file mode 100644
index 000000000000..f761b8a610f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000010);
+}
+
+static int
+nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+ struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+ struct nv50_i2c_port *port = (void *)base;
+ return !!(nv_rd32(priv, port->addr) & 0x00000020);
+}
+
+static const struct nouveau_i2c_func
+nvd0_i2c_func = {
+ .acquire = nv94_i2c_acquire,
+ .release = nv94_i2c_release,
+ .drive_scl = nv50_i2c_drive_scl,
+ .drive_sda = nv50_i2c_drive_sda,
+ .sense_scl = nvd0_i2c_sense_scl,
+ .sense_sda = nvd0_i2c_sense_sda,
+};
+
+static int
+nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_bit_algo, &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.func = &nvd0_i2c_func;
+ port->state = 0x00000007;
+ port->addr = 0x00d014 + (info->drive * 0x20);
+ if (info->share != DCB_I2C_UNUSED) {
+ port->ctrl = 0x00e500 + (info->share * 0x50);
+ port->data = 0x0000e001;
+ }
+ return 0;
+}
+
+static struct nouveau_oclass
+nvd0_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+static int
+nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_i2c_priv *priv;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_i2c_oclass = {
+ .handle = NV_SUBDEV(I2C, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 23ebe477a6f0..89da8fa7ea0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -37,7 +37,7 @@ nv04_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
{ 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
- { 0x10000000, NVDEV_SUBDEV_GPIO }, /* PBUS */
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 8d759f830323..5965add6daee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -38,6 +38,7 @@ nv50_mc_intr[] = {
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0000d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index ceb5c83f9459..3a80b29dce0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,10 +35,12 @@ nv98_mc_intr[] = {
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0040d101, NVDEV_SUBDEV_FB },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 92796682722d..42bbf72023a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,11 +36,13 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
index 839ca1edc132..4bde7f7f7b81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -156,15 +156,15 @@ mxms_foreach(struct nouveau_mxm *mxm, u8 types,
nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
for (j = headerlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
dump += headerlen;
for (i = 0; i < entries; i++, dump += recordlen) {
nv_debug(mxm, " ");
for (j = recordlen - 1; j >= 0; j--)
- printk("%02x", dump[j]);
- printk("\n");
+ pr_cont("%02x", dump[j]);
+ pr_cont("\n");
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 1674c74a76c8..f794dc89a3b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -29,6 +29,134 @@
#include "priv.h"
+static int
+nouveau_therm_update_trip(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
+ *cur_trip = NULL,
+ *last_trip = priv->last_trip;
+ u8 temp = therm->temp_get(therm);
+ u16 duty, i;
+
+ /* look for the trip point corresponding to the current temperature */
+ cur_trip = NULL;
+ for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+ if (temp >= trip[i].temp)
+ cur_trip = &trip[i];
+ }
+
+ /* account for the hysteresis cycle */
+ if (last_trip && temp <= (last_trip->temp) &&
+ temp > (last_trip->temp - last_trip->hysteresis))
+ cur_trip = last_trip;
+
+ if (cur_trip) {
+ duty = cur_trip->fan_duty;
+ priv->last_trip = cur_trip;
+ } else {
+ duty = 0;
+ priv->last_trip = NULL;
+ }
+
+ return duty;
+}
+
+static int
+nouveau_therm_update_linear(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ u8 linear_min_temp = priv->fan->bios.linear_min_temp;
+ u8 linear_max_temp = priv->fan->bios.linear_max_temp;
+ u8 temp = therm->temp_get(therm);
+ u16 duty;
+
+ /* handle the non-linear part first */
+ if (temp < linear_min_temp)
+ return priv->fan->bios.min_duty;
+ else if (temp > linear_max_temp)
+ return priv->fan->bios.max_duty;
+
+ /* we are in the linear zone */
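+ /* i.e. duty = min + (temp - Tmin) * (max - min) / (Tmax - Tmin) */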
+ duty = (temp - linear_min_temp);
+ duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+ duty /= (linear_max_temp - linear_min_temp);
+ duty += priv->fan->bios.min_duty;
+
+ return duty;
+}
+
+static void
+nouveau_therm_update(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (mode < 0)
+ mode = priv->mode;
+ priv->mode = mode;
+
+ switch (mode) {
+ case NOUVEAU_THERM_CTRL_MANUAL:
+ duty = nouveau_therm_fan_get(therm);
+ if (duty < 0)
+ duty = 100;
+ break;
+ case NOUVEAU_THERM_CTRL_AUTO:
+ if (priv->fan->bios.nr_fan_trip)
+ duty = nouveau_therm_update_trip(therm);
+ else
+ duty = nouveau_therm_update_linear(therm);
+ break;
+ case NOUVEAU_THERM_CTRL_NONE:
+ default:
+ goto done;
+ }
+
+ nv_debug(therm, "FAN target request: %d%%\n", duty);
+ nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
+
+done:
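+ /* while in automatic mode, poll the sensor again in one second
+ * (the alarm period is in nanoseconds) */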
+ if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+ ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_therm_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, alarm);
+ nouveau_therm_update(&priv->base, -1);
+}
+
+int
+nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_device *device = nv_device(therm);
+ static const char *name[] = {
+ "disabled",
+ "manual",
+ "automatic"
+ };
+
+ /* The default PDAEMON ucode interferes with fan management */
+ if ((mode >= ARRAY_SIZE(name)) ||
+ (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+ return -EINVAL;
+
+ if (priv->mode == mode)
+ return 0;
+
+ nv_info(therm, "Thermal management: %s\n", name[mode]);
+ nouveau_therm_update(therm, mode);
+ return 0;
+}
+
int
nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type)
@@ -37,11 +165,11 @@ nouveau_therm_attr_get(struct nouveau_therm *therm,
switch (type) {
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
- return priv->bios_fan.min_duty;
+ return priv->fan->bios.min_duty;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
- return priv->bios_fan.max_duty;
+ return priv->fan->bios.max_duty;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return priv->fan.mode;
+ return priv->mode;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
return priv->bios_sensor.thrs_fan_boost.temp;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
@@ -73,42 +201,50 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
if (value < 0)
value = 0;
- if (value > priv->bios_fan.max_duty)
- value = priv->bios_fan.max_duty;
- priv->bios_fan.min_duty = value;
+ if (value > priv->fan->bios.max_duty)
+ value = priv->fan->bios.max_duty;
+ priv->fan->bios.min_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
if (value < 0)
value = 0;
- if (value < priv->bios_fan.min_duty)
- value = priv->bios_fan.min_duty;
- priv->bios_fan.max_duty = value;
+ if (value < priv->fan->bios.min_duty)
+ value = priv->fan->bios.min_duty;
+ priv->fan->bios.max_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return nouveau_therm_fan_set_mode(therm, value);
+ return nouveau_therm_mode(therm, value);
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
priv->bios_sensor.thrs_fan_boost.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
priv->bios_sensor.thrs_down_clock.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
priv->bios_sensor.thrs_down_clock.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
priv->bios_sensor.thrs_critical.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
priv->bios_sensor.thrs_critical.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
priv->bios_sensor.thrs_shutdown.temp = value;
+ priv->sensor.program_alarms(therm);
return 0;
case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
priv->bios_sensor.thrs_shutdown.hysteresis = value;
+ priv->sensor.program_alarms(therm);
return 0;
}
@@ -116,7 +252,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
}
int
-nouveau_therm_init(struct nouveau_object *object)
+_nouveau_therm_init(struct nouveau_object *object)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
@@ -126,19 +262,69 @@ nouveau_therm_init(struct nouveau_object *object)
if (ret)
return ret;
- if (priv->fan.percent >= 0)
- therm->fan_set(therm, priv->fan.percent);
-
+ if (priv->suspend >= 0)
+ nouveau_therm_mode(therm, priv->mode);
+ priv->sensor.program_alarms(therm);
return 0;
}
int
-nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
- priv->fan.percent = therm->fan_get(therm);
+ if (suspend) {
+ priv->suspend = priv->mode;
+ priv->mode = NOUVEAU_THERM_CTRL_NONE;
+ }
return nouveau_subdev_fini(&therm->base, suspend);
}
+
+int
+nouveau_therm_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
+{
+ struct nouveau_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+ "therm", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->sensor.alarm_program_lock);
+
+ priv->base.fan_get = nouveau_therm_fan_user_get;
+ priv->base.fan_set = nouveau_therm_fan_user_set;
+ priv->base.fan_sense = nouveau_therm_fan_sense;
+ priv->base.attr_get = nouveau_therm_attr_get;
+ priv->base.attr_set = nouveau_therm_attr_set;
+ priv->mode = priv->suspend = -1; /* undefined */
+ return 0;
+}
+
+int
+nouveau_therm_preinit(struct nouveau_therm *therm)
+{
+ nouveau_therm_ic_ctor(therm);
+ nouveau_therm_sensor_ctor(therm);
+ nouveau_therm_fan_ctor(therm);
+
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ return 0;
+}
+
+void
+_nouveau_therm_dtor(struct nouveau_object *object)
+{
+ struct nouveau_therm_priv *priv = (void *)object;
+ kfree(priv->fan);
+ nouveau_subdev_destroy(&priv->base.base);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 523178685180..c728380d3d62 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -27,90 +27,107 @@
#include <core/object.h>
#include <core/device.h>
+
#include <subdev/gpio.h>
#include <subdev/timer.h>
-int
-nouveau_therm_fan_get(struct nouveau_therm *therm)
+static int
+nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
{
+ struct nouveau_therm *therm = fan->parent;
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (!priv->fan.pwm_get)
- return -ENODEV;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ unsigned long flags;
+ int ret = 0;
+ int duty;
+
+ /* update target fan speed, restricting to allowed range */
+ spin_lock_irqsave(&fan->lock, flags);
+ if (target < 0)
+ target = fan->percent;
+ target = max_t(u8, target, fan->bios.min_duty);
+ target = min_t(u8, target, fan->bios.max_duty);
+ if (fan->percent != target) {
+ nv_debug(therm, "FAN target: %d\n", target);
+ fan->percent = target;
+ }
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
- if (ret == 0 && divs) {
- divs = max(divs, duty);
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
- return (duty * 100) / divs;
- }
+ /* check that we're not already at the target duty cycle */
+ duty = fan->get(therm);
+ if (duty == target)
+ goto done;
+
+ /* smooth out the fan speed increase/decrease */
+ if (!immediate && duty >= 0) {
+ /* the constant "3" is a rough approximation of nvidia's
+ * behaviour: it ramps the fan speed incrementally rather
+ * than jumping straight to the target
+ */
+ if (duty < target)
+ duty = min(duty + 3, target);
+ else if (duty > target)
+ duty = max(duty - 3, target);
+ } else {
+ duty = target;
+ }
- return gpio->get(gpio, 0, func.func, func.line) * 100;
+ nv_debug(therm, "FAN update: %d\n", duty);
+ ret = fan->set(therm, duty);
+ if (ret)
+ goto done;
+
+ /* schedule next fan update, if not at target speed already */
+ if (list_empty(&fan->alarm.head) && target != duty) {
+ u16 bump_period = fan->bios.bump_period;
+ u16 slow_down_period = fan->bios.slow_down_period;
+ u64 delay;
+
+ if (duty > target)
+ delay = slow_down_period;
+ else if (duty == target)
+ delay = min(bump_period, slow_down_period);
+ else
+ delay = bump_period;
+
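+ /* the bump/slow-down periods are in milliseconds; the timer
+ * alarm takes nanoseconds */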
+ ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
}
- return -ENODEV;
+done:
+ spin_unlock_irqrestore(&fan->lock, flags);
+ return ret;
+}
+
+static void
+nouveau_fan_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
+ nouveau_fan_update(fan, false, -1);
}
int
-nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
+nouveau_therm_fan_get(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
- int card_type = nv_device(therm)->card_type;
- u32 divs, duty;
- int ret;
-
- if (priv->fan.mode == FAN_CONTROL_NONE)
- return -EINVAL;
-
- if (!priv->fan.pwm_set)
- return -ENODEV;
-
- if (percent < priv->bios_fan.min_duty)
- percent = priv->bios_fan.min_duty;
- if (percent > priv->bios_fan.max_duty)
- percent = priv->bios_fan.max_duty;
-
- ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
- if (ret == 0) {
- divs = priv->bios_perf_fan.pwm_divisor;
- if (priv->bios_fan.pwm_freq) {
- divs = 1;
- if (priv->fan.pwm_clock)
- divs = priv->fan.pwm_clock(therm);
- divs /= priv->bios_fan.pwm_freq;
- }
-
- duty = ((divs * percent) + 99) / 100;
- if (card_type <= NV_40 || (func.log[0] & 1))
- duty = divs - duty;
-
- ret = priv->fan.pwm_set(therm, func.line, divs, duty);
- return ret;
- }
+ return priv->fan->get(therm);
+}
- return -ENODEV;
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return nouveau_fan_update(priv->fan, immediate, percent);
}
int
nouveau_therm_fan_sense(struct nouveau_therm *therm)
{
+ struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_timer *ptimer = nouveau_timer(therm);
struct nouveau_gpio *gpio = nouveau_gpio(therm);
- struct dcb_gpio_func func;
u32 cycles, cur, prev;
u64 start, end, tach;
- if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
+ if (priv->fan->tach.func == DCB_GPIO_UNUSED)
return -ENODEV;
/* Time a complete rotation and extrapolate to RPM:
@@ -118,12 +135,12 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
* We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
*/
start = ptimer->read(ptimer);
- prev = gpio->get(gpio, 0, func.func, func.line);
+ prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
cycles = 0;
do {
usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
- cur = gpio->get(gpio, 0, func.func, func.line);
+ cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
if (prev != cur) {
if (!start)
start = ptimer->read(ptimer);
@@ -142,34 +159,6 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
}
int
-nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode)
-{
- struct nouveau_therm_priv *priv = (void *)therm;
-
- if (priv->fan.mode == mode)
- return 0;
-
- if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
- return -EINVAL;
-
- switch (mode)
- {
- case FAN_CONTROL_NONE:
- nv_info(therm, "switch fan to no-control mode\n");
- break;
- case FAN_CONTROL_MANUAL:
- nv_info(therm, "switch fan to manual mode\n");
- break;
- case FAN_CONTROL_NR:
- break;
- }
-
- priv->fan.mode = mode;
- return 0;
-}
-
-int
nouveau_therm_fan_user_get(struct nouveau_therm *therm)
{
return nouveau_therm_fan_get(therm);
@@ -180,55 +169,86 @@ nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->fan.mode != FAN_CONTROL_MANUAL)
+ if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
return -EINVAL;
- return nouveau_therm_fan_set(therm, percent);
+ return nouveau_therm_fan_set(therm, true, percent);
}
-void
+static void
nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- priv->bios_fan.pwm_freq = 0;
- priv->bios_fan.min_duty = 0;
- priv->bios_fan.max_duty = 100;
+ priv->fan->bios.pwm_freq = 0;
+ priv->fan->bios.min_duty = 0;
+ priv->fan->bios.max_duty = 100;
+ priv->fan->bios.bump_period = 500;
+ priv->fan->bios.slow_down_period = 2000;
+ priv->fan->bios.linear_min_temp = 40;
+ priv->fan->bios.linear_max_temp = 85;
}
-
static void
nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- if (priv->bios_fan.min_duty > 100)
- priv->bios_fan.min_duty = 100;
- if (priv->bios_fan.max_duty > 100)
- priv->bios_fan.max_duty = 100;
+ if (priv->fan->bios.min_duty > 100)
+ priv->fan->bios.min_duty = 100;
+ if (priv->fan->bios.max_duty > 100)
+ priv->fan->bios.max_duty = 100;
- if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
- priv->bios_fan.min_duty = priv->bios_fan.max_duty;
-}
-
-int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
-{
- return 1;
+ if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
+ priv->fan->bios.min_duty = priv->fan->bios.max_duty;
}
int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
struct nouveau_bios *bios = nouveau_bios(therm);
+ struct dcb_gpio_func func;
+ int ret;
+ /* attempt to locate a drivable fan, and determine control method */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+ if (ret == 0) {
+ if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+ nv_debug(therm, "GPIO_FAN is in input mode\n");
+ ret = -EINVAL;
+ } else {
+ ret = nouveau_fanpwm_create(therm, &func);
+ if (ret != 0)
+ ret = nouveau_fantog_create(therm, &func);
+ }
+ }
+
+ /* no controllable fan found, create a dummy fan module */
+ if (ret != 0) {
+ ret = nouveau_fannil_create(therm);
+ if (ret)
+ return ret;
+ }
+
+ nv_info(therm, "FAN control: %s\n", priv->fan->type);
+
+ /* attempt to detect a tachometer connection */
+ ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+ if (ret)
+ priv->fan->tach.func = DCB_GPIO_UNUSED;
+
+ /* initialise fan bump/slow update handling */
+ priv->fan->parent = therm;
+ nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+ spin_lock_init(&priv->fan->lock);
+
+ /* other random init... */
nouveau_therm_fan_set_defaults(therm);
- nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
- if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
+ nvbios_perf_fan_parse(bios, &priv->fan->perf);
+ if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
nv_error(therm, "parsing the thermal table failed\n");
nouveau_therm_fan_safety_checks(therm);
-
- nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
new file mode 100644
index 000000000000..b78c182e1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+nouveau_fannil_get(struct nouveau_therm *therm)
+{
+ return -ENODEV;
+}
+
+static int
+nouveau_fannil_set(struct nouveau_therm *therm, int percent)
+{
+ return -ENODEV;
+}
+
+int
+nouveau_fannil_create(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fan *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = priv;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->type = "none / external";
+ priv->get = nouveau_fannil_get;
+ priv->set = nouveau_fannil_set;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
new file mode 100644
index 000000000000..5f71db8e8992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * Martin Peres
+ */
+
+#include <core/option.h>
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nouveau_fanpwm_priv {
+ struct nouveau_fan base;
+ struct dcb_gpio_func func;
+};
+
+static int
+nouveau_fanpwm_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ struct nouveau_gpio *gpio = nouveau_gpio(therm);
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+ if (ret == 0 && divs) {
+ divs = max(divs, duty);
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+}
+
+static int
+nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+ int card_type = nv_device(therm)->card_type;
+ u32 divs, duty;
+ int ret;
+
+ divs = priv->base.perf.pwm_divisor;
+ if (priv->base.bios.pwm_freq) {
+ divs = 1;
+ if (therm->pwm_clock)
+ divs = therm->pwm_clock(therm);
+ divs /= priv->base.bios.pwm_freq;
+ }
+
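+ /* scale the requested percentage into the divider range, rounding up */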
+ duty = ((divs * percent) + 99) / 100;
+ if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ duty = divs - duty;
+
+ ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+ if (ret == 0)
+ ret = therm->pwm_ctrl(therm, priv->func.line, true);
+ return ret;
+}
+
+int
+nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_device *device = nv_device(therm);
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fanpwm_priv *priv;
+ u32 divs, duty;
+
+ if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
+ !therm->pwm_ctrl ||
+ therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "PWM";
+ priv->base.get = nouveau_fanpwm_get;
+ priv->base.set = nouveau_fanpwm_set;
+ priv->func = *func;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
new file mode 100644
index 000000000000..e601773ee475
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+struct nouveau_fantog_priv {
+ struct nouveau_fan base;
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ u32 period_us;
+ u32 percent;
+ struct dcb_gpio_func func;
+};
+
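+/* "toggle" fans have no PWM controller: the fan GPIO is flipped in software
+ * at a fixed 10Hz period, with the on/off split tracking the requested duty
+ * cycle.
+ */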
+static void
+nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
+ struct nouveau_timer *ptimer = nouveau_timer(tpriv);
+ struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
+ unsigned long flags;
+ int duty;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (percent < 0)
+ percent = priv->percent;
+ priv->percent = percent;
+
+ duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
+ gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+
+ if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
+ u64 next_change = (percent * priv->period_us) / 100;
+ if (!duty)
+ next_change = priv->period_us - next_change;
+ ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_fantog_alarm(struct nouveau_alarm *alarm)
+{
+ struct nouveau_fantog_priv *priv =
+ container_of(alarm, struct nouveau_fantog_priv, alarm);
+ nouveau_fantog_update(priv, -1);
+}
+
+static int
+nouveau_fantog_get(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ return priv->percent;
+}
+
+static int
+nouveau_fantog_set(struct nouveau_therm *therm, int percent)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+ if (therm->pwm_ctrl)
+ therm->pwm_ctrl(therm, priv->func.line, false);
+ nouveau_fantog_update(priv, percent);
+ return 0;
+}
+
+int
+nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+ struct nouveau_therm_priv *tpriv = (void *)therm;
+ struct nouveau_fantog_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ tpriv->fan = &priv->base;
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.type = "toggle";
+ priv->base.get = nouveau_fantog_get;
+ priv->base.set = nouveau_fantog_set;
+ nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
+ priv->period_us = 100000; /* 10Hz */
+ priv->percent = 100;
+ priv->func = *func;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e512ff0aae60..e24090bac195 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -31,7 +31,7 @@ static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
struct i2c_board_info *info)
{
- struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
+ struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
struct i2c_client *client;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -53,6 +53,31 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
return true;
}
+static struct i2c_board_info
+nv_board_infos[] = {
+ { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ { I2C_BOARD_INFO("w83781d", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("adt7473", 0x2d) },
+ { I2C_BOARD_INFO("adt7473", 0x2c) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
+ { I2C_BOARD_INFO("lm99", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4c) },
+ { I2C_BOARD_INFO("lm90", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x18) },
+ { I2C_BOARD_INFO("adm1021", 0x19) },
+ { I2C_BOARD_INFO("adm1021", 0x1a) },
+ { I2C_BOARD_INFO("adm1021", 0x29) },
+ { I2C_BOARD_INFO("adm1021", 0x2a) },
+ { I2C_BOARD_INFO("adm1021", 0x2b) },
+ { I2C_BOARD_INFO("adm1021", 0x4c) },
+ { I2C_BOARD_INFO("adm1021", 0x4d) },
+ { I2C_BOARD_INFO("adm1021", 0x4e) },
+ { I2C_BOARD_INFO("lm63", 0x18) },
+ { I2C_BOARD_INFO("lm63", 0x4e) },
+ { }
+};
+
void
nouveau_therm_ic_ctor(struct nouveau_therm *therm)
{
@@ -60,29 +85,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
struct nouveau_bios *bios = nouveau_bios(therm);
struct nouveau_i2c *i2c = nouveau_i2c(therm);
struct nvbios_extdev_func extdev_entry;
- struct i2c_board_info info[] = {
- { I2C_BOARD_INFO("w83l785ts", 0x2d) },
- { I2C_BOARD_INFO("w83781d", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2e) },
- { I2C_BOARD_INFO("adt7473", 0x2d) },
- { I2C_BOARD_INFO("adt7473", 0x2c) },
- { I2C_BOARD_INFO("f75375", 0x2e) },
- { I2C_BOARD_INFO("lm99", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4c) },
- { I2C_BOARD_INFO("lm90", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x18) },
- { I2C_BOARD_INFO("adm1021", 0x19) },
- { I2C_BOARD_INFO("adm1021", 0x1a) },
- { I2C_BOARD_INFO("adm1021", 0x29) },
- { I2C_BOARD_INFO("adm1021", 0x2a) },
- { I2C_BOARD_INFO("adm1021", 0x2b) },
- { I2C_BOARD_INFO("adm1021", 0x4c) },
- { I2C_BOARD_INFO("adm1021", 0x4d) },
- { I2C_BOARD_INFO("adm1021", 0x4e) },
- { I2C_BOARD_INFO("lm63", 0x18) },
- { I2C_BOARD_INFO("lm63", 0x4e) },
- { }
- };
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
struct i2c_board_info board[] = {
@@ -111,6 +113,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
- probe_monitoring_device);
+ i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+ nv_board_infos, probe_monitoring_device);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index fcf2cfe731d6..0f5363edb964 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv40_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
@@ -34,6 +38,7 @@ nv40_sensor_setup(struct nouveau_therm *therm)
if (device->chipset >= 0x46) {
nv_mask(therm, 0x15b8, 0x80000000, 0);
nv_wr32(therm, 0x15b0, 0x80003fff);
+ mdelay(10); /* wait for the temperature to stabilize */
return nv_rd32(therm, 0x15b4) & 0x3fff;
} else {
nv_wr32(therm, 0x15b0, 0xff);
@@ -75,7 +80,20 @@ nv40_temp_get(struct nouveau_therm *therm)
return core_temp;
}
-int
+static int
+nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 mask = enable ? 0x80000000 : 0x00000000;
+ if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
+ else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+ else {
+ nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int
nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
if (line == 2) {
@@ -101,15 +119,15 @@ nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
return -EINVAL;
}
-int
+static int
nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
{
if (line == 2) {
- nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
} else
if (line == 9) {
nv_wr32(therm, 0x0015f8, divs);
- nv_wr32(therm, 0x0015f4, duty | 0x80000000);
+ nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
} else {
nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
@@ -118,37 +136,51 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
+static void
+nv40_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ uint32_t stat = nv_rd32(therm, 0x1100);
+
+ /* processing */
+
+ /* ack all IRQs */
+ nv_wr32(therm, 0x1100, 0x70000);
+
+ nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+}
+
static int
nv40_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv40_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv40_fan_pwm_get;
+ priv->base.base.pwm_set = nv40_fan_pwm_set;
+ priv->base.base.temp_get = nv40_temp_get;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ nv_subdev(priv)->intr = nv40_therm_intr;
+ return nouveau_therm_preinit(&priv->base.base);
+}
- priv->fan.pwm_get = nv40_fan_pwm_get;
- priv->fan.pwm_set = nv40_fan_pwm_set;
+static int
+nv40_therm_init(struct nouveau_object *object)
+{
+ struct nouveau_therm *therm = (void *)object;
- therm->temp_get = nv40_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
+ nv40_sensor_setup(therm);
- return 0;
+ return _nouveau_therm_init(object);
}
struct nouveau_oclass
@@ -157,7 +189,7 @@ nv40_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = nv40_therm_init,
+ .fini = _nouveau_therm_fini,
},
-};
\ No newline at end of file
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 9360ddd469e7..86632cbd65ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -25,6 +25,10 @@
#include "priv.h"
+struct nv50_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
static int
pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
{
@@ -51,6 +55,16 @@ pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
}
int
+nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000001 : 0x00000000;
+ int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+ if (ret == 0)
+ nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+ return ret;
+}
+
+int
nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
@@ -73,7 +87,6 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
if (ret)
return ret;
- nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
nv_wr32(therm, 0x00e114 + (id * 8), divs);
nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
return 0;
@@ -111,38 +124,178 @@ nv50_temp_get(struct nouveau_therm *therm)
return nv_rd32(therm, 0x20400);
}
+static void
+nv50_therm_program_alarms(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+ nv_wr32(therm, 0x20000, 0x000003ff);
+
+ /* shutdown: the computer should be shut down when this threshold is reached */
+ nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+ nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+ /* THRS_1 : fan boost */
+ nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+ /* THRS_2 : critical */
+ nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+ /* THRS_4 : down clock */
+ nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+ nv_info(therm,
+ "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+}
+
+/* must be called with alarm_program_lock taken! */
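+/* the register holds a single trip point, so hysteresis is emulated by
+ * toggling it between thrs->temp (to catch the rising edge) and
+ * thrs->temp - hysteresis (to catch the falling edge) */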
+static void
+nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+ uint32_t thrs_reg, u8 status_bit,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp, cur;
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+ temp = nv_rd32(therm, thrs_reg);
+
+ /* program the next threshold */
+ if (temp == thrs->temp) {
+ nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else {
+ nv_wr32(therm, thrs_reg, thrs->temp);
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ }
+
+ /* fix the state (in case someone reprogrammed the alarms) */
+ cur = therm->temp_get(therm);
+ if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+ cur < thrs->temp - thrs->hysteresis)
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+ /* find the direction */
+ if (prev_state < new_state)
+ direction = NOUVEAU_THERM_THRS_RISING;
+ else if (prev_state > new_state)
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ else
+ return;
+
+ /* advertise a change in direction */
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
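+/* dispatch each pending threshold interrupt to the hysteresis emulation
+ * helper above, then acknowledge everything at the sensor and PBUS level */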
+static void
+nv50_therm_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_therm *therm = nouveau_therm(subdev);
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ unsigned long flags;
+ uint32_t intr;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ intr = nv_rd32(therm, 0x20100);
+
+ /* THRS_4: downclock */
+ if (intr & 0x002) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+ &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+ intr &= ~0x002;
+ }
+
+ /* shutdown */
+ if (intr & 0x004) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+ &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+ intr &= ~0x004;
+ }
+
+ /* THRS_1 : fan boost */
+ if (intr & 0x008) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+ &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+ intr &= ~0x008;
+ }
+
+ /* THRS_2 : critical */
+ if (intr & 0x010) {
+ nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+ &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+ intr &= ~0x010;
+ }
+
+ if (intr)
+ nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+ /* ACK everything */
+ nv_wr32(therm, 0x20100, 0xffffffff);
+ nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
static int
nv50_therm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_therm_priv *priv;
- struct nouveau_therm *therm;
+ struct nv50_therm_priv *priv;
int ret;
ret = nouveau_therm_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
- therm = (void *) priv;
if (ret)
return ret;
- nouveau_therm_ic_ctor(therm);
- nouveau_therm_sensor_ctor(therm);
- nouveau_therm_fan_ctor(therm);
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.sensor.program_alarms = nv50_therm_program_alarms;
+ nv_subdev(priv)->intr = nv50_therm_intr;
- priv->fan.pwm_get = nv50_fan_pwm_get;
- priv->fan.pwm_set = nv50_fan_pwm_set;
- priv->fan.pwm_clock = nv50_fan_pwm_clock;
+ /* init the thresholds */
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_SHUTDOWN,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_FANBOOST,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_CRITICAL,
+ NOUVEAU_THERM_THRS_LOWER);
+ nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+ NOUVEAU_THERM_THRS_DOWNCLOCK,
+ NOUVEAU_THERM_THRS_LOWER);
- therm->temp_get = nv50_temp_get;
- therm->fan_get = nouveau_therm_fan_user_get;
- therm->fan_set = nouveau_therm_fan_user_set;
- therm->fan_sense = nouveau_therm_fan_sense;
- therm->attr_get = nouveau_therm_attr_get;
- therm->attr_set = nouveau_therm_attr_set;
-
- return 0;
+ return nouveau_therm_preinit(&priv->base.base);
}
struct nouveau_oclass
@@ -151,7 +304,7 @@ nv50_therm_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_therm_ctor,
.dtor = _nouveau_therm_dtor,
- .init = nouveau_therm_init,
- .fini = nouveau_therm_fini,
+ .init = _nouveau_therm_init,
+ .fini = _nouveau_therm_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
new file mode 100644
index 000000000000..2dcc5437116a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nva3_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
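+/* the tachometer is set up (in nva3_therm_init below) to count revolutions
+ * per second, so the raw count is scaled by 60 to report RPM; -ENODEV is
+ * returned while the counter is disabled */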
+int
+nva3_therm_fan_sense(struct nouveau_therm *therm)
+{
+ u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
+ u32 ctrl = nv_rd32(therm, 0x00e720);
+ if (ctrl & 0x00000001)
+ return tach * 60;
+ return -ENODEV;
+}
+
+static int
+nva3_therm_init(struct nouveau_object *object)
+{
+ struct nva3_therm_priv *priv = (void *)object;
+ struct dcb_gpio_func *tach = &priv->base.fan->tach;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (tach->func != DCB_GPIO_UNUSED) {
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nva3_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nv50_fan_pwm_get;
+ priv->base.base.pwm_set = nv50_fan_pwm_set;
+ priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nva3_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nva3_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
new file mode 100644
index 000000000000..d7d30ee8332e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nvd0_therm_priv {
+ struct nouveau_therm_priv base;
+};
+
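+/* decode the GPIO line's current routing to find which PWM controller
+ * (0 or 1) drives it; -ENODEV if the line has no PWM function */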
+static int
+pwm_info(struct nouveau_therm *therm, int line)
+{
+ u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
+ switch (gpio & 0x000000c0) {
+ case 0x00000000: /* normal mode, possibly pwm forced off by us */
+ case 0x00000040: /* nvio special */
+ switch (gpio & 0x0000001f) {
+ case 0x19: return 1;
+ case 0x1c: return 0;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
+ return -ENODEV;
+}
+
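+/* switch the GPIO between its normal function (0x00) and the special/PWM
+ * function (0x40) to enable or disable PWM control of the fan line */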
+static int
+nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+ u32 data = enable ? 0x00000040 : 0x00000000;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ return 0;
+}
+
+static int
+nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
+ *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
+ *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+
+ nv_wr32(therm, 0x00e114 + (indx * 8), divs);
+ nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+ return 0;
+}
+
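+/* PWM source clock: the crystal frequency (kept in kHz by the driver)
+ * converted to Hz and divided by a fixed factor of 20 */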
+static int
+nvd0_fan_pwm_clock(struct nouveau_therm *therm)
+{
+ return (nv_device(therm)->crystal * 1000) / 20;
+}
+
+static int
+nvd0_therm_init(struct nouveau_object *object)
+{
+ struct nvd0_therm_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_therm_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ /* enable fan tach, count revolutions per second */
+ nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
+ nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
+ nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+ nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+ return 0;
+}
+
+static int
+nvd0_therm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_therm_priv *priv;
+ int ret;
+
+ ret = nouveau_therm_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
+ priv->base.base.pwm_get = nvd0_fan_pwm_get;
+ priv->base.base.pwm_set = nvd0_fan_pwm_set;
+ priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
+ priv->base.base.temp_get = nv50_temp_get;
+ priv->base.base.fan_sense = nva3_therm_fan_sense;
+ priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+ return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nvd0_therm_oclass = {
+ .handle = NV_SUBDEV(THERM, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_therm_ctor,
+ .dtor = _nouveau_therm_dtor,
+ .init = nvd0_therm_init,
+ .fini = _nouveau_therm_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 1c3cd6abc36e..06b98706b3fc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -1,3 +1,6 @@
+#ifndef __NVTHERM_PRIV_H__
+#define __NVTHERM_PRIV_H__
+
/*
* Copyright 2012 The Nouveau community
*
@@ -25,33 +28,81 @@
#include <subdev/therm.h>
#include <subdev/bios/extdev.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+struct nouveau_fan {
+ struct nouveau_therm *parent;
+ const char *type;
+
+ struct nvbios_therm_fan bios;
+ struct nvbios_perf_fan perf;
+
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ int percent;
+
+ int (*get)(struct nouveau_therm *therm);
+ int (*set)(struct nouveau_therm *therm, int percent);
+
+ struct dcb_gpio_func tach;
+};
+
+enum nouveau_therm_thrs_direction {
+ NOUVEAU_THERM_THRS_FALLING = 0,
+ NOUVEAU_THERM_THRS_RISING = 1
+};
+
+enum nouveau_therm_thrs_state {
+ NOUVEAU_THERM_THRS_LOWER = 0,
+ NOUVEAU_THERM_THRS_HIGHER = 1
+};
+
+enum nouveau_therm_thrs {
+ NOUVEAU_THERM_THRS_FANBOOST = 0,
+ NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
+ NOUVEAU_THERM_THRS_CRITICAL = 2,
+ NOUVEAU_THERM_THRS_SHUTDOWN = 3,
+ NOUVEAU_THERM_THRS_NR
+};
struct nouveau_therm_priv {
struct nouveau_therm base;
+ /* automatic thermal management */
+ struct nouveau_alarm alarm;
+ spinlock_t lock;
+ struct nouveau_therm_trip_point *last_trip;
+ int mode;
+ int suspend;
+
/* bios */
struct nvbios_therm_sensor bios_sensor;
- struct nvbios_therm_fan bios_fan;
- struct nvbios_perf_fan bios_perf_fan;
/* fan priv */
+ struct nouveau_fan *fan;
+
+ /* alarms priv */
struct {
- enum nouveau_therm_fan_mode mode;
- int percent;
+ spinlock_t alarm_program_lock;
+ struct nouveau_alarm therm_poll_alarm;
+ enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
+ void (*program_alarms)(struct nouveau_therm *);
+ } sensor;
- int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
- int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nouveau_therm *);
- } fan;
+ /* what should be done if the card overheats */
+ struct {
+ void (*downclock)(struct nouveau_therm *, bool active);
+ void (*pause)(struct nouveau_therm *, bool active);
+ } emergency;
/* ic */
struct i2c_client *ic;
};
-int nouveau_therm_init(struct nouveau_object *object);
-int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
+int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -63,11 +114,35 @@ int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
-int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
+int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
-int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
- enum nouveau_therm_fan_mode mode);
-
int nouveau_therm_fan_sense(struct nouveau_therm *therm);
+
+int nouveau_therm_preinit(struct nouveau_therm *);
+
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st);
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs);
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir);
+void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
+
+int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
+int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
+int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
+int nv50_fan_pwm_clock(struct nouveau_therm *);
+int nv50_temp_get(struct nouveau_therm *therm);
+
+int nva3_therm_fan_sense(struct nouveau_therm *);
+
+int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fannil_create(struct nouveau_therm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 204282301fb1..b37624af8297 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -58,11 +58,171 @@ static void
nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *s = &priv->bios_sensor;
if (!priv->bios_sensor.slope_div)
priv->bios_sensor.slope_div = 1;
if (!priv->bios_sensor.offset_den)
priv->bios_sensor.offset_den = 1;
+
+ /* enforce a minimum hysteresis on thresholds */
+ s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
+ s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
+ s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
+ s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
+}
+
+/* must be called with alarm_program_lock taken! */
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_state st)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ priv->sensor.alarm_state[thrs] = st;
+}
+
+/* must be called with alarm_program_lock taken! */
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ return priv->sensor.alarm_state[thrs];
+}
+
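+/* orderly_poweroff() may sleep, so the shutdown threshold handler defers it
+ * to a work item that frees itself once it has run */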
+static void
+nv_poweroff_work(struct work_struct *work)
+{
+ orderly_poweroff(true);
+ kfree(work);
+}
+
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+ enum nouveau_therm_thrs thrs,
+ enum nouveau_therm_thrs_direction dir)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ bool active;
+ const char *thresholds[] = {
+ "fanboost", "downclock", "critical", "shutdown"
+ };
+ uint8_t temperature = therm->temp_get(therm);
+
+ if (thrs < 0 || thrs > 3)
+ return;
+
+ if (dir == NOUVEAU_THERM_THRS_FALLING)
+ nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+ else
+ nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
+ temperature, thresholds[thrs]);
+
+ active = (dir == NOUVEAU_THERM_THRS_RISING);
+ switch (thrs) {
+ case NOUVEAU_THERM_THRS_FANBOOST:
+ if (active) {
+ nouveau_therm_fan_set(therm, true, 100);
+ nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+ }
+ break;
+ case NOUVEAU_THERM_THRS_DOWNCLOCK:
+ if (priv->emergency.downclock)
+ priv->emergency.downclock(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_CRITICAL:
+ if (priv->emergency.pause)
+ priv->emergency.pause(therm, active);
+ break;
+ case NOUVEAU_THERM_THRS_SHUTDOWN:
+ if (active) {
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, nv_poweroff_work);
+ schedule_work(work);
+ }
+ }
+ break;
+ case NOUVEAU_THERM_THRS_NR:
+ break;
+ }
+
+}
+
+/* must be called with alarm_program_lock taken! */
+static void
+nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
+ const struct nvbios_therm_threshold *thrs,
+ enum nouveau_therm_thrs thrs_name)
+{
+ enum nouveau_therm_thrs_direction direction;
+ enum nouveau_therm_thrs_state prev_state, new_state;
+ int temp = therm->temp_get(therm);
+
+ prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+
+ if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
+ direction = NOUVEAU_THERM_THRS_RISING;
+ new_state = NOUVEAU_THERM_THRS_HIGHER;
+ } else if (temp <= thrs->temp - thrs->hysteresis &&
+ prev_state == NOUVEAU_THERM_THRS_HIGHER) {
+ direction = NOUVEAU_THERM_THRS_FALLING;
+ new_state = NOUVEAU_THERM_THRS_LOWER;
+ } else
+ return; /* nothing to do */
+
+ nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+ nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
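+/* polling implementation used when thresholds aren't interrupt driven: check
+ * every threshold against the current temperature, then re-arm the alarm so
+ * the sensor is polled again one second later */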
+static void
+alarm_timer_callback(struct nouveau_alarm *alarm)
+{
+ struct nouveau_therm_priv *priv =
+ container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm);
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nouveau_timer *ptimer = nouveau_timer(priv);
+ struct nouveau_therm *therm = &priv->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
+ NOUVEAU_THERM_THRS_FANBOOST);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+ NOUVEAU_THERM_THRS_DOWNCLOCK);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
+ NOUVEAU_THERM_THRS_CRITICAL);
+
+ nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
+ NOUVEAU_THERM_THRS_SHUTDOWN);
+
+ /* schedule the next poll in one second */
+ if (list_empty(&alarm->head))
+ ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
+
+ spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+void
+nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+
+ nv_info(therm,
+ "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+ alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
int
@@ -71,6 +231,8 @@ nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_bios *bios = nouveau_bios(therm);
+ nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+
nouveau_therm_temp_set_defaults(therm);
if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
&priv->bios_sensor))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c26ca9bef671..8e1bae4f12e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -79,7 +79,7 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
/* execute any pending alarm handlers */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del(&alarm->head);
+ list_del_init(&alarm->head);
alarm->func(alarm);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index d0da230d7706..74acf0f87785 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -3,7 +3,7 @@
#define ROM_BIOS_PAGE 4096
-#if defined(CONFIG_ACPI)
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
bool nouveau_is_optimus(void);
bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f65b20a375f6..5d940302d2aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -84,6 +84,8 @@ nv40_backlight_init(struct drm_connector *connector)
props.max_brightness = 31;
bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
&nv40_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
drm->backlight = bd;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 865eddfa30a7..50a6dd02f7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -678,23 +678,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
return 0;
}
-static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
-{
- /*
- * offset + 0 (8 bits): Micro version
- * offset + 1 (8 bits): Minor version
- * offset + 2 (8 bits): Chip version
- * offset + 3 (8 bits): Major version
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- bios->major_version = bios->data[offset + 3];
- bios->chip_version = bios->data[offset + 2];
- NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
- bios->data[offset + 3], bios->data[offset + 2],
- bios->data[offset + 1], bios->data[offset]);
-}
-
static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
{
/*
@@ -710,12 +693,6 @@ static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
*/
bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
- bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
- bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
- bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
- bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
- bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
- bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
}
static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
@@ -765,25 +742,6 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return 0;
}
-static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
-{
- /*
- * offset + 8 (16 bits): PLL limits table pointer
- *
- * There's more in here, but that's unknown.
- */
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (bitentry->length < 10) {
- NV_ERROR(drm, "Do not understand BIT C table\n");
- return -EINVAL;
- }
-
- bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
-
- return 0;
-}
-
static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
{
/*
@@ -821,12 +779,6 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
parse_script_table_pointers(bios, bitentry->offset);
-
- if (bitentry->length >= 16)
- bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
- if (bitentry->length >= 18)
- bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
-
return 0;
}
@@ -852,8 +804,6 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -EINVAL;
}
- parse_bios_version(dev, bios, bitentry->offset);
-
/*
* bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
* Quadro identity crisis), other bits possibly as for BMP feature byte
@@ -1078,9 +1028,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
return ret;
if (bios->major_version >= 0x60) /* g80+ */
parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
- ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
- if (ret)
- return ret;
parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
if (ret)
@@ -1228,8 +1175,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
*/
bios->feature_byte = bmp[9];
- parse_bios_version(dev, bios, offset + 10);
-
if (bmp_version_major < 5 || bmp_version_minor < 0x10)
bios->old_style_init = true;
legacy_scripts_offset = 18;
@@ -1276,8 +1221,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
}
+#if 0
if (bmplength > 143)
bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+#endif
if (bmplength > 157)
bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
@@ -1522,6 +1469,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
case DCB_OUTPUT_DP:
entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
switch ((conf & 0x00e00000) >> 21) {
case 0:
entry->dpconf.link_bw = 162000;
@@ -1543,8 +1491,10 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
break;
case DCB_OUTPUT_TMDS:
- if (dcb->version >= 0x40)
+ if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->extdev = (conf & 0x0000ff00) >> 8;
+ }
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
@@ -1937,9 +1887,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (conn[0] != 0xff) {
NV_INFO(drm, "DCB conn %02d: ", idx);
if (olddcb_conntab(dev)[3] < 4)
- printk("%04x\n", ROM16(conn[0]));
+ pr_cont("%04x\n", ROM16(conn[0]));
else
- printk("%08x\n", ROM32(conn[0]));
+ pr_cont("%08x\n", ROM32(conn[0]));
}
}
dcb_fake_connectors(bios);
@@ -2052,45 +2002,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
-
- memset(bios, 0, sizeof(struct nvbios));
- spin_lock_init(&bios->lock);
- bios->dev = dev;
-
- bios->data = nouveau_bios(drm->device)->data;
- bios->length = nouveau_bios(drm->device)->size;
- return true;
-}
+ struct nouveau_bios *bios = nouveau_bios(drm->device);
+ struct nvbios *legacy = &drm->vbios;
+
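+ /* the legacy nvbios structure is now populated from the core bios subdev,
+ * which has already located the BIT/BMP image and parsed the version */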
+ memset(legacy, 0, sizeof(struct nvbios));
+ spin_lock_init(&legacy->lock);
+ legacy->dev = dev;
+
+ legacy->data = bios->data;
+ legacy->length = bios->size;
+ legacy->major_version = bios->version.major;
+ legacy->chip_version = bios->version.chip;
+ if (bios->bit_offset) {
+ legacy->type = NVBIOS_BIT;
+ legacy->offset = bios->bit_offset;
+ return !parse_bit_structure(legacy, legacy->offset + 6);
+ } else
+ if (bios->bmp_offset) {
+ legacy->type = NVBIOS_BMP;
+ legacy->offset = bios->bmp_offset;
+ return !parse_bmp_structure(dev, legacy, legacy->offset);
+ }
-static int nouveau_parse_vbios_struct(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
- const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
- int offset;
-
- offset = findstr(bios->data, bios->length,
- bit_signature, sizeof(bit_signature));
- if (offset) {
- NV_INFO(drm, "BIT BIOS found\n");
- bios->type = NVBIOS_BIT;
- bios->offset = offset;
- return parse_bit_structure(bios, offset + 6);
- }
-
- offset = findstr(bios->data, bios->length,
- bmp_signature, sizeof(bmp_signature));
- if (offset) {
- NV_INFO(drm, "BMP BIOS found\n");
- bios->type = NVBIOS_BMP;
- bios->offset = offset;
- return parse_bmp_structure(dev, bios, offset);
- }
-
- NV_ERROR(drm, "No known BIOS signature found\n");
- return -ENODEV;
+ return false;
}
int
@@ -2146,10 +2080,6 @@ nouveau_bios_init(struct drm_device *dev)
if (!NVInitVBIOS(dev))
return -ENODEV;
- ret = nouveau_parse_vbios_struct(dev);
- if (ret)
- return ret;
-
ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index f68c54ca422f..7ccd28f11adf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -107,20 +107,10 @@ struct nvbios {
bool old_style_init;
uint16_t init_script_tbls_ptr;
uint16_t extra_init_script_tbl_ptr;
- uint16_t macro_index_tbl_ptr;
- uint16_t macro_tbl_ptr;
- uint16_t condition_tbl_ptr;
- uint16_t io_condition_tbl_ptr;
- uint16_t io_flag_condition_tbl_ptr;
- uint16_t init_function_tbl_ptr;
-
- uint16_t pll_limit_tbl_ptr;
+
uint16_t ram_restrict_tbl_ptr;
uint8_t ram_restrict_group_count;
- uint16_t some_script_ptr; /* BIT I + 14 */
- uint16_t init96_tbl_ptr; /* BIT I + 16 */
-
struct dcb_table dcb;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1699a9083a2f..11ca82148edc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -301,17 +301,18 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
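+ /* pin_refcnt is now protected by the ttm reservation lock (see
+ * nouveau_bo.h), so the buffer is reserved before the refcount is touched */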
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (nvbo->pin_refcnt++)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
goto out;
nouveau_bo_placement_set(nvbo, memtype, 0);
@@ -329,10 +330,8 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
break;
}
}
- ttm_bo_unreserve(bo);
out:
- if (unlikely(ret))
- nvbo->pin_refcnt--;
+ ttm_bo_unreserve(bo);
return ret;
}
@@ -343,13 +342,13 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- if (--nvbo->pin_refcnt)
- return 0;
-
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
+ if (--nvbo->pin_refcnt)
+ goto out;
+
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
ret = nouveau_bo_validate(nvbo, false, false);
@@ -366,6 +365,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
}
}
+out:
ttm_bo_unreserve(bo);
return ret;
}
@@ -562,7 +562,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 25ca37989d2c..653dbbbd4fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -28,10 +28,11 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
struct drm_gem_object *gem;
+
+ /* protected by the ttm reservation lock */
int pin_refcnt;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
static inline struct nouveau_bo *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 174300b6a02e..eaa80a2b81ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -51,14 +51,15 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret)
- NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
+ NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
+ chan->handle, cli->base.name);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e620ba8271b4..4dd7ae2ac6c6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
static int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
-static void nouveau_connector_hotplug(void *, int);
-
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
@@ -100,22 +98,6 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_gpio *gpio;
- struct nouveau_drm *drm;
- struct drm_device *dev;
-
- if (!nv_connector)
- return;
-
- dev = nv_connector->base.dev;
- drm = nouveau_drm(dev);
- gpio = nouveau_gpio(drm->device);
-
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- }
-
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -130,7 +112,6 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = NULL;
int i, panel = -ENODEV;
@@ -160,8 +141,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- if (nv_encoder->dcb->i2c_index < 0xf)
- port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ port = nv_encoder->i2c;
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
break;
@@ -399,9 +379,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- *(nv_connector->edid) = *edid;
- status = connector_status_connected;
+ nv_connector->edid =
+ kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ if (nv_connector->edid)
+ status = connector_status_connected;
}
}
@@ -911,6 +892,37 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
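+/* hotplug notifications now arrive through the GPIO event interface, possibly
+ * in interrupt context, so the DPMS update and helper notification are
+ * deferred to a work item */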
+static void
+nouveau_connector_hotplug_work(struct work_struct *work)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(work, struct nouveau_connector, hpd_work);
+ struct drm_connector *connector = &nv_connector->base;
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+
+ drm_helper_hpd_irq_event(dev);
+}
+
+static int
+nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(event, struct nouveau_connector, hpd_func);
+ schedule_work(&nv_connector->hpd_work);
+ return NVKM_EVENT_KEEP;
+}
+
static int
drm_conntype_from_dcb(enum dcb_connector_type dcb)
{
@@ -961,6 +973,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
+ INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
nv_connector->index = index;
/* attempt to parse vbios connector type and hotplug gpio */
@@ -975,8 +988,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
- nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
- nv_connector->hpd = hpd[nv_connector->hpd];
+ ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
+ DCB_GPIO_UNUSED, &nv_connector->hpd);
+ nv_connector->hpd_func.func = nouveau_connector_hotplug;
+ if (ret)
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
@@ -999,7 +1015,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
- nv_connector->hpd = DCB_GPIO_UNUSED;
+ nv_connector->hpd.func = DCB_GPIO_UNUSED;
}
/* no vbios data, or an unknown dcb connector type - attempt to
@@ -1126,31 +1142,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
- ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
- nouveau_connector_hotplug, connector);
- if (ret == 0)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
+ if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_sysfs_connector_add(connector);
return connector;
}
-
-static void
-nouveau_connector_hotplug(void *data, int plugged)
-{
- struct drm_connector *connector = data;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
-
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-
- drm_helper_hpd_irq_event(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 20eb84cce9e6..6e399aad491a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,6 +30,11 @@
#include <drm/drm_edid.h>
#include "nouveau_crtc.h"
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
struct nouveau_i2c_port;
enum nouveau_underscan_type {
@@ -61,7 +66,10 @@ struct nouveau_connector {
enum dcb_connector_type type;
u8 index;
u8 *dcb;
- u8 hpd;
+
+ struct dcb_gpio_func hpd;
+ struct work_struct hpd_work;
+ struct nouveau_eventh hpd_func;
int dithering_mode;
int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..5392e07edfc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_debugfs.h"
+#include "nouveau_drm.h"
+
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ int i;
+
+ for (i = 0; i < drm->vbios.length; i++)
+ seq_printf(m, "%c", drm->vbios.data[i]);
+ return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
new file mode 100644
index 000000000000..a62af6fb5f99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -0,0 +1,22 @@
+#ifndef __NOUVEAU_DEBUGFS_H__
+#define __NOUVEAU_DEBUGFS_H__
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_DEBUG_FS)
+extern int nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 508b00a2ce0d..4610c3a29bbe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -41,6 +41,8 @@
#include <subdev/gpio.h>
#include <engine/disp.h>
+#include <core/class.h>
+
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
@@ -78,11 +80,6 @@ nouveau_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
- return ret;
- }
-
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
@@ -125,6 +122,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
}
}
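+ /* register with the DRM core only once the nouveau-specific setup above
+ * has succeeded */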
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
+ if (ret) {
+ return ret;
+ }
+
return 0;
}
@@ -231,8 +233,10 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, true);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_get(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
return ret;
@@ -249,37 +253,20 @@ nouveau_display_fini(struct drm_device *dev)
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (gpio)
- gpio->irq(gpio, 0, conn->hpd, 0xff, false);
+ if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+ nouveau_event_put(gpio->events, conn->hpd.line,
+ &conn->hpd_func);
+ }
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
-static void
-nouveau_display_vblank_notify(void *data, int crtc)
-{
- drm_handle_vblank(data, crtc);
-}
-
-static void
-nouveau_display_vblank_get(void *data, int crtc)
-{
- drm_vblank_get(data, crtc);
-}
-
-static void
-nouveau_display_vblank_put(void *data, int crtc)
-{
- drm_vblank_put(data, crtc);
-}
-
int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
u32 pclass = dev->pdev->class >> 8;
int ret, gen;
@@ -288,11 +275,6 @@ nouveau_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- pdisp->vblank.data = dev;
- pdisp->vblank.notify = nouveau_display_vblank_notify;
- pdisp->vblank.get = nouveau_display_vblank_get;
- pdisp->vblank.put = nouveau_display_vblank_put;
-
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
drm_mode_create_dvi_i_properties(dev);
@@ -316,17 +298,13 @@ nouveau_display_create(struct drm_device *dev)
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
if (gen >= 1) {
+ /* -90..+90 */
disp->vibrant_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "vibrant hue", 2);
- disp->vibrant_hue_property->values[0] = 0;
- disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ /* -100..+100 */
disp->color_vibrance_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "color vibrance", 2);
- disp->color_vibrance_property->values[0] = 0;
- disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}
dev->mode_config.funcs = &nouveau_mode_config_funcs;
@@ -478,39 +456,6 @@ nouveau_display_resume(struct drm_device *dev)
}
}
-int
-nouveau_vblank_enable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
- NV_PCRTC_INTR_0_VBLANK);
-
- return 0;
-}
-
-void
-nouveau_vblank_disable(struct drm_device *dev, int crtc)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (device->card_type >= NV_D0)
- nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
- else
- if (device->card_type >= NV_50)
- nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
-}
-
static int
nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo)
@@ -595,7 +540,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence);
+ ret = nouveau_fence_new(chan, false, pfence);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 722548bb3bd3..1ea3e4734b62 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -59,9 +59,6 @@ void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
-int nouveau_vblank_enable(struct drm_device *dev, int crtc);
-void nouveau_vblank_disable(struct drm_device *dev, int crtc);
-
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 5c2e22932d1c..690d5930ce32 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -191,7 +191,7 @@ WIND_RING(struct nouveau_channel *chan)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
-#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
+#define NV84_SUBCHAN_UEVENT 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 59838651ee8f..36fd22500569 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -35,300 +35,6 @@
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-/******************************************************************************
- * link training
- *****************************************************************************/
-struct dp_state {
- struct nouveau_i2c_port *auxch;
- struct nouveau_object *core;
- struct dcb_output *dcb;
- int crtc;
- u8 *dpcd;
- int link_nr;
- u32 link_bw;
- u8 stat[6];
- u8 conf[4];
-};
-
-static void
-dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink[2];
- u32 data;
-
- NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
-
- /* set desired link configuration on the source */
- data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
-
- /* inform the sink of the new configuration */
- sink[0] = dp->link_bw / 27000;
- sink[1] = dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
- sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
-}
-
-static void
-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- u8 sink_tp;
-
- NV_DEBUG(drm, "training pattern %d\n", pattern);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
-
- nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
- sink_tp &= ~DP_TRAINING_PATTERN_MASK;
- sink_tp |= pattern;
- nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
-}
-
-static int
-dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- int i;
-
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
- u8 lpre = (lane & 0x0c) >> 2;
- u8 lvsw = (lane & 0x03) >> 0;
-
- dp->conf[i] = (lpre << 3) | lvsw;
- if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
- dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
- if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
- dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
- NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-
- nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
- }
-
- return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
-}
-
-static int
-dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret;
-
- udelay(delay);
-
- ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
- if (ret)
- return ret;
-
- NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
- return 0;
-}
-
-static int
-dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
-{
- bool cr_done = false, abort = false;
- int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
-
- do {
- if (dp_link_train_commit(dev, dp) ||
- dp_link_train_update(dev, dp, 100))
- break;
-
- cr_done = true;
- for (i = 0; i < dp->link_nr; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE)) {
- cr_done = false;
- if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
- abort = true;
- break;
- }
- }
-
- if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- tries = 0;
- }
- } while (!cr_done && !abort && ++tries < 5);
-
- return cr_done ? 0 : -1;
-}
-
-static int
-dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
-{
- bool eq_done, cr_done = true;
- int tries = 0, i;
-
- dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
-
- do {
- if (dp_link_train_update(dev, dp, 400))
- break;
-
- eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
- for (i = 0; i < dp->link_nr && eq_done; i++) {
- u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
- if (!(lane & DP_LANE_CR_DONE))
- cr_done = false;
- if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
- !(lane & DP_LANE_SYMBOL_LOCKED))
- eq_done = false;
- }
-
- if (dp_link_train_commit(dev, dp))
- break;
- } while (!eq_done && cr_done && ++tries <= 5);
-
- return eq_done ? 0 : -1;
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
- NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
- NV94_DISP_SOR_DP_TRAIN_OP_INIT);
-}
-
-static void
-dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
-{
- struct dcb_output *dcb = dp->dcb;
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 moff = (dp->crtc << 3) | (link << 2) | or;
-
- nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
- NV94_DISP_SOR_DP_TRAIN_OP_FINI);
-}
-
-static bool
-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector =
- nouveau_encoder_connector_get(nv_encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- const u32 bw_list[] = { 270000, 162000, 0 };
- const u32 *link_bw = bw_list;
- struct dp_state dp;
-
- dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!dp.auxch)
- return false;
-
- dp.core = core;
- dp.dcb = nv_encoder->dcb;
- dp.crtc = nv_crtc->index;
- dp.dpcd = nv_encoder->dp.dpcd;
-
- /* adjust required bandwidth for 8B/10B coding overhead */
- datarate = (datarate / 8) * 10;
-
- /* some sinks toggle hotplug in response to some of the actions
- * we take during link training (DP_SET_POWER is one), we need
- * to ignore them for the moment to avoid races.
- */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
-
- /* enable down-spreading and execute pre-train script from vbios */
- dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* start off at highest link rate supported by encoder and display */
- while (*link_bw > nv_encoder->dp.link_bw)
- link_bw++;
-
- while (link_bw[0]) {
- /* find minimum required lane count at this link rate */
- dp.link_nr = nv_encoder->dp.link_nr;
- while ((dp.link_nr >> 1) * link_bw[0] > datarate)
- dp.link_nr >>= 1;
-
- /* drop link rate to minimum with this lane count */
- while ((link_bw[1] * dp.link_nr) > datarate)
- link_bw++;
- dp.link_bw = link_bw[0];
-
- /* program selected link configuration */
- dp_set_link_config(dev, &dp);
-
- /* attempt to train the link at this configuration */
- memset(dp.stat, 0x00, sizeof(dp.stat));
- if (!dp_link_train_cr(dev, &dp) &&
- !dp_link_train_eq(dev, &dp))
- break;
-
- /* retry at lower rate */
- link_bw++;
- }
-
- /* finish link training */
- dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
-
- /* execute post-train script from vbios */
- dp_link_train_fini(dev, &dp);
-
- /* re-enable hotplug detect */
- gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
- return true;
-}
-
-void
-nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct nouveau_object *core)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- struct nouveau_i2c_port *auxch;
- u8 status;
-
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return;
-
- if (mode == DRM_MODE_DPMS_ON)
- status = DP_SET_POWER_D0;
- else
- status = DP_SET_POWER_D3;
-
- nv_wraux(auxch, DP_SET_POWER, &status, 1);
-
- if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, core);
-}
-
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
u8 *dpcd)
@@ -355,12 +61,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *auxch;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
- auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ auxch = nv_encoder->i2c;
if (!auxch)
return false;
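The block removed above is the driver-level DisplayPort link-training machinery (the dp_state tracker, the clock-recovery and channel-equalisation loops, and the DPMS power/train entry point); only nouveau_dp_detect() survives here, and it now reuses the aux port cached on the encoder rather than looking it up on every call. A minimal sketch of that change, assuming nv_encoder->i2c is populated when the outputs are created (as the nv04_display.c hunk later in this diff does):

	/* old: resolve the aux channel from the DCB on every detect */
	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);

	/* new: reuse the port cached on the encoder at creation time */
	auxch = nv_encoder->i2c;
	if (!auxch)
		return false;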
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5e7aef23825a..d1099365bfc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -34,6 +34,8 @@
#include <subdev/device.h>
#include <subdev/vm.h>
+#include <engine/disp.h>
+
#include "nouveau_drm.h"
#include "nouveau_irq.h"
#include "nouveau_dma.h"
@@ -48,6 +50,7 @@
#include "nouveau_abi16.h"
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
+#include "nouveau_debugfs.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -68,6 +71,32 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
+static int
+nouveau_drm_vblank_enable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+ return 0;
+}
+
+static void
+nouveau_drm_vblank_disable(struct drm_device *dev, int head)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ nouveau_event_put(pdisp->vblank, head, &drm->vblank);
+}
+
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_drm *drm =
+ container_of(event, struct nouveau_drm, vblank);
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
static u64
nouveau_name(struct pci_dev *pdev)
{
@@ -132,7 +161,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
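For readability, the full fence-backend selection ladder that results from the two-line change above (the unchanged branches are the context lines of the hunk; only the nv10/nv17 split is new):

	if      (device->card_type < NV_10)  ret = nv04_fence_create(drm);
	else if (device->chipset   < 0x17)   ret = nv10_fence_create(drm);
	else if (device->card_type < NV_50)  ret = nv17_fence_create(drm);	/* new */
	else if (device->chipset   < 0x84)   ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0)  ret = nv84_fence_create(drm);
	else                                 ret = nvc0_fence_create(drm);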
@@ -262,6 +292,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ drm->vblank.func = nouveau_drm_vblank_handler;
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -401,7 +432,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
nouveau_object_debug();
}
-int
+static int
nouveau_do_suspend(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -472,7 +503,7 @@ int nouveau_pmops_suspend(struct device *dev)
return 0;
}
-int
+static int
nouveau_do_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -546,10 +577,11 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[16];
+ char name[32], tmpname[TASK_COMM_LEN];
int ret;
- snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid));
+ get_task_comm(tmpname, current);
+ snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
if (ret)
@@ -639,22 +671,32 @@ driver = {
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = nouveau_debugfs_init,
+ .debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+
.irq_preinstall = nouveau_irq_preinstall,
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = nouveau_vblank_enable,
- .disable_vblank = nouveau_vblank_disable,
+ .enable_vblank = nouveau_drm_vblank_enable,
+ .disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = nouveau_gem_prime_export,
- .gem_prime_import = nouveau_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = nouveau_gem_prime_pin,
+ .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+ .gem_prime_vmap = nouveau_gem_prime_vmap,
+ .gem_prime_vunmap = nouveau_gem_prime_vunmap,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
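Pulling the vblank hunks in this file together (everything below is copied from the hunks above; nothing new is introduced): the DRM enable/disable hooks now simply attach or detach a per-device handler on the disp engine's per-head vblank event.

	/* registered once in nouveau_drm_load(): drm->vblank.func = ... */
	static int
	nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
	{
		struct nouveau_drm *drm =
			container_of(event, struct nouveau_drm, vblank);

		drm_handle_vblank(drm->dev, head);	/* forward to DRM core */
		return NVKM_EVENT_KEEP;			/* stay attached until disable_vblank */
	}

	/* .enable_vblank  -> nouveau_event_get(pdisp->vblank, head, &drm->vblank)
	 * .disable_vblank -> nouveau_event_put(pdisp->vblank, head, &drm->vblank) */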
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index aa89eb938b47..b25df374c901 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -13,6 +13,7 @@
#define DRIVER_PATCHLEVEL 0
#include <core/client.h>
+#include <core/event.h>
#include <subdev/vm.h>
@@ -112,6 +113,7 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct nouveau_eventh vblank;
/* power management */
struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index d0d95bd511ab..e24341229d5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -36,19 +36,12 @@
struct nouveau_i2c_port;
-struct dp_train_func {
- void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
- int nr, u32 bw, bool enhframe);
- void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
- void (*train_adj)(struct drm_device *, struct dcb_output *,
- u8 lane, u8 swing, u8 preem);
-};
-
struct nouveau_encoder {
struct drm_encoder_slave base;
struct dcb_output *dcb;
int or;
+ struct nouveau_i2c_port *i2c;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 67a1a069de28..b03531781580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -251,9 +251,10 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
}
static int
-nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
+nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
@@ -388,23 +389,6 @@ out:
return ret;
}
-static int
-nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = nouveau_fbcon_create(fbcon, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
@@ -433,6 +417,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&fbcon->helper);
+ drm_framebuffer_unregister_private(&nouveau_fb->base);
drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
@@ -449,7 +434,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
- .fb_probe = nouveau_fbcon_find_or_create_single,
+ .fb_probe = nouveau_fbcon_create,
};
@@ -490,6 +475,9 @@ nouveau_fbcon_init(struct drm_device *dev)
else
preferred_bpp = 32;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1d049be79f74..6c946837a0aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -33,14 +33,14 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include <engine/fifo.h>
+
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (fence->work)
- fence->work(fence->priv, false);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -59,17 +59,14 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
- if (priv->read(chan) < fence->sequence)
+ if (fctx->read(chan) < fence->sequence)
break;
- if (fence->work)
- fence->work(fence->priv, true);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
@@ -80,7 +77,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
int ret;
@@ -88,7 +84,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (3 * DRM_HZ);
fence->sequence = ++fctx->sequence;
- ret = priv->emit(fence);
+ ret = fctx->emit(fence);
if (!ret) {
kref_get(&fence->kref);
spin_lock(&fctx->lock);
@@ -107,13 +103,87 @@ nouveau_fence_done(struct nouveau_fence *fence)
return !fence->channel;
}
+struct nouveau_fence_uevent {
+ struct nouveau_eventh handler;
+ struct nouveau_fence_priv *priv;
+};
+
+static int
+nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+{
+ struct nouveau_fence_uevent *uevent =
+ container_of(event, struct nouveau_fence_uevent, handler);
+ wake_up_all(&uevent->priv->waiting);
+ return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
+ struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_uevent uevent = {
+ .handler.func = nouveau_fence_wait_uevent_handler,
+ .priv = priv,
+ };
+ int ret = 0;
+
+ nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+
+ if (fence->timeout) {
+ unsigned long timeout = fence->timeout - jiffies;
+
+ if (time_before(jiffies, fence->timeout)) {
+ if (intr) {
+ ret = wait_event_interruptible_timeout(
+ priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ } else {
+ ret = wait_event_timeout(priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ }
+ }
+
+ if (ret >= 0) {
+ fence->timeout = jiffies + ret;
+ if (time_after_eq(jiffies, fence->timeout))
+ ret = -EBUSY;
+ }
+ } else {
+ if (intr) {
+ ret = wait_event_interruptible(priv->waiting,
+ nouveau_fence_done(fence));
+ } else {
+ wait_event(priv->waiting, nouveau_fence_done(fence));
+ }
+ }
+
+ nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
+ while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
+ ret = nouveau_fence_wait_uevent(fence, intr);
+ if (ret < 0)
+ return ret;
+ }
+
while (!nouveau_fence_done(fence)) {
if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
@@ -143,14 +213,14 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_channel *prev;
int ret = 0;
prev = fence ? fence->channel : NULL;
if (prev) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
- ret = priv->sync(fence, prev, chan);
+ ret = fctx->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
@@ -182,7 +252,8 @@ nouveau_fence_ref(struct nouveau_fence *fence)
}
int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
+ struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
int ret = 0;
@@ -193,13 +264,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+
+ fence->sysmem = sysmem;
kref_init(&fence->kref);
- if (chan) {
- ret = nouveau_fence_emit(fence, chan);
- if (ret)
- nouveau_fence_unref(&fence);
- }
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret)
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
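With the hunks above applied, fence creation takes an explicit sysmem flag and a lazy wait can sleep on the new priv->waiting queue, woken by the FIFO uevent handler (nouveau_fence_wait_uevent_handler) instead of polling. A short usage sketch built only from the signatures in this diff (false matches the converted callers here; its exact meaning is defined by the per-chipset backends):

	struct nouveau_fence *fence;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);	/* new sysmem argument */
	if (ret)
		return ret;

	/* lazy=true takes the uevent-driven sleep when priv->uevent is set,
	 * otherwise falls back to the existing polling loop */
	ret = nouveau_fence_wait(fence, true, true);
	nouveau_fence_unref(&fence);
	return ret;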
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index cdb83acdffe2..c89943407b52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -7,15 +7,15 @@ struct nouveau_fence {
struct list_head head;
struct kref kref;
+ bool sysmem;
+
struct nouveau_channel *channel;
unsigned long timeout;
u32 sequence;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
};
-int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
+ struct nouveau_fence **);
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *);
void nouveau_fence_unref(struct nouveau_fence **);
@@ -29,6 +29,13 @@ struct nouveau_fence_chan {
struct list_head pending;
struct list_head flip;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+ int (*emit32)(struct nouveau_channel *, u64, u32);
+ int (*sync32)(struct nouveau_channel *, u64, u32);
+
spinlock_t lock;
u32 sequence;
};
@@ -39,10 +46,9 @@ struct nouveau_fence_priv {
void (*resume)(struct nouveau_drm *);
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- int (*emit)(struct nouveau_fence *);
- int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
- struct nouveau_channel *);
- u32 (*read)(struct nouveau_channel *);
+
+ wait_queue_head_t waiting;
+ bool uevent;
};
#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
@@ -60,13 +66,31 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+
+int nv17_fence_create(struct nouveau_drm *);
void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
-u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
int nouveau_flip_complete(void *chan);
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+ struct nouveau_vma vma_gart;
+ struct nouveau_vma dispc_vma[4];
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ struct nouveau_bo *bo_gart;
+ u32 *suspend;
+};
+
+u64 nv84_fence_crtc(struct nouveau_channel *, int);
+int nv84_fence_context_new(struct nouveau_channel *);
+
#endif
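Since emit/sync/read now live in nouveau_fence_chan rather than nouveau_fence_priv, each backend fills them in at context-creation time. A minimal sketch of the pattern, using placeholder nvxx_* names (the real nv04 version appears in the nv04_fence.c hunk further down):

	static int
	nvxx_fence_context_new(struct nouveau_channel *chan)
	{
		struct nvxx_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);

		if (!fctx)
			return -ENOMEM;

		nouveau_fence_context_new(&fctx->base);
		fctx->base.emit = nvxx_fence_emit;	/* per-context hooks replace the */
		fctx->base.sync = nvxx_fence_sync;	/* old nouveau_fence_priv ones   */
		fctx->base.read = nvxx_fence_read;
		chan->fence = fctx;
		return 0;
	}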
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f95..b4b4d0c1f4af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/dma-buf.h>
-
#include <subdev/fb.h>
#include "nouveau_drm.h"
@@ -205,6 +203,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
@@ -213,7 +212,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -315,16 +314,18 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_device *dev = chan->drm->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
uint32_t sequence;
int trycnt = 0;
int ret, i;
+ struct nouveau_bo *res_bo = NULL;
sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
if (++trycnt > 100000) {
- NV_ERROR(drm, "%s failed and gave up.\n", __func__);
+ NV_ERROR(cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -335,14 +336,19 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
+ NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
return -ENOENT;
}
nvbo = gem->driver_private;
+ if (nvbo == res_bo) {
+ res_bo = NULL;
+ drm_gem_object_unreference_unlocked(gem);
+ continue;
+ }
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(drm, "multiple instances of buffer %d on "
+ NV_ERROR(cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
@@ -352,15 +358,19 @@ retry:
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (unlikely(ret == -EAGAIN))
- ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
- drm_gem_object_unreference_unlocked(gem);
+ if (unlikely(ret == -EAGAIN)) {
+ sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+ ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+ sequence);
+ if (!ret)
+ res_bo = nvbo;
+ }
if (unlikely(ret)) {
+ drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail reserve\n");
+ NV_ERROR(cli, "fail reserve\n");
return ret;
}
- goto retry;
}
b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -376,12 +386,14 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
- NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
+ NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
+ if (nvbo == res_bo)
+ goto retry;
}
return 0;
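On -EAGAIN the reservation path above no longer drops to ttm_bo_wait_unreserved(); it bumps the validation sequence, reserves the buffer via ttm_bo_reserve_slowpath(), remembers it as res_bo, and restarts the list walk, skipping the already-reserved buffer on the next pass. In outline (all calls taken from the hunk above):

	ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
	if (unlikely(ret == -EAGAIN)) {
		sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
		ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, sequence);
		if (!ret)
			res_bo = nvbo;	/* skipped when the list walk restarts */
	}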
@@ -407,8 +419,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
}
static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
- struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+ struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ uint64_t user_pbbo_ptr)
{
struct nouveau_drm *drm = chan->drm;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -421,7 +434,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail pre-validate sync\n");
+ NV_ERROR(cli, "fail pre-validate sync\n");
return ret;
}
@@ -429,20 +442,20 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail set_domain\n");
+ NV_ERROR(cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "fail ttm_validate\n");
+ NV_ERROR(cli, "fail ttm_validate\n");
return ret;
}
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(drm, "fail post-validate sync\n");
+ NV_ERROR(cli, "fail post-validate sync\n");
return ret;
}
@@ -478,7 +491,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -491,32 +504,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate_init\n");
+ NV_ERROR(cli, "validate_init\n");
return ret;
}
- ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate vram_list\n");
+ NV_ERROR(cli, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate gart_list\n");
+ NV_ERROR(cli, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
- ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate both_list\n");
+ NV_ERROR(cli, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -545,11 +558,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
@@ -565,7 +577,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc bo index invalid\n");
+ NV_ERROR(cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -575,7 +587,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(drm, "reloc container bo index invalid\n");
+ NV_ERROR(cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -583,7 +595,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(drm, "reloc outside of bo\n");
+ NV_ERROR(cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -592,7 +604,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(drm, "failed kmap for reloc\n");
+ NV_ERROR(cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -617,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
- NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
+ NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -633,6 +645,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *temp;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
@@ -662,19 +675,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -692,7 +705,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(drm, "push %d buffer not in list\n", i);
+ NV_ERROR(cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -703,15 +716,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(drm, "validate: %d\n", ret);
+ NV_ERROR(cli, "validate: %d\n", ret);
goto out_prevalid;
}
/* Apply any relocations that are required */
if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(drm, "reloc apply: %d\n", ret);
+ NV_ERROR(cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -719,7 +732,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(drm, "nv50cal_space: %d\n", ret);
+ NV_ERROR(cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -734,7 +747,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (nv_device(drm->device)->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(drm, "cal_space: %d\n", ret);
+ NV_ERROR(cli, "cal_space: %d\n", ret);
goto out;
}
@@ -748,7 +761,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(drm, "jmp_space: %d\n", ret);
+ NV_ERROR(cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -784,9 +797,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence);
+ ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
+ NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 5c1049236d22..8d7a3f0aeb86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,9 +35,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
-extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
+extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
+ struct drm_device *, size_t size, struct sg_table *);
+extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
+extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a701ff5ffa5b..bb54098c6d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -409,6 +409,81 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
NULL, 0);
static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+ nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp,
+ nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_temp1_auto_point1_temp_hyst,
+ nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -439,6 +514,38 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
0);
static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_max_temp_hyst,
+ nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
char *buf)
{
@@ -471,6 +578,107 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
nouveau_hwmon_set_critical_temp,
0);
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_critical_temp_hyst,
+ nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp,
+ nouveau_hwmon_set_emergency_temp,
+ 0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+ struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
+ long value;
+
+ if (kstrtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+ value / 1000);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_emergency_temp_hyst,
+ nouveau_hwmon_set_emergency_temp_hyst,
+ 0);
+
static ssize_t nouveau_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -490,7 +698,7 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
NULL, 0);
static ssize_t
-nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
@@ -499,7 +707,7 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
-static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
NULL, 0);
static ssize_t
@@ -665,14 +873,21 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
static struct attribute *hwmon_fan_rpm_attributes[] = {
- &sensor_dev_attr_fan0_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
static struct attribute *hwmon_pwm_fan_attributes[] = {
@@ -717,7 +932,7 @@ nouveau_hwmon_init(struct drm_device *dev)
dev_set_drvdata(hwmon_dev, dev);
/* default sysfs entries */
- ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
if (ret) {
if (ret)
goto error;
@@ -728,7 +943,7 @@ nouveau_hwmon_init(struct drm_device *dev)
* the gpio entries for pwm fan control even when there's no
* actual fan connected to it... therm table? */
if (therm->fan_get && therm->fan_get(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_pwm_fan_attrgroup);
if (ret)
goto error;
@@ -736,7 +951,7 @@ nouveau_hwmon_init(struct drm_device *dev)
/* if the card can read the fan rpm */
if (therm->fan_sense(therm) >= 0) {
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_fan_rpm_attrgroup);
if (ret)
goto error;
@@ -764,10 +979,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
struct nouveau_pm *pm = nouveau_pm(dev);
if (pm->hwmon) {
- sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj,
+ sysfs_remove_group(&pm->hwmon->kobj,
&hwmon_fan_rpm_attrgroup);
hwmon_device_unregister(pm->hwmon);
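Two things change in the hwmon hookup above: the sysfs groups are created on the hwmon class device's kobject instead of the PCI device's (with teardown using the stored pm->hwmon pointer), and a set of new temp1_* threshold/hysteresis attributes backed by the therm subdev is added to the default group. Sketch of the relocated registration, copied from the hunks above:

	/* registration now targets the hwmon class device's kobject ... */
	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);

	/* ... and teardown mirrors it through the stored pointer */
	sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);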
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b8e05ae38212..f53e10874cae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,126 +22,42 @@
* Authors: Dave Airlie
*/
-#include <linux/dma-buf.h>
-
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_gem.h"
-static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct nouveau_bo *nvbo = attachment->dmabuf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
-
- if (nvbo->gem->export_dma_buf == dma_buf) {
- nvbo->gem->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(nvbo->gem);
- }
-}
-
-static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-}
-static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
+ return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}
-static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
-
-}
-
-static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- return -EINVAL;
-}
-
-static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (nvbo->vmapping_count) {
- nvbo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
&nvbo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- nvbo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return nvbo->dma_buf_vmap.virtual;
}
-static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct nouveau_bo *nvbo = dma_buf->priv;
- struct drm_device *dev = nvbo->gem->dev;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- mutex_lock(&dev->struct_mutex);
- nvbo->vmapping_count--;
- if (nvbo->vmapping_count == 0) {
- ttm_bo_kunmap(&nvbo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}
-static const struct dma_buf_ops nouveau_dmabuf_ops = {
- .map_dma_buf = nouveau_gem_map_dma_buf,
- .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
- .release = nouveau_gem_dmabuf_release,
- .kmap = nouveau_gem_kmap,
- .kmap_atomic = nouveau_gem_kmap_atomic,
- .kunmap = nouveau_gem_kunmap,
- .kunmap_atomic = nouveau_gem_kunmap_atomic,
- .mmap = nouveau_gem_prime_mmap,
- .vmap = nouveau_gem_prime_vmap,
- .vunmap = nouveau_gem_prime_vunmap,
-};
-
-static int
-nouveau_prime_new(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct nouveau_bo **pnvbo)
+struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct nouveau_bo *nvbo;
u32 flags = 0;
@@ -150,24 +66,22 @@ nouveau_prime_new(struct drm_device *dev,
flags = TTM_PL_FLAG_TT;
ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
- sg, pnvbo);
+ sg, &nvbo);
if (ret)
- return ret;
- nvbo = *pnvbo;
+ return ERR_PTR(ret);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
- nouveau_bo_ref(NULL, pnvbo);
- return -ENOMEM;
+ nouveau_bo_ref(NULL, &nvbo);
+ return ERR_PTR(-ENOMEM);
}
nvbo->gem->driver_private = nvbo;
- return 0;
+ return nvbo->gem;
}
-struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int ret = 0;
@@ -175,52 +89,7 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
/* pin buffer into GTT */
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
if (ret)
- return ERR_PTR(-EINVAL);
-
- return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
-}
-
-struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct nouveau_bo *nvbo;
- int ret;
-
- if (dma_buf->ops == &nouveau_dmabuf_ops) {
- nvbo = dma_buf->priv;
- if (nvbo->gem) {
- if (nvbo->gem->dev == dev) {
- drm_gem_object_reference(nvbo->gem);
- dma_buf_put(dma_buf);
- return nvbo->gem;
- }
- }
- }
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_PTR(PTR_ERR(attach));
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
- if (ret)
- goto fail_unmap;
-
- nvbo->gem->import_attach = attach;
-
- return nvbo->gem;
+ return -EINVAL;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
-
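With nouveau_prime.c reduced above, the driver no longer carries its own dma_buf_ops; it exposes the generic PRIME helpers plus a handful of per-object callbacks, as wired in the nouveau_drm.c hunk earlier in this diff:

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,	/* generic helpers do the dma-buf work */
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = nouveau_gem_prime_pin,		/* pins the bo into GART (TTM_PL_FLAG_TT) */
	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
	.gem_prime_vmap = nouveau_gem_prime_vmap,	/* ttm_bo_kmap-backed */
	.gem_prime_vunmap = nouveau_gem_prime_vunmap,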
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 39ffc07f906b..7e24cdf1cb39 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329) {
+ if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
+ dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 2cd6fb8c548e..ad48444c385c 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -22,6 +22,9 @@
* Author: Ben Skeggs
*/
+#include <core/object.h>
+#include <core/class.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -31,6 +34,8 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include <subdev/i2c.h>
+
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -53,6 +58,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -71,6 +77,11 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
+ NV04_DISP_CLASS, NULL, 0, &disp->core);
+ if (ret)
+ return ret;
+
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
@@ -114,6 +125,11 @@ nv04_display_create(struct drm_device *dev)
}
}
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ }
+
/* Save previous state */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -140,7 +156,7 @@ nv04_display_destroy(struct drm_device *dev)
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
/* Restore state */
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
index 45322802e37d..a0a031dad13f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -80,6 +80,7 @@ struct nv04_display {
struct nv04_mode_state saved_reg;
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
+ struct nouveau_object *core;
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index a220b94ba9f2..94eadd1dd10a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -78,6 +78,9 @@ nv04_fence_context_new(struct nouveau_channel *chan)
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv04_fence_emit;
+ fctx->base.sync = nv04_fence_sync;
+ fctx->base.read = nv04_fence_read;
chan->fence = fctx;
return 0;
}
@@ -104,8 +107,5 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv04_fence_destroy;
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
- priv->base.emit = nv04_fence_emit;
- priv->base.sync = nv04_fence_sync;
- priv->base.read = nv04_fence_read;
return 0;
}
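
The nv04 hunks above are the first instance of a pattern repeated through this series: the emit/sync/read hooks are no longer installed once on the driver-wide fence priv but are bound to each channel's fence context when that context is created. A minimal user-space sketch of the idea, in plain C with hypothetical names (not the nouveau structures):

#include <stdio.h>
#include <stdlib.h>

/* Per-channel fence state: each context carries its own hooks instead of
 * sharing function pointers kept in a driver-wide priv structure. */
struct fence_ctx {
	int (*emit)(struct fence_ctx *ctx);
	unsigned int sequence;
};

static int demo_emit(struct fence_ctx *ctx)
{
	return (int)++ctx->sequence;	/* stand-in for a ring submission */
}

static struct fence_ctx *fence_ctx_new(void)
{
	struct fence_ctx *ctx = calloc(1, sizeof(*ctx));
	if (ctx)
		ctx->emit = demo_emit;	/* hook bound per context, at create time */
	return ctx;
}

int main(void)
{
	struct fence_ctx *ctx = fence_ctx_new();
	if (!ctx)
		return 1;
	printf("emitted sequence %d\n", ctx->emit(ctx));
	free(ctx);
	return 0;
}
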
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 62e826a139b3..4a69ccdef9b4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -184,14 +184,23 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
.destroy = nv04_tv_destroy,
};
+static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
+ .dpms = nv04_tv_dpms,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = nv04_tv_prepare,
+ .commit = nv04_tv_commit,
+ .mode_set = nv04_tv_mode_set,
+ .detect = drm_i2c_encoder_detect,
+};
+
int
nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
- struct drm_encoder_helper_funcs *hfuncs;
- struct drm_encoder_slave_funcs *sfuncs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
@@ -207,17 +216,11 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (!nv_encoder)
return -ENOMEM;
- hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
- if (!hfuncs) {
- ret = -ENOMEM;
- goto fail_free;
- }
-
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, hfuncs);
+ drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
@@ -230,30 +233,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
if (ret < 0)
goto fail_cleanup;
- /* Fill the function pointers */
- sfuncs = get_slave_funcs(encoder);
-
- *hfuncs = (struct drm_encoder_helper_funcs) {
- .dpms = nv04_tv_dpms,
- .save = sfuncs->save,
- .restore = sfuncs->restore,
- .mode_fixup = sfuncs->mode_fixup,
- .prepare = nv04_tv_prepare,
- .commit = nv04_tv_commit,
- .mode_set = nv04_tv_mode_set,
- .detect = sfuncs->detect,
- };
-
/* Attach it to the specified connector. */
- sfuncs->create_resources(encoder, connector);
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
fail_cleanup:
drm_encoder_cleanup(encoder);
- kfree(hfuncs);
-fail_free:
kfree(nv_encoder);
return ret;
}
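
The nv04_tv change drops the kzalloc'd per-encoder copy of the helper vtable in favour of one static const table whose slots point at the generic drm_i2c_encoder_* wrappers, so the allocation and its kfree error path disappear. A simplified sketch of the same refactor, with hypothetical names rather than the DRM structures:

#include <stdio.h>

struct helper_funcs {
	void (*prepare)(void);
	void (*commit)(void);
};

static void demo_prepare(void) { puts("prepare"); }
static void demo_commit(void)  { puts("commit"); }

/* One shared, read-only table instead of a heap-allocated copy per
 * encoder; anything instance-specific is resolved inside the callback. */
static const struct helper_funcs demo_helper_funcs = {
	.prepare = demo_prepare,
	.commit  = demo_commit,
};

int main(void)
{
	const struct helper_funcs *funcs = &demo_helper_funcs;

	funcs->prepare();
	funcs->commit();
	return 0;
}
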
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 03017f24d593..06f434f03fba 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -27,18 +27,7 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
-
-struct nv10_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv10_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
+#include "nv10_fence.h"
int
nv10_fence_emit(struct nouveau_fence *fence)
@@ -61,45 +50,6 @@ nv10_fence_sync(struct nouveau_fence *fence,
return -ENODEV;
}
-int
-nv17_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
-{
- struct nv10_fence_priv *priv = chan->drm->fence;
- u32 value;
- int ret;
-
- if (!mutex_trylock(&prev->cli->mutex))
- return -EBUSY;
-
- spin_lock(&priv->lock);
- value = priv->sequence;
- priv->sequence += 2;
- spin_unlock(&priv->lock);
-
- ret = RING_SPACE(prev, 5);
- if (!ret) {
- BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, NvSema);
- OUT_RING (prev, 0);
- OUT_RING (prev, value + 0);
- OUT_RING (prev, value + 1);
- FIRE_RING (prev);
- }
-
- if (!ret && !(ret = RING_SPACE(chan, 5))) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, 0);
- OUT_RING (chan, value + 1);
- OUT_RING (chan, value + 2);
- FIRE_RING (chan);
- }
-
- mutex_unlock(&prev->cli->mutex);
- return 0;
-}
-
u32
nv10_fence_read(struct nouveau_channel *chan)
{
@@ -115,39 +65,20 @@ nv10_fence_context_del(struct nouveau_channel *chan)
kfree(fctx);
}
-static int
+int
nv10_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
-
- if (priv->bo) {
- struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
- u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
-
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
- }
-
- if (ret)
- nv10_fence_context_del(chan);
- return ret;
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv10_fence_sync;
+ return 0;
}
void
@@ -162,18 +93,10 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
-void nv17_fence_resume(struct nouveau_drm *drm)
-{
- struct nv10_fence_priv *priv = drm->fence;
-
- nouveau_bo_wr32(priv->bo, 0, priv->sequence);
-}
-
int
nv10_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
- int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -182,33 +105,6 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.dtor = nv10_fence_destroy;
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv10_fence_sync;
spin_lock_init(&priv->lock);
-
- if (nv_device(drm->device)->chipset >= 0x17) {
- ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
- }
-
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
- }
- }
-
- if (ret)
- nv10_fence_destroy(drm);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
new file mode 100644
index 000000000000..e5d9204826c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -0,0 +1,19 @@
+#ifndef __NV10_FENCE_H_
+#define __NV10_FENCE_H_
+
+#include <core/os.h>
+#include "nouveau_fence.h"
+#include "nouveau_bo.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
new file mode 100644
index 000000000000..8e47a9bae8c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->cli->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->cli->mutex);
+ return 0;
+}
+
+static int
+nv17_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+ struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = mem->start + mem->size - 1;
+ int ret = 0;
+
+ fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = start,
+ .limit = limit,
+ }, sizeof(struct nv_dma_class),
+ &object);
+ if (ret)
+ nv10_fence_context_del(chan);
+ return ret;
+}
+
+void
+nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
+int
+nv17_fence_create(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
+ priv->base.context_new = nv17_fence_context_new;
+ priv->base.context_del = nv10_fence_context_del;
+ spin_lock_init(&priv->lock);
+
+ ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
+ }
+
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ return ret;
+}
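
nv17_fence_sync() in the new file hands a monotonically increasing value through a shared semaphore word: the previous channel is programmed with value/value+1, the new channel with value+1/value+2, and priv->sequence advances by two per sync. A toy, serialized model of that handshake (the real methods run asynchronously on the GPU; the names here are hypothetical):

#include <stdio.h>

static unsigned int semaphore;	/* stand-in for the shared NvSema word */

static void channel_release(const char *name, unsigned int value)
{
	semaphore = value;
	printf("%s: release %u\n", name, value);
}

static int channel_acquire(const char *name, unsigned int value)
{
	/* the hardware blocks until the condition holds; the model just checks */
	printf("%s: acquire %u (current %u)\n", name, value, semaphore);
	return semaphore >= value;
}

int main(void)
{
	unsigned int value = 0;			/* priv->sequence, bumped by 2 per sync */

	channel_release("prev", value + 1);	/* prev signals it reached the sync point */
	if (channel_acquire("chan", value + 1))	/* chan waits for that signal */
		channel_release("chan", value + 2);
	return 0;
}
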
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 2ca276ada507..977e42be2050 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -768,7 +768,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
.crtc = crtc,
};
- crtc->funcs->set_config(&modeset);
+ drm_mode_set_config_internal(&modeset);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 35874085a61e..a6237c9cbbc3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -43,6 +43,7 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/i2c.h>
#define EVO_DMA_NR 9
@@ -128,6 +129,11 @@ struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
u32 *ptr;
+
+ /* Protects against concurrent pushbuf access to this channel; the lock is
+ * grabbed by evo_wait (if the pushbuf reservation is successful) and
+ * dropped again by evo_kick. */
+ struct mutex lock;
};
static void
@@ -271,6 +277,8 @@ nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
u32 pushbuf = *(u32 *)data;
int ret;
+ mutex_init(&dmac->lock);
+
dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
&dmac->handle);
if (!dmac->ptr)
@@ -395,11 +403,13 @@ evo_wait(void *evoc, int nr)
struct nv50_dmac *dmac = evoc;
u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+ mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
nv_wo32(dmac->base.user, 0x0000, 0x00000000);
if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ mutex_unlock(&dmac->lock);
NV_ERROR(dmac->base.user, "channel stalled\n");
return NULL;
}
@@ -415,6 +425,7 @@ evo_kick(u32 *push, void *evoc)
{
struct nv50_dmac *dmac = evoc;
nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ mutex_unlock(&dmac->lock);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
@@ -423,7 +434,10 @@ evo_kick(u32 *push, void *evoc)
static bool
evo_sync_wait(void *data)
{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+ if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
+ return true;
+ usleep_range(1, 2);
+ return false;
}
static int
@@ -502,7 +516,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->sem.offset);
@@ -512,24 +526,36 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
- OUT_RING (chan, NvSema);
- else
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, NvSema);
+ } else
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
+ offset += sync->sem.offset;
+
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ OUT_RING (chan, 0x00000002);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x00000001);
} else {
- u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
+ u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
offset += sync->sem.offset;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x1002);
+ OUT_RING (chan, 0x00001002);
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
+ OUT_RING (chan, 0x00001001);
}
FIRE_RING (chan);
@@ -1493,9 +1519,6 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0180 + (or * 0x020), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
}
@@ -1542,20 +1565,23 @@ static const struct drm_encoder_funcs nv50_dac_func = {
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1664,9 +1690,6 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
}
static bool
@@ -1709,9 +1732,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
evo_mthd(push, 0x0200 + (or * 0x20), 1);
evo_data(push, 0x00000000);
}
-
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
evo_kick(push, mast);
}
@@ -1723,14 +1743,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
}
static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- nv50_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev);
-}
-
-static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
@@ -1825,8 +1837,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
push = evo_wait(nv50_mast(dev), 8);
if (push) {
if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
- evo_data(push, (depth << 16) | (proto << 8) | owner);
+ evo_data(push, ctrl);
} else {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
u32 syncs = 0x00000001;
@@ -1862,7 +1879,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
.dpms = nv50_sor_dpms,
.mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
+ .prepare = nv50_sor_disconnect,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
.disable = nv50_sor_disconnect,
@@ -1876,21 +1893,33 @@ static const struct drm_encoder_funcs nv50_sor_func = {
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ default:
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ }
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1898,6 +1927,181 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
/******************************************************************************
+ * PIOR
+ *****************************************************************************/
+
+static void
+nv50_pior_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
+ u32 ctrl = (mode == DRM_MODE_DPMS_ON);
+ nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+}
+
+static bool
+nv50_pior_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ adjusted_mode->clock *= 2;
+ return true;
+}
+
+static void
+nv50_pior_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto, depth;
+ u32 *push;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_connector->base.display_info.bpc) {
+ case 10: depth = 0x6; break;
+ case 8: depth = 0x5; break;
+ case 6: depth = 0x2; break;
+ default: depth = 0x0; break;
+ }
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ proto = 0x0;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 ctrl = (depth << 16) | (proto << 8) | owner;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
+ evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, ctrl);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_pior_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_pior_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
+ .dpms = nv50_pior_dpms,
+ .mode_fixup = nv50_pior_mode_fixup,
+ .prepare = nv50_pior_disconnect,
+ .commit = nv50_pior_commit,
+ .mode_set = nv50_pior_mode_set,
+ .disable = nv50_pior_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_pior_func = {
+ .destroy = nv50_pior_destroy,
+};
+
+static int
+nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c_port *ddc = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ int type;
+
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case DCB_OUTPUT_DP:
+ ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->i2c = ddc;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+ drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
* Init
*****************************************************************************/
void
@@ -1913,7 +2117,7 @@ nv50_display_init(struct drm_device *dev)
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_kick(push, nv50_mast(dev));
- return evo_sync(dev);
+ return 0;
}
return -EBUSY;
@@ -2019,25 +2223,28 @@ nv50_display_create(struct drm_device *dev)
if (IS_ERR(connector))
continue;
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (dcbe->location == DCB_LOC_ON_CHIP) {
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ ret = nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ ret = nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ } else {
+ ret = nv50_pior_create(connector, dcbe);
}
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
+ if (ret) {
+ NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+ dcbe->location, dcbe->type,
+ ffs(dcbe->or) - 1, ret);
}
}
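
Among the nv50_display changes, the new dmac->lock turns evo_wait() and evo_kick() into an acquire/release bracket around pushbuf construction: evo_wait() takes the mutex (and drops it again on the stall error path), and evo_kick() releases it once the methods are written. A user-space sketch of that bracket, using a pthread mutex in place of the kernel one and hypothetical names:

#include <pthread.h>
#include <stdio.h>

/* Toy pushbuf channel: wait() reserves space and takes the lock,
 * kick() records the new put pointer and releases it. */
struct demo_dmac {
	pthread_mutex_t lock;
	unsigned int buf[64];
	unsigned int put;
};

static unsigned int *demo_wait(struct demo_dmac *dmac, unsigned int nr)
{
	pthread_mutex_lock(&dmac->lock);
	if (dmac->put + nr >= 64) {		/* "channel stalled" style bail-out */
		pthread_mutex_unlock(&dmac->lock);
		return NULL;
	}
	return &dmac->buf[dmac->put];
}

static void demo_kick(struct demo_dmac *dmac, unsigned int *push)
{
	dmac->put = (unsigned int)(push - dmac->buf);
	pthread_mutex_unlock(&dmac->lock);
}

int main(void)
{
	struct demo_dmac dmac = { .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned int *push = demo_wait(&dmac, 2);

	if (push) {
		*push++ = 0x0088;	/* method */
		*push++ = 0x0000;	/* data   */
		demo_kick(&dmac, push);
	}
	printf("put = %u\n", dmac.put);
	return 0;
}
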
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index d889f3ac0d41..f9701e567db8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -27,27 +27,16 @@
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fence.h"
+#include "nv10_fence.h"
#include "nv50_display.h"
-struct nv50_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv50_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- spinlock_t lock;
- u32 sequence;
-};
-
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->drm->dev;
- struct nv50_fence_priv *priv = chan->drm->fence;
- struct nv50_fence_chan *fctx;
+ struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
int ret, i;
@@ -57,6 +46,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv10_fence_emit;
+ fctx->base.read = nv10_fence_read;
+ fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvSema, 0x0002,
@@ -91,7 +83,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
int
nv50_fence_create(struct nouveau_drm *drm)
{
- struct nv50_fence_priv *priv;
+ struct nv10_fence_priv *priv;
int ret = 0;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -99,11 +91,9 @@ nv50_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
+ priv->base.resume = nv17_fence_resume;
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
- priv->base.emit = nv10_fence_emit;
- priv->base.read = nv10_fence_read;
- priv->base.sync = nv17_fence_sync;
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -119,13 +109,11 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0) {
- nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- priv->base.sync = nv17_fence_sync;
- priv->base.resume = nv17_fence_resume;
+ if (ret) {
+ nv10_fence_destroy(drm);
+ return ret;
}
- if (ret)
- nv10_fence_destroy(drm);
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index c686650584b6..9fd475c89820 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/class.h>
#include <engine/fifo.h>
@@ -33,79 +34,115 @@
#include "nv50_display.h"
-struct nv84_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ return fctx->dispc_vma[crtc].offset;
+}
static int
-nv84_fence_emit(struct nouveau_fence *fence)
+nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- int ret = RING_SPACE(chan, 7);
+ int ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, chan->vram);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
return ret;
}
-
static int
-nv84_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, chan->vram);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
FIRE_RING (chan);
}
return ret;
}
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.emit32(chan, addr, fence->sequence);
+}
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ struct nouveau_fifo_chan *fifo = (void *)prev->object;
+ u64 addr = fifo->chid * 16;
+
+ if (fence->sysmem)
+ addr += fctx->vma_gart.offset;
+ else
+ addr += fctx->vma.offset;
+
+ return fctx->base.sync32(chan, addr, fence->sequence);
+}
+
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nv_ro32(priv->mem, fifo->chid * 16);
+ return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->drm->dev;
+ struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+ }
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
kfree(fctx);
}
-static int
+int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_client *client = nouveau_client(fifo);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- struct nouveau_object *object;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -113,44 +150,74 @@ nv84_fence_context_new(struct nouveau_channel *chan)
return -ENOMEM;
nouveau_fence_context_new(&fctx->base);
+ fctx->base.emit = nv84_fence_emit;
+ fctx->base.sync = nv84_fence_sync;
+ fctx->base.read = nv84_fence_read;
+ fctx->base.emit32 = nv84_fence_emit32;
+ fctx->base.sync32 = nv84_fence_sync32;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = priv->mem->addr,
- .limit = priv->mem->addr +
- priv->mem->size - 1,
- }, sizeof(struct nv_dma_class),
- &object);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ if (ret == 0) {
+ ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+ &fctx->vma_gart);
+ }
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
- }, sizeof(struct nv_dma_class),
- &object);
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+ ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
+ nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+
if (ret)
nv84_fence_context_del(chan);
- nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
return ret;
}
+static bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+ }
+
+ return priv->suspend != NULL;
+}
+
+static void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+ vfree(priv->suspend);
+ priv->suspend = NULL;
+ }
+}
+
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_gpuobj_ref(NULL, &priv->mem);
+ nouveau_bo_unmap(priv->bo_gart);
+ if (priv->bo_gart)
+ nouveau_bo_unpin(priv->bo_gart);
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -160,7 +227,6 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv;
- u32 chan = pfifo->max + 1;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -168,14 +234,42 @@ nv84_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv84_fence_destroy;
+ priv->base.suspend = nv84_fence_suspend;
+ priv->base.resume = nv84_fence_resume;
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.emit = nv84_fence_emit;
- priv->base.sync = nv84_fence_sync;
- priv->base.read = nv84_fence_read;
- ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
- &priv->mem);
+ init_waitqueue_head(&priv->base.waiting);
+ priv->base.uevent = true;
+
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0)
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_TT, 0, 0, NULL,
+ &priv->bo_gart);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo_gart);
+ if (ret)
+ nouveau_bo_unpin(priv->bo_gart);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo_gart);
+ }
+
if (ret)
nv84_fence_destroy(drm);
return ret;
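
nv84_fence_suspend()/nv84_fence_resume() above preserve each channel's last fence value across suspend by copying the per-channel words of the VRAM-backed fence buffer into a vmalloc'd array and writing them back afterwards. A small sketch of that save/restore idea, with the buffer modelled as ordinary memory and hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_CHANNELS 4

static unsigned int fence_buf[NUM_CHANNELS];	/* stand-in for priv->bo contents */
static unsigned int *suspend_copy;		/* stand-in for priv->suspend */

static int demo_suspend(void)
{
	suspend_copy = malloc(NUM_CHANNELS * sizeof(*suspend_copy));
	if (!suspend_copy)
		return 0;
	memcpy(suspend_copy, fence_buf, NUM_CHANNELS * sizeof(*suspend_copy));
	return 1;
}

static void demo_resume(void)
{
	if (suspend_copy) {
		memcpy(fence_buf, suspend_copy, NUM_CHANNELS * sizeof(*suspend_copy));
		free(suspend_copy);
		suspend_copy = NULL;
	}
}

int main(void)
{
	fence_buf[2] = 42;				/* channel 2 last emitted 42 */
	if (!demo_suspend())
		return 1;
	memset(fence_buf, 0, sizeof(fence_buf));	/* contents lost while suspended */
	demo_resume();
	printf("channel 2 sequence after resume: %u\n", fence_buf[2]);
	return 0;
}
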
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2a56b1b551cb..9566267fbc42 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,203 +34,57 @@
#include "nv50_display.h"
-struct nvc0_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_bo *bo;
- u32 *suspend;
-};
-
-struct nvc0_fence_chan {
- struct nouveau_fence_chan base;
- struct nouveau_vma vma;
- struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nvc0_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
-nvc0_fence_emit(struct nouveau_fence *fence)
+nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nouveau_channel *chan = fence->channel;
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 6);
if (ret == 0) {
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
-
return ret;
}
static int
-nvc0_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *prev, struct nouveau_channel *chan)
+nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvc0_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
- u64 addr = fctx->vma.offset + fifo->chid * 16;
- int ret;
-
- ret = RING_SPACE(chan, 5);
+ int ret = RING_SPACE(chan, 5);
if (ret == 0) {
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, upper_32_bits(virtual));
+ OUT_RING (chan, lower_32_bits(virtual));
+ OUT_RING (chan, sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
FIRE_RING (chan);
}
-
return ret;
}
-static u32
-nvc0_fence_read(struct nouveau_channel *chan)
-{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
-}
-
-static void
-nvc0_fence_context_del(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->drm->dev;
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
-
- nouveau_bo_vma_del(priv->bo, &fctx->vma);
- nouveau_fence_context_del(&fctx->base);
- chan->fence = NULL;
- kfree(fctx);
-}
-
static int
nvc0_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nouveau_client *client = nouveau_client(fifo);
- struct nvc0_fence_priv *priv = chan->drm->fence;
- struct nvc0_fence_chan *fctx;
- int ret, i;
-
- fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- nouveau_fence_context_new(&fctx->base);
-
- ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
- if (ret)
- nvc0_fence_context_del(chan);
-
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+ int ret = nv84_fence_context_new(chan);
+ if (ret == 0) {
+ struct nv84_fence_chan *fctx = chan->fence;
+ fctx->base.emit32 = nvc0_fence_emit32;
+ fctx->base.sync32 = nvc0_fence_sync32;
}
-
- nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
return ret;
}
-static bool
-nvc0_fence_suspend(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
- }
-
- return priv->suspend != NULL;
-}
-
-static void
-nvc0_fence_resume(struct nouveau_drm *drm)
-{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv = drm->fence;
- int i;
-
- if (priv->suspend) {
- for (i = 0; i <= pfifo->max; i++)
- nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
- vfree(priv->suspend);
- priv->suspend = NULL;
- }
-}
-
-static void
-nvc0_fence_destroy(struct nouveau_drm *drm)
-{
- struct nvc0_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
- drm->fence = NULL;
- kfree(priv);
-}
-
int
nvc0_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
- struct nvc0_fence_priv *priv;
- int ret;
-
- priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.dtor = nvc0_fence_destroy;
- priv->base.suspend = nvc0_fence_suspend;
- priv->base.resume = nvc0_fence_resume;
- priv->base.context_new = nvc0_fence_context_new;
- priv->base.context_del = nvc0_fence_context_del;
- priv->base.emit = nvc0_fence_emit;
- priv->base.sync = nvc0_fence_sync;
- priv->base.read = nvc0_fence_read;
-
- ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ int ret = nv84_fence_create(drm);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ struct nv84_fence_priv *priv = drm->fence;
+ priv->base.context_new = nvc0_fence_context_new;
}
-
- if (ret)
- nvc0_fence_destroy(drm);
return ret;
}
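
After this rewrite nvc0_fence.c keeps only the Fermi-specific method emitters: nvc0_fence_context_new() calls nv84_fence_context_new() and then swaps in emit32/sync32, and nvc0_fence_create() does the same at the priv level. A tiny sketch of that construct-then-override pattern (hypothetical names, not the nouveau API):

#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	void (*emit32)(void);	/* generation-specific method emitter */
};

static void base_emit32(void)     { puts("base emit32");     }
static void override_emit32(void) { puts("override emit32"); }

/* the "base" constructor installs the default hooks */
static struct demo_ctx *base_ctx_new(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));
	if (ctx)
		ctx->emit32 = base_emit32;
	return ctx;
}

/* the "derived" constructor reuses it, then overrides one hook */
static struct demo_ctx *derived_ctx_new(void)
{
	struct demo_ctx *ctx = base_ctx_new();
	if (ctx)
		ctx->emit32 = override_emit32;
	return ctx;
}

int main(void)
{
	struct demo_ctx *ctx = derived_ctx_new();
	if (!ctx)
		return 1;
	ctx->emit32();
	free(ctx);
	return 0;
}
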
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 09f65dc3d2c8..09f65dc3d2c8 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..d85e058f2845 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 000000000000..4d8c18aa5dd7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detach_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pinned.
+ . Use mm_shrinker to trigger unpinning pages.
+ . This is mainly theoretical since most of these devices don't actually
+ have swap or a hard drive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc.), which isn't really ideal.
+. Revisit GEM sync object infrastructure; TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+ . This can be handled by the dma-buf fence/reservation stuff when it
+ lands.
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 8979c80adb5f..c451c41a7a7d 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_connector.c
+ * drivers/gpu/drm/omapdrm/omap_connector.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 32109c09357c..bec66a490b8f 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_crtc.c
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -274,17 +274,16 @@ static void page_flip_worker(struct work_struct *work)
struct omap_crtc *omap_crtc =
container_of(work, struct omap_crtc, page_flip_work);
struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
crtc->x << 16, crtc->y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
vblank_cb, crtc);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->fb, 0);
drm_gem_object_unreference_unlocked(bo);
@@ -417,7 +416,7 @@ static void apply_worker(struct work_struct *work)
* the callbacks and list modification all serialized
* with respect to modesetting ioctls from userspace.
*/
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&crtc->mutex);
dispc_runtime_get();
/*
@@ -462,16 +461,15 @@ static void apply_worker(struct work_struct *work)
out:
dispc_runtime_put();
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&crtc->mutex);
}
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
/* no need to queue it again if it is already queued: */
if (apply->queued)
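
The omap_crtc hunks switch the page-flip and apply workers from the device-wide mode_config.mutex to the new per-CRTC crtc->mutex, so each worker serializes only against modesets of the CRTC it is updating. A toy illustration of trading one global lock for per-object locks (pthreads, hypothetical names):

#include <pthread.h>
#include <stdio.h>

/* Before: every update took a single device-wide lock.
 * After:  each CRTC carries its own mutex and updates lock only that. */
struct demo_crtc {
	pthread_mutex_t lock;
	int x, y;
};

static void demo_crtc_apply(struct demo_crtc *crtc, int x, int y)
{
	pthread_mutex_lock(&crtc->lock);	/* fine-grained, per-CRTC */
	crtc->x = x;
	crtc->y = y;
	pthread_mutex_unlock(&crtc->lock);
}

int main(void)
{
	struct demo_crtc crtc0 = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_crtc crtc1 = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* updates to different CRTCs no longer contend on one mutex */
	demo_crtc_apply(&crtc0, 0, 0);
	demo_crtc_apply(&crtc1, 64, 32);
	printf("crtc1 at %d,%d\n", crtc1.x, crtc1.y);
	return 0;
}
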
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2f122e00b51d..c27f59da7f29 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_debugfs.c
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
@@ -57,21 +57,11 @@ static int fb_show(struct seq_file *m, void *arg)
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
seq_printf(m, "fbcon ");
omap_framebuffer_describe(priv->fbdev->fb, m);
+ mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == priv->fbdev->fb)
continue;
@@ -79,9 +69,7 @@ static int fb_show(struct seq_file *m, void *arg)
seq_printf(m, "user ");
omap_framebuffer_describe(fb, m);
}
-
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..58bcd6ae0255 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..9b794c933c81 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..4fdd61e54bd2 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 480dc343446c..079c54c6f94c 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.c
+ * drivers/gpu/drm/omapdrm/omap_drv.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -452,9 +452,9 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
if (ret)
DBG("failed to restore crtc mode");
}
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f921027e7500..d4f997bb4ac0 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_drv.h
+ * drivers/gpu/drm/omapdrm/omap_drv.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/omap_drm.h>
#include <linux/platform_data/omap_drm.h>
-#include "omap_drm.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 25fc0c7b4f6d..21d126d0317e 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_encoder.c
+ * drivers/gpu/drm/omapdrm/omap_encoder.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index bb4969942148..8031402e7951 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fb.c
+ * drivers/gpu/drm/omapdrm/omap_fb.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -423,14 +423,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
- if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
- goto fail;
- }
-
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
-
omap_fb->format = format;
for (i = 0; i < n; i++) {
@@ -461,6 +453,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
return fb;
fail:
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 70f2d6ed2ed3..b11ce609fcc2 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_fbdev.c
+ * drivers/gpu/drm/omapdrm/omap_fbdev.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
@@ -131,9 +131,6 @@ static struct fb_ops omap_fb_ops = {
.fb_pan_display = omap_fbdev_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
-
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -275,8 +272,10 @@ fail:
if (ret) {
if (fbi)
framebuffer_release(fbi);
- if (fb)
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
+ }
}
return ret;
@@ -294,25 +293,10 @@ static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
-static int omap_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = omap_fbdev_create(helper, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.gamma_set = omap_crtc_fb_gamma_set,
.gamma_get = omap_crtc_fb_gamma_get,
- .fb_probe = omap_fbdev_probe,
+ .fb_probe = omap_fbdev_create,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
@@ -365,6 +349,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(helper, 32);
priv->fbdev = helper;
@@ -398,8 +386,10 @@ void omap_fbdev_free(struct drm_device *dev)
fbdev = to_omap_fbdev(priv->fbdev);
/* this will free the backing object */
- if (fbdev->fb)
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
+ }
kfree(fbdev);
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 518d03d4d4f3..ebbdf4132e9c 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem.c
+ * drivers/gpu/drm/omapdrm/omap_gem.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a3236abfca3d..ac74d1bc67bf 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_dmabuf.c
+ * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
index ffb8cceaeb46..f9eb679eb79b 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_gem_helpers.c
+ * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
@@ -40,7 +40,7 @@ struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->filp);
mapping = inode->i_mapping;
npages = obj->size >> PAGE_SHIFT;
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 2629ba7be6c8..e01303ee00c3 100644
--- a/drivers/staging/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_irq.c
+ * drivers/gpu/drm/omapdrm/omap_irq.c
*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index c063476db3bb..2882cda6ea19 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,5 +1,5 @@
/*
- * drivers/staging/omapdrm/omap_plane.c
+ * drivers/gpu/drm/omapdrm/omap_plane.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb609510540..efb609510540 100644
--- a/drivers/staging/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f868671c..0444f868671c 100644
--- a/drivers/staging/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index a8d5ce47686f..a8d5ce47686f 100644
--- a/drivers/staging/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index ea92bbe3ed37..970f8e92dbb7 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
-config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default - NEW DRIVER"
+config DRM_RADEON_UMS
+ bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
- select BACKLIGHT_CLASS_DEVICE
help
- Choose this option if you want kernel modesetting enabled by default.
+ Choose this option if you still need userspace modesetting.
- This is a completely new driver. It's only part of the existing drm
- for compatibility reasons. It requires an entirely different graphics
- stack above it and works very differently from the old drm stack.
- i.e. don't enable this unless you know what you are doing it may
- cause issues or bugs compared to the previous userspace driver stack.
-
- When kernel modesetting is enabled the IOCTL of radeon/drm
- driver are considered as invalid and an error message is printed
- in the log and they return failure.
-
- KMS enabled userspace will use new API to talk with the radeon/drm
- driver. The new API provide functions to create/destroy/share/mmap
- buffer object which are then managed by the kernel memory manager
- (here TTM). In order to submit command to the GPU the userspace
- provide a buffer holding the command stream, along this buffer
- userspace have to provide a list of buffer object used by the
- command stream. The kernel radeon driver will then place buffer
- in GPU accessible memory and will update command stream to reflect
- the position of the different buffers.
-
- The kernel will also perform security check on command stream
- provided by the user, we want to catch and forbid any illegal use
- of the GPU such as DMA into random system memory or into memory
- not owned by the process supplying the command stream.
+ Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
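The Kconfig change above pairs with build-system and source guards elsewhere in the series. As a rough, self-contained C sketch of that compile-out idiom (the function name and the CONFIG_EXAMPLE_UMS macro are placeholders for illustration, not the radeon code), it might look like this:

/* Illustrative sketch only: a legacy path compiled out unless a config
 * symbol is set. CONFIG_EXAMPLE_UMS stands in for a Kconfig-generated
 * define such as -DCONFIG_EXAMPLE_UMS=1 supplied by the build system.
 */
#include <stdio.h>

#ifndef CONFIG_EXAMPLE_UMS
#define CONFIG_EXAMPLE_UMS 0
#endif

static int legacy_modeset_ioctl(void)
{
#if CONFIG_EXAMPLE_UMS
	printf("servicing deprecated userspace-modesetting ioctl\n");
	return 0;
#else
	fprintf(stderr, "userspace modesetting support compiled out\n");
	return -1;
#endif
}

int main(void)
{
	return legacy_modeset_ioctl() ? 1 : 0;
}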
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a6598fd66423..bf172522ea68 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
- r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf51a8de..46a9c3772850 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1238,6 +1238,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ if (!ctx->iio)
+ return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@@ -1287,6 +1289,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+ if (!ctx->iio) {
+ atom_destroy(ctx);
+ return NULL;
+ }
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1335,8 +1341,7 @@ int atom_asic_init(struct atom_context *ctx)
void atom_destroy(struct atom_context *ctx)
{
- if (ctx->iio)
- kfree(ctx->iio);
+ kfree(ctx->iio);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9175615bbd8a..21a892c6ab9c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a2d478e8692a..3c38ea46531c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -403,6 +403,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. Difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ voltage = &rdev->pm.power_state[req_ps_idx].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
/* 0xff01 is a flag rather than an actual voltage */
if (voltage->vddci == 0xff01)
return;
@@ -2308,32 +2321,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
-static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
-
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
@@ -2342,6 +2331,8 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
+ RREG32(SRBM_STATUS2));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2350,112 +2341,283 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+ if (rdev->family >= CHIP_CAYMAN) {
+ dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG + 0x800));
+ }
+}
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VC |
- SOFT_RESET_VGT);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+ crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
}
-static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ SPI_BUSY | VGT_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- evergreen_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset |= SOFT_RESET_DB |
+ SOFT_RESET_CB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_SH |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP |
+ SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- evergreen_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ evergreen_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* Interrupts */
@@ -3280,14 +3442,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
@@ -3310,7 +3472,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3482,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -3380,7 +3542,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
if (cur_size_in_dw > 0xFFFFF)
cur_size_in_dw = 0xFFFFF;
size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3650,7 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index ee4cff534f10..99fb13286fd0 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -36,9 +36,6 @@
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1009,223 +1006,35 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is a relocation packet3.
- **/
-static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
- struct radeon_cs_packet p3reloc;
- int r;
-
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return false;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return false;
- }
- return true;
-}
-
-/**
- * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
- * @parser: parser structure holding parsing context.
- *
- * Userspace sends a special sequence for VLINE waits.
- * PACKET0 - VLINE_START_END + value
- * PACKET3 - WAIT_REG_MEM poll vline status reg
- * RELOC (P3) - crtc_id in reloc.
- *
- * This function parses this and relocates the VLINE START END
- * and WAIT_REG_MEM packets to the correct crtc.
- * It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * Real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation.
*/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- struct radeon_cs_packet p3reloc, wait_reg_mem;
- int crtc_id;
- int r;
- uint32_t header, h_idx, reg, wait_reg_mem_info;
- volatile uint32_t *ib;
-
- ib = p->ib.ptr;
-
- /* parse the WAIT_REG_MEM */
- r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
- if (r)
- return r;
-
- /* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
- wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- return -EINVAL;
- }
-
- wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
- /* bit 4 is reg (0) or mem (1) */
- if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- return -EINVAL;
- }
- /* waiting for value to be equal */
- if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- return -EINVAL;
- }
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- return -EINVAL;
- }
-
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- return -EINVAL;
- }
-
- /* jump over the NOP */
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
- if (r)
- return r;
-
- h_idx = p->idx - 2;
- p->idx += wait_reg_mem.count + 2;
- p->idx += p3reloc.count + 2;
- header = radeon_get_ib_value(p, h_idx);
- crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
- }
- crtc = obj_to_crtc(obj);
- radeon_crtc = to_radeon_crtc(crtc);
- crtc_id = radeon_crtc->crtc_id;
-
- if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
- ib[h_idx + 2] = PACKET2(0);
- ib[h_idx + 3] = PACKET2(0);
- ib[h_idx + 4] = PACKET2(0);
- ib[h_idx + 5] = PACKET2(0);
- ib[h_idx + 6] = PACKET2(0);
- ib[h_idx + 7] = PACKET2(0);
- ib[h_idx + 8] = PACKET2(0);
- } else {
- switch (reg) {
- case EVERGREEN_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
- ib[h_idx] = header;
- ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
- }
- return 0;
+ static uint32_t vline_start_end[6] = {
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+ static uint32_t vline_status[6] = {
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
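/*
 * Editor's note (illustrative, not part of the patch): the wrapper above
 * shows a table-driven split -- the ASIC-specific function only supplies
 * per-CRTC register offsets and delegates all checking to a shared parser.
 * Below is a self-contained sketch of the same idea, with made-up register
 * values and helper names rather than the radeon CS parser API.
 */
#include <stdint.h>
#include <stdio.h>

/* shared parser: per-ASIC differences arrive as register tables */
static int common_vline_parse(unsigned int crtc_id,
			      const uint32_t *start_end_regs,
			      const uint32_t *status_regs)
{
	printf("patch VLINE_START_END at 0x%04x, poll status at 0x%04x\n",
	       start_end_regs[crtc_id], status_regs[crtc_id]);
	return 0;
}

/* per-ASIC wrapper: only knows its own CRTC register layout */
static int asic_a_vline_parse(unsigned int crtc_id)
{
	static const uint32_t start_end[2] = { 0x6028, 0x6828 };	/* placeholder offsets */
	static const uint32_t status[2]    = { 0x602c, 0x682c };	/* placeholder offsets */

	if (crtc_id >= 2)
		return -1;
	return common_vline_parse(crtc_id, start_end, status);
}

int main(void)
{
	return asic_a_vline_parse(0);
}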
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -1347,7 +1156,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_LSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1376,7 +1185,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1418,7 +1227,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1430,7 +1239,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1442,7 +1251,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1454,7 +1263,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1477,7 +1286,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1499,7 +1308,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1563,7 +1372,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1581,7 +1390,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1642,7 +1451,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1670,7 +1479,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1703,7 +1512,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_FMASK:
case CB_COLOR7_FMASK:
tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1720,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_CMASK:
case CB_COLOR7_CMASK:
tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1758,7 +1567,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1774,7 +1583,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_BASE:
case CB_COLOR10_BASE:
case CB_COLOR11_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1787,7 +1596,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1905,7 +1714,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_13:
case SQ_ALU_CONST_CACHE_LS_14:
case SQ_ALU_CONST_CACHE_LS_15:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1919,7 +1728,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1933,7 +1742,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -2018,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -2064,7 +1873,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -2091,7 +1900,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -2119,7 +1928,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -2210,7 +2019,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
@@ -2231,7 +2040,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -2243,6 +2052,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -2282,7 +2094,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -2320,7 +2132,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -2354,7 +2166,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -2370,7 +2182,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2391,7 +2203,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -2413,7 +2225,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -2480,7 +2292,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2511,13 +2323,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
!mip_address &&
- !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+ !radeon_cs_packet_next_is_pkt3_nop(p)) {
/* MIP_ADDRESS should point to FMASK for an MSAA texture.
* It should be 0 if FMASK is disabled. */
moffset = 0;
mipmap = NULL;
} else {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2536,7 +2348,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (vtx)\n");
return -EINVAL;
@@ -2618,7 +2430,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2637,7 +2449,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2662,7 +2474,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2691,7 +2503,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2715,7 +2527,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2819,7 +2631,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2827,12 +2639,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = evergreen_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = evergreen_packet3_check(p, &pkt);
break;
default:
@@ -2858,16 +2670,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-/*
- * DMA
- */
-
-#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
-#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
-#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
-#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
-
/**
* evergreen_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
@@ -2881,9 +2683,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
- u32 idx, idx_value;
+ u32 idx;
u64 src_offset, dst_offset, dst2_offset;
int r;
@@ -2897,9 +2699,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
@@ -2908,19 +2708,27 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- if (tiled) {
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
- } else {
+ break;
+ /* linear */
+ case 0:
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2939,338 +2747,330 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
- if (tiled) {
- idx_value = radeon_get_ib_value(p, idx + 2);
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- p->idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- case 5:
- /* T2T partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- p->idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx + 7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- switch (misc) {
- case 0:
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
- break;
- case 1:
- /* L2L, partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-
- p->idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
- src_offset = radeon_get_ib_value(p, idx+3);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ /* L2T, T2L */
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- /* L2L, dw */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
@@ -3583,19 +3383,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
idx += pkt.count + 2;
break;
@@ -3623,88 +3423,79 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
u32 idx = 0;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
do {
header = ib->ptr[idx];
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
- if (tiled)
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
idx += count + 7;
- else
+ break;
+ /* linear */
+ case 0:
idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
break;
case DMA_PACKET_COPY:
- if (tiled) {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- idx += 9;
- break;
- case 5:
- /* T2T partial */
- idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- switch (misc) {
- case 0:
- idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- }
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- idx += 5;
- break;
- case 1:
- /* L2L, partial */
- idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- /* L2L, dw */
- idx += 5;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 327c08b54180..4fdecc2b4040 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
* Rafał Miłecki
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -54,79 +55,18 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
}
/*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void evergreen_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- uint8_t color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -154,7 +94,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -200,9 +143,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
- evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+ evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
evergreen_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
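For reference, here is a minimal standalone sketch (not part of the patch) of the checksum rule that the removed evergreen_hdmi_infoframe_checksum() implemented and that the shared hdmi_avi_infoframe_pack() helper is now relied on to handle: the checksum byte is chosen so that the header and payload bytes sum to zero modulo 256. The avi_checksum() name and the all-zero payload are illustrative only.

#include <stdint.h>
#include <stdio.h>

/*
 * Same rule as the dropped evergreen_hdmi_infoframe_checksum():
 * type + version + length + payload bytes + checksum == 0 (mod 256).
 */
static uint8_t avi_checksum(uint8_t type, uint8_t version, uint8_t length,
			    const uint8_t *payload)
{
	unsigned int sum = type + version + length;
	int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t payload[13] = { 0 };	/* 13-byte AVI payload, all zero here */

	/* AVI infoframe header: type 0x82, version 0x02, length 0x0d */
	printf("checksum = 0x%02x\n", avi_checksum(0x82, 0x02, 0x0d, payload));
	return 0;
}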
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c22e5db..f585be16e2d5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0bfd0e9e469b..982d25ad9af3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -924,20 +936,23 @@
#define CAYMAN_DMA1_CNTL 0xd82c
/* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
- (((t) & 0x1) << 23) | \
- (((s) & 0x1) << 22) | \
- (((n) & 0xFFFFF) << 0))
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
+ (((sub_cmd) & 0xFF) << 20) |\
+ (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
/* async DMA Packet types */
-#define DMA_PACKET_WRITE 0x2
-#define DMA_PACKET_COPY 0x3
-#define DMA_PACKET_INDIRECT_BUFFER 0x4
-#define DMA_PACKET_SEMAPHORE 0x5
-#define DMA_PACKET_FENCE 0x6
-#define DMA_PACKET_TRAP 0x7
-#define DMA_PACKET_SRBM_WRITE 0x9
-#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_NOP 0xf
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
@@ -980,16 +995,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -998,7 +1004,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
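To make the reworked async DMA header concrete, here is a small standalone sketch (not part of the patch) that reuses the macro definitions from the hunk above: the 8-bit sub-command now sits in bits 27:20, between the 4-bit command and the 20-bit dword count, which is what lets evergreen_dma_cs_parse() switch directly on GET_DMA_SUB_CMD(header). The example values (COPY, sub-command 0x41, count 16) are arbitrary.

#include <stdio.h>

/* Mirrors the evergreend.h definitions above */
#define DMA_PACKET(cmd, sub_cmd, n)	((((cmd) & 0xF) << 28) |	\
					 (((sub_cmd) & 0xFF) << 20) |	\
					 (((n) & 0xFFFFF) << 0))
#define GET_DMA_CMD(h)			(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)		((h) & 0x000fffff)
#define GET_DMA_SUB_CMD(h)		(((h) & 0x0ff00000) >> 20)

#define DMA_PACKET_COPY			0x3

int main(void)
{
	/* DMA_PACKET_COPY, sub-command 0x41 (L2L, partial), 16 dwords */
	unsigned int header = DMA_PACKET(DMA_PACKET_COPY, 0x41, 16);

	printf("cmd=0x%x sub=0x%02x count=%u\n",
	       GET_DMA_CMD(header), GET_DMA_SUB_CMD(header),
	       GET_DMA_COUNT(header));
	/* prints: cmd=0x3 sub=0x41 count=16 */
	return 0;
}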
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 835992d8d067..7cead763be9e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1310,120 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
-static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 grbm_reset = 0;
+ u32 reset_mask = 0;
+ u32 tmp;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-}
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
-static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1433,29 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- cayman_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- cayman_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ cayman_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/**
@@ -1464,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (cayman-SI).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ mask = RADEON_RESET_DMA;
else
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (dma_status_reg & DMA_IDLE) {
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1843,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (cayman/TN).
*/
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -1866,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFF)
ndw = 0x3FFF;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1880,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -1891,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1905,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
}
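The reset rework above follows a check-then-decide pattern: busy bits are folded into a reset mask, and the lockup helpers report a ring as hung only when its engine's bit is set in that mask. Below is a condensed standalone sketch of that flow; the RESET_DMA/RESET_DMA1 values are hypothetical stand-ins for the RADEON_RESET_* flags from radeon.h, which this patch does not show.

#include <stdio.h>

/* SRBM_STATUS2 bits from the nid.h hunk below */
#define DMA_BUSY	(1 << 5)
#define DMA1_BUSY	(1 << 6)

/* Hypothetical stand-ins for RADEON_RESET_DMA / RADEON_RESET_DMA1 */
#define RESET_DMA	(1 << 0)
#define RESET_DMA1	(1 << 1)

/* Shape of cayman_gpu_check_soft_reset(): status bits -> reset mask */
static unsigned int check_soft_reset(unsigned int srbm_status2)
{
	unsigned int reset_mask = 0;

	if (srbm_status2 & DMA_BUSY)
		reset_mask |= RESET_DMA;
	if (srbm_status2 & DMA1_BUSY)
		reset_mask |= RESET_DMA1;
	return reset_mask;
}

int main(void)
{
	unsigned int status2 = DMA1_BUSY;	/* pretend only dma1 is busy */
	unsigned int mask = check_soft_reset(status2);

	/* cayman_dma_is_lockup() tests exactly one of these bits per ring */
	printf("dma0 hung: %s\n", (mask & RESET_DMA)  ? "yes" : "no");
	printf("dma1 hung: %s\n", (mask & RESET_DMA1) ? "yes" : "no");
	return 0;
}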
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 48e5022ee921..079dee202a9e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -68,6 +78,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
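The removed per-file copies above spell out the PM4 header layout that the shared RADEON_CP_PACKET_* helpers are assumed to keep using after this consolidation. A small standalone sketch (not part of the patch, with local macro names) decodes a type-3 header accordingly; the opcode and count values are arbitrary.

#include <stdio.h>

/* Same bit layout as the removed CP_PACKET_GET_* macros */
#define PKT_GET_TYPE(h)		(((h) >> 30) & 3)
#define PKT_GET_COUNT(h)	(((h) >> 16) & 0x3FFF)
#define PKT3_GET_OPCODE(h)	(((h) >> 8) & 0xFF)

int main(void)
{
	/* PACKET3(op, n): type 3 in 31:30, count in 29:16, opcode in 15:8 */
	unsigned int op = 0x2A, n = 4;	/* hypothetical opcode and count */
	unsigned int header = (3u << 30) | ((n & 0x3FFF) << 16) |
			      ((op & 0xFF) << 8);

	printf("type=%u count=%u opcode=0x%02x\n",
	       PKT_GET_TYPE(header), PKT_GET_COUNT(header),
	       PKT3_GET_OPCODE(header));
	/* prints: type=3 count=4 opcode=0x2a */
	return 0;
}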
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac222dc..9db58530be37 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_reloc *reloc;
u32 value;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
-{
- volatile uint32_t *ib;
- unsigned i;
- unsigned idx;
-
- ib = p->ib.ptr;
- idx = pkt->idx;
- for (i = 0; i <= (pkt->count + 1); i++, idx++) {
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
- }
-}
-
-/**
- * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the wait until */
- r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+ r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
}
/* jump over the NOP */
- r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R100_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return 0;
}
-/**
- * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r100_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXOFFSET_1:
case RADEON_PP_TXOFFSET_2:
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T0_3:
case RADEON_PP_CUBIC_OFFSET_T0_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T1_3:
case RADEON_PP_CUBIC_OFFSET_T1_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T2_3:
case RADEON_PP_CUBIC_OFFSET_T2_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case 0x23:
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
- if (p->rdev->family >= CHIP_R200)
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r200_packet0_check);
- else
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r100_packet0_check);
- break;
- case PACKET_TYPE2:
- break;
- case PACKET_TYPE3:
- r = r100_packet3_check(p, &pkt);
- break;
- default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
- return -EINVAL;
+ case RADEON_PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
}
- if (r) {
+ if (r)
return r;
- }
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b378adb..eb40888bdfcc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -81,10 +81,6 @@ struct r100_cs_track {
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index eab91760fae0..f0f8ee69f480 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -64,17 +64,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 98143a5c5b73..b3807edb1936 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d0ba6023a1f8..c60350e6872d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_COLOROFFSET2:
case R300_RB3D_COLOROFFSET3:
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[i].robj = reloc->robj;
@@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
i = (reg - R300_TX_OFFSET_0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
@@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r300.reg_safe_bm,
p->rdev->config.r300.reg_safe_bm_size,
&r300_packet0_check);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r300_packet3_check(p, &pkt);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 002ab038d2ab..865e2c9980db 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -29,6 +29,8 @@
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 1f519a5ffb8c..ff229a00d273 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -65,17 +65,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_000148_MC_FB_LOCATION 0x000148
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aaafb73..c0dc8d3ba0bb 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,7 @@
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index becb03e8b32f..6d4b5611daf4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -103,6 +109,19 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1254,169 +1273,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-/* We doesn't check that the GPU really needs a reset we simply do the
- * reset, it's up to the caller to determine if the GPU needs one. We
- * might add an helper function to check that.
- */
-static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
- u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
- S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
- S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
- S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
- S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
- S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
- S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
- S_008010_GUI_ACTIVE(1);
- u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
- S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
- S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
- S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
- S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
- S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
- S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
- S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 tmp;
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
+ RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
+ RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
-
- /* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
- /* Check if any of the rendering block is busy and reset it */
- if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
- (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- tmp = S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
}
- /* Reset CP (we always reset CP) */
- tmp = S_008020_SOFT_RESET_CP(1);
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+ return true;
}
-static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
+ u32 reset_mask = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ return reset_mask;
}
-static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ r600_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- r600_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+ S_008020_SOFT_RESET_VGT(1);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
- if (reset_mask & RADEON_RESET_DMA)
- r600_gpu_soft_reset_dma(rdev);
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
mdelay(1);
rv515_mc_resume(rdev, &save);
+ udelay(50);
+
+ r600_print_gpu_status_regs(rdev);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status2;
-
- srbm_status = RREG32(R_000E50_SRBM_STATUS);
- grbm_status = RREG32(R_008010_GRBM_STATUS);
- grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
- if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1431,15 +1582,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
- dma_status_reg = RREG32(DMA_STATUS_REG);
- if (dma_status_reg & DMA_IDLE) {
+ if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1448,13 +1598,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-int r600_asic_reset(struct radeon_device *rdev)
-{
- return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -4318,14 +4461,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
}
/**
- * r600_get_gpu_clock - return GPU clock counter snapshot
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
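The new display-hang check in r600.c above leans on the fixed register stride between the two CRTC blocks, captured in crtc_offsets[]. A small illustrative helper (the function name is hypothetical) showing the addressing pattern r600_is_display_hung() uses when it samples the HV counter:

/* Any D1 CRTC register is re-targeted at CRTC "crtc" by adding the
 * per-CRTC offset. r600_is_display_hung() reads this counter for each
 * enabled CRTC, re-reads it over roughly a millisecond, and reports a
 * hang only if none of the counters moved. */
static u32 r600_crtc_hv_count(struct radeon_device *rdev, int crtc)
{
	return RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[crtc]);
}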
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 77da1f9c0b8e..f651881eb0ae 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -22,6 +22,8 @@
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e082dca6feee..9fb5780a552f 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
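int2float(), moved into r600_blit_kms.c above, is exact up to 2^24 and truncates toward zero beyond that, as its comment states. A user-space spot check of those claims, restating the rotate trick with GCC builtins in place of the kernel's __fls() and ror32() (a sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t ror32_us(uint32_t x, unsigned int r)
{
	return (x >> r) | (x << ((32 - r) & 31));
}

static uint32_t int2float_us(uint32_t x)
{
	uint32_t msb, exponent, fraction;

	if (!x)
		return 0;
	msb = 31 - __builtin_clz(x);	/* same result as the kernel's __fls() */
	fraction = ror32_us(x, (msb - 23) & 0x1f) & ((1u << 23) - 1);
	exponent = (127 + msb) << 23;
	return fraction + exponent;
}

int main(void)
{
	assert(int2float_us(1) == 0x3f800000);		/* 1.0f */
	assert(int2float_us(16) == 0x41800000);		/* 16.0f */
	assert(int2float_us(0x01000001) == 0x4b800000);	/* 2^24 + 1 truncates to 2^24 */
	return 0;
}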
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index be85f75aedda..1c51c08b1fde 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -24,6 +24,8 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9b2512bf1a46..01a3ec83f284 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -31,12 +31,7 @@
#include "r600d.h"
#include "r600_reg_safe.h"
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
-static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int r600_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- *cs_reloc = p->relocs;
- (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
- * @parser: parser structure holding parsing context.
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by r600_cs_common_vline_parse(); here we
+ * just set up the ASIC-specific register tables and call the
+ * common implementation.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct radeon_cs_packet p3reloc;
- int r;
+ static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+ AVIVO_D2MODE_VLINE_START_END};
+ static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+ AVIVO_D2MODE_VLINE_STATUS};
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return 0;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return 0;
- }
- return 1;
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
/**
- * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * r600_cs_common_vline_parse() - common vline parser
* @parser: parser structure holding parsing context.
+ * @vline_start_end: table of vline_start_end registers
+ * @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * wait in that case. This function is common to all ASICs from
+ * R600 onward; the parsing algorithm is the same and differs only
+ * in which registers are used.
+ *
+ * The caller is the ASIC-specific function that passes in the
+ * parser context and the ASIC-specific register tables.
*/
-static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
- r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;
/* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
+ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ return -EINVAL;
+ }
+ /* bit 8 is me (0) or pfp (1) */
+ if (wait_reg_mem_info & 0x100) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
/* jump over the NOP */
- r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;
@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R600_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
- } else if (crtc_id == 1) {
- switch (reg) {
- case AVIVO_D1MODE_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= AVIVO_D2MODE_VLINE_START_END >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
+ } else if (reg == vline_start_end[0]) {
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= vline_start_end[crtc_id] >> 2;
ib[h_idx] = header;
- ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+ ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+ } else {
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
}
-
return 0;
}
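As the kernel-doc above spells out, r600_cs_common_vline_parse() is meant to be shared: each ASIC-specific parser only supplies its own per-CRTC register tables. A sketch of what such a caller looks like on another ASIC (the function name and zeroed table entries are placeholders, not values from this patch):

static int example_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	/* One entry per CRTC, filled with that ASIC's VLINE_START_END and
	 * VLINE_STATUS register offsets. */
	static uint32_t vline_start_end[2] = { 0, 0 };
	static uint32_t vline_status[2] = { 0, 0 };

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}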
@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
/* tex mip base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = r600_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-{
- if (p->chunk_relocs_idx == -1) {
- return 0;
- }
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
+#ifdef CONFIG_DRM_RADEON_UMS
/**
* cs_parser_fini() - clean parser states
@@ -2485,6 +2339,18 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->chunks_array);
}
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
@@ -2543,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
void r600_cs_legacy_init(void)
{
- r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
+ r600_nomm = 1;
}
+#endif
+
/*
* DMA
*/
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ff80efe9cb7d..21ecc0e12dc4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -23,6 +23,7 @@
*
* Authors: Christian König
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -121,79 +122,18 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
}
/*
- * calculate the crc for a given info frame
- */
-static void r600_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- enum r600_hdmi_color_format color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
/*
* build a Audio Info Frame
*/
-static void r600_hdmi_audioinfoframe(
- struct drm_encoder *encoder,
- uint8_t channel_count,
- uint8_t coding_type,
- uint8_t sample_size,
- uint8_t sample_frequency,
- uint8_t format,
- uint8_t channel_allocation,
- uint8_t level_shift,
- int downmix_inhibit
-)
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+ const void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
-
- uint8_t frame[11];
-
- frame[0x0] = 0;
- frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
- frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
- frame[0x3] = format;
- frame[0x4] = channel_allocation;
- frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
- frame[0x6] = 0;
- frame[0x7] = 0;
- frame[0x8] = 0;
- frame[0x9] = 0;
- frame[0xA] = 0;
-
- r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+ const u8 *frame = buffer + 3;
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -320,7 +236,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -371,9 +290,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
- r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
@@ -395,8 +324,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
+ uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+ struct hdmi_audio_infoframe frame;
uint32_t offset;
uint32_t iec;
+ ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
return;
@@ -462,9 +394,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
- 0);
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ DRM_ERROR("failed to setup audio infoframe\n");
+ return;
+ }
+
+ frame.channels = audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack audio infoframe\n");
+ return;
+ }
+ r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_audio_workaround(encoder);
}
@@ -544,7 +488,6 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* Called for ATOM_ENCODER_MODE_HDMI only */
if (!dig || !dig->afmt) {
- WARN_ON(1);
return;
}
if (!dig->afmt->enabled)
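The HDMI rewrite above delegates infoframe construction to the linux/hdmi.h helpers and keeps only the register writes. The "buffer + 3" arithmetic is what preserves the old layout: the pack helpers emit the 3-byte type/version/length header first, then the checksum and payload, so skipping three bytes leaves the checksum in frame[0], exactly where the old hand-rolled code kept it. A tiny illustrative helper (hypothetical name) making that explicit:

/* Layout written by hdmi_avi_infoframe_pack()/hdmi_audio_infoframe_pack():
 *   buffer[0] = type, buffer[1] = version, buffer[2] = length,
 *   buffer[3] = checksum, buffer[4..] = payload
 * The WREG32() calls above therefore start reading at buffer + 3. */
static const u8 *r600_hdmi_infoframe_payload(const u8 *buffer)
{
	return buffer + 3;
}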
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a53402b1852..a42ba11a3bed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -182,6 +182,8 @@
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
#define R_0086D8_CP_ME_CNTL 0x86D8
+#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
+#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
#define CP_ME_RAM_DATA 0xC160
@@ -1143,19 +1145,10 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -1328,6 +1321,7 @@
#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
+#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a08f657329a0..8263af3fd832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -136,6 +136,15 @@ extern int radeon_lockup_timeout;
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA (1 << 2)
+#define RADEON_RESET_CP (1 << 3)
+#define RADEON_RESET_GRBM (1 << 4)
+#define RADEON_RESET_DMA1 (1 << 5)
+#define RADEON_RESET_RLC (1 << 6)
+#define RADEON_RESET_SEM (1 << 7)
+#define RADEON_RESET_IH (1 << 8)
+#define RADEON_RESET_VMC (1 << 9)
+#define RADEON_RESET_MC (1 << 10)
+#define RADEON_RESET_DISPLAY (1 << 11)
/*
* Errata workarounds.
@@ -341,7 +350,6 @@ struct radeon_bo {
struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -771,6 +779,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1169,6 +1178,10 @@ struct radeon_asic {
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* get the reference clock */
+ u32 (*get_xclk)(struct radeon_device *rdev);
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1179,7 +1192,9 @@ struct radeon_asic {
void (*fini)(struct radeon_device *rdev);
u32 pt_ring_index;
- void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ void (*set_page)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
@@ -1757,6 +1772,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
/*
* BIOS helpers.
@@ -1801,7 +1817,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1847,10 +1863,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1972,6 +1991,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status);
+
#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0b202c07fe50..aba0a893ea98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -934,6 +934,8 @@ static struct radeon_asic r600_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -946,7 +948,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1020,8 @@ static struct radeon_asic rs780_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1030,7 +1034,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1106,8 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1114,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1186,6 +1192,8 @@ static struct radeon_asic evergreen_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1198,7 +1206,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1215,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1270,6 +1278,8 @@ static struct radeon_asic sumo_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1282,7 +1292,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1301,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1354,6 +1364,8 @@ static struct radeon_asic btc_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1366,7 +1378,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1387,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1438,6 +1450,8 @@ static struct radeon_asic cayman_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1445,7 +1459,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1457,7 +1471,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1482,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1493,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1565,6 +1579,8 @@ static struct radeon_asic trinity_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1572,7 +1588,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1584,7 +1600,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1611,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1622,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1692,6 +1708,8 @@ static struct radeon_asic si_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &si_get_xclk,
+ .get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1699,7 +1717,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
@@ -1711,7 +1729,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1740,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1751,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1762,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1773,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
}
},
@@ -1944,9 +1962,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
+ case CHIP_OLAND:
rdev->asic = &si_asic;
/* set num crtcs */
- rdev->num_crtc = 6;
+ if (rdev->family == CHIP_OLAND)
+ rdev->num_crtc = 2;
+ else
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 15d70e613076..3535f73ad3e2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool emit_wait);
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -389,7 +389,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -407,6 +408,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
/*
* evergreen
@@ -422,7 +424,8 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -473,13 +476,16 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -496,23 +502,27 @@ int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-uint64_t si_get_gpu_clock(struct radeon_device *rdev);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 15f5ded65e0c..d96070bf8388 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -43,6 +43,12 @@ struct atpx_verify_interface {
u32 function_bits; /* supported functions bit vector */
} __packed;
+struct atpx_px_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_flags; /* which flags are valid */
+ u32 flags; /* flags */
+} __packed;
+
struct atpx_power_control {
u16 size;
u8 dgpu_state;
@@ -123,9 +129,61 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
}
/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+ size_t size;
+ u32 valid_bits;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ kfree(info);
+ return -EINVAL;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ valid_bits = output.flags & output.valid_flags;
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
* radeon_atpx_verify_interface - verify ATPX
*
- * @handle: acpi handle
* @atpx: radeon atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
@@ -406,8 +464,19 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
*/
static int radeon_atpx_init(void)
{
+ int r;
+
/* set up the ATPX handle */
- return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ /* validate the atpx setup */
+ r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 9143fc45e35b..efc4f6441ef4 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -27,6 +27,8 @@
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5407459e56d2..70d38241b083 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = p->ib.sync_to[fence->ring];
- p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
(p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_cs_sync_to(parser, vm->fence);
- radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ * @idx: index of the packet header dword within the IB chunk
+ *
+ * Assumes that chunk_ib_idx is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining IB size or if the packet type is
+ * unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is NOP relocation packet3.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: where to store the resulting relocation
+ * @nomm: no memory management (read the relocation straight from the chunk)
+ *
+ * Check that the next packet is a NOP relocation packet3 and return the
+ * matching relocation entry.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
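The helpers above centralise PM4 header decoding that used to be duplicated per ASIC: the packet type sits in bits 30-31, the type-3 opcode in bits 8-15, the payload count in bits 16-29, and each packet spans count + 2 dwords (a type-2 filler has its count forced to -1, i.e. a single dword). The following stand-alone sketch is not part of the patch; the stream contents and the main() harness are made up for illustration, only the decode rules mirror radeon_cs_packet_parse():

#include <stdint.h>
#include <stdio.h>

#define RADEON_CP_PACKET_GET_TYPE(h)	(((h) >> 30) & 3)
#define RADEON_CP_PACKET_GET_COUNT(h)	(((h) >> 16) & 0x3FFF)
#define RADEON_CP_PACKET3_GET_OPCODE(h)	(((h) >> 8) & 0xFF)
#define RADEON_PACKET_TYPE2		2
#define RADEON_PACKET_TYPE3		3

int main(void)
{
	/* made-up stream: a packet3 NOP with one payload dword,
	 * followed by a type-2 filler dword */
	uint32_t ib[] = { 0xc0001000, 0xdeadbeef, 0x80000000 };
	unsigned ndw = sizeof(ib) / sizeof(ib[0]);
	unsigned idx = 0;

	while (idx < ndw) {
		uint32_t header = ib[idx];
		int type = RADEON_CP_PACKET_GET_TYPE(header);
		int count = (type == RADEON_PACKET_TYPE2) ?
			-1 : (int)RADEON_CP_PACKET_GET_COUNT(header);

		printf("idx %u: type %d, count %d", idx, type, count);
		if (type == RADEON_PACKET_TYPE3)
			printf(", opcode 0x%02x",
			       RADEON_CP_PACKET3_GET_OPCODE(header));
		printf("\n");

		/* header plus count + 1 payload dwords */
		idx += count + 2;
	}
	return 0;
}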
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0d67674b64b1..b097d5b4ff39 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -246,8 +246,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int i = 0;
struct drm_crtc *crtc_p;
- /* avivo cursor image can't end on 128 pixel boundary or
+ /*
+ * avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
+ *
+ * NOTE: It is safe to access crtc->enabled of other crtcs
+ * without holding either the mode_config lock or the other
+ * crtc's lock as long as write access to this flag _always_
+ * grabs all locks.
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d6562bb0c93..44b8034a400d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
"TAHITI",
"PITCAIRN",
"VERDE",
+ "OLAND",
"LAST",
};
@@ -758,6 +759,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ if (!rdev->mode_info.atom_context) {
+ radeon_atombios_fini(rdev);
+ return -ENOMEM;
+ }
+
mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
@@ -777,9 +783,11 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
- kfree(rdev->mode_info.atom_context);
}
+ kfree(rdev->mode_info.atom_context);
+ rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
+ rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 05c96fa0b051..e38fd559f1ab 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1089,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d9bf96ee299a..167758488ed6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -118,20 +118,32 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
#endif
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
int radeon_no_wb;
-int radeon_modeset = -1;
+int radeon_modeset = 1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
@@ -199,6 +211,14 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+static struct pci_device_id pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -227,14 +247,6 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
-static struct pci_device_id pciidlist[] = {
- radeon_PCI_IDS
-};
-
-#if defined(CONFIG_DRM_RADEON_KMS)
-MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
-
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -284,6 +296,8 @@ static struct drm_driver driver_old = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+#endif
+
static struct drm_driver kms_driver;
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -397,8 +411,13 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = radeon_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = radeon_gem_prime_pin,
+ .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_vmap = radeon_gem_prime_vmap,
+ .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -411,10 +430,12 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
+#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
+#endif
static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
@@ -427,28 +448,6 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->num_ioctls = radeon_max_ioctl;
-#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->driver_features &= ~DRIVER_MODESET;
- radeon_modeset = 0;
- }
-#endif
- /* if enabled by default */
- if (radeon_modeset == -1) {
-#ifdef CONFIG_DRM_RADEON_KMS
- DRM_INFO("radeon defaulting to kernel modesetting.\n");
- radeon_modeset = 1;
-#else
- DRM_INFO("radeon defaulting to userspace modesetting.\n");
- radeon_modeset = 0;
-#endif
- }
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
@@ -456,9 +455,21 @@ static int __init radeon_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
+
+ } else {
+#ifdef CONFIG_DRM_RADEON_UMS
+ DRM_INFO("radeon userspace modesetting enabled.\n");
+ driver = &driver_old;
+ pdriver = &radeon_pci_driver;
+ driver->driver_features &= ~DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_ioctl;
+#else
+ DRM_ERROR("No UMS support in radeon module!\n");
+ return -EINVAL;
+#endif
}
- /* if the vga console setting is enabled still
- * let modprobe override it */
+
+ /* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e7fdf163a8ca..b369d42f7de5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-#endif
-
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
} while (0)
+#endif /* CONFIG_DRM_RADEON_UMS */
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index d1fafeabea09..2d91123f2759 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -91,6 +91,7 @@ enum radeon_family {
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
+ CHIP_OLAND,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc8489d8c6d1..b1746741bc59 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct radeon_fbdev *rfbdev,
+static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
@@ -293,28 +294,13 @@ out_unref:
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}
-static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = radeonfb_create(rfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -339,6 +325,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
@@ -347,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeon_fb_find_or_create_single,
+ .fb_probe = radeonfb_create,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -377,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
}
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b5..2c1341f63dc5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde,
+ radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
@@ -985,7 +986,7 @@ retry:
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
}
@@ -1009,6 +1010,7 @@ retry:
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
- struct radeon_ring *ring = &rdev->ring[ridx];
- struct radeon_semaphore *sem = NULL;
+ struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
- if (vm->fence && radeon_fence_signaled(vm->fence)) {
- radeon_fence_unref(&vm->fence);
- }
-
- if (vm->fence && vm->fence->ring != ridx) {
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- return r;
- }
- }
-
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
- /* estimate number of dw needed */
- /* semaphore, fence and padding */
- ndw = 32;
+ /* padding, etc. */
+ ndw = 64;
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
- r = radeon_ring_lock(rdev, ring, ndw);
- if (r) {
- return r;
- }
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
- if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
- radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
- radeon_fence_note_sync(vm->fence, ridx);
- }
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ ib.length_dw = 0;
- r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
- radeon_fence_unref(&vm->fence);
- r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index e7710339a6a7..8d68e972789a 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -28,6 +28,8 @@
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9afb68..c75cb2c6ba71 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -185,11 +185,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (info->request == RADEON_INFO_TIMESTAMP) {
if (rdev->family >= CHIP_R600) {
value_ptr64 = (uint64_t*)((unsigned long)info->value);
- if (rdev->family >= CHIP_TAHITI) {
- value64 = si_get_gpu_clock(rdev);
- } else {
- value64 = r600_get_gpu_clock(rdev);
- }
+ value64 = radeon_get_gpu_clock_counter(rdev);
if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
@@ -282,7 +278,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
- value = rdev->clock.spll.reference_freq * 10;
+ if (rdev->asic->get_xclk)
+ value = radeon_get_xclk(rdev) * 10;
+ else
+ value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index b9f067241633..d54d2d7c9031 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -27,6 +27,8 @@
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0bfa656aa87d..338fd6a74e87 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
- * mclk.
+ * mclk and vddci.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 26c23bb651c6..4940af7e75e6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -28,199 +28,71 @@
#include "radeon.h"
#include <drm/radeon_drm.h>
-#include <linux/dma-buf.h>
-
-static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct radeon_bo *bo = attachment->dmabuf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
-
- if (bo->gem_base.export_dma_buf == dma_buf) {
- DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
- bo->gem_base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
-}
-
-static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
-static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
-static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
- return -EINVAL;
-}
-
-static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (bo->vmapping_count) {
- bo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- bo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return bo->dma_buf_vmap.virtual;
}
-static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
- mutex_lock(&dev->struct_mutex);
- bo->vmapping_count--;
- if (bo->vmapping_count == 0) {
- ttm_bo_kunmap(&bo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
}
-const static struct dma_buf_ops radeon_dmabuf_ops = {
- .map_dma_buf = radeon_gem_map_dma_buf,
- .unmap_dma_buf = radeon_gem_unmap_dma_buf,
- .release = radeon_gem_dmabuf_release,
- .kmap = radeon_gem_kmap,
- .kmap_atomic = radeon_gem_kmap_atomic,
- .kunmap = radeon_gem_kunmap,
- .kunmap_atomic = radeon_gem_kunmap_atomic,
- .mmap = radeon_gem_prime_mmap,
- .vmap = radeon_gem_prime_vmap,
- .vunmap = radeon_gem_prime_vunmap,
-};
-
-static int radeon_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct radeon_bo **pbo)
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
- return ret;
- bo = *pbo;
+ return ERR_PTR(ret);
bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
- return 0;
+ return &bo->gem_base;
}
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
ret = radeon_bo_reserve(bo, false);
if (unlikely(ret != 0))
- return ERR_PTR(ret);
+ return ret;
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (ret) {
radeon_bo_unreserve(bo);
- return ERR_PTR(ret);
+ return ret;
}
radeon_bo_unreserve(bo);
- return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
-}
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct radeon_bo *bo;
- int ret;
-
- if (dma_buf->ops == &radeon_dmabuf_ops) {
- bo = dma_buf->priv;
- if (bo->gem_base.dev == dev) {
- drm_gem_object_reference(&bo->gem_base);
- dma_buf_put(dma_buf);
- return &bo->gem_base;
- }
- }
-
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
- if (ret)
- goto fail_unmap;
-
- bo->gem_base.import_attach = attach;
-
- return &bo->gem_base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5d8f735d6aaf..7e2c2b7cf188 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3706,4 +3706,19 @@
#define RV530_GB_PIPE_SELECT2 0x4124
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
+
+#define RADEON_VLINE_STAT (1 << 12)
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cd72062d5a91..8d58e268ff6d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,6 +109,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = ib->sync_to[fence->ring];
+ ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
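radeon_ib_sync_to() keeps, per ring, only the most recent fence the IB has to wait on, so repeated calls with older fences are harmless. Below is a toy model of that bookkeeping; the types are simplified stand-ins and radeon_fence_later() is approximated by a sequence-number compare, none of this is the driver's real code:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 6

struct toy_fence {
	int ring;
	uint64_t seq;		/* higher seq == emitted later */
};

/* keep the later of two fences, roughly what radeon_fence_later() does */
static struct toy_fence *later(struct toy_fence *a, struct toy_fence *b)
{
	if (!a || !b)
		return a ? a : b;
	return a->seq >= b->seq ? a : b;
}

/* toy counterpart of radeon_ib_sync_to(): remember the newest fence per ring */
static void ib_sync_to(struct toy_fence *sync_to[NUM_RINGS],
		       struct toy_fence *fence)
{
	if (!fence)
		return;
	sync_to[fence->ring] = later(fence, sync_to[fence->ring]);
}

int main(void)
{
	struct toy_fence *sync_to[NUM_RINGS] = { NULL };
	struct toy_fence a = { .ring = 0, .seq = 10 };
	struct toy_fence b = { .ring = 0, .seq = 12 };

	ib_sync_to(sync_to, &b);
	ib_sync_to(sync_to, &a);	/* older fence on the same ring is ignored */
	printf("ring 0 waits for seq %llu\n",
	       (unsigned long long)sync_to[0]->seq);	/* prints 12 */
	return 0;
}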
+/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 8e9057b6a365..4d20910899d4 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -25,6 +25,8 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index 590309a710b1..6927a200daf4 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -205,17 +205,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1b2444f4d8f4..d63fe1d0f53f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -43,6 +43,31 @@ static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
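rv770_get_xclk() above (and si_get_xclk() further down) pick the reference clock from two strap bits: mux TCLK onto XCLK, divide the crystal by four, or pass it through. A stand-alone sketch of that decision logic follows; the register and reference values fed in are made up for the example, only the selection rules come from the patch:

#include <stdint.h>
#include <stdio.h>

#define PCIE_BUS_CLK		10000
#define TCLK			(PCIE_BUS_CLK / 10)
#define MUX_TCLK_TO_XCLK	(1 << 8)
#define XTALIN_DIVIDE		(1 << 9)

/* same three-way choice as rv770_get_xclk(), on a caller-supplied
 * CG_CLKPIN_CNTL value instead of a register read */
static uint32_t get_xclk(uint32_t cg_clkpin_cntl, uint32_t reference_clock)
{
	if (cg_clkpin_cntl & MUX_TCLK_TO_XCLK)
		return TCLK;
	if (cg_clkpin_cntl & XTALIN_DIVIDE)
		return reference_clock / 4;
	return reference_clock;
}

int main(void)
{
	uint32_t ref = 2700;	/* hypothetical crystal value */

	printf("passthrough: %u\n", get_xclk(0, ref));
	printf("divided:     %u\n", get_xclk(XTALIN_DIVIDE, ref));
	printf("tclk mux:    %u\n", get_xclk(MUX_TCLK_TO_XCLK, ref));
	return 0;
}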
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 20e29d23d348..c55f950a4af7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -128,6 +128,10 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_CLKPIN_CNTL 0x660
+# define MUX_TCLK_TO_XCLK (1 << 8)
+# define XTALIN_DIVIDE (1 << 9)
+
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
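(Illustrative sketch, not part of the patch.) rv770_get_xclk() above — and the SI variant later in this diff — selects the gfx reference clock from two bits in CG_CLKPIN_CNTL. The standalone sketch below models just that decision; the register read is replaced by a plain parameter because RREG32() only exists inside the driver, and the bit positions and constants are the r7xx values from the hunks above.

#include <stdio.h>

#define PCIE_BUS_CLK      10000
#define TCLK              (PCIE_BUS_CLK / 10)
#define MUX_TCLK_TO_XCLK  (1 << 8)      /* CG_CLKPIN_CNTL bits, r7xx layout */
#define XTALIN_DIVIDE     (1 << 9)

/* clkpin_cntl stands in for RREG32(CG_CLKPIN_CNTL) */
static unsigned int demo_get_xclk(unsigned int clkpin_cntl,
				  unsigned int reference_clock)
{
	if (clkpin_cntl & MUX_TCLK_TO_XCLK)
		return TCLK;                    /* xclk fed from the PCIe bus clock */
	if (clkpin_cntl & XTALIN_DIVIDE)
		return reference_clock / 4;     /* crystal input divided by four */
	return reference_clock;                 /* plain SPLL reference clock */
}

int main(void)
{
	unsigned int ref = 2700;   /* example value, same units as spll.reference_freq */

	printf("%u %u %u\n",
	       demo_get_xclk(0, ref),
	       demo_get_xclk(XTALIN_DIVIDE, ref),
	       demo_get_xclk(MUX_TCLK_TO_XCLK, ref));
	return 0;
}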
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ae8b48205a6c..80979ed951eb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00a37400}
};
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a17730}
+};
+
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
+ case CHIP_OLAND:
+ chip_name = "OLAND";
+ rlc_chip_name = "OLAND";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
default: BUG();
}
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
- } else if (rdev->family == CHIP_VERDE) {
+ } else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_OLAND:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 6;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 2;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
}
/* Initialize HDP */
@@ -2106,154 +2212,275 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status, grbm_status2;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status2 = RREG32(GRBM_STATUS2);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
+ u32 reset_mask = 0;
+ u32 tmp;
-static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
- u32 grbm_reset = 0;
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ BCI_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+ /* GRBM_STATUS2 */
+ tmp = RREG32(GRBM_STATUS2);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_BCI |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
-
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
-}
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
-static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
- u32 tmp;
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- return;
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
- dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
if (reset_mask == 0)
- return 0;
+ return;
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
+ if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- si_gpu_soft_reset_gfx(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
if (reset_mask & RADEON_RESET_DMA)
- si_gpu_soft_reset_dma(rdev);
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ grbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
+ u32 reset_mask;
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ si_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* MC */
@@ -2855,19 +3082,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
@@ -2920,19 +3147,21 @@ void si_vm_fini(struct radeon_device *rdev)
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (SI).
*/
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -2943,11 +3172,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFE)
ndw = 0x3FFE;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+ ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1));
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2959,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -2972,9 +3201,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2986,8 +3215,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -3001,20 +3230,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
else
value = 0;
/* for physically contiguous pages (vram) */
- radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
- radeon_ring_write(ring, pe); /* dst addr */
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- radeon_ring_write(ring, r600_flags); /* mask */
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, value); /* value */
- radeon_ring_write(ring, upper_32_bits(value));
- radeon_ring_write(ring, incr); /* increment size */
- radeon_ring_write(ring, 0);
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
}
@@ -4378,14 +4609,14 @@ void si_fini(struct radeon_device *rdev)
}
/**
- * si_get_gpu_clock - return GPU clock counter snapshot
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
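(Illustrative sketch, not part of the patch.) The rewritten reset path above works in two stages: si_gpu_check_soft_reset() scans the status registers and turns every busy block into a RADEON_RESET_* flag, and si_gpu_soft_reset() then maps those flags onto GRBM/SRBM soft-reset bits. A toy standalone version of that pattern follows, using only the SRBM_STATUS2 DMA bits added for SI in this series; the DEMO_* flag and soft-reset values are invented for the sketch and do not match the driver's RADEON_RESET_* or SOFT_RESET_* definitions.

#include <stdio.h>

/* SRBM_STATUS2 busy bits as added for SI in this series */
#define DMA_BUSY             (1 << 5)
#define DMA1_BUSY            (1 << 6)

/* demo-only flags; the driver's RADEON_RESET_* values are different */
#define DEMO_RESET_DMA       (1 << 0)
#define DEMO_RESET_DMA1      (1 << 1)

/* demo-only stand-ins for SOFT_RESET_DMA / SOFT_RESET_DMA1 */
#define DEMO_SOFT_RESET_DMA  (1 << 20)
#define DEMO_SOFT_RESET_DMA1 (1 << 21)

/* stage 1: translate status-register bits into an engine reset mask */
static unsigned int demo_check_soft_reset(unsigned int srbm_status2)
{
	unsigned int reset_mask = 0;

	if (srbm_status2 & DMA_BUSY)
		reset_mask |= DEMO_RESET_DMA;
	if (srbm_status2 & DMA1_BUSY)
		reset_mask |= DEMO_RESET_DMA1;
	return reset_mask;
}

/* stage 2: translate the reset mask into SRBM_SOFT_RESET bits */
static unsigned int demo_srbm_soft_reset(unsigned int reset_mask)
{
	unsigned int srbm = 0;

	if (reset_mask & DEMO_RESET_DMA)
		srbm |= DEMO_SOFT_RESET_DMA;
	if (reset_mask & DEMO_RESET_DMA1)
		srbm |= DEMO_SOFT_RESET_DMA1;
	return srbm;
}

int main(void)
{
	unsigned int mask = demo_check_soft_reset(DMA1_BUSY);

	printf("reset_mask=0x%x srbm_soft_reset=0x%x\n",
	       mask, demo_srbm_soft_reset(mask));
	return 0;
}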
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c056aae814f0..23fc08fc8e7f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -58,9 +58,22 @@
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
+#define CG_CLKPIN_CNTL 0x660
+# define XTALIN_DIVIDE (1 << 1)
+#define CG_CLKPIN_CNTL_2 0x664
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_STATUS 0xE50
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -81,6 +94,10 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -783,16 +800,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -801,7 +809,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
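(Illustrative sketch, not part of the patch.) The sid.h hunk above drops the file-local copies of the PM4 packet macros in favour of the shared RADEON_-prefixed versions added to radeon.h earlier in this diff. The sketch below shows what those shift/mask macros do to a type-3 packet header, using the bit layout visible in the removed definitions (packet type in bits 31:30, dword count in 29:16, type-3 opcode in 15:8); the PKT_* names are local to the example.

#include <stdio.h>

#define PKT_GET_TYPE(h)     (((h) >> 30) & 3)
#define PKT_GET_COUNT(h)    (((h) >> 16) & 0x3FFF)
#define PKT3_GET_OPCODE(h)  (((h) >> 8) & 0xFF)
#define PKT3(op, n)         ((3u << 30) | \
			     (((op) & 0xFF) << 8) | \
			     (((n) & 0x3FFF) << 16))

#define PKT3_NOP            0x10   /* same value as RADEON_PACKET3_NOP above */

int main(void)
{
	unsigned int header = PKT3(PKT3_NOP, 0);

	/* prints: header=0xc0001000 type=3 opcode=0x10 count=0 */
	printf("header=0x%08x type=%u opcode=0x%02x count=%u\n",
	       header, PKT_GET_TYPE(header),
	       PKT3_GET_OPCODE(header), PKT_GET_COUNT(header));
	return 0;
}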
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index d1d5306ebf24..f6e0b5395051 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -313,9 +313,9 @@ static int shmob_drm_pm_resume(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- mutex_lock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_lock_all(sdev->ddev);
shmob_drm_crtc_resume(&sdev->crtc);
- mutex_unlock(&sdev->ddev->mode_config.mutex);
+ drm_modeset_unlock_all(sdev->ddev);
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 841065b998a1..5a5325e6b759 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -58,7 +58,6 @@ static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- idr_remove_all(&dev_priv->object_idr);
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 2b2f78c428af..9a43d98e5003 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -128,17 +128,10 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (retval)
goto fail_alloc;
-again:
- if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
- retval = -ENOMEM;
- goto fail_idr;
- }
-
- retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
- if (retval == -EAGAIN)
- goto again;
- if (retval)
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
goto fail_idr;
+ user_key = retval;
list_add(&item->owner_list, &file_priv->obj_list);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index be1daf7344d3..c92955df0658 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,6 +4,7 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select DRM_HDMI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b6679b36700f..de94707b9dbe 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -17,26 +17,257 @@
#include "drm.h"
#include "dc.h"
-struct tegra_dc_window {
- fixed20_12 x;
- fixed20_12 y;
- fixed20_12 w;
- fixed20_12 h;
- unsigned int outx;
- unsigned int outy;
- unsigned int outw;
- unsigned int outh;
- unsigned int stride;
- unsigned int fmt;
+struct tegra_plane {
+ struct drm_plane base;
+ unsigned int index;
};
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_plane, base);
+}
+
+static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_dc_window window;
+ unsigned int i;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = src_x >> 16;
+ window.src.y = src_y >> 16;
+ window.src.w = src_w >> 16;
+ window.src.h = src_h >> 16;
+ window.dst.x = crtc_x;
+ window.dst.y = crtc_y;
+ window.dst.w = crtc_w;
+ window.dst.h = crtc_h;
+ window.format = tegra_dc_format(fb->pixel_format);
+ window.bits_per_pixel = fb->bits_per_pixel;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+
+ window.base[i] = gem->paddr + fb->offsets[i];
+
+ /*
+ * Tegra doesn't support different strides for U and V planes,
+ * so we print an error if the user tries to display a
+ * framebuffer with such a configuration.
+ */
+ if (i >= 2) {
+ if (fb->pitches[i] != window.stride[1])
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ } else {
+ window.stride[i] = fb->pitches[i];
+ }
+ }
+
+ return tegra_dc_setup_window(dc, p->index, &window);
+}
+
+static int tegra_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long value;
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ tegra_plane_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs tegra_plane_funcs = {
+ .update_plane = tegra_plane_update,
+ .disable_plane = tegra_plane_disable,
+ .destroy = tegra_plane_destroy,
+};
+
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct tegra_plane *plane;
+
+ plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return -ENOMEM;
+
+ plane->index = 1 + i;
+
+ err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_plane_funcs, plane_formats,
+ ARRAY_SIZE(plane_formats), false);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+ unsigned long value;
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = fb->offsets[0] + y * fb->pitches[0] +
+ x * fb->bits_per_pixel / 8;
+
+ tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+void tegra_dc_enable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+void tegra_dc_disable_vblank(struct tegra_dc *dc)
+{
+ unsigned long value, flags;
+
+ spin_lock_irqsave(&dc->lock, flags);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value &= ~VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
+{
+ struct drm_device *drm = dc->base.dev;
+ struct drm_crtc *crtc = &dc->base;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags, base;
+
+ if (!dc->event)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+
+ /* check if new start address has been latched */
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+ if (base == gem->paddr + crtc->fb->offsets[0]) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, dc->pipe, dc->event);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+}
+
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (dc->event && dc->event->base.file_priv == file) {
+ dc->event->base.destroy(&dc->event->base);
+ drm_vblank_put(drm, dc->pipe);
+ dc->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_device *drm = crtc->dev;
+
+ if (dc->event)
+ return -EBUSY;
+
+ if (event) {
+ event->pipe = dc->pipe;
+ dc->event = event;
+ drm_vblank_get(drm, dc->pipe);
+ }
+
+ tegra_dc_set_base(dc, 0, 0, fb);
+ crtc->fb = fb;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
};
-static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct drm_device *drm = crtc->dev;
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ if (plane->crtc == crtc) {
+ tegra_plane_disable(plane);
+ plane->crtc = NULL;
+
+ if (plane->fb) {
+ drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ }
+ }
+ }
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -46,10 +277,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
unsigned int bpp)
{
fixed20_12 outf = dfixed_init(out);
+ fixed20_12 inf = dfixed_init(in);
u32 dda_inc;
int max;
@@ -79,9 +311,10 @@ static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
return dda_inc;
}
-static inline u32 compute_initial_dda(fixed20_12 in)
+static inline u32 compute_initial_dda(unsigned int in)
{
- return dfixed_frac(in);
+ fixed20_12 inf = dfixed_init(in);
+ return dfixed_frac(inf);
}
static int tegra_dc_set_timings(struct tegra_dc *dc,
@@ -152,18 +385,198 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
return 0;
}
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planar)
+ *planar = false;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ if (planar)
+ *planar = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window)
+{
+ unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+ unsigned long value;
+ bool yuv, planar;
+
+ /*
+ * For YUV planar modes, the number of bytes per pixel takes into
+ * account only the luma component and therefore is 1.
+ */
+ yuv = tegra_dc_format_is_yuv(window->format, &planar);
+ if (!yuv)
+ bpp = window->bits_per_pixel / 8;
+ else
+ bpp = planar ? 1 : 2;
+
+ value = WINDOW_A_SELECT << index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ h_offset = window->src.x * bpp;
+ v_offset = window->src.y;
+ h_size = window->src.w * bpp;
+ v_size = window->src.h;
+
+ value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ /*
+ * For DDA computations the number of bytes per pixel for YUV planar
+ * modes needs to take into account all Y, U and V components.
+ */
+ if (yuv && planar)
+ bpp = 2;
+
+ h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+ v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(window->src.x);
+ v_dda = compute_initial_dda(window->src.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+ if (yuv && planar) {
+ tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+ value = window->stride[1] << 16 | window->stride[0];
+ tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+ }
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (yuv) {
+ /* setup default colorspace conversion coefficients */
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+ value |= CSC_ENABLE;
+ } else if (window->bits_per_pixel < 24) {
+ value |= COLOR_EXPAND;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ /*
+ * Disable blending and assume Window A is the bottom-most window,
+ * Window C is the top-most window and Window B is in the middle.
+ */
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+ switch (index) {
+ case 0:
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+unsigned int tegra_dc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+
+ case DRM_FORMAT_RGB565:
+ return WIN_COLOR_DEPTH_B5G6R5;
+
+ case DRM_FORMAT_UYVY:
+ return WIN_COLOR_DEPTH_YCbCr422;
+
+ case DRM_FORMAT_YUV420:
+ return WIN_COLOR_DEPTH_YCbCr420P;
+
+ case DRM_FORMAT_YUV422:
+ return WIN_COLOR_DEPTH_YCbCr422P;
+
+ default:
+ break;
+ }
+
+ WARN(1, "unsupported pixel format %u, using default\n", format);
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned int h_dda, v_dda, bpp;
- struct tegra_dc_window win;
+ struct tegra_dc_window window;
unsigned long div, value;
int err;
+ drm_vblank_pre_modeset(crtc->dev, dc->pipe);
+
err = tegra_crtc_setup_clk(crtc, mode, &div);
if (err) {
dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -191,83 +604,33 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
/* setup window parameters */
- memset(&win, 0, sizeof(win));
- win.x.full = dfixed_const(0);
- win.y.full = dfixed_const(0);
- win.w.full = dfixed_const(mode->hdisplay);
- win.h.full = dfixed_const(mode->vdisplay);
- win.outx = 0;
- win.outy = 0;
- win.outw = mode->hdisplay;
- win.outh = mode->vdisplay;
-
- switch (crtc->fb->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- break;
-
- case DRM_FORMAT_RGB565:
- win.fmt = WIN_COLOR_DEPTH_B5G6R5;
- break;
-
- default:
- win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
- WARN_ON(1);
- break;
- }
-
- bpp = crtc->fb->bits_per_pixel / 8;
- win.stride = crtc->fb->pitches[0];
-
- /* program window registers */
- value = WINDOW_A_SELECT;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
-
- value = V_POSITION(win.outy) | H_POSITION(win.outx);
- tegra_dc_writel(dc, value, DC_WIN_POSITION);
-
- value = V_SIZE(win.outh) | H_SIZE(win.outw);
- tegra_dc_writel(dc, value, DC_WIN_SIZE);
-
- value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
- H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
- tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
-
- h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
- v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
-
- value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
- tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
-
- h_dda = compute_initial_dda(win.x);
- v_dda = compute_initial_dda(win.y);
-
- tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
- tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
-
- tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
- tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
-
- tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
- tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
- tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
- DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
-
- value = WIN_ENABLE;
-
- if (bpp < 24)
- value |= COLOR_EXPAND;
+ memset(&window, 0, sizeof(window));
+ window.src.x = 0;
+ window.src.y = 0;
+ window.src.w = mode->hdisplay;
+ window.src.h = mode->vdisplay;
+ window.dst.x = 0;
+ window.dst.y = 0;
+ window.dst.w = mode->hdisplay;
+ window.dst.h = mode->vdisplay;
+ window.format = tegra_dc_format(crtc->fb->pixel_format);
+ window.bits_per_pixel = crtc->fb->bits_per_pixel;
+ window.stride[0] = crtc->fb->pitches[0];
+ window.base[0] = gem->paddr;
+
+ err = tegra_dc_setup_window(dc, 0, &window);
+ if (err < 0)
+ dev_err(dc->dev, "failed to enable root plane\n");
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+ return 0;
+}
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
- tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
- return 0;
+ return tegra_dc_set_base(dc, x, y, crtc->fb);
}
static void tegra_crtc_prepare(struct drm_crtc *crtc)
@@ -314,31 +677,24 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long update_mask;
unsigned long value;
- update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
-
- tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+ value = GENERAL_UPDATE | WIN_A_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
- value |= FRAME_END_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+ drm_vblank_post_modeset(crtc->dev, dc->pipe);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -346,15 +702,16 @@ static void tegra_crtc_load_lut(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .dpms = tegra_crtc_dpms,
+ .disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
.mode_set = tegra_crtc_mode_set,
+ .mode_set_base = tegra_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.load_lut = tegra_crtc_load_lut,
};
-static irqreturn_t tegra_drm_irq(int irq, void *data)
+static irqreturn_t tegra_dc_irq(int irq, void *data)
{
struct tegra_dc *dc = data;
unsigned long status;
@@ -373,6 +730,7 @@ static irqreturn_t tegra_drm_irq(int irq, void *data)
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_handle_vblank(dc->base.dev, dc->pipe);
+ tegra_dc_finish_page_flip(dc);
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
@@ -587,7 +945,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
DUMP_REG(DC_WIN_BLEND_1WIN);
DUMP_REG(DC_WIN_BLEND_2WIN_X);
DUMP_REG(DC_WIN_BLEND_2WIN_Y);
- DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
DUMP_REG(DC_WINBUF_START_ADDR);
DUMP_REG(DC_WINBUF_START_ADDR_NS);
@@ -689,13 +1047,17 @@ static int tegra_dc_drm_init(struct host1x_client *client,
return err;
}
+ err = tegra_dc_add_planes(drm, dc);
+ if (err < 0)
+ return err;
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_dc_debugfs_init(dc, drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
- err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
dev_name(dc->dev), dc);
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
@@ -744,6 +1106,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
+ spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
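(Illustrative sketch, not part of the patch.) tegra_dc_setup_window() above sizes the source rectangle in bytes per pixel: RGB formats use bits_per_pixel / 8, packed YUV counts 2 bytes per pixel, and planar YUV counts only the luma byte for offsets and sizes but switches back to 2 for the DDA increments. A small standalone sketch of that selection follows; the DEMO_* format enum is a stand-in for the WIN_COLOR_DEPTH_* values.

#include <stdbool.h>
#include <stdio.h>

enum demo_format { DEMO_XRGB8888, DEMO_YUV422_PACKED, DEMO_YUV420_PLANAR };

static bool demo_format_is_yuv(enum demo_format f, bool *planar)
{
	switch (f) {
	case DEMO_YUV422_PACKED:
		*planar = false;
		return true;
	case DEMO_YUV420_PLANAR:
		*planar = true;
		return true;
	default:
		return false;
	}
}

static unsigned int demo_bytes_per_pixel(enum demo_format f,
					 unsigned int bits_per_pixel,
					 bool for_dda)
{
	bool planar = false;

	if (!demo_format_is_yuv(f, &planar))
		return bits_per_pixel / 8;
	/* offsets/sizes count only luma for planar YUV; DDA needs all components */
	if (planar)
		return for_dda ? 2 : 1;
	return 2;
}

int main(void)
{
	printf("XRGB: %u, packed YUV: %u, planar YUV: %u (for DDA: %u)\n",
	       demo_bytes_per_pixel(DEMO_XRGB8888, 32, false),
	       demo_bytes_per_pixel(DEMO_YUV422_PACKED, 16, false),
	       demo_bytes_per_pixel(DEMO_YUV420_PLANAR, 16, false),
	       demo_bytes_per_pixel(DEMO_YUV420_PLANAR, 16, true));
	return 0;
}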
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 99977b5d5c36..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -58,6 +58,8 @@
#define DC_CMD_SIGNAL_RAISE3 0x03e
#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX (1 << 0)
+#define WRITE_MUX (1 << 2)
#define DC_CMD_STATE_CONTROL 0x041
#define GENERAL_ACT_REQ (1 << 0)
@@ -290,8 +292,18 @@
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+
#define DC_WIN_WIN_OPTIONS 0x700
#define COLOR_EXPAND (1 << 6)
+#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
#define DC_WIN_BYTE_SWAP 0x701
@@ -359,7 +371,7 @@
#define DC_WIN_BLEND_1WIN 0x710
#define DC_WIN_BLEND_2WIN_X 0x711
#define DC_WIN_BLEND_2WIN_Y 0x712
-#define DC_WIN_BLEND32WIN_XY 0x713
+#define DC_WIN_BLEND_3WIN_XY 0x713
#define DC_WIN_HP_FETCH_CONTROL 0x714
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d980dc75788c..9d452df5bcad 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -39,6 +39,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
return err;
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ return err;
+
err = tegra_drm_fb_init(drm);
if (err < 0)
return err;
@@ -88,13 +92,112 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (dc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ /* TODO: implement real hardware counter using syncpoints */
+ return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!crtc)
+ return -ENODEV;
+
+ tegra_dc_enable_vblank(dc);
+
+ return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (crtc)
+ tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ tegra_dc_cancel_page_flip(crtc, file);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height, fb->depth,
+ fb->bits_per_pixel,
+ atomic_read(&fb->refcount.refcount));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
+ .preclose = tegra_drm_preclose,
.lastclose = tegra_drm_lastclose,
+ .get_vblank_counter = tegra_drm_get_vblank_counter,
+ .enable_vblank = tegra_drm_enable_vblank,
+ .disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+ .debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 741b5dc2742c..6dd75a2600eb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -18,16 +18,6 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fixed.h>
-struct tegra_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_cma_object *obj;
-};
-
-static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct tegra_framebuffer, base);
-}
-
struct host1x {
struct drm_device *drm;
struct device *dev;
@@ -44,7 +34,6 @@ struct host1x {
struct list_head clients;
struct drm_fbdev_cma *fbdev;
- struct tegra_framebuffer fb;
};
struct host1x_client;
@@ -75,6 +64,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ spinlock_t lock;
struct host1x *host1x;
struct device *dev;
@@ -94,6 +84,9 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
struct dentry *debugfs;
+
+ /* page-flip handling */
+ struct drm_pending_vblank_event *event;
};
static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
@@ -118,6 +111,34 @@ static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
return readl(dc->regs + (reg << 2));
}
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int format;
+ unsigned int stride[2];
+ unsigned long base[3];
+};
+
+/* from dc.c */
+extern unsigned int tegra_dc_format(uint32_t format);
+extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window);
+extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
+ struct drm_file *file);
+
struct tegra_output_ops {
int (*enable)(struct tegra_output *output);
int (*disable)(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 97993c6835fd..03914953cb1c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -39,10 +39,6 @@ int tegra_drm_fb_init(struct drm_device *drm)
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
- drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
host1x->fbdev = fbdev;
return 0;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d4f3fb9f0c29..bb747f6cd1a4 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -10,12 +10,15 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
+#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/clk/tegra.h>
+#include <drm/drm_edid.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
@@ -400,54 +403,65 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
return 0;
}
-static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
- unsigned int offset, u8 type,
- u8 version, void *data, size_t size)
+static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
{
- unsigned long value;
- u8 *ptr = data;
- u32 subpack[2];
+ unsigned long value = 0;
size_t i;
- u8 csum;
- /* first byte of data is the checksum */
- csum = type + version + size - 1;
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
- for (i = 1; i < size; i++)
- csum += ptr[i];
+ return value;
+}
- ptr[0] = 0x100 - csum;
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
+ size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ unsigned long value;
+ size_t i, j;
- value = INFOFRAME_HEADER_TYPE(type) |
- INFOFRAME_HEADER_VERSION(version) |
- INFOFRAME_HEADER_LEN(size - 1);
- tegra_hdmi_writel(hdmi, value, offset);
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
+ break;
- /* The audio inforame only has one set of subpack registers. The hdmi
- * block pads the rest of the data as per the spec so we have to fixup
- * the length before filling in the subpacks.
- */
- if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
- size = 6;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
- /* each subpack 7 bytes devided into:
- * subpack_low - bytes 0 - 3
- * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
- */
- for (i = 0; i < size; i++) {
- size_t index = i % 7;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
+ break;
+
+ default:
+ dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
+ }
+
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_hdmi_writel(hdmi, value, offset);
+ offset++;
- if (index == 0)
- memset(subpack, 0x0, sizeof(subpack));
+ /*
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
- ((u8 *)subpack)[index] = ptr[i];
+ value = tegra_hdmi_subpack(&ptr[i], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
- if (index == 6 || (i + 1 == size)) {
- unsigned int reg = offset + 1 + (i / 7) * 2;
+ num = min_t(size_t, rem - num, 3);
- tegra_hdmi_writel(hdmi, subpack[0], reg);
- tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
- }
+ value = tegra_hdmi_subpack(&ptr[i + 4], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
}
}
@@ -455,9 +469,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- unsigned int h_front_porch;
- unsigned int hsize = 16;
- unsigned int vsize = 9;
+ u8 buffer[17];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -465,69 +478,19 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
return;
}
- h_front_porch = mode->hsync_start - mode->hdisplay;
- memset(&frame, 0, sizeof(frame));
- frame.r = HDMI_AVI_R_SAME;
-
- switch (mode->vdisplay) {
- case 480:
- if (mode->hdisplay == 640) {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 1;
- } else {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 3;
- }
- break;
-
- case 576:
- if (((hsize * 10) / vsize) > 14) {
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 18;
- } else {
- frame.m = HDMI_AVI_M_4_3;
- frame.vic = 17;
- }
- break;
-
- case 720:
- case 1470: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- if (h_front_porch == 110)
- frame.vic = 4;
- else
- frame.vic = 19;
- break;
-
- case 1080:
- case 2205: /* stereo mode */
- frame.m = HDMI_AVI_M_16_9;
-
- switch (h_front_porch) {
- case 88:
- frame.vic = 16;
- break;
-
- case 528:
- frame.vic = 31;
- break;
-
- default:
- frame.vic = 32;
- break;
- }
- break;
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
- default:
- frame.m = HDMI_AVI_M_16_9;
- frame.vic = 0;
- break;
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
- &frame, sizeof(frame));
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
@@ -536,6 +499,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
if (hdmi->dvi) {
tegra_hdmi_writel(hdmi, 0,
@@ -543,14 +508,29 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
return;
}
- memset(&frame, 0, sizeof(frame));
- frame.cc = HDMI_AUDIO_CC_2;
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+ err);
+ return;
+ }
+
+ frame.channels = 2;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
+ err);
+ return;
+ }
- tegra_hdmi_write_infopack(hdmi,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
- HDMI_INFOFRAME_TYPE_AUDIO,
- HDMI_AUDIO_VERSION,
- &frame, sizeof(frame));
+ /*
+ * The audio infoframe has only one set of subpack registers, so the
+ * infoframe needs to be truncated. One set of subpack registers can
+ * contain 7 bytes. Including the 3 byte header only the first 10
+ * bytes can be programmed.
+ */
+ tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -558,8 +538,10 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
- struct hdmi_stereo_infoframe frame;
+ struct hdmi_vendor_infoframe frame;
unsigned long value;
+ u8 buffer[10];
+ ssize_t err;
if (!hdmi->stereo) {
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
@@ -569,22 +551,32 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
}
memset(&frame, 0, sizeof(frame));
- frame.regid0 = 0x03;
- frame.regid1 = 0x0c;
- frame.regid2 = 0x00;
- frame.hdmi_video_format = 2;
+
+ frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame.version = 0x01;
+ frame.length = 6;
+
+ frame.data[0] = 0x03; /* regid0 */
+ frame.data[1] = 0x0c; /* regid1 */
+ frame.data[2] = 0x00; /* regid2 */
+ frame.data[3] = 0x02 << 5; /* video format */
/* TODO: 74 MHz limit? */
if (1) {
- frame._3d_structure = 0;
+ frame.data[4] = 0x00 << 4; /* 3D structure */
} else {
- frame._3d_structure = 8;
- frame._3d_ext_data = 0;
+ frame.data[4] = 0x08 << 4; /* 3D structure */
+ frame.data[5] = 0x00 << 4; /* 3D ext. data */
+ }
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
+ err);
+ return;
}
- tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
- HDMI_INFOFRAME_TYPE_VENDOR,
- HDMI_VENDOR_VERSION, &frame, 6);
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 1477f36eb45a..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -10,195 +10,6 @@
#ifndef TEGRA_HDMI_H
#define TEGRA_HDMI_H 1
-#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
-#define HDMI_INFOFRAME_TYPE_AVI 0x82
-#define HDMI_INFOFRAME_TYPE_SPD 0x83
-#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
-#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
-#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
-
-/* all fields little endian */
-struct hdmi_avi_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned s:2; /* scan information */
- unsigned b:2; /* bar info data valid */
- unsigned a:1; /* active info present */
- unsigned y:2; /* RGB or YCbCr */
- unsigned res1:1;
-
- /* PB2 */
- unsigned r:4; /* active format aspect ratio */
- unsigned m:2; /* picture aspect ratio */
- unsigned c:2; /* colorimetry */
-
- /* PB3 */
- unsigned sc:2; /* scan information */
- unsigned q:2; /* quantization range */
- unsigned ec:3; /* extended colorimetry */
- unsigned itc:1; /* it content */
-
- /* PB4 */
- unsigned vic:7; /* video format id code */
- unsigned res4:1;
-
- /* PB5 */
- unsigned pr:4; /* pixel repetition factor */
- unsigned cn:2; /* it content type*/
- unsigned yq:2; /* ycc quantization range */
-
- /* PB6-7 */
- u16 top_bar_end_line;
-
- /* PB8-9 */
- u16 bot_bar_start_line;
-
- /* PB10-11 */
- u16 left_bar_end_pixel;
-
- /* PB12-13 */
- u16 right_bar_start_pixel;
-} __packed;
-
-#define HDMI_AVI_VERSION 0x02
-
-#define HDMI_AVI_Y_RGB 0x0
-#define HDMI_AVI_Y_YCBCR_422 0x1
-#define HDMI_AVI_Y_YCBCR_444 0x2
-
-#define HDMI_AVI_B_VERT 0x1
-#define HDMI_AVI_B_HORIZ 0x2
-
-#define HDMI_AVI_S_NONE 0x0
-#define HDMI_AVI_S_OVERSCAN 0x1
-#define HDMI_AVI_S_UNDERSCAN 0x2
-
-#define HDMI_AVI_C_NONE 0x0
-#define HDMI_AVI_C_SMPTE 0x1
-#define HDMI_AVI_C_ITU_R 0x2
-#define HDMI_AVI_C_EXTENDED 0x4
-
-#define HDMI_AVI_M_4_3 0x1
-#define HDMI_AVI_M_16_9 0x2
-
-#define HDMI_AVI_R_SAME 0x8
-#define HDMI_AVI_R_4_3_CENTER 0x9
-#define HDMI_AVI_R_16_9_CENTER 0xa
-#define HDMI_AVI_R_14_9_CENTER 0xb
-
-/* all fields little endian */
-struct hdmi_audio_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- unsigned cc:3; /* channel count */
- unsigned res1:1;
- unsigned ct:4; /* coding type */
-
- /* PB2 */
- unsigned ss:2; /* sample size */
- unsigned sf:3; /* sample frequency */
- unsigned res2:3;
-
- /* PB3 */
- unsigned cxt:5; /* coding extention type */
- unsigned res3:3;
-
- /* PB4 */
- u8 ca; /* channel/speaker allocation */
-
- /* PB5 */
- unsigned res5:3;
- unsigned lsv:4; /* level shift value */
- unsigned dm_inh:1; /* downmix inhibit */
-
- /* PB6-10 reserved */
- u8 res6;
- u8 res7;
- u8 res8;
- u8 res9;
- u8 res10;
-} __packed;
-
-#define HDMI_AUDIO_VERSION 0x01
-
-#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CC_2 0x1
-#define HDMI_AUDIO_CC_3 0x2
-#define HDMI_AUDIO_CC_4 0x3
-#define HDMI_AUDIO_CC_5 0x4
-#define HDMI_AUDIO_CC_6 0x5
-#define HDMI_AUDIO_CC_7 0x6
-#define HDMI_AUDIO_CC_8 0x7
-
-#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_CT_PCM 0x1
-#define HDMI_AUDIO_CT_AC3 0x2
-#define HDMI_AUDIO_CT_MPEG1 0x3
-#define HDMI_AUDIO_CT_MP3 0x4
-#define HDMI_AUDIO_CT_MPEG2 0x5
-#define HDMI_AUDIO_CT_AAC_LC 0x6
-#define HDMI_AUDIO_CT_DTS 0x7
-#define HDMI_AUDIO_CT_ATRAC 0x8
-#define HDMI_AUDIO_CT_DSD 0x9
-#define HDMI_AUDIO_CT_E_AC3 0xa
-#define HDMI_AUDIO_CT_DTS_HD 0xb
-#define HDMI_AUDIO_CT_MLP 0xc
-#define HDMI_AUDIO_CT_DST 0xd
-#define HDMI_AUDIO_CT_WMA_PRO 0xe
-#define HDMI_AUDIO_CT_CXT 0xf
-
-#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUIDO_SF_32K 0x1
-#define HDMI_AUDIO_SF_44_1K 0x2
-#define HDMI_AUDIO_SF_48K 0x3
-#define HDMI_AUDIO_SF_88_2K 0x4
-#define HDMI_AUDIO_SF_96K 0x5
-#define HDMI_AUDIO_SF_176_4K 0x6
-#define HDMI_AUDIO_SF_192K 0x7
-
-#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
-#define HDMI_AUDIO_SS_16BIT 0x1
-#define HDMI_AUDIO_SS_20BIT 0x2
-#define HDMI_AUDIO_SS_24BIT 0x3
-
-#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
-#define HDMI_AUDIO_CXT_HE_AAC 0x1
-#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
-#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
-
-/* all fields little endian */
-struct hdmi_stereo_infoframe {
- /* PB0 */
- u8 csum;
-
- /* PB1 */
- u8 regid0;
-
- /* PB2 */
- u8 regid1;
-
- /* PB3 */
- u8 regid2;
-
- /* PB4 */
- unsigned res1:5;
- unsigned hdmi_video_format:3;
-
- /* PB5 */
- unsigned res2:4;
- unsigned _3d_structure:4;
-
- /* PB6*/
- unsigned res3:4;
- unsigned _3d_ext_data:4;
-} __packed;
-
-#define HDMI_VENDOR_VERSION 0x01
-
/* register definitions */
#define HDMI_CTXSW 0x00
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
new file mode 100644
index 000000000000..d24d04013476
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_TILCDC
+ tristate "DRM Support for TI LCDC Display Controller"
+ depends on DRM && OF && ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select OF_VIDEOMODE
+ select OF_DISPLAY_TIMING
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ Choose this option if you have a TI SoC with LCDC display
+ controller, for example AM33xx in beagle-bone, DA8xx, or
+ OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
new file mode 100644
index 000000000000..deda656b10e7
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -0,0 +1,10 @@
+ccflags-y := -Iinclude/drm -Werror
+
+tilcdc-y := \
+ tilcdc_crtc.o \
+ tilcdc_tfp410.o \
+ tilcdc_slave.o \
+ tilcdc_panel.o \
+ tilcdc_drv.o
+
+obj-$(CONFIG_DRM_TILCDC) += tilcdc.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
new file mode 100644
index 000000000000..5dd3c7d031d5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+
+struct tilcdc_crtc {
+ struct drm_crtc base;
+
+ const struct tilcdc_panel_info *info;
+ uint32_t dirty;
+ dma_addr_t start, end;
+ struct drm_pending_vblank_event *event;
+ int dpms;
+ wait_queue_head_t frame_done_wq;
+ bool frame_done;
+
+ /* fb currently set to scanout 0/1: */
+ struct drm_framebuffer *scanout[2];
+
+ /* for deferred fb unref's: */
+ DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
+ struct work_struct work;
+};
+#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
+
+static void unref_worker(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work);
+ struct drm_device *dev = tilcdc_crtc->base.dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.mutex);
+ while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
+ drm_framebuffer_unreference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
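+/*
+ * Program the base/ceiling registers for DMA buffer <n> and hand the
+ * previously scanned out framebuffer to the unref worker (dropping the
+ * reference needs the mode_config mutex, which the worker takes).
+ */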
+static void set_scanout(struct drm_crtc *crtc, int n)
+{
+ static const uint32_t base_reg[] = {
+ LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG,
+ };
+ static const uint32_t ceil_reg[] = {
+ LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG,
+ };
+ static const uint32_t stat[] = {
+ LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
+ };
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ pm_runtime_get_sync(dev->dev);
+ tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
+ tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
+ if (tilcdc_crtc->scanout[n]) {
+ if (kfifo_put(&tilcdc_crtc->unref_fifo,
+ (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &tilcdc_crtc->work);
+ } else {
+ dev_err(dev->dev, "unref fifo full!\n");
+ drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
+ }
+ }
+ tilcdc_crtc->scanout[n] = crtc->fb;
+ drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
+ tilcdc_crtc->dirty &= ~stat[n];
+ pm_runtime_put_sync(dev->dev);
+}
+
+static void update_scanout(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct drm_gem_cma_object *gem;
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
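+ /* compute the DMA start/end addresses for the visible part of the fb: */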
+ tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
+ (crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+
+ tilcdc_crtc->end = tilcdc_crtc->start +
+ (crtc->mode.vdisplay * fb->pitches[0]);
+
+ if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
+ /* already enabled, so just mark the frames that need
+ * updating and they will be updated on vblank:
+ */
+ tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
+ drm_vblank_get(dev, 0);
+ } else {
+ /* not enabled yet, so update registers immediately: */
+ set_scanout(crtc, 0);
+ set_scanout(crtc, 1);
+ }
+}
+
+static void start(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ if (priv->rev == 2) {
+ tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ msleep(1);
+ }
+
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void stop(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
+
+ drm_crtc_cleanup(crtc);
+ WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
+ kfifo_free(&tilcdc_crtc->unref_fifo);
+ kfree(tilcdc_crtc);
+}
+
+static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ crtc->fb = fb;
+ tilcdc_crtc->event = event;
+ update_scanout(crtc);
+
+ return 0;
+}
+
+static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* we really only care about on or off: */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (tilcdc_crtc->dpms == mode)
+ return;
+
+ tilcdc_crtc->dpms = mode;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ pm_runtime_forbid(dev->dev);
+ start(crtc);
+ } else {
+ tilcdc_crtc->frame_done = false;
+ stop(crtc);
+
+ /* if necessary wait for framedone irq which will still come
+ * before putting things to sleep..
+ */
+ if (priv->rev == 2) {
+ int ret = wait_event_timeout(
+ tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "timeout waiting for framedone\n");
+ }
+ pm_runtime_allow(dev->dev);
+ }
+
+ pm_runtime_put_sync(dev->dev);
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void tilcdc_crtc_commit(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ const struct tilcdc_panel_info *info = tilcdc_crtc->info;
+ uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(crtc, mode);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev->dev);
+
+ /* Configure the Burst Size and fifo threshold of DMA: */
+ reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
+ switch (info->dma_burst_sz) {
+ case 1:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
+ break;
+ case 2:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
+ break;
+ case 4:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
+ break;
+ case 8:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
+ break;
+ case 16:
+ reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ reg |= (info->fifo_th << 8);
+ tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
+
+ /* Configure timings: */
+ hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
+ mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
+
+ /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+ reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
+ reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
+ LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
+ if (priv->rev == 2) {
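+ /* rev 2 carries the upper bits of hfp, hbp and hsw in this register: */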
+ reg |= (hfp & 0x300) >> 8;
+ reg |= (hbp & 0x300) >> 4;
+ reg |= (hsw & 0x3c0) << 21;
+ }
+ tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
+
+ reg = (((mode->hdisplay >> 4) - 1) << 4) |
+ ((hbp & 0xff) << 24) |
+ ((hfp & 0xff) << 16) |
+ ((hsw & 0x3f) << 10);
+ if (priv->rev == 2)
+ reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
+ tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
+
+ reg = ((mode->vdisplay - 1) & 0x3ff) |
+ ((vbp & 0xff) << 24) |
+ ((vfp & 0xff) << 16) |
+ ((vsw & 0x3f) << 10);
+ tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
+
+ /* Configure display type: */
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
+ ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
+ LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+ reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
+ if (info->tft_alt_mode)
+ reg |= LCDC_TFT_ALT_ENABLE;
+ if (priv->rev == 2) {
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
+ switch (bpp) {
+ case 16:
+ break;
+ case 32:
+ reg |= LCDC_V2_TFT_24BPP_UNPACK;
+ /* fallthrough */
+ case 24:
+ reg |= LCDC_V2_TFT_24BPP_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+ }
+ reg |= info->fdd << 12;
+ tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
+
+ if (info->invert_pxl_clk)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+
+ if (info->sync_ctrl)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+
+ if (info->sync_edge)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+
+ if (info->raster_order)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+ else
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+
+
+ update_scanout(crtc);
+ tilcdc_crtc_update_clk(crtc);
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ update_scanout(crtc);
+ return 0;
+}
+
+static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
+ .destroy = tilcdc_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = tilcdc_crtc_page_flip,
+};
+
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+ .dpms = tilcdc_crtc_dpms,
+ .mode_fixup = tilcdc_crtc_mode_fixup,
+ .prepare = tilcdc_crtc_prepare,
+ .commit = tilcdc_crtc_commit,
+ .mode_set = tilcdc_crtc_mode_set,
+ .mode_set_base = tilcdc_crtc_mode_set_base,
+ .load_lut = tilcdc_crtc_load_lut,
+};
+
+int tilcdc_crtc_max_width(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int max_width = 0;
+
+ if (priv->rev == 1)
+ max_width = 1024;
+ else if (priv->rev == 2)
+ max_width = 2048;
+
+ return max_width;
+}
+
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int bandwidth;
+
+ if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+ return MODE_VIRTUAL_X;
+
+ /* width must be multiple of 16 */
+ if (mode->hdisplay & 0xf)
+ return MODE_VIRTUAL_X;
+
+ if (mode->vdisplay > 2048)
+ return MODE_VIRTUAL_Y;
+
+ /* filter out modes that would require too much memory bandwidth: */
+ bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode);
+ if (bandwidth > priv->max_bandwidth)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ tilcdc_crtc->info = info;
+}
+
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int dpms = tilcdc_crtc->dpms;
+ unsigned int lcd_clk, div;
+ int ret;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ /* in raster mode, minimum divisor is 2: */
+ ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
+ if (ret) {
+ dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+ crtc->mode.clock);
+ goto out;
+ }
+
+ lcd_clk = clk_get_rate(priv->clk);
+ div = lcd_clk / (crtc->mode.clock * 1000);
+
+ DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
+ DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+
+ /* Configure the LCD clock divisor. */
+ tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+ LCDC_RASTER_MODE);
+
+ if (priv->rev == 2)
+ tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+ LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+ LCDC_V2_CORE_CLK_EN);
+
+ if (dpms == DRM_MODE_DPMS_ON)
+ tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+}
+
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ uint32_t stat = tilcdc_read_irqstatus(dev);
+
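+ /* sync lost plus FIFO underflow: restart the raster engine to recover: */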
+ if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
+ stop(crtc);
+ dev_err(dev->dev, "error: %08x\n", stat);
+ tilcdc_clear_irqstatus(dev, stat);
+ start(crtc);
+ } else if (stat & LCDC_PL_LOAD_DONE) {
+ tilcdc_clear_irqstatus(dev, stat);
+ } else {
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ uint32_t dirty = tilcdc_crtc->dirty & stat;
+
+ tilcdc_clear_irqstatus(dev, stat);
+
+ if (dirty & LCDC_END_OF_FRAME0)
+ set_scanout(crtc, 0);
+
+ if (dirty & LCDC_END_OF_FRAME1)
+ set_scanout(crtc, 1);
+
+ drm_handle_vblank(dev, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ tilcdc_crtc->event = NULL;
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (dirty && !tilcdc_crtc->dirty)
+ drm_vblank_put(dev, 0);
+ }
+
+ if (priv->rev == 2) {
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ }
+ tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = tilcdc_crtc->event;
+ if (event && event->base.file_priv == file) {
+ tilcdc_crtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_vblank_put(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+{
+ struct tilcdc_crtc *tilcdc_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+ if (!tilcdc_crtc) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ crtc = &tilcdc_crtc->base;
+
+ tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
+
+ ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate unref FIFO\n");
+ goto fail;
+ }
+
+ INIT_WORK(&tilcdc_crtc->work, unref_worker);
+
+ ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+ if (ret < 0)
+ goto fail;
+
+ drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ tilcdc_crtc_destroy(crtc);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
new file mode 100644
index 000000000000..c5b592dc1970
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* LCDC DRM driver, based on da8xx-fb */
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+#include "tilcdc_tfp410.h"
+#include "tilcdc_slave.h"
+#include "tilcdc_panel.h"
+
+#include "drm_fb_helper.h"
+
+static LIST_HEAD(module_list);
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs)
+{
+ mod->name = name;
+ mod->funcs = funcs;
+ INIT_LIST_HEAD(&mod->list);
+ list_add(&mod->list, &module_list);
+}
+
+void tilcdc_module_cleanup(struct tilcdc_module *mod)
+{
+ list_del(&mod->list);
+}
+
+static struct of_device_id tilcdc_of_match[];
+
+static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = tilcdc_fb_create,
+ .output_poll_changed = tilcdc_fb_output_poll_changed,
+};
+
+static int modeset_init(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod;
+
+ drm_mode_config_init(dev);
+
+ priv->crtc = tilcdc_crtc_create(dev);
+
+ list_for_each_entry(mod, &module_list, list) {
+ DBG("loading module: %s", mod->name);
+ mod->funcs->modeset_init(mod, dev);
+ }
+
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
+ return -ENXIO;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ
+static int cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct tilcdc_drm_private *priv = container_of(nb,
+ struct tilcdc_drm_private, freq_transition);
+ if (val == CPUFREQ_POSTCHANGE) {
+ if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc_update_clk(priv->crtc);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * DRM operations:
+ */
+
+static int tilcdc_unload(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_module *mod, *cur;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ if (priv->clk)
+ clk_put(priv->clk);
+
+ if (priv->mmio)
+ iounmap(priv->mmio);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ dev->dev_private = NULL;
+
+ pm_runtime_disable(dev->dev);
+
+ list_for_each_entry_safe(mod, cur, &module_list, list) {
+ DBG("destroying module: %s", mod->name);
+ mod->funcs->destroy(mod);
+ }
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct tilcdc_drm_private *priv;
+ struct resource *res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->disp_clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+#ifdef CONFIG_CPU_FREQ
+ priv->lcd_fck_rate = clk_get_rate(priv->clk);
+ priv->freq_transition.notifier_call = cpufreq_transition;
+ ret = cpufreq_register_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+ goto fail;
+ }
+#endif
+
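+ /* default to the bandwidth of a 1280x1024@60 mode if the DT omits it: */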
+ if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+ priv->max_bandwidth = 1280 * 1024 * 60;
+
+ pm_runtime_enable(dev->dev);
+
+ /* Determine LCD IP Version */
+ pm_runtime_get_sync(dev->dev);
+ switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ case 0x4c100102:
+ priv->rev = 1;
+ break;
+ case 0x4f200800:
+ case 0x4f201000:
+ priv->rev = 2;
+ break;
+ default:
+ dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(dev, LCDC_PID_REG));
+ priv->rev = 1;
+ break;
+ }
+
+ pm_runtime_put_sync(dev->dev);
+
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+ goto fail;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ priv->fbdev = drm_fbdev_cma_init(dev, 16,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ tilcdc_unload(dev);
+ return ret;
+}
+
+static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ tilcdc_crtc_cancel_page_flip(priv->crtc, file);
+}
+
+static void tilcdc_lastclose(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = arg;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return tilcdc_crtc_irq(priv->crtc);
+}
+
+static void tilcdc_irq_preinstall(struct drm_device *dev)
+{
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+}
+
+static int tilcdc_irq_postinstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* enable FIFO underflow irq: */
+ if (priv->rev == 1) {
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
+ } else {
+ tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+ }
+
+ return 0;
+}
+
+static void tilcdc_irq_uninstall(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* disable irqs that we might have enabled: */
+ if (priv->rev == 1) {
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
+ } else {
+ tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
+ LCDC_FRAME_DONE);
+ }
+
+}
+
+static void enable_vblank(struct drm_device *dev, bool enable)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ u32 reg, mask;
+
+ if (priv->rev == 1) {
+ reg = LCDC_DMA_CTRL_REG;
+ mask = LCDC_V1_END_OF_FRAME_INT_ENA;
+ } else {
+ reg = LCDC_INT_ENABLE_SET_REG;
+ mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
+ }
+
+ if (enable)
+ tilcdc_set(dev, reg, mask);
+ else
+ tilcdc_clear(dev, reg, mask);
+}
+
+static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, true);
+ return 0;
+}
+
+static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ enable_vblank(dev, false);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+static const struct {
+ const char *name;
+ uint8_t rev;
+ uint8_t save;
+ uint32_t reg;
+} registers[] = {
+#define REG(rev, save, reg) { #reg, rev, save, reg }
+ /* exists in revision 1: */
+ REG(1, false, LCDC_PID_REG),
+ REG(1, true, LCDC_CTRL_REG),
+ REG(1, false, LCDC_STAT_REG),
+ REG(1, true, LCDC_RASTER_CTRL_REG),
+ REG(1, true, LCDC_RASTER_TIMING_0_REG),
+ REG(1, true, LCDC_RASTER_TIMING_1_REG),
+ REG(1, true, LCDC_RASTER_TIMING_2_REG),
+ REG(1, true, LCDC_DMA_CTRL_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_0_REG),
+ REG(1, true, LCDC_DMA_FB_BASE_ADDR_1_REG),
+ REG(1, true, LCDC_DMA_FB_CEILING_ADDR_1_REG),
+ /* new in revision 2: */
+ REG(2, false, LCDC_RAW_STAT_REG),
+ REG(2, false, LCDC_MASKED_STAT_REG),
+ REG(2, false, LCDC_INT_ENABLE_SET_REG),
+ REG(2, false, LCDC_INT_ENABLE_CLR_REG),
+ REG(2, false, LCDC_END_OF_INT_IND_REG),
+ REG(2, true, LCDC_CLK_ENABLE_REG),
+ REG(2, true, LCDC_INT_ENABLE_SET_REG),
+#undef REG
+};
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tilcdc_regs_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned i;
+
+ pm_runtime_get_sync(dev->dev);
+
+ seq_printf(m, "revision: %d\n", priv->rev);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (priv->rev >= registers[i].rev)
+ seq_printf(m, "%s:\t %08x\n", registers[i].name,
+ tilcdc_read(dev, registers[i].reg));
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static int tilcdc_mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static struct drm_info_list tilcdc_debugfs_list[] = {
+ { "regs", tilcdc_regs_show, 0 },
+ { "mm", tilcdc_mm_show, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+static int tilcdc_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct tilcdc_module *mod;
+ int ret;
+
+ ret = drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_init)
+ mod->funcs->debugfs_init(mod, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct tilcdc_module *mod;
+ drm_debugfs_remove_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list), minor);
+
+ list_for_each_entry(mod, &module_list, list)
+ if (mod->funcs->debugfs_cleanup)
+ mod->funcs->debugfs_cleanup(mod, minor);
+}
+#endif
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver tilcdc_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = tilcdc_load,
+ .unload = tilcdc_unload,
+ .preclose = tilcdc_preclose,
+ .lastclose = tilcdc_lastclose,
+ .irq_handler = tilcdc_irq,
+ .irq_preinstall = tilcdc_irq_preinstall,
+ .irq_postinstall = tilcdc_irq_postinstall,
+ .irq_uninstall = tilcdc_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = tilcdc_enable_vblank,
+ .disable_vblank = tilcdc_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = tilcdc_debugfs_init,
+ .debugfs_cleanup = tilcdc_debugfs_cleanup,
+#endif
+ .fops = &fops,
+ .name = "tilcdc",
+ .desc = "TI LCD Controller DRM",
+ .date = "20121205",
+ .major = 1,
+ .minor = 0,
+};
+
+/*
+ * Power management:
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int tilcdc_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ drm_kms_helper_poll_disable(ddev);
+
+ /* Save register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
+
+ return 0;
+}
+
+static int tilcdc_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ unsigned i, n = 0;
+
+ /* Restore register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save && (priv->rev >= registers[i].rev))
+ tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tilcdc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int tilcdc_pdev_probe(struct platform_device *pdev)
+{
+ /* bail out early if no DT data: */
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ return drm_platform_init(&tilcdc_driver, pdev);
+}
+
+static int tilcdc_pdev_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&tilcdc_driver, pdev);
+
+ return 0;
+}
+
+static struct of_device_id tilcdc_of_match[] = {
+ { .compatible = "ti,am33xx-tilcdc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+
+static struct platform_driver tilcdc_platform_driver = {
+ .probe = tilcdc_pdev_probe,
+ .remove = tilcdc_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tilcdc",
+ .pm = &tilcdc_pm_ops,
+ .of_match_table = tilcdc_of_match,
+ },
+};
+
+static int __init tilcdc_drm_init(void)
+{
+ DBG("init");
+ tilcdc_tfp410_init();
+ tilcdc_slave_init();
+ tilcdc_panel_init();
+ return platform_driver_register(&tilcdc_platform_driver);
+}
+
+static void __exit tilcdc_drm_fini(void)
+{
+ DBG("fini");
+ tilcdc_tfp410_fini();
+ tilcdc_slave_fini();
+ tilcdc_panel_fini();
+ platform_driver_unregister(&tilcdc_platform_driver);
+}
+
+late_initcall(tilcdc_drm_init);
+module_exit(tilcdc_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
new file mode 100644
index 000000000000..8242b5a4307b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_DRV_H__
+#define __TILCDC_DRV_H__
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/list.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+struct tilcdc_drm_private {
+ void __iomem *mmio;
+
+ struct clk *disp_clk; /* display dpll */
+ struct clk *clk; /* functional clock */
+ int rev; /* IP revision */
+
+ /* don't attempt resolutions w/ higher W * H * Hz: */
+ uint32_t max_bandwidth;
+
+ /* register contents saved across suspend/resume: */
+ u32 saved_register[12];
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+ unsigned int lcd_fck_rate;
+#endif
+
+ struct workqueue_struct *wq;
+
+ struct drm_fbdev_cma *fbdev;
+
+ struct drm_crtc *crtc;
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+};
+
+/* Sub-module for display. Since we don't know at compile time what panels
+ * or display adapter(s) might be present (for ex, off chip dvi/tfp410,
+ * hdmi encoder, various lcd panels), the connector/encoder(s) are split into
+ * separate drivers. If they are probed and found to be present, they
+ * register themselves with tilcdc_register_module().
+ */
+struct tilcdc_module;
+
+struct tilcdc_module_ops {
+ /* create appropriate encoders/connectors: */
+ int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
+ void (*destroy)(struct tilcdc_module *mod);
+#ifdef CONFIG_DEBUG_FS
+ /* create debugfs nodes (can be NULL): */
+ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
+ /* cleanup debugfs nodes (can be NULL): */
+ void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
+#endif
+};
+
+struct tilcdc_module {
+ const char *name;
+ struct list_head list;
+ const struct tilcdc_module_ops *funcs;
+};
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+ const struct tilcdc_module_ops *funcs);
+void tilcdc_module_cleanup(struct tilcdc_module *mod);
+
+
+/* Panel config that needs to be set in the crtc, but is not coming from
+ * the mode timings. The display module is expected to call
+ * tilcdc_crtc_set_panel_info() to set this during modeset.
+ */
+struct tilcdc_panel_info {
+
+ /* AC Bias Pin Frequency */
+ uint32_t ac_bias;
+
+ /* AC Bias Pin Transitions per Interrupt */
+ uint32_t ac_bias_intrpt;
+
+ /* DMA burst size */
+ uint32_t dma_burst_sz;
+
+ /* Bits per pixel */
+ uint32_t bpp;
+
+ /* FIFO DMA Request Delay */
+ uint32_t fdd;
+
+ /* TFT Alternative Signal Mapping (Only for active) */
+ bool tft_alt_mode;
+
+ /* Invert pixel clock */
+ bool invert_pxl_clk;
+
+ /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
+ uint32_t sync_edge;
+
+ /* Horizontal and Vertical Sync Control: 0=ignore */
+ uint32_t sync_ctrl;
+
+ /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+ uint32_t raster_order;
+
+ /* DMA FIFO threshold */
+ uint32_t fifo_th;
+};
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+ const struct tilcdc_panel_info *info);
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
+int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+
+#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
new file mode 100644
index 000000000000..580b74e2022b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/backlight.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include "tilcdc_drv.h"
+
+struct panel_module {
+ struct tilcdc_module base;
+ struct tilcdc_panel_info *info;
+ struct display_timings *timings;
+ struct backlight_device *backlight;
+};
+#define to_panel_module(x) container_of(x, struct panel_module, base)
+
+
+/*
+ * Encoder:
+ */
+
+struct panel_encoder {
+ struct drm_encoder base;
+ struct panel_module *mod;
+};
+#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
+
+
+static void panel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(panel_encoder);
+}
+
+static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ struct backlight_device *backlight = panel_encoder->mod->backlight;
+
+ if (!backlight)
+ return;
+
+ backlight->props.power = mode == DRM_MODE_DPMS_ON
+ ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(backlight);
+}
+
+static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void panel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
+}
+
+static void panel_encoder_commit(struct drm_encoder *encoder)
+{
+ panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void panel_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs panel_encoder_funcs = {
+ .destroy = panel_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
+ .dpms = panel_encoder_dpms,
+ .mode_fixup = panel_encoder_mode_fixup,
+ .prepare = panel_encoder_prepare,
+ .commit = panel_encoder_commit,
+ .mode_set = panel_encoder_mode_set,
+};
+
+static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
+ struct panel_module *mod)
+{
+ struct panel_encoder *panel_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+ if (!panel_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_encoder->mod = mod;
+
+ encoder = &panel_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ panel_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct panel_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct panel_module *mod;
+};
+#define to_panel_connector(x) container_of(x, struct panel_connector, base)
+
+
+static void panel_connector_destroy(struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(panel_connector);
+}
+
+static enum drm_connector_status panel_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static int panel_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ struct display_timings *timings = panel_connector->mod->timings;
+ int i;
+
+ for (i = 0; i < timings->num_timings; i++) {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct videomode vm;
+
+ if (videomode_from_timing(timings, &vm, i))
+ break;
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (timings->native_mode == i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int panel_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *panel_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct panel_connector *panel_connector = to_panel_connector(connector);
+ return panel_connector->encoder;
+}
+
+static const struct drm_connector_funcs panel_connector_funcs = {
+ .destroy = panel_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
+ .get_modes = panel_connector_get_modes,
+ .mode_valid = panel_connector_mode_valid,
+ .best_encoder = panel_connector_best_encoder,
+};
+
+static struct drm_connector *panel_connector_create(struct drm_device *dev,
+ struct panel_module *mod, struct drm_encoder *encoder)
+{
+ struct panel_connector *panel_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+ if (!panel_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ panel_connector->encoder = encoder;
+ panel_connector->mod = mod;
+
+ connector = &panel_connector->base;
+
+ drm_connector_init(dev, connector, &panel_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &panel_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ panel_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = panel_encoder_create(dev, panel_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = panel_connector_create(dev, panel_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void panel_destroy(struct tilcdc_module *mod)
+{
+ struct panel_module *panel_mod = to_panel_module(mod);
+
+ if (panel_mod->timings) {
+ display_timings_release(panel_mod->timings);
+ kfree(panel_mod->timings);
+ }
+
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod->info);
+ kfree(panel_mod);
+}
+
+static const struct tilcdc_module_ops panel_module_ops = {
+ .modeset_init = panel_modeset_init,
+ .destroy = panel_destroy,
+};
+
+/*
+ * Device:
+ */
+
+/* maybe move this somewhere common if it is needed by other outputs? */
+static struct tilcdc_panel_info * of_get_panel_info(struct device_node *np)
+{
+ struct device_node *info_np;
+ struct tilcdc_panel_info *info;
+ int ret = 0;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", __func__);
+ return NULL;
+ }
+
+ info_np = of_get_child_by_name(np, "panel-info");
+ if (!info_np) {
+ pr_err("%s: could not find panel-info node\n", __func__);
+ return NULL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: allocation failed\n", __func__);
+ return NULL;
+ }
+
+ ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
+ ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
+ ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
+ ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
+ ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
+ ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
+ ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
+ ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
+ ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
+
+ /* optional: */
+ info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
+ info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
+
+ if (ret) {
+ pr_err("%s: error reading panel-info properties\n", __func__);
+ kfree(info);
+ return NULL;
+ }
+
+ return info;
+}
+
+static struct of_device_id panel_of_match[];
+
+static int panel_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct panel_module *panel_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ int ret = -EINVAL;
+
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
+ if (!panel_mod)
+ return -ENOMEM;
+
+ mod = &panel_mod->base;
+
+ tilcdc_module_init(mod, "panel", &panel_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+
+ panel_mod->timings = of_get_display_timings(node);
+ if (!panel_mod->timings) {
+ dev_err(&pdev->dev, "could not get panel timings\n");
+ goto fail;
+ }
+
+ panel_mod->info = of_get_panel_info(node);
+ if (!panel_mod->info) {
+ dev_err(&pdev->dev, "could not get panel info\n");
+ goto fail;
+ }
+
+ panel_mod->backlight = of_find_backlight_by_node(node);
+ if (panel_mod->backlight)
+ dev_info(&pdev->dev, "found backlight\n");
+
+ return 0;
+
+fail:
+ panel_destroy(mod);
+ return ret;
+}
+
+static int panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id panel_of_match[] = {
+ { .compatible = "ti,tilcdc,panel", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+struct platform_driver panel_driver = {
+ .probe = panel_probe,
+ .remove = panel_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "panel",
+ .of_match_table = panel_of_match,
+ },
+};
+
+int __init tilcdc_panel_init(void)
+{
+ return platform_driver_register(&panel_driver);
+}
+
+void __exit tilcdc_panel_fini(void)
+{
+ platform_driver_unregister(&panel_driver);
+}
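
of_get_panel_info() above gathers the mandatory panel-info properties by OR-ing the return codes of consecutive of_property_read_u32() calls, so a single check at the end catches any missing property. A minimal standalone sketch of that pattern (the node layout, struct and function names here are illustrative, not part of the driver):

        #include <linux/of.h>
        #include <linux/slab.h>

        struct demo_info {              /* hypothetical stand-in for tilcdc_panel_info */
                u32 ac_bias;
                u32 bpp;
        };

        static struct demo_info *demo_parse_info(struct device_node *np)
        {
                struct demo_info *info;
                int ret = 0;

                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return NULL;

                /* each call returns 0 on success, so any failure leaves ret non-zero */
                ret |= of_property_read_u32(np, "ac-bias", &info->ac_bias);
                ret |= of_property_read_u32(np, "bpp", &info->bpp);

                if (ret) {              /* at least one mandatory property was missing */
                        kfree(info);
                        return NULL;
                }
                return info;
        }
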
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.h b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
new file mode 100644
index 000000000000..7db40aacc74a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_PANEL_H__
+#define __TILCDC_PANEL_H__
+
+/* sub-module for generic lcd panel output */
+
+int tilcdc_panel_init(void);
+void tilcdc_panel_fini(void);
+
+#endif /* __TILCDC_PANEL_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
new file mode 100644
index 000000000000..17fd1b45428a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_REGS_H__
+#define __TILCDC_REGS_H__
+
+/* LCDC register definitions, based on da8xx-fb */
+
+#include <linux/bitops.h>
+
+#include "tilcdc_drv.h"
+
+/* LCDC Status Register */
+#define LCDC_END_OF_FRAME1 BIT(9)
+#define LCDC_END_OF_FRAME0 BIT(8)
+#define LCDC_PL_LOAD_DONE BIT(6)
+#define LCDC_FIFO_UNDERFLOW BIT(5)
+#define LCDC_SYNC_LOST BIT(2)
+#define LCDC_FRAME_DONE BIT(0)
+
+/* LCDC DMA Control Register */
+#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_1 0x0
+#define LCDC_DMA_BURST_2 0x1
+#define LCDC_DMA_BURST_4 0x2
+#define LCDC_DMA_BURST_8 0x3
+#define LCDC_DMA_BURST_16 0x4
+#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
+#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
+#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
+#define LCDC_DUAL_FRAME_BUFFER_ENABLE BIT(0)
+
+/* LCDC Control Register */
+#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_RASTER_MODE 0x01
+
+/* LCDC Raster Control Register */
+#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define PALETTE_AND_DATA 0x00
+#define PALETTE_ONLY 0x01
+#define DATA_ONLY 0x02
+
+#define LCDC_MONO_8BIT_MODE BIT(9)
+#define LCDC_RASTER_ORDER BIT(8)
+#define LCDC_TFT_MODE BIT(7)
+#define LCDC_V1_UNDERFLOW_INT_ENA BIT(6)
+#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
+#define LCDC_V1_PL_INT_ENA BIT(4)
+#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_MONOCHROME_MODE BIT(1)
+#define LCDC_RASTER_ENABLE BIT(0)
+#define LCDC_TFT_ALT_ENABLE BIT(23)
+#define LCDC_STN_565_ENABLE BIT(24)
+#define LCDC_V2_DMA_CLK_EN BIT(2)
+#define LCDC_V2_LIDD_CLK_EN BIT(1)
+#define LCDC_V2_CORE_CLK_EN BIT(0)
+#define LCDC_V2_LPP_B10 26
+#define LCDC_V2_TFT_24BPP_MODE BIT(25)
+#define LCDC_V2_TFT_24BPP_UNPACK BIT(26)
+
+/* LCDC Raster Timing 2 Register */
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_SYNC_CTRL BIT(25)
+#define LCDC_SYNC_EDGE BIT(24)
+#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
+#define LCDC_INVERT_HSYNC BIT(21)
+#define LCDC_INVERT_VSYNC BIT(20)
+
+/* LCDC Block */
+#define LCDC_PID_REG 0x0
+#define LCDC_CTRL_REG 0x4
+#define LCDC_STAT_REG 0x8
+#define LCDC_RASTER_CTRL_REG 0x28
+#define LCDC_RASTER_TIMING_0_REG 0x2c
+#define LCDC_RASTER_TIMING_1_REG 0x30
+#define LCDC_RASTER_TIMING_2_REG 0x34
+#define LCDC_DMA_CTRL_REG 0x40
+#define LCDC_DMA_FB_BASE_ADDR_0_REG 0x44
+#define LCDC_DMA_FB_CEILING_ADDR_0_REG 0x48
+#define LCDC_DMA_FB_BASE_ADDR_1_REG 0x4c
+#define LCDC_DMA_FB_CEILING_ADDR_1_REG 0x50
+
+/* Interrupt Registers available only in Version 2 */
+#define LCDC_RAW_STAT_REG 0x58
+#define LCDC_MASKED_STAT_REG 0x5c
+#define LCDC_INT_ENABLE_SET_REG 0x60
+#define LCDC_INT_ENABLE_CLR_REG 0x64
+#define LCDC_END_OF_INT_IND_REG 0x68
+
+/* Clock registers available only in Version 2 */
+#define LCDC_CLK_ENABLE_REG 0x6c
+#define LCDC_CLK_RESET_REG 0x70
+#define LCDC_CLK_MAIN_RESET BIT(3)
+
+
+/*
+ * Helpers:
+ */
+
+static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ iowrite32(data, priv->mmio + reg);
+}
+
+static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return ioread32(priv->mmio + reg);
+}
+
+static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
+}
+
+static inline void tilcdc_clear(struct drm_device *dev, u32 reg, u32 mask)
+{
+ tilcdc_write(dev, reg, tilcdc_read(dev, reg) & ~mask);
+}
+
+/* the register to read/clear irqstatus differs between v1 and v2 of the IP */
+static inline u32 tilcdc_irqstatus_reg(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ return (priv->rev == 2) ? LCDC_MASKED_STAT_REG : LCDC_STAT_REG;
+}
+
+static inline u32 tilcdc_read_irqstatus(struct drm_device *dev)
+{
+ return tilcdc_read(dev, tilcdc_irqstatus_reg(dev));
+}
+
+static inline void tilcdc_clear_irqstatus(struct drm_device *dev, u32 mask)
+{
+ tilcdc_write(dev, tilcdc_irqstatus_reg(dev), mask);
+}
+
+#endif /* __TILCDC_REGS_H__ */
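
All register access in the driver is expected to go through the inline helpers above rather than touching priv->mmio directly; tilcdc_set()/tilcdc_clear() give read-modify-write semantics and the irqstatus helpers hide the v1/v2 register difference. A hedged sketch of how a caller might use them (the function below is illustrative, not part of this patch, and assumes tilcdc_drv.h and tilcdc_regs.h are included):

        /* illustrative only -- not a function from this patch */
        static void demo_raster_enable(struct drm_device *dev, bool on)
        {
                if (on)
                        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
                else
                        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

                /* ack whatever is pending by writing the status bits back */
                tilcdc_clear_irqstatus(dev, tilcdc_read_irqstatus(dev));
        }
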
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
new file mode 100644
index 000000000000..568dc1c08e6c
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "tilcdc_drv.h"
+
+struct slave_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+};
+#define to_slave_module(x) container_of(x, struct slave_module, base)
+
+static const struct tilcdc_panel_info slave_info = {
+ .bpp = 16,
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+
+/*
+ * Encoder:
+ */
+
+struct slave_encoder {
+ struct drm_encoder_slave base;
+ struct slave_module *mod;
+};
+#define to_slave_encoder(x) container_of(to_encoder_slave(x), struct slave_encoder, base)
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
+static void slave_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct slave_encoder *slave_encoder = to_slave_encoder(encoder);
+ if (get_slave_funcs(encoder))
+ get_slave_funcs(encoder)->destroy(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(slave_encoder);
+}
+
+static void slave_encoder_prepare(struct drm_encoder *encoder)
+{
+ drm_i2c_encoder_prepare(encoder);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
+}
+
+static const struct drm_encoder_funcs slave_encoder_funcs = {
+ .destroy = slave_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
+ .dpms = drm_i2c_encoder_dpms,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .prepare = slave_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .save = drm_i2c_encoder_save,
+ .restore = drm_i2c_encoder_restore,
+};
+
+static const struct i2c_board_info info = {
+ I2C_BOARD_INFO("tda998x", 0x70)
+};
+
+static struct drm_encoder *slave_encoder_create(struct drm_device *dev,
+ struct slave_module *mod)
+{
+ struct slave_encoder *slave_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ slave_encoder = kzalloc(sizeof(*slave_encoder), GFP_KERNEL);
+ if (!slave_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_encoder->mod = mod;
+
+ encoder = &slave_encoder->base.base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &slave_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &slave_encoder_helper_funcs);
+
+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), mod->i2c, &info);
+ if (ret)
+ goto fail;
+
+ return encoder;
+
+fail:
+ slave_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct slave_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct slave_module *mod;
+};
+#define to_slave_connector(x) container_of(x, struct slave_connector, base)
+
+static void slave_connector_destroy(struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(slave_connector);
+}
+
+static enum drm_connector_status slave_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+
+static int slave_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->get_modes(encoder, connector);
+}
+
+static int slave_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ int ret;
+
+ ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
+ if (ret != MODE_OK)
+ return ret;
+
+ return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+}
+
+static struct drm_encoder *slave_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct slave_connector *slave_connector = to_slave_connector(connector);
+ return slave_connector->encoder;
+}
+
+static int slave_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+ return get_slave_funcs(encoder)->set_property(encoder,
+ connector, property, value);
+}
+
+static const struct drm_connector_funcs slave_connector_funcs = {
+ .destroy = slave_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = slave_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = slave_connector_set_property,
+};
+
+static const struct drm_connector_helper_funcs slave_connector_helper_funcs = {
+ .get_modes = slave_connector_get_modes,
+ .mode_valid = slave_connector_mode_valid,
+ .best_encoder = slave_connector_best_encoder,
+};
+
+static struct drm_connector *slave_connector_create(struct drm_device *dev,
+ struct slave_module *mod, struct drm_encoder *encoder)
+{
+ struct slave_connector *slave_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ slave_connector = kzalloc(sizeof(*slave_connector), GFP_KERNEL);
+ if (!slave_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ slave_connector->encoder = encoder;
+ slave_connector->mod = mod;
+
+ connector = &slave_connector->base;
+
+ drm_connector_init(dev, connector, &slave_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &slave_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ get_slave_funcs(encoder)->create_resources(encoder, connector);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ slave_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = slave_encoder_create(dev, slave_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = slave_connector_create(dev, slave_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void slave_destroy(struct tilcdc_module *mod)
+{
+ struct slave_module *slave_mod = to_slave_module(mod);
+
+ tilcdc_module_cleanup(mod);
+ kfree(slave_mod);
+}
+
+static const struct tilcdc_module_ops slave_module_ops = {
+ .modeset_init = slave_modeset_init,
+ .destroy = slave_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id slave_of_match[];
+
+static int slave_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct slave_module *slave_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
+ if (!slave_mod)
+ return -ENOMEM;
+
+ mod = &slave_mod->base;
+
+ tilcdc_module_init(mod, "slave", &slave_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!slave_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ return 0;
+
+fail:
+ slave_destroy(mod);
+ return ret;
+}
+
+static int slave_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id slave_of_match[] = {
+ { .compatible = "ti,tilcdc,slave", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slave_of_match);
+
+struct platform_driver slave_driver = {
+ .probe = slave_probe,
+ .remove = slave_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "slave",
+ .of_match_table = slave_of_match,
+ },
+};
+
+int __init tilcdc_slave_init(void)
+{
+ return platform_driver_register(&slave_driver);
+}
+
+void __exit tilcdc_slave_fini(void)
+{
+ platform_driver_unregister(&slave_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.h b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
new file mode 100644
index 000000000000..2f8504848320
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_SLAVE_H__
+#define __TILCDC_SLAVE_H__
+
+/* sub-module for i2c slave encoder output */
+
+int tilcdc_slave_init(void);
+void tilcdc_slave_fini(void);
+
+#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
new file mode 100644
index 000000000000..58d487ba2414
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "tilcdc_drv.h"
+
+struct tfp410_module {
+ struct tilcdc_module base;
+ struct i2c_adapter *i2c;
+ int gpio;
+};
+#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
+
+
+static const struct tilcdc_panel_info dvi_info = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
+/*
+ * Encoder:
+ */
+
+struct tfp410_encoder {
+ struct drm_encoder base;
+ struct tfp410_module *mod;
+ int dpms;
+};
+#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
+
+
+static void tfp410_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(tfp410_encoder);
+}
+
+static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+
+ if (tfp410_encoder->dpms == mode)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ DBG("Power on");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 1);
+ } else {
+ DBG("Power off");
+ gpio_direction_output(tfp410_encoder->mod->gpio, 0);
+ }
+
+ tfp410_encoder->dpms = mode;
+}
+
+static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+ return true;
+}
+
+static void tfp410_encoder_prepare(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
+}
+
+static void tfp410_encoder_commit(struct drm_encoder *encoder)
+{
+ tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* nothing needed */
+}
+
+static const struct drm_encoder_funcs tfp410_encoder_funcs = {
+ .destroy = tfp410_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
+ .dpms = tfp410_encoder_dpms,
+ .mode_fixup = tfp410_encoder_mode_fixup,
+ .prepare = tfp410_encoder_prepare,
+ .commit = tfp410_encoder_commit,
+ .mode_set = tfp410_encoder_mode_set,
+};
+
+static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
+ struct tfp410_module *mod)
+{
+ struct tfp410_encoder *tfp410_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+ if (!tfp410_encoder) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
+ tfp410_encoder->mod = mod;
+
+ encoder = &tfp410_encoder->base;
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret < 0)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ tfp410_encoder_destroy(encoder);
+ return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct tfp410_connector {
+ struct drm_connector base;
+
+ struct drm_encoder *encoder; /* our connected encoder */
+ struct tfp410_module *mod;
+};
+#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
+
+
+static void tfp410_connector_destroy(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_connector_cleanup(connector);
+ kfree(tfp410_connector);
+}
+
+static enum drm_connector_status tfp410_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+
+ if (drm_probe_ddc(tfp410_connector->mod->i2c))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static int tfp410_connector_get_modes(struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int tfp410_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tilcdc_drm_private *priv = connector->dev->dev_private;
+ /* our only constraints are what the crtc can generate: */
+ return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *tfp410_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ return tfp410_connector->encoder;
+}
+
+static const struct drm_connector_funcs tfp410_connector_funcs = {
+ .destroy = tfp410_connector_destroy,
+ .dpms = drm_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
+ .get_modes = tfp410_connector_get_modes,
+ .mode_valid = tfp410_connector_mode_valid,
+ .best_encoder = tfp410_connector_best_encoder,
+};
+
+static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
+ struct tfp410_module *mod, struct drm_encoder *encoder)
+{
+ struct tfp410_connector *tfp410_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+ if (!tfp410_connector) {
+ dev_err(dev->dev, "allocation failed\n");
+ return NULL;
+ }
+
+ tfp410_connector->encoder = encoder;
+ tfp410_connector->mod = mod;
+
+ connector = &tfp410_connector->base;
+
+ drm_connector_init(dev, connector, &tfp410_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+ drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret)
+ goto fail;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ tfp410_connector_destroy(connector);
+ return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ encoder = tfp410_encoder_create(dev, tfp410_mod);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = tfp410_connector_create(dev, tfp410_mod, encoder);
+ if (!connector)
+ return -ENOMEM;
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
+
+ return 0;
+}
+
+static void tfp410_destroy(struct tilcdc_module *mod)
+{
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+ if (tfp410_mod->i2c)
+ i2c_put_adapter(tfp410_mod->i2c);
+
+ if (!IS_ERR_VALUE(tfp410_mod->gpio))
+ gpio_free(tfp410_mod->gpio);
+
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
+}
+
+static const struct tilcdc_module_ops tfp410_module_ops = {
+ .modeset_init = tfp410_modeset_init,
+ .destroy = tfp410_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id tfp410_of_match[];
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *i2c_node;
+ struct tfp410_module *tfp410_mod;
+ struct tilcdc_module *mod;
+ struct pinctrl *pinctrl;
+ uint32_t i2c_phandle;
+ int ret = -EINVAL;
+
+ /* bail out early if no DT data: */
+ if (!node) {
+ dev_err(&pdev->dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+ if (!tfp410_mod)
+ return -ENOMEM;
+
+ mod = &tfp410_mod->base;
+
+ tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "pins are not configured\n");
+
+ if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+ dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+ goto fail;
+ }
+
+ i2c_node = of_find_node_by_phandle(i2c_phandle);
+ if (!i2c_node) {
+ dev_err(&pdev->dev, "could not get i2c bus node\n");
+ goto fail;
+ }
+
+ tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+ if (!tfp410_mod->i2c) {
+ dev_err(&pdev->dev, "could not get i2c\n");
+ goto fail;
+ }
+
+ of_node_put(i2c_node);
+
+ tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
+ 0, NULL);
+ if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ dev_warn(&pdev->dev, "No power down GPIO\n");
+ } else {
+ ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
+ if (ret) {
+ dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ tfp410_destroy(mod);
+ return ret;
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id tfp410_of_match[] = {
+ { .compatible = "ti,tilcdc,tfp410", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tfp410_of_match);
+
+struct platform_driver tfp410_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tfp410",
+ .of_match_table = tfp410_of_match,
+ },
+};
+
+int __init tilcdc_tfp410_init(void)
+{
+ return platform_driver_register(&tfp410_driver);
+}
+
+void __exit tilcdc_tfp410_fini(void)
+{
+ platform_driver_unregister(&tfp410_driver);
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
new file mode 100644
index 000000000000..5b800f1f6aa5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_TFP410_H__
+#define __TILCDC_TFP410_H__
+
+/* sub-module for tfp410 dvi adaptor */
+
+int tilcdc_tfp410_init(void);
+void tilcdc_tfp410_fini(void);
+
+#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83a..9b07b7d44a58 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+ bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
return 0;
}
}
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
{
- struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
if (no_wait)
return -EBUSY;
- spin_unlock(&glob->lru_lock);
ret = ttm_bo_wait_unreserved(bo, interruptible);
- spin_lock(&glob->lru_lock);
if (unlikely(ret))
return ret;
}
- atomic_set(&bo->reserved, 1);
if (use_sequence) {
+ bool wake_up = false;
/**
* Wake up waiters that may need to recheck for deadlock,
* if we decreased the sequence number.
*/
if (unlikely((bo->val_seq - sequence < (1 << 31))
|| !bo->seq_valid))
- wake_up_all(&bo->event_queue);
+ wake_up = true;
+ /*
+ * In the worst case with memory ordering these values can be
+ * seen in the wrong order. However, since we call wake_up_all
+ * in that case, this will hopefully not pose a problem,
+ * and the worst case would only cause someone to accidentally
+ * hit -EAGAIN in ttm_bo_reserve when they see an old value of
+ * val_seq. However, this would only happen if seq_valid was
+ * written before val_seq was, and it just means some slightly
+ * increased cpu usage.
+ */
bo->val_seq = sequence;
bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
} else {
bo->seq_valid = false;
}
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
int put_count = 0;
int ret;
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0))
+ ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0)) {
+ spin_lock(&glob->lru_lock);
put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
- ttm_bo_list_ref_sub(bo, put_count, true);
+ return ret;
+}
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ bool wake_up = false;
+ int ret;
+
+ while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+ WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+ wake_up = true;
+
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ if (wake_up)
+ wake_up_all(&bo->event_queue);
+
+ return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count, ret;
+
+ ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+ if (likely(!ret)) {
+ spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
return ret;
}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+ if (remove_all && ret) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(entry, false, false,
+ false, 0);
+ spin_lock(&glob->lru_lock);
+ }
+
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
}
}
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
- struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->glob;
- int ret;
-
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_wait_unreserved(bo, true);
- spin_lock(&glob->lru_lock);
- if (unlikely(ret != 0))
- ttm_eu_backoff_reservation_locked(list);
- return ret;
-}
-
-
void ttm_eu_backoff_reservation(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -145,47 +129,65 @@ int ttm_eu_reserve_buffers(struct list_head *list)
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
-retry:
spin_lock(&glob->lru_lock);
val_seq = entry->bo->bdev->val_seq++;
+retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
-retry_this_bo:
- ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ /* already slowpath reserved? */
+ if (entry->reserved)
+ continue;
+
+ ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
switch (ret) {
case 0:
break;
case -EBUSY:
- ret = ttm_eu_wait_unreserved_locked(list, bo);
- if (unlikely(ret != 0)) {
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
- }
- goto retry_this_bo;
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_reserve_nolru(bo, true, false,
+ true, val_seq);
+ spin_lock(&glob->lru_lock);
+ if (!ret)
+ break;
+
+ if (unlikely(ret != -EAGAIN))
+ goto err;
+
+ /* fallthrough */
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
+
+ /*
+ * temporarily increase the sequence number on every retry,
+ * to prevent us from seeing our old reservation
+ * sequence when someone else reserved the buffer,
+ * but hasn't updated the seq_valid/seqno members yet.
+ */
+ val_seq = entry->bo->bdev->val_seq++;
+
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_unreserved(bo, true);
+ ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
if (unlikely(ret != 0))
return ret;
+ spin_lock(&glob->lru_lock);
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ret = -EBUSY;
+ goto err;
+ }
goto retry;
default:
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return ret;
+ goto err;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
}
@@ -194,6 +196,12 @@ retry_this_bo:
ttm_eu_list_ref_sub(list);
return 0;
+
+err:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7d759a430294..5e93a52d4f2c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -296,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
- swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+ swap_space = file_inode(swap_storage)->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page(swap_space, i);
@@ -345,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
} else
swap_storage = persistent_swap_storage;
- swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+ swap_space = file_inode(swap_storage)->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 87aa5f5d3c88..cc6d90f28c71 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -75,6 +75,8 @@ struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
bool active_16; /* active on the 16-bit channel */
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d4ab3beaada0..9f4be3d4a02e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -22,9 +22,9 @@
#include <drm/drm_fb_helper.h>
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
-static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
+static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
struct urb *urb;
int aligned_x;
int bpp = (fb->base.bits_per_pixel / 8);
+ int x2, y2;
+ bool store_for_later = false;
+ unsigned long flags;
if (!fb->active_16)
return 0;
@@ -169,8 +172,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
}
}
- start_cycles = get_cycles();
-
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
@@ -180,19 +181,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
(y + height > fb->base.height))
return -EINVAL;
+ /* if we are in an atomic context just store the info;
+ we can't test this inside the spin lock */
+ if (in_atomic())
+ store_for_later = true;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&fb->dirty_lock, flags);
+
+ if (fb->y1 < y)
+ y = fb->y1;
+ if (fb->y2 > y2)
+ y2 = fb->y2;
+ if (fb->x1 < x)
+ x = fb->x1;
+ if (fb->x2 > x2)
+ x2 = fb->x2;
+
+ if (store_for_later) {
+ fb->x1 = x;
+ fb->x2 = x2;
+ fb->y1 = y;
+ fb->y2 = y2;
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ return 0;
+ }
+
+ fb->x1 = fb->y1 = INT_MAX;
+ fb->x2 = fb->y2 = 0;
+
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ start_cycles = get_cycles();
+
urb = udl_get_urb(dev);
if (!urb)
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i < y + height ; i++) {
+ for (i = y; i <= y2 ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ (x2 - x + 1) * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
@@ -422,7 +457,6 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
static const struct drm_framebuffer_funcs udlfb_funcs = {
.destroy = udl_user_framebuffer_destroy,
.dirty = udl_user_framebuffer_dirty,
- .create_handle = NULL,
};
@@ -434,16 +468,18 @@ udl_framebuffer_init(struct drm_device *dev,
{
int ret;
+ spin_lock_init(&ufb->dirty_lock);
ufb->obj = obj;
- ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
}
-static int udlfb_create(struct udl_fbdev *ufbdev,
+static int udlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct device *device = &dev->usbdev->dev;
@@ -521,27 +557,10 @@ out:
return ret;
}
-static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = udlfb_create(ufbdev, sizes);
- if (ret)
- return ret;
-
- new_fb = 1;
- }
- return new_fb;
-}
-
static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.gamma_set = udl_crtc_fb_gamma_set,
.gamma_get = udl_crtc_fb_gamma_get,
- .fb_probe = udl_fb_find_or_create_single,
+ .fb_probe = udlfb_create,
};
static void udl_fbdev_destroy(struct drm_device *dev,
@@ -556,6 +575,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
framebuffer_release(info);
}
drm_fb_helper_fini(&ufbdev->helper);
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
@@ -583,6 +603,10 @@ int udl_fbdev_init(struct drm_device *dev)
}
drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
return 0;
}
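
udl_handle_damage() now accumulates a per-framebuffer dirty rectangle under dirty_lock: damage reported from atomic context is only merged and stored, and the next non-atomic call flushes the union and resets the rectangle to empty. A stripped-down sketch of that accumulate-then-flush pattern (structure and function names are illustrative):

        #include <linux/kernel.h>
        #include <linux/spinlock.h>

        struct demo_dirty {
                spinlock_t lock;        /* spin_lock_init() at setup time */
                int x1, y1, x2, y2;     /* init to INT_MAX/0 so the rect starts empty */
        };

        /* merge new damage into the stored rectangle; safe from atomic context */
        static void demo_add_damage(struct demo_dirty *d, int x1, int y1, int x2, int y2)
        {
                unsigned long flags;

                spin_lock_irqsave(&d->lock, flags);
                d->x1 = min(d->x1, x1);
                d->y1 = min(d->y1, y1);
                d->x2 = max(d->x2, x2);
                d->y2 = max(d->y2, y2);
                spin_unlock_irqrestore(&d->lock, flags);
        }

        /* grab the accumulated rectangle and reset it; false means nothing dirty */
        static bool demo_take_damage(struct demo_dirty *d, int *x1, int *y1,
                                     int *x2, int *y2)
        {
                unsigned long flags;
                bool dirty;

                spin_lock_irqsave(&d->lock, flags);
                dirty = d->x1 <= d->x2;
                *x1 = d->x1; *y1 = d->y1;
                *x2 = d->x2; *y2 = d->y2;
                d->x1 = d->y1 = INT_MAX;
                d->x2 = d->y2 = 0;
                spin_unlock_irqrestore(&d->lock, flags);

                return dirty;
        }
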
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index afd212c99216..3816270ba49b 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -137,7 +137,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
if (obj->pages == NULL)
return -ENOMEM;
- inode = obj->base.filp->f_path.dentry->d_inode;
+ inode = file_inode(obj->base.filp);
mapping = inode->i_mapping;
gfpmask |= mapping_gfp_mask(mapping);
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 142fee5f983f..f343db73e095 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -75,15 +75,19 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
}
#endif
-static inline u16 pixel32_to_be16p(const uint8_t *pixel)
+static inline u16 pixel32_to_be16(const uint32_t pixel)
{
- uint32_t pix = *(uint32_t *)pixel;
- u16 retval;
+ return (((pixel >> 3) & 0x001f) |
+ ((pixel >> 5) & 0x07e0) |
+ ((pixel >> 8) & 0xf800));
+}
- retval = (((pix >> 3) & 0x001f) |
- ((pix >> 5) & 0x07e0) |
- ((pix >> 8) & 0xf800));
- return retval;
+static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
+{
+ if (bpp == 2)
+ return *(const uint16_t *)pixel == repeat;
+ else
+ return *(const uint32_t *)pixel == repeat;
}
/*
@@ -152,29 +156,33 @@ static void udl_compress_hline16(
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
while (pixel < cmd_pixel_end) {
- const u8 * const repeating_pixel = pixel;
-
- if (bpp == 2)
- *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
- else if (bpp == 4)
- *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
+ const u8 *const start = pixel;
+ u32 repeating_pixel;
+
+ if (bpp == 2) {
+ repeating_pixel = *(uint16_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
+ } else {
+ repeating_pixel = *(uint32_t *)pixel;
+ *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
+ }
cmd += 2;
pixel += bpp;
if (unlikely((pixel < cmd_pixel_end) &&
- (!memcmp(pixel, repeating_pixel, bpp)))) {
+ (pixel_repeats(pixel, repeating_pixel, bpp)))) {
/* go back and fill in raw pixel count */
- *raw_pixels_count_byte = (((repeating_pixel -
+ *raw_pixels_count_byte = (((start -
raw_pixel_start) / bpp) + 1) & 0xFF;
- while ((pixel < cmd_pixel_end)
- && (!memcmp(pixel, repeating_pixel, bpp))) {
+ while ((pixel < cmd_pixel_end) &&
+ (pixel_repeats(pixel, repeating_pixel, bpp))) {
pixel += bpp;
}
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -223,6 +231,8 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+ BUG_ON(!(bpp == 2 || bpp == 4));
+
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
line_end = next_pixel + byte_width;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index c0f1cc7f5ca9..d0ab3fb32acd 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -120,7 +120,6 @@ int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- idr_remove_all(&dev_priv->object_idr);
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0d55432e02a2..0ab93ff09873 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -148,17 +148,10 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (retval)
goto fail_alloc;
-again:
- if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
- retval = -ENOMEM;
- goto fail_idr;
- }
-
- retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
- if (retval == -EAGAIN)
- goto again;
- if (retval)
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
goto fail_idr;
+ user_key = retval;
list_add(&item->owner_list, &file_priv->obj_list);
mutex_unlock(&dev->struct_mutex);
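
The via_mem_alloc() hunk above is part of the tree-wide idr conversion: the old idr_pre_get()/idr_get_new_above() retry loop becomes a single idr_alloc() call whose return value is either the new id or a negative errno. A generic sketch of the same new-style allocation done under a lock, as the vmwgfx_resource.c hunk further down also does (names here are illustrative):

        #include <linux/idr.h>
        #include <linux/spinlock.h>

        static DEFINE_IDR(demo_idr);
        static DEFINE_SPINLOCK(demo_lock);

        /* allocate an id >= 1 for ptr; returns the id or a negative errno */
        static int demo_alloc_id(void *ptr)
        {
                int id;

                idr_preload(GFP_KERNEL);        /* preallocate while we may still sleep */
                spin_lock(&demo_lock);

                /* end == 0 means "no upper bound"; GFP_NOWAIT because the lock is held */
                id = idr_alloc(&demo_idr, ptr, 1, 0, GFP_NOWAIT);

                spin_unlock(&demo_lock);
                idr_preload_end();

                return id;
        }
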
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 161f8b2549aa..07dfd823cc30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -829,7 +829,7 @@ static void vmw_lastclose(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
- ret = crtc->funcs->set_config(&set);
+ ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index d9fbbe191071..c509d40c4897 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -131,7 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
struct vmw_resource *res;
uint32_t num_clips;
@@ -163,19 +163,15 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -199,9 +195,9 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
out_no_surface:
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
@@ -220,7 +216,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips = NULL;
- struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
struct vmw_framebuffer *vfb;
uint32_t num_clips;
int ret;
@@ -251,24 +247,20 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_copy;
}
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_mode_mutex;
- }
+ drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
+ fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -EINVAL;
goto out_no_fb;
}
- vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->dmabuf) {
DRM_ERROR("Framebuffer not dmabuf backed.\n");
ret = -EINVAL;
- goto out_no_fb;
+ goto out_no_ttm_lock;
}
ret = ttm_read_lock(&vmaster->lock, true);
@@ -281,9 +273,9 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
+ drm_framebuffer_unreference(fb);
out_no_fb:
- mutex_unlock(&dev->mode_config.mutex);
-out_no_mode_mutex:
+ drm_modeset_unlock_all(dev);
out_no_copy:
kfree(clips);
out_clips:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 54743943d8b3..3e3c7ab33ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -180,16 +180,29 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_set function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
/* A lot of the code assumes this */
- if (handle && (width != 64 || height != 64))
- return -EINVAL;
+ if (handle && (width != 64 || height != 64)) {
+ ret = -EINVAL;
+ goto out;
+ }
if (handle) {
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
@@ -197,7 +210,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* takedown old cursor */
@@ -225,14 +239,20 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
- return 0;
+ ret = 0;
+ goto out;
}
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
- return 0;
+ ret = 0;
+out:
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
+ return ret;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -244,10 +264,23 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
+ /*
+ * FIXME: Unclear whether there's any global state touched by the
+ * cursor_set function, especially vmw_cursor_update_position looks
+ * suspicious. For now take the easy route and reacquire all locks. We
+ * can do this since the caller in the drm core doesn't check anything
+ * which is protected by any locks.
+ */
+ mutex_unlock(&crtc->mutex);
+ drm_modeset_lock_all(dev_priv->dev);
+
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
+ drm_modeset_unlock_all(dev_priv->dev);
+ mutex_lock(&crtc->mutex);
+
return 0;
}
@@ -373,16 +406,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
* Generic framebuffer code
*/
-int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- if (handle)
- *handle = 0;
-
- return 0;
-}
-
/*
* Surface framebuffer code
*/
@@ -610,7 +633,6 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -681,14 +703,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbs->base.base,
- &vmw_framebuffer_surface_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
/* XXX get the first 3 from the surface info */
@@ -707,10 +725,15 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
*out = &vfbs->base;
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbs->base.base);
+ vmw_surface_unreference(&surface);
out_err2:
kfree(vfbs);
out_err1:
@@ -960,7 +983,6 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
};
/**
@@ -1053,14 +1075,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
- if (ret)
- goto out_err2;
-
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- goto out_err3;
+ ret = -EINVAL;
+ goto out_err2;
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
@@ -1077,10 +1095,15 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err3;
+
return 0;
out_err3:
- drm_framebuffer_cleanup(&vfbd->base.base);
+ vmw_dmabuf_unreference(&dmabuf);
out_err2:
kfree(vfbd);
out_err1:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b2..bc784254e78e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -177,17 +177,16 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1);
- do {
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(idr, res, 1, &res->id);
- write_unlock(&dev_priv->resource_lock);
+ idr_preload(GFP_KERNEL);
+ write_lock(&dev_priv->resource_lock);
- } while (ret == -EAGAIN);
+ ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ res->id = ret;
- return ret;
+ write_unlock(&dev_priv->resource_lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
/**
@@ -959,13 +958,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (new_backup && new_backup != res->backup) {
if (res->backup) {
- BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
res->backup = vmw_dmabuf_reference(new_backup);
- BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
list_add_tail(&res->mob_head, &new_backup->res_list);
}
if (new_backup)
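
For context, a minimal sketch of the idr_preload()/idr_alloc() allocation pattern that the vmw_resource_alloc_id() hunk above converts to. It is not part of the patch; my_idr, my_lock and my_alloc_id are hypothetical names.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

/* Allocate an ID >= 1 for @object; returns the ID or a negative errno. */
static int my_alloc_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while sleeping is still allowed */
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, object, 1, 0, GFP_NOWAIT);	/* end == 0: no upper bound */
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;
}
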
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
deleted file mode 100644
index 419917955bf6..000000000000
--- a/drivers/gpu/stub/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config STUB_POULSBO
- tristate "Intel GMA500 Stub Driver"
- depends on PCI
- depends on NET # for THERMAL
- # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
- select INPUT if ACPI
- select ACPI_VIDEO if ACPI
- select THERMAL if ACPI
- help
- Choose this option if you have a system that has Intel GMA500
- (Poulsbo) integrated graphics. If M is selected, the module will
- be called Poulsbo. This driver is a stub driver for Poulsbo that
- will call poulsbo.ko to enable the acpi backlight control sysfs
- entry file because there have no poulsbo native driver can support
- intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
deleted file mode 100644
index cd940cc9d36d..000000000000
--- a/drivers/gpu/stub/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
deleted file mode 100644
index 7edfd27b8dee..000000000000
--- a/drivers/gpu/stub/poulsbo.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Intel Poulsbo Stub driver
- *
- * Copyright (C) 2010 Novell <jlee@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <acpi/video.h>
-
-#define DRIVER_NAME "poulsbo"
-
-enum {
- CHIP_PSB_8108 = 0,
- CHIP_PSB_8109 = 1,
-};
-
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
- {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
- {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
- {0, 0, 0}
-};
-
-static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- return acpi_video_register();
-}
-
-static void poulsbo_remove(struct pci_dev *pdev)
-{
- acpi_video_unregister();
-}
-
-static struct pci_driver poulsbo_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = poulsbo_probe,
- .remove = poulsbo_remove,
-};
-
-static int __init poulsbo_init(void)
-{
- return pci_register_driver(&poulsbo_driver);
-}
-
-static void __exit poulsbo_exit(void)
-{
- pci_unregister_driver(&poulsbo_driver);
-}
-
-module_init(poulsbo_init);
-module_exit(poulsbo_exit);
-
-MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
-MODULE_DESCRIPTION("Poulsbo Stub Driver");
-MODULE_LICENSE("GPL");
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fa60add0ff63..cf787e1d9322 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -25,6 +25,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/console.h>
#include <linux/vga_switcheroo.h>
#include <linux/vgaarb.h>
@@ -337,8 +338,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->fb_info) {
struct fb_event event;
+ console_lock();
event.info = new_client->fb_info;
fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+ console_unlock();
}
ret = vgasr_priv.handler->switchto(new_client->id);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ff75cabf7393..512b01c04ea7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2077,6 +2077,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6e5c2ffa8d96..92e47e5c9564 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -557,6 +557,9 @@
#define USB_VENDOR_ID_MADCATZ 0x0738
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+#define USB_VENDOR_ID_MASTERKIT 0x16c0
+#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index b685b04dbf9d..d7437ef5c695 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(roccat_disconnect);
static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct roccat_device *device;
unsigned int minor = iminor(inode);
long retval = 0;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index f3bbbce8353b..a7451632ceb4 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -108,7 +108,7 @@ out:
* This function is to be called with the minors_lock mutex held */
static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct hid_device *dev;
__u8 *buf;
int ret = 0;
@@ -176,7 +176,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
* mutex held. */
static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
{
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
struct hid_device *dev;
__u8 *buf;
int ret = 0, len;
@@ -340,7 +340,7 @@ unlock:
static long hidraw_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int minor = iminor(inode);
long ret = 0;
struct hidraw *dev;
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 2d58f939d27f..833dd1afbf46 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -420,7 +420,7 @@ static int hsi_event_notifier_call(struct notifier_block *nb,
/**
* hsi_register_port_event - Register a client to receive port events
* @cl: HSI client that wants to receive port events
- * @cb: Event handler callback
+ * @handler: Event handler callback
*
* Clients should register a callback to be able to receive
* events from the ports. Registration should happen after
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8bb810e1900b..a3725de92384 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -106,6 +106,8 @@ config I2C_I801
Panther Point (PCH)
Lynx Point (PCH)
Lynx Point-LP (PCH)
+ Avoton (SOC)
+ Wellsburg (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -121,6 +123,16 @@ config I2C_ISCH
This driver can also be built as a module. If so, the module
will be called i2c-isch.
+config I2C_ISMT
+ tristate "Intel iSMT SMBus Controller"
+ depends on PCI && X86
+ help
+ If you say yes to this option, support will be included for the Intel
+ iSMT SMBus host controller interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-ismt.
+
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
depends on PCI
@@ -186,11 +198,11 @@ config I2C_SIS5595
will be called i2c-sis5595.
config I2C_SIS630
- tristate "SiS 630/730"
+ tristate "SiS 630/730/964"
depends on PCI
help
If you say yes to this option, support will be included for the
- SiS630 and SiS730 SMBus (a subset of I2C) interface.
+ SiS630, SiS730 and SiS964 SMBus (a subset of I2C) interface.
This driver can also be built as a module. If so, the module
will be called i2c-sis630.
@@ -319,6 +331,18 @@ config I2C_AU1550
This driver can also be built as a module. If so, the module
will be called i2c-au1550.
+config I2C_BCM2835
+ tristate "Broadcom BCM2835 I2C controller"
+ depends on ARCH_BCM2835
+ help
+ If you say yes to this option, support will be included for the
+ BCM2835 I2C controller.
+
+ If you don't know what to do here, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-bcm2835.
+
config I2C_BLACKFIN_TWI
tristate "Blackfin TWI I2C support"
depends on BLACKFIN
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 6181f3ff263f..8f4fc23b85b1 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o
obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o
obj-$(CONFIG_I2C_I801) += i2c-i801.o
obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
+obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
+obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index ebc224154695..75195e3f5ddb 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -553,13 +553,6 @@ static struct at91_twi_pdata at91sam9g10_config = {
.has_dma_support = false,
};
-static struct at91_twi_pdata at91sam9x5_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_dma_support = true,
-};
-
static const struct platform_device_id at91_twi_devtypes[] = {
{
.name = "i2c-at91rm9200",
@@ -582,8 +575,18 @@ static const struct platform_device_id at91_twi_devtypes[] = {
};
#if defined(CONFIG_OF)
+static struct at91_twi_pdata at91sam9x5_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_dma_support = true,
+};
+
static const struct of_device_id atmel_twi_dt_ids[] = {
{
+ .compatible = "atmel,at91rm9200-i2c",
+ .data = &at91rm9200_config,
+ } , {
.compatible = "atmel,at91sam9260-i2c",
.data = &at91sam9260_config,
} , {
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index b278298787d7..b5b89239d622 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -376,7 +376,6 @@ static int i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&priv->adap);
i2c_au1550_disable(priv);
iounmap(priv->psc_base);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
new file mode 100644
index 000000000000..ea4b08fc3353
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -0,0 +1,342 @@
+/*
+ * BCM2835 master mode driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define BCM2835_I2C_C 0x0
+#define BCM2835_I2C_S 0x4
+#define BCM2835_I2C_DLEN 0x8
+#define BCM2835_I2C_A 0xc
+#define BCM2835_I2C_FIFO 0x10
+#define BCM2835_I2C_DIV 0x14
+#define BCM2835_I2C_DEL 0x18
+#define BCM2835_I2C_CLKT 0x1c
+
+#define BCM2835_I2C_C_READ BIT(0)
+#define BCM2835_I2C_C_CLEAR BIT(4) /* bits 4 and 5 both clear */
+#define BCM2835_I2C_C_ST BIT(7)
+#define BCM2835_I2C_C_INTD BIT(8)
+#define BCM2835_I2C_C_INTT BIT(9)
+#define BCM2835_I2C_C_INTR BIT(10)
+#define BCM2835_I2C_C_I2CEN BIT(15)
+
+#define BCM2835_I2C_S_TA BIT(0)
+#define BCM2835_I2C_S_DONE BIT(1)
+#define BCM2835_I2C_S_TXW BIT(2)
+#define BCM2835_I2C_S_RXR BIT(3)
+#define BCM2835_I2C_S_TXD BIT(4)
+#define BCM2835_I2C_S_RXD BIT(5)
+#define BCM2835_I2C_S_TXE BIT(6)
+#define BCM2835_I2C_S_RXF BIT(7)
+#define BCM2835_I2C_S_ERR BIT(8)
+#define BCM2835_I2C_S_CLKT BIT(9)
+#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
+
+#define BCM2835_I2C_TIMEOUT (msecs_to_jiffies(1000))
+
+struct bcm2835_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct i2c_adapter adapter;
+ struct completion completion;
+ u32 msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+};
+
+static inline void bcm2835_i2c_writel(struct bcm2835_i2c_dev *i2c_dev,
+ u32 reg, u32 val)
+{
+ writel(val, i2c_dev->regs + reg);
+}
+
+static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
+{
+ return readl(i2c_dev->regs + reg);
+}
+
+static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 val;
+
+ while (i2c_dev->msg_buf_remaining) {
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ if (!(val & BCM2835_I2C_S_TXD))
+ break;
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_FIFO,
+ *i2c_dev->msg_buf);
+ i2c_dev->msg_buf++;
+ i2c_dev->msg_buf_remaining--;
+ }
+}
+
+static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 val;
+
+ while (i2c_dev->msg_buf_remaining) {
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ if (!(val & BCM2835_I2C_S_RXD))
+ break;
+ *i2c_dev->msg_buf = bcm2835_i2c_readl(i2c_dev,
+ BCM2835_I2C_FIFO);
+ i2c_dev->msg_buf++;
+ i2c_dev->msg_buf_remaining--;
+ }
+}
+
+static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
+{
+ struct bcm2835_i2c_dev *i2c_dev = data;
+ u32 val, err;
+
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, val);
+
+ err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR);
+ if (err) {
+ i2c_dev->msg_err = err;
+ complete(&i2c_dev->completion);
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_RXD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ if (!(val & BCM2835_I2C_S_DONE))
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_DONE) {
+ if (i2c_dev->msg_buf_remaining)
+ i2c_dev->msg_err = BCM2835_I2C_S_LEN;
+ else
+ i2c_dev->msg_err = 0;
+ complete(&i2c_dev->completion);
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_TXD) {
+ bcm2835_fill_txfifo(i2c_dev);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
+ struct i2c_msg *msg)
+{
+ u32 c;
+ int time_left;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ INIT_COMPLETION(i2c_dev->completion);
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+
+ if (msg->flags & I2C_M_RD) {
+ c = BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
+ } else {
+ c = BCM2835_I2C_C_INTT;
+ bcm2835_fill_txfifo(i2c_dev);
+ }
+ c |= BCM2835_I2C_C_ST | BCM2835_I2C_C_INTD | BCM2835_I2C_C_I2CEN;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+
+ time_left = wait_for_completion_timeout(&i2c_dev->completion,
+ BCM2835_I2C_TIMEOUT);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ if (!time_left) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (likely(!i2c_dev->msg_err))
+ return 0;
+
+ if ((i2c_dev->msg_err & BCM2835_I2C_S_ERR) &&
+ (msg->flags & I2C_M_IGNORE_NAK))
+ return 0;
+
+ dev_err(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
+
+ if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
+ return -EREMOTEIO;
+ else
+ return -EIO;
+}
+
+static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < num; i++) {
+ ret = bcm2835_i2c_xfer_msg(i2c_dev, &msgs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret ?: i;
+}
+
+static u32 bcm2835_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm2835_i2c_algo = {
+ .master_xfer = bcm2835_i2c_xfer,
+ .functionality = bcm2835_i2c_func,
+};
+
+static int bcm2835_i2c_probe(struct platform_device *pdev)
+{
+ struct bcm2835_i2c_dev *i2c_dev;
+ struct resource *mem, *requested, *irq;
+ u32 bus_clk_rate, divider;
+ int ret;
+ struct i2c_adapter *adap;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_dev\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, i2c_dev);
+ i2c_dev->dev = &pdev->dev;
+ init_completion(&i2c_dev->completion);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No mem resource\n");
+ return -ENODEV;
+ }
+
+ requested = devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem),
+ dev_name(&pdev->dev));
+ if (!requested) {
+ dev_err(&pdev->dev, "Could not claim register region\n");
+ return -EBUSY;
+ }
+
+ i2c_dev->regs = devm_ioremap(&pdev->dev, mem->start,
+ resource_size(mem));
+ if (!i2c_dev->regs) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return -ENOMEM;
+ }
+
+ i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(&pdev->dev, "Could not get clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &bus_clk_rate);
+ if (ret < 0) {
+ dev_warn(&pdev->dev,
+ "Could not read clock-frequency property\n");
+ bus_clk_rate = 100000;
+ }
+
+ divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), bus_clk_rate);
+ /*
+ * Per the datasheet, the register is always interpreted as an even
+ * number, by rounding down. In other words, the LSB is ignored. So,
+ * if the LSB is set, increment the divider to avoid any issue.
+ */
+ if (divider & 1)
+ divider++;
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return -ENODEV;
+ }
+ i2c_dev->irq = irq->start;
+
+ ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return -ENODEV;
+ }
+
+ adap = &i2c_dev->adapter;
+ i2c_set_adapdata(adap, i2c_dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
+ adap->algo = &bcm2835_i2c_algo;
+ adap->dev.parent = &pdev->dev;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ free_irq(i2c_dev->irq, i2c_dev);
+
+ return ret;
+}
+
+static int bcm2835_i2c_remove(struct platform_device *pdev)
+{
+ struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ free_irq(i2c_dev->irq, i2c_dev);
+ i2c_del_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_i2c_of_match[] = {
+ { .compatible = "brcm,bcm2835-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match);
+
+static struct platform_driver bcm2835_i2c_driver = {
+ .probe = bcm2835_i2c_probe,
+ .remove = bcm2835_i2c_remove,
+ .driver = {
+ .name = "i2c-bcm2835",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_i2c_of_match,
+ },
+};
+module_platform_driver(bcm2835_i2c_driver);
+
+MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
+MODULE_DESCRIPTION("BCM2835 I2C bus adapter");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-bcm2835");
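
As a worked example of the divider rounding in bcm2835_i2c_probe() above (the 250 MHz parent clock is an assumption for illustration, not a documented value):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

/* Hedged sketch: reproduce the probe's divider rounding in isolation. */
static u32 bcm2835_example_divider(u32 parent_hz, u32 bus_hz)
{
	u32 divider = DIV_ROUND_UP(parent_hz, bus_hz);

	if (divider & 1)	/* the hardware ignores the LSB, so round up */
		divider++;
	return divider;
}

/*
 * bcm2835_example_divider(250000000, 100000) == 2500 (already even)
 * bcm2835_example_divider(250000000, 150000) == 1668 (1667 rounded up)
 */
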
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 0cf780fd6ef1..05080c449c6b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -724,8 +724,6 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
{
struct bfin_twi_iface *iface = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
peripheral_free_list((unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 2e79c1024191..3823623baa48 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -682,7 +682,6 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
out_shut:
cpm_i2c_shutdown(cpm);
out_free:
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(cpm);
return result;
@@ -696,7 +695,6 @@ static int cpm_i2c_remove(struct platform_device *ofdev)
cpm_i2c_shutdown(cpm);
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(cpm);
return 0;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a0a55319449..7d1e590a7bb6 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,6 @@ err_mem_ioremap:
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
- platform_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -771,7 +770,6 @@ static int davinci_i2c_remove(struct platform_device *pdev)
i2c_davinci_cpufreq_deregister(dev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index f5258c205de5..94fd81875409 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -413,11 +413,23 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+ u32 cmd = 0;
+
+ /*
+ * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
+ * manually set the stop bit. However, it cannot be
+ * detected from the registers, so we always set it
+ * when writing/reading the last byte.
+ */
+ if (dev->msg_write_idx == dev->msgs_num - 1 &&
+ buf_len == 1)
+ cmd |= BIT(9);
+
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
- dw_writel(dev, 0x100, DW_IC_DATA_CMD);
+ dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
} else
- dw_writel(dev, *buf++, DW_IC_DATA_CMD);
+ dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
}
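
For readers of the hunk above: in the DesignWare DATA_CMD register, the 0x100 written for reads is the read/command bit (bit 8), and BIT(9) is the STOP request the comment refers to. A hedged sketch of the "last byte of the last message" test, using hypothetical names rather than the driver's internals:

#include <linux/bitops.h>	/* BIT() */
#include <linux/types.h>

#define MY_DW_CMD_READ	0x100	/* bit 8: perform a read for this entry */
#define MY_DW_CMD_STOP	BIT(9)	/* bit 9: issue a STOP after this byte */

/* Build the DATA_CMD word for one byte of a transfer. */
static u32 my_dw_build_cmd(bool is_read, bool last_msg, unsigned int bytes_left)
{
	u32 cmd = 0;

	if (last_msg && bytes_left == 1)
		cmd |= MY_DW_CMD_STOP;
	if (is_read)
		cmd |= MY_DW_CMD_READ;
	return cmd;
}
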
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 6add851e9dee..7c5e383c350d 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -319,7 +319,6 @@ err_free_irq:
free_irq(pdev->irq, dev);
err_iounmap:
iounmap(dev->base);
- pci_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -336,7 +335,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
- pci_set_drvdata(pdev, NULL);
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 343357a2b5b4..0ceb6e1b0f65 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -37,8 +37,10 @@
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include "i2c-designware-core.h"
static struct i2c_algorithm i2c_dw_algo = {
@@ -50,6 +52,42 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk)/1000;
}
+#ifdef CONFIG_ACPI
+static int dw_i2c_acpi_configure(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct acpi_device *adev;
+ int busno, ret;
+
+ if (!ACPI_HANDLE(&pdev->dev))
+ return -ENODEV;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (ret)
+ return -ENODEV;
+
+ dev->adapter.nr = -1;
+ if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &busno))
+ dev->adapter.nr = busno;
+
+ dev->tx_fifo_depth = 32;
+ dev->rx_fifo_depth = 32;
+ return 0;
+}
+
+static const struct acpi_device_id dw_i2c_acpi_match[] = {
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
+#else
+static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
@@ -114,18 +152,22 @@ static int dw_i2c_probe(struct platform_device *pdev)
r = -EBUSY;
goto err_unuse_clocks;
}
- {
+
+ /* Try first if we can configure the device from ACPI */
+ r = dw_i2c_acpi_configure(pdev);
+ if (r) {
u32 param1 = i2c_dw_read_comp_param(dev);
dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ dev->adapter.nr = pdev->id;
}
r = i2c_dw_init(dev);
if (r)
goto err_iounmap;
i2c_dw_disable_int(dev);
- r = request_irq(dev->irq, i2c_dw_isr, IRQF_DISABLED, pdev->name, dev);
+ r = request_irq(dev->irq, i2c_dw_isr, IRQF_SHARED, pdev->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
goto err_iounmap;
@@ -140,14 +182,19 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
+ ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
- adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
if (r) {
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
of_i2c_register_devices(adap);
+ acpi_i2c_register_devices(adap);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -160,7 +207,6 @@ err_unuse_clocks:
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
- platform_set_drvdata(pdev, NULL);
put_device(&pdev->dev);
kfree(dev);
err_release_region:
@@ -174,7 +220,8 @@ static int dw_i2c_remove(struct platform_device *pdev)
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
- platform_set_drvdata(pdev, NULL);
+ pm_runtime_get_sync(&pdev->dev);
+
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
@@ -186,6 +233,9 @@ static int dw_i2c_remove(struct platform_device *pdev)
free_irq(dev->irq, dev);
kfree(dev);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
return 0;
@@ -233,6 +283,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
+ .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
.pm = &dw_i2c_dev_pm_ops,
},
};
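
The platdrv changes above also bring the device under runtime PM once the adapter is registered. A minimal, hedged sketch of that enable/teardown pairing in a hypothetical platform driver (error handling elided):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_probe(struct platform_device *pdev)
{
	/* ... map resources and register the adapter first ... */
	pm_runtime_set_active(&pdev->dev);	/* device is already powered up */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_put(&pdev->dev);		/* let it suspend when idle */
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	pm_runtime_get_sync(&pdev->dev);	/* resume before tearing down */
	/* ... unregister the adapter, free the IRQ ... */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
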
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 5e7886e7136e..0f3752967c4b 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -869,8 +869,6 @@ static void pch_i2c_remove(struct pci_dev *pdev)
for (i = 0; i < adap_info->ch_num; i++)
adap_info->pch_data[i].pch_base_address = NULL;
- pci_set_drvdata(pdev, NULL);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 3351cc7ed11f..436b0f254916 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -436,8 +436,6 @@ err_unmap:
err:
kfree(dev);
- platform_set_drvdata(pdev, NULL);
-
return ret;
}
@@ -453,8 +451,6 @@ static int highlander_i2c_remove(struct platform_device *pdev)
iounmap(dev->base);
kfree(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3092387f6ef4..e1cf2e0e1f23 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -53,6 +53,11 @@
Panther Point (PCH) 0x1e22 32 hard yes yes yes
Lynx Point (PCH) 0x8c22 32 hard yes yes yes
Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+ Avoton (SOC) 0x1f3c 32 hard yes yes yes
+ Wellsburg (PCH) 0x8d22 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
+ Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -162,9 +167,14 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
struct i801_mux_config {
@@ -798,6 +808,11 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ 0, }
};
@@ -1103,6 +1118,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
+ case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
priv->features |= FEATURE_IDF;
/* fall through */
default:
@@ -1236,7 +1254,6 @@ static void i801_remove(struct pci_dev *dev)
free_irq(dev->irq, priv);
pci_release_region(dev, SMBBAR);
- pci_set_drvdata(dev, NULL);
kfree(priv);
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 33a2abb6c063..405a2e240454 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -773,7 +773,6 @@ error_cleanup:
if (dev->vaddr)
iounmap(dev->vaddr);
- dev_set_drvdata(&ofdev->dev, NULL);
kfree(dev);
return ret;
}
@@ -785,8 +784,6 @@ static int iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
- dev_set_drvdata(&ofdev->dev, NULL);
-
i2c_del_adapter(&dev->adap);
if (dev->irq) {
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a71ece63e917..82f20c60bb7b 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -605,7 +605,6 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
i2c_del_adapter(&i2c_imx->adapter);
- platform_set_drvdata(pdev, NULL);
/* setup chip registers to defaults */
writeb(0, i2c_imx->base + IMX_I2C_IADR);
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index de3736bf6465..323fa018ffd5 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1069,7 +1069,6 @@ static int intel_mid_i2c_probe(struct pci_dev *dev,
fail3:
free_irq(dev->irq, mrst);
fail2:
- pci_set_drvdata(dev, NULL);
kfree(mrst);
fail1:
iounmap(base);
@@ -1087,7 +1086,6 @@ static void intel_mid_i2c_remove(struct pci_dev *dev)
dev_err(&dev->dev, "Failed to delete i2c adapter");
free_irq(dev->irq, mrst);
- pci_set_drvdata(dev, NULL);
iounmap(mrst->base);
kfree(mrst);
pci_release_region(dev, 0);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 2f99613fd677..bc993331c695 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -415,8 +415,6 @@ iop3xx_i2c_remove(struct platform_device *pdev)
kfree(adapter_data);
kfree(padapter);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 4099f79c2280..8c38aaa7417c 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -40,6 +40,7 @@
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
#define SMBHSTSTS (1 + sch_smba)
+#define SMBHSTCLK (2 + sch_smba)
#define SMBHSTADD (4 + sch_smba) /* TSA */
#define SMBHSTCMD (5 + sch_smba)
#define SMBHSTDAT0 (6 + sch_smba)
@@ -58,6 +59,9 @@
static unsigned short sch_smba;
static struct i2c_adapter sch_adapter;
+static int backbone_speed = 33000; /* backbone speed in kHz */
+module_param(backbone_speed, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)");
/*
* Start the i2c transaction -- the i2c_access will prepare the transaction
@@ -156,6 +160,19 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp);
return -EAGAIN;
}
+ temp = inw(SMBHSTCLK);
+ if (!temp) {
+ /*
+ * We can't determine whether the SMBus clock is 33 or
+ * 25 MHz, so assume 33 MHz and program a bus clock of
+ * 100 kHz. If we actually run at 25 MHz the bus will
+ * run at ~75 kHz instead, which should do no harm.
+ */
+ dev_notice(&sch_adapter.dev,
+ "Clock divider uninitialized. Setting defaults\n");
+ outw(backbone_speed / (4 * 100), SMBHSTCLK);
+ }
+
dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size,
(read_write)?"READ":"WRITE");
switch (size) {
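
A worked example of the default programmed above, using the figures from the comment (33 MHz assumed backbone, 100 kHz target bus clock); the helper name is hypothetical:

/* Hedged sketch of the i2c-isch clock arithmetic; values are the module
 * defaults from above, not measured hardware numbers.
 */
static unsigned int isch_example_bus_khz(unsigned int backbone_khz)
{
	unsigned int divider = 33000 / (4 * 100);	/* = 82, written to SMBHSTCLK */

	return backbone_khz / (4 * divider);	/* 33000 -> ~100 kHz, 25000 -> ~76 kHz */
}
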
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
new file mode 100644
index 000000000000..e9205ee8cf94
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -0,0 +1,963 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Supports the SMBus Message Transport (SMT) in the Intel Atom Processor
+ * S12xx Product Family.
+ *
+ * Features supported by this driver:
+ * Hardware PEC yes
+ * Block buffer yes
+ * Block process call transaction no
+ * Slave mode no
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* PCI Address Constants */
+#define SMBBAR 0
+
+/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
+#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
+#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+
+#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */
+#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
+
+/* Hardware Descriptor Constants - Control Field */
+#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
+#define ISMT_DESC_BLK 0X04 /* Perform Block Transaction */
+#define ISMT_DESC_FAIR 0x08 /* Set fairness flag upon successful arbit. */
+#define ISMT_DESC_PEC 0x10 /* Packet Error Code */
+#define ISMT_DESC_I2C 0x20 /* I2C Enable */
+#define ISMT_DESC_INT 0x40 /* Interrupt */
+#define ISMT_DESC_SOE 0x80 /* Stop On Error */
+
+/* Hardware Descriptor Constants - Status Field */
+#define ISMT_DESC_SCS 0x01 /* Success */
+#define ISMT_DESC_DLTO 0x04 /* Data Low Time Out */
+#define ISMT_DESC_NAK 0x08 /* NAK Received */
+#define ISMT_DESC_CRC 0x10 /* CRC Error */
+#define ISMT_DESC_CLTO 0x20 /* Clock Low Time Out */
+#define ISMT_DESC_COL 0x40 /* Collisions */
+#define ISMT_DESC_LPR 0x80 /* Large Packet Received */
+
+/* Macros */
+#define ISMT_DESC_ADDR_RW(addr, rw) (((addr) << 1) | (rw))
+
+/* iSMT General Register address offsets (SMBBAR + <addr>) */
+#define ISMT_GR_GCTRL 0x000 /* General Control */
+#define ISMT_GR_SMTICL 0x008 /* SMT Interrupt Cause Location */
+#define ISMT_GR_ERRINTMSK 0x010 /* Error Interrupt Mask */
+#define ISMT_GR_ERRAERMSK 0x014 /* Error AER Mask */
+#define ISMT_GR_ERRSTS 0x018 /* Error Status */
+#define ISMT_GR_ERRINFO 0x01c /* Error Information */
+
+/* iSMT Master Registers */
+#define ISMT_MSTR_MDBA 0x100 /* Master Descriptor Base Address */
+#define ISMT_MSTR_MCTRL 0x108 /* Master Control */
+#define ISMT_MSTR_MSTS 0x10c /* Master Status */
+#define ISMT_MSTR_MDS 0x110 /* Master Descriptor Size */
+#define ISMT_MSTR_RPOLICY 0x114 /* Retry Policy */
+
+/* iSMT Miscellaneous Registers */
+#define ISMT_SPGT 0x300 /* SMBus PHY Global Timing */
+
+/* General Control Register (GCTRL) bit definitions */
+#define ISMT_GCTRL_TRST 0x04 /* Target Reset */
+#define ISMT_GCTRL_KILL 0x08 /* Kill */
+#define ISMT_GCTRL_SRST 0x40 /* Soft Reset */
+
+/* Master Control Register (MCTRL) bit definitions */
+#define ISMT_MCTRL_SS 0x01 /* Start/Stop */
+#define ISMT_MCTRL_MEIE 0x10 /* Master Error Interrupt Enable */
+#define ISMT_MCTRL_FMHP 0x00ff0000 /* Firmware Master Head Ptr (FMHP) */
+
+/* Master Status Register (MSTS) bit definitions */
+#define ISMT_MSTS_HMTP 0xff0000 /* HW Master Tail Pointer (HMTP) */
+#define ISMT_MSTS_MIS 0x20 /* Master Interrupt Status (MIS) */
+#define ISMT_MSTS_MEIS 0x10 /* Master Error Int Status (MEIS) */
+#define ISMT_MSTS_IP 0x01 /* In Progress */
+
+/* Master Descriptor Size (MDS) bit definitions */
+#define ISMT_MDS_MASK 0xff /* Master Descriptor Size mask (MDS) */
+
+/* SMBus PHY Global Timing Register (SPGT) bit definitions */
+#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */
+#define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */
+#define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */
+#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */
+#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */
+
+
+/* MSI Control Register (MSICTL) bit definitions */
+#define ISMT_MSICTL_MSIE 0x01 /* MSI Enable */
+
+/* iSMT Hardware Descriptor */
+struct ismt_desc {
+ u8 tgtaddr_rw; /* target address & r/w bit */
+ u8 wr_len_cmd; /* write length in bytes or a command */
+ u8 rd_len; /* read length */
+ u8 control; /* control bits */
+ u8 status; /* status bits */
+ u8 retry; /* collision retry and retry count */
+ u8 rxbytes; /* received bytes */
+ u8 txbytes; /* transmitted bytes */
+ u32 dptr_low; /* lower 32 bit of the data pointer */
+ u32 dptr_high; /* upper 32 bit of the data pointer */
+} __packed;
+
+struct ismt_priv {
+ struct i2c_adapter adapter;
+ void *smba; /* PCI BAR */
+ struct pci_dev *pci_dev;
+ struct ismt_desc *hw; /* descriptor virt base addr */
+ dma_addr_t io_rng_dma; /* descriptor HW base addr */
+ u8 head; /* ring buffer head pointer */
+ struct completion cmp; /* interrupt completion */
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* temp R/W data buffer */
+ bool using_msi; /* type of interrupt flag */
+};
+
+/**
+ * ismt_ids - PCI device IDs supported by this driver
+ */
+static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ismt_ids);
+
+/* Bus speed control bits for slow debuggers - refer to the docs for usage */
+static unsigned int bus_speed;
+module_param(bus_speed, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)");
+
+/**
+ * __ismt_desc_dump() - dump the contents of a specific descriptor
+ */
+static void __ismt_desc_dump(struct device *dev, const struct ismt_desc *desc)
+{
+
+ dev_dbg(dev, "Descriptor struct: %p\n", desc);
+ dev_dbg(dev, "\ttgtaddr_rw=0x%02X\n", desc->tgtaddr_rw);
+ dev_dbg(dev, "\twr_len_cmd=0x%02X\n", desc->wr_len_cmd);
+ dev_dbg(dev, "\trd_len= 0x%02X\n", desc->rd_len);
+ dev_dbg(dev, "\tcontrol= 0x%02X\n", desc->control);
+ dev_dbg(dev, "\tstatus= 0x%02X\n", desc->status);
+ dev_dbg(dev, "\tretry= 0x%02X\n", desc->retry);
+ dev_dbg(dev, "\trxbytes= 0x%02X\n", desc->rxbytes);
+ dev_dbg(dev, "\ttxbytes= 0x%02X\n", desc->txbytes);
+ dev_dbg(dev, "\tdptr_low= 0x%08X\n", desc->dptr_low);
+ dev_dbg(dev, "\tdptr_high= 0x%08X\n", desc->dptr_high);
+}
+/**
+ * ismt_desc_dump() - dump the contents of a descriptor for debug purposes
+ * @priv: iSMT private data
+ */
+static void ismt_desc_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+ struct ismt_desc *desc = &priv->hw[priv->head];
+
+ dev_dbg(dev, "Dump of the descriptor struct: 0x%X\n", priv->head);
+ __ismt_desc_dump(dev, desc);
+}
+
+/**
+ * ismt_gen_reg_dump() - dump the iSMT General Registers
+ * @priv: iSMT private data
+ */
+static void ismt_gen_reg_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+
+ dev_dbg(dev, "Dump of the iSMT General Registers\n");
+ dev_dbg(dev, " GCTRL.... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_GCTRL,
+ readl(priv->smba + ISMT_GR_GCTRL));
+ dev_dbg(dev, " SMTICL... : (0x%p)=0x%016llX\n",
+ priv->smba + ISMT_GR_SMTICL,
+ (long long unsigned int)readq(priv->smba + ISMT_GR_SMTICL));
+ dev_dbg(dev, " ERRINTMSK : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRINTMSK,
+ readl(priv->smba + ISMT_GR_ERRINTMSK));
+ dev_dbg(dev, " ERRAERMSK : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRAERMSK,
+ readl(priv->smba + ISMT_GR_ERRAERMSK));
+ dev_dbg(dev, " ERRSTS... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRSTS,
+ readl(priv->smba + ISMT_GR_ERRSTS));
+ dev_dbg(dev, " ERRINFO.. : (0x%p)=0x%X\n",
+ priv->smba + ISMT_GR_ERRINFO,
+ readl(priv->smba + ISMT_GR_ERRINFO));
+}
+
+/**
+ * ismt_mstr_reg_dump() - dump the iSMT Master Registers
+ * @priv: iSMT private data
+ */
+static void ismt_mstr_reg_dump(struct ismt_priv *priv)
+{
+ struct device *dev = &priv->pci_dev->dev;
+
+ dev_dbg(dev, "Dump of the iSMT Master Registers\n");
+ dev_dbg(dev, " MDBA..... : (0x%p)=0x%016llX\n",
+ priv->smba + ISMT_MSTR_MDBA,
+ (long long unsigned int)readq(priv->smba + ISMT_MSTR_MDBA));
+ dev_dbg(dev, " MCTRL.... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MCTRL,
+ readl(priv->smba + ISMT_MSTR_MCTRL));
+ dev_dbg(dev, " MSTS..... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MSTS,
+ readl(priv->smba + ISMT_MSTR_MSTS));
+ dev_dbg(dev, " MDS...... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_MDS,
+ readl(priv->smba + ISMT_MSTR_MDS));
+ dev_dbg(dev, " RPOLICY.. : (0x%p)=0x%X\n",
+ priv->smba + ISMT_MSTR_RPOLICY,
+ readl(priv->smba + ISMT_MSTR_RPOLICY));
+ dev_dbg(dev, " SPGT..... : (0x%p)=0x%X\n",
+ priv->smba + ISMT_SPGT,
+ readl(priv->smba + ISMT_SPGT));
+}
+
+/**
+ * ismt_submit_desc() - add a descriptor to the ring
+ * @priv: iSMT private data
+ */
+static void ismt_submit_desc(struct ismt_priv *priv)
+{
+ uint fmhp;
+ uint val;
+
+ ismt_desc_dump(priv);
+ ismt_gen_reg_dump(priv);
+ ismt_mstr_reg_dump(priv);
+
+ /* Set the FMHP (Firmware Master Head Pointer)*/
+ fmhp = ((priv->head + 1) % ISMT_DESC_ENTRIES) << 16;
+ val = readl(priv->smba + ISMT_MSTR_MCTRL);
+ writel((val & ~ISMT_MCTRL_FMHP) | fmhp,
+ priv->smba + ISMT_MSTR_MCTRL);
+
+ /* Set the start bit */
+ val = readl(priv->smba + ISMT_MSTR_MCTRL);
+ writel(val | ISMT_MCTRL_SS,
+ priv->smba + ISMT_MSTR_MCTRL);
+}
+
+/**
+ * ismt_process_desc() - handle the completion of the descriptor
+ * @desc: the iSMT hardware descriptor
+ * @data: data buffer from the upper layer
+ * @priv: ismt_priv struct holding our dma buffer
+ * @size: SMBus transaction type
+ * @read_write: flag to indicate if this is a read or write
+ */
+static int ismt_process_desc(const struct ismt_desc *desc,
+ union i2c_smbus_data *data,
+ struct ismt_priv *priv, int size,
+ char read_write)
+{
+ u8 *dma_buffer = priv->dma_buffer;
+
+ dev_dbg(&priv->pci_dev->dev, "Processing completed descriptor\n");
+ __ismt_desc_dump(&priv->pci_dev->dev, desc);
+
+ if (desc->status & ISMT_DESC_SCS) {
+ if (read_write == I2C_SMBUS_WRITE &&
+ size != I2C_SMBUS_PROC_CALL)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = dma_buffer[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = dma_buffer[0] | (dma_buffer[1] << 8);
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
+ }
+ return 0;
+ }
+
+ if (likely(desc->status & ISMT_DESC_NAK))
+ return -ENXIO;
+
+ if (desc->status & ISMT_DESC_CRC)
+ return -EBADMSG;
+
+ if (desc->status & ISMT_DESC_COL)
+ return -EAGAIN;
+
+ if (desc->status & ISMT_DESC_LPR)
+ return -EPROTO;
+
+ if (desc->status & (ISMT_DESC_DLTO | ISMT_DESC_CLTO))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+/**
+ * ismt_access() - process an SMBus command
+ * @adap: the i2c host adapter
+ * @addr: address of the i2c/SMBus target
+ * @flags: command options
+ * @read_write: read from or write to device
+ * @command: the i2c/SMBus command to issue
+ * @size: SMBus transaction type
+ * @data: read/write data buffer
+ */
+static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ int ret;
+ dma_addr_t dma_addr = 0; /* address of the data buffer */
+ u8 dma_size = 0;
+ enum dma_data_direction dma_direction = 0;
+ struct ismt_desc *desc;
+ struct ismt_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = &priv->pci_dev->dev;
+
+ desc = &priv->hw[priv->head];
+
+ /* Initialize the descriptor */
+ memset(desc, 0, sizeof(struct ismt_desc));
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+
+ /* Initialize common control bits */
+ if (likely(priv->using_msi))
+ desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
+ else
+ desc->control = ISMT_DESC_FAIR;
+
+ if ((flags & I2C_CLIENT_PEC) && (size != I2C_SMBUS_QUICK)
+ && (size != I2C_SMBUS_I2C_BLOCK_DATA))
+ desc->control |= ISMT_DESC_PEC;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ dev_dbg(dev, "I2C_SMBUS_QUICK\n");
+ break;
+
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /*
+ * Send Byte
+ * The command field contains the write data
+ */
+ dev_dbg(dev, "I2C_SMBUS_BYTE: WRITE\n");
+ desc->control |= ISMT_DESC_CWRL;
+ desc->wr_len_cmd = command;
+ } else {
+ /* Receive Byte */
+ dev_dbg(dev, "I2C_SMBUS_BYTE: READ\n");
+ dma_size = 1;
+ dma_direction = DMA_FROM_DEVICE;
+ desc->rd_len = 1;
+ }
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /*
+ * Write Byte
+ * Command plus 1 data byte
+ */
+ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: WRITE\n");
+ desc->wr_len_cmd = 2;
+ dma_size = 2;
+ dma_direction = DMA_TO_DEVICE;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->byte;
+ } else {
+ /* Read Byte */
+ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: READ\n");
+ desc->control |= ISMT_DESC_CWRL;
+ desc->wr_len_cmd = command;
+ desc->rd_len = 1;
+ dma_size = 1;
+ dma_direction = DMA_FROM_DEVICE;
+ }
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Write Word */
+ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: WRITE\n");
+ desc->wr_len_cmd = 3;
+ dma_size = 3;
+ dma_direction = DMA_TO_DEVICE;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->word & 0xff;
+ priv->dma_buffer[2] = data->word >> 8;
+ } else {
+ /* Read Word */
+ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: READ\n");
+ desc->wr_len_cmd = command;
+ desc->control |= ISMT_DESC_CWRL;
+ desc->rd_len = 2;
+ dma_size = 2;
+ dma_direction = DMA_FROM_DEVICE;
+ }
+ break;
+
+ case I2C_SMBUS_PROC_CALL:
+ dev_dbg(dev, "I2C_SMBUS_PROC_CALL\n");
+ desc->wr_len_cmd = 3;
+ desc->rd_len = 2;
+ dma_size = 3;
+ dma_direction = DMA_BIDIRECTIONAL;
+ priv->dma_buffer[0] = command;
+ priv->dma_buffer[1] = data->word & 0xff;
+ priv->dma_buffer[2] = data->word >> 8;
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Block Write */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
+ dma_size = data->block[0] + 1;
+ dma_direction = DMA_TO_DEVICE;
+ desc->wr_len_cmd = dma_size;
+ desc->control |= ISMT_DESC_BLK;
+ priv->dma_buffer[0] = command;
+ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size);
+ } else {
+ /* Block Read */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n");
+ dma_size = I2C_SMBUS_BLOCK_MAX;
+ dma_direction = DMA_FROM_DEVICE;
+ desc->rd_len = dma_size;
+ desc->wr_len_cmd = command;
+ desc->control |= (ISMT_DESC_BLK | ISMT_DESC_CWRL);
+ }
+ break;
+
+ default:
+ dev_err(dev, "Unsupported transaction %d\n",
+ size);
+ return -EOPNOTSUPP;
+ }
+
+ /* map the data buffer */
+ if (dma_size != 0) {
+ dev_dbg(dev, " dev=%p\n", dev);
+ dev_dbg(dev, " data=%p\n", data);
+ dev_dbg(dev, " dma_buffer=%p\n", priv->dma_buffer);
+ dev_dbg(dev, " dma_size=%d\n", dma_size);
+ dev_dbg(dev, " dma_direction=%d\n", dma_direction);
+
+ dma_addr = dma_map_single(dev,
+ priv->dma_buffer,
+ dma_size,
+ dma_direction);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Error in mapping dma buffer %p\n",
+ priv->dma_buffer);
+ return -EIO;
+ }
+
+ dev_dbg(dev, " dma_addr = 0x%016llX\n",
+ (unsigned long long)dma_addr);
+
+ desc->dptr_low = lower_32_bits(dma_addr);
+ desc->dptr_high = upper_32_bits(dma_addr);
+ }
+
+ INIT_COMPLETION(priv->cmp);
+
+ /* Add the descriptor */
+ ismt_submit_desc(priv);
+
+ /* Now we wait for interrupt completion, 1s */
+ ret = wait_for_completion_timeout(&priv->cmp, HZ*1);
+
+ /* unmap the data buffer */
+ if (dma_size != 0)
+ dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+
+ if (unlikely(!ret)) {
+ dev_err(dev, "completion wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* do any post processing of the descriptor here */
+ ret = ismt_process_desc(desc, data, priv, size, read_write);
+
+out:
+ /* Update the ring pointer */
+ priv->head++;
+ priv->head %= ISMT_DESC_ENTRIES;
+
+ return ret;
+}
+
+/**
+ * ismt_func() - report which i2c commands are supported by this adapter
+ * @adap: the i2c host adapter
+ */
+static u32 ismt_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_QUICK |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_PEC;
+}
+
+/**
+ * smbus_algorithm - the adapter algorithm and supported functionality
+ * @smbus_xfer: the adapter algorithm
+ * @functionality: functionality supported by the adapter
+ */
+static const struct i2c_algorithm smbus_algorithm = {
+ .smbus_xfer = ismt_access,
+ .functionality = ismt_func,
+};
+
+/**
+ * ismt_handle_isr() - interrupt handler bottom half
+ * @priv: iSMT private data
+ */
+static irqreturn_t ismt_handle_isr(struct ismt_priv *priv)
+{
+ complete(&priv->cmp);
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * ismt_do_interrupt() - IRQ interrupt handler
+ * @vec: interrupt vector
+ * @data: iSMT private data
+ */
+static irqreturn_t ismt_do_interrupt(int vec, void *data)
+{
+ u32 val;
+ struct ismt_priv *priv = data;
+
+ /*
+ * Check whether this is our interrupt; return IRQ_NONE if not,
+ * since the interrupt line may be shared.
+ */
+ val = readl(priv->smba + ISMT_MSTR_MSTS);
+
+ if (!(val & (ISMT_MSTS_MIS | ISMT_MSTS_MEIS)))
+ return IRQ_NONE;
+ else
+ writel(val | ISMT_MSTS_MIS | ISMT_MSTS_MEIS,
+ priv->smba + ISMT_MSTR_MSTS);
+
+ return ismt_handle_isr(priv);
+}
+
+/**
+ * ismt_do_msi_interrupt() - MSI interrupt handler
+ * @vec: interrupt vector
+ * @data: iSMT private data
+ */
+static irqreturn_t ismt_do_msi_interrupt(int vec, void *data)
+{
+ return ismt_handle_isr(data);
+}
+
+/**
+ * ismt_hw_init() - initialize the iSMT hardware
+ * @priv: iSMT private data
+ */
+static void ismt_hw_init(struct ismt_priv *priv)
+{
+ u32 val;
+ struct device *dev = &priv->pci_dev->dev;
+
+ /* initialize the Master Descriptor Base Address (MDBA) */
+ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+
+ /* initialize the Master Control Register (MCTRL) */
+ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
+
+ /* initialize the Master Status Register (MSTS) */
+ writel(0, priv->smba + ISMT_MSTR_MSTS);
+
+ /* initialize the Master Descriptor Size (MDS) */
+ val = readl(priv->smba + ISMT_MSTR_MDS);
+ writel((val & ~ISMT_MDS_MASK) | (ISMT_DESC_ENTRIES - 1),
+ priv->smba + ISMT_MSTR_MDS);
+
+ /*
+ * Set the SMBus speed (could use this for slow HW debuggers)
+ */
+
+ val = readl(priv->smba + ISMT_SPGT);
+
+ switch (bus_speed) {
+ case 0:
+ break;
+
+ case 80:
+ dev_dbg(dev, "Setting SMBus clock to 80 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_80K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 100:
+ dev_dbg(dev, "Setting SMBus clock to 100 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_100K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 400:
+ dev_dbg(dev, "Setting SMBus clock to 400 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_400K),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ case 1000:
+ dev_dbg(dev, "Setting SMBus clock to 1000 kHz\n");
+ writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_1M),
+ priv->smba + ISMT_SPGT);
+ break;
+
+ default:
+ dev_warn(dev, "Invalid SMBus clock speed, only 0, 80, 100, 400, and 1000 are valid\n");
+ break;
+ }
+
+ val = readl(priv->smba + ISMT_SPGT);
+
+ switch (val & ISMT_SPGT_SPD_MASK) {
+ case ISMT_SPGT_SPD_80K:
+ bus_speed = 80;
+ break;
+ case ISMT_SPGT_SPD_100K:
+ bus_speed = 100;
+ break;
+ case ISMT_SPGT_SPD_400K:
+ bus_speed = 400;
+ break;
+ case ISMT_SPGT_SPD_1M:
+ bus_speed = 1000;
+ break;
+ }
+ dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed);
+}
+
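Editorial note: the bus_speed variable switched on above is not declared anywhere in this hunk; it behaves like a module parameter. A guessed declaration, purely for illustration (the name comes from the code above, the type and permissions are assumptions):

/* Assumed to appear earlier in the file; not shown in this diff. */
static unsigned int bus_speed;
module_param(bus_speed, uint, S_IRUGO);
MODULE_PARM_DESC(bus_speed, "Bus speed in kHz: 0 (keep BIOS default), 80, 100, 400 or 1000");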
+/**
+ * ismt_dev_init() - initialize the iSMT data structures
+ * @priv: iSMT private data
+ */
+static int ismt_dev_init(struct ismt_priv *priv)
+{
+ /* allocate memory for the descriptor */
+ priv->hw = dmam_alloc_coherent(&priv->pci_dev->dev,
+ (ISMT_DESC_ENTRIES
+ * sizeof(struct ismt_desc)),
+ &priv->io_rng_dma,
+ GFP_KERNEL);
+ if (!priv->hw)
+ return -ENOMEM;
+
+ memset(priv->hw, 0, (ISMT_DESC_ENTRIES * sizeof(struct ismt_desc)));
+
+ priv->head = 0;
+ init_completion(&priv->cmp);
+
+ return 0;
+}
+
+/**
+ * ismt_int_init() - initialize interrupts
+ * @priv: iSMT private data
+ */
+static int ismt_int_init(struct ismt_priv *priv)
+{
+ int err;
+
+ /* Try using MSI interrupts */
+ err = pci_enable_msi(priv->pci_dev);
+ if (err) {
+ dev_warn(&priv->pci_dev->dev,
+ "Unable to use MSI interrupts, falling back to legacy\n");
+ goto intx;
+ }
+
+ err = devm_request_irq(&priv->pci_dev->dev,
+ priv->pci_dev->irq,
+ ismt_do_msi_interrupt,
+ 0,
+ "ismt-msi",
+ priv);
+ if (err) {
+ pci_disable_msi(priv->pci_dev);
+ goto intx;
+ }
+
+ priv->using_msi = true;
+ goto done;
+
+ /* Try using legacy interrupts */
+intx:
+ err = devm_request_irq(&priv->pci_dev->dev,
+ priv->pci_dev->irq,
+ ismt_do_interrupt,
+ IRQF_SHARED,
+ "ismt-intx",
+ priv);
+ if (err) {
+ dev_err(&priv->pci_dev->dev, "no usable interrupts\n");
+ return -ENODEV;
+ }
+
+ priv->using_msi = false;
+
+done:
+ return 0;
+}
+
+static struct pci_driver ismt_driver;
+
+/**
+ * ismt_probe() - probe for iSMT devices
+ * @pdev: PCI-Express device
+ * @id: PCI-Express device ID
+ */
+static int
+ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err;
+ struct ismt_priv *priv;
+ unsigned long start, len;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, priv);
+ i2c_set_adapdata(&priv->adapter, priv);
+ priv->adapter.owner = THIS_MODULE;
+
+ priv->adapter.class = I2C_CLASS_HWMON;
+
+ priv->adapter.algo = &smbus_algorithm;
+
+ /* set up the sysfs linkage to our parent device */
+ priv->adapter.dev.parent = &pdev->dev;
+
+ /* number of retries on lost arbitration */
+ priv->adapter.retries = ISMT_MAX_RETRIES;
+
+ priv->pci_dev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable SMBus PCI device (%d)\n",
+ err);
+ return err;
+ }
+
+ /* enable bus mastering */
+ pci_set_master(pdev);
+
+ /* Determine the address of the SMBus area */
+ start = pci_resource_start(pdev, SMBBAR);
+ len = pci_resource_len(pdev, SMBBAR);
+ if (!start || !len) {
+ dev_err(&pdev->dev,
+ "SMBus base address uninitialized, upgrade BIOS\n");
+ return -ENODEV;
+ }
+
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "SMBus iSMT adapter at %lx", start);
+
+ dev_dbg(&priv->pci_dev->dev, " start=0x%lX\n", start);
+ dev_dbg(&priv->pci_dev->dev, " len=0x%lX\n", len);
+
+ err = acpi_check_resource_conflict(&pdev->resource[SMBBAR]);
+ if (err) {
+ dev_err(&pdev->dev, "ACPI resource conflict!\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request SMBus region 0x%lx-0x%lx\n",
+ start, start + len);
+ return err;
+ }
+
+ priv->smba = pcim_iomap(pdev, SMBBAR, len);
+ if (!priv->smba) {
+ dev_err(&pdev->dev, "Unable to ioremap SMBus BAR\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32)) != 0)) {
+ dev_err(&pdev->dev, "pci_set_dma_mask fail %p\n",
+ pdev);
+ goto fail;
+ }
+ }
+
+ err = ismt_dev_init(priv);
+ if (err)
+ goto fail;
+
+ ismt_hw_init(priv);
+
+ err = ismt_int_init(priv);
+ if (err)
+ goto fail;
+
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add SMBus iSMT adapter\n");
+ err = -ENODEV;
+ goto fail;
+ }
+ return 0;
+
+fail:
+ pci_release_region(pdev, SMBBAR);
+ return err;
+}
+
+/**
+ * ismt_remove() - release driver resources
+ * @pdev: PCI-Express device
+ */
+static void ismt_remove(struct pci_dev *pdev)
+{
+ struct ismt_priv *priv = pci_get_drvdata(pdev);
+
+ i2c_del_adapter(&priv->adapter);
+ pci_release_region(pdev, SMBBAR);
+}
+
+/**
+ * ismt_suspend() - place the device in suspend
+ * @pdev: PCI-Express device
+ * @mesg: PM message
+ */
+#ifdef CONFIG_PM
+static int ismt_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
+ return 0;
+}
+
+/**
+ * ismt_resume() - PCI resume code
+ * @pdev: PCI-Express device
+ */
+static int ismt_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ return pci_enable_device(pdev);
+}
+
+#else
+
+#define ismt_suspend NULL
+#define ismt_resume NULL
+
+#endif
+
+static struct pci_driver ismt_driver = {
+ .name = "ismt_smbus",
+ .id_table = ismt_ids,
+ .probe = ismt_probe,
+ .remove = ismt_remove,
+ .suspend = ismt_suspend,
+ .resume = ismt_resume,
+};
+
+module_pci_driver(ismt_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Bill E. Brown <bill.e.brown@intel.com>");
+MODULE_DESCRIPTION("Intel SMBus Message Transport (iSMT) driver");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a69459e5c3f3..5e705ee02f4a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -696,7 +696,6 @@ static int fsl_i2c_probe(struct platform_device *op)
return result;
fail_add:
- dev_set_drvdata(&op->dev, NULL);
free_irq(i2c->irq, i2c);
fail_request:
irq_dispose_mapping(i2c->irq);
@@ -711,7 +710,6 @@ static int fsl_i2c_remove(struct platform_device *op)
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
i2c_del_adapter(&i2c->adap);
- dev_set_drvdata(&op->dev, NULL);
if (i2c->irq)
free_irq(i2c->irq, i2c);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index d6abaf2cf2e3..120f24646696 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -39,6 +39,7 @@
#define MXS_I2C_CTRL0_SET (0x04)
#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_RUN 0x20000000
#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
@@ -64,6 +65,13 @@
#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+#define MXS_I2C_DATA (0xa0)
+
+#define MXS_I2C_DEBUG0 (0xb0)
+#define MXS_I2C_DEBUG0_CLR (0xb8)
+
+#define MXS_I2C_DEBUG0_DMAREQ 0x80000000
+
#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
@@ -85,35 +93,6 @@
#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
MXS_I2C_CTRL0_MASTER_MODE)
-struct mxs_i2c_speed_config {
- uint32_t timing0;
- uint32_t timing1;
- uint32_t timing2;
-};
-
-/*
- * Timing values for the default 24MHz clock supplied into the i2c block.
- *
- * The bus can operate at 95kHz or at 400kHz with the following timing
- * register configurations. The 100kHz mode isn't present because it's
- * values are not stated in the i.MX233/i.MX28 datasheet. The 95kHz mode
- * shall be close enough replacement. Therefore when the bus is configured
- * for 100kHz operation, 95kHz timing settings are actually loaded.
- *
- * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4].
- */
-static const struct mxs_i2c_speed_config mxs_i2c_95kHz_config = {
- .timing0 = 0x00780030,
- .timing1 = 0x00800030,
- .timing2 = 0x00300030,
-};
-
-static const struct mxs_i2c_speed_config mxs_i2c_400kHz_config = {
- .timing0 = 0x000f0007,
- .timing1 = 0x001f000f,
- .timing2 = 0x00300030,
-};
-
/**
* struct mxs_i2c_dev - per device, private MXS-I2C data
*
@@ -129,7 +108,9 @@ struct mxs_i2c_dev {
struct completion cmd_complete;
int cmd_err;
struct i2c_adapter adapter;
- const struct mxs_i2c_speed_config *speed;
+
+ uint32_t timing0;
+ uint32_t timing1;
/* DMA support components */
int dma_channel;
@@ -145,9 +126,16 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
stmp_reset_block(i2c->regs);
- writel(i2c->speed->timing0, i2c->regs + MXS_I2C_TIMING0);
- writel(i2c->speed->timing1, i2c->regs + MXS_I2C_TIMING1);
- writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2);
+ /*
+ * Configure timing for the I2C block. The I2C TIMING2 register has to
+ * be programmed with this particular magic number. The rest is derived
+ * from the XTAL speed and requested I2C speed.
+ *
+ * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4].
+ */
+ writel(i2c->timing0, i2c->regs + MXS_I2C_TIMING0);
+ writel(i2c->timing1, i2c->regs + MXS_I2C_TIMING1);
+ writel(0x00300030, i2c->regs + MXS_I2C_TIMING2);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
}
@@ -298,6 +286,135 @@ write_init_pio_fail:
return -EINVAL;
}
+static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
+ MXS_I2C_DEBUG0_DMAREQ)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+ return 0;
+}
+
+static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /*
+ * We do not use interrupts in PIO mode. With the maximum
+ * transfer length limited to 8 bytes in PIO mode, the overhead
+ * of an interrupt would be too large and would negate the gain
+ * from using PIO mode in the first place.
+ */
+
+ while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
+ MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
+ i2c->regs + MXS_I2C_CTRL1_CLR);
+
+ return 0;
+}
+
+static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msg, uint32_t flags)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ uint32_t addr_data = msg->addr << 1;
+ uint32_t data = 0;
+ int i, shifts_left, ret;
+
+ /* Mute IRQs coming from this block. */
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
+
+ if (msg->flags & I2C_M_RD) {
+ addr_data |= I2C_SMBUS_READ;
+
+ /* SELECT command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_SELECT,
+ i2c->regs + MXS_I2C_CTRL0);
+
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+
+ writel(addr_data, i2c->regs + MXS_I2C_DATA);
+
+ ret = mxs_i2c_pio_wait_cplt(i2c);
+ if (ret)
+ return ret;
+
+ /* READ command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_READ | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len),
+ i2c->regs + MXS_I2C_CTRL0);
+
+ for (i = 0; i < msg->len; i++) {
+ if ((i & 3) == 0) {
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ data = readl(i2c->regs + MXS_I2C_DATA);
+ }
+ msg->buf[i] = data & 0xff;
+ data >>= 8;
+ }
+ } else {
+ addr_data |= I2C_SMBUS_WRITE;
+
+ /* WRITE command. */
+ writel(MXS_I2C_CTRL0_RUN | MXS_CMD_I2C_WRITE | flags |
+ MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1),
+ i2c->regs + MXS_I2C_CTRL0);
+
+ /*
+ * The LSB of the data word is the first byte sent across the
+ * bus; higher-order bytes follow. Hence the filling scheme
+ * below (see the worked example after this function).
+ */
+ data = addr_data << 24;
+ for (i = 0; i < msg->len; i++) {
+ data >>= 8;
+ data |= (msg->buf[i] << 24);
+ if ((i & 3) == 2) {
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+ }
+
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left) {
+ data >>= shifts_left;
+ ret = mxs_i2c_pio_wait_dmareq(i2c);
+ if (ret)
+ return ret;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+ }
+
+ ret = mxs_i2c_pio_wait_cplt(i2c);
+ if (ret)
+ return ret;
+
+ /* Clear any dangling IRQs and re-enable interrupts. */
+ writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+
+ return 0;
+}
+
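A worked example of the packing above (editorial illustration, not part of the patch), for a hypothetical write of the three bytes 0x11 0x22 0x33 with a shifted write address byte of 0xa0:

data = 0xa0 << 24                      = 0xa0000000
i = 0: data = (data >> 8) | 0x11 << 24 = 0x11a00000
i = 1: data = (data >> 8) | 0x22 << 24 = 0x2211a000
i = 2: data = (data >> 8) | 0x33 << 24 = 0x332211a0   -> written, since (i & 3) == 2

After the loop i = 3, so shifts_left = 24 - (3 & 3) * 8 = 0 and no trailing word
is written; the LSB of the written word, 0xa0, is the first byte on the bus.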
/*
* Low level master read/write transaction.
*/
@@ -316,24 +433,37 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg->len == 0)
return -EINVAL;
- INIT_COMPLETION(i2c->cmd_complete);
- i2c->cmd_err = 0;
-
- ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
- if (ret)
- return ret;
+ /*
+ * The current boundary for selecting between the PIO and DMA
+ * transfer methods is 8 bytes: transfers shorter than 8 bytes are
+ * transferred using PIO mode, while longer transfers use DMA. The
+ * 8 byte boundary is based on empirical measurement and a lot of
+ * previous frobbing.
+ */
+ if (msg->len < 8) {
+ ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
+ if (ret)
+ mxs_i2c_reset(i2c);
+ } else {
+ i2c->cmd_err = 0;
+ INIT_COMPLETION(i2c->cmd_complete);
+ ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
+ if (ret)
+ return ret;
- ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
- if (ret == 0)
- goto timeout;
+ if (ret == 0)
+ goto timeout;
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
- if (i2c->cmd_err == -ENXIO)
- mxs_i2c_reset(i2c);
+ ret = i2c->cmd_err;
+ }
- dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+ dev_dbg(i2c->dev, "Done with err=%d\n", ret);
- return i2c->cmd_err;
+ return ret;
timeout:
dev_dbg(i2c->dev, "Timeout!\n");
@@ -403,6 +533,43 @@ static bool mxs_i2c_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, int speed)
+{
+ /* The I2C block clock runs at 24 MHz */
+ const uint32_t clk = 24000000;
+ uint32_t base;
+ uint16_t high_count, low_count, rcv_count, xmit_count;
+ struct device *dev = i2c->dev;
+
+ if (speed > 540000) {
+ dev_warn(dev, "Speed too high (%d Hz), using 540 kHz\n", speed);
+ speed = 540000;
+ } else if (speed < 12000) {
+ dev_warn(dev, "Speed too low (%d Hz), using 12 kHz\n", speed);
+ speed = 12000;
+ }
+
+ /*
+ * The timing derivation algorithm. There is no documentation for this
+ * algorithm available; it was derived with a scope by fiddling with
+ * constants until the result observed on the scope was good enough
+ * for 20kHz, 50kHz, 100kHz, 200kHz, 300kHz and 400kHz. It is
+ * reasonable to assume the algorithm works for other frequencies as
+ * well (see the worked example after this function).
+ *
+ * Note it was necessary to cap the frequency on both ends, as it is
+ * not possible to configure a completely arbitrary frequency for the
+ * I2C bus clock.
+ */
+ base = ((clk / speed) - 38) / 2;
+ high_count = base + 3;
+ low_count = base - 3;
+ rcv_count = (high_count * 3) / 4;
+ xmit_count = low_count / 4;
+
+ i2c->timing0 = (high_count << 16) | rcv_count;
+ i2c->timing1 = (low_count << 16) | xmit_count;
+}
+
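A worked example of the derivation above (editorial illustration, not part of the patch): requesting the default 100 kHz against the fixed 24 MHz block clock gives

base       = ((24000000 / 100000) - 38) / 2 = 101
high_count = 101 + 3                        = 104
low_count  = 101 - 3                        =  98
rcv_count  = (104 * 3) / 4                  =  78
xmit_count = 98 / 4                         =  24

timing0 = (104 << 16) | 78 = 0x0068004e
timing1 = ( 98 << 16) | 24 = 0x00620018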
static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
{
uint32_t speed;
@@ -422,12 +589,12 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
}
ret = of_property_read_u32(node, "clock-frequency", &speed);
- if (ret)
+ if (ret) {
dev_warn(dev, "No I2C speed selected, using 100kHz\n");
- else if (speed == 400000)
- i2c->speed = &mxs_i2c_400kHz_config;
- else if (speed != 100000)
- dev_warn(dev, "Unsupported I2C speed selected, using 100kHz\n");
+ speed = 100000;
+ }
+
+ mxs_i2c_derive_timing(i2c, speed);
return 0;
}
@@ -471,7 +638,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
i2c->dev = dev;
- i2c->speed = &mxs_i2c_95kHz_config;
init_completion(&i2c->cmd_complete);
@@ -531,8 +697,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index adac8542771d..ac88f4000cc2 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -60,7 +60,7 @@
#include <linux/io.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver");
@@ -188,9 +188,9 @@ static int nforce2_check_status(struct i2c_adapter *adap)
}
/* Return negative errno on error */
-static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
+static s32 nforce2_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data * data)
+ u8 command, int size, union i2c_smbus_data *data)
{
struct nforce2_smbus *smbus = adap->algo_data;
unsigned char protocol, pec;
@@ -202,56 +202,54 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
pec = (flags & I2C_CLIENT_PEC) ? NVIDIA_SMB_PRTCL_PEC : 0;
switch (size) {
+ case I2C_SMBUS_QUICK:
+ protocol |= NVIDIA_SMB_PRTCL_QUICK;
+ read_write = I2C_SMBUS_WRITE;
+ break;
- case I2C_SMBUS_QUICK:
- protocol |= NVIDIA_SMB_PRTCL_QUICK;
- read_write = I2C_SMBUS_WRITE;
- break;
-
- case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(command, NVIDIA_SMB_CMD);
- protocol |= NVIDIA_SMB_PRTCL_BYTE;
- break;
-
- case I2C_SMBUS_BYTE_DATA:
- outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, NVIDIA_SMB_DATA);
- protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA;
- break;
-
- case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word, NVIDIA_SMB_DATA);
- outb_p(data->word >> 8, NVIDIA_SMB_DATA+1);
- }
- protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
- break;
-
- case I2C_SMBUS_BLOCK_DATA:
- outb_p(command, NVIDIA_SMB_CMD);
- if (read_write == I2C_SMBUS_WRITE) {
- len = data->block[0];
- if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
- dev_err(&adap->dev,
- "Transaction failed "
- "(requested block size: %d)\n",
- len);
- return -EINVAL;
- }
- outb_p(len, NVIDIA_SMB_BCNT);
- for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
- outb_p(data->block[i + 1],
- NVIDIA_SMB_DATA+i);
+ protocol |= NVIDIA_SMB_PRTCL_BYTE;
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE)
+ outb_p(data->byte, NVIDIA_SMB_DATA);
+ protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA;
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(data->word, NVIDIA_SMB_DATA);
+ outb_p(data->word >> 8, NVIDIA_SMB_DATA + 1);
+ }
+ protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(&adap->dev,
+ "Transaction failed (requested block size: %d)\n",
+ len);
+ return -EINVAL;
}
- protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
- break;
+ outb_p(len, NVIDIA_SMB_BCNT);
+ for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
+ outb_p(data->block[i + 1],
+ NVIDIA_SMB_DATA + i);
+ }
+ protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
+ break;
- default:
- dev_err(&adap->dev, "Unsupported transaction %d\n", size);
- return -EOPNOTSUPP;
+ default:
+ dev_err(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR);
@@ -265,28 +263,28 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
return 0;
switch (size) {
-
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- data->byte = inb_p(NVIDIA_SMB_DATA);
- break;
-
- case I2C_SMBUS_WORD_DATA:
- data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8);
- break;
-
- case I2C_SMBUS_BLOCK_DATA:
- len = inb_p(NVIDIA_SMB_BCNT);
- if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
- dev_err(&adap->dev, "Transaction failed "
- "(received block size: 0x%02x)\n",
- len);
- return -EPROTO;
- }
- for (i = 0; i < len; i++)
- data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i);
- data->block[0] = len;
- break;
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = inb_p(NVIDIA_SMB_DATA);
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ data->word = inb_p(NVIDIA_SMB_DATA) |
+ (inb_p(NVIDIA_SMB_DATA + 1) << 8);
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ len = inb_p(NVIDIA_SMB_BCNT);
+ if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(&adap->dev,
+ "Transaction failed (received block size: 0x%02x)\n",
+ len);
+ return -EPROTO;
+ }
+ for (i = 0; i < len; i++)
+ data->block[i + 1] = inb_p(NVIDIA_SMB_DATA + i);
+ data->block[0] = len;
+ break;
}
return 0;
@@ -299,7 +297,7 @@ static u32 nforce2_func(struct i2c_adapter *adapter)
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_PEC |
- (((struct nforce2_smbus*)adapter->algo_data)->blockops ?
+ (((struct nforce2_smbus *)adapter->algo_data)->blockops ?
I2C_FUNC_SMBUS_BLOCK_DATA : 0);
}
@@ -327,7 +325,7 @@ static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
{ 0 }
};
-MODULE_DEVICE_TABLE (pci, nforce2_ids);
+MODULE_DEVICE_TABLE(pci, nforce2_ids);
static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
@@ -377,7 +375,8 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
release_region(smbus->base, smbus->size);
return error;
}
- dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n", smbus->base);
+ dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n",
+ smbus->base);
return 0;
}
@@ -388,11 +387,12 @@ static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
int res1, res2;
/* we support 2 SMBus adapters */
- if (!(smbuses = kzalloc(2*sizeof(struct nforce2_smbus), GFP_KERNEL)))
+ smbuses = kzalloc(2 * sizeof(struct nforce2_smbus), GFP_KERNEL);
+ if (!smbuses)
return -ENOMEM;
pci_set_drvdata(dev, smbuses);
- switch(dev->device) {
+ switch (dev->device) {
case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS:
case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS:
case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 8b2ffcf45322..650293ff4d62 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -26,6 +26,7 @@
#include <linux/platform_data/i2c-nomadik.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "nmk-i2c"
@@ -147,6 +148,10 @@ struct i2c_nmk_client {
* @stop: stop condition.
* @xfer_complete: acknowledge completion for a I2C message.
* @result: controller propogated result.
+ * @pinctrl: pinctrl handle.
+ * @pins_default: default state for the pins.
+ * @pins_idle: idle state for the pins.
+ * @pins_sleep: sleep state for the pins.
* @busy: Busy doing transfer.
*/
struct nmk_i2c_dev {
@@ -160,6 +165,11 @@ struct nmk_i2c_dev {
int stop;
struct completion xfer_complete;
int result;
+ /* Three pin states - default, idle & sleep */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_idle;
+ struct pinctrl_state *pins_sleep;
bool busy;
};
@@ -402,8 +412,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
- u32 mcr;
- u32 irq_mask = 0;
+ u32 mcr, irq_mask;
int timeout;
mcr = load_i2c_mcr_reg(dev, flags);
@@ -472,8 +481,7 @@ static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
- u32 mcr;
- u32 irq_mask = 0;
+ u32 mcr, irq_mask;
int timeout;
mcr = load_i2c_mcr_reg(dev, flags);
@@ -636,6 +644,15 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
goto out_clk;
}
+ /* Optionally enable pins to be muxed in and configured */
+ if (!IS_ERR(dev->pins_default)) {
+ status = pinctrl_select_state(dev->pinctrl,
+ dev->pins_default);
+ if (status)
+ dev_err(&dev->adev->dev,
+ "could not set default pins\n");
+ }
+
status = init_hw(dev);
if (status)
goto out;
@@ -663,6 +680,15 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
out:
clk_disable_unprepare(dev->clk);
out_clk:
+ /* Optionally let pins go into idle state */
+ if (!IS_ERR(dev->pins_idle)) {
+ status = pinctrl_select_state(dev->pinctrl,
+ dev->pins_idle);
+ if (status)
+ dev_err(&dev->adev->dev,
+ "could not set pins to idle state\n");
+ }
+
pm_runtime_put_sync(&dev->adev->dev);
dev->busy = false;
@@ -703,8 +729,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
struct nmk_i2c_dev *dev = arg;
u32 tft, rft;
u32 count;
- u32 misr;
- u32 src = 0;
+ u32 misr, src;
/* load Tx FIFO and Rx FIFO threshold values */
tft = readl(dev->virtbase + I2C_TFTR);
@@ -857,15 +882,41 @@ static int nmk_i2c_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ int ret;
if (nmk_i2c->busy)
return -EBUSY;
+ if (!IS_ERR(nmk_i2c->pins_sleep)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_sleep);
+ if (ret)
+ dev_err(dev, "could not set pins to sleep state\n");
+ }
+
return 0;
}
static int nmk_i2c_resume(struct device *dev)
{
+ struct amba_device *adev = to_amba_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ int ret;
+
+ /* First go to the default state */
+ if (!IS_ERR(nmk_i2c->pins_default)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_default);
+ if (ret)
+ dev_err(dev, "could not set pins to default state\n");
+ }
+ /* Then let's idle the pins until the next transfer happens */
+ if (!IS_ERR(nmk_i2c->pins_idle)) {
+ ret = pinctrl_select_state(nmk_i2c->pinctrl,
+ nmk_i2c->pins_idle);
+ if (ret)
+ dev_err(dev, "could not set pins to idle state\n");
+ }
return 0;
}
#else
@@ -953,6 +1004,40 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->adev = adev;
amba_set_drvdata(adev, dev);
+ dev->pinctrl = devm_pinctrl_get(&adev->dev);
+ if (IS_ERR(dev->pinctrl)) {
+ ret = PTR_ERR(dev->pinctrl);
+ goto err_pinctrl;
+ }
+
+ dev->pins_default = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins_default)) {
+ dev_err(&adev->dev, "could not get default pinstate\n");
+ } else {
+ ret = pinctrl_select_state(dev->pinctrl,
+ dev->pins_default);
+ if (ret)
+ dev_dbg(&adev->dev, "could not set default pinstate\n");
+ }
+
+ dev->pins_idle = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(dev->pins_idle)) {
+ dev_dbg(&adev->dev, "could not get idle pinstate\n");
+ } else {
+ /* If possible, let's go to idle until the first transfer */
+ ret = pinctrl_select_state(dev->pinctrl,
+ dev->pins_idle);
+ if (ret)
+ dev_dbg(&adev->dev, "could not set idle pinstate\n");
+ }
+
+ dev->pins_sleep = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(dev->pins_sleep))
+ dev_dbg(&adev->dev, "could not get sleep pinstate\n");
+
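Aside on the design choice (editorial note, not part of the patch): the lookup/select calls are open-coded here because the driver also wants to hop to the optional "idle" and "sleep" states at runtime. A consumer that only ever needs the default state could use the one-shot helper instead; a minimal, hypothetical sketch:

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* Hypothetical consumer: grab and apply only the default pin state. */
static int example_claim_pins(struct device *dev)
{
        struct pinctrl *p = devm_pinctrl_get_select_default(dev);

        return IS_ERR(p) ? PTR_ERR(p) : 0;
}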
dev->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
if (!dev->virtbase) {
ret = -ENOMEM;
@@ -1020,8 +1105,8 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
err_irq:
iounmap(dev->virtbase);
err_no_ioremap:
- amba_set_drvdata(adev, NULL);
kfree(dev);
+ err_pinctrl:
err_no_mem:
return ret;
@@ -1044,7 +1129,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
release_mem_region(res->start, resource_size(res));
clk_put(dev->clk);
pm_runtime_disable(&adev->dev);
- amba_set_drvdata(adev, NULL);
kfree(dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index a337d08a392d..0e1f8245e768 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -332,7 +332,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
&i2c->reg_io_width);
match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
- if (match && (int)match->data == TYPE_GRLIB) {
+ if (match && (long)match->data == TYPE_GRLIB) {
dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
i2c->setreg = oc_setreg_grlib;
i2c->getreg = oc_getreg_grlib;
@@ -452,7 +452,6 @@ static int ocores_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 484ca771fdff..935585ef4d39 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -595,7 +595,7 @@ static int octeon_i2c_probe(struct platform_device *pdev)
result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
- goto fail_add;
+ goto out;
}
dev_info(i2c->dev, "version %s\n", DRV_VERSION);
@@ -603,8 +603,6 @@ static int octeon_i2c_probe(struct platform_device *pdev)
return 0;
-fail_add:
- platform_set_drvdata(pdev, NULL);
out:
return result;
};
@@ -614,7 +612,6 @@ static int octeon_i2c_remove(struct platform_device *pdev)
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3ee188679cf1..e02f9e36a7b2 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1260,7 +1260,6 @@ err_unuse_clocks:
pm_runtime_put(dev->dev);
pm_runtime_disable(&pdev->dev);
err_free_mem:
- platform_set_drvdata(pdev, NULL);
return r;
}
@@ -1270,8 +1269,6 @@ static int omap_i2c_remove(struct platform_device *pdev)
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
- platform_set_drvdata(pdev, NULL);
-
i2c_del_adapter(&dev->adapter);
ret = pm_runtime_get_sync(&pdev->dev);
if (IS_ERR_VALUE(ret))
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index a30d2f613c03..aa00df14e30b 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -260,7 +260,6 @@ e_print:
static int i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 083d68cfaf0b..f6389e2c9d02 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -349,7 +349,6 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
return 0;
ret_unmap:
- platform_set_drvdata(pldev, NULL);
if (pmcmsptwi_data.irq) {
pmcmsptwi_writel(0,
pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
@@ -374,7 +373,6 @@ static int pmcmsptwi_remove(struct platform_device *pldev)
i2c_del_adapter(&pmcmsptwi_adapter);
- platform_set_drvdata(pldev, NULL);
if (pmcmsptwi_data.irq) {
pmcmsptwi_writel(0,
pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index ce4097012e97..5f39c6d8117a 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -761,7 +761,6 @@ out_clkget:
out_drvdata:
kfree(alg_data);
err_kzalloc:
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -776,7 +775,6 @@ static int i2c_pnx_remove(struct platform_device *pdev)
release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
clk_put(alg_data->clk);
kfree(alg_data);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 0dd5b334d090..da54e673449d 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -221,7 +221,6 @@ static int i2c_powermac_remove(struct platform_device *dev)
printk(KERN_WARNING
"i2c-powermac.c: Failed to remove bus %s !\n",
adapter->name);
- platform_set_drvdata(dev, NULL);
memset(adapter, 0, sizeof(*adapter));
return 0;
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index d7c512d717a7..261d7db437e2 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -223,7 +223,6 @@ static int puv3_i2c_probe(struct platform_device *pdev)
return 0;
fail_add_adapter:
- platform_set_drvdata(pdev, NULL);
kfree(adapter);
fail_nomem:
release_mem_region(mem->start, resource_size(mem));
@@ -245,7 +244,6 @@ static int puv3_i2c_remove(struct platform_device *pdev)
}
put_device(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 3d4985695aed..9639be86e53f 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -128,7 +128,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
return 0;
err_dev_add:
- pci_set_drvdata(dev, NULL);
kfree(sds);
err_mem:
pci_disable_device(dev);
@@ -141,7 +140,6 @@ static void ce4100_i2c_remove(struct pci_dev *dev)
unsigned int i;
sds = pci_get_drvdata(dev);
- pci_set_drvdata(dev, NULL);
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
platform_device_unregister(sds->pdev[i]);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 1034d93fb838..1e88e8d66c55 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1215,12 +1215,10 @@ emalloc:
return ret;
}
-static int __exit i2c_pxa_remove(struct platform_device *dev)
+static int i2c_pxa_remove(struct platform_device *dev)
{
struct pxa_i2c *i2c = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
i2c_del_adapter(&i2c->adap);
if (!i2c->use_pio)
free_irq(i2c->irq, i2c);
@@ -1269,7 +1267,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
- .remove = __exit_p(i2c_pxa_remove),
+ .remove = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c807a6d14f0c..f6b880ba1932 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -111,6 +111,8 @@ static const struct of_device_id s3c24xx_i2c_match[] = {
{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+ { .compatible = "samsung,exynos5440-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_NO_GPIO) },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
@@ -1000,8 +1002,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!i2c->pdata) {
- ret = -ENOMEM;
- goto err_noclk;
+ dev_err(&pdev->dev, "no memory for platform data\n");
+ return -ENOMEM;
}
i2c->quirks = s3c24xx_get_device_quirks(pdev);
@@ -1022,32 +1024,27 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
/* find the clock and enable it */
i2c->dev = &pdev->dev;
- i2c->clk = clk_get(&pdev->dev, "i2c");
+ i2c->clk = devm_clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = -ENOENT;
- goto err_noclk;
+ return -ENOENT;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
- clk_prepare_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
- ret = -ENOENT;
- goto err_clk;
+ return -ENOENT;
}
i2c->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(i2c->regs)) {
- ret = PTR_ERR(i2c->regs);
- goto err_clk;
- }
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
dev_dbg(&pdev->dev, "registers %p (%p)\n",
i2c->regs, res);
@@ -1064,16 +1061,18 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (i2c->pdata->cfg_gpio) {
i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
} else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
- ret = -EINVAL;
- goto err_clk;
+ return -EINVAL;
}
/* initialise the i2c controller */
+ clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- if (ret != 0)
- goto err_clk;
-
+ clk_disable_unprepare(i2c->clk);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "I2C controller init failed\n");
+ return ret;
+ }
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
*/
@@ -1081,21 +1080,21 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto err_clk;
+ return ret;
}
- ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
- dev_name(&pdev->dev), i2c);
+ ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0,
+ dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- goto err_clk;
+ return ret;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
- goto err_irq;
+ return ret;
}
/* Note, previous versions of the driver used i2c_add_adapter()
@@ -1110,7 +1109,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
- goto err_cpufreq;
+ s3c24xx_i2c_deregister_cpufreq(i2c);
+ return ret;
}
of_i2c_register_devices(&i2c->adap);
@@ -1120,21 +1120,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(&i2c->adap.dev);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
- clk_disable_unprepare(i2c->clk);
return 0;
-
- err_cpufreq:
- s3c24xx_i2c_deregister_cpufreq(i2c);
-
- err_irq:
- free_irq(i2c->irq, i2c);
-
- err_clk:
- clk_disable_unprepare(i2c->clk);
- clk_put(i2c->clk);
-
- err_noclk:
- return ret;
}
/* s3c24xx_i2c_remove
@@ -1152,10 +1138,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
s3c24xx_i2c_deregister_cpufreq(i2c);
i2c_del_adapter(&i2c->adap);
- free_irq(i2c->irq, i2c);
clk_disable_unprepare(i2c->clk);
- clk_put(i2c->clk);
if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
s3c24xx_i2c_dt_gpio_free(i2c);
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index 008836409efe..7c1ca5aca088 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -365,7 +365,6 @@ static int s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
- platform_set_drvdata(pdev, NULL);
i2c_del_adapter(&iface->adap);
free_irq(iface->irq, iface);
clk_disable(iface->clk);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 3a2253e1bf59..5351a2f34912 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -546,7 +546,6 @@ static int sh7760_i2c_remove(struct platform_device *pdev)
release_resource(id->ioarea);
kfree(id->ioarea);
kfree(id);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index b6e7a83a8296..debf745c0268 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -38,21 +38,21 @@
/* Transmit operation: */
/* */
/* 0 byte transmit */
-/* BUS: S A8 ACK P */
+/* BUS: S A8 ACK P(*) */
/* IRQ: DTE WAIT */
/* ICIC: */
/* ICCR: 0x94 0x90 */
/* ICDR: A8 */
/* */
/* 1 byte transmit */
-/* BUS: S A8 ACK D8(1) ACK P */
+/* BUS: S A8 ACK D8(1) ACK P(*) */
/* IRQ: DTE WAIT WAIT */
/* ICIC: -DTE */
/* ICCR: 0x94 0x90 */
/* ICDR: A8 D8(1) */
/* */
/* 2 byte transmit */
-/* BUS: S A8 ACK D8(1) ACK D8(2) ACK P */
+/* BUS: S A8 ACK D8(1) ACK D8(2) ACK P(*) */
/* IRQ: DTE WAIT WAIT WAIT */
/* ICIC: -DTE */
/* ICCR: 0x94 0x90 */
@@ -66,20 +66,20 @@
/* 0 byte receive - not supported since slave may hold SDA low */
/* */
/* 1 byte receive [TX] | [RX] */
-/* BUS: S A8 ACK | D8(1) ACK P */
+/* BUS: S A8 ACK | D8(1) ACK P(*) */
/* IRQ: DTE WAIT | WAIT DTE */
/* ICIC: -DTE | +DTE */
/* ICCR: 0x94 0x81 | 0xc0 */
/* ICDR: A8 | D8(1) */
/* */
/* 2 byte receive [TX]| [RX] */
-/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P */
+/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P(*) */
/* IRQ: DTE WAIT | WAIT WAIT DTE */
/* ICIC: -DTE | +DTE */
/* ICCR: 0x94 0x81 | 0xc0 */
/* ICDR: A8 | D8(1) D8(2) */
/* */
-/* 3 byte receive [TX] | [RX] */
+/* 3 byte receive [TX] | [RX] (*) */
/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK D8(3) ACK P */
/* IRQ: DTE WAIT | WAIT WAIT WAIT DTE */
/* ICIC: -DTE | +DTE */
@@ -94,7 +94,7 @@
/* SDA ___\___XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXAAAAAAAAA___/ */
/* SCL \_/1\_/2\_/3\_/4\_/5\_/6\_/7\_/8\___/9\_____/ */
/* */
-/* S D7 D6 D5 D4 D3 D2 D1 D0 P */
+/* S D7 D6 D5 D4 D3 D2 D1 D0 P(*) */
/* ___ */
/* WAIT IRQ ________________________________/ \___________ */
/* TACK IRQ ____________________________________/ \_______ */
@@ -103,6 +103,11 @@
/* _______________________________________________ */
/* BUSY __/ \_ */
/* */
+/* (*) The STOP condition is only sent by the master at the end of the last */
+/* I2C message or if the I2C_M_STOP flag is set. Similarly, the BUSY bit is */
+/* only cleared after the STOP condition, so, between messages we have to */
+/* poll for the DTE bit. */
+/* */
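Seen from the client side (editorial sketch, not part of the patch): messages handed to a single i2c_transfer() call are joined by repeated starts, and only the last one, or one carrying I2C_M_STOP, ends with a stop; the adapter advertises I2C_FUNC_PROTOCOL_MANGLING further down so that the flag is honoured. A minimal, hypothetical register read:

#include <linux/i2c.h>

/* Hypothetical client: write the register index, then read one byte,
 * with a repeated start (no stop) between the two messages. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };
        int ret = i2c_transfer(client->adapter, msgs, 2);

        return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}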
enum sh_mobile_i2c_op {
OP_START = 0,
@@ -132,6 +137,7 @@ struct sh_mobile_i2c_data {
struct i2c_msg *msg;
int pos;
int sr;
+ bool send_stop;
};
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
@@ -322,7 +328,7 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
break;
case OP_TX_STOP: /* write data and issue a stop afterwards */
iic_wr(pd, ICDR, data);
- iic_wr(pd, ICCR, 0x90);
+ iic_wr(pd, ICCR, pd->send_stop ? 0x90 : 0x94);
break;
case OP_TX_TO_RX: /* select read mode */
iic_wr(pd, ICCR, 0x81);
@@ -349,20 +355,14 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
return ret;
}
-static int sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd)
+static bool sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd)
{
- if (pd->pos == -1)
- return 1;
-
- return 0;
+ return pd->pos == -1;
}
-static int sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd)
+static bool sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd)
{
- if (pd->pos == (pd->msg->len - 1))
- return 1;
-
- return 0;
+ return pd->pos == pd->msg->len - 1;
}
static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd,
@@ -475,22 +475,25 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
+static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+ bool do_init)
{
if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) {
dev_err(pd->dev, "Unsupported zero length i2c read\n");
return -EIO;
}
- /* Initialize channel registers */
- iic_set_clr(pd, ICCR, 0, ICCR_ICE);
+ if (do_init) {
+ /* Initialize channel registers */
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
- /* Enable channel and configure rx ack */
- iic_set_clr(pd, ICCR, ICCR_ICE, 0);
+ /* Enable channel and configure rx ack */
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
- /* Set the clock */
- iic_wr(pd, ICCL, pd->iccl & 0xff);
- iic_wr(pd, ICCH, pd->icch & 0xff);
+ /* Set the clock */
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
+ }
pd->msg = usr_msg;
pd->pos = -1;
@@ -501,6 +504,61 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
return 0;
}
+static int poll_dte(struct sh_mobile_i2c_data *pd)
+{
+ int i;
+
+ for (i = 1000; i; i--) {
+ u_int8_t val = iic_rd(pd, ICSR);
+
+ if (val & ICSR_DTE)
+ break;
+
+ if (val & ICSR_TACK)
+ return -EIO;
+
+ udelay(10);
+ }
+
+ if (!i) {
+ dev_warn(pd->dev, "Timeout polling for DTE!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int poll_busy(struct sh_mobile_i2c_data *pd)
+{
+ int i;
+
+ for (i = 1000; i; i--) {
+ u_int8_t val = iic_rd(pd, ICSR);
+
+ dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
+
+ /* the interrupt handler may wake us up before the
+ * transfer is finished, so poll the hardware
+ * until we're done.
+ */
+ if (!(val & ICSR_BUSY)) {
+ /* handle missing acknowledge and arbitration lost */
+ if ((val | pd->sr) & (ICSR_TACK | ICSR_AL))
+ return -EIO;
+ break;
+ }
+
+ udelay(10);
+ }
+
+ if (!i) {
+ dev_err(pd->dev, "Polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
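Editorial note, not part of the patch: both pollers above bound the busy-wait to roughly

1000 iterations * udelay(10) ~= 10 ms   (plus register access time)

which replaces the open-coded retry_count loop removed from sh_mobile_i2c_xfer() below.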
static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
@@ -508,53 +566,39 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
struct i2c_msg *msg;
int err = 0;
- u_int8_t val;
- int i, k, retry_count;
+ int i, k;
activate_ch(pd);
/* Process all messages */
for (i = 0; i < num; i++) {
+ bool do_start = pd->send_stop || !i;
msg = &msgs[i];
+ pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
- err = start_ch(pd, msg);
+ err = start_ch(pd, msg, do_start);
if (err)
break;
- i2c_op(pd, OP_START, 0);
+ if (do_start)
+ i2c_op(pd, OP_START, 0);
/* The interrupt handler takes care of the rest... */
k = wait_event_timeout(pd->wait,
pd->sr & (ICSR_TACK | SW_DONE),
5 * HZ);
- if (!k)
+ if (!k) {
dev_err(pd->dev, "Transfer request timed out\n");
-
- retry_count = 1000;
-again:
- val = iic_rd(pd, ICSR);
-
- dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
-
- /* the interrupt handler may wake us up before the
- * transfer is finished, so poll the hardware
- * until we're done.
- */
- if (val & ICSR_BUSY) {
- udelay(10);
- if (retry_count--)
- goto again;
-
- err = -EIO;
- dev_err(pd->dev, "Polling timed out\n");
+ err = -ETIMEDOUT;
break;
}
- /* handle missing acknowledge and arbitration lost */
- if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
- err = -EIO;
+ if (pd->send_stop)
+ err = poll_busy(pd);
+ else
+ err = poll_dte(pd);
+ if (err < 0)
break;
- }
}
deactivate_ch(pd);
@@ -566,7 +610,7 @@ again:
static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
static struct i2c_algorithm sh_mobile_i2c_algorithm = {
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index de6dddb9f865..36a9556d7cfa 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -17,30 +17,25 @@
*/
/*
- Changes:
- 24.08.2002
- Fixed the typo in sis630_access (Thanks to Mark M. Hoffman)
- Changed sis630_transaction.(Thanks to Mark M. Hoffman)
- 18.09.2002
- Added SIS730 as supported.
- 21.09.2002
- Added high_clock module option.If this option is set
- used Host Master Clock 56KHz (default 14KHz).For now we save old Host
- Master Clock and after transaction completed restore (otherwise
- it's confuse BIOS and hung Machine).
- 24.09.2002
- Fixed typo in sis630_access
- Fixed logical error by restoring of Host Master Clock
- 31.07.2003
- Added block data read/write support.
-*/
-
-/*
Status: beta
Supports:
SIS 630
SIS 730
+ SIS 964
+
+ Notable differences between chips:
+ +------------------------+--------------------+-------------------+
+ | | SIS630/730 | SIS964 |
+ +------------------------+--------------------+-------------------+
+ | Clock | 14kHz/56kHz | 55.56kHz/27.78kHz |
+ | SMBus registers offset | 0x80 | 0xE0 |
+ | SMB_CNT | Bit 1 = Slave Busy | Bit 1 = Bus probe |
+ | (not used yet) | Bit 3 is reserved | Bit 3 = Last byte |
+ | SMB_PCOUNT | Offset + 0x06 | Offset + 0x14 |
+ | SMB_COUNT | 4:0 bits | 5:0 bits |
+ +------------------------+--------------------+-------------------+
+ (Other differences don't affect the functions provided by the driver)
Note: we assume there can only be one device, with one SMBus interface.
*/
@@ -55,22 +50,36 @@
#include <linux/acpi.h>
#include <linux/io.h>
-/* SIS630 SMBus registers */
-#define SMB_STS 0x80 /* status */
-#define SMB_EN 0x81 /* status enable */
-#define SMB_CNT 0x82
-#define SMBHOST_CNT 0x83
-#define SMB_ADDR 0x84
-#define SMB_CMD 0x85
-#define SMB_PCOUNT 0x86 /* processed count */
-#define SMB_COUNT 0x87
-#define SMB_BYTE 0x88 /* ~0x8F data byte field */
-#define SMBDEV_ADDR 0x90
-#define SMB_DB0 0x91
-#define SMB_DB1 0x92
-#define SMB_SAA 0x93
-
-/* register count for request_region */
+/* SIS964 id is defined here as we are the only file using it */
+#define PCI_DEVICE_ID_SI_964 0x0964
+
+/* SIS630/730/964 SMBus registers */
+#define SMB_STS 0x00 /* status */
+#define SMB_CNT 0x02 /* control */
+#define SMBHOST_CNT 0x03 /* host control */
+#define SMB_ADDR 0x04 /* address */
+#define SMB_CMD 0x05 /* command */
+#define SMB_COUNT 0x07 /* byte count */
+#define SMB_BYTE 0x08 /* ~0x8F data byte field */
+
+/* SMB_STS register */
+#define BYTE_DONE_STS 0x10 /* Byte Done Status / Block Array */
+#define SMBCOL_STS 0x04 /* Collision */
+#define SMBERR_STS 0x02 /* Device error */
+
+/* SMB_CNT register */
+#define MSTO_EN 0x40 /* Host Master Timeout Enable */
+#define SMBCLK_SEL 0x20 /* Host master clock selection */
+#define SMB_PROBE 0x02 /* Bus Probe/Slave busy */
+#define SMB_HOSTBUSY 0x01 /* Host Busy */
+
+/* SMBHOST_CNT register */
+#define SMB_KILL 0x20 /* Kill */
+#define SMB_START 0x10 /* Start */
+
+/* register count for request_region
+ * As we don't use SMB_PCOUNT, 20 is ok for SiS630 and SiS964
+ */
#define SIS630_SMB_IOREGION 20
/* PCI address constants */
@@ -96,65 +105,71 @@ static struct pci_driver sis630_driver;
static bool high_clock;
static bool force;
module_param(high_clock, bool, 0);
-MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz).");
+MODULE_PARM_DESC(high_clock,
+ "Set Host Master Clock to 56KHz (default 14KHz) (SIS630/730 only).");
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");
-/* acpi base address */
-static unsigned short acpi_base;
+/* SMBus base address */
+static unsigned short smbus_base;
/* supported chips */
static int supported[] = {
PCI_DEVICE_ID_SI_630,
PCI_DEVICE_ID_SI_730,
+ PCI_DEVICE_ID_SI_760,
0 /* terminates the list */
};
static inline u8 sis630_read(u8 reg)
{
- return inb(acpi_base + reg);
+ return inb(smbus_base + reg);
}
static inline void sis630_write(u8 reg, u8 data)
{
- outb(data, acpi_base + reg);
+ outb(data, smbus_base + reg);
}
-static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock)
+static int sis630_transaction_start(struct i2c_adapter *adap, int size,
+ u8 *oldclock)
{
- int temp;
+ int temp;
/* Make sure the SMBus host is ready to start transmitting. */
- if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
- dev_dbg(&adap->dev, "SMBus busy (%02x).Resetting...\n",temp);
+ temp = sis630_read(SMB_CNT);
+ if ((temp & (SMB_PROBE | SMB_HOSTBUSY)) != 0x00) {
+ dev_dbg(&adap->dev, "SMBus busy (%02x). Resetting...\n", temp);
/* kill smbus transaction */
- sis630_write(SMBHOST_CNT, 0x20);
+ sis630_write(SMBHOST_CNT, SMB_KILL);
- if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
+ temp = sis630_read(SMB_CNT);
+ if (temp & (SMB_PROBE | SMB_HOSTBUSY)) {
dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
- } else {
+ } else {
dev_dbg(&adap->dev, "Successful!\n");
}
- }
+ }
/* save old clock, so we can prevent machine for hung */
*oldclock = sis630_read(SMB_CNT);
dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock);
- /* disable timeout interrupt , set Host Master Clock to 56KHz if requested */
+ /* disable timeout interrupt,
+ * set Host Master Clock to 56KHz if requested */
if (high_clock)
- sis630_write(SMB_CNT, 0x20);
+ sis630_write(SMB_CNT, SMBCLK_SEL);
else
- sis630_write(SMB_CNT, (*oldclock & ~0x40));
+ sis630_write(SMB_CNT, (*oldclock & ~MSTO_EN));
/* clear all sticky bits */
temp = sis630_read(SMB_STS);
sis630_write(SMB_STS, temp & 0x1e);
/* start the transaction by setting bit 4 and size */
- sis630_write(SMBHOST_CNT,0x10 | (size & 0x07));
+ sis630_write(SMBHOST_CNT, SMB_START | (size & 0x07));
return 0;
}
@@ -168,7 +183,7 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
msleep(1);
temp = sis630_read(SMB_STS);
/* check if block transmitted */
- if (size == SIS630_BLOCK_DATA && (temp & 0x10))
+ if (size == SIS630_BLOCK_DATA && (temp & BYTE_DONE_STS))
break;
} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));
@@ -178,19 +193,14 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
result = -ETIMEDOUT;
}
- if (temp & 0x02) {
+ if (temp & SMBERR_STS) {
dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
result = -ENXIO;
}
- if (temp & 0x04) {
+ if (temp & SMBCOL_STS) {
dev_err(&adap->dev, "Bus collision!\n");
- result = -EIO;
- /*
- TBD: Datasheet say:
- the software should clear this bit and restart SMBUS operation.
- Should we do it or user start request again?
- */
+ result = -EAGAIN;
}
return result;
@@ -198,21 +208,21 @@ static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock)
{
- int temp = 0;
-
/* clear all status "sticky" bits */
- sis630_write(SMB_STS, temp);
+ sis630_write(SMB_STS, 0xFF);
- dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));
+ dev_dbg(&adap->dev,
+ "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));
/*
* restore old Host Master Clock if high_clock is set
* and oldclock was not 56KHz
*/
- if (high_clock && !(oldclock & 0x20))
- sis630_write(SMB_CNT,(sis630_read(SMB_CNT) & ~0x20));
+ if (high_clock && !(oldclock & SMBCLK_SEL))
+ sis630_write(SMB_CNT, sis630_read(SMB_CNT) & ~SMBCLK_SEL);
- dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
+ dev_dbg(&adap->dev,
+ "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
}
static int sis630_transaction(struct i2c_adapter *adap, int size)
@@ -229,7 +239,8 @@ static int sis630_transaction(struct i2c_adapter *adap, int size)
return result;
}
-static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write)
+static int sis630_block_data(struct i2c_adapter *adap,
+ union i2c_smbus_data *data, int read_write)
{
int i, len = 0, rc = 0;
u8 oldclock = 0;
@@ -241,39 +252,43 @@ static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *dat
else if (len > 32)
len = 32;
sis630_write(SMB_COUNT, len);
- for (i=1; i <= len; i++) {
- dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]);
+ for (i = 1; i <= len; i++) {
+ dev_dbg(&adap->dev,
+ "set data 0x%02x\n", data->block[i]);
/* set data */
- sis630_write(SMB_BYTE+(i-1)%8, data->block[i]);
- if (i==8 || (len<8 && i==len)) {
- dev_dbg(&adap->dev, "start trans len=%d i=%d\n",len ,i);
+ sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
+ if (i == 8 || (len < 8 && i == len)) {
+ dev_dbg(&adap->dev,
+ "start trans len=%d i=%d\n", len, i);
/* first transaction */
rc = sis630_transaction_start(adap,
SIS630_BLOCK_DATA, &oldclock);
if (rc)
return rc;
- }
- else if ((i-1)%8 == 7 || i==len) {
- dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n",len,i);
- if (i>8) {
- dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i);
+ } else if ((i - 1) % 8 == 7 || i == len) {
+ dev_dbg(&adap->dev,
+ "trans_wait len=%d i=%d\n", len, i);
+ if (i > 8) {
+ dev_dbg(&adap->dev,
+ "clear smbary_sts len=%d i=%d\n", len, i);
/*
If this is not first transaction,
we must clear sticky bit.
clear SMBARY_STS
*/
- sis630_write(SMB_STS,0x10);
+ sis630_write(SMB_STS, BYTE_DONE_STS);
}
rc = sis630_transaction_wait(adap,
SIS630_BLOCK_DATA);
if (rc) {
- dev_dbg(&adap->dev, "trans_wait failed\n");
+ dev_dbg(&adap->dev,
+ "trans_wait failed\n");
break;
}
}
}
- }
- else {
+ } else {
/* read request */
data->block[0] = len = 0;
rc = sis630_transaction_start(adap,
@@ -294,18 +309,22 @@ static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *dat
if (data->block[0] > 32)
data->block[0] = 32;
- dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]);
+ dev_dbg(&adap->dev,
+ "block data read len=0x%x\n", data->block[0]);
- for (i=0; i < 8 && len < data->block[0]; i++,len++) {
- dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len);
- data->block[len+1] = sis630_read(SMB_BYTE+i);
+ for (i = 0; i < 8 && len < data->block[0]; i++, len++) {
+ dev_dbg(&adap->dev,
+ "read i=%d len=%d\n", i, len);
+ data->block[len + 1] = sis630_read(SMB_BYTE +
+ i);
}
- dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n",len,i);
+ dev_dbg(&adap->dev,
+ "clear smbary_sts len=%d i=%d\n", len, i);
/* clear SMBARY_STS */
- sis630_write(SMB_STS,0x10);
- } while(len < data->block[0]);
+ sis630_write(SMB_STS, BYTE_DONE_STS);
+ } while (len < data->block[0]);
}
sis630_transaction_end(adap, oldclock);
@@ -321,42 +340,47 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
int status;
switch (size) {
- case I2C_SMBUS_QUICK:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- size = SIS630_QUICK;
- break;
- case I2C_SMBUS_BYTE:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- if (read_write == I2C_SMBUS_WRITE)
- sis630_write(SMB_CMD, command);
- size = SIS630_BYTE;
- break;
- case I2C_SMBUS_BYTE_DATA:
- sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
- sis630_write(SMB_CMD, command);
- if (read_write == I2C_SMBUS_WRITE)
- sis630_write(SMB_BYTE, data->byte);
- size = SIS630_BYTE_DATA;
- break;
- case I2C_SMBUS_PROC_CALL:
- case I2C_SMBUS_WORD_DATA:
- sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01));
+ case I2C_SMBUS_QUICK:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ size = SIS630_QUICK;
+ break;
+ case I2C_SMBUS_BYTE:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ if (read_write == I2C_SMBUS_WRITE)
sis630_write(SMB_CMD, command);
- if (read_write == I2C_SMBUS_WRITE) {
- sis630_write(SMB_BYTE, data->word & 0xff);
- sis630_write(SMB_BYTE + 1,(data->word & 0xff00) >> 8);
- }
- size = (size == I2C_SMBUS_PROC_CALL ? SIS630_PCALL : SIS630_WORD_DATA);
- break;
- case I2C_SMBUS_BLOCK_DATA:
- sis630_write(SMB_ADDR,((addr & 0x7f) << 1) | (read_write & 0x01));
- sis630_write(SMB_CMD, command);
- size = SIS630_BLOCK_DATA;
- return sis630_block_data(adap, data, read_write);
- default:
- dev_warn(&adap->dev, "Unsupported transaction %d\n",
- size);
- return -EOPNOTSUPP;
+ size = SIS630_BYTE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ if (read_write == I2C_SMBUS_WRITE)
+ sis630_write(SMB_BYTE, data->byte);
+ size = SIS630_BYTE_DATA;
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ case I2C_SMBUS_WORD_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ if (read_write == I2C_SMBUS_WRITE) {
+ sis630_write(SMB_BYTE, data->word & 0xff);
+ sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
+ }
+ size = (size == I2C_SMBUS_PROC_CALL ?
+ SIS630_PCALL : SIS630_WORD_DATA);
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ sis630_write(SMB_ADDR,
+ ((addr & 0x7f) << 1) | (read_write & 0x01));
+ sis630_write(SMB_CMD, command);
+ size = SIS630_BLOCK_DATA;
+ return sis630_block_data(adap, data, read_write);
+ default:
+ dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
status = sis630_transaction(adap, size);
@@ -368,15 +392,16 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
return 0;
}
- switch(size) {
- case SIS630_BYTE:
- case SIS630_BYTE_DATA:
- data->byte = sis630_read(SMB_BYTE);
- break;
- case SIS630_PCALL:
- case SIS630_WORD_DATA:
- data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8);
- break;
+ switch (size) {
+ case SIS630_BYTE:
+ case SIS630_BYTE_DATA:
+ data->byte = sis630_read(SMB_BYTE);
+ break;
+ case SIS630_PCALL:
+ case SIS630_WORD_DATA:
+ data->word = sis630_read(SMB_BYTE) +
+ (sis630_read(SMB_BYTE + 1) << 8);
+ break;
}
return 0;
@@ -384,9 +409,9 @@ static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
static u32 sis630_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA;
}
static int sis630_setup(struct pci_dev *sis630_dev)
@@ -394,21 +419,23 @@ static int sis630_setup(struct pci_dev *sis630_dev)
unsigned char b;
struct pci_dev *dummy = NULL;
int retval, i;
+ /* acpi base address */
+ unsigned short acpi_base;
/* check for supported SiS devices */
- for (i=0; supported[i] > 0 ; i++) {
- if ((dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy)))
+ for (i = 0; supported[i] > 0; i++) {
+ dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy);
+ if (dummy)
break; /* found */
}
if (dummy) {
pci_dev_put(dummy);
- }
- else if (force) {
- dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but "
+ } else if (force) {
+ dev_err(&sis630_dev->dev,
+ "WARNING: Can't detect SIS630 compatible device, but "
"loading because of force option enabled\n");
- }
- else {
+ } else {
return -ENODEV;
}
@@ -416,7 +443,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
Enable ACPI first, so we can access reg 74-75
in ACPI I/O space and read the ACPI base addr
*/
- if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
+ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) {
dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
retval = -ENODEV;
goto exit;
@@ -430,24 +457,35 @@ static int sis630_setup(struct pci_dev *sis630_dev)
}
/* Determine the ACPI base address */
- if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
- dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
+ if (pci_read_config_word(sis630_dev,
+ SIS630_ACPI_BASE_REG, &acpi_base)) {
+ dev_err(&sis630_dev->dev,
+ "Error: Can't determine ACPI base address\n");
retval = -ENODEV;
goto exit;
}
- dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04x\n", acpi_base);
+ dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04hx\n", acpi_base);
+
+ if (supported[i] == PCI_DEVICE_ID_SI_760)
+ smbus_base = acpi_base + 0xE0;
+ else
+ smbus_base = acpi_base + 0x80;
+
+ dev_dbg(&sis630_dev->dev, "SMBus base at 0x%04hx\n", smbus_base);
- retval = acpi_check_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
+ retval = acpi_check_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name);
if (retval)
goto exit;
/* Everything is happy, let's grab the memory and set things up. */
- if (!request_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
+ if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name)) {
- dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
- "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
+ dev_err(&sis630_dev->dev,
+ "I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n",
+ smbus_base + SMB_STS,
+ smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
retval = -EBUSY;
goto exit;
}
@@ -456,7 +494,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
exit:
if (retval)
- acpi_base = 0;
+ smbus_base = 0;
return retval;
}
@@ -470,20 +508,24 @@ static struct i2c_adapter sis630_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
+ .retries = 3
};
static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) },
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, sis630_ids);
+MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
- dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
+ dev_err(&dev->dev,
+ "SIS630 compatible bus not detected, "
+ "module not inserted.\n");
return -ENODEV;
}
@@ -491,17 +533,17 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
sis630_adapter.dev.parent = &dev->dev;
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
- "SMBus SIS630 adapter at %04x", acpi_base + SMB_STS);
+ "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS);
return i2c_add_adapter(&sis630_adapter);
}
static void sis630_remove(struct pci_dev *dev)
{
- if (acpi_base) {
+ if (smbus_base) {
i2c_del_adapter(&sis630_adapter);
- release_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION);
- acpi_base = 0;
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ smbus_base = 0;
}
}
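
A note on the sis630 hunks above: the conversion replaces raw hex masks with named status/control bits and rebases the I/O region on smbus_base instead of acpi_base. A minimal sketch of the bit definitions implied by the replaced constants (values inferred from the hunks above, so treat them as illustrative rather than the driver's authoritative header):

/* Values inferred from the magic numbers replaced above; illustrative only. */
#define SMBCOL_STS	0x04	/* SMB_STS: bus collision detected */
#define BYTE_DONE_STS	0x10	/* SMB_STS: byte/segment done (SMBARY_STS) */
#define SMBCLK_SEL	0x20	/* SMB_CNT: host master clock select (56 kHz) */
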
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 60195b590637..0a6f941133f6 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -975,7 +975,6 @@ stu300_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
/* Turn off everything */
stu300_wr8(0x00, dev->virtbase + I2C_CR);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 26c352a09298..6ffa56e08517 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -271,7 +271,6 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
exit_close:
serio_close(serio);
exit_kfree:
- serio_set_drvdata(serio, NULL);
kfree(taos);
exit:
return err;
@@ -285,7 +284,6 @@ static void taos_disconnect(struct serio *serio)
i2c_unregister_device(taos->client);
i2c_del_adapter(&taos->adapter);
serio_close(serio);
- serio_set_drvdata(serio, NULL);
kfree(taos);
dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index f0d9923323ea..36704e3ab3fa 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -70,6 +70,8 @@
#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
#define I2C_CLK_DIVISOR 0x06c
+#define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16
+#define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8
#define DVC_CTRL_REG1 0x000
#define DVC_CTRL_REG1_INTR_EN (1<<10)
@@ -116,10 +118,23 @@ enum msg_end_type {
/**
* struct tegra_i2c_hw_feature : Different HW support on Tegra
* @has_continue_xfer_support: Continue transfer supports.
+ * @has_per_pkt_xfer_complete_irq: The transfer-complete interrupt can be
+ * enabled/disabled on a per-packet basis.
+ * @has_single_clk_source: The I2C controller has a single clock source.
+ * Tegra30 and earlier SoCs have two clock sources, i.e. div-clk
+ * and fast-clk.
+ * @clk_divisor_hs_mode: Clock divisor in HS mode.
+ * @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. Only
+ * applicable if there is no fast clock source, i.e. a single
+ * clock source.
*/
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
+ bool has_per_pkt_xfer_complete_irq;
+ bool has_single_clk_source;
+ int clk_divisor_hs_mode;
+ int clk_divisor_std_fast_mode;
};
/**
@@ -365,11 +380,13 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
{
int ret;
- ret = clk_prepare_enable(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev,
- "Enabling fast clk failed, err %d\n", ret);
- return ret;
+ if (!i2c_dev->hw->has_single_clk_source) {
+ ret = clk_prepare_enable(i2c_dev->fast_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev,
+ "Enabling fast clk failed, err %d\n", ret);
+ return ret;
+ }
}
ret = clk_prepare_enable(i2c_dev->div_clk);
if (ret < 0) {
@@ -383,13 +400,16 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
{
clk_disable_unprepare(i2c_dev->div_clk);
- clk_disable_unprepare(i2c_dev->fast_clk);
+ if (!i2c_dev->hw->has_single_clk_source)
+ clk_disable_unprepare(i2c_dev->fast_clk);
}
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
int err = 0;
+ int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
+ u32 clk_divisor;
tegra_i2c_clock_enable(i2c_dev);
@@ -404,7 +424,15 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
- clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * 8);
+
+ clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+ clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier);
+
+ /* Make sure the clock divisor is programmed correctly */
+ clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
+ clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
+ I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
+ i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
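
As a worked example of the clock setup above, take the tegra114 parameters added further down (std/fast divisor 0x19, HS divisor 1, multiplier 8); the numbers below are a sketch, not driver output:

/* Sketch of the rate and divisor-register computation in tegra_i2c_init(). */
unsigned long bus_rate = 100000;	/* 100 kHz, illustrative */
unsigned long parent = bus_rate * I2C_CLK_MULTIPLIER_STD_FAST_MODE * (0x19 + 1);
/* parent == 20,800,000 Hz: the div-clk must run 8 * 26 times faster than the bus */
u32 reg = 1 | (0x19 << I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT);
/* reg packs the HS divisor (low bits) and the std/fast divisor (bits 16+) */
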
@@ -546,6 +574,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
tegra_i2c_fill_tx_fifo(i2c_dev);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (i2c_dev->hw->has_per_pkt_xfer_complete_irq)
+ int_mask |= I2C_INT_PACKET_XFER_COMPLETE;
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
@@ -557,7 +587,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
tegra_i2c_mask_irq(i2c_dev, int_mask);
- if (WARN_ON(ret == 0)) {
+ if (ret == 0) {
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
tegra_i2c_init(i2c_dev);
@@ -633,15 +663,32 @@ static const struct i2c_algorithm tegra_i2c_algo = {
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
+ .has_per_pkt_xfer_complete_irq = false,
+ .has_single_clk_source = false,
+ .clk_divisor_hs_mode = 3,
+ .clk_divisor_std_fast_mode = 0,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = false,
+ .has_single_clk_source = false,
+ .clk_divisor_hs_mode = 3,
+ .clk_divisor_std_fast_mode = 0,
+};
+
+static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
};
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
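
The per-SoC parameters travel in the .data field of these match entries; a probe-time lookup would look roughly like the sketch below (the tegra20 fallback for non-DT probing is an assumption, not necessarily what the driver does):

/* Sketch: selecting the hw feature table from the OF match data. */
const struct tegra_i2c_hw_feature *hw = &tegra20_i2c_hw;	/* assumed default */
const struct of_device_id *match = of_match_device(tegra_i2c_of_match, &pdev->dev);
if (match)
	hw = match->data;
i2c_dev->hw = hw;
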
@@ -685,12 +732,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
return PTR_ERR(div_clk);
}
- fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
- if (IS_ERR(fast_clk)) {
- dev_err(&pdev->dev, "missing bus clock");
- return PTR_ERR(fast_clk);
- }
-
i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev) {
dev_err(&pdev->dev, "Could not allocate struct tegra_i2c_dev");
@@ -699,7 +740,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
- i2c_dev->fast_clk = fast_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
@@ -730,6 +770,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
init_completion(&i2c_dev->msg_complete);
+ if (!i2c_dev->hw->has_single_clk_source) {
+ fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
+ if (IS_ERR(fast_clk)) {
+ dev_err(&pdev->dev, "missing fast clock");
+ return PTR_ERR(fast_clk);
+ }
+ i2c_dev->fast_clk = fast_clk;
+ }
+
platform_set_drvdata(pdev, i2c_dev);
ret = tegra_i2c_init(i2c_dev);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index eec20db6246f..f3a8790a07e8 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -125,8 +125,6 @@ static int i2c_versatile_remove(struct platform_device *dev)
{
struct i2c_versatile *i2c = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
i2c_del_adapter(&i2c->adap);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index f042f6da0ace..332c720fb3fe 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -784,8 +784,6 @@ static int xiic_i2c_remove(struct platform_device *pdev)
xiic_deinit(i2c);
- platform_set_drvdata(pdev, NULL);
-
free_irq(platform_get_irq(pdev, 0), i2c);
iounmap(i2c->base);
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 93f029e98c0d..7945b05d3ea0 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -256,7 +256,6 @@ static int xlr_i2c_remove(struct platform_device *pdev)
priv = platform_get_drvdata(pdev);
i2c_del_adapter(&priv->adap);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 3862a953239c..2d1d2c5653fb 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -542,7 +542,6 @@ static int scx200_remove(struct platform_device *pdev)
struct scx200_acb_iface *iface;
iface = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
scx200_cleanup_iface(iface);
return 0;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e388590b44ab..991d38daa87d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -935,25 +935,17 @@ out_list:
*/
int i2c_add_adapter(struct i2c_adapter *adapter)
{
- int id, res = 0;
-
-retry:
- if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
+ int id;
mutex_lock(&core_lock);
- /* "above" here means "above or equal to", sigh */
- res = idr_get_new_above(&i2c_adapter_idr, adapter,
- __i2c_first_dynamic_bus_num, &id);
+ id = idr_alloc(&i2c_adapter_idr, adapter,
+ __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
mutex_unlock(&core_lock);
-
- if (res < 0) {
- if (res == -EAGAIN)
- goto retry;
- return res;
- }
+ if (id < 0)
+ return id;
adapter->nr = id;
+
return i2c_register_adapter(adapter);
}
EXPORT_SYMBOL(i2c_add_adapter);
@@ -984,33 +976,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
int i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
int id;
- int status;
if (adap->nr == -1) /* -1 means dynamically assign bus id */
return i2c_add_adapter(adap);
- if (adap->nr & ~MAX_IDR_MASK)
- return -EINVAL;
-
-retry:
- if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
mutex_lock(&core_lock);
- /* "above" here means "above or equal to", sigh;
- * we need the "equal to" result to force the result
- */
- status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
- if (status == 0 && id != adap->nr) {
- status = -EBUSY;
- idr_remove(&i2c_adapter_idr, id);
- }
+ id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
+ GFP_KERNEL);
mutex_unlock(&core_lock);
- if (status == -EAGAIN)
- goto retry;
-
- if (status == 0)
- status = i2c_register_adapter(adap);
- return status;
+ if (id < 0)
+ return id == -ENOSPC ? -EBUSY : id;
+ return i2c_register_adapter(adap);
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
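
Both adapter-registration paths now rely on idr_alloc(), which folds the old preallocate/insert/retry loop into one call: it returns the allocated ID (>= 0) or a negative errno, and a bounded range such as [nr, nr + 1) returns -ENOSPC when the slot is taken, which the code above maps to -EBUSY. A generic sketch, with my_idr and obj as placeholder names:

/* Sketch of the idr_alloc() calling convention (placeholder names). */
int id = idr_alloc(&my_idr, obj, base, 0 /* 0 = no upper bound */, GFP_KERNEL);
if (id < 0)
	return id;	/* -ENOMEM, or -ENOSPC if a bounded range is exhausted */
obj->nr = id;
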
@@ -1866,29 +1842,6 @@ s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
EXPORT_SYMBOL(i2c_smbus_write_word_data);
/**
- * i2c_smbus_process_call - SMBus "process call" protocol
- * @client: Handle to slave device
- * @command: Byte interpreted by slave
- * @value: 16-bit "word" being written
- *
- * This executes the SMBus "process call" protocol, returning negative errno
- * else a 16-bit unsigned "word" received from the device.
- */
-s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
- u16 value)
-{
- union i2c_smbus_data data;
- int status;
- data.word = value;
-
- status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_WRITE, command,
- I2C_SMBUS_PROC_CALL, &data);
- return (status < 0) ? status : data.word;
-}
-EXPORT_SYMBOL(i2c_smbus_process_call);
-
-/**
* i2c_smbus_read_block_data - SMBus "block read" protocol
* @client: Handle to slave device
* @command: Byte interpreted by slave
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 5ec2261574ec..c3ccdea3d180 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
- iminor(file->f_path.dentry->d_inode), count);
+ iminor(file_inode(file)), count);
ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
@@ -172,7 +172,7 @@ static ssize_t i2cdev_write(struct file *file, const char __user *buf,
return PTR_ERR(tmp);
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
- iminor(file->f_path.dentry->d_inode), count);
+ iminor(file_inode(file)), count);
ret = i2c_master_send(client, tmp, count);
kfree(tmp);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 9f50ef04a4bd..abc2e55aa243 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -250,7 +250,6 @@ static int i2c_mux_gpio_remove(struct platform_device *pdev)
for (i = 0; i < mux->data.n_gpios; i++)
gpio_free(mux->gpio_base + mux->data.gpios[i]);
- platform_set_drvdata(pdev, NULL);
i2c_put_adapter(mux->parent);
return 0;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index a3133d7b2a0c..2abcc4790f12 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -333,7 +333,7 @@ static int ide_settings_proc_open(struct inode *inode, struct file *file)
static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+ ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
char name[MAX_LEN + 1];
int for_real = 0, mul_factor, div_factor;
unsigned long n;
@@ -558,7 +558,7 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
static ssize_t ide_driver_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- ide_drive_t *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+ ide_drive_t *drive = (ide_drive_t *) PDE(file_inode(file))->data;
char name[32];
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1bc..784b97cb05b0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
- int ret, id;
+ int id;
static int next_id;
- do {
- spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
- next_id, &id);
- if (!ret)
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- spin_unlock_irqrestore(&cm.lock, flags);
- } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&cm.lock, flags);
+
+ id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+ if (id >= 0)
+ next_id = max(id + 1, 0);
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+ idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return ret;
+ return id < 0 ? id : 0;
}
static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
- idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
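
Where the ID must be handed out under a spinlock, the series switches to the preload form: memory is preallocated in sleeping context, then the allocation itself runs with GFP_NOWAIT inside the lock. A minimal sketch with placeholder lock and table names:

/* Sketch: idr_alloc() under a spinlock, using preloading (placeholder names). */
idr_preload(GFP_KERNEL);	/* may sleep; done outside the lock */
spin_lock_irqsave(&my_lock, flags);
id = idr_alloc(&my_idr, obj, start, 0, GFP_NOWAIT);	/* must not sleep here */
spin_unlock_irqrestore(&my_lock, flags);
idr_preload_end();
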
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea32168..71c2c7116802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
unsigned short snum)
{
struct rdma_bind_list *bind_list;
- int port, ret;
+ int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- do {
- ret = idr_get_new_above(ps, bind_list, snum, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
-
- if (port != snum) {
- ret = -EADDRNOTAVAIL;
- goto err2;
- }
+ ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
+ bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv);
return 0;
-err2:
- idr_remove(ps, port);
-err1:
+err:
kfree(bind_list);
- return ret;
+ return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
{
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
- struct hlist_node *node;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
continue;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 176c8f90f2bb..9f5ad7cc33c8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
{
struct hlist_head *bucket;
struct ib_pool_fmr *fmr;
- struct hlist_node *pos;
if (!pool->cache_bucket)
return NULL;
bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
- hlist_for_each_entry(fmr, pos, bucket, cache_node)
+ hlist_for_each_entry(fmr, bucket, cache_node)
if (io_virtual_address == fmr->io_virtual_address &&
page_list_len == fmr->page_list_len &&
!memcmp(page_list, fmr->page_list,
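
The hlist_for_each_entry() conversions above reflect the 3.9 API change that dropped the separate struct hlist_node cursor; the loop variable is now the only cursor. In sketch form:

/* Sketch of the three-argument hlist_for_each_entry(). */
struct ib_pool_fmr *fmr;

hlist_for_each_entry(fmr, bucket, cache_node) {
	/* 'fmr' walks the bucket directly; no struct hlist_node *pos needed */
}
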
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a8905abc56e4..934f45e79e5e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret, id;
-retry:
- if (!idr_pre_get(&query_idr, gfp_mask))
- return -ENOMEM;
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&idr_lock, flags);
- ret = idr_get_new(&query_idr, query, &id);
+
+ id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
spin_unlock_irqrestore(&idr_lock, flags);
- if (ret == -EAGAIN)
- goto retry;
- if (ret)
- return ret;
+ if (preload)
+ idr_preload_end();
+ if (id < 0)
+ return id;
query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 49b15ac1987e..f2f63933e8a9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
struct ib_ucm_context *ctx;
- int result;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
- do {
- result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
- if (!result)
- goto error;
-
- mutex_lock(&ctx_id_mutex);
- result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
- mutex_unlock(&ctx_id_mutex);
- } while (result == -EAGAIN);
-
- if (result)
+ mutex_lock(&ctx_id_mutex);
+ ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_id_mutex);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->file_list, &file->ctxs);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2709ff581392..5ca44cd9b00c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
struct ucma_context *ctx;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
- do {
- ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
- int ret;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
- do {
- ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&multicast_idr, mc, &mc->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (mc->id < 0)
goto error;
mc->ctx = ctx;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5bcb2afd3dcb..0fcd7aa26fa2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -188,6 +188,8 @@ IB_UVERBS_DECLARE_CMD(alloc_pd);
IB_UVERBS_DECLARE_CMD(dealloc_pd);
IB_UVERBS_DECLARE_CMD(reg_mr);
IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(alloc_mw);
+IB_UVERBS_DECLARE_CMD(dealloc_mw);
IB_UVERBS_DECLARE_CMD(create_comp_channel);
IB_UVERBS_DECLARE_CMD(create_cq);
IB_UVERBS_DECLARE_CMD(resize_cq);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 0cb0007724a2..a7d00f6b3bc1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,6 +48,7 @@ struct uverbs_lock_class {
static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
+static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
@@ -124,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
int ret;
-retry:
- if (!idr_pre_get(idr, GFP_KERNEL))
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&ib_uverbs_idr_lock);
- ret = idr_get_new(idr, uobj, &uobj->id);
- spin_unlock(&ib_uverbs_idr_lock);
- if (ret == -EAGAIN)
- goto retry;
+ ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ uobj->id = ret;
- return ret;
+ spin_unlock(&ib_uverbs_idr_lock);
+ idr_preload_end();
+
+ return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
@@ -730,7 +730,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
goto err_tree_mutex_unlock;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
xrcd = find_xrcd(file->device, inode);
if (!xrcd && !(cmd.oflags & O_CREAT)) {
/* no file descriptor. Need CREATE flag */
@@ -1049,6 +1049,126 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
return in_len;
}
+ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_alloc_mw cmd;
+ struct ib_uverbs_alloc_mw_resp resp;
+ struct ib_uobject *uobj;
+ struct ib_pd *pd;
+ struct ib_mw *mw;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+ if (!uobj)
+ return -ENOMEM;
+
+ init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
+ down_write(&uobj->mutex);
+
+ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ if (!pd) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ mw = pd->device->alloc_mw(pd, cmd.mw_type);
+ if (IS_ERR(mw)) {
+ ret = PTR_ERR(mw);
+ goto err_put;
+ }
+
+ mw->device = pd->device;
+ mw->pd = pd;
+ mw->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+
+ uobj->object = mw;
+ ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
+ if (ret)
+ goto err_unalloc;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.rkey = mw->rkey;
+ resp.mw_handle = uobj->id;
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp))) {
+ ret = -EFAULT;
+ goto err_copy;
+ }
+
+ put_pd_read(pd);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&uobj->list, &file->ucontext->mw_list);
+ mutex_unlock(&file->mutex);
+
+ uobj->live = 1;
+
+ up_write(&uobj->mutex);
+
+ return in_len;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+err_unalloc:
+ ib_dealloc_mw(mw);
+
+err_put:
+ put_pd_read(pd);
+
+err_free:
+ put_uobj_write(uobj);
+ return ret;
+}
+
+ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_dealloc_mw cmd;
+ struct ib_mw *mw;
+ struct ib_uobject *uobj;
+ int ret = -EINVAL;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+
+ mw = uobj->object;
+
+ ret = ib_dealloc_mw(mw);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+
+ return in_len;
+}
+
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6f2ce6fa98f8..2c6f0f2ecd9d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -87,6 +87,8 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
[IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
[IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+ [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw,
+ [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw,
[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
[IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
[IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
@@ -201,6 +203,15 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ /* Remove MWs before QPs, in order to support type 2A MWs. */
+ list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
+ struct ib_mw *mw = uobj->object;
+
+ idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+ ib_dealloc_mw(mw);
+ kfree(uobj);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
struct ib_qp *qp = uobj->object;
struct ib_uqp_object *uqp =
@@ -240,8 +251,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uevent);
}
- /* XXX Free MWs */
-
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 30f199e8579f..a8fdd3381405 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1099,18 +1099,19 @@ EXPORT_SYMBOL(ib_free_fast_reg_page_list);
/* Memory windows */
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct ib_mw *mw;
if (!pd->device->alloc_mw)
return ERR_PTR(-ENOSYS);
- mw = pd->device->alloc_mw(pd);
+ mw = pd->device->alloc_mw(pd, type);
if (!IS_ERR(mw)) {
mw->device = pd->device;
mw->pd = pd;
mw->uobject = NULL;
+ mw->type = type;
atomic_inc(&pd->usecnt);
}
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 7275e727e0f5..d53cf519f42a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1238,15 +1238,4 @@ static struct pci_driver c2_pci_driver = {
.remove = c2_remove,
};
-static int __init c2_init_module(void)
-{
- return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
- pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 28cd5cb51859..0ab826b280b2 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
int ret;
- do {
- spin_lock_irq(&c2dev->qp_table.lock);
- ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
- c2dev->qp_table.last++, &qp->qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
- } while ((ret == -EAGAIN) &&
- idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
- return ret;
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&c2dev->qp_table.lock);
+
+ ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+ GFP_NOWAIT);
+ if (ret >= 0)
+ qp->qpn = ret;
+
+ spin_unlock_irq(&c2dev->qp_table.lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e039..837862287a29 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
- int newid;
-
- do {
- if (!idr_pre_get(idr, GFP_KERNEL)) {
- return -ENOMEM;
- }
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(newid != id);
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
-
- return ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 145d82a64d0a..9c12da0cbd32 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -738,7 +738,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
return ibmr;
}
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -747,6 +747,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
u32 stag = 0;
int ret;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453b..e5649e8b215d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
if (mw_bind->send_flags & IB_SEND_SIGNALED)
t3_wr_flags = T3_COMPLETION_FLAG;
- sgl.addr = mw_bind->addr;
- sgl.lkey = mw_bind->mr->lkey;
- sgl.length = mw_bind->length;
+ sgl.addr = mw_bind->bind_info.addr;
+ sgl.lkey = mw_bind->bind_info.mr->lkey;
+ sgl.length = mw_bind->bind_info.length;
wqe->bind.reserved = 0;
wqe->bind.type = TPT_VATO;
/* TBD: check perms */
- wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
- wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+ wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+ mw_bind->bind_info.mw_access_flags);
+ wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
- wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
- wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+ wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+ wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
if (err) {
spin_unlock_irqrestore(&qhp->lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c13745cde7fa..565bfb161c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -143,14 +143,28 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
+static void deref_qp(struct c4iw_ep *ep)
+{
+ c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+ clear_bit(QP_REFERENCED, &ep->com.flags);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+ set_bit(QP_REFERENCED, &ep->com.flags);
+ c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
static void start_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (timer_pending(&ep->timer)) {
- PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- c4iw_get_ep(&ep->com);
+ pr_err("%s timer already started! ep %p\n",
+ __func__, ep);
+ return;
+ }
+ clear_bit(TIMEOUT, &ep->com.flags);
+ c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
ep->timer.data = (unsigned long)ep;
ep->timer.function = ep_timeout;
@@ -159,14 +173,10 @@ static void start_ep_timer(struct c4iw_ep *ep)
static void stop_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when its not running! "
- "ep %p state %u\n", __func__, ep, ep->com.state);
- return;
- }
+ PDBG("%s ep %p stopping\n", __func__, ep);
del_timer_sync(&ep->timer);
- c4iw_put_ep(&ep->com);
+ if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+ c4iw_put_ep(&ep->com);
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
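
The new TIMEOUT flag serializes timer teardown between stop_ep_timer() and ep_timeout(): whichever path sets the bit first drops the timer's endpoint reference, and the other path skips it. In sketch form:

/* Sketch of the single-owner timeout handling introduced above. */
if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
	c4iw_put_ep(&ep->com);	/* only the first claimant drops the timer ref */
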
@@ -271,11 +281,13 @@ void _c4iw_free_ep(struct kref *kref)
ep = container_of(kref, struct c4iw_ep, com.kref);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ if (test_bit(QP_REFERENCED, &ep->com.flags))
+ deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
}
kfree(ep);
}
@@ -687,7 +699,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
+ mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -863,7 +875,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
@@ -906,7 +917,6 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -946,7 +956,6 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
if (status < 0) {
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
}
}
@@ -1291,11 +1300,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (mpa->revision > mpa_rev) {
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
" Received = %d\n", __func__, mpa_rev, mpa->revision);
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1306,6 +1317,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* Fail if there's too much private data.
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1314,6 +1326,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* If plen does not account for pkt size
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ stop_ep_timer(ep);
abort_connection(ep, skb, GFP_KERNEL);
return;
}
@@ -1391,30 +1404,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
/* update RX credits */
update_rx_credits(ep, dlen);
switch (state_read(&ep->com)) {
case MPA_REQ_SENT:
+ ep->rcv_seq += dlen;
process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ ep->rcv_seq += dlen;
process_mpa_request(ep, skb);
break;
- case MPA_REP_SENT:
+ case FPDU_MODE: {
+ struct c4iw_qp_attributes attrs;
+ BUG_ON(!ep->com.qp);
+ if (status)
+ pr_err("%s Unexpected streaming data." \
+ " qpid %u ep %p state %d tid %u status %d\n",
+ __func__, ep->com.qp->wq.sq.qid, ep,
+ state_read(&ep->com), ep->hwtid, status);
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
break;
+ }
default:
- pr_err("%s Unexpected streaming data." \
- " ep %p state %d tid %u status %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid, status);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
break;
}
return 0;
@@ -1437,6 +1453,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -1475,11 +1492,11 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
V_FW_OFLD_CONNECTION_WR_ASTID(atid));
req->tcb.cplrxdataack_cplpassacceptrpl =
htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
- req->tcb.tx_max = jiffies;
+ req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- req->tcb.opt0 = TCAM_BYPASS(1) |
+ req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
@@ -1490,20 +1507,20 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win >> 10);
- req->tcb.opt2 = PACE(1) |
+ RCV_BUFSIZ(rcv_win >> 10));
+ req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
if (enable_tcp_sack)
- req->tcb.opt2 |= SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32) SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- req->tcb.opt2 |= WND_SCALE_EN(1);
- req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
- req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+ req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+ req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1993,6 +2010,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
+ insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
accept_cr(child_ep, peer_ip, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
@@ -2018,7 +2036,6 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(req->tcp_opt));
set_emss(ep, ntohs(req->tcp_opt));
- insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
dst_confirm(ep->dst);
state_set(&ep->com, MPA_REQ_WAIT);
@@ -2163,7 +2180,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MPA_REQ_SENT:
stop_ep_timer(ep);
- if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+ if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
connect_reply_upcall(ep, -ECONNRESET);
else {
/*
@@ -2235,9 +2252,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
out:
if (release)
release_ep_resources(ep);
-
- /* retry with mpa-v1 */
- if (ep && ep->retry_with_mpa_v1) {
+ else if (ep->retry_with_mpa_v1) {
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -2430,6 +2446,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->com.qp = qp;
+ ref_qp(ep);
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
@@ -2460,7 +2477,6 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return 0;
err1:
ep->com.cm_id = NULL;
- ep->com.qp = NULL;
cm_id->rem_ref(cm_id);
err:
c4iw_put_ep(&ep->com);
@@ -2501,6 +2517,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.cm_id = cm_id;
ep->com.qp = get_qhp(dev, conn_param->qpn);
BUG_ON(!ep->com.qp);
+ ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
ep->com.qp, cm_id);
@@ -2756,7 +2773,8 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
struct c4iw_ep *ep;
int atid = be32_to_cpu(req->tid);
- ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+ ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+ (__force u32) req->tid);
if (!ep)
return;
@@ -2800,7 +2818,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
struct cpl_pass_accept_req *cpl;
int ret;
- rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+ rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
BUG_ON(!rpl_skb);
if (req->retval) {
PDBG("%s passive open failure %d\n", __func__, req->retval);
@@ -2811,7 +2829,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
} else {
cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
- htonl(req->tid)));
+ (__force u32) htonl(
+ (__force u32) req->tid)));
ret = pass_accept_req(dev, rpl_skb);
if (!ret)
kfree_skb(rpl_skb);
@@ -2857,10 +2876,10 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
struct tcp_options_received tmp_opt;
/* Store values from cpl_rx_pkt in temporary location. */
- vlantag = cpl->vlan;
- len = cpl->len;
- l2info = cpl->l2info;
- hdr_len = cpl->hdr_len;
+ vlantag = (__force u16) cpl->vlan;
+ len = (__force u16) cpl->len;
+ l2info = (__force u32) cpl->l2info;
+ hdr_len = (__force u16) cpl->hdr_len;
intf = cpl->iff;
__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -2871,19 +2890,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
- V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+ V_SYN_MAC_IDX(G_RX_MACIDX(
+ (__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
- req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
- V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
- V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
- req->vlan = vlantag;
- req->len = len;
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+ (__force int) htonl(l2info))) |
+ V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+ (__force int) htons(hdr_len))) |
+ V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+ (__force int) htons(hdr_len))) |
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+ (__force int) htonl(l2info))));
+ req->vlan = (__force __be16) vlantag;
+ req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
PASS_OPEN_TOS(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -2912,7 +2936,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
- req->le.filter = filter;
+ req->le.filter = (__force __be32) filter;
req->le.lport = lport;
req->le.pport = rport;
req->le.u.ipv4.lip = laddr;
@@ -2938,7 +2962,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TP will ignore any value > 0 for MSS index.
*/
req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
- req->cookie = cpu_to_be64((u64)skb);
+ req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -2988,7 +3012,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
- stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+ - dev->rdev.lldi.tids->sftid_base
+ dev->rdev.lldi.tids->nstids;
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
@@ -3049,10 +3074,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
- window = htons(tcph->window);
+ window = (__force u16) htons((__force u16)tcph->window);
/* Calcuate filter portion for LE region. */
- filter = cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
@@ -3175,11 +3200,16 @@ static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ int kickit = 0;
spin_lock(&timeout_lock);
- list_add_tail(&ep->entry, &timeout_list);
+ if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+ list_add_tail(&ep->entry, &timeout_list);
+ kickit = 1;
+ }
spin_unlock(&timeout_lock);
- queue_work(workq, &skb_work);
+ if (kickit)
+ queue_work(workq, &skb_work);
}
/*
@@ -3268,8 +3298,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Wake up any threads in rdma_init() or rdma_fini().
+ * However, if we are on MPAv2 and want to retry with MPAv1
+ * then don't wake up yet.
*/
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
+ if (ep->com.state != MPA_REQ_SENT)
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ } else
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
sched(dev, skb);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ba11c76c0b5a..80069ad595c1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -533,7 +533,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
rdev->lldi.db_reg,
rdev->lldi.gts_reg,
rdev->qpshift, rdev->qpmask,
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
"RSS %#llx, FL %#llx, len %u\n",
pci_name(ctx->lldi.pdev), gl->va,
(unsigned long long)be64_to_cpu(*rsp),
- (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+ (unsigned long long)be64_to_cpu(
+ *(__force __be64 *)gl->va),
gl->tot_len);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index cf2f6b47617a..1a840b2211dd 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
- PDBG("%s AE received after RTS - "
- "qp state %d qpid 0x%x status 0x%x\n", __func__,
- qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+ pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
+ "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
return;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9c1644fb0259..7eec5e13fa8c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
void *handle, u32 id, int lock)
{
int ret;
- int newid;
- do {
- if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
- return -ENOMEM;
- if (lock)
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(!ret && newid != id);
- if (lock)
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
+ if (lock) {
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+ }
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+ if (lock) {
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+ }
- return ret;
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
@@ -716,6 +717,8 @@ enum c4iw_ep_flags {
ABORT_REQ_IN_PROGRESS = 1,
RELEASE_RESOURCES = 2,
CLOSE_SENT = 3,
+ TIMEOUT = 4,
+ QP_REFERENCED = 5,
};
enum c4iw_ep_history {
@@ -866,7 +869,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
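
The _insert_handle() rewrite above is one instance of the tree-wide idr conversion in this series: the old idr_pre_get()/idr_get_new() retry loop becomes idr_preload() + idr_alloc() bracketing the caller's own lock. A minimal sketch of the new shape, assuming an illustrative table/lock pair (my_table, my_lock) rather than anything from the patch:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_table);
static DEFINE_SPINLOCK(my_lock);

static int store_object(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep: preallocates idr nodes */
	spin_lock(&my_lock);

	/* Any free id >= 0; GFP_NOWAIT because the spinlock is held. */
	id = idr_alloc(&my_table, obj, 0, 0, GFP_NOWAIT);

	spin_unlock(&my_lock);
	idr_preload_end();

	return id < 0 ? id : 0;			/* -ENOMEM or -ENOSPC on failure */
}

The ehca, ipath, mlx4 and qib hunks below follow the same shape, differing only in whether they pass a bounded [start, end) range and in how the returned id is stored; ocrdma, which allocates outside any lock, can simply call idr_alloc() with GFP_KERNEL.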
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index afd81790ab3c..903a92d6f91d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -650,7 +650,7 @@ err:
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -659,6 +659,9 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
u32 stag = 0;
int ret;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
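
c4iw_alloc_mw() above illustrates the provider-side half of the memory-window rework: ->alloc_mw() now takes an ib_mw_type, and drivers that only implement type 1 windows reject anything else up front. A hedged sketch of that check, with my_alloc_mw as an illustrative name:

#include <linux/err.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static struct ib_mw *my_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	if (type != IB_MW_TYPE_1)	/* no type 2A/2B support in this device */
		return ERR_PTR(-EINVAL);

	/* hardware-specific window allocation would go here */
	return ERR_PTR(-ENOSYS);	/* placeholder in this sketch */
}

ehca and nes apply the identical check further down; mlx4, which supports both window types, instead maps the requested type through to the hardware (see mr.c below).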
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 05bfe53bff64..17ba4f8bc12d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1383,6 +1383,7 @@ err:
qhp->ep = NULL;
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
+ abort = 1;
wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 8f5290147e8a..212150c25ea0 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
void *vpage;
u32 counter;
u64 rpage, cqx_fec, h_ret;
- int ipz_rc, ret, i;
+ int ipz_rc, i;
unsigned long flags;
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
adapter_handle = shca->ipz_hca_handle;
param.eq_handle = shca->eq.ipz_eq_handle;
- do {
- if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Can't reserve idr nr. device=%p",
- device);
- goto create_cq_exit1;
- }
-
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- } while (ret == -EAGAIN);
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
+ my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ idr_preload_end();
- if (ret) {
+ if (my_cq->token < 0) {
cq = ERR_PTR(-ENOMEM);
ehca_err(device, "Can't allocate new idr entry. device=%p",
device);
goto create_cq_exit1;
}
- if (my_cq->token > 0x1FFFFFF) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Invalid number of cq. device=%p", device);
- goto create_cq_exit2;
- }
-
/*
* CQs maximum depth is 4GB-64, but we need additional 20 as buffer
* for receiving errors CQEs.
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 8f7f282ead65..22f79afa7fc1 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -95,7 +95,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
int ehca_dereg_mr(struct ib_mr *mr);
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
struct ib_mw_bind *mw_bind);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 87844869dcc2..bcfb0c183620 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -688,7 +688,7 @@ dereg_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct ib_mw *ib_mw;
u64 h_ret;
@@ -698,6 +698,9 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
container_of(pd->device, struct ehca_shca, ib_device);
struct ehca_mw_hipzout_parms hipzout;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
e_mw = ehca_mw_new();
if (!e_mw) {
ib_mw = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 149393915ae5..00d6861a6a18 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
my_qp->send_cq =
container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
- do {
- if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't reserve idr resources.");
- goto create_qp_exit0;
- }
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
- ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
- } while (ret == -EAGAIN);
+ ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+ if (ret >= 0)
+ my_qp->token = ret;
- if (ret) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't allocate new idr entry.");
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ idr_preload_end();
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid number of qp");
+ } else {
+ ret = -ENOMEM;
+ ehca_err(pd->device, "Can't allocate new idr entry.");
+ }
goto create_qp_exit0;
}
- if (my_qp->token > 0x1FFFFFF) {
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid number of qp");
- goto create_qp_exit1;
- }
-
if (has_srq)
parms.srq_token = my_qp->token;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 7b371f545ece..bd0caedafe99 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
struct ipath_devdata *dd;
int ret;
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = vzalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
}
dd->ipath_unit = -1;
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&ipath_devs_lock, flags);
- ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+ ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
if (ret < 0) {
printk(KERN_ERR IPATH_DRV_NAME
": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
dd = ERR_PTR(ret);
goto bail_unlock;
}
+ dd->ipath_unit = ret;
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
bail_unlock:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+ idr_preload_end();
bail:
return dd;
}
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
* the PCI subsystem.
*/
idr_init(&unit_table);
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail;
- }
ret = pci_register_driver(&ipath_driver);
if (ret < 0) {
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 3eb7e454849b..aed8afee56da 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1864,9 +1864,9 @@ static int ipath_assign_port(struct file *fp,
goto done_chk_sdma;
}
- i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
+ i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
- (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
+ (long)file_inode(fp)->i_rdev, i_minor);
if (i_minor)
ret = find_free_port(i_minor - 1, fp, uinfo);
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index a4de9d58e9b4..a479375a8fd8 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -113,7 +113,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
struct infinipath_counters counters;
struct ipath_devdata *dd;
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
dd->ipath_f_read_counters(dd, &counters);
return simple_read_from_buffer(buf, count, ppos, &counters,
@@ -154,7 +154,7 @@ static ssize_t flash_read(struct file *file, char __user *buf,
goto bail;
}
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
if (ipath_eeprom_read(dd, pos, tmp, count)) {
ipath_dev_err(dd, "failed to read from flash\n");
ret = -ENXIO;
@@ -207,7 +207,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
goto bail_tmp;
}
- dd = file->f_path.dentry->d_inode->i_private;
+ dd = file_inode(file)->i_private;
if (ipath_eeprom_write(dd, pos, tmp, count)) {
ret = -ENXIO;
ipath_dev_err(dd, "failed to write to flash\n");
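
The f_path.dentry->d_inode replacements in the ipath hunks above (and the qib hunks further down) are part of the tree-wide switch to the file_inode() helper, which hides the dentry dereference behind one accessor. A one-function sketch of the equivalence, with my_private as an illustrative name:

#include <linux/fs.h>

static void *my_private(struct file *file)
{
	/* previously spelled file->f_path.dentry->d_inode->i_private */
	return file_inode(file)->i_private;
}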
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index dbc99d41605c..e0d79b2395e4 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
- int ret, id;
+ int ret;
static int next_id;
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
- do {
- spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- ret = idr_get_new_above(&sriov->pv_id_table, ent,
- next_id, &id);
- if (!ret) {
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- ent->pv_cm_id = (u32)id;
- sl_id_map_add(ibdev, ent);
- }
+ idr_preload(GFP_KERNEL);
+ spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- spin_unlock(&sriov->id_map_lock);
- } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
- /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
- if (!ret) {
- spin_lock(&sriov->id_map_lock);
+ ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ next_id = max(ret + 1, 0);
+ ent->pv_cm_id = (u32)ret;
+ sl_id_map_add(ibdev, ent);
list_add_tail(&ent->list, &sriov->cm_list);
- spin_unlock(&sriov->id_map_lock);
- return ent;
}
+
+ spin_unlock(&sriov->id_map_lock);
+ idr_preload_end();
+
+ if (ret >= 0)
+ return ent;
+
/*error flow*/
kfree(ent);
mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0a903c129f0a..934792c477bc 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1999,16 +1999,17 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
goto demux_err;
err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
if (err)
- goto demux_err;
+ goto free_pv;
}
mlx4_ib_master_tunnels(dev, 1);
return 0;
+free_pv:
+ free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
- while (i > 0) {
+ while (--i >= 0) {
free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
- --i;
}
mlx4_ib_device_unregister_sysfs(dev);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e7d81c0d1ac5..23d734349d8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -137,6 +137,14 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
props->device_cap_flags |= IB_DEVICE_XRC;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
+ if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+ if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ else
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
+ }
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -1434,6 +1442,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
}
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+ ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
+ ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
+ ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
+
+ ibdev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
@@ -1601,8 +1620,7 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
}
out:
- if (dm)
- kfree(dm);
+ kfree(dm);
return;
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index dcd845bc30f0..f61ec26500c4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
struct ib_umem *umem;
};
+struct mlx4_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx4_mw mmw;
+};
+
struct mlx4_ib_fast_reg_page_list {
struct ib_fast_reg_page_list ibfrpl;
__be64 *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -652,12 +666,12 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
u8 *mac, int *is_mcast, u8 port);
-static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
- return 1;
+ return true;
return !!(ah->av.ib.g_slid & 0x80);
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index bbaf6176f207..e471f089ff00 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -41,9 +41,19 @@ static u32 convert_access(int acc)
(acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
(acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
+ (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) |
MLX4_PERM_LOCAL_READ;
}
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
+{
+ switch (type) {
+ case IB_MW_TYPE_1: return MLX4_MW_TYPE_1;
+ case IB_MW_TYPE_2: return MLX4_MW_TYPE_2;
+ default: return -1;
+ }
+}
+
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx4_ib_mr *mr;
@@ -68,7 +78,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
return &mr->ibmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free:
kfree(mr);
@@ -163,7 +173,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_umem:
ib_umem_release(mr->umem);
@@ -177,8 +187,11 @@ err_free:
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
+ int ret;
- mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+ ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+ if (ret)
+ return ret;
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
@@ -186,6 +199,70 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_mw *mw;
+ int err;
+
+ mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+ to_mlx4_type(type), &mw->mmw);
+ if (err)
+ goto err_free;
+
+ err = mlx4_mw_enable(dev->dev, &mw->mmw);
+ if (err)
+ goto err_mw;
+
+ mw->ibmw.rkey = mw->mmw.key;
+
+ return &mw->ibmw;
+
+err_mw:
+ mlx4_mw_free(dev->dev, &mw->mmw);
+
+err_free:
+ kfree(mw);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind)
+{
+ struct ib_send_wr wr;
+ struct ib_send_wr *bad_wr;
+ int ret;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.opcode = IB_WR_BIND_MW;
+ wr.wr_id = mw_bind->wr_id;
+ wr.send_flags = mw_bind->send_flags;
+ wr.wr.bind_mw.mw = mw;
+ wr.wr.bind_mw.bind_info = mw_bind->bind_info;
+ wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
+
+ ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+ if (!ret)
+ mw->rkey = wr.wr.bind_mw.rkey;
+
+ return ret;
+}
+
+int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
+
+ mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
+ kfree(mw);
+
+ return 0;
+}
+
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len)
{
@@ -212,7 +289,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
return &mr->ibmr;
err_mr:
- mlx4_mr_free(dev->dev, &mr->mmr);
+ (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
kfree(mr);
@@ -291,7 +368,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
return &fmr->ibfmr;
err_mr:
- mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+ (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
err_free:
kfree(fmr);
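
With mlx4 now wiring up alloc_mw/bind_mw/dealloc_mw, a consumer drives the reworked verbs roughly as sketched below. This is a hedged outline only: pd, qp and mr stand for objects the caller already owns, bind_window and the wr_id value are illustrative, and error handling is trimmed.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int bind_window(struct ib_pd *pd, struct ib_qp *qp, struct ib_mr *mr,
		       u64 addr, u64 len)
{
	struct ib_mw *mw;
	struct ib_mw_bind bind = {
		.wr_id      = 1,		/* arbitrary completion cookie */
		.send_flags = IB_SEND_SIGNALED,
		.bind_info  = {
			.mr              = mr,
			.addr            = addr,
			.length          = len,
			.mw_access_flags = IB_ACCESS_REMOTE_READ |
					   IB_ACCESS_REMOTE_WRITE,
		},
	};
	int ret;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* window type is now explicit */
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	ret = ib_bind_mw(qp, mw, &bind);	/* posts the bind on qp; updates mw->rkey */
	if (ret)
		ib_dealloc_mw(mw);
	return ret;
}

Type 2 windows skip ib_bind_mw() and are bound by posting an IB_WR_BIND_MW work request directly; the set_bind_seg()/IB_WR_BIND_MW additions in mlx4/qp.c below provide that path.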
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 19e0637220b9..35cced2a4da8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = {
[IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
+ [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1746,11 +1747,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
int header_size;
int spc;
int i;
- int is_eth;
- int is_vlan = 0;
- int is_grh;
- u16 vlan;
int err = 0;
+ u16 vlan = 0xffff;
+ bool is_eth;
+ bool is_vlan = false;
+ bool is_grh;
send_size = 0;
for (i = 0; i < wr->num_sge; ++i)
@@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
static __be32 convert_access(int acc)
{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) |
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ?
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
@@ -1981,12 +1985,28 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
fseg->reserved[1] = 0;
}
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+{
+ bseg->flags1 =
+ convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+ cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
+ MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
+ bseg->flags2 = 0;
+ if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+ bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
+ if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+ bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
+ bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
+ bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
+ bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
+ bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+}
+
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
- iseg->flags = 0;
- iseg->mem_key = cpu_to_be32(rkey);
- iseg->guest_id = 0;
- iseg->pa = 0;
+ memset(iseg, 0, sizeof(*iseg));
+ iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
@@ -2291,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
break;
+ case IB_WR_BIND_MW:
+ ctrl->srcrb_flags |=
+ cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
+ set_bind_seg(wqe, wr);
+ wqe += sizeof(struct mlx4_wqe_bind_seg);
+ size += sizeof(struct mlx4_wqe_bind_seg) / 16;
+ break;
default:
/* No extra segments required for sends */
break;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 5b2a01dfb907..97516eb363b7 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -732,7 +732,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
dev->ports_parent =
kobject_create_and_add("ports",
kobject_get(dev->iov_parent));
- if (!dev->iov_parent) {
+ if (!dev->ports_parent) {
ret = -ENOMEM;
goto err_ports;
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 07e4fbad987a..8f67fe2e91e6 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -55,7 +55,8 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
/**
* nes_alloc_mw
*/
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -71,6 +72,9 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
u32 driver_key = 0;
u8 stag_key = 0;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
get_random_bytes(&next_stag_index, sizeof(next_stag_index));
stag_key = (u8)next_stag_index;
@@ -244,20 +248,19 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
- if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)
wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
- }
- if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
+ if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)
wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
- }
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,
+ ibmw_bind->bind_info.mr->lkey);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
- ibmw_bind->length);
+ ibmw_bind->bind_info.length);
wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
- u64temp = (u64)ibmw_bind->addr;
+ u64temp = (u64)ibmw_bind->bind_info.addr;
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
head++;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c4e0131f1b57..48928c8e7774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);
static union ib_gid ocrdma_zero_sgid;
-static int ocrdma_get_instance(void)
-{
- int instance = 0;
-
- /* Assign an unused number */
- if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
- return -1;
- if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
- return -1;
- return instance;
-}
-
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
u8 mac_addr[6];
@@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
goto idr_err;
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
- dev->id = ocrdma_get_instance();
+ dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
if (dev->id < 0)
goto idr_err;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 959a5c4ff812..4f7aa301b3b1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1524,7 +1524,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
}
}
- i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+ i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 65a2a23f6f8a..644bd6f6467c 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -45,7 +45,7 @@
static struct super_block *qib_super;
-#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+#define private2dd(file) (file_inode(file)->i_private)
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, const struct file_operations *fops,
@@ -171,7 +171,7 @@ static const struct file_operations cntr_ops[] = {
};
/*
- * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * Could use file_inode(file)->i_ino to figure out which file,
* instead of separate routine for each, but for now, this works...
*/
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index ddf066d9abb6..50e33aa0b4e3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
struct qib_devdata *dd;
int ret;
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&qib_devs_lock, flags);
- ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
- if (ret >= 0)
+
+ ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ dd->unit = ret;
list_add(&dd->list, &qib_dev_list);
+ }
+
spin_unlock_irqrestore(&qib_devs_lock, flags);
+ idr_preload_end();
if (ret < 0) {
qib_early_err(&pdev->dev,
@@ -1180,11 +1181,6 @@ static int __init qlogic_ib_init(void)
* the PCI subsystem.
*/
idr_init(&qib_unit_table);
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- pr_err("idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail_cq_wq;
- }
ret = pci_register_driver(&qib_driver);
if (ret < 0) {
@@ -1199,7 +1195,6 @@ static int __init qlogic_ib_init(void)
bail_unit:
idr_destroy(&qib_unit_table);
-bail_cq_wq:
destroy_workqueue(qib_cq_wq);
bail_dev:
qib_dev_cleanup();
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 35275099cafd..a6a2cc2ba260 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -268,8 +268,9 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
- *qpp = qp->next;
- rcu_assign_pointer(qp->next, NULL);
+ rcu_assign_pointer(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&dev->qpt_lock)));
break;
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 07ca6fd5546b..eb71aaa26a9a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -117,6 +117,8 @@ enum {
#define IPOIB_OP_CM (0)
#endif
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
+
/* structs */
struct ipoib_header {
@@ -760,4 +762,6 @@ extern int ipoib_debug_level;
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
+extern const char ipoib_driver_version[];
+
#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index ca131335417b..c4b3940845e6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -39,7 +39,24 @@
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver));
+ struct ipoib_dev_priv *priv = netdev_priv(netdev);
+ struct ib_device_attr *attr;
+
+ attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+ if (attr && !ib_query_device(priv->ca, attr))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", (int)(attr->fw_ver >> 32),
+ (int)(attr->fw_ver >> 16) & 0xffff,
+ (int)attr->fw_ver & 0xffff);
+ kfree(attr);
+
+ strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
+ sizeof(drvinfo->bus_info));
+
+ strlcpy(drvinfo->version, ipoib_driver_version,
+ sizeof(drvinfo->version));
+
+ strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6fdc9e78da0d..8534afd04e7c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,9 +49,14 @@
#include <linux/jhash.h>
#include <net/arp.h>
+#define DRV_VERSION "1.0.0"
+
+const char ipoib_driver_version[] = DRV_VERSION;
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -505,6 +510,9 @@ static void path_rec_completion(int status,
spin_unlock_irqrestore(&priv->lock, flags);
+ if (IS_ERR_OR_NULL(ah))
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
if (old_ah)
ipoib_put_ah(old_ah);
@@ -844,10 +852,10 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
* different subnets.
*/
/* qpn octets[1:4) & port GUID octets[12:20) */
- u32 *daddr_32 = (u32 *) daddr;
+ u32 *d32 = (u32 *) daddr;
u32 hv;
- hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
+ hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
return hv & htbl->mask;
}
@@ -1688,6 +1696,8 @@ static void ipoib_remove_one(struct ib_device *device)
return;
dev_list = ib_get_client_data(device, &ipoib_client);
+ if (!dev_list)
+ return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index ef7d3be46c31..5babdb35bda7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -94,7 +94,7 @@
/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN 128
+#define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2033a928d34d..be1edb04b085 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -369,10 +369,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
- if (aligned_len != mem->dma_nents) {
+ if (aligned_len != mem->dma_nents ||
+ (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
iscsi_conn->fmr_unalign_cnt++;
- iser_warn("rdma alignment violation %d/%d aligned\n",
- aligned_len, mem->size);
+ iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
+ aligned_len, mem->size);
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
@@ -404,7 +405,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
} else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
- if (err) {
+ if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 95a49affee44..4debadc53106 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -242,10 +242,14 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
IB_ACCESS_REMOTE_READ);
ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(ib_conn->fmr_pool)) {
- ret = PTR_ERR(ib_conn->fmr_pool);
+ ret = PTR_ERR(ib_conn->fmr_pool);
+ if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
ib_conn->fmr_pool = NULL;
goto out_err;
+ } else if (ret == -ENOSYS) {
+ ib_conn->fmr_pool = NULL;
+ iser_warn("FMRs are not supported, using unaligned mode\n");
+ ret = 0;
}
memset(&init_attr, 0, sizeof init_attr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d5088ce78290..7ccf3284dda3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target)
struct Scsi_Host *shost = target->scsi_host;
int i, ret;
- if (target->state != SRP_TARGET_LIVE)
- return -EAGAIN;
-
scsi_target_block(&shost->shost_gendev);
srp_disconnect_target(target);
/*
- * Now get a new local CM ID so that we avoid confusing the
- * target in case things are really fouled up.
+ * Now get a new local CM ID so that we avoid confusing the target in
+ * case things are really fouled up. Doing so also ensures that all CM
+ * callbacks will have finished before a new QP is allocated.
*/
ret = srp_new_cm_id(target);
- if (ret)
- goto unblock;
-
- ret = srp_create_target_ib(target);
- if (ret)
- goto unblock;
+ /*
+ * Whether or not creating a new CM ID succeeded, create a new
+ * QP. This guarantees that all completion callback function
+ * invocations have finished before request resetting starts.
+ */
+ if (ret == 0)
+ ret = srp_create_target_ib(target);
+ else
+ srp_create_target_ib(target);
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
- ret = srp_connect_target(target);
+ if (ret == 0)
+ ret = srp_connect_target(target);
-unblock:
scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
SDEV_TRANSPORT_OFFLINE);
+ target->transport_offline = !!ret;
if (ret)
goto err;
@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
unsigned long flags;
int len;
+ if (unlikely(target->transport_offline)) {
+ scmnd->result = DID_NO_CONNECT << 16;
+ scmnd->scsi_done(scmnd);
+ return 0;
+ }
+
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
+ if (!target->connected || target->qp_in_error)
+ return -1;
+
init_completion(&target->tsk_mgmt_done);
spin_lock_irq(&target->lock);
@@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+ if (!req || !srp_claim_req(target, req, scmnd))
return FAILED;
srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK);
@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
- if (target->qp_in_error)
- return FAILED;
if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
SRP_TSK_LUN_RESET))
return FAILED;
@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE;
- target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index de2d0b3c0bfe..66fbedda4571 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -140,6 +140,7 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
+ bool transport_offline;
/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 369a39de4ff3..f9179b2585a9 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -100,9 +100,6 @@ static int max8925_onkey_probe(struct platform_device *pdev)
input->dev.parent = &pdev->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
- irq[0] += chip->irq_base;
- irq[1] += chip->irq_base;
-
error = request_threaded_irq(irq[0], NULL, max8925_onkey_handler,
IRQF_ONESHOT, "onkey-down", info);
if (error < 0) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 01068987809d..5c514d0711d1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -158,7 +158,7 @@ config TEGRA_IOMMU_GART
config TEGRA_IOMMU_SMMU
bool "Tegra SMMU IOMMU Support"
- depends on ARCH_TEGRA_3x_SOC && TEGRA_AHB
+ depends on ARCH_TEGRA && TEGRA_AHB
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
@@ -187,4 +187,78 @@ config EXYNOS_IOMMU_DEBUG
Say N unless you need kernel log message for IOMMU debugging
+config SHMOBILE_IPMMU
+ bool
+
+config SHMOBILE_IPMMU_TLB
+ bool
+
+config SHMOBILE_IOMMU
+ bool "IOMMU for Renesas IPMMU/IPMMUI"
+ default n
+ depends on (ARM && ARCH_SHMOBILE)
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ select SHMOBILE_IPMMU
+ select SHMOBILE_IPMMU_TLB
+ help
+ Support for Renesas IPMMU/IPMMUI. This option enables
+ remapping of DMA memory accesses from all of the IP blocks
+ on the ICB.
+
+ Warning: Drivers (including userspace drivers of UIO
+ devices) of the IP blocks on the ICB *must* use addresses
+ allocated from the IPMMU (iova) for DMA with this option
+ enabled.
+
+ If unsure, say N.
+
+choice
+ prompt "IPMMU/IPMMUI address space size"
+ default SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ depends on SHMOBILE_IOMMU
+ help
+ This option sets IPMMU/IPMMUI address space size by
+ adjusting the 1st level page table size. The page table size
+ is calculated as follows:
+
+ page table size = number of page table entries * 4 bytes
+ number of page table entries = address space size / 1 MiB
+
+ For example, when the address space size is 2048 MiB, the
+ 1st level page table size is 8192 bytes.
+
+ config SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ bool "2 GiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_1024MB
+ bool "1 GiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_512MB
+ bool "512 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_256MB
+ bool "256 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_128MB
+ bool "128 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_64MB
+ bool "64 MiB"
+
+ config SHMOBILE_IOMMU_ADDRSIZE_32MB
+ bool "32 MiB"
+
+endchoice
+
+config SHMOBILE_IOMMU_L1SIZE
+ int
+ default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB
+ default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB
+ default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB
+ default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB
+ default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB
+ default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
+ default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index f66b816d455c..ef0e5207ad69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
+obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
+obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d33eaaf783ad..98f555dafb55 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3187,8 +3187,7 @@ int __init amd_iommu_init_dma_ops(void)
free_domains:
for_each_iommu(iommu) {
- if (iommu->default_dom)
- dma_ops_domain_free(iommu->default_dom);
+ dma_ops_domain_free(iommu->default_dom);
}
return ret;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index faf10ba1ed9a..b6ecddb63cd0 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1876,11 +1876,6 @@ static int amd_iommu_init_dma(void)
struct amd_iommu *iommu;
int ret;
- init_device_table_dma();
-
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
-
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1889,6 +1884,11 @@ static int amd_iommu_init_dma(void)
if (ret)
return ret;
+ init_device_table_dma();
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
amd_iommu_init_api();
amd_iommu_init_notifier();
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 174bb654453d..dc7e478b7e5f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1042,7 +1042,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc = page_address(desc_page);
- qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
+ qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7fe44f83cc37..238a3caa949a 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -511,7 +511,7 @@ int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
return ret;
}
-bool exynos_sysmmu_disable(struct device *dev)
+static bool exynos_sysmmu_disable(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
bool disabled;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43d5c8b8e7ad..0099667a397e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4255,13 +4255,19 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
- * but needs it:
+ * but needs it. Same seems to hold for the desktop versions.
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK (0xf << 8)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ddbdacad7768..b972d430d92b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -734,7 +734,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
size_t orig_size = size;
int ret = 0;
- if (unlikely(domain->ops->map == NULL))
+ if (unlikely(domain->ops->unmap == NULL ||
+ domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
/* find out the minimum page size supported */
@@ -808,7 +809,8 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
- if (unlikely(domain->ops->unmap == NULL))
+ if (unlikely(domain->ops->unmap == NULL ||
+ domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
/* find out the minimum page size supported */
@@ -850,6 +852,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+
+int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size)
+{
+ if (unlikely(domain->ops->domain_window_enable == NULL))
+ return -ENODEV;
+
+ return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
+
+void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
+{
+ if (unlikely(domain->ops->domain_window_disable == NULL))
+ return;
+
+ return domain->ops->domain_window_disable(domain, wnd_nr);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
+
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -861,13 +883,15 @@ static int __init iommu_init(void)
return 0;
}
-subsys_initcall(iommu_init);
+arch_initcall(iommu_init);
int iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct iommu_domain_geometry *geometry;
+ bool *paging;
int ret = 0;
+ u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -875,6 +899,19 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
*geometry = domain->geometry;
break;
+ case DOMAIN_ATTR_PAGING:
+ paging = data;
+ *paging = (domain->ops->pgsize_bitmap != 0UL);
+ break;
+ case DOMAIN_ATTR_WINDOWS:
+ count = data;
+
+ if (domain->ops->domain_get_windows != NULL)
+ *count = domain->ops->domain_get_windows(domain);
+ else
+ ret = -ENODEV;
+
+ break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -889,9 +926,26 @@ EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
- if (!domain->ops->domain_set_attr)
- return -EINVAL;
+ int ret = 0;
+ u32 *count;
- return domain->ops->domain_set_attr(domain, attr, data);
+ switch (attr) {
+ case DOMAIN_ATTR_WINDOWS:
+ count = data;
+
+ if (domain->ops->domain_set_windows != NULL)
+ ret = domain->ops->domain_set_windows(domain, *count);
+ else
+ ret = -ENODEV;
+
+ break;
+ default:
+ if (domain->ops->domain_set_attr == NULL)
+ return -EINVAL;
+
+ ret = domain->ops->domain_set_attr(domain, attr, data);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
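
The new iommu_domain_window_enable()/iommu_domain_window_disable() wrappers and the DOMAIN_ATTR_WINDOWS attribute only do something on hardware whose driver implements the corresponding domain ops (windowed IOMMUs); without them the enable path simply returns -ENODEV. A hedged caller-side sketch, with setup_one_window, bus, base and size as illustrative names:

#include <linux/errno.h>
#include <linux/iommu.h>

static int setup_one_window(struct bus_type *bus, phys_addr_t base, u64 size)
{
	struct iommu_domain *dom;
	u32 windows = 1;
	int ret;

	dom = iommu_domain_alloc(bus);
	if (!dom)
		return -ENOMEM;

	/* Reserve one sub-window, then point it at [base, base + size). */
	ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &windows);
	if (!ret)
		ret = iommu_domain_window_enable(dom, 0, base, size);

	if (ret)
		iommu_domain_free(dom);
	return ret;
}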
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d33c980e9c20..6ac02fa5910f 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -146,7 +146,7 @@ static int iommu_enable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj || !pdata)
+ if (!pdata)
return -EINVAL;
if (!arch_iommu)
@@ -172,7 +172,7 @@ static void iommu_disable(struct omap_iommu *obj)
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj || !pdata)
+ if (!pdata)
return;
arch_iommu->disable(obj);
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
new file mode 100644
index 000000000000..b6e8b57cf0a8
--- /dev/null
+++ b/drivers/iommu/shmobile-iommu.c
@@ -0,0 +1,395 @@
+/*
+ * IOMMU for IPMMU/IPMMUI
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <asm/dma-iommu.h>
+#include "shmobile-ipmmu.h"
+
+#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
+#define L1_LEN (L1_SIZE / 4)
+#define L1_ALIGN L1_SIZE
+#define L2_SIZE SZ_1K
+#define L2_LEN (L2_SIZE / 4)
+#define L2_ALIGN L2_SIZE
+
+struct shmobile_iommu_domain_pgtable {
+ uint32_t *pgtable;
+ dma_addr_t handle;
+};
+
+struct shmobile_iommu_archdata {
+ struct list_head attached_list;
+ struct dma_iommu_mapping *iommu_mapping;
+ spinlock_t attach_lock;
+ struct shmobile_iommu_domain *attached;
+ int num_attached_devices;
+ struct shmobile_ipmmu *ipmmu;
+};
+
+struct shmobile_iommu_domain {
+ struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
+ spinlock_t map_lock;
+ spinlock_t attached_list_lock;
+ struct list_head attached_list;
+};
+
+static struct shmobile_iommu_archdata *ipmmu_archdata;
+static struct kmem_cache *l1cache, *l2cache;
+
+static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
+ struct kmem_cache *cache, size_t size)
+{
+ pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
+ if (!pgtable->pgtable)
+ return -ENOMEM;
+ pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
+ DMA_TO_DEVICE);
+ return 0;
+}
+
+static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
+ struct kmem_cache *cache, size_t size)
+{
+ dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
+ kmem_cache_free(cache, pgtable->pgtable);
+}
+
+static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
+ unsigned int index)
+{
+ return pgtable->pgtable[index];
+}
+
+static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
+ unsigned int index, unsigned int count, uint32_t val)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ pgtable->pgtable[index + i] = val;
+ dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
+ sizeof(val) * count, DMA_TO_DEVICE);
+}
+
+static int shmobile_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct shmobile_iommu_domain *sh_domain;
+ int i, ret;
+
+ sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
+ if (!sh_domain)
+ return -ENOMEM;
+ ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
+ if (ret < 0) {
+ kfree(sh_domain);
+ return ret;
+ }
+ for (i = 0; i < L1_LEN; i++)
+ sh_domain->l2[i].pgtable = NULL;
+ spin_lock_init(&sh_domain->map_lock);
+ spin_lock_init(&sh_domain->attached_list_lock);
+ INIT_LIST_HEAD(&sh_domain->attached_list);
+ domain->priv = sh_domain;
+ return 0;
+}
+
+static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ int i;
+
+ for (i = 0; i < L1_LEN; i++) {
+ if (sh_domain->l2[i].pgtable)
+ pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
+ }
+ pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
+ kfree(sh_domain);
+ domain->priv = NULL;
+}
+
+static int shmobile_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ int ret = -EBUSY;
+
+ if (!archdata)
+ return -ENODEV;
+ spin_lock(&sh_domain->attached_list_lock);
+ spin_lock(&archdata->attach_lock);
+ if (archdata->attached != sh_domain) {
+ if (archdata->attached)
+ goto err;
+ ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
+ 0);
+ ipmmu_tlb_flush(archdata->ipmmu);
+ archdata->attached = sh_domain;
+ archdata->num_attached_devices = 0;
+ list_add(&archdata->attached_list, &sh_domain->attached_list);
+ }
+ archdata->num_attached_devices++;
+ ret = 0;
+err:
+ spin_unlock(&archdata->attach_lock);
+ spin_unlock(&sh_domain->attached_list_lock);
+ return ret;
+}
+
+static void shmobile_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+
+ if (!archdata)
+ return;
+ spin_lock(&sh_domain->attached_list_lock);
+ spin_lock(&archdata->attach_lock);
+ archdata->num_attached_devices--;
+ if (!archdata->num_attached_devices) {
+ ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
+ ipmmu_tlb_flush(archdata->ipmmu);
+ archdata->attached = NULL;
+ list_del(&archdata->attached_list);
+ }
+ spin_unlock(&archdata->attach_lock);
+ spin_unlock(&sh_domain->attached_list_lock);
+}
+
+static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
+{
+ struct shmobile_iommu_archdata *archdata;
+
+ spin_lock(&sh_domain->attached_list_lock);
+ list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
+ ipmmu_tlb_flush(archdata->ipmmu);
+ spin_unlock(&sh_domain->attached_list_lock);
+}
+
+static int l2alloc(struct shmobile_iommu_domain *sh_domain,
+ unsigned int l1index)
+{
+ int ret;
+
+ if (!sh_domain->l2[l1index].pgtable) {
+ ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
+ if (ret < 0)
+ return ret;
+ }
+ pgtable_write(&sh_domain->l1, l1index, 1,
+ sh_domain->l2[l1index].handle | 0x1);
+ return 0;
+}
+
+static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
+{
+ if (l2->pgtable)
+ pgtable_free(l2, l2cache, L2_SIZE);
+}
+
+static void l2free(struct shmobile_iommu_domain *sh_domain,
+ unsigned int l1index,
+ struct shmobile_iommu_domain_pgtable *l2)
+{
+ pgtable_write(&sh_domain->l1, l1index, 1, 0);
+ if (sh_domain->l2[l1index].pgtable) {
+ *l2 = sh_domain->l2[l1index];
+ sh_domain->l2[l1index].pgtable = NULL;
+ }
+}
+
+static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ unsigned int l1index, l2index;
+ int ret;
+
+ l1index = iova >> 20;
+ switch (size) {
+ case SZ_4K:
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ ret = l2alloc(sh_domain, l1index);
+ if (!ret)
+ pgtable_write(&sh_domain->l2[l1index], l2index, 1,
+ paddr | 0xff2);
+ spin_unlock(&sh_domain->map_lock);
+ break;
+ case SZ_64K:
+ l2index = (iova >> 12) & 0xf0;
+ spin_lock(&sh_domain->map_lock);
+ ret = l2alloc(sh_domain, l1index);
+ if (!ret)
+ pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
+ paddr | 0xff1);
+ spin_unlock(&sh_domain->map_lock);
+ break;
+ case SZ_1M:
+ spin_lock(&sh_domain->map_lock);
+ l2free(sh_domain, l1index, &l2);
+ pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
+ spin_unlock(&sh_domain->map_lock);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ domain_tlb_flush(sh_domain);
+ l2realfree(&l2);
+ return ret;
+}
+
+static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ unsigned int l1index, l2index;
+ uint32_t l2entry = 0;
+ size_t ret = 0;
+
+ l1index = iova >> 20;
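+ /* a section-aligned unmap of at least 1MB clears the whole L1 entry and frees any backing L2 table */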
+ if (!(iova & 0xfffff) && size >= SZ_1M) {
+ spin_lock(&sh_domain->map_lock);
+ l2free(sh_domain, l1index, &l2);
+ spin_unlock(&sh_domain->map_lock);
+ ret = SZ_1M;
+ goto done;
+ }
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ if (sh_domain->l2[l1index].pgtable)
+ l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
+ switch (l2entry & 3) {
+ case 1:
+ if (l2index & 0xf)
+ break;
+ pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
+ ret = SZ_64K;
+ break;
+ case 2:
+ pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
+ ret = SZ_4K;
+ break;
+ }
+ spin_unlock(&sh_domain->map_lock);
+done:
+ if (ret)
+ domain_tlb_flush(sh_domain);
+ l2realfree(&l2);
+ return ret;
+}
+
+static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
+{
+ struct shmobile_iommu_domain *sh_domain = domain->priv;
+ uint32_t l1entry = 0, l2entry = 0;
+ unsigned int l1index, l2index;
+
+ l1index = iova >> 20;
+ l2index = (iova >> 12) & 0xff;
+ spin_lock(&sh_domain->map_lock);
+ if (sh_domain->l2[l1index].pgtable)
+ l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
+ else
+ l1entry = pgtable_read(&sh_domain->l1, l1index);
+ spin_unlock(&sh_domain->map_lock);
+ switch (l2entry & 3) {
+ case 1:
+ return (l2entry & ~0xffff) | (iova & 0xffff);
+ case 2:
+ return (l2entry & ~0xfff) | (iova & 0xfff);
+ default:
+ if ((l1entry & 3) == 2)
+ return (l1entry & ~0xfffff) | (iova & 0xfffff);
+ return 0;
+ }
+}
+
+static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
+{
+ unsigned int i, n = ipmmu->num_dev_names;
+
+ for (i = 0; i < n; i++) {
+ if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int shmobile_iommu_add_device(struct device *dev)
+{
+ struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
+ struct dma_iommu_mapping *mapping;
+
+ if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
+ return 0;
+ mapping = archdata->iommu_mapping;
+ if (!mapping) {
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ L1_LEN << 20, 0);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ archdata->iommu_mapping = mapping;
+ }
+ dev->archdata.iommu = archdata;
+ if (arm_iommu_attach_device(dev, mapping))
+ pr_err("arm_iommu_attach_device failed\n");
+ return 0;
+}
+
+static struct iommu_ops shmobile_iommu_ops = {
+ .domain_init = shmobile_iommu_domain_init,
+ .domain_destroy = shmobile_iommu_domain_destroy,
+ .attach_dev = shmobile_iommu_attach_device,
+ .detach_dev = shmobile_iommu_detach_device,
+ .map = shmobile_iommu_map,
+ .unmap = shmobile_iommu_unmap,
+ .iova_to_phys = shmobile_iommu_iova_to_phys,
+ .add_device = shmobile_iommu_add_device,
+ .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
+};
+
+int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
+{
+ static struct shmobile_iommu_archdata *archdata;
+
+ l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
+ L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!l1cache)
+ return -ENOMEM;
+ l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
+ L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!l2cache) {
+ kmem_cache_destroy(l1cache);
+ return -ENOMEM;
+ }
+ archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+ if (!archdata) {
+ kmem_cache_destroy(l1cache);
+ kmem_cache_destroy(l2cache);
+ return -ENOMEM;
+ }
+ spin_lock_init(&archdata->attach_lock);
+ archdata->attached = NULL;
+ archdata->ipmmu = ipmmu;
+ ipmmu_archdata = archdata;
+ bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
+ return 0;
+}
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
new file mode 100644
index 000000000000..8321f89596c4
--- /dev/null
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -0,0 +1,136 @@
+/*
+ * IPMMU/IPMMUI
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/platform_data/sh_ipmmu.h>
+#include "shmobile-ipmmu.h"
+
+#define IMCTR1 0x000
+#define IMCTR2 0x004
+#define IMASID 0x010
+#define IMTTBR 0x014
+#define IMTTBCR 0x018
+
+#define IMCTR1_TLBEN (1 << 0)
+#define IMCTR1_FLUSH (1 << 1)
+
+static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off,
+ unsigned long data)
+{
+ iowrite32(data, ipmmu->ipmmu_base + reg_off);
+}
+
+void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
+{
+ if (!ipmmu)
+ return;
+
+ mutex_lock(&ipmmu->flush_lock);
+ if (ipmmu->tlb_enabled)
+ ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
+ else
+ ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
+ mutex_unlock(&ipmmu->flush_lock);
+}
+
+void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
+ int asid)
+{
+ if (!ipmmu)
+ return;
+
+ mutex_lock(&ipmmu->flush_lock);
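+ /* map the L1 table size (in bytes) to an IMTTBCR value; any other size leaves the TLB disabled */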
+ switch (size) {
+ default:
+ ipmmu->tlb_enabled = 0;
+ break;
+ case 0x2000:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 1);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x1000:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 2);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x800:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 3);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x400:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 4);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x200:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 5);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x100:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 6);
+ ipmmu->tlb_enabled = 1;
+ break;
+ case 0x80:
+ ipmmu_reg_write(ipmmu, IMTTBCR, 7);
+ ipmmu->tlb_enabled = 1;
+ break;
+ }
+ ipmmu_reg_write(ipmmu, IMTTBR, phys);
+ ipmmu_reg_write(ipmmu, IMASID, asid);
+ mutex_unlock(&ipmmu->flush_lock);
+}
+
+static int ipmmu_probe(struct platform_device *pdev)
+{
+ struct shmobile_ipmmu *ipmmu;
+ struct resource *res;
+ struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get platform resources\n");
+ return -ENOENT;
+ }
+ ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
+ if (!ipmmu) {
+ dev_err(&pdev->dev, "cannot allocate device data\n");
+ return -ENOMEM;
+ }
+ mutex_init(&ipmmu->flush_lock);
+ ipmmu->dev = &pdev->dev;
+ ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ipmmu->ipmmu_base) {
+ dev_err(&pdev->dev, "ioremap_nocache failed\n");
+ return -ENOMEM;
+ }
+ ipmmu->dev_names = pdata->dev_names;
+ ipmmu->num_dev_names = pdata->num_dev_names;
+ platform_set_drvdata(pdev, ipmmu);
+ ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
+ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
+ ipmmu_iommu_init(ipmmu);
+ return 0;
+}
+
+static struct platform_driver ipmmu_driver = {
+ .probe = ipmmu_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ipmmu",
+ },
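+ /* the program data is ASCII hex: every two characters encode one byte */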
+};
+
+static int __init ipmmu_init(void)
+{
+ return platform_driver_register(&ipmmu_driver);
+}
+subsys_initcall(ipmmu_init);
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
new file mode 100644
index 000000000000..4d53684673e1
--- /dev/null
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -0,0 +1,34 @@
+/* shmobile-ipmmu.h
+ *
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __SHMOBILE_IPMMU_H__
+#define __SHMOBILE_IPMMU_H__
+
+struct shmobile_ipmmu {
+ struct device *dev;
+ void __iomem *ipmmu_base;
+ int tlb_enabled;
+ struct mutex flush_lock;
+ const char * const *dev_names;
+ unsigned int num_dev_names;
+};
+
+#ifdef CONFIG_SHMOBILE_IPMMU_TLB
+void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu);
+void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
+ int asid);
+int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu);
+#else
+static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __SHMOBILE_IPMMU_H__ */
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 8219f1d596ee..86437575f94d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -430,13 +430,11 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
-#ifdef CONFIG_OF
static struct of_device_id tegra_gart_of_match[] = {
{ .compatible = "nvidia,tegra20-gart", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
-#endif
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
@@ -445,7 +443,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
- .of_match_table = of_match_ptr(tegra_gart_of_match),
+ .of_match_table = tegra_gart_of_match,
},
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index f08dbcd2f175..b34e5fd7fd9e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,7 +1,7 @@
/*
* IOMMU API for SMMU in Tegra30
*
- * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -294,7 +294,11 @@ struct smmu_debugfs_info {
* Per SMMU device - IOMMU device
*/
struct smmu_device {
- void __iomem *regs[NUM_SMMU_REG_BANKS];
+ void __iomem *regbase; /* register offset base */
+ void __iomem **regs; /* register block start address array */
+ void __iomem **rege; /* register block end address array */
+ int nregs; /* number of register blocks */
+
unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */
spinlock_t lock;
@@ -324,38 +328,37 @@ static struct smmu_device *smmu_handle; /* unique for a system */
/*
* SMMU register accessors
*/
+static inline bool smmu_valid_reg(struct smmu_device *smmu,
+ void __iomem *addr)
+{
+ int i;
+
+ for (i = 0; i < smmu->nregs; i++) {
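+ /* accept only addresses that fall inside one of the ioremapped register blocks */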
+ if (addr < smmu->regs[i])
+ break;
+ if (addr <= smmu->rege[i])
+ return true;
+ }
+
+ return false;
+}
+
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
- BUG_ON(offs < 0x10);
- if (offs < 0x3c)
- return readl(smmu->regs[0] + offs - 0x10);
- BUG_ON(offs < 0x1f0);
- if (offs < 0x200)
- return readl(smmu->regs[1] + offs - 0x1f0);
- BUG_ON(offs < 0x228);
- if (offs < 0x284)
- return readl(smmu->regs[2] + offs - 0x228);
- BUG();
+ void __iomem *addr = smmu->regbase + offs;
+
+ BUG_ON(!smmu_valid_reg(smmu, addr));
+
+ return readl(addr);
}
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
- BUG_ON(offs < 0x10);
- if (offs < 0x3c) {
- writel(val, smmu->regs[0] + offs - 0x10);
- return;
- }
- BUG_ON(offs < 0x1f0);
- if (offs < 0x200) {
- writel(val, smmu->regs[1] + offs - 0x1f0);
- return;
- }
- BUG_ON(offs < 0x228);
- if (offs < 0x284) {
- writel(val, smmu->regs[2] + offs - 0x228);
- return;
- }
- BUG();
+ void __iomem *addr = smmu->regbase + offs;
+
+ BUG_ON(!smmu_valid_reg(smmu, addr));
+
+ writel(val, addr);
}
#define VA_PAGE_TO_PA(va, page) \
@@ -965,7 +968,6 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
{
struct smmu_debugfs_info *info;
struct smmu_device *smmu;
- struct dentry *dent;
int i;
enum {
_OFF = 0,
@@ -993,8 +995,7 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
if (i == ARRAY_SIZE(command))
return -EINVAL;
- dent = file->f_dentry;
- info = dent->d_inode->i_private;
+ info = file_inode(file)->i_private;
smmu = info->smmu;
offs = SMMU_CACHE_CONFIG(info->cache);
@@ -1029,15 +1030,11 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
- struct smmu_debugfs_info *info;
- struct smmu_device *smmu;
- struct dentry *dent;
+ struct smmu_debugfs_info *info = s->private;
+ struct smmu_device *smmu = info->smmu;
int i;
const char * const stats[] = { "hit", "miss", };
- dent = d_find_alias(s->private);
- info = dent->d_inode->i_private;
- smmu = info->smmu;
for (i = 0; i < ARRAY_SIZE(stats); i++) {
u32 val;
@@ -1051,14 +1048,12 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
stats[i], val, offs);
}
seq_printf(s, "\n");
- dput(dent);
-
return 0;
}
static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
- return single_open(file, smmu_debugfs_stats_show, inode);
+ return single_open(file, smmu_debugfs_stats_show, inode->i_private);
}
static const struct file_operations smmu_debugfs_stats_fops = {
@@ -1171,7 +1166,13 @@ static int tegra_smmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
+ smmu->nregs = pdev->num_resources;
+ smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs),
+ GFP_KERNEL);
+ smmu->rege = smmu->regs + smmu->nregs;
+ if (!smmu->regs)
+ return -ENOMEM;
+ for (i = 0; i < smmu->nregs; i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
@@ -1180,7 +1181,10 @@ static int tegra_smmu_probe(struct platform_device *pdev)
smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(smmu->regs[i]))
return PTR_ERR(smmu->regs[i]);
+ smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
}
+ /* Same as "mc" 1st regiter block start address */
+ smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
if (err)
@@ -1217,6 +1221,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
as->pte_attr = _PTE_ATTR;
spin_lock_init(&as->lock);
+ spin_lock_init(&as->client_lock);
INIT_LIST_HEAD(&as->client);
}
spin_lock_init(&smmu->lock);
@@ -1255,13 +1260,11 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
.resume = tegra_smmu_resume,
};
-#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
-#endif
static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe,
@@ -1270,7 +1273,7 @@ static struct platform_driver tegra_smmu_driver = {
.owner = THIS_MODULE,
.name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops,
- .of_match_table = of_match_ptr(tegra_smmu_of_match),
+ .of_match_table = tegra_smmu_of_match,
},
};
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index af4fd3d036c1..3a4165c61196 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -145,7 +145,7 @@ void remove_divas_proc(void)
static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -172,7 +172,7 @@ static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -251,7 +251,7 @@ static const struct file_operations grp_opt_proc_fops = {
static ssize_t info_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+ diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
char c[4];
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 88e4f0ee073c..9a3ce93665c5 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -173,7 +173,7 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
struct log_data *inf;
int len;
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
struct procdata *pd = NULL;
hysdn_card *card;
@@ -319,7 +319,7 @@ static unsigned int
hysdn_log_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
hysdn_card *card;
struct procdata *pd = NULL;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index b87d9e577be2..9bb12ba3191f 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1058,7 +1058,7 @@ isdn_info_update(void)
static ssize_t
isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
int len = 0;
int drvidx;
int chidx;
@@ -1165,7 +1165,7 @@ out:
static ssize_t
isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
int drvidx;
int chidx;
int retval;
@@ -1228,7 +1228,7 @@ static unsigned int
isdn_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
mutex_lock(&isdn_mutex);
@@ -1269,7 +1269,7 @@ out:
static int
isdn_ioctl(struct file *file, uint cmd, ulong arg)
{
- uint minor = iminor(file->f_path.dentry->d_inode);
+ uint minor = iminor(file_inode(file));
isdn_ctrl c;
int drvidx;
int ret;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61d78fa03b1a..38ceac5053a0 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -668,7 +668,7 @@ isdn_ppp_poll(struct file *file, poll_table *wait)
if (is->debug & 0x2)
printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
- iminor(file->f_path.dentry->d_inode));
+ iminor(file_inode(file)));
/* just registers wait_queue hook. This doesn't really wait. */
poll_wait(file, &is->wq, wait);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index abe2d699b6f3..8b07f83d48ad 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
- struct hlist_node *node;
struct sock *csk;
int err = 0;
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (sk->sk_protocol < ISDN_P_B_START) {
read_lock_bh(&data_sockets.lock);
- sk_for_each(csk, node, &data_sockets.head) {
+ sk_for_each(csk, &data_sockets.head) {
if (sk == csk)
continue;
if (_pms(csk)->dev != _pms(sk)->dev)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index deda591f70b9..9cb4b621fbc3 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -64,12 +64,11 @@ unlock:
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
- struct hlist_node *node;
struct sock *sk;
struct sk_buff *cskb = NULL;
read_lock(&sl->lock);
- sk_for_each(sk, node, &sl->head) {
+ sk_for_each(sk, &sl->head) {
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 4469b441b785..ec50824c02ec 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -193,9 +193,18 @@ config LEDS_LP3944
To compile this driver as a module, choose M here: the
module will be called leds-lp3944.
+config LEDS_LP55XX_COMMON
+ tristate "Common Driver for TI/National LP5521 and LP5523/55231"
+ depends on LEDS_LP5521 || LEDS_LP5523
+ select FW_LOADER
+ help
+ This option supports common operations for LP5521 and LP5523/55231
+ devices.
+
config LEDS_LP5521
tristate "LED Support for N.S. LP5521 LED driver chip"
depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
help
If you say yes here you get support for the National Semiconductor
LP5521 LED driver. It is 3 channel chip with programmable engines.
@@ -205,6 +214,7 @@ config LEDS_LP5521
config LEDS_LP5523
tristate "LED Support for TI/National LP5523/55231 LED driver chip"
depends on LEDS_CLASS && I2C
+ select LEDS_LP55XX_COMMON
help
If you say yes here you get support for TI/National Semiconductor
LP5523/55231 LED driver.
@@ -310,7 +320,7 @@ config LEDS_DAC124S085
config LEDS_PWM
tristate "PWM driven LED Support"
depends on LEDS_CLASS
- depends on HAVE_PWM
+ depends on PWM
help
This option enables support for pwm driven LEDs
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3fb9641b6194..215e7e3b6173 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
+obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 6be2edd41173..f5b9ea315790 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -128,8 +128,10 @@ static void pm860x_led_set(struct led_classdev *cdev,
static int pm860x_led_dt_init(struct platform_device *pdev,
struct pm860x_led *data)
{
- struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ struct device_node *nproot, *np;
int iset = 0;
+
+ nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
nproot = of_find_node_by_name(nproot, "leds");
@@ -145,6 +147,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
break;
}
}
+ of_node_put(nproot);
return 0;
}
#else
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 214145483836..a036a19040fe 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -187,6 +187,40 @@ static void lm3530_als_configure(struct lm3530_platform_data *pdata,
(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
}
+static int lm3530_led_enable(struct lm3530_data *drvdata)
+{
+ int ret;
+
+ if (drvdata->enable)
+ return 0;
+
+ ret = regulator_enable(drvdata->regulator);
+ if (ret) {
+ dev_err(drvdata->led_dev.dev, "Failed to enable vin:%d\n", ret);
+ return ret;
+ }
+
+ drvdata->enable = true;
+ return 0;
+}
+
+static void lm3530_led_disable(struct lm3530_data *drvdata)
+{
+ int ret;
+
+ if (!drvdata->enable)
+ return;
+
+ ret = regulator_disable(drvdata->regulator);
+ if (ret) {
+ dev_err(drvdata->led_dev.dev, "Failed to disable vin:%d\n",
+ ret);
+ return;
+ }
+
+ drvdata->enable = false;
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
@@ -245,15 +279,9 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
reg_val[12] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
- if (!drvdata->enable) {
- ret = regulator_enable(drvdata->regulator);
- if (ret) {
- dev_err(&drvdata->client->dev,
- "Enable regulator failed\n");
- return ret;
- }
- drvdata->enable = true;
- }
+ ret = lm3530_led_enable(drvdata);
+ if (ret)
+ return ret;
for (i = 0; i < LM3530_REG_MAX; i++) {
/* do not update brightness register when pwm mode */
@@ -305,13 +333,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
else
drvdata->brightness = brt_val;
- if (brt_val == 0) {
- err = regulator_disable(drvdata->regulator);
- if (err)
- dev_err(&drvdata->client->dev,
- "Disable regulator failed\n");
- drvdata->enable = false;
- }
+ if (brt_val == 0)
+ lm3530_led_disable(drvdata);
break;
case LM3530_BL_MODE_ALS:
break;
@@ -458,8 +481,7 @@ static int lm3530_remove(struct i2c_client *client)
device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
- if (drvdata->enable)
- regulator_disable(drvdata->regulator);
+ lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
return 0;
}
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 65d79284c488..4117235ba618 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -380,7 +380,7 @@ static void lm355x_indicator_brightness_set(struct led_classdev *cdev,
/* indicator pattern only for lm3556*/
static ssize_t lm3556_indicator_pattern_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 07b3dde90613..9f428d9dfe91 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -176,7 +176,7 @@ out:
/* torch pin config for lm3642*/
static ssize_t lm3642_torch_pin_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
@@ -233,7 +233,7 @@ static void lm3642_torch_brightness_set(struct led_classdev *cdev,
/* strobe pin config for lm3642*/
static ssize_t lm3642_strobe_pin_store(struct device *dev,
- struct device_attribute *devAttr,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index cb8a5220200b..1001347ba70b 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -2,8 +2,10 @@
* LP5521 LED chip driver.
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 Texas Instruments
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ * Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,33 +22,21 @@
* 02110-1301 USA
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/leds.h>
-#include <linux/leds-lp5521.h>
-#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
-#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
-
-#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
-#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
-
-#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
-#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
+#include "leds-lp55xx-common.h"
-#define LP5521_CMD_LOAD 0x15 /* 00010101 */
-#define LP5521_CMD_RUN 0x2a /* 00101010 */
-#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
-#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
+#define LP5521_PROGRAM_LENGTH 32
+#define LP5521_MAX_LEDS 3
+#define LP5521_CMD_DIRECT 0x3F
/* Registers */
#define LP5521_REG_ENABLE 0x00
@@ -58,22 +48,14 @@
#define LP5521_REG_G_CURRENT 0x06
#define LP5521_REG_B_CURRENT 0x07
#define LP5521_REG_CONFIG 0x08
-#define LP5521_REG_R_CHANNEL_PC 0x09
-#define LP5521_REG_G_CHANNEL_PC 0x0A
-#define LP5521_REG_B_CHANNEL_PC 0x0B
#define LP5521_REG_STATUS 0x0C
#define LP5521_REG_RESET 0x0D
-#define LP5521_REG_GPO 0x0E
#define LP5521_REG_R_PROG_MEM 0x10
#define LP5521_REG_G_PROG_MEM 0x30
#define LP5521_REG_B_PROG_MEM 0x50
-#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
-#define LP5521_PROG_MEM_SIZE 0x20
-
/* Base register to set LED current */
#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
-
/* Base register to set the brightness */
#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
@@ -92,440 +74,287 @@
/* default R channel current register value */
#define LP5521_REG_R_CURR_DEFAULT 0xAF
-/* Pattern Mode */
-#define PATTERN_OFF 0
+/* Reset register value */
+#define LP5521_RESET 0xFF
-struct lp5521_engine {
- int id;
- u8 mode;
- u8 prog_page;
- u8 engine_mask;
-};
+/* Program Memory Operations */
+#define LP5521_MODE_R_M 0x30 /* Operation Mode Register */
+#define LP5521_MODE_G_M 0x0C
+#define LP5521_MODE_B_M 0x03
+#define LP5521_LOAD_R 0x10
+#define LP5521_LOAD_G 0x04
+#define LP5521_LOAD_B 0x01
-struct lp5521_led {
- int id;
- u8 chan_nr;
- u8 led_current;
- u8 max_current;
- struct led_classdev cdev;
- struct work_struct brightness_work;
- u8 brightness;
-};
+#define LP5521_R_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_R_M) == LP5521_LOAD_R)
+#define LP5521_G_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_G_M) == LP5521_LOAD_G)
+#define LP5521_B_IS_LOADING(mode) \
+ ((mode & LP5521_MODE_B_M) == LP5521_LOAD_B)
-struct lp5521_chip {
- struct lp5521_platform_data *pdata;
- struct mutex lock; /* Serialize control */
- struct i2c_client *client;
- struct lp5521_engine engines[LP5521_MAX_ENGINES];
- struct lp5521_led leds[LP5521_MAX_LEDS];
- u8 num_channels;
- u8 num_leds;
-};
+#define LP5521_EXEC_R_M 0x30 /* Enable Register */
+#define LP5521_EXEC_G_M 0x0C
+#define LP5521_EXEC_B_M 0x03
+#define LP5521_EXEC_M 0x3F
+#define LP5521_RUN_R 0x20
+#define LP5521_RUN_G 0x08
+#define LP5521_RUN_B 0x02
-static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
+static inline void lp5521_wait_opmode_done(void)
{
- return container_of(cdev, struct lp5521_led, cdev);
+ /* operation mode change needs to be longer than 153 us */
+ usleep_range(200, 300);
}
-static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
+static inline void lp5521_wait_enable_done(void)
{
- return container_of(engine, struct lp5521_chip,
- engines[engine->id - 1]);
+ /* it takes more 488 us to update ENABLE register */
+ usleep_range(500, 600);
}
-static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
+static void lp5521_set_led_current(struct lp55xx_led *led, u8 led_current)
{
- return container_of(led, struct lp5521_chip,
- leds[led->id]);
+ led->led_current = led_current;
+ lp55xx_write(led->chip, LP5521_REG_LED_CURRENT_BASE + led->chan_nr,
+ led_current);
}
-static void lp5521_led_brightness_work(struct work_struct *work);
-
-static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+static void lp5521_load_engine(struct lp55xx_chip *chip)
{
- return i2c_smbus_write_byte_data(client, reg, value);
-}
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
+ [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
+ [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
+ };
-static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
-{
- s32 ret;
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP5521_LOAD_R,
+ [LP55XX_ENGINE_2] = LP5521_LOAD_G,
+ [LP55XX_ENGINE_3] = LP5521_LOAD_B,
+ };
- ret = i2c_smbus_read_byte_data(client, reg);
- if (ret < 0)
- return ret;
+ lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], val[idx]);
- *buf = ret;
- return 0;
+ lp5521_wait_opmode_done();
}
-static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+static void lp5521_stop_engine(struct lp55xx_chip *chip)
{
- struct lp5521_chip *chip = engine_to_lp5521(engine);
- struct i2c_client *client = chip->client;
- int ret;
- u8 engine_state;
-
- /* Only transition between RUN and DIRECT mode are handled here */
- if (mode == LP5521_CMD_LOAD)
- return 0;
-
- if (mode == LP5521_CMD_DISABLED)
- mode = LP5521_CMD_DIRECT;
-
- ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
- if (ret < 0)
- return ret;
-
- /* set mode only for this engine */
- engine_state &= ~(engine->engine_mask);
- mode &= engine->engine_mask;
- engine_state |= mode;
- return lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+ lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
+ lp5521_wait_opmode_done();
}
-static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
- struct lp5521_chip *chip = engine_to_lp5521(eng);
- struct i2c_client *client = chip->client;
int ret;
- int addr;
u8 mode;
+ u8 exec;
- /* move current engine to direct mode and remember the state */
- ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
- if (ret)
- return ret;
-
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
- ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
- if (ret)
- return ret;
-
- /* For loading, all the engines to load mode */
- lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
- lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
- /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
- usleep_range(1000, 2000);
-
- addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
- i2c_smbus_write_i2c_block_data(client,
- addr,
- LP5521_PROG_MEM_SIZE,
- pattern);
-
- return lp5521_write(client, LP5521_REG_OP_MODE, mode);
-}
-
-static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
-{
- return lp5521_write(chip->client,
- LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
- curr);
-}
-
-static void lp5521_init_engine(struct lp5521_chip *chip)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
- chip->engines[i].id = i + 1;
- chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
- chip->engines[i].prog_page = i;
+ /* stop engine */
+ if (!start) {
+ lp5521_stop_engine(chip);
+ lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+ lp5521_wait_opmode_done();
+ return;
}
-}
-
-static int lp5521_configure(struct i2c_client *client)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- int ret;
- u8 cfg;
-
- lp5521_init_engine(chip);
-
- /* Set all PWMs to direct control mode */
- ret = lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
-
- cfg = chip->pdata->update_config ?
- : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
- ret |= lp5521_write(client, LP5521_REG_CONFIG, cfg);
-
- /* Initialize all channels PWM to zero -> leds off */
- ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
- ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
- ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
-
- /* Set engines are set to run state when OP_MODE enables engines */
- ret |= lp5521_write(client, LP5521_REG_ENABLE,
- LP5521_ENABLE_RUN_PROGRAM);
- /* enable takes 500us. 1 - 2 ms leaves some margin */
- usleep_range(1000, 2000);
-
- return ret;
-}
-
-static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
-{
- int ret;
- u8 status;
-
- ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
- if (ret < 0)
- return ret;
-
- /* Check that ext clock is really in use if requested */
- if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
- if ((status & LP5521_EXT_CLK_USED) == 0)
- return -EIO;
- return 0;
-}
-
-static void lp5521_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- struct lp5521_led *led = cdev_to_led(cdev);
- led->brightness = (u8)brightness;
- schedule_work(&led->brightness_work);
-}
-
-static void lp5521_led_brightness_work(struct work_struct *work)
-{
- struct lp5521_led *led = container_of(work,
- struct lp5521_led,
- brightness_work);
- struct lp5521_chip *chip = led_to_lp5521(led);
- struct i2c_client *client = chip->client;
- mutex_lock(&chip->lock);
- lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
- led->brightness);
- mutex_unlock(&chip->lock);
-}
-
-/* Detect the chip by setting its ENABLE register and reading it back. */
-static int lp5521_detect(struct i2c_client *client)
-{
- int ret;
- u8 buf;
+ /*
+ * To run the engine,
+ * operation mode and enable register should updated at the same time
+ */
- ret = lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT);
+ ret = lp55xx_read(chip, LP5521_REG_OP_MODE, &mode);
if (ret)
- return ret;
- /* enable takes 500us. 1 - 2 ms leaves some margin */
- usleep_range(1000, 2000);
- ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
- if (ret)
- return ret;
- if (buf != LP5521_ENABLE_DEFAULT)
- return -ENODEV;
+ return;
- return 0;
-}
+ ret = lp55xx_read(chip, LP5521_REG_ENABLE, &exec);
+ if (ret)
+ return;
-/* Set engine mode and create appropriate sysfs attributes, if required. */
-static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
-{
- int ret = 0;
+ /* change operation mode to RUN only when each engine is loading */
+ if (LP5521_R_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_R_M) | LP5521_RUN_R;
+ exec = (exec & ~LP5521_EXEC_R_M) | LP5521_RUN_R;
+ }
- /* if in that mode already do nothing, except for run */
- if (mode == engine->mode && mode != LP5521_CMD_RUN)
- return 0;
+ if (LP5521_G_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_G_M) | LP5521_RUN_G;
+ exec = (exec & ~LP5521_EXEC_G_M) | LP5521_RUN_G;
+ }
- if (mode == LP5521_CMD_RUN) {
- ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
- } else if (mode == LP5521_CMD_LOAD) {
- lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
- lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
- } else if (mode == LP5521_CMD_DISABLED) {
- lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+ if (LP5521_B_IS_LOADING(mode)) {
+ mode = (mode & ~LP5521_MODE_B_M) | LP5521_RUN_B;
+ exec = (exec & ~LP5521_EXEC_B_M) | LP5521_RUN_B;
}
- engine->mode = mode;
+ lp55xx_write(chip, LP5521_REG_OP_MODE, mode);
+ lp5521_wait_opmode_done();
- return ret;
+ lp55xx_update_bits(chip, LP5521_REG_ENABLE, LP5521_EXEC_M, exec);
+ lp5521_wait_enable_done();
}
-static int lp5521_do_store_load(struct lp5521_engine *engine,
- const char *buf, size_t len)
+static int lp5521_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
{
- struct lp5521_chip *chip = engine_to_lp5521(engine);
- struct i2c_client *client = chip->client;
- int ret, nrchars, offset = 0, i = 0;
- char c[3];
- unsigned cmd;
+ enum lp55xx_engine_index idx = chip->engine_idx;
u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+ u8 addr[] = {
+ [LP55XX_ENGINE_1] = LP5521_REG_R_PROG_MEM,
+ [LP55XX_ENGINE_2] = LP5521_REG_G_PROG_MEM,
+ [LP55XX_ENGINE_3] = LP5521_REG_B_PROG_MEM,
+ };
+ unsigned cmd;
+ char c[3];
+ int program_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
+ int i;
- while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+ /* clear program memory before updating */
+ for (i = 0; i < LP5521_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, addr[idx] + i, 0);
+
+ i = 0;
+ while ((offset < size - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
- ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- if (ret != 2)
- goto fail;
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
+
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
- goto fail;
- pattern[i] = (u8)cmd;
+ goto err;
+ pattern[i] = (u8)cmd;
offset += nrchars;
i++;
}
/* Each instruction is 16bit long. Check that length is even */
if (i % 2)
- goto fail;
+ goto err;
- mutex_lock(&chip->lock);
- if (engine->mode == LP5521_CMD_LOAD)
- ret = lp5521_load_program(engine, pattern);
- else
- ret = -EINVAL;
- mutex_unlock(&chip->lock);
+ program_size = i;
+ for (i = 0; i < program_size; i++)
+ lp55xx_write(chip, addr[idx] + i, pattern[i]);
- if (ret) {
- dev_err(&client->dev, "failed loading pattern\n");
- return ret;
- }
+ return 0;
- return len;
-fail:
- dev_err(&client->dev, "wrong pattern format\n");
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
return -EINVAL;
}
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
-}
+ const struct firmware *fw = chip->fw;
-#define store_load(nr) \
-static ssize_t store_engine##nr##_load(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_load(dev, attr, buf, len, nr); \
-}
-store_load(1)
-store_load(2)
-store_load(3)
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- switch (chip->engines[nr - 1].mode) {
- case LP5521_CMD_RUN:
- return sprintf(buf, "run\n");
- case LP5521_CMD_LOAD:
- return sprintf(buf, "load\n");
- case LP5521_CMD_DISABLED:
- return sprintf(buf, "disabled\n");
- default:
- return sprintf(buf, "disabled\n");
+ if (fw->size > LP5521_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
}
-}
-#define show_mode(nr) \
-static ssize_t show_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_mode(dev, attr, buf, nr); \
+ /*
+ * Program momery sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
+
+ lp5521_load_engine(chip);
+ lp5521_update_program_memory(chip, fw->data, fw->size);
}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static int lp5521_post_init_device(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- struct lp5521_engine *engine = &chip->engines[nr - 1];
- mutex_lock(&chip->lock);
+ int ret;
+ u8 val;
- if (!strncmp(buf, "run", 3))
- lp5521_set_mode(engine, LP5521_CMD_RUN);
- else if (!strncmp(buf, "load", 4))
- lp5521_set_mode(engine, LP5521_CMD_LOAD);
- else if (!strncmp(buf, "disabled", 8))
- lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+ /*
+ * Make sure that the chip is reset by reading back the r channel
+ * current reg. This is dummy read is required on some platforms -
+ * otherwise further access to the R G B channels in the
+ * LP5521_REG_ENABLE register will not have any effect - strange!
+ */
+ ret = lp55xx_read(chip, LP5521_REG_R_CURRENT, &val);
+ if (ret) {
+ dev_err(&chip->cl->dev, "error in resetting chip\n");
+ return ret;
+ }
+ if (val != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&chip->cl->dev,
+ "unexpected data in register (expected 0x%x got 0x%x)\n",
+ LP5521_REG_R_CURR_DEFAULT, val);
+ ret = -EINVAL;
+ return ret;
+ }
+ usleep_range(10000, 20000);
- mutex_unlock(&chip->lock);
- return len;
-}
+ /* Set all PWMs to direct control mode */
+ ret = lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
-#define store_mode(nr) \
-static ssize_t store_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_mode(dev, attr, buf, len, nr); \
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
+ val = chip->pdata->update_config ?
+ : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+ ret = lp55xx_write(chip, LP5521_REG_CONFIG, val);
+ if (ret)
+ return ret;
-static ssize_t show_max_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
+ /* Initialize all channels PWM to zero -> leds off */
+ lp55xx_write(chip, LP5521_REG_R_PWM, 0);
+ lp55xx_write(chip, LP5521_REG_G_PWM, 0);
+ lp55xx_write(chip, LP5521_REG_B_PWM, 0);
- return sprintf(buf, "%d\n", led->max_current);
-}
+ /* Engines are set to run state when OP_MODE enables the engines */
+ ret = lp55xx_write(chip, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM);
+ if (ret)
+ return ret;
-static ssize_t show_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
+ lp5521_wait_enable_done();
- return sprintf(buf, "%d\n", led->led_current);
+ return 0;
}
-static ssize_t store_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int lp5521_run_selftest(struct lp55xx_chip *chip, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5521_led *led = cdev_to_led(led_cdev);
- struct lp5521_chip *chip = led_to_lp5521(led);
- ssize_t ret;
- unsigned long curr;
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ int ret;
+ u8 status;
- if (kstrtoul(buf, 0, &curr))
- return -EINVAL;
+ ret = lp55xx_read(chip, LP5521_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
- if (curr > led->max_current)
- return -EINVAL;
+ if (pdata->clock_mode != LP55XX_CLOCK_EXT)
+ return 0;
- mutex_lock(&chip->lock);
- ret = lp5521_set_led_current(chip, led->id, curr);
- mutex_unlock(&chip->lock);
+ /* Check that ext clock is really in use if requested */
+ if ((status & LP5521_EXT_CLK_USED) == 0)
+ return -EIO;
- if (ret < 0)
- return ret;
+ return 0;
+}
- led->led_current = (u8)curr;
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
+ brightness_work);
+ struct lp55xx_chip *chip = led->chip;
- return len;
+ mutex_lock(&chip->lock);
+ lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+ led->brightness);
+ mutex_unlock(&chip->lock);
}
static ssize_t lp5521_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5521_chip *chip = i2c_get_clientdata(client);
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
int ret;
mutex_lock(&chip->lock);
@@ -534,133 +363,11 @@ static ssize_t lp5521_selftest(struct device *dev,
return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
}
-static void lp5521_clear_program_memory(struct i2c_client *cl)
-{
- int i;
- u8 rgb_mem[] = {
- LP5521_REG_R_PROG_MEM,
- LP5521_REG_G_PROG_MEM,
- LP5521_REG_B_PROG_MEM,
- };
-
- for (i = 0; i < ARRAY_SIZE(rgb_mem); i++) {
- lp5521_write(cl, rgb_mem[i], 0);
- lp5521_write(cl, rgb_mem[i] + 1, 0);
- }
-}
-
-static void lp5521_write_program_memory(struct i2c_client *cl,
- u8 base, u8 *rgb, int size)
-{
- int i;
-
- if (!rgb || size <= 0)
- return;
-
- for (i = 0; i < size; i++)
- lp5521_write(cl, base + i, *(rgb + i));
-
- lp5521_write(cl, base + i, 0);
- lp5521_write(cl, base + i + 1, 0);
-}
-
-static inline struct lp5521_led_pattern *lp5521_get_pattern
- (struct lp5521_chip *chip, u8 offset)
-{
- struct lp5521_led_pattern *ptn;
- ptn = chip->pdata->patterns + (offset - 1);
- return ptn;
-}
-
-static void lp5521_run_led_pattern(int mode, struct lp5521_chip *chip)
-{
- struct lp5521_led_pattern *ptn;
- struct i2c_client *cl = chip->client;
- int num_patterns = chip->pdata->num_patterns;
-
- if (mode > num_patterns || !(chip->pdata->patterns))
- return;
-
- if (mode == PATTERN_OFF) {
- lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT);
- usleep_range(1000, 2000);
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
- } else {
- ptn = lp5521_get_pattern(chip, mode);
- if (!ptn)
- return;
-
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
- usleep_range(1000, 2000);
-
- lp5521_clear_program_memory(cl);
-
- lp5521_write_program_memory(cl, LP5521_REG_R_PROG_MEM,
- ptn->r, ptn->size_r);
- lp5521_write_program_memory(cl, LP5521_REG_G_PROG_MEM,
- ptn->g, ptn->size_g);
- lp5521_write_program_memory(cl, LP5521_REG_B_PROG_MEM,
- ptn->b, ptn->size_b);
-
- lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_RUN);
- usleep_range(1000, 2000);
- lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM);
- }
-}
-
-static ssize_t store_led_pattern(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- lp5521_run_led_pattern(val, chip);
-
- return len;
-}
-
-/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
-static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
-
-static struct attribute *lp5521_led_attributes[] = {
- &dev_attr_led_current.attr,
- &dev_attr_max_current.attr,
- NULL,
-};
-
-static struct attribute_group lp5521_led_attribute_group = {
- .attrs = lp5521_led_attributes
-};
-
/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
- show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
- show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
- show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
-static DEVICE_ATTR(led_pattern, S_IWUSR, NULL, store_led_pattern);
static struct attribute *lp5521_attributes[] = {
- &dev_attr_engine1_mode.attr,
- &dev_attr_engine2_mode.attr,
- &dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- &dev_attr_engine1_load.attr,
- &dev_attr_engine2_load.attr,
- &dev_attr_engine3_load.attr,
- &dev_attr_led_pattern.attr,
NULL
};
@@ -668,217 +375,91 @@ static const struct attribute_group lp5521_group = {
.attrs = lp5521_attributes,
};
-static int lp5521_register_sysfs(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- return sysfs_create_group(&dev->kobj, &lp5521_group);
-}
-
-static void lp5521_unregister_sysfs(struct i2c_client *client)
-{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int i;
-
- sysfs_remove_group(&dev->kobj, &lp5521_group);
-
- for (i = 0; i < chip->num_leds; i++)
- sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
- &lp5521_led_attribute_group);
-}
-
-static int lp5521_init_led(struct lp5521_led *led,
- struct i2c_client *client,
- int chan, struct lp5521_platform_data *pdata)
-{
- struct device *dev = &client->dev;
- char name[32];
- int res;
-
- if (chan >= LP5521_MAX_LEDS)
- return -EINVAL;
-
- if (pdata->led_config[chan].led_current == 0)
- return 0;
-
- led->led_current = pdata->led_config[chan].led_current;
- led->max_current = pdata->led_config[chan].max_current;
- led->chan_nr = pdata->led_config[chan].chan_nr;
-
- if (led->chan_nr >= LP5521_MAX_LEDS) {
- dev_err(dev, "Use channel numbers between 0 and %d\n",
- LP5521_MAX_LEDS - 1);
- return -EINVAL;
- }
-
- led->cdev.brightness_set = lp5521_set_brightness;
- if (pdata->led_config[chan].name) {
- led->cdev.name = pdata->led_config[chan].name;
- } else {
- snprintf(name, sizeof(name), "%s:channel%d",
- pdata->label ?: client->name, chan);
- led->cdev.name = name;
- }
-
- res = led_classdev_register(dev, &led->cdev);
- if (res < 0) {
- dev_err(dev, "couldn't register led on channel %d\n", chan);
- return res;
- }
-
- res = sysfs_create_group(&led->cdev.dev->kobj,
- &lp5521_led_attribute_group);
- if (res < 0) {
- dev_err(dev, "couldn't register current attribute\n");
- led_classdev_unregister(&led->cdev);
- return res;
- }
- return 0;
-}
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5521_cfg = {
+ .reset = {
+ .addr = LP5521_REG_RESET,
+ .val = LP5521_RESET,
+ },
+ .enable = {
+ .addr = LP5521_REG_ENABLE,
+ .val = LP5521_ENABLE_DEFAULT,
+ },
+ .max_channel = LP5521_MAX_LEDS,
+ .post_init_device = lp5521_post_init_device,
+ .brightness_work_fn = lp5521_led_brightness_work,
+ .set_led_current = lp5521_set_led_current,
+ .firmware_cb = lp5521_firmware_loaded,
+ .run_engine = lp5521_run_engine,
+ .dev_attr_group = &lp5521_group,
+};
static int lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lp5521_chip *chip;
- struct lp5521_platform_data *pdata;
- int ret, i, led;
- u8 buf;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- i2c_set_clientdata(client, chip);
- chip->client = client;
-
- pdata = client->dev.platform_data;
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = client->dev.platform_data;
if (!pdata) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- mutex_init(&chip->lock);
-
- chip->pdata = pdata;
-
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
- if (ret < 0)
- return ret;
- }
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
- if (pdata->enable) {
- pdata->enable(0);
- usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
- usleep_range(1000, 2000); /* 500us abs min. */
- }
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
- lp5521_write(client, LP5521_REG_RESET, 0xff);
- usleep_range(10000, 20000); /*
- * Exact value is not available. 10 - 20ms
- * appears to be enough for reset.
- */
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp5521_cfg;
- /*
- * Make sure that the chip is reset by reading back the r channel
- * current reg. This is dummy read is required on some platforms -
- * otherwise further access to the R G B channels in the
- * LP5521_REG_ENABLE register will not have any effect - strange!
- */
- ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (ret) {
- dev_err(&client->dev, "error in resetting chip\n");
- goto fail2;
- }
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
- dev_err(&client->dev,
- "unexpected data in register (expected 0x%x got 0x%x)\n",
- LP5521_REG_R_CURR_DEFAULT, buf);
- ret = -EINVAL;
- goto fail2;
- }
- usleep_range(10000, 20000);
+ mutex_init(&chip->lock);
- ret = lp5521_detect(client);
+ i2c_set_clientdata(client, led);
- if (ret) {
- dev_err(&client->dev, "Chip not found\n");
- goto fail2;
- }
+ ret = lp55xx_init_device(chip);
+ if (ret)
+ goto err_init;
dev_info(&client->dev, "%s programmable led chip found\n", id->name);
- ret = lp5521_configure(client);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring chip\n");
- goto fail1;
- }
-
- /* Initialize leds */
- chip->num_channels = pdata->num_channels;
- chip->num_leds = 0;
- led = 0;
- for (i = 0; i < pdata->num_channels; i++) {
- /* Do not initialize channels that are not connected */
- if (pdata->led_config[i].led_current == 0)
- continue;
-
- ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
- if (ret) {
- dev_err(&client->dev, "error initializing leds\n");
- goto fail2;
- }
- chip->num_leds++;
-
- chip->leds[led].id = led;
- /* Set initial LED current */
- lp5521_set_led_current(chip, led,
- chip->leds[led].led_current);
-
- INIT_WORK(&(chip->leds[led].brightness_work),
- lp5521_led_brightness_work);
-
- led++;
- }
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
- ret = lp5521_register_sysfs(client);
+ ret = lp55xx_register_sysfs(chip);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail2;
- }
- return ret;
-fail2:
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- cancel_work_sync(&chip->leds[i].brightness_work);
+ goto err_register_sysfs;
}
-fail1:
- if (pdata->enable)
- pdata->enable(0);
- if (pdata->release_resources)
- pdata->release_resources();
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
return ret;
}
static int lp5521_remove(struct i2c_client *client)
{
- struct lp5521_chip *chip = i2c_get_clientdata(client);
- int i;
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
- lp5521_run_led_pattern(PATTERN_OFF, chip);
- lp5521_unregister_sysfs(client);
-
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- cancel_work_sync(&chip->leds[i].brightness_work);
- }
+ lp5521_stop_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
- if (chip->pdata->enable)
- chip->pdata->enable(0);
- if (chip->pdata->release_resources)
- chip->pdata->release_resources();
return 0;
}
@@ -900,5 +481,6 @@ static struct i2c_driver lp5521_driver = {
module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
MODULE_DESCRIPTION("LP5521 LED engine");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 7f5be8948cde..229f734040af 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -2,8 +2,10 @@
* lp5523.c - LP5523 LED Driver
*
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2012 Texas Instruments
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ * Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,502 +22,351 @@
* 02110-1301 USA
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/leds.h>
-#include <linux/leds-lp5523.h>
-#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
+#include "leds-lp55xx-common.h"
+
+#define LP5523_PROGRAM_LENGTH 32
+#define LP5523_MAX_LEDS 9
+
+/* Registers */
#define LP5523_REG_ENABLE 0x00
#define LP5523_REG_OP_MODE 0x01
-#define LP5523_REG_RATIOMETRIC_MSB 0x02
-#define LP5523_REG_RATIOMETRIC_LSB 0x03
#define LP5523_REG_ENABLE_LEDS_MSB 0x04
#define LP5523_REG_ENABLE_LEDS_LSB 0x05
-#define LP5523_REG_LED_CNTRL_BASE 0x06
#define LP5523_REG_LED_PWM_BASE 0x16
#define LP5523_REG_LED_CURRENT_BASE 0x26
#define LP5523_REG_CONFIG 0x36
-#define LP5523_REG_CHANNEL1_PC 0x37
-#define LP5523_REG_CHANNEL2_PC 0x38
-#define LP5523_REG_CHANNEL3_PC 0x39
-#define LP5523_REG_STATUS 0x3a
-#define LP5523_REG_GPO 0x3b
-#define LP5523_REG_VARIABLE 0x3c
-#define LP5523_REG_RESET 0x3d
-#define LP5523_REG_TEMP_CTRL 0x3e
-#define LP5523_REG_TEMP_READ 0x3f
-#define LP5523_REG_TEMP_WRITE 0x40
+#define LP5523_REG_STATUS 0x3A
+#define LP5523_REG_RESET 0x3D
#define LP5523_REG_LED_TEST_CTRL 0x41
#define LP5523_REG_LED_TEST_ADC 0x42
-#define LP5523_REG_ENG1_VARIABLE 0x45
-#define LP5523_REG_ENG2_VARIABLE 0x46
-#define LP5523_REG_ENG3_VARIABLE 0x47
-#define LP5523_REG_MASTER_FADER1 0x48
-#define LP5523_REG_MASTER_FADER2 0x49
-#define LP5523_REG_MASTER_FADER3 0x4a
-#define LP5523_REG_CH1_PROG_START 0x4c
-#define LP5523_REG_CH2_PROG_START 0x4d
-#define LP5523_REG_CH3_PROG_START 0x4e
-#define LP5523_REG_PROG_PAGE_SEL 0x4f
+#define LP5523_REG_PROG_PAGE_SEL 0x4F
#define LP5523_REG_PROG_MEM 0x50
-#define LP5523_CMD_LOAD 0x15 /* 00010101 */
-#define LP5523_CMD_RUN 0x2a /* 00101010 */
-#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
-
+/* Bit description in registers */
#define LP5523_ENABLE 0x40
#define LP5523_AUTO_INC 0x40
#define LP5523_PWR_SAVE 0x20
#define LP5523_PWM_PWR_SAVE 0x04
-#define LP5523_CP_1 0x08
-#define LP5523_CP_1_5 0x10
#define LP5523_CP_AUTO 0x18
-#define LP5523_INT_CLK 0x01
#define LP5523_AUTO_CLK 0x02
+
#define LP5523_EN_LEDTEST 0x80
#define LP5523_LEDTEST_DONE 0x80
-
-#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
-#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
-#define LP5523_PROGRAM_PAGES 6
+#define LP5523_RESET 0xFF
#define LP5523_ADC_SHORTCIRC_LIM 80
-
-#define LP5523_LEDS 9
-#define LP5523_ENGINES 3
-
-#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
-
-#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
-
-#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
-
#define LP5523_EXT_CLK_USED 0x08
-#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
-#define SHIFT_MASK(id) (((id) - 1) * 2)
+/* Memory Page Selection */
+#define LP5523_PAGE_ENG1 0
+#define LP5523_PAGE_ENG2 1
+#define LP5523_PAGE_ENG3 2
+
+/* Program Memory Operations */
+#define LP5523_MODE_ENG1_M 0x30 /* Operation Mode Register */
+#define LP5523_MODE_ENG2_M 0x0C
+#define LP5523_MODE_ENG3_M 0x03
+#define LP5523_LOAD_ENG1 0x10
+#define LP5523_LOAD_ENG2 0x04
+#define LP5523_LOAD_ENG3 0x01
+
+#define LP5523_ENG1_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG1_M) == LP5523_LOAD_ENG1)
+#define LP5523_ENG2_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG2_M) == LP5523_LOAD_ENG2)
+#define LP5523_ENG3_IS_LOADING(mode) \
+ ((mode & LP5523_MODE_ENG3_M) == LP5523_LOAD_ENG3)
+
+#define LP5523_EXEC_ENG1_M 0x30 /* Enable Register */
+#define LP5523_EXEC_ENG2_M 0x0C
+#define LP5523_EXEC_ENG3_M 0x03
+#define LP5523_EXEC_M 0x3F
+#define LP5523_RUN_ENG1 0x20
+#define LP5523_RUN_ENG2 0x08
+#define LP5523_RUN_ENG3 0x02
enum lp5523_chip_id {
LP5523,
LP55231,
};
-struct lp5523_engine {
- int id;
- u8 mode;
- u8 prog_page;
- u8 mux_page;
- u16 led_mux;
- u8 engine_mask;
-};
-
-struct lp5523_led {
- int id;
- u8 chan_nr;
- u8 led_current;
- u8 max_current;
- struct led_classdev cdev;
- struct work_struct brightness_work;
- u8 brightness;
-};
-
-struct lp5523_chip {
- struct mutex lock; /* Serialize control */
- struct i2c_client *client;
- struct lp5523_engine engines[LP5523_ENGINES];
- struct lp5523_led leds[LP5523_LEDS];
- struct lp5523_platform_data *pdata;
- u8 num_channels;
- u8 num_leds;
-};
-
-static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev)
+static inline void lp5523_wait_opmode_done(void)
{
- return container_of(cdev, struct lp5523_led, cdev);
-}
-
-static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
-{
- return container_of(engine, struct lp5523_chip,
- engines[engine->id - 1]);
-}
-
-static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
-{
- return container_of(led, struct lp5523_chip,
- leds[led->id]);
+ usleep_range(1000, 2000);
}
-static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
-static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
-static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern);
-
-static void lp5523_led_brightness_work(struct work_struct *work);
-
-static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+static void lp5523_set_led_current(struct lp55xx_led *led, u8 led_current)
{
- return i2c_smbus_write_byte_data(client, reg, value);
+ led->led_current = led_current;
+ lp55xx_write(led->chip, LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+ led_current);
}
-static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
-{
- s32 ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- return ret;
-
- *buf = ret;
- return 0;
-}
-
-static int lp5523_detect(struct i2c_client *client)
+static int lp5523_post_init_device(struct lp55xx_chip *chip)
{
int ret;
- u8 buf;
- ret = lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
- if (ret)
- return ret;
- ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+ ret = lp55xx_write(chip, LP5523_REG_ENABLE, LP5523_ENABLE);
if (ret)
return ret;
- if (buf == 0x40)
- return 0;
- else
- return -ENODEV;
-}
-static int lp5523_configure(struct i2c_client *client)
-{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- int ret = 0;
- u8 status;
-
- /* one pattern per engine setting led mux start and stop addresses */
- static const u8 pattern[][LP5523_PROGRAM_LENGTH] = {
- { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
- { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
- { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
- };
-
- ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
/* Chip startup time is 500 us, 1 - 2 ms gives some margin */
usleep_range(1000, 2000);
- ret |= lp5523_write(client, LP5523_REG_CONFIG,
+ ret = lp55xx_write(chip, LP5523_REG_CONFIG,
LP5523_AUTO_INC | LP5523_PWR_SAVE |
LP5523_CP_AUTO | LP5523_AUTO_CLK |
LP5523_PWM_PWR_SAVE);
+ if (ret)
+ return ret;
/* turn on all leds */
- ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
- ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
-
- /* hardcode 32 bytes of memory for each engine from program memory */
- ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
- ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
- ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
-
- /* write led mux address space for each channel */
- ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
- ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
- ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
-
- if (ret) {
- dev_err(&client->dev, "could not load mux programs\n");
- return -1;
- }
-
- /* set all engines exec state and mode to run 00101010 */
- ret |= lp5523_write(client, LP5523_REG_ENABLE,
- (LP5523_CMD_RUN | LP5523_ENABLE));
-
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
-
- if (ret) {
- dev_err(&client->dev, "could not start mux programs\n");
- return -1;
- }
-
- /* Let the programs run for couple of ms and check the engine status */
- usleep_range(3000, 6000);
- ret = lp5523_read(client, LP5523_REG_STATUS, &status);
- if (ret < 0)
+ ret = lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+ if (ret)
return ret;
- status &= LP5523_ENG_STATUS_MASK;
-
- if (status == LP5523_ENG_STATUS_MASK) {
- dev_dbg(&client->dev, "all engines configured\n");
- } else {
- dev_info(&client->dev, "status == %x\n", status);
- dev_err(&client->dev, "cound not configure LED engine\n");
- return -1;
- }
-
- dev_info(&client->dev, "disabling engines\n");
-
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
-
- return ret;
+ return lp55xx_write(chip, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
}
-static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+static void lp5523_load_engine(struct lp55xx_chip *chip)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret;
- u8 engine_state;
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
+ };
- ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
- if (ret)
- goto fail;
+ u8 val[] = {
+ [LP55XX_ENGINE_1] = LP5523_LOAD_ENG1,
+ [LP55XX_ENGINE_2] = LP5523_LOAD_ENG2,
+ [LP55XX_ENGINE_3] = LP5523_LOAD_ENG3,
+ };
- engine_state &= ~(engine->engine_mask);
+ u8 page_sel[] = {
+ [LP55XX_ENGINE_1] = LP5523_PAGE_ENG1,
+ [LP55XX_ENGINE_2] = LP5523_PAGE_ENG2,
+ [LP55XX_ENGINE_3] = LP5523_PAGE_ENG3,
+ };
- /* set mode only for this engine */
- mode &= engine->engine_mask;
+ lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], val[idx]);
- engine_state |= mode;
+ lp5523_wait_opmode_done();
- ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
-fail:
- return ret;
+ lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
}
-static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+static void lp5523_stop_engine(struct lp55xx_chip *chip)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret = 0;
-
- ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+ lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
+ lp5523_wait_opmode_done();
+}
- ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
- ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
- (u8)(mux >> 8));
- ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
- engine->led_mux = mux;
+static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
+{
+ int i;
- return ret;
+ for (i = 0; i < LP5523_MAX_LEDS; i++)
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0);
}
-static int lp5523_load_program(struct lp5523_engine *engine, const u8 *pattern)
+static void lp5523_run_engine(struct lp55xx_chip *chip, bool start)
{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
+ int ret;
+ u8 mode;
+ u8 exec;
- int ret = 0;
+ /* stop engine */
+ if (!start) {
+ lp5523_stop_engine(chip);
+ lp5523_turn_off_channels(chip);
+ return;
+ }
- ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+ /*
+ * To run the engine,
+	 * the operation mode and enable registers should be updated at the same time
+ */
- ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
- engine->prog_page);
- ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
- LP5523_PROGRAM_LENGTH, pattern);
+ ret = lp55xx_read(chip, LP5523_REG_OP_MODE, &mode);
+ if (ret)
+ return;
- return ret;
-}
+ ret = lp55xx_read(chip, LP5523_REG_ENABLE, &exec);
+ if (ret)
+ return;
-static int lp5523_run_program(struct lp5523_engine *engine)
-{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret;
+	/* change operation mode to RUN only for engines that are in LOAD state */
+ if (LP5523_ENG1_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG1_M) | LP5523_RUN_ENG1;
+ exec = (exec & ~LP5523_EXEC_ENG1_M) | LP5523_RUN_ENG1;
+ }
- ret = lp5523_write(client, LP5523_REG_ENABLE,
- LP5523_CMD_RUN | LP5523_ENABLE);
- if (ret)
- goto fail;
+ if (LP5523_ENG2_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG2_M) | LP5523_RUN_ENG2;
+ exec = (exec & ~LP5523_EXEC_ENG2_M) | LP5523_RUN_ENG2;
+ }
- ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
-fail:
- return ret;
+ if (LP5523_ENG3_IS_LOADING(mode)) {
+ mode = (mode & ~LP5523_MODE_ENG3_M) | LP5523_RUN_ENG3;
+ exec = (exec & ~LP5523_EXEC_ENG3_M) | LP5523_RUN_ENG3;
+ }
+
+ lp55xx_write(chip, LP5523_REG_OP_MODE, mode);
+ lp5523_wait_opmode_done();
+
+ lp55xx_update_bits(chip, LP5523_REG_ENABLE, LP5523_EXEC_M, exec);
}
-static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+static int lp5523_update_program_memory(struct lp55xx_chip *chip,
+ const u8 *data, size_t size)
{
+ u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+ unsigned cmd;
+ char c[3];
+ int update_size;
+ int nrchars;
+ int offset = 0;
+ int ret;
int i;
- u16 tmp_mux = 0;
-
- len = min_t(int, len, LP5523_LEDS);
- for (i = 0; i < len; i++) {
- switch (buf[i]) {
- case '1':
- tmp_mux |= (1 << i);
- break;
- case '0':
- break;
- case '\n':
- i = len;
- break;
- default:
- return -1;
- }
- }
- *mux = tmp_mux;
- return 0;
-}
+ /* clear program memory before updating */
+ for (i = 0; i < LP5523_PROGRAM_LENGTH; i++)
+ lp55xx_write(chip, LP5523_REG_PROG_MEM + i, 0);
-static void lp5523_mux_to_array(u16 led_mux, char *array)
-{
- int i, pos = 0;
- for (i = 0; i < LP5523_LEDS; i++)
- pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+ i = 0;
+ while ((offset < size - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length specifier only applies to %s */
+ ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
+ if (ret != 1)
+ goto err;
- array[pos] = '\0';
-}
+ ret = sscanf(c, "%2x", &cmd);
+ if (ret != 1)
+ goto err;
-/*--------------------------------------------------------------*/
-/* Sysfs interface */
-/*--------------------------------------------------------------*/
+ pattern[i] = (u8)cmd;
+ offset += nrchars;
+ i++;
+ }
-static ssize_t show_engine_leds(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- char mux[LP5523_LEDS + 1];
+ /* Each instruction is 16bit long. Check that length is even */
+ if (i % 2)
+ goto err;
- lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+ update_size = i;
+ for (i = 0; i < update_size; i++)
+ lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
- return sprintf(buf, "%s\n", mux);
-}
+ return 0;
-#define show_leds(nr) \
-static ssize_t show_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_leds(dev, attr, buf, nr); \
+err:
+ dev_err(&chip->cl->dev, "wrong pattern format\n");
+ return -EINVAL;
}
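
A note on the input this parser expects (a sketch inferred from the code above, not text quoted from the patch): the pattern arrives as plain ASCII hex, two characters per byte, with whitespace optional between bytes, and the byte count must be even because each engine instruction is 16 bits wide, up to LP5523_PROGRAM_LENGTH (32) bytes in total. A firmware file of the following shape would parse cleanly; the opcode values are placeholders for illustration, not a tested LED sequence:

    9d80 4000 427f 0d7f 7f00 0000

With the lp55xx common layer introduced later in this patch, such a file is typically provided under the firmware search path (e.g. /lib/firmware) named after the I2C client, since lp55xx_request_firmware() passes chip->cl->name to request_firmware_nowait(); the exact deployment path is an assumption, not part of this patch.
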
-show_leds(1)
-show_leds(2)
-show_leds(3)
-static ssize_t store_engine_leds(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
+static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- u16 mux = 0;
- ssize_t ret;
+ const struct firmware *fw = chip->fw;
- if (lp5523_mux_parse(buf, &mux, len))
- return -EINVAL;
-
- mutex_lock(&chip->lock);
- ret = -EINVAL;
- if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
- goto leave;
+ if (fw->size > LP5523_PROGRAM_LENGTH) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+ }
- if (lp5523_load_mux(&chip->engines[nr - 1], mux))
- goto leave;
+ /*
+	 * Program memory sequence
+ * 1) set engine mode to "LOAD"
+ * 2) write firmware data into program memory
+ */
- ret = len;
-leave:
- mutex_unlock(&chip->lock);
- return ret;
+ lp5523_load_engine(chip);
+ lp5523_update_program_memory(chip, fw->data, fw->size);
}
-#define store_leds(nr) \
-static ssize_t store_engine##nr##_leds(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_leds(dev, attr, buf, len, nr); \
-}
-store_leds(1)
-store_leds(2)
-store_leds(3)
-
static ssize_t lp5523_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ struct lp55xx_platform_data *pdata = chip->pdata;
int i, ret, pos = 0;
- int led = 0;
u8 status, adc, vdd;
mutex_lock(&chip->lock);
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
/* Check that ext clock is really in use if requested */
- if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+ if (pdata->clock_mode == LP55XX_CLOCK_EXT) {
if ((status & LP5523_EXT_CLK_USED) == 0)
goto fail;
+ }
/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
- lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
- LP5523_EN_LEDTEST | 16);
+ lp55xx_write(chip, LP5523_REG_LED_TEST_CTRL, LP5523_EN_LEDTEST | 16);
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
- ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &vdd);
if (ret < 0)
goto fail;
vdd--; /* There may be some fluctuation in measurement */
- for (i = 0; i < LP5523_LEDS; i++) {
+ for (i = 0; i < LP5523_MAX_LEDS; i++) {
/* Skip non-existing channels */
- if (chip->pdata->led_config[i].led_current == 0)
+ if (pdata->led_config[i].led_current == 0)
continue;
/* Set default current */
- lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + i,
- chip->pdata->led_config[i].led_current);
+ lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i,
+ pdata->led_config[i].led_current);
- lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0xff);
/* let current stabilize 2 - 4ms before measurements start */
usleep_range(2000, 4000);
- lp5523_write(chip->client,
- LP5523_REG_LED_TEST_CTRL,
+ lp55xx_write(chip, LP5523_REG_LED_TEST_CTRL,
LP5523_EN_LEDTEST | i);
/* ADC conversion time is 2.7 ms typically */
usleep_range(3000, 6000);
- ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret < 0)
goto fail;
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000);/* Was not ready. Wait. */
- ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+ ret = lp55xx_read(chip, LP5523_REG_LED_TEST_ADC, &adc);
if (ret < 0)
goto fail;
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
pos += sprintf(buf + pos, "LED %d FAIL\n", i);
- lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + i, 0x00);
/* Restore current */
- lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + i,
- chip->leds[led].led_current);
+ lp55xx_write(chip, LP5523_REG_LED_CURRENT_BASE + i,
+ led->led_current);
led++;
}
if (pos == 0)
@@ -530,249 +381,22 @@ release_lock:
return pos;
}
-static void lp5523_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- struct lp5523_led *led = cdev_to_led(cdev);
-
- led->brightness = (u8)brightness;
-
- schedule_work(&led->brightness_work);
-}
-
static void lp5523_led_brightness_work(struct work_struct *work)
{
- struct lp5523_led *led = container_of(work,
- struct lp5523_led,
+ struct lp55xx_led *led = container_of(work, struct lp55xx_led,
brightness_work);
- struct lp5523_chip *chip = led_to_lp5523(led);
- struct i2c_client *client = chip->client;
+ struct lp55xx_chip *chip = led->chip;
mutex_lock(&chip->lock);
-
- lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+ lp55xx_write(chip, LP5523_REG_LED_PWM_BASE + led->chan_nr,
led->brightness);
-
- mutex_unlock(&chip->lock);
-}
-
-static int lp5523_do_store_load(struct lp5523_engine *engine,
- const char *buf, size_t len)
-{
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- int ret, nrchars, offset = 0, i = 0;
- char c[3];
- unsigned cmd;
- u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
-
- if (engine->mode != LP5523_CMD_LOAD)
- return -EINVAL;
-
- while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
- /* separate sscanfs because length is working only for %s */
- ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- ret = sscanf(c, "%2x", &cmd);
- if (ret != 1)
- goto fail;
- pattern[i] = (u8)cmd;
-
- offset += nrchars;
- i++;
- }
-
- /* Each instruction is 16bit long. Check that length is even */
- if (i % 2)
- goto fail;
-
- mutex_lock(&chip->lock);
- ret = lp5523_load_program(engine, pattern);
mutex_unlock(&chip->lock);
-
- if (ret) {
- dev_err(&client->dev, "failed loading pattern\n");
- return ret;
- }
-
- return len;
-fail:
- dev_err(&client->dev, "wrong pattern format\n");
- return -EINVAL;
-}
-
-static ssize_t store_engine_load(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
}
-#define store_load(nr) \
-static ssize_t store_engine##nr##_load(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_load(dev, attr, buf, len, nr); \
-}
-store_load(1)
-store_load(2)
-store_load(3)
-
-static ssize_t show_engine_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- switch (chip->engines[nr - 1].mode) {
- case LP5523_CMD_RUN:
- return sprintf(buf, "run\n");
- case LP5523_CMD_LOAD:
- return sprintf(buf, "load\n");
- case LP5523_CMD_DISABLED:
- return sprintf(buf, "disabled\n");
- default:
- return sprintf(buf, "disabled\n");
- }
-}
-
-#define show_mode(nr) \
-static ssize_t show_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return show_engine_mode(dev, attr, buf, nr); \
-}
-show_mode(1)
-show_mode(2)
-show_mode(3)
-
-static ssize_t store_engine_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len, int nr)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- struct lp5523_engine *engine = &chip->engines[nr - 1];
- mutex_lock(&chip->lock);
-
- if (!strncmp(buf, "run", 3))
- lp5523_set_mode(engine, LP5523_CMD_RUN);
- else if (!strncmp(buf, "load", 4))
- lp5523_set_mode(engine, LP5523_CMD_LOAD);
- else if (!strncmp(buf, "disabled", 8))
- lp5523_set_mode(engine, LP5523_CMD_DISABLED);
-
- mutex_unlock(&chip->lock);
- return len;
-}
-
-#define store_mode(nr) \
-static ssize_t store_engine##nr##_mode(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t len) \
-{ \
- return store_engine_mode(dev, attr, buf, len, nr); \
-}
-store_mode(1)
-store_mode(2)
-store_mode(3)
-
-static ssize_t show_max_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
-
- return sprintf(buf, "%d\n", led->max_current);
-}
-
-static ssize_t show_current(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
-
- return sprintf(buf, "%d\n", led->led_current);
-}
-
-static ssize_t store_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lp5523_led *led = cdev_to_led(led_cdev);
- struct lp5523_chip *chip = led_to_lp5523(led);
- ssize_t ret;
- unsigned long curr;
-
- if (kstrtoul(buf, 0, &curr))
- return -EINVAL;
-
- if (curr > led->max_current)
- return -EINVAL;
-
- mutex_lock(&chip->lock);
- ret = lp5523_write(chip->client,
- LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
- (u8)curr);
- mutex_unlock(&chip->lock);
-
- if (ret < 0)
- return ret;
-
- led->led_current = (u8)curr;
-
- return len;
-}
-
-/* led class device attributes */
-static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
-static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
-
-static struct attribute *lp5523_led_attributes[] = {
- &dev_attr_led_current.attr,
- &dev_attr_max_current.attr,
- NULL,
-};
-
-static struct attribute_group lp5523_led_attribute_group = {
- .attrs = lp5523_led_attributes
-};
-
-/* device attributes */
-static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
- show_engine1_mode, store_engine1_mode);
-static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
- show_engine2_mode, store_engine2_mode);
-static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
- show_engine3_mode, store_engine3_mode);
-static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
- show_engine1_leds, store_engine1_leds);
-static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
- show_engine2_leds, store_engine2_leds);
-static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
- show_engine3_leds, store_engine3_leds);
-static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
-static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
-static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
static struct attribute *lp5523_attributes[] = {
- &dev_attr_engine1_mode.attr,
- &dev_attr_engine2_mode.attr,
- &dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- &dev_attr_engine1_load.attr,
- &dev_attr_engine1_leds.attr,
- &dev_attr_engine2_load.attr,
- &dev_attr_engine2_leds.attr,
- &dev_attr_engine3_load.attr,
- &dev_attr_engine3_leds.attr,
NULL,
};
@@ -780,252 +404,91 @@ static const struct attribute_group lp5523_group = {
.attrs = lp5523_attributes,
};
-static int lp5523_register_sysfs(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- int ret;
-
- ret = sysfs_create_group(&dev->kobj, &lp5523_group);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void lp5523_unregister_sysfs(struct i2c_client *client)
-{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
- int i;
-
- sysfs_remove_group(&dev->kobj, &lp5523_group);
-
- for (i = 0; i < chip->num_leds; i++)
- sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
- &lp5523_led_attribute_group);
-}
-
-/*--------------------------------------------------------------*/
-/* Set chip operating mode */
-/*--------------------------------------------------------------*/
-static void lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
-{
- /* if in that mode already do nothing, except for run */
- if (mode == engine->mode && mode != LP5523_CMD_RUN)
- return;
-
- switch (mode) {
- case LP5523_CMD_RUN:
- lp5523_run_program(engine);
- break;
- case LP5523_CMD_LOAD:
- lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
- lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
- break;
- case LP5523_CMD_DISABLED:
- lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
- break;
- default:
- return;
- }
-
- engine->mode = mode;
-}
-
-/*--------------------------------------------------------------*/
-/* Probe, Attach, Remove */
-/*--------------------------------------------------------------*/
-static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
-{
- if (id < 1 || id > LP5523_ENGINES)
- return -1;
- engine->id = id;
- engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
- engine->prog_page = id - 1;
- engine->mux_page = id + 2;
-
- return 0;
-}
+/* Chip specific configurations */
+static struct lp55xx_device_config lp5523_cfg = {
+ .reset = {
+ .addr = LP5523_REG_RESET,
+ .val = LP5523_RESET,
+ },
+ .enable = {
+ .addr = LP5523_REG_ENABLE,
+ .val = LP5523_ENABLE,
+ },
+ .max_channel = LP5523_MAX_LEDS,
+ .post_init_device = lp5523_post_init_device,
+ .brightness_work_fn = lp5523_led_brightness_work,
+ .set_led_current = lp5523_set_led_current,
+ .firmware_cb = lp5523_firmware_loaded,
+ .run_engine = lp5523_run_engine,
+ .dev_attr_group = &lp5523_group,
+};
-static int lp5523_init_led(struct lp5523_led *led, struct device *dev,
- int chan, struct lp5523_platform_data *pdata,
- const char *chip_name)
+static int lp5523_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- char name[32];
- int res;
+ int ret;
+ struct lp55xx_chip *chip;
+ struct lp55xx_led *led;
+ struct lp55xx_platform_data *pdata = client->dev.platform_data;
- if (chan >= LP5523_LEDS)
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data\n");
return -EINVAL;
-
- if (pdata->led_config[chan].led_current) {
- led->led_current = pdata->led_config[chan].led_current;
- led->max_current = pdata->led_config[chan].max_current;
- led->chan_nr = pdata->led_config[chan].chan_nr;
-
- if (led->chan_nr >= LP5523_LEDS) {
- dev_err(dev, "Use channel numbers between 0 and %d\n",
- LP5523_LEDS - 1);
- return -EINVAL;
- }
-
- if (pdata->led_config[chan].name) {
- led->cdev.name = pdata->led_config[chan].name;
- } else {
- snprintf(name, sizeof(name), "%s:channel%d",
- pdata->label ? : chip_name, chan);
- led->cdev.name = name;
- }
-
- led->cdev.brightness_set = lp5523_set_brightness;
- res = led_classdev_register(dev, &led->cdev);
- if (res < 0) {
- dev_err(dev, "couldn't register led on channel %d\n",
- chan);
- return res;
- }
- res = sysfs_create_group(&led->cdev.dev->kobj,
- &lp5523_led_attribute_group);
- if (res < 0) {
- dev_err(dev, "couldn't register current attribute\n");
- led_classdev_unregister(&led->cdev);
- return res;
- }
- } else {
- led->led_current = 0;
}
- return 0;
-}
-
-static int lp5523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct lp5523_chip *chip;
- struct lp5523_platform_data *pdata;
- int ret, i, led;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- i2c_set_clientdata(client, chip);
- chip->client = client;
+ led = devm_kzalloc(&client->dev,
+ sizeof(*led) * pdata->num_channels, GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
- pdata = client->dev.platform_data;
-
- if (!pdata) {
- dev_err(&client->dev, "no platform data\n");
- return -EINVAL;
- }
+ chip->cl = client;
+ chip->pdata = pdata;
+ chip->cfg = &lp5523_cfg;
mutex_init(&chip->lock);
- chip->pdata = pdata;
+ i2c_set_clientdata(client, led);
- if (pdata->setup_resources) {
- ret = pdata->setup_resources();
- if (ret < 0)
- return ret;
- }
-
- if (pdata->enable) {
- pdata->enable(0);
- usleep_range(1000, 2000); /* Keep enable down at least 1ms */
- pdata->enable(1);
- usleep_range(1000, 2000); /* 500us abs min. */
- }
-
- lp5523_write(client, LP5523_REG_RESET, 0xff);
- usleep_range(10000, 20000); /*
- * Exact value is not available. 10 - 20ms
- * appears to be enough for reset.
- */
- ret = lp5523_detect(client);
+ ret = lp55xx_init_device(chip);
if (ret)
- goto fail1;
+ goto err_init;
dev_info(&client->dev, "%s Programmable led chip found\n", id->name);
- /* Initialize engines */
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
- ret = lp5523_init_engine(&chip->engines[i], i + 1);
- if (ret) {
- dev_err(&client->dev, "error initializing engine\n");
- goto fail1;
- }
- }
- ret = lp5523_configure(client);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring chip\n");
- goto fail1;
- }
-
- /* Initialize leds */
- chip->num_channels = pdata->num_channels;
- chip->num_leds = 0;
- led = 0;
- for (i = 0; i < pdata->num_channels; i++) {
- /* Do not initialize channels that are not connected */
- if (pdata->led_config[i].led_current == 0)
- continue;
-
- INIT_WORK(&chip->leds[led].brightness_work,
- lp5523_led_brightness_work);
-
- ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata,
- id->name);
- if (ret) {
- dev_err(&client->dev, "error initializing leds\n");
- goto fail2;
- }
- chip->num_leds++;
-
- chip->leds[led].id = led;
- /* Set LED current */
- lp5523_write(client,
- LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
- chip->leds[led].led_current);
-
- led++;
- }
+ ret = lp55xx_register_leds(led, chip);
+ if (ret)
+ goto err_register_leds;
- ret = lp5523_register_sysfs(client);
+ ret = lp55xx_register_sysfs(chip);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail2;
+ goto err_register_sysfs;
}
- return ret;
-fail2:
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- flush_work(&chip->leds[i].brightness_work);
- }
-fail1:
- if (pdata->enable)
- pdata->enable(0);
- if (pdata->release_resources)
- pdata->release_resources();
+
+ return 0;
+
+err_register_sysfs:
+ lp55xx_unregister_leds(led, chip);
+err_register_leds:
+ lp55xx_deinit_device(chip);
+err_init:
return ret;
}
static int lp5523_remove(struct i2c_client *client)
{
- struct lp5523_chip *chip = i2c_get_clientdata(client);
- int i;
-
- /* Disable engine mode */
- lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+ struct lp55xx_led *led = i2c_get_clientdata(client);
+ struct lp55xx_chip *chip = led->chip;
- lp5523_unregister_sysfs(client);
-
- for (i = 0; i < chip->num_leds; i++) {
- led_classdev_unregister(&chip->leds[i].cdev);
- flush_work(&chip->leds[i].brightness_work);
- }
+ lp5523_stop_engine(chip);
+ lp55xx_unregister_sysfs(chip);
+ lp55xx_unregister_leds(led, chip);
+ lp55xx_deinit_device(chip);
- if (chip->pdata->enable)
- chip->pdata->enable(0);
- if (chip->pdata->release_resources)
- chip->pdata->release_resources();
return 0;
}
@@ -1049,5 +512,6 @@ static struct i2c_driver lp5523_driver = {
module_i2c_driver(lp5523_driver);
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
MODULE_DESCRIPTION("LP5523 LED engine");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
new file mode 100644
index 000000000000..d9eb84157423
--- /dev/null
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -0,0 +1,523 @@
+/*
+ * LP5521/LP5523/LP55231 Common Driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.c, leds-lp5523.c
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_data/leds-lp55xx.h>
+
+#include "leds-lp55xx-common.h"
+
+static struct lp55xx_led *cdev_to_lp55xx_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lp55xx_led, cdev);
+}
+
+static struct lp55xx_led *dev_to_lp55xx_led(struct device *dev)
+{
+ return cdev_to_lp55xx_led(dev_get_drvdata(dev));
+}
+
+static void lp55xx_reset_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+ u8 addr = cfg->reset.addr;
+ u8 val = cfg->reset.val;
+
+ /* no error checking here because no ACK from the device after reset */
+ lp55xx_write(chip, addr, val);
+}
+
+static int lp55xx_detect_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+ u8 addr = cfg->enable.addr;
+ u8 val = cfg->enable.val;
+ int ret;
+
+ ret = lp55xx_write(chip, addr, val);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ ret = lp55xx_read(chip, addr, &val);
+ if (ret)
+ return ret;
+
+ if (val != cfg->enable.val)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lp55xx_post_init_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_device_config *cfg = chip->cfg;
+
+ if (!cfg->post_init_device)
+ return 0;
+
+ return cfg->post_init_device(chip);
+}
+
+static ssize_t lp55xx_show_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+
+ return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t lp55xx_store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long curr;
+
+ if (kstrtoul(buf, 0, &curr))
+ return -EINVAL;
+
+ if (curr > led->max_current)
+ return -EINVAL;
+
+ if (!chip->cfg->set_led_current)
+ return len;
+
+ mutex_lock(&chip->lock);
+ chip->cfg->set_led_current(led, (u8)curr);
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+
+static ssize_t lp55xx_show_max_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = dev_to_lp55xx_led(dev);
+
+ return sprintf(buf, "%d\n", led->max_current);
+}
+
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, lp55xx_show_current,
+ lp55xx_store_current);
+static DEVICE_ATTR(max_current, S_IRUGO , lp55xx_show_max_current, NULL);
+
+static struct attribute *lp55xx_led_attributes[] = {
+ &dev_attr_led_current.attr,
+ &dev_attr_max_current.attr,
+ NULL,
+};
+
+static struct attribute_group lp55xx_led_attr_group = {
+ .attrs = lp55xx_led_attributes
+};
+
+static void lp55xx_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lp55xx_led *led = cdev_to_lp55xx_led(cdev);
+
+ led->brightness = (u8)brightness;
+ schedule_work(&led->brightness_work);
+}
+
+static int lp55xx_init_led(struct lp55xx_led *led,
+ struct lp55xx_chip *chip, int chan)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ struct device *dev = &chip->cl->dev;
+ char name[32];
+ int ret;
+ int max_channel = cfg->max_channel;
+
+ if (chan >= max_channel) {
+ dev_err(dev, "invalid channel: %d / %d\n", chan, max_channel);
+ return -EINVAL;
+ }
+
+ if (pdata->led_config[chan].led_current == 0)
+ return 0;
+
+ led->led_current = pdata->led_config[chan].led_current;
+ led->max_current = pdata->led_config[chan].max_current;
+ led->chan_nr = pdata->led_config[chan].chan_nr;
+
+ if (led->chan_nr >= max_channel) {
+ dev_err(dev, "Use channel numbers between 0 and %d\n",
+ max_channel - 1);
+ return -EINVAL;
+ }
+
+ led->cdev.brightness_set = lp55xx_set_brightness;
+
+ if (pdata->led_config[chan].name) {
+ led->cdev.name = pdata->led_config[chan].name;
+ } else {
+ snprintf(name, sizeof(name), "%s:channel%d",
+ pdata->label ? : chip->cl->name, chan);
+ led->cdev.name = name;
+ }
+
+ /*
+ * register led class device for each channel and
+ * add device attributes
+ */
+
+ ret = led_classdev_register(dev, &led->cdev);
+ if (ret) {
+ dev_err(dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj, &lp55xx_led_attr_group);
+ if (ret) {
+ dev_err(dev, "led sysfs err: %d\n", ret);
+ led_classdev_unregister(&led->cdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
+{
+ struct lp55xx_chip *chip = context;
+ struct device *dev = &chip->cl->dev;
+
+ if (!fw) {
+ dev_err(dev, "firmware request failed\n");
+ goto out;
+ }
+
+ /* handling firmware data is chip dependent */
+ mutex_lock(&chip->lock);
+
+ chip->fw = fw;
+ if (chip->cfg->firmware_cb)
+ chip->cfg->firmware_cb(chip);
+
+ mutex_unlock(&chip->lock);
+
+out:
+ /* firmware should be released for other channel use */
+ release_firmware(chip->fw);
+}
+
+static int lp55xx_request_firmware(struct lp55xx_chip *chip)
+{
+ const char *name = chip->cl->name;
+ struct device *dev = &chip->cl->dev;
+
+ return request_firmware_nowait(THIS_MODULE, true, name, dev,
+ GFP_KERNEL, chip, lp55xx_firmware_loaded);
+}
+
+static ssize_t lp55xx_show_engine_select(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+
+ return sprintf(buf, "%d\n", chip->engine_idx);
+}
+
+static ssize_t lp55xx_store_engine_select(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* select the engine to be run */
+
+ switch (val) {
+ case LP55XX_ENGINE_1:
+ case LP55XX_ENGINE_2:
+ case LP55XX_ENGINE_3:
+ mutex_lock(&chip->lock);
+ chip->engine_idx = val;
+ ret = lp55xx_request_firmware(chip);
+ mutex_unlock(&chip->lock);
+ break;
+ default:
+ dev_err(dev, "%lu: invalid engine index. (1, 2, 3)\n", val);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(dev, "request firmware err: %d\n", ret);
+ return ret;
+ }
+
+ return len;
+}
+
+static inline void lp55xx_run_engine(struct lp55xx_chip *chip, bool start)
+{
+ if (chip->cfg->run_engine)
+ chip->cfg->run_engine(chip, start);
+}
+
+static ssize_t lp55xx_store_engine_run(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+ struct lp55xx_chip *chip = led->chip;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* run or stop the selected engine */
+
+ if (val <= 0) {
+ lp55xx_run_engine(chip, false);
+ return len;
+ }
+
+ mutex_lock(&chip->lock);
+ lp55xx_run_engine(chip, true);
+ mutex_unlock(&chip->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(select_engine, S_IRUGO | S_IWUSR,
+ lp55xx_show_engine_select, lp55xx_store_engine_select);
+static DEVICE_ATTR(run_engine, S_IWUSR, NULL, lp55xx_store_engine_run);
+
+static struct attribute *lp55xx_engine_attributes[] = {
+ &dev_attr_select_engine.attr,
+ &dev_attr_run_engine.attr,
+ NULL,
+};
+
+static const struct attribute_group lp55xx_engine_attr_group = {
+ .attrs = lp55xx_engine_attributes,
+};
+
+int lp55xx_write(struct lp55xx_chip *chip, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(chip->cl, reg, val);
+}
+EXPORT_SYMBOL_GPL(lp55xx_write);
+
+int lp55xx_read(struct lp55xx_chip *chip, u8 reg, u8 *val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(chip->cl, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lp55xx_read);
+
+int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+ u8 tmp;
+
+ ret = lp55xx_read(chip, reg, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ return lp55xx_write(chip, reg, tmp);
+}
+EXPORT_SYMBOL_GPL(lp55xx_update_bits);
+
+int lp55xx_init_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata;
+ struct lp55xx_device_config *cfg;
+ struct device *dev = &chip->cl->dev;
+ int ret = 0;
+
+ WARN_ON(!chip);
+
+ pdata = chip->pdata;
+ cfg = chip->cfg;
+
+ if (!pdata || !cfg)
+ return -EINVAL;
+
+ if (pdata->setup_resources) {
+ ret = pdata->setup_resources();
+ if (ret < 0) {
+ dev_err(dev, "setup resoure err: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (pdata->enable) {
+ pdata->enable(0);
+ usleep_range(1000, 2000); /* Keep enable down at least 1ms */
+ pdata->enable(1);
+ usleep_range(1000, 2000); /* 500us abs min. */
+ }
+
+ lp55xx_reset_device(chip);
+
+ /*
+ * Exact value is not available. 10 - 20ms
+ * appears to be enough for reset.
+ */
+ usleep_range(10000, 20000);
+
+ ret = lp55xx_detect_device(chip);
+ if (ret) {
+ dev_err(dev, "device detection err: %d\n", ret);
+ goto err;
+ }
+
+ /* chip specific initialization */
+ ret = lp55xx_post_init_device(chip);
+ if (ret) {
+ dev_err(dev, "post init device err: %d\n", ret);
+ goto err_post_init;
+ }
+
+ return 0;
+
+err_post_init:
+ lp55xx_deinit_device(chip);
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_init_device);
+
+void lp55xx_deinit_device(struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+
+ if (pdata->enable)
+ pdata->enable(0);
+
+ if (pdata->release_resources)
+ pdata->release_resources();
+}
+EXPORT_SYMBOL_GPL(lp55xx_deinit_device);
+
+int lp55xx_register_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
+{
+ struct lp55xx_platform_data *pdata = chip->pdata;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ int num_channels = pdata->num_channels;
+ struct lp55xx_led *each;
+ u8 led_current;
+ int ret;
+ int i;
+
+ if (!cfg->brightness_work_fn) {
+ dev_err(&chip->cl->dev, "empty brightness configuration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_channels; i++) {
+
+ /* do not initialize channels that are not connected */
+ if (pdata->led_config[i].led_current == 0)
+ continue;
+
+ led_current = pdata->led_config[i].led_current;
+ each = led + i;
+ ret = lp55xx_init_led(each, chip, i);
+ if (ret)
+ goto err_init_led;
+
+ INIT_WORK(&each->brightness_work, cfg->brightness_work_fn);
+
+ chip->num_leds++;
+ each->chip = chip;
+
+ /* setting led current at each channel */
+ if (cfg->set_led_current)
+ cfg->set_led_current(each, led_current);
+ }
+
+ return 0;
+
+err_init_led:
+ lp55xx_unregister_leds(led, chip);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lp55xx_register_leds);
+
+void lp55xx_unregister_leds(struct lp55xx_led *led, struct lp55xx_chip *chip)
+{
+ int i;
+ struct lp55xx_led *each;
+
+ for (i = 0; i < chip->num_leds; i++) {
+ each = led + i;
+ led_classdev_unregister(&each->cdev);
+ flush_work(&each->brightness_work);
+ }
+}
+EXPORT_SYMBOL_GPL(lp55xx_unregister_leds);
+
+int lp55xx_register_sysfs(struct lp55xx_chip *chip)
+{
+ struct device *dev = &chip->cl->dev;
+ struct lp55xx_device_config *cfg = chip->cfg;
+ int ret;
+
+ if (!cfg->run_engine || !cfg->firmware_cb)
+ goto dev_specific_attrs;
+
+ ret = sysfs_create_group(&dev->kobj, &lp55xx_engine_attr_group);
+ if (ret)
+ return ret;
+
+dev_specific_attrs:
+ return cfg->dev_attr_group ?
+ sysfs_create_group(&dev->kobj, cfg->dev_attr_group) : 0;
+}
+EXPORT_SYMBOL_GPL(lp55xx_register_sysfs);
+
+void lp55xx_unregister_sysfs(struct lp55xx_chip *chip)
+{
+ struct device *dev = &chip->cl->dev;
+ struct lp55xx_device_config *cfg = chip->cfg;
+
+ if (cfg->dev_attr_group)
+ sysfs_remove_group(&dev->kobj, cfg->dev_attr_group);
+
+ sysfs_remove_group(&dev->kobj, &lp55xx_engine_attr_group);
+}
+EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs);
+
+MODULE_AUTHOR("Milo Kim <milo.kim@ti.com>");
+MODULE_DESCRIPTION("LP55xx Common Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
new file mode 100644
index 000000000000..ece4761a1302
--- /dev/null
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -0,0 +1,134 @@
+/*
+ * LP55XX Common Driver Header
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.c, leds-lp5523.c
+ */
+
+#ifndef _LEDS_LP55XX_COMMON_H
+#define _LEDS_LP55XX_COMMON_H
+
+enum lp55xx_engine_index {
+ LP55XX_ENGINE_INVALID,
+ LP55XX_ENGINE_1,
+ LP55XX_ENGINE_2,
+ LP55XX_ENGINE_3,
+};
+
+struct lp55xx_led;
+struct lp55xx_chip;
+
+/*
+ * struct lp55xx_reg
+ * @addr : Register address
+ * @val : Register value
+ */
+struct lp55xx_reg {
+ u8 addr;
+ u8 val;
+};
+
+/*
+ * struct lp55xx_device_config
+ * @reset : Chip specific reset command
+ * @enable : Chip specific enable command
+ * @max_channel : Maximum number of channels
+ * @post_init_device : Chip specific initialization code
+ * @brightness_work_fn : Brightness work function
+ * @set_led_current : LED current set function
+ * @firmware_cb        : Callback invoked when the firmware is loaded
+ * @run_engine : Run internal engine for pattern
+ * @dev_attr_group : Device specific attributes
+ */
+struct lp55xx_device_config {
+ const struct lp55xx_reg reset;
+ const struct lp55xx_reg enable;
+ const int max_channel;
+
+ /* define if the device has specific initialization process */
+ int (*post_init_device) (struct lp55xx_chip *chip);
+
+ /* access brightness register */
+ void (*brightness_work_fn)(struct work_struct *work);
+
+ /* current setting function */
+ void (*set_led_current) (struct lp55xx_led *led, u8 led_current);
+
+ /* access program memory when the firmware is loaded */
+ void (*firmware_cb)(struct lp55xx_chip *chip);
+
+ /* used for running firmware LED patterns */
+ void (*run_engine) (struct lp55xx_chip *chip, bool start);
+
+ /* additional device specific attributes */
+ const struct attribute_group *dev_attr_group;
+};
+
+/*
+ * struct lp55xx_chip
+ * @cl         : I2C client used for register access
+ * @pdata : Platform specific data
+ * @lock : Lock for user-space interface
+ * @num_leds : Number of registered LEDs
+ * @cfg : Device specific configuration data
+ * @engine_idx : Selected engine number
+ * @fw : Firmware data for running a LED pattern
+ */
+struct lp55xx_chip {
+ struct i2c_client *cl;
+ struct lp55xx_platform_data *pdata;
+ struct mutex lock; /* lock for user-space interface */
+ int num_leds;
+ struct lp55xx_device_config *cfg;
+ enum lp55xx_engine_index engine_idx;
+ const struct firmware *fw;
+};
+
+/*
+ * struct lp55xx_led
+ * @chan_nr : Channel number
+ * @cdev : LED class device
+ * @led_current : Current setting at each led channel
+ * @max_current    : Maximum current at each led channel
+ * @brightness_work : Workqueue for brightness control
+ * @brightness : Brightness value
+ * @chip : The lp55xx chip data
+ */
+struct lp55xx_led {
+ int chan_nr;
+ struct led_classdev cdev;
+ u8 led_current;
+ u8 max_current;
+ struct work_struct brightness_work;
+ u8 brightness;
+ struct lp55xx_chip *chip;
+};
+
+/* register access */
+extern int lp55xx_write(struct lp55xx_chip *chip, u8 reg, u8 val);
+extern int lp55xx_read(struct lp55xx_chip *chip, u8 reg, u8 *val);
+extern int lp55xx_update_bits(struct lp55xx_chip *chip, u8 reg,
+ u8 mask, u8 val);
+
+/* common device init/deinit functions */
+extern int lp55xx_init_device(struct lp55xx_chip *chip);
+extern void lp55xx_deinit_device(struct lp55xx_chip *chip);
+
+/* common LED class device functions */
+extern int lp55xx_register_leds(struct lp55xx_led *led,
+ struct lp55xx_chip *chip);
+extern void lp55xx_unregister_leds(struct lp55xx_led *led,
+ struct lp55xx_chip *chip);
+
+/* common device attributes functions */
+extern int lp55xx_register_sysfs(struct lp55xx_chip *chip);
+extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip);
+
+#endif /* _LEDS_LP55XX_COMMON_H */
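
To show how a board plugs into these hooks, here is a minimal platform-data sketch. It only uses fields that code in this patch actually dereferences (led_config[].name/chan_nr/led_current/max_current, num_channels, label, clock_mode, and the optional setup_resources/release_resources/enable callbacks); the lp55xx_led_config structure name and the exact header layout are assumed from those accessors rather than quoted from the new platform_data header, and all board names are hypothetical:

#include <linux/platform_data/leds-lp55xx.h>

static struct lp55xx_led_config board_led_config[] = {
	{
		.name        = "board:red:status",
		.chan_nr     = 0,
		.led_current = 50,	/* 0 would leave the channel unregistered */
		.max_current = 130,
	},
	{
		.name        = "board:green:status",
		.chan_nr     = 1,
		.led_current = 50,
		.max_current = 130,
	},
};

static struct lp55xx_platform_data board_lp55xx_pdata = {
	.led_config   = board_led_config,
	.num_channels = ARRAY_SIZE(board_led_config),
	.label        = "board-leds",
	.clock_mode   = LP55XX_CLOCK_EXT,	/* only if an external clock is wired */
	/* .setup_resources, .release_resources and .enable hooks are optional */
};

This structure is then attached to the lp5521/lp5523 I2C board info so that client->dev.platform_data is non-NULL when probe runs.
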
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 4353942c5fd1..7c2cb384e7ae 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -130,9 +130,10 @@ static int lp8788_led_probe(struct platform_device *pdev)
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_led_platform_data *led_pdata;
struct lp8788_led *led;
+ struct device *dev = &pdev->dev;
int ret;
- led = devm_kzalloc(lp->dev, sizeof(struct lp8788_led), GFP_KERNEL);
+ led = devm_kzalloc(dev, sizeof(struct lp8788_led), GFP_KERNEL);
if (!led)
return -ENOMEM;
@@ -154,13 +155,13 @@ static int lp8788_led_probe(struct platform_device *pdev)
ret = lp8788_led_init_device(led, led_pdata);
if (ret) {
- dev_err(lp->dev, "led init device err: %d\n", ret);
+ dev_err(dev, "led init device err: %d\n", ret);
return ret;
}
- ret = led_classdev_register(lp->dev, &led->led_dev);
+ ret = led_classdev_register(dev, &led->led_dev);
if (ret) {
- dev_err(lp->dev, "led register err: %d\n", ret);
+ dev_err(dev, "led register err: %d\n", ret);
return ret;
}
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index cee8a5b483ac..0c597bdd23f9 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -186,7 +186,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
- /* led subsystem ask us for a blink rate */
+		/* led subsystem asks us for a blink rate */
*delay_on = 1000;
*delay_off = 1000;
}
@@ -311,7 +311,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
- input_unregister_device(data->idev);
cancel_work_sync(&data->work);
data->idev = NULL;
}
@@ -382,7 +381,7 @@ static int pca9532_configure(struct i2c_client *client,
BUG_ON(data->idev);
led->state = PCA9532_PWM1;
pca9532_setled(led);
- data->idev = input_allocate_device();
+ data->idev = devm_input_allocate_device(&client->dev);
if (data->idev == NULL) {
err = -ENOMEM;
goto exit;
@@ -401,7 +400,6 @@ static int pca9532_configure(struct i2c_client *client,
INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
- input_free_device(data->idev);
cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2157524f277c..a1ea5f6a8d39 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
@@ -30,6 +31,11 @@ struct led_pwm_data {
unsigned int period;
};
+struct led_pwm_priv {
+ int num_leds;
+ struct led_pwm_data leds[0];
+};
+
static void led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -47,88 +53,152 @@ static void led_pwm_set(struct led_classdev *led_cdev,
}
}
-static int led_pwm_probe(struct platform_device *pdev)
+static inline size_t sizeof_pwm_leds_priv(int num_leds)
{
- struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
- struct led_pwm *cur_led;
- struct led_pwm_data *leds_data, *led_dat;
- int i, ret = 0;
+ return sizeof(struct led_pwm_priv) +
+ (sizeof(struct led_pwm_data) * num_leds);
+}
+
+static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ struct led_pwm_priv *priv;
+ int count, ret;
- if (!pdata)
- return -EBUSY;
+ /* count LEDs in this device, so we know how much to allocate */
+ count = of_get_child_count(node);
+ if (!count)
+ return NULL;
- leds_data = devm_kzalloc(&pdev->dev,
- sizeof(struct led_pwm_data) * pdata->num_leds,
- GFP_KERNEL);
- if (!leds_data)
- return -ENOMEM;
+ priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
+ GFP_KERNEL);
+ if (!priv)
+ return NULL;
- for (i = 0; i < pdata->num_leds; i++) {
- cur_led = &pdata->leds[i];
- led_dat = &leds_data[i];
+ for_each_child_of_node(node, child) {
+ struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
- led_dat->pwm = pwm_request(cur_led->pwm_id,
- cur_led->name);
+ led_dat->cdev.name = of_get_property(child, "label",
+ NULL) ? : child->name;
+
+ led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
if (IS_ERR(led_dat->pwm)) {
- ret = PTR_ERR(led_dat->pwm);
- dev_err(&pdev->dev, "unable to request PWM %d\n",
- cur_led->pwm_id);
+ dev_err(&pdev->dev, "unable to request PWM for %s\n",
+ led_dat->cdev.name);
goto err;
}
+ /* Get the period from the PWM core */
+ led_dat->period = pwm_get_period(led_dat->pwm);
+
+ led_dat->cdev.default_trigger = of_get_property(child,
+ "linux,default-trigger", NULL);
+ of_property_read_u32(child, "max-brightness",
+ &led_dat->cdev.max_brightness);
- led_dat->cdev.name = cur_led->name;
- led_dat->cdev.default_trigger = cur_led->default_trigger;
- led_dat->active_low = cur_led->active_low;
- led_dat->period = cur_led->pwm_period_ns;
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
- led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0) {
- pwm_free(led_dat->pwm);
+ dev_err(&pdev->dev, "failed to register for %s\n",
+ led_dat->cdev.name);
+ of_node_put(child);
goto err;
}
+ priv->num_leds++;
}
- platform_set_drvdata(pdev, leds_data);
+ return priv;
+err:
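+ /* Unregister the LEDs added so far; devm releases the PWMs and memory. */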
+ while (priv->num_leds--)
+ led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
- return 0;
+ return NULL;
+}
-err:
- if (i > 0) {
- for (i = i - 1; i >= 0; i--) {
- led_classdev_unregister(&leds_data[i].cdev);
- pwm_free(leds_data[i].pwm);
+static int led_pwm_probe(struct platform_device *pdev)
+{
+ struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
+ struct led_pwm_priv *priv;
+ int i, ret = 0;
+
+ if (pdata && pdata->num_leds) {
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof_pwm_leds_priv(pdata->num_leds),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ struct led_pwm *cur_led = &pdata->leds[i];
+ struct led_pwm_data *led_dat = &priv->leds[i];
+
+ led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
+ if (IS_ERR(led_dat->pwm)) {
+ ret = PTR_ERR(led_dat->pwm);
+ dev_err(&pdev->dev,
+ "unable to request PWM for %s\n",
+ cur_led->name);
+ goto err;
+ }
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->active_low = cur_led->active_low;
+ led_dat->period = cur_led->pwm_period_ns;
+ led_dat->cdev.brightness_set = led_pwm_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->cdev.max_brightness = cur_led->max_brightness;
+ led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+ ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+ if (ret < 0)
+ goto err;
}
+ priv->num_leds = pdata->num_leds;
+ } else {
+ priv = led_pwm_create_of(pdev);
+ if (!priv)
+ return -ENODEV;
}
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err:
+ while (i--)
+ led_classdev_unregister(&priv->leds[i].cdev);
+
return ret;
}
static int led_pwm_remove(struct platform_device *pdev)
{
+ struct led_pwm_priv *priv = platform_get_drvdata(pdev);
int i;
- struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
- struct led_pwm_data *leds_data;
- leds_data = platform_get_drvdata(pdev);
-
- for (i = 0; i < pdata->num_leds; i++) {
- led_classdev_unregister(&leds_data[i].cdev);
- pwm_free(leds_data[i].pwm);
- }
+ for (i = 0; i < priv->num_leds; i++)
+ led_classdev_unregister(&priv->leds[i].cdev);
return 0;
}
+static const struct of_device_id of_pwm_leds_match[] = {
+ { .compatible = "pwm-leds", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_leds_match);
+
static struct platform_driver led_pwm_driver = {
.probe = led_pwm_probe,
.remove = led_pwm_remove,
.driver = {
.name = "leds_pwm",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pwm_leds_match),
},
};
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index e0fff1ca5923..d3c2b7e68fbc 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -133,24 +133,24 @@ static int r_tpu_enable(struct r_tpu_priv *p, enum led_brightness brightness)
rate = clk_get_rate(p->clk);
/* pick the lowest acceptable rate */
- for (k = 0; k < ARRAY_SIZE(prescaler); k++)
- if ((rate / prescaler[k]) < p->min_rate)
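+ /* scan the table from the last prescaler down for the first rate that meets min_rate */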
+ for (k = ARRAY_SIZE(prescaler) - 1; k >= 0; k--)
+ if ((rate / prescaler[k]) >= p->min_rate)
break;
- if (!k) {
+ if (k < 0) {
dev_err(&p->pdev->dev, "clock rate mismatch\n");
goto err0;
}
dev_dbg(&p->pdev->dev, "rate = %lu, prescaler %u\n",
- rate, prescaler[k - 1]);
+ rate, prescaler[k]);
/* clear TCNT on TGRB match, count on rising edge, set prescaler */
- r_tpu_write(p, TCR, 0x0040 | (k - 1));
+ r_tpu_write(p, TCR, 0x0040 | k);
/* output 0 until TGRA, output 1 until TGRB */
r_tpu_write(p, TIOR, 0x0002);
- rate /= prescaler[k - 1] * p->refresh_rate;
+ rate /= prescaler[k] * p->refresh_rate;
r_tpu_write(p, TGRB, rate);
dev_dbg(&p->pdev->dev, "TRGB = 0x%04lx\n", rate);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index ec9b287ecfbf..64e204e714f6 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,8 +63,7 @@ MODULE_LICENSE("GPL");
/*
* PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
*/
-static const struct pci_device_id ich7_lpc_pci_id[] =
-{
+static DEFINE_PCI_DEVICE_TABLE(ich7_lpc_pci_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 07ff5a3a6cee..89792990088d 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -3,6 +3,8 @@
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
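+/* Prefix all pr_*() output with the module name. */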
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -14,9 +16,6 @@
#include <asm/fhc.h>
#include <asm/upa.h>
-#define DRIVER_NAME "leds-sunfire"
-#define PFX DRIVER_NAME ": "
-
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun Fire LED driver");
MODULE_LICENSE("GPL");
@@ -130,14 +129,14 @@ static int sunfire_led_generic_probe(struct platform_device *pdev,
int i, err;
if (pdev->num_resources != 1) {
- printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n",
+ dev_err(&pdev->dev, "Wrong number of resources %d, should be 1\n",
pdev->num_resources);
return -EINVAL;
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
- printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n");
+ dev_err(&pdev->dev, "Could not allocate struct sunfire_drvdata\n");
return -ENOMEM;
}
@@ -152,7 +151,7 @@ static int sunfire_led_generic_probe(struct platform_device *pdev,
err = led_classdev_register(&pdev->dev, lp);
if (err) {
- printk(KERN_ERR PFX "Could not register %s LED\n",
+ dev_err(&pdev->dev, "Could not register %s LED\n",
lp->name);
for (i--; i >= 0; i--)
led_classdev_unregister(&p->leds[i].led_cdev);
@@ -188,7 +187,7 @@ static struct led_type clockboard_led_types[NUM_LEDS_PER_BOARD] = {
{
.name = "clockboard-right",
.handler = clockboard_right_set,
- .default_trigger= "heartbeat",
+ .default_trigger = "heartbeat",
},
};
@@ -209,7 +208,7 @@ static struct led_type fhc_led_types[NUM_LEDS_PER_BOARD] = {
{
.name = "fhc-right",
.handler = fhc_right_set,
- .default_trigger= "heartbeat",
+ .default_trigger = "heartbeat",
},
};
@@ -244,13 +243,13 @@ static int __init sunfire_leds_init(void)
int err = platform_driver_register(&sunfire_clockboard_led_driver);
if (err) {
- printk(KERN_ERR PFX "Could not register clock board LED driver\n");
+ pr_err("Could not register clock board LED driver\n");
return err;
}
err = platform_driver_register(&sunfire_fhc_led_driver);
if (err) {
- printk(KERN_ERR PFX "Could not register FHC LED driver\n");
+ pr_err("Could not register FHC LED driver\n");
platform_driver_unregister(&sunfire_clockboard_led_driver);
}
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index b26a63bae16b..070ba0741b21 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -667,8 +667,68 @@ static void tca6507_remove_gpio(struct tca6507_chip *tca)
}
#endif /* CONFIG_GPIOLIB */
+#ifdef CONFIG_OF
+static struct tca6507_platform_data *
+tca6507_led_dt_init(struct i2c_client *client)
+{
+ struct device_node *np = client->dev.of_node, *child;
+ struct tca6507_platform_data *pdata;
+ struct led_info *tca_leds;
+ int count;
+
+ count = of_get_child_count(np);
+ if (!count || count > NUM_LEDS)
+ return ERR_PTR(-ENODEV);
+
+ tca_leds = devm_kzalloc(&client->dev,
+ sizeof(struct led_info) * count, GFP_KERNEL);
+ if (!tca_leds)
+ return ERR_PTR(-ENOMEM);
+
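+ /*
+ * Each child's "reg" property selects the output it describes;
+ * children without a readable "reg" are skipped.
+ */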
+ for_each_child_of_node(np, child) {
+ struct led_info led;
+ u32 reg;
+ int ret;
+
+ led.name =
+ of_get_property(child, "label", NULL) ? : child->name;
+ led.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret != 0)
+ continue;
+
+ tca_leds[reg] = led;
+ }
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct tca6507_platform_data), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->leds.leds = tca_leds;
+ pdata->leds.num_leds = count;
+
+ return pdata;
+}
+
+static const struct of_device_id of_tca6507_leds_match[] = {
+ { .compatible = "ti,tca6507", },
+ {},
+};
+
+#else
+static struct tca6507_platform_data *
+tca6507_led_dt_init(struct i2c_client *client)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#define of_tca6507_leds_match NULL
+#endif
+
static int tca6507_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct tca6507_chip *tca;
struct i2c_adapter *adapter;
@@ -683,9 +743,12 @@ static int tca6507_probe(struct i2c_client *client,
return -EIO;
if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
- dev_err(&client->dev, "Need %d entries in platform-data list\n",
- NUM_LEDS);
- return -ENODEV;
+ pdata = tca6507_led_dt_init(client);
+ if (IS_ERR(pdata)) {
+ dev_err(&client->dev, "Need %d entries in platform-data list\n",
+ NUM_LEDS);
+ return PTR_ERR(pdata);
+ }
}
tca = devm_kzalloc(&client->dev, sizeof(*tca), GFP_KERNEL);
if (!tca)
@@ -750,6 +813,7 @@ static struct i2c_driver tca6507_driver = {
.driver = {
.name = "leds-tca6507",
.owner = THIS_MODULE,
+ .of_match_table = of_tca6507_leds_match,
},
.probe = tca6507_probe,
.remove = tca6507_remove,
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 74a24cf897c3..6bd5c679d877 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -157,7 +157,7 @@ static int wm831x_status_blink_set(struct led_classdev *led_cdev,
return ret;
}
-static const char *led_src_texts[] = {
+static const char * const led_src_texts[] = {
"otp",
"power",
"charger",
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index fc92ccbd71dc..b3256ff0d426 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -396,7 +396,7 @@ static const char *lg_bus_name(struct virtio_device *vdev)
}
/* The ops structure which hooks everything together. */
-static struct virtio_config_ops lguest_config_ops = {
+static const struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
.finalize_features = lg_finalize_features,
.get = lg_get,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 91a02eeeb319..e30b490055aa 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -210,7 +210,7 @@ config DM_DEBUG
config DM_BUFIO
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
This interface allows you to do buffered I/O on a device and acts
as a cache, holding recently-read blocks in memory and performing
@@ -218,7 +218,7 @@ config DM_BUFIO
config DM_BIO_PRISON
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
Some bio locking schemes used by other device-mapper targets
including thin provisioning.
@@ -251,8 +251,8 @@ config DM_SNAPSHOT
Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
- tristate "Thin provisioning target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Thin provisioning target"
+ depends on BLK_DEV_DM
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
@@ -268,6 +268,37 @@ config DM_DEBUG_BLOCK_STACK_TRACING
If unsure, say N.
+config DM_CACHE
+ tristate "Cache target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ default n
+ select DM_PERSISTENT_DATA
+ select DM_BIO_PRISON
+ ---help---
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
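+ See Documentation/device-mapper/cache.txt for the table format
+ used to assemble a cache device.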
+
+config DM_CACHE_MQ
+ tristate "MQ Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A cache policy that uses a multiqueue ordered by recent hit
+ count to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes.
+
+config DM_CACHE_CLEANER
+ tristate "Cleaner Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A simple cache policy that writes back all data to the
+ origin. Used when decommissioning a dm-cache.
+
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
@@ -302,8 +333,8 @@ config DM_RAID
in one of the available parity distribution methods.
config DM_LOG_USERSPACE
- tristate "Mirror userspace logging (EXPERIMENTAL)"
- depends on DM_MIRROR && EXPERIMENTAL && NET
+ tristate "Mirror userspace logging"
+ depends on DM_MIRROR && NET
select CONNECTOR
---help---
The userspace logging module provides a mechanism for
@@ -350,8 +381,8 @@ config DM_MULTIPATH_ST
If unsure, say N.
config DM_DELAY
- tristate "I/O delaying target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "I/O delaying target"
+ depends on BLK_DEV_DM
---help---
A target that delays reads and/or writes and can send
them to different devices. Useful for testing.
@@ -365,14 +396,14 @@ config DM_UEVENT
Generate udev events for DM events.
config DM_FLAKEY
- tristate "Flakey target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Flakey target"
+ depends on BLK_DEV_DM
---help---
A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
- tristate "Verity target support (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Verity target support"
+ depends on BLK_DEV_DM
select CRYPTO
select CRYPTO_HASH
select DM_BUFIO
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 94dce8b49324..7ceeaefc0e95 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -11,6 +11,9 @@ dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
+dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
+dm-cache-mq-y += dm-cache-policy-mq.o
+dm-cache-cleaner-y += dm-cache-policy-cleaner.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -44,6 +47,9 @@ obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_CACHE) += dm-cache.o
+obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
+obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7155945f8eb8..4fd9d6aeff6a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -337,7 +337,7 @@ static int read_page(struct file *file, unsigned long index,
struct page *page)
{
int ret = 0;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct buffer_head *bh;
sector_t block;
@@ -755,7 +755,7 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
free_buffers(sb_page);
if (file) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
fput(file);
}
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index aefb78e3cbf9..85f0b7074257 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -14,14 +14,6 @@
/*----------------------------------------------------------------*/
-struct dm_bio_prison_cell {
- struct hlist_node list;
- struct dm_bio_prison *prison;
- struct dm_cell_key key;
- struct bio *holder;
- struct bio_list bios;
-};
-
struct dm_bio_prison {
spinlock_t lock;
mempool_t *cell_pool;
@@ -87,6 +79,19 @@ void dm_bio_prison_destroy(struct dm_bio_prison *prison)
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
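+
+/*
+ * Cell memory is managed by the caller from here on; these helpers simply
+ * wrap the prison's mempool.
+ */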
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
+{
+ return mempool_alloc(prison->cell_pool, gfp);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
+
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
+{
+ mempool_free(cell, prison->cell_pool);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
+
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
const unsigned long BIG_PRIME = 4294967291UL;
@@ -106,100 +111,103 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
struct dm_cell_key *key)
{
struct dm_bio_prison_cell *cell;
- struct hlist_node *tmp;
- hlist_for_each_entry(cell, tmp, bucket, list)
+ hlist_for_each_entry(cell, bucket, list)
if (keys_equal(&cell->key, key))
return cell;
return NULL;
}
-/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
- *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
- */
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref)
+static void __setup_new_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *holder,
+ uint32_t hash,
+ struct dm_bio_prison_cell *cell)
{
- int r = 1;
- unsigned long flags;
- uint32_t hash = hash_key(prison, key);
- struct dm_bio_prison_cell *cell, *cell2;
-
- BUG_ON(hash > prison->nr_buckets);
-
- spin_lock_irqsave(&prison->lock, flags);
-
- cell = __search_bucket(prison->cells + hash, key);
- if (cell) {
- bio_list_add(&cell->bios, inmate);
- goto out;
- }
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = holder;
+ bio_list_init(&cell->bios);
+ hlist_add_head(&cell->list, prison->cells + hash);
+}
- /*
- * Allocate a new cell
- */
- spin_unlock_irqrestore(&prison->lock, flags);
- cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
- spin_lock_irqsave(&prison->lock, flags);
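+/*
+ * Returns 1 if the key is already held, adding @inmate (if any) to the
+ * existing cell's bio list; otherwise installs @cell_prealloc as the new
+ * holder and returns 0. Called with the prison lock held.
+ */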
+static int __bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ uint32_t hash = hash_key(prison, key);
+ struct dm_bio_prison_cell *cell;
- /*
- * We've been unlocked, so we have to double check that
- * nobody else has inserted this cell in the meantime.
- */
cell = __search_bucket(prison->cells + hash, key);
if (cell) {
- mempool_free(cell2, prison->cell_pool);
- bio_list_add(&cell->bios, inmate);
- goto out;
+ if (inmate)
+ bio_list_add(&cell->bios, inmate);
+ *cell_result = cell;
+ return 1;
}
- /*
- * Use new cell.
- */
- cell = cell2;
-
- cell->prison = prison;
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->holder = inmate;
- bio_list_init(&cell->bios);
- hlist_add_head(&cell->list, prison->cells + hash);
+ __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
+ *cell_result = cell_prealloc;
+ return 0;
+}
- r = 0;
+static int bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ unsigned long flags;
-out:
+ spin_lock_irqsave(&prison->lock, flags);
+ r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
spin_unlock_irqrestore(&prison->lock, flags);
- *ref = cell;
-
return r;
}
+
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
+}
EXPORT_SYMBOL_GPL(dm_bio_detain);
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
+}
+EXPORT_SYMBOL_GPL(dm_get_cell);
+
/*
* @inmates must have been initialised prior to this call
*/
-static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
if (inmates) {
- bio_list_add(inmates, cell->holder);
+ if (cell->holder)
+ bio_list_add(inmates, cell->holder);
bio_list_merge(inmates, &cell->bios);
}
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release(cell, bios);
@@ -210,20 +218,18 @@ EXPORT_SYMBOL_GPL(dm_cell_release);
/*
* Sometimes we don't want the holder, just the additional bios.
*/
-static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
bio_list_merge(inmates, &cell->bios);
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release_no_holder(cell, inmates);
@@ -231,9 +237,9 @@ void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
-void dm_cell_error(struct dm_bio_prison_cell *cell)
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
{
- struct dm_bio_prison *prison = cell->prison;
struct bio_list bios;
struct bio *bio;
unsigned long flags;
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 53d1a7a84e2f..3f833190eadf 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -22,7 +22,6 @@
* subsequently unlocked the bios become available.
*/
struct dm_bio_prison;
-struct dm_bio_prison_cell;
/* FIXME: this needs to be more abstract */
struct dm_cell_key {
@@ -31,21 +30,62 @@ struct dm_cell_key {
dm_block_t block;
};
+/*
+ * Treat this as opaque; it is only in the header so that callers can
+ * manage cell allocation themselves.
+ */
+struct dm_bio_prison_cell {
+ struct hlist_node list;
+ struct dm_cell_key key;
+ struct bio *holder;
+ struct bio_list bios;
+};
+
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
+ * These two functions just wrap a mempool. This is a transitory step:
+ * Eventually all bio prison clients should manage their own cell memory.
*
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
+ * in interrupt context or passed GFP_NOWAIT.
*/
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
+ gfp_t gfp);
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
-void dm_cell_error(struct dm_bio_prison_cell *cell);
+/*
+ * Creates, or retrieves a cell for the given key.
+ *
+ * Returns 1 if pre-existing cell returned, zero if new cell created using
+ * @cell_prealloc.
+ */
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+/*
+ * An atomic op that combines retrieving a cell, and adding a bio to it.
+ *
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios);
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates);
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
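+
+/*
+ * Typical usage (sketch): pre-allocate a cell with dm_bio_prison_alloc_cell(),
+ * then call dm_bio_detain(); if it returns 1 the pre-allocated cell was not
+ * consumed and can be handed back with dm_bio_prison_free_cell().
+ */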
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 651ca79881dd..3c955e10a618 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
struct dm_buffer *b;
- struct hlist_node *hn;
- hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+ hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
hash_list) {
dm_bufio_cond_resched();
if (b->block == block)
@@ -1193,7 +1192,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_rw = REQ_FLUSH,
+ .bi_rw = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
new file mode 100644
index 000000000000..bed4ad4e1b7c
--- /dev/null
+++ b/drivers/md/dm-cache-block-types.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_BLOCK_TYPES_H
+#define DM_CACHE_BLOCK_TYPES_H
+
+#include "persistent-data/dm-block-manager.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * It's helpful to get sparse to differentiate between indexes into the
+ * origin device, indexes into the cache device, and indexes into the
+ * discard bitset.
+ */
+
+typedef dm_block_t __bitwise__ dm_oblock_t;
+typedef uint32_t __bitwise__ dm_cblock_t;
+typedef dm_block_t __bitwise__ dm_dblock_t;
+
+static inline dm_oblock_t to_oblock(dm_block_t b)
+{
+ return (__force dm_oblock_t) b;
+}
+
+static inline dm_block_t from_oblock(dm_oblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+static inline dm_cblock_t to_cblock(uint32_t b)
+{
+ return (__force dm_cblock_t) b;
+}
+
+static inline uint32_t from_cblock(dm_cblock_t b)
+{
+ return (__force uint32_t) b;
+}
+
+static inline dm_dblock_t to_dblock(dm_block_t b)
+{
+ return (__force dm_dblock_t) b;
+}
+
+static inline dm_block_t from_dblock(dm_dblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+#endif /* DM_CACHE_BLOCK_TYPES_H */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
new file mode 100644
index 000000000000..fbd3625f2748
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.c
@@ -0,0 +1,1146 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-metadata.h"
+
+#include "persistent-data/dm-array.h"
+#include "persistent-data/dm-bitset.h"
+#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-disk.h"
+#include "persistent-data/dm-transaction-manager.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache metadata"
+
+#define CACHE_SUPERBLOCK_MAGIC 06142003
+#define CACHE_SUPERBLOCK_LOCATION 0
+#define CACHE_VERSION 1
+#define CACHE_METADATA_CACHE_SIZE 64
+
+/*
+ * 3 for btree insert +
+ * 2 for btree lookup used within space map
+ */
+#define CACHE_MAX_CONCURRENT_LOCKS 5
+#define SPACE_MAP_ROOT_SIZE 128
+
+enum superblock_flag_bits {
+ /* for spotting crashes that would invalidate the dirty bitset */
+ CLEAN_SHUTDOWN,
+};
+
+/*
+ * Each mapping from cache block -> origin block carries a set of flags.
+ */
+enum mapping_bits {
+ /*
+ * A valid mapping. Because we're using an array we clear this
+ * flag for a non-existent mapping.
+ */
+ M_VALID = 1,
+
+ /*
+ * The data on the cache is different from that on the origin.
+ */
+ M_DIRTY = 2
+};
+
+struct cache_disk_superblock {
+ __le32 csum;
+ __le32 flags;
+ __le64 blocknr;
+
+ __u8 uuid[16];
+ __le64 magic;
+ __le32 version;
+
+ __u8 policy_name[CACHE_POLICY_NAME_SIZE];
+ __le32 policy_hint_size;
+
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ __le64 mapping_root;
+ __le64 hint_root;
+
+ __le64 discard_root;
+ __le64 discard_block_size;
+ __le64 discard_nr_blocks;
+
+ __le32 data_block_size;
+ __le32 metadata_block_size;
+ __le32 cache_blocks;
+
+ __le32 compat_flags;
+ __le32 compat_ro_flags;
+ __le32 incompat_flags;
+
+ __le32 read_hits;
+ __le32 read_misses;
+ __le32 write_hits;
+ __le32 write_misses;
+} __packed;
+
+struct dm_cache_metadata {
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *metadata_sm;
+ struct dm_transaction_manager *tm;
+
+ struct dm_array_info info;
+ struct dm_array_info hint_info;
+ struct dm_disk_bitset discard_info;
+
+ struct rw_semaphore root_lock;
+ dm_block_t root;
+ dm_block_t hint_root;
+ dm_block_t discard_root;
+
+ sector_t discard_block_size;
+ dm_dblock_t discard_nr_blocks;
+
+ sector_t data_block_size;
+ dm_cblock_t cache_blocks;
+ bool changed:1;
+ bool clean_when_opened:1;
+
+ char policy_name[CACHE_POLICY_NAME_SIZE];
+ size_t policy_hint_size;
+ struct dm_cache_statistics stats;
+};
+
+/*-------------------------------------------------------------------
+ * superblock validator
+ *-----------------------------------------------------------------*/
+
+#define SUPERBLOCK_CSUM_XOR 9031977
+
+static void sb_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+
+ disk_super->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+}
+
+static int sb_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+ __le32 csum_le;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+ DMERR("sb_check failed: blocknr %llu: wanted %llu",
+ le64_to_cpu(disk_super->blocknr),
+ (unsigned long long)dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
+ DMERR("sb_check failed: magic %llu: wanted %llu",
+ le64_to_cpu(disk_super->magic),
+ (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
+ return -EILSEQ;
+ }
+
+ csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+ if (csum_le != disk_super->csum) {
+ DMERR("sb_check failed: csum %u: wanted %u",
+ le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator sb_validator = {
+ .name = "superblock",
+ .prepare_for_write = sb_prepare_for_write,
+ .check = sb_check
+};
+
+/*----------------------------------------------------------------*/
+
+static int superblock_read_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock_zero(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+{
+ int r;
+ unsigned i;
+ struct dm_block *b;
+ __le64 *data_le, zero = cpu_to_le64(0);
+ unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+ /*
+ * We can't use a validator here - it may be all zeroes.
+ */
+ r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
+ if (r)
+ return r;
+
+ data_le = dm_block_data(b);
+ *result = 1;
+ for (i = 0; i < sb_block_size; i++) {
+ if (data_le[i] != zero) {
+ *result = 0;
+ break;
+ }
+ }
+
+ return dm_bm_unlock(b);
+}
+
+static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+{
+ struct dm_btree_value_type vt;
+
+ vt.context = NULL;
+ vt.size = sizeof(__le64);
+ vt.inc = NULL;
+ vt.dec = NULL;
+ vt.equal = NULL;
+ dm_array_info_init(&cmd->info, cmd->tm, &vt);
+
+ if (cmd->policy_hint_size) {
+ vt.size = sizeof(__le32);
+ dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
+ }
+}
+
+static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+ /* FIXME: see if we can lose the max sectors limit */
+ if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+ bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock_zero(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = 0;
+ memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
+ disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
+ disk_super->version = cpu_to_le32(CACHE_VERSION);
+ memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+ disk_super->policy_hint_size = 0;
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0)
+ goto bad_locked;
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
+ disk_super->cache_blocks = cpu_to_le32(0);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(0);
+ disk_super->read_misses = cpu_to_le32(0);
+ disk_super->write_hits = cpu_to_le32(0);
+ disk_super->write_misses = cpu_to_le32(0);
+
+ return dm_tm_commit(cmd->tm, sblock);
+
+bad_locked:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __format_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+
+ __setup_mapping_info(cmd);
+
+ r = dm_array_empty(&cmd->info, &cmd->root);
+ if (r < 0)
+ goto bad;
+
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+
+ r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
+ if (r < 0)
+ goto bad;
+
+ cmd->discard_block_size = 0;
+ cmd->discard_nr_blocks = 0;
+
+ r = __write_initial_superblock(cmd);
+ if (r)
+ goto bad;
+
+ cmd->clean_when_opened = true;
+ return 0;
+
+bad:
+ dm_tm_destroy(cmd->tm);
+ dm_sm_destroy(cmd->metadata_sm);
+
+ return r;
+}
+
+static int __check_incompat_features(struct cache_disk_superblock *disk_super,
+ struct dm_cache_metadata *cmd)
+{
+ uint32_t features;
+
+ features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ DMERR("could not access metadata due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ /*
+ * Check for read-only metadata to skip the following RDWR checks.
+ */
+ if (get_disk_ro(cmd->bdev->bd_disk))
+ return 0;
+
+ features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __open_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct cache_disk_superblock *disk_super;
+ unsigned long sb_flags;
+
+ r = superblock_read_lock(cmd, &sblock);
+ if (r < 0) {
+ DMERR("couldn't read lock superblock");
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+
+ r = __check_incompat_features(disk_super, cmd);
+ if (r < 0)
+ goto bad;
+
+ r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ disk_super->metadata_space_map_root,
+ sizeof(disk_super->metadata_space_map_root),
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_open_with_sm failed");
+ goto bad;
+ }
+
+ __setup_mapping_info(cmd);
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+ sb_flags = le32_to_cpu(disk_super->flags);
+ cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
+ return dm_bm_unlock(sblock);
+
+bad:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
+ bool format_device)
+{
+ int r, unformatted;
+
+ r = __superblock_all_zeroes(cmd->bm, &unformatted);
+ if (r)
+ return r;
+
+ if (unformatted)
+ return format_device ? __format_metadata(cmd) : -EPERM;
+
+ return __open_metadata(cmd);
+}
+
+static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+ bool may_format_device)
+{
+ int r;
+ cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE,
+ CACHE_METADATA_CACHE_SIZE,
+ CACHE_MAX_CONCURRENT_LOCKS);
+ if (IS_ERR(cmd->bm)) {
+ DMERR("could not create block manager");
+ return PTR_ERR(cmd->bm);
+ }
+
+ r = __open_or_format_metadata(cmd, may_format_device);
+ if (r)
+ dm_block_manager_destroy(cmd->bm);
+
+ return r;
+}
+
+static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
+{
+ dm_sm_destroy(cmd->metadata_sm);
+ dm_tm_destroy(cmd->tm);
+ dm_block_manager_destroy(cmd->bm);
+}
+
+typedef unsigned long (*flags_mutator)(unsigned long);
+
+static void update_flags(struct cache_disk_superblock *disk_super,
+ flags_mutator mutator)
+{
+ uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+ disk_super->flags = cpu_to_le32(sb_flags);
+}
+
+static unsigned long set_clean_shutdown(unsigned long flags)
+{
+ set_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static unsigned long clear_clean_shutdown(unsigned long flags)
+{
+ clear_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static void read_superblock_fields(struct dm_cache_metadata *cmd,
+ struct cache_disk_superblock *disk_super)
+{
+ cmd->root = le64_to_cpu(disk_super->mapping_root);
+ cmd->hint_root = le64_to_cpu(disk_super->hint_root);
+ cmd->discard_root = le64_to_cpu(disk_super->discard_root);
+ cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
+ cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
+ cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
+ cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
+ strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+ cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+
+ cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
+ cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
+ cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
+ cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
+
+ cmd->changed = false;
+}
+
+/*
+ * The mutator updates the superblock flags.
+ */
+static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ update_flags(disk_super, mutator);
+ read_superblock_fields(cmd, disk_super);
+
+ return dm_bm_flush_and_unlock(cmd->bm, sblock);
+}
+
+static int __begin_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We re-read the superblock every time. Shouldn't need to do this
+ * really.
+ */
+ r = superblock_read_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ read_superblock_fields(cmd, disk_super);
+ dm_bm_unlock(sblock);
+
+ return 0;
+}
+
+static int __commit_transaction(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
+ */
+ BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
+
+ r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
+ &cmd->discard_root);
+ if (r)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+
+ if (mutator)
+ update_flags(disk_super, mutator);
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
+ strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+ disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+ disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0) {
+ dm_bm_unlock(sblock);
+ return r;
+ }
+
+ return dm_tm_commit(cmd->tm, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The mappings are held in a dm-array that has 64-bit values stored in
+ * little-endian format. The index is the cblock; the high 48 bits of the
+ * value are the oblock and the low 16 bits are the flags.
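+ *
+ * For example, an oblock of 5 with flags M_VALID | M_DIRTY packs to
+ * (5 << 16) | 0x3 = 0x50003.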
+ */
+#define FLAGS_MASK ((1 << 16) - 1)
+
+static __le64 pack_value(dm_oblock_t block, unsigned flags)
+{
+ uint64_t value = from_oblock(block);
+ value <<= 16;
+ value = value | (flags & FLAGS_MASK);
+ return cpu_to_le64(value);
+}
+
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+{
+ uint64_t value = le64_to_cpu(value_le);
+ uint64_t b = value >> 16;
+ *block = to_oblock(b);
+ *flags = value & FLAGS_MASK;
+}
+
+/*----------------------------------------------------------------*/
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ int r;
+ struct dm_cache_metadata *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ DMERR("could not allocate metadata struct");
+ return NULL;
+ }
+
+ init_rwsem(&cmd->root_lock);
+ cmd->bdev = bdev;
+ cmd->data_block_size = data_block_size;
+ cmd->cache_blocks = 0;
+ cmd->policy_hint_size = policy_hint_size;
+ cmd->changed = true;
+
+ r = __create_persistent_data_objects(cmd, may_format_device);
+ if (r) {
+ kfree(cmd);
+ return ERR_PTR(r);
+ }
+
+ r = __begin_transaction_flags(cmd, clear_clean_shutdown);
+ if (r < 0) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(r);
+ }
+
+ return cmd;
+}
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+{
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+}
+
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+{
+ int r;
+ __le64 null_mapping = pack_value(0, 0);
+
+ down_write(&cmd->root_lock);
+ __dm_bless_for_disk(&null_mapping);
+ r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
+ from_cblock(new_cache_size),
+ &null_mapping, &cmd->root);
+ if (!r)
+ cmd->cache_blocks = new_cache_size;
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = dm_bitset_resize(&cmd->discard_info,
+ cmd->discard_root,
+ from_dblock(cmd->discard_nr_blocks),
+ from_dblock(new_nr_entries),
+ false, &cmd->discard_root);
+ if (!r) {
+ cmd->discard_block_size = discard_block_size;
+ cmd->discard_nr_blocks = new_nr_entries;
+ }
+
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
+ bool *is_discarded)
+{
+ return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root,
+ is_discarded);
+}
+
+static int __discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __discard(cmd, dblock, discard);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r = 0;
+ dm_block_t b;
+ bool discard;
+
+ for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ dm_dblock_t dblock = to_dblock(b);
+
+ if (cmd->clean_when_opened) {
+ r = __is_discarded(cmd, dblock, &discard);
+ if (r)
+ return r;
+ } else
+ discard = false;
+
+ r = fn(context, cmd->discard_block_size, dblock, discard);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_discards(cmd, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+{
+ dm_cblock_t r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->cache_blocks;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+ __le64 value = pack_value(0, 0);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __remove(cmd, cblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __insert(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+ __le64 value = pack_value(oblock, M_VALID);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __insert(cmd, cblock, oblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+struct thunk {
+ load_mapping_fn fn;
+ void *context;
+
+ struct dm_cache_metadata *cmd;
+ bool respect_dirty_flags;
+ bool hints_valid;
+};
+
+static bool hints_array_initialized(struct dm_cache_metadata *cmd)
+{
+ return cmd->hint_root && cmd->policy_hint_size;
+}
+
+static bool hints_array_available(struct dm_cache_metadata *cmd,
+ const char *policy_name)
+{
+ bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
+ sizeof(cmd->policy_name));
+
+ return cmd->clean_when_opened && policy_names_match &&
+ hints_array_initialized(cmd);
+}
+
+static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ bool dirty;
+ __le64 value;
+ __le32 hint_value = 0;
+ dm_oblock_t oblock;
+ unsigned flags;
+ struct thunk *thunk = context;
+ struct dm_cache_metadata *cmd = thunk->cmd;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ if (flags & M_VALID) {
+ if (thunk->hints_valid) {
+ r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
+ cblock, &hint_value);
+ if (r && r != -ENODATA)
+ return r;
+ }
+
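+ /* after an unclean shutdown the dirty bits can't be trusted */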
+ dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
+ r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
+ dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ }
+
+ return r;
+}
+
+static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ struct thunk thunk;
+
+ thunk.fn = fn;
+ thunk.context = context;
+
+ thunk.cmd = cmd;
+ thunk.respect_dirty_flags = cmd->clean_when_opened;
+ thunk.hints_valid = hints_array_available(cmd, policy_name);
+
+ return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+}
+
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_mappings(cmd, policy_name, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ __le64 value;
+ dm_oblock_t oblock;
+ unsigned flags;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ return r;
+}
+
+static int __dump_mappings(struct dm_cache_metadata *cmd)
+{
+ return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
+}
+
+void dm_cache_dump(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ __dump_mappings(cmd);
+ up_read(&cmd->root_lock);
+}
+
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->changed;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+{
+ int r;
+ unsigned flags;
+ dm_oblock_t oblock;
+ __le64 value;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
+ if (r)
+ return r;
+
+ unpack_value(value, &oblock, &flags);
+
+ if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
+ /* nothing to be done */
+ return 0;
+
+ value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+
+}
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, bool dirty)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __dirty(cmd, cblock, dirty);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_read(&cmd->root_lock);
+ memcpy(stats, &cmd->stats, sizeof(*stats));
+ up_read(&cmd->root_lock);
+}
+
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_write(&cmd->root_lock);
+ memcpy(&cmd->stats, stats, sizeof(*stats));
+ up_write(&cmd->root_lock);
+}
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
+{
+ int r;
+ flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
+ clear_clean_shutdown);
+
+ down_write(&cmd->root_lock);
+ r = __commit_transaction(cmd, mutator);
+ if (r)
+ goto out;
+
+ r = __begin_transaction(cmd);
+
+out:
+ up_write(&cmd->root_lock);
+ return r;
+}
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+ __le32 value;
+ size_t hint_size;
+ const char *policy_name = dm_cache_policy_get_name(policy);
+
+ if (!policy_name[0] ||
+ (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+ return -EINVAL;
+
+ if (strcmp(cmd->policy_name, policy_name)) {
+ strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+
+ hint_size = dm_cache_policy_get_hint_size(policy);
+ if (!hint_size)
+ return 0; /* short-circuit hints initialization */
+ cmd->policy_hint_size = hint_size;
+
+ if (cmd->hint_root) {
+ r = dm_array_del(&cmd->hint_info, cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+ if (r)
+ return r;
+
+ value = cpu_to_le32(0);
+ __dm_bless_for_disk(&value);
+ r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
+ from_cblock(cmd->cache_blocks),
+ &value, &cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = begin_hints(cmd, policy);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+ __le32 value = cpu_to_le32(hint);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
+ from_cblock(cblock), &value, &cmd->hint_root);
+ cmd->changed = true;
+
+ return r;
+}
+
+int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+
+ if (!hints_array_initialized(cmd))
+ return 0;
+
+ down_write(&cmd->root_lock);
+ r = save_hint(cmd, cblock, hint);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
new file mode 100644
index 000000000000..135864ea0eee
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_METADATA_H
+#define DM_CACHE_METADATA_H
+
+#include "dm-cache-block-types.h"
+#include "dm-cache-policy-internal.h"
+
+/*----------------------------------------------------------------*/
+
+#define DM_CACHE_METADATA_BLOCK_SIZE 4096
+
+/* FIXME: remove this restriction */
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about 16k metadata blocks.
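+ *
+ * That is 255 * 16384 metadata blocks of 4KiB each, i.e. just under 16GiB
+ * of metadata device.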
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+
+/*
+ * A metadata device larger than 16GB triggers a warning.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Ext[234]-style compat feature flags.
+ *
+ * A new feature which old metadata will still be compatible with should
+ * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
+ *
+ * A new feature that is not compatible with old code should define a
+ * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
+ * that flag.
+ *
+ * A new feature that is not compatible with old code accessing the
+ * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
+ * guard the relevant code with that flag.
+ *
+ * As these various flags are defined they should be added to the
+ * following masks.
+ */
+#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
+#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
+#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
+
+/*
+ * Reopens or creates a new, empty metadata volume.
+ * Returns an ERR_PTR on failure.
+ */
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size);
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+
+/*
+ * The metadata needs to know how many cache blocks there are. We don't
+ * care about the origin, assuming the core target is giving us valid
+ * origin blocks to map to.
+ */
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries);
+
+typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discarded);
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context);
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
+
+typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
+ dm_cblock_t cblock, bool dirty,
+ uint32_t hint, bool hint_valid);
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ const char *policy_name,
+ load_mapping_fn fn,
+ void *context);
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
+
+struct dm_cache_statistics {
+ uint32_t read_hits;
+ uint32_t read_misses;
+ uint32_t write_hits;
+ uint32_t write_misses;
+};
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+void dm_cache_dump(struct dm_cache_metadata *cmd);
+
+/*
+ * The policy is invited to save a 32bit hint value for every cblock (eg,
+ * for a hit count). These are stored against the policy name. If
+ * policies are changed, then hints will be lost. If the machine crashes,
+ * hints will be lost.
+ *
+ * The hints are indexed by the cblock, but many policies will not
+ * necessarily have a fast way of accessing them efficiently by cblock. So
+ * rather than querying the policy for each cblock, we let it walk its data
+ * structures and fill in the hints in whatever order it wishes.
+ */
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+
+/*
+ * Requests hints for every cblock and stores them in the metadata device.
+ */
+int dm_cache_save_hint(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, uint32_t hint);
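+
+/*
+ * A minimal sketch of the intended calling sequence (illustrative only;
+ * save_hint_fn is a made-up name and error handling is omitted):
+ *
+ *	static int save_hint_fn(void *context, dm_cblock_t cblock,
+ *				dm_oblock_t oblock, uint32_t hint)
+ *	{
+ *		return dm_cache_save_hint(context, cblock, hint);
+ *	}
+ *
+ *	r = dm_cache_begin_hints(cmd, policy);
+ *	if (!r)
+ *		r = policy_walk_mappings(policy, save_hint_fn, cmd);
+ */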
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
new file mode 100644
index 000000000000..cc05d70b3cb8
--- /dev/null
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * writeback cache policy supporting flushing out dirty cache blocks.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache cleaner"
+#define CLEANER_VERSION "1.0.0"
+
+/* Cache entry struct. */
+struct wb_cache_entry {
+ struct list_head list;
+ struct hlist_node hlist;
+
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ bool dirty:1;
+ bool pending:1;
+};
+
+struct hash {
+ struct hlist_head *table;
+ dm_block_t hash_bits;
+ unsigned nr_buckets;
+};
+
+struct policy {
+ struct dm_cache_policy policy;
+ spinlock_t lock;
+
+ struct list_head free;
+ struct list_head clean;
+ struct list_head clean_pending;
+ struct list_head dirty;
+
+ /*
+ * We know exactly how many cblocks will be needed,
+ * so we can allocate them up front.
+ */
+ dm_cblock_t cache_size, nr_cblocks_allocated;
+ struct wb_cache_entry *cblocks;
+ struct hash chash;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Low-level functions.
+ */
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+static struct policy *to_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct policy, policy);
+}
+
+static struct list_head *list_pop(struct list_head *q)
+{
+ struct list_head *r = q->next;
+
+ list_del(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Allocate/free various resources. */
+static int alloc_hash(struct hash *hash, unsigned elts)
+{
+ hash->nr_buckets = next_power(elts >> 4, 16);
+ hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
+
+ return hash->table ? 0 : -ENOMEM;
+}
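+
+/*
+ * eg, a 65536 entry cache gets 65536 >> 4 = 4096 buckets and 12 hash bits.
+ */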
+
+static void free_hash(struct hash *hash)
+{
+ vfree(hash->table);
+}
+
+static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
+{
+ int r = -ENOMEM;
+
+ p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
+ if (p->cblocks) {
+ unsigned u = from_cblock(cache_size);
+
+ while (u--)
+ list_add(&p->cblocks[u].list, &p->free);
+
+ p->nr_cblocks_allocated = 0;
+
+ /* Cache entries hash. */
+ r = alloc_hash(&p->chash, from_cblock(cache_size));
+ if (r)
+ vfree(p->cblocks);
+ }
+
+ return r;
+}
+
+static void free_cache_blocks_and_hash(struct policy *p)
+{
+ free_hash(&p->chash);
+ vfree(p->cblocks);
+}
+
+static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
+{
+ struct wb_cache_entry *e;
+
+ BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
+
+ e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
+
+ return e;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Hash functions (lookup, insert, remove). */
+static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
+{
+ struct hash *hash = &p->chash;
+ unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
+ struct wb_cache_entry *cur;
+ struct hlist_head *bucket = &hash->table[h];
+
+ hlist_for_each_entry(cur, bucket, hlist) {
+ if (cur->oblock == oblock) {
+ /* Move to the front of the bucket for faster access. */
+ hlist_del(&cur->hlist);
+ hlist_add_head(&cur->hlist, bucket);
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
+
+ hlist_add_head(&e->hlist, &p->chash.table[h]);
+}
+
+static void remove_cache_hash_entry(struct wb_cache_entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/* Public interface (see dm-cache-policy.h). */
+static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ spin_lock_irqsave(&p->lock, flags);
+ else if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
+}
+
+static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ *cblock = e->cblock;
+ r = 0;
+ } else
+ r = -ENOENT;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+
+ e = lookup_cache_entry(p, oblock);
+ BUG_ON(!e);
+
+ if (set) {
+ if (!e->dirty) {
+ e->dirty = true;
+ list_move(&e->list, &p->dirty);
+ }
+ } else {
+ if (e->dirty) {
+ e->pending = false;
+ e->dirty = false;
+ list_move(&e->list, &p->clean);
+ }
+ }
+}
+
+static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, true);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, false);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ insert_cache_hash_entry(p, e);
+ if (e->dirty)
+ list_add(&e->list, &p->dirty);
+ else
+ list_add(&e->list, &p->clean);
+}
+
+static int wb_load_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e = alloc_cache_entry(p);
+
+ if (e) {
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->dirty = false; /* blocks default to clean */
+ add_cache_entry(p, e);
+ r = 0;
+ } else
+ r = -ENOMEM;
+
+ return r;
+}
+
+static void wb_destroy(struct dm_cache_policy *pe)
+{
+ struct policy *p = to_policy(pe);
+
+ free_cache_blocks_and_hash(p);
+ kfree(p);
+}
+
+static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
+{
+ struct wb_cache_entry *r = lookup_cache_entry(p, oblock);
+
+ BUG_ON(!r);
+
+ remove_cache_hash_entry(r);
+ list_del(&r->list);
+
+ return r;
+}
+
+static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, oblock);
+ list_add_tail(&e->list, &p->free);
+ BUG_ON(!from_cblock(p->nr_cblocks_allocated));
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_force_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t current_oblock, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, current_oblock);
+ e->oblock = oblock;
+ add_cache_entry(p, e);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
+{
+ struct list_head *l;
+ struct wb_cache_entry *r;
+
+ if (list_empty(&p->dirty))
+ return NULL;
+
+ l = list_pop(&p->dirty);
+ r = container_of(l, struct wb_cache_entry, list);
+ list_add(l, &p->clean_pending);
+
+ return r;
+}
+
+static int wb_writeback_work(struct dm_cache_policy *pe,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r = -ENOENT;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ e = get_next_dirty_entry(p);
+ if (e) {
+ *oblock = e->oblock;
+ *cblock = e->cblock;
+ r = 0;
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
+{
+ return to_policy(pe)->nr_cblocks_allocated;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct policy *p)
+{
+ p->policy.destroy = wb_destroy;
+ p->policy.map = wb_map;
+ p->policy.lookup = wb_lookup;
+ p->policy.set_dirty = wb_set_dirty;
+ p->policy.clear_dirty = wb_clear_dirty;
+ p->policy.load_mapping = wb_load_mapping;
+ p->policy.walk_mappings = NULL;
+ p->policy.remove_mapping = wb_remove_mapping;
+ p->policy.writeback_work = wb_writeback_work;
+ p->policy.force_mapping = wb_force_mapping;
+ p->policy.residency = wb_residency;
+ p->policy.tick = NULL;
+}
+
+static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+
+ init_policy_functions(p);
+ INIT_LIST_HEAD(&p->free);
+ INIT_LIST_HEAD(&p->clean);
+ INIT_LIST_HEAD(&p->clean_pending);
+ INIT_LIST_HEAD(&p->dirty);
+
+ p->cache_size = cache_size;
+ spin_lock_init(&p->lock);
+
+ /* Allocate cache entry structs and add them to free list. */
+ r = alloc_cache_blocks_with_hash(p, cache_size);
+ if (!r)
+ return &p->policy;
+
+ kfree(p);
+
+ return NULL;
+}
+/*----------------------------------------------------------------------------*/
+
+static struct dm_cache_policy_type wb_policy_type = {
+ .name = "cleaner",
+ .hint_size = 0,
+ .owner = THIS_MODULE,
+ .create = wb_create
+};
+
+static int __init wb_init(void)
+{
+ int r = dm_cache_policy_register(&wb_policy_type);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+ else
+ DMINFO("version " CLEANER_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit wb_exit(void)
+{
+ dm_cache_policy_unregister(&wb_policy_type);
+}
+
+module_init(wb_init);
+module_exit(wb_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cleaner cache policy");
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
new file mode 100644
index 000000000000..52a75beeced5
--- /dev/null
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_INTERNAL_H
+#define DM_CACHE_POLICY_INTERNAL_H
+
+#include "dm-cache-policy.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Little inline functions that simplify calling the policy methods.
+ */
+static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+}
+
+static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ BUG_ON(!p->lookup);
+ return p->lookup(p, oblock, cblock);
+}
+
+static inline void policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->set_dirty)
+ p->set_dirty(p, oblock);
+}
+
+static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->clear_dirty)
+ p->clear_dirty(p, oblock);
+}
+
+static inline int policy_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ return p->load_mapping(p, oblock, cblock, hint, hint_valid);
+}
+
+static inline int policy_walk_mappings(struct dm_cache_policy *p,
+ policy_walk_fn fn, void *context)
+{
+ return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+}
+
+static inline int policy_writeback_work(struct dm_cache_policy *p,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ return p->writeback_work ? p->writeback_work(p, oblock, cblock) : -ENOENT;
+}
+
+static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return p->remove_mapping(p, oblock);
+}
+
+static inline void policy_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ return p->force_mapping(p, current_oblock, new_oblock);
+}
+
+static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
+{
+ return p->residency(p);
+}
+
+static inline void policy_tick(struct dm_cache_policy *p)
+{
+ if (p->tick)
+ return p->tick(p);
+}
+
+static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ if (p->emit_config_values)
+ return p->emit_config_values(p, result, maxlen);
+
+ DMEMIT("0");
+ return 0;
+}
+
+static inline int policy_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Creates a new cache policy given a policy name, a cache size, an origin size and the block size.
+ */
+struct dm_cache_policy *dm_cache_policy_create(const char *name, dm_cblock_t cache_size,
+ sector_t origin_size, sector_t block_size);
+
+/*
+ * Destroys the policy. This drops references to the policy module as well
+ * as calling its destroy method. So always use this rather than calling
+ * the policy->destroy method directly.
+ */
+void dm_cache_policy_destroy(struct dm_cache_policy *p);
+
+/*
+ * In case we've forgotten.
+ */
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
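+
+/*
+ * Illustrative lifecycle (a sketch with placeholder sizes; error handling
+ * trimmed):
+ *
+ *	struct dm_cache_policy *p;
+ *
+ *	p = dm_cache_policy_create("mq", cache_size, origin_size, block_size);
+ *	if (!p)
+ *		return -EINVAL;
+ *	...
+ *	DMINFO("using policy %s", dm_cache_policy_get_name(p));
+ *	...
+ *	dm_cache_policy_destroy(p);
+ */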
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_INTERNAL_H */
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
new file mode 100644
index 000000000000..964153255076
--- /dev/null
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache-policy-mq"
+#define MQ_VERSION "1.0.0"
+
+static struct kmem_cache *mq_entry_cache;
+
+/*----------------------------------------------------------------*/
+
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+/*----------------------------------------------------------------*/
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ return vzalloc(s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Large, sequential ios are probably better left on the origin device since
+ * spindles tend to have good bandwidth.
+ *
+ * The io_tracker tries to spot when the io is in one of these sequential
+ * modes.
+ *
+ * The two thresholds used to switch between random and sequential io mode
+ * default as follows and can be adjusted via the constructor and message
+ * interfaces.
+ */
+#define RANDOM_THRESHOLD_DEFAULT 4
+#define SEQUENTIAL_THRESHOLD_DEFAULT 512
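+
+/*
+ * With these defaults a run of 512 contiguous bios, uninterrupted by a
+ * random one (which resets the count), switches the tracker to sequential
+ * mode; four random bios in a row switch it back.
+ */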
+
+enum io_pattern {
+ PATTERN_SEQUENTIAL,
+ PATTERN_RANDOM
+};
+
+struct io_tracker {
+ enum io_pattern pattern;
+
+ unsigned nr_seq_samples;
+ unsigned nr_rand_samples;
+ unsigned thresholds[2];
+
+ dm_oblock_t last_end_oblock;
+};
+
+static void iot_init(struct io_tracker *t,
+ int sequential_threshold, int random_threshold)
+{
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ t->last_end_oblock = 0;
+ t->thresholds[PATTERN_RANDOM] = random_threshold;
+ t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
+}
+
+static enum io_pattern iot_pattern(struct io_tracker *t)
+{
+ return t->pattern;
+}
+
+static void iot_update_stats(struct io_tracker *t, struct bio *bio)
+{
+ if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ t->nr_seq_samples++;
+ else {
+ /*
+ * Just one non-sequential IO is enough to reset the
+ * counters.
+ */
+ if (t->nr_seq_samples) {
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ }
+
+ t->nr_rand_samples++;
+ }
+
+ t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+}
+
+static void iot_check_for_pattern_switch(struct io_tracker *t)
+{
+ switch (t->pattern) {
+ case PATTERN_SEQUENTIAL:
+ if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+
+ case PATTERN_RANDOM:
+ if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
+ t->pattern = PATTERN_SEQUENTIAL;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+ }
+}
+
+static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
+{
+ iot_update_stats(t, bio);
+ iot_check_for_pattern_switch(t);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This queue is divided up into different levels, allowing us to push
+ * entries to the back of any of the levels. Think of it as a partially
+ * sorted queue.
+ */
+#define NR_QUEUE_LEVELS 16u
+
+struct queue {
+ struct list_head qs[NR_QUEUE_LEVELS];
+};
+
+static void queue_init(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ INIT_LIST_HEAD(q->qs + i);
+}
+
+/*
+ * Insert an entry to the back of the given level.
+ */
+static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
+{
+ list_add_tail(elt, q->qs + level);
+}
+
+static void queue_remove(struct list_head *elt)
+{
+ list_del(elt);
+}
+
+/*
+ * Shifts all entries down one level. This has no effect on the order of
+ * the queue.
+ */
+static void queue_shift_down(struct queue *q)
+{
+ unsigned level;
+
+ for (level = 1; level < NR_QUEUE_LEVELS; level++)
+ list_splice_init(q->qs + level, q->qs + level - 1);
+}
+
+/*
+ * Gives us the oldest entry of the lowest populated level. If the first
+ * level is emptied then we shift down one level.
+ */
+static struct list_head *queue_pop(struct queue *q)
+{
+ unsigned level;
+ struct list_head *r;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ if (!list_empty(q->qs + level)) {
+ r = q->qs[level].next;
+ list_del(r);
+
+ /* have we just emptied the bottom level? */
+ if (level == 0 && list_empty(q->qs))
+ queue_shift_down(q);
+
+ return r;
+ }
+
+ return NULL;
+}
+
+static struct list_head *list_pop(struct list_head *lh)
+{
+ struct list_head *r = lh->next;
+
+ BUG_ON(!r);
+ list_del_init(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Describes a cache entry. Used in both the cache and the pre_cache.
+ */
+struct entry {
+ struct hlist_node hlist;
+ struct list_head list;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock; /* valid iff in_cache */
+
+ /*
+ * FIXME: pack these better
+ */
+ bool in_cache:1;
+ unsigned hit_count;
+ unsigned generation;
+ unsigned tick;
+};
+
+struct mq_policy {
+ struct dm_cache_policy policy;
+
+ /* protects everything */
+ struct mutex lock;
+ dm_cblock_t cache_size;
+ struct io_tracker tracker;
+
+ /*
+ * We maintain two queues of entries. The cache proper contains
+ * the currently active mappings, whereas the pre_cache tracks
+ * blocks that are being hit frequently and are potential candidates
+ * for promotion to the cache.
+ */
+ struct queue pre_cache;
+ struct queue cache;
+
+ /*
+ * Keeps track of time, incremented by the core. We use this to
+ * avoid attributing multiple hits within the same tick.
+ *
+ * Access to tick_protected should be done with the spin lock held.
+ * It's copied to tick at the start of the map function (within the
+ * mutex).
+ */
+ spinlock_t tick_lock;
+ unsigned tick_protected;
+ unsigned tick;
+
+ /*
+ * A count of the number of times the map function has been called
+ * and found an entry in the pre_cache or cache. Currently used to
+ * calculate the generation.
+ */
+ unsigned hit_count;
+
+ /*
+ * A generation is a longish period that is used to trigger some
+ * book keeping effects. eg, decrementing hit counts on entries.
+ * This is needed to allow the cache to evolve as io patterns
+ * change.
+ */
+ unsigned generation;
+ unsigned generation_period; /* in lookups (will probably change) */
+
+ /*
+ * Entries in the pre_cache whose hit count passes the promotion
+ * threshold move to the cache proper. Working out the correct
+ * value for promote_threshold is crucial to this policy.
+ */
+ unsigned promote_threshold;
+
+ /*
+ * We need cache_size entries for the cache, and choose to have
+ * cache_size entries for the pre_cache too. One motivation for
+ * using the same size is to make the hit counts directly
+ * comparable between pre_cache and cache.
+ */
+ unsigned nr_entries;
+ unsigned nr_entries_allocated;
+ struct list_head free;
+
+ /*
+ * Cache blocks may be unallocated. We store this info in a
+ * bitset.
+ */
+ unsigned long *allocation_bitset;
+ unsigned nr_cblocks_allocated;
+ unsigned find_free_nr_words;
+ unsigned find_free_last_word;
+
+ /*
+ * The hash table allows us to quickly find an entry by origin
+ * block. Both pre_cache and cache entries are in here.
+ */
+ unsigned nr_buckets;
+ dm_block_t hash_bits;
+ struct hlist_head *table;
+};
+
+/*----------------------------------------------------------------*/
+/* Free/alloc mq cache entry structures. */
+static void takeout_queue(struct list_head *lh, struct queue *q)
+{
+ unsigned level;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_splice(q->qs + level, lh);
+}
+
+static void free_entries(struct mq_policy *mq)
+{
+ struct entry *e, *tmp;
+
+ takeout_queue(&mq->free, &mq->pre_cache);
+ takeout_queue(&mq->free, &mq->cache);
+
+ list_for_each_entry_safe(e, tmp, &mq->free, list)
+ kmem_cache_free(mq_entry_cache, e);
+}
+
+static int alloc_entries(struct mq_policy *mq, unsigned elts)
+{
+ unsigned u = mq->nr_entries;
+
+ INIT_LIST_HEAD(&mq->free);
+ mq->nr_entries_allocated = 0;
+
+ while (u--) {
+ struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
+
+ if (!e) {
+ free_entries(mq);
+ return -ENOMEM;
+ }
+
+ list_add(&e->list, &mq->free);
+ }
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Simple hash table implementation. Should replace with the standard hash
+ * table that's making its way upstream.
+ */
+static void hash_insert(struct mq_policy *mq, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
+
+ hlist_add_head(&e->hlist, mq->table + h);
+}
+
+static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
+ struct hlist_head *bucket = mq->table + h;
+ struct entry *e;
+
+ hlist_for_each_entry(e, bucket, hlist)
+ if (e->oblock == oblock) {
+ hlist_del(&e->hlist);
+ hlist_add_head(&e->hlist, bucket);
+ return e;
+ }
+
+ return NULL;
+}
+
+static void hash_remove(struct entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Allocates a new entry structure. The memory is allocated in one lump,
+ * so we just hand it out here. Returns NULL if all entries have
+ * already been allocated. Cannot fail otherwise.
+ */
+static struct entry *alloc_entry(struct mq_policy *mq)
+{
+ struct entry *e;
+
+ if (mq->nr_entries_allocated >= mq->nr_entries) {
+ BUG_ON(!list_empty(&mq->free));
+ return NULL;
+ }
+
+ e = list_entry(list_pop(&mq->free), struct entry, list);
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+
+ mq->nr_entries_allocated++;
+ return e;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Mark cache blocks allocated or not in the bitset.
+ */
+static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ set_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated++;
+}
+
+static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ clear_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated--;
+}
+
+static bool any_free_cblocks(struct mq_policy *mq)
+{
+ return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+}
+
+/*
+ * Fills result out with a cache block that isn't in use, or returns
+ * -ENOSPC. This does _not_ mark the cblock as allocated; the caller is
+ * responsible for that.
+ */
+static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
+ dm_cblock_t *result, unsigned *last_word)
+{
+ int r = -ENOSPC;
+ unsigned w;
+
+ for (w = begin; w < end; w++) {
+ /*
+ * ffz is undefined if no zero exists
+ */
+ if (mq->allocation_bitset[w] != ~0UL) {
+ *last_word = w;
+ *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
+ if (from_cblock(*result) < from_cblock(mq->cache_size))
+ r = 0;
+
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
+{
+ int r;
+
+ if (!any_free_cblocks(mq))
+ return -ENOSPC;
+
+ r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
+ if (r == -ENOSPC && mq->find_free_last_word)
+ r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Now we get to the meat of the policy. This section deals with deciding
+ * when to add entries to the pre_cache and cache, and when to move between
+ * them.
+ */
+
+/*
+ * The queue level is based on the log2 of the hit count.
+ */
+static unsigned queue_level(struct entry *e)
+{
+ return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
+}
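+
+/*
+ * eg, a hit count of 1 maps to level 0, counts of 4-7 map to level 2, and
+ * anything from 32768 upwards is clamped to the top level (15).
+ */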
+
+/*
+ * Inserts the entry into the pre_cache or the cache. Ensures the cache
+ * block is marked as allocated if necessary. Inserts into the hash table.
+ * Sets the tick which records when the entry was last moved about.
+ */
+static void push(struct mq_policy *mq, struct entry *e)
+{
+ e->tick = mq->tick;
+ hash_insert(mq, e);
+
+ if (e->in_cache) {
+ alloc_cblock(mq, e->cblock);
+ queue_push(&mq->cache, queue_level(e), &e->list);
+ } else
+ queue_push(&mq->pre_cache, queue_level(e), &e->list);
+}
+
+/*
+ * Removes an entry from pre_cache or cache. Removes from the hash table.
+ * Frees off the cache block if necessary.
+ */
+static void del(struct mq_policy *mq, struct entry *e)
+{
+ queue_remove(&e->list);
+ hash_remove(e);
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+}
+
+/*
+ * Like del, except it removes the first entry in the queue (ie. the least
+ * recently used).
+ */
+static struct entry *pop(struct mq_policy *mq, struct queue *q)
+{
+ struct entry *e = container_of(queue_pop(q), struct entry, list);
+
+ if (e) {
+ hash_remove(e);
+
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+ }
+
+ return e;
+}
+
+/*
+ * Has this entry already been updated?
+ */
+static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+{
+ return mq->tick == e->tick;
+}
+
+/*
+ * The promotion threshold is adjusted every generation. As are the counts
+ * of the entries.
+ *
+ * At the moment the threshold is taken by averaging the hit counts of some
+ * of the entries in the cache (the first 20 entries, scanning up from the
+ * lowest level).
+ *
+ * We can be much cleverer than this though. For example, each promotion
+ * could bump up the threshold helping to prevent churn. Much more to do
+ * here.
+ */
+
+#define MAX_TO_AVERAGE 20
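+
+/*
+ * eg, if the 20 sampled entries have 170 hits between them, the new
+ * promote_threshold is DIV_ROUND_UP(170, 20) = 9; a pre_cache entry then
+ * needs 9 hits plus the read/write bias below before it is promoted.
+ */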
+
+static void check_generation(struct mq_policy *mq)
+{
+ unsigned total = 0, nr = 0, count = 0, level;
+ struct list_head *head;
+ struct entry *e;
+
+ if ((mq->hit_count >= mq->generation_period) &&
+ (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
+
+ mq->hit_count = 0;
+ mq->generation++;
+
+ for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
+ head = mq->cache.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+ }
+
+ mq->promote_threshold = nr ? total / nr : 1;
+ if (mq->promote_threshold * nr < total)
+ mq->promote_threshold++;
+ }
+}
+
+/*
+ * Whenever we use an entry we bump up its hit counter, and push it to the
+ * back of its current level.
+ */
+static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+{
+ if (updated_this_tick(mq, e))
+ return;
+
+ e->hit_count++;
+ mq->hit_count++;
+ check_generation(mq);
+
+ /* generation adjustment, to stop the counts increasing forever. */
+ /* FIXME: divide? */
+ /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
+ e->generation = mq->generation;
+
+ del(mq, e);
+ push(mq, e);
+}
+
+/*
+ * Demote the least recently used entry from the cache to the pre_cache.
+ * Returns the new cache entry to use, and the old origin block it was
+ * mapped to.
+ *
+ * We drop the hit count on the demoted entry back to 1 to stop it bouncing
+ * straight back into the cache if it's subsequently hit. There are
+ * various options here, and more experimentation would be good:
+ *
+ * - just forget about the demoted entry completely (ie. don't insert it
+ *   into the pre_cache).
+ * - divide the hit count rather than setting it to some hard coded value.
+ * - set the hit count to a hard coded value other than 1, eg, is it better
+ * if it goes in at level 2?
+ */
+static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+{
+ dm_cblock_t result;
+ struct entry *demoted = pop(mq, &mq->cache);
+
+ BUG_ON(!demoted);
+ result = demoted->cblock;
+ *oblock = demoted->oblock;
+ demoted->in_cache = false;
+ demoted->hit_count = 1;
+ push(mq, demoted);
+
+ return result;
+}
+
+/*
+ * We modify the basic promote_threshold depending on the specific io.
+ *
+ * If the origin block has been discarded then there's no cost to copy it
+ * to the cache.
+ *
+ * We bias towards reads, since they can be demoted at no cost if they
+ * haven't been dirtied.
+ */
+#define DISCARDED_PROMOTE_THRESHOLD 1
+#define READ_PROMOTE_THRESHOLD 4
+#define WRITE_PROMOTE_THRESHOLD 8
+
+static unsigned adjusted_promote_threshold(struct mq_policy *mq,
+ bool discarded_oblock, int data_dir)
+{
+ if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ /*
+ * We don't need to do any copying at all, so give this a
+ * very low threshold. In practice this only triggers
+ * during initial population after a format.
+ */
+ return DISCARDED_PROMOTE_THRESHOLD;
+
+ return data_dir == READ ?
+ (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
+ (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+}
+
+static bool should_promote(struct mq_policy *mq, struct entry *e,
+ bool discarded_oblock, int data_dir)
+{
+ return e->hit_count >=
+ adjusted_promote_threshold(mq, discarded_oblock, data_dir);
+}
+
+static int cache_entry_found(struct mq_policy *mq,
+ struct entry *e,
+ struct policy_result *result)
+{
+ requeue_and_update_tick(mq, e);
+
+ if (e->in_cache) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+ }
+
+ return 0;
+}
+
+/*
+ * Moves an entry from the pre_cache to the cache. The main work is
+ * finding which cache block to use.
+ */
+static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ struct policy_result *result)
+{
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_REPLACE;
+ cblock = demote_cblock(mq, &result->old_oblock);
+ } else
+ result->op = POLICY_NEW;
+
+ result->cblock = e->cblock = cblock;
+
+ del(mq, e);
+ e->in_cache = true;
+ push(mq, e);
+
+ return 0;
+}
+
+static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ bool updated = updated_this_tick(mq, e);
+
+ requeue_and_update_tick(mq, e);
+
+ if ((!discarded_oblock && updated) ||
+ !should_promote(mq, e, discarded_oblock, data_dir))
+ result->op = POLICY_MISS;
+ else if (!can_migrate)
+ r = -EWOULDBLOCK;
+ else
+ r = pre_cache_to_cache(mq, e, result);
+
+ return r;
+}
+
+static void insert_in_pre_cache(struct mq_policy *mq,
+ dm_oblock_t oblock)
+{
+ struct entry *e = alloc_entry(mq);
+
+ if (!e)
+ /*
+ * There's no spare entry structure, so we grab the least
+ * used one from the pre_cache.
+ */
+ e = pop(mq, &mq->pre_cache);
+
+ if (unlikely(!e)) {
+ DMWARN("couldn't pop from pre cache");
+ return;
+ }
+
+ e->in_cache = false;
+ e->oblock = oblock;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+}
+
+static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ struct policy_result *result)
+{
+ struct entry *e;
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
+
+ e = alloc_entry(mq);
+ if (unlikely(!e)) {
+ result->op = POLICY_MISS;
+ return;
+ }
+
+ e->oblock = oblock;
+ e->cblock = cblock;
+ e->in_cache = true;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ result->op = POLICY_NEW;
+ result->cblock = e->cblock;
+}
+
+static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
+ if (can_migrate)
+ insert_in_cache(mq, oblock, result);
+ else
+ return -EWOULDBLOCK;
+ } else {
+ insert_in_pre_cache(mq, oblock);
+ result->op = POLICY_MISS;
+ }
+
+ return 0;
+}
+
+/*
+ * Looks the oblock up in the hash table, then decides whether to put it
+ * in the pre_cache, the cache, etc.
+ */
+static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ struct entry *e = hash_lookup(mq, oblock);
+
+ if (e && e->in_cache)
+ r = cache_entry_found(mq, e, result);
+ else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
+ result->op = POLICY_MISS;
+ else if (e)
+ r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+ data_dir, result);
+ else
+ r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+ data_dir, result);
+
+ if (r == -EWOULDBLOCK)
+ result->op = POLICY_MISS;
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct mq_policy, policy);
+}
+
+static void mq_destroy(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ free_bitset(mq->allocation_bitset);
+ kfree(mq->table);
+ free_entries(mq);
+ kfree(mq);
+}
+
+static void copy_tick(struct mq_policy *mq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick = mq->tick_protected;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ mutex_lock(&mq->lock);
+ else if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ copy_tick(mq);
+
+ iot_examine_bio(&mq->tracker, bio);
+ r = map(mq, oblock, can_migrate, discarded_oblock,
+ bio_data_dir(bio), result);
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ e = hash_lookup(mq, oblock);
+ if (e && e->in_cache) {
+ *cblock = e->cblock;
+ r = 0;
+ } else
+ r = -ENOENT;
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ e = alloc_entry(mq);
+ if (!e)
+ return -ENOMEM;
+
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->in_cache = true;
+ e->hit_count = hint_valid ? hint : 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ return 0;
+}
+
+static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ int r = 0;
+ struct entry *e;
+ unsigned level;
+
+ mutex_lock(&mq->lock);
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, &mq->cache.qs[level], list) {
+ r = fn(context, e->cblock, e->oblock, e->hit_count);
+ if (r)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e = hash_lookup(mq, oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->in_cache = false;
+ push(mq, e);
+}
+
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ remove_mapping(mq, oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static void force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = hash_lookup(mq, current_oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->oblock = new_oblock;
+ push(mq, e);
+}
+
+static void mq_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ force_mapping(mq, current_oblock, new_oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static dm_cblock_t mq_residency(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ /* FIXME: lock mutex, not sure we can block here */
+ return to_cblock(mq->nr_cblocks_allocated);
+}
+
+static void mq_tick(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick_protected++;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ enum io_pattern pattern;
+ unsigned long tmp;
+
+ if (!strcasecmp(key, "random_threshold"))
+ pattern = PATTERN_RANDOM;
+ else if (!strcasecmp(key, "sequential_threshold"))
+ pattern = PATTERN_SEQUENTIAL;
+ else
+ return -EINVAL;
+
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ mq->tracker.thresholds[pattern] = tmp;
+
+ return 0;
+}
+
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ DMEMIT("4 random_threshold %u sequential_threshold %u",
+ mq->tracker.thresholds[PATTERN_RANDOM],
+ mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
+
+ return 0;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct mq_policy *mq)
+{
+ mq->policy.destroy = mq_destroy;
+ mq->policy.map = mq_map;
+ mq->policy.lookup = mq_lookup;
+ mq->policy.load_mapping = mq_load_mapping;
+ mq->policy.walk_mappings = mq_walk_mappings;
+ mq->policy.remove_mapping = mq_remove_mapping;
+ mq->policy.writeback_work = NULL;
+ mq->policy.force_mapping = mq_force_mapping;
+ mq->policy.residency = mq_residency;
+ mq->policy.tick = mq_tick;
+ mq->policy.emit_config_values = mq_emit_config_values;
+ mq->policy.set_config_value = mq_set_config_value;
+}
+
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ init_policy_functions(mq);
+ iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
+
+ mq->cache_size = cache_size;
+ mq->tick_protected = 0;
+ mq->tick = 0;
+ mq->hit_count = 0;
+ mq->generation = 0;
+ mq->promote_threshold = 0;
+ mutex_init(&mq->lock);
+ spin_lock_init(&mq->tick_lock);
+ mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
+ mq->find_free_last_word = 0;
+
+ queue_init(&mq->pre_cache);
+ queue_init(&mq->cache);
+ mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
+
+ mq->nr_entries = 2 * from_cblock(cache_size);
+ r = alloc_entries(mq, mq->nr_entries);
+ if (r)
+ goto bad_cache_alloc;
+
+ mq->nr_entries_allocated = 0;
+ mq->nr_cblocks_allocated = 0;
+
+ mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ mq->hash_bits = ffs(mq->nr_buckets) - 1;
+ mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ if (!mq->table)
+ goto bad_alloc_table;
+
+ mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
+ if (!mq->allocation_bitset)
+ goto bad_alloc_bitset;
+
+ return &mq->policy;
+
+bad_alloc_bitset:
+ kfree(mq->table);
+bad_alloc_table:
+ free_entries(mq);
+bad_cache_alloc:
+ kfree(mq);
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_cache_policy_type mq_policy_type = {
+ .name = "mq",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static struct dm_cache_policy_type default_policy_type = {
+ .name = "default",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static int __init mq_init(void)
+{
+ int r;
+
+ mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
+ sizeof(struct entry),
+ __alignof__(struct entry),
+ 0, NULL);
+ if (!mq_entry_cache)
+ goto bad;
+
+ r = dm_cache_policy_register(&mq_policy_type);
+ if (r) {
+ DMERR("register failed %d", r);
+ goto bad_register_mq;
+ }
+
+ r = dm_cache_policy_register(&default_policy_type);
+ if (!r) {
+ DMINFO("version " MQ_VERSION " loaded");
+ return 0;
+ }
+
+ DMERR("register failed (as default) %d", r);
+
+ dm_cache_policy_unregister(&mq_policy_type);
+bad_register_mq:
+ kmem_cache_destroy(mq_entry_cache);
+bad:
+ return -ENOMEM;
+}
+
+static void __exit mq_exit(void)
+{
+ dm_cache_policy_unregister(&mq_policy_type);
+ dm_cache_policy_unregister(&default_policy_type);
+
+ kmem_cache_destroy(mq_entry_cache);
+}
+
+module_init(mq_init);
+module_exit(mq_exit);
+
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
new file mode 100644
index 000000000000..2cbf5fdaac52
--- /dev/null
+++ b/drivers/md/dm-cache-policy.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy-internal.h"
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache-policy"
+
+static DEFINE_SPINLOCK(register_lock);
+static LIST_HEAD(register_list);
+
+static struct dm_cache_policy_type *__find_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ list_for_each_entry(t, &register_list, list)
+ if (!strcmp(t->name, name))
+ return t;
+
+ return NULL;
+}
+
+static struct dm_cache_policy_type *__get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t = __find_policy(name);
+
+ if (t && !try_module_get(t->owner)) {
+ DMWARN("couldn't get module %s", name);
+ t = ERR_PTR(-EINVAL);
+ }
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ spin_lock(&register_lock);
+ t = __get_policy_once(name);
+ spin_unlock(&register_lock);
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ if (t)
+ return t;
+
+ request_module("dm-cache-%s", name);
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ return t;
+}
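+
+/*
+ * Note the request_module() above means that asking for policy "foo"
+ * tries to load a module named (or aliased) "dm-cache-foo".
+ */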
+
+static void put_policy(struct dm_cache_policy_type *t)
+{
+ module_put(t->owner);
+}
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type)
+{
+ int r;
+
+ /* One size fits all for now */
+ if (type->hint_size != 0 && type->hint_size != 4) {
+ DMWARN("hint size must be 0 or 4 but %llu supplied.", (unsigned long long) type->hint_size);
+ return -EINVAL;
+ }
+
+ spin_lock(&register_lock);
+ if (__find_policy(type->name)) {
+ DMWARN("attempt to register policy under duplicate name %s", type->name);
+ r = -EINVAL;
+ } else {
+ list_add(&type->list, &register_list);
+ r = 0;
+ }
+ spin_unlock(&register_lock);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_register);
+
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type)
+{
+ spin_lock(&register_lock);
+ list_del_init(&type->list);
+ spin_unlock(&register_lock);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_unregister);
+
+struct dm_cache_policy *dm_cache_policy_create(const char *name,
+ dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ struct dm_cache_policy *p = NULL;
+ struct dm_cache_policy_type *type;
+
+ type = get_policy(name);
+ if (!type) {
+ DMWARN("unknown policy type");
+ return NULL;
+ }
+
+ p = type->create(cache_size, origin_size, cache_block_size);
+ if (!p) {
+ put_policy(type);
+ return NULL;
+ }
+ p->private = type;
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_create);
+
+void dm_cache_policy_destroy(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ p->destroy(p);
+ put_policy(t);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_destroy);
+
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->name;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->hint_size;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_hint_size);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
new file mode 100644
index 000000000000..f0f51b260544
--- /dev/null
+++ b/drivers/md/dm-cache-policy.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_H
+#define DM_CACHE_POLICY_H
+
+#include "dm-cache-block-types.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+/* FIXME: make it clear which methods are optional. Get debug policy to
+ * double check this at start.
+ */
+
+/*
+ * The cache policy makes the important decisions about which blocks get to
+ * live on the faster cache device.
+ *
+ * When the core target has to remap a bio it calls the 'map' method of the
+ * policy. This returns an instruction telling the core target what to do.
+ *
+ * POLICY_HIT:
+ * That block is in the cache. Remap to the cache and carry on.
+ *
+ * POLICY_MISS:
+ * This block is on the origin device. Remap and carry on.
+ *
+ * POLICY_NEW:
+ * This block is currently on the origin device, but the policy wants to
+ * move it. The core should:
+ *
+ * - hold any further io to this origin block
+ * - copy the origin to the given cache block
+ * - release all the held blocks
+ * - remap the original block to the cache
+ *
+ * POLICY_REPLACE:
+ * This block is currently on the origin device. The policy wants to
+ * move it to the cache, with the added complication that the destination
+ * cache block needs a writeback first. The core should:
+ *
+ * - hold any further io to this origin block
+ * - hold any further io to the origin block that's being written back
+ * - writeback
+ * - copy new block to cache
+ * - release held blocks
+ * - remap bio to cache and reissue.
+ *
+ * Should the core run into trouble while processing a POLICY_NEW or
+ * POLICY_REPLACE instruction it will roll back the policy's mapping using
+ * remove_mapping() or force_mapping(). These methods must not fail. This
+ * approach avoids having transactional semantics in the policy (ie, the
+ * core informing the policy when a migration is complete), and hence makes
+ * it easier to write new policies.
+ *
+ * In general policy methods should never block, except in the case of the
+ * map function when can_migrate is set. So be careful to implement using
+ * bounded, preallocated memory.
+ */
+enum policy_operation {
+ POLICY_HIT,
+ POLICY_MISS,
+ POLICY_NEW,
+ POLICY_REPLACE
+};
+
+/*
+ * This is the instruction passed back to the core target.
+ */
+struct policy_result {
+ enum policy_operation op;
+ dm_oblock_t old_oblock; /* POLICY_REPLACE */
+ dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
+};
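+
+/*
+ * A sketch of driving the map method with migrations disallowed and acting
+ * on the result; remap_to_cache()/remap_to_origin() here stand in for the
+ * core target's remapping code and are not part of this interface:
+ *
+ *	struct policy_result result;
+ *	int r = p->map(p, oblock, true, false, false, bio, &result);
+ *
+ *	if (r == -EWOULDBLOCK)
+ *		return r;
+ *
+ *	if (result.op == POLICY_HIT)
+ *		remap_to_cache(bio, result.cblock);
+ *	else if (result.op == POLICY_MISS)
+ *		remap_to_origin(bio);
+ */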
+
+typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
+ dm_oblock_t oblock, uint32_t hint);
+
+/*
+ * The cache policy object. Just a bunch of methods. It is envisaged that
+ * this structure will be embedded in a bigger, policy specific structure
+ * (ie. use container_of()).
+ */
+struct dm_cache_policy {
+
+ /*
+ * FIXME: make it clear which methods are optional, and which may
+ * block.
+ */
+
+ /*
+ * Destroys this object.
+ */
+ void (*destroy)(struct dm_cache_policy *p);
+
+ /*
+ * See large comment above.
+ *
+ * oblock - the origin block we're interested in.
+ *
+ * can_block - indicates whether the current thread is allowed to
+ * block. -EWOULDBLOCK returned if it can't and would.
+ *
+ * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
+ * instructions. If denied and the policy would have
+ * returned one of these instructions it should
+ * return -EWOULDBLOCK.
+ *
+ * discarded_oblock - indicates whether the whole origin block is
+ * in a discarded state (FIXME: better to tell the
+ * policy about this sooner, so it can recycle that
+ * cache block if it wants.)
+ * bio - the bio that triggered this call.
+ * result - gets filled in with the instruction.
+ *
+ * May only return 0, or -EWOULDBLOCK (if !can_migrate)
+ */
+ int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result);
+
+ /*
+ * Sometimes we want to see if a block is in the cache, without
+ * triggering any update of stats. (ie. it's not a real hit).
+ *
+ * Must not block.
+ *
+ * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
+ * would be typical).
+ */
+ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
+
+ /*
+ * oblock must be a mapped block. Must not block.
+ */
+ void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+
+ /*
+ * Called when a cache target is first created. Used to load a
+ * mapping from the metadata device into the policy.
+ */
+ int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+
+ int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context);
+
+ /*
+ * Override functions used on the error paths of the core target.
+ * They must succeed.
+ */
+ void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
+ dm_oblock_t new_oblock);
+
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+
+
+ /*
+ * How full is the cache?
+ */
+ dm_cblock_t (*residency)(struct dm_cache_policy *p);
+
+ /*
+ * Because of where we sit in the block layer, we can be asked to
+ * map a lot of little bios that are all in the same block (no
+ * queue merging has occurred). To stop the policy being fooled by
+ * these, the core target sends regular tick() calls to the policy.
+ * The policy should only count an entry as hit once per tick.
+ */
+ void (*tick)(struct dm_cache_policy *p);
+
+ /*
+ * Configuration.
+ */
+ int (*emit_config_values)(struct dm_cache_policy *p,
+ char *result, unsigned maxlen);
+ int (*set_config_value)(struct dm_cache_policy *p,
+ const char *key, const char *value);
+
+ /*
+ * Bookkeeping pointer for the policy register; not for general use.
+ */
+ void *private;
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We maintain a little register of the different policy types.
+ */
+#define CACHE_POLICY_NAME_SIZE 16
+
+struct dm_cache_policy_type {
+ /* For use by the register code only. */
+ struct list_head list;
+
+ /*
+ * Policy writers should fill in these fields. The name field is
+ * what gets passed on the target line to select your policy.
+ */
+ char name[CACHE_POLICY_NAME_SIZE];
+
+ /*
+ * Policies may store a hint for each cache block.
+ * Currently the size of this hint must be 0 or 4 bytes but we
+ * expect to relax this in the future.
+ */
+ size_t hint_size;
+
+ struct module *owner;
+ struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t block_size);
+};
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type);
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
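+
+/*
+ * Illustrative sketch of how a policy module might plug into the above
+ * interface.  The "example" policy, struct example_policy and its trivial
+ * methods are hypothetical (no such policy ships with this target); a real
+ * policy must also implement the remaining methods the core calls (lookup,
+ * set_dirty/clear_dirty, load_mapping, residency, tick, etc.) and make
+ * genuine placement decisions in its map method:
+ *
+ *	struct example_policy {
+ *		struct dm_cache_policy policy;
+ *		dm_cblock_t cache_size;
+ *	};
+ *
+ *	static int example_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ *			       bool can_block, bool can_migrate,
+ *			       bool discarded_oblock, struct bio *bio,
+ *			       struct policy_result *result)
+ *	{
+ *		result->op = POLICY_MISS;	// never promote anything
+ *		return 0;
+ *	}
+ *
+ *	static void example_destroy(struct dm_cache_policy *p)
+ *	{
+ *		kfree(container_of(p, struct example_policy, policy));
+ *	}
+ *
+ *	static struct dm_cache_policy *example_create(dm_cblock_t cache_size,
+ *						      sector_t origin_size,
+ *						      sector_t block_size)
+ *	{
+ *		struct example_policy *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ *
+ *		if (!ep)
+ *			return NULL;
+ *
+ *		ep->cache_size = cache_size;
+ *		ep->policy.destroy = example_destroy;
+ *		ep->policy.map = example_map;
+ *		return &ep->policy;
+ *	}
+ *
+ *	static struct dm_cache_policy_type example_policy_type = {
+ *		.name = "example",
+ *		.hint_size = 0,
+ *		.owner = THIS_MODULE,
+ *		.create = example_create
+ *	};
+ *
+ * The module's init and exit hooks then just call
+ * dm_cache_policy_register(&example_policy_type) and
+ * dm_cache_policy_unregister(&example_policy_type).
+ */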
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_H */
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
new file mode 100644
index 000000000000..0f4e84b15c30
--- /dev/null
+++ b/drivers/md/dm-cache-target.c
@@ -0,0 +1,2584 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-bio-prison.h"
+#include "dm-cache-metadata.h"
+
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache"
+
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
+ "A percentage of time allocated for copying to and/or from cache");
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Glossary:
+ *
+ * oblock: index of an origin block
+ * cblock: index of a cache block
+ * promotion: movement of a block from origin to cache
+ * demotion: movement of a block from cache to origin
+ * migration: movement of a block between the origin and cache device,
+ * either direction
+ */
+
+/*----------------------------------------------------------------*/
+
+static size_t bitset_size_in_bytes(unsigned nr_entries)
+{
+ return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+}
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ return vzalloc(s);
+}
+
+static void clear_bitset(void *bitset, unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ memset(bitset, 0, s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+#define PRISON_CELLS 1024
+#define MIGRATION_POOL_SIZE 128
+#define COMMIT_PERIOD HZ
+#define MIGRATION_COUNT_WINDOW 10
+
+/*
+ * The block size of the device holding cache data must be >= 32KB
+ */
+#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+
+/*
+ * FIXME: the cache is read/write for the time being.
+ */
+enum cache_mode {
+ CM_WRITE, /* metadata may be changed */
+ CM_READ_ONLY, /* metadata may not be changed */
+};
+
+struct cache_features {
+ enum cache_mode mode;
+ bool write_through:1;
+};
+
+struct cache_stats {
+ atomic_t read_hit;
+ atomic_t read_miss;
+ atomic_t write_hit;
+ atomic_t write_miss;
+ atomic_t demotion;
+ atomic_t promotion;
+ atomic_t copies_avoided;
+ atomic_t cache_cell_clash;
+ atomic_t commit_count;
+ atomic_t discard_count;
+};
+
+struct cache {
+ struct dm_target *ti;
+ struct dm_target_callbacks callbacks;
+
+ /*
+ * Metadata is written to this device.
+ */
+ struct dm_dev *metadata_dev;
+
+ /*
+ * The slower of the two data devices. Typically a spindle.
+ */
+ struct dm_dev *origin_dev;
+
+ /*
+ * The faster of the two data devices. Typically an SSD.
+ */
+ struct dm_dev *cache_dev;
+
+ /*
+ * Cache features such as write-through.
+ */
+ struct cache_features features;
+
+ /*
+ * Size of the origin device in _complete_ blocks and native sectors.
+ */
+ dm_oblock_t origin_blocks;
+ sector_t origin_sectors;
+
+ /*
+ * Size of the cache device in blocks.
+ */
+ dm_cblock_t cache_size;
+
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+ uint32_t sectors_per_block;
+ int sectors_per_block_shift;
+
+ struct dm_cache_metadata *cmd;
+
+ spinlock_t lock;
+ struct bio_list deferred_bios;
+ struct bio_list deferred_flush_bios;
+ struct list_head quiesced_migrations;
+ struct list_head completed_migrations;
+ struct list_head need_commit_migrations;
+ sector_t migration_threshold;
+ atomic_t nr_migrations;
+ wait_queue_head_t migration_wait;
+
+ /*
+ * cache_size entries, dirty if set
+ */
+ dm_cblock_t nr_dirty;
+ unsigned long *dirty_bitset;
+
+ /*
+ * origin_blocks entries, discarded if set.
+ */
+ sector_t discard_block_size; /* a power of 2 times sectors per block */
+ dm_dblock_t discard_nr_blocks;
+ unsigned long *discard_bitset;
+
+ struct dm_kcopyd_client *copier;
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ struct delayed_work waker;
+ unsigned long last_commit_jiffies;
+
+ struct dm_bio_prison *prison;
+ struct dm_deferred_set *all_io_ds;
+
+ mempool_t *migration_pool;
+ struct dm_cache_migration *next_migration;
+
+ struct dm_cache_policy *policy;
+ unsigned policy_nr_args;
+
+ bool need_tick_bio:1;
+ bool sized:1;
+ bool quiescing:1;
+ bool commit_requested:1;
+ bool loaded_mappings:1;
+ bool loaded_discards:1;
+
+ struct cache_stats stats;
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
+};
+
+struct per_bio_data {
+ bool tick:1;
+ unsigned req_nr:2;
+ struct dm_deferred_entry *all_io_entry;
+};
+
+struct dm_cache_migration {
+ struct list_head list;
+ struct cache *cache;
+
+ unsigned long start_jiffies;
+ dm_oblock_t old_oblock;
+ dm_oblock_t new_oblock;
+ dm_cblock_t cblock;
+
+ bool err:1;
+ bool writeback:1;
+ bool demote:1;
+ bool promote:1;
+
+ struct dm_bio_prison_cell *old_ocell;
+ struct dm_bio_prison_cell *new_ocell;
+};
+
+/*
+ * Processing a bio in the worker thread may require these memory
+ * allocations. We prealloc to avoid deadlocks (the same worker thread
+ * frees them back to the mempool).
+ */
+struct prealloc {
+ struct dm_cache_migration *mg;
+ struct dm_bio_prison_cell *cell1;
+ struct dm_bio_prison_cell *cell2;
+};
+
+static void wake_worker(struct cache *cache)
+{
+ queue_work(cache->wq, &cache->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
+{
+ /* FIXME: change to use a local slab. */
+ return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
+}
+
+static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+ dm_bio_prison_free_cell(cache->prison, cell);
+}
+
+static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+{
+ if (!p->mg) {
+ p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (!p->mg)
+ return -ENOMEM;
+ }
+
+ if (!p->cell1) {
+ p->cell1 = alloc_prison_cell(cache);
+ if (!p->cell1)
+ return -ENOMEM;
+ }
+
+ if (!p->cell2) {
+ p->cell2 = alloc_prison_cell(cache);
+ if (!p->cell2)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
+{
+ if (p->cell2)
+ free_prison_cell(cache, p->cell2);
+
+ if (p->cell1)
+ free_prison_cell(cache, p->cell1);
+
+ if (p->mg)
+ mempool_free(p->mg, cache->migration_pool);
+}
+
+static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+{
+ struct dm_cache_migration *mg = p->mg;
+
+ BUG_ON(!mg);
+ p->mg = NULL;
+
+ return mg;
+}
+
+/*
+ * You must have a cell within the prealloc struct to return.  If not, this
+ * function will BUG() rather than returning NULL.
+ */
+static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
+{
+ struct dm_bio_prison_cell *r = NULL;
+
+ if (p->cell1) {
+ r = p->cell1;
+ p->cell1 = NULL;
+
+ } else if (p->cell2) {
+ r = p->cell2;
+ p->cell2 = NULL;
+ } else
+ BUG();
+
+ return r;
+}
+
+/*
+ * You can't have more than two cells in a prealloc struct. BUG() will be
+ * called if you try to overfill.
+ */
+static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
+{
+ if (!p->cell2)
+ p->cell2 = cell;
+
+ else if (!p->cell1)
+ p->cell1 = cell;
+
+ else
+ BUG();
+}
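+
+/*
+ * Typical use of the prealloc helpers, as a sketch (this mirrors the
+ * worker-thread code further down in this file; error handling elided):
+ *
+ *	struct prealloc structs;
+ *
+ *	memset(&structs, 0, sizeof(structs));
+ *	if (prealloc_data_structs(cache, &structs))
+ *		return;				// retry when memory frees up
+ *
+ *	cell = prealloc_get_cell(&structs);
+ *	// ... paths that don't consume the cell hand it back:
+ *	prealloc_put_cell(&structs, cell);
+ *
+ *	prealloc_free_structs(cache, &structs);	// frees whatever is left
+ */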
+
+/*----------------------------------------------------------------*/
+
+static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
+{
+ key->virtual = 0;
+ key->dev = 0;
+ key->block = from_oblock(oblock);
+}
+
+/*
+ * The caller hands in a preallocated cell, and a free function for it.
+ * The cell will be freed if there's an error, or if it wasn't used because
+ * a cell with that key already exists.
+ */
+typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
+
+static int bio_detain(struct cache *cache, dm_oblock_t oblock,
+ struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
+ cell_free_fn free_fn, void *free_context,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+
+ build_key(oblock, &key);
+ r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
+ if (r)
+ free_fn(free_context, cell_prealloc);
+
+ return r;
+}
+
+static int get_cell(struct cache *cache,
+ dm_oblock_t oblock,
+ struct prealloc *structs,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ cell_prealloc = prealloc_get_cell(structs);
+
+ build_key(oblock, &key);
+ r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
+ if (r)
+ prealloc_put_cell(structs, cell_prealloc);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static bool is_dirty(struct cache *cache, dm_cblock_t b)
+{
+ return test_bit(from_cblock(b), cache->dirty_bitset);
+}
+
+static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+ policy_set_dirty(cache->policy, oblock);
+ }
+}
+
+static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ policy_clear_dirty(cache->policy, oblock);
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+ if (!from_cblock(cache->nr_dirty))
+ dm_table_event(cache->ti->table);
+ }
+}
+
+/*----------------------------------------------------------------*/
+static bool block_size_is_power_of_two(struct cache *cache)
+{
+ return cache->sectors_per_block_shift >= 0;
+}
+
+static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
+{
+ sector_t discard_blocks = cache->discard_block_size;
+ dm_block_t b = from_oblock(oblock);
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(discard_blocks, cache->sectors_per_block);
+ else
+ discard_blocks >>= cache->sectors_per_block_shift;
+
+ (void) sector_div(b, discard_blocks);
+
+ return to_dblock(b);
+}
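+
+/*
+ * Worked example for oblock_to_dblock(), with illustrative numbers: if
+ * sectors_per_block = 128 and discard_block_size = 1024 sectors, each
+ * discard block covers 1024 / 128 = 8 origin blocks, so oblock 100 maps
+ * to dblock 100 / 8 = 12.
+ */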
+
+static void set_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ atomic_inc(&cache->stats.discard_count);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ set_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void clear_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ clear_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_discarded(struct cache *cache, dm_dblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
+ cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static void load_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ dm_cache_metadata_get_stats(cache->cmd, &stats);
+ atomic_set(&cache->stats.read_hit, stats.read_hits);
+ atomic_set(&cache->stats.read_miss, stats.read_misses);
+ atomic_set(&cache->stats.write_hit, stats.write_hits);
+ atomic_set(&cache->stats.write_miss, stats.write_misses);
+}
+
+static void save_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ stats.read_hits = atomic_read(&cache->stats.read_hit);
+ stats.read_misses = atomic_read(&cache->stats.read_miss);
+ stats.write_hits = atomic_read(&cache->stats.write_hit);
+ stats.write_misses = atomic_read(&cache->stats.write_miss);
+
+ dm_cache_metadata_set_stats(cache->cmd, &stats);
+}
+
+/*----------------------------------------------------------------
+ * Per bio data
+ *--------------------------------------------------------------*/
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ BUG_ON(!pb);
+ return pb;
+}
+
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ pb->tick = false;
+ pb->req_nr = dm_bio_get_target_bio_nr(bio);
+ pb->all_io_entry = NULL;
+
+ return pb;
+}
+
+/*----------------------------------------------------------------
+ * Remapping
+ *--------------------------------------------------------------*/
+static void remap_to_origin(struct cache *cache, struct bio *bio)
+{
+ bio->bi_bdev = cache->origin_dev->bdev;
+}
+
+static void remap_to_cache(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ sector_t bi_sector = bio->bi_sector;
+
+ bio->bi_bdev = cache->cache_dev->bdev;
+ if (!block_size_is_power_of_two(cache))
+ bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
+ else
+ bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
+}
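+
+/*
+ * Worked example for remap_to_cache(), with illustrative numbers: for
+ * sectors_per_block = 128 (shift = 7), cblock = 5 and bio->bi_sector = 1000,
+ * the offset within the block is 1000 & 127 = 104, so the bio is remapped to
+ * sector (5 << 7) | 104 = 744 on the cache device.  The non-power-of-two
+ * branch computes the same result with a division and remainder.
+ */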
+
+static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ if (cache->need_tick_bio &&
+ !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ pb->tick = true;
+ cache->need_tick_bio = false;
+ }
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+}
+
+static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ remap_to_cache(cache, bio, cblock);
+ if (bio_data_dir(bio) == WRITE) {
+ set_dirty(cache, oblock, cblock);
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+ }
+}
+
+static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
+{
+ sector_t block_nr = bio->bi_sector;
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(block_nr, cache->sectors_per_block);
+ else
+ block_nr >>= cache->sectors_per_block_shift;
+
+ return to_oblock(block_nr);
+}
+
+static int bio_triggers_commit(struct cache *cache, struct bio *bio)
+{
+ return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+}
+
+static void issue(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ if (!bio_triggers_commit(cache, bio)) {
+ generic_make_request(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in do_worker().
+ */
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->commit_requested = true;
+ bio_list_add(&cache->deferred_flush_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/*----------------------------------------------------------------
+ * Migration processing
+ *
+ * Migration covers moving data from the origin device to the cache, or
+ * vice versa.
+ *--------------------------------------------------------------*/
+static void free_migration(struct dm_cache_migration *mg)
+{
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
+static void inc_nr_migrations(struct cache *cache)
+{
+ atomic_inc(&cache->nr_migrations);
+}
+
+static void dec_nr_migrations(struct cache *cache)
+{
+ atomic_dec(&cache->nr_migrations);
+
+ /*
+ * Wake the worker in case we're suspending the target.
+ */
+ wake_up(&cache->migration_wait);
+}
+
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ (holder ? dm_cell_release : dm_cell_release_no_holder)
+ (cache->prison, cell, &cache->deferred_bios);
+ free_prison_cell(cache, cell);
+}
+
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __cell_defer(cache, cell, holder);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void cleanup_migration(struct dm_cache_migration *mg)
+{
+ dec_nr_migrations(mg->cache);
+ free_migration(mg);
+}
+
+static void migration_failure(struct dm_cache_migration *mg)
+{
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN_LIMIT("writeback failed; couldn't copy block");
+ set_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
+
+ } else if (mg->demote) {
+ DMWARN_LIMIT("demotion failed; couldn't copy block");
+ policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
+
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, 1);
+ } else {
+ DMWARN_LIMIT("promotion failed; couldn't copy block");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cell_defer(cache, mg->new_ocell, 1);
+ }
+
+ cleanup_migration(mg);
+}
+
+static void migration_success_pre_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ cell_defer(cache, mg->old_ocell, false);
+ clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cleanup_migration(mg);
+ return;
+
+ } else if (mg->demote) {
+ if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
+ DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
+ policy_force_mapping(cache->policy, mg->new_oblock,
+ mg->old_oblock);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, true);
+ cleanup_migration(mg);
+ return;
+ }
+ } else {
+ if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+ DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cleanup_migration(mg);
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->need_commit_migrations);
+ cache->commit_requested = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void migration_success_post_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN("writeback unexpectedly triggered commit");
+ return;
+
+ } else if (mg->demote) {
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+
+ if (mg->promote) {
+ mg->demote = false;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->quiesced_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ } else
+ cleanup_migration(mg);
+
+ } else {
+ cell_defer(cache, mg->new_ocell, true);
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
+ cleanup_migration(mg);
+ }
+}
+
+static void copy_complete(int read_err, unsigned long write_err, void *context)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
+ struct cache *cache = mg->cache;
+
+ if (read_err || write_err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_copy_real(struct dm_cache_migration *mg)
+{
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+ c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
+ /* demote */
+ o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
+ } else {
+ /* promote */
+ o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
+ }
+
+ if (r < 0)
+ migration_failure(mg);
+}
+
+static void avoid_copy(struct dm_cache_migration *mg)
+{
+ atomic_inc(&mg->cache->stats.copies_avoided);
+ migration_success_pre_commit(mg);
+}
+
+static void issue_copy(struct dm_cache_migration *mg)
+{
+ bool avoid;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback || mg->demote)
+ avoid = !is_dirty(cache, mg->cblock) ||
+ is_discarded_oblock(cache, mg->old_oblock);
+ else
+ avoid = is_discarded_oblock(cache, mg->new_oblock);
+
+ avoid ? avoid_copy(mg) : issue_copy_real(mg);
+}
+
+static void complete_migration(struct dm_cache_migration *mg)
+{
+ if (mg->err)
+ migration_failure(mg);
+ else
+ migration_success_pre_commit(mg);
+}
+
+static void process_migrations(struct cache *cache, struct list_head *head,
+ void (*fn)(struct dm_cache_migration *))
+{
+ unsigned long flags;
+ struct list_head list;
+ struct dm_cache_migration *mg, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice_init(head, &list);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ list_for_each_entry_safe(mg, tmp, &list, list)
+ fn(mg);
+}
+
+static void __queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
+}
+
+static void queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg, *tmp;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_for_each_entry_safe(mg, tmp, work, list)
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void check_for_quiesced_migrations(struct cache *cache,
+ struct per_bio_data *pb)
+{
+ struct list_head work;
+
+ if (!pb->all_io_entry)
+ return;
+
+ INIT_LIST_HEAD(&work);
+ if (pb->all_io_entry)
+ dm_deferred_entry_dec(pb->all_io_entry, &work);
+
+ if (!list_empty(&work))
+ queue_quiesced_migrations(cache, &work);
+}
+
+static void quiesce_migration(struct dm_cache_migration *mg)
+{
+ if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
+ queue_quiesced_migration(mg);
+}
+
+static void promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = false;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->new_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = NULL;
+ mg->new_ocell = cell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void writeback(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = true;
+ mg->demote = false;
+ mg->promote = false;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void demote_then_promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t old_oblock, dm_oblock_t new_oblock,
+ dm_cblock_t cblock,
+ struct dm_bio_prison_cell *old_ocell,
+ struct dm_bio_prison_cell *new_ocell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->old_oblock = old_oblock;
+ mg->new_oblock = new_oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = old_ocell;
+ mg->new_ocell = new_ocell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+/*----------------------------------------------------------------
+ * bio processing
+ *--------------------------------------------------------------*/
+static void defer_bio(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_add(&cache->deferred_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void process_flush_bio(struct cache *cache, struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ BUG_ON(bio->bi_size);
+ if (!pb->req_nr)
+ remap_to_origin(cache, bio);
+ else
+ remap_to_cache(cache, bio, 0);
+
+ issue(cache, bio);
+}
+
+/*
+ * People generally discard large parts of a device, e.g. the whole device
+ * when formatting.  Splitting these large discards up into cache block
+ * sized ios and then quiescing (always necessary for discard) takes too
+ * long.
+ *
+ * We keep it simple, and allow any size of discard to come in, and just
+ * mark off blocks on the discard bitset. No passdown occurs!
+ *
+ * To implement passdown we need to change the bio_prison such that a cell
+ * can have a key that spans many blocks.
+ */
+static void process_discard_bio(struct cache *cache, struct bio *bio)
+{
+ dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ cache->discard_block_size);
+ dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t b;
+
+ (void) sector_div(end_block, cache->discard_block_size);
+
+ for (b = start_block; b < end_block; b++)
+ set_discard(cache, to_dblock(b));
+
+ bio_endio(bio, 0);
+}
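+
+/*
+ * Worked example, with illustrative numbers: for a discard_block_size of
+ * 1024 sectors, a discard covering sectors [3000, 10000) gives
+ * start_block = 3 and end_block = 9, so discard blocks 3..8 are marked.
+ * The partially covered blocks 2 and 9 are conservatively left unmarked.
+ */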
+
+static bool spare_migration_bandwidth(struct cache *cache)
+{
+ sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ cache->sectors_per_block;
+ return current_volume < cache->migration_threshold;
+}
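+
+/*
+ * Worked example, with illustrative numbers: with 2048 sector (1MiB) cache
+ * blocks and the default migration_threshold of 204800 sectors (see
+ * DEFAULT_MIGRATION_THRESHOLD below), spare bandwidth is reported while
+ * fewer than 99 migrations are already in flight.
+ */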
+
+static bool is_writethrough_io(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ return bio_data_dir(bio) == WRITE &&
+ cache->features.write_through && !is_dirty(cache, cblock);
+}
+
+static void inc_hit_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_hit : &cache->stats.write_hit);
+}
+
+static void inc_miss_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_miss : &cache->stats.write_miss);
+}
+
+static void process_bio(struct cache *cache, struct prealloc *structs,
+ struct bio *bio)
+{
+ int r;
+ bool release_cell = true;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+ bool discarded_block = is_discarded_oblock(cache, block);
+ bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, block, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &new_ocell);
+ if (r > 0)
+ return;
+
+ r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+ bio, &lookup_result);
+
+ if (r == -EWOULDBLOCK)
+ /* migration has been denied */
+ lookup_result.op = POLICY_MISS;
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ pb->req_nr == 0 ?
+ remap_to_cache(cache, bio, lookup_result.cblock) :
+ remap_to_origin_clear_discard(cache, bio, block);
+ } else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+ issue(cache, bio);
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ break;
+
+ case POLICY_NEW:
+ atomic_inc(&cache->stats.promotion);
+ promote(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+ break;
+
+ case POLICY_REPLACE:
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &old_ocell);
+ if (r > 0) {
+ /*
+ * We have to be careful to avoid lock inversion of
+ * the cells. So we back off, and wait for the
+ * old_ocell to become free.
+ */
+ policy_force_mapping(cache->policy, block,
+ lookup_result.old_oblock);
+ atomic_inc(&cache->stats.cache_cell_clash);
+ break;
+ }
+ atomic_inc(&cache->stats.demotion);
+ atomic_inc(&cache->stats.promotion);
+
+ demote_then_promote(cache, structs, lookup_result.old_oblock,
+ block, lookup_result.cblock,
+ old_ocell, new_ocell);
+ release_cell = false;
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ }
+
+ if (release_cell)
+ cell_defer(cache, new_ocell, false);
+}
+
+static int need_commit_due_to_time(struct cache *cache)
+{
+ return jiffies < cache->last_commit_jiffies ||
+ jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+}
+
+static int commit_if_needed(struct cache *cache)
+{
+ if (dm_cache_changed_this_transaction(cache->cmd) &&
+ (cache->commit_requested || need_commit_due_to_time(cache))) {
+ atomic_inc(&cache->stats.commit_count);
+ cache->last_commit_jiffies = jiffies;
+ cache->commit_requested = false;
+ return dm_cache_commit(cache->cmd, false);
+ }
+
+ return 0;
+}
+
+static void process_deferred_bios(struct cache *cache)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+ struct prealloc structs;
+
+ memset(&structs, 0, sizeof(structs));
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while (!bio_list_empty(&bios)) {
+ /*
+ * If we've got no free migration structs, and processing
+ * this bio might require one, we pause until there are some
+ * prepared mappings to process.
+ */
+ if (prealloc_data_structs(cache, &structs)) {
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&cache->deferred_bios, &bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+ break;
+ }
+
+ bio = bio_list_pop(&bios);
+
+ if (bio->bi_rw & REQ_FLUSH)
+ process_flush_bio(cache, bio);
+ else if (bio->bi_rw & REQ_DISCARD)
+ process_discard_bio(cache, bio);
+ else
+ process_bio(cache, &structs, bio);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_flush_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ submit_bios ? generic_make_request(bio) : bio_io_error(bio);
+}
+
+static void writeback_some_dirty_blocks(struct cache *cache)
+{
+ int r = 0;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ struct prealloc structs;
+ struct dm_bio_prison_cell *old_ocell;
+
+ memset(&structs, 0, sizeof(structs));
+
+ while (spare_migration_bandwidth(cache)) {
+ if (prealloc_data_structs(cache, &structs))
+ break;
+
+ r = policy_writeback_work(cache->policy, &oblock, &cblock);
+ if (r)
+ break;
+
+ r = get_cell(cache, oblock, &structs, &old_ocell);
+ if (r) {
+ policy_set_dirty(cache->policy, oblock);
+ break;
+ }
+
+ writeback(cache, &structs, oblock, cblock, old_ocell);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+static void start_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 1;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void stop_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 0;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_quiescing(struct cache *cache)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = cache->quiescing;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static void wait_for_migrations(struct cache *cache)
+{
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+}
+
+static void stop_worker(struct cache *cache)
+{
+ cancel_delayed_work(&cache->waker);
+ flush_workqueue(cache->wq);
+}
+
+static void requeue_deferred_io(struct cache *cache)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+}
+
+static int more_work(struct cache *cache)
+{
+ if (is_quiescing(cache))
+ return !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+ else
+ return !bio_list_empty(&cache->deferred_bios) ||
+ !bio_list_empty(&cache->deferred_flush_bios) ||
+ !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+}
+
+static void do_worker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(ws, struct cache, worker);
+
+ do {
+ if (!is_quiescing(cache))
+ process_deferred_bios(cache);
+
+ process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+ process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+ writeback_some_dirty_blocks(cache);
+
+ if (commit_if_needed(cache)) {
+ process_deferred_flush_bios(cache, false);
+
+ /*
+ * FIXME: rollback metadata or just go into a
+ * failure mode and error everything
+ */
+ } else {
+ process_deferred_flush_bios(cache, true);
+ process_migrations(cache, &cache->need_commit_migrations,
+ migration_success_post_commit);
+ }
+ } while (more_work(cache));
+}
+
+/*
+ * We want to commit periodically so that not too much
+ * unwritten metadata builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+ wake_worker(cache);
+ queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
+}
+
+/*----------------------------------------------------------------*/
+
+static int is_congested(struct dm_dev *dev, int bdi_bits)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+ struct cache *cache = container_of(cb, struct cache, callbacks);
+
+ return is_congested(cache->origin_dev, bdi_bits) ||
+ is_congested(cache->cache_dev, bdi_bits);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+
+/*
+ * This function gets called on the error paths of the constructor, so we
+ * have to cope with a partially initialised struct.
+ */
+static void destroy(struct cache *cache)
+{
+ unsigned i;
+
+ if (cache->next_migration)
+ mempool_free(cache->next_migration, cache->migration_pool);
+
+ if (cache->migration_pool)
+ mempool_destroy(cache->migration_pool);
+
+ if (cache->all_io_ds)
+ dm_deferred_set_destroy(cache->all_io_ds);
+
+ if (cache->prison)
+ dm_bio_prison_destroy(cache->prison);
+
+ if (cache->wq)
+ destroy_workqueue(cache->wq);
+
+ if (cache->dirty_bitset)
+ free_bitset(cache->dirty_bitset);
+
+ if (cache->discard_bitset)
+ free_bitset(cache->discard_bitset);
+
+ if (cache->copier)
+ dm_kcopyd_client_destroy(cache->copier);
+
+ if (cache->cmd)
+ dm_cache_metadata_close(cache->cmd);
+
+ if (cache->metadata_dev)
+ dm_put_device(cache->ti, cache->metadata_dev);
+
+ if (cache->origin_dev)
+ dm_put_device(cache->ti, cache->origin_dev);
+
+ if (cache->cache_dev)
+ dm_put_device(cache->ti, cache->cache_dev);
+
+ if (cache->policy)
+ dm_cache_policy_destroy(cache->policy);
+
+ for (i = 0; i < cache->nr_ctr_args ; i++)
+ kfree(cache->ctr_args[i]);
+ kfree(cache->ctr_args);
+
+ kfree(cache);
+}
+
+static void cache_dtr(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ destroy(cache);
+}
+
+static sector_t get_dev_size(struct dm_dev *dev)
+{
+ return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Construct a cache device mapping.
+ *
+ * cache <metadata dev> <cache dev> <origin dev> <block size>
+ * <#feature args> [<feature arg>]*
+ * <policy> <#policy args> [<policy arg>]*
+ *
+ * metadata dev : fast device holding the persistent metadata
+ * cache dev : fast device holding cached data blocks
+ * origin dev : slow device holding original data blocks
+ * block size : cache unit size in sectors
+ *
+ * #feature args : number of feature arguments passed
+ * feature args : writethrough. (The default is writeback.)
+ *
+ * policy : the replacement policy to use
+ * #policy args : an even number of policy arguments corresponding
+ * to key/value pairs passed to the policy
+ * policy args : key/value pairs passed to the policy
+ * E.g. 'sequential_threshold 1024'
+ * See cache-policies.txt for details.
+ *
+ * Optional feature arguments are:
+ * writethrough : write through caching that prohibits cache block
+ * content from being different from origin block content.
+ * Without this argument, the default behaviour is to write
+ * back cache block contents later for performance reasons,
+ * so they may differ from the corresponding origin blocks.
+ */
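+
+/*
+ * Example table line (a sketch; the device names, sizes and the choice of
+ * the "default" policy are illustrative assumptions only):
+ *
+ *	dmsetup create cached --table \
+ *	  '0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-cache /dev/sdd 512 1 writethrough default 0'
+ *
+ * i.e. a 20GiB mapped device using 512 sector (256KiB) cache blocks, the
+ * writethrough feature, and a policy selected by name with no policy
+ * arguments.
+ */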
+struct cache_args {
+ struct dm_target *ti;
+
+ struct dm_dev *metadata_dev;
+
+ struct dm_dev *cache_dev;
+ sector_t cache_sectors;
+
+ struct dm_dev *origin_dev;
+ sector_t origin_sectors;
+
+ uint32_t block_size;
+
+ const char *policy_name;
+ int policy_argc;
+ const char **policy_argv;
+
+ struct cache_features features;
+};
+
+static void destroy_cache_args(struct cache_args *ca)
+{
+ if (ca->metadata_dev)
+ dm_put_device(ca->ti, ca->metadata_dev);
+
+ if (ca->cache_dev)
+ dm_put_device(ca->ti, ca->cache_dev);
+
+ if (ca->origin_dev)
+ dm_put_device(ca->ti, ca->origin_dev);
+
+ kfree(ca);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+ sector_t metadata_dev_size;
+ char b[BDEVNAME_SIZE];
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->metadata_dev);
+ if (r) {
+ *error = "Error opening metadata device";
+ return r;
+ }
+
+ metadata_dev_size = get_dev_size(ca->metadata_dev);
+ if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
+ DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+ bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
+
+ return 0;
+}
+
+static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->cache_dev);
+ if (r) {
+ *error = "Error opening cache device";
+ return r;
+ }
+ ca->cache_sectors = get_dev_size(ca->cache_dev);
+
+ return 0;
+}
+
+static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->origin_dev);
+ if (r) {
+ *error = "Error opening origin device";
+ return r;
+ }
+
+ ca->origin_sectors = get_dev_size(ca->origin_dev);
+ if (ca->ti->len > ca->origin_sectors) {
+ *error = "Device size larger than cached device";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ unsigned long tmp;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
+ tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ *error = "Invalid data block size";
+ return -EINVAL;
+ }
+
+ if (tmp > ca->cache_sectors) {
+ *error = "Data block size is larger than the cache device";
+ return -EINVAL;
+ }
+
+ ca->block_size = tmp;
+
+ return 0;
+}
+
+static void init_features(struct cache_features *cf)
+{
+ cf->mode = CM_WRITE;
+ cf->write_through = false;
+}
+
+static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of cache feature arguments"},
+ };
+
+ int r;
+ unsigned argc;
+ const char *arg;
+ struct cache_features *cf = &ca->features;
+
+ init_features(cf);
+
+ r = dm_read_arg_group(_args, as, &argc, error);
+ if (r)
+ return -EINVAL;
+
+ while (argc--) {
+ arg = dm_shift_arg(as);
+
+ if (!strcasecmp(arg, "writeback"))
+ cf->write_through = false;
+
+ else if (!strcasecmp(arg, "writethrough"))
+ cf->write_through = true;
+
+ else {
+ *error = "Unrecognised cache feature requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1024, "Invalid number of policy arguments"},
+ };
+
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ca->policy_name = dm_shift_arg(as);
+
+ r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
+ if (r)
+ return -EINVAL;
+
+ ca->policy_argv = (const char **)as->argv;
+ dm_consume_args(as, ca->policy_argc);
+
+ return 0;
+}
+
+static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
+ char **error)
+{
+ int r;
+ struct dm_arg_set as;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ r = parse_metadata_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_cache_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_origin_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_block_size(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_features(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_policy(ca, &as, error);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *migration_cache;
+
+static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+{
+ int r = 0;
+
+ if (argc & 1) {
+ DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
+ return -EINVAL;
+ }
+
+ while (argc) {
+ r = policy_set_config_value(p, argv[0], argv[1]);
+ if (r) {
+ DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
+ argv[0], argv[1]);
+ return r;
+ }
+
+ argc -= 2;
+ argv += 2;
+ }
+
+ return r;
+}
+
+static int create_cache_policy(struct cache *cache, struct cache_args *ca,
+ char **error)
+{
+ int r;
+
+ cache->policy = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (!cache->policy) {
+ *error = "Error creating cache's policy";
+ return -ENOMEM;
+ }
+
+ r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
+ if (r)
+ dm_cache_policy_destroy(cache->policy);
+
+ return r;
+}
+
+/*
+ * We want the discard block size to be a power of two, at least as large
+ * as the cache block size, and to have no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
+static bool too_many_discard_blocks(sector_t discard_block_size,
+ sector_t origin_size)
+{
+ (void) sector_div(origin_size, discard_block_size);
+
+ return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+ sector_t origin_size)
+{
+ sector_t discard_block_size;
+
+ discard_block_size = roundup_pow_of_two(cache_block_size);
+
+ if (origin_size)
+ while (too_many_discard_blocks(discard_block_size, origin_size))
+ discard_block_size *= 2;
+
+ return discard_block_size;
+}
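+
+/*
+ * Worked example, with illustrative numbers: for a 512 sector cache block
+ * and a 16GiB (33554432 sector) origin, roundup_pow_of_two(512) = 512 would
+ * give 65536 discard blocks, so the size is doubled twice to 2048 sectors,
+ * leaving exactly MAX_DISCARD_BLOCKS (16384) discard blocks across the
+ * origin.
+ */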
+
+#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
+
+static int cache_create(struct cache_args *ca, struct cache **result)
+{
+ int r = 0;
+ char **error = &ca->ti->error;
+ struct cache *cache;
+ struct dm_target *ti = ca->ti;
+ dm_block_t origin_blocks;
+ struct dm_cache_metadata *cmd;
+ bool may_format = ca->features.mode == CM_WRITE;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return -ENOMEM;
+
+ cache->ti = ca->ti;
+ ti->private = cache;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->num_flush_bios = 2;
+ ti->flush_supported = true;
+
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->discard_zeroes_data_unsupported = true;
+
+ memcpy(&cache->features, &ca->features, sizeof(cache->features));
+
+ if (cache->features.write_through)
+ ti->num_write_bios = cache_num_write_bios;
+
+ cache->callbacks.congested_fn = cache_is_congested;
+ dm_table_add_target_callbacks(ti->table, &cache->callbacks);
+
+ cache->metadata_dev = ca->metadata_dev;
+ cache->origin_dev = ca->origin_dev;
+ cache->cache_dev = ca->cache_dev;
+
+ ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
+
+ /* FIXME: factor out this whole section */
+ origin_blocks = cache->origin_sectors = ca->origin_sectors;
+ (void) sector_div(origin_blocks, ca->block_size);
+ cache->origin_blocks = to_oblock(origin_blocks);
+
+ cache->sectors_per_block = ca->block_size;
+ if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (ca->block_size & (ca->block_size - 1)) {
+ dm_block_t cache_size = ca->cache_sectors;
+
+ cache->sectors_per_block_shift = -1;
+ (void) sector_div(cache_size, ca->block_size);
+ cache->cache_size = to_cblock(cache_size);
+ } else {
+ cache->sectors_per_block_shift = __ffs(ca->block_size);
+ cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
+ }
+
+ r = create_cache_policy(cache, ca, error);
+ if (r)
+ goto bad;
+ cache->policy_nr_args = ca->policy_argc;
+
+ cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
+ ca->block_size, may_format,
+ dm_cache_policy_get_hint_size(cache->policy));
+ if (IS_ERR(cmd)) {
+ *error = "Error creating metadata object";
+ r = PTR_ERR(cmd);
+ goto bad;
+ }
+ cache->cmd = cmd;
+
+ spin_lock_init(&cache->lock);
+ bio_list_init(&cache->deferred_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ INIT_LIST_HEAD(&cache->quiesced_migrations);
+ INIT_LIST_HEAD(&cache->completed_migrations);
+ INIT_LIST_HEAD(&cache->need_commit_migrations);
+ cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+ atomic_set(&cache->nr_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
+ cache->nr_dirty = 0;
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->dirty_bitset) {
+ *error = "could not allocate dirty bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+
+ cache->discard_block_size =
+ calculate_discard_block_size(cache->sectors_per_block,
+ cache->origin_sectors);
+ cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+ cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+ if (!cache->discard_bitset) {
+ *error = "could not allocate discard bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+
+ cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+ if (IS_ERR(cache->copier)) {
+ *error = "could not create kcopyd client";
+ r = PTR_ERR(cache->copier);
+ goto bad;
+ }
+
+ cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!cache->wq) {
+ *error = "could not create workqueue for metadata object";
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_WORK(&cache->worker, do_worker);
+ INIT_DELAYED_WORK(&cache->waker, do_waker);
+ cache->last_commit_jiffies = jiffies;
+
+ cache->prison = dm_bio_prison_create(PRISON_CELLS);
+ if (!cache->prison) {
+ *error = "could not create bio prison";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->all_io_ds = dm_deferred_set_create();
+ if (!cache->all_io_ds) {
+ *error = "could not create all_io deferred set";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
+ migration_cache);
+ if (!cache->migration_pool) {
+ *error = "Error creating cache's migration mempool";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->next_migration = NULL;
+
+ cache->need_tick_bio = true;
+ cache->sized = false;
+ cache->quiescing = false;
+ cache->commit_requested = false;
+ cache->loaded_mappings = false;
+ cache->loaded_discards = false;
+
+ load_stats(cache);
+
+ atomic_set(&cache->stats.demotion, 0);
+ atomic_set(&cache->stats.promotion, 0);
+ atomic_set(&cache->stats.copies_avoided, 0);
+ atomic_set(&cache->stats.cache_cell_clash, 0);
+ atomic_set(&cache->stats.commit_count, 0);
+ atomic_set(&cache->stats.discard_count, 0);
+
+ *result = cache;
+ return 0;
+
+bad:
+ destroy(cache);
+ return r;
+}
+
+static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+{
+ unsigned i;
+ const char **copy;
+
+ copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ for (i = 0; i < argc; i++) {
+ copy[i] = kstrdup(argv[i], GFP_KERNEL);
+ if (!copy[i]) {
+ while (i--)
+ kfree(copy[i]);
+ kfree(copy);
+ return -ENOMEM;
+ }
+ }
+
+ cache->nr_ctr_args = argc;
+ cache->ctr_args = copy;
+
+ return 0;
+}
+
+static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct cache_args *ca;
+ struct cache *cache = NULL;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
+ ti->error = "Error allocating memory for cache";
+ return -ENOMEM;
+ }
+ ca->ti = ti;
+
+ r = parse_cache_args(ca, argc, argv, &ti->error);
+ if (r)
+ goto out;
+
+ r = cache_create(ca, &cache);
+ if (r)
+ goto out;
+
+ r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
+ if (r) {
+ destroy(cache);
+ goto out;
+ }
+
+ ti->private = cache;
+
+out:
+ destroy_cache_args(ca);
+ return r;
+}
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
+{
+ int r;
+ struct cache *cache = ti->private;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ dm_cblock_t cblock;
+
+ r = policy_lookup(cache->policy, block, &cblock);
+ if (r < 0)
+ return 2; /* assume the worst */
+
+ return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
+}
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+ struct cache *cache = ti->private;
+
+ int r;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ bool can_migrate = false;
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb;
+
+ if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+ remap_to_origin_clear_discard(cache, bio, block);
+ return DM_MAPIO_REMAPPED;
+ }
+
+ pb = init_per_bio_data(bio);
+
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell = alloc_prison_cell(cache);
+ if (!cell) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ r = bio_detain(cache, block, bio, cell,
+ (cell_free_fn) free_prison_cell,
+ cache, &cell);
+ if (r) {
+ if (r < 0)
+ defer_bio(cache, bio);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ discarded_block = is_discarded_oblock(cache, block);
+
+ r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+ bio, &lookup_result);
+ if (r == -EWOULDBLOCK) {
+ cell_defer(cache, cell, true);
+ return DM_MAPIO_SUBMITTED;
+
+ } else if (r) {
+ DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ pb->req_nr == 0 ?
+ remap_to_cache(cache, bio, lookup_result.cblock) :
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ } else {
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ cell_defer(cache, cell, false);
+ return DM_MAPIO_SUBMITTED;
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+ struct cache *cache = ti->private;
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ if (pb->tick) {
+ policy_tick(cache->policy);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->need_tick_bio = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+ }
+
+ check_for_quiesced_migrations(cache, pb);
+
+ return 0;
+}
+
+static int write_dirty_bitset(struct cache *cache)
+{
+ unsigned i, r;
+
+ for (i = 0; i < from_cblock(cache->cache_size); i++) {
+ r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
+ is_dirty(cache, to_cblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int write_discard_bitset(struct cache *cache)
+{
+ unsigned i, r;
+
+ r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
+ cache->discard_nr_blocks);
+ if (r) {
+ DMERR("could not resize on-disk discard bitset");
+ return r;
+ }
+
+ for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
+ r = dm_cache_set_discard(cache->cmd, to_dblock(i),
+ is_discarded(cache, to_dblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
+ uint32_t hint)
+{
+ struct cache *cache = context;
+ return dm_cache_save_hint(cache->cmd, cblock, hint);
+}
+
+static int write_hints(struct cache *cache)
+{
+ int r;
+
+ r = dm_cache_begin_hints(cache->cmd, cache->policy);
+ if (r) {
+ DMERR("dm_cache_begin_hints failed");
+ return r;
+ }
+
+ r = policy_walk_mappings(cache->policy, save_hint, cache);
+ if (r)
+ DMERR("policy_walk_mappings failed");
+
+ return r;
+}
+
+/*
+ * returns true on success
+ */
+static bool sync_metadata(struct cache *cache)
+{
+ int r1, r2, r3, r4;
+
+ r1 = write_dirty_bitset(cache);
+ if (r1)
+ DMERR("could not write dirty bitset");
+
+ r2 = write_discard_bitset(cache);
+ if (r2)
+ DMERR("could not write discard bitset");
+
+ save_stats(cache);
+
+ r3 = write_hints(cache);
+ if (r3)
+ DMERR("could not write hints");
+
+ /*
+ * If writing the above metadata failed, we still commit, but don't
+ * set the clean shutdown flag. This will effectively force every
+ * dirty bit to be set on reload.
+ */
+ r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
+ if (r4)
+ DMERR("could not write cache metadata. Data loss may occur.");
+
+ return !r1 && !r2 && !r3 && !r4;
+}
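The comment above has a concrete consequence on the next activation: the commit is always attempted, but the clean-shutdown flag is only set when every metadata write succeeded, so a loader that does not find the flag must treat every cached block as dirty and write it back. A small sketch of that reload-side rule, using hypothetical names (load_dirty_bit is not a kernel function):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical reload-side view of the clean-shutdown contract. */
static bool load_dirty_bit(bool clean_shutdown, bool on_disk_dirty)
{
	/*
	 * Without a clean shutdown we cannot trust the persisted dirty
	 * bitset, so every block is assumed dirty and will be written
	 * back to the origin.
	 */
	if (!clean_shutdown)
		return true;

	return on_disk_dirty;
}

int main(void)
{
	printf("clean shutdown,   block clean -> dirty? %d\n", load_dirty_bit(true, false));
	printf("clean shutdown,   block dirty -> dirty? %d\n", load_dirty_bit(true, true));
	printf("unclean shutdown, block clean -> dirty? %d\n", load_dirty_bit(false, false));
	return 0;
}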
+
+static void cache_postsuspend(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ start_quiescing(cache);
+ wait_for_migrations(cache);
+ stop_worker(cache);
+ requeue_deferred_io(cache);
+ stop_quiescing(cache);
+
+ (void) sync_metadata(cache);
+}
+
+static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+ bool dirty, uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct cache *cache = context;
+
+ r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
+ if (r)
+ return r;
+
+ if (dirty)
+ set_dirty(cache, oblock, cblock);
+ else
+ clear_dirty(cache, oblock, cblock);
+
+ return 0;
+}
+
+static int load_discard(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discard)
+{
+ struct cache *cache = context;
+
+ /* FIXME: handle mis-matched block size */
+
+ if (discard)
+ set_discard(cache, dblock);
+ else
+ clear_discard(cache, dblock);
+
+ return 0;
+}
+
+static int cache_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+ sector_t actual_cache_size = get_dev_size(cache->cache_dev);
+ (void) sector_div(actual_cache_size, cache->sectors_per_block);
+
+ /*
+ * Check to see if the cache has resized.
+ */
+ if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
+ cache->cache_size = to_cblock(actual_cache_size);
+
+ r = dm_cache_resize(cache->cmd, cache->cache_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->sized = true;
+ }
+
+ if (!cache->loaded_mappings) {
+ r = dm_cache_load_mappings(cache->cmd,
+ dm_cache_policy_get_name(cache->policy),
+ load_mapping, cache);
+ if (r) {
+ DMERR("could not load cache mappings");
+ return r;
+ }
+
+ cache->loaded_mappings = true;
+ }
+
+ if (!cache->loaded_discards) {
+ r = dm_cache_load_discards(cache->cmd, load_discard, cache);
+ if (r) {
+ DMERR("could not load origin discards");
+ return r;
+ }
+
+ cache->loaded_discards = true;
+ }
+
+ return r;
+}
+
+static void cache_resume(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ cache->need_tick_bio = true;
+ do_waker(&cache->waker.work);
+}
+
+/*
+ * Status format:
+ *
+ * <#used metadata blocks>/<#total metadata blocks>
+ * <#read hits> <#read misses> <#write hits> <#write misses>
+ * <#demotions> <#promotions> <#blocks in cache> <#dirty>
+ * <#features> <features>*
+ * <#core args> <core args>
+ * <#policy args> <policy args>*
+ */
+static void cache_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ int r = 0;
+ unsigned i;
+ ssize_t sz = 0;
+ dm_block_t nr_free_blocks_metadata = 0;
+ dm_block_t nr_blocks_metadata = 0;
+ char buf[BDEVNAME_SIZE];
+ struct cache *cache = ti->private;
+ dm_cblock_t residency;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
+ r = dm_cache_commit(cache->cmd, false);
+ if (r)
+ DMERR("could not commit metadata for accurate status");
+ }
+
+ r = dm_cache_get_free_metadata_block_count(cache->cmd,
+ &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata free block count");
+ goto err;
+ }
+
+ r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata device size");
+ goto err;
+ }
+
+ residency = policy_residency(cache->policy);
+
+ DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+ (unsigned) atomic_read(&cache->stats.read_hit),
+ (unsigned) atomic_read(&cache->stats.read_miss),
+ (unsigned) atomic_read(&cache->stats.write_hit),
+ (unsigned) atomic_read(&cache->stats.write_miss),
+ (unsigned) atomic_read(&cache->stats.demotion),
+ (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned long long) from_cblock(residency),
+ cache->nr_dirty);
+
+ if (cache->features.write_through)
+ DMEMIT("1 writethrough ");
+ else
+ DMEMIT("0 ");
+
+ DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
+ if (sz < maxlen) {
+ r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
+ if (r)
+ DMERR("policy_emit_config_values returned %d", r);
+ }
+
+ break;
+
+ case STATUSTYPE_TABLE:
+ format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+
+ for (i = 0; i < cache->nr_ctr_args - 1; i++)
+ DMEMIT(" %s", cache->ctr_args[i]);
+ if (cache->nr_ctr_args)
+ DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
+ }
+
+ return;
+
+err:
+ DMEMIT("Error");
+}
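The STATUSTYPE_INFO branch above emits exactly the fields documented in the format comment before cache_status(), so a userspace tool can recover the counters with one sscanf(). A standalone sketch, parsing a made-up example line (the numbers are illustrative only):

#include <stdio.h>

int main(void)
{
	/* Example status line, fields as documented above: metadata usage,
	 * four hit/miss counters, demotions, promotions, residency, dirty
	 * count, then the feature and config arguments. */
	const char *line = "23/4096 1024 17 800 40 5 9 512 3 1 writethrough";
	unsigned long long md_used, md_total, residency;
	unsigned rh, rm, wh, wm, demotions, promotions, dirty;

	if (sscanf(line, "%llu/%llu %u %u %u %u %u %u %llu %u",
		   &md_used, &md_total, &rh, &rm, &wh, &wm,
		   &demotions, &promotions, &residency, &dirty) != 10) {
		fprintf(stderr, "unexpected status format\n");
		return 1;
	}

	printf("metadata %llu/%llu, read hit ratio %.2f, %llu blocks resident, %u dirty\n",
	       md_used, md_total, (double)rh / (rh + rm), residency, dirty);
	return 0;
}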
+
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, char **argv)
+{
+ unsigned long tmp;
+
+ if (!strcasecmp(argv[0], "migration_threshold")) {
+ if (kstrtoul(argv[1], 10, &tmp))
+ return -EINVAL;
+
+ cache->migration_threshold = tmp;
+ return 0;
+ }
+
+ return NOT_CORE_OPTION;
+}
+
+/*
+ * Supports <key> <value>.
+ *
+ * The key migration_threshold is supported by the cache target core.
+ */
+static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ struct cache *cache = ti->private;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ r = process_config_option(cache, argv);
+ if (r == NOT_CORE_OPTION)
+ return policy_set_config_value(cache->policy, argv[0], argv[1]);
+
+ return r;
+}
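A message therefore takes the form <key> <value>, invoked with something like "dmsetup message my-cache 0 migration_threshold 2048"; the core consumes the keys it knows and everything else falls through to the policy. A userspace sketch of that two-level dispatch, where policy_set() is a hypothetical stand-in for policy_set_config_value():

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#define NOT_CORE_OPTION 1

static unsigned long migration_threshold = 2048;

/* Core options are handled here; everything else is left to the policy. */
static int process_config_option(const char *key, const char *value)
{
	if (!strcasecmp(key, "migration_threshold")) {
		char *end;
		unsigned long tmp = strtoul(value, &end, 10);

		if (*end)
			return -1;	/* -EINVAL in the kernel */

		migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

/* Hypothetical stand-in for policy_set_config_value(). */
static int policy_set(const char *key, const char *value)
{
	printf("policy option: %s = %s\n", key, value);
	return 0;
}

static int cache_message(int argc, char **argv)
{
	int r;

	if (argc != 2)
		return -1;

	r = process_config_option(argv[0], argv[1]);
	if (r == NOT_CORE_OPTION)
		return policy_set(argv[0], argv[1]);

	return r;
}

int main(void)
{
	char *core[] = { "migration_threshold", "4096" };
	char *policy[] = { "sequential_threshold", "512" };

	cache_message(2, core);
	cache_message(2, policy);
	printf("migration_threshold is now %lu\n", migration_threshold);
	return 0;
}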
+
+static int cache_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+
+ r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
+ if (!r)
+ r = fn(ti, cache->origin_dev, 0, ti->len, data);
+
+ return r;
+}
+
+/*
+ * We assume I/O is going to the origin (which is the volume
+ * more likely to have restrictions e.g. by being striped).
+ * (Looking up the exact location of the data would be expensive
+ * and could always be out of date by the time the bio is submitted.)
+ */
+static int cache_bvec_merge(struct dm_target *ti,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct cache *cache = ti->private;
+ struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = cache->origin_dev->bdev;
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
+{
+ /*
+ * FIXME: these limits may be incompatible with the cache device
+ */
+ limits->max_discard_sectors = cache->discard_block_size * 1024;
+ limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+}
+
+static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct cache *cache = ti->private;
+
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ set_discard_limits(cache, limits);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct target_type cache_target = {
+ .name = "cache",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = cache_ctr,
+ .dtr = cache_dtr,
+ .map = cache_map,
+ .end_io = cache_end_io,
+ .postsuspend = cache_postsuspend,
+ .preresume = cache_preresume,
+ .resume = cache_resume,
+ .status = cache_status,
+ .message = cache_message,
+ .iterate_devices = cache_iterate_devices,
+ .merge = cache_bvec_merge,
+ .io_hints = cache_io_hints,
+};
+
+static int __init dm_cache_init(void)
+{
+ int r;
+
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
+ migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+ if (!migration_cache) {
+ dm_unregister_target(&cache_target);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit dm_cache_exit(void)
+{
+ dm_unregister_target(&cache_target);
+ kmem_cache_destroy(migration_cache);
+}
+
+module_init(dm_cache_init);
+module_exit(dm_cache_exit);
+
+MODULE_DESCRIPTION(DM_NAME " cache target");
+MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f7369f9d8595..13c15480d940 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1234,20 +1234,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
return 0;
}
-/*
- * Encode key into its hex representation
- */
-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++) {
- sprintf(hex, "%02x", *key);
- hex += 2;
- key++;
- }
-}
-
static void crypt_free_tfms(struct crypt_config *cc)
{
unsigned i;
@@ -1651,7 +1637,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (opt_params == 1 && opt_string &&
!strcasecmp(opt_string, "allow_discards"))
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
else if (opt_params) {
ret = -EINVAL;
ti->error = "Invalid feature arguments";
@@ -1679,7 +1665,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
return 0;
@@ -1717,11 +1703,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int crypt_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct crypt_config *cc = ti->private;
- unsigned int sz = 0;
+ unsigned i, sz = 0;
switch (type) {
case STATUSTYPE_INFO:
@@ -1731,27 +1717,20 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0) {
- if ((maxlen - sz) < ((cc->key_size << 1) + 1))
- return -ENOMEM;
-
- crypt_encode_key(result + sz, cc->key, cc->key_size);
- sz += cc->key_size << 1;
- } else {
- if (sz >= maxlen)
- return -ENOMEM;
- result[sz++] = '-';
- }
+ if (cc->key_size > 0)
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ else
+ DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
- if (ti->num_discard_requests)
+ if (ti->num_discard_bios)
DMEMIT(" 1 allow_discards");
break;
}
- return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
@@ -1845,7 +1824,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 0},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index cc1bd048acb2..496d5f3646a5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,8 +198,8 @@ out:
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->private = dc;
return 0;
@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->read_delay, bio);
}
-static int delay_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void delay_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
dc->write_delay);
break;
}
-
- return 0;
}
static int delay_iterate_devices(struct dm_target *ti,
@@ -337,7 +335,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 9721f2ffb1a2..7fcf21cb4ff8 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -216,8 +216,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -337,8 +337,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
return error;
}
-static int flakey_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void flakey_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
@@ -368,7 +368,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
}
- return 0;
}
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
@@ -411,7 +410,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 0},
+ .version = {1, 3, 1},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0666b5d14b88..aa04f0224642 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1067,6 +1067,7 @@ static void retrieve_status(struct dm_table *table,
num_targets = dm_table_get_num_targets(table);
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
+ size_t l;
remaining = len - (outptr - outbuf);
if (remaining <= sizeof(struct dm_target_spec)) {
@@ -1093,14 +1094,17 @@ static void retrieve_status(struct dm_table *table,
if (ti->type->status) {
if (param->flags & DM_NOFLUSH_FLAG)
status_flags |= DM_STATUS_NOFLUSH_FLAG;
- if (ti->type->status(ti, type, status_flags, outptr, remaining)) {
- param->flags |= DM_BUFFER_FULL_FLAG;
- break;
- }
+ ti->type->status(ti, type, status_flags, outptr, remaining);
} else
outptr[0] = '\0';
- outptr += strlen(outptr) + 1;
+ l = strlen(outptr) + 1;
+ if (l == remaining) {
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ break;
+ }
+
+ outptr += l;
used = param->data_start + (outptr - outbuf);
outptr = align_ptr(outptr);
@@ -1410,6 +1414,22 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
+static bool buffer_test_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
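DMEMIT() builds status output with snprintf(), which never writes past the buffer, so a result that uses every last byte cannot be told apart from one that was truncated. The test above therefore treats an exactly-full buffer as overflow, which makes userspace retry with a larger buffer. A standalone sketch of the same idea, where emit() is an illustrative stand-in for the DMEMIT macro:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static unsigned emit(char *result, unsigned maxlen, unsigned sz, const char *word)
{
	/* Appends like DMEMIT: never writes past maxlen, advances sz. */
	sz += snprintf(result + sz, sz < maxlen ? maxlen - sz : 0, "%s ", word);
	return sz;
}

static bool buffer_test_overflow(const char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

int main(void)
{
	char buf[16];
	unsigned sz = 0;

	sz = emit(buf, sizeof(buf), sz, "0");
	sz = emit(buf, sizeof(buf), sz, "writethrough");

	printf("result: \"%s\"  possibly truncated: %s\n", buf,
	       buffer_test_overflow(buf, sizeof(buf)) ? "yes" : "no");
	return 0;
}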
+
+/*
+ * Process device-mapper dependent messages.
+ * Returns a number <= 1 if message was processed by device mapper.
+ * Returns 2 if message should be delivered to the target.
+ */
+static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ return 2;
+}
+
/*
* Pass a message to the target that's at the supplied device offset.
*/
@@ -1421,6 +1441,8 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
struct dm_table *table;
struct dm_target *ti;
struct dm_target_msg *tmsg = (void *) param + param->data_start;
+ size_t maxlen;
+ char *result = get_result_buffer(param, param_size, &maxlen);
md = find_device(param);
if (!md)
@@ -1444,6 +1466,10 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out_argv;
}
+ r = message_for_md(md, argc, argv, result, maxlen);
+ if (r <= 1)
+ goto out_argv;
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
@@ -1469,44 +1495,68 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
out_argv:
kfree(argv);
out:
- param->data_size = 0;
+ if (r >= 0)
+ __dev_status(md, param);
+
+ if (r == 1) {
+ param->flags |= DM_DATA_OUT_FLAG;
+ if (buffer_test_overflow(result, maxlen))
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ else
+ param->data_size = param->data_start + strlen(result) + 1;
+ r = 0;
+ }
+
dm_put(md);
return r;
}
+/*
+ * The ioctl parameter block consists of two parts, a dm_ioctl struct
+ * followed by a data buffer. This flag is set if the second part,
+ * which has a variable size, is not used by the function processing
+ * the ioctl.
+ */
+#define IOCTL_FLAGS_NO_PARAMS 1
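The layout the comment describes can be pictured as a fixed header whose last member is a small data array marking the start of the variable part, so the size of the header alone is sizeof(struct) minus sizeof(data); copy_params() below computes exactly that as minimum_data_size. A simplified stand-in for illustration, not the real struct dm_ioctl:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in: fixed header followed by a variable data area. */
struct fake_dm_ioctl {
	uint32_t version[3];
	uint32_t data_size;	/* total size of header + data */
	uint32_t data_start;	/* offset of the data area from the start */
	uint32_t flags;
	char data[7];		/* variable sized payload begins here */
};

int main(void)
{
	const size_t minimum_data_size =
		sizeof(struct fake_dm_ioctl) - sizeof(((struct fake_dm_ioctl *)0)->data);

	/*
	 * An ioctl flagged IOCTL_FLAGS_NO_PARAMS only ever needs the fixed
	 * header, so the kernel can skip allocating a buffer for the
	 * variable part and copy just minimum_data_size bytes.
	 */
	printf("header-only size: %zu bytes (full struct: %zu)\n",
	       minimum_data_size, sizeof(struct fake_dm_ioctl));
	return 0;
}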
+
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
* device.
*---------------------------------------------------------------*/
-static ioctl_fn lookup_ioctl(unsigned int cmd)
+static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static struct {
int cmd;
+ int flags;
ioctl_fn fn;
} _ioctls[] = {
- {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, remove_all},
- {DM_LIST_DEVICES_CMD, list_devices},
-
- {DM_DEV_CREATE_CMD, dev_create},
- {DM_DEV_REMOVE_CMD, dev_remove},
- {DM_DEV_RENAME_CMD, dev_rename},
- {DM_DEV_SUSPEND_CMD, dev_suspend},
- {DM_DEV_STATUS_CMD, dev_status},
- {DM_DEV_WAIT_CMD, dev_wait},
-
- {DM_TABLE_LOAD_CMD, table_load},
- {DM_TABLE_CLEAR_CMD, table_clear},
- {DM_TABLE_DEPS_CMD, table_deps},
- {DM_TABLE_STATUS_CMD, table_status},
-
- {DM_LIST_VERSIONS_CMD, list_versions},
-
- {DM_TARGET_MSG_CMD, target_message},
- {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
+ {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_LIST_DEVICES_CMD, 0, list_devices},
+
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
+ {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
+ {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
+ {DM_DEV_WAIT_CMD, 0, dev_wait},
+
+ {DM_TABLE_LOAD_CMD, 0, table_load},
+ {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
+ {DM_TABLE_DEPS_CMD, 0, table_deps},
+ {DM_TABLE_STATUS_CMD, 0, table_status},
+
+ {DM_LIST_VERSIONS_CMD, 0, list_versions},
+
+ {DM_TARGET_MSG_CMD, 0, target_message},
+ {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
};
- return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
+ if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
+ return NULL;
+
+ *ioctl_flags = _ioctls[cmd].flags;
+ return _ioctls[cmd].fn;
}
/*
@@ -1543,7 +1593,8 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */
+#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */
#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1551,66 +1602,80 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
if (param_flags & DM_WIPE_BUFFER)
memset(param, 0, param_size);
+ if (param_flags & DM_PARAMS_KMALLOC)
+ kfree(param);
if (param_flags & DM_PARAMS_VMALLOC)
vfree(param);
- else
- kfree(param);
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+ int ioctl_flags,
+ struct dm_ioctl **param, int *param_flags)
{
- struct dm_ioctl tmp, *dmi;
+ struct dm_ioctl *dmi;
int secure_data;
+ const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
- if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
+ if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
- if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
+ if (param_kernel->data_size < minimum_data_size)
return -EINVAL;
- secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
+ secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
*param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+ if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
+ dmi = param_kernel;
+ dmi->data_size = minimum_data_size;
+ goto data_copied;
+ }
+
/*
* Try to avoid low memory issues when a device is suspended.
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (tmp.data_size <= KMALLOC_MAX_SIZE)
- dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+ dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (dmi)
+ *param_flags |= DM_PARAMS_KMALLOC;
+ }
if (!dmi) {
- dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
- *param_flags |= DM_PARAMS_VMALLOC;
+ dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ if (dmi)
+ *param_flags |= DM_PARAMS_VMALLOC;
}
if (!dmi) {
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
return -EFAULT;
return -ENOMEM;
}
- if (copy_from_user(dmi, user, tmp.data_size))
+ if (copy_from_user(dmi, user, param_kernel->data_size))
goto bad;
+data_copied:
/*
* Abort if something changed the ioctl data while it was being copied.
*/
- if (dmi->data_size != tmp.data_size) {
+ if (dmi->data_size != param_kernel->data_size) {
DMERR("rejecting ioctl: data size modified while processing parameters");
goto bad;
}
/* Wipe the user buffer so we do not return it to userspace */
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
*param = dmi;
return 0;
bad:
- free_params(dmi, tmp.data_size, *param_flags);
+ free_params(dmi, param_kernel->data_size, *param_flags);
return -EFAULT;
}
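The allocation strategy above tries the cheap allocator first with flags that refuse to retry or dip into reserves, falls back to the slower one, and records which allocator succeeded so free_params() can release the buffer the matching way. A userspace sketch of the pattern; try_kmalloc() and try_vmalloc() are hypothetical stand-ins that just wrap malloc():

#include <stdio.h>
#include <stdlib.h>

#define PARAMS_KMALLOC 0x1	/* buffer came from the "small" allocator */
#define PARAMS_VMALLOC 0x2	/* buffer came from the fallback allocator */

/* Stand-ins for kmalloc(GFP_NOIO | __GFP_NORETRY | ...) and __vmalloc(). */
static void *try_kmalloc(size_t size) { return size <= 4096 ? malloc(size) : NULL; }
static void *try_vmalloc(size_t size) { return malloc(size); }

static void *alloc_params(size_t size, int *flags)
{
	void *p = try_kmalloc(size);

	if (p) {
		*flags |= PARAMS_KMALLOC;
		return p;
	}

	p = try_vmalloc(size);
	if (p)
		*flags |= PARAMS_VMALLOC;
	return p;
}

static void release_params(void *p, int flags)
{
	/* Release with the allocator that actually provided the buffer. */
	if (flags & (PARAMS_KMALLOC | PARAMS_VMALLOC))
		free(p);
}

int main(void)
{
	int flags = 0;
	void *p = alloc_params(64 * 1024, &flags);

	printf("allocated via %s\n",
	       flags & PARAMS_KMALLOC ? "kmalloc path" : "vmalloc path");
	release_params(p, flags);
	return 0;
}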
@@ -1621,6 +1686,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
param->flags &= ~DM_BUFFER_FULL_FLAG;
param->flags &= ~DM_UEVENT_GENERATED_FLAG;
param->flags &= ~DM_SECURE_DATA_FLAG;
+ param->flags &= ~DM_DATA_OUT_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1648,11 +1714,13 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
+ int ioctl_flags;
int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
size_t input_param_size;
+ struct dm_ioctl param_kernel;
/* only root can play with this */
if (!capable(CAP_SYS_ADMIN))
@@ -1677,7 +1745,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (cmd == DM_VERSION_CMD)
return 0;
- fn = lookup_ioctl(cmd);
+ fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) {
DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY;
@@ -1686,7 +1754,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
/*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param, &param_flags);
+ r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
if (r)
return r;
@@ -1699,6 +1767,10 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
param->data_size = sizeof(*param);
r = fn(param, input_param_size);
+ if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
+ unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
+ DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+
/*
* Copy the results back to userland.
*/
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 68c02673263b..d581fe5d2faf 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
@@ -51,6 +52,8 @@ struct dm_kcopyd_client {
struct workqueue_struct *kcopyd_wq;
struct work_struct kcopyd_work;
+ struct dm_kcopyd_throttle *throttle;
+
/*
* We maintain three lists of jobs:
*
@@ -68,6 +71,117 @@ struct dm_kcopyd_client {
static struct page_list zero_page_list;
+static DEFINE_SPINLOCK(throttle_spinlock);
+
+/*
+ * IO/IDLE accounting slowly decays: once total_period reaches
+ * (1 << ACCOUNT_INTERVAL_SHIFT) jiffies, both counters are scaled down
+ * (divided by at least 2).
+ */
+#define ACCOUNT_INTERVAL_SHIFT SHIFT_HZ
+
+/*
+ * Sleep this number of milliseconds.
+ *
+ * The value was decided experimentally.
+ * Smaller values seem to cause an increased copy rate above the limit.
+ * The reason for this is unknown but possibly due to jiffies rounding errors
+ * or read/write cache inside the disk.
+ */
+#define SLEEP_MSEC 100
+
+/*
+ * Maximum number of sleep events. There is a theoretical livelock if more
+ * than one kcopyd client does work simultaneously, which this limit avoids.
+ */
+#define MAX_SLEEPS 10
+
+static void io_job_start(struct dm_kcopyd_throttle *t)
+{
+ unsigned throttle, now, difference;
+ int slept = 0, skew;
+
+ if (unlikely(!t))
+ return;
+
+try_again:
+ spin_lock_irq(&throttle_spinlock);
+
+ throttle = ACCESS_ONCE(t->throttle);
+
+ if (likely(throttle >= 100))
+ goto skip_limit;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+ if (t->num_io_jobs)
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+
+ if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
+ int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+ t->total_period >>= shift;
+ t->io_period >>= shift;
+ }
+
+ skew = t->io_period - throttle * t->total_period / 100;
+
+ if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
+ slept++;
+ spin_unlock_irq(&throttle_spinlock);
+ msleep(SLEEP_MSEC);
+ goto try_again;
+ }
+
+skip_limit:
+ t->num_io_jobs++;
+
+ spin_unlock_irq(&throttle_spinlock);
+}
+
+static void io_job_finish(struct dm_kcopyd_throttle *t)
+{
+ unsigned long flags;
+
+ if (unlikely(!t))
+ return;
+
+ spin_lock_irqsave(&throttle_spinlock, flags);
+
+ t->num_io_jobs--;
+
+ if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ goto skip_limit;
+
+ if (!t->num_io_jobs) {
+ unsigned now, difference;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+ }
+
+skip_limit:
+ spin_unlock_irqrestore(&throttle_spinlock, flags);
+}
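A worked example of the accounting above: with the throttle set to N percent, io_job_start() sleeps while io_period exceeds N percent of total_period, and both counters are periodically scaled down so old history fades. The standalone sketch below uses plain integers, no locking, and made-up numbers:

#include <stdio.h>

#define ACCOUNT_INTERVAL 1024	/* stand-in for 1 << ACCOUNT_INTERVAL_SHIFT */

/* Minimal fls(): position of the highest set bit, 0 for 0. */
static int fls_(unsigned v)
{
	int r = 0;
	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned throttle = 20;		/* allow IO for 20% of the time */
	unsigned io_period = 300;	/* jiffies spent with IO in flight */
	unsigned total_period = 1100;	/* jiffies observed in total */

	/* Decay step: keep the window roughly ACCOUNT_INTERVAL jiffies long. */
	if (total_period >= ACCOUNT_INTERVAL) {
		int shift = fls_(total_period / ACCOUNT_INTERVAL);
		total_period >>= shift;
		io_period >>= shift;
	}

	/* Positive skew means IO has been running more than 20% of the time,
	 * so a new job would sleep (up to MAX_SLEEPS times) before starting. */
	int skew = (int)io_period - (int)(throttle * total_period / 100);

	printf("io=%u total=%u skew=%d -> %s\n", io_period, total_period, skew,
	       skew > 0 ? "sleep before starting the job" : "start immediately");
	return 0;
}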
+
+
static void wake(struct dm_kcopyd_client *kc)
{
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
@@ -348,6 +462,8 @@ static void complete_io(unsigned long error, void *context)
struct kcopyd_job *job = (struct kcopyd_job *) context;
struct dm_kcopyd_client *kc = job->kc;
+ io_job_finish(kc->throttle);
+
if (error) {
if (job->rw & WRITE)
job->write_err |= error;
@@ -389,6 +505,8 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
+ io_job_start(job->kc->throttle);
+
if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL);
else
@@ -695,7 +813,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-struct dm_kcopyd_client *dm_kcopyd_client_create(void)
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
@@ -708,6 +826,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(void)
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
+ kc->throttle = throttle;
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 328cad5617ab..4f99d267340c 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,9 +53,9 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
- ti->num_write_same_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_write_same_bios = 1;
ti->private = lc;
return 0;
@@ -95,8 +95,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-static int linear_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -110,7 +110,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
(unsigned long long)lc->start);
break;
}
- return 0;
}
static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -155,7 +154,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 573bd04591bf..51bb81676be3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -905,8 +905,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
return 0;
@@ -1378,8 +1378,8 @@ static void multipath_resume(struct dm_target *ti)
* [priority selector-name num_ps_args [ps_args]*
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
-static int multipath_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void multipath_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int sz = 0;
unsigned long flags;
@@ -1485,8 +1485,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
}
spin_unlock_irqrestore(&m->lock, flags);
-
- return 0;
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
@@ -1695,7 +1693,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9e58dbd8d8cb..9a01d1e4c783 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1151,7 +1151,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
@@ -1201,8 +1201,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int raid_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void raid_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct raid_set *rs = ti->private;
unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -1344,8 +1344,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" -");
}
}
-
- return 0;
}
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -1405,7 +1403,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 4, 1},
+ .version = {1, 4, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa519185ebba..d053098c6a91 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -82,6 +82,9 @@ struct mirror_set {
struct mirror mirror[0];
};
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
+ "A percentage of time allocated for raid resynchronization");
+
static void wakeup_mirrord(void *context)
{
struct mirror_set *ms = context;
@@ -1072,8 +1075,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto err_free_context;
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
@@ -1111,7 +1114,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- ms->kcopyd_client = dm_kcopyd_client_create();
+ ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(ms->kcopyd_client)) {
r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
@@ -1347,8 +1350,8 @@ static char device_status_char(struct mirror *m)
}
-static int mirror_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void mirror_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1383,8 +1386,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
if (ms->features & DM_RAID1_HANDLE_ERRORS)
DMEMIT(" 1 handle_errors");
}
-
- return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
@@ -1403,7 +1404,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 13, 1},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 59fc18ae52c2..c0e07026a8d1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -124,6 +124,9 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
return s->origin;
@@ -227,12 +230,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
struct dm_snap_tracked_chunk *c;
- struct hlist_node *hn;
int found = 0;
spin_lock_irq(&s->tracked_chunk_lock);
- hlist_for_each_entry(c, hn,
+ hlist_for_each_entry(c,
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
if (c->chunk == chunk) {
found = 1;
@@ -1038,7 +1040,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
- unsigned args_used, num_flush_requests = 1;
+ unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc != 4) {
@@ -1048,7 +1050,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
if (dm_target_is_snapshot_merge(ti)) {
- num_flush_requests = 2;
+ num_flush_bios = 2;
origin_mode = FMODE_WRITE;
}
@@ -1109,7 +1111,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- s->kcopyd_client = dm_kcopyd_client_create();
+ s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
@@ -1128,7 +1130,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
ti->private = s;
- ti->num_flush_requests = num_flush_requests;
+ ti->num_flush_bios = num_flush_bios;
ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
@@ -1692,7 +1694,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
if (bio->bi_rw & REQ_FLUSH) {
- if (!dm_bio_get_target_request_nr(bio))
+ if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
@@ -1837,8 +1839,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
start_merge(s);
}
-static int snapshot_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void snapshot_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
@@ -1884,8 +1886,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
maxlen - sz);
break;
}
-
- return 0;
}
static int snapshot_iterate_devices(struct dm_target *ti,
@@ -2105,7 +2105,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = dev;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
return 0;
}
@@ -2139,8 +2139,8 @@ static void origin_resume(struct dm_target *ti)
ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
-static int origin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void origin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_dev *dev = ti->private;
@@ -2153,8 +2153,6 @@ static int origin_status(struct dm_target *ti, status_type_t type,
snprintf(result, maxlen, "%s", dev->name);
break;
}
-
- return 0;
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -2181,7 +2179,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 8, 0},
+ .version = {1, 8, 1},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2194,7 +2192,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 0},
+ .version = {1, 11, 1},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2307,3 +2305,5 @@ module_exit(dm_snapshot_exit);
MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("dm-snapshot-origin");
+MODULE_ALIAS("dm-snapshot-merge");
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c89cde86d400..d8837d313f54 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -160,9 +160,9 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
return r;
- ti->num_flush_requests = stripes;
- ti->num_discard_requests = stripes;
- ti->num_write_same_requests = stripes;
+ ti->num_flush_bios = stripes;
+ ti->num_discard_bios = stripes;
+ ti->num_write_same_bios = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -276,19 +276,19 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
- unsigned target_request_nr;
+ unsigned target_bio_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio->bi_rw & REQ_DISCARD) ||
unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_range(sc, bio, target_request_nr);
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ return stripe_map_range(sc, bio, target_bio_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -312,8 +312,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
*
*/
-static int stripe_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void stripe_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
char buffer[sc->stripes + 1];
@@ -340,7 +340,6 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
(unsigned long long)sc->stripe[i].physical_start);
break;
}
- return 0;
}
static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -428,7 +427,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index daf25d0890b3..e50dad0c65f4 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -217,7 +217,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
if (alloc_targets(t, num_targets)) {
kfree(t);
- t = NULL;
return -ENOMEM;
}
@@ -823,8 +822,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests && tgt->discards_supported)
- DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ if (!tgt->num_discard_bios && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
dm_device_name(t->md), type);
return 0;
@@ -1360,7 +1359,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_flush_requests)
+ if (!ti->num_flush_bios)
continue;
if (ti->flush_supported)
@@ -1439,7 +1438,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_write_same_requests)
+ if (!ti->num_write_same_bios)
return false;
if (!ti->type->iterate_devices ||
@@ -1657,7 +1656,7 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_discard_requests)
+ if (!ti->num_discard_bios)
continue;
if (ti->discards_supported)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 617d21a77256..37ba5db71cd9 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -116,7 +116,7 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
/*
* Return error for discards instead of -EOPNOTSUPP
*/
- tt->num_discard_requests = 1;
+ tt->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 4d6e85367b84..00cee02f8fc9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -280,7 +280,7 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*t = v & ((1 << 24) - 1);
}
-static void data_block_inc(void *context, void *value_le)
+static void data_block_inc(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -292,7 +292,7 @@ static void data_block_inc(void *context, void *value_le)
dm_sm_inc_block(sm, b);
}
-static void data_block_dec(void *context, void *value_le)
+static void data_block_dec(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -304,7 +304,7 @@ static void data_block_dec(void *context, void *value_le)
dm_sm_dec_block(sm, b);
}
-static int data_block_equal(void *context, void *value1_le, void *value2_le)
+static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
uint64_t b1, b2;
@@ -318,7 +318,7 @@ static int data_block_equal(void *context, void *value1_le, void *value2_le)
return b1 == b2;
}
-static void subtree_inc(void *context, void *value)
+static void subtree_inc(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -329,7 +329,7 @@ static void subtree_inc(void *context, void *value)
dm_tm_inc(info->tm, root);
}
-static void subtree_dec(void *context, void *value)
+static void subtree_dec(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -341,7 +341,7 @@ static void subtree_dec(void *context, void *value)
DMERR("btree delete failed\n");
}
-static int subtree_equal(void *context, void *value1_le, void *value2_le)
+static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
memcpy(&v1_le, value1_le, sizeof(v1_le));
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5409607d4875..009339d62828 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -26,6 +26,9 @@
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
/*
* The block size of the device holding pool data must be
* between 64KB and 1GB.
@@ -227,6 +230,78 @@ struct thin_c {
/*----------------------------------------------------------------*/
/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+ queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ /*
+ * Allocate a cell from the prison's mempool.
+ * This might block but it can't fail.
+ */
+ cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
+
+ r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
+ if (r)
+ /*
+ * We reused an old cell; we can get rid of
+ * the new one.
+ */
+ dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+
+ return r;
+}
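The allocate-first, maybe-free pattern above exists because mempool allocation may block and therefore cannot happen once the prison's internal lock is held; if the bio joins an existing cell the spare is simply returned. A userspace sketch of the same pattern with malloc/free and a toy lookup table (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct cell {
	int key;
	int holders;
	struct cell *next;
};

static struct cell *table;

/* Insert `spare` keyed by `key` unless a cell for that key already exists.
 * Returns 0 if the spare was consumed, 1 if an existing cell was reused. */
static int detain(struct cell *spare, int key, struct cell **result)
{
	struct cell *c;

	for (c = table; c; c = c->next)
		if (c->key == key) {
			c->holders++;
			*result = c;
			return 1;	/* spare not needed */
		}

	spare->key = key;
	spare->holders = 1;
	spare->next = table;
	table = spare;
	*result = spare;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		/* Allocate up front, where blocking or failing is acceptable. */
		struct cell *prealloc = calloc(1, sizeof(*prealloc));
		struct cell *cell;

		if (!prealloc)
			return 1;

		if (detain(prealloc, 42, &cell))
			free(prealloc);	/* reused an existing cell */

		printf("key %d now has %d holder(s)\n", cell->key, cell->holders);
	}
	return 0;
}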
+
+static void cell_release(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_release_no_holder(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release_no_holder(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_defer_no_holder_no_free(struct thin_c *tc,
+ struct dm_bio_prison_cell *cell)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+static void cell_error(struct pool *pool,
+ struct dm_bio_prison_cell *cell)
+{
+ dm_cell_error(pool->prison, cell);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
* A global list of pools that uses a struct mapped_device as a key.
*/
static struct dm_thin_pool_table {
@@ -330,14 +405,20 @@ static void requeue_io(struct thin_c *tc)
* target.
*/
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+ return pool->sectors_per_block_shift >= 0;
+}
+
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
+ struct pool *pool = tc->pool;
sector_t block_nr = bio->bi_sector;
- if (tc->pool->sectors_per_block_shift < 0)
- (void) sector_div(block_nr, tc->pool->sectors_per_block);
+ if (block_size_is_power_of_two(pool))
+ block_nr >>= pool->sectors_per_block_shift;
else
- block_nr >>= tc->pool->sectors_per_block_shift;
+ (void) sector_div(block_nr, pool->sectors_per_block);
return block_nr;
}
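The rewrite above adds a fast path: when the block size is a power of two the block number is a shift, otherwise a full division is needed, and both give the same result; a negative sectors_per_block_shift is how the pool marks the non-power-of-two case. A standalone illustration using uint64_t in place of sector_t:

#include <stdio.h>
#include <stdint.h>

struct fake_pool {
	uint64_t sectors_per_block;
	int sectors_per_block_shift;	/* < 0 if not a power of two */
};

static int block_size_is_power_of_two(const struct fake_pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static uint64_t get_block(const struct fake_pool *pool, uint64_t sector)
{
	if (block_size_is_power_of_two(pool))
		return sector >> pool->sectors_per_block_shift;

	return sector / pool->sectors_per_block;	/* sector_div() in the kernel */
}

int main(void)
{
	struct fake_pool pow2 = { .sectors_per_block = 128, .sectors_per_block_shift = 7 };
	struct fake_pool odd = { .sectors_per_block = 96, .sectors_per_block_shift = -1 };

	printf("sector 1000 -> block %llu (128-sector blocks)\n",
	       (unsigned long long)get_block(&pow2, 1000));
	printf("sector 1000 -> block %llu (96-sector blocks)\n",
	       (unsigned long long)get_block(&odd, 1000));
	return 0;
}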
@@ -348,12 +429,12 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
- if (tc->pool->sectors_per_block_shift < 0)
- bio->bi_sector = (block * pool->sectors_per_block) +
- sector_div(bi_sector, pool->sectors_per_block);
- else
+ if (block_size_is_power_of_two(pool))
bio->bi_sector = (block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
+ else
+ bio->bi_sector = (block * pool->sectors_per_block) +
+ sector_div(bi_sector, pool->sectors_per_block);
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -420,15 +501,6 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
issue(tc, bio);
}
-/*
- * wake_worker() is used when new work is queued and when pool_resume is
- * ready to continue deferred IO processing.
- */
-static void wake_worker(struct pool *pool)
-{
- queue_work(pool->wq, &pool->worker);
-}
-
/*----------------------------------------------------------------*/
/*
@@ -515,14 +587,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release(cell, &pool->deferred_bios);
+ cell_release(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&tc->pool->lock, flags);
wake_worker(pool);
}
/*
- * Same as cell_defer except it omits the original holder of the cell.
+ * Same as cell_defer above, except it omits the original holder of the cell.
*/
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
@@ -530,7 +602,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release_no_holder(cell, &pool->deferred_bios);
+ cell_release_no_holder(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
@@ -540,13 +612,15 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
if (m->bio)
m->bio->bi_end_io = m->saved_bi_end_io;
- dm_cell_error(m->cell);
+ cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
struct bio *bio;
int r;
@@ -555,7 +629,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
bio->bi_end_io = m->saved_bi_end_io;
if (m->err) {
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -567,7 +641,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
DMERR_LIMIT("dm_thin_insert_block() failed");
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -585,7 +659,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
out:
list_del(&m->list);
- mempool_free(m, tc->pool->mapping_pool);
+ mempool_free(m, pool->mapping_pool);
}
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -736,7 +810,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_copy() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -802,7 +876,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_zero() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -908,13 +982,13 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct dm_bio_prison_cell *cell)
+static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
bio_list_init(&bios);
- dm_cell_release(cell, &bios);
+ cell_release(pool, cell, &bios);
while ((bio = bio_list_pop(&bios)))
retry_on_resume(bio);
@@ -932,7 +1006,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
struct dm_thin_new_mapping *m;
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(tc->pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -944,7 +1018,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
* on this block.
*/
build_data_key(tc->td, lookup_result.block, &key2);
- if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+ if (bio_detain(tc->pool, &key2, bio, &cell2)) {
cell_defer_no_holder(tc, cell);
break;
}
@@ -1020,13 +1094,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(cell);
+ no_space(tc->pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- dm_cell_error(cell);
+ cell_error(tc->pool, cell);
break;
}
}
@@ -1044,7 +1118,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
* of being broken so we have nothing further to do here.
*/
build_data_key(tc->td, lookup_result->block, &key);
- if (dm_bio_detain(pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1065,12 +1139,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, 0);
@@ -1097,14 +1172,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
case -ENOSPC:
- no_space(cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- set_pool_mode(tc->pool, PM_READ_ONLY);
- dm_cell_error(cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1112,6 +1187,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
static void process_bio(struct thin_c *tc, struct bio *bio)
{
int r;
+ struct pool *pool = tc->pool;
dm_block_t block = get_bio_block(tc, bio);
struct dm_bio_prison_cell *cell;
struct dm_cell_key key;
@@ -1122,7 +1198,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
* being provisioned so we have nothing further to do here.
*/
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1130,9 +1206,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
} else {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, lookup_result.block);
@@ -1141,7 +1217,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_to_origin_and_issue(tc, bio);
@@ -1378,7 +1454,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
- struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_bio_prison_cell cell1, cell2;
+ struct dm_bio_prison_cell *cell_result;
struct dm_cell_key key;
thin_hook_bio(tc, bio);
@@ -1420,18 +1497,18 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
return DM_MAPIO_SUBMITTED;
build_data_key(tc->td, result.block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
- cell_defer_no_holder(tc, cell1);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
inc_all_io_entry(tc->pool, bio);
- cell_defer_no_holder(tc, cell2);
- cell_defer_no_holder(tc, cell1);
+ cell_defer_no_holder_no_free(tc, &cell2);
+ cell_defer_no_holder_no_free(tc, &cell1);
remap(tc, bio, result.block);
return DM_MAPIO_REMAPPED;
@@ -1636,7 +1713,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_prison;
}
- pool->copier = dm_kcopyd_client_create();
+ pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(pool->copier)) {
r = PTR_ERR(pool->copier);
*error = "Error creating pool's kcopyd client";
@@ -1938,7 +2015,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
/*
* Only need to enable discards if the pool should pass
@@ -1946,7 +2023,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* processing will cause mappings to be removed from the btree.
*/
if (pf.discard_enabled && pf.discard_passdown) {
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
/*
* Setting 'discards_supported' circumvents the normal
@@ -2299,8 +2376,8 @@ static void emit_flags(struct pool_features *pf, char *result,
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
*/
-static int pool_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void pool_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
unsigned sz = 0;
@@ -2326,32 +2403,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
(void) commit_or_fallback(pool);
- r = dm_pool_get_metadata_transaction_id(pool->pmd,
- &transaction_id);
- if (r)
- return r;
+ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
+ if (r) {
+ DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_metadata_block_count(pool->pmd,
- &nr_free_blocks_metadata);
- if (r)
- return r;
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_block_count(pool->pmd,
- &nr_free_blocks_data);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
+ if (r) {
+ DMERR("dm_pool_get_free_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_data_dev_size returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_snap returned %d", r);
+ goto err;
+ }
DMEMIT("%llu %llu/%llu %llu/%llu ",
(unsigned long long)transaction_id,
@@ -2388,8 +2474,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
emit_flags(&pt->requested_pf, result, sz, maxlen);
break;
}
+ return;
- return 0;
+err:
+ DMEMIT("Error");
}
static int pool_iterate_devices(struct dm_target *ti,
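
The hunk above (and the matching thin_status and verity_status changes further down) reflects a device-mapper interface change: the .status hook now returns void and reports failures by emitting text such as "Error" into the result buffer instead of returning an errno. A minimal sketch of the new convention, not part of this patch; the example_ctx type and its fields are hypothetical:

#include <linux/device-mapper.h>

struct example_ctx {				/* hypothetical per-target state */
	const char *dev_name;
	unsigned long long used_blocks;
	unsigned long long block_size;
	bool metadata_failed;
};

static void example_status(struct dm_target *ti, status_type_t type,
			   unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;			/* DMEMIT() appends at result + sz */
	struct example_ctx *ec = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (ec->metadata_failed) {
			DMEMIT("Error");	/* no errno to return any more */
			return;
		}
		DMEMIT("%llu", ec->used_blocks);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu", ec->dev_name, ec->block_size);
		break;
	}
}
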
@@ -2414,11 +2502,6 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
-static bool block_size_is_power_of_two(struct pool *pool)
-{
- return pool->sectors_per_block_shift >= 0;
-}
-
static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
struct pool *pool = pt->pool;
@@ -2432,15 +2515,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
if (pt->adjusted_pf.discard_passdown) {
data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
limits->discard_granularity = data_limits->discard_granularity;
- } else if (block_size_is_power_of_two(pool))
+ } else
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- else
- /*
- * Use largest power of 2 that is a factor of sectors_per_block
- * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
- */
- limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
- DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
}
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2468,7 +2544,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 6, 0},
+ .version = {1, 6, 1},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2588,17 +2664,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad_thin_open;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->flush_supported = true;
ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
ti->discard_zeroes_data_unsupported = true;
- /* Discard requests must be split on a block boundary */
- ti->split_discard_requests = true;
+ /* Discard bios must be split on a block boundary */
+ ti->split_discard_bios = true;
}
dm_put(pool_md);
@@ -2676,8 +2752,8 @@ static void thin_postsuspend(struct dm_target *ti)
/*
* <nr mapped sectors> <highest mapped sector>
*/
-static int thin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void thin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
ssize_t sz = 0;
@@ -2687,7 +2763,7 @@ static int thin_status(struct dm_target *ti, status_type_t type,
if (get_pool_mode(tc->pool) == PM_FAIL) {
DMEMIT("Fail");
- return 0;
+ return;
}
if (!tc->td)
@@ -2696,12 +2772,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
r = dm_thin_get_mapped_count(tc->td, &mapped);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_thin_get_mapped_count returned %d", r);
+ goto err;
+ }
r = dm_thin_get_highest_mapped_block(tc->td, &highest);
- if (r < 0)
- return r;
+ if (r < 0) {
+ DMERR("dm_thin_get_highest_mapped_block returned %d", r);
+ goto err;
+ }
DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
if (r)
@@ -2721,7 +2801,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
}
}
- return 0;
+ return;
+
+err:
+ DMEMIT("Error");
}
static int thin_iterate_devices(struct dm_target *ti,
@@ -2748,7 +2831,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 7, 0},
+ .version = {1, 7, 1},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 52cde982164a..6ad538375c3c 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -508,8 +508,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
/*
* Status: V (valid) or C (corruption found)
*/
-static int verity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void verity_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
unsigned sz = 0;
@@ -540,8 +540,6 @@ static int verity_status(struct dm_target *ti, status_type_t type,
DMEMIT("%02x", v->salt[x]);
break;
}
-
- return 0;
}
static int verity_ioctl(struct dm_target *ti, unsigned cmd,
@@ -860,7 +858,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 69a5c3b3b340..c99003e0d47a 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -25,7 +25,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Silently drop discards, avoiding -EOPNOTSUPP.
*/
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 314a0e2faf79..7e469260fe5e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -163,7 +163,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
@@ -197,7 +196,6 @@ struct mapped_device {
*/
struct dm_md_mempools {
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
};
@@ -205,12 +203,6 @@ struct dm_md_mempools {
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
-/*
- * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
- * still used for _io_cache, I'm leaving this for a later cleanup
- */
-static struct kmem_cache *_rq_bio_info_cache;
-
static int __init local_init(void)
{
int r = -ENOMEM;
@@ -224,13 +216,9 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
- _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
- if (!_rq_bio_info_cache)
- goto out_free_rq_tio_cache;
-
r = dm_uevent_init();
if (r)
- goto out_free_rq_bio_info_cache;
+ goto out_free_rq_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
@@ -244,8 +232,6 @@ static int __init local_init(void)
out_uevent_exit:
dm_uevent_exit();
-out_free_rq_bio_info_cache:
- kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
@@ -256,7 +242,6 @@ out_free_io_cache:
static void local_exit(void)
{
- kmem_cache_destroy(_rq_bio_info_cache);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -318,7 +303,6 @@ static void __exit dm_exit(void)
/*
* Should be empty by this point.
*/
- idr_remove_all(&_minor_idr);
idr_destroy(&_minor_idr);
}
@@ -449,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
- return mempool_alloc(md->tio_pool, gfp_mask);
+ return mempool_alloc(md->io_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
{
- mempool_free(tio, tio->md->tio_pool);
+ mempool_free(tio, tio->md->io_pool);
}
static int md_in_flight(struct mapped_device *md)
@@ -627,7 +611,6 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
@@ -987,12 +970,13 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
-static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct mapped_device *md;
struct bio *clone = &tio->clone;
+ struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -1033,32 +1017,54 @@ struct clone_info {
unsigned short idx;
};
+static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+{
+ bio->bi_sector = sector;
+ bio->bi_size = to_bytes(len);
+}
+
+static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
+{
+ bio->bi_idx = idx;
+ bio->bi_vcnt = idx + bv_count;
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+}
+
+static void clone_bio_integrity(struct bio *bio, struct bio *clone,
+ unsigned short idx, unsigned len, unsigned offset,
+ unsigned trim)
+{
+ if (!bio_integrity(bio))
+ return;
+
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ if (trim)
+ bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
+}
+
/*
* Creates a little bio that just does part of a bvec.
*/
-static void split_bvec(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx, unsigned int offset,
- unsigned int len, struct bio_set *bs)
+static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
+ sector_t sector, unsigned short idx,
+ unsigned offset, unsigned len)
{
struct bio *clone = &tio->clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
*clone->bi_io_vec = *bv;
- clone->bi_sector = sector;
+ bio_setup_sector(clone, sector, len);
+
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
- clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
clone->bi_flags |= 1 << BIO_CLONED;
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, offset), len);
- }
+ clone_bio_integrity(bio, clone, idx, len, offset, 1);
}
/*
@@ -1066,29 +1072,23 @@ static void split_bvec(struct dm_target_io *tio, struct bio *bio,
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned int len,
- struct bio_set *bs)
+ unsigned short bv_count, unsigned len)
{
struct bio *clone = &tio->clone;
+ unsigned trim = 0;
__bio_clone(clone, bio);
- clone->bi_sector = sector;
- clone->bi_idx = idx;
- clone->bi_vcnt = idx + bv_count;
- clone->bi_size = to_bytes(len);
- clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, 0), len);
- }
+ bio_setup_sector(clone, sector, len);
+ bio_setup_bv(clone, idx, bv_count);
+
+ if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
+ trim = 1;
+ clone_bio_integrity(bio, clone, idx, len, 0, trim);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti, int nr_iovecs)
+ struct dm_target *ti, int nr_iovecs,
+ unsigned target_bio_nr)
{
struct dm_target_io *tio;
struct bio *clone;
@@ -1099,96 +1099,104 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
- tio->target_request_nr = 0;
+ tio->target_bio_nr = target_bio_nr;
return tio;
}
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
- unsigned request_nr, sector_t len)
+static void __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target *ti,
+ unsigned target_bio_nr, sector_t len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
+ struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
- tio->target_request_nr = request_nr;
-
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
-
__bio_clone(clone, ci->bio);
- if (len) {
- clone->bi_sector = ci->sector;
- clone->bi_size = to_bytes(len);
- }
+ if (len)
+ bio_setup_sector(clone, ci->sector, len);
- __map_bio(ti, tio);
+ __map_bio(tio);
}
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
- unsigned num_requests, sector_t len)
+static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_bios, sector_t len)
{
- unsigned request_nr;
+ unsigned target_bio_nr;
- for (request_nr = 0; request_nr < num_requests; request_nr++)
- __issue_target_request(ci, ti, request_nr, len);
+ for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
+ __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}
-static int __clone_and_map_empty_flush(struct clone_info *ci)
+static int __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+ __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
return 0;
}
-/*
- * Perform all io with a single clone.
- */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+ sector_t sector, int nr_iovecs,
+ unsigned short idx, unsigned short bv_count,
+ unsigned offset, unsigned len,
+ unsigned split_bvec)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
+ unsigned target_bio_nr;
+ unsigned num_target_bios = 1;
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
- ci->sector_count, ci->md->bs);
- __map_bio(ti, tio);
- ci->sector_count = 0;
+ /*
+ * Does the target want to receive duplicate copies of the bio?
+ */
+ if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
+ num_target_bios = ti->num_write_bios(ti, bio);
+
+ for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
+ tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
+ if (split_bvec)
+ clone_split_bio(tio, bio, sector, idx, offset, len);
+ else
+ clone_bio(tio, bio, sector, idx, bv_count, len);
+ __map_bio(tio);
+ }
}
-typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-static unsigned get_num_discard_requests(struct dm_target *ti)
+static unsigned get_num_discard_bios(struct dm_target *ti)
{
- return ti->num_discard_requests;
+ return ti->num_discard_bios;
}
-static unsigned get_num_write_same_requests(struct dm_target *ti)
+static unsigned get_num_write_same_bios(struct dm_target *ti)
{
- return ti->num_write_same_requests;
+ return ti->num_write_same_bios;
}
typedef bool (*is_split_required_fn)(struct dm_target *ti);
static bool is_split_required_for_discard(struct dm_target *ti)
{
- return ti->split_discard_requests;
+ return ti->split_discard_bios;
}
-static int __clone_and_map_changing_extent_only(struct clone_info *ci,
- get_num_requests_fn get_num_requests,
- is_split_required_fn is_split_required)
+static int __send_changing_extent_only(struct clone_info *ci,
+ get_num_bios_fn get_num_bios,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
- unsigned num_requests;
+ unsigned num_bios;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1201,8 +1209,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
* reconfiguration might also have changed that since the
* check was performed.
*/
- num_requests = get_num_requests ? get_num_requests(ti) : 0;
- if (!num_requests)
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
@@ -1210,7 +1218,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, num_requests, len);
+ __send_duplicate_bios(ci, ti, num_bios, len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1218,108 +1226,129 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
return 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
- is_split_required_for_discard);
+ return __send_changing_extent_only(ci, get_num_discard_bios,
+ is_split_required_for_discard);
}
-static int __clone_and_map_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+ return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
-static int __clone_and_map(struct clone_info *ci)
+/*
+ * Find maximum number of sectors / bvecs we can process with a single bio.
+ */
+static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
struct bio *bio = ci->bio;
- struct dm_target *ti;
- sector_t len = 0, max;
- struct dm_target_io *tio;
+ sector_t bv_len, total_len = 0;
- if (unlikely(bio->bi_rw & REQ_DISCARD))
- return __clone_and_map_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
- return __clone_and_map_write_same(ci);
+ for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
+ bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
+ if (bv_len > max)
+ break;
- if (ci->sector_count <= max) {
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- __clone_and_map_simple(ci, ti);
+ max -= bv_len;
+ total_len += bv_len;
+ }
- } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- int i;
- sector_t remaining = max;
- sector_t bv_len;
+ return total_len;
+}
- for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
- bv_len = to_sector(bio->bi_io_vec[i].bv_len);
+static int __split_bvec_across_targets(struct clone_info *ci,
+ struct dm_target *ti, sector_t max)
+{
+ struct bio *bio = ci->bio;
+ struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+ sector_t remaining = to_sector(bv->bv_len);
+ unsigned offset = 0;
+ sector_t len;
- if (bv_len > remaining)
- break;
+ do {
+ if (offset) {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- remaining -= bv_len;
- len += bv_len;
+ max = max_io_len(ci->sector, ti);
}
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
- ci->md->bs);
- __map_bio(ti, tio);
+ len = min(remaining, max);
+
+ __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
+ bv->bv_offset + offset, len, 1);
ci->sector += len;
ci->sector_count -= len;
- ci->idx = i;
+ offset += to_bytes(len);
+ } while (remaining -= len);
- } else {
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned int offset = 0;
+ ci->idx++;
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
+ return 0;
+}
- max = max_io_len(ci->sector, ti);
- }
+/*
+ * Select the correct strategy for processing a non-flush bio.
+ */
+static int __split_and_process_non_flush(struct clone_info *ci)
+{
+ struct bio *bio = ci->bio;
+ struct dm_target *ti;
+ sector_t len, max;
+ int idx;
- len = min(remaining, max);
+ if (unlikely(bio->bi_rw & REQ_DISCARD))
+ return __send_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __send_write_same(ci);
- tio = alloc_tio(ci, ti, 1);
- split_bvec(tio, bio, ci->sector, ci->idx,
- bv->bv_offset + offset, len, ci->md->bs);
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- __map_bio(ti, tio);
+ max = max_io_len(ci->sector, ti);
+
+ /*
+ * Optimise for the simple case where we can do all of
+ * the remaining io with a single clone.
+ */
+ if (ci->sector_count <= max) {
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, bio->bi_vcnt - ci->idx, 0,
+ ci->sector_count, 0);
+ ci->sector_count = 0;
+ return 0;
+ }
+
+ /*
+ * There are some bvecs that don't span targets.
+ * Do as many of these as possible.
+ */
+ if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+ len = __len_within_target(ci, max, &idx);
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, idx - ci->idx, 0, len, 0);
- ci->idx++;
+ ci->sector += len;
+ ci->sector_count -= len;
+ ci->idx = idx;
+
+ return 0;
}
- return 0;
+ /*
+ * Handle a bvec that must be split between two or more targets.
+ */
+ return __split_bvec_across_targets(ci, ti, max);
}
/*
- * Split the bio into several clones and submit it to targets.
+ * Entry point to split a bio into clones and submit them to the targets.
*/
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
@@ -1343,16 +1372,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
+
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
- error = __clone_and_map_empty_flush(&ci);
+ error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error)
- error = __clone_and_map(&ci);
+ error = __split_and_process_non_flush(&ci);
}
/* drop the extra reference count */
@@ -1756,62 +1786,38 @@ static void free_minor(int minor)
*/
static int specific_minor(int minor)
{
- int r, m;
+ int r;
if (minor >= (1 << MINORBITS))
return -EINVAL;
- r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r)
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
- if (idr_find(&_minor_idr, minor)) {
- r = -EBUSY;
- goto out;
- }
+ r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
- r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
- if (r)
- goto out;
-
- if (m != minor) {
- idr_remove(&_minor_idr, m);
- r = -EBUSY;
- goto out;
- }
-
-out:
spin_unlock(&_minor_lock);
- return r;
+ idr_preload_end();
+ if (r < 0)
+ return r == -ENOSPC ? -EBUSY : r;
+ return 0;
}
static int next_free_minor(int *minor)
{
- int r, m;
-
- r = idr_pre_get(&_minor_idr, GFP_KERNEL);
- if (!r)
- return -ENOMEM;
+ int r;
+ idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
- r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
- if (r)
- goto out;
+ r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
- if (m >= (1 << MINORBITS)) {
- idr_remove(&_minor_idr, m);
- r = -ENOSPC;
- goto out;
- }
-
- *minor = m;
-
-out:
spin_unlock(&_minor_lock);
- return r;
+ idr_preload_end();
+ if (r < 0)
+ return r;
+ *minor = r;
+ return 0;
}
static const struct block_device_operations dm_blk_dops;
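
The two hunks above move from the old idr_pre_get()/idr_get_new() two-step to the newer idr_preload()/idr_alloc() API: preload outside the lock, allocate with GFP_NOWAIT under the spinlock, then end the preload section. A self-contained sketch of that pattern (example_idr and example_lock are illustrative names, not from this patch), mirroring specific_minor():

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

/* Allocate exactly the id 'start' for ptr, or fail if it is taken. */
static int example_alloc_specific(void *ptr, int start)
{
	int r;

	idr_preload(GFP_KERNEL);		/* may sleep: done outside the lock */
	spin_lock(&example_lock);
	r = idr_alloc(&example_idr, ptr, start, start + 1, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;	/* id already in use */
	return r;				/* the allocated id (== start) */
}
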
@@ -1949,8 +1955,6 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
- if (md->tio_pool)
- mempool_destroy(md->tio_pool);
if (md->io_pool)
mempool_destroy(md->io_pool);
if (md->bs)
@@ -1973,24 +1977,33 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
- /*
- * The md already has necessary mempools. Reload just the
- * bioset because front_pad may have changed because
- * a different table was loaded.
- */
- bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ if (md->io_pool && md->bs) {
+ /* The md already has necessary mempools. */
+ if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+ /*
+ * Reload bioset because front_pad may have changed
+ * because a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
+ } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
+ /*
+ * There's no need to reload with request-based dm
+ * because the size of front_pad doesn't change.
+ * Note for future: If you are to reload bioset,
+ * prep-ed requests in the queue may refer
+ * to bio from the old bioset, so you must walk
+ * through the queue to unprep.
+ */
+ }
goto out;
}
- BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
- md->tio_pool = p->tio_pool;
- p->tio_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -2421,7 +2434,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
+ struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
@@ -2444,10 +2457,12 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
dm_table_put(live_map);
}
- r = dm_calculate_queue_limits(table, &limits);
- if (r) {
- map = ERR_PTR(r);
- goto out;
+ if (!live_map) {
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r) {
+ map = ERR_PTR(r);
+ goto out;
+ }
}
map = __bind(md, table, &limits);
@@ -2745,52 +2760,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
- struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
- unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+ struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+ struct kmem_cache *cachep;
+ unsigned int pool_size;
+ unsigned int front_pad;
if (!pools)
return NULL;
- per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+ if (type == DM_TYPE_BIO_BASED) {
+ cachep = _io_cache;
+ pool_size = 16;
+ front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ } else if (type == DM_TYPE_REQUEST_BASED) {
+ cachep = _rq_tio_cache;
+ pool_size = MIN_IOS;
+ front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+ /* per_bio_data_size is not used. See __bind_mempools(). */
+ WARN_ON(per_bio_data_size != 0);
+ } else
+ goto out;
- pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
- mempool_create_slab_pool(MIN_IOS, _io_cache) :
- mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
if (!pools->io_pool)
- goto free_pools_and_out;
-
- pools->tio_pool = NULL;
- if (type == DM_TYPE_REQUEST_BASED) {
- pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
- if (!pools->tio_pool)
- goto free_io_pool_and_out;
- }
+ goto out;
- pools->bs = (type == DM_TYPE_BIO_BASED) ?
- bioset_create(pool_size,
- per_bio_data_size + offsetof(struct dm_target_io, clone)) :
- bioset_create(pool_size,
- offsetof(struct dm_rq_clone_bio_info, clone));
+ pools->bs = bioset_create(pool_size, front_pad);
if (!pools->bs)
- goto free_tio_pool_and_out;
+ goto out;
if (integrity && bioset_integrity_create(pools->bs, pool_size))
- goto free_bioset_and_out;
+ goto out;
return pools;
-free_bioset_and_out:
- bioset_free(pools->bs);
-
-free_tio_pool_and_out:
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
- mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
- kfree(pools);
+out:
+ dm_free_md_mempools(pools);
return NULL;
}
@@ -2803,9 +2808,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (pools->io_pool)
mempool_destroy(pools->io_pool);
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
if (pools->bs)
bioset_free(pools->bs);
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index ceb359050a59..19b268795415 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -1,6 +1,6 @@
config DM_PERSISTENT_DATA
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
select LIBCRC32C
select DM_BUFIO
---help---
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index d8e7cb767c1e..ff528792c358 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
+ dm-array.o \
+ dm-bitset.o \
dm-block-manager.o \
dm-space-map-common.o \
dm-space-map-disk.o \
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
new file mode 100644
index 000000000000..172147eb1d40
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-array.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "array"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The array is implemented as a fully populated btree, which points to
+ * blocks that contain the packed values. This is more space efficient
+ * than just using a btree since we don't store 1 key per value.
+ */
+struct array_block {
+ __le32 csum;
+ __le32 max_entries;
+ __le32 nr_entries;
+ __le32 value_size;
+ __le64 blocknr; /* Block this node is supposed to live in. */
+} __packed;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Validator methods. As usual we calculate a checksum, and also write the
+ * block location into the header (paranoia about ssds remapping areas by
+ * mistake).
+ */
+#define CSUM_XOR 595846735
+
+static void array_block_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+
+ bh_le->blocknr = cpu_to_le64(dm_block_location(b));
+ bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+}
+
+static int array_block_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
+ DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+ (unsigned long long) le64_to_cpu(bh_le->blocknr),
+ (unsigned long long) dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+ if (csum_disk != bh_le->csum) {
+ DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+ (unsigned) le32_to_cpu(csum_disk),
+ (unsigned) le32_to_cpu(bh_le->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator array_validator = {
+ .name = "array",
+ .prepare_for_write = array_block_prepare_for_write,
+ .check = array_block_check
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Functions for manipulating the array blocks.
+ */
+
+/*
+ * Returns a pointer to a value within an array block.
+ *
+ * index - The index into _this_ specific block.
+ */
+static void *element_at(struct dm_array_info *info, struct array_block *ab,
+ unsigned index)
+{
+ unsigned char *entry = (unsigned char *) (ab + 1);
+
+ entry += index * info->value_type.size;
+
+ return entry;
+}
+
+/*
+ * Utility function that calls one of the value_type methods on every value
+ * in an array block.
+ */
+static void on_entries(struct dm_array_info *info, struct array_block *ab,
+ void (*fn)(void *, const void *))
+{
+ unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
+
+ for (i = 0; i < nr_entries; i++)
+ fn(info->value_type.context, element_at(info, ab, i));
+}
+
+/*
+ * Increment every value in an array block.
+ */
+static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->inc)
+ on_entries(info, ab, vt->inc);
+}
+
+/*
+ * Decrement every value in an array block.
+ */
+static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->dec)
+ on_entries(info, ab, vt->dec);
+}
+
+/*
+ * Each array block can hold this many values.
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
+{
+ return (size_of_block - sizeof(struct array_block)) / value_size;
+}
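
For a concrete feel for the packing (an illustration, not part of the patch): with a 4KiB metadata block and 8-byte __le64 values, sizeof(struct array_block) is 24 bytes, so calc_max_entries() gives (4096 - 24) / 8 = 509 values per array block.
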
+
+/*
+ * Allocate a new array block. The caller will need to unlock block.
+ */
+static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
+ if (r)
+ return r;
+
+ (*ab) = dm_block_data(*block);
+ (*ab)->max_entries = cpu_to_le32(max_entries);
+ (*ab)->nr_entries = cpu_to_le32(0);
+ (*ab)->value_size = cpu_to_le32(info->value_type.size);
+
+ return 0;
+}
+
+/*
+ * Pad an array block out with a particular value. Every instance will
+ * cause an increment of the value_type. new_nr must always be more than
+ * the current number of entries.
+ */
+static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+ const void *value, unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i < new_nr; i++) {
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ memcpy(element_at(info, ab, i), value, vt->size);
+ }
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Remove some entries from the back of an array block. Every value
+ * removed will be decremented. new_nr must be <= the current number of
+ * entries.
+ */
+static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+ unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i > new_nr; i--)
+ if (vt->dec)
+ vt->dec(vt->context, element_at(info, ab, i - 1));
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Read locks a block, and coerces it to an array block. The caller must
+ * unlock 'block' when finished.
+ */
+static int get_ablock(struct dm_array_info *info, dm_block_t b,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ return 0;
+}
+
+/*
+ * Unlocks an array block.
+ */
+static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+{
+ return dm_tm_unlock(info->btree_info.tm, block);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Btree manipulation.
+ */
+
+/*
+ * Looks up an array block in the btree, and then read locks it.
+ *
+ * index is the index of the array_block within the btree (ie. the array
+ * index / max_entries).
+ */
+static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r;
+ uint64_t key = index;
+ __le64 block_le;
+
+ r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
+ if (r)
+ return r;
+
+ return get_ablock(info, le64_to_cpu(block_le), block, ab);
+}
+
+/*
+ * Insert an array block into the btree. The block is _not_ unlocked.
+ */
+static int insert_ablock(struct dm_array_info *info, uint64_t index,
+ struct dm_block *block, dm_block_t *root)
+{
+ __le64 block_le = cpu_to_le64(dm_block_location(block));
+
+ __dm_bless_for_disk(block_le);
+ return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
+}
+
+/*
+ * Looks up an array block in the btree. Then shadows it, and updates the
+ * btree to point to this new shadow. 'root' is an input/output parameter
+ * for both the current root block, and the new one.
+ */
+static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r, inc;
+ uint64_t key = index;
+ dm_block_t b;
+ __le64 block_le;
+
+ /*
+ * lookup
+ */
+ r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
+ if (r)
+ return r;
+ b = le64_to_cpu(block_le);
+
+ /*
+ * shadow
+ */
+ r = dm_tm_shadow_block(info->btree_info.tm, b,
+ &array_validator, block, &inc);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ if (inc)
+ inc_ablock_entries(info, *ab);
+
+ /*
+ * Reinsert.
+ *
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+ if (dm_block_location(*block) != b)
+ r = insert_ablock(info, index, *block, root);
+
+ return r;
+}
+
+/*
+ * Allocate a new array block, and fill it with some values.
+ */
+static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ unsigned block_index, uint32_t nr,
+ const void *value, dm_block_t *root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(info, ab, value, nr);
+ r = insert_ablock(info, block_index, block, root);
+ unlock_ablock(info, block);
+
+ return r;
+}
+
+static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+ unsigned begin_block, unsigned end_block,
+ unsigned max_entries, const void *value,
+ dm_block_t *root)
+{
+ int r = 0;
+
+ for (; !r && begin_block != end_block; begin_block++)
+ r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
+
+ return r;
+}
+
+/*
+ * There are a bunch of functions involved with resizing an array. This
+ * structure holds information that is commonly needed by them. Purely here
+ * to reduce parameter count.
+ */
+struct resize {
+ /*
+ * Describes the array.
+ */
+ struct dm_array_info *info;
+
+ /*
+ * The current root of the array. This gets updated.
+ */
+ dm_block_t root;
+
+ /*
+ * Metadata block size. Used to calculate the nr entries in an
+ * array block.
+ */
+ size_t size_of_block;
+
+ /*
+ * Maximum nr entries in an array block.
+ */
+ unsigned max_entries;
+
+ /*
+ * nr of completely full blocks in the array.
+ *
+ * 'old' refers to before the resize, 'new' after.
+ */
+ unsigned old_nr_full_blocks, new_nr_full_blocks;
+
+ /*
+ * Number of entries in the final block. 0 iff only full blocks in
+ * the array.
+ */
+ unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+
+ /*
+ * The default value used when growing the array.
+ */
+ const void *value;
+};
+
+/*
+ * Removes a consecutive set of array blocks from the btree. The values
+ * in the blocks are decremented as a side effect of the btree remove.
+ *
+ * begin_index - the index of the first array block to remove.
+ * end_index - the one-past-the-end value. ie. this block is not removed.
+ */
+static int drop_blocks(struct resize *resize, unsigned begin_index,
+ unsigned end_index)
+{
+ int r;
+
+ while (begin_index != end_index) {
+ uint64_t key = begin_index++;
+ r = dm_btree_remove(&resize->info->btree_info, resize->root,
+ &key, &resize->root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculates how many blocks are needed for the array.
+ */
+static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+ unsigned nr_entries_in_last_block)
+{
+ return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+}
+
+/*
+ * Shrink an array.
+ */
+static int shrink(struct resize *resize)
+{
+ int r;
+ unsigned begin, end;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ /*
+ * Lose some blocks from the back?
+ */
+ if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
+ begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block);
+ end = total_nr_blocks_needed(resize->old_nr_full_blocks,
+ resize->old_nr_entries_in_last_block);
+
+ r = drop_blocks(resize, begin, end);
+ if (r)
+ return r;
+ }
+
+ /*
+ * Trim the new tail block
+ */
+ if (resize->new_nr_entries_in_last_block) {
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->new_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
+ unlock_ablock(resize->info, block);
+ }
+
+ return 0;
+}
+
+/*
+ * Grow an array.
+ */
+static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->old_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(resize->info, ab, resize->value, new_nr_entries);
+ unlock_ablock(resize->info, block);
+
+ return r;
+}
+
+static int grow_add_tail_block(struct resize *resize)
+{
+ return insert_new_ablock(resize->info, resize->size_of_block,
+ resize->max_entries,
+ resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block,
+ resize->value, &resize->root);
+}
+
+static int grow_needs_more_blocks(struct resize *resize)
+{
+ int r;
+
+ if (resize->old_nr_entries_in_last_block > 0) {
+ r = grow_extend_tail_block(resize, resize->max_entries);
+ if (r)
+ return r;
+ }
+
+ r = insert_full_ablocks(resize->info, resize->size_of_block,
+ resize->old_nr_full_blocks,
+ resize->new_nr_full_blocks,
+ resize->max_entries, resize->value,
+ &resize->root);
+ if (r)
+ return r;
+
+ if (resize->new_nr_entries_in_last_block)
+ r = grow_add_tail_block(resize);
+
+ return r;
+}
+
+static int grow(struct resize *resize)
+{
+ if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
+ return grow_needs_more_blocks(resize);
+
+ else if (resize->old_nr_entries_in_last_block)
+ return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
+
+ else
+ return grow_add_tail_block(resize);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * These are the value_type functions for the btree elements, which point
+ * to array blocks.
+ */
+static void block_inc(void *context, const void *value)
+{
+ __le64 block_le;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
+}
+
+static void block_dec(void *context, const void *value)
+{
+ int r;
+ uint64_t b;
+ __le64 block_le;
+ uint32_t ref_count;
+ struct dm_block *block;
+ struct array_block *ab;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ b = le64_to_cpu(block_le);
+
+ r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
+ if (r) {
+ DMERR_LIMIT("couldn't get reference count for block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ if (ref_count == 1) {
+ /*
+ * We're about to drop the last reference to this ablock.
+ * So we need to decrement the ref count of the contents.
+ */
+ r = get_ablock(info, b, &block, &ab);
+ if (r) {
+ DMERR_LIMIT("couldn't get array block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ dec_ablock_entries(info, ab);
+ unlock_ablock(info, block);
+ }
+
+ dm_tm_dec(info->btree_info.tm, b);
+}
+
+static int block_equal(void *context, const void *value1, const void *value2)
+{
+ return !memcmp(value1, value2, sizeof(__le64));
+}
+
+/*----------------------------------------------------------------*/
+
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ struct dm_btree_value_type *bvt = &info->btree_info.value_type;
+
+ memcpy(&info->value_type, vt, sizeof(info->value_type));
+ info->btree_info.tm = tm;
+ info->btree_info.levels = 1;
+
+ bvt->context = info;
+ bvt->size = sizeof(__le64);
+ bvt->inc = block_inc;
+ bvt->dec = block_dec;
+ bvt->equal = block_equal;
+}
+EXPORT_SYMBOL_GPL(dm_array_info_init);
+
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
+{
+ return dm_btree_empty(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_empty);
+
+static int array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct resize resize;
+
+ if (old_size == new_size)
+ return 0;
+
+ resize.info = info;
+ resize.root = root;
+ resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ resize.max_entries = calc_max_entries(info->value_type.size,
+ resize.size_of_block);
+
+ resize.old_nr_full_blocks = old_size / resize.max_entries;
+ resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
+ resize.new_nr_full_blocks = new_size / resize.max_entries;
+ resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
+ resize.value = value;
+
+ r = ((new_size > old_size) ? grow : shrink)(&resize);
+ if (r)
+ return r;
+
+ *new_root = resize.root;
+ return 0;
+}
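
A quick worked example of how these fields drive grow()/shrink() (illustrative numbers, assuming max_entries = 509 as computed above): resizing from 1000 to 1010 entries gives old_nr_full_blocks = new_nr_full_blocks = 1 with the tail block growing from 491 to 501 entries, so grow() takes the grow_extend_tail_block() path; resizing from 1010 back down to 1000 takes the shrink() path, which skips drop_blocks() and simply trims the tail block with trim_ablock().
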
+
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r = array_resize(info, root, old_size, new_size, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_resize);
+
+int dm_array_del(struct dm_array_info *info, dm_block_t root)
+{
+ return dm_btree_del(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_del);
+
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value_le)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned entry, max_entries;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = lookup_ablock(info, root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries))
+ r = -ENODATA;
+ else
+ memcpy(value_le, element_at(info, ab, entry),
+ info->value_type.size);
+
+ unlock_ablock(info, block);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_get_value);
+
+static int array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned max_entries;
+ unsigned entry;
+ void *old_value;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+ *new_root = root;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries)) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ old_value = element_at(info, ab, entry);
+ if (vt->dec &&
+ (!vt->equal || !vt->equal(vt->context, old_value, value))) {
+ vt->dec(vt->context, old_value);
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ }
+
+ memcpy(old_value, value, info->value_type.size);
+
+out:
+ unlock_ablock(info, block);
+ return r;
+}
+
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r;
+
+ r = array_set_value(info, root, index, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_set_value);
+
+struct walk_info {
+ struct dm_array_info *info;
+ int (*fn)(void *context, uint64_t key, void *leaf);
+ void *context;
+};
+
+static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+{
+ struct walk_info *wi = context;
+
+ int r;
+ unsigned i;
+ __le64 block_le;
+ unsigned nr_entries, max_entries;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ memcpy(&block_le, leaf, sizeof(block_le));
+ r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
+ if (r)
+ return r;
+
+ max_entries = le32_to_cpu(ab->max_entries);
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = 0; i < nr_entries; i++) {
+ r = wi->fn(wi->context, keys[0] * max_entries + i,
+ element_at(wi->info, ab, i));
+
+ if (r)
+ break;
+ }
+
+ unlock_ablock(wi->info, block);
+ return r;
+}
+
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *, uint64_t key, void *leaf),
+ void *context)
+{
+ struct walk_info wi;
+
+ wi.info = info;
+ wi.fn = fn;
+ wi.context = context;
+
+ return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
+}
+EXPORT_SYMBOL_GPL(dm_array_walk);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
new file mode 100644
index 000000000000..ea177d6fa58f
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_ARRAY_H
+#define _LINUX_DM_ARRAY_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The dm-array is a persistent version of an array. It packs the data
+ * more efficiently than a btree which will result in less disk space use,
+ * and a performance boost. The element get and set operations are still
+ * O(ln(n)), but with a much smaller constant.
+ *
+ * The value type structure is reused from the btree type to support proper
+ * reference counting of values.
+ *
+ * The arrays implicitly know their length, and bounds are checked for
+ * lookups and updates. It doesn't store this in an accessible place
+ * because it would waste a whole metadata block. Make sure you store the
+ * size along with the array root in your encompassing data.
+ *
+ * Array entries are indexed via an unsigned integer starting from zero.
+ * Arrays are not sparse; if you resize an array to have 'n' entries then
+ * 'n - 1' will be the last valid index.
+ *
+ * Typical use:
+ *
+ * a) initialise a dm_array_info structure. This describes the array
+ * values and ties it into a specific transaction manager. It holds no
+ * instance data; the same info can be used for many similar arrays if
+ * you wish.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ * disk that holds a particular instance of an array. You may have a
+ * pre-existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty array with dm_array_empty().
+ *
+ * Like the other data structures in this library, dm_array objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ array. If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * c) resize an array with dm_array_resize().
+ *
+ * d) Get a value from the array with dm_array_get_value().
+ *
+ * e) Set a value in the array with dm_array_set_value().
+ *
+ * f) Walk an array of values in index order with dm_array_walk(). More
+ * efficient than making many calls to dm_array_get_value().
+ *
+ * g) Destroy the array with dm_array_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ * recycle its blocks. (dm_array_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Describes an array. Don't initialise this structure yourself, use the
+ * init function below.
+ */
+struct dm_array_info {
+ struct dm_transaction_manager *tm;
+ struct dm_btree_value_type value_type;
+ struct dm_btree_info btree_info;
+};
+
+/*
+ * Sets up a dm_array_info structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * info - the structure being filled in.
+ * tm - the transaction manager that should supervise this structure.
+ * vt - describes the leaf values.
+ */
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
+/*
+ * Create an empty, zero length array.
+ *
+ * info - describes the array
+ * root - on success this will be filled out with the root block
+ */
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root);
+
+/*
+ * Resizes the array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * old_size - the caller is responsible for remembering the size of
+ * the array
+ * new_size - can be bigger or smaller than old_size
+ * value - if we're growing the array the new entries will have this value
+ * new_root - on success, points to the new root block
+ *
+ * If growing, the inc function for 'value' will be called the appropriate
+ * number of times, so if the caller is holding a reference they may want
+ * to drop it.
+ */
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Frees a whole array. The value_type's decrement operation will be called
+ * for all values in the array.
+ */
+int dm_array_del(struct dm_array_info *info, dm_block_t root);
+
+/*
+ * Look up a value in the array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - the value to be read. Will be in on-disk format of course.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value);
+
+/*
+ * Set an entry in the array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - value to be written to disk. Make sure the value is in on-disk
+ * format and bless it with __dm_bless_for_disk() before calling.
+ * new_root - the new root block
+ *
+ * The old value being overwritten will be decremented, the new value
+ * incremented.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Walk through all the entries in an array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * fn - called back for every element
+ * context - passed to the callback
+ */
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t key, void *leaf),
+ void *context);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_ARRAY_H */
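The interface above reduces to a short sequence of calls in practice. The following is a hypothetical usage sketch, not part of the patch: the function names are illustrative, a transaction manager is assumed to already be in hand, and the caller records the array size in its own metadata as the header requires.

#include "dm-array.h"

/* Illustrative sketch only, not from the patch: an array of __le64 counters. */
static int count_entries(void *context, uint64_t key, void *leaf)
{
	unsigned *nr = context;

	(*nr)++;		/* leaf points at a __le64 in on-disk format */
	return 0;
}

static int example_array_client(struct dm_transaction_manager *tm,
				dm_block_t *root, uint32_t *size)
{
	int r;
	unsigned nr = 0;
	__le64 value, default_value = cpu_to_le64(0);
	struct dm_array_info info;
	struct dm_btree_value_type vt = {
		.context = NULL,
		.size = sizeof(__le64),
		.inc = NULL,	/* plain values, no reference counting */
		.dec = NULL,
		.equal = NULL,
	};

	/* a) describe the array; the info carries no instance data */
	dm_array_info_init(&info, tm, &vt);

	/* b) start from a brand new, empty array */
	r = dm_array_empty(&info, root);
	if (r)
		return r;

	/* c) grow to 128 entries, each initialised to the default value */
	__dm_bless_for_disk(&default_value);
	r = dm_array_resize(&info, *root, 0, 128, &default_value, root);
	if (r)
		return r;
	*size = 128;

	/* d) read an entry; -ENODATA would mean the index was out of bounds */
	r = dm_array_get_value(&info, *root, 42, &value);
	if (r)
		return r;

	/* e) overwrite it; values handed to the array must be blessed */
	value = cpu_to_le64(le64_to_cpu(value) + 1);
	__dm_bless_for_disk(&value);
	r = dm_array_set_value(&info, *root, 42, &value, root);
	if (r)
		return r;

	/* f) walk the whole array in index order */
	return dm_array_walk(&info, *root, count_entries, &nr);
}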
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
new file mode 100644
index 000000000000..cd9a86d4cdf0
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-bitset.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "bitset"
+#define BITS_PER_ARRAY_ENTRY 64
+
+/*----------------------------------------------------------------*/
+
+static struct dm_btree_value_type bitset_bvt = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL,
+};
+
+/*----------------------------------------------------------------*/
+
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info)
+{
+ dm_array_info_init(&info->array_info, tm, &bitset_bvt);
+ info->current_index_set = false;
+}
+EXPORT_SYMBOL_GPL(dm_disk_bitset_init);
+
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
+{
+ return dm_array_empty(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_empty);
+
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root)
+{
+ uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY);
+ uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY);
+ __le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0);
+
+ __dm_bless_for_disk(&value);
+ return dm_array_resize(&info->array_info, root, old_blocks, new_blocks,
+ &value, new_root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_resize);
+
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root)
+{
+ return dm_array_del(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_del);
+
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root)
+{
+ int r;
+ __le64 value;
+
+ if (!info->current_index_set)
+ return 0;
+
+ value = cpu_to_le64(info->current_bits);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&info->array_info, root, info->current_index,
+ &value, new_root);
+ if (r)
+ return r;
+
+ info->current_index_set = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_flush);
+
+static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t array_index)
+{
+ int r;
+ __le64 value;
+
+ r = dm_array_get_value(&info->array_info, root, array_index, &value);
+ if (r)
+ return r;
+
+ info->current_bits = le64_to_cpu(value);
+ info->current_index_set = true;
+ info->current_index = array_index;
+ return 0;
+}
+
+static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+
+ if (info->current_index_set) {
+ if (info->current_index == array_index)
+ return 0;
+
+ r = dm_bitset_flush(info, root, new_root);
+ if (r)
+ return r;
+ }
+
+ return read_bits(info, root, array_index);
+}
+
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ set_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
+
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ clear_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
+
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ *result = test_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_test_bit);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
new file mode 100644
index 000000000000..e1b9bea14aa1
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_BITSET_H
+#define _LINUX_DM_BITSET_H
+
+#include "dm-array.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This bitset type is a thin wrapper around a dm_array of 64-bit words. It
+ * uses a tiny, one word cache to reduce the number of array lookups and so
+ * increase performance.
+ *
+ * Like the dm-array that it's based on, the caller needs to keep track of
+ * the size of the bitset separately. The underlying dm-array implicitly
+ * knows how many words it's storing and will return -ENODATA if you try
+ * to access an out-of-bounds word. However, an out-of-bounds bit in the
+ * final word will _not_ be detected; you have been warned.
+ *
+ * Bits are indexed from zero.
+ *
+ * Typical use:
+ *
+ * a) Initialise a dm_disk_bitset structure with dm_disk_bitset_init().
+ * This describes the bitset and includes the cache. It's not called
+ * dm_bitset_info, in line with the other data structures, because it does
+ * include instance data.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ * disk that holds a particular instance of a bitset. You may have a
+ * pre-existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty bitset with dm_bitset_empty().
+ *
+ * Like the other data structures in this library, dm_bitset objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ bitset. If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * Even read operations may trigger the cache to be flushed and as such
+ * return a root for a new, updated bitset.
+ *
+ * c) Resize a bitset with dm_bitset_resize().
+ *
+ * d) Set a bit with dm_bitset_set_bit().
+ *
+ * e) Clear a bit with dm_bitset_clear_bit().
+ *
+ * f) Test a bit with dm_bitset_test_bit().
+ *
+ * g) Flush all updates from the cache with dm_bitset_flush().
+ *
+ * h) Destroy the bitset with dm_bitset_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ * recycle its blocks. (dm_bitset_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Opaque object. Unlike dm_array_info, you should have one of these per
+ * bitset. Initialise with dm_disk_bitset_init().
+ */
+struct dm_disk_bitset {
+ struct dm_array_info array_info;
+
+ uint32_t current_index;
+ uint64_t current_bits;
+
+ bool current_index_set:1;
+};
+
+/*
+ * Sets up a dm_disk_bitset structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * tm - the transaction manager that should supervise this structure
+ * info - the structure being initialised
+ */
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info);
+
+/*
+ * Create an empty, zero-length bitset.
+ *
+ * info - describes the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *new_root);
+
+/*
+ * Resize the bitset.
+ *
+ * info - describes the bitset
+ * old_root - the root block of the bitset on disk
+ * old_nr_entries - the number of bits in the old bitset
+ * new_nr_entries - the number of bits you want in the new bitset
+ * default_value - the value for any new bits
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t old_root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root);
+
+/*
+ * Frees the bitset.
+ */
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root);
+
+/*
+ * Set a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Clears a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Tests a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block (cached values may have been written)
+ * result - the bit value you're after
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result);
+
+/*
+ * Flush any cached changes to disk.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_BITSET_H */
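A similar hypothetical sketch for the bitset interface above, again not part of the patch and using illustrative names: the same root-threading pattern applies, the one difference being the explicit flush of the one-word cache. Bit index 7 is arbitrary and assumes nr_bits is at least 8.

#include "dm-bitset.h"

/* Illustrative sketch only, not from the patch. */
static int example_bitset_client(struct dm_transaction_manager *tm,
				 dm_block_t *root, uint32_t nr_bits)
{
	int r;
	bool set;
	struct dm_disk_bitset bits;

	/* a) one dm_disk_bitset per bitset; it holds the one-word cache */
	dm_disk_bitset_init(tm, &bits);

	/* b) create a brand new, empty bitset */
	r = dm_bitset_empty(&bits, root);
	if (r)
		return r;

	/* c) grow from 0 to nr_bits, with every new bit cleared */
	r = dm_bitset_resize(&bits, *root, 0, nr_bits, false, root);
	if (r)
		return r;

	/* d) set a bit; the root may change because the cache can spill */
	r = dm_bitset_set_bit(&bits, *root, 7, root);
	if (r)
		return r;

	/* f) even a test takes a new_root argument for the same reason */
	r = dm_bitset_test_bit(&bits, *root, 7, root, &set);
	if (r)
		return r;

	/* g) nothing is durable until the cached word has been flushed */
	return dm_bitset_flush(&bits, *root, root);
}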
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 28c3ed072a79..81b513890e2b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -613,6 +613,7 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
return dm_bufio_write_dirty_buffers(bm->bufio);
}
+EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index accbb05f17b6..37d367bb9aa8 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -64,6 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
+void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index f199a0c4ed04..cf9fd676ae44 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -164,6 +164,13 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
+void ro_pop(struct ro_spine *s)
+{
+ BUG_ON(!s->count);
+ --s->count;
+ unlock_block(s->info, s->nodes[s->count]);
+}
+
struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 4caf66918cdb..35865425e4b4 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -807,3 +807,55 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
return r ? r : count;
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+
+/*
+ * FIXME: We shouldn't use a recursive algorithm when we have limited stack
+ * space. Also, this only works for single-level trees.
+ */
+static int walk_node(struct ro_spine *s, dm_block_t block,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ unsigned i, nr;
+ struct btree_node *n;
+ uint64_t keys;
+
+ r = ro_step(s, block);
+ n = ro_node(s);
+
+ nr = le32_to_cpu(n->header.nr_entries);
+ for (i = 0; i < nr; i++) {
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
+ r = walk_node(s, value64(n, i), fn, context);
+ if (r)
+ goto out;
+ } else {
+ keys = le64_to_cpu(*key_ptr(n, i));
+ r = fn(context, &keys, value_ptr(n, i));
+ if (r)
+ goto out;
+ }
+ }
+
+out:
+ ro_pop(s);
+ return r;
+}
+
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ struct ro_spine spine;
+
+ BUG_ON(info->levels > 1);
+
+ init_ro_spine(&spine, info);
+ r = walk_node(&spine, root, fn, context);
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_walk);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index a2cd50441ca1..8672d159e0b5 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -58,21 +58,21 @@ struct dm_btree_value_type {
* somewhere.) This method is _not_ called for insertion of a new
* value: It is assumed the ref count is already 1.
*/
- void (*inc)(void *context, void *value);
+ void (*inc)(void *context, const void *value);
/*
* This value is being deleted. The btree takes care of freeing
* the memory pointed to by @value. Often the del function just
* needs to decrement a reference count somewhere.
*/
- void (*dec)(void *context, void *value);
+ void (*dec)(void *context, const void *value);
/*
* A test for equality between two values. When a value is
* overwritten with a new one, the old one has the dec method
* called _unless_ the new and old value are deemed equal.
*/
- int (*equal)(void *context, void *value1, void *value2);
+ int (*equal)(void *context, const void *value1, const void *value2);
};
/*
@@ -142,4 +142,13 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
uint64_t *result_keys);
+/*
+ * Iterate through a btree, calling fn() on each entry.
+ * It only works for single-level trees and is internally recursive, so
+ * monitor stack usage carefully.
+ */
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context);
+
#endif /* _LINUX_DM_BTREE_H */
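A small hypothetical illustration of the new dm_btree_walk(), not part of the patch: counting the entries of a single-level btree, which shows everything the iterator needs from its caller.

#include "dm-btree.h"

/*
 * Illustrative sketch only, not from the patch.
 * The callback is invoked once per entry; returning non-zero aborts the walk.
 */
static int count_one(void *context, uint64_t *keys, void *leaf)
{
	unsigned *nr = context;

	(*nr)++;
	return 0;
}

static int example_count_entries(struct dm_btree_info *info, dm_block_t root,
				 unsigned *nr)
{
	*nr = 0;
	return dm_btree_walk(info, root, count_one, nr);
}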
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 7b17a1fdeaf9..81da1a26042e 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -46,10 +46,9 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
int r = 0;
unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
- struct hlist_node *n;
spin_lock(&tm->lock);
- hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ hlist_for_each_entry(si, tm->buckets + bucket, hlist)
if (si->where == b) {
r = 1;
break;
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
struct shadow_info *si;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
struct hlist_head *bucket;
int i;
spin_lock(&tm->lock);
for (i = 0; i < DM_HASH_SIZE; i++) {
bucket = tm->buckets + i;
- hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ hlist_for_each_entry_safe(si, tmp, bucket, hlist)
kfree(si);
INIT_HLIST_HEAD(bucket);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 19d77a026639..5af2d2709081 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -365,10 +363,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
short generation)
{
struct stripe_head *sh;
- struct hlist_node *hn;
pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
- hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+ hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
if (sh->sector == sector && sh->generation == generation)
return sh;
pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -3917,8 +3914,6 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
- raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4377,8 +4372,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
}
}
@@ -4755,11 +4748,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0) {
- trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
- raid_bio, 0);
+ if (remaining == 0)
bio_endio(raid_bio, 0);
- }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8567a7a64104..7f5a7cac6dc7 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -134,6 +134,12 @@ config DVB_NET
You may want to disable the network support on embedded devices. If
unsure say Y.
+# This Kconfig option is used by both PCI and USB drivers
+config TTPCI_EEPROM
+ tristate
+ depends on I2C
+ default n
+
source "drivers/media/dvb-core/Kconfig"
comment "Media drivers"
@@ -157,17 +163,20 @@ source "drivers/media/firewire/Kconfig"
# Common driver options
source "drivers/media/common/Kconfig"
+comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
+
#
# Ancillary drivers (tuners, i2c, frontends)
#
config MEDIA_SUBDRV_AUTOSELECT
- bool "Autoselect tuners and i2c modules to build"
+ bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
default y
help
- By default, a media driver auto-selects all possible i2c
- devices that are used by any of the supported devices.
+ By default, a media driver auto-selects all possible ancillary
+ devices such as tuners, sensors, video encoders/decoders and
+ frontends that are used by any of the supported devices.
This is generally the right thing to do, except when there
are strict constraints with regards to the kernel size,
@@ -176,12 +185,10 @@ config MEDIA_SUBDRV_AUTOSELECT
Use this option with care, as deselecting ancillary drivers which
are, in fact, necessary will result in the lack of the needed
functionality for your device (it may not tune or may not have
- the need demodulers).
+ the needed demodulators).
If unsure say Y.
-comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
-
source "drivers/media/i2c/Kconfig"
source "drivers/media/tuners/Kconfig"
source "drivers/media/dvb-frontends/Kconfig"
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index d2a436ce77f8..56c25e6299e9 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -5,6 +5,17 @@ config MEDIA_COMMON_OPTIONS
comment "common driver options"
depends on MEDIA_COMMON_OPTIONS
+config VIDEO_CX2341X
+ tristate
+
+config VIDEO_BTCX
+ depends on PCI
+ tristate
+
+config VIDEO_TVEEPROM
+ tristate
+ depends on I2C
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index b8e2e3a33a31..8f8d18755d15 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1 +1,4 @@
obj-y += b2c2/ saa7146/ siano/
+obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
+obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
diff --git a/drivers/media/i2c/btcx-risc.c b/drivers/media/common/btcx-risc.c
index ac1b2687a20d..ac1b2687a20d 100644
--- a/drivers/media/i2c/btcx-risc.c
+++ b/drivers/media/common/btcx-risc.c
diff --git a/drivers/media/i2c/btcx-risc.h b/drivers/media/common/btcx-risc.h
index f8bc6e8e7b51..f8bc6e8e7b51 100644
--- a/drivers/media/i2c/btcx-risc.h
+++ b/drivers/media/common/btcx-risc.h
diff --git a/drivers/media/i2c/cx2341x.c b/drivers/media/common/cx2341x.c
index 103ef6bad2e2..103ef6bad2e2 100644
--- a/drivers/media/i2c/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index b3890bd49df6..eda01bc68ab2 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -105,7 +105,7 @@ void saa7146_buffer_finish(struct saa7146_dev *dev,
}
q->curr->vb.state = state;
- do_gettimeofday(&q->curr->vb.ts);
+ v4l2_get_timestamp(&q->curr->vb.ts);
wake_up(&q->curr->vb.done);
q->curr = NULL;
@@ -265,8 +265,7 @@ static int fops_release(struct file *file)
DEB_EE("file:%p\n", file);
- if (mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
+ mutex_lock(vdev->lock);
if (vdev->vfl_type == VFL_TYPE_VBI) {
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
diff --git a/drivers/media/i2c/tveeprom.c b/drivers/media/common/tveeprom.c
index 3b6cf034976a..cc1e172dfece 100644
--- a/drivers/media/i2c/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -96,170 +96,170 @@ static struct HAUPPAUGE_TUNER
hauppauge_tuner[] =
{
/* 0-9 */
- { TUNER_ABSENT, "None" },
- { TUNER_ABSENT, "External" },
- { TUNER_ABSENT, "Unspecified" },
- { TUNER_PHILIPS_PAL, "Philips FI1216" },
- { TUNER_PHILIPS_SECAM, "Philips FI1216MF" },
- { TUNER_PHILIPS_NTSC, "Philips FI1236" },
- { TUNER_PHILIPS_PAL_I, "Philips FI1246" },
+ { TUNER_ABSENT, "None" },
+ { TUNER_ABSENT, "External" },
+ { TUNER_ABSENT, "Unspecified" },
+ { TUNER_PHILIPS_PAL, "Philips FI1216" },
+ { TUNER_PHILIPS_SECAM, "Philips FI1216MF" },
+ { TUNER_PHILIPS_NTSC, "Philips FI1236" },
+ { TUNER_PHILIPS_PAL_I, "Philips FI1246" },
{ TUNER_PHILIPS_PAL_DK, "Philips FI1256" },
- { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" },
- { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" },
+ { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" },
+ { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" },
/* 10-19 */
- { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" },
- { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" },
+ { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" },
+ { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" },
{ TUNER_PHILIPS_PAL_DK, "Philips FI1256 MK2" },
- { TUNER_TEMIC_NTSC, "Temic 4032FY5" },
- { TUNER_TEMIC_PAL, "Temic 4002FH5" },
- { TUNER_TEMIC_PAL_I, "Temic 4062FY5" },
- { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" },
- { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" },
- { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" },
- { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" },
+ { TUNER_TEMIC_NTSC, "Temic 4032FY5" },
+ { TUNER_TEMIC_PAL, "Temic 4002FH5" },
+ { TUNER_TEMIC_PAL_I, "Temic 4062FY5" },
+ { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" },
+ { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" },
+ { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" },
+ { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" },
/* 20-29 */
{ TUNER_PHILIPS_PAL_DK, "Philips FR1256 MK2" },
- { TUNER_PHILIPS_PAL, "Philips FM1216" },
- { TUNER_PHILIPS_SECAM, "Philips FM1216MF" },
- { TUNER_PHILIPS_NTSC, "Philips FM1236" },
- { TUNER_PHILIPS_PAL_I, "Philips FM1246" },
+ { TUNER_PHILIPS_PAL, "Philips FM1216" },
+ { TUNER_PHILIPS_SECAM, "Philips FM1216MF" },
+ { TUNER_PHILIPS_NTSC, "Philips FM1236" },
+ { TUNER_PHILIPS_PAL_I, "Philips FM1246" },
{ TUNER_PHILIPS_PAL_DK, "Philips FM1256" },
- { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" },
- { TUNER_ABSENT, "Samsung TCPN9082D" },
- { TUNER_ABSENT, "Samsung TCPM9092P" },
- { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" },
+ { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" },
+ { TUNER_ABSENT, "Samsung TCPN9082D" },
+ { TUNER_ABSENT, "Samsung TCPM9092P" },
+ { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" },
/* 30-39 */
- { TUNER_ABSENT, "Samsung TCPN9085D" },
- { TUNER_ABSENT, "Samsung TCPB9085P" },
- { TUNER_ABSENT, "Samsung TCPL9091P" },
- { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" },
- { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" },
- { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" },
- { TUNER_PHILIPS_NTSC, "Philips TD1536" },
- { TUNER_PHILIPS_NTSC, "Philips TD1536D" },
- { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */
- { TUNER_ABSENT, "Philips FI1256MP" },
+ { TUNER_ABSENT, "Samsung TCPN9085D" },
+ { TUNER_ABSENT, "Samsung TCPB9085P" },
+ { TUNER_ABSENT, "Samsung TCPL9091P" },
+ { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" },
+ { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" },
+ { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" },
+ { TUNER_PHILIPS_NTSC, "Philips TD1536" },
+ { TUNER_PHILIPS_NTSC, "Philips TD1536D" },
+ { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */
+ { TUNER_ABSENT, "Philips FI1256MP" },
/* 40-49 */
- { TUNER_ABSENT, "Samsung TCPQ9091P" },
- { TUNER_TEMIC_4006FN5_MULTI_PAL, "Temic 4006FN5" },
- { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" },
- { TUNER_TEMIC_4046FM5, "Temic 4046FM5" },
+ { TUNER_ABSENT, "Samsung TCPQ9091P" },
+ { TUNER_TEMIC_4006FN5_MULTI_PAL,"Temic 4006FN5" },
+ { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" },
+ { TUNER_TEMIC_4046FM5, "Temic 4046FM5" },
{ TUNER_TEMIC_4009FN5_MULTI_PAL_FM, "Temic 4009FN5" },
- { TUNER_ABSENT, "Philips TD1536D FH 44"},
- { TUNER_LG_NTSC_FM, "LG TP18NSR01F"},
- { TUNER_LG_PAL_FM, "LG TP18PSB01D"},
- { TUNER_LG_PAL, "LG TP18PSB11D"},
- { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"},
+ { TUNER_ABSENT, "Philips TD1536D FH 44"},
+ { TUNER_LG_NTSC_FM, "LG TP18NSR01F"},
+ { TUNER_LG_PAL_FM, "LG TP18PSB01D"},
+ { TUNER_LG_PAL, "LG TP18PSB11D"},
+ { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"},
/* 50-59 */
- { TUNER_LG_PAL_I, "LG TAPC-I701D"},
- { TUNER_ABSENT, "Temic 4042FI5"},
- { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"},
- { TUNER_ABSENT, "LG TPI8NSR11F"},
- { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"},
- { TUNER_ABSENT, "Philips FI1236 MK3"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"},
- { TUNER_ABSENT, "Philips FM1216MP MK3"},
+ { TUNER_LG_PAL_I, "LG TAPC-I701D"},
+ { TUNER_ABSENT, "Temic 4042FI5"},
+ { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"},
+ { TUNER_ABSENT, "LG TPI8NSR11F"},
+ { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"},
+ { TUNER_ABSENT, "Philips FI1236 MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"},
+ { TUNER_ABSENT, "Philips FM1216MP MK3"},
/* 60-69 */
- { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"},
- { TUNER_ABSENT, "LG M001D MK3"},
- { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"},
- { TUNER_ABSENT, "LG M701D MK3"},
- { TUNER_ABSENT, "Temic 4146FM5"},
- { TUNER_ABSENT, "Temic 4136FY5"},
- { TUNER_ABSENT, "Temic 4106FH5"},
- { TUNER_ABSENT, "Philips FQ1216LMP MK3"},
- { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"},
- { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"},
+ { TUNER_ABSENT, "LG M001D MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"},
+ { TUNER_ABSENT, "LG M701D MK3"},
+ { TUNER_ABSENT, "Temic 4146FM5"},
+ { TUNER_ABSENT, "Temic 4136FY5"},
+ { TUNER_ABSENT, "Temic 4106FH5"},
+ { TUNER_ABSENT, "Philips FQ1216LMP MK3"},
+ { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"},
+ { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"},
/* 70-79 */
- { TUNER_ABSENT, "LG TALN H200T"},
- { TUNER_ABSENT, "LG TALN H250T"},
- { TUNER_ABSENT, "LG TALN M200T"},
- { TUNER_ABSENT, "LG TALN Z200T"},
- { TUNER_ABSENT, "LG TALN S200T"},
- { TUNER_ABSENT, "Thompson DTT7595"},
- { TUNER_ABSENT, "Thompson DTT7592"},
- { TUNER_ABSENT, "Silicon TDA8275C1 8290"},
- { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
- { TUNER_ABSENT, "Thompson DTT757"},
+ { TUNER_ABSENT, "LG TALN H200T"},
+ { TUNER_ABSENT, "LG TALN H250T"},
+ { TUNER_ABSENT, "LG TALN M200T"},
+ { TUNER_ABSENT, "LG TALN Z200T"},
+ { TUNER_ABSENT, "LG TALN S200T"},
+ { TUNER_ABSENT, "Thompson DTT7595"},
+ { TUNER_ABSENT, "Thompson DTT7592"},
+ { TUNER_ABSENT, "Silicon TDA8275C1 8290"},
+ { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
+ { TUNER_ABSENT, "Thompson DTT757"},
/* 80-89 */
- { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
- { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
- { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
- { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
- { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"},
- { TUNER_TCL_2002N, "TCL 2002N 6A"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"},
- { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"},
- { TUNER_ABSENT, "Samsung TCPE 4121P30A"},
- { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"},
+ { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
+ { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
+ { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
+ { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
+ { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"},
+ { TUNER_TCL_2002N, "TCL 2002N 6A"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"},
+ { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"},
+ { TUNER_ABSENT, "Samsung TCPE 4121P30A"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"},
/* 90-99 */
- { TUNER_ABSENT, "LG TALN H202T"},
- { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"},
- { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"},
- { TUNER_ABSENT, "Philips FQ1286A MK4"},
- { TUNER_ABSENT, "Philips FQ1216ME MK5"},
- { TUNER_ABSENT, "Philips FQ1236 MK5"},
- { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"},
- { TUNER_TCL_2002MB, "TCL 2002MB_3H"},
- { TUNER_ABSENT, "TCL 2002MI_3H"},
- { TUNER_TCL_2002N, "TCL 2002N 5H"},
+ { TUNER_ABSENT, "LG TALN H202T"},
+ { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"},
+ { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"},
+ { TUNER_ABSENT, "Philips FQ1286A MK4"},
+ { TUNER_ABSENT, "Philips FQ1216ME MK5"},
+ { TUNER_ABSENT, "Philips FQ1236 MK5"},
+ { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"},
+ { TUNER_TCL_2002MB, "TCL 2002MB_3H"},
+ { TUNER_ABSENT, "TCL 2002MI_3H"},
+ { TUNER_TCL_2002N, "TCL 2002N 5H"},
/* 100-109 */
- { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"},
- { TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
- { TUNER_ABSENT, "Panasonic ENV57H12D5"},
- { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
+ { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"},
+ { TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
+ { TUNER_ABSENT, "Panasonic ENV57H12D5"},
+ { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
{ TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"},
- { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
- { TUNER_ABSENT, "TCL MQNM05-4"},
- { TUNER_ABSENT, "LG TAPC-W701D"},
- { TUNER_ABSENT, "TCL 9886P-WM"},
- { TUNER_ABSENT, "TCL 1676NM-WM"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
+ { TUNER_ABSENT, "TCL MQNM05-4"},
+ { TUNER_ABSENT, "LG TAPC-W701D"},
+ { TUNER_ABSENT, "TCL 9886P-WM"},
+ { TUNER_ABSENT, "TCL 1676NM-WM"},
/* 110-119 */
- { TUNER_ABSENT, "Thompson DTT75105"},
- { TUNER_ABSENT, "Conexant_CX24109"},
- { TUNER_TCL_2002N, "TCL M2523_5N_E"},
- { TUNER_TCL_2002MB, "TCL M2523_3DB_E"},
- { TUNER_ABSENT, "Philips 8275A"},
- { TUNER_ABSENT, "Microtune MT2060"},
- { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"},
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"},
- { TUNER_ABSENT, "TCL M2523_3DI_E"},
- { TUNER_ABSENT, "Samsung THPD5222FG30A"},
+ { TUNER_ABSENT, "Thompson DTT75105"},
+ { TUNER_ABSENT, "Conexant_CX24109"},
+ { TUNER_TCL_2002N, "TCL M2523_5N_E"},
+ { TUNER_TCL_2002MB, "TCL M2523_3DB_E"},
+ { TUNER_ABSENT, "Philips 8275A"},
+ { TUNER_ABSENT, "Microtune MT2060"},
+ { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"},
+ { TUNER_ABSENT, "TCL M2523_3DI_E"},
+ { TUNER_ABSENT, "Samsung THPD5222FG30A"},
/* 120-129 */
- { TUNER_XC2028, "Xceive XC3028"},
+ { TUNER_XC2028, "Xceive XC3028"},
{ TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"},
- { TUNER_ABSENT, "Philips FQD1216LME"},
- { TUNER_ABSENT, "Conexant CX24118A"},
- { TUNER_ABSENT, "TCL DMF11WIP"},
- { TUNER_ABSENT, "TCL MFNM05_4H_E"},
- { TUNER_ABSENT, "TCL MNM05_4H_E"},
- { TUNER_ABSENT, "TCL MPE05_2H_E"},
- { TUNER_ABSENT, "TCL MQNM05_4_U"},
- { TUNER_ABSENT, "TCL M2523_5NH_E"},
+ { TUNER_ABSENT, "Philips FQD1216LME"},
+ { TUNER_ABSENT, "Conexant CX24118A"},
+ { TUNER_ABSENT, "TCL DMF11WIP"},
+ { TUNER_ABSENT, "TCL MFNM05_4H_E"},
+ { TUNER_ABSENT, "TCL MNM05_4H_E"},
+ { TUNER_ABSENT, "TCL MPE05_2H_E"},
+ { TUNER_ABSENT, "TCL MQNM05_4_U"},
+ { TUNER_ABSENT, "TCL M2523_5NH_E"},
/* 130-139 */
- { TUNER_ABSENT, "TCL M2523_3DBH_E"},
- { TUNER_ABSENT, "TCL M2523_3DIH_E"},
- { TUNER_ABSENT, "TCL MFPE05_2_U"},
+ { TUNER_ABSENT, "TCL M2523_3DBH_E"},
+ { TUNER_ABSENT, "TCL M2523_3DIH_E"},
+ { TUNER_ABSENT, "TCL MFPE05_2_U"},
{ TUNER_PHILIPS_FMD1216MEX_MK3, "Philips FMD1216MEX"},
- { TUNER_ABSENT, "Philips FRH2036B"},
- { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
- { TUNER_ABSENT, "MaxLinear MXL5005"},
- { TUNER_ABSENT, "MaxLinear MXL5003"},
- { TUNER_ABSENT, "Xceive XC2028"},
- { TUNER_ABSENT, "Microtune MT2131"},
+ { TUNER_ABSENT, "Philips FRH2036B"},
+ { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
+ { TUNER_ABSENT, "MaxLinear MXL5005"},
+ { TUNER_ABSENT, "MaxLinear MXL5003"},
+ { TUNER_ABSENT, "Xceive XC2028"},
+ { TUNER_ABSENT, "Microtune MT2131"},
/* 140-149 */
- { TUNER_ABSENT, "Philips 8275A_8295"},
- { TUNER_ABSENT, "TCL MF02GIP_5N_E"},
- { TUNER_ABSENT, "TCL MF02GIP_3DB_E"},
- { TUNER_ABSENT, "TCL MF02GIP_3DI_E"},
- { TUNER_ABSENT, "Microtune MT2266"},
- { TUNER_ABSENT, "TCL MF10WPP_4N_E"},
- { TUNER_ABSENT, "LG TAPQ_H702F"},
- { TUNER_ABSENT, "TCL M09WPP_4N_E"},
- { TUNER_ABSENT, "MaxLinear MXL5005_v2"},
- { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"},
+ { TUNER_ABSENT, "Philips 8275A_8295"},
+ { TUNER_ABSENT, "TCL MF02GIP_5N_E"},
+ { TUNER_ABSENT, "TCL MF02GIP_3DB_E"},
+ { TUNER_ABSENT, "TCL MF02GIP_3DI_E"},
+ { TUNER_ABSENT, "Microtune MT2266"},
+ { TUNER_ABSENT, "TCL MF10WPP_4N_E"},
+ { TUNER_ABSENT, "LG TAPQ_H702F"},
+ { TUNER_ABSENT, "TCL M09WPP_4N_E"},
+ { TUNER_ABSENT, "MaxLinear MXL5005_v2"},
+ { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"},
/* 150-159 */
{ TUNER_XC5000, "Xceive XC5000"},
{ TUNER_ABSENT, "Xceive XC3028L"},
@@ -784,9 +784,3 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
return 0;
}
EXPORT_SYMBOL(tveeprom_read);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 388c2eb4d747..399e1042d351 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -172,6 +172,7 @@
#define USB_PID_TWINHAN_VP7045_WARM 0x3206
#define USB_PID_TWINHAN_VP7021_COLD 0x3207
#define USB_PID_TWINHAN_VP7021_WARM 0x3208
+#define USB_PID_TWINHAN_VP7049 0x3219
#define USB_PID_TINYTWIN 0x3226
#define USB_PID_TINYTWIN_2 0xe402
#define USB_PID_TINYTWIN_3 0x9016
@@ -233,10 +234,15 @@
#define USB_PID_AVERMEDIA_A815M 0x815a
#define USB_PID_AVERMEDIA_A835 0xa835
#define USB_PID_AVERMEDIA_B835 0xb835
+#define USB_PID_AVERMEDIA_A835B_1835 0x1835
+#define USB_PID_AVERMEDIA_A835B_2835 0x2835
+#define USB_PID_AVERMEDIA_A835B_3835 0x3835
+#define USB_PID_AVERMEDIA_A835B_4835 0x4835
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 9be65a3b931f..0aac3096728e 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -156,6 +156,9 @@ struct dvb_ca_private {
/* Slot to start looking for data to read from in the next user-space read operation */
int next_read_slot;
+
+ /* mutex serializing ioctls */
+ struct mutex ioctl_mutex;
};
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
@@ -1191,6 +1194,9 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
dprintk("%s\n", __func__);
+ if (mutex_lock_interruptible(&ca->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case CA_RESET:
for (slot = 0; slot < ca->slot_count; slot++) {
@@ -1221,8 +1227,10 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
case CA_GET_SLOT_INFO: {
struct ca_slot_info *info = parg;
- if ((info->num > ca->slot_count) || (info->num < 0))
- return -EINVAL;
+ if ((info->num > ca->slot_count) || (info->num < 0)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
info->type = CA_CI_LINK;
info->flags = 0;
@@ -1241,6 +1249,8 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
break;
}
+out_unlock:
+ mutex_unlock(&ca->ioctl_mutex);
return err;
}
@@ -1695,6 +1705,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
mutex_init(&ca->slot_info[i].slot_lock);
}
+ mutex_init(&ca->ioctl_mutex);
+
if (signal_pending(current)) {
ret = -EINTR;
goto error;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 0223ad255cb4..6e50a7581568 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -603,6 +603,7 @@ static int dvb_frontend_thread(void *data)
enum dvbfe_algo algo;
bool re_tune = false;
+ bool semheld = false;
dev_dbg(fe->dvb->device, "%s:\n", __func__);
@@ -626,6 +627,8 @@ restart:
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
+ if (!down_interruptible(&fepriv->sem))
+ semheld = true;
fepriv->exit = DVB_FE_NORMAL_EXIT;
break;
}
@@ -741,6 +744,8 @@ restart:
fepriv->exit = DVB_FE_NO_EXIT;
mb();
+ if (semheld)
+ up(&fepriv->sem);
dvb_frontend_wakeup(fe);
return 0;
}
@@ -1048,6 +1053,16 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
+
+ /* Statistics API */
+ _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH, 0, 0),
+ _DTV_CMD(DTV_STAT_CNR, 0, 0),
+ _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT, 0, 0),
+ _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
@@ -1438,7 +1453,35 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
tvp->u.data = c->lna;
break;
+ /* Fill quality measures */
+ case DTV_STAT_SIGNAL_STRENGTH:
+ tvp->u.st = c->strength;
+ break;
+ case DTV_STAT_CNR:
+ tvp->u.st = c->cnr;
+ break;
+ case DTV_STAT_PRE_ERROR_BIT_COUNT:
+ tvp->u.st = c->pre_bit_error;
+ break;
+ case DTV_STAT_PRE_TOTAL_BIT_COUNT:
+ tvp->u.st = c->pre_bit_count;
+ break;
+ case DTV_STAT_POST_ERROR_BIT_COUNT:
+ tvp->u.st = c->post_bit_error;
+ break;
+ case DTV_STAT_POST_TOTAL_BIT_COUNT:
+ tvp->u.st = c->post_bit_count;
+ break;
+ case DTV_STAT_ERROR_BLOCK_COUNT:
+ tvp->u.st = c->block_error;
+ break;
+ case DTV_STAT_TOTAL_BLOCK_COUNT:
+ tvp->u.st = c->block_count;
+ break;
default:
+ dev_dbg(fe->dvb->device,
+ "%s: FE property %d doesn't exist\n",
+ __func__, tvp->cmd);
return -EINVAL;
}
@@ -1823,16 +1866,20 @@ static int dvb_frontend_ioctl(struct file *file,
int err = -EOPNOTSUPP;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
- if (fepriv->exit != DVB_FE_NO_EXIT)
+ if (down_interruptible(&fepriv->sem))
+ return -ERESTARTSYS;
+
+ if (fepriv->exit != DVB_FE_NO_EXIT) {
+ up(&fepriv->sem);
return -ENODEV;
+ }
if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
(_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT ||
- cmd == FE_DISEQC_RECV_SLAVE_REPLY))
+ cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
+ up(&fepriv->sem);
return -EPERM;
-
- if (down_interruptible (&fepriv->sem))
- return -ERESTARTSYS;
+ }
if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
err = dvb_frontend_ioctl_properties(file, cmd, parg);
@@ -2246,7 +2293,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
printk("%s switch command: 0x%04lx\n", __func__, swcmd);
do_gettimeofday(&nexttime);
if (dvb_frontend_debug)
- memcpy(&tv[0], &nexttime, sizeof(struct timeval));
+ tv[0] = nexttime;
/* before sending a command, initialize by sending
* a 32ms 18V to the switch
*/
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 97112cd88a17..b34922a08156 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -393,6 +393,16 @@ struct dtv_frontend_properties {
u8 atscmh_sccc_code_mode_d;
u32 lna;
+
+ /* statistics data */
+ struct dtv_fe_stats strength;
+ struct dtv_fe_stats cnr;
+ struct dtv_fe_stats pre_bit_error;
+ struct dtv_fe_stats pre_bit_count;
+ struct dtv_fe_stats post_bit_error;
+ struct dtv_fe_stats post_bit_count;
+ struct dtv_fe_stats block_error;
+ struct dtv_fe_stats block_count;
};
struct dvb_frontend {
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index c2117688aa23..44225b186f6d 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1345,26 +1345,35 @@ static int dvb_net_do_ioctl(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_net *dvbnet = dvbdev->priv;
+ int ret = 0;
if (((file->f_flags&O_ACCMODE)==O_RDONLY))
return -EPERM;
+ if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case NET_ADD_IF:
{
struct dvb_net_if *dvbnetif = parg;
int result;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
- if (!try_module_get(dvbdev->adapter->module))
- return -EPERM;
+ if (!try_module_get(dvbdev->adapter->module)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
result=dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
if (result<0) {
module_put(dvbdev->adapter->module);
- return result;
+ ret = result;
+ goto ioctl_error;
}
dvbnetif->if_num=result;
break;
@@ -1376,8 +1385,10 @@ static int dvb_net_do_ioctl(struct file *file,
struct dvb_net_if *dvbnetif = parg;
if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num])
- return -EINVAL;
+ !dvbnet->state[dvbnetif->if_num]) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
netdev = dvbnet->device[dvbnetif->if_num];
@@ -1388,16 +1399,18 @@ static int dvb_net_do_ioctl(struct file *file,
}
case NET_REMOVE_IF:
{
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if ((unsigned long) parg >= DVB_NET_DEVICES_MAX)
- return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
+ if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
if (!ret)
module_put(dvbdev->adapter->module);
- return ret;
+ break;
}
/* binary compatibility cruft */
@@ -1406,16 +1419,21 @@ static int dvb_net_do_ioctl(struct file *file,
struct __dvb_net_if_old *dvbnetif = parg;
int result;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
- if (!try_module_get(dvbdev->adapter->module))
- return -EPERM;
+ if (!try_module_get(dvbdev->adapter->module)) {
+ ret = -EPERM;
+ goto ioctl_error;
+ }
result=dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
if (result<0) {
module_put(dvbdev->adapter->module);
- return result;
+ ret = result;
+ goto ioctl_error;
}
dvbnetif->if_num=result;
break;
@@ -1427,8 +1445,10 @@ static int dvb_net_do_ioctl(struct file *file,
struct __dvb_net_if_old *dvbnetif = parg;
if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
- !dvbnet->state[dvbnetif->if_num])
- return -EINVAL;
+ !dvbnet->state[dvbnetif->if_num]) {
+ ret = -EINVAL;
+ goto ioctl_error;
+ }
netdev = dvbnet->device[dvbnetif->if_num];
@@ -1437,9 +1457,13 @@ static int dvb_net_do_ioctl(struct file *file,
break;
}
default:
- return -ENOTTY;
+ ret = -ENOTTY;
+ break;
}
- return 0;
+
+ioctl_error:
+ mutex_unlock(&dvbnet->ioctl_mutex);
+ return ret;
}
static long dvb_net_ioctl(struct file *file,
@@ -1505,6 +1529,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
{
int i;
+ mutex_init(&dvbnet->ioctl_mutex);
dvbnet->demux = dmx;
for (i=0; i<DVB_NET_DEVICES_MAX; i++)
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index 1e53acd50cf4..ede78e8c8aa8 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -40,6 +40,7 @@ struct dvb_net {
int state[DVB_NET_DEVICES_MAX];
unsigned int exit:1;
struct dmx_demux *demux;
+ struct mutex ioctl_mutex;
};
void dvb_net_release(struct dvb_net *);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index d33101aaf0b5..401ef64f92c6 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -418,10 +418,8 @@ int dvb_usercopy(struct file *file,
}
/* call driver */
- mutex_lock(&dvbdev_mutex);
if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -ENOTTY;
- mutex_unlock(&dvbdev_mutex);
if (err < 0)
goto out;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 5efec73a32d2..6f809a70c78e 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -207,6 +207,13 @@ config DVB_SI21XX
help
A DVB-S tuner module. Say Y when you want to support this frontend.
+config DVB_TS2020
+ tristate "Montage Tehnology TS2020 based tuners"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-S/S2 silicon tuner. Say Y when you want to support this tuner.
+
config DVB_DS3000
tristate "Montage Tehnology DS3000 based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 7eb73bbd2e26..cebc0faffab5 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
+obj-$(CONFIG_DVB_TS2020) += ts2020.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 464ad878490b..c9cad989b8b9 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -318,6 +318,10 @@ static int af9033_init(struct dvb_frontend *fe)
len = ARRAY_SIZE(tuner_init_fc2580);
init = tuner_init_fc2580;
break;
+ case AF9033_TUNER_FC0012:
+ len = ARRAY_SIZE(tuner_init_fc0012);
+ init = tuner_init_fc0012;
+ break;
default:
dev_dbg(&state->i2c->dev, "%s: unsupported tuner ID=%d\n",
__func__, state->cfg.tuner);
@@ -331,6 +335,20 @@ static int af9033_init(struct dvb_frontend *fe)
goto err;
}
+ if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ ret = af9033_wr_reg_mask(state, 0x00d91c, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d916, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
state->bandwidth_hz = 0; /* force to program all parameters */
return 0;
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index bfa4313fde21..82bd8c1513b6 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -40,6 +40,7 @@ struct af9033_config {
*/
#define AF9033_TUNER_TUA9001 0x27 /* Infineon TUA 9001 */
#define AF9033_TUNER_FC0011 0x28 /* Fitipower FC0011 */
+#define AF9033_TUNER_FC0012 0x2e /* Fitipower FC0012 */
#define AF9033_TUNER_MXL5007T 0xa0 /* MaxLinear MxL5007T */
#define AF9033_TUNER_TDA18218 0xa1 /* NXP TDA 18218HN */
#define AF9033_TUNER_FC2580 0x32 /* FCI FC2580 */
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index 34dddcd77538..e9bd78265543 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -199,10 +199,9 @@ static const struct reg_val ofsm_init[] = {
{ 0x8000a6, 0x01 },
{ 0x8000a9, 0x00 },
{ 0x8000aa, 0x01 },
- { 0x8000ab, 0x01 },
{ 0x8000b0, 0x01 },
- { 0x8000c0, 0x05 },
- { 0x8000c4, 0x19 },
+ { 0x8000c4, 0x05 },
+ { 0x8000c8, 0x19 },
{ 0x80f000, 0x0f },
{ 0x80f016, 0x10 },
{ 0x80f017, 0x04 },
@@ -322,8 +321,9 @@ static const struct reg_val tuner_init_tua9001[] = {
{ 0x80009b, 0x05 },
{ 0x80009c, 0x80 },
{ 0x8000b3, 0x00 },
- { 0x8000c1, 0x01 },
- { 0x8000c2, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000c9, 0x5d },
{ 0x80f007, 0x00 },
{ 0x80f01f, 0x82 },
{ 0x80f020, 0x00 },
@@ -339,14 +339,14 @@ static const struct reg_val tuner_init_tua9001[] = {
/* Fitipower fc0011 tuner init
AF9033_TUNER_FC0011 = 0x28 */
static const struct reg_val tuner_init_fc0011[] = {
- { 0x800046, AF9033_TUNER_FC0011 },
+ { 0x800046, 0x28 },
{ 0x800057, 0x00 },
{ 0x800058, 0x01 },
{ 0x80005f, 0x00 },
{ 0x800060, 0x00 },
{ 0x800068, 0xa5 },
{ 0x80006e, 0x01 },
- { 0x800071, 0x0A },
+ { 0x800071, 0x0a },
{ 0x800072, 0x02 },
{ 0x800074, 0x01 },
{ 0x800079, 0x01 },
@@ -354,7 +354,7 @@ static const struct reg_val tuner_init_fc0011[] = {
{ 0x800094, 0x00 },
{ 0x800095, 0x00 },
{ 0x800096, 0x00 },
- { 0x80009b, 0x2D },
+ { 0x80009b, 0x2d },
{ 0x80009c, 0x60 },
{ 0x80009d, 0x23 },
{ 0x8000a4, 0x50 },
@@ -362,39 +362,82 @@ static const struct reg_val tuner_init_fc0011[] = {
{ 0x8000b3, 0x01 },
{ 0x8000b7, 0x88 },
{ 0x8000b8, 0xa6 },
- { 0x8000c3, 0x01 },
- { 0x8000c4, 0x01 },
- { 0x8000c7, 0x69 },
- { 0x80F007, 0x00 },
- { 0x80F00A, 0x1B },
- { 0x80F00B, 0x1B },
- { 0x80F00C, 0x1B },
- { 0x80F00D, 0x1B },
- { 0x80F00E, 0xFF },
- { 0x80F00F, 0x01 },
- { 0x80F010, 0x00 },
- { 0x80F011, 0x02 },
- { 0x80F012, 0xFF },
- { 0x80F013, 0x01 },
- { 0x80F014, 0x00 },
- { 0x80F015, 0x02 },
- { 0x80F01B, 0xEF },
- { 0x80F01C, 0x01 },
- { 0x80F01D, 0x0f },
- { 0x80F01E, 0x02 },
- { 0x80F01F, 0x6E },
- { 0x80F020, 0x00 },
- { 0x80F025, 0xDE },
- { 0x80F026, 0x00 },
- { 0x80F027, 0x0A },
- { 0x80F028, 0x03 },
- { 0x80F029, 0x6E },
- { 0x80F02A, 0x00 },
- { 0x80F047, 0x00 },
- { 0x80F054, 0x00 },
- { 0x80F055, 0x00 },
- { 0x80F077, 0x01 },
- { 0x80F1E6, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x01 },
+ { 0x8000c9, 0x69 },
+ { 0x80f007, 0x00 },
+ { 0x80f00a, 0x1b },
+ { 0x80f00b, 0x1b },
+ { 0x80f00c, 0x1b },
+ { 0x80f00d, 0x1b },
+ { 0x80f00e, 0xff },
+ { 0x80f00f, 0x01 },
+ { 0x80f010, 0x00 },
+ { 0x80f011, 0x02 },
+ { 0x80f012, 0xff },
+ { 0x80f013, 0x01 },
+ { 0x80f014, 0x00 },
+ { 0x80f015, 0x02 },
+ { 0x80f01b, 0xef },
+ { 0x80f01c, 0x01 },
+ { 0x80f01d, 0x0f },
+ { 0x80f01e, 0x02 },
+ { 0x80f01f, 0x6e },
+ { 0x80f020, 0x00 },
+ { 0x80f025, 0xde },
+ { 0x80f026, 0x00 },
+ { 0x80f027, 0x0a },
+ { 0x80f028, 0x03 },
+ { 0x80f029, 0x6e },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* Fitipower FC0012 tuner init
+ AF9033_TUNER_FC0012 = 0x2e */
+static const struct reg_val tuner_init_fc0012[] = {
+ { 0x800046, 0x2e },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x800059, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x80006d, 0x00 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800075, 0x03 },
+ { 0x800076, 0x02 },
+ { 0x800077, 0x01 },
+ { 0x800078, 0x00 },
+ { 0x800079, 0x00 },
+ { 0x80007a, 0x90 },
+ { 0x80007b, 0x90 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x01 },
+ { 0x800095, 0x02 },
+ { 0x800096, 0x01 },
+ { 0x800098, 0x0a },
+ { 0x80009b, 0x05 },
+ { 0x80009c, 0x80 },
+ { 0x8000b3, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000c9, 0x5d },
+ { 0x80f007, 0x00 },
+ { 0x80f01f, 0xa0 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
};
/* MaxLinear MxL5007T tuner init
@@ -482,11 +525,12 @@ static const struct reg_val tuner_init_fc2580[] = {
{ 0x800095, 0x00 },
{ 0x800096, 0x05 },
{ 0x8000b3, 0x01 },
- { 0x8000c3, 0x01 },
- { 0x8000c4, 0x00 },
+ { 0x8000c5, 0x01 },
+ { 0x8000c6, 0x00 },
+ { 0x8000d1, 0x01 },
{ 0x80f007, 0x00 },
{ 0x80f00c, 0x19 },
- { 0x80f00d, 0x1A },
+ { 0x80f00d, 0x1a },
{ 0x80f00e, 0x00 },
{ 0x80f00f, 0x02 },
{ 0x80f010, 0x00 },
diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h
index f4575c0cc446..5bd56b1623bf 100644
--- a/drivers/media/dvb-frontends/bcm3510.h
+++ b/drivers/media/dvb-frontends/bcm3510.h
@@ -34,7 +34,7 @@ struct bcm3510_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_BCM3510) || (defined(CONFIG_DVB_BCM3510_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_BCM3510)
extern struct dvb_frontend* bcm3510_attach(const struct bcm3510_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx22700.h b/drivers/media/dvb-frontends/cx22700.h
index 4757a930ca05..382a7b1f3618 100644
--- a/drivers/media/dvb-frontends/cx22700.h
+++ b/drivers/media/dvb-frontends/cx22700.h
@@ -31,7 +31,7 @@ struct cx22700_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_CX22700) || (defined(CONFIG_DVB_CX22700_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_CX22700)
extern struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx24110.h b/drivers/media/dvb-frontends/cx24110.h
index fdcceee91f3a..527aff1f2723 100644
--- a/drivers/media/dvb-frontends/cx24110.h
+++ b/drivers/media/dvb-frontends/cx24110.h
@@ -46,7 +46,7 @@ static inline int cx24110_pll_write(struct dvb_frontend *fe, u32 val)
return 0;
}
-#if defined(CONFIG_DVB_CX24110) || (defined(CONFIG_DVB_CX24110_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_CX24110)
extern struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index b48879186537..2916d7c74a1d 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -819,7 +819,7 @@ static int cx24116_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static void cx24116_clone_params(struct dvb_frontend *fe)
{
struct cx24116_state *state = fe->demodulator_priv;
- memcpy(&state->dcur, &state->dnxt, sizeof(state->dcur));
+ state->dcur = state->dnxt;
}
/* Wait for LNB */
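
The hunk above (and the drxd_hard.c hunks below) replaces memcpy() between two objects of the same structure type with plain structure assignment; the run-time effect is the same, but the compiler now checks the types and the size argument cannot be got wrong. A minimal illustration with a made-up structure:

struct demod_params {
        u32 frequency;
        u32 symbol_rate;
};

static void clone_params(struct demod_params *cur,
                         const struct demod_params *next)
{
        /* Same effect as memcpy(cur, next, sizeof(*cur)), but type-checked. */
        *cur = *next;
}
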
diff --git a/drivers/media/dvb-frontends/dib0070.h b/drivers/media/dvb-frontends/dib0070.h
index 45c31fae3967..0c6befcc9143 100644
--- a/drivers/media/dvb-frontends/dib0070.h
+++ b/drivers/media/dvb-frontends/dib0070.h
@@ -48,7 +48,7 @@ struct dib0070_config {
u8 vga_filter;
};
-#if defined(CONFIG_DVB_TUNER_DIB0070) || (defined(CONFIG_DVB_TUNER_DIB0070_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0070)
extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
extern u16 dib0070_wbd_offset(struct dvb_frontend *);
extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
diff --git a/drivers/media/dvb-frontends/dib0090.h b/drivers/media/dvb-frontends/dib0090.h
index 781dc49de45b..6a090954fa10 100644
--- a/drivers/media/dvb-frontends/dib0090.h
+++ b/drivers/media/dvb-frontends/dib0090.h
@@ -75,7 +75,7 @@ struct dib0090_config {
u8 force_crystal_mode;
};
-#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_DIB0090)
extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
index 404f63a6f26b..9b6c3bbc983a 100644
--- a/drivers/media/dvb-frontends/dib3000.h
+++ b/drivers/media/dvb-frontends/dib3000.h
@@ -41,7 +41,7 @@ struct dib_fe_xfer_ops
int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
};
-#if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB3000MB)
extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
#else
diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
index 39591bb172c1..9e7a2b170d55 100644
--- a/drivers/media/dvb-frontends/dib8000.h
+++ b/drivers/media/dvb-frontends/dib8000.h
@@ -37,7 +37,7 @@ struct dib8000_config {
#define DEFAULT_DIB8000_I2C_ADDRESS 18
-#if defined(CONFIG_DVB_DIB8000) || (defined(CONFIG_DVB_DIB8000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB8000)
extern struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
extern struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index de1cc91fd833..f3639f045ff0 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -27,7 +27,7 @@ struct dib9000_config {
#define DEFAULT_DIB9000_I2C_ADDRESS 18
-#if defined(CONFIG_DVB_DIB9000) || (defined(CONFIG_DVB_DIB9000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_DIB9000)
extern struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg);
extern int dib9000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr);
extern struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index e71cc60851e7..9a2134792cfa 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2965,7 +2965,7 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
return NULL;
memset(state, 0, sizeof(*state));
- memcpy(&state->ops, &drxd_ops, sizeof(struct dvb_frontend_ops));
+ state->ops = drxd_ops;
state->dev = dev;
state->config = *config;
state->i2c = i2c;
@@ -2976,10 +2976,13 @@ struct dvb_frontend *drxd_attach(const struct drxd_config *config,
if (Read16(state, 0, 0, 0) < 0)
goto error;
- memcpy(&state->frontend.ops, &drxd_ops,
- sizeof(struct dvb_frontend_ops));
+ state->frontend.ops = drxd_ops;
state->frontend.demodulator_priv = state;
ConfigureMPEGOutput(state, 0);
+ /* add a few initializations to allow gate control */
+ CDRXD(state, state->config.IF ? state->config.IF : 36000000);
+ InitHI(state);
+
return &state->frontend;
error:
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 60a529e3833f..1e344b033277 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -1,8 +1,8 @@
/*
- Montage Technology DS3000/TS2020 - DVBS/S2 Demodulator/Tuner driver
- Copyright (C) 2009 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+ Montage Technology DS3000 - DVBS/S2 Demodulator driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
- Copyright (C) 2009 TurboSight.com
+ Copyright (C) 2009-2012 TurboSight.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include "dvb_frontend.h"
+#include "ts2020.h"
#include "ds3000.h"
static int debug;
@@ -42,7 +43,6 @@ static int debug;
#define DS3000_DEFAULT_FIRMWARE "dvb-fe-ds3000.fw"
#define DS3000_SAMPLE_RATE 96000 /* in kHz */
-#define DS3000_XTAL_FREQ 27000 /* in kHz */
/* Register values to initialise the demod in DVB-S mode */
static u8 ds3000_dvbs_init_tab[] = {
@@ -256,22 +256,14 @@ static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
return 0;
}
-static int ds3000_tuner_writereg(struct ds3000_state *state, int reg, int data)
+static int ds3000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
- u8 buf[] = { reg, data };
- struct i2c_msg msg = { .addr = 0x60,
- .flags = 0, .buf = buf, .len = 2 };
- int err;
-
- dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data);
+ struct ds3000_state *state = fe->demodulator_priv;
- ds3000_writereg(state, 0x03, 0x11);
- err = i2c_transfer(state->i2c, &msg, 1);
- if (err != 1) {
- printk("%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
- return -EREMOTEIO;
- }
+ if (enable)
+ ds3000_writereg(state, 0x03, 0x12);
+ else
+ ds3000_writereg(state, 0x03, 0x02);
return 0;
}
@@ -280,15 +272,14 @@ static int ds3000_tuner_writereg(struct ds3000_state *state, int reg, int data)
static int ds3000_writeFW(struct ds3000_state *state, int reg,
const u8 *data, u16 len)
{
- int i, ret = -EREMOTEIO;
+ int i, ret = 0;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "Unable to kmalloc\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
*(buf) = reg;
@@ -308,8 +299,10 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
printk(KERN_ERR "%s: write error(err == %i, "
"reg == 0x%02x\n", __func__, ret, reg);
ret = -EREMOTEIO;
+ goto error;
}
}
+ ret = 0;
error:
kfree(buf);
@@ -348,38 +341,6 @@ static int ds3000_readreg(struct ds3000_state *state, u8 reg)
return b1[0];
}
-static int ds3000_tuner_readreg(struct ds3000_state *state, u8 reg)
-{
- int ret;
- u8 b0[] = { reg };
- u8 b1[] = { 0 };
- struct i2c_msg msg[] = {
- {
- .addr = 0x60,
- .flags = 0,
- .buf = b0,
- .len = 1
- }, {
- .addr = 0x60,
- .flags = I2C_M_RD,
- .buf = b1,
- .len = 1
- }
- };
-
- ds3000_writereg(state, 0x03, 0x12);
- ret = i2c_transfer(state->i2c, msg, 2);
-
- if (ret != 2) {
- printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret);
- return ret;
- }
-
- dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]);
-
- return b1[0];
-}
-
static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw);
@@ -424,6 +385,7 @@ static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw)
{
struct ds3000_state *state = fe->demodulator_priv;
+ int ret = 0;
dprintk("%s\n", __func__);
dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n",
@@ -436,10 +398,10 @@ static int ds3000_load_firmware(struct dvb_frontend *fe,
/* Begin the firmware load process */
ds3000_writereg(state, 0xb2, 0x01);
/* write the entire firmware */
- ds3000_writeFW(state, 0xb0, fw->data, fw->size);
+ ret = ds3000_writeFW(state, 0xb0, fw->data, fw->size);
ds3000_writereg(state, 0xb2, 0x00);
- return 0;
+ return ret;
}
static int ds3000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
@@ -498,6 +460,9 @@ static int ds3000_read_status(struct dvb_frontend *fe, fe_status_t* status)
return 1;
}
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, *status == 0 ? 0 : 1);
+
dprintk("%s: status = 0x%02x\n", __func__, lock);
return 0;
@@ -568,33 +533,11 @@ static int ds3000_read_ber(struct dvb_frontend *fe, u32* ber)
return 0;
}
-/* read TS2020 signal strength */
static int ds3000_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
- struct ds3000_state *state = fe->demodulator_priv;
- u16 sig_reading, sig_strength;
- u8 rfgain, bbgain;
-
- dprintk("%s()\n", __func__);
-
- rfgain = ds3000_tuner_readreg(state, 0x3d) & 0x1f;
- bbgain = ds3000_tuner_readreg(state, 0x21) & 0x1f;
-
- if (rfgain > 15)
- rfgain = 15;
- if (bbgain > 13)
- bbgain = 13;
-
- sig_reading = rfgain * 2 + bbgain * 3;
-
- sig_strength = 40 + (64 - sig_reading) * 50 / 64 ;
-
- /* cook the value to be suitable for szap-s2 human readable output */
- *signal_strength = sig_strength * 1000;
-
- dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__,
- sig_reading, *signal_strength);
+ if (fe->ops.tuner_ops.get_rf_strength)
+ fe->ops.tuner_ops.get_rf_strength(fe, signal_strength);
return 0;
}
@@ -878,6 +821,10 @@ static int ds3000_diseqc_send_burst(struct dvb_frontend *fe,
static void ds3000_release(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
+
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, 0);
+
dprintk("%s\n", __func__);
kfree(state);
}
@@ -952,133 +899,17 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
int i;
fe_status_t status;
- u8 mlpf, mlpf_new, mlpf_max, mlpf_min, nlpf, div4;
s32 offset_khz;
- u16 value, ndiv;
- u32 f3db;
+ u32 frequency;
+ u16 value;
dprintk("%s() ", __func__);
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
/* Tune */
- /* unknown */
- ds3000_tuner_writereg(state, 0x07, 0x02);
- ds3000_tuner_writereg(state, 0x10, 0x00);
- ds3000_tuner_writereg(state, 0x60, 0x79);
- ds3000_tuner_writereg(state, 0x08, 0x01);
- ds3000_tuner_writereg(state, 0x00, 0x01);
- div4 = 0;
-
- /* calculate and set freq divider */
- if (c->frequency < 1146000) {
- ds3000_tuner_writereg(state, 0x10, 0x11);
- div4 = 1;
- ndiv = ((c->frequency * (6 + 8) * 4) +
- (DS3000_XTAL_FREQ / 2)) /
- DS3000_XTAL_FREQ - 1024;
- } else {
- ds3000_tuner_writereg(state, 0x10, 0x01);
- ndiv = ((c->frequency * (6 + 8) * 2) +
- (DS3000_XTAL_FREQ / 2)) /
- DS3000_XTAL_FREQ - 1024;
- }
-
- ds3000_tuner_writereg(state, 0x01, (ndiv & 0x0f00) >> 8);
- ds3000_tuner_writereg(state, 0x02, ndiv & 0x00ff);
-
- /* set pll */
- ds3000_tuner_writereg(state, 0x03, 0x06);
- ds3000_tuner_writereg(state, 0x51, 0x0f);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x10);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- /* unknown */
- ds3000_tuner_writereg(state, 0x51, 0x17);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x08);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- value = ds3000_tuner_readreg(state, 0x3d);
- value &= 0x0f;
- if ((value > 4) && (value < 15)) {
- value -= 3;
- if (value < 4)
- value = 4;
- value = ((value << 3) | 0x01) & 0x79;
- }
-
- ds3000_tuner_writereg(state, 0x60, value);
- ds3000_tuner_writereg(state, 0x51, 0x17);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x08);
- ds3000_tuner_writereg(state, 0x50, 0x00);
-
- /* set low-pass filter period */
- ds3000_tuner_writereg(state, 0x04, 0x2e);
- ds3000_tuner_writereg(state, 0x51, 0x1b);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x04);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- f3db = ((c->symbol_rate / 1000) << 2) / 5 + 2000;
- if ((c->symbol_rate / 1000) < 5000)
- f3db += 3000;
- if (f3db < 7000)
- f3db = 7000;
- if (f3db > 40000)
- f3db = 40000;
-
- /* set low-pass filter baseband */
- value = ds3000_tuner_readreg(state, 0x26);
- mlpf = 0x2e * 207 / ((value << 1) + 151);
- mlpf_max = mlpf * 135 / 100;
- mlpf_min = mlpf * 78 / 100;
- if (mlpf_max > 63)
- mlpf_max = 63;
-
- /* rounded to the closest integer */
- nlpf = ((mlpf * f3db * 1000) + (2766 * DS3000_XTAL_FREQ / 2))
- / (2766 * DS3000_XTAL_FREQ);
- if (nlpf > 23)
- nlpf = 23;
- if (nlpf < 1)
- nlpf = 1;
-
- /* rounded to the closest integer */
- mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
- (1000 * f3db / 2)) / (1000 * f3db);
-
- if (mlpf_new < mlpf_min) {
- nlpf++;
- mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
- (1000 * f3db / 2)) / (1000 * f3db);
- }
-
- if (mlpf_new > mlpf_max)
- mlpf_new = mlpf_max;
-
- ds3000_tuner_writereg(state, 0x04, mlpf_new);
- ds3000_tuner_writereg(state, 0x06, nlpf);
- ds3000_tuner_writereg(state, 0x51, 0x1b);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x04);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(5);
-
- /* unknown */
- ds3000_tuner_writereg(state, 0x51, 0x1e);
- ds3000_tuner_writereg(state, 0x51, 0x1f);
- ds3000_tuner_writereg(state, 0x50, 0x01);
- ds3000_tuner_writereg(state, 0x50, 0x00);
- msleep(60);
-
- offset_khz = (ndiv - ndiv % 2 + 1024) * DS3000_XTAL_FREQ
- / (6 + 8) / (div4 + 1) / 2 - c->frequency;
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
/* ds3000 global reset */
ds3000_writereg(state, 0x07, 0x80);
@@ -1186,7 +1017,11 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
/* start ds3000 build-in uC */
ds3000_writereg(state, 0xb2, 0x00);
- ds3000_set_carrier_offset(fe, offset_khz);
+ if (fe->ops.tuner_ops.get_frequency) {
+ fe->ops.tuner_ops.get_frequency(fe, &frequency);
+ offset_khz = frequency - c->frequency;
+ ds3000_set_carrier_offset(fe, offset_khz);
+ }
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
@@ -1218,6 +1053,11 @@ static int ds3000_tune(struct dvb_frontend *fe,
static enum dvbfe_algo ds3000_get_algo(struct dvb_frontend *fe)
{
+ struct ds3000_state *state = fe->demodulator_priv;
+
+ if (state->config->set_lock_led)
+ state->config->set_lock_led(fe, 0);
+
dprintk("%s()\n", __func__);
return DVBFE_ALGO_HW;
}
@@ -1237,10 +1077,6 @@ static int ds3000_initfe(struct dvb_frontend *fe)
ds3000_writereg(state, 0x08, 0x01 | ds3000_readreg(state, 0x08));
msleep(1);
- /* TS2020 init */
- ds3000_tuner_writereg(state, 0x42, 0x73);
- ds3000_tuner_writereg(state, 0x05, 0x01);
- ds3000_tuner_writereg(state, 0x62, 0xf5);
/* Load the firmware if required */
ret = ds3000_firmware_ondemand(fe);
if (ret != 0) {
@@ -1251,17 +1087,10 @@ static int ds3000_initfe(struct dvb_frontend *fe)
return 0;
}
-/* Put device to sleep */
-static int ds3000_sleep(struct dvb_frontend *fe)
-{
- dprintk("%s()\n", __func__);
- return 0;
-}
-
static struct dvb_frontend_ops ds3000_ops = {
- .delsys = { SYS_DVBS, SYS_DVBS2},
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
- .name = "Montage Technology DS3000/TS2020",
+ .name = "Montage Technology DS3000",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
@@ -1279,7 +1108,7 @@ static struct dvb_frontend_ops ds3000_ops = {
.release = ds3000_release,
.init = ds3000_initfe,
- .sleep = ds3000_sleep,
+ .i2c_gate_ctrl = ds3000_i2c_gate_ctrl,
.read_status = ds3000_read_status,
.read_ber = ds3000_read_ber,
.read_signal_strength = ds3000_read_signal_strength,
@@ -1299,7 +1128,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
- "DS3000/TS2020 hardware");
-MODULE_AUTHOR("Konstantin Dimitrov");
+ "DS3000 hardware");
+MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
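
After this patch the ds3000 demodulator no longer talks to the TS2020 tuner directly: it only implements i2c_gate_ctrl() (register 0x03 set to 0x12 to open the gate, 0x02 to close it) and defers set_params(), get_frequency() and get_rf_strength() to whichever tuner driver is attached through fe->ops.tuner_ops. The usual shape of the tuner side is sketched below, with a hypothetical register write and a hypothetical helper for fetching the adapter:

static int example_tuner_set_params(struct dvb_frontend *fe)
{
        u8 buf[] = { 0x10, 0x01 };      /* hypothetical tuner register write */
        struct i2c_msg msg = {
                .addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf),
        };
        struct i2c_adapter *i2c = example_tuner_i2c(fe); /* hypothetical helper */
        int ret;

        if (fe->ops.i2c_gate_ctrl)
                fe->ops.i2c_gate_ctrl(fe, 1);   /* demod opens the gate */

        ret = i2c_transfer(i2c, &msg, 1);

        if (fe->ops.i2c_gate_ctrl)
                fe->ops.i2c_gate_ctrl(fe, 0);   /* demod closes the gate */

        return ret == 1 ? 0 : -EREMOTEIO;
}
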
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h
index 1b736888ea37..478ad66c63d7 100644
--- a/drivers/media/dvb-frontends/ds3000.h
+++ b/drivers/media/dvb-frontends/ds3000.h
@@ -1,8 +1,8 @@
/*
- Montage Technology DS3000/TS2020 - DVBS/S2 Satellite demod/tuner driver
- Copyright (C) 2009 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+ Montage Technology DS3000 - DVBS/S2 Demodulator driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
- Copyright (C) 2009 TurboSight.com
+ Copyright (C) 2009-2012 TurboSight.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ */
#ifndef DS3000_H
#define DS3000_H
@@ -30,6 +30,8 @@ struct ds3000_config {
u8 ci_mode;
/* Set device param to start dma */
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
+ /* Hook for Lock LED */
+ void (*set_lock_led)(struct dvb_frontend *fe, int offon);
};
#if defined(CONFIG_DVB_DS3000) || \
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index 4de754f76ce9..f4b5a0601c3a 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -38,7 +38,7 @@
* @param pll_desc_id dvb_pll_desc to use.
* @return Frontend pointer on success, NULL on failure
*/
-#if defined(CONFIG_DVB_PLL) || (defined(CONFIG_DVB_PLL_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_PLL)
extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/isl6405.h b/drivers/media/dvb-frontends/isl6405.h
index 1c793d37576b..8abb70c26fd9 100644
--- a/drivers/media/dvb-frontends/isl6405.h
+++ b/drivers/media/dvb-frontends/isl6405.h
@@ -55,7 +55,7 @@
#define ISL6405_ENT2 0x20
#define ISL6405_ISEL2 0x40
-#if defined(CONFIG_DVB_ISL6405) || (defined(CONFIG_DVB_ISL6405_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6405)
/* override_set and override_clear control which system register bits (above)
* to always set & clear
*/
diff --git a/drivers/media/dvb-frontends/isl6421.h b/drivers/media/dvb-frontends/isl6421.h
index 47e4518a042d..e7ca7d12b50a 100644
--- a/drivers/media/dvb-frontends/isl6421.h
+++ b/drivers/media/dvb-frontends/isl6421.h
@@ -39,7 +39,7 @@
#define ISL6421_ISEL1 0x20
#define ISL6421_DCL 0x40
-#if defined(CONFIG_DVB_ISL6421) || (defined(CONFIG_DVB_ISL6421_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6421)
/* override_set and override_clear control which system register bits (above) to always set & clear */
extern struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr,
u8 override_set, u8 override_clear);
diff --git a/drivers/media/dvb-frontends/isl6423.h b/drivers/media/dvb-frontends/isl6423.h
index e1a37fba01ca..80dfd9cc4f41 100644
--- a/drivers/media/dvb-frontends/isl6423.h
+++ b/drivers/media/dvb-frontends/isl6423.h
@@ -42,7 +42,7 @@ struct isl6423_config {
u8 mod_extern;
};
-#if defined(CONFIG_DVB_ISL6423) || (defined(CONFIG_DVB_ISL6423_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ISL6423)
extern struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/itd1000.h b/drivers/media/dvb-frontends/itd1000.h
index 5e18df071b88..edae0902f4fd 100644
--- a/drivers/media/dvb-frontends/itd1000.h
+++ b/drivers/media/dvb-frontends/itd1000.h
@@ -29,7 +29,7 @@ struct itd1000_config {
u8 i2c_address;
};
-#if defined(CONFIG_DVB_TUNER_ITD1000) || (defined(CONFIG_DVB_TUNER_ITD1000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUNER_ITD1000)
extern struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg);
#else
static inline struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg)
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index bc5a82082aaa..0e3387e00952 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -212,7 +212,7 @@ static int ix2505v_set_params(struct dvb_frontend *fe)
lpf = 0xb;
deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf);
- deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]);
+ deb_info("Data 0=[%4phN]\n", data);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
diff --git a/drivers/media/dvb-frontends/l64781.h b/drivers/media/dvb-frontends/l64781.h
index 1305a9e7fb0b..6813b08a774d 100644
--- a/drivers/media/dvb-frontends/l64781.h
+++ b/drivers/media/dvb-frontends/l64781.h
@@ -31,7 +31,7 @@ struct l64781_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_L64781) || (defined(CONFIG_DVB_L64781_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_L64781)
extern struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/lgdt330x.h b/drivers/media/dvb-frontends/lgdt330x.h
index 9012504f0f2d..ca0eab562e1e 100644
--- a/drivers/media/dvb-frontends/lgdt330x.h
+++ b/drivers/media/dvb-frontends/lgdt330x.h
@@ -52,7 +52,7 @@ struct lgdt330x_config
int clock_polarity_flip;
};
-#if defined(CONFIG_DVB_LGDT330X) || (defined(CONFIG_DVB_LGDT330X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_LGDT330X)
extern struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 633815ed90ca..4da5272075cb 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -60,15 +60,13 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
#define info(format, arg...) \
printk(KERN_INFO "m88rs2000-fe: " format "\n" , ## arg)
-static int m88rs2000_writereg(struct m88rs2000_state *state, u8 tuner,
+static int m88rs2000_writereg(struct m88rs2000_state *state,
u8 reg, u8 data)
{
int ret;
- u8 addr = (tuner == 0) ? state->config->tuner_addr :
- state->config->demod_addr;
u8 buf[] = { reg, data };
struct i2c_msg msg = {
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = 0,
.buf = buf,
.len = 2
@@ -83,44 +81,20 @@ static int m88rs2000_writereg(struct m88rs2000_state *state, u8 tuner,
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int m88rs2000_demod_write(struct m88rs2000_state *state, u8 reg, u8 data)
-{
- return m88rs2000_writereg(state, 1, reg, data);
-}
-
-static int m88rs2000_tuner_write(struct m88rs2000_state *state, u8 reg, u8 data)
-{
- m88rs2000_demod_write(state, 0x81, 0x84);
- udelay(10);
- return m88rs2000_writereg(state, 0, reg, data);
-
-}
-
-static int m88rs2000_write(struct dvb_frontend *fe, const u8 buf[], int len)
-{
- struct m88rs2000_state *state = fe->demodulator_priv;
-
- if (len != 2)
- return -EINVAL;
-
- return m88rs2000_writereg(state, 1, buf[0], buf[1]);
-}
-
-static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 tuner, u8 reg)
+static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
- u8 addr = (tuner == 0) ? state->config->tuner_addr :
- state->config->demod_addr;
+
struct i2c_msg msg[] = {
{
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = 0,
.buf = b0,
.len = 1
}, {
- .addr = addr,
+ .addr = state->config->demod_addr,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
@@ -136,18 +110,6 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 tuner, u8 reg)
return b1[0];
}
-static u8 m88rs2000_demod_read(struct m88rs2000_state *state, u8 reg)
-{
- return m88rs2000_readreg(state, 1, reg);
-}
-
-static u8 m88rs2000_tuner_read(struct m88rs2000_state *state, u8 reg)
-{
- m88rs2000_demod_write(state, 0x81, 0x85);
- udelay(10);
- return m88rs2000_readreg(state, 0, reg);
-}
-
static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -166,9 +128,9 @@ static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
b[0] = (u8) (temp >> 16) & 0xff;
b[1] = (u8) (temp >> 8) & 0xff;
b[2] = (u8) temp & 0xff;
- ret = m88rs2000_demod_write(state, 0x93, b[2]);
- ret |= m88rs2000_demod_write(state, 0x94, b[1]);
- ret |= m88rs2000_demod_write(state, 0x95, b[0]);
+ ret = m88rs2000_writereg(state, 0x93, b[2]);
+ ret |= m88rs2000_writereg(state, 0x94, b[1]);
+ ret |= m88rs2000_writereg(state, 0x95, b[0]);
deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
return ret;
@@ -182,37 +144,37 @@ static int m88rs2000_send_diseqc_msg(struct dvb_frontend *fe,
int i;
u8 reg;
deb_info("%s\n", __func__);
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg = m88rs2000_demod_read(state, 0xb2);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg = m88rs2000_readreg(state, 0xb2);
reg &= 0x3f;
- m88rs2000_demod_write(state, 0xb2, reg);
+ m88rs2000_writereg(state, 0xb2, reg);
for (i = 0; i < m->msg_len; i++)
- m88rs2000_demod_write(state, 0xb3 + i, m->msg[i]);
+ m88rs2000_writereg(state, 0xb3 + i, m->msg[i]);
- reg = m88rs2000_demod_read(state, 0xb1);
+ reg = m88rs2000_readreg(state, 0xb1);
reg &= 0x87;
reg |= ((m->msg_len - 1) << 3) | 0x07;
reg &= 0x7f;
- m88rs2000_demod_write(state, 0xb1, reg);
+ m88rs2000_writereg(state, 0xb1, reg);
for (i = 0; i < 15; i++) {
- if ((m88rs2000_demod_read(state, 0xb1) & 0x40) == 0x0)
+ if ((m88rs2000_readreg(state, 0xb1) & 0x40) == 0x0)
break;
msleep(20);
}
- reg = m88rs2000_demod_read(state, 0xb1);
+ reg = m88rs2000_readreg(state, 0xb1);
if ((reg & 0x40) > 0x0) {
reg &= 0x7f;
reg |= 0x40;
- m88rs2000_demod_write(state, 0xb1, reg);
+ m88rs2000_writereg(state, 0xb1, reg);
}
- reg = m88rs2000_demod_read(state, 0xb2);
+ reg = m88rs2000_readreg(state, 0xb2);
reg &= 0x3f;
reg |= 0x80;
- m88rs2000_demod_write(state, 0xb2, reg);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
@@ -224,14 +186,14 @@ static int m88rs2000_send_diseqc_burst(struct dvb_frontend *fe,
struct m88rs2000_state *state = fe->demodulator_priv;
u8 reg0, reg1;
deb_info("%s\n", __func__);
- m88rs2000_demod_write(state, 0x9a, 0x30);
+ m88rs2000_writereg(state, 0x9a, 0x30);
msleep(50);
- reg0 = m88rs2000_demod_read(state, 0xb1);
- reg1 = m88rs2000_demod_read(state, 0xb2);
+ reg0 = m88rs2000_readreg(state, 0xb1);
+ reg1 = m88rs2000_readreg(state, 0xb2);
/* TODO complete this section */
- m88rs2000_demod_write(state, 0xb2, reg1);
- m88rs2000_demod_write(state, 0xb1, reg0);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg1);
+ m88rs2000_writereg(state, 0xb1, reg0);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
}
@@ -240,9 +202,9 @@ static int m88rs2000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
struct m88rs2000_state *state = fe->demodulator_priv;
u8 reg0, reg1;
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg0 = m88rs2000_demod_read(state, 0xb1);
- reg1 = m88rs2000_demod_read(state, 0xb2);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg0 = m88rs2000_readreg(state, 0xb1);
+ reg1 = m88rs2000_readreg(state, 0xb2);
reg1 &= 0x3f;
@@ -257,9 +219,9 @@ static int m88rs2000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
default:
break;
}
- m88rs2000_demod_write(state, 0xb2, reg1);
- m88rs2000_demod_write(state, 0xb1, reg0);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0xb2, reg1);
+ m88rs2000_writereg(state, 0xb1, reg0);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
return 0;
}
@@ -276,14 +238,6 @@ struct inittab m88rs2000_setup[] = {
{DEMOD_WRITE, 0x00, 0x00},
{DEMOD_WRITE, 0x9a, 0xb0},
{DEMOD_WRITE, 0x81, 0xc1},
- {TUNER_WRITE, 0x42, 0x73},
- {TUNER_WRITE, 0x05, 0x07},
- {TUNER_WRITE, 0x20, 0x27},
- {TUNER_WRITE, 0x07, 0x02},
- {TUNER_WRITE, 0x11, 0xff},
- {TUNER_WRITE, 0x60, 0xf9},
- {TUNER_WRITE, 0x08, 0x01},
- {TUNER_WRITE, 0x00, 0x41},
{DEMOD_WRITE, 0x81, 0x81},
{DEMOD_WRITE, 0x86, 0xc6},
{DEMOD_WRITE, 0x9a, 0x30},
@@ -301,23 +255,10 @@ struct inittab m88rs2000_shutdown[] = {
{DEMOD_WRITE, 0xf1, 0x89},
{DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0x9a, 0xb0},
- {TUNER_WRITE, 0x00, 0x40},
{DEMOD_WRITE, 0x81, 0x81},
{0xff, 0xaa, 0xff}
};
-struct inittab tuner_reset[] = {
- {TUNER_WRITE, 0x42, 0x73},
- {TUNER_WRITE, 0x05, 0x07},
- {TUNER_WRITE, 0x20, 0x27},
- {TUNER_WRITE, 0x07, 0x02},
- {TUNER_WRITE, 0x11, 0xff},
- {TUNER_WRITE, 0x60, 0xf9},
- {TUNER_WRITE, 0x08, 0x01},
- {TUNER_WRITE, 0x00, 0x41},
- {0xff, 0xaa, 0xff}
-};
-
struct inittab fe_reset[] = {
{DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0xf1, 0xbf},
@@ -389,11 +330,7 @@ static int m88rs2000_tab_set(struct m88rs2000_state *state,
for (i = 0; i < 255; i++) {
switch (tab[i].cmd) {
case 0x01:
- ret = m88rs2000_demod_write(state, tab[i].reg,
- tab[i].val);
- break;
- case 0x02:
- ret = m88rs2000_tuner_write(state, tab[i].reg,
+ ret = m88rs2000_writereg(state, tab[i].reg,
tab[i].val);
break;
case 0x10:
@@ -419,7 +356,7 @@ static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
struct m88rs2000_state *state = fe->demodulator_priv;
u8 data;
- data = m88rs2000_demod_read(state, 0xb2);
+ data = m88rs2000_readreg(state, 0xb2);
data |= 0x03; /* bit0 V/H, bit1 off/on */
switch (volt) {
@@ -434,23 +371,11 @@ static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
break;
}
- m88rs2000_demod_write(state, 0xb2, data);
+ m88rs2000_writereg(state, 0xb2, data);
return 0;
}
-static int m88rs2000_startup(struct m88rs2000_state *state)
-{
- int ret = 0;
- u8 reg;
-
- reg = m88rs2000_tuner_read(state, 0x00);
- if ((reg & 0x40) == 0)
- ret = -ENODEV;
-
- return ret;
-}
-
static int m88rs2000_init(struct dvb_frontend *fe)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -458,7 +383,11 @@ static int m88rs2000_init(struct dvb_frontend *fe)
deb_info("m88rs2000: init chip\n");
/* Setup frontend from shutdown/cold */
- ret = m88rs2000_tab_set(state, m88rs2000_setup);
+ if (state->config->inittab)
+ ret = m88rs2000_tab_set(state,
+ (struct inittab *)state->config->inittab);
+ else
+ ret = m88rs2000_tab_set(state, m88rs2000_setup);
return ret;
}
@@ -475,7 +404,7 @@ static int m88rs2000_sleep(struct dvb_frontend *fe)
static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct m88rs2000_state *state = fe->demodulator_priv;
- u8 reg = m88rs2000_demod_read(state, 0x8c);
+ u8 reg = m88rs2000_readreg(state, 0x8c);
*status = 0;
@@ -488,183 +417,64 @@ static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
return 0;
}
-/* Extact code for these unknown but lmedm04 driver uses interupt callbacks */
-
static int m88rs2000_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- deb_info("m88rs2000_read_ber %d\n", *ber);
- *ber = 0;
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u8 tmp0, tmp1;
+
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ tmp0 = m88rs2000_readreg(state, 0xd8);
+ if ((tmp0 & 0x10) != 0) {
+ m88rs2000_writereg(state, 0x9a, 0xb0);
+ *ber = 0xffffffff;
+ return 0;
+ }
+
+ *ber = (m88rs2000_readreg(state, 0xd7) << 8) |
+ m88rs2000_readreg(state, 0xd6);
+
+ tmp1 = m88rs2000_readreg(state, 0xd9);
+ m88rs2000_writereg(state, 0xd9, (tmp1 & ~7) | 4);
+ /* needs to be written twice */
+ m88rs2000_writereg(state, 0xd8, (tmp0 & ~8) | 0x30);
+ m88rs2000_writereg(state, 0xd8, (tmp0 & ~8) | 0x30);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
+
return 0;
}
static int m88rs2000_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
- *strength = 0;
- return 0;
-}
+ if (fe->ops.tuner_ops.get_rf_strength)
+ fe->ops.tuner_ops.get_rf_strength(fe, strength);
-static int m88rs2000_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- deb_info("m88rs2000_read_snr %d\n", *snr);
- *snr = 0;
return 0;
}
-static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
-{
- deb_info("m88rs2000_read_ber %d\n", *ucblocks);
- *ucblocks = 0;
- return 0;
-}
-
-static int m88rs2000_tuner_gate_ctrl(struct m88rs2000_state *state, u8 offset)
-{
- int ret;
- ret = m88rs2000_tuner_write(state, 0x51, 0x1f - offset);
- ret |= m88rs2000_tuner_write(state, 0x51, 0x1f);
- ret |= m88rs2000_tuner_write(state, 0x50, offset);
- ret |= m88rs2000_tuner_write(state, 0x50, 0x00);
- msleep(20);
- return ret;
-}
-
-static int m88rs2000_set_tuner_rf(struct dvb_frontend *fe)
+static int m88rs2000_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct m88rs2000_state *state = fe->demodulator_priv;
- int reg;
- reg = m88rs2000_tuner_read(state, 0x3d);
- reg &= 0x7f;
- if (reg < 0x16)
- reg = 0xa1;
- else if (reg == 0x16)
- reg = 0x99;
- else
- reg = 0xf9;
- m88rs2000_tuner_write(state, 0x60, reg);
- reg = m88rs2000_tuner_gate_ctrl(state, 0x08);
+ *snr = 512 * m88rs2000_readreg(state, 0x65);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
- return reg;
+ return 0;
}
-static int m88rs2000_set_tuner(struct dvb_frontend *fe, u16 *offset)
+static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct m88rs2000_state *state = fe->demodulator_priv;
- int ret;
- u32 frequency = c->frequency;
- s32 offset_khz;
- s32 tmp;
- u32 symbol_rate = (c->symbol_rate / 1000);
- u32 f3db, gdiv28;
- u16 value, ndiv, lpf_coeff;
- u8 lpf_mxdiv, mlpf_max, mlpf_min, nlpf;
- u8 lo = 0x01, div4 = 0x0;
-
- /* Reset Tuner */
- ret = m88rs2000_tab_set(state, tuner_reset);
-
- /* Calculate frequency divider */
- if (frequency < 1060000) {
- lo |= 0x10;
- div4 = 0x1;
- ndiv = (frequency * 14 * 4) / FE_CRYSTAL_KHZ;
- } else
- ndiv = (frequency * 14 * 2) / FE_CRYSTAL_KHZ;
- ndiv = ndiv + ndiv % 2;
- ndiv = ndiv - 1024;
-
- ret = m88rs2000_tuner_write(state, 0x10, 0x80 | lo);
-
- /* Set frequency divider */
- ret |= m88rs2000_tuner_write(state, 0x01, (ndiv >> 8) & 0xf);
- ret |= m88rs2000_tuner_write(state, 0x02, ndiv & 0xff);
-
- ret |= m88rs2000_tuner_write(state, 0x03, 0x06);
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x10);
- if (ret < 0)
- return -ENODEV;
-
- /* Tuner Frequency Range */
- ret = m88rs2000_tuner_write(state, 0x10, lo);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x08);
-
- /* Tuner RF */
- ret |= m88rs2000_set_tuner_rf(fe);
-
- gdiv28 = (FE_CRYSTAL_KHZ / 1000 * 1694 + 500) / 1000;
- ret |= m88rs2000_tuner_write(state, 0x04, gdiv28 & 0xff);
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x04);
- if (ret < 0)
- return -ENODEV;
-
- value = m88rs2000_tuner_read(state, 0x26);
-
- f3db = (symbol_rate * 135) / 200 + 2000;
- f3db += FREQ_OFFSET_LOW_SYM_RATE;
- if (f3db < 7000)
- f3db = 7000;
- if (f3db > 40000)
- f3db = 40000;
-
- gdiv28 = gdiv28 * 207 / (value * 2 + 151);
- mlpf_max = gdiv28 * 135 / 100;
- mlpf_min = gdiv28 * 78 / 100;
- if (mlpf_max > 63)
- mlpf_max = 63;
-
- lpf_coeff = 2766;
-
- nlpf = (f3db * gdiv28 * 2 / lpf_coeff /
- (FE_CRYSTAL_KHZ / 1000) + 1) / 2;
- if (nlpf > 23)
- nlpf = 23;
- if (nlpf < 1)
- nlpf = 1;
-
- lpf_mxdiv = (nlpf * (FE_CRYSTAL_KHZ / 1000)
- * lpf_coeff * 2 / f3db + 1) / 2;
-
- if (lpf_mxdiv < mlpf_min) {
- nlpf++;
- lpf_mxdiv = (nlpf * (FE_CRYSTAL_KHZ / 1000)
- * lpf_coeff * 2 / f3db + 1) / 2;
- }
-
- if (lpf_mxdiv > mlpf_max)
- lpf_mxdiv = mlpf_max;
-
- ret = m88rs2000_tuner_write(state, 0x04, lpf_mxdiv);
- ret |= m88rs2000_tuner_write(state, 0x06, nlpf);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x04);
-
- ret |= m88rs2000_tuner_gate_ctrl(state, 0x01);
-
- msleep(80);
- /* calculate offset assuming 96000kHz*/
- offset_khz = (ndiv - ndiv % 2 + 1024) * FE_CRYSTAL_KHZ
- / 14 / (div4 + 1) / 2;
+ u8 tmp;
- offset_khz -= frequency;
+ *ucblocks = (m88rs2000_readreg(state, 0xd5) << 8) |
+ m88rs2000_readreg(state, 0xd4);
+ tmp = m88rs2000_readreg(state, 0xd8);
+ m88rs2000_writereg(state, 0xd8, tmp & ~0x20);
+ /* needs to be written twice */
+ m88rs2000_writereg(state, 0xd8, tmp | 0x20);
+ m88rs2000_writereg(state, 0xd8, tmp | 0x20);
- tmp = offset_khz;
- tmp *= 65536;
-
- tmp = (2 * tmp + 96000) / (2 * 96000);
- if (tmp < 0)
- tmp += 65536;
-
- *offset = tmp & 0xffff;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- return (ret < 0) ? -EINVAL : 0;
+ return 0;
}
static int m88rs2000_set_fec(struct m88rs2000_state *state,
@@ -692,7 +502,7 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
default:
fec_set = 0x08;
}
- m88rs2000_demod_write(state, 0x76, fec_set);
+ m88rs2000_writereg(state, 0x76, fec_set);
return 0;
}
@@ -701,9 +511,9 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
{
u8 reg;
- m88rs2000_demod_write(state, 0x9a, 0x30);
- reg = m88rs2000_demod_read(state, 0x76);
- m88rs2000_demod_write(state, 0x9a, 0xb0);
+ m88rs2000_writereg(state, 0x9a, 0x30);
+ reg = m88rs2000_readreg(state, 0x76);
+ m88rs2000_writereg(state, 0x9a, 0xb0);
switch (reg) {
case 0x88:
@@ -729,7 +539,9 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
struct m88rs2000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
fe_status_t status;
- int i, ret;
+ int i, ret = 0;
+ s32 tmp;
+ u32 tuner_freq;
u16 offset = 0;
u8 reg;
@@ -743,17 +555,37 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
}
/* Set Tuner */
- ret = m88rs2000_set_tuner(fe, &offset);
+ if (fe->ops.tuner_ops.set_params)
+ ret = fe->ops.tuner_ops.set_params(fe);
+
+ if (ret < 0)
+ return -ENODEV;
+
+ if (fe->ops.tuner_ops.get_frequency)
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_freq);
+
if (ret < 0)
return -ENODEV;
- ret = m88rs2000_demod_write(state, 0x9a, 0x30);
+ offset = tuner_freq - c->frequency;
+
+ /* calculate offset assuming 96000kHz*/
+ tmp = offset;
+ tmp *= 65536;
+
+ tmp = (2 * tmp + 96000) / (2 * 96000);
+ if (tmp < 0)
+ tmp += 65536;
+
+ offset = tmp & 0xffff;
+
+ ret = m88rs2000_writereg(state, 0x9a, 0x30);
/* Unknown usually 0xc6 sometimes 0xc1 */
- reg = m88rs2000_demod_read(state, 0x86);
- ret |= m88rs2000_demod_write(state, 0x86, reg);
+ reg = m88rs2000_readreg(state, 0x86);
+ ret |= m88rs2000_writereg(state, 0x86, reg);
/* Offset lower nibble always 0 */
- ret |= m88rs2000_demod_write(state, 0x9c, (offset >> 8));
- ret |= m88rs2000_demod_write(state, 0x9d, offset & 0xf0);
+ ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
+ ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
/* Reset Demod */
@@ -762,16 +594,16 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
/* Unknown */
- reg = m88rs2000_demod_read(state, 0x70);
- ret = m88rs2000_demod_write(state, 0x70, reg);
+ reg = m88rs2000_readreg(state, 0x70);
+ ret = m88rs2000_writereg(state, 0x70, reg);
/* Set FEC */
ret |= m88rs2000_set_fec(state, c->fec_inner);
- ret |= m88rs2000_demod_write(state, 0x85, 0x1);
- ret |= m88rs2000_demod_write(state, 0x8a, 0xbf);
- ret |= m88rs2000_demod_write(state, 0x8d, 0x1e);
- ret |= m88rs2000_demod_write(state, 0x90, 0xf1);
- ret |= m88rs2000_demod_write(state, 0x91, 0x08);
+ ret |= m88rs2000_writereg(state, 0x85, 0x1);
+ ret |= m88rs2000_writereg(state, 0x8a, 0xbf);
+ ret |= m88rs2000_writereg(state, 0x8d, 0x1e);
+ ret |= m88rs2000_writereg(state, 0x90, 0xf1);
+ ret |= m88rs2000_writereg(state, 0x91, 0x08);
if (ret < 0)
return -ENODEV;
@@ -787,27 +619,25 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
for (i = 0; i < 25; i++) {
- reg = m88rs2000_demod_read(state, 0x8c);
+ reg = m88rs2000_readreg(state, 0x8c);
if ((reg & 0x7) == 0x7) {
status = FE_HAS_LOCK;
break;
}
state->no_lock_count++;
if (state->no_lock_count == 15) {
- reg = m88rs2000_demod_read(state, 0x70);
+ reg = m88rs2000_readreg(state, 0x70);
reg ^= 0x4;
- m88rs2000_demod_write(state, 0x70, reg);
+ m88rs2000_writereg(state, 0x70, reg);
state->no_lock_count = 0;
}
- if (state->no_lock_count == 20)
- m88rs2000_set_tuner_rf(fe);
msleep(20);
}
if (status & FE_HAS_LOCK) {
state->fec_inner = m88rs2000_get_fec(state);
/* Unknown, suspected SNR level */
- reg = m88rs2000_demod_read(state, 0x65);
+ reg = m88rs2000_readreg(state, 0x65);
}
state->tuner_frequency = c->frequency;
@@ -830,9 +660,9 @@ static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
struct m88rs2000_state *state = fe->demodulator_priv;
if (enable)
- m88rs2000_demod_write(state, 0x81, 0x84);
+ m88rs2000_writereg(state, 0x81, 0x84);
else
- m88rs2000_demod_write(state, 0x81, 0x81);
+ m88rs2000_writereg(state, 0x81, 0x81);
udelay(10);
return 0;
}
@@ -863,7 +693,6 @@ static struct dvb_frontend_ops m88rs2000_ops = {
.release = m88rs2000_release,
.init = m88rs2000_init,
.sleep = m88rs2000_sleep,
- .write = m88rs2000_write,
.i2c_gate_ctrl = m88rs2000_i2c_gate_ctrl,
.read_status = m88rs2000_read_status,
.read_ber = m88rs2000_read_ber,
@@ -896,9 +725,6 @@ struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,
state->symbol_rate = 0;
state->fec_inner = 0;
- if (m88rs2000_startup(state) < 0)
- goto error;
-
/* create dvb_frontend */
memcpy(&state->frontend.ops, &m88rs2000_ops,
sizeof(struct dvb_frontend_ops));
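
The offset handling added to m88rs2000_set_frontend() above converts the difference between the tuner's reported frequency and the requested frequency (in kHz) into a 16-bit fraction of the 96000 kHz clock, rounding to the nearest step and wrapping negative offsets into two's complement. A self-contained sketch of that arithmetic, fine for the few-MHz offsets seen in practice:

/* Sketch only: mirrors the register scaling used in the hunk above. */
static u16 offset_to_reg(s32 offset_khz)
{
        s32 tmp = offset_khz * 65536;

        tmp = (2 * tmp + 96000) / (2 * 96000); /* round(offset * 65536 / 96000) */
        if (tmp < 0)
                tmp += 65536;                  /* two's-complement wrap */

        return tmp & 0xffff;    /* e.g. +1500 kHz -> 1024 (0x0400) */
}
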
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 59acdb696873..5a8023e5a4b8 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -26,8 +26,6 @@
struct m88rs2000_config {
/* Demodulator i2c address */
u8 demod_addr;
- /* Tuner address */
- u8 tuner_addr;
u8 *inittab;
@@ -55,12 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
}
#endif /* CONFIG_DVB_M88RS2000 */
-#define FE_CRYSTAL_KHZ 27000
-#define FREQ_OFFSET_LOW_SYM_RATE 3000
-
enum {
DEMOD_WRITE = 0x1,
- TUNER_WRITE,
WRITE_DELAY = 0x10,
};
#endif /* M88RS2000_H */
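
With the tuner entries removed from the built-in tables, a board driver that needs demod settings different from m88rs2000_setup can supply its own sequence through the inittab pointer of m88rs2000_config; m88rs2000_init() casts it back to struct inittab, as shown in the hunk above. A hedged sketch with made-up register values and a made-up I2C address:

static struct inittab example_inittab[] = {
        {DEMOD_WRITE, 0x9a, 0x30},      /* hypothetical demod writes */
        {DEMOD_WRITE, 0x00, 0x00},
        {WRITE_DELAY, 0x32, 0x00},      /* delay entry handled by m88rs2000_tab_set */
        {DEMOD_WRITE, 0x9a, 0xb0},
        {0xff, 0xaa, 0xff},             /* end marker, as in the tables above */
};

static const struct m88rs2000_config example_config = {
        .demod_addr = 0x68,             /* hypothetical I2C address */
        .inittab = (u8 *)example_inittab,
};
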
diff --git a/drivers/media/dvb-frontends/mb86a16.h b/drivers/media/dvb-frontends/mb86a16.h
index 6ea8c376394f..277ce061acf9 100644
--- a/drivers/media/dvb-frontends/mb86a16.h
+++ b/drivers/media/dvb-frontends/mb86a16.h
@@ -33,7 +33,7 @@ struct mb86a16_config {
-#if defined(CONFIG_DVB_MB86A16) || (defined(CONFIG_DVB_MB86A16_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MB86A16)
extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
struct i2c_adapter *i2c_adap);
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fade566927c3..f19cd7367040 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1,11 +1,9 @@
/*
 * Fujitsu mb86a20s ISDB-T/ISDB-Tsb Module driver
*
- * Copyright (C) 2010 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010-2013 Mauro Carvalho Chehab <mchehab@redhat.com>
* Copyright (C) 2009-2010 Douglas Landgraf <dougsland@redhat.com>
*
- * FIXME: Need to port to DVB v5.2 API
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
@@ -26,24 +24,15 @@ static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-#define rc(args...) do { \
- printk(KERN_ERR "mb86a20s: " args); \
-} while (0)
-
-#define dprintk(args...) \
- do { \
- if (debug) { \
- printk(KERN_DEBUG "mb86a20s: %s: ", __func__); \
- printk(args); \
- } \
- } while (0)
-
struct mb86a20s_state {
struct i2c_adapter *i2c;
const struct mb86a20s_config *config;
+ u32 last_frequency;
struct dvb_frontend frontend;
+ u32 estimated_rate[3];
+
bool need_init;
};
@@ -52,6 +41,8 @@ struct regdata {
u8 data;
};
+#define BER_SAMPLING_RATE 1 /* Seconds */
+
/*
 * Initialization sequence: Use whatever default values PV SBTVD
* does on its initialisation, obtained via USB snoop
@@ -94,41 +85,68 @@ static struct regdata mb86a20s_init[] = {
{ 0x04, 0x13 }, { 0x05, 0xff },
{ 0x04, 0x15 }, { 0x05, 0x4e },
{ 0x04, 0x16 }, { 0x05, 0x20 },
- { 0x52, 0x01 },
- { 0x50, 0xa7 }, { 0x51, 0xff },
+
+ /*
+ * On this demod, when the bit count reaches the count below,
+ * it collects the bit error count. The bit counters are initialized
+ * to 65535 here. This ensures that all of them will be quickly
+ * calculated when the device gets locked. As TMCC is parsed, the values
+ * will be adjusted later in the driver's code.
+ */
+ { 0x52, 0x01 }, /* Turn on BER before Viterbi */
+ { 0x50, 0xa7 }, { 0x51, 0x00 },
{ 0x50, 0xa8 }, { 0x51, 0xff },
{ 0x50, 0xa9 }, { 0x51, 0xff },
- { 0x50, 0xaa }, { 0x51, 0xff },
+ { 0x50, 0xaa }, { 0x51, 0x00 },
{ 0x50, 0xab }, { 0x51, 0xff },
{ 0x50, 0xac }, { 0x51, 0xff },
- { 0x50, 0xad }, { 0x51, 0xff },
+ { 0x50, 0xad }, { 0x51, 0x00 },
{ 0x50, 0xae }, { 0x51, 0xff },
{ 0x50, 0xaf }, { 0x51, 0xff },
- { 0x5e, 0x07 },
- { 0x50, 0xdc }, { 0x51, 0x01 },
- { 0x50, 0xdd }, { 0x51, 0xf4 },
- { 0x50, 0xde }, { 0x51, 0x01 },
- { 0x50, 0xdf }, { 0x51, 0xf4 },
- { 0x50, 0xe0 }, { 0x51, 0x01 },
- { 0x50, 0xe1 }, { 0x51, 0xf4 },
- { 0x50, 0xb0 }, { 0x51, 0x07 },
- { 0x50, 0xb2 }, { 0x51, 0xff },
- { 0x50, 0xb3 }, { 0x51, 0xff },
- { 0x50, 0xb4 }, { 0x51, 0xff },
- { 0x50, 0xb5 }, { 0x51, 0xff },
- { 0x50, 0xb6 }, { 0x51, 0xff },
- { 0x50, 0xb7 }, { 0x51, 0xff },
- { 0x50, 0x50 }, { 0x51, 0x02 },
- { 0x50, 0x51 }, { 0x51, 0x04 },
- { 0x45, 0x04 },
- { 0x48, 0x04 },
+
+ /*
+ * On this demod, post BER counts blocks. When the count reaches the
+ * value below, it collects the block error count. The block counters
+ * are initialized to 127 here. This ensures that all of them will be
+ * quickly calculated when the device gets locked. As TMCC is parsed, the
+ * values will be adjusted later in the driver's code.
+ */
+ { 0x5e, 0x07 }, /* Turn on BER after Viterbi */
+ { 0x50, 0xdc }, { 0x51, 0x00 },
+ { 0x50, 0xdd }, { 0x51, 0x7f },
+ { 0x50, 0xde }, { 0x51, 0x00 },
+ { 0x50, 0xdf }, { 0x51, 0x7f },
+ { 0x50, 0xe0 }, { 0x51, 0x00 },
+ { 0x50, 0xe1 }, { 0x51, 0x7f },
+
+ /*
+ * On this demod, when the block count reaches the count below,
+ * it collects the block error count. The block counters are initialized
+ * to 127 here. This ensures that all of them will be quickly
+ * calculated when the device gets locked. As TMCC is parsed, the values
+ * will be adjusted later in the driver's code.
+ */
+ { 0x50, 0xb0 }, { 0x51, 0x07 }, /* Enable PER */
+ { 0x50, 0xb2 }, { 0x51, 0x00 },
+ { 0x50, 0xb3 }, { 0x51, 0x7f },
+ { 0x50, 0xb4 }, { 0x51, 0x00 },
+ { 0x50, 0xb5 }, { 0x51, 0x7f },
+ { 0x50, 0xb6 }, { 0x51, 0x00 },
+ { 0x50, 0xb7 }, { 0x51, 0x7f },
+
+ { 0x50, 0x50 }, { 0x51, 0x02 }, /* MER manual mode */
+ { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
+ { 0x45, 0x04 }, /* CN symbol 4 */
+ { 0x48, 0x04 }, /* CN manual mode */
+
{ 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
{ 0x50, 0xd6 }, { 0x51, 0x1f },
{ 0x50, 0xd2 }, { 0x51, 0x03 },
{ 0x50, 0xd7 }, { 0x51, 0x3f },
{ 0x28, 0x74 }, { 0x29, 0x00 }, { 0x28, 0x74 }, { 0x29, 0x40 },
{ 0x28, 0x46 }, { 0x29, 0x2c }, { 0x28, 0x46 }, { 0x29, 0x0c },
- { 0x04, 0x40 }, { 0x05, 0x01 },
+
+ { 0x04, 0x40 }, { 0x05, 0x00 },
{ 0x28, 0x00 }, { 0x29, 0x10 },
{ 0x28, 0x05 }, { 0x29, 0x02 },
{ 0x1c, 0x01 },
@@ -176,8 +194,24 @@ static struct regdata mb86a20s_reset_reception[] = {
{ 0x08, 0x00 },
};
+static struct regdata mb86a20s_per_ber_reset[] = {
+ { 0x53, 0x00 }, /* pre BER Counter reset */
+ { 0x53, 0x07 },
+
+ { 0x5f, 0x00 }, /* post BER Counter reset */
+ { 0x5f, 0x07 },
+
+ { 0x50, 0xb1 }, /* PER Counter reset */
+ { 0x51, 0x07 },
+ { 0x51, 0x00 },
+};
+
+/*
+ * I2C read/write functions and macros
+ */
+
static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
- u8 i2c_addr, int reg, int data)
+ u8 i2c_addr, u8 reg, u8 data)
{
u8 buf[] = { reg, data };
struct i2c_msg msg = {
@@ -187,8 +221,9 @@ static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg error (rc == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, rc, reg, data);
+ dev_err(&state->i2c->dev,
+ "%s: writereg error (rc == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, rc, reg, data);
return rc;
}
@@ -222,8 +257,9 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, msg, 2);
if (rc != 2) {
- rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
- return rc;
+ dev_err(&state->i2c->dev, "%s: reg=0x%x (error=%d)\n",
+ __func__, reg, rc);
+ return (rc < 0) ? rc : -EIO;
}
return val;
@@ -237,100 +273,22 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
mb86a20s_i2c_writeregdata(state, state->config->demod_address, \
regdata, ARRAY_SIZE(regdata))
-static int mb86a20s_initfe(struct dvb_frontend *fe)
-{
- struct mb86a20s_state *state = fe->demodulator_priv;
- int rc;
- u8 regD5 = 1;
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- /* Initialize the frontend */
- rc = mb86a20s_writeregdata(state, mb86a20s_init);
- if (rc < 0)
- goto err;
-
- if (!state->config->is_serial) {
- regD5 &= ~1;
-
- rc = mb86a20s_writereg(state, 0x50, 0xd5);
- if (rc < 0)
- goto err;
- rc = mb86a20s_writereg(state, 0x51, regD5);
- if (rc < 0)
- goto err;
- }
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
-err:
- if (rc < 0) {
- state->need_init = true;
- printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
- } else {
- state->need_init = false;
- dprintk("Initialization succeeded.\n");
- }
- return rc;
-}
-
-static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct mb86a20s_state *state = fe->demodulator_priv;
- unsigned rf_max, rf_min, rf;
- u8 val;
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
-
- /* Does a binary search to get RF strength */
- rf_max = 0xfff;
- rf_min = 0;
- do {
- rf = (rf_max + rf_min) / 2;
- mb86a20s_writereg(state, 0x04, 0x1f);
- mb86a20s_writereg(state, 0x05, rf >> 8);
- mb86a20s_writereg(state, 0x04, 0x20);
- mb86a20s_writereg(state, 0x04, rf);
-
- val = mb86a20s_readreg(state, 0x02);
- if (val & 0x08)
- rf_min = (rf_max + rf_min) / 2;
- else
- rf_max = (rf_max + rf_min) / 2;
- if (rf_max - rf_min < 4) {
- *strength = (((rf_max + rf_min) / 2) * 65535) / 4095;
- break;
- }
- } while (1);
-
- dprintk("signal strength = %d\n", *strength);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
- return 0;
-}
+/*
+ * Ancillary internal routines (likely compiled inline)
+ *
+ * The functions below assume that the gate lock has already been obtained
+ */
static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- u8 val;
+ int val;
- dprintk("\n");
*status = 0;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
val = mb86a20s_readreg(state, 0x0a) & 0xf;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ if (val < 0)
+ return val;
if (val >= 2)
*status |= FE_HAS_SIGNAL;
@@ -347,49 +305,56 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (val >= 8) /* Maybe 9? */
*status |= FE_HAS_LOCK;
- dprintk("val = %d, status = 0x%02x\n", val, *status);
+ dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
+ __func__, *status, val);
return 0;
}
-static int mb86a20s_set_frontend(struct dvb_frontend *fe)
+static int mb86a20s_read_signal_strength(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
int rc;
-#if 0
- /*
- * FIXME: Properly implement the set frontend properties
- */
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
-#endif
-
- dprintk("\n");
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- dprintk("Calling tuner set parameters\n");
- fe->ops.tuner_ops.set_params(fe);
+ unsigned rf_max, rf_min, rf;
- /*
- * Make it more reliable: if, for some reason, the initial
- * device initialization doesn't happen, initialize it when
- * a SBTVD parameters are adjusted.
- *
- * Unfortunately, due to a hard to track bug at tda829x/tda18271,
- * the agc callback logic is not called during DVB attach time,
- * causing mb86a20s to not be initialized with Kworld SBTVD.
- * So, this hack is needed, in order to make Kworld SBTVD to work.
- */
- if (state->need_init)
- mb86a20s_initfe(fe);
+ /* Does a binary search to get RF strength */
+ rf_max = 0xfff;
+ rf_min = 0;
+ do {
+ rf = (rf_max + rf_min) / 2;
+ rc = mb86a20s_writereg(state, 0x04, 0x1f);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x05, rf >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x04, 0x20);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x04, rf);
+ if (rc < 0)
+ return rc;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
- rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
+ rc = mb86a20s_readreg(state, 0x02);
+ if (rc < 0)
+ return rc;
+ if (rc & 0x08)
+ rf_min = (rf_max + rf_min) / 2;
+ else
+ rf_max = (rf_max + rf_min) / 2;
+ if (rf_max - rf_min < 4) {
+ rf = (rf_max + rf_min) / 2;
+
+ /* Rescale it from 2^12 (4096) to 2^16 */
+ rf <<= (16 - 12);
+ dev_dbg(&state->i2c->dev,
+ "%s: signal strength = %d (%d < RF=%d < %d)\n",
+ __func__, rf, rf_min, rf >> 4, rf_max);
+ return rf;
+ }
+ } while (1);
- return rc;
+ return 0;
}
static int mb86a20s_get_modulation(struct mb86a20s_state *state,
@@ -410,7 +375,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- switch ((rc & 0x70) >> 4) {
+ switch ((rc >> 4) & 0x07) {
case 0:
return DQPSK;
case 1:
@@ -443,7 +408,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- switch (rc) {
+ switch ((rc >> 4) & 0x07) {
case 0:
return FEC_1_2;
case 1:
@@ -478,24 +443,38 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
rc = mb86a20s_readreg(state, 0x6e);
if (rc < 0)
return rc;
- if (rc > 3)
- return -EINVAL; /* Not used */
- return rc;
+
+ switch ((rc >> 4) & 0x07) {
+ case 1:
+ return GUARD_INTERVAL_1_4;
+ case 2:
+ return GUARD_INTERVAL_1_8;
+ case 3:
+ return GUARD_INTERVAL_1_16;
+ case 4:
+ return GUARD_INTERVAL_1_32;
+
+ default:
+ case 0:
+ return GUARD_INTERVAL_AUTO;
+ }
}
static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
unsigned layer)
{
int rc, count;
-
static unsigned char reg[] = {
[0] = 0x89, /* Layer A */
[1] = 0x8d, /* Layer B */
[2] = 0x91, /* Layer C */
};
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
+
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
return rc;
@@ -504,113 +483,1451 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
return rc;
count = (rc >> 4) & 0x0f;
+ dev_dbg(&state->i2c->dev, "%s: segments: %d.\n", __func__, count);
+
return count;
}
+static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Fixed parameters */
+ c->delivery_system = SYS_ISDBT;
+ c->bandwidth_hz = 6000000;
+
+ /* Initialize values that will be later autodetected */
+ c->isdbt_layer_enabled = 0;
+ c->transmission_mode = TRANSMISSION_MODE_AUTO;
+ c->guard_interval = GUARD_INTERVAL_AUTO;
+ c->isdbt_sb_mode = 0;
+ c->isdbt_sb_segment_count = 0;
+}
+
+/*
+ * Estimates the bit rate using the per-segment bit rate given by
+ * ABNT/NBR 15601 spec (table 4).
+ */
+static u32 isdbt_rate[3][5][4] = {
+ { /* DQPSK/QPSK */
+ { 280850, 312060, 330420, 340430 }, /* 1/2 */
+ { 374470, 416080, 440560, 453910 }, /* 2/3 */
+ { 421280, 468090, 495630, 510650 }, /* 3/4 */
+ { 468090, 520100, 550700, 567390 }, /* 5/6 */
+ { 491500, 546110, 578230, 595760 }, /* 7/8 */
+ }, { /* QAM16 */
+ { 561710, 624130, 660840, 680870 }, /* 1/2 */
+ { 748950, 832170, 881120, 907820 }, /* 2/3 */
+ { 842570, 936190, 991260, 1021300 }, /* 3/4 */
+ { 936190, 1040210, 1101400, 1134780 }, /* 5/6 */
+ { 983000, 1092220, 1156470, 1191520 }, /* 7/8 */
+ }, { /* QAM64 */
+ { 842570, 936190, 991260, 1021300 }, /* 1/2 */
+ { 1123430, 1248260, 1321680, 1361740 }, /* 2/3 */
+ { 1263860, 1404290, 1486900, 1531950 }, /* 3/4 */
+ { 1404290, 1560320, 1652110, 1702170 }, /* 5/6 */
+ { 1474500, 1638340, 1734710, 1787280 }, /* 7/8 */
+ }
+};
+
+static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
+ u32 modulation, u32 fec, u32 interleaving,
+ u32 segment)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ u32 rate;
+ int m, f, i;
+
+ /*
+ * If modulation/fec/interleaving is not detected, default to the
+ * lowest bit rate, to avoid taking too long to get BER.
+ */
+ switch (modulation) {
+ case DQPSK:
+ case QPSK:
+ default:
+ m = 0;
+ break;
+ case QAM_16:
+ m = 1;
+ break;
+ case QAM_64:
+ m = 2;
+ break;
+ }
+
+ switch (fec) {
+ default:
+ case FEC_1_2:
+ case FEC_AUTO:
+ f = 0;
+ break;
+ case FEC_2_3:
+ f = 1;
+ break;
+ case FEC_3_4:
+ f = 2;
+ break;
+ case FEC_5_6:
+ f = 3;
+ break;
+ case FEC_7_8:
+ f = 4;
+ break;
+ }
+
+ switch (interleaving) {
+ default:
+ case GUARD_INTERVAL_1_4:
+ i = 0;
+ break;
+ case GUARD_INTERVAL_1_8:
+ i = 1;
+ break;
+ case GUARD_INTERVAL_1_16:
+ i = 2;
+ break;
+ case GUARD_INTERVAL_1_32:
+ i = 3;
+ break;
+ }
+
+ /* Sample the BER over BER_SAMPLING_RATE seconds */
+ rate = isdbt_rate[m][f][i] * segment * BER_SAMPLING_RATE;
+
+ /* Avoid sampling too quickly or overflowing the register */
+ if (rate < 256)
+ rate = 256;
+ else if (rate > (1 << 24) - 1)
+ rate = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000,
+ rate, rate);
+
+ state->estimated_rate[layer] = rate;
+}
+
+
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i, rc;
- /* Fixed parameters */
- p->delivery_system = SYS_ISDBT;
- p->bandwidth_hz = 6000000;
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+ /* Reset frontend cache to default values */
+ mb86a20s_reset_frontend_cache(fe);
/* Check for partial reception */
rc = mb86a20s_writereg(state, 0x6d, 0x85);
- if (rc >= 0)
- rc = mb86a20s_readreg(state, 0x6e);
- if (rc >= 0)
- p->isdbt_partial_reception = (rc & 0x10) ? 1 : 0;
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ c->isdbt_partial_reception = (rc & 0x10) ? 1 : 0;
/* Get per-layer data */
- p->isdbt_layer_enabled = 0;
+
for (i = 0; i < 3; i++) {
+ dev_dbg(&state->i2c->dev, "%s: getting data for layer %c.\n",
+ __func__, 'A' + i);
+
rc = mb86a20s_get_segment_count(state, i);
- if (rc >= 0 && rc < 14)
- p->layer[i].segment_count = rc;
- if (rc == 0x0f)
+ if (rc < 0)
+ goto noperlayer_error;
+ if (rc >= 0 && rc < 14) {
+ c->layer[i].segment_count = rc;
+ } else {
+ c->layer[i].segment_count = 0;
+ state->estimated_rate[i] = 0;
continue;
- p->isdbt_layer_enabled |= 1 << i;
+ }
+ c->isdbt_layer_enabled |= 1 << i;
rc = mb86a20s_get_modulation(state, i);
- if (rc >= 0)
- p->layer[i].modulation = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: modulation %d.\n",
+ __func__, rc);
+ c->layer[i].modulation = rc;
rc = mb86a20s_get_fec(state, i);
- if (rc >= 0)
- p->layer[i].fec = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: FEC %d.\n",
+ __func__, rc);
+ c->layer[i].fec = rc;
rc = mb86a20s_get_interleaving(state, i);
- if (rc >= 0)
- p->layer[i].interleaving = rc;
+ if (rc < 0)
+ goto noperlayer_error;
+ dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
+ __func__, rc);
+ c->layer[i].interleaving = rc;
+ mb86a20s_layer_bitrate(fe, i, c->layer[i].modulation,
+ c->layer[i].fec,
+ c->layer[i].interleaving,
+ c->layer[i].segment_count);
}
- p->isdbt_sb_mode = 0;
rc = mb86a20s_writereg(state, 0x6d, 0x84);
- if ((rc >= 0) && ((rc & 0x60) == 0x20)) {
- p->isdbt_sb_mode = 1;
+ if (rc < 0)
+ return rc;
+ if ((rc & 0x60) == 0x20) {
+ c->isdbt_sb_mode = 1;
/* At least, one segment should exist */
- if (!p->isdbt_sb_segment_count)
- p->isdbt_sb_segment_count = 1;
- } else
- p->isdbt_sb_segment_count = 0;
+ if (!c->isdbt_sb_segment_count)
+ c->isdbt_sb_segment_count = 1;
+ }
/* Get transmission mode and guard interval */
- p->transmission_mode = TRANSMISSION_MODE_AUTO;
- p->guard_interval = GUARD_INTERVAL_AUTO;
rc = mb86a20s_readreg(state, 0x07);
- if (rc >= 0) {
- if ((rc & 0x60) == 0x20) {
- switch (rc & 0x0c >> 2) {
- case 0:
- p->transmission_mode = TRANSMISSION_MODE_2K;
- break;
- case 1:
- p->transmission_mode = TRANSMISSION_MODE_4K;
- break;
- case 2:
- p->transmission_mode = TRANSMISSION_MODE_8K;
- break;
- }
+ if (rc < 0)
+ return rc;
+ if ((rc & 0x60) == 0x20) {
+ switch ((rc & 0x0c) >> 2) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_4K;
+ break;
+ case 2:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+ }
+ if (!(rc & 0x10)) {
+ switch (rc & 0x3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ }
+ }
+ return 0;
+
+noperlayer_error:
+
+ /* per-layer info is incomplete; discard all per-layer data */
+ c->isdbt_layer_enabled = 0;
+
+ return rc;
+}
+
+static int mb86a20s_reset_counters(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Reset the counters, if the channel changed */
+ if (state->last_frequency != c->frequency) {
+ memset(&c->strength, 0, sizeof(c->strength));
+ memset(&c->cnr, 0, sizeof(c->cnr));
+ memset(&c->pre_bit_error, 0, sizeof(c->pre_bit_error));
+ memset(&c->pre_bit_count, 0, sizeof(c->pre_bit_count));
+ memset(&c->post_bit_error, 0, sizeof(c->post_bit_error));
+ memset(&c->post_bit_count, 0, sizeof(c->post_bit_count));
+ memset(&c->block_error, 0, sizeof(c->block_error));
+ memset(&c->block_count, 0, sizeof(c->block_count));
+
+ state->last_frequency = c->frequency;
+ }
+
+ /* Clear status for most stats */
+
+ /* BER/PER counter reset */
+ rc = mb86a20s_writeregdata(state, mb86a20s_per_ber_reset);
+ if (rc < 0)
+ goto err;
+
+ /* CNR counter reset */
+ rc = mb86a20s_readreg(state, 0x45);
+ if (rc < 0)
+ goto err;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x45, val | 0x10);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x45, val & 0x6f);
+ if (rc < 0)
+ goto err;
+
+ /* MER counter reset */
+ rc = mb86a20s_writereg(state, 0x50, 0x50);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ goto err;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x51, val | 0x01);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x51, val & 0x06);
+ if (rc < 0)
+ goto err;
+
+ goto ok;
+err:
+ dev_err(&state->i2c->dev,
+ "%s: Can't reset FE statistics (error %d).\n",
+ __func__, rc);
+ok:
+ return rc;
+}
+
+static int mb86a20s_get_pre_ber(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the BER measures are already available */
+ rc = mb86a20s_readreg(state, 0x54);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: preBER for layer %c is not available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Bit Error Count */
+ rc = mb86a20s_readreg(state, 0x55 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error = rc << 16;
+ rc = mb86a20s_readreg(state, 0x56 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc << 8;
+ rc = mb86a20s_readreg(state, 0x57 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: bit error before Viterbi for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xa7 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count = rc << 16;
+ rc = mb86a20s_writereg(state, 0x50, 0xa8 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xa9 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: bit count before Viterbi for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+
+ /*
+ * As TMCC data is obtained from the frontend, the BER bit counters
+ * can be better estimated, allowing the BER to be measured over a
+ * longer period. Use that data, if available, to update the bit
+ * count measure.
+ */
+
+ if (state->estimated_rate[layer]
+ && state->estimated_rate[layer] != *count) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating layer %c preBER counter to %d.\n",
+ __func__, 'A' + layer, state->estimated_rate[layer]);
+
+ /* Turn off BER before Viterbi */
+ rc = mb86a20s_writereg(state, 0x52, 0x00);
+
+ /* Update counter for this layer */
+ rc = mb86a20s_writereg(state, 0x50, 0xa7 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer] >> 16);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xa8 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer] >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xa9 + layer * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51,
+ state->estimated_rate[layer]);
+ if (rc < 0)
+ return rc;
+
+ /* Turn on BER before Viterbi */
+ rc = mb86a20s_writereg(state, 0x52, 0x01);
+
+ /* Reset all preBER counters */
+ rc = mb86a20s_writereg(state, 0x53, 0x00);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x53, 0x07);
+ } else {
+ /* Reset counter to collect new data */
+ rc = mb86a20s_readreg(state, 0x53);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x53, val & ~(1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x53, val | (1 << layer));
+ }
+
+ return rc;
+}
+
+static int mb86a20s_get_post_ber(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ u32 counter, collect_rate;
+ int rc, val;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the BER measures are already available */
+ rc = mb86a20s_readreg(state, 0x60);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: post BER for layer %c is not available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Bit Error Count */
+ rc = mb86a20s_readreg(state, 0x64 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error = rc << 16;
+ rc = mb86a20s_readreg(state, 0x65 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc << 8;
+ rc = mb86a20s_readreg(state, 0x66 + layer * 3);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: post bit error for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xdc + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ counter = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xdd + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ counter |= rc;
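+ /* Each block is one 204-byte RS-coded TS packet (188 + 16 parity bytes) */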
+ *count = counter * 204 * 8;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: post bit count for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+ /*
+ * As TMCC data is obtained from the frontend, the BER bit counters
+ * can be better estimated, allowing the BER to be measured over a
+ * longer period. Use that data, if available, to update the bit
+ * count measure.
+ */
+
+ if (!state->estimated_rate[layer])
+ goto reset_measurement;
+
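+ /* Convert the estimated bit count into a number of 204-byte packets */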
+ collect_rate = state->estimated_rate[layer] / 204 / 8;
+ if (collect_rate < 32)
+ collect_rate = 32;
+ if (collect_rate > 65535)
+ collect_rate = 65535;
+ if (collect_rate != counter) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating postBER counter on layer %c to %d.\n",
+ __func__, 'A' + layer, collect_rate);
+
+ /* Turn off BER after Viterbi */
+ rc = mb86a20s_writereg(state, 0x5e, 0x00);
+
+ /* Update counter for this layer */
+ rc = mb86a20s_writereg(state, 0x50, 0xdc + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xdd + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate & 0xff);
+ if (rc < 0)
+ return rc;
+
+ /* Turn on BER after Viterbi */
+ rc = mb86a20s_writereg(state, 0x5e, 0x07);
+
+ /* Reset all postBER counters */
+ rc = mb86a20s_writereg(state, 0x5f, 0x00);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x5f, 0x07);
+
+ return rc;
+ }
+
+reset_measurement:
+ /* Reset counter to collect new data */
+ rc = mb86a20s_readreg(state, 0x5f);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x5f, val & ~(1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x5f, val | (1 << layer));
+
+ return rc;
+}
+
+static int mb86a20s_get_blk_error(struct dvb_frontend *fe,
+ unsigned layer,
+ u32 *error, u32 *count)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc, val;
+ u32 collect_rate;
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (layer >= 3)
+ return -EINVAL;
+
+ /* Check if the PER measures are already available */
+ rc = mb86a20s_writereg(state, 0x50, 0xb8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available for that layer */
+
+ if (!(rc & (1 << layer))) {
+ dev_dbg(&state->i2c->dev,
+ "%s: block counts for layer %c aren't available yet.\n",
+ __func__, 'A' + layer);
+ return -EBUSY;
+ }
+
+ /* Read Packet error Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xb9 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *error = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xba + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *error |= rc;
+ dev_err(&state->i2c->dev, "%s: block error for layer %c: %d.\n",
+ __func__, 'A' + layer, *error);
+
+ /* Read Bit Count */
+ rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count = rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ *count |= rc;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: block count for layer %c: %d.\n",
+ __func__, 'A' + layer, *count);
+
+ /*
+ * As TMCC data is obtained from the frontend, the BER bit counters
+ * can be better estimated, allowing the BER to be measured over a
+ * longer period. Use that data, if available, to update the bit
+ * count measure.
+ */
+
+ if (!state->estimated_rate[layer])
+ goto reset_measurement;
+
+ collect_rate = state->estimated_rate[layer] / 204 / 8;
+ if (collect_rate < 32)
+ collect_rate = 32;
+ if (collect_rate > 65535)
+ collect_rate = 65535;
+
+ if (collect_rate != *count) {
+ dev_dbg(&state->i2c->dev,
+ "%s: updating PER counter on layer %c to %d.\n",
+ __func__, 'A' + layer, collect_rate);
+
+ /* Stop PER measurement */
+ rc = mb86a20s_writereg(state, 0x50, 0xb0);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x00);
+ if (rc < 0)
+ return rc;
+
+ /* Update this layer's counter */
+ rc = mb86a20s_writereg(state, 0x50, 0xb2 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate >> 8);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x50, 0xb3 + layer * 2);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, collect_rate & 0xff);
+ if (rc < 0)
+ return rc;
+
+ /* start PER measurement */
+ rc = mb86a20s_writereg(state, 0x50, 0xb0);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x07);
+ if (rc < 0)
+ return rc;
+
+ /* Reset all counters to collect new data */
+ rc = mb86a20s_writereg(state, 0x50, 0xb1);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x07);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, 0x00);
+
+ return rc;
+ }
+
+reset_measurement:
+ /* Reset counter to collect new data */
+ rc = mb86a20s_writereg(state, 0x50, 0xb1);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ val = rc;
+ rc = mb86a20s_writereg(state, 0x51, val | (1 << layer));
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, val & ~(1 << layer));
+
+ return rc;
+}
+
+struct linear_segments {
+ unsigned x, y;
+};
+
+/*
+ * All tables below return a dB/1000 measurement
+ */
+
+static struct linear_segments cnr_to_db_table[] = {
+ { 19648, 0},
+ { 18187, 1000},
+ { 16534, 2000},
+ { 14823, 3000},
+ { 13161, 4000},
+ { 11622, 5000},
+ { 10279, 6000},
+ { 9089, 7000},
+ { 8042, 8000},
+ { 7137, 9000},
+ { 6342, 10000},
+ { 5641, 11000},
+ { 5030, 12000},
+ { 4474, 13000},
+ { 3988, 14000},
+ { 3556, 15000},
+ { 3180, 16000},
+ { 2841, 17000},
+ { 2541, 18000},
+ { 2276, 19000},
+ { 2038, 20000},
+ { 1800, 21000},
+ { 1625, 22000},
+ { 1462, 23000},
+ { 1324, 24000},
+ { 1175, 25000},
+ { 1063, 26000},
+ { 980, 27000},
+ { 907, 28000},
+ { 840, 29000},
+ { 788, 30000},
+};
+
+static struct linear_segments cnr_64qam_table[] = {
+ { 3922688, 0},
+ { 3920384, 1000},
+ { 3902720, 2000},
+ { 3894784, 3000},
+ { 3882496, 4000},
+ { 3872768, 5000},
+ { 3858944, 6000},
+ { 3851520, 7000},
+ { 3838976, 8000},
+ { 3829248, 9000},
+ { 3818240, 10000},
+ { 3806976, 11000},
+ { 3791872, 12000},
+ { 3767040, 13000},
+ { 3720960, 14000},
+ { 3637504, 15000},
+ { 3498496, 16000},
+ { 3296000, 17000},
+ { 3031040, 18000},
+ { 2715392, 19000},
+ { 2362624, 20000},
+ { 1963264, 21000},
+ { 1649664, 22000},
+ { 1366784, 23000},
+ { 1120768, 24000},
+ { 890880, 25000},
+ { 723456, 26000},
+ { 612096, 27000},
+ { 518912, 28000},
+ { 448256, 29000},
+ { 388864, 30000},
+};
+
+static struct linear_segments cnr_16qam_table[] = {
+ { 5314816, 0},
+ { 5219072, 1000},
+ { 5118720, 2000},
+ { 4998912, 3000},
+ { 4875520, 4000},
+ { 4736000, 5000},
+ { 4604160, 6000},
+ { 4458752, 7000},
+ { 4300288, 8000},
+ { 4092928, 9000},
+ { 3836160, 10000},
+ { 3521024, 11000},
+ { 3155968, 12000},
+ { 2756864, 13000},
+ { 2347008, 14000},
+ { 1955072, 15000},
+ { 1593600, 16000},
+ { 1297920, 17000},
+ { 1043968, 18000},
+ { 839680, 19000},
+ { 672256, 20000},
+ { 523008, 21000},
+ { 424704, 22000},
+ { 345088, 23000},
+ { 280064, 24000},
+ { 221440, 25000},
+ { 179712, 26000},
+ { 151040, 27000},
+ { 128512, 28000},
+ { 110080, 29000},
+ { 95744, 30000},
+};
+
+struct linear_segments cnr_qpsk_table[] = {
+ { 2834176, 0},
+ { 2683648, 1000},
+ { 2536960, 2000},
+ { 2391808, 3000},
+ { 2133248, 4000},
+ { 1906176, 5000},
+ { 1666560, 6000},
+ { 1422080, 7000},
+ { 1189632, 8000},
+ { 976384, 9000},
+ { 790272, 10000},
+ { 633344, 11000},
+ { 505600, 12000},
+ { 402944, 13000},
+ { 320768, 14000},
+ { 255488, 15000},
+ { 204032, 16000},
+ { 163072, 17000},
+ { 130304, 18000},
+ { 105216, 19000},
+ { 83456, 20000},
+ { 65024, 21000},
+ { 52480, 22000},
+ { 42752, 23000},
+ { 34560, 24000},
+ { 27136, 25000},
+ { 22016, 26000},
+ { 18432, 27000},
+ { 15616, 28000},
+ { 13312, 29000},
+ { 11520, 30000},
+};
+
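+/*
+ * Linearly interpolates over a table of (x, y) points sorted in decreasing
+ * x order; values outside the table range are clamped to the first or last
+ * entry.
+ */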
+static u32 interpolate_value(u32 value, struct linear_segments *segments,
+ unsigned len)
+{
+ u64 tmp64;
+ u32 dx, dy;
+ int i, ret;
+
+ if (value >= segments[0].x)
+ return segments[0].y;
+ if (value < segments[len-1].x)
+ return segments[len-1].y;
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == segments[i].x)
+ return segments[i].y;
+ if (value > segments[i].x)
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = segments[i].y - segments[i - 1].y;
+ dx = segments[i - 1].x - segments[i].x;
+ tmp64 = value - segments[i].x;
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = segments[i].y - tmp64;
+
+ return ret;
+}
+
+static int mb86a20s_get_main_CNR(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 cnr_linear, cnr;
+ int rc, val;
+
+ /* Check if CNR is available */
+ rc = mb86a20s_readreg(state, 0x45);
+ if (rc < 0)
+ return rc;
+
+ if (!(rc & 0x40)) {
+ dev_info(&state->i2c->dev, "%s: CNR is not available yet.\n",
+ __func__);
+ return -EBUSY;
+ }
+ val = rc;
+
+ rc = mb86a20s_readreg(state, 0x46);
+ if (rc < 0)
+ return rc;
+ cnr_linear = rc << 8;
+
+ rc = mb86a20s_readreg(state, 0x46);
+ if (rc < 0)
+ return rc;
+ cnr_linear |= rc;
+
+ cnr = interpolate_value(cnr_linear,
+ cnr_to_db_table, ARRAY_SIZE(cnr_to_db_table));
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = cnr;
+
+ dev_dbg(&state->i2c->dev, "%s: CNR is %d.%03d dB (%d)\n",
+ __func__, cnr / 1000, cnr % 1000, cnr_linear);
+
+ /* CNR counter reset */
+ rc = mb86a20s_writereg(state, 0x45, val | 0x10);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x45, val & 0x6f);
+
+ return rc;
+}
+
+static int mb86a20s_get_blk_error_layer_CNR(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 mer, cnr;
+ int rc, val, i;
+ struct linear_segments *segs;
+ unsigned segs_len;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Check if the measures are already available */
+ rc = mb86a20s_writereg(state, 0x50, 0x5b);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+
+ /* Check if data is available */
+ if (!(rc & 0x01)) {
+ dev_info(&state->i2c->dev,
+ "%s: MER measures aren't available yet.\n", __func__);
+ return -EBUSY;
+ }
+
+ /* Read all layers */
+ for (i = 0; i < 3; i++) {
+ if (!(c->isdbt_layer_enabled & (1 << i))) {
+ c->cnr.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ continue;
+ }
+
+ rc = mb86a20s_writereg(state, 0x50, 0x52 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer = rc << 16;
+ rc = mb86a20s_writereg(state, 0x50, 0x53 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer |= rc << 8;
+ rc = mb86a20s_writereg(state, 0x50, 0x54 + i * 3);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ mer |= rc;
+
+ switch (c->layer[i].modulation) {
+ case DQPSK:
+ case QPSK:
+ segs = cnr_qpsk_table;
+ segs_len = ARRAY_SIZE(cnr_qpsk_table);
+ break;
+ case QAM_16:
+ segs = cnr_16qam_table;
+ segs_len = ARRAY_SIZE(cnr_16qam_table);
+ break;
+ default:
+ case QAM_64:
+ segs = cnr_64qam_table;
+ segs_len = ARRAY_SIZE(cnr_64qam_table);
+ break;
}
- if (!(rc & 0x10)) {
- switch (rc & 0x3) {
- case 0:
- p->guard_interval = GUARD_INTERVAL_1_4;
- break;
- case 1:
- p->guard_interval = GUARD_INTERVAL_1_8;
- break;
- case 2:
- p->guard_interval = GUARD_INTERVAL_1_16;
- break;
+ cnr = interpolate_value(mer, segs, segs_len);
+
+ c->cnr.stat[1 + i].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[1 + i].svalue = cnr;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: CNR for layer %c is %d.%03d dB (MER = %d).\n",
+ __func__, 'A' + i, cnr / 1000, cnr % 1000, mer);
+
+ }
+
+ /* Start a new MER measurement */
+ /* MER counter reset */
+ rc = mb86a20s_writereg(state, 0x50, 0x50);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x51);
+ if (rc < 0)
+ return rc;
+ val = rc;
+
+ rc = mb86a20s_writereg(state, 0x51, val | 0x01);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_writereg(state, 0x51, val & 0x06);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static void mb86a20s_stats_not_ready(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int i;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /* Fill the length of each status counter */
+
+ /* Only global stats */
+ c->strength.len = 1;
+
+ /* Per-layer stats - 3 layers + global */
+ c->cnr.len = 4;
+ c->pre_bit_error.len = 4;
+ c->pre_bit_count.len = 4;
+ c->post_bit_error.len = 4;
+ c->post_bit_count.len = 4;
+ c->block_error.len = 4;
+ c->block_count.len = 4;
+
+ /* Signal is always available */
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = 0;
+
+ /* Put all of them at FE_SCALE_NOT_AVAILABLE */
+ for (i = 0; i < 4; i++) {
+ c->cnr.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+}
+
+static int mb86a20s_get_stats(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc = 0, i;
+ u32 bit_error = 0, bit_count = 0;
+ u32 t_pre_bit_error = 0, t_pre_bit_count = 0;
+ u32 t_post_bit_error = 0, t_post_bit_count = 0;
+ u32 block_error = 0, block_count = 0;
+ u32 t_block_error = 0, t_block_count = 0;
+ int active_layers = 0, pre_ber_layers = 0, post_ber_layers = 0;
+ int per_layers = 0;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ mb86a20s_get_main_CNR(fe);
+
+ /* Get per-layer stats */
+ mb86a20s_get_blk_error_layer_CNR(fe);
+
+ for (i = 0; i < 3; i++) {
+ if (c->isdbt_layer_enabled & (1 << i)) {
+ /* Layer is active and has segments */
+ active_layers++;
+
+ /* Handle BER before Viterbi */
+ rc = mb86a20s_get_pre_ber(fe, i,
+ &bit_error, &bit_count);
+ if (rc >= 0) {
+ c->pre_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[1 + i].uvalue += bit_error;
+ c->pre_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[1 + i].uvalue += bit_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->pre_bit_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get BER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
}
+ if (c->pre_bit_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ pre_ber_layers++;
+
+ /* Handle BER after Viterbi */
+ rc = mb86a20s_get_post_ber(fe, i,
+ &bit_error, &bit_count);
+ if (rc >= 0) {
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[1 + i].uvalue += bit_error;
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[1 + i].uvalue += bit_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get BER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
+ }
+ if (c->post_bit_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ post_ber_layers++;
+
+ /* Handle Block errors for PER/UCB reports */
+ rc = mb86a20s_get_blk_error(fe, i,
+ &block_error,
+ &block_count);
+ if (rc >= 0) {
+ c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[1 + i].uvalue += block_error;
+ c->block_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[1 + i].uvalue += block_count;
+ } else if (rc != -EBUSY) {
+ /*
+ * If an I/O error happened,
+ * measures are now unavailable
+ */
+ c->block_error.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[1 + i].scale = FE_SCALE_NOT_AVAILABLE;
+ dev_err(&state->i2c->dev,
+ "%s: Can't get PER for layer %c (error %d).\n",
+ __func__, 'A' + i, rc);
+
+ }
+ if (c->block_error.stat[1 + i].scale != FE_SCALE_NOT_AVAILABLE)
+ per_layers++;
+
+ /* Update total preBER */
+ t_pre_bit_error += c->pre_bit_error.stat[1 + i].uvalue;
+ t_pre_bit_count += c->pre_bit_count.stat[1 + i].uvalue;
+
+ /* Update total postBER */
+ t_post_bit_error += c->post_bit_error.stat[1 + i].uvalue;
+ t_post_bit_count += c->post_bit_count.stat[1 + i].uvalue;
+
+ /* Update total PER */
+ t_block_error += c->block_error.stat[1 + i].uvalue;
+ t_block_count += c->block_count.stat[1 + i].uvalue;
}
}
+ /*
+ * Start showing global count if at least one error count is
+ * available.
+ */
+ if (pre_ber_layers) {
+ /*
+ * At least one per-layer BER measure was read. We can now
+ * calculate the total BER
+ *
+ * Total Bit Error/Count is calculated as the sum of the
+ * bit errors on all active layers.
+ */
+ c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[0].uvalue = t_pre_bit_error;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[0].uvalue = t_pre_bit_count;
+ } else {
+ c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ /*
+ * Start showing global count if at least one error count is
+ * available.
+ */
+ if (post_ber_layers) {
+ /*
+ * At least one per-layer BER measure was read. We can now
+ * calculate the total BER
+ *
+ * Total Bit Error/Count is calculated as the sum of the
+ * bit errors on all active layers.
+ */
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = t_post_bit_error;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = t_post_bit_count;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ if (per_layers) {
+ /*
+ * At least one per-layer UCB measure was read. We can now
+ * calculate the total UCB
+ *
+ * Total block Error/Count is calculated as the sum of the
+ * block errors on all active layers.
+ */
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = t_block_error;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue = t_block_count;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ }
+
+ return rc;
+}
+
+/*
+ * The functions below are called via DVB callbacks, so they need to
+ * properly use the I2C gate control
+ */
+
+static int mb86a20s_initfe(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc;
+ u8 regD5 = 1;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ /* Initialize the frontend */
+ rc = mb86a20s_writeregdata(state, mb86a20s_init);
+ if (rc < 0)
+ goto err;
+
+ if (!state->config->is_serial) {
+ regD5 &= ~1;
+
+ rc = mb86a20s_writereg(state, 0x50, 0xd5);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x51, regD5);
+ if (rc < 0)
+ goto err;
+ }
+
+err:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
+ if (rc < 0) {
+ state->need_init = true;
+ dev_info(&state->i2c->dev,
+ "mb86a20s: Init failed. Will try again later\n");
+ } else {
+ state->need_init = false;
+ dev_dbg(&state->i2c->dev, "Initialization succeeded.\n");
+ }
+ return rc;
+}
+
+static int mb86a20s_set_frontend(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ int rc;
+#if 0
+ /*
+ * FIXME: Properly implement the set frontend properties
+ */
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+#endif
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ /*
+ * The gate should already be open, but it doesn't hurt to
+ * double-check
+ */
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ fe->ops.tuner_ops.set_params(fe);
+
+ /*
+ * Make it more reliable: if, for some reason, the initial
+ * device initialization doesn't happen, initialize it when
+ * the SBTVD parameters are adjusted.
+ *
+ * Unfortunately, due to a hard to track bug at tda829x/tda18271,
+ * the agc callback logic is not called during DVB attach time,
+ * causing mb86a20s to not be initialized with Kworld SBTVD.
+ * So, this hack is needed to make the Kworld SBTVD work.
+ */
+ if (state->need_init)
+ mb86a20s_initfe(fe);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
+ mb86a20s_reset_counters(fe);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ return rc;
+}
+
+static int mb86a20s_read_status_and_stats(struct dvb_frontend *fe,
+ fe_status_t *status)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc;
+
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ /* Get lock */
+ rc = mb86a20s_read_status(fe, status);
+ if (!(*status & FE_HAS_LOCK)) {
+ mb86a20s_stats_not_ready(fe);
+ mb86a20s_reset_frontend_cache(fe);
+ }
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't read frontend lock status\n", __func__);
+ goto error;
+ }
+
+ /* Get signal strength */
+ rc = mb86a20s_read_signal_strength(fe);
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't reset VBER registers.\n", __func__);
+ mb86a20s_stats_not_ready(fe);
+ mb86a20s_reset_frontend_cache(fe);
+
+ rc = 0; /* Status is OK */
+ goto error;
+ }
+ /* Fill signal strength */
+ c->strength.stat[0].uvalue = rc;
+
+ if (*status & FE_HAS_LOCK) {
+ /* Get TMCC info*/
+ rc = mb86a20s_get_frontend(fe);
+ if (rc < 0) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't get FE TMCC data.\n", __func__);
+ rc = 0; /* Status is OK */
+ goto error;
+ }
+
+ /* Get statistics */
+ rc = mb86a20s_get_stats(fe);
+ if (rc < 0 && rc != -EBUSY) {
+ dev_err(&state->i2c->dev,
+ "%s: Can't get FE statistics.\n", __func__);
+ rc = 0;
+ goto error;
+ }
+ rc = 0; /* Don't return EBUSY to userspace */
+ }
+ goto ok;
+
+error:
+ mb86a20s_stats_not_ready(fe);
+
+ok:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ return rc;
+}
+
+static int mb86a20s_read_signal_strength_from_cache(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ *strength = c->strength.stat[0].uvalue;
+
return 0;
}
+static int mb86a20s_get_frontend_dummy(struct dvb_frontend *fe)
+{
+ /*
+ * get_frontend is now handled together with other stats
+ * retrieval, when read_status() is called, as some statistics
+ * depend on the detected layers.
+ */
+ return 0;
+}
+
static int mb86a20s_tune(struct dvb_frontend *fe,
bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
+ struct mb86a20s_state *state = fe->demodulator_priv;
int rc = 0;
- dprintk("\n");
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
if (re_tune)
rc = mb86a20s_set_frontend(fe);
if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
- mb86a20s_read_status(fe, status);
+ mb86a20s_read_status_and_stats(fe, status);
return rc;
}
@@ -619,7 +1936,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
- dprintk("\n");
+ dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
kfree(state);
}
@@ -629,15 +1946,16 @@ static struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c)
{
+ struct mb86a20s_state *state;
u8 rev;
- /* allocate memory for the internal state */
- struct mb86a20s_state *state =
- kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
+ dev_dbg(&i2c->dev, "%s called.\n", __func__);
- dprintk("\n");
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
if (state == NULL) {
- rc("Unable to kzalloc\n");
+ dev_err(&i2c->dev,
+ "%s: unable to allocate memory for state\n", __func__);
goto error;
}
@@ -654,9 +1972,11 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
rev = mb86a20s_readreg(state, 0);
if (rev == 0x13) {
- printk(KERN_INFO "Detected a Fujitsu mb86a20s frontend\n");
+ dev_info(&i2c->dev,
+ "Detected a Fujitsu mb86a20s frontend\n");
} else {
- printk(KERN_ERR "Frontend revision %d is unknown - aborting.\n",
+ dev_dbg(&i2c->dev,
+ "Frontend revision %d is unknown - aborting.\n",
rev);
goto error;
}
@@ -690,9 +2010,9 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.init = mb86a20s_initfe,
.set_frontend = mb86a20s_set_frontend,
- .get_frontend = mb86a20s_get_frontend,
- .read_status = mb86a20s_read_status,
- .read_signal_strength = mb86a20s_read_signal_strength,
+ .get_frontend = mb86a20s_get_frontend_dummy,
+ .read_status = mb86a20s_read_status_and_stats,
+ .read_signal_strength = mb86a20s_read_signal_strength_from_cache,
.tune = mb86a20s_tune,
};
diff --git a/drivers/media/dvb-frontends/mt312.h b/drivers/media/dvb-frontends/mt312.h
index 29e3bb5496b8..5706621ad79d 100644
--- a/drivers/media/dvb-frontends/mt312.h
+++ b/drivers/media/dvb-frontends/mt312.h
@@ -36,7 +36,7 @@ struct mt312_config {
unsigned int voltage_inverted:1;
};
-#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MT312)
struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/mt352.h b/drivers/media/dvb-frontends/mt352.h
index ca2562d6f289..451d904e1500 100644
--- a/drivers/media/dvb-frontends/mt352.h
+++ b/drivers/media/dvb-frontends/mt352.h
@@ -51,7 +51,7 @@ struct mt352_config
int (*demod_init)(struct dvb_frontend* fe);
};
-#if defined(CONFIG_DVB_MT352) || (defined(CONFIG_DVB_MT352_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_MT352)
extern struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt200x.h b/drivers/media/dvb-frontends/nxt200x.h
index f3c84583770f..b518d545609e 100644
--- a/drivers/media/dvb-frontends/nxt200x.h
+++ b/drivers/media/dvb-frontends/nxt200x.h
@@ -42,7 +42,7 @@ struct nxt200x_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if defined(CONFIG_DVB_NXT200X) || (defined(CONFIG_DVB_NXT200X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_NXT200X)
extern struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/nxt6000.h b/drivers/media/dvb-frontends/nxt6000.h
index 878eb38a075e..b5867c2ae681 100644
--- a/drivers/media/dvb-frontends/nxt6000.h
+++ b/drivers/media/dvb-frontends/nxt6000.h
@@ -33,7 +33,7 @@ struct nxt6000_config
u8 clock_inversion:1;
};
-#if defined(CONFIG_DVB_NXT6000) || (defined(CONFIG_DVB_NXT6000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_NXT6000)
extern struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51132.h b/drivers/media/dvb-frontends/or51132.h
index 1b8e04d973c8..938958386cb1 100644
--- a/drivers/media/dvb-frontends/or51132.h
+++ b/drivers/media/dvb-frontends/or51132.h
@@ -34,7 +34,7 @@ struct or51132_config
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
-#if defined(CONFIG_DVB_OR51132) || (defined(CONFIG_DVB_OR51132_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_OR51132)
extern struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index c625b57b4333..10cfc0579168 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
@@ -44,9 +46,7 @@
static int debug;
#define dprintk(args...) \
- do { \
- if (debug) printk(KERN_DEBUG "or51211: " args); \
- } while (0)
+ do { if (debug) pr_debug(args); } while (0)
static u8 run_buf[] = {0x7f,0x01};
static u8 cmd_buf[] = {0x04,0x01,0x50,0x80,0x06}; // ATSC
@@ -80,8 +80,7 @@ static int i2c_writebytes (struct or51211_state* state, u8 reg, const u8 *buf,
msg.buf = (u8 *)buf;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
- printk(KERN_WARNING "or51211: i2c_writebytes error "
- "(addr %02x, err == %i)\n", reg, err);
+ pr_warn("error (addr %02x, err == %i)\n", reg, err);
return -EREMOTEIO;
}
@@ -98,8 +97,7 @@ static int i2c_readbytes(struct or51211_state *state, u8 reg, u8 *buf, int len)
msg.buf = buf;
if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
- printk(KERN_WARNING "or51211: i2c_readbytes error "
- "(addr %02x, err == %i)\n", reg, err);
+ pr_warn("error (addr %02x, err == %i)\n", reg, err);
return -EREMOTEIO;
}
@@ -118,11 +116,11 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
/* Get eprom data */
tudata[0] = 17;
if (i2c_writebytes(state,0x50,tudata,1)) {
- printk(KERN_WARNING "or51211:load_firmware error eprom addr\n");
+ pr_warn("error eprom addr\n");
return -1;
}
if (i2c_readbytes(state,0x50,&tudata[145],192)) {
- printk(KERN_WARNING "or51211: load_firmware error eprom\n");
+ pr_warn("error eprom\n");
return -1;
}
@@ -136,32 +134,32 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
state->config->reset(fe);
if (i2c_writebytes(state,state->config->demod_address,tudata,585)) {
- printk(KERN_WARNING "or51211: load_firmware error 1\n");
+ pr_warn("error 1\n");
return -1;
}
msleep(1);
if (i2c_writebytes(state,state->config->demod_address,
&fw->data[393],8125)) {
- printk(KERN_WARNING "or51211: load_firmware error 2\n");
+ pr_warn("error 2\n");
return -1;
}
msleep(1);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: load_firmware error 3\n");
+ pr_warn("error 3\n");
return -1;
}
/* Wait at least 5 msec */
msleep(10);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: load_firmware error 4\n");
+ pr_warn("error 4\n");
return -1;
}
msleep(10);
- printk("or51211: Done.\n");
+ pr_info("Done.\n");
return 0;
};
@@ -173,14 +171,14 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
state->config->setmode(fe, mode);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: setmode error 1\n");
+ pr_warn("error 1\n");
return -1;
}
/* Wait at least 5 msec */
msleep(10);
if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) {
- printk(KERN_WARNING "or51211: setmode error 2\n");
+ pr_warn("error 2\n");
return -1;
}
@@ -196,7 +194,7 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
* normal +/-150kHz Carrier acquisition range
*/
if (i2c_writebytes(state,state->config->demod_address,cmd_buf,3)) {
- printk(KERN_WARNING "or51211: setmode error 3\n");
+ pr_warn("error 3\n");
return -1;
}
@@ -206,14 +204,14 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
rec_buf[3] = 0x00;
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,rec_buf,3)) {
- printk(KERN_WARNING "or51211: setmode error 5\n");
+ pr_warn("error 5\n");
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,&rec_buf[10],2)) {
- printk(KERN_WARNING "or51211: setmode error 6");
+ pr_warn("error 6\n");
return -1;
}
- dprintk("setmode rec status %02x %02x\n",rec_buf[10],rec_buf[11]);
+ dprintk("rec status %02x %02x\n", rec_buf[10], rec_buf[11]);
return 0;
}
@@ -248,15 +246,15 @@ static int or51211_read_status(struct dvb_frontend* fe, fe_status_t* status)
/* Receiver Status */
if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
- printk(KERN_WARNING "or51132: read_status write error\n");
+ pr_warn("write error\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
- printk(KERN_WARNING "or51132: read_status read error\n");
+ pr_warn("read error\n");
return -1;
}
- dprintk("read_status %x %x\n",rec_buf[0],rec_buf[1]);
+ dprintk("%x %x\n", rec_buf[0], rec_buf[1]);
if (rec_buf[0] & 0x01) { /* Receiver Lock */
*status |= FE_HAS_SIGNAL;
@@ -306,20 +304,18 @@ static int or51211_read_snr(struct dvb_frontend* fe, u16* snr)
snd_buf[2] = 0x04;
if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
- printk(KERN_WARNING "%s: error writing snr reg\n",
- __func__);
+ pr_warn("error writing snr reg\n");
return -1;
}
if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
- printk(KERN_WARNING "%s: read_status read error\n",
- __func__);
+ pr_warn("read_status read error\n");
return -1;
}
state->snr = calculate_snr(rec_buf[0], 89599047);
*snr = (state->snr) >> 16;
- dprintk("%s: noise = 0x%02x, snr = %d.%02d dB\n", __func__, rec_buf[0],
+ dprintk("noise = 0x%02x, snr = %d.%02d dB\n", rec_buf[0],
state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
return 0;
@@ -375,25 +371,24 @@ static int or51211_init(struct dvb_frontend* fe)
if (!state->initialized) {
/* Request the firmware, this will block until it uploads */
- printk(KERN_INFO "or51211: Waiting for firmware upload "
- "(%s)...\n", OR51211_DEFAULT_FIRMWARE);
+ pr_info("Waiting for firmware upload (%s)...\n",
+ OR51211_DEFAULT_FIRMWARE);
ret = config->request_firmware(fe, &fw,
OR51211_DEFAULT_FIRMWARE);
- printk(KERN_INFO "or51211:Got Hotplug firmware\n");
+ pr_info("Got Hotplug firmware\n");
if (ret) {
- printk(KERN_WARNING "or51211: No firmware uploaded "
- "(timeout or file not found?)\n");
+ pr_warn("No firmware uploaded "
+ "(timeout or file not found?)\n");
return ret;
}
ret = or51211_load_firmware(fe, fw);
release_firmware(fw);
if (ret) {
- printk(KERN_WARNING "or51211: Writing firmware to "
- "device failed!\n");
+ pr_warn("Writing firmware to device failed!\n");
return ret;
}
- printk(KERN_INFO "or51211: Firmware upload complete.\n");
+ pr_info("Firmware upload complete.\n");
/* Set operation mode in Receiver 1 register;
* type 1:
@@ -406,7 +401,7 @@ static int or51211_init(struct dvb_frontend* fe)
*/
if (i2c_writebytes(state,state->config->demod_address,
cmd_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error 5\n");
+ pr_warn("Load DVR Error 5\n");
return -1;
}
@@ -419,13 +414,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(30);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error A\n");
+ pr_warn("Load DVR Error A\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[10],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error B\n");
+ pr_warn("Load DVR Error B\n");
return -1;
}
@@ -436,13 +431,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error C\n");
+ pr_warn("Load DVR Error C\n");
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[12],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error D\n");
+ pr_warn("Load DVR Error D\n");
return -1;
}
@@ -454,16 +449,14 @@ static int or51211_init(struct dvb_frontend* fe)
get_ver_buf[4] = i+1;
if (i2c_writebytes(state,state->config->demod_address,
get_ver_buf,5)) {
- printk(KERN_WARNING "or51211:Load DVR Error 6"
- " - %d\n",i);
+ pr_warn("Load DVR Error 6 - %d\n", i);
return -1;
}
msleep(3);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[i*2],2)) {
- printk(KERN_WARNING "or51211:Load DVR Error 7"
- " - %d\n",i);
+ pr_warn("Load DVR Error 7 - %d\n", i);
return -1;
}
/* If we didn't receive the right index, try again */
@@ -471,15 +464,11 @@ static int or51211_init(struct dvb_frontend* fe)
i--;
}
}
- dprintk("read_fwbits %x %x %x %x %x %x %x %x %x %x\n",
- rec_buf[0], rec_buf[1], rec_buf[2], rec_buf[3],
- rec_buf[4], rec_buf[5], rec_buf[6], rec_buf[7],
- rec_buf[8], rec_buf[9]);
+ dprintk("read_fwbits %10ph\n", rec_buf);
- printk(KERN_INFO "or51211: ver TU%02x%02x%02x VSB mode %02x"
- " Status %02x\n",
- rec_buf[2], rec_buf[4],rec_buf[6],
- rec_buf[12],rec_buf[10]);
+ pr_info("ver TU%02x%02x%02x VSB mode %02x Status %02x\n",
+ rec_buf[2], rec_buf[4], rec_buf[6], rec_buf[12],
+ rec_buf[10]);
rec_buf[0] = 0x04;
rec_buf[1] = 0x00;
@@ -488,13 +477,13 @@ static int or51211_init(struct dvb_frontend* fe)
msleep(20);
if (i2c_writebytes(state,state->config->demod_address,
rec_buf,3)) {
- printk(KERN_WARNING "or51211: Load DVR Error 8\n");
+ pr_warn("Load DVR Error 8\n");
return -1;
}
msleep(20);
if (i2c_readbytes(state,state->config->demod_address,
&rec_buf[8],2)) {
- printk(KERN_WARNING "or51211: Load DVR Error 9\n");
+ pr_warn("Load DVR Error 9\n");
return -1;
}
state->initialized = 1;
diff --git a/drivers/media/dvb-frontends/or51211.h b/drivers/media/dvb-frontends/or51211.h
index 3ce0508b898e..9a8ae936b62d 100644
--- a/drivers/media/dvb-frontends/or51211.h
+++ b/drivers/media/dvb-frontends/or51211.h
@@ -37,7 +37,7 @@ struct or51211_config
void (*sleep)(struct dvb_frontend * fe);
};
-#if defined(CONFIG_DVB_OR51211) || (defined(CONFIG_DVB_OR51211_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_OR51211)
extern struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/s5h1420.h b/drivers/media/dvb-frontends/s5h1420.h
index ff308136d865..210049b5cf30 100644
--- a/drivers/media/dvb-frontends/s5h1420.h
+++ b/drivers/media/dvb-frontends/s5h1420.h
@@ -40,7 +40,7 @@ struct s5h1420_config
u8 serial_mpeg:1;
};
-#if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_S5H1420)
extern struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c);
extern struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/sp8870.h b/drivers/media/dvb-frontends/sp8870.h
index a764a793c7d8..065ec67d4e30 100644
--- a/drivers/media/dvb-frontends/sp8870.h
+++ b/drivers/media/dvb-frontends/sp8870.h
@@ -35,7 +35,7 @@ struct sp8870_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_SP8870) || (defined(CONFIG_DVB_SP8870_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_SP8870)
extern struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/sp887x.h b/drivers/media/dvb-frontends/sp887x.h
index 04eff6e0eef3..2cdc4e8bc9cd 100644
--- a/drivers/media/dvb-frontends/sp887x.h
+++ b/drivers/media/dvb-frontends/sp887x.h
@@ -17,7 +17,7 @@ struct sp887x_config
int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
};
-#if defined(CONFIG_DVB_SP887X) || (defined(CONFIG_DVB_SP887X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_SP887X)
extern struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stb0899_drv.h b/drivers/media/dvb-frontends/stb0899_drv.h
index 98b200ce0c34..8d26ff6eb1db 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.h
+++ b/drivers/media/dvb-frontends/stb0899_drv.h
@@ -142,7 +142,7 @@ struct stb0899_config {
int (*tuner_set_rfsiggain)(struct dvb_frontend *fe, u32 rf_gain);
};
-#if defined(CONFIG_DVB_STB0899) || (defined(CONFIG_DVB_STB0899_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STB0899)
extern struct dvb_frontend *stb0899_attach(struct stb0899_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/stb6100.h b/drivers/media/dvb-frontends/stb6100.h
index 2ab096614b3f..3a1e40f3b8be 100644
--- a/drivers/media/dvb-frontends/stb6100.h
+++ b/drivers/media/dvb-frontends/stb6100.h
@@ -94,7 +94,7 @@ struct stb6100_state {
u32 reference;
};
-#if defined(CONFIG_DVB_STB6100) || (defined(CONFIG_DVB_STB6100_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STB6100)
extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
const struct stb6100_config *config,
diff --git a/drivers/media/dvb-frontends/stv0297.h b/drivers/media/dvb-frontends/stv0297.h
index 3f8f9468f387..c8ff3639ce00 100644
--- a/drivers/media/dvb-frontends/stv0297.h
+++ b/drivers/media/dvb-frontends/stv0297.h
@@ -42,7 +42,7 @@ struct stv0297_config
u8 stop_during_read:1;
};
-#if defined(CONFIG_DVB_STV0297) || (defined(CONFIG_DVB_STV0297_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV0297)
extern struct dvb_frontend* stv0297_attach(const struct stv0297_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 92a6075cd82f..b57ecf42e75a 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -420,7 +420,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
do_gettimeofday (&nexttime);
if (debug_legacy_dish_switch)
- memcpy (&tv[0], &nexttime, sizeof (struct timeval));
+ tv[0] = nexttime;
stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
dvb_frontend_sleep_until(&nexttime, 32000);
diff --git a/drivers/media/dvb-frontends/stv0299.h b/drivers/media/dvb-frontends/stv0299.h
index ba219b767a69..06f70fc8327b 100644
--- a/drivers/media/dvb-frontends/stv0299.h
+++ b/drivers/media/dvb-frontends/stv0299.h
@@ -95,7 +95,7 @@ struct stv0299_config
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
};
-#if defined(CONFIG_DVB_STV0299) || (defined(CONFIG_DVB_STV0299_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV0299)
extern struct dvb_frontend *stv0299_attach(const struct stv0299_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index b551ca350e00..e5a87b57d855 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -524,11 +524,8 @@ void stv0900_set_tuner(struct dvb_frontend *fe, u32 frequency,
struct dvb_frontend_ops *frontend_ops = NULL;
struct dvb_tuner_ops *tuner_ops = NULL;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->set_frequency) {
if ((tuner_ops->set_frequency(fe, frequency)) < 0)
@@ -552,11 +549,8 @@ void stv0900_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
struct dvb_frontend_ops *frontend_ops = NULL;
struct dvb_tuner_ops *tuner_ops = NULL;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->set_bandwidth) {
if ((tuner_ops->set_bandwidth(fe, bandwidth)) < 0)
@@ -1558,6 +1552,27 @@ static int stv0900_status(struct stv0900_internal *intp,
return locked;
}
+static int stv0900_set_mis(struct stv0900_internal *intp,
+ enum fe_stv0900_demod_num demod, int mis)
+{
+ enum fe_stv0900_error error = STV0900_NO_ERROR;
+
+ dprintk("%s\n", __func__);
+
+ if (mis < 0 || mis > 255) {
+ dprintk("Disable MIS filtering\n");
+ stv0900_write_bits(intp, FILTER_EN, 0);
+ } else {
+ dprintk("Enable MIS filtering - %d\n", mis);
+ stv0900_write_bits(intp, FILTER_EN, 1);
+ stv0900_write_reg(intp, ISIENTRY, mis);
+ stv0900_write_reg(intp, ISIBITENA, 0xff);
+ }
+
+ return error;
+}
+
+
static enum dvbfe_search stv0900_search(struct dvb_frontend *fe)
{
struct stv0900_state *state = fe->demodulator_priv;
@@ -1578,6 +1593,8 @@ static enum dvbfe_search stv0900_search(struct dvb_frontend *fe)
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
+ stv0900_set_mis(intp, demod, c->stream_id);
+
p_result.locked = FALSE;
p_search.path = demod;
p_search.frequency = c->frequency;
@@ -1935,6 +1952,9 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
if (err_stv0900)
goto error;
+ if (state->internal->chip_id >= 0x30)
+ state->frontend.ops.info.caps |= FE_CAN_MULTISTREAM;
+
break;
default:
goto error;
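
The stv0900 additions hook DVB-S2 multistream (MIS) filtering up to the stream_id property: a value in the 0-255 range programs the ISI filter registers, anything outside that range disables filtering, and cut 3.0 or newer chips now advertise FE_CAN_MULTISTREAM. A hedged sketch of how a userspace application could select a stream before tuning; the device path and ISI value are placeholders, and the call only has an effect on kernels and frontends that support DTV_STREAM_ID:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

/* Returns an open frontend fd with the ISI filter programmed, or -1 on error. */
static int select_isi(const char *fe_path, unsigned int isi)
{
	struct dtv_property prop = {
		.cmd = DTV_STREAM_ID,	/* 0-255 selects an ISI; out-of-range disables filtering */
		.u.data = isi,
	};
	struct dtv_properties props = {
		.num = 1,
		.props = &prop,
	};
	int fd = open(fe_path, O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, FE_SET_PROPERTY, &props) < 0) {
		close(fd);	/* frontend or kernel without multistream support */
		return -1;
	}

	return fd;	/* keep the frontend open while streaming */
}
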
diff --git a/drivers/media/dvb-frontends/stv0900_reg.h b/drivers/media/dvb-frontends/stv0900_reg.h
index 731afe93a823..511ed2a2d987 100644
--- a/drivers/media/dvb-frontends/stv0900_reg.h
+++ b/drivers/media/dvb-frontends/stv0900_reg.h
@@ -3446,8 +3446,11 @@ extern s32 shiftx(s32 x, int demod, s32 shift);
#define R0900_P1_PDELCTRL1 0xf550
#define PDELCTRL1 REGx(R0900_P1_PDELCTRL1)
#define F0900_P1_INV_MISMASK 0xf5500080
+#define INV_MISMASK FLDx(F0900_P1_INV_MISMASK)
#define F0900_P1_FILTER_EN 0xf5500020
+#define FILTER_EN FLDx(F0900_P1_FILTER_EN)
#define F0900_P1_EN_MIS00 0xf5500002
+#define EN_MIS00 FLDx(F0900_P1_EN_MIS00)
#define F0900_P1_ALGOSWRST 0xf5500001
#define ALGOSWRST FLDx(F0900_P1_ALGOSWRST)
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index 4af20780fb9c..0a40edfad739 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1167,11 +1167,8 @@ static u32 stv0900_get_tuner_freq(struct dvb_frontend *fe)
struct dvb_tuner_ops *tuner_ops = NULL;
u32 freq = 0;
- if (&fe->ops)
- frontend_ops = &fe->ops;
-
- if (&frontend_ops->tuner_ops)
- tuner_ops = &frontend_ops->tuner_ops;
+ frontend_ops = &fe->ops;
+ tuner_ops = &frontend_ops->tuner_ops;
if (tuner_ops->get_frequency) {
if ((tuner_ops->get_frequency(fe, &freq)) < 0)
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 13caec013902..f36eeefb76a6 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -4267,7 +4267,7 @@ err:
return -1;
}
-static int stv090x_set_tspath(struct stv090x_state *state)
+static int stv0900_set_tspath(struct stv090x_state *state)
{
u32 reg;
@@ -4538,6 +4538,121 @@ err:
return -1;
}
+static int stv0903_set_tspath(struct stv090x_state *state)
+{
+ u32 reg;
+
+ if (state->internal->dev_ver >= 0x20) {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c);
+ break;
+ }
+ } else {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
+ break;
+ }
+ }
+
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_DVBCI:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+
+ if (state->config->ts1_clk > 0) {
+ u32 speed;
+
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ default:
+ speed = state->internal->mclk /
+ (state->config->ts1_clk / 4);
+ if (speed < 0x08)
+ speed = 0x08;
+ if (speed > 0xFF)
+ speed = 0xFF;
+ break;
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ speed = state->internal->mclk /
+ (state->config->ts1_clk / 32);
+ if (speed < 0x20)
+ speed = 0x20;
+ if (speed > 0xFF)
+ speed = 0xFF;
+ break;
+ }
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P1_TSSPEED, speed) < 0)
+ goto err;
+ }
+
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
static int stv090x_init(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
@@ -4600,8 +4715,13 @@ static int stv090x_init(struct dvb_frontend *fe)
if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
- if (stv090x_set_tspath(state) < 0)
- goto err;
+ if (state->device == STV0900) {
+ if (stv0900_set_tspath(state) < 0)
+ goto err;
+ } else {
+ if (stv0903_set_tspath(state) < 0)
+ goto err;
+ }
return 0;
@@ -4642,23 +4762,26 @@ static int stv090x_setup(struct dvb_frontend *fe)
/* Stop Demod */
if (stv090x_write_reg(state, STV090x_P1_DMDISTATE, 0x5c) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_DMDISTATE, 0x5c) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_DMDISTATE, 0x5c) < 0)
+ goto err;
msleep(5);
/* Set No Tuner Mode */
if (stv090x_write_reg(state, STV090x_P1_TNRCFG, 0x6c) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_TNRCFG, 0x6c) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_TNRCFG, 0x6c) < 0)
+ goto err;
/* I2C repeater OFF */
STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
if (stv090x_write_reg(state, STV090x_P1_I2CRPT, reg) < 0)
goto err;
- if (stv090x_write_reg(state, STV090x_P2_I2CRPT, reg) < 0)
- goto err;
+ if (state->device == STV0900)
+ if (stv090x_write_reg(state, STV090x_P2_I2CRPT, reg) < 0)
+ goto err;
if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
goto err;
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 29cdc2b71314..0bd6adcfee8a 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -103,7 +103,7 @@ struct stv090x_config {
void (*tuner_i2c_lock) (struct dvb_frontend *fe, int lock);
};
-#if defined(CONFIG_DVB_STV090x) || (defined(CONFIG_DVB_STV090x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV090x)
extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h
index 47516753929a..bc4766db29c5 100644
--- a/drivers/media/dvb-frontends/stv6110x.h
+++ b/drivers/media/dvb-frontends/stv6110x.h
@@ -53,7 +53,7 @@ struct stv6110x_devctl {
};
-#if defined(CONFIG_DVB_STV6110x) || (defined(CONFIG_DVB_STV6110x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_STV6110x)
extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
diff --git a/drivers/media/dvb-frontends/tda1002x.h b/drivers/media/dvb-frontends/tda1002x.h
index 04d19418bf20..e404b6e44802 100644
--- a/drivers/media/dvb-frontends/tda1002x.h
+++ b/drivers/media/dvb-frontends/tda1002x.h
@@ -57,7 +57,7 @@ struct tda10023_config {
u16 deltaf;
};
-#if defined(CONFIG_DVB_TDA10021) || (defined(CONFIG_DVB_TDA10021_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10021)
extern struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
@@ -69,8 +69,7 @@ static inline struct dvb_frontend* tda10021_attach(const struct tda1002x_config*
}
#endif // CONFIG_DVB_TDA10021
-#if defined(CONFIG_DVB_TDA10023) || \
- (defined(CONFIG_DVB_TDA10023_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10023)
extern struct dvb_frontend *tda10023_attach(
const struct tda10023_config *config,
struct i2c_adapter *i2c, u8 pwm);
diff --git a/drivers/media/dvb-frontends/tda1004x.h b/drivers/media/dvb-frontends/tda1004x.h
index 4e27ffb0f14e..dd283fbb61c0 100644
--- a/drivers/media/dvb-frontends/tda1004x.h
+++ b/drivers/media/dvb-frontends/tda1004x.h
@@ -117,7 +117,7 @@ struct tda1004x_state {
enum tda1004x_demod demod_type;
};
-#if defined(CONFIG_DVB_TDA1004X) || (defined(CONFIG_DVB_TDA1004X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA1004X)
extern struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
struct i2c_adapter* i2c);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 16a4bc54dbe7..2521f7e23018 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -30,7 +30,7 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
u8 buf[len+1];
struct i2c_msg msg[1] = {
{
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf,
@@ -59,12 +59,12 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
u8 buf[len];
struct i2c_msg msg[2] = {
{
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = 0,
.len = 1,
.buf = &reg,
}, {
- .addr = priv->cfg.i2c_address,
+ .addr = priv->cfg.demod_i2c_addr,
.flags = I2C_M_RD,
.len = sizeof(buf),
.buf = buf,
@@ -1064,7 +1064,7 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
cmd.args[4] = 0x00;
- cmd.args[5] = 0x14;
+ cmd.args[5] = (priv->cfg.tuner_i2c_addr) ? priv->cfg.tuner_i2c_addr : 0x14;
cmd.args[6] = 0x00;
cmd.args[7] = 0x03;
cmd.args[8] = 0x02;
@@ -1202,6 +1202,20 @@ struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
goto error;
}
+ /* make sure demod i2c address is specified */
+ if (!config->demod_i2c_addr) {
+ dev_dbg(&i2c->dev, "%s: invalid demod i2c address!\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* make sure tuner i2c address is specified */
+ if (!config->tuner_i2c_addr) {
+ dev_dbg(&i2c->dev, "%s: invalid tuner i2c address!\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
/* setup the priv */
priv->i2c = i2c;
memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 21163c4b555c..bff1c38df802 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -28,7 +28,13 @@ struct tda10071_config {
* Default: none, must set
* Values: 0x55,
*/
- u8 i2c_address;
+ u8 demod_i2c_addr;
+
+ /* Tuner I2C address.
+ * Default: none, must set
+ * Values: 0x14, 0x54, ...
+ */
+ u8 tuner_i2c_addr;
/* Max bytes I2C provider can write at once.
* Note: Buffer is taken from the stack currently!
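
With the rename of i2c_address to demod_i2c_addr and the new tuner_i2c_addr member, tda10071_attach() now refuses to bind unless the board configuration supplies both addresses, and the tuner address is passed to the firmware instead of the previously hard-coded 0x14. A minimal sketch of an updated configuration, using the example values from the comments above (the other tda10071_config members are left out here and are unchanged by this patch):

static const struct tda10071_config hypothetical_board_tda10071_config = {
	.demod_i2c_addr = 0x55,	/* was .i2c_address before this change */
	.tuner_i2c_addr = 0x14,	/* previously hard-coded in tda10071_init() */
	/* remaining members (e.g. the I2C write limit documented above) as before */
};
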
diff --git a/drivers/media/dvb-frontends/tda10086.h b/drivers/media/dvb-frontends/tda10086.h
index 61148c558d8d..458fe91c1b88 100644
--- a/drivers/media/dvb-frontends/tda10086.h
+++ b/drivers/media/dvb-frontends/tda10086.h
@@ -46,7 +46,7 @@ struct tda10086_config
enum tda10086_xtal xtal_freq;
};
-#if defined(CONFIG_DVB_TDA10086) || (defined(CONFIG_DVB_TDA10086_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA10086)
extern struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda665x.h b/drivers/media/dvb-frontends/tda665x.h
index ec7927aa75ae..03a0da6d5cf2 100644
--- a/drivers/media/dvb-frontends/tda665x.h
+++ b/drivers/media/dvb-frontends/tda665x.h
@@ -31,7 +31,7 @@ struct tda665x_config {
u32 ref_divider;
};
-#if defined(CONFIG_DVB_TDA665x) || (defined(CONFIG_DVB_TDA665x_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA665x)
extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
const struct tda665x_config *config,
diff --git a/drivers/media/dvb-frontends/tda8083.h b/drivers/media/dvb-frontends/tda8083.h
index 5a03c14a10e8..de6b1860dfdd 100644
--- a/drivers/media/dvb-frontends/tda8083.h
+++ b/drivers/media/dvb-frontends/tda8083.h
@@ -35,7 +35,7 @@ struct tda8083_config
u8 demod_address;
};
-#if defined(CONFIG_DVB_TDA8083) || (defined(CONFIG_DVB_TDA8083_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA8083)
extern struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/tda8261.h b/drivers/media/dvb-frontends/tda8261.h
index 006e45351b94..55cf4ffcbfdf 100644
--- a/drivers/media/dvb-frontends/tda8261.h
+++ b/drivers/media/dvb-frontends/tda8261.h
@@ -34,7 +34,7 @@ struct tda8261_config {
enum tda8261_step step_size;
};
-#if defined(CONFIG_DVB_TDA8261) || (defined(CONFIG_DVB_TDA8261_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA8261)
extern struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
const struct tda8261_config *config,
diff --git a/drivers/media/dvb-frontends/tda8261_cfg.h b/drivers/media/dvb-frontends/tda8261_cfg.h
index 1af1ee49b542..46710744173b 100644
--- a/drivers/media/dvb-frontends/tda8261_cfg.h
+++ b/drivers/media/dvb-frontends/tda8261_cfg.h
@@ -78,7 +78,7 @@ static int tda8261_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return err;
}
*bandwidth = t_state.bandwidth;
+ printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
}
- printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
return 0;
}
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index 89e97926ab23..5f0f20e7e4f8 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -35,7 +35,7 @@
* @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
* @return FE pointer on success, NULL on failure.
*/
-#if defined(CONFIG_DVB_TDA826X) || (defined(CONFIG_DVB_TDA826X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TDA826X)
extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
int has_loopthrough);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
new file mode 100644
index 000000000000..ad7ad857ab2a
--- /dev/null
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -0,0 +1,373 @@
+/*
+ Montage Technology TS2020 - Silicon Tuner driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+
+ Copyright (C) 2009-2012 TurboSight.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dvb_frontend.h"
+#include "ts2020.h"
+
+#define TS2020_XTAL_FREQ 27000 /* in kHz */
+#define FREQ_OFFSET_LOW_SYM_RATE 3000
+
+struct ts2020_priv {
+ /* i2c details */
+ int i2c_address;
+ struct i2c_adapter *i2c;
+ u8 clk_out_div;
+ u32 frequency;
+};
+
+static int ts2020_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int ts2020_writereg(struct dvb_frontend *fe, int reg, int data)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg[] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ }
+ };
+ int err;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ err = i2c_transfer(priv->i2c, msg, 1);
+ if (err != 1) {
+ printk(KERN_ERR
+ "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
+ return -EREMOTEIO;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return 0;
+}
+
+static int ts2020_readreg(struct dvb_frontend *fe, u8 reg)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+ struct i2c_msg msg[] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ }, {
+ .addr = priv->i2c_address,
+ .flags = I2C_M_RD,
+ .buf = b1,
+ .len = 1
+ }
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+
+ if (ret != 2) {
+ printk(KERN_ERR "%s: reg=0x%x(error=%d)\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return b1[0];
+}
+
+static int ts2020_sleep(struct dvb_frontend *fe)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 buf[] = { 10, 0 };
+ struct i2c_msg msg = {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer(priv->i2c, &msg, 1);
+ if (ret != 1)
+ printk(KERN_ERR "%s: i2c error\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return (ret == 1) ? 0 : ret;
+}
+
+static int ts2020_init(struct dvb_frontend *fe)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+
+ ts2020_writereg(fe, 0x42, 0x73);
+ ts2020_writereg(fe, 0x05, priv->clk_out_div);
+ ts2020_writereg(fe, 0x20, 0x27);
+ ts2020_writereg(fe, 0x07, 0x02);
+ ts2020_writereg(fe, 0x11, 0xff);
+ ts2020_writereg(fe, 0x60, 0xf9);
+ ts2020_writereg(fe, 0x08, 0x01);
+ ts2020_writereg(fe, 0x00, 0x41);
+
+ return 0;
+}
+
+static int ts2020_tuner_gate_ctrl(struct dvb_frontend *fe, u8 offset)
+{
+ int ret;
+ ret = ts2020_writereg(fe, 0x51, 0x1f - offset);
+ ret |= ts2020_writereg(fe, 0x51, 0x1f);
+ ret |= ts2020_writereg(fe, 0x50, offset);
+ ret |= ts2020_writereg(fe, 0x50, 0x00);
+ msleep(20);
+ return ret;
+}
+
+static int ts2020_set_tuner_rf(struct dvb_frontend *fe)
+{
+ int reg;
+
+ reg = ts2020_readreg(fe, 0x3d);
+ reg &= 0x7f;
+ if (reg < 0x16)
+ reg = 0xa1;
+ else if (reg == 0x16)
+ reg = 0x99;
+ else
+ reg = 0xf9;
+
+ ts2020_writereg(fe, 0x60, reg);
+ reg = ts2020_tuner_gate_ctrl(fe, 0x08);
+
+ return reg;
+}
+
+static int ts2020_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct ts2020_priv *priv = fe->tuner_priv;
+ int ret;
+ u32 frequency = c->frequency;
+ s32 offset_khz;
+ u32 symbol_rate = (c->symbol_rate / 1000);
+ u32 f3db, gdiv28;
+ u16 value, ndiv, lpf_coeff;
+ u8 lpf_mxdiv, mlpf_max, mlpf_min, nlpf;
+ u8 lo = 0x01, div4 = 0x0;
+
+ /* Calculate frequency divider */
+ if (frequency < 1060000) {
+ lo |= 0x10;
+ div4 = 0x1;
+ ndiv = (frequency * 14 * 4) / TS2020_XTAL_FREQ;
+ } else
+ ndiv = (frequency * 14 * 2) / TS2020_XTAL_FREQ;
+ ndiv = ndiv + ndiv % 2;
+ ndiv = ndiv - 1024;
+
+ ret = ts2020_writereg(fe, 0x10, 0x80 | lo);
+
+ /* Set frequency divider */
+ ret |= ts2020_writereg(fe, 0x01, (ndiv >> 8) & 0xf);
+ ret |= ts2020_writereg(fe, 0x02, ndiv & 0xff);
+
+ ret |= ts2020_writereg(fe, 0x03, 0x06);
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x10);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* Tuner Frequency Range */
+ ret = ts2020_writereg(fe, 0x10, lo);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x08);
+
+ /* Tuner RF */
+ ret |= ts2020_set_tuner_rf(fe);
+
+ gdiv28 = (TS2020_XTAL_FREQ / 1000 * 1694 + 500) / 1000;
+ ret |= ts2020_writereg(fe, 0x04, gdiv28 & 0xff);
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x04);
+ if (ret < 0)
+ return -ENODEV;
+
+ value = ts2020_readreg(fe, 0x26);
+
+ f3db = (symbol_rate * 135) / 200 + 2000;
+ f3db += FREQ_OFFSET_LOW_SYM_RATE;
+ if (f3db < 7000)
+ f3db = 7000;
+ if (f3db > 40000)
+ f3db = 40000;
+
+ gdiv28 = gdiv28 * 207 / (value * 2 + 151);
+ mlpf_max = gdiv28 * 135 / 100;
+ mlpf_min = gdiv28 * 78 / 100;
+ if (mlpf_max > 63)
+ mlpf_max = 63;
+
+ lpf_coeff = 2766;
+
+ nlpf = (f3db * gdiv28 * 2 / lpf_coeff /
+ (TS2020_XTAL_FREQ / 1000) + 1) / 2;
+ if (nlpf > 23)
+ nlpf = 23;
+ if (nlpf < 1)
+ nlpf = 1;
+
+ lpf_mxdiv = (nlpf * (TS2020_XTAL_FREQ / 1000)
+ * lpf_coeff * 2 / f3db + 1) / 2;
+
+ if (lpf_mxdiv < mlpf_min) {
+ nlpf++;
+ lpf_mxdiv = (nlpf * (TS2020_XTAL_FREQ / 1000)
+ * lpf_coeff * 2 / f3db + 1) / 2;
+ }
+
+ if (lpf_mxdiv > mlpf_max)
+ lpf_mxdiv = mlpf_max;
+
+ ret = ts2020_writereg(fe, 0x04, lpf_mxdiv);
+ ret |= ts2020_writereg(fe, 0x06, nlpf);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x04);
+
+ ret |= ts2020_tuner_gate_ctrl(fe, 0x01);
+
+ msleep(80);
+ /* calculate offset assuming 96000 kHz */
+ offset_khz = (ndiv - ndiv % 2 + 1024) * TS2020_XTAL_FREQ
+ / (6 + 8) / (div4 + 1) / 2;
+
+ priv->frequency = offset_khz;
+
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int ts2020_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct ts2020_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+/* read TS2020 signal strength */
+static int ts2020_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ u16 sig_reading, sig_strength;
+ u8 rfgain, bbgain;
+
+ rfgain = ts2020_readreg(fe, 0x3d) & 0x1f;
+ bbgain = ts2020_readreg(fe, 0x21) & 0x1f;
+
+ if (rfgain > 15)
+ rfgain = 15;
+ if (bbgain > 13)
+ bbgain = 13;
+
+ sig_reading = rfgain * 2 + bbgain * 3;
+
+ sig_strength = 40 + (64 - sig_reading) * 50 / 64;
+
+ /* cook the value to be suitable for szap-s2 human readable output */
+ *signal_strength = sig_strength * 1000;
+
+ return 0;
+}
+
+static struct dvb_tuner_ops ts2020_tuner_ops = {
+ .info = {
+ .name = "TS2020",
+ .frequency_min = 950000,
+ .frequency_max = 2150000
+ },
+ .init = ts2020_init,
+ .release = ts2020_release,
+ .sleep = ts2020_sleep,
+ .set_params = ts2020_set_params,
+ .get_frequency = ts2020_get_frequency,
+ .get_rf_strength = ts2020_read_signal_strength,
+};
+
+struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct ts2020_priv *priv = NULL;
+ u8 buf;
+
+ priv = kzalloc(sizeof(struct ts2020_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c_address = config->tuner_address;
+ priv->i2c = i2c;
+ priv->clk_out_div = config->clk_out_div;
+ fe->tuner_priv = priv;
+
+ /* Wake Up the tuner */
+ if ((0x03 & ts2020_readreg(fe, 0x00)) == 0x00) {
+ ts2020_writereg(fe, 0x00, 0x01);
+ msleep(2);
+ }
+
+ ts2020_writereg(fe, 0x00, 0x03);
+ msleep(2);
+
+ /* Check the tuner version */
+ buf = ts2020_readreg(fe, 0x00);
+ if ((buf == 0x01) || (buf == 0x41) || (buf == 0x81))
+ printk(KERN_INFO "%s: Find tuner TS2020!\n", __func__);
+ else {
+ printk(KERN_ERR "%s: Read tuner reg[0] = %d\n", __func__, buf);
+ kfree(priv);
+ return NULL;
+ }
+
+ memcpy(&fe->ops.tuner_ops, &ts2020_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(ts2020_attach);
+
+MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
+MODULE_DESCRIPTION("Montage Technology TS2020 - Silicon tuner driver module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
new file mode 100644
index 000000000000..c7e64afa614a
--- /dev/null
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -0,0 +1,50 @@
+/*
+ Montage Technology TS2020 - Silicon Tuner driver
+ Copyright (C) 2009-2012 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
+
+ Copyright (C) 2009-2012 TurboSight.com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TS2020_H
+#define TS2020_H
+
+#include <linux/dvb/frontend.h>
+
+struct ts2020_config {
+ u8 tuner_address;
+ u8 clk_out_div;
+};
+
+#if defined(CONFIG_DVB_TS2020) || \
+ (defined(CONFIG_DVB_TS2020_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *ts2020_attach(
+ struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ts2020_attach(
+ struct dvb_frontend *fe,
+ const struct ts2020_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* TS2020_H */
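
The two new files add the Montage TS2020 silicon tuner as a standalone module, so demodulator drivers that previously carried their own copy of this tuner code can bind to it through the usual attach helper. A hedged sketch of how a card driver might wire it up; the I2C address and clk_out_div value are illustrative only, and dvb_attach() (declared in dvbdev.h) is used so module reference counting is handled as for the other attach functions in this series:

#include "dvb_frontend.h"
#include "ts2020.h"

static const struct ts2020_config hypothetical_ts2020_config = {
	.tuner_address = 0x60,	/* illustrative 7-bit I2C address */
	.clk_out_div = 1,	/* written to register 0x05 by ts2020_init() */
};

static int hypothetical_card_frontend_attach(struct dvb_frontend *fe,
					     struct i2c_adapter *i2c)
{
	/* ts2020_attach() returns fe on success and NULL if the tuner is not found */
	if (!dvb_attach(ts2020_attach, fe, &hypothetical_ts2020_config, i2c))
		return -ENODEV;

	return 0;
}
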
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index f83dbd5e42ae..83a9c30e67ca 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -34,7 +34,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_DVB_TUA6100) || (defined(CONFIG_DVB_TUA6100_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_TUA6100)
extern struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend* tua6100_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/ves1820.h b/drivers/media/dvb-frontends/ves1820.h
index e902ed634ec3..c073f353ac38 100644
--- a/drivers/media/dvb-frontends/ves1820.h
+++ b/drivers/media/dvb-frontends/ves1820.h
@@ -41,7 +41,7 @@ struct ves1820_config
u8 selagc:1;
};
-#if defined(CONFIG_DVB_VES1820) || (defined(CONFIG_DVB_VES1820_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_VES1820)
extern struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct i2c_adapter* i2c, u8 pwm);
#else
diff --git a/drivers/media/dvb-frontends/ves1x93.h b/drivers/media/dvb-frontends/ves1x93.h
index 8a5a49e808f6..2307caea6aec 100644
--- a/drivers/media/dvb-frontends/ves1x93.h
+++ b/drivers/media/dvb-frontends/ves1x93.h
@@ -40,7 +40,7 @@ struct ves1x93_config
u8 invert_pwm:1;
};
-#if defined(CONFIG_DVB_VES1X93) || (defined(CONFIG_DVB_VES1X93_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_VES1X93)
extern struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
struct i2c_adapter* i2c);
#else
diff --git a/drivers/media/dvb-frontends/zl10353.h b/drivers/media/dvb-frontends/zl10353.h
index 6e3ca9eed048..50c1004aef36 100644
--- a/drivers/media/dvb-frontends/zl10353.h
+++ b/drivers/media/dvb-frontends/zl10353.h
@@ -47,7 +47,7 @@ struct zl10353_config
u8 pll_0; /* default: 0x15 */
};
-#if defined(CONFIG_DVB_ZL10353) || (defined(CONFIG_DVB_ZL10353_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_DVB_ZL10353)
extern struct dvb_frontend* zl10353_attach(const struct zl10353_config *config,
struct i2c_adapter *i2c);
#else
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 24d78e28e493..7b771baa2212 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -1,16 +1,4 @@
#
-# Generic video config states
-#
-
-config VIDEO_BTCX
- depends on PCI
- tristate
-
-config VIDEO_TVEEPROM
- tristate
- depends on I2C
-
-#
# Multimedia Video device configuration
#
@@ -317,20 +305,6 @@ config VIDEO_SAA717X
source "drivers/media/i2c/cx25840/Kconfig"
-comment "MPEG video encoders"
-
-config VIDEO_CX2341X
- tristate "Conexant CX2341x MPEG encoders"
- depends on VIDEO_V4L2
- ---help---
- Support for the Conexant CX23416 MPEG encoders
- and CX23415 MPEG encoder/decoders.
-
- This module currently supports the encoding functions only.
-
- To compile this driver as a module, choose M here: the
- module will be called cx2341x.
-
comment "Video encoders"
config VIDEO_SAA7127
@@ -421,6 +395,13 @@ config VIDEO_OV7670
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_OV9650
+ tristate "OmniVision OV9650/OV9652 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for the OmniVision
+ OV9650 and OV9652 camera sensors.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -477,7 +458,7 @@ config VIDEO_MT9V032
config VIDEO_TCM825X
tristate "TCM825x camera sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
depends on MEDIA_CAMERA_SUPPORT
---help---
This is a driver for the Toshiba TCM825x VGA camera sensor.
@@ -516,6 +497,13 @@ config VIDEO_S5K4ECGX
source "drivers/media/i2c/smiapp/Kconfig"
+config VIDEO_S5C73M3
+ tristate "Samsung S5C73M3 sensor support"
+ depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for the Samsung S5C73M3
+ 8 Mpixel camera.
+
comment "Flash devices"
config VIDEO_ADP1653
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index b1d62dfd49b8..cfefd30cc1bc 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -47,8 +47,8 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
+obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
-obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -58,10 +58,9 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
-obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
-obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 64d71fb87a96..34f39d3b3e3e 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -402,9 +402,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
.s_std = adv7180_s_std,
- .queryctrl = v4l2_subdev_queryctrl,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
};
static const struct v4l2_subdev_ops adv7180_ops = {
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 2b5aa676a84e..9fc2b985df0e 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -43,6 +43,7 @@ MODULE_PARM_DESC(debug, "Debug level 0-1");
struct adv7343_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
+ const struct adv7343_platform_data *pdata;
u8 reg00;
u8 reg01;
u8 reg02;
@@ -215,12 +216,23 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
/* Enable Appropriate DAC */
val = state->reg00 & 0x03;
- if (output_type == ADV7343_COMPOSITE_ID)
- val |= ADV7343_COMPOSITE_POWER_VALUE;
- else if (output_type == ADV7343_COMPONENT_ID)
- val |= ADV7343_COMPONENT_POWER_VALUE;
+ /* apply the default configuration when no platform data is supplied */
+ if (!state->pdata)
+ if (output_type == ADV7343_COMPOSITE_ID)
+ val |= ADV7343_COMPOSITE_POWER_VALUE;
+ else if (output_type == ADV7343_COMPONENT_ID)
+ val |= ADV7343_COMPONENT_POWER_VALUE;
+ else
+ val |= ADV7343_SVIDEO_POWER_VALUE;
else
- val |= ADV7343_SVIDEO_POWER_VALUE;
+ val = state->pdata->mode_config.sleep_mode << 0 |
+ state->pdata->mode_config.pll_control << 1 |
+ state->pdata->mode_config.dac_3 << 2 |
+ state->pdata->mode_config.dac_2 << 3 |
+ state->pdata->mode_config.dac_1 << 4 |
+ state->pdata->mode_config.dac_6 << 5 |
+ state->pdata->mode_config.dac_5 << 6 |
+ state->pdata->mode_config.dac_4 << 7;
err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
if (err < 0)
@@ -238,6 +250,17 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
/* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
+
+ if (state->pdata && state->pdata->sd_config.sd_dac_out1)
+ val = val | (state->pdata->sd_config.sd_dac_out1 << 1);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out1)
+ val = val & ~(state->pdata->sd_config.sd_dac_out1 << 1);
+
+ if (state->pdata && state->pdata->sd_config.sd_dac_out2)
+ val = val | (state->pdata->sd_config.sd_dac_out2 << 2);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out2)
+ val = val & ~(state->pdata->sd_config.sd_dac_out2 << 2);
+
err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
if (err < 0)
goto setoutput_exit;
@@ -397,10 +420,14 @@ static int adv7343_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
+ state = devm_kzalloc(&client->dev, sizeof(struct adv7343_state),
+ GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
+ /* Copy board specific information here */
+ state->pdata = client->dev.platform_data;
+
state->reg00 = 0x80;
state->reg01 = 0x00;
state->reg02 = 0x20;
@@ -431,16 +458,13 @@ static int adv7343_probe(struct i2c_client *client,
int err = state->hdl.error;
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
return err;
}
v4l2_ctrl_handler_setup(&state->hdl);
err = adv7343_initialize(&state->sd);
- if (err) {
+ if (err)
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
- }
return err;
}
@@ -451,7 +475,6 @@ static int adv7343_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
return 0;
}
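
The adv7343 rework lets board code decide which DACs are powered and how the SD DAC outputs are routed by passing platform data, falling back to the old per-output defaults when none is given. A sketch of what a board file might register, based purely on the field names used in the hunk above; the full adv7343_platform_data layout lives in the adv7343 platform header and may have more members than shown:

#include <linux/i2c.h>
#include <media/adv7343.h>	/* assumed location of adv7343_platform_data */

static struct adv7343_platform_data hypothetical_adv7343_pdata = {
	.mode_config = {
		/* these bits are shifted into ADV7343_POWER_MODE_REG by adv7343_setoutput() */
		.dac_1 = 1,
		.dac_2 = 1,
		.dac_3 = 1,
	},
	.sd_config = {
		.sd_dac_out1 = 1,	/* see the SD_MODE_REG2 handling above */
		.sd_dac_out2 = 0,
	},
};

static struct i2c_board_info hypothetical_adv7343_board_info = {
	I2C_BOARD_INFO("adv7343", 0x2a),	/* illustrative 7-bit address */
	.platform_data = &hypothetical_adv7343_pdata,
};
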
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 38ce76ed1924..9ae977b5983a 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -1251,13 +1251,11 @@ int cx25840_ir_probe(struct v4l2_subdev *sd)
cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0);
mutex_init(&ir_state->rx_params_lock);
- memcpy(&default_params, &default_rx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_rx_params;
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&ir_state->tx_params_lock);
- memcpy(&default_params, &default_tx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_tx_params;
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
return 0;
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 6bf01ad62765..73b7688cbebd 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -13,6 +13,7 @@
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
@@ -48,68 +49,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define MT9V011_VERSION 0x8232
#define MT9V011_REV_B_VERSION 0x8243
-/* supported controls */
-static struct v4l2_queryctrl mt9v011_qctrl[] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = (1 << 12) - 1 - 0x0020,
- .step = 1,
- .default_value = 0x0020,
- .flags = 0,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 2047,
- .step = 1,
- .default_value = 0x01fc,
- .flags = 0,
- }, {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = -1 << 9,
- .maximum = (1 << 9) - 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = -1 << 9,
- .maximum = (1 << 9) - 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- }
-};
-
struct mt9v011 {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler ctrls;
unsigned width, height;
unsigned xtal;
unsigned hflip:1;
@@ -381,99 +323,6 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
set_read_mode(sd);
return 0;
-};
-
-static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct mt9v011 *core = to_mt9v011(sd);
-
- v4l2_dbg(1, debug, sd, "g_ctrl called\n");
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- ctrl->value = core->global_gain;
- return 0;
- case V4L2_CID_EXPOSURE:
- ctrl->value = core->exposure;
- return 0;
- case V4L2_CID_RED_BALANCE:
- ctrl->value = core->red_bal;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- ctrl->value = core->blue_bal;
- return 0;
- case V4L2_CID_HFLIP:
- ctrl->value = core->hflip ? 1 : 0;
- return 0;
- case V4L2_CID_VFLIP:
- ctrl->value = core->vflip ? 1 : 0;
- return 0;
- }
- return -EINVAL;
-}
-
-static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- int i;
-
- v4l2_dbg(1, debug, sd, "queryctrl called\n");
-
- for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++)
- if (qc->id && qc->id == mt9v011_qctrl[i].id) {
- memcpy(qc, &(mt9v011_qctrl[i]),
- sizeof(*qc));
- return 0;
- }
-
- return -EINVAL;
-}
-
-
-static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct mt9v011 *core = to_mt9v011(sd);
- u8 i, n;
- n = ARRAY_SIZE(mt9v011_qctrl);
-
- for (i = 0; i < n; i++) {
- if (ctrl->id != mt9v011_qctrl[i].id)
- continue;
- if (ctrl->value < mt9v011_qctrl[i].minimum ||
- ctrl->value > mt9v011_qctrl[i].maximum)
- return -ERANGE;
- v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
- ctrl->id, ctrl->value);
- break;
- }
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- core->global_gain = ctrl->value;
- break;
- case V4L2_CID_EXPOSURE:
- core->exposure = ctrl->value;
- break;
- case V4L2_CID_RED_BALANCE:
- core->red_bal = ctrl->value;
- break;
- case V4L2_CID_BLUE_BALANCE:
- core->blue_bal = ctrl->value;
- break;
- case V4L2_CID_HFLIP:
- core->hflip = ctrl->value;
- set_read_mode(sd);
- return 0;
- case V4L2_CID_VFLIP:
- core->vflip = ctrl->value;
- set_read_mode(sd);
- return 0;
- default:
- return -EINVAL;
- }
-
- set_balance(sd);
-
- return 0;
}
static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
@@ -599,10 +448,46 @@ static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
version);
}
-static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
- .queryctrl = mt9v011_queryctrl,
- .g_ctrl = mt9v011_g_ctrl,
+static int mt9v011_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9v011 *core =
+ container_of(ctrl->handler, struct mt9v011, ctrls);
+ struct v4l2_subdev *sd = &core->sd;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ core->global_gain = ctrl->val;
+ break;
+ case V4L2_CID_EXPOSURE:
+ core->exposure = ctrl->val;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ core->red_bal = ctrl->val;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ core->blue_bal = ctrl->val;
+ break;
+ case V4L2_CID_HFLIP:
+ core->hflip = ctrl->val;
+ set_read_mode(sd);
+ return 0;
+ case V4L2_CID_VFLIP:
+ core->vflip = ctrl->val;
+ set_read_mode(sd);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ set_balance(sd);
+ return 0;
+}
+
+static struct v4l2_ctrl_ops mt9v011_ctrl_ops = {
.s_ctrl = mt9v011_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.reset = mt9v011_reset,
.g_chip_ident = mt9v011_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -658,6 +543,30 @@ static int mt9v011_probe(struct i2c_client *c,
return -EINVAL;
}
+ v4l2_ctrl_handler_init(&core->ctrls, 5);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_GAIN, 0, (1 << 12) - 1 - 0x20, 1, 0x20);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 2047, 1, 0x01fc);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_RED_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (core->ctrls.error) {
+ int ret = core->ctrls.error;
+
+ v4l2_err(sd, "control initialization error %d\n", ret);
+ v4l2_ctrl_handler_free(&core->ctrls);
+ kfree(core);
+ return ret;
+ }
+ core->sd.ctrl_handler = &core->ctrls;
+
core->global_gain = 0x0024;
core->exposure = 0x01fc;
core->width = 640;
@@ -681,12 +590,14 @@ static int mt9v011_probe(struct i2c_client *c,
static int mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct mt9v011 *core = to_mt9v011(sd);
v4l2_dbg(1, debug, sd,
"mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
c->addr << 1);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&core->ctrls);
kfree(to_mt9v011(sd));
return 0;
}
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 440c12962bae..8554b47f993a 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -660,13 +660,6 @@ static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
static const struct v4l2_subdev_core_ops noon010_core_ops = {
.s_power = noon010_s_power,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
.log_status = noon010_log_status,
};
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e7c82b297514..05ed5b8e7f88 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -18,6 +18,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-mediabus.h>
#include <media/ov7670.h>
@@ -47,6 +48,8 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
*/
#define OV7670_I2C_ADDR 0x42
+#define PLL_FACTOR 4
+
/* Registers */
#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
#define REG_BLUE 0x01 /* blue gain */
@@ -164,6 +167,12 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
+#define REG_DBLV 0x6b /* PLL control and debugging */
+#define DBLV_BYPASS 0x00 /* Bypass PLL */
+#define DBLV_X4 0x01 /* clock x4 */
+#define DBLV_X6 0x10 /* clock x6 */
+#define DBLV_X8 0x11 /* clock x8 */
+
#define REG_REG76 0x76 /* OV's name */
#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
#define R76_WHTPCOR 0x40 /* White pixel correction enable */
@@ -183,6 +192,30 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
#define REG_BD60MAX 0xab /* 60hz banding step limit */
+enum ov7670_model {
+ MODEL_OV7670 = 0,
+ MODEL_OV7675,
+};
+
+struct ov7670_win_size {
+ int width;
+ int height;
+ unsigned char com7_bit;
+ int hstart; /* Start/stop values for the camera. Note */
+ int hstop; /* that they do not always make complete */
+ int vstart; /* sense to humans, but evidently the sensor */
+ int vstop; /* will do the right thing... */
+ struct regval_list *regs; /* Regs to tweak */
+};
+
+struct ov7670_devtype {
+ /* formats supported for each model */
+ struct ov7670_win_size *win_sizes;
+ unsigned int n_win_sizes;
+ /* callbacks for frame rate control */
+ int (*set_framerate)(struct v4l2_subdev *, struct v4l2_fract *);
+ void (*get_framerate)(struct v4l2_subdev *, struct v4l2_fract *);
+};
/*
* Information we maintain about a known sensor.
@@ -190,14 +223,31 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* gain cluster */
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+ struct {
+ /* exposure cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ /* saturation/hue cluster */
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ };
struct ov7670_format_struct *fmt; /* Current format */
- unsigned char sat; /* Saturation value */
- int hue; /* Hue value */
int min_width; /* Filter out smaller sizes */
int min_height; /* Filter out smaller sizes */
int clock_speed; /* External clock speed (MHz) */
u8 clkrc; /* Clock divider value */
bool use_smbus; /* Use smbus I/O instead of I2C */
+ bool pll_bypass;
+ bool pclk_hb_disable;
+ const struct ov7670_devtype *devtype; /* Device specifics */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -205,6 +255,11 @@ static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct ov7670_info, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov7670_info, hdl)->sd;
+}
+
/*
@@ -353,7 +408,7 @@ static struct regval_list ov7670_fmt_yuv422[] = {
{ REG_RGB444, 0 }, /* No RGB444 please */
{ REG_COM1, 0 }, /* CCIR601 */
{ REG_COM15, COM15_R00FF },
- { REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
+ { REG_COM9, 0x48 }, /* 32x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
{ 0x50, 0x80 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
@@ -652,65 +707,178 @@ static struct regval_list ov7670_qcif_regs[] = {
{ 0xff, 0xff },
};
-static struct ov7670_win_size {
- int width;
- int height;
- unsigned char com7_bit;
- int hstart; /* Start/stop values for the camera. Note */
- int hstop; /* that they do not always make complete */
- int vstart; /* sense to humans, but evidently the sensor */
- int vstop; /* will do the right thing... */
- struct regval_list *regs; /* Regs to tweak */
-/* h/vref stuff */
-} ov7670_win_sizes[] = {
+static struct ov7670_win_size ov7670_win_sizes[] = {
/* VGA */
{
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.com7_bit = COM7_FMT_VGA,
- .hstart = 158, /* These values from */
- .hstop = 14, /* Omnivision */
+ .hstart = 158, /* These values from */
+ .hstop = 14, /* Omnivision */
.vstart = 10,
.vstop = 490,
- .regs = NULL,
+ .regs = NULL,
},
/* CIF */
{
.width = CIF_WIDTH,
.height = CIF_HEIGHT,
.com7_bit = COM7_FMT_CIF,
- .hstart = 170, /* Empirically determined */
+ .hstart = 170, /* Empirically determined */
.hstop = 90,
.vstart = 14,
.vstop = 494,
- .regs = NULL,
+ .regs = NULL,
},
/* QVGA */
{
.width = QVGA_WIDTH,
.height = QVGA_HEIGHT,
.com7_bit = COM7_FMT_QVGA,
- .hstart = 168, /* Empirically determined */
+ .hstart = 168, /* Empirically determined */
.hstop = 24,
.vstart = 12,
.vstop = 492,
- .regs = NULL,
+ .regs = NULL,
},
/* QCIF */
{
.width = QCIF_WIDTH,
.height = QCIF_HEIGHT,
.com7_bit = COM7_FMT_VGA, /* see comment above */
- .hstart = 456, /* Empirically determined */
+ .hstart = 456, /* Empirically determined */
.hstop = 24,
.vstart = 14,
.vstop = 494,
- .regs = ov7670_qcif_regs,
- },
+ .regs = ov7670_qcif_regs,
+ }
+};
+
+static struct ov7670_win_size ov7675_win_sizes[] = {
+ /*
+ * Currently, only VGA is supported. CIF, QVGA and QCIF could in
+ * theory be supported as well, but that would require taking the
+ * ov7670 values as a base and tweaking them empirically.
+ */
+ {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .com7_bit = COM7_FMT_VGA,
+ .hstart = 158, /* These values from */
+ .hstop = 14, /* Omnivision */
+ .vstart = 14, /* Empirically determined */
+ .vstop = 494,
+ .regs = NULL,
+ }
};
-#define N_WIN_SIZES (ARRAY_SIZE(ov7670_win_sizes))
+static void ov7675_get_framerate(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ u32 clkrc = info->clkrc;
+ int pll_factor;
+
+ if (info->pll_bypass)
+ pll_factor = 1;
+ else
+ pll_factor = PLL_FACTOR;
+
+ clkrc++;
+ if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ clkrc = (clkrc >> 1);
+
+ tpf->numerator = 1;
+ tpf->denominator = (5 * pll_factor * info->clock_speed) /
+ (4 * clkrc);
+}
+
+static int ov7675_set_framerate(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ u32 clkrc;
+ int pll_factor;
+ int ret;
+
+ /*
+ * The formula is fps = 5/4*pixclk for YUV/RGB and
+ * fps = 5/2*pixclk for RAW.
+ *
+ * pixclk = clock_speed / (clkrc + 1) * PLLfactor
+ *
+ */
+ if (info->pll_bypass) {
+ pll_factor = 1;
+ ret = ov7670_write(sd, REG_DBLV, DBLV_BYPASS);
+ } else {
+ pll_factor = PLL_FACTOR;
+ ret = ov7670_write(sd, REG_DBLV, DBLV_X4);
+ }
+ if (ret < 0)
+ return ret;
+
+ if (tpf->numerator == 0 || tpf->denominator == 0) {
+ clkrc = 0;
+ } else {
+ clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) /
+ (4 * tpf->denominator);
+ if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ clkrc = (clkrc << 1);
+ clkrc--;
+ }
+
+ /*
+ * The datasheet claims that clkrc = 0 will divide the input clock by 1
+ * but we've checked with an oscilloscope that it divides by 2 instead.
+ * So, if clkrc = 0 just bypass the divider.
+ */
+ if (clkrc <= 0)
+ clkrc = CLK_EXT;
+ else if (clkrc > CLK_SCALE)
+ clkrc = CLK_SCALE;
+ info->clkrc = clkrc;
+
+ /* Recalculate frame rate */
+ ov7675_get_framerate(sd, tpf);
+
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
+ if (ret < 0)
+ return ret;
+
+ return ov7670_write(sd, REG_DBLV, DBLV_X4);
+}
+
+static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ tpf->numerator = 1;
+ tpf->denominator = info->clock_speed;
+ if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
+ tpf->denominator /= (info->clkrc & CLK_SCALE);
+}
+
+static int ov7670_set_framerate_legacy(struct v4l2_subdev *sd,
+ struct v4l2_fract *tpf)
+{
+ struct ov7670_info *info = to_state(sd);
+ int div;
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ div = 1; /* Reset to full rate */
+ else
+ div = (tpf->numerator * info->clock_speed) / tpf->denominator;
+ if (div == 0)
+ div = 1;
+ else if (div > CLK_SCALE)
+ div = CLK_SCALE;
+ info->clkrc = (info->clkrc & 0x80) | div;
+ tpf->numerator = 1;
+ tpf->denominator = info->clock_speed / div;
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
+}
/*
* Store a set of start/stop values into the camera.
@@ -759,8 +927,11 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
struct ov7670_format_struct **ret_fmt,
struct ov7670_win_size **ret_wsize)
{
- int index;
+ int index, i;
struct ov7670_win_size *wsize;
+ struct ov7670_info *info = to_state(sd);
+ unsigned int n_win_sizes = info->devtype->n_win_sizes;
+ unsigned int win_sizes_limit = n_win_sizes;
for (index = 0; index < N_OV7670_FMTS; index++)
if (ov7670_formats[index].mbus_code == fmt->code)
@@ -776,15 +947,30 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
* Fields: the OV devices claim to be progressive.
*/
fmt->field = V4L2_FIELD_NONE;
+
+ /*
+ * Don't consider values that don't match min_height and min_width
+ * constraints.
+ */
+ if (info->min_width || info->min_height)
+ for (i = 0; i < n_win_sizes; i++) {
+ wsize = info->devtype->win_sizes + i;
+
+ if (wsize->width < info->min_width ||
+ wsize->height < info->min_height) {
+ win_sizes_limit = i;
+ break;
+ }
+ }
/*
* Round requested image size down to the nearest
* we support, but not below the smallest.
*/
- for (wsize = ov7670_win_sizes; wsize < ov7670_win_sizes + N_WIN_SIZES;
- wsize++)
+ for (wsize = info->devtype->win_sizes;
+ wsize < info->devtype->win_sizes + win_sizes_limit; wsize++)
if (fmt->width >= wsize->width && fmt->height >= wsize->height)
break;
- if (wsize >= ov7670_win_sizes + N_WIN_SIZES)
+ if (wsize >= info->devtype->win_sizes + win_sizes_limit)
wsize--; /* Take the smallest one */
if (ret_wsize != NULL)
*ret_wsize = wsize;
@@ -868,10 +1054,8 @@ static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
- cp->timeperframe.numerator = 1;
- cp->timeperframe.denominator = info->clock_speed;
- if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
- cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
+ info->devtype->get_framerate(sd, &cp->timeperframe);
+
return 0;
}
@@ -880,25 +1064,13 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
struct ov7670_info *info = to_state(sd);
- int div;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
- if (tpf->numerator == 0 || tpf->denominator == 0)
- div = 1; /* Reset to full rate */
- else
- div = (tpf->numerator * info->clock_speed) / tpf->denominator;
- if (div == 0)
- div = 1;
- else if (div > CLK_SCALE)
- div = CLK_SCALE;
- info->clkrc = (info->clkrc & 0x80) | div;
- tpf->numerator = 1;
- tpf->denominator = info->clock_speed / div;
- return ov7670_write(sd, REG_CLKRC, info->clkrc);
+ return info->devtype->set_framerate(sd, tpf);
}
@@ -931,13 +1103,14 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
int i;
int num_valid = -1;
__u32 index = fsize->index;
+ unsigned int n_win_sizes = info->devtype->n_win_sizes;
/*
* If a minimum width/height was requested, filter out the capture
* windows that fall outside that.
*/
- for (i = 0; i < N_WIN_SIZES; i++) {
- struct ov7670_win_size *win = &ov7670_win_sizes[index];
+ for (i = 0; i < n_win_sizes; i++) {
+ struct ov7670_win_size *win = &info->devtype->win_sizes[index];
if (info->min_width && win->width < info->min_width)
continue;
if (info->min_height && win->height < info->min_height)
@@ -1042,23 +1215,23 @@ static int ov7670_cosine(int theta)
static void ov7670_calc_cmatrix(struct ov7670_info *info,
- int matrix[CMATRIX_LEN])
+ int matrix[CMATRIX_LEN], int sat, int hue)
{
int i;
/*
* Apply the current saturation setting first.
*/
for (i = 0; i < CMATRIX_LEN; i++)
- matrix[i] = (info->fmt->cmatrix[i]*info->sat) >> 7;
+ matrix[i] = (info->fmt->cmatrix[i] * sat) >> 7;
/*
* Then, if need be, rotate the hue value.
*/
- if (info->hue != 0) {
+ if (hue != 0) {
int sinth, costh, tmpmatrix[CMATRIX_LEN];
memcpy(tmpmatrix, matrix, CMATRIX_LEN*sizeof(int));
- sinth = ov7670_sine(info->hue);
- costh = ov7670_cosine(info->hue);
+ sinth = ov7670_sine(hue);
+ costh = ov7670_cosine(hue);
matrix[0] = (matrix[3]*sinth + matrix[0]*costh)/1000;
matrix[1] = (matrix[4]*sinth + matrix[1]*costh)/1000;
@@ -1071,60 +1244,21 @@ static void ov7670_calc_cmatrix(struct ov7670_info *info,
-static int ov7670_s_sat(struct v4l2_subdev *sd, int value)
-{
- struct ov7670_info *info = to_state(sd);
- int matrix[CMATRIX_LEN];
- int ret;
-
- info->sat = value;
- ov7670_calc_cmatrix(info, matrix);
- ret = ov7670_store_cmatrix(sd, matrix);
- return ret;
-}
-
-static int ov7670_g_sat(struct v4l2_subdev *sd, __s32 *value)
-{
- struct ov7670_info *info = to_state(sd);
-
- *value = info->sat;
- return 0;
-}
-
-static int ov7670_s_hue(struct v4l2_subdev *sd, int value)
+static int ov7670_s_sat_hue(struct v4l2_subdev *sd, int sat, int hue)
{
struct ov7670_info *info = to_state(sd);
int matrix[CMATRIX_LEN];
int ret;
- if (value < -180 || value > 180)
- return -EINVAL;
- info->hue = value;
- ov7670_calc_cmatrix(info, matrix);
+ ov7670_calc_cmatrix(info, matrix, sat, hue);
ret = ov7670_store_cmatrix(sd, matrix);
return ret;
}
-static int ov7670_g_hue(struct v4l2_subdev *sd, __s32 *value)
-{
- struct ov7670_info *info = to_state(sd);
-
- *value = info->hue;
- return 0;
-}
-
-
/*
* Some weird registers seem to store values in a sign/magnitude format!
*/
-static unsigned char ov7670_sm_to_abs(unsigned char v)
-{
- if ((v & 0x80) == 0)
- return v + 128;
- return 128 - (v & 0x7f);
-}
-
static unsigned char ov7670_abs_to_sm(unsigned char v)
{
@@ -1146,40 +1280,11 @@ static int ov7670_s_brightness(struct v4l2_subdev *sd, int value)
return ret;
}
-static int ov7670_g_brightness(struct v4l2_subdev *sd, __s32 *value)
-{
- unsigned char v = 0;
- int ret = ov7670_read(sd, REG_BRIGHT, &v);
-
- *value = ov7670_sm_to_abs(v);
- return ret;
-}
-
static int ov7670_s_contrast(struct v4l2_subdev *sd, int value)
{
return ov7670_write(sd, REG_CONTRAS, (unsigned char) value);
}
-static int ov7670_g_contrast(struct v4l2_subdev *sd, __s32 *value)
-{
- unsigned char v = 0;
- int ret = ov7670_read(sd, REG_CONTRAS, &v);
-
- *value = v;
- return ret;
-}
-
-static int ov7670_g_hflip(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char v = 0;
-
- ret = ov7670_read(sd, REG_MVFP, &v);
- *value = (v & MVFP_MIRROR) == MVFP_MIRROR;
- return ret;
-}
-
-
static int ov7670_s_hflip(struct v4l2_subdev *sd, int value)
{
unsigned char v = 0;
@@ -1195,19 +1300,6 @@ static int ov7670_s_hflip(struct v4l2_subdev *sd, int value)
return ret;
}
-
-
-static int ov7670_g_vflip(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char v = 0;
-
- ret = ov7670_read(sd, REG_MVFP, &v);
- *value = (v & MVFP_FLIP) == MVFP_FLIP;
- return ret;
-}
-
-
static int ov7670_s_vflip(struct v4l2_subdev *sd, int value)
{
unsigned char v = 0;
@@ -1256,16 +1348,6 @@ static int ov7670_s_gain(struct v4l2_subdev *sd, int value)
/*
* Tweak autogain.
*/
-static int ov7670_g_autogain(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com8;
-
- ret = ov7670_read(sd, REG_COM8, &com8);
- *value = (com8 & COM8_AGC) != 0;
- return ret;
-}
-
static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
{
int ret;
@@ -1282,22 +1364,6 @@ static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
return ret;
}
-/*
- * Exposure is spread all over the place: top 6 bits in AECHH, middle
- * 8 in AECH, and two stashed in COM1 just for the hell of it.
- */
-static int ov7670_g_exp(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com1, aech, aechh;
-
- ret = ov7670_read(sd, REG_COM1, &com1) +
- ov7670_read(sd, REG_AECH, &aech) +
- ov7670_read(sd, REG_AECHH, &aechh);
- *value = ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
- return ret;
-}
-
static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
{
int ret;
@@ -1324,20 +1390,6 @@ static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
/*
* Tweak autoexposure.
*/
-static int ov7670_g_autoexp(struct v4l2_subdev *sd, __s32 *value)
-{
- int ret;
- unsigned char com8;
- enum v4l2_exposure_auto_type *atype = (enum v4l2_exposure_auto_type *) value;
-
- ret = ov7670_read(sd, REG_COM8, &com8);
- if (com8 & COM8_AEC)
- *atype = V4L2_EXPOSURE_AUTO;
- else
- *atype = V4L2_EXPOSURE_MANUAL;
- return ret;
-}
-
static int ov7670_s_autoexp(struct v4l2_subdev *sd,
enum v4l2_exposure_auto_type value)
{
@@ -1356,90 +1408,60 @@ static int ov7670_s_autoexp(struct v4l2_subdev *sd,
}
-
-static int ov7670_queryctrl(struct v4l2_subdev *sd,
- struct v4l2_queryctrl *qc)
+static int ov7670_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- /* Fill in min, max, step and default value for these controls. */
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
- case V4L2_CID_VFLIP:
- case V4L2_CID_HFLIP:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 256, 1, 128);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -180, 180, 5, 0);
- case V4L2_CID_GAIN:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_AUTOGAIN:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_EXPOSURE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 1, 500);
- case V4L2_CID_EXPOSURE_AUTO:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- }
- return -EINVAL;
-}
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct ov7670_info *info = to_state(sd);
-static int ov7670_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- return ov7670_g_brightness(sd, &ctrl->value);
- case V4L2_CID_CONTRAST:
- return ov7670_g_contrast(sd, &ctrl->value);
- case V4L2_CID_SATURATION:
- return ov7670_g_sat(sd, &ctrl->value);
- case V4L2_CID_HUE:
- return ov7670_g_hue(sd, &ctrl->value);
- case V4L2_CID_VFLIP:
- return ov7670_g_vflip(sd, &ctrl->value);
- case V4L2_CID_HFLIP:
- return ov7670_g_hflip(sd, &ctrl->value);
- case V4L2_CID_GAIN:
- return ov7670_g_gain(sd, &ctrl->value);
case V4L2_CID_AUTOGAIN:
- return ov7670_g_autogain(sd, &ctrl->value);
- case V4L2_CID_EXPOSURE:
- return ov7670_g_exp(sd, &ctrl->value);
- case V4L2_CID_EXPOSURE_AUTO:
- return ov7670_g_autoexp(sd, &ctrl->value);
+ return ov7670_g_gain(sd, &info->gain->val);
}
return -EINVAL;
}
-static int ov7670_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int ov7670_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct ov7670_info *info = to_state(sd);
+
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- return ov7670_s_brightness(sd, ctrl->value);
+ return ov7670_s_brightness(sd, ctrl->val);
case V4L2_CID_CONTRAST:
- return ov7670_s_contrast(sd, ctrl->value);
+ return ov7670_s_contrast(sd, ctrl->val);
case V4L2_CID_SATURATION:
- return ov7670_s_sat(sd, ctrl->value);
- case V4L2_CID_HUE:
- return ov7670_s_hue(sd, ctrl->value);
+ return ov7670_s_sat_hue(sd,
+ info->saturation->val, info->hue->val);
case V4L2_CID_VFLIP:
- return ov7670_s_vflip(sd, ctrl->value);
+ return ov7670_s_vflip(sd, ctrl->val);
case V4L2_CID_HFLIP:
- return ov7670_s_hflip(sd, ctrl->value);
- case V4L2_CID_GAIN:
- return ov7670_s_gain(sd, ctrl->value);
+ return ov7670_s_hflip(sd, ctrl->val);
case V4L2_CID_AUTOGAIN:
- return ov7670_s_autogain(sd, ctrl->value);
- case V4L2_CID_EXPOSURE:
- return ov7670_s_exp(sd, ctrl->value);
+ /* Only set manual gain if auto gain is not explicitly
+ turned on. */
+ if (!ctrl->val) {
+ /* ov7670_s_gain turns off auto gain */
+ return ov7670_s_gain(sd, info->gain->val);
+ }
+ return ov7670_s_autogain(sd, ctrl->val);
case V4L2_CID_EXPOSURE_AUTO:
- return ov7670_s_autoexp(sd,
- (enum v4l2_exposure_auto_type) ctrl->value);
+ /* Only set manual exposure if auto exposure is not explicitly
+ turned on. */
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ /* ov7670_s_exp turns off auto exposure */
+ return ov7670_s_exp(sd, info->exposure->val);
+ }
+ return ov7670_s_autoexp(sd, ctrl->val);
}
return -EINVAL;
}
+static const struct v4l2_ctrl_ops ov7670_ctrl_ops = {
+ .s_ctrl = ov7670_s_ctrl,
+ .g_volatile_ctrl = ov7670_g_volatile_ctrl,
+};
+
static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
@@ -1482,9 +1504,6 @@ static int ov7670_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *r
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.g_chip_ident = ov7670_g_chip_ident,
- .g_ctrl = ov7670_g_ctrl,
- .s_ctrl = ov7670_s_ctrl,
- .queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1510,9 +1529,25 @@ static const struct v4l2_subdev_ops ov7670_ops = {
/* ----------------------------------------------------------------------- */
+static const struct ov7670_devtype ov7670_devdata[] = {
+ [MODEL_OV7670] = {
+ .win_sizes = ov7670_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
+ .set_framerate = ov7670_set_framerate_legacy,
+ .get_framerate = ov7670_get_framerate_legacy,
+ },
+ [MODEL_OV7675] = {
+ .win_sizes = ov7675_win_sizes,
+ .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
+ .set_framerate = ov7675_set_framerate,
+ .get_framerate = ov7675_get_framerate,
+ },
+};
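+/*
+ * Note: the devtype entry is selected in ov7670_probe() via id->driver_data,
+ * as set in the ov7670_id device table below.
+ */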
+
static int ov7670_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct v4l2_fract tpf;
struct v4l2_subdev *sd;
struct ov7670_info *info;
int ret;
@@ -1537,6 +1572,16 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
+
+ /*
+		 * PLL bypass should be allowed for the ov7670 too once that
+		 * model is migrated to the new frame rate formula.
+ */
+ if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+ info->pll_bypass = true;
+
+ if (config->pclk_hb_disable)
+ info->pclk_hb_disable = true;
}
/* Make sure it's an ov7670 */
@@ -1551,9 +1596,58 @@ static int ov7670_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
+ info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = info->clock_speed / 30;
+ info->clkrc = 0;
+
+ /* Set default frame rate to 30 fps */
+ tpf.numerator = 1;
+ tpf.denominator = 30;
+ info->devtype->set_framerate(sd, &tpf);
+
+ if (info->pclk_hb_disable)
+ ov7670_write(sd, REG_COM10, COM10_PCLK_HB);
+
+ v4l2_ctrl_handler_init(&info->hdl, 10);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ info->saturation = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 256, 1, 128);
+ info->hue = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_HUE, -180, 180, 5, 0);
+ info->gain = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 128);
+ info->auto_gain = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ info->exposure = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 65535, 1, 500);
+ info->auto_exposure = v4l2_ctrl_new_std_menu(&info->hdl, &ov7670_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+ sd->ctrl_handler = &info->hdl;
+ if (info->hdl.error) {
+ int err = info->hdl.error;
+
+ v4l2_ctrl_handler_free(&info->hdl);
+ kfree(info);
+ return err;
+ }
+ /*
+	 * We have checked empirically that the hardware allows reading back
+	 * the gain value chosen by auto gain, but that is not the case for
+	 * auto exposure.
+ */
+ v4l2_ctrl_auto_cluster(2, &info->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &info->auto_exposure,
+ V4L2_EXPOSURE_MANUAL, false);
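+	/*
+	 * Saturation and hue are clustered below so that ov7670_s_sat_hue()
+	 * always receives both values together.
+	 */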
+ v4l2_ctrl_cluster(2, &info->saturation);
+ v4l2_ctrl_handler_setup(&info->hdl);
+
return 0;
}
@@ -1561,14 +1655,17 @@ static int ov7670_probe(struct i2c_client *client,
static int ov7670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov7670_info *info = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
+ v4l2_ctrl_handler_free(&info->hdl);
+ kfree(info);
return 0;
}
static const struct i2c_device_id ov7670_id[] = {
- { "ov7670", 0 },
+ { "ov7670", MODEL_OV7670 },
+ { "ov7675", MODEL_OV7675 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
new file mode 100644
index 000000000000..1dbb8118a285
--- /dev/null
+++ b/drivers/media/i2c/ov9650.c
@@ -0,0 +1,1562 @@
+/*
+ * Omnivision OV9650/OV9652 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2013, Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ *
+ * Register definitions and initial settings based on a driver written
+ * by Vladimir Fonov.
+ * Copyright (c) 2010, Vladimir Fonov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/ov9650.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+#define DRIVER_NAME "OV9650"
+
+/*
+ * OV9650/OV9652 register definitions
+ */
+#define REG_GAIN 0x00 /* Gain control, AGC[7:0] */
+#define REG_BLUE 0x01 /* AWB - Blue channel gain */
+#define REG_RED 0x02 /* AWB - Red channel gain */
+#define REG_VREF 0x03 /* [7:6] - AGC[9:8], [5:3]/[2:0] */
+#define VREF_GAIN_MASK 0xc0 /* AGC[9:8]; [5:3]/[2:0] - VREF end/start low 3 bits */
+#define REG_COM1 0x04
+#define COM1_CCIR656 0x40
+#define REG_B_AVE 0x05
+#define REG_GB_AVE 0x06
+#define REG_GR_AVE 0x07
+#define REG_R_AVE 0x08
+#define REG_COM2 0x09
+#define REG_PID 0x0a /* Product ID MSB */
+#define REG_VER 0x0b /* Product ID LSB */
+#define REG_COM3 0x0c
+#define COM3_SWAP 0x40
+#define COM3_VARIOPIXEL1 0x04
+#define REG_COM4 0x0d /* Vario Pixels */
+#define COM4_VARIOPIXEL2 0x80
+#define REG_COM5 0x0e /* System clock options */
+#define COM5_SLAVE_MODE 0x10
+#define COM5_SYSTEMCLOCK48MHZ 0x80
+#define REG_COM6 0x0f /* HREF & ADBLC options */
+#define REG_AECH 0x10 /* Exposure value, AEC[9:2] */
+#define REG_CLKRC 0x11 /* Clock control */
+#define CLK_EXT 0x40 /* Use external clock directly */
+#define CLK_SCALE 0x3f /* Mask for internal clock scale */
+#define REG_COM7 0x12 /* SCCB reset, output format */
+#define COM7_RESET 0x80
+#define COM7_FMT_MASK 0x38
+#define COM7_FMT_VGA 0x40
+#define COM7_FMT_CIF 0x20
+#define COM7_FMT_QVGA 0x10
+#define COM7_FMT_QCIF 0x08
+#define COM7_RGB 0x04
+#define COM7_YUV 0x00
+#define COM7_BAYER 0x01
+#define COM7_PBAYER 0x05
+#define REG_COM8 0x13 /* AGC/AEC options */
+#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */
+#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */
+#define COM8_BFILT 0x20 /* Band filter enable */
+#define COM8_AGC 0x04 /* Auto gain enable */
+#define COM8_AWB 0x02 /* White balance enable */
+#define COM8_AEC 0x01 /* Auto exposure enable */
+#define REG_COM9 0x14 /* Gain ceiling */
+#define COM9_GAIN_CEIL_MASK 0x70 /* Gain ceiling mask */
+#define REG_COM10 0x15 /* PCLK, HREF, HSYNC signals polarity */
+#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */
+#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */
+#define COM10_HREF_REV 0x08 /* Reverse HREF */
+#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */
+#define COM10_VS_NEG 0x02 /* VSYNC negative */
+#define COM10_HS_NEG 0x01 /* HSYNC negative */
+#define REG_HSTART 0x17 /* Horiz start high bits */
+#define REG_HSTOP 0x18 /* Horiz stop high bits */
+#define REG_VSTART 0x19 /* Vert start high bits */
+#define REG_VSTOP 0x1a /* Vert stop high bits */
+#define REG_PSHFT 0x1b /* Pixel delay after HREF */
+#define REG_MIDH 0x1c /* Manufacturer ID MSB */
+#define REG_MIDL 0x1d /* Manufacturer ID LSB */
+#define REG_MVFP 0x1e /* Image mirror/flip */
+#define MVFP_MIRROR 0x20 /* Mirror image */
+#define MVFP_FLIP 0x10 /* Vertical flip */
+#define REG_BOS 0x20 /* B channel Offset */
+#define REG_GBOS 0x21 /* Gb channel Offset */
+#define REG_GROS 0x22 /* Gr channel Offset */
+#define REG_ROS 0x23 /* R channel Offset */
+#define REG_AEW 0x24 /* AGC upper limit */
+#define REG_AEB 0x25 /* AGC lower limit */
+#define REG_VPT 0x26 /* AGC/AEC fast mode op region */
+#define REG_BBIAS 0x27 /* B channel output bias */
+#define REG_GBBIAS 0x28 /* Gb channel output bias */
+#define REG_GRCOM 0x29 /* Analog BLC & regulator */
+#define REG_EXHCH 0x2a /* Dummy pixel insert MSB */
+#define REG_EXHCL 0x2b /* Dummy pixel insert LSB */
+#define REG_RBIAS 0x2c /* R channel output bias */
+#define REG_ADVFL 0x2d /* LSB of dummy line insert */
+#define REG_ADVFH 0x2e /* MSB of dummy line insert */
+#define REG_YAVE 0x2f /* Y/G channel average value */
+#define REG_HSYST 0x30 /* HSYNC rising edge delay LSB */
+#define REG_HSYEN 0x31 /* HSYNC falling edge delay LSB */
+#define REG_HREF 0x32 /* HREF pieces */
+#define REG_CHLF 0x33 /* reserved */
+#define REG_ADC 0x37 /* reserved */
+#define REG_ACOM 0x38 /* reserved */
+#define REG_OFON 0x39 /* Power down register */
+#define OFON_PWRDN 0x08 /* Power down bit */
+#define REG_TSLB 0x3a /* YUVU format */
+#define TSLB_YUYV_MASK 0x0c /* UYVY or VYUY - see com13 */
+#define REG_COM11 0x3b /* Night mode, banding filter enable */
+#define COM11_NIGHT 0x80 /* Night mode enable */
+#define COM11_NMFR 0x60 /* Two bit NM frame rate */
+#define COM11_BANDING 0x01 /* Banding filter */
+#define COM11_AEC_REF_MASK 0x18 /* AEC reference area selection */
+#define REG_COM12 0x3c /* HREF option, UV average */
+#define COM12_HREF 0x80 /* HREF always */
+#define REG_COM13 0x3d /* Gamma selection, Color matrix en. */
+#define COM13_GAMMA 0x80 /* Gamma enable */
+#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */
+#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */
+#define REG_COM14 0x3e /* Edge enhancement options */
+#define COM14_EDGE_EN 0x02
+#define COM14_EEF_X2 0x01
+#define REG_EDGE 0x3f /* Edge enhancement factor */
+#define EDGE_FACTOR_MASK 0x0f
+#define REG_COM15 0x40 /* Output range, RGB 555/565 */
+#define COM15_R10F0 0x00 /* Data range 10 to F0 */
+#define COM15_R01FE 0x80 /* 01 to FE */
+#define COM15_R00FF 0xc0 /* 00 to FF */
+#define COM15_RGB565 0x10 /* RGB565 output */
+#define COM15_RGB555 0x30 /* RGB555 output */
+#define COM15_SWAPRB 0x04 /* Swap R&B */
+#define REG_COM16 0x41 /* Color matrix coeff options */
+#define REG_COM17 0x42 /* Single frame out, banding filter */
+/* n = 1...9, 0x4f..0x57 */
+#define REG_MTX(__n) (0x4f + (__n) - 1)
+#define REG_MTXS 0x58
+/* Lens Correction Option 1...5, __n = 0...5 */
+#define REG_LCC(__n) (0x62 + (__n) - 1)
+#define LCC5_LCC_ENABLE 0x01 /* LCC5, enable lens correction */
+#define LCC5_LCC_COLOR 0x04
+#define REG_MANU 0x67 /* Manual U value */
+#define REG_MANV 0x68 /* Manual V value */
+#define REG_HV 0x69 /* Manual banding filter MSB */
+#define REG_MBD 0x6a /* Manual banding filter value */
+#define REG_DBLV 0x6b /* reserved */
+#define REG_GSP 0x6c /* Gamma curve */
+#define GSP_LEN 15
+#define REG_GST 0x7c /* Gamma curve */
+#define GST_LEN 15
+#define REG_COM21 0x8b
+#define REG_COM22 0x8c /* Edge enhancement, denoising */
+#define COM22_WHTPCOR 0x02 /* White pixel correction enable */
+#define COM22_WHTPCOROPT 0x01 /* White pixel correction option */
+#define COM22_DENOISE 0x10 /* Denoise enable */
+#define REG_COM23 0x8d /* Color bar test, color gain */
+#define COM23_TEST_MODE 0x10
+#define REG_DBLC1 0x8f /* Digital BLC */
+#define REG_DBLC_B 0x90 /* Digital BLC B channel offset */
+#define REG_DBLC_R 0x91 /* Digital BLC R channel offset */
+#define REG_DM_LNL 0x92 /* Dummy line low 8 bits */
+#define REG_DM_LNH 0x93 /* Dummy line high 8 bits */
+#define REG_LCCFB 0x9d /* Lens Correction B channel */
+#define REG_LCCFR 0x9e /* Lens Correction R channel */
+#define REG_DBLC_GB 0x9f /* Digital BLC GB chan offset */
+#define REG_DBLC_GR 0xa0 /* Digital BLC GR chan offset */
+#define REG_AECHM 0xa1 /* Exposure value - bits AEC[15:10] */
+#define REG_BD50ST 0xa2 /* Banding filter value for 50Hz */
+#define REG_BD60ST 0xa3 /* Banding filter value for 60Hz */
+#define REG_NULL 0xff /* Array end token */
+
+#define DEF_CLKRC 0x80
+
+#define OV965X_ID(_msb, _lsb) ((_msb) << 8 | (_lsb))
+#define OV9650_ID 0x9650
+#define OV9652_ID 0x9652
+
+struct ov965x_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ struct v4l2_ctrl *auto_wb;
+ struct v4l2_ctrl *blue_balance;
+ struct v4l2_ctrl *red_balance;
+ };
+ struct {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct {
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *light_freq;
+ u8 update;
+};
+
+struct ov965x_framesize {
+ u16 width;
+ u16 height;
+ u16 max_exp_lines;
+ const u8 *regs;
+};
+
+struct ov965x_interval {
+ struct v4l2_fract interval;
+ /* Maximum resolution for this interval */
+ struct v4l2_frmsize_discrete size;
+ u8 clkrc_div;
+};
+
+enum gpio_id {
+ GPIO_PWDN,
+ GPIO_RST,
+ NUM_GPIOS,
+};
+
+struct ov965x {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ enum v4l2_mbus_type bus_type;
+ int gpios[NUM_GPIOS];
+ /* External master clock frequency */
+ unsigned long mclk_frequency;
+
+ /* Protects the struct fields below */
+ struct mutex lock;
+
+ struct i2c_client *client;
+
+ /* Exposure row interval in us */
+ unsigned int exp_row_interval;
+
+ unsigned short id;
+ const struct ov965x_framesize *frame_size;
+ /* YUYV sequence (pixel format) control register */
+ u8 tslb_reg;
+ struct v4l2_mbus_framefmt format;
+
+ struct ov965x_ctrls ctrls;
+ /* Pointer to frame rate control data structure */
+ const struct ov965x_interval *fiv;
+
+ int streaming;
+ int power;
+
+ u8 apply_frame_fmt;
+};
+
+struct i2c_rv {
+ u8 addr;
+ u8 value;
+};
+
+static const struct i2c_rv ov965x_init_regs[] = {
+ { REG_COM2, 0x10 }, /* Set soft sleep mode */
+ { REG_COM5, 0x00 }, /* System clock options */
+ { REG_COM2, 0x01 }, /* Output drive, soft sleep mode */
+ { REG_COM10, 0x00 }, /* Slave mode, HREF vs HSYNC, signals negate */
+	{ REG_EDGE, 0xa6 }, /* Edge enhancement threshold and factor */
+ { REG_COM16, 0x02 }, /* Color matrix coeff double option */
+ { REG_COM17, 0x08 }, /* Single frame out, banding filter */
+ { 0x16, 0x06 },
+ { REG_CHLF, 0xc0 }, /* Reserved */
+ { 0x34, 0xbf },
+ { 0xa8, 0x80 },
+ { 0x96, 0x04 },
+ { 0x8e, 0x00 },
+ { REG_COM12, 0x77 }, /* HREF option, UV average */
+ { 0x8b, 0x06 },
+ { 0x35, 0x91 },
+ { 0x94, 0x88 },
+ { 0x95, 0x88 },
+ { REG_COM15, 0xc1 }, /* Output range, RGB 555/565 */
+ { REG_GRCOM, 0x2f }, /* Analog BLC & regulator */
+ { REG_COM6, 0x43 }, /* HREF & ADBLC options */
+ { REG_COM8, 0xe5 }, /* AGC/AEC options */
+ { REG_COM13, 0x90 }, /* Gamma selection, colour matrix, UV delay */
+ { REG_HV, 0x80 }, /* Manual banding filter MSB */
+ { 0x5c, 0x96 }, /* Reserved up to 0xa5 */
+ { 0x5d, 0x96 },
+ { 0x5e, 0x10 },
+ { 0x59, 0xeb },
+ { 0x5a, 0x9c },
+ { 0x5b, 0x55 },
+ { 0x43, 0xf0 },
+ { 0x44, 0x10 },
+ { 0x45, 0x55 },
+ { 0x46, 0x86 },
+ { 0x47, 0x64 },
+ { 0x48, 0x86 },
+ { 0x5f, 0xe0 },
+ { 0x60, 0x8c },
+ { 0x61, 0x20 },
+ { 0xa5, 0xd9 },
+ { 0xa4, 0x74 }, /* reserved */
+ { REG_COM23, 0x02 }, /* Color gain analog/_digital_ */
+	{ REG_COM8, 0xe7 }, /* Enable AGC, AWB, AEC */
+ { REG_COM22, 0x23 }, /* Edge enhancement, denoising */
+ { 0xa9, 0xb8 },
+ { 0xaa, 0x92 },
+ { 0xab, 0x0a },
+ { REG_DBLC1, 0xdf }, /* Digital BLC */
+ { REG_DBLC_B, 0x00 }, /* Digital BLC B chan offset */
+ { REG_DBLC_R, 0x00 }, /* Digital BLC R chan offset */
+ { REG_DBLC_GB, 0x00 }, /* Digital BLC GB chan offset */
+ { REG_DBLC_GR, 0x00 },
+ { REG_COM9, 0x3a }, /* Gain ceiling 16x */
+ { REG_NULL, 0 }
+};
+
+#define NUM_FMT_REGS 14
+/*
+ * COM7, COM3, COM4, HSTART, HSTOP, HREF, VSTART, VSTOP, VREF,
+ * EXHCH, EXHCL, ADC, OCOM, OFON
+ */
+static const u8 frame_size_reg_addr[NUM_FMT_REGS] = {
+ 0x12, 0x0c, 0x0d, 0x17, 0x18, 0x32, 0x19, 0x1a, 0x03,
+ 0x2a, 0x2b, 0x37, 0x38, 0x39,
+};
+
+static const u8 ov965x_sxga_regs[NUM_FMT_REGS] = {
+ 0x00, 0x00, 0x00, 0x1e, 0xbe, 0xbf, 0x01, 0x81, 0x12,
+ 0x10, 0x34, 0x81, 0x93, 0x51,
+};
+
+static const u8 ov965x_vga_regs[NUM_FMT_REGS] = {
+ 0x40, 0x04, 0x80, 0x26, 0xc6, 0xed, 0x01, 0x3d, 0x00,
+ 0x10, 0x40, 0x91, 0x12, 0x43,
+};
+
+/* Determined empirically. */
+static const u8 ov965x_qvga_regs[NUM_FMT_REGS] = {
+ 0x10, 0x04, 0x80, 0x25, 0xc5, 0xbf, 0x00, 0x80, 0x12,
+ 0x10, 0x40, 0x91, 0x12, 0x43,
+};
+
+static const struct ov965x_framesize ov965x_framesizes[] = {
+ {
+ .width = SXGA_WIDTH,
+ .height = SXGA_HEIGHT,
+ .regs = ov965x_sxga_regs,
+ .max_exp_lines = 1048,
+ }, {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .regs = ov965x_vga_regs,
+ .max_exp_lines = 498,
+ }, {
+ .width = QVGA_WIDTH,
+ .height = QVGA_HEIGHT,
+ .regs = ov965x_qvga_regs,
+ .max_exp_lines = 248,
+ },
+};
+
+struct ov965x_pixfmt {
+ enum v4l2_mbus_pixelcode code;
+ u32 colorspace;
+ /* REG_TSLB value, only bits [3:2] may be set. */
+ u8 tslb_reg;
+};
+
+static const struct ov965x_pixfmt ov965x_formats[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00},
+ { V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04},
+ { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c},
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08},
+};
+
+/*
+ * This table specifies possible frame resolution and interval
+ * combinations. Default CLKRC[5:0] divider values are valid
+ * only for 24 MHz external clock frequency.
+ */
+static struct ov965x_interval ov965x_intervals[] = {
+ {{ 100, 625 }, { SXGA_WIDTH, SXGA_HEIGHT }, 0 }, /* 6.25 fps */
+ {{ 10, 125 }, { VGA_WIDTH, VGA_HEIGHT }, 1 }, /* 12.5 fps */
+ {{ 10, 125 }, { QVGA_WIDTH, QVGA_HEIGHT }, 3 }, /* 12.5 fps */
+ {{ 1, 25 }, { VGA_WIDTH, VGA_HEIGHT }, 0 }, /* 25 fps */
+ {{ 1, 25 }, { QVGA_WIDTH, QVGA_HEIGHT }, 1 }, /* 25 fps */
+};
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov965x, ctrls.handler)->sd;
+}
+
+static inline struct ov965x *to_ov965x(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov965x, sd);
+}
+
+static int ov965x_read(struct i2c_client *client, u8 addr, u8 *val)
+{
+ u8 buf = addr;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf
+ };
+ int ret;
+
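+	/*
+	 * Register reads use two separate transfers (an address write,
+	 * then a read) rather than a single repeated-START transaction.
+	 */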
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1) {
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ if (ret == 1)
+ *val = buf;
+ }
+
+ v4l2_dbg(2, debug, client, "%s: 0x%02x @ 0x%02x. (%d)\n",
+ __func__, *val, addr, ret);
+
+ return ret == 1 ? 0 : ret;
+}
+
+static int ov965x_write(struct i2c_client *client, u8 addr, u8 val)
+{
+ u8 buf[2] = { addr, val };
+
+ int ret = i2c_master_send(client, buf, 2);
+
+ v4l2_dbg(2, debug, client, "%s: 0x%02x @ 0x%02X (%d)\n",
+ __func__, val, addr, ret);
+
+ return ret == 2 ? 0 : ret;
+}
+
+static int ov965x_write_array(struct i2c_client *client,
+ const struct i2c_rv *regs)
+{
+ int i, ret = 0;
+
+ for (i = 0; ret == 0 && regs[i].addr != REG_NULL; i++)
+ ret = ov965x_write(client, regs[i].addr, regs[i].value);
+
+ return ret;
+}
+
+static int ov965x_set_default_gamma_curve(struct ov965x *ov965x)
+{
+ static const u8 gamma_curve[] = {
+ /* Values taken from OV application note. */
+ 0x40, 0x30, 0x4b, 0x60, 0x70, 0x70, 0x70, 0x70,
+ 0x60, 0x60, 0x50, 0x48, 0x3a, 0x2e, 0x28, 0x22,
+ 0x04, 0x07, 0x10, 0x28, 0x36, 0x44, 0x52, 0x60,
+ 0x6c, 0x78, 0x8c, 0x9e, 0xbb, 0xd2, 0xe6
+ };
+ u8 addr = REG_GSP;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gamma_curve); i++) {
+ int ret = ov965x_write(ov965x->client, addr, gamma_curve[i]);
+ if (ret < 0)
+ return ret;
+ addr++;
+ }
+
+ return 0;
+}
+
+static int ov965x_set_color_matrix(struct ov965x *ov965x)
+{
+ static const u8 mtx[] = {
+ /* MTX1..MTX9, MTXS */
+ 0x3a, 0x3d, 0x03, 0x12, 0x26, 0x38, 0x40, 0x40, 0x40, 0x0d
+ };
+ u8 addr = REG_MTX(1);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtx); i++) {
+ int ret = ov965x_write(ov965x->client, addr, mtx[i]);
+ if (ret < 0)
+ return ret;
+ addr++;
+ }
+
+ return 0;
+}
+
+static void ov965x_gpio_set(int gpio, int val)
+{
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, val);
+}
+
+static void __ov965x_set_power(struct ov965x *ov965x, int on)
+{
+ if (on) {
+ ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 0);
+ ov965x_gpio_set(ov965x->gpios[GPIO_RST], 0);
+ usleep_range(25000, 26000);
+ } else {
+ ov965x_gpio_set(ov965x->gpios[GPIO_RST], 1);
+ ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 1);
+ }
+
+ ov965x->streaming = 0;
+}
+
+static int ov965x_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct i2c_client *client = ov965x->client;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, client, "%s: on: %d\n", __func__, on);
+
+ mutex_lock(&ov965x->lock);
+ if (ov965x->power == !on) {
+ __ov965x_set_power(ov965x, on);
+ if (on) {
+ ret = ov965x_write_array(client,
+ ov965x_init_regs);
+ ov965x->apply_frame_fmt = 1;
+ ov965x->ctrls.update = 1;
+ }
+ }
+ if (!ret)
+ ov965x->power += on ? 1 : -1;
+
+ WARN_ON(ov965x->power < 0);
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+/*
+ * V4L2 controls
+ */
+
+static void ov965x_update_exposure_ctrl(struct ov965x *ov965x)
+{
+ struct v4l2_ctrl *ctrl = ov965x->ctrls.exposure;
+ unsigned long fint, trow;
+ int min, max, def;
+ u8 clkrc;
+
+ mutex_lock(&ov965x->lock);
+ if (WARN_ON(!ctrl || !ov965x->frame_size)) {
+ mutex_unlock(&ov965x->lock);
+ return;
+ }
+ clkrc = DEF_CLKRC + ov965x->fiv->clkrc_div;
+ /* Calculate internal clock frequency */
+ fint = ov965x->mclk_frequency * ((clkrc >> 7) + 1) /
+ ((2 * ((clkrc & 0x3f) + 1)));
+ /* and the row interval (in us). */
+ trow = (2 * 1520 * 1000000UL) / fint;
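+	/*
+	 * Worked example (assuming a 24 MHz external clock and
+	 * clkrc_div == 0): clkrc = 0x80, so fint = 24 MHz * 2 / 2 = 24 MHz
+	 * and trow = (2 * 1520 * 10^6) / (24 * 10^6), i.e. about 126 us.
+	 */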
+ max = ov965x->frame_size->max_exp_lines * trow;
+ ov965x->exp_row_interval = trow;
+ mutex_unlock(&ov965x->lock);
+
+ v4l2_dbg(1, debug, &ov965x->sd, "clkrc: %#x, fi: %lu, tr: %lu, %d\n",
+ clkrc, fint, trow, max);
+
+ /* Update exposure time range to match current frame format. */
+ min = (trow + 100) / 100;
+ max = (max - 100) / 100;
+ def = min + (max - min) / 2;
+
+ if (v4l2_ctrl_modify_range(ctrl, min, max, 1, def))
+ v4l2_err(&ov965x->sd, "Exposure ctrl range update failed\n");
+}
+
+static int ov965x_set_banding_filter(struct ov965x *ov965x, int value)
+{
+ unsigned long mbd, light_freq;
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM8, &reg);
+ if (!ret) {
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED)
+ reg &= ~COM8_BFILT;
+ else
+ reg |= COM8_BFILT;
+ ret = ov965x_write(ov965x->client, REG_COM8, reg);
+ }
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED)
+ return 0;
+ if (WARN_ON(ov965x->fiv == NULL))
+ return -EINVAL;
+	/* Set minimal exposure time for 50/60 Hz lighting */
+ if (value == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
+ light_freq = 50;
+ else
+ light_freq = 60;
+ mbd = (1000UL * ov965x->fiv->interval.denominator *
+ ov965x->frame_size->max_exp_lines) /
+ ov965x->fiv->interval.numerator;
+ mbd = ((mbd / (light_freq * 2)) + 500) / 1000UL;
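+	/*
+	 * Worked example (VGA at 25 fps, max_exp_lines = 498, 50 Hz):
+	 * 1000 * 25 * 498 = 12450000, then (12450000 / 100 + 500) / 1000
+	 * = 125, i.e. roughly the number of rows per light half-cycle.
+	 */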
+
+ return ov965x_write(ov965x->client, REG_MBD, mbd);
+}
+
+static int ov965x_set_white_balance(struct ov965x *ov965x, int awb)
+{
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM8, &reg);
+ if (!ret) {
+		reg = awb ? reg | COM8_AWB : reg & ~COM8_AWB;
+ ret = ov965x_write(ov965x->client, REG_COM8, reg);
+ }
+ if (!ret && !awb) {
+ ret = ov965x_write(ov965x->client, REG_BLUE,
+ ov965x->ctrls.blue_balance->val);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_write(ov965x->client, REG_RED,
+ ov965x->ctrls.red_balance->val);
+ }
+ return ret;
+}
+
+#define NUM_BR_LEVELS 7
+#define NUM_BR_REGS 3
+
+static int ov965x_set_brightness(struct ov965x *ov965x, int val)
+{
+ static const u8 regs[NUM_BR_LEVELS + 1][NUM_BR_REGS] = {
+ { REG_AEW, REG_AEB, REG_VPT },
+ { 0x1c, 0x12, 0x50 }, /* -3 */
+ { 0x3d, 0x30, 0x71 }, /* -2 */
+ { 0x50, 0x44, 0x92 }, /* -1 */
+ { 0x70, 0x64, 0xc3 }, /* 0 */
+ { 0x90, 0x84, 0xd4 }, /* +1 */
+ { 0xc4, 0xbf, 0xf9 }, /* +2 */
+ { 0xd8, 0xd0, 0xfa }, /* +3 */
+ };
+ int i, ret = 0;
+
+ val += (NUM_BR_LEVELS / 2 + 1);
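+	/*
+	 * Row 0 of regs[] holds the register addresses; the -3..+3
+	 * control value has just been remapped to rows 1..7.
+	 */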
+ if (val > NUM_BR_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_BR_REGS && !ret; i++)
+ ret = ov965x_write(ov965x->client, regs[0][i],
+ regs[val][i]);
+ return ret;
+}
+
+static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+ u8 reg;
+ /*
+	 * For manual mode we need to disable AGC first, so the gain value
+	 * in REG_VREF/REG_GAIN is not overwritten.
+ */
+ if (ctrls->auto_gain->is_new) {
+ ret = ov965x_read(client, REG_COM8, &reg);
+ if (ret < 0)
+ return ret;
+ if (ctrls->auto_gain->val)
+ reg |= COM8_AGC;
+ else
+ reg &= ~COM8_AGC;
+ ret = ov965x_write(client, REG_COM8, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls->gain->is_new && !auto_gain) {
+ unsigned int gain = ctrls->gain->val;
+ unsigned int rgain;
+ int m;
+ /*
+ * Convert gain control value to the sensor's gain
+ * registers (VREF[7:6], GAIN[7:0]) format.
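+		 * For example (illustrative numbers): a requested gain of 80
+		 * gives m = 2, rgain = (80 - 64) / 4 = 4, rgain |= 0x30,
+		 * i.e. GAIN = 0x34, VREF[7:6] = 0, and the value reported
+		 * back to userspace is 4 * (16 + 4) = 80.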
+ */
+ for (m = 6; m >= 0; m--)
+ if (gain >= (1 << m) * 16)
+ break;
+ rgain = (gain - ((1 << m) * 16)) / (1 << m);
+ rgain |= (((1 << m) - 1) << 4);
+
+ ret = ov965x_write(client, REG_GAIN, rgain & 0xff);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_VREF, &reg);
+ if (ret < 0)
+ return ret;
+ reg &= ~VREF_GAIN_MASK;
+ reg |= (((rgain >> 8) & 0x3) << 6);
+ ret = ov965x_write(client, REG_VREF, reg);
+ if (ret < 0)
+ return ret;
+		/* Return the updated control value to userspace */
+ ctrls->gain->val = (1 << m) * (16 + (rgain & 0xf));
+ }
+
+ return ret;
+}
+
+static int ov965x_set_sharpness(struct ov965x *ov965x, unsigned int value)
+{
+ u8 com14, edge;
+ int ret;
+
+ ret = ov965x_read(ov965x->client, REG_COM14, &com14);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(ov965x->client, REG_EDGE, &edge);
+ if (ret < 0)
+ return ret;
+ com14 = value ? com14 | COM14_EDGE_EN : com14 & ~COM14_EDGE_EN;
+ value--;
+ if (value > 0x0f) {
+ com14 |= COM14_EEF_X2;
+ value >>= 1;
+ } else {
+ com14 &= ~COM14_EEF_X2;
+ }
+ ret = ov965x_write(ov965x->client, REG_COM14, com14);
+ if (ret < 0)
+ return ret;
+
+ edge &= ~EDGE_FACTOR_MASK;
+ edge |= ((u8)value & 0x0f);
+
+ return ov965x_write(ov965x->client, REG_EDGE, edge);
+}
+
+static int ov965x_set_exposure(struct ov965x *ov965x, int exp)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO);
+ int ret;
+ u8 reg;
+
+ if (ctrls->auto_exp->is_new) {
+ ret = ov965x_read(client, REG_COM8, &reg);
+ if (ret < 0)
+ return ret;
+ if (auto_exposure)
+ reg |= (COM8_AEC | COM8_AGC);
+ else
+ reg &= ~(COM8_AEC | COM8_AGC);
+ ret = ov965x_write(client, REG_COM8, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!auto_exposure && ctrls->exposure->is_new) {
+ unsigned int exposure = (ctrls->exposure->val * 100)
+ / ov965x->exp_row_interval;
+ /*
+ * Manual exposure value
+ * [b15:b0] - AECHM (b15:b10), AECH (b9:b2), COM1 (b1:b0)
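+		 * e.g. (illustrative) an exposure word of 0x1234 is written
+		 * as COM1[1:0] = 0x0, AECH = 0x8d, AECHM = 0x04.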
+ */
+ ret = ov965x_write(client, REG_COM1, exposure & 0x3);
+ if (!ret)
+ ret = ov965x_write(client, REG_AECH,
+ (exposure >> 2) & 0xff);
+ if (!ret)
+ ret = ov965x_write(client, REG_AECHM,
+ (exposure >> 10) & 0x3f);
+ /* Update the value to minimize rounding errors */
+ ctrls->exposure->val = ((exposure * ov965x->exp_row_interval)
+ + 50) / 100;
+ if (ret < 0)
+ return ret;
+ }
+
+ v4l2_ctrl_activate(ov965x->ctrls.brightness, !exp);
+ return 0;
+}
+
+static int ov965x_set_flip(struct ov965x *ov965x)
+{
+ u8 mvfp = 0;
+
+ if (ov965x->ctrls.hflip->val)
+ mvfp |= MVFP_MIRROR;
+
+ if (ov965x->ctrls.vflip->val)
+ mvfp |= MVFP_FLIP;
+
+ return ov965x_write(ov965x->client, REG_MVFP, mvfp);
+}
+
+#define NUM_SAT_LEVELS 5
+#define NUM_SAT_REGS 6
+
+static int ov965x_set_saturation(struct ov965x *ov965x, int val)
+{
+ static const u8 regs[NUM_SAT_LEVELS][NUM_SAT_REGS] = {
+ /* MTX(1)...MTX(6) */
+ { 0x1d, 0x1f, 0x02, 0x09, 0x13, 0x1c }, /* -2 */
+ { 0x2e, 0x31, 0x02, 0x0e, 0x1e, 0x2d }, /* -1 */
+ { 0x3a, 0x3d, 0x03, 0x12, 0x26, 0x38 }, /* 0 */
+ { 0x46, 0x49, 0x04, 0x16, 0x2e, 0x43 }, /* +1 */
+ { 0x57, 0x5c, 0x05, 0x1b, 0x39, 0x54 }, /* +2 */
+ };
+ u8 addr = REG_MTX(1);
+ int i, ret = 0;
+
+ val += (NUM_SAT_LEVELS / 2);
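+	/* Maps the -2..+2 control value to table rows 0..4. */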
+ if (val >= NUM_SAT_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SAT_REGS && !ret; i++)
+ ret = ov965x_write(ov965x->client, addr + i, regs[val][i]);
+
+ return ret;
+}
+
+static int ov965x_set_test_pattern(struct ov965x *ov965x, int value)
+{
+ int ret;
+ u8 reg;
+
+ ret = ov965x_read(ov965x->client, REG_COM23, &reg);
+ if (ret < 0)
+ return ret;
+ reg = value ? reg | COM23_TEST_MODE : reg & ~COM23_TEST_MODE;
+ return ov965x_write(ov965x->client, REG_COM23, reg);
+}
+
+static int __g_volatile_ctrl(struct ov965x *ov965x, struct v4l2_ctrl *ctrl)
+{
+ struct i2c_client *client = ov965x->client;
+ unsigned int exposure, gain, m;
+ u8 reg0, reg1, reg2;
+ int ret;
+
+ if (!ov965x->power)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ if (!ctrl->val)
+ return 0;
+ ret = ov965x_read(client, REG_GAIN, &reg0);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_VREF, &reg1);
+ if (ret < 0)
+ return ret;
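+		/*
+		 * Illustrative decode, mirroring the encoding used in
+		 * ov965x_set_gain(): reg0 = 0x34 and reg1[7:6] = 0 give
+		 * gain = 0x34, m = 1 << fls(0x3) = 4, so the reported
+		 * control value is 4 * (16 + 4) = 80.
+		 */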
+ gain = ((reg1 >> 6) << 8) | reg0;
+ m = 0x01 << fls(gain >> 4);
+ ov965x->ctrls.gain->val = m * (16 + (gain & 0xf));
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL)
+ return 0;
+ ret = ov965x_read(client, REG_COM1, &reg0);
+ if (!ret)
+ ret = ov965x_read(client, REG_AECH, &reg1);
+ if (!ret)
+ ret = ov965x_read(client, REG_AECHM, &reg2);
+ if (ret < 0)
+ return ret;
+ exposure = ((reg2 & 0x3f) << 10) | (reg1 << 2) |
+ (reg0 & 0x3);
+ ov965x->ctrls.exposure->val = ((exposure *
+ ov965x->exp_row_interval) + 50) / 100;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov965x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "g_ctrl: %s\n", ctrl->name);
+
+ mutex_lock(&ov965x->lock);
+ ret = __g_volatile_ctrl(ov965x, ctrl);
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static int ov965x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret = -EINVAL;
+
+ v4l2_dbg(1, debug, sd, "s_ctrl: %s, value: %d. power: %d\n",
+ ctrl->name, ctrl->val, ov965x->power);
+
+ mutex_lock(&ov965x->lock);
+ /*
+	 * If the device is not powered up now, postpone applying the control's
+	 * value to the hardware until it is ready to accept commands.
+ */
+ if (ov965x->power == 0) {
+ mutex_unlock(&ov965x->lock);
+ return 0;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov965x_set_white_balance(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ ret = ov965x_set_brightness(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov965x_set_exposure(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTOGAIN:
+ ret = ov965x_set_gain(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ ret = ov965x_set_flip(ov965x);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = ov965x_set_banding_filter(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ ret = ov965x_set_saturation(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ ret = ov965x_set_sharpness(ov965x, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov965x_set_test_pattern(ov965x, ctrl->val);
+ break;
+ }
+
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov965x_ctrl_ops = {
+ .g_volatile_ctrl = ov965x_g_volatile_ctrl,
+ .s_ctrl = ov965x_s_ctrl,
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ NULL
+};
+
+static int ov965x_initialize_controls(struct ov965x *ov965x)
+{
+ const struct v4l2_ctrl_ops *ops = &ov965x_ctrl_ops;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdl, 16);
+ if (ret < 0)
+ return ret;
+
+ /* Auto/manual white balance */
+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ 0, 0xff, 1, 0x80);
+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ 0, 0xff, 1, 0x80);
+ /* Auto/manual exposure */
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ /* Exposure time, in 100 us units. min/max is updated dynamically. */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_EXPOSURE_ABSOLUTE,
+ 2, 1500, 1, 500);
+ /* Auto/manual gain */
+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 0, 1, 1, 1);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 16, 64 * (16 + 15), 1, 64 * 16);
+
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
+ -2, 2, 1, 0);
+ ctrls->brightness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS,
+ -3, 3, 1, 0);
+ ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS,
+ 0, 32, 1, 6);
+
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ ctrls->light_freq = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1, 0, 0,
+ test_pattern_menu);
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 1, true);
+ v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+ ov965x->sd.ctrl_handler = hdl;
+ return 0;
+}
+
+/*
+ * V4L2 subdev video and pad level operations
+ */
+static void ov965x_get_default_format(struct v4l2_mbus_framefmt *mf)
+{
+ mf->width = ov965x_framesizes[0].width;
+ mf->height = ov965x_framesizes[0].height;
+ mf->colorspace = ov965x_formats[0].colorspace;
+ mf->code = ov965x_formats[0].code;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(ov965x_formats))
+ return -EINVAL;
+
+ code->code = ov965x_formats[code->index].code;
+ return 0;
+}
+
+static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i = ARRAY_SIZE(ov965x_formats);
+
+	if (fse->index >= ARRAY_SIZE(ov965x_framesizes))
+ return -EINVAL;
+
+ while (--i)
+ if (fse->code == ov965x_formats[i].code)
+ break;
+
+ fse->code = ov965x_formats[i].code;
+
+ fse->min_width = ov965x_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = ov965x_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int ov965x_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+
+ mutex_lock(&ov965x->lock);
+ fi->interval = ov965x->fiv->interval;
+ mutex_unlock(&ov965x->lock);
+
+ return 0;
+}
+
+static int __ov965x_set_frame_interval(struct ov965x *ov965x,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt = &ov965x->format;
+ const struct ov965x_interval *fiv = &ov965x_intervals[0];
+ u64 req_int, err, min_err = ~0ULL;
+ unsigned int i;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ req_int = (u64)(fi->interval.numerator * 10000) /
+ fi->interval.denominator;
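+	/*
+	 * req_int and err below are in 100 us units; e.g. a requested
+	 * 1/25 s interval gives req_int = 400.
+	 */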
+
+ for (i = 0; i < ARRAY_SIZE(ov965x_intervals); i++) {
+ const struct ov965x_interval *iv = &ov965x_intervals[i];
+
+ if (mbus_fmt->width != iv->size.width ||
+ mbus_fmt->height != iv->size.height)
+ continue;
+ err = abs64((u64)(iv->interval.numerator * 10000) /
+ iv->interval.denominator - req_int);
+ if (err < min_err) {
+ fiv = iv;
+ min_err = err;
+ }
+ }
+ ov965x->fiv = fiv;
+
+ v4l2_dbg(1, debug, &ov965x->sd, "Changed frame interval to %u us\n",
+ fiv->interval.numerator * 1000000 / fiv->interval.denominator);
+
+ return 0;
+}
+
+static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "Setting %d/%d frame interval\n",
+ fi->interval.numerator, fi->interval.denominator);
+
+ mutex_lock(&ov965x->lock);
+ ret = __ov965x_set_frame_interval(ov965x, fi);
+ ov965x->apply_frame_fmt = 1;
+ mutex_unlock(&ov965x->lock);
+ return ret;
+}
+
+static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, 0);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&ov965x->lock);
+ fmt->format = ov965x->format;
+ mutex_unlock(&ov965x->lock);
+
+ return 0;
+}
+
+static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
+ const struct ov965x_framesize **size)
+{
+ const struct ov965x_framesize *fsize = &ov965x_framesizes[0],
+ *match = NULL;
+ int i = ARRAY_SIZE(ov965x_framesizes);
+ unsigned int min_err = UINT_MAX;
+
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+ if (!match)
+ match = &ov965x_framesizes[0];
+ mf->width = match->width;
+ mf->height = match->height;
+ if (size)
+ *size = match;
+}
+
+static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ unsigned int index = ARRAY_SIZE(ov965x_formats);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct ov965x *ov965x = to_ov965x(sd);
+ const struct ov965x_framesize *size = NULL;
+ int ret = 0;
+
+ __ov965x_try_frame_size(mf, &size);
+
+ while (--index)
+ if (ov965x_formats[index].code == mf->code)
+ break;
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->code = ov965x_formats[index].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ mutex_lock(&ov965x->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (fh != NULL) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ }
+ } else {
+ if (ov965x->streaming) {
+ ret = -EBUSY;
+ } else {
+ ov965x->frame_size = size;
+ ov965x->format = fmt->format;
+ ov965x->tslb_reg = ov965x_formats[index].tslb_reg;
+ ov965x->apply_frame_fmt = 1;
+ }
+ }
+
+ if (!ret && fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ struct v4l2_subdev_frame_interval fiv = {
+ .interval = { 0, 1 }
+ };
+ /* Reset to minimum possible frame interval */
+ __ov965x_set_frame_interval(ov965x, &fiv);
+ }
+ mutex_unlock(&ov965x->lock);
+
+ if (!ret)
+ ov965x_update_exposure_ctrl(ov965x);
+
+ return ret;
+}
+
+static int ov965x_set_frame_size(struct ov965x *ov965x)
+{
+ int i, ret = 0;
+
+ for (i = 0; ret == 0 && i < NUM_FMT_REGS; i++)
+ ret = ov965x_write(ov965x->client, frame_size_reg_addr[i],
+ ov965x->frame_size->regs[i]);
+ return ret;
+}
+
+static int __ov965x_set_params(struct ov965x *ov965x)
+{
+ struct i2c_client *client = ov965x->client;
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+ u8 reg;
+
+ if (ov965x->apply_frame_fmt) {
+ reg = DEF_CLKRC + ov965x->fiv->clkrc_div;
+ ret = ov965x_write(client, REG_CLKRC, reg);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_set_frame_size(ov965x);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_read(client, REG_TSLB, &reg);
+ if (ret < 0)
+ return ret;
+ reg &= ~TSLB_YUYV_MASK;
+ reg |= ov965x->tslb_reg;
+ ret = ov965x_write(client, REG_TSLB, reg);
+ if (ret < 0)
+ return ret;
+ }
+ ret = ov965x_set_default_gamma_curve(ov965x);
+ if (ret < 0)
+ return ret;
+ ret = ov965x_set_color_matrix(ov965x);
+ if (ret < 0)
+ return ret;
+ /*
+	 * Select the manual banding filter; the filter will be
+	 * enabled later if required.
+ */
+ ret = ov965x_read(client, REG_COM11, &reg);
+ if (!ret)
+ reg |= COM11_BANDING;
+ ret = ov965x_write(client, REG_COM11, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * Banding filter (REG_MBD value) needs to match selected
+ * resolution and frame rate, so it's always updated here.
+ */
+ return ov965x_set_banding_filter(ov965x, ctrls->light_freq->val);
+}
+
+static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov965x *ov965x = to_ov965x(sd);
+ struct ov965x_ctrls *ctrls = &ov965x->ctrls;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, client, "%s: on: %d\n", __func__, on);
+
+ mutex_lock(&ov965x->lock);
+ if (ov965x->streaming == !on) {
+ if (on)
+ ret = __ov965x_set_params(ov965x);
+
+ if (!ret && ctrls->update) {
+ /*
+ * ov965x_s_ctrl callback takes the mutex
+ * so it needs to be released here.
+ */
+ mutex_unlock(&ov965x->lock);
+ ret = v4l2_ctrl_handler_setup(&ctrls->handler);
+
+ mutex_lock(&ov965x->lock);
+ if (!ret)
+ ctrls->update = 0;
+ }
+ if (!ret)
+ ret = ov965x_write(client, REG_COM2,
+ on ? 0x01 : 0x11);
+ }
+ if (!ret)
+ ov965x->streaming += on ? 1 : -1;
+
+ WARN_ON(ov965x->streaming < 0);
+ mutex_unlock(&ov965x->lock);
+
+ return ret;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(fh, 0);
+
+ ov965x_get_default_format(mf);
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov965x_pad_ops = {
+ .enum_mbus_code = ov965x_enum_mbus_code,
+ .enum_frame_size = ov965x_enum_frame_sizes,
+ .get_fmt = ov965x_get_fmt,
+ .set_fmt = ov965x_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops ov965x_video_ops = {
+ .s_stream = ov965x_s_stream,
+ .g_frame_interval = ov965x_g_frame_interval,
+ .s_frame_interval = ov965x_s_frame_interval,
+
+};
+
+static const struct v4l2_subdev_internal_ops ov965x_sd_internal_ops = {
+ .open = ov965x_open,
+};
+
+static const struct v4l2_subdev_core_ops ov965x_core_ops = {
+ .s_power = ov965x_s_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ov965x_subdev_ops = {
+ .core = &ov965x_core_ops,
+ .pad = &ov965x_pad_ops,
+ .video = &ov965x_video_ops,
+};
+
+/*
+ * Reset and power-down GPIO configuration
+ */
+static int ov965x_configure_gpios(struct ov965x *ov965x,
+ const struct ov9650_platform_data *pdata)
+{
+ int ret, i;
+
+ ov965x->gpios[GPIO_PWDN] = pdata->gpio_pwdn;
+ ov965x->gpios[GPIO_RST] = pdata->gpio_reset;
+
+ for (i = 0; i < ARRAY_SIZE(ov965x->gpios); i++) {
+ int gpio = ov965x->gpios[i];
+
+ if (!gpio_is_valid(gpio))
+ continue;
+ ret = devm_gpio_request_one(&ov965x->client->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "OV965X");
+ if (ret < 0)
+ return ret;
+ v4l2_dbg(1, debug, &ov965x->sd, "set gpio %d to 1\n", gpio);
+
+ gpio_set_value(gpio, 1);
+ gpio_export(gpio, 0);
+ ov965x->gpios[i] = gpio;
+ }
+
+ return 0;
+}
+
+static int ov965x_detect_sensor(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov965x *ov965x = to_ov965x(sd);
+ u8 pid, ver;
+ int ret;
+
+ mutex_lock(&ov965x->lock);
+ __ov965x_set_power(ov965x, 1);
+ usleep_range(25000, 26000);
+
+ /* Check sensor revision */
+ ret = ov965x_read(client, REG_PID, &pid);
+ if (!ret)
+ ret = ov965x_read(client, REG_VER, &ver);
+
+ __ov965x_set_power(ov965x, 0);
+
+ if (!ret) {
+ ov965x->id = OV965X_ID(pid, ver);
+ if (ov965x->id == OV9650_ID || ov965x->id == OV9652_ID) {
+ v4l2_info(sd, "Found OV%04X sensor\n", ov965x->id);
+ } else {
+ v4l2_err(sd, "Sensor detection failed (%04X, %d)\n",
+ ov965x->id, ret);
+ ret = -ENODEV;
+ }
+ }
+ mutex_unlock(&ov965x->lock);
+
+ return ret;
+}
+
+static int ov965x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct ov9650_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ struct ov965x *ov965x;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "platform data not specified\n");
+ return -EINVAL;
+ }
+
+ if (pdata->mclk_frequency == 0) {
+ dev_err(&client->dev, "MCLK frequency not specified\n");
+ return -EINVAL;
+ }
+
+ ov965x = devm_kzalloc(&client->dev, sizeof(*ov965x), GFP_KERNEL);
+ if (!ov965x)
+ return -ENOMEM;
+
+ mutex_init(&ov965x->lock);
+ ov965x->client = client;
+ ov965x->mclk_frequency = pdata->mclk_frequency;
+
+ sd = &ov965x->sd;
+ v4l2_i2c_subdev_init(sd, client, &ov965x_subdev_ops);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+
+ sd->internal_ops = &ov965x_sd_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ ret = ov965x_configure_gpios(ov965x, pdata);
+ if (ret < 0)
+ return ret;
+
+ ov965x->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &ov965x->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ov965x_initialize_controls(ov965x);
+ if (ret < 0)
+ goto err_me;
+
+ ov965x_get_default_format(&ov965x->format);
+ ov965x->frame_size = &ov965x_framesizes[0];
+ ov965x->fiv = &ov965x_intervals[0];
+
+ ret = ov965x_detect_sensor(sd);
+ if (ret < 0)
+ goto err_ctrls;
+
+ /* Update exposure time min/max to match frame format */
+ ov965x_update_exposure_ctrl(ov965x);
+
+ return 0;
+err_ctrls:
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_me:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static int ov965x_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov965x_id[] = {
+ { "OV9650", 0 },
+ { "OV9652", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ov965x_id);
+
+static struct i2c_driver ov965x_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = ov965x_probe,
+ .remove = ov965x_remove,
+ .id_table = ov965x_id,
+};
+
+module_i2c_driver(ov965x_i2c_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_DESCRIPTION("OV9650/OV9652 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/s5c73m3/Makefile b/drivers/media/i2c/s5c73m3/Makefile
new file mode 100644
index 000000000000..fa4df342d1f1
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/Makefile
@@ -0,0 +1,2 @@
+s5c73m3-objs := s5c73m3-core.o s5c73m3-spi.o s5c73m3-ctrls.o
+obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3.o
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
new file mode 100644
index 000000000000..5dbb65e1f6b7
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -0,0 +1,1704 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5c73m3.h>
+
+#include "s5c73m3.h"
+
+int s5c73m3_dbg;
+module_param_named(debug, s5c73m3_dbg, int, 0644);
+
+static int boot_from_rom = 1;
+module_param(boot_from_rom, int, 0644);
+
+static int update_fw;
+module_param(update_fw, int, 0644);
+
+#define S5C73M3_EMBEDDED_DATA_MAXLEN SZ_4K
+
+static const char * const s5c73m3_supply_names[S5C73M3_MAX_SUPPLIES] = {
+ "vdd-int", /* Digital Core supply (1.2V), CAM_ISP_CORE_1.2V */
+ "vdda", /* Analog Core supply (1.2V), CAM_SENSOR_CORE_1.2V */
+ "vdd-reg", /* Regulator input supply (2.8V), CAM_SENSOR_A2.8V */
+ "vddio-host", /* Digital Host I/O power supply (1.8V...2.8V),
+ CAM_ISP_SENSOR_1.8V */
+ "vddio-cis", /* Digital CIS I/O power (1.2V...1.8V),
+ CAM_ISP_MIPI_1.2V */
+ "vdd-af", /* Lens, CAM_AF_2.8V */
+};
+
+static const struct s5c73m3_frame_size s5c73m3_isp_resolutions[] = {
+ { 320, 240, COMM_CHG_MODE_YUV_320_240 },
+ { 352, 288, COMM_CHG_MODE_YUV_352_288 },
+ { 640, 480, COMM_CHG_MODE_YUV_640_480 },
+ { 880, 720, COMM_CHG_MODE_YUV_880_720 },
+ { 960, 720, COMM_CHG_MODE_YUV_960_720 },
+ { 1008, 672, COMM_CHG_MODE_YUV_1008_672 },
+ { 1184, 666, COMM_CHG_MODE_YUV_1184_666 },
+ { 1280, 720, COMM_CHG_MODE_YUV_1280_720 },
+ { 1536, 864, COMM_CHG_MODE_YUV_1536_864 },
+ { 1600, 1200, COMM_CHG_MODE_YUV_1600_1200 },
+ { 1632, 1224, COMM_CHG_MODE_YUV_1632_1224 },
+ { 1920, 1080, COMM_CHG_MODE_YUV_1920_1080 },
+ { 1920, 1440, COMM_CHG_MODE_YUV_1920_1440 },
+ { 2304, 1296, COMM_CHG_MODE_YUV_2304_1296 },
+ { 3264, 2448, COMM_CHG_MODE_YUV_3264_2448 },
+};
+
+static const struct s5c73m3_frame_size s5c73m3_jpeg_resolutions[] = {
+ { 640, 480, COMM_CHG_MODE_JPEG_640_480 },
+ { 800, 450, COMM_CHG_MODE_JPEG_800_450 },
+ { 800, 600, COMM_CHG_MODE_JPEG_800_600 },
+ { 1024, 768, COMM_CHG_MODE_JPEG_1024_768 },
+ { 1280, 720, COMM_CHG_MODE_JPEG_1280_720 },
+ { 1280, 960, COMM_CHG_MODE_JPEG_1280_960 },
+ { 1600, 900, COMM_CHG_MODE_JPEG_1600_900 },
+ { 1600, 1200, COMM_CHG_MODE_JPEG_1600_1200 },
+ { 2048, 1152, COMM_CHG_MODE_JPEG_2048_1152 },
+ { 2048, 1536, COMM_CHG_MODE_JPEG_2048_1536 },
+ { 2560, 1440, COMM_CHG_MODE_JPEG_2560_1440 },
+ { 2560, 1920, COMM_CHG_MODE_JPEG_2560_1920 },
+ { 3264, 1836, COMM_CHG_MODE_JPEG_3264_1836 },
+ { 3264, 2176, COMM_CHG_MODE_JPEG_3264_2176 },
+ { 3264, 2448, COMM_CHG_MODE_JPEG_3264_2448 },
+};
+
+static const struct s5c73m3_frame_size * const s5c73m3_resolutions[] = {
+ [RES_ISP] = s5c73m3_isp_resolutions,
+ [RES_JPEG] = s5c73m3_jpeg_resolutions
+};
+
+static const int s5c73m3_resolutions_len[] = {
+ [RES_ISP] = ARRAY_SIZE(s5c73m3_isp_resolutions),
+ [RES_JPEG] = ARRAY_SIZE(s5c73m3_jpeg_resolutions)
+};
+
+static const struct s5c73m3_interval s5c73m3_intervals[] = {
+ { COMM_FRAME_RATE_FIXED_7FPS, {142857, 1000000}, {3264, 2448} },
+ { COMM_FRAME_RATE_FIXED_15FPS, {66667, 1000000}, {3264, 2448} },
+ { COMM_FRAME_RATE_FIXED_20FPS, {50000, 1000000}, {2304, 1296} },
+ { COMM_FRAME_RATE_FIXED_30FPS, {33333, 1000000}, {2304, 1296} },
+};
+
+#define S5C73M3_DEFAULT_FRAME_INTERVAL 3 /* 30 fps */
+
+static void s5c73m3_fill_mbus_fmt(struct v4l2_mbus_framefmt *mf,
+ const struct s5c73m3_frame_size *fs,
+ u32 code)
+{
+ mf->width = fs->width;
+ mf->height = fs->height;
+ mf->code = code;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5c73m3_i2c_write(struct i2c_client *client, u16 addr, u16 data)
+{
+ u8 buf[4] = { addr >> 8, addr & 0xff, data >> 8, data & 0xff };
+
+ int ret = i2c_master_send(client, buf, sizeof(buf));
+
+ v4l_dbg(4, s5c73m3_dbg, client, "%s: addr 0x%04x, data 0x%04x\n",
+ __func__, addr, data);
+
+ if (ret == 4)
+ return 0;
+
+ return ret < 0 ? ret : -EREMOTEIO;
+}
+
+static int s5c73m3_i2c_read(struct i2c_client *client, u16 addr, u16 *data)
+{
+ int ret;
+ u8 rbuf[2], wbuf[2] = { addr >> 8, addr & 0xff };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(wbuf),
+ .buf = wbuf
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rbuf),
+ .buf = rbuf
+ }
+ };
+ /*
+ * Issue repeated START after writing 2 address bytes and
+ * a single STOP after reading the data bytes.
+ */
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret == 2) {
+ *data = be16_to_cpup((u16 *)rbuf);
+ v4l2_dbg(4, s5c73m3_dbg, client,
+ "%s: addr: 0x%04x, data: 0x%04x\n",
+ __func__, addr, *data);
+ return 0;
+ }
+
+ v4l2_err(client, "I2C read failed: addr: %04x, (%d)\n", addr, ret);
+
+ return ret >= 0 ? -EREMOTEIO : ret;
+}
+
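+/*
+ * The ISP exposes a 32-bit register space through three 16-bit I2C
+ * registers: REG_CMDWR_ADDRH/REG_CMDRD_ADDRH and *_ADDRL select the target
+ * address and REG_CMDBUF_ADDR carries the data word. s5c73m3_write() and
+ * s5c73m3_read() cache the last programmed address and resend only the
+ * halves that changed; the cache advances by two after each access since
+ * the hardware presumably auto-increments the address. Any I2C error
+ * resets the cache so both halves get reprogrammed on the next access.
+ */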
+int s5c73m3_write(struct s5c73m3 *state, u32 addr, u16 data)
+{
+ struct i2c_client *client = state->i2c_client;
+ int ret;
+
+ if ((addr ^ state->i2c_write_address) & 0xffff0000) {
+ ret = s5c73m3_i2c_write(client, REG_CMDWR_ADDRH, addr >> 16);
+ if (ret < 0) {
+ state->i2c_write_address = 0;
+ return ret;
+ }
+ }
+
+ if ((addr ^ state->i2c_write_address) & 0xffff) {
+ ret = s5c73m3_i2c_write(client, REG_CMDWR_ADDRL, addr & 0xffff);
+ if (ret < 0) {
+ state->i2c_write_address = 0;
+ return ret;
+ }
+ }
+
+ state->i2c_write_address = addr;
+
+ ret = s5c73m3_i2c_write(client, REG_CMDBUF_ADDR, data);
+ if (ret < 0)
+ return ret;
+
+ state->i2c_write_address += 2;
+
+ return ret;
+}
+
+int s5c73m3_read(struct s5c73m3 *state, u32 addr, u16 *data)
+{
+ struct i2c_client *client = state->i2c_client;
+ int ret;
+
+ if ((addr ^ state->i2c_read_address) & 0xffff0000) {
+ ret = s5c73m3_i2c_write(client, REG_CMDRD_ADDRH, addr >> 16);
+ if (ret < 0) {
+ state->i2c_read_address = 0;
+ return ret;
+ }
+ }
+
+ if ((addr ^ state->i2c_read_address) & 0xffff) {
+ ret = s5c73m3_i2c_write(client, REG_CMDRD_ADDRL, addr & 0xffff);
+ if (ret < 0) {
+ state->i2c_read_address = 0;
+ return ret;
+ }
+ }
+
+ state->i2c_read_address = addr;
+
+ ret = s5c73m3_i2c_read(client, REG_CMDBUF_ADDR, data);
+ if (ret < 0)
+ return ret;
+
+ state->i2c_read_address += 2;
+
+ return ret;
+}
+
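+/*
+ * Poll REG_STATUS until it reports the expected value, for at most two
+ * seconds, sleeping 0.5-1 ms between reads. On mismatch after the timeout
+ * the auxiliary I2C status registers are dumped and -ETIMEDOUT is returned.
+ */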
+static int s5c73m3_check_status(struct s5c73m3 *state, unsigned int value)
+{
+ unsigned long start = jiffies;
+ unsigned long end = start + msecs_to_jiffies(2000);
+ int ret = 0;
+ u16 status;
+ int count = 0;
+
+ while (time_is_after_jiffies(end)) {
+ ret = s5c73m3_read(state, REG_STATUS, &status);
+ if (ret < 0 || status == value)
+ break;
+ usleep_range(500, 1000);
+ ++count;
+ }
+
+ if (count > 0)
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "status check took %dms\n",
+ jiffies_to_msecs(jiffies - start));
+
+ if (ret == 0 && status != value) {
+ u16 i2c_status = 0;
+ u16 i2c_seq_status = 0;
+
+ s5c73m3_read(state, REG_I2C_STATUS, &i2c_status);
+ s5c73m3_read(state, REG_I2C_SEQ_STATUS, &i2c_seq_status);
+
+ v4l2_err(&state->sensor_sd,
+ "wrong status %#x, expected: %#x, i2c_status: %#x/%#x\n",
+ status, value, i2c_status, i2c_seq_status);
+
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
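+/*
+ * ISP commands go through what appears to be a simple mailbox: wait until
+ * the previous command has completed, write the command id and its argument
+ * at 0x00095000/0x00095002, then write 1 to REG_STATUS to start execution.
+ * Callers poll for completion with s5c73m3_check_status() where needed.
+ */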
+int s5c73m3_isp_command(struct s5c73m3 *state, u16 command, u16 data)
+{
+ int ret;
+
+ ret = s5c73m3_check_status(state, REG_STATUS_ISP_COMMAND_COMPLETED);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x00095000, command);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x00095002, data);
+ if (ret < 0)
+ return ret;
+
+ return s5c73m3_write(state, REG_STATUS, 0x0001);
+}
+
+static int s5c73m3_isp_comm_result(struct s5c73m3 *state, u16 command,
+ u16 *data)
+{
+ return s5c73m3_read(state, COMM_RESULT_OFFSET + command, data);
+}
+
+static int s5c73m3_set_af_softlanding(struct s5c73m3 *state)
+{
+ unsigned long start = jiffies;
+ u16 af_softlanding;
+ int count = 0;
+ int ret;
+ const char *msg;
+
+ ret = s5c73m3_isp_command(state, COMM_AF_SOFTLANDING,
+ COMM_AF_SOFTLANDING_ON);
+ if (ret < 0) {
+ v4l2_info(&state->sensor_sd, "AF soft-landing failed\n");
+ return ret;
+ }
+
+ for (;;) {
+ ret = s5c73m3_isp_comm_result(state, COMM_AF_SOFTLANDING,
+ &af_softlanding);
+ if (ret < 0) {
+ msg = "failed";
+ break;
+ }
+ if (af_softlanding == COMM_AF_SOFTLANDING_RES_COMPLETE) {
+ msg = "succeeded";
+ break;
+ }
+ if (++count > 100) {
+ ret = -ETIME;
+ msg = "timed out";
+ break;
+ }
+ msleep(25);
+ }
+
+ v4l2_info(&state->sensor_sd, "AF soft-landing %s after %dms\n",
+ msg, jiffies_to_msecs(jiffies - start));
+
+ return ret;
+}
+
+static int s5c73m3_load_fw(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ struct i2c_client *client = state->i2c_client;
+ const struct firmware *fw;
+ int ret;
+ char fw_name[20];
+
+ snprintf(fw_name, sizeof(fw_name), "SlimISP_%.2s.bin",
+ state->fw_file_version);
+ ret = request_firmware(&fw, fw_name, &client->dev);
+ if (ret < 0) {
+ v4l2_err(sd, "Firmware request failed (%s)\n", fw_name);
+ return -EINVAL;
+ }
+
+ v4l2_info(sd, "Loading firmware (%s, %d B)\n", fw_name, fw->size);
+
+ ret = s5c73m3_spi_write(state, fw->data, fw->size, 64);
+
+ if (ret >= 0)
+ state->isp_ready = 1;
+ else
+ v4l2_err(sd, "SPI write failed\n");
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int s5c73m3_set_frame_size(struct s5c73m3 *state)
+{
+ const struct s5c73m3_frame_size *prev_size =
+ state->sensor_pix_size[RES_ISP];
+ const struct s5c73m3_frame_size *cap_size =
+ state->sensor_pix_size[RES_JPEG];
+ unsigned int chg_mode;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Preview size: %dx%d, reg_val: 0x%x\n",
+ prev_size->width, prev_size->height, prev_size->reg_val);
+
+ chg_mode = prev_size->reg_val | COMM_CHG_MODE_NEW;
+
+ if (state->mbus_code == S5C73M3_JPEG_FMT) {
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Capture size: %dx%d, reg_val: 0x%x\n",
+ cap_size->width, cap_size->height, cap_size->reg_val);
+ chg_mode |= cap_size->reg_val;
+ }
+
+ return s5c73m3_isp_command(state, COMM_CHG_MODE, chg_mode);
+}
+
+static int s5c73m3_set_frame_rate(struct s5c73m3 *state)
+{
+ int ret;
+
+ if (state->ctrls.stabilization->val)
+ return 0;
+
+ if (WARN_ON(state->fiv == NULL))
+ return -EINVAL;
+
+ ret = s5c73m3_isp_command(state, COMM_FRAME_RATE, state->fiv->fps_reg);
+ if (!ret)
+ state->apply_fiv = 0;
+
+ return ret;
+}
+
+static int __s5c73m3_s_stream(struct s5c73m3 *state, struct v4l2_subdev *sd,
+ int on)
+{
+ u16 mode;
+ int ret;
+
+ if (on && state->apply_fmt) {
+ if (state->mbus_code == S5C73M3_JPEG_FMT)
+ mode = COMM_IMG_OUTPUT_INTERLEAVED;
+ else
+ mode = COMM_IMG_OUTPUT_YUV;
+
+ ret = s5c73m3_isp_command(state, COMM_IMG_OUTPUT, mode);
+ if (!ret)
+ ret = s5c73m3_set_frame_size(state);
+ if (ret)
+ return ret;
+ state->apply_fmt = 0;
+ }
+
+ ret = s5c73m3_isp_command(state, COMM_SENSOR_STREAMING, !!on);
+ if (ret)
+ return ret;
+
+ state->streaming = !!on;
+
+ if (!on)
+ return ret;
+
+ if (state->apply_fiv) {
+ ret = s5c73m3_set_frame_rate(state);
+ if (ret < 0)
+ v4l2_err(sd, "Error setting frame rate(%d)\n", ret);
+ }
+
+ return s5c73m3_check_status(state, REG_STATUS_ISP_COMMAND_COMPLETED);
+}
+
+static int s5c73m3_oif_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ mutex_lock(&state->lock);
+ ret = __s5c73m3_s_stream(state, sd, on);
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_system_status_wait(struct s5c73m3 *state, u32 value,
+ unsigned int delay, unsigned int steps)
+{
+ u16 reg = 0;
+
+ while (steps-- > 0) {
+ int ret = s5c73m3_read(state, 0x30100010, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg == value)
+ return 0;
+ usleep_range(delay, delay + 25);
+ }
+ return -ETIMEDOUT;
+}
+
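+/*
+ * Read the firmware version string, the sensor type string and the firmware
+ * size from the ISP. The strings are stored as little-endian 16-bit words.
+ * Offset 0x60 appears to select the copy exposed by a running ISP firmware,
+ * while offset 0 is used when reading directly from the F-ROM.
+ */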
+static int s5c73m3_read_fw_version(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int i, ret;
+ u16 data[2];
+ int offset;
+
+ offset = state->isp_ready ? 0x60 : 0;
+
+ for (i = 0; i < S5C73M3_SENSOR_FW_LEN / 2; i++) {
+ ret = s5c73m3_read(state, offset + i * 2, data);
+ if (ret < 0)
+ return ret;
+ state->sensor_fw[i * 2] = (char)(*data & 0xff);
+ state->sensor_fw[i * 2 + 1] = (char)(*data >> 8);
+ }
+ state->sensor_fw[S5C73M3_SENSOR_FW_LEN] = '\0';
+
+ for (i = 0; i < S5C73M3_SENSOR_TYPE_LEN / 2; i++) {
+ ret = s5c73m3_read(state, offset + 6 + i * 2, data);
+ if (ret < 0)
+ return ret;
+ state->sensor_type[i * 2] = (char)(*data & 0xff);
+ state->sensor_type[i * 2 + 1] = (char)(*data >> 8);
+ }
+ state->sensor_type[S5C73M3_SENSOR_TYPE_LEN] = '\0';
+
+ ret = s5c73m3_read(state, offset + 0x14, data);
+ if (ret >= 0) {
+ ret = s5c73m3_read(state, offset + 0x16, data + 1);
+ if (ret >= 0)
+ state->fw_size = data[0] + (data[1] << 16);
+ }
+
+ v4l2_info(sd, "Sensor type: %s, FW version: %s\n",
+ state->sensor_type, state->sensor_fw);
+ return ret;
+}
+
+static int s5c73m3_fw_update_from(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ u16 status = COMM_FW_UPDATE_NOT_READY;
+ int ret;
+ int count = 0;
+
+ v4l2_warn(sd, "Updating F-ROM firmware.\n");
+ do {
+ if (status == COMM_FW_UPDATE_NOT_READY) {
+ ret = s5c73m3_isp_command(state, COMM_FW_UPDATE, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = s5c73m3_read(state, 0x00095906, &status);
+ if (ret < 0)
+ return ret;
+ switch (status) {
+ case COMM_FW_UPDATE_FAIL:
+ v4l2_warn(sd, "Updating F-ROM firmware failed.\n");
+ return -EIO;
+ case COMM_FW_UPDATE_SUCCESS:
+ v4l2_warn(sd, "Updating F-ROM firmware finished.\n");
+ return 0;
+ }
+ ++count;
+ msleep(20);
+ } while (count < 500);
+
+ v4l2_warn(sd, "Updating F-ROM firmware timed-out.\n");
+ return -ETIMEDOUT;
+}
+
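+/*
+ * Boot with firmware download over SPI: start the ARM MCU, wait for the
+ * boot status, switch to SPI boot mode, push the firmware blob with
+ * s5c73m3_load_fw(), then reset, remap and restart the MCU before reading
+ * back the firmware version. The magic register writes follow what appears
+ * to be the vendor-provided initialization sequence.
+ */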
+static int s5c73m3_spi_boot(struct s5c73m3 *state, bool load_fw)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(400, 500);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 3);
+ if (ret < 0) {
+ v4l2_err(sd, "booting failed: %d\n", ret);
+ return ret;
+ }
+
+ /* P,M,S and Boot Mode */
+ ret = s5c73m3_write(state, 0x30100014, 0x2146);
+ if (ret < 0)
+ return ret;
+
+ ret = s5c73m3_write(state, 0x30100010, 0x210c);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(200, 250);
+
+ /* Check SPI status */
+ ret = s5c73m3_system_status_wait(state, 0x210d, 100, 300);
+ if (ret < 0)
+ v4l2_err(sd, "SPI not ready: %d\n", ret);
+
+ /* Firmware download over SPI */
+ if (load_fw)
+ s5c73m3_load_fw(sd);
+
+ /* MCU reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+
+ /* MCU restart */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0 || !load_fw)
+ return ret;
+
+ ret = s5c73m3_read_fw_version(state);
+ if (ret < 0)
+ return ret;
+
+ if (load_fw && update_fw) {
+ ret = s5c73m3_fw_update_from(state);
+ update_fw = 0;
+ }
+
+ return ret;
+}
+
+static int s5c73m3_set_timing_register_for_vdd(struct s5c73m3 *state)
+{
+ static const u32 regs[][2] = {
+ { 0x30100018, 0x0618 },
+ { 0x3010001c, 0x10c1 },
+ { 0x30100020, 0x249e }
+ };
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = s5c73m3_write(state, regs[i][0], regs[i][1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s5c73m3_set_fw_file_version(struct s5c73m3 *state)
+{
+ switch (state->sensor_fw[0]) {
+ case 'G':
+ case 'O':
+ state->fw_file_version[0] = 'G';
+ break;
+ case 'S':
+ case 'Z':
+ state->fw_file_version[0] = 'Z';
+ break;
+ }
+
+ switch (state->sensor_fw[1]) {
+ case 'C'...'F':
+ state->fw_file_version[1] = state->sensor_fw[1];
+ break;
+ }
+}
+
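+/*
+ * Read the firmware revision stored in the sensor's F-ROM without loading
+ * any firmware, so that s5c73m3_set_fw_file_version() can derive the name
+ * of the matching SlimISP_xx.bin file for a later SPI boot.
+ */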
+static int s5c73m3_get_fw_version(struct s5c73m3 *state)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+ usleep_range(400, 500);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 3);
+ if (ret < 0) {
+ v4l2_err(sd, "%s: booting failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* Change I/O Driver Current in order to read from F-ROM */
+ ret = s5c73m3_write(state, 0x30100120, 0x0820);
+ ret = s5c73m3_write(state, 0x30100124, 0x0820);
+
+ /* Offset Setting */
+ ret = s5c73m3_write(state, 0x00010418, 0x0008);
+
+ /* P,M,S and Boot Mode */
+ ret = s5c73m3_write(state, 0x30100014, 0x2146);
+ if (ret < 0)
+ return ret;
+ ret = s5c73m3_write(state, 0x30100010, 0x230c);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(200, 250);
+
+ /* Check SPI status */
+ ret = s5c73m3_system_status_wait(state, 0x230e, 100, 300);
+ if (ret < 0)
+ v4l2_err(sd, "SPI not ready: %d\n", ret);
+
+ /* ARM reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+
+ s5c73m3_set_timing_register_for_vdd(state);
+
+ ret = s5c73m3_read_fw_version(state);
+
+ s5c73m3_set_fw_file_version(state);
+
+ return ret;
+}
+
+static int s5c73m3_rom_boot(struct s5c73m3 *state, bool load_fw)
+{
+ static const u32 boot_regs[][2] = {
+ { 0x3100010c, 0x0044 },
+ { 0x31000108, 0x000d },
+ { 0x31000304, 0x0001 },
+ { 0x00010000, 0x5800 },
+ { 0x00010002, 0x0002 },
+ { 0x31000000, 0x0001 },
+ { 0x30100014, 0x1b85 },
+ { 0x30100010, 0x230c }
+ };
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ int i, ret;
+
+ /* Run ARM MCU */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+ usleep_range(400, 450);
+
+ /* Check booting status */
+ ret = s5c73m3_system_status_wait(state, 0x0c, 100, 4);
+ if (ret < 0) {
+ v4l2_err(sd, "Booting failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(boot_regs); i++) {
+ ret = s5c73m3_write(state, boot_regs[i][0], boot_regs[i][1]);
+ if (ret < 0)
+ return ret;
+ }
+ msleep(200);
+
+ /* Check the binary read status */
+ ret = s5c73m3_system_status_wait(state, 0x230e, 1000, 150);
+ if (ret < 0) {
+ v4l2_err(sd, "Binary read failed: %d\n", ret);
+ return ret;
+ }
+
+ /* ARM reset */
+ ret = s5c73m3_write(state, 0x30000004, 0xfffd);
+ if (ret < 0)
+ return ret;
+ /* Remap */
+ ret = s5c73m3_write(state, 0x301000a4, 0x0183);
+ if (ret < 0)
+ return ret;
+ /* MCU re-start */
+ ret = s5c73m3_write(state, 0x30000004, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ state->isp_ready = 1;
+
+ return s5c73m3_read_fw_version(state);
+}
+
+static int s5c73m3_isp_init(struct s5c73m3 *state)
+{
+ int ret;
+
+ state->i2c_read_address = 0;
+ state->i2c_write_address = 0;
+
+ ret = s5c73m3_i2c_write(state->i2c_client, AHB_MSB_ADDR_PTR, 0x3310);
+ if (ret < 0)
+ return ret;
+
+ if (boot_from_rom)
+ return s5c73m3_rom_boot(state, true);
+ else
+ return s5c73m3_spi_boot(state, true);
+}
+
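+/*
+ * Return the supported frame size closest to the requested one, using the
+ * sum of the width and height differences as the distance metric.
+ */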
+static const struct s5c73m3_frame_size *s5c73m3_find_frame_size(
+ struct v4l2_mbus_framefmt *fmt,
+ enum s5c73m3_resolution_types idx)
+{
+ const struct s5c73m3_frame_size *fs;
+ const struct s5c73m3_frame_size *best_fs;
+ int best_dist = INT_MAX;
+ int i;
+
+ fs = s5c73m3_resolutions[idx];
+ best_fs = NULL;
+ for (i = 0; i < s5c73m3_resolutions_len[idx]; ++i) {
+ int dist = abs(fs->width - fmt->width) +
+ abs(fs->height - fmt->height);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_fs = fs;
+ }
+ ++fs;
+ }
+
+ return best_fs;
+}
+
+static void s5c73m3_oif_try_format(struct s5c73m3 *state,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt,
+ const struct s5c73m3_frame_size **fs)
+{
+ u32 code;
+
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_ISP);
+ code = S5C73M3_ISP_FMT;
+ break;
+ case OIF_JPEG_PAD:
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_JPEG);
+ code = S5C73M3_JPEG_FMT;
+ break;
+ case OIF_SOURCE_PAD:
+ default:
+ if (fmt->format.code == S5C73M3_JPEG_FMT)
+ code = S5C73M3_JPEG_FMT;
+ else
+ code = S5C73M3_ISP_FMT;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ *fs = state->oif_pix_size[RES_ISP];
+ else
+ *fs = s5c73m3_find_frame_size(
+ v4l2_subdev_get_try_format(fh,
+ OIF_ISP_PAD),
+ RES_ISP);
+ break;
+ }
+
+ s5c73m3_fill_mbus_fmt(&fmt->format, *fs, code);
+}
+
+static void s5c73m3_try_format(struct s5c73m3 *state,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt,
+ const struct s5c73m3_frame_size **fs)
+{
+ u32 code;
+
+ if (fmt->pad == S5C73M3_ISP_PAD) {
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_ISP);
+ code = S5C73M3_ISP_FMT;
+ } else {
+ *fs = s5c73m3_find_frame_size(&fmt->format, RES_JPEG);
+ code = S5C73M3_JPEG_FMT;
+ }
+
+ s5c73m3_fill_mbus_fmt(&fmt->format, *fs, code);
+}
+
+static int s5c73m3_oif_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+
+ if (fi->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fi->interval = state->fiv->interval;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
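+/*
+ * Select the fixed frame rate whose frame time (in milliseconds) is closest
+ * to the requested interval. Entries whose maximum resolution is smaller
+ * than the current preview size are skipped, as the higher rates are only
+ * available in the smaller sensor modes.
+ */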
+static int __s5c73m3_set_frame_interval(struct s5c73m3 *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ const struct s5c73m3_frame_size *prev_size =
+ state->sensor_pix_size[RES_ISP];
+ const struct s5c73m3_interval *fiv = &s5c73m3_intervals[0];
+ unsigned int ret, min_err = UINT_MAX;
+ unsigned int i, fr_time;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ fr_time = fi->interval.numerator * 1000 / fi->interval.denominator;
+
+ for (i = 0; i < ARRAY_SIZE(s5c73m3_intervals); i++) {
+ const struct s5c73m3_interval *iv = &s5c73m3_intervals[i];
+
+ if (prev_size->width > iv->size.width ||
+ prev_size->height > iv->size.height)
+ continue;
+
+ ret = abs(iv->interval.numerator / 1000 - fr_time);
+ if (ret < min_err) {
+ fiv = iv;
+ min_err = ret;
+ }
+ }
+ state->fiv = fiv;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Changed frame interval to %u us\n", fiv->interval.numerator);
+ return 0;
+}
+
+static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ if (fi->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "Setting %d/%d frame interval\n",
+ fi->interval.numerator, fi->interval.denominator);
+
+ mutex_lock(&state->lock);
+
+ ret = __s5c73m3_set_frame_interval(state, fi);
+ if (!ret) {
+ if (state->streaming)
+ ret = s5c73m3_set_frame_rate(state);
+ else
+ state->apply_fiv = 1;
+ }
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ const struct s5c73m3_interval *fi;
+ int ret = 0;
+
+ if (fie->pad != OIF_SOURCE_PAD)
+ return -EINVAL;
+ if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fi = &s5c73m3_intervals[fie->index];
+ if (fie->width > fi->size.width || fie->height > fi->size.height)
+ ret = -EINVAL;
+ else
+ fie->interval = fi->interval;
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_get_pad_code(int pad, int index)
+{
+ if (pad == OIF_SOURCE_PAD) {
+ if (index > 1)
+ return -EINVAL;
+ return (index == 0) ? S5C73M3_ISP_FMT : S5C73M3_JPEG_FMT;
+ }
+
+ if (index > 0)
+ return -EINVAL;
+
+ return (pad == OIF_ISP_PAD) ? S5C73M3_ISP_FMT : S5C73M3_JPEG_FMT;
+}
+
+static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ const struct s5c73m3_frame_size *fs;
+ u32 code;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ switch (fmt->pad) {
+ case S5C73M3_ISP_PAD:
+ code = S5C73M3_ISP_FMT;
+ fs = state->sensor_pix_size[RES_ISP];
+ break;
+ case S5C73M3_JPEG_PAD:
+ code = S5C73M3_JPEG_FMT;
+ fs = state->sensor_pix_size[RES_JPEG];
+ break;
+ default:
+ mutex_unlock(&state->lock);
+ return -EINVAL;
+ }
+ s5c73m3_fill_mbus_fmt(&fmt->format, fs, code);
+
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ const struct s5c73m3_frame_size *fs;
+ u32 code;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ code = S5C73M3_ISP_FMT;
+ fs = state->oif_pix_size[RES_ISP];
+ break;
+ case OIF_JPEG_PAD:
+ code = S5C73M3_JPEG_FMT;
+ fs = state->oif_pix_size[RES_JPEG];
+ break;
+ case OIF_SOURCE_PAD:
+ code = state->mbus_code;
+ fs = state->oif_pix_size[RES_ISP];
+ break;
+ default:
+ mutex_unlock(&state->lock);
+ return -EINVAL;
+ }
+ s5c73m3_fill_mbus_fmt(&fmt->format, fs, code);
+
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct s5c73m3_frame_size *frame_size = NULL;
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_framefmt *mf;
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ s5c73m3_try_format(state, fh, fmt, &frame_size);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ switch (fmt->pad) {
+ case S5C73M3_ISP_PAD:
+ state->sensor_pix_size[RES_ISP] = frame_size;
+ break;
+ case S5C73M3_JPEG_PAD:
+ state->sensor_pix_size[RES_JPEG] = frame_size;
+ break;
+ default:
+ ret = -EBUSY;
+ }
+
+ if (state->streaming)
+ ret = -EBUSY;
+ else
+ state->apply_fmt = 1;
+ }
+
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct s5c73m3_frame_size *frame_size = NULL;
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_framefmt *mf;
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ s5c73m3_oif_try_format(state, fh, fmt, &frame_size);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ switch (fmt->pad) {
+ case OIF_ISP_PAD:
+ state->oif_pix_size[RES_ISP] = frame_size;
+ break;
+ case OIF_JPEG_PAD:
+ state->oif_pix_size[RES_JPEG] = frame_size;
+ break;
+ case OIF_SOURCE_PAD:
+ state->mbus_code = fmt->format.code;
+ break;
+ default:
+ ret = -EBUSY;
+ }
+
+ if (state->streaming)
+ ret = -EBUSY;
+ else
+ state->apply_fmt = 1;
+ }
+
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5c73m3_oif_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int i;
+
+ if (pad != OIF_SOURCE_PAD || fd == NULL)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ fd->num_entries = 2;
+ for (i = 0; i < fd->num_entries; i++)
+ fd->entry[i] = state->frame_desc.entry[i];
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
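+/*
+ * The interleaved output carries two plane descriptions: entry 0, presumably
+ * the JPEG payload, is fixed at 10 MiB here, while entry 1 is clamped to at
+ * least S5C73M3_EMBEDDED_DATA_MAXLEN bytes for the embedded data.
+ */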
+static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ struct v4l2_mbus_frame_desc *frame_desc = &state->frame_desc;
+ int i;
+
+ if (pad != OIF_SOURCE_PAD || fd == NULL)
+ return -EINVAL;
+
+ fd->entry[0].length = 10 * SZ_1M;
+ fd->entry[1].length = max_t(u32, fd->entry[1].length,
+ S5C73M3_EMBEDDED_DATA_MAXLEN);
+ fd->num_entries = 2;
+
+ mutex_lock(&state->lock);
+ for (i = 0; i < fd->num_entries; i++)
+ frame_desc->entry[i] = fd->entry[i];
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const int codes[] = {
+ [S5C73M3_ISP_PAD] = S5C73M3_ISP_FMT,
+ [S5C73M3_JPEG_PAD] = S5C73M3_JPEG_FMT};
+
+ if (code->index > 0 || code->pad >= S5C73M3_NUM_PADS)
+ return -EINVAL;
+
+ code->code = codes[code->pad];
+
+ return 0;
+}
+
+static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ int ret;
+
+ ret = s5c73m3_oif_get_pad_code(code->pad, code->index);
+ if (ret < 0)
+ return ret;
+
+ code->code = ret;
+
+ return 0;
+}
+
+static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int idx;
+
+ if (fse->pad == S5C73M3_ISP_PAD) {
+ if (fse->code != S5C73M3_ISP_FMT)
+ return -EINVAL;
+ idx = RES_ISP;
+ } else {
+ if (fse->code != S5C73M3_JPEG_FMT)
+ return -EINVAL;
+ idx = RES_JPEG;
+ }
+
+ if (fse->index >= s5c73m3_resolutions_len[idx])
+ return -EINVAL;
+
+ fse->min_width = s5c73m3_resolutions[idx][fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = s5c73m3_resolutions[idx][fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int idx;
+
+ if (fse->pad == OIF_SOURCE_PAD) {
+ if (fse->index > 0)
+ return -EINVAL;
+
+ switch (fse->code) {
+ case S5C73M3_JPEG_FMT:
+ case S5C73M3_ISP_FMT: {
+ struct v4l2_mbus_framefmt *mf =
+ v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+
+ fse->max_width = fse->min_width = mf->width;
+ fse->max_height = fse->min_height = mf->height;
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (fse->code != s5c73m3_oif_get_pad_code(fse->pad, 0))
+ return -EINVAL;
+
+ if (fse->pad == OIF_JPEG_PAD)
+ idx = RES_JPEG;
+ else
+ idx = RES_ISP;
+
+ if (fse->index >= s5c73m3_resolutions_len[idx])
+ return -EINVAL;
+
+ fse->min_width = s5c73m3_resolutions[idx][fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = s5c73m3_resolutions[idx][fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int s5c73m3_oif_log_status(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+
+ v4l2_info(sd, "power: %d, apply_fmt: %d\n", state->power,
+ state->apply_fmt);
+
+ return 0;
+}
+
+static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, S5C73M3_ISP_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, S5C73M3_JPEG_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
+ S5C73M3_JPEG_FMT);
+
+ return 0;
+}
+
+static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_ISP_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_JPEG_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
+ S5C73M3_JPEG_FMT);
+
+ mf = v4l2_subdev_get_try_format(fh, OIF_SOURCE_PAD);
+ s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
+ S5C73M3_ISP_FMT);
+ return 0;
+}
+
+static int s5c73m3_gpio_set_value(struct s5c73m3 *priv, int id, u32 val)
+{
+ if (!gpio_is_valid(priv->gpio[id].gpio))
+ return 0;
+ gpio_set_value(priv->gpio[id].gpio, !!val);
+ return 1;
+}
+
+static int s5c73m3_gpio_assert(struct s5c73m3 *priv, int id)
+{
+ return s5c73m3_gpio_set_value(priv, id, priv->gpio[id].level);
+}
+
+static int s5c73m3_gpio_deassert(struct s5c73m3 *priv, int id)
+{
+ return s5c73m3_gpio_set_value(priv, id, !priv->gpio[id].level);
+}
+
+static int __s5c73m3_power_on(struct s5c73m3 *state)
+{
+ int i, ret;
+
+ for (i = 0; i < S5C73M3_MAX_SUPPLIES; i++) {
+ ret = regulator_enable(state->supplies[i].consumer);
+ if (ret)
+ goto err;
+ }
+
+ s5c73m3_gpio_deassert(state, STBY);
+ usleep_range(100, 200);
+
+ s5c73m3_gpio_deassert(state, RST);
+ usleep_range(50, 100);
+
+ return 0;
+err:
+ for (--i; i >= 0; i--)
+ regulator_disable(state->supplies[i].consumer);
+ return ret;
+}
+
+static int __s5c73m3_power_off(struct s5c73m3 *state)
+{
+ int i, ret;
+
+ if (s5c73m3_gpio_assert(state, RST))
+ usleep_range(10, 50);
+
+ if (s5c73m3_gpio_assert(state, STBY))
+ usleep_range(100, 200);
+ state->streaming = 0;
+ state->isp_ready = 0;
+
+ for (i = S5C73M3_MAX_SUPPLIES - 1; i >= 0; i--) {
+ ret = regulator_disable(state->supplies[i].consumer);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ for (++i; i < S5C73M3_MAX_SUPPLIES; i++)
+ regulator_enable(state->supplies[i].consumer);
+
+ return ret;
+}
+
+static int s5c73m3_oif_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ if (on && !state->power) {
+ ret = __s5c73m3_power_on(state);
+ if (!ret)
+ ret = s5c73m3_isp_init(state);
+ if (!ret) {
+ state->apply_fiv = 1;
+ state->apply_fmt = 1;
+ }
+ } else if (!on == state->power) {
+ ret = s5c73m3_set_af_softlanding(state);
+ if (!ret)
+ ret = __s5c73m3_power_off(state);
+ else
+ v4l2_err(sd, "Soft landing lens failed\n");
+ }
+ if (!ret)
+ state->power += on ? 1 : -1;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "%s: power: %d\n",
+ __func__, state->power);
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
+{
+ struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ int ret;
+
+ ret = v4l2_device_register_subdev(sd->v4l2_dev, &state->sensor_sd);
+ if (ret) {
+ v4l2_err(sd->v4l2_dev, "Failed to register %s\n",
+ state->oif_sd.name);
+ return ret;
+ }
+
+ ret = media_entity_create_link(&state->sensor_sd.entity,
+ S5C73M3_ISP_PAD, &state->oif_sd.entity, OIF_ISP_PAD,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ ret = media_entity_create_link(&state->sensor_sd.entity,
+ S5C73M3_JPEG_PAD, &state->oif_sd.entity, OIF_JPEG_PAD,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ mutex_lock(&state->lock);
+ ret = __s5c73m3_power_on(state);
+ if (ret == 0)
+ s5c73m3_get_fw_version(state);
+
+ __s5c73m3_power_off(state);
+ mutex_unlock(&state->lock);
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
+ __func__, ret ? "failed" : "succeeded", ret);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_internal_ops s5c73m3_internal_ops = {
+ .open = s5c73m3_open,
+};
+
+static const struct v4l2_subdev_pad_ops s5c73m3_pad_ops = {
+ .enum_mbus_code = s5c73m3_enum_mbus_code,
+ .enum_frame_size = s5c73m3_enum_frame_size,
+ .get_fmt = s5c73m3_get_fmt,
+ .set_fmt = s5c73m3_set_fmt,
+};
+
+static const struct v4l2_subdev_ops s5c73m3_subdev_ops = {
+ .pad = &s5c73m3_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops oif_internal_ops = {
+ .registered = s5c73m3_oif_registered,
+ .open = s5c73m3_oif_open,
+};
+
+static const struct v4l2_subdev_pad_ops s5c73m3_oif_pad_ops = {
+ .enum_mbus_code = s5c73m3_oif_enum_mbus_code,
+ .enum_frame_size = s5c73m3_oif_enum_frame_size,
+ .enum_frame_interval = s5c73m3_oif_enum_frame_interval,
+ .get_fmt = s5c73m3_oif_get_fmt,
+ .set_fmt = s5c73m3_oif_set_fmt,
+ .get_frame_desc = s5c73m3_oif_get_frame_desc,
+ .set_frame_desc = s5c73m3_oif_set_frame_desc,
+};
+
+static const struct v4l2_subdev_core_ops s5c73m3_oif_core_ops = {
+ .s_power = s5c73m3_oif_set_power,
+ .log_status = s5c73m3_oif_log_status,
+};
+
+static const struct v4l2_subdev_video_ops s5c73m3_oif_video_ops = {
+ .s_stream = s5c73m3_oif_s_stream,
+ .g_frame_interval = s5c73m3_oif_g_frame_interval,
+ .s_frame_interval = s5c73m3_oif_s_frame_interval,
+};
+
+static const struct v4l2_subdev_ops oif_subdev_ops = {
+ .core = &s5c73m3_oif_core_ops,
+ .pad = &s5c73m3_oif_pad_ops,
+ .video = &s5c73m3_oif_video_ops,
+};
+
+static int s5c73m3_configure_gpio(int nr, int val, const char *name)
+{
+ unsigned long flags = val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ int ret;
+
+ if (!gpio_is_valid(nr))
+ return 0;
+ ret = gpio_request_one(nr, flags, name);
+ if (!ret)
+ gpio_export(nr, 0);
+ return ret;
+}
+
+static int s5c73m3_free_gpios(struct s5c73m3 *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->gpio); i++) {
+ if (!gpio_is_valid(state->gpio[i].gpio))
+ continue;
+ gpio_free(state->gpio[i].gpio);
+ state->gpio[i].gpio = -EINVAL;
+ }
+ return 0;
+}
+
+static int s5c73m3_configure_gpios(struct s5c73m3 *state,
+ const struct s5c73m3_platform_data *pdata)
+{
+ const struct s5c73m3_gpio *gpio = &pdata->gpio_stby;
+ int ret;
+
+ state->gpio[STBY].gpio = -EINVAL;
+ state->gpio[RST].gpio = -EINVAL;
+
+ ret = s5c73m3_configure_gpio(gpio->gpio, gpio->level, "S5C73M3_STBY");
+ if (ret) {
+ s5c73m3_free_gpios(state);
+ return ret;
+ }
+ state->gpio[STBY] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ gpio = &pdata->gpio_reset;
+ ret = s5c73m3_configure_gpio(gpio->gpio, gpio->level, "S5C73M3_RST");
+ if (ret) {
+ s5c73m3_free_gpios(state);
+ return ret;
+ }
+ state->gpio[RST] = *gpio;
+ if (gpio_is_valid(gpio->gpio))
+ gpio_set_value(gpio->gpio, 0);
+
+ return 0;
+}
+
+static int s5c73m3_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ const struct s5c73m3_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev *oif_sd;
+ struct s5c73m3 *state;
+ int ret, i;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "Platform data not specified\n");
+ return -EINVAL;
+ }
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ sd = &state->sensor_sd;
+ oif_sd = &state->oif_sd;
+
+ v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
+ sd->owner = client->driver->driver.owner;
+ v4l2_set_subdevdata(sd, state);
+ strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
+
+ sd->internal_ops = &s5c73m3_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->sensor_pads[S5C73M3_JPEG_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ state->sensor_pads[S5C73M3_ISP_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+
+ ret = media_entity_init(&sd->entity, S5C73M3_NUM_PADS,
+ state->sensor_pads, 0);
+ if (ret < 0)
+ return ret;
+
+ v4l2_i2c_subdev_init(oif_sd, client, &oif_subdev_ops);
+ strcpy(oif_sd->name, "S5C73M3-OIF");
+
+ oif_sd->internal_ops = &oif_internal_ops;
+ oif_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->oif_pads[OIF_ISP_PAD].flags = MEDIA_PAD_FL_SINK;
+ state->oif_pads[OIF_JPEG_PAD].flags = MEDIA_PAD_FL_SINK;
+ state->oif_pads[OIF_SOURCE_PAD].flags = MEDIA_PAD_FL_SOURCE;
+ oif_sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+
+ ret = media_entity_init(&oif_sd->entity, OIF_NUM_PADS,
+ state->oif_pads, 0);
+ if (ret < 0)
+ return ret;
+
+ state->mclk_frequency = pdata->mclk_frequency;
+ state->bus_type = pdata->bus_type;
+
+ ret = s5c73m3_configure_gpios(state, pdata);
+ if (ret)
+ goto out_err1;
+
+ for (i = 0; i < S5C73M3_MAX_SUPPLIES; i++)
+ state->supplies[i].supply = s5c73m3_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, S5C73M3_MAX_SUPPLIES,
+ state->supplies);
+ if (ret) {
+ dev_err(dev, "failed to get regulators\n");
+ goto out_err2;
+ }
+
+ ret = s5c73m3_init_controls(state);
+ if (ret)
+ goto out_err2;
+
+ state->sensor_pix_size[RES_ISP] = &s5c73m3_isp_resolutions[1];
+ state->sensor_pix_size[RES_JPEG] = &s5c73m3_jpeg_resolutions[1];
+ state->oif_pix_size[RES_ISP] = state->sensor_pix_size[RES_ISP];
+ state->oif_pix_size[RES_JPEG] = state->sensor_pix_size[RES_JPEG];
+
+ state->mbus_code = S5C73M3_ISP_FMT;
+
+ state->fiv = &s5c73m3_intervals[S5C73M3_DEFAULT_FRAME_INTERVAL];
+
+ state->fw_file_version[0] = 'G';
+ state->fw_file_version[1] = 'C';
+
+ ret = s5c73m3_register_spi_driver(state);
+ if (ret < 0)
+ goto out_err2;
+
+ state->i2c_client = client;
+
+ v4l2_info(sd, "%s: completed succesfully\n", __func__);
+ return 0;
+
+out_err2:
+ s5c73m3_free_gpios(state);
+out_err1:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static int s5c73m3_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+
+ v4l2_device_unregister_subdev(sd);
+
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ s5c73m3_unregister_spi_driver(state);
+ s5c73m3_free_gpios(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id s5c73m3_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, s5c73m3_id);
+
+static struct i2c_driver s5c73m3_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = s5c73m3_probe,
+ .remove = s5c73m3_remove,
+ .id_table = s5c73m3_id,
+};
+
+module_i2c_driver(s5c73m3_i2c_driver);
+
+MODULE_DESCRIPTION("Samsung S5C73M3 camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
new file mode 100644
index 000000000000..8001cde1db1e
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -0,0 +1,563 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5c73m3.h>
+
+#include "s5c73m3.h"
+
+static int s5c73m3_get_af_status(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
+{
+ u16 reg = REG_AF_STATUS_UNFOCUSED;
+
+ int ret = s5c73m3_read(state, REG_AF_STATUS, &reg);
+
+ switch (reg) {
+ case REG_CAF_STATUS_FIND_SEARCH_DIR:
+ case REG_AF_STATUS_FOCUSING:
+ case REG_CAF_STATUS_FOCUSING:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_BUSY;
+ break;
+ case REG_CAF_STATUS_FOCUSED:
+ case REG_AF_STATUS_FOCUSED:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_REACHED;
+ break;
+ default:
+ v4l2_info(&state->sensor_sd, "Unknown AF status %#x\n", reg);
+ /* Fall through */
+ case REG_CAF_STATUS_UNFOCUSED:
+ case REG_AF_STATUS_UNFOCUSED:
+ case REG_AF_STATUS_INVALID:
+ ctrl->val = V4L2_AUTO_FOCUS_STATUS_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
+static int s5c73m3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sensor_sd(ctrl);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ int ret;
+
+ if (state->power == 0)
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_AUTO:
+ ret = s5c73m3_get_af_status(state, state->ctrls.af_status);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ return 0;
+}
+
+static int s5c73m3_set_colorfx(struct s5c73m3 *state, int val)
+{
+ static const unsigned short colorfx[][2] = {
+ { V4L2_COLORFX_NONE, COMM_IMAGE_EFFECT_NONE },
+ { V4L2_COLORFX_BW, COMM_IMAGE_EFFECT_MONO },
+ { V4L2_COLORFX_SEPIA, COMM_IMAGE_EFFECT_SEPIA },
+ { V4L2_COLORFX_NEGATIVE, COMM_IMAGE_EFFECT_NEGATIVE },
+ { V4L2_COLORFX_AQUA, COMM_IMAGE_EFFECT_AQUA },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++) {
+ if (colorfx[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Setting %s color effect\n",
+ v4l2_ctrl_get_menu(state->ctrls.colorfx->id)[i]);
+
+ return s5c73m3_isp_command(state, COMM_IMAGE_EFFECT,
+ colorfx[i][1]);
+ }
+ return -EINVAL;
+}
+
+/* Set exposure metering/exposure bias */
+static int s5c73m3_set_exposure(struct s5c73m3 *state, int auto_exp)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+ struct s5c73m3_ctrls *ctrls = &state->ctrls;
+ int ret = 0;
+
+ if (ctrls->exposure_metering->is_new) {
+ u16 metering;
+
+ switch (ctrls->exposure_metering->val) {
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ metering = COMM_METERING_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ metering = COMM_METERING_SPOT;
+ break;
+ default:
+ metering = COMM_METERING_AVERAGE;
+ break;
+ }
+
+ ret = s5c73m3_isp_command(state, COMM_METERING, metering);
+ }
+
+ if (!ret && ctrls->exposure_bias->is_new) {
+ u16 exp_bias = ctrls->exposure_bias->val;
+ ret = s5c73m3_isp_command(state, COMM_EV, exp_bias);
+ }
+
+ v4l2_dbg(1, s5c73m3_dbg, sd,
+ "%s: exposure bias: %#x, metering: %#x (%d)\n", __func__,
+ ctrls->exposure_bias->val, ctrls->exposure_metering->val, ret);
+
+ return ret;
+}
+
+static int s5c73m3_set_white_balance(struct s5c73m3 *state, int val)
+{
+ static const unsigned short wb[][2] = {
+ { V4L2_WHITE_BALANCE_INCANDESCENT, COMM_AWB_MODE_INCANDESCENT},
+ { V4L2_WHITE_BALANCE_FLUORESCENT, COMM_AWB_MODE_FLUORESCENT1},
+ { V4L2_WHITE_BALANCE_FLUORESCENT_H, COMM_AWB_MODE_FLUORESCENT2},
+ { V4L2_WHITE_BALANCE_CLOUDY, COMM_AWB_MODE_CLOUDY},
+ { V4L2_WHITE_BALANCE_DAYLIGHT, COMM_AWB_MODE_DAYLIGHT},
+ { V4L2_WHITE_BALANCE_AUTO, COMM_AWB_MODE_AUTO},
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wb); i++) {
+ if (wb[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd,
+ "Setting white balance to: %s\n",
+ v4l2_ctrl_get_menu(state->ctrls.auto_wb->id)[i]);
+
+ return s5c73m3_isp_command(state, COMM_AWB_MODE, wb[i][1]);
+ }
+
+ return -EINVAL;
+}
+
+static int s5c73m3_af_run(struct s5c73m3 *state, bool on)
+{
+ struct s5c73m3_ctrls *c = &state->ctrls;
+
+ if (!on)
+ return s5c73m3_isp_command(state, COMM_AF_CON,
+ COMM_AF_CON_STOP);
+
+ if (c->focus_auto->val)
+ return s5c73m3_isp_command(state, COMM_AF_MODE,
+ COMM_AF_MODE_PREVIEW_CAF_START);
+
+ return s5c73m3_isp_command(state, COMM_AF_CON, COMM_AF_CON_START);
+}
+
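+/*
+ * Translate V4L2_CID_3A_LOCK bit changes into ISP commands: auto exposure
+ * and auto white balance are stopped or restarted directly, while the focus
+ * lock bit stops or restarts autofocus through s5c73m3_af_run(). Only bits
+ * that actually changed since the previous call are acted upon.
+ */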
+static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
+{
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+ bool af_lock = ctrl->val & V4L2_LOCK_FOCUS;
+ int ret = 0;
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) {
+ ret = s5c73m3_isp_command(state, COMM_AE_CON,
+ ae_lock ? COMM_AE_STOP : COMM_AE_START);
+ if (ret)
+ return ret;
+ }
+
+ if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE)
+ && state->ctrls.auto_wb->val) {
+ ret = s5c73m3_isp_command(state, COMM_AWB_CON,
+ awb_lock ? COMM_AWB_STOP : COMM_AWB_START);
+ if (ret)
+ return ret;
+ }
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
+ ret = s5c73m3_af_run(state, !af_lock);
+
+ return ret;
+}
+
+static int s5c73m3_set_auto_focus(struct s5c73m3 *state, int caf)
+{
+ struct s5c73m3_ctrls *c = &state->ctrls;
+ int ret = 1;
+
+ if (c->af_distance->is_new) {
+ u16 mode = (c->af_distance->val == V4L2_AUTO_FOCUS_RANGE_MACRO)
+ ? COMM_AF_MODE_MACRO : COMM_AF_MODE_NORMAL;
+ ret = s5c73m3_isp_command(state, COMM_AF_MODE, mode);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (!ret || (c->focus_auto->is_new && c->focus_auto->val) ||
+ c->af_start->is_new)
+ ret = s5c73m3_af_run(state, 1);
+ else if ((c->focus_auto->is_new && !c->focus_auto->val) ||
+ c->af_stop->is_new)
+ ret = s5c73m3_af_run(state, 0);
+ else
+ ret = 0;
+
+ return ret;
+}
+
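+/*
+ * Contrast, saturation and sharpness take a -2..2 control range, while the
+ * ISP expects an unsigned value: 0..2 are passed through unchanged and the
+ * negative levels -1 and -2 are sent as 3 and 4 respectively.
+ */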
+static int s5c73m3_set_contrast(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_CONTRAST, reg);
+}
+
+static int s5c73m3_set_saturation(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_SATURATION, reg);
+}
+
+static int s5c73m3_set_sharpness(struct s5c73m3 *state, int val)
+{
+ u16 reg = (val < 0) ? -val + 2 : val;
+ return s5c73m3_isp_command(state, COMM_SHARPNESS, reg);
+}
+
+static int s5c73m3_set_iso(struct s5c73m3 *state, int val)
+{
+ u32 iso;
+
+ if (val == V4L2_ISO_SENSITIVITY_MANUAL)
+ iso = state->ctrls.iso->val + 1;
+ else
+ iso = 0;
+
+ return s5c73m3_isp_command(state, COMM_ISO, iso);
+}
+
+static int s5c73m3_set_stabilization(struct s5c73m3 *state, int val)
+{
+ struct v4l2_subdev *sd = &state->sensor_sd;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "Image stabilization: %d\n", val);
+
+ return s5c73m3_isp_command(state, COMM_FRAME_RATE, val ?
+ COMM_FRAME_RATE_ANTI_SHAKE : COMM_FRAME_RATE_AUTO_SET);
+}
+
+static int s5c73m3_set_jpeg_quality(struct s5c73m3 *state, int quality)
+{
+ int reg;
+
+ if (quality <= 65)
+ reg = COMM_IMAGE_QUALITY_NORMAL;
+ else if (quality <= 75)
+ reg = COMM_IMAGE_QUALITY_FINE;
+ else
+ reg = COMM_IMAGE_QUALITY_SUPERFINE;
+
+ return s5c73m3_isp_command(state, COMM_IMAGE_QUALITY, reg);
+}
+
+static int s5c73m3_set_scene_program(struct s5c73m3 *state, int val)
+{
+ static const unsigned short scene_lookup[] = {
+ COMM_SCENE_MODE_NONE, /* V4L2_SCENE_MODE_NONE */
+ COMM_SCENE_MODE_AGAINST_LIGHT,/* V4L2_SCENE_MODE_BACKLIGHT */
+ COMM_SCENE_MODE_BEACH, /* V4L2_SCENE_MODE_BEACH_SNOW */
+ COMM_SCENE_MODE_CANDLE, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
+ COMM_SCENE_MODE_DAWN, /* V4L2_SCENE_MODE_DAWN_DUSK */
+ COMM_SCENE_MODE_FALL, /* V4L2_SCENE_MODE_FALL_COLORS */
+ COMM_SCENE_MODE_FIRE, /* V4L2_SCENE_MODE_FIREWORKS */
+ COMM_SCENE_MODE_LANDSCAPE, /* V4L2_SCENE_MODE_LANDSCAPE */
+ COMM_SCENE_MODE_NIGHT, /* V4L2_SCENE_MODE_NIGHT */
+ COMM_SCENE_MODE_INDOOR, /* V4L2_SCENE_MODE_PARTY_INDOOR */
+ COMM_SCENE_MODE_PORTRAIT, /* V4L2_SCENE_MODE_PORTRAIT */
+ COMM_SCENE_MODE_SPORTS, /* V4L2_SCENE_MODE_SPORTS */
+ COMM_SCENE_MODE_SUNSET, /* V4L2_SCENE_MODE_SUNSET */
+ COMM_SCENE_MODE_TEXT, /* V4L2_SCENE_MODE_TEXT */
+ };
+
+ v4l2_dbg(1, s5c73m3_dbg, &state->sensor_sd, "Setting %s scene mode\n",
+ v4l2_ctrl_get_menu(state->ctrls.scene_mode->id)[val]);
+
+ return s5c73m3_isp_command(state, COMM_SCENE_MODE, scene_lookup[val]);
+}
+
+static int s5c73m3_set_power_line_freq(struct s5c73m3 *state, int val)
+{
+ unsigned int pwr_line_freq = COMM_FLICKER_NONE;
+
+ switch (val) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ pwr_line_freq = COMM_FLICKER_NONE;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ pwr_line_freq = COMM_FLICKER_AUTO_50HZ;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ pwr_line_freq = COMM_FLICKER_AUTO_60HZ;
+ break;
+ default:
+ case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
+ pwr_line_freq = COMM_FLICKER_NONE;
+ }
+
+ return s5c73m3_isp_command(state, COMM_FLICKER_MODE, pwr_line_freq);
+}
+
+static int s5c73m3_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sensor_sd(ctrl);
+ struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
+ int ret = 0;
+
+ v4l2_dbg(1, s5c73m3_dbg, sd, "set_ctrl: %s, value: %d\n",
+ ctrl->name, ctrl->val);
+
+ mutex_lock(&state->lock);
+ /*
+ * If the device is not powered up by the host driver, do
+ * not apply any controls to the hardware at this time.
+ * Instead, the controls will be restored right after power-up.
+ */
+ if (state->power == 0)
+ goto unlock;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_3A_LOCK:
+ ret = s5c73m3_3a_lock(state, ctrl);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = s5c73m3_set_white_balance(state, ctrl->val);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ ret = s5c73m3_set_contrast(state, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ ret = s5c73m3_set_colorfx(state, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = s5c73m3_set_exposure(state, ctrl->val);
+ break;
+
+ case V4L2_CID_FOCUS_AUTO:
+ ret = s5c73m3_set_auto_focus(state, ctrl->val);
+ break;
+
+ case V4L2_CID_IMAGE_STABILIZATION:
+ ret = s5c73m3_set_stabilization(state, ctrl->val);
+ break;
+
+ case V4L2_CID_ISO_SENSITIVITY:
+ ret = s5c73m3_set_iso(state, ctrl->val);
+ break;
+
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ret = s5c73m3_set_jpeg_quality(state, ctrl->val);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ret = s5c73m3_set_power_line_freq(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ ret = s5c73m3_set_saturation(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SCENE_MODE:
+ ret = s5c73m3_set_scene_program(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ ret = s5c73m3_set_sharpness(state, ctrl->val);
+ break;
+
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ ret = s5c73m3_isp_command(state, COMM_WDR, !!ctrl->val);
+ break;
+
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ ret = s5c73m3_isp_command(state, COMM_ZOOM_STEP, ctrl->val);
+ break;
+ }
+unlock:
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5c73m3_ctrl_ops = {
+ .g_volatile_ctrl = s5c73m3_g_volatile_ctrl,
+ .s_ctrl = s5c73m3_s_ctrl,
+};
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ /* COMM_ISO: 0x0001...0x0004 */
+ 100, 200, 400, 800,
+};
+
+/* Supported exposure bias values (-2.0EV...+2.0EV) */
+static const s64 ev_bias_qmenu[] = {
+ /* COMM_EV: 0x0000...0x0008 */
+ -2000, -1500, -1000, -500, 0, 500, 1000, 1500, 2000
+};
+
+int s5c73m3_init_controls(struct s5c73m3 *state)
+{
+ const struct v4l2_ctrl_ops *ops = &s5c73m3_ctrl_ops;
+ struct s5c73m3_ctrls *ctrls = &state->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
+ int ret = v4l2_ctrl_handler_init(hdl, 22);
+ if (ret)
+ return ret;
+
+ /* White balance */
+ ctrls->auto_wb = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 9, ~0x15e, V4L2_WHITE_BALANCE_AUTO);
+
+ /* Exposure (only automatic exposure) */
+ ctrls->auto_exposure = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO, 0, ~0x01, V4L2_EXPOSURE_AUTO);
+
+ ctrls->exposure_bias = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(ev_bias_qmenu) - 1,
+ ARRAY_SIZE(ev_bias_qmenu)/2 - 1,
+ ev_bias_qmenu);
+
+ ctrls->exposure_metering = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_METERING,
+ 2, ~0x7, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ /* Auto focus */
+ ctrls->focus_auto = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_FOCUS_AUTO, 0, 1, 1, 0);
+
+ ctrls->af_start = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_START, 0, 1, 1, 0);
+
+ ctrls->af_stop = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_STOP, 0, 1, 1, 0);
+
+ ctrls->af_status = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_STATUS, 0,
+ (V4L2_AUTO_FOCUS_STATUS_BUSY |
+ V4L2_AUTO_FOCUS_STATUS_REACHED |
+ V4L2_AUTO_FOCUS_STATUS_FAILED),
+ 0, V4L2_AUTO_FOCUS_STATUS_IDLE);
+
+ ctrls->af_distance = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_AUTO_FOCUS_RANGE,
+ V4L2_AUTO_FOCUS_RANGE_MACRO,
+ ~(1 << V4L2_AUTO_FOCUS_RANGE_NORMAL |
+ 1 << V4L2_AUTO_FOCUS_RANGE_MACRO),
+ V4L2_AUTO_FOCUS_RANGE_NORMAL);
+ /* ISO sensitivity */
+ ctrls->auto_iso = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, 0,
+ V4L2_ISO_SENSITIVITY_AUTO);
+
+ ctrls->iso = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_CONTRAST, -2, 2, 1, 0);
+
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SATURATION, -2, 2, 1, 0);
+
+ ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SHARPNESS, -2, 2, 1, 0);
+
+ ctrls->zoom = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_ZOOM_ABSOLUTE, 0, 30, 1, 0);
+
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_AQUA, ~0x40f, V4L2_COLORFX_NONE);
+
+ ctrls->wdr = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_WIDE_DYNAMIC_RANGE, 0, 1, 1, 0);
+
+ ctrls->stabilization = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_IMAGE_STABILIZATION, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+ ctrls->jpeg_quality = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1, 100, 1, 80);
+
+ ctrls->scene_mode = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_SCENE_MODE, V4L2_SCENE_MODE_TEXT, ~0x3fff,
+ V4L2_SCENE_MODE_NONE);
+
+ ctrls->aaa_lock = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_3A_LOCK, 0, 0x7, 0, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exposure, 0, false);
+ ctrls->auto_iso->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_UPDATE;
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso, 0, false);
+ ctrls->af_status->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_cluster(6, &ctrls->focus_auto);
+
+ state->sensor_sd.ctrl_handler = hdl;
+
+ return 0;
+}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
new file mode 100644
index 000000000000..6f3a9c00fe65
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -0,0 +1,156 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include "s5c73m3.h"
+
+#define S5C73M3_SPI_DRV_NAME "S5C73M3-SPI"
+
+enum spi_direction {
+ SPI_DIR_RX,
+ SPI_DIR_TX
+};
+
+static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
+ enum spi_direction dir)
+{
+ struct spi_message msg;
+ int r;
+ struct spi_transfer xfer = {
+ .len = len,
+ };
+
+ if (dir == SPI_DIR_TX)
+ xfer.tx_buf = addr;
+ else
+ xfer.rx_buf = addr;
+
+ if (spi_dev == NULL) {
+ pr_err("%s: SPI device is uninitialized\n", __func__);
+ return -ENODEV;
+ }
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ r = spi_sync(spi_dev, &msg);
+ if (r < 0)
+ dev_err(&spi_dev->dev, "%s spi_sync failed %d\n", __func__, r);
+
+ return r;
+}
+
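+/*
+ * Stream a buffer to the ISP in tx_size-byte chunks, send any remainder,
+ * then append a 32-byte block of zeroes, which presumably flushes the
+ * ISP's SPI receive pipeline after a firmware download.
+ */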
+int s5c73m3_spi_write(struct s5c73m3 *state, const void *addr,
+ const unsigned int len, const unsigned int tx_size)
+{
+ struct spi_device *spi_dev = state->spi_dev;
+ u32 count = len / tx_size;
+ u32 extra = len % tx_size;
+ unsigned int i, j = 0;
+ u8 padding[32];
+ int r = 0;
+
+ memset(padding, 0, sizeof(padding));
+
+ for (i = 0; i < count ; i++) {
+ r = spi_xmit(spi_dev, (void *)addr + j, tx_size, SPI_DIR_TX);
+ if (r < 0)
+ return r;
+ j += tx_size;
+ }
+
+ if (extra > 0) {
+ r = spi_xmit(spi_dev, (void *)addr + j, extra, SPI_DIR_TX);
+ if (r < 0)
+ return r;
+ }
+
+ return spi_xmit(spi_dev, padding, sizeof(padding), SPI_DIR_TX);
+}
+
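+/*
+ * Receive @len bytes in tx_size chunks; any remainder is read in one
+ * final shorter transfer.
+ */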
+int s5c73m3_spi_read(struct s5c73m3 *state, void *addr,
+ const unsigned int len, const unsigned int tx_size)
+{
+ struct spi_device *spi_dev = state->spi_dev;
+ u32 count = len / tx_size;
+ u32 extra = len % tx_size;
+ unsigned int i, j = 0;
+ int r = 0;
+
+ for (i = 0; i < count ; i++) {
+ r = spi_xmit(spi_dev, addr + j, tx_size, SPI_DIR_RX);
+ if (r < 0)
+ return r;
+ j += tx_size;
+ }
+
+ if (extra > 0)
+ return spi_xmit(spi_dev, addr + j, extra, SPI_DIR_RX);
+
+ return 0;
+}
+
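+/*
+ * The spi_driver is embedded in struct s5c73m3, so the driver state is
+ * recovered from the matched driver pointer. The bus is configured for
+ * 32 bits per word before the device is stored for the I2C side to use.
+ */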
+static int s5c73m3_spi_probe(struct spi_device *spi)
+{
+ int r;
+ struct s5c73m3 *state = container_of(spi->dev.driver, struct s5c73m3,
+ spidrv.driver);
+ spi->bits_per_word = 32;
+
+ r = spi_setup(spi);
+ if (r < 0) {
+ dev_err(&spi->dev, "spi_setup() failed\n");
+ return r;
+ }
+
+ mutex_lock(&state->lock);
+ state->spi_dev = spi;
+ mutex_unlock(&state->lock);
+
+ v4l2_info(&state->sensor_sd, "S5C73M3 SPI probed successfully\n");
+ return 0;
+}
+
+static int s5c73m3_spi_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
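+/*
+ * Register the SPI driver embedded in the device state; its probe
+ * callback attaches the matching SPI device to this state.
+ */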
+int s5c73m3_register_spi_driver(struct s5c73m3 *state)
+{
+ struct spi_driver *spidrv = &state->spidrv;
+
+ spidrv->remove = s5c73m3_spi_remove;
+ spidrv->probe = s5c73m3_spi_probe;
+ spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
+ spidrv->driver.bus = &spi_bus_type;
+ spidrv->driver.owner = THIS_MODULE;
+
+ return spi_register_driver(spidrv);
+}
+
+void s5c73m3_unregister_spi_driver(struct s5c73m3 *state)
+{
+ spi_unregister_driver(&state->spidrv);
+}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
new file mode 100644
index 000000000000..9d2c08652246
--- /dev/null
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -0,0 +1,459 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef S5C73M3_H_
+#define S5C73M3_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/s5c73m3.h>
+
+#define DRIVER_NAME "S5C73M3"
+
+#define S5C73M3_ISP_FMT V4L2_MBUS_FMT_VYUY8_2X8
+#define S5C73M3_JPEG_FMT V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8
+
+/* Subdevs pad index definitions */
+enum s5c73m3_pads {
+ S5C73M3_ISP_PAD,
+ S5C73M3_JPEG_PAD,
+ S5C73M3_NUM_PADS
+};
+
+enum s5c73m3_oif_pads {
+ OIF_ISP_PAD,
+ OIF_JPEG_PAD,
+ OIF_SOURCE_PAD,
+ OIF_NUM_PADS
+};
+
+#define S5C73M3_SENSOR_FW_LEN 6
+#define S5C73M3_SENSOR_TYPE_LEN 12
+
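+/*
+ * ISP register addresses are composed of a 16-bit high part (bank) and
+ * a 16-bit low part (offset) packed into one 32-bit value.
+ */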
+#define S5C73M3_REG(_addrh, _addrl) (((_addrh) << 16) | (_addrl))
+
+#define AHB_MSB_ADDR_PTR 0xfcfc
+#define REG_CMDWR_ADDRH 0x0050
+#define REG_CMDWR_ADDRL 0x0054
+#define REG_CMDRD_ADDRH 0x0058
+#define REG_CMDRD_ADDRL 0x005c
+#define REG_CMDBUF_ADDR 0x0f14
+
+#define REG_I2C_SEQ_STATUS S5C73M3_REG(0x0009, 0x59A6)
+#define SEQ_END_PLL (1<<0x0)
+#define SEQ_END_SENSOR (1<<0x1)
+#define SEQ_END_GPIO (1<<0x2)
+#define SEQ_END_FROM (1<<0x3)
+#define SEQ_END_STABLE_AE_AWB (1<<0x4)
+#define SEQ_END_READY_I2C_CMD (1<<0x5)
+
+#define REG_I2C_STATUS S5C73M3_REG(0x0009, 0x599E)
+#define I2C_STATUS_CIS_I2C (1<<0x0)
+#define I2C_STATUS_AF_INIT (1<<0x1)
+#define I2C_STATUS_CAL_DATA (1<<0x2)
+#define I2C_STATUS_FRAME_COUNT (1<<0x3)
+#define I2C_STATUS_FROM_INIT (1<<0x4)
+#define I2C_STATUS_I2C_CIS_STREAM_OFF (1<<0x5)
+#define I2C_STATUS_I2C_N_CMD_OVER (1<<0x6)
+#define I2C_STATUS_I2C_N_CMD_MISMATCH (1<<0x7)
+#define I2C_STATUS_CHECK_BIN_CRC (1<<0x8)
+#define I2C_STATUS_EXCEPTION (1<<0x9)
+#define I2C_STATUS_INIF_INIT_STATE (0x8)
+
+#define REG_STATUS S5C73M3_REG(0x0009, 0x5080)
+#define REG_STATUS_BOOT_SUB_MAIN_ENTER 0xff01
+#define REG_STATUS_BOOT_SRAM_TIMING_OK 0xff02
+#define REG_STATUS_BOOT_INTERRUPTS_EN 0xff03
+#define REG_STATUS_BOOT_R_PLL_DONE 0xff04
+#define REG_STATUS_BOOT_R_PLL_LOCKTIME_DONE 0xff05
+#define REG_STATUS_BOOT_DELAY_COUNT_DONE 0xff06
+#define REG_STATUS_BOOT_I_PLL_DONE 0xff07
+#define REG_STATUS_BOOT_I_PLL_LOCKTIME_DONE 0xff08
+#define REG_STATUS_BOOT_PLL_INIT_OK 0xff09
+#define REG_STATUS_BOOT_SENSOR_INIT_OK 0xff0a
+#define REG_STATUS_BOOT_GPIO_SETTING_OK 0xff0b
+#define REG_STATUS_BOOT_READ_CAL_DATA_OK 0xff0c
+#define REG_STATUS_BOOT_STABLE_AE_AWB_OK 0xff0d
+#define REG_STATUS_ISP_COMMAND_COMPLETED 0xffff
+#define REG_STATUS_EXCEPTION_OCCURED 0xdead
+
+#define COMM_RESULT_OFFSET S5C73M3_REG(0x0009, 0x5000)
+
+#define COMM_IMG_OUTPUT 0x0902
+#define COMM_IMG_OUTPUT_HDR 0x0008
+#define COMM_IMG_OUTPUT_YUV 0x0009
+#define COMM_IMG_OUTPUT_INTERLEAVED 0x000d
+
+#define COMM_STILL_PRE_FLASH 0x0a00
+#define COMM_STILL_PRE_FLASH_FIRE 0x0000
+#define COMM_STILL_PRE_FLASH_NON_FIRED 0x0000
+#define COMM_STILL_PRE_FLASH_FIRED 0x0001
+
+#define COMM_STILL_MAIN_FLASH 0x0a02
+#define COMM_STILL_MAIN_FLASH_CANCEL 0x0001
+#define COMM_STILL_MAIN_FLASH_FIRE 0x0002
+
+#define COMM_ZOOM_STEP 0x0b00
+
+#define COMM_IMAGE_EFFECT 0x0b0a
+#define COMM_IMAGE_EFFECT_NONE 0x0001
+#define COMM_IMAGE_EFFECT_NEGATIVE 0x0002
+#define COMM_IMAGE_EFFECT_AQUA 0x0003
+#define COMM_IMAGE_EFFECT_SEPIA 0x0004
+#define COMM_IMAGE_EFFECT_MONO 0x0005
+
+#define COMM_IMAGE_QUALITY 0x0b0c
+#define COMM_IMAGE_QUALITY_SUPERFINE 0x0000
+#define COMM_IMAGE_QUALITY_FINE 0x0001
+#define COMM_IMAGE_QUALITY_NORMAL 0x0002
+
+#define COMM_FLASH_MODE 0x0b0e
+#define COMM_FLASH_MODE_OFF 0x0000
+#define COMM_FLASH_MODE_ON 0x0001
+#define COMM_FLASH_MODE_AUTO 0x0002
+
+#define COMM_FLASH_STATUS 0x0b80
+#define COMM_FLASH_STATUS_OFF 0x0001
+#define COMM_FLASH_STATUS_ON 0x0002
+#define COMM_FLASH_STATUS_AUTO 0x0003
+
+#define COMM_FLASH_TORCH 0x0b12
+#define COMM_FLASH_TORCH_OFF 0x0000
+#define COMM_FLASH_TORCH_ON 0x0001
+
+#define COMM_AE_NEEDS_FLASH 0x0cba
+#define COMM_AE_NEEDS_FLASH_OFF 0x0000
+#define COMM_AE_NEEDS_FLASH_ON 0x0001
+
+#define COMM_CHG_MODE 0x0b10
+#define COMM_CHG_MODE_NEW 0x8000
+#define COMM_CHG_MODE_SUBSAMPLING_HALF 0x2000
+#define COMM_CHG_MODE_SUBSAMPLING_QUARTER 0x4000
+
+#define COMM_CHG_MODE_YUV_320_240 0x0001
+#define COMM_CHG_MODE_YUV_640_480 0x0002
+#define COMM_CHG_MODE_YUV_880_720 0x0003
+#define COMM_CHG_MODE_YUV_960_720 0x0004
+#define COMM_CHG_MODE_YUV_1184_666 0x0005
+#define COMM_CHG_MODE_YUV_1280_720 0x0006
+#define COMM_CHG_MODE_YUV_1536_864 0x0007
+#define COMM_CHG_MODE_YUV_1600_1200 0x0008
+#define COMM_CHG_MODE_YUV_1632_1224 0x0009
+#define COMM_CHG_MODE_YUV_1920_1080 0x000a
+#define COMM_CHG_MODE_YUV_1920_1440 0x000b
+#define COMM_CHG_MODE_YUV_2304_1296 0x000c
+#define COMM_CHG_MODE_YUV_3264_2448 0x000d
+#define COMM_CHG_MODE_YUV_352_288 0x000e
+#define COMM_CHG_MODE_YUV_1008_672 0x000f
+
+#define COMM_CHG_MODE_JPEG_640_480 0x0010
+#define COMM_CHG_MODE_JPEG_800_450 0x0020
+#define COMM_CHG_MODE_JPEG_800_600 0x0030
+#define COMM_CHG_MODE_JPEG_1280_720 0x0040
+#define COMM_CHG_MODE_JPEG_1280_960 0x0050
+#define COMM_CHG_MODE_JPEG_1600_900 0x0060
+#define COMM_CHG_MODE_JPEG_1600_1200 0x0070
+#define COMM_CHG_MODE_JPEG_2048_1152 0x0080
+#define COMM_CHG_MODE_JPEG_2048_1536 0x0090
+#define COMM_CHG_MODE_JPEG_2560_1440 0x00a0
+#define COMM_CHG_MODE_JPEG_2560_1920 0x00b0
+#define COMM_CHG_MODE_JPEG_3264_2176 0x00c0
+#define COMM_CHG_MODE_JPEG_1024_768 0x00d0
+#define COMM_CHG_MODE_JPEG_3264_1836 0x00e0
+#define COMM_CHG_MODE_JPEG_3264_2448 0x00f0
+
+#define COMM_AF_CON 0x0e00
+#define COMM_AF_CON_STOP 0x0000
+#define COMM_AF_CON_SCAN 0x0001 /* Full Search */
+#define COMM_AF_CON_START 0x0002 /* Fast Search */
+
+#define COMM_AF_CAL 0x0e06
+#define COMM_AF_TOUCH_AF 0x0e0a
+
+#define REG_AF_STATUS S5C73M3_REG(0x0009, 0x5e80)
+#define REG_CAF_STATUS_FIND_SEARCH_DIR 0x0001
+#define REG_CAF_STATUS_FOCUSING 0x0002
+#define REG_CAF_STATUS_FOCUSED 0x0003
+#define REG_CAF_STATUS_UNFOCUSED 0x0004
+#define REG_AF_STATUS_INVALID 0x0010
+#define REG_AF_STATUS_FOCUSING 0x0020
+#define REG_AF_STATUS_FOCUSED 0x0030
+#define REG_AF_STATUS_UNFOCUSED 0x0040
+
+#define REG_AF_TOUCH_POSITION S5C73M3_REG(0x0009, 0x5e8e)
+#define COMM_AF_FACE_ZOOM 0x0e10
+
+#define COMM_AF_MODE 0x0e02
+#define COMM_AF_MODE_NORMAL 0x0000
+#define COMM_AF_MODE_MACRO 0x0001
+#define COMM_AF_MODE_MOVIE_CAF_START 0x0002
+#define COMM_AF_MODE_MOVIE_CAF_STOP 0x0003
+#define COMM_AF_MODE_PREVIEW_CAF_START 0x0004
+#define COMM_AF_MODE_PREVIEW_CAF_STOP 0x0005
+
+#define COMM_AF_SOFTLANDING 0x0e16
+#define COMM_AF_SOFTLANDING_ON 0x0000
+#define COMM_AF_SOFTLANDING_RES_COMPLETE 0x0001
+
+#define COMM_FACE_DET 0x0e0c
+#define COMM_FACE_DET_OFF 0x0000
+#define COMM_FACE_DET_ON 0x0001
+
+#define COMM_FACE_DET_OSD 0x0e0e
+#define COMM_FACE_DET_OSD_OFF 0x0000
+#define COMM_FACE_DET_OSD_ON 0x0001
+
+#define COMM_AE_CON 0x0c00
+#define COMM_AE_STOP 0x0000 /* lock */
+#define COMM_AE_START 0x0001 /* unlock */
+
+#define COMM_ISO 0x0c02
+#define COMM_ISO_AUTO 0x0000
+#define COMM_ISO_100 0x0001
+#define COMM_ISO_200 0x0002
+#define COMM_ISO_400 0x0003
+#define COMM_ISO_800 0x0004
+#define COMM_ISO_SPORTS 0x0005
+#define COMM_ISO_NIGHT 0x0006
+#define COMM_ISO_INDOOR 0x0007
+
+/* 0x0000 (-2.0 EV)...0x0008 (2.0 EV), 0.5 EV step */
+#define COMM_EV 0x0c04
+
+#define COMM_METERING 0x0c06
+#define COMM_METERING_CENTER 0x0000
+#define COMM_METERING_SPOT 0x0001
+#define COMM_METERING_AVERAGE 0x0002
+#define COMM_METERING_SMART 0x0003
+
+#define COMM_WDR 0x0c08
+#define COMM_WDR_OFF 0x0000
+#define COMM_WDR_ON 0x0001
+
+#define COMM_FLICKER_MODE 0x0c12
+#define COMM_FLICKER_NONE 0x0000
+#define COMM_FLICKER_MANUAL_50HZ 0x0001
+#define COMM_FLICKER_MANUAL_60HZ 0x0002
+#define COMM_FLICKER_AUTO 0x0003
+#define COMM_FLICKER_AUTO_50HZ 0x0004
+#define COMM_FLICKER_AUTO_60HZ 0x0005
+
+#define COMM_FRAME_RATE 0x0c1e
+#define COMM_FRAME_RATE_AUTO_SET 0x0000
+#define COMM_FRAME_RATE_FIXED_30FPS 0x0002
+#define COMM_FRAME_RATE_FIXED_20FPS 0x0003
+#define COMM_FRAME_RATE_FIXED_15FPS 0x0004
+#define COMM_FRAME_RATE_FIXED_60FPS 0x0007
+#define COMM_FRAME_RATE_FIXED_120FPS 0x0008
+#define COMM_FRAME_RATE_FIXED_7FPS 0x0009
+#define COMM_FRAME_RATE_FIXED_10FPS 0x000a
+#define COMM_FRAME_RATE_FIXED_90FPS 0x000b
+#define COMM_FRAME_RATE_ANTI_SHAKE 0x0013
+
+/* 0x0000...0x0004 -> sharpness: 0, 1, 2, -1, -2 */
+#define COMM_SHARPNESS 0x0c14
+
+/* 0x0000...0x0004 -> saturation: 0, 1, 2, -1, -2 */
+#define COMM_SATURATION 0x0c16
+
+/* 0x0000...0x0004 -> contrast: 0, 1, 2, -1, -2 */
+#define COMM_CONTRAST 0x0c18
+
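+/*
+ * The sharpness, saturation and contrast registers above share one
+ * encoding: a control value v in the -2..2 range maps to the register
+ * value v when v >= 0 and to 2 - v otherwise, so 0, 1, 2, -1, -2 map
+ * to 0x0000..0x0004. This formula is inferred from the comments above,
+ * not taken from the datasheet.
+ */
+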
+#define COMM_SCENE_MODE 0x0c1a
+#define COMM_SCENE_MODE_NONE 0x0000
+#define COMM_SCENE_MODE_PORTRAIT 0x0001
+#define COMM_SCENE_MODE_LANDSCAPE 0x0002
+#define COMM_SCENE_MODE_SPORTS 0x0003
+#define COMM_SCENE_MODE_INDOOR 0x0004
+#define COMM_SCENE_MODE_BEACH 0x0005
+#define COMM_SCENE_MODE_SUNSET 0x0006
+#define COMM_SCENE_MODE_DAWN 0x0007
+#define COMM_SCENE_MODE_FALL 0x0008
+#define COMM_SCENE_MODE_NIGHT 0x0009
+#define COMM_SCENE_MODE_AGAINST_LIGHT 0x000a
+#define COMM_SCENE_MODE_FIRE 0x000b
+#define COMM_SCENE_MODE_TEXT 0x000c
+#define COMM_SCENE_MODE_CANDLE 0x000d
+
+#define COMM_AE_AUTO_BRACKET 0x0b14
+#define COMM_AE_AUTO_BRAKET_EV05 0x0080
+#define COMM_AE_AUTO_BRAKET_EV10 0x0100
+#define COMM_AE_AUTO_BRAKET_EV15 0x0180
+#define COMM_AE_AUTO_BRAKET_EV20 0x0200
+
+#define COMM_SENSOR_STREAMING 0x090a
+#define COMM_SENSOR_STREAMING_OFF 0x0000
+#define COMM_SENSOR_STREAMING_ON 0x0001
+
+#define COMM_AWB_MODE 0x0d02
+#define COMM_AWB_MODE_INCANDESCENT 0x0000
+#define COMM_AWB_MODE_FLUORESCENT1 0x0001
+#define COMM_AWB_MODE_FLUORESCENT2 0x0002
+#define COMM_AWB_MODE_DAYLIGHT 0x0003
+#define COMM_AWB_MODE_CLOUDY 0x0004
+#define COMM_AWB_MODE_AUTO 0x0005
+
+#define COMM_AWB_CON 0x0d00
+#define COMM_AWB_STOP 0x0000 /* lock */
+#define COMM_AWB_START 0x0001 /* unlock */
+
+#define COMM_FW_UPDATE 0x0906
+#define COMM_FW_UPDATE_NOT_READY 0x0000
+#define COMM_FW_UPDATE_SUCCESS 0x0005
+#define COMM_FW_UPDATE_FAIL 0x0007
+#define COMM_FW_UPDATE_BUSY 0xffff
+
+
+#define S5C73M3_MAX_SUPPLIES 6
+
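+/*
+ * Controls grouped in the anonymous structs below are clustered by the
+ * control framework, so their members must stay contiguous and in the
+ * order expected by s5c73m3_init_controls().
+ */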
+struct s5c73m3_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ /* exposure/exposure bias cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure_bias;
+ struct v4l2_ctrl *exposure_metering;
+ };
+ struct {
+ /* iso/auto iso cluster */
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ struct v4l2_ctrl *auto_wb;
+ struct {
+ /* continuous auto focus/auto focus cluster */
+ struct v4l2_ctrl *focus_auto;
+ struct v4l2_ctrl *af_start;
+ struct v4l2_ctrl *af_stop;
+ struct v4l2_ctrl *af_status;
+ struct v4l2_ctrl *af_distance;
+ };
+
+ struct v4l2_ctrl *aaa_lock;
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *zoom;
+ struct v4l2_ctrl *wdr;
+ struct v4l2_ctrl *stabilization;
+ struct v4l2_ctrl *jpeg_quality;
+ struct v4l2_ctrl *scene_mode;
+};
+
+enum s5c73m3_gpio_id {
+ STBY,
+ RST,
+ GPIO_NUM,
+};
+
+enum s5c73m3_resolution_types {
+ RES_ISP,
+ RES_JPEG,
+};
+
+struct s5c73m3_interval {
+ u16 fps_reg;
+ struct v4l2_fract interval;
+ /* Maximum rectangle for the interval */
+ struct v4l2_frmsize_discrete size;
+};
+
+struct s5c73m3 {
+ struct v4l2_subdev sensor_sd;
+ struct media_pad sensor_pads[S5C73M3_NUM_PADS];
+
+ struct v4l2_subdev oif_sd;
+ struct media_pad oif_pads[OIF_NUM_PADS];
+
+ struct spi_driver spidrv;
+ struct spi_device *spi_dev;
+ struct i2c_client *i2c_client;
+ u32 i2c_write_address;
+ u32 i2c_read_address;
+
+ struct regulator_bulk_data supplies[S5C73M3_MAX_SUPPLIES];
+ struct s5c73m3_gpio gpio[GPIO_NUM];
+
+ /* External master clock frequency */
+ u32 mclk_frequency;
+ /* Video bus type - MIPI-CSI2/parallel */
+ enum v4l2_mbus_type bus_type;
+
+ const struct s5c73m3_frame_size *sensor_pix_size[2];
+ const struct s5c73m3_frame_size *oif_pix_size[2];
+ enum v4l2_mbus_pixelcode mbus_code;
+
+ const struct s5c73m3_interval *fiv;
+
+ struct v4l2_mbus_frame_desc frame_desc;
+ /* protects the struct members below */
+ struct mutex lock;
+
+ struct s5c73m3_ctrls ctrls;
+
+ u8 streaming:1;
+ u8 apply_fmt:1;
+ u8 apply_fiv:1;
+ u8 isp_ready:1;
+
+ short power;
+
+ char sensor_fw[S5C73M3_SENSOR_FW_LEN + 2];
+ char sensor_type[S5C73M3_SENSOR_TYPE_LEN + 2];
+ char fw_file_version[2];
+ unsigned int fw_size;
+};
+
+struct s5c73m3_frame_size {
+ u32 width;
+ u32 height;
+ u8 reg_val;
+};
+
+extern int s5c73m3_dbg;
+
+int s5c73m3_register_spi_driver(struct s5c73m3 *state);
+void s5c73m3_unregister_spi_driver(struct s5c73m3 *state);
+int s5c73m3_spi_write(struct s5c73m3 *state, const void *addr,
+ const unsigned int len, const unsigned int tx_size);
+int s5c73m3_spi_read(struct s5c73m3 *state, void *addr,
+ const unsigned int len, const unsigned int tx_size);
+
+int s5c73m3_read(struct s5c73m3 *state, u32 addr, u16 *data);
+int s5c73m3_write(struct s5c73m3 *state, u32 addr, u16 data);
+int s5c73m3_isp_command(struct s5c73m3 *state, u16 command, u16 data);
+int s5c73m3_init_controls(struct s5c73m3 *state);
+
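+/* Helpers mapping control handler and subdev pointers to the driver state */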
+static inline struct v4l2_subdev *ctrl_to_sensor_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct s5c73m3,
+ ctrls.handler)->sensor_sd;
+}
+
+static inline struct s5c73m3 *sensor_sd_to_s5c73m3(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5c73m3, sensor_sd);
+}
+
+static inline struct s5c73m3 *oif_sd_to_s5c73m3(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct s5c73m3, oif_sd);
+}
+#endif /* S5C73M3_H_ */
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 57cd4fa0193d..bdf5e3db31d1 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -1598,7 +1598,7 @@ static int s5k6aa_probe(struct i2c_client *client,
for (i = 0; i < S5K6AA_NUM_SUPPLIES; i++)
s5k6aa->supplies[i].supply = s5k6aa_supply_names[i];
- ret = regulator_bulk_get(&client->dev, S5K6AA_NUM_SUPPLIES,
+ ret = devm_regulator_bulk_get(&client->dev, S5K6AA_NUM_SUPPLIES,
s5k6aa->supplies);
if (ret) {
dev_err(&client->dev, "Failed to get regulators\n");
@@ -1607,7 +1607,7 @@ static int s5k6aa_probe(struct i2c_client *client,
ret = s5k6aa_initialize_ctrls(s5k6aa);
if (ret)
- goto out_err4;
+ goto out_err3;
s5k6aa_presets_data_init(s5k6aa);
@@ -1618,8 +1618,6 @@ static int s5k6aa_probe(struct i2c_client *client,
return 0;
-out_err4:
- regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
out_err3:
s5k6aa_free_gpios(s5k6aa);
out_err2:
@@ -1635,7 +1633,6 @@ static int s5k6aa_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
- regulator_bulk_free(S5K6AA_NUM_SUPPLIES, s5k6aa->supplies);
s5k6aa_free_gpios(s5k6aa);
return 0;
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index f8534eec9de9..a2a5cbbdbe28 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -271,9 +271,9 @@ static int imx074_g_chip_ident(struct v4l2_subdev *sd,
static int imx074_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int imx074_g_mbus_config(struct v4l2_subdev *sd,
@@ -430,10 +430,9 @@ static int imx074_probe(struct i2c_client *client,
{
struct imx074 *priv;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "IMX074: missing platform data!\n");
return -EINVAL;
}
@@ -444,7 +443,7 @@ static int imx074_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(struct imx074), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct imx074), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -452,23 +451,15 @@ static int imx074_probe(struct i2c_client *client,
priv->fmt = &imx074_colour_fmts[0];
- ret = imx074_video_probe(client);
- if (ret < 0) {
- kfree(priv);
- return ret;
- }
-
- return ret;
+ return imx074_video_probe(client);
}
static int imx074_remove(struct i2c_client *client)
{
- struct imx074 *priv = to_imx074(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (icl->free_bus)
- icl->free_bus(icl);
- kfree(priv);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 19f8a07764f9..bcdc86175549 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -23,7 +23,7 @@
/*
* mt9m001 i2c address 0x5d
* The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_link
+ * from struct soc_camera_host_desc
*/
/* mt9m001 selected register addresses */
@@ -380,9 +380,9 @@ static int mt9m001_s_register(struct v4l2_subdev *sd,
static int mt9m001_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -482,7 +482,7 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
* Interface active, can use I2C. If it fails, it may simply mean that
* this wasn't our capture interface, so we wait for the right one
*/
-static int mt9m001_video_probe(struct soc_camera_link *icl,
+static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
@@ -526,8 +526,8 @@ static int mt9m001_video_probe(struct soc_camera_link *icl,
* The platform may support different bus widths due to
* different routing of the data lines.
*/
- if (icl->query_bus_param)
- flags = icl->query_bus_param(icl);
+ if (ssdd->query_bus_param)
+ flags = ssdd->query_bus_param(ssdd);
else
flags = SOCAM_DATAWIDTH_10;
@@ -558,10 +558,10 @@ done:
return ret;
}
-static void mt9m001_video_remove(struct soc_camera_link *icl)
+static void mt9m001_video_remove(struct soc_camera_subdev_desc *ssdd)
{
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
}
static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
@@ -605,14 +605,14 @@ static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
/* MT9M001 has all capture_format parameters fixed */
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -621,12 +621,12 @@ static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
const struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9m001 *mt9m001 = to_mt9m001(client);
unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample;
- if (icl->set_bus_param)
- return icl->set_bus_param(icl, 1 << (bps - 1));
+ if (ssdd->set_bus_param)
+ return ssdd->set_bus_param(ssdd, 1 << (bps - 1));
/*
* Without board specific bus width settings we only support the
@@ -663,10 +663,10 @@ static int mt9m001_probe(struct i2c_client *client,
{
struct mt9m001 *mt9m001;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9M001 driver needs platform data\n");
return -EINVAL;
}
@@ -677,7 +677,7 @@ static int mt9m001_probe(struct i2c_client *client,
return -EIO;
}
- mt9m001 = kzalloc(sizeof(struct mt9m001), GFP_KERNEL);
+ mt9m001 = devm_kzalloc(&client->dev, sizeof(struct mt9m001), GFP_KERNEL);
if (!mt9m001)
return -ENOMEM;
@@ -697,12 +697,9 @@ static int mt9m001_probe(struct i2c_client *client,
&mt9m001_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
V4L2_EXPOSURE_AUTO);
mt9m001->subdev.ctrl_handler = &mt9m001->hdl;
- if (mt9m001->hdl.error) {
- int err = mt9m001->hdl.error;
+ if (mt9m001->hdl.error)
+ return mt9m001->hdl.error;
- kfree(mt9m001);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure,
V4L2_EXPOSURE_MANUAL, true);
@@ -713,11 +710,9 @@ static int mt9m001_probe(struct i2c_client *client,
mt9m001->rect.width = MT9M001_MAX_WIDTH;
mt9m001->rect.height = MT9M001_MAX_HEIGHT;
- ret = mt9m001_video_probe(icl, client);
- if (ret) {
+ ret = mt9m001_video_probe(ssdd, client);
+ if (ret)
v4l2_ctrl_handler_free(&mt9m001->hdl);
- kfree(mt9m001);
- }
return ret;
}
@@ -725,12 +720,11 @@ static int mt9m001_probe(struct i2c_client *client,
static int mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&mt9m001->subdev);
v4l2_ctrl_handler_free(&mt9m001->hdl);
- mt9m001_video_remove(icl);
- kfree(mt9m001);
+ mt9m001_video_remove(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 62fd94af599b..bbc4ff99603c 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -24,7 +24,8 @@
/*
* MT9M111, MT9M112 and MT9M131:
* i2c address is 0x48 or 0x5d (depending on SADDR pin)
- * The platform has to define i2c_board_info and call i2c_register_board_info()
+ * The platform has to define struct i2c_board_info objects and link to them
+ * from struct soc_camera_host_desc
*/
/*
@@ -799,17 +800,17 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
static int mt9m111_power_on(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
ret = mt9m111_resume(mt9m111);
if (ret < 0) {
dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return ret;
@@ -818,10 +819,10 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
static void mt9m111_power_off(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
mt9m111_suspend(mt9m111);
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -879,13 +880,13 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -956,10 +957,10 @@ static int mt9m111_probe(struct i2c_client *client,
{
struct mt9m111 *mt9m111;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "mt9m111: driver needs platform data\n");
return -EINVAL;
}
@@ -970,7 +971,7 @@ static int mt9m111_probe(struct i2c_client *client,
return -EIO;
}
- mt9m111 = kzalloc(sizeof(struct mt9m111), GFP_KERNEL);
+ mt9m111 = devm_kzalloc(&client->dev, sizeof(struct mt9m111), GFP_KERNEL);
if (!mt9m111)
return -ENOMEM;
@@ -988,12 +989,8 @@ static int mt9m111_probe(struct i2c_client *client,
&mt9m111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
V4L2_EXPOSURE_AUTO);
mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
- if (mt9m111->hdl.error) {
- int err = mt9m111->hdl.error;
-
- kfree(mt9m111);
- return err;
- }
+ if (mt9m111->hdl.error)
+ return mt9m111->hdl.error;
/* Second stage probe - when a capture adapter is there */
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
@@ -1005,10 +1002,8 @@ static int mt9m111_probe(struct i2c_client *client,
mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9m111->hdl);
- kfree(mt9m111);
- }
return ret;
}
@@ -1019,7 +1014,6 @@ static int mt9m111_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&mt9m111->subdev);
v4l2_ctrl_handler_free(&mt9m111->hdl);
- kfree(mt9m111);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 40800b10a080..d80d044ebf15 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -31,8 +31,8 @@
/*
* mt9t031 i2c address 0x5d
- * The platform has to define i2c_board_info and link to it from
- * struct soc_camera_link
+ * The platform has to define struct i2c_board_info objects and link to them
+ * from struct soc_camera_host_desc
*/
/* mt9t031 selected register addresses */
@@ -608,18 +608,18 @@ static struct device_type mt9t031_dev_type = {
static int mt9t031_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct video_device *vdev = soc_camera_i2c_to_vdev(client);
int ret;
if (on) {
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
vdev->dev.type = &mt9t031_dev_type;
} else {
vdev->dev.type = NULL;
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return 0;
@@ -707,13 +707,13 @@ static int mt9t031_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -722,9 +722,9 @@ static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (soc_camera_apply_board_flags(icl, cfg) &
+ if (soc_camera_apply_board_flags(ssdd, cfg) &
V4L2_MBUS_PCLK_SAMPLE_FALLING)
return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
else
@@ -758,11 +758,11 @@ static int mt9t031_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9t031 *mt9t031;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9T031 driver needs platform data\n");
return -EINVAL;
}
@@ -773,7 +773,7 @@ static int mt9t031_probe(struct i2c_client *client,
return -EIO;
}
- mt9t031 = kzalloc(sizeof(struct mt9t031), GFP_KERNEL);
+ mt9t031 = devm_kzalloc(&client->dev, sizeof(struct mt9t031), GFP_KERNEL);
if (!mt9t031)
return -ENOMEM;
@@ -797,12 +797,9 @@ static int mt9t031_probe(struct i2c_client *client,
V4L2_CID_EXPOSURE, 1, 255, 1, 255);
mt9t031->subdev.ctrl_handler = &mt9t031->hdl;
- if (mt9t031->hdl.error) {
- int err = mt9t031->hdl.error;
+ if (mt9t031->hdl.error)
+ return mt9t031->hdl.error;
- kfree(mt9t031);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure,
V4L2_EXPOSURE_MANUAL, true);
@@ -816,10 +813,8 @@ static int mt9t031_probe(struct i2c_client *client,
mt9t031->yskip = 1;
ret = mt9t031_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9t031->hdl);
- kfree(mt9t031);
- }
return ret;
}
@@ -830,7 +825,6 @@ static int mt9t031_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&mt9t031->subdev);
v4l2_ctrl_handler_free(&mt9t031->hdl);
- kfree(mt9t031);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index de7cd836b0a2..188e29b03273 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -92,6 +92,7 @@ struct mt9t112_priv {
struct v4l2_rect frame;
const struct mt9t112_format *format;
int model;
+ int num_formats;
u32 flags;
/* for flags */
#define INIT_DONE (1 << 0)
@@ -779,9 +780,9 @@ static int mt9t112_s_register(struct v4l2_subdev *sd,
static int mt9t112_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
@@ -859,11 +860,11 @@ static int mt9t112_set_params(struct mt9t112_priv *priv,
/*
* get color format
*/
- for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ for (i = 0; i < priv->num_formats; i++)
if (mt9t112_cfmts[i].code == code)
break;
- if (i == ARRAY_SIZE(mt9t112_cfmts))
+ if (i == priv->num_formats)
return -EINVAL;
priv->frame = *rect;
@@ -955,14 +956,16 @@ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
static int mt9t112_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
unsigned int top, left;
int i;
- for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ for (i = 0; i < priv->num_formats; i++)
if (mt9t112_cfmts[i].code == mf->code)
break;
- if (i == ARRAY_SIZE(mt9t112_cfmts)) {
+ if (i == priv->num_formats) {
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_JPEG;
} else {
@@ -979,7 +982,10 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- if (index >= ARRAY_SIZE(mt9t112_cfmts))
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ if (index >= priv->num_formats)
return -EINVAL;
*code = mt9t112_cfmts[index].code;
@@ -991,13 +997,13 @@ static int mt9t112_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1006,10 +1012,10 @@ static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9t112_priv *priv = to_mt9t112(client);
- if (soc_camera_apply_board_flags(icl, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ if (soc_camera_apply_board_flags(ssdd, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
priv->flags |= PCLK_RISING;
return 0;
@@ -1056,10 +1062,12 @@ static int mt9t112_camera_probe(struct i2c_client *client)
case 0x2680:
devname = "mt9t111";
priv->model = V4L2_IDENT_MT9T111;
+ priv->num_formats = 1;
break;
case 0x2682:
devname = "mt9t112";
priv->model = V4L2_IDENT_MT9T112;
+ priv->num_formats = ARRAY_SIZE(mt9t112_cfmts);
break;
default:
dev_err(&client->dev, "Product ID error %04x\n", chipid);
@@ -1078,7 +1086,7 @@ static int mt9t112_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9t112_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct v4l2_rect rect = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
@@ -1087,24 +1095,22 @@ static int mt9t112_probe(struct i2c_client *client,
};
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "mt9t112: missing platform data!\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->info = icl->priv;
+ priv->info = ssdd->drv_priv;
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret) {
- kfree(priv);
+ if (ret)
return ret;
- }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
@@ -1114,9 +1120,6 @@ static int mt9t112_probe(struct i2c_client *client,
static int mt9t112_remove(struct i2c_client *client)
{
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index d40a8858be01..a5e65d6a0781 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -25,7 +25,7 @@
/*
* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
* The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_link
+ * from struct soc_camera_host_desc
*/
static char *sensor_type;
@@ -508,9 +508,9 @@ static int mt9v022_s_register(struct v4l2_subdev *sd,
static int mt9v022_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -655,7 +655,7 @@ static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
static int mt9v022_video_probe(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
s32 data;
int ret;
unsigned long flags;
@@ -715,8 +715,8 @@ static int mt9v022_video_probe(struct i2c_client *client)
* The platform may support different bus widths due to
* different routing of the data lines.
*/
- if (icl->query_bus_param)
- flags = icl->query_bus_param(icl);
+ if (ssdd->query_bus_param)
+ flags = ssdd->query_bus_param(ssdd);
else
flags = SOCAM_DATAWIDTH_10;
@@ -784,7 +784,7 @@ static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -792,7 +792,7 @@ static int mt9v022_g_mbus_config(struct v4l2_subdev *sd,
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -801,15 +801,15 @@ static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
unsigned int bps = soc_mbus_get_fmtdesc(mt9v022->fmt->code)->bits_per_sample;
int ret;
u16 pixclk = 0;
- if (icl->set_bus_param) {
- ret = icl->set_bus_param(icl, 1 << (bps - 1));
+ if (ssdd->set_bus_param) {
+ ret = ssdd->set_bus_param(ssdd, 1 << (bps - 1));
if (ret)
return ret;
} else if (bps != 10) {
@@ -873,12 +873,12 @@ static int mt9v022_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9v022 *mt9v022;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct mt9v022_platform_data *pdata = icl->priv;
+ struct mt9v022_platform_data *pdata;
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "MT9V022 driver needs platform data\n");
return -EINVAL;
}
@@ -889,10 +889,11 @@ static int mt9v022_probe(struct i2c_client *client,
return -EIO;
}
- mt9v022 = kzalloc(sizeof(struct mt9v022), GFP_KERNEL);
+ mt9v022 = devm_kzalloc(&client->dev, sizeof(struct mt9v022), GFP_KERNEL);
if (!mt9v022)
return -ENOMEM;
+ pdata = ssdd->drv_priv;
v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops);
v4l2_ctrl_handler_init(&mt9v022->hdl, 6);
v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
@@ -929,7 +930,6 @@ static int mt9v022_probe(struct i2c_client *client,
int err = mt9v022->hdl.error;
dev_err(&client->dev, "control initialisation err %d\n", err);
- kfree(mt9v022);
return err;
}
v4l2_ctrl_auto_cluster(2, &mt9v022->autoexposure,
@@ -949,10 +949,8 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->rect.height = MT9V022_MAX_HEIGHT;
ret = mt9v022_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&mt9v022->hdl);
- kfree(mt9v022);
- }
return ret;
}
@@ -960,13 +958,12 @@ static int mt9v022_probe(struct i2c_client *client,
static int mt9v022_remove(struct i2c_client *client)
{
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&mt9v022->subdev);
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
v4l2_ctrl_handler_free(&mt9v022->hdl);
- kfree(mt9v022);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 66698a83bda2..0f520f693b6e 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -771,9 +771,9 @@ static int ov2640_s_register(struct v4l2_subdev *sd,
static int ov2640_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
/* Select the nearest higher resolution for capture */
@@ -1046,13 +1046,13 @@ static int ov2640_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1080,11 +1080,11 @@ static int ov2640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov2640_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&adapter->dev,
"OV2640: Missing platform_data for driver\n");
return -EINVAL;
@@ -1096,7 +1096,7 @@ static int ov2640_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(struct ov2640_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
if (!priv) {
dev_err(&adapter->dev,
"Failed to allocate memory for private data!\n");
@@ -1110,20 +1110,14 @@ static int ov2640_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov2640_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- } else {
+ else
dev_info(&adapter->dev, "OV2640 Probed\n");
- }
return ret;
}
@@ -1134,7 +1128,6 @@ static int ov2640_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 8577e0cfb7fe..9d53309619d2 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -934,13 +934,13 @@ static int ov5642_g_mbus_config(struct v4l2_subdev *sd,
static int ov5642_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
if (!on)
- return soc_camera_power_off(&client->dev, icl);
+ return soc_camera_power_off(&client->dev, ssdd);
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
@@ -1020,15 +1020,14 @@ static int ov5642_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov5642 *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "OV5642: missing platform data!\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov5642), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov5642), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1043,25 +1042,15 @@ static int ov5642_probe(struct i2c_client *client,
priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH;
priv->total_height = BLANKING_MIN_HEIGHT;
- ret = ov5642_video_probe(client);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- kfree(priv);
- return ret;
+ return ov5642_video_probe(client);
}
static int ov5642_remove(struct i2c_client *client)
{
- struct ov5642 *priv = to_ov5642(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (icl->free_bus)
- icl->free_bus(icl);
- kfree(priv);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index e87feb0881e3..dbe4f564e6b2 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -435,9 +435,9 @@ static int ov6650_set_register(struct v4l2_subdev *sd,
static int ov6650_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
@@ -892,7 +892,7 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -900,7 +900,7 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -910,8 +910,8 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
int ret;
if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
@@ -963,15 +963,15 @@ static int ov6650_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov6650 *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev,
"Failed to allocate memory for private data!\n");
@@ -1009,12 +1009,9 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
+ if (priv->hdl.error)
+ return priv->hdl.error;
- kfree(priv);
- return err;
- }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
@@ -1029,10 +1026,8 @@ static int ov6650_probe(struct i2c_client *client,
priv->colorspace = V4L2_COLORSPACE_JPEG;
ret = ov6650_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -1043,7 +1038,6 @@ static int ov6650_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index e4a10751894d..fbeb5b2f3ae5 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -667,9 +667,9 @@ static int ov772x_s_register(struct v4l2_subdev *sd,
static int ov772x_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
@@ -1019,13 +1019,13 @@ static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1054,11 +1054,11 @@ static int ov772x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov772x_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "OV772X: missing platform data!\n");
return -EINVAL;
}
@@ -1070,11 +1070,11 @@ static int ov772x_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->info = icl->priv;
+ priv->info = ssdd->drv_priv;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 3);
@@ -1085,22 +1085,15 @@ static int ov772x_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto done;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov772x_video_probe(priv);
- if (ret < 0)
- goto done;
-
- priv->cfmt = &ov772x_cfmts[0];
- priv->win = &ov772x_win_sizes[0];
-
-done:
- if (ret) {
+ if (ret < 0) {
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
+ } else {
+ priv->cfmt = &ov772x_cfmts[0];
+ priv->win = &ov772x_win_sizes[0];
}
return ret;
}
@@ -1111,7 +1104,6 @@ static int ov772x_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index b323684eaf77..05993041be31 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -336,9 +336,9 @@ static int ov9640_set_register(struct v4l2_subdev *sd,
static int ov9640_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
/* select nearest higher resolution for capture */
@@ -657,13 +657,13 @@ static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -690,15 +690,15 @@ static int ov9640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9640_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov9640_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov9640_priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev,
"Failed to allocate memory for private data!\n");
@@ -713,19 +713,13 @@ static int ov9640_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov9640_video_probe(client);
- if (ret) {
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -737,7 +731,6 @@ static int ov9640_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 7a55889e397b..2f236da80165 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -787,12 +787,12 @@ static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
static int ov9740_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct ov9740_priv *priv = to_ov9740(sd);
int ret;
if (on) {
- ret = soc_camera_power_on(&client->dev, icl);
+ ret = soc_camera_power_on(&client->dev, ssdd);
if (ret < 0)
return ret;
@@ -806,7 +806,7 @@ static int ov9740_s_power(struct v4l2_subdev *sd, int on)
priv->current_enable = true;
}
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, ssdd);
}
return 0;
@@ -905,13 +905,13 @@ static int ov9740_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -951,15 +951,15 @@ static int ov9740_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9740_priv *priv;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!icl) {
+ if (!ssdd) {
dev_err(&client->dev, "Missing platform_data for driver\n");
return -EINVAL;
}
- priv = kzalloc(sizeof(struct ov9740_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov9740_priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev, "Failed to allocate private data!\n");
return -ENOMEM;
@@ -972,18 +972,12 @@ static int ov9740_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- int err = priv->hdl.error;
-
- kfree(priv);
- return err;
- }
+ if (priv->hdl.error)
+ return priv->hdl.error;
ret = ov9740_video_probe(client);
- if (ret < 0) {
+ if (ret < 0)
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
- }
return ret;
}
@@ -994,7 +988,6 @@ static int ov9740_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index 02f0400051d9..5c92679bfefb 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -1183,9 +1183,9 @@ static int rj54n1_s_register(struct v4l2_subdev *sd,
static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -1245,14 +1245,14 @@ static int rj54n1_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags =
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -1261,10 +1261,10 @@ static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
/* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
- if (soc_camera_apply_board_flags(icl, cfg) &
+ if (soc_camera_apply_board_flags(ssdd, cfg) &
V4L2_MBUS_PCLK_SAMPLE_RISING)
return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
else
@@ -1334,17 +1334,17 @@ static int rj54n1_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct rj54n1 *rj54n1;
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rj54n1_pdata *rj54n1_priv;
int ret;
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
return -EINVAL;
}
- rj54n1_priv = icl->priv;
+ rj54n1_priv = ssdd->drv_priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_warn(&adapter->dev,
@@ -1352,7 +1352,7 @@ static int rj54n1_probe(struct i2c_client *client,
return -EIO;
}
- rj54n1 = kzalloc(sizeof(struct rj54n1), GFP_KERNEL);
+ rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
if (!rj54n1)
return -ENOMEM;
@@ -1367,12 +1367,8 @@ static int rj54n1_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error) {
- int err = rj54n1->hdl.error;
-
- kfree(rj54n1);
- return err;
- }
+ if (rj54n1->hdl.error)
+ return rj54n1->hdl.error;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
@@ -1387,10 +1383,8 @@ static int rj54n1_probe(struct i2c_client *client,
(clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
ret = rj54n1_video_probe(client, rj54n1_priv);
- if (ret < 0) {
+ if (ret < 0)
v4l2_ctrl_handler_free(&rj54n1->hdl);
- kfree(rj54n1);
- }
return ret;
}
@@ -1398,13 +1392,12 @@ static int rj54n1_probe(struct i2c_client *client,
static int rj54n1_remove(struct i2c_client *client)
{
struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
v4l2_device_unregister_subdev(&rj54n1->subdev);
- if (icl->free_bus)
- icl->free_bus(icl);
+ if (ssdd->free_bus)
+ ssdd->free_bus(ssdd);
v4l2_ctrl_handler_free(&rj54n1->hdl);
- kfree(rj54n1);
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 140716e71a15..7d2074601881 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -569,9 +569,9 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
static int tw9910_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, ssdd, on);
}
static int tw9910_set_frame(struct v4l2_subdev *sd, u32 *width, u32 *height)
@@ -847,14 +847,14 @@ static int tw9910_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(icl, cfg);
+ cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -863,9 +863,9 @@ static int tw9910_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
u8 val = VSSL_VVALID | HSSL_DVALID;
- unsigned long flags = soc_camera_apply_board_flags(icl, cfg);
+ unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
/*
* set OUTCTR1
@@ -911,15 +911,14 @@ static int tw9910_probe(struct i2c_client *client,
struct tw9910_video_info *info;
struct i2c_adapter *adapter =
to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
- int ret;
+ struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- if (!icl || !icl->priv) {
+ if (!ssdd || !ssdd->drv_priv) {
dev_err(&client->dev, "TW9910: missing platform data!\n");
return -EINVAL;
}
- info = icl->priv;
+ info = ssdd->drv_priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
@@ -928,7 +927,7 @@ static int tw9910_probe(struct i2c_client *client,
return -EIO;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -936,18 +935,11 @@ static int tw9910_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
- ret = tw9910_video_probe(client);
- if (ret)
- kfree(priv);
-
- return ret;
+ return tw9910_video_probe(client);
}
static int tw9910_remove(struct i2c_client *client)
{
- struct tw9910_priv *priv = to_tw9910(client);
-
- kfree(priv);
return 0;
}
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index c31cc04fffd2..e747524ba6ed 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -175,7 +175,7 @@ static int ths7303_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+ sd = devm_kzalloc(&client->dev, sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
@@ -189,7 +189,6 @@ static int ths7303_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- kfree(sd);
return 0;
}
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 3b24d3fc1866..e3b33b78dd21 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -39,6 +39,7 @@
#include <media/tvaudio.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/i2c-addr.h>
@@ -91,13 +92,13 @@ struct CHIPDESC {
audiocmd init;
/* which register has which value */
- int leftreg,rightreg,treblereg,bassreg;
+ int leftreg, rightreg, treblereg, bassreg;
- /* initialize with (defaults to 65535/65535/32768/32768 */
- int leftinit,rightinit,trebleinit,bassinit;
+ /* initialize with defaults (65535/32768/32768) */
+ int volinit, trebleinit, bassinit;
/* functions to convert the values (v4l -> chip) */
- getvalue volfunc,treblefunc,bassfunc;
+ getvalue volfunc, treblefunc, bassfunc;
/* get/set mode */
getrxsubchans getrxsubchans;
@@ -113,6 +114,12 @@ struct CHIPDESC {
/* current state of the chip */
struct CHIPSTATE {
struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* volume/balance cluster */
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *balance;
+ };
/* chip-specific description - should point to
an entry at CHIPDESC table */
@@ -122,7 +129,7 @@ struct CHIPSTATE {
audiocmd shadow;
/* current settings */
- __u16 left, right, treble, bass, muted;
+ u16 muted;
int prevmode;
int radio;
int input;
@@ -138,6 +145,11 @@ static inline struct CHIPSTATE *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct CHIPSTATE, sd);
}
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct CHIPSTATE, hdl)->sd;
+}
+
/* ---------------------------------------------------------------------- */
/* i2c I/O functions */
@@ -1523,8 +1535,7 @@ static struct CHIPDESC chiplist[] = {
.rightreg = TDA9875_MVR,
.bassreg = TDA9875_MBA,
.treblereg = TDA9875_MTR,
- .leftinit = 58880,
- .rightinit = 58880,
+ .volinit = 58880,
},
{
.name = "tda9850",
@@ -1618,7 +1629,8 @@ static struct CHIPDESC chiplist[] = {
.inputreg = -1,
.inputmap = { TEA6420_S_SA, TEA6420_S_SB, TEA6420_S_SC },
- .inputmute = TEA6300_S_GMU,
+ .inputmute = TEA6420_S_GMU,
+ .inputmask = 0x07,
},
{
.name = "tda8425",
@@ -1679,121 +1691,39 @@ static struct CHIPDESC chiplist[] = {
/* ---------------------------------------------------------------------- */
-static int tvaudio_g_ctrl(struct v4l2_subdev *sd,
- struct v4l2_control *ctrl)
+static int tvaudio_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_sd(ctrl);
struct CHIPSTATE *chip = to_state(sd);
struct CHIPDESC *desc = chip->desc;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- if (!(desc->flags & CHIP_HAS_INPUTSEL))
- break;
- ctrl->value=chip->muted;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
- ctrl->value = max(chip->left,chip->right);
- return 0;
- case V4L2_CID_AUDIO_BALANCE:
- {
- int volume;
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
- volume = max(chip->left,chip->right);
- if (volume)
- ctrl->value=(32768*min(chip->left,chip->right))/volume;
- else
- ctrl->value=32768;
- return 0;
- }
- case V4L2_CID_AUDIO_BASS:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- ctrl->value = chip->bass;
- return 0;
- case V4L2_CID_AUDIO_TREBLE:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- ctrl->value = chip->treble;
- return 0;
- }
- return -EINVAL;
-}
-
-static int tvaudio_s_ctrl(struct v4l2_subdev *sd,
- struct v4l2_control *ctrl)
-{
- struct CHIPSTATE *chip = to_state(sd);
- struct CHIPDESC *desc = chip->desc;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (!(desc->flags & CHIP_HAS_INPUTSEL))
- break;
-
- if (ctrl->value < 0 || ctrl->value >= 2)
- return -ERANGE;
- chip->muted = ctrl->value;
+ chip->muted = ctrl->val;
if (chip->muted)
chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask);
else
chip_write_masked(chip,desc->inputreg,
desc->inputmap[chip->input],desc->inputmask);
return 0;
- case V4L2_CID_AUDIO_VOLUME:
- {
- int volume,balance;
-
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
-
- volume = max(chip->left,chip->right);
- if (volume)
- balance=(32768*min(chip->left,chip->right))/volume;
- else
- balance=32768;
-
- volume=ctrl->value;
- chip->left = (min(65536 - balance,32768) * volume) / 32768;
- chip->right = (min(balance,volume *(__u16)32768)) / 32768;
-
- chip_write(chip,desc->leftreg,desc->volfunc(chip->left));
- chip_write(chip,desc->rightreg,desc->volfunc(chip->right));
-
- return 0;
- }
- case V4L2_CID_AUDIO_BALANCE:
- {
- int volume, balance;
-
- if (!(desc->flags & CHIP_HAS_VOLUME))
- break;
-
- volume = max(chip->left, chip->right);
- balance = ctrl->value;
- chip->left = (min(65536 - balance, 32768) * volume) / 32768;
- chip->right = (min(balance, volume * (__u16)32768)) / 32768;
+ case V4L2_CID_AUDIO_VOLUME: {
+ u32 volume, balance;
+ u32 left, right;
- chip_write(chip, desc->leftreg, desc->volfunc(chip->left));
- chip_write(chip, desc->rightreg, desc->volfunc(chip->right));
+ volume = chip->volume->val;
+ balance = chip->balance->val;
+ left = (min(65536U - balance, 32768U) * volume) / 32768U;
+ right = (min(balance, 32768U) * volume) / 32768U;
+ chip_write(chip, desc->leftreg, desc->volfunc(left));
+ chip_write(chip, desc->rightreg, desc->volfunc(right));
return 0;
}
case V4L2_CID_AUDIO_BASS:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- chip->bass = ctrl->value;
- chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass));
-
+ chip_write(chip, desc->bassreg, desc->bassfunc(ctrl->val));
return 0;
case V4L2_CID_AUDIO_TREBLE:
- if (!(desc->flags & CHIP_HAS_BASSTREBLE))
- break;
- chip->treble = ctrl->value;
- chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble));
-
+ chip_write(chip, desc->treblereg, desc->treblefunc(ctrl->val));
return 0;
}
return -EINVAL;
@@ -1812,35 +1742,6 @@ static int tvaudio_s_radio(struct v4l2_subdev *sd)
return 0;
}
-static int tvaudio_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- struct CHIPSTATE *chip = to_state(sd);
- struct CHIPDESC *desc = chip->desc;
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (desc->flags & CHIP_HAS_INPUTSEL)
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- if (desc->flags & CHIP_HAS_VOLUME)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
- break;
- case V4L2_CID_AUDIO_BALANCE:
- if (desc->flags & CHIP_HAS_VOLUME)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- break;
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- if (desc->flags & CHIP_HAS_BASSTREBLE)
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- break;
- default:
- break;
- }
- return -EINVAL;
-}
-
static int tvaudio_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -1944,13 +1845,32 @@ static int tvaudio_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVAUDIO, 0);
}
+static int tvaudio_log_status(struct v4l2_subdev *sd)
+{
+ struct CHIPSTATE *chip = to_state(sd);
+ struct CHIPDESC *desc = chip->desc;
+
+ v4l2_info(sd, "Chip: %s\n", desc->name);
+ v4l2_ctrl_handler_log_status(&chip->hdl, sd->name);
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
+static const struct v4l2_ctrl_ops tvaudio_ctrl_ops = {
+ .s_ctrl = tvaudio_s_ctrl,
+};
+
static const struct v4l2_subdev_core_ops tvaudio_core_ops = {
+ .log_status = tvaudio_log_status,
.g_chip_ident = tvaudio_g_chip_ident,
- .queryctrl = tvaudio_queryctrl,
- .g_ctrl = tvaudio_g_ctrl,
- .s_ctrl = tvaudio_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
.s_std = tvaudio_s_std,
};
@@ -2035,6 +1955,10 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
else
chip_cmd(chip, "init", &desc->init);
+ v4l2_ctrl_handler_init(&chip->hdl, 5);
+ if (desc->flags & CHIP_HAS_INPUTSEL)
+ v4l2_ctrl_new_std(&chip->hdl, &tvaudio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
if (desc->flags & CHIP_HAS_VOLUME) {
if (!desc->volfunc) {
/* This shouldn't happen. Warn user, but keep working
@@ -2043,12 +1967,14 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_info(sd, "volume callback undefined!\n");
desc->flags &= ~CHIP_HAS_VOLUME;
} else {
- chip->left = desc->leftinit ? desc->leftinit : 65535;
- chip->right = desc->rightinit ? desc->rightinit : 65535;
- chip_write(chip, desc->leftreg,
- desc->volfunc(chip->left));
- chip_write(chip, desc->rightreg,
- desc->volfunc(chip->right));
+ chip->volume = v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
+ 0, 65535, 65535 / 100,
+ desc->volinit ? desc->volinit : 65535);
+ chip->balance = v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_BALANCE,
+ 0, 65535, 65535 / 100, 32768);
+ v4l2_ctrl_cluster(2, &chip->volume);
}
}
if (desc->flags & CHIP_HAS_BASSTREBLE) {
@@ -2059,17 +1985,28 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_info(sd, "bass/treble callbacks undefined!\n");
desc->flags &= ~CHIP_HAS_BASSTREBLE;
} else {
- chip->treble = desc->trebleinit ?
- desc->trebleinit : 32768;
- chip->bass = desc->bassinit ?
- desc->bassinit : 32768;
- chip_write(chip, desc->bassreg,
- desc->bassfunc(chip->bass));
- chip_write(chip, desc->treblereg,
- desc->treblefunc(chip->treble));
+ v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_BASS,
+ 0, 65535, 65535 / 100,
+ desc->bassinit ? desc->bassinit : 32768);
+ v4l2_ctrl_new_std(&chip->hdl,
+ &tvaudio_ctrl_ops, V4L2_CID_AUDIO_TREBLE,
+ 0, 65535, 65535 / 100,
+ desc->trebleinit ? desc->trebleinit : 32768);
}
}
+ sd->ctrl_handler = &chip->hdl;
+ if (chip->hdl.error) {
+ int err = chip->hdl.error;
+
+ v4l2_ctrl_handler_free(&chip->hdl);
+ kfree(chip);
+ return err;
+ }
+ /* set controls to the default values */
+ v4l2_ctrl_handler_setup(&chip->hdl);
+
chip->thread = NULL;
init_timer(&chip->wt);
if (desc->flags & CHIP_NEED_CHECKMODE) {
@@ -2105,6 +2042,7 @@ static int tvaudio_remove(struct i2c_client *client)
}
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&chip->hdl);
kfree(chip);
return 0;
}
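The tvaudio conversion above replaces the driver's private left/right/bass/treble bookkeeping with the control framework: controls are created on a v4l2_ctrl_handler, volume and balance are clustered so a single s_ctrl call sees both values, and v4l2_ctrl_handler_setup() programs the defaults into the chip. A stripped-down sketch of just that wiring, again with invented foo_* names:

/* Sketch of the control-handler/cluster pattern; foo_* names are placeholders. */
#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

struct foo_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	struct {			/* volume/balance cluster */
		struct v4l2_ctrl *volume;
		struct v4l2_ctrl *balance;
	};
};

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo_state *state =
		container_of(ctrl->handler, struct foo_state, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_VOLUME: {
		/* Cluster master: balance->val is guaranteed up to date here. */
		u32 volume = state->volume->val;
		u32 balance = state->balance->val;
		u32 left = (min(65536U - balance, 32768U) * volume) / 32768U;
		u32 right = (min(balance, 32768U) * volume) / 32768U;

		pr_debug("would program left=%u right=%u\n", left, right);
		return 0;
	}
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_init_controls(struct foo_state *state)
{
	v4l2_ctrl_handler_init(&state->hdl, 2);
	state->volume = v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
			V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 58880);
	state->balance = v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
			V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768);
	v4l2_ctrl_cluster(2, &state->volume);	/* volume is the cluster master */
	state->sd.ctrl_handler = &state->hdl;
	if (state->hdl.error)
		return state->hdl.error;
	/* Push the default values to the hardware via foo_s_ctrl(). */
	return v4l2_ctrl_handler_setup(&state->hdl);
}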
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index d5e10215a28f..aa94ebc2d755 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -951,7 +951,7 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENODEV;
}
- decoder = kzalloc(sizeof(*decoder), GFP_KERNEL);
+ decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
@@ -998,7 +998,6 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
int err = decoder->hdl.error;
v4l2_ctrl_handler_free(&decoder->hdl);
- kfree(decoder);
return err;
}
v4l2_ctrl_handler_setup(&decoder->hdl);
@@ -1023,7 +1022,6 @@ static int tvp514x_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- kfree(decoder);
return 0;
}
/* TVP5146 Init/Power on Sequence */
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 31104a960652..5967e1a0c809 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1096,13 +1096,6 @@ static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = {
static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.log_status = tvp5150_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.s_std = tvp5150_s_std,
.reset = tvp5150_reset,
.g_chip_ident = tvp5150_g_chip_ident,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index fb6a5b57eb83..537f6b4d4918 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1036,7 +1036,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
return -ENODEV;
}
- device = kzalloc(sizeof(struct tvp7002), GFP_KERNEL);
+ device = devm_kzalloc(&c->dev, sizeof(struct tvp7002), GFP_KERNEL);
if (!device)
return -ENOMEM;
@@ -1052,7 +1052,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
error = tvp7002_read(sd, TVP7002_CHIP_REV, &revision);
if (error < 0)
- goto found_error;
+ return error;
/* Get revision number */
v4l2_info(sd, "Rev. %02x detected.\n", revision);
@@ -1063,21 +1063,21 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
error = tvp7002_write_inittab(sd, tvp7002_init_default);
if (error < 0)
- goto found_error;
+ return error;
/* Set polarity information after registers have been set */
polarity_a = 0x20 | device->pdata->hs_polarity << 5
| device->pdata->vs_polarity << 2;
error = tvp7002_write(sd, TVP7002_SYNC_CTL_1, polarity_a);
if (error < 0)
- goto found_error;
+ return error;
polarity_b = 0x01 | device->pdata->fid_polarity << 2
| device->pdata->sog_polarity << 1
| device->pdata->clk_polarity;
error = tvp7002_write(sd, TVP7002_MISC_CTL_3, polarity_b);
if (error < 0)
- goto found_error;
+ return error;
/* Set registers according to default video mode */
preset.preset = device->current_preset->preset;
@@ -1091,16 +1091,11 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
int err = device->hdl.error;
v4l2_ctrl_handler_free(&device->hdl);
- kfree(device);
return err;
}
v4l2_ctrl_handler_setup(&device->hdl);
-found_error:
- if (error < 0)
- kfree(device);
-
- return error;
+ return 0;
}
/*
@@ -1120,7 +1115,6 @@ static int tvp7002_remove(struct i2c_client *c)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&device->hdl);
- kfree(device);
return 0;
}
diff --git a/drivers/media/parport/Kconfig b/drivers/media/parport/Kconfig
index ece13dcff07d..948c981d9f05 100644
--- a/drivers/media/parport/Kconfig
+++ b/drivers/media/parport/Kconfig
@@ -9,6 +9,7 @@ if MEDIA_PARPORT_SUPPORT
config VIDEO_BWQCAM
tristate "Quickcam BW Video For Linux"
depends on PARPORT && VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
help
Say Y if you have the black and white version of the QuickCam
camera. See the next option for the color version.
diff --git a/drivers/media/parport/bw-qcam.c b/drivers/media/parport/bw-qcam.c
index 5b75a64b199b..06231b85e1a9 100644
--- a/drivers/media/parport/bw-qcam.c
+++ b/drivers/media/parport/bw-qcam.c
@@ -80,6 +80,7 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
/* One from column A... */
#define QC_NOTSET 0
@@ -107,9 +108,11 @@ struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
struct v4l2_ctrl_handler hdl;
+ struct vb2_queue vb_vidq;
struct pardevice *pdev;
struct parport *pport;
struct mutex lock;
+ struct mutex queue_lock;
int width, height;
int bpp;
int mode;
@@ -418,8 +421,6 @@ static void qc_set(struct qcam *q)
int val;
int val2;
- qc_reset(q);
-
/* Set the brightness. Yes, this is repetitive, but it works.
* Shorter versions seem to fail subtly. Feel free to try :-). */
/* I think the problem was in qc_command, not here -- bls */
@@ -558,7 +559,7 @@ static inline int qc_readbytes(struct qcam *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam *q, u8 *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -609,7 +610,7 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
if (o < len) {
u8 ch = invert - buffer[k];
got++;
- put_user(ch << shift, buf + o);
+ buf[o] = ch << shift;
}
}
pixels_read += bytes;
@@ -639,6 +640,67 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
return len;
}
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct qcam *dev = vb2_get_drv_priv(vq);
+
+ if (0 == *nbuffers)
+ *nbuffers = 3;
+ *nplanes = 1;
+ mutex_lock(&dev->lock);
+ if (fmt)
+ sizes[0] = fmt->fmt.pix.width * fmt->fmt.pix.height;
+ else
+ sizes[0] = (dev->width / dev->transfer_scale) *
+ (dev->height / dev->transfer_scale);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+}
+
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct qcam *qcam = vb2_get_drv_priv(vb->vb2_queue);
+ void *vbuf = vb2_plane_vaddr(vb, 0);
+ int size = vb->vb2_queue->plane_sizes[0];
+ int len;
+
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+
+ qc_reset(qcam);
+
+ /* Update the camera parameters if we need to */
+ if (qcam->status & QC_PARAM_CHANGE)
+ qc_set(qcam);
+
+ len = qc_capture(qcam, vbuf, size);
+
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ if (len != size)
+ vb->state = VB2_BUF_STATE_ERROR;
+ vb2_set_plane_payload(vb, 0, len);
+ return 0;
+}
+
+static struct vb2_ops qcam_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .buf_finish = buffer_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
/*
* Video4linux interfacing
*/
@@ -651,7 +713,8 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
- vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -693,6 +756,7 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->priv = 0;
return 0;
}
@@ -718,6 +782,7 @@ static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format
pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->priv = 0;
return 0;
}
@@ -729,6 +794,8 @@ static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
if (ret)
return ret;
+ if (vb2_is_busy(&qcam->vb_vidq))
+ return -EBUSY;
qcam->width = 320;
qcam->height = 240;
if (pix->height == 60)
@@ -742,12 +809,10 @@ static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
else
qcam->bpp = 4;
- mutex_lock(&qcam->lock);
qc_setscanmode(qcam);
/* We must update the camera before we grab. We could
just have changed the grab size */
qcam->status |= QC_PARAM_CHANGE;
- mutex_unlock(&qcam->lock);
return 0;
}
@@ -792,41 +857,12 @@ static int qcam_enum_framesizes(struct file *file, void *fh,
return 0;
}
-static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qcam *qcam = video_drvdata(file);
- int len;
- parport_claim_or_block(qcam->pdev);
-
- mutex_lock(&qcam->lock);
-
- qc_reset(qcam);
-
- /* Update the camera parameters if we need to */
- if (qcam->status & QC_PARAM_CHANGE)
- qc_set(qcam);
-
- len = qc_capture(qcam, buf, count);
-
- mutex_unlock(&qcam->lock);
-
- parport_release(qcam->pdev);
- return len;
-}
-
-static unsigned int qcam_poll(struct file *filp, poll_table *wait)
-{
- return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
-}
-
static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct qcam *qcam =
container_of(ctrl->handler, struct qcam, hdl);
int ret = 0;
- mutex_lock(&qcam->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
qcam->brightness = ctrl->val;
@@ -841,21 +877,19 @@ static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
break;
}
- if (ret == 0) {
- qc_setscanmode(qcam);
+ if (ret == 0)
qcam->status |= QC_PARAM_CHANGE;
- }
- mutex_unlock(&qcam->lock);
return ret;
}
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .poll = qcam_poll,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .read = qcam_read,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
@@ -868,6 +902,14 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -884,6 +926,8 @@ static struct qcam *qcam_init(struct parport *port)
{
struct qcam *qcam;
struct v4l2_device *v4l2_dev;
+ struct vb2_queue *q;
+ int err;
qcam = kzalloc(sizeof(struct qcam), GFP_KERNEL);
if (qcam == NULL)
@@ -907,31 +951,45 @@ static struct qcam *qcam_init(struct parport *port)
V4L2_CID_GAMMA, 0, 255, 1, 105);
if (qcam->hdl.error) {
v4l2_err(v4l2_dev, "couldn't register controls\n");
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
+ goto exit;
+ }
+
+ mutex_init(&qcam->lock);
+ mutex_init(&qcam->queue_lock);
+
+ /* initialize queue */
+ q = &qcam->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->drv_priv = qcam;
+ q->ops = &qcam_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ err = vb2_queue_init(q);
+ if (err < 0) {
+ v4l2_err(v4l2_dev, "couldn't init vb2_queue for %s.\n", port->name);
+ goto exit;
}
+ qcam->vdev.queue = q;
+ qcam->vdev.queue->lock = &qcam->queue_lock;
+
qcam->pport = port;
qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
- v4l2_ctrl_handler_free(&qcam->hdl);
- kfree(qcam);
- return NULL;
+ goto exit;
}
strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
qcam->vdev.v4l2_dev = v4l2_dev;
qcam->vdev.ctrl_handler = &qcam->hdl;
qcam->vdev.fops = &qcam_fops;
+ qcam->vdev.lock = &qcam->lock;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
qcam->vdev.release = video_device_release_empty;
video_set_drvdata(&qcam->vdev, qcam);
- mutex_init(&qcam->lock);
-
qcam->port_mode = (QC_ANY | QC_NOTSET);
qcam->width = 320;
qcam->height = 240;
@@ -945,6 +1003,11 @@ static struct qcam *qcam_init(struct parport *port)
qcam->mode = -1;
qcam->status = QC_PARAM_CHANGE;
return qcam;
+
+exit:
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
}
static int qc_calibrate(struct qcam *q)
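The bw-qcam changes above replace the hand-rolled read()/poll() path with videobuf2: a vmalloc-backed vb2_queue is hung off the video_device, the generic vb2_fop_*/vb2_ioctl_* helpers take over file I/O and the streaming ioctls, and a separate queue_lock serializes the buffer ioctls independently of the main driver lock. A minimal sketch of that wiring for a hypothetical capture driver (3.9-era callback signatures, error handling trimmed):

/* Sketch only; everything foo_* is a placeholder. */
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-vmalloc.h>

struct foo_dev {
	struct video_device vdev;
	struct vb2_queue vb_vidq;
	struct mutex lock;		/* main driver lock */
	struct mutex queue_lock;	/* vb2 queue lock */
};

static int foo_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	if (*nbuffers == 0)
		*nbuffers = 3;
	*nplanes = 1;
	sizes[0] = 320 * 240;	/* one greyscale byte per pixel in this sketch */
	return 0;
}

static void foo_buf_queue(struct vb2_buffer *vb)
{
	/* A real driver captures into the buffer before completing it. */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}

static const struct vb2_ops foo_video_qops = {
	.queue_setup	= foo_queue_setup,
	.buf_queue	= foo_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};

static int foo_init_vb2(struct foo_dev *dev)
{
	struct vb2_queue *q = &dev->vb_vidq;
	int err;

	mutex_init(&dev->lock);
	mutex_init(&dev->queue_lock);

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;
	q->ops = &foo_video_qops;
	q->mem_ops = &vb2_vmalloc_memops;	/* buffers backed by vmalloc() */
	err = vb2_queue_init(q);
	if (err < 0)
		return err;

	dev->vdev.queue = q;
	dev->vdev.queue->lock = &dev->queue_lock; /* serializes queuing ioctls */
	dev->vdev.lock = &dev->lock;		  /* serializes everything else */
	return 0;
}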
diff --git a/drivers/media/pci/bt8xx/Makefile b/drivers/media/pci/bt8xx/Makefile
index 5f06597c6a6e..f9fe7c4e7d53 100644
--- a/drivers/media/pci/bt8xx/Makefile
+++ b/drivers/media/pci/bt8xx/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 45e5d0661b60..ccd18e4ee789 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3835,7 +3835,7 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
{
struct timeval ts;
- do_gettimeofday(&ts);
+ v4l2_get_timestamp(&ts);
if (wakeup->top == wakeup->bottom) {
if (NULL != wakeup->top && curr->top != wakeup->top) {
@@ -3878,7 +3878,7 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
if (NULL == wakeup)
return;
- do_gettimeofday(&ts);
+ v4l2_get_timestamp(&ts);
wakeup->vb.ts = ts;
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = state;
@@ -3949,7 +3949,7 @@ bttv_irq_wakeup_top(struct bttv *btv)
btv->curr.top = NULL;
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);
- do_gettimeofday(&wakeup->vb.ts);
+ v4l2_get_timestamp(&wakeup->vb.ts);
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = VIDEOBUF_DONE;
wake_up(&wakeup->vb.done);
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index 5039b8826e0a..c63c643ed1f8 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -173,7 +173,7 @@ bttv_i2c_sendbytes(struct bttv *btv, const struct i2c_msg *msg, int last)
if (i2c_debug)
pr_cont(" %02x", msg->buf[cnt]);
}
- if (!(xmit & BT878_I2C_NOSTOP))
+ if (i2c_debug && !(xmit & BT878_I2C_NOSTOP))
pr_cont(">\n");
return msg->len;
@@ -366,8 +366,7 @@ int init_bttv_i2c(struct bttv *btv)
strlcpy(btv->c.i2c_adap.name, "bttv",
sizeof(btv->c.i2c_adap.name));
- memcpy(&btv->i2c_algo, &bttv_i2c_algo_bit_template,
- sizeof(bttv_i2c_algo_bit_template));
+ btv->i2c_algo = bttv_i2c_algo_bit_template;
btv->i2c_algo.udelay = i2c_udelay;
btv->i2c_algo.data = btv;
btv->c.i2c_adap.algo_data = &btv->i2c_algo;
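The last bttv-i2c hunk, like several later ones (cx18-i2c, cx23888-ir, cx23885-video, cx88-cards), swaps a memcpy() of a template structure for a plain structure assignment, which performs the same copy but is type-checked by the compiler. A trivial stand-alone illustration of the idiom:

#include <stdio.h>
#include <string.h>

struct algo_template {
	int udelay;
	void *data;
};

static const struct algo_template algo_bit_template = { .udelay = 16 };

int main(void)
{
	struct algo_template algo;

	/* Old style: memcpy(&algo, &algo_bit_template, sizeof(algo_bit_template)); */
	algo = algo_bit_template;	/* whole-struct copy, types must match */
	algo.udelay = 5;
	printf("udelay=%d\n", algo.udelay);
	return 0;
}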
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 7d96fab7d246..0e788fca992c 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -180,11 +180,11 @@ static int ca_get_app_info(struct dst_state *state)
put_command_and_length(&state->messages[0], CA_APP_INFO, length);
// Copy application_type, application_manufacturer and manufacturer_code
- memcpy(&state->messages[4], &state->messages[7], 5);
+ memmove(&state->messages[4], &state->messages[7], 5);
// Set string length and copy string
state->messages[9] = str_length;
- memcpy(&state->messages[10], &state->messages[12], str_length);
+ memmove(&state->messages[10], &state->messages[12], str_length);
return 0;
}
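The two dst_ca hunks above (and the cx18-vbi hunk further down) change memcpy() to memmove() because the source and destination ranges overlap inside the same message buffer, and memcpy() has undefined behaviour for overlapping regions. A small stand-alone demonstration of the kind of in-place shift involved:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "XXXXABCDE";

	/*
	 * Shift "ABCDE" four bytes towards the start of the same buffer.
	 * The ranges overlap, so memmove() is required; memcpy() would be
	 * undefined behaviour here.
	 */
	memmove(&buf[0], &buf[4], 5);
	buf[5] = '\0';
	printf("%s\n", buf);	/* prints "ABCDE" */
	return 0;
}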
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 8e971ff60588..b2c8c3439fea 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -197,7 +197,7 @@ err_exit:
return ret;
}
-static int __init cx18_alsa_load(struct cx18 *cx)
+static int cx18_alsa_load(struct cx18 *cx)
{
struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
struct cx18_stream *s;
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h
index d26e51f94577..e2b2c5b01215 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.h
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h
@@ -20,7 +20,7 @@
* 02111-1307 USA
*/
-int __init snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
+int snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
/* Used by cx18-mailbox to announce the PCM data to the module */
void cx18_alsa_announce_pcm_data(struct snd_cx18_card *card, u8 *pcm_data,
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 4908eb7bcf6c..4af8cd6df95d 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -116,9 +116,6 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
- if (idx >= ARRAY_SIZE(hw_addrs))
- return -1;
-
if (hw == CX18_HW_TUNER) {
/* special tuner group handling */
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
@@ -240,15 +237,13 @@ int init_cx18_i2c(struct cx18 *cx)
for (i = 0; i < 2; i++) {
/* Setup algorithm for adapter */
- memcpy(&cx->i2c_algo[i], &cx18_i2c_algo_template,
- sizeof(struct i2c_algo_bit_data));
+ cx->i2c_algo[i] = cx18_i2c_algo_template;
cx->i2c_algo_cb_data[i].cx = cx;
cx->i2c_algo_cb_data[i].bus_index = i;
cx->i2c_algo[i].data = &cx->i2c_algo_cb_data[i];
/* Setup adapter */
- memcpy(&cx->i2c_adap[i], &cx18_i2c_adap_template,
- sizeof(struct i2c_adapter));
+ cx->i2c_adap[i] = cx18_i2c_adap_template;
cx->i2c_adap[i].algo_data = &cx->i2c_algo[i];
sprintf(cx->i2c_adap[i].name + strlen(cx->i2c_adap[i].name),
" #%d-%d", cx->instance, i);
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index 6d3121ff45a2..add99642f1e2 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -84,7 +84,7 @@ static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
- memcpy(dst + sd + 4, dst + sd + 12, line * 43);
+ memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index eafa1144b17d..b3688aa8acc3 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -25,7 +25,10 @@ config VIDEO_CX23885
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10071 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2063 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2131 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx23885/Makefile b/drivers/media/pci/cx23885/Makefile
index a2cbdcf15a8c..2a2cafb8cf5b 100644
--- a/drivers/media/pci/cx23885/Makefile
+++ b/drivers/media/pci/cx23885/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
obj-$(CONFIG_MEDIA_ALTERA_CI) += altera-ci.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 6277e145f0b8..7e923f8dd2f5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -572,6 +572,39 @@ struct cx23885_board cx23885_boards[] = {
[CX23885_BOARD_PROF_8000] = {
.name = "Prof Revolution DVB-S2 8000",
.portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
+ .name = "Hauppauge WinTV-HVR4400",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_AVERMEDIA_HC81R] = {
+ .name = "AVerTV Hybrid Express Slim HC81R",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61, /* 0xc2 >> 1 */
+ .tuner_bus = 1,
+ .porta = CX23885_ANALOG_VIDEO,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2 |
+ CX25840_NONE0_CH3 |
+ CX25840_NONE1_CH3,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN8_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_VIN7_CH3 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO6,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN1_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_NONE0_CH3 |
+ CX25840_NONE1_CH3,
+ .amux = CX25840_AUDIO6,
+ } },
}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -788,6 +821,26 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x8000,
.subdevice = 0x3034,
.card = CX23885_BOARD_PROF_8000,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc108,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc138,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc12a,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0xc1f8,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ }, {
+ .subvendor = 0x1461,
+ .subdevice = 0xd939,
+ .card = CX23885_BOARD_AVERMEDIA_HC81R,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1012,6 +1065,10 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
altera_ci_tuner_reset(dev, port->nr);
break;
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ /* XC3028L Reset Command */
+ bitmask = 1 << 2;
+ break;
}
if (bitmask) {
@@ -1301,6 +1358,42 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* enable irq */
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ /* GPIO-8 tda10071 demod reset */
+
+ /* Put the parts into reset and back */
+ cx23885_gpio_enable(dev, GPIO_8, 1);
+ cx23885_gpio_clear(dev, GPIO_8);
+ mdelay(100);
+ cx23885_gpio_set(dev, GPIO_8);
+ mdelay(100);
+ break;
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ cx_clear(MC417_CTL, 1);
+ /* GPIO-0,1,2 setup direction as output */
+ cx_set(GP0_IO, 0x00070000);
+ mdelay(10);
+ /* AF9013 demod reset */
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_clear(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ /* demod tune? */
+ cx_clear(GP0_IO, 0x00030003);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00020002);
+ mdelay(10);
+ cx_set(GP0_IO, 0x00010001);
+ mdelay(10);
+ cx_clear(GP0_IO, 0x00020002);
+ /* XC3028L tuner reset */
+ cx_set(GP0_IO, 0x00040004);
+ cx_clear(GP0_IO, 0x00040004);
+ cx_set(GP0_IO, 0x00040004);
+ mdelay(60);
+ break;
}
}
@@ -1378,6 +1471,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_MYGICA_X8507:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1420,6 +1514,7 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
@@ -1464,6 +1559,7 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
@@ -1509,12 +1605,24 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
}
switch (dev->board) {
+ case CX23885_BOARD_AVERMEDIA_HC81R:
+ /* Defaults for VID B */
+ ts1->gen_ctrl_val = 0x4; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ /* Defaults for VID C */
+ /* DREQ_POL, SMODE, PUNC_CLK, MCLK_POL Serial bus + punc clk */
+ ts2->gen_ctrl_val = 0x10e;
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
@@ -1581,6 +1689,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -1636,6 +1749,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_MPX885:
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ case CX23885_BOARD_AVERMEDIA_HC81R:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index f0416a668b4c..268654ac9a9f 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -439,7 +439,7 @@ void cx23885_wakeup(struct cx23885_tsport *port,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 2f5b902e63ae..9c5ed10b2c5e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -57,6 +57,7 @@
#include "netup-init.h"
#include "lgdt3305.h"
#include "atbm8830.h"
+#include "ts2020.h"
#include "ds3000.h"
#include "cx23885-f300.h"
#include "altera-ci.h"
@@ -66,6 +67,8 @@
#include "stv090x.h"
#include "stb6100.h"
#include "stb6100_cfg.h"
+#include "tda10071.h"
+#include "a8293.h"
static unsigned int debug;
@@ -469,6 +472,11 @@ static struct ds3000_config tevii_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config tevii_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static struct cx24116_config dvbworld_cx24116_config = {
.demod_address = 0x05,
};
@@ -493,20 +501,20 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
};
static struct stv090x_config prof_8000_stv090x_config = {
- .device = STV0903,
- .demod_mode = STV090x_SINGLE,
- .clk_mode = STV090x_CLK_EXT,
- .xtal = 27000000,
- .address = 0x6A,
- .ts1_mode = STV090x_TSMODE_PARALLEL_PUNCTURED,
- .repeater_level = STV090x_RPTLEVEL_64,
- .adc1_range = STV090x_ADC_2Vpp,
- .diseqc_envelope_mode = false,
-
- .tuner_get_frequency = stb6100_get_frequency,
- .tuner_set_frequency = stb6100_set_frequency,
- .tuner_set_bandwidth = stb6100_set_bandwidth,
- .tuner_get_bandwidth = stb6100_get_bandwidth,
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+ .xtal = 27000000,
+ .address = 0x6A,
+ .ts1_mode = STV090x_TSMODE_PARALLEL_PUNCTURED,
+ .repeater_level = STV090x_RPTLEVEL_64,
+ .adc1_range = STV090x_ADC_2Vpp,
+ .diseqc_envelope_mode = false,
+
+ .tuner_get_frequency = stb6100_get_frequency,
+ .tuner_set_frequency = stb6100_set_frequency,
+ .tuner_set_bandwidth = stb6100_set_bandwidth,
+ .tuner_get_bandwidth = stb6100_get_bandwidth,
};
static struct stb6100_config prof_8000_stb6100_config = {
@@ -659,6 +667,20 @@ static struct mt2063_config terratec_mt2063_config[] = {
},
};
+static const struct tda10071_config hauppauge_tda10071_config = {
+ .demod_i2c_addr = 0x05,
+ .tuner_i2c_addr = 0x54,
+ .i2c_wr_max = 64,
+ .ts_mode = TDA10071_TS_SERIAL,
+ .spec_inv = 0,
+ .xtal = 40444000, /* 40.444 MHz */
+ .pll_multiplier = 20,
+};
+
+static const struct a8293_config hauppauge_a8293_config = {
+ .i2c_addr = 0x0b,
+};
+
static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -1011,8 +1033,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &i2c_bus->i2c_adap);
fe0->dvb.frontend->ops.set_voltage = f300_set_voltage;
+ }
break;
case CX23885_BOARD_DVBWORLD_2005:
@@ -1242,6 +1267,17 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 4f1055a194b5..7875dfbe09ff 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -89,6 +89,7 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
/*
* The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
@@ -140,6 +141,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
+ case CX23885_BOARD_MYGICA_X8507:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -289,6 +291,13 @@ int cx23885_input_init(struct cx23885_dev *dev)
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
+ case CX23885_BOARD_MYGICA_X8507:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ /* A guess at the remote */
+ rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 1a21926ca412..5991bc8dc158 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -300,7 +300,7 @@ void cx23885_video_wakeup(struct cx23885_dev *dev,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
@@ -509,7 +509,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
- (dev->board == CX23885_BOARD_MYGICA_X8507)) {
+ (dev->board == CX23885_BOARD_MYGICA_X8507) ||
+ (dev->board == CX23885_BOARD_AVERMEDIA_HC81R)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
@@ -1818,8 +1819,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
spin_lock_init(&dev->slock);
/* Initialize VBI template */
- memcpy(&cx23885_vbi_template, &cx23885_video_template,
- sizeof(cx23885_vbi_template));
+ cx23885_vbi_template = cx23885_video_template;
strcpy(cx23885_vbi_template.name, "cx23885-vbi");
dev->tvnorm = cx23885_video_template.current_norm;
@@ -1878,6 +1878,18 @@ int cx23885_video_register(struct cx23885_dev *dev)
};
v4l2_subdev_call(sd, tuner, s_config, &cfg);
}
+
+ if (dev->board == CX23885_BOARD_AVERMEDIA_HC81R) {
+ struct xc2028_ctrl ctrl = {
+ .fname = "xc3028L-v36.fw",
+ .max_len = 64
+ };
+ struct v4l2_priv_tun_config cfg = {
+ .tuner = dev->tuner_type,
+ .priv = &ctrl
+ };
+ v4l2_subdev_call(sd, tuner, s_config, &cfg);
+ }
}
}
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 67f40d31450b..59c322d870f2 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -91,6 +91,8 @@
#define CX23885_BOARD_TEVII_S471 35
#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111 36
#define CX23885_BOARD_PROF_8000 37
+#define CX23885_BOARD_HAUPPAUGE_HVR4400 38
+#define CX23885_BOARD_AVERMEDIA_HC81R 39
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c4bd1e95d33f..d51eed051d59 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1237,13 +1237,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0);
mutex_init(&state->rx_params_lock);
- memcpy(&default_params, &default_rx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_rx_params;
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&state->tx_params_lock);
- memcpy(&default_params, &default_tx_params,
- sizeof(struct v4l2_subdev_ir_parameters));
+ default_params = default_tx_params;
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
} else {
kfifo_free(&state->rx_kfifo);
diff --git a/drivers/media/pci/cx25821/Makefile b/drivers/media/pci/cx25821/Makefile
index 5bf3ea4c1556..caa32b7b51f8 100644
--- a/drivers/media/pci/cx25821/Makefile
+++ b/drivers/media/pci/cx25821/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_CX25821) += cx25821.o
obj-$(CONFIG_VIDEO_CX25821_ALSA) += cx25821-alsa.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 53b16dd70320..d4de021dc844 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -130,7 +130,7 @@ void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index d27fccbf03c4..bb05eca2da29 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -62,6 +62,8 @@ config VIDEO_CX88_DVB
select DVB_STB6000 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB/ATSC cards based on the
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index d3679c3ee248..8619c1becee2 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 0c255248cbcd..e2e0b8faf7a4 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3743,7 +3743,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
cx88_card_list(core, pci);
}
- memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
+ core->board = cx88_boards[core->boardnr];
if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
core->board.num_frontends = 1;
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 19a58754c6e1..39f095c37ffd 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -549,7 +549,7 @@ void cx88_wakeup(struct cx88_core *core,
* up to 32767 buffers in flight... */
if ((s16) (count - buf->count) < 0)
break;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dprintk(2,"[%p/%d] wakeup reg=%d buf=%d\n",buf,buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
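
The do_gettimeofday() conversions here and in cx25821-video.c switch buffer timestamps from wall-clock time, which can jump when the system clock is adjusted, to v4l2_get_timestamp(), which fills a struct timeval from the monotonic clock. A sketch of the idiom (the helper comes from the V4L2 core; the surrounding buffer handling is illustrative):

#include <media/v4l2-common.h>
#include <media/videobuf-core.h>

static void buffer_complete(struct videobuf_buffer *vb)
{
	/* was: do_gettimeofday(&vb->ts); -- wall clock, may jump */
	v4l2_get_timestamp(&vb->ts);	/* monotonic timestamp */
	vb->state = VIDEOBUF_DONE;
}
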
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 666f83b2f3c0..672b267a2d3e 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -58,6 +58,7 @@
#include "stb6100.h"
#include "stb6100_proc.h"
#include "mb86a16.h"
+#include "ts2020.h"
#include "ds3000.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
@@ -264,7 +265,7 @@ static struct mb86a16_config twinhan_vp1027 = {
.demod_address = 0x08,
};
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
@@ -700,6 +701,11 @@ static struct ds3000_config tevii_ds3000_config = {
.set_ts_params = ds3000_set_ts_param,
};
+static struct ts2020_config tevii_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static const struct stv0900_config prof_7301_stv0900_config = {
.demod_address = 0x6a,
/* demod_mode = 0,*/
@@ -1121,7 +1127,7 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* MT352 is on a secondary I2C bus made from some GPIO lines */
fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
@@ -1466,9 +1472,12 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &core->i2c_adap);
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
+ }
break;
case CX88_BOARD_OMICOM_SS4_PCI:
case CX88_BOARD_TBS_8920:
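
With the TS2020 tuner code split out of the ds3000 demodulator driver, boards now attach the demodulator first and then bind the tuner to the returned frontend on the same I2C adapter, which is what the hunk above does. The general shape of the sequence (helper name and error handling are illustrative):

#include "dvb_frontend.h"
#include "ds3000.h"
#include "ts2020.h"

static struct ds3000_config demod_cfg = {
	.demod_address = 0x68,		/* board specific */
};

static struct ts2020_config tuner_cfg = {
	.tuner_address = 0x60,		/* board specific */
	.clk_out_div   = 1,
};

static struct dvb_frontend *attach_ds3000_ts2020(struct i2c_adapter *i2c)
{
	struct dvb_frontend *fe;

	fe = dvb_attach(ds3000_attach, &demod_cfg, i2c);
	if (!fe)
		return NULL;
	/* the tuner piggybacks on the frontend created by the demod */
	dvb_attach(ts2020_attach, fe, &tuner_cfg, i2c);
	return fe;
}
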
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index de0f1af74e41..cf2d69615838 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -139,8 +139,7 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
if (i2c_udelay<5)
i2c_udelay=5;
- memcpy(&core->i2c_algo, &cx8800_i2c_algo_template,
- sizeof(core->i2c_algo));
+ core->i2c_algo = cx8800_i2c_algo_template;
core->i2c_adap.dev.parent = &pci->dev;
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.c b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
index d77f8ecab9d7..deede6e25d94 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
@@ -118,8 +118,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return -ENOMEM;
dev->vp3054 = vp3054_i2c;
- memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
- sizeof(vp3054_i2c->algo));
+ vp3054_i2c->algo = vp3054_i2c_algo_template;
vp3054_i2c->adap.dev.parent = &dev->pci->dev;
strlcpy(vp3054_i2c->adap.name, core->name,
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.h b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
index be99c931dc3e..95d0c60a35e1 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
@@ -30,7 +30,7 @@ struct vp3054_i2c_state {
};
/* ----------------------------------------------------------------------- */
-#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
int vp3054_i2c_probe(struct cx8802_dev *dev);
void vp3054_i2c_remove(struct cx8802_dev *dev);
#else
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ba0dba4a4d22..feff53c0a251 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -363,7 +363,7 @@ struct cx88_core {
unsigned int tuner_formats;
/* config info -- dvb */
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
int (*prev_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
#endif
void (*gate_ctrl)(struct cx88_core *core, int open);
@@ -562,8 +562,7 @@ struct cx8802_dev {
/* for blackbird only */
struct list_head devlist;
-#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
- defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD)
struct video_device *mpeg_dev;
u32 mailbox;
int width;
@@ -574,13 +573,12 @@ struct cx8802_dev {
struct cx2341x_handler cxhdl;
#endif
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
/* for dvb only */
struct videobuf_dvb_frontends frontends;
#endif
-#if defined(CONFIG_VIDEO_CX88_VP3054) || \
- defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* For VP3054 secondary I2C bus support */
struct vp3054_i2c_state *vp3054;
#endif
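
The preprocessor conversions above (and the matching ones in cx88-dvb.c and saa7134.h below) all rely on IS_ENABLED() from <linux/kconfig.h>, which is true when a Kconfig symbol is built in or modular, so the hand-rolled defined() tests collapse into one readable check:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/*
 * Compiled when the option is 'y' or 'm'. This replaces the old idiom
 *   #if defined(CONFIG_VIDEO_CX88_VP3054) || \
 *       defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
 */
#endif
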
diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig
index 013df4e015cd..173daf0c0847 100644
--- a/drivers/media/pci/dm1105/Kconfig
+++ b/drivers/media/pci/dm1105/Kconfig
@@ -8,6 +8,7 @@ config DVB_DM1105
select DVB_CX24116 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI21XX if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
depends on RC_CORE
help
Support for cards based on the SDMC DM1105 PCI chip like
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 904c3ea350f5..026767bed5cd 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -45,6 +45,7 @@
#include "si21xx.h"
#include "cx24116.h"
#include "z0194a.h"
+#include "ts2020.h"
#include "ds3000.h"
#define MODULE_NAME "dm1105"
@@ -849,6 +850,11 @@ static struct ds3000_config dvbworld_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config dvbworld_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
static int frontend_init(struct dm1105_dev *dev)
{
int ret;
@@ -898,8 +904,11 @@ static int frontend_init(struct dm1105_dev *dev)
dev->fe = dvb_attach(
ds3000_attach, &dvbworld_ds3000_config,
&dev->i2c_adap);
- if (dev->fe)
+ if (dev->fe) {
+ dvb_attach(ts2020_attach, dev->fe,
+ &dvbworld_ts2020_config, &dev->i2c_adap);
dev->fe->ops.set_voltage = dm1105_set_voltage;
+ }
break;
case DM1105_BOARD_DVBWORLD_2002:
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 4a221c693995..e970cface70e 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -205,7 +205,7 @@ err_exit:
return ret;
}
-static int __init ivtv_alsa_load(struct ivtv *itv)
+static int ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 23dfe0d12400..186814e0b2d4 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -20,4 +20,4 @@
* 02111-1307 USA
*/
-int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
+int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
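
Dropping __init from ivtv_alsa_load() and snd_ivtv_pcm_create() is needed because, presumably, these functions are called per ivtv device rather than only once at module-init time, so they cannot live in sections that are discarded after initialisation. Schematically:

#include <linux/init.h>

/* Discarded after initialisation: only reachable from init code. */
static int __init one_time_setup(void)
{
	return 0;
}

/* Reachable from ordinary driver paths later on: no __init allowed. */
static int runtime_load(void)
{
	return 0;
}
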
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index df88dc4ab555..2928e7287da8 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -304,7 +304,7 @@ static void request_modules(struct ivtv *dev)
static void flush_request_modules(struct ivtv *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 46e262becb67..ceed2d87abfd 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -267,8 +267,6 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
- if (idx >= ARRAY_SIZE(hw_addrs))
- return -1;
if (hw == IVTV_HW_TUNER) {
/* special tuner handling */
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
@@ -719,13 +717,10 @@ int init_ivtv_i2c(struct ivtv *itv)
return -ENODEV;
}
if (itv->options.newi2c > 0) {
- memcpy(&itv->i2c_adap, &ivtv_i2c_adap_hw_template,
- sizeof(struct i2c_adapter));
+ itv->i2c_adap = ivtv_i2c_adap_hw_template;
} else {
- memcpy(&itv->i2c_adap, &ivtv_i2c_adap_template,
- sizeof(struct i2c_adapter));
- memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template,
- sizeof(struct i2c_algo_bit_data));
+ itv->i2c_adap = ivtv_i2c_adap_template;
+ itv->i2c_algo = ivtv_i2c_algo_template;
}
itv->i2c_algo.udelay = itv->options.i2c_clock_period / 2;
itv->i2c_algo.data = itv;
@@ -735,8 +730,7 @@ int init_ivtv_i2c(struct ivtv *itv)
itv->instance);
i2c_set_adapdata(&itv->i2c_adap, &itv->v4l2_dev);
- memcpy(&itv->i2c_client, &ivtv_i2c_client_template,
- sizeof(struct i2c_client));
+ itv->i2c_client = ivtv_i2c_client_template;
itv->i2c_client.adapter = &itv->i2c_adap;
itv->i2c_adap.dev.parent = &itv->pdev->dev;
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 293db806d936..3c156bc70fb4 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -224,7 +224,7 @@ static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
- memcpy(dst + sd + 4, dst + sd + 12, line * 43);
+ memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
@@ -532,7 +532,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
- memcpy(vi->cc_payload, vi->cc_payload + 1,
+ memmove(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
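
Both hunks shift data downwards inside the same buffer, so source and destination overlap; memcpy() has undefined behaviour for overlapping regions, and memmove() is the correct primitive. A stand-alone example of the same fix:

#include <linux/string.h>

#define QLEN	16

static int payload[QLEN];

/* Drop payload[0] and shift the remaining entries down one slot;
 * the source and destination regions overlap, so use memmove(). */
static void pop_front(void)
{
	memmove(payload, payload + 1, (QLEN - 1) * sizeof(payload[0]));
}
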
diff --git a/drivers/media/pci/mantis/mantis_ca.c b/drivers/media/pci/mantis/mantis_ca.c
index 3d7046909009..60c6c2f24066 100644
--- a/drivers/media/pci/mantis/mantis_ca.c
+++ b/drivers/media/pci/mantis/mantis_ca.c
@@ -198,11 +198,12 @@ void mantis_ca_exit(struct mantis_pci *mantis)
struct mantis_ca *ca = mantis->mantis_ca;
dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
+ if (!ca)
+ return;
mantis_evmgr_exit(ca);
dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
- if (ca)
- dvb_ca_en50221_release(&ca->en50221);
+ dvb_ca_en50221_release(&ca->en50221);
kfree(ca);
}
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 049e18667cd0..7859c43479d7 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -35,6 +35,8 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/delay.h>
@@ -811,7 +813,7 @@ again:
mchip_hsize() * mchip_vsize() * 2);
meye.grab_buffer[reqnr].size = mchip_hsize() * mchip_vsize() * 2;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
+ v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -832,7 +834,7 @@ again:
size);
meye.grab_buffer[reqnr].size = size;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
+ v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -865,7 +867,7 @@ static int meye_open(struct file *file)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
kfifo_reset(&meye.grabq);
kfifo_reset(&meye.doneq);
- return 0;
+ return v4l2_fh_open(file);
}
static int meye_release(struct file *file)
@@ -873,7 +875,7 @@ static int meye_release(struct file *file)
mchip_hic_stop();
mchip_dma_free();
clear_bit(0, &meye.in_use);
- return 0;
+ return v4l2_fh_release(file);
}
static int meyeioc_g_params(struct meye_params *p)
@@ -1032,8 +1034,9 @@ static int vidioc_querycap(struct file *file, void *fh,
cap->version = (MEYE_DRIVER_MAJORVERSION << 8) +
MEYE_DRIVER_MINORVERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1063,191 +1066,50 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *c)
-{
- switch (c->id) {
-
- case V4L2_CID_BRIGHTNESS:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Brightness");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_HUE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Hue");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_CONTRAST:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Contrast");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_SATURATION:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Saturation");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
- c->flags = 0;
- break;
- case V4L2_CID_AGC:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Agc");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 48;
- c->flags = 0;
- break;
- case V4L2_CID_MEYE_SHARPNESS:
- case V4L2_CID_SHARPNESS:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Sharpness");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 32;
-
- /* Continue to report legacy private SHARPNESS ctrl but
- * say it is disabled in preference to ctrl in the spec
- */
- c->flags = (c->id == V4L2_CID_SHARPNESS) ? 0 :
- V4L2_CTRL_FLAG_DISABLED;
- break;
- case V4L2_CID_PICTURE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Picture");
- c->minimum = 0;
- c->maximum = 63;
- c->step = 1;
- c->default_value = 0;
- c->flags = 0;
- break;
- case V4L2_CID_JPEGQUAL:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "JPEG quality");
- c->minimum = 0;
- c->maximum = 10;
- c->step = 1;
- c->default_value = 8;
- c->flags = 0;
- break;
- case V4L2_CID_FRAMERATE:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Framerate");
- c->minimum = 0;
- c->maximum = 31;
- c->step = 1;
- c->default_value = 0;
- c->flags = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int meye_s_ctrl(struct v4l2_ctrl *ctrl)
{
mutex_lock(&meye.lock);
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, c->value);
- meye.brightness = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, ctrl->val);
+ meye.brightness = ctrl->val << 10;
break;
case V4L2_CID_HUE:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAHUE, c->value);
- meye.hue = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERAHUE, ctrl->val);
+ meye.hue = ctrl->val << 10;
break;
case V4L2_CID_CONTRAST:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERACONTRAST, c->value);
- meye.contrast = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERACONTRAST, ctrl->val);
+ meye.contrast = ctrl->val << 10;
break;
case V4L2_CID_SATURATION:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERACOLOR, c->value);
- meye.colour = c->value << 10;
+ SONY_PIC_COMMAND_SETCAMERACOLOR, ctrl->val);
+ meye.colour = ctrl->val << 10;
break;
- case V4L2_CID_AGC:
+ case V4L2_CID_MEYE_AGC:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAAGC, c->value);
- meye.params.agc = c->value;
+ SONY_PIC_COMMAND_SETCAMERAAGC, ctrl->val);
+ meye.params.agc = ctrl->val;
break;
case V4L2_CID_SHARPNESS:
- case V4L2_CID_MEYE_SHARPNESS:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERASHARPNESS, c->value);
- meye.params.sharpness = c->value;
+ SONY_PIC_COMMAND_SETCAMERASHARPNESS, ctrl->val);
+ meye.params.sharpness = ctrl->val;
break;
- case V4L2_CID_PICTURE:
+ case V4L2_CID_MEYE_PICTURE:
sony_pic_camera_command(
- SONY_PIC_COMMAND_SETCAMERAPICTURE, c->value);
- meye.params.picture = c->value;
+ SONY_PIC_COMMAND_SETCAMERAPICTURE, ctrl->val);
+ meye.params.picture = ctrl->val;
break;
- case V4L2_CID_JPEGQUAL:
- meye.params.quality = c->value;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ meye.params.quality = ctrl->val;
break;
- case V4L2_CID_FRAMERATE:
- meye.params.framerate = c->value;
- break;
- default:
- mutex_unlock(&meye.lock);
- return -EINVAL;
- }
- mutex_unlock(&meye.lock);
-
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- mutex_lock(&meye.lock);
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = meye.brightness >> 10;
- break;
- case V4L2_CID_HUE:
- c->value = meye.hue >> 10;
- break;
- case V4L2_CID_CONTRAST:
- c->value = meye.contrast >> 10;
- break;
- case V4L2_CID_SATURATION:
- c->value = meye.colour >> 10;
- break;
- case V4L2_CID_AGC:
- c->value = meye.params.agc;
- break;
- case V4L2_CID_SHARPNESS:
- case V4L2_CID_MEYE_SHARPNESS:
- c->value = meye.params.sharpness;
- break;
- case V4L2_CID_PICTURE:
- c->value = meye.params.picture;
- break;
- case V4L2_CID_JPEGQUAL:
- c->value = meye.params.quality;
- break;
- case V4L2_CID_FRAMERATE:
- c->value = meye.params.framerate;
+ case V4L2_CID_MEYE_FRAMERATE:
+ meye.params.framerate = ctrl->val;
break;
default:
mutex_unlock(&meye.lock);
@@ -1426,7 +1288,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
return -EINVAL;
buf->bytesused = meye.grab_buffer[index].size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (meye.grab_buffer[index].state == MEYE_BUF_USING)
buf->flags |= V4L2_BUF_FLAG_QUEUED;
@@ -1499,7 +1361,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->index = reqnr;
buf->bytesused = meye.grab_buffer[reqnr].size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
buf->timestamp = meye.grab_buffer[reqnr].timestamp;
buf->sequence = meye.grab_buffer[reqnr].sequence;
@@ -1577,12 +1439,12 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
static unsigned int meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = 0;
+ unsigned int res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
if (kfifo_len(&meye.doneq))
- res = POLLIN | POLLRDNORM;
+ res |= POLLIN | POLLRDNORM;
mutex_unlock(&meye.lock);
return res;
}
@@ -1669,9 +1531,6 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1682,6 +1541,9 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = {
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_default = vidioc_default,
};
@@ -1692,6 +1554,10 @@ static struct video_device meye_template = {
.release = video_device_release,
};
+static const struct v4l2_ctrl_ops meye_ctrl_ops = {
+ .s_ctrl = meye_s_ctrl,
+};
+
#ifdef CONFIG_PM
static int meye_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1730,6 +1596,32 @@ static int meye_resume(struct pci_dev *pdev)
static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
+ static const struct v4l2_ctrl_config ctrl_agc = {
+ .id = V4L2_CID_MEYE_AGC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "AGC",
+ .max = 63,
+ .step = 1,
+ .def = 48,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ };
+ static const struct v4l2_ctrl_config ctrl_picture = {
+ .id = V4L2_CID_MEYE_PICTURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "Picture",
+ .max = 63,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_framerate = {
+ .id = V4L2_CID_MEYE_FRAMERATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .ops = &meye_ctrl_ops,
+ .name = "Framerate",
+ .max = 31,
+ .step = 1,
+ };
struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
unsigned long mchip_adr;
@@ -1833,24 +1725,31 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
mutex_init(&meye.lock);
init_waitqueue_head(&meye.proc_list);
- meye.brightness = 32 << 10;
- meye.hue = 32 << 10;
- meye.colour = 32 << 10;
- meye.contrast = 32 << 10;
- meye.params.subsample = 0;
- meye.params.quality = 8;
- meye.params.sharpness = 32;
- meye.params.agc = 48;
- meye.params.picture = 0;
- meye.params.framerate = 0;
-
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAHUE, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERACOLOR, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERACONTRAST, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERASHARPNESS, 32);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0);
- sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48);
+
+ v4l2_ctrl_handler_init(&meye.hdl, 3);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_HUE, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, 32);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 63, 1, 32);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_agc, NULL);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 63, 1, 32);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_picture, NULL);
+ v4l2_ctrl_new_std(&meye.hdl, &meye_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 10, 1, 8);
+ v4l2_ctrl_new_custom(&meye.hdl, &ctrl_framerate, NULL);
+ if (meye.hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ goto outvideoreg;
+ }
+
+ v4l2_ctrl_handler_setup(&meye.hdl);
+ meye.vdev->ctrl_handler = &meye.hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &meye.vdev->flags);
if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
@@ -1866,6 +1765,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
return 0;
outvideoreg:
+ v4l2_ctrl_handler_free(&meye.hdl);
free_irq(meye.mchip_irq, meye_irq);
outreqirq:
iounmap(meye.mchip_mmregs);
diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h
index 4bdeb03f1644..6fed9274cfa5 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/media/pci/meye/meye.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kfifo.h>
+#include <media/v4l2-ctrls.h>
/****************************************************************************/
/* Motion JPEG chip registers */
@@ -290,6 +291,7 @@ struct meye_grab_buffer {
/* Motion Eye device structure */
struct meye {
struct v4l2_device v4l2_dev; /* Main v4l2_device struct */
+ struct v4l2_ctrl_handler hdl;
struct pci_dev *mchip_dev; /* pci device */
u8 mchip_irq; /* irq */
u8 mchip_mode; /* actual mchip mode: HIC_MODE... */
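
The meye changes above are a conversion to the V4L2 control framework: the hand-written queryctrl/g_ctrl/s_ctrl ioctls are deleted, standard controls are created with v4l2_ctrl_new_std(), driver-private ones with v4l2_ctrl_new_custom(), and the driver supplies only a single s_ctrl callback. A trimmed-down sketch of the same wiring (one control, error handling reduced):

#include <linux/errno.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>

static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* push ctrl->val to the hardware here */
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct v4l2_ctrl_ops demo_ctrl_ops = {
	.s_ctrl = demo_s_ctrl,
};

static int demo_register_controls(struct v4l2_ctrl_handler *hdl,
				  struct video_device *vdev)
{
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_std(hdl, &demo_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 63, 1, 32);
	if (hdl->error)
		return hdl->error;
	vdev->ctrl_handler = hdl;		/* core now serves the ioctls */
	return v4l2_ctrl_handler_setup(hdl);	/* write initial values to hw */
}
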
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index fad214113669..9e82d2105d53 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -327,6 +327,14 @@ static int demod_attach_drxd(struct ngene_channel *chan)
pr_err("No DRXD found!\n");
return -ENODEV;
}
+ return 0;
+}
+
+static int tuner_attach_dtt7520x(struct ngene_channel *chan)
+{
+ struct drxd_config *feconf;
+
+ feconf = chan->dev->card_info->fe_config[chan->number];
if (!dvb_attach(dvb_pll_attach, chan->fe, feconf->pll_address,
&chan->i2c_adapter,
@@ -724,6 +732,7 @@ static struct ngene_info ngene_info_terratec = {
.name = "Terratec Integra/Cinergy2400i Dual DVB-T",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
.demod_attach = {demod_attach_drxd, demod_attach_drxd},
+ .tuner_attach = {tuner_attach_dtt7520x, tuner_attach_dtt7520x},
.fe_config = {&fe_terratec_dvbt_0, &fe_terratec_dvbt_1},
.i2c_access = 1,
};
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index bc08f1dbc293..dc68cf1070f7 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5773,6 +5773,23 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0000000,
},
},
+ [SAA7134_BOARD_HAWELL_HW_9004V1] = {
+ /* Hawell HW-9004V1 */
+ /* Vadim Frolov <fralik@gmail.com> */
+ .name = "Hawell HW-9004V1",
+ .audio_clock = 0x00200000,
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x618E700,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x6010000,
+ } },
+ },
};
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e359d200d698..8fd24e7c9403 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -308,7 +308,7 @@ void saa7134_buffer_finish(struct saa7134_dev *dev,
/* finish current buffer */
q->curr->vb.state = state;
- do_gettimeofday(&q->curr->vb.ts);
+ v4l2_get_timestamp(&q->curr->vb.ts);
wake_up(&q->curr->vb.done);
q->curr = NULL;
}
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index b209de40a4f8..27915e501db9 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -607,6 +607,9 @@ static int configure_tda827x_fe(struct saa7134_dev *dev,
/* Get the first frontend */
fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ if (!fe0)
+ return -EINVAL;
+
fe0->dvb.frontend = dvb_attach(tda10046_attach, cdec_conf, &dev->i2c_adap);
if (fe0->dvb.frontend) {
if (cdec_conf->i2c_gate)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 3abf52711e13..7c503fb68526 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2248,6 +2248,17 @@ static int saa7134_streamon(struct file *file, void *priv,
if (!res_get(dev, fh, res))
return -EBUSY;
+ /* The SAA7134 has a 1K FIFO; the datasheet suggests that when
+ * configured conservatively, there's 22 usec of buffering for video.
+ * We therefore request a DMA latency of 20 usec, giving us 2 usec of
+ * margin in case the FIFO is configured differently to the datasheet.
+ * Unfortunately, I lack register-level documentation to check the
+ * Linux FIFO setup and confirm the perfect value.
+ */
+ pm_qos_add_request(&fh->qos_request,
+ PM_QOS_CPU_DMA_LATENCY,
+ 20);
+
return videobuf_streamon(saa7134_queue(fh));
}
@@ -2259,6 +2270,8 @@ static int saa7134_streamoff(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int res = saa7134_resource(fh);
+ pm_qos_remove_request(&fh->qos_request);
+
err = videobuf_streamoff(saa7134_queue(fh));
if (err < 0)
return err;
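
The streamon/streamoff hunks bracket capture with a CPU DMA-latency constraint, so that while DMA into the small SAA7134 FIFO is running the CPU does not enter sleep states whose wake-up latency would exceed the roughly 22 usec the FIFO can absorb. In isolation the pattern is:

#include <linux/pm_qos.h>

static struct pm_qos_request capture_qos;

static void capture_start(void)
{
	/* keep CPU wake-up latency at or below 20 usec while streaming */
	pm_qos_add_request(&capture_qos, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void capture_stop(void)
{
	pm_qos_remove_request(&capture_qos);
}
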
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 075908fae4d9..71eefef5e324 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/pm_qos.h>
#include <asm/io.h>
@@ -41,7 +42,7 @@
#include <media/videobuf-dma-sg.h>
#include <sound/core.h>
#include <sound/pcm.h>
-#if defined(CONFIG_VIDEO_SAA7134_DVB) || defined(CONFIG_VIDEO_SAA7134_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
#include <media/videobuf-dvb.h>
#endif
@@ -332,6 +333,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_SENSORAY811_911 188
#define SAA7134_BOARD_KWORLD_PC150U 189
#define SAA7134_BOARD_ASUSTeK_PS3_100 190
+#define SAA7134_BOARD_HAWELL_HW_9004V1 191
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -469,6 +471,7 @@ struct saa7134_fh {
enum v4l2_buf_type type;
unsigned int resources;
enum v4l2_priority prio;
+ struct pm_qos_request qos_request;
/* video overlay */
struct v4l2_window win;
@@ -642,7 +645,7 @@ struct saa7134_dev {
struct work_struct empress_workqueue;
int empress_started;
-#if defined(CONFIG_VIDEO_SAA7134_DVB) || defined(CONFIG_VIDEO_SAA7134_DVB_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
/* SAA7134_MPEG_DVB only */
struct videobuf_dvb_frontends frontends;
int (*original_demod_sleep)(struct dvb_frontend *fe);
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 994018e2d0d6..9bb0903ee5f1 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1298,6 +1298,7 @@ static int saa7164_g_chip_ident(struct file *file, void *fh,
return 0;
}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
static int saa7164_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
@@ -1323,6 +1324,7 @@ static int saa7164_s_register(struct file *file, void *fh,
return 0;
}
+#endif
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_s_std = vidioc_s_std,
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 6749f67cab8a..a94ccad02066 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -2,7 +2,7 @@ config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
depends on STA2X11
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
help
Say Y for support for STA2X11 VIP (Video Input Port) capture
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 27ae48842656..4b703fe8c953 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1,7 +1,11 @@
/*
* This is the driver for the STA2x11 Video Input Port.
*
+ * Copyright (C) 2012 ST Microelectronics
+ * author: Federico Vaga <federico.vaga@gmail.com>
* Copyright (C) 2010 WindRiver Systems, Inc.
+ * authors: Andreas Kies <andreas.kies@windriver.com>
+ * Vlad Lungu <vlad.lungu@windriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,36 +23,30 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Author: Andreas Kies <andreas.kies@windriver.com>
- * Vlad Lungu <vlad.lungu@windriver.com>
- *
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/vmalloc.h>
-
#include <linux/videodev2.h>
-
#include <linux/kmod.h>
-
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
#include "sta2x11_vip.h"
-#define DRV_NAME "sta2x11_vip"
#define DRV_VERSION "1.3"
#ifndef PCI_DEVICE_ID_STMICRO_VIP
@@ -63,8 +61,8 @@
#define DVP_TFS 0x08
#define DVP_BFO 0x0C
#define DVP_BFS 0x10
-#define DVP_VTP 0x14
-#define DVP_VBP 0x18
+#define DVP_VTP 0x14
+#define DVP_VBP 0x18
#define DVP_VMP 0x1C
#define DVP_ITM 0x98
#define DVP_ITS 0x9C
@@ -84,13 +82,21 @@
#define DVP_HLFLN_SD 0x00000001
-#define REG_WRITE(vip, reg, value) iowrite32((value), (vip->iomem)+(reg))
-#define REG_READ(vip, reg) ioread32((vip->iomem)+(reg))
-
#define SAVE_COUNT 8
#define AUX_COUNT 3
#define IRQ_COUNT 1
+
+struct vip_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t dma;
+};
+static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2)
+{
+ return container_of(vb2, struct vip_buffer, vb);
+}
+
/**
* struct sta2x11_vip - All internal data for one instance of device
* @v4l2_dev: device registered in v4l layer
@@ -99,29 +105,26 @@
* @adapter: contains I2C adapter information
* @register_save_area: All relevant register are saved here during suspend
* @decoder: contains information about video DAC
+ * @ctrl_hdl: handler for control framework
* @format: pixel format, fixed UYVY
* @std: video standard (e.g. PAL/NTSC)
* @input: input line for video signal ( 0 or 1 )
- * @users: Number of open of device ( max. 1 )
* @disabled: Device is in power down state
- * @mutex: ensures exclusive opening of device
 * @slock: for exclusive access to registers
- * @vb_vidq: queue maintained by videobuf layer
- * @capture: linked list of capture buffer
- * @active: struct videobuf_buffer currently beingg filled
- * @started: device is ready to capture frame
- * @closing: device will be shut down
+ * @alloc_ctx: context for videobuf2
+ * @vb_vidq: queue maintained by videobuf2 layer
+ * @buffer_list: list of buffers in use
+ * @sequence: sequence number of acquired buffer
+ * @active: current active buffer
+ * @lock: used in videobuf2 callback
* @tcount: Number of top frames
* @bcount: Number of bottom frames
* @overflow: Number of FIFO overflows
- * @mem_spare: small buffer of unused frame
- * @dma_spare: dma addres of mem_spare
* @iomem: hardware base address
* @config: I2C and gpio config from platform
*
* All non-local data is accessed via this structure.
*/
-
struct sta2x11_vip {
struct v4l2_device v4l2_dev;
struct video_device *video_dev;
@@ -129,21 +132,27 @@ struct sta2x11_vip {
struct i2c_adapter *adapter;
unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
struct v4l2_subdev *decoder;
+ struct v4l2_ctrl_handler ctrl_hdl;
+
+
struct v4l2_pix_format format;
v4l2_std_id std;
unsigned int input;
- int users;
int disabled;
- struct mutex mutex; /* exclusive access during open */
- spinlock_t slock; /* spin lock for hardware and queue access */
- struct videobuf_queue vb_vidq;
- struct list_head capture;
- struct videobuf_buffer *active;
- int started, closing, tcount, bcount;
+ spinlock_t slock;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct vb2_queue vb_vidq;
+ struct list_head buffer_list;
+ unsigned int sequence;
+ struct vip_buffer *active; /* current active buffer */
+ spinlock_t lock; /* Used in videobuf2 callback */
+
+ /* Interrupt counters */
+ int tcount, bcount;
int overflow;
- void *mem_spare;
- dma_addr_t dma_spare;
- void *iomem;
+
+ void *iomem; /* I/O Memory */
struct vip_config *config;
};
@@ -206,318 +215,195 @@ static struct v4l2_pix_format formats_60[] = {
.colorspace = V4L2_COLORSPACE_SMPTE170M},
};
-/**
- * buf_setup - Get size and number of video buffer
- * @vq: queue in videobuf
- * @count: Number of buffers (1..MAX_FRAMES).
- * 0 use default value.
- * @size: size of buffer in bytes
- *
- * returns size and number of buffers
- * a preset value of 0 returns the default number.
- * return value: 0, always succesfull.
- */
-static int buf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+/* Write VIP register */
+static inline void reg_write(struct sta2x11_vip *vip, unsigned int reg, u32 val)
{
- struct sta2x11_vip *vip = vq->priv_data;
-
- *size = vip->format.width * vip->format.height * 2;
- if (0 == *count || MAX_FRAMES < *count)
- *count = MAX_FRAMES;
- return 0;
-};
-
-/**
- * buf_prepare - prepare buffer for usage
- * @vq: queue in videobuf layer
- * @vb: buffer to be prepared
- * @field: type of video data (interlaced/non-interlaced)
- *
- * Allocate or realloc buffer
- * return value: 0, successful.
- *
- * -EINVAL, supplied buffer is too small.
- *
- * other, buffer could not be locked.
- */
-static int buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+ iowrite32((val), (vip->iomem)+(reg));
+}
+/* Read VIP register */
+static inline u32 reg_read(struct sta2x11_vip *vip, unsigned int reg)
{
- struct sta2x11_vip *vip = vq->priv_data;
- int ret;
-
- vb->size = vip->format.width * vip->format.height * 2;
- if ((0 != vb->baddr) && (vb->bsize < vb->size))
- return -EINVAL;
- vb->width = vip->format.width;
- vb->height = vip->format.height;
- vb->field = field;
-
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret)
- goto fail;
- }
- vb->state = VIDEOBUF_PREPARED;
- return 0;
-fail:
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
- return ret;
+ return ioread32((vip->iomem)+(reg));
}
-
-/**
- * buf_queu - queue buffer for filling
- * @vq: queue in videobuf layer
- * @vb: buffer to be queued
- *
- * if capturing is already running, the buffer will be queued. Otherwise
- * capture is started and the buffer is used directly.
- */
-static void buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+/* Start DMA acquisition */
+static void start_dma(struct sta2x11_vip *vip, struct vip_buffer *vip_buf)
{
- struct sta2x11_vip *vip = vq->priv_data;
- u32 dma;
+ unsigned long offset = 0;
+
+ if (vip->format.field == V4L2_FIELD_INTERLACED)
+ offset = vip->format.width * 2;
- vb->state = VIDEOBUF_QUEUED;
+ spin_lock_irq(&vip->slock);
+ /* Enable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) | DVP_CTL_ENA);
+ /* Set Top and Bottom Field memory address */
+ reg_write(vip, DVP_VTP, (u32)vip_buf->dma);
+ reg_write(vip, DVP_VBP, (u32)vip_buf->dma + offset);
+ spin_unlock_irq(&vip->slock);
+}
- if (vip->active) {
- list_add_tail(&vb->queue, &vip->capture);
+/* Fetch the next buffer to activate */
+static void vip_active_buf_next(struct sta2x11_vip *vip)
+{
+ /* Get the next buffer */
+ spin_lock(&vip->lock);
+ if (list_empty(&vip->buffer_list)) { /* No available buffer */
+ spin_unlock(&vip->lock);
return;
}
-
- vip->started = 1;
+ vip->active = list_first_entry(&vip->buffer_list,
+ struct vip_buffer,
+ list);
+ /* Reset Top and Bottom counter */
vip->tcount = 0;
vip->bcount = 0;
- vip->active = vb;
- vb->state = VIDEOBUF_ACTIVE;
+ spin_unlock(&vip->lock);
+ if (vb2_is_streaming(&vip->vb_vidq)) { /* streaming is on */
+ start_dma(vip, vip->active); /* start dma capture */
+ }
+}
- dma = videobuf_to_dma_contig(vb);
- REG_WRITE(vip, DVP_TFO, (0 << 16) | (0));
- /* despite of interlace mode, upper and lower frames start at zero */
- REG_WRITE(vip, DVP_BFO, (0 << 16) | (0));
+/* Videobuf2 Operations */
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- REG_WRITE(vip, DVP_TFS,
- ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS, ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
- REG_WRITE(vip, DVP_VMP, 4 * vip->format.width);
- break;
- case V4L2_FIELD_TOP:
- REG_WRITE(vip, DVP_TFS,
- ((vip->format.height - 1) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS, ((0) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
- break;
- case V4L2_FIELD_BOTTOM:
- REG_WRITE(vip, DVP_TFS, ((0) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_BFS,
- ((vip->format.height) << 16) |
- (2 * vip->format.width - 1));
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
- break;
+ if (!(*nbuffers) || *nbuffers < MAX_FRAMES)
+ *nbuffers = MAX_FRAMES;
- default:
- pr_warning("VIP: unknown field format\n");
- return;
- }
+ *nplanes = 1;
+ sizes[0] = vip->format.sizeimage;
+ alloc_ctxs[0] = vip->alloc_ctx;
- REG_WRITE(vip, DVP_CTL, DVP_CTL_ENA);
-}
+ vip->sequence = 0;
+ vip->active = NULL;
+ vip->tcount = 0;
+ vip->bcount = 0;
-/**
- * buff_release - release buffer
- * @vq: queue in videobuf layer
- * @vb: buffer to be released
- *
- * release buffer in videobuf layer
- */
-static void buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+ return 0;
+};
+static int buffer_init(struct vb2_buffer *vb)
{
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+ vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
+ INIT_LIST_HEAD(&vip_buf->list);
+ return 0;
}
-static struct videobuf_queue_ops vip_qops = {
- .buf_setup = buf_setup,
- .buf_prepare = buf_prepare,
- .buf_queue = buf_queue,
- .buf_release = buf_release,
-};
-
-/**
- * vip_open - open video device
- * @file: descriptor of device
- *
- * open device, make sure it is only opened once.
- * return value: 0, no error.
- *
- * -EBUSY, device is already opened
- *
- * -ENOMEM, no memory for auxiliary DMA buffer
- */
-static int vip_open(struct file *file)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct video_device *dev = video_devdata(file);
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
+ unsigned long size;
+
+ size = vip->format.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&vip->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
- mutex_lock(&vip->mutex);
- vip->users++;
+ vb2_set_plane_payload(&vip_buf->vb, 0, size);
- if (vip->users > 1) {
- vip->users--;
- mutex_unlock(&vip->mutex);
- return -EBUSY;
+ return 0;
+}
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
+
+ spin_lock(&vip->lock);
+ list_add_tail(&vip_buf->list, &vip->buffer_list);
+ if (!vip->active) { /* No active buffer, activate the first one */
+ vip->active = list_first_entry(&vip->buffer_list,
+ struct vip_buffer,
+ list);
+ if (vb2_is_streaming(&vip->vb_vidq)) /* streaming is on */
+ start_dma(vip, vip_buf); /* start dma capture */
}
+ spin_unlock(&vip->lock);
+}
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_buffer *vip_buf = to_vip_buffer(vb);
- file->private_data = dev;
- vip->overflow = 0;
- vip->started = 0;
- vip->closing = 0;
- vip->active = NULL;
+ /* Buffer handled, remove it from the list */
+ spin_lock(&vip->lock);
+ list_del_init(&vip_buf->list);
+ spin_unlock(&vip->lock);
- INIT_LIST_HEAD(&vip->capture);
- vip->mem_spare = dma_alloc_coherent(&vip->pdev->dev, 64,
- &vip->dma_spare, GFP_KERNEL);
- if (!vip->mem_spare) {
- vip->users--;
- mutex_unlock(&vip->mutex);
- return -ENOMEM;
- }
+ vip_active_buf_next(vip);
- mutex_unlock(&vip->mutex);
- videobuf_queue_dma_contig_init_cached(&vip->vb_vidq,
- &vip_qops,
- &vip->pdev->dev,
- &vip->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct videobuf_buffer),
- vip, NULL);
- REG_READ(vip, DVP_ITS);
- REG_WRITE(vip, DVP_HLFLN, DVP_HLFLN_SD);
- REG_WRITE(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
- REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
- REG_WRITE(vip, DVP_CTL, 0);
- REG_READ(vip, DVP_ITS);
return 0;
}
-/**
- * vip_close - close video device
- * @file: descriptor of device
- *
- * close video device, wait until all pending operations are finished
- * ( maximum FRAME_MAX buffers pending )
- * Turn off interrupts.
- *
- * return value: 0, always succesful.
- */
-static int vip_close(struct file *file)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct video_device *dev = video_devdata(file);
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- vip->closing = 1;
- if (vip->active)
- videobuf_waiton(&vip->vb_vidq, vip->active, 0, 0);
spin_lock_irq(&vip->slock);
-
- REG_WRITE(vip, DVP_ITM, 0);
- REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
- REG_WRITE(vip, DVP_CTL, 0);
- REG_READ(vip, DVP_ITS);
-
- vip->started = 0;
- vip->active = NULL;
-
+ /* Enable VSYNC Top and Bottom interrupts */
+ reg_write(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
spin_unlock_irq(&vip->slock);
- videobuf_stop(&vip->vb_vidq);
- videobuf_mmap_free(&vip->vb_vidq);
+ if (count)
+ start_dma(vip, vip->active);
- dma_free_coherent(&vip->pdev->dev, 64, vip->mem_spare, vip->dma_spare);
- file->private_data = NULL;
- mutex_lock(&vip->mutex);
- vip->users--;
- mutex_unlock(&vip->mutex);
return 0;
}
-/**
- * vip_read - read from video input
- * @file: descriptor of device
- * @data: user buffer
- * @count: number of bytes to be read
- * @ppos: position within stream
- *
- * read video data from video device.
- * handling is done in generic videobuf layer
- * return value: provided by videobuf layer
- */
-static ssize_t vip_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_read_stream(&vip->vb_vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
+ struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
+ struct vip_buffer *vip_buf, *node;
+
+ /* Disable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ /* Disable all interrupts */
+ reg_write(vip, DVP_ITM, 0);
+
+ /* Release all active buffers */
+ spin_lock(&vip->lock);
+ list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
+ vb2_buffer_done(&vip_buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&vip_buf->list);
+ }
+ spin_unlock(&vip->lock);
+ return 0;
}
-/**
- * vip_mmap - map user buffer
- * @file: descriptor of device
- * @vma: user buffer
- *
- * map user space buffer into kernel mode, including DMA address.
- * handling is done in generic videobuf layer.
- * return value: provided by videobuf layer
- */
-static int vip_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+static struct vb2_ops vip_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
- return videobuf_mmap_mapper(&vip->vb_vidq, vma);
-}
-/**
- * vip_poll - poll for event
- * @file: descriptor of device
- * @wait: contains events to be waited for
- *
- * wait for event related to video device.
- * handling is done in generic videobuf layer.
- * return value: provided by videobuf layer
- */
-static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct video_device *dev = file->private_data;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+/* File Operations */
+static const struct v4l2_file_operations vip_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
- return videobuf_poll_stream(file, &vip->vb_vidq, wait);
-}
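+
+/*
+ * For context (a sketch, not part of this patch): the vb2_ops and
+ * v4l2_file_operations above only take effect once they are bound to a
+ * vb2_queue and a video_device; the wiring is roughly the following,
+ * with io_modes chosen for illustration.
+ *
+ * #include <media/videobuf2-core.h>
+ * #include <media/videobuf2-dma-contig.h>
+ *
+ * static int vip_init_queue(struct sta2x11_vip *vip)
+ * {
+ *	struct vb2_queue *q = &vip->vb_vidq;
+ *
+ *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *	q->io_modes = VB2_MMAP | VB2_READ;
+ *	q->drv_priv = vip;
+ *	q->buf_struct_size = sizeof(struct vip_buffer);
+ *	q->ops = &vip_video_qops;
+ *	q->mem_ops = &vb2_dma_contig_memops;
+ *	return vb2_queue_init(q);
+ * }
+ */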
/**
* vidioc_querycap - return capabilities of device
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @cap: contains return values
*
* the capabilities of the device are returned
@@ -527,25 +413,22 @@ static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
- memset(cap, 0, sizeof(struct v4l2_capability));
- strcpy(cap->driver, DRV_NAME);
- strcpy(cap->card, DRV_NAME);
- cap->version = 0;
+ strcpy(cap->driver, KBUILD_MODNAME);
+ strcpy(cap->card, KBUILD_MODNAME);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(vip->pdev));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
/**
* vidioc_s_std - set video standard
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains standard to be set
*
* the video standard is set
@@ -558,8 +441,7 @@ static int vidioc_querycap(struct file *file, void *priv,
*/
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
v4l2_std_id oldstd = vip->std, newstd;
int status;
@@ -592,8 +474,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
/**
* vidioc_g_std - get video standard
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains return values
*
* the current video standard is returned
@@ -602,8 +483,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
*/
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
*std = vip->std;
return 0;
@@ -611,8 +491,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
/**
* vidioc_querystd - get possible video standards
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @std: contains return values
*
* all possible video standards are returned
@@ -621,79 +500,11 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
*/
static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
return v4l2_subdev_call(vip->decoder, video, querystd, std);
-
}
-/**
- * vidioc_queryctl - get possible control settings
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains return values
- *
- * return possible values for a control
- * return value: delivered by video DAC routine.
- */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, queryctrl, ctrl);
-}
-
-/**
- * vidioc_g_ctl - get control value
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains return values
- *
- * return setting for a control value
- * return value: delivered by video DAC routine.
- */
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, g_ctrl, ctrl);
-}
-
-/**
- * vidioc_s_ctl - set control value
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @ctrl: contains value to be set
- *
- * set value for a specific control
- * return value: delivered by video DAC routine.
- */
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return v4l2_subdev_call(vip->decoder, core, s_ctrl, ctrl);
-}
-
-/**
- * vidioc_enum_input - return name of input line
- * @file: descriptor of device (not used)
- * @priv: points to current videodevice
- * @inp: contains return values
- *
- * the user friendly name of the input line is returned
- *
- * return value: 0, no error.
- *
- * -EINVAL, input line number out of range
- */
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
@@ -709,8 +520,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
/**
* vidioc_s_input - set input line
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @i: new input line number
*
* the current active input line is set
@@ -721,8 +531,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
*/
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
int ret;
if (i > 1)
@@ -737,8 +546,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
/**
* vidioc_g_input - return input line
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @i: returned input line number
*
* the current active input line is returned
@@ -747,8 +555,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
*/
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
*i = vip->input;
return 0;
@@ -756,8 +563,6 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
/**
* vidioc_enum_fmt_vid_cap - return video capture format
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
* @f: returned format information
*
* returns name and format of video capture
@@ -780,8 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_try_fmt_vid_cap - set video capture format
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: new format
*
* new video format is set which includes width and
@@ -797,12 +601,13 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
int interlace_lim;
- if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat)
+ if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat) {
+ v4l2_warn(&vip->v4l2_dev, "Invalid format, only UYVY supported\n");
return -EINVAL;
+ }
if (V4L2_STD_525_60 & vip->std)
interlace_lim = 240;
@@ -810,6 +615,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
interlace_lim = 288;
switch (f->fmt.pix.field) {
+ default:
case V4L2_FIELD_ANY:
if (interlace_lim < f->fmt.pix.height)
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
@@ -823,10 +629,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
break;
case V4L2_FIELD_INTERLACED:
break;
- default:
- return -EINVAL;
}
+ /* It is the only supported format */
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
f->fmt.pix.height &= ~1;
if (2 * interlace_lim < f->fmt.pix.height)
f->fmt.pix.height = 2 * interlace_lim;
@@ -842,8 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/**
* vidioc_s_fmt_vid_cap - set current video format parameters
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: returned format information
*
* set new capture format
@@ -854,22 +659,63 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
+ unsigned int t_stop, b_stop, pitch;
int ret;
ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- memcpy(&vip->format, &f->fmt.pix, sizeof(struct v4l2_pix_format));
+ if (vb2_is_busy(&vip->vb_vidq)) {
+ /* Can't change format during acquisition */
+ v4l2_err(&vip->v4l2_dev, "device busy\n");
+ return -EBUSY;
+ }
+ vip->format = f->fmt.pix;
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ t_stop = ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1);
+ b_stop = t_stop;
+ pitch = 4 * vip->format.width;
+ break;
+ case V4L2_FIELD_TOP:
+ t_stop = ((vip->format.height - 1) << 16) |
+ (2 * vip->format.width - 1);
+ b_stop = (0 << 16) | (2 * vip->format.width - 1);
+ pitch = 2 * vip->format.width;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ t_stop = (0 << 16) | (2 * vip->format.width - 1);
+ b_stop = (vip->format.height << 16) |
+ (2 * vip->format.width - 1);
+ pitch = 2 * vip->format.width;
+ break;
+ default:
+ v4l2_err(&vip->v4l2_dev, "unknown field format\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&vip->slock);
+ /* Y-X Top Field Offset */
+ reg_write(vip, DVP_TFO, 0);
+ /* Y-X Bottom Field Offset */
+ reg_write(vip, DVP_BFO, 0);
+ /* Y-X Top Field Stop*/
+ reg_write(vip, DVP_TFS, t_stop);
+ /* Y-X Bottom Field Stop */
+ reg_write(vip, DVP_BFS, b_stop);
+ /* Video Memory Pitch */
+ reg_write(vip, DVP_VMP, pitch);
+ spin_unlock_irq(&vip->slock);
+
return 0;
}
/**
* vidioc_g_fmt_vid_cap - get current video format parameters
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
+ * @file: descriptor of device
* @f: contains format information
*
* returns current video format parameters
@@ -879,150 +725,47 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- memcpy(&f->fmt.pix, &vip->format, sizeof(struct v4l2_pix_format));
- return 0;
-}
-
-/**
- * vidioc_reqfs - request buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_reqbufs(&vip->vb_vidq, p);
-}
-
-/**
- * vidioc_querybuf - query buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * query buffer state.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
+ struct sta2x11_vip *vip = video_drvdata(file);
- return videobuf_querybuf(&vip->vb_vidq, p);
-}
+ f->fmt.pix = vip->format;
-/**
- * vidioc_qbuf - queue a buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_qbuf(&vip->vb_vidq, p);
-}
-
-/**
- * vidioc_dqbuf - dequeue a buffer
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @p: video buffer
- *
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_dqbuf(&vip->vb_vidq, p, file->f_flags & O_NONBLOCK);
-}
-
-/**
- * vidioc_streamon - turn on streaming
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @type: type of capture
- *
- * turn on streaming.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_streamon(&vip->vb_vidq);
-}
-
-/**
- * vidioc_streamoff - turn off streaming
- * @file: descriptor of device ( not used)
- * @priv: points to current videodevice
- * @type: type of capture
- *
- * turn off streaming.
- * Handling is done in generic videobuf layer.
- */
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct video_device *dev = priv;
- struct sta2x11_vip *vip = video_get_drvdata(dev);
-
- return videobuf_streamoff(&vip->vb_vidq);
+ return 0;
}
-static const struct v4l2_file_operations vip_fops = {
- .owner = THIS_MODULE,
- .open = vip_open,
- .release = vip_close,
- .ioctl = video_ioctl2,
- .read = vip_read,
- .mmap = vip_mmap,
- .poll = vip_poll
-};
-
static const struct v4l2_ioctl_ops vip_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_s_std = vidioc_s_std,
+ /* FMT handling */
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ /* Buffer handlers */
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ /* Stream on/off */
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ /* Standard handling */
.vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
.vidioc_querystd = vidioc_querystd,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
+ /* Input handling */
.vidioc_enum_input = vidioc_enum_input,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_input = vidioc_s_input,
.vidioc_g_input = vidioc_g_input,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_s_input = vidioc_s_input,
+ /* Log status ioctl */
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ /* Event handling */
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device video_dev_template = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.release = video_device_release,
.fops = &vip_fops,
.ioctl_ops = &vip_ioctl_ops,
@@ -1036,9 +779,7 @@ static struct video_device video_dev_template = {
*
* check for both frame interrupts set ( top and bottom ).
* check FIFO overflow, but limit number of log messages after open.
- * signal a complete buffer if done.
- * dequeue a new buffer if available.
- * disable VIP if no buffer available.
+ * signal a complete buffer if done.
*
* return value: IRQ_NONE, interrupt was not generated by VIP
*
@@ -1046,88 +787,122 @@ static struct video_device video_dev_template = {
*/
static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
{
- u32 status, dma;
- unsigned long flags;
- struct videobuf_buffer *vb;
+ unsigned int status;
- status = REG_READ(vip, DVP_ITS);
+ status = reg_read(vip, DVP_ITS);
- if (!status) {
- pr_debug("VIP: irq ignored\n");
+ if (!status) /* No interrupt to handle */
return IRQ_NONE;
- }
-
- if (!vip->started)
- return IRQ_HANDLED;
- if (status & DVP_IT_VSB)
- vip->bcount++;
-
- if (status & DVP_IT_VST)
- vip->tcount++;
+ if (status & DVP_IT_FIFO)
+ if (vip->overflow++ > 5)
+ pr_info("VIP: fifo overflow\n");
- if ((DVP_IT_VSB | DVP_IT_VST) == (status & (DVP_IT_VST | DVP_IT_VSB))) {
+ if ((status & DVP_IT_VST) && (status & DVP_IT_VSB)) {
/* this is bad, we are too slow, hope the condition is gone
* on the next frame */
- pr_info("VIP: both irqs\n");
return IRQ_HANDLED;
}
- if (status & DVP_IT_FIFO) {
- if (5 > vip->overflow++)
- pr_info("VIP: fifo overflow\n");
+ if (status & DVP_IT_VST)
+ if ((++vip->tcount) < 2)
+ return IRQ_HANDLED;
+ if (status & DVP_IT_VSB) {
+ vip->bcount++;
+ return IRQ_HANDLED;
}
- if (2 > vip->tcount)
- return IRQ_HANDLED;
+ if (vip->active) { /* Acquisition is over on this buffer */
+ /* Disable acquisition */
+ reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ /* Remove the active buffer from the list */
+ do_gettimeofday(&vip->active->vb.v4l2_buf.timestamp);
+ vip->active->vb.v4l2_buf.sequence = vip->sequence++;
+ vb2_buffer_done(&vip->active->vb, VB2_BUF_STATE_DONE);
+ }
- if (status & DVP_IT_VSB)
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
+}
- spin_lock_irqsave(&vip->slock, flags);
+static void sta2x11_vip_init_register(struct sta2x11_vip *vip)
+{
+ /* Register initialization */
+ spin_lock_irq(&vip->slock);
+ /* Clean interrupt */
+ reg_read(vip, DVP_ITS);
+ /* Enable Half Line per vertical */
+ reg_write(vip, DVP_HLFLN, DVP_HLFLN_SD);
+ /* Reset VIP control */
+ reg_write(vip, DVP_CTL, DVP_CTL_RST);
+ /* Clear VIP control */
+ reg_write(vip, DVP_CTL, 0);
+ spin_unlock_irq(&vip->slock);
+}
+static void sta2x11_vip_clear_register(struct sta2x11_vip *vip)
+{
+ spin_lock_irq(&vip->slock);
+ /* Disable interrupt */
+ reg_write(vip, DVP_ITM, 0);
+ /* Reset VIP Control */
+ reg_write(vip, DVP_CTL, DVP_CTL_RST);
+ /* Clear VIP Control */
+ reg_write(vip, DVP_CTL, 0);
+ /* Clean VIP Interrupt */
+ reg_read(vip, DVP_ITS);
+ spin_unlock_irq(&vip->slock);
+}
+static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
+{
+ int err;
- REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) & ~DVP_CTL_ENA);
- if (vip->active) {
- do_gettimeofday(&vip->active->ts);
- vip->active->field_count++;
- vip->active->state = VIDEOBUF_DONE;
- wake_up(&vip->active->done);
- vip->active = NULL;
+ err = dma_set_coherent_mask(&vip->pdev->dev, DMA_BIT_MASK(29));
+ if (err) {
+ v4l2_err(&vip->v4l2_dev, "Cannot configure coherent mask");
+ return err;
}
- if (!vip->closing) {
- if (list_empty(&vip->capture))
- goto done;
-
- vb = list_first_entry(&vip->capture, struct videobuf_buffer,
- queue);
- if (NULL == vb) {
- pr_info("VIP: no buffer\n");
- goto done;
- }
- vb->state = VIDEOBUF_ACTIVE;
- list_del(&vb->queue);
- vip->active = vb;
- dma = videobuf_to_dma_contig(vb);
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
- break;
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- REG_WRITE(vip, DVP_VTP, dma);
- REG_WRITE(vip, DVP_VBP, dma);
- break;
- default:
- pr_warning("VIP: unknown field format\n");
- goto done;
- break;
- }
- REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) | DVP_CTL_ENA);
+ memset(&vip->vb_vidq, 0, sizeof(struct vb2_queue));
+ vip->vb_vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vip->vb_vidq.io_modes = VB2_MMAP | VB2_READ;
+ vip->vb_vidq.drv_priv = vip;
+ vip->vb_vidq.buf_struct_size = sizeof(struct vip_buffer);
+ vip->vb_vidq.ops = &vip_video_qops;
+ vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
+ err = vb2_queue_init(&vip->vb_vidq);
+ if (err)
+ return err;
+ INIT_LIST_HEAD(&vip->buffer_list);
+ spin_lock_init(&vip->lock);
+
+
+ vip->alloc_ctx = vb2_dma_contig_init_ctx(&vip->pdev->dev);
+ if (IS_ERR(vip->alloc_ctx)) {
+ v4l2_err(&vip->v4l2_dev, "Can't allocate buffer context");
+ return PTR_ERR(vip->alloc_ctx);
}
-done:
- spin_unlock_irqrestore(&vip->slock, flags);
- return IRQ_HANDLED;
+
+ return 0;
+}
+static void sta2x11_vip_release_buffer(struct sta2x11_vip *vip)
+{
+ vb2_dma_contig_cleanup_ctx(vip->alloc_ctx);
+}
+static int sta2x11_vip_init_controls(struct sta2x11_vip *vip)
+{
+ /*
+ * Initialize an empty control handler so VIP can inherit controls
+ * from the ADV7180
+ */
+ v4l2_ctrl_handler_init(&vip->ctrl_hdl, 0);
+
+ vip->v4l2_dev.ctrl_handler = &vip->ctrl_hdl;
+ if (vip->ctrl_hdl.error) {
+ int err = vip->ctrl_hdl.error;
+
+ v4l2_ctrl_handler_free(&vip->ctrl_hdl);
+ return err;
+ }
+
+ return 0;
}
/**
@@ -1212,10 +987,17 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
struct sta2x11_vip *vip;
struct vip_config *config;
+ /* Check if the hardware supports 26-bit DMA */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26))) {
+ dev_err(&pdev->dev, "26-bit DMA addressing not available\n");
+ return -EINVAL;
+ }
+ /* Enable PCI */
ret = pci_enable_device(pdev);
if (ret)
return ret;
+ /* Get VIP platform data */
config = dev_get_platdata(&pdev->dev);
if (!config) {
dev_info(&pdev->dev, "VIP slot disabled\n");
@@ -1223,6 +1005,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
goto disable;
}
+ /* Power configuration */
ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
config->pwr_name);
if (ret)
@@ -1237,7 +1020,6 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
goto disable;
}
}
-
if (config->pwr_pin != -1) {
/* Datasheet says 5ms between PWR and RST */
usleep_range(5000, 25000);
@@ -1251,17 +1033,20 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
}
usleep_range(5000, 25000);
+ /* Allocate a new VIP instance */
vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
if (!vip) {
ret = -ENOMEM;
goto release_gpios;
}
-
vip->pdev = pdev;
vip->std = V4L2_STD_PAL;
vip->format = formats_50[0];
vip->config = config;
+ ret = sta2x11_vip_init_controls(vip);
+ if (ret)
+ goto free_mem;
if (v4l2_device_register(&pdev->dev, &vip->v4l2_dev))
goto free_mem;
@@ -1271,46 +1056,52 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret)
goto unreg;
vip->iomem = pci_iomap(pdev, 0, 0x100);
if (!vip->iomem) {
- ret = -ENOMEM; /* FIXME */
+ ret = -ENOMEM;
goto release;
}
pci_enable_msi(pdev);
- INIT_LIST_HEAD(&vip->capture);
+ /* Initialize buffer */
+ ret = sta2x11_vip_init_buffer(vip);
+ if (ret)
+ goto unmap;
+
spin_lock_init(&vip->slock);
- mutex_init(&vip->mutex);
- vip->started = 0;
- vip->disabled = 0;
ret = request_irq(pdev->irq,
(irq_handler_t) vip_irq,
- IRQF_SHARED, DRV_NAME, vip);
+ IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;
- goto unmap;
+ goto release_buf;
}
+ /* Alloc, initialize and register video device */
vip->video_dev = video_device_alloc();
if (!vip->video_dev) {
ret = -ENOMEM;
goto release_irq;
}
- *(vip->video_dev) = video_dev_template;
+ vip->video_dev = &video_dev_template;
+ vip->video_dev->v4l2_dev = &vip->v4l2_dev;
+ vip->video_dev->queue = &vip->vb_vidq;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vip->video_dev->flags);
video_set_drvdata(vip->video_dev, vip);
ret = video_register_device(vip->video_dev, VFL_TYPE_GRABBER, -1);
if (ret)
goto vrelease;
+ /* Get ADV7180 subdevice */
vip->adapter = i2c_get_adapter(vip->config->i2c_id);
if (!vip->adapter) {
ret = -ENODEV;
@@ -1328,10 +1119,11 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
}
i2c_put_adapter(vip->adapter);
-
v4l2_subdev_call(vip->decoder, core, init, 0);
- pr_info("STA2X11 Video Input Port (VIP) loaded\n");
+ sta2x11_vip_init_register(vip);
+
+ dev_info(&pdev->dev, "STA2X11 Video Input Port (VIP) loaded\n");
return 0;
vunreg:
@@ -1343,10 +1135,12 @@ vrelease:
video_device_release(vip->video_dev);
release_irq:
free_irq(pdev->irq, vip);
+release_buf:
+ sta2x11_vip_release_buffer(vip);
pci_disable_msi(pdev);
unmap:
+ vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
- mutex_destroy(&vip->mutex);
release:
pci_release_regions(pdev);
unreg:
@@ -1382,16 +1176,18 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
struct sta2x11_vip *vip =
container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ sta2x11_vip_clear_register(vip);
+
video_set_drvdata(vip->video_dev, NULL);
video_unregister_device(vip->video_dev);
/*do not call video_device_release() here, is already done */
free_irq(pdev->irq, vip);
pci_disable_msi(pdev);
+ vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
pci_release_regions(pdev);
v4l2_device_unregister(&vip->v4l2_dev);
- mutex_destroy(&vip->mutex);
vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
vip->config->pwr_name);
@@ -1416,9 +1212,6 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
*
* return value: 0 always indicate success,
* even if device could not be disabled. (workaround for hardware problem)
- *
- * reurn value : 0, always succesful, even if hardware does not not support
- * power down mode.
*/
static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1429,15 +1222,15 @@ static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
int i;
spin_lock_irqsave(&vip->slock, flags);
- vip->register_save_area[0] = REG_READ(vip, DVP_CTL);
- REG_WRITE(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
- vip->register_save_area[SAVE_COUNT] = REG_READ(vip, DVP_ITM);
- REG_WRITE(vip, DVP_ITM, 0);
+ vip->register_save_area[0] = reg_read(vip, DVP_CTL);
+ reg_write(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
+ vip->register_save_area[SAVE_COUNT] = reg_read(vip, DVP_ITM);
+ reg_write(vip, DVP_ITM, 0);
for (i = 1; i < SAVE_COUNT; i++)
- vip->register_save_area[i] = REG_READ(vip, 4 * i);
+ vip->register_save_area[i] = reg_read(vip, 4 * i);
for (i = 0; i < AUX_COUNT; i++)
vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
- REG_READ(vip, registers_to_save[i]);
+ reg_read(vip, registers_to_save[i]);
spin_unlock_irqrestore(&vip->slock, flags);
/* save pci state */
pci_save_state(pdev);
@@ -1477,7 +1270,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
if (vip->disabled) {
ret = pci_enable_device(pdev);
if (ret) {
- pr_warning("VIP: Can't enable device.\n");
+ pr_warn("VIP: Can't enable device.\n");
return ret;
}
vip->disabled = 0;
@@ -1488,7 +1281,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
* do not call pci_disable_device on sta2x11 because it
* break all other Bus masters on this EP
*/
- pr_warning("VIP: Can't enable device.\n");
+ pr_warn("VIP: Can't enable device.\n");
vip->disabled = 1;
return ret;
}
@@ -1497,12 +1290,12 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
spin_lock_irqsave(&vip->slock, flags);
for (i = 1; i < SAVE_COUNT; i++)
- REG_WRITE(vip, 4 * i, vip->register_save_area[i]);
+ reg_write(vip, 4 * i, vip->register_save_area[i]);
for (i = 0; i < AUX_COUNT; i++)
- REG_WRITE(vip, registers_to_save[i],
+ reg_write(vip, registers_to_save[i],
vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
- REG_WRITE(vip, DVP_CTL, vip->register_save_area[0]);
- REG_WRITE(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
+ reg_write(vip, DVP_CTL, vip->register_save_area[0]);
+ reg_write(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
spin_unlock_irqrestore(&vip->slock, flags);
return 0;
}
@@ -1515,7 +1308,7 @@ static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
};
static struct pci_driver sta2x11_vip_driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.probe = sta2x11_vip_init_one,
.remove = sta2x11_vip_remove_one,
.id_table = sta2x11_vip_pci_tbl,
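
The sta2x11_vip changes above are a standard videobuf2 conversion: the driver fills in one vb2_queue, hooks it to the video_device, and delegates the buffer and streaming ioctls to the vb2_ioctl_* helpers instead of hand-written videobuf wrappers. A minimal sketch of that wiring follows; the my_* names and the separate my_qops vb2_ops are illustrative placeholders, not the driver's actual fields.

#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct my_buffer {
	struct vb2_buffer vb;		/* must be first */
	struct list_head list;
};

struct my_drv {
	struct video_device *vdev;
	struct vb2_queue queue;
};

extern const struct vb2_ops my_qops;	/* queue_setup/buf_queue/... defined elsewhere */

/* Buffer and streaming ioctls are serviced entirely by the vb2 helpers;
 * this table is assigned to vdev->ioctl_ops at registration time. */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static int my_init_queue(struct my_drv *drv)
{
	struct vb2_queue *q = &drv->queue;

	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes		= VB2_MMAP | VB2_READ;
	q->drv_priv		= drv;
	q->buf_struct_size	= sizeof(struct my_buffer);
	q->ops			= &my_qops;
	q->mem_ops		= &vb2_dma_contig_memops;

	/* the vb2_ioctl_* helpers find the queue through video_device->queue */
	drv->vdev->queue = q;
	return vb2_queue_init(q);
}

With the queue owned by vb2, the driver only has to guard its own state, e.g. the vb2_is_busy() check added to vidioc_s_fmt_vid_cap above.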
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig
index 314e417addae..0dcb8cd77676 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/media/pci/ttpci/Kconfig
@@ -1,8 +1,3 @@
-config TTPCI_EEPROM
- tristate
- depends on I2C
- default n
-
config DVB_AV7110
tristate "AV7110 cards"
depends on DVB_CORE && PCI && I2C
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 4656d4a10af0..3dc7aa9b6f40 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -235,7 +235,7 @@ static void recover_arm(struct av7110 *av7110)
restart_feeds(av7110);
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_check_ir_config(av7110, true);
#endif
}
@@ -268,7 +268,7 @@ static int arm_thread(void *data)
if (!av7110->arm_ready)
continue;
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_check_ir_config(av7110, false);
#endif
@@ -1730,7 +1730,7 @@ static int alps_tdlb7_tuner_set_params(struct dvb_frontend *fe)
static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name)
{
-#if defined(CONFIG_DVB_SP8870) || defined(CONFIG_DVB_SP8870_MODULE)
+#if IS_ENABLED(CONFIG_DVB_SP8870)
struct av7110* av7110 = fe->dvb->priv;
return request_firmware(fw, name, &av7110->dev->pci->dev);
@@ -2723,7 +2723,9 @@ static int av7110_attach(struct saa7146_dev* dev,
if (ret < 0)
goto err_av7110_exit_v4l_12;
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+ mutex_init(&av7110->ioctl_mutex);
+
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_ir_init(av7110);
#endif
printk(KERN_INFO "dvb-ttpci: found av7110-%d.\n", av7110_num);
@@ -2766,7 +2768,7 @@ static int av7110_detach(struct saa7146_dev* saa)
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
-#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_EVDEV)
av7110_ir_exit(av7110);
#endif
if (budgetpatch || av7110->full_ts) {
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index a378662b1dcf..ef3d9606b269 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -271,6 +271,8 @@ struct av7110 {
struct dvb_frontend* fe;
fe_status_t fe_status;
+ struct mutex ioctl_mutex;
+
/* crash recovery */
void (*recover)(struct av7110* av7110);
fe_sec_voltage_t saved_voltage;
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 952b33dbac4f..301029ca4535 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -1109,6 +1109,9 @@ static int dvb_video_ioctl(struct file *file,
}
}
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case VIDEO_STOP:
av7110->videostate.play_state = VIDEO_STOPPED;
@@ -1297,6 +1300,7 @@ static int dvb_video_ioctl(struct file *file,
break;
}
+ mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
@@ -1314,6 +1318,9 @@ static int dvb_audio_ioctl(struct file *file,
(cmd != AUDIO_GET_STATUS))
return -EPERM;
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case AUDIO_STOP:
if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
@@ -1442,6 +1449,7 @@ static int dvb_audio_ioctl(struct file *file,
ret = -ENOIOCTLCMD;
}
+ mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 9fc1dd0ba4c3..a6079b90252a 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -253,12 +253,17 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned long arg = (unsigned long) parg;
+ int ret = 0;
dprintk(8, "av7110:%p\n",av7110);
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
switch (cmd) {
case CA_RESET:
- return ci_ll_reset(&av7110->ci_wbuffer, file, arg, &av7110->ci_slot[0]);
+ ret = ci_ll_reset(&av7110->ci_wbuffer, file, arg,
+ &av7110->ci_slot[0]);
break;
case CA_GET_CAP:
{
@@ -277,8 +282,10 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num < 0 || info->num > 1)
+ if (info->num < 0 || info->num > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
return -EINVAL;
+ }
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
CA_CI_LINK : CA_CI;
@@ -306,10 +313,10 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_descr_t *descr = (ca_descr_t*) parg;
- if (descr->index >= 16)
- return -EINVAL;
- if (descr->parity > 1)
+ if (descr->index >= 16 || descr->parity > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
return -EINVAL;
+ }
av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetDescr, 5,
(descr->index<<8)|descr->parity,
(descr->cw[0]<<8)|descr->cw[1],
@@ -320,9 +327,12 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
}
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+
+ mutex_unlock(&av7110->ioctl_mutex);
+ return ret;
}
static ssize_t dvb_ca_write(struct file *file, const char __user *buf,
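
The av7110 hunks above serialize the video, audio and CA ioctl handlers with one per-device ioctl_mutex; the subtle part is that every early return inside the switch now has to drop the lock first. A minimal sketch of the pattern, with my_dev, MY_CMD and do_one_cmd() as illustrative placeholders:

#include <linux/errno.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex ioctl_mutex;	/* serializes all ioctls on this device */
};

#define MY_CMD	0x01			/* placeholder command number */

static int do_one_cmd(struct my_dev *dev, void *parg)
{
	return 0;			/* placeholder for the real work */
}

static int my_ioctl(struct my_dev *dev, unsigned int cmd, void *parg)
{
	int ret = 0;

	if (mutex_lock_interruptible(&dev->ioctl_mutex))
		return -ERESTARTSYS;	/* interrupted while waiting for the lock */

	switch (cmd) {
	case MY_CMD:
		ret = do_one_cmd(dev, parg);
		break;			/* no direct return: fall through to the unlock */
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&dev->ioctl_mutex);	/* single unlock on every path */
	return ret;
}

This is why the CA_GET_SLOT_INFO and CA_SET_DESCR error paths above gained explicit mutex_unlock() calls before their -EINVAL returns.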
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index a90a3b9b09bf..bb53d2488ad0 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -708,8 +708,7 @@ static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = {
static int
zoran_register_i2c (struct zoran *zr)
{
- memcpy(&zr->i2c_algo, &zoran_i2c_bit_data_template,
- sizeof(struct i2c_algo_bit_data));
+ zr->i2c_algo = zoran_i2c_bit_data_template;
zr->i2c_algo.data = zr;
strlcpy(zr->i2c_adapter.name, ZR_DEVNAME(zr),
sizeof(zr->i2c_adapter.name));
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index a4cd504b8eee..519164c572c8 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -1169,7 +1169,7 @@ zoran_reap_stat_com (struct zoran *zr)
}
frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME];
buffer = &zr->jpg_buffers.buffer[frame];
- do_gettimeofday(&buffer->bs.timestamp);
+ v4l2_get_timestamp(&buffer->bs.timestamp);
if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
buffer->bs.length = (stat_com & 0x7fffff) >> 1;
@@ -1407,7 +1407,7 @@ zoran_irq (int irq,
zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE;
zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq;
- do_gettimeofday(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp);
+ v4l2_get_timestamp(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp);
zr->v4l_grab_frame = NO_GRAB_ACTIVE;
zr->v4l_pend_tail++;
}
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index e60ae41e2319..2e8f518f298f 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1334,7 +1334,7 @@ static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
struct zoran *zr = fh->zr;
unsigned long flags;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c
index f1423b777db1..e084b0a21b1b 100644
--- a/drivers/media/pci/zoran/zoran_procfs.c
+++ b/drivers/media/pci/zoran/zoran_procfs.c
@@ -137,7 +137,7 @@ static int zoran_open(struct inode *inode, struct file *file)
static ssize_t zoran_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct zoran *zr = PDE(file->f_path.dentry->d_inode)->data;
+ struct zoran *zr = PDE(file_inode(file))->data;
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 33241120a58c..05d7b6333461 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -92,7 +92,7 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP2
tristate "OMAP2 Camera Capture Interface driver"
- depends on VIDEO_DEV && ARCH_OMAP2
+ depends on VIDEO_DEV && ARCH_OMAP2 && VIDEO_V4L2_INT_DEVICE
select VIDEOBUF_DMA_SG
---help---
This is a v4l2 driver for the TI OMAP2 camera capture interface
@@ -202,6 +202,15 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
help
This is a v4l2 driver for Samsung EXYNOS5 SoC G-Scaler.
+config VIDEO_SH_VEU
+ tristate "SuperH VEU mem2mem video processing driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Video Engine Unit (VEU) on SuperH and
+ SH-Mobile SoCs.
+
endif # V4L_MEM2MEM_DRIVERS
menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4817d2802171..42089ba3600f 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -25,6 +25,8 @@ obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
diff --git a/drivers/media/platform/blackfin/Kconfig b/drivers/media/platform/blackfin/Kconfig
index ecd5323768b7..cc239972fa2c 100644
--- a/drivers/media/platform/blackfin/Kconfig
+++ b/drivers/media/platform/blackfin/Kconfig
@@ -7,4 +7,9 @@ config VIDEO_BLACKFIN_CAPTURE
Choose PPI or EPPI as its interface.
To compile this driver as a module, choose M here: the
- module will be called bfin_video_capture.
+ module will be called bfin_capture.
+
+config VIDEO_BLACKFIN_PPI
+ tristate
+ depends on VIDEO_BLACKFIN_CAPTURE
+ default VIDEO_BLACKFIN_CAPTURE
diff --git a/drivers/media/platform/blackfin/Makefile b/drivers/media/platform/blackfin/Makefile
index aa3a0a216387..30421bc23080 100644
--- a/drivers/media/platform/blackfin/Makefile
+++ b/drivers/media/platform/blackfin/Makefile
@@ -1,2 +1,2 @@
-bfin_video_capture-objs := bfin_capture.o ppi.o
-obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_video_capture.o
+obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_capture.o
+obj-$(CONFIG_VIDEO_BLACKFIN_PPI) += ppi.o
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 1aad2a65d2f3..5f209d5810dc 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -52,6 +52,7 @@ struct bcap_format {
u32 pixelformat;
enum v4l2_mbus_pixelcode mbus_code;
int bpp; /* bits per pixel */
+ int dlen; /* data length for ppi in bits */
};
struct bcap_buffer {
@@ -76,18 +77,20 @@ struct bcap_device {
unsigned int cur_input;
/* current selected standard */
v4l2_std_id std;
+ /* current selected dv_timings */
+ struct v4l2_dv_timings dv_timings;
/* used to store pixel format */
struct v4l2_pix_format fmt;
/* bits per pixel*/
int bpp;
+ /* data length for ppi in bits */
+ int dlen;
/* used to store sensor supported format */
struct bcap_format *sensor_formats;
/* number of sensor formats array */
int num_sensor_formats;
/* pointing to current video buffer */
struct bcap_buffer *cur_frm;
- /* pointing to next video buffer */
- struct bcap_buffer *next_frm;
/* buffer queue used in videobuf2 */
struct vb2_queue buffer_queue;
/* allocator-specific contexts for each plane */
@@ -116,24 +119,35 @@ static const struct bcap_format bcap_formats[] = {
.pixelformat = V4L2_PIX_FMT_UYVY,
.mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
.bpp = 16,
+ .dlen = 8,
},
{
.desc = "YCbCr 4:2:2 Interleaved YUYV",
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
.bpp = 16,
+ .dlen = 8,
+ },
+ {
+ .desc = "YCbCr 4:2:2 Interleaved UYVY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .bpp = 16,
+ .dlen = 16,
},
{
.desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
.mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
.bpp = 16,
+ .dlen = 8,
},
{
.desc = "RGB 444",
.pixelformat = V4L2_PIX_FMT_RGB444,
.mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 16,
+ .dlen = 8,
},
};
@@ -366,9 +380,39 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
params.width = bcap_dev->fmt.width;
params.height = bcap_dev->fmt.height;
params.bpp = bcap_dev->bpp;
+ params.dlen = bcap_dev->dlen;
params.ppi_control = bcap_dev->cfg->ppi_control;
params.int_mask = bcap_dev->cfg->int_mask;
- params.blank_clocks = bcap_dev->cfg->blank_clocks;
+ if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_CUSTOM_TIMINGS) {
+ struct v4l2_bt_timings *bt = &bcap_dev->dv_timings.bt;
+
+ params.hdelay = bt->hsync + bt->hbackporch;
+ params.vdelay = bt->vsync + bt->vbackporch;
+ params.line = bt->hfrontporch + bt->hsync
+ + bt->hbackporch + bt->width;
+ params.frame = bt->vfrontporch + bt->vsync
+ + bt->vbackporch + bt->height;
+ if (bt->interlaced)
+ params.frame += bt->il_vfrontporch + bt->il_vsync
+ + bt->il_vbackporch;
+ } else if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
+ & V4L2_IN_CAP_STD) {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ if (bcap_dev->std & V4L2_STD_525_60) {
+ params.line = 858;
+ params.frame = 525;
+ } else {
+ params.line = 864;
+ params.frame = 625;
+ }
+ } else {
+ params.hdelay = 0;
+ params.vdelay = 0;
+ params.line = params.width + bcap_dev->cfg->blank_pixels;
+ params.frame = params.height;
+ }
ret = ppi->ops->set_params(ppi, &params);
if (ret < 0) {
v4l2_err(&bcap_dev->v4l2_dev,
@@ -409,10 +453,10 @@ static int bcap_stop_streaming(struct vb2_queue *vq)
/* release all active buffers */
while (!list_empty(&bcap_dev->dma_queue)) {
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
- list_del(&bcap_dev->next_frm->list);
- vb2_buffer_done(&bcap_dev->next_frm->vb, VB2_BUF_STATE_ERROR);
+ list_del(&bcap_dev->cur_frm->list);
+ vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
}
return 0;
}
@@ -484,17 +528,26 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
{
struct ppi_if *ppi = dev_id;
struct bcap_device *bcap_dev = ppi->priv;
- struct timeval timevalue;
struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
dma_addr_t addr;
spin_lock(&bcap_dev->lock);
- if (bcap_dev->cur_frm != bcap_dev->next_frm) {
- do_gettimeofday(&timevalue);
- vb->v4l2_buf.timestamp = timevalue;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
- bcap_dev->cur_frm = bcap_dev->next_frm;
+ if (!list_empty(&bcap_dev->dma_queue)) {
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+ if (ppi->err) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ ppi->err = false;
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
+ struct bcap_buffer, list);
+ list_del(&bcap_dev->cur_frm->list);
+ } else {
+ /* clear error flag, we will get a new frame */
+ if (ppi->err)
+ ppi->err = false;
}
ppi->ops->stop(ppi);
@@ -502,13 +555,8 @@ static irqreturn_t bcap_isr(int irq, void *dev_id)
if (bcap_dev->stop) {
complete(&bcap_dev->comp);
} else {
- if (!list_empty(&bcap_dev->dma_queue)) {
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
- struct bcap_buffer, list);
- list_del(&bcap_dev->next_frm->list);
- addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->next_frm->vb, 0);
- ppi->ops->update_addr(ppi, (unsigned long)addr);
- }
+ addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+ ppi->ops->update_addr(ppi, (unsigned long)addr);
ppi->ops->start(ppi);
}
@@ -542,9 +590,8 @@ static int bcap_streamon(struct file *file, void *priv,
}
/* get the next frame from the dma queue */
- bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+ bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
struct bcap_buffer, list);
- bcap_dev->cur_frm = bcap_dev->next_frm;
/* remove buffer from the dma queue */
list_del(&bcap_dev->cur_frm->list);
addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
@@ -602,6 +649,37 @@ static int bcap_s_std(struct file *file, void *priv, v4l2_std_id *std)
return 0;
}
+static int bcap_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video,
+ g_dv_timings, timings);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->dv_timings = *timings;
+ return 0;
+}
+
+static int bcap_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct bcap_device *bcap_dev = video_drvdata(file);
+ int ret;
+ if (vb2_is_busy(&bcap_dev->buffer_queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_dv_timings, timings);
+ if (ret < 0)
+ return ret;
+
+ bcap_dev->dv_timings = *timings;
+ return 0;
+}
+
static int bcap_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
@@ -650,13 +728,15 @@ static int bcap_s_input(struct file *file, void *priv, unsigned int index)
return ret;
}
bcap_dev->cur_input = index;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
return 0;
}
static int bcap_try_format(struct bcap_device *bcap,
struct v4l2_pix_format *pixfmt,
- enum v4l2_mbus_pixelcode *mbus_code,
- int *bpp)
+ struct bcap_format *bcap_fmt)
{
struct bcap_format *sf = bcap->sensor_formats;
struct bcap_format *fmt = NULL;
@@ -671,16 +751,20 @@ static int bcap_try_format(struct bcap_device *bcap,
if (i == bcap->num_sensor_formats)
fmt = &sf[0];
- if (mbus_code)
- *mbus_code = fmt->mbus_code;
- if (bpp)
- *bpp = fmt->bpp;
v4l2_fill_mbus_format(&mbus_fmt, pixfmt, fmt->mbus_code);
ret = v4l2_subdev_call(bcap->sd, video,
try_mbus_fmt, &mbus_fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pixfmt, &mbus_fmt);
+ if (bcap_fmt) {
+ for (i = 0; i < bcap->num_sensor_formats; i++) {
+ fmt = &sf[i];
+ if (mbus_fmt.code == fmt->mbus_code)
+ break;
+ }
+ *bcap_fmt = *fmt;
+ }
pixfmt->bytesperline = pixfmt->width * fmt->bpp / 8;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
return 0;
@@ -709,7 +793,7 @@ static int bcap_try_fmt_vid_cap(struct file *file, void *priv,
struct bcap_device *bcap_dev = video_drvdata(file);
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- return bcap_try_format(bcap_dev, pixfmt, NULL, NULL);
+ return bcap_try_format(bcap_dev, pixfmt, NULL);
}
static int bcap_g_fmt_vid_cap(struct file *file, void *priv,
@@ -726,24 +810,25 @@ static int bcap_s_fmt_vid_cap(struct file *file, void *priv,
{
struct bcap_device *bcap_dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
- enum v4l2_mbus_pixelcode mbus_code;
+ struct bcap_format bcap_fmt;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- int ret, bpp;
+ int ret;
if (vb2_is_busy(&bcap_dev->buffer_queue))
return -EBUSY;
/* see if format works */
- ret = bcap_try_format(bcap_dev, pixfmt, &mbus_code, &bpp);
+ ret = bcap_try_format(bcap_dev, pixfmt, &bcap_fmt);
if (ret < 0)
return ret;
- v4l2_fill_mbus_format(&mbus_fmt, pixfmt, mbus_code);
+ v4l2_fill_mbus_format(&mbus_fmt, pixfmt, bcap_fmt.mbus_code);
ret = v4l2_subdev_call(bcap_dev->sd, video, s_mbus_fmt, &mbus_fmt);
if (ret < 0)
return ret;
bcap_dev->fmt = *pixfmt;
- bcap_dev->bpp = bpp;
+ bcap_dev->bpp = bcap_fmt.bpp;
+ bcap_dev->dlen = bcap_fmt.dlen;
return 0;
}
@@ -834,6 +919,8 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
.vidioc_querystd = bcap_querystd,
.vidioc_s_std = bcap_s_std,
.vidioc_g_std = bcap_g_std,
+ .vidioc_s_dv_timings = bcap_s_dv_timings,
+ .vidioc_g_dv_timings = bcap_g_dv_timings,
.vidioc_reqbufs = bcap_reqbufs,
.vidioc_querybuf = bcap_querybuf,
.vidioc_qbuf = bcap_qbuf,
@@ -869,6 +956,7 @@ static int bcap_probe(struct platform_device *pdev)
struct i2c_adapter *i2c_adap;
struct bfin_capture_config *config;
struct vb2_queue *q;
+ struct bcap_route *route;
int ret;
config = pdev->dev.platform_data;
@@ -978,6 +1066,12 @@ static int bcap_probe(struct platform_device *pdev)
NULL);
if (bcap_dev->sd) {
int i;
+ if (!config->num_inputs) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to work without input\n");
+ goto err_unreg_vdev;
+ }
+
/* update tvnorms from the sub devices */
for (i = 0; i < config->num_inputs; i++)
vfd->tvnorms |= config->inputs[i].std;
@@ -989,8 +1083,24 @@ static int bcap_probe(struct platform_device *pdev)
v4l2_info(&bcap_dev->v4l2_dev, "v4l2 sub device registered\n");
+ /*
+ * explicitly set the input, otherwise some boards
+ * may not come up in the state we expect
+ */
+ route = &config->routes[0];
+ ret = v4l2_subdev_call(bcap_dev->sd, video, s_routing,
+ route->input, route->output, 0);
+ if ((ret < 0) && (ret != -ENOIOCTLCMD)) {
+ v4l2_err(&bcap_dev->v4l2_dev, "Failed to set input\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->cur_input = 0;
+ /* if this route has specific config, update ppi control */
+ if (route->ppi_control)
+ config->ppi_control = route->ppi_control;
+
/* now we can probe the default state */
- if (vfd->tvnorms) {
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_STD) {
v4l2_std_id std;
ret = v4l2_subdev_call(bcap_dev->sd, core, g_std, &std);
if (ret) {
@@ -1000,6 +1110,17 @@ static int bcap_probe(struct platform_device *pdev)
}
bcap_dev->std = std;
}
+ if (config->inputs[0].capabilities & V4L2_IN_CAP_CUSTOM_TIMINGS) {
+ struct v4l2_dv_timings dv_timings;
+ ret = v4l2_subdev_call(bcap_dev->sd, video,
+ g_dv_timings, &dv_timings);
+ if (ret) {
+ v4l2_err(&bcap_dev->v4l2_dev,
+ "Unable to get dv timings\n");
+ goto err_unreg_vdev;
+ }
+ bcap_dev->dv_timings = dv_timings;
+ }
ret = bcap_init_sensor_formats(bcap_dev);
if (ret) {
v4l2_err(&bcap_dev->v4l2_dev,
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index d29592186b02..01b5b501347e 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/bfin_ppi.h>
@@ -58,15 +59,33 @@ static irqreturn_t ppi_irq_err(int irq, void *dev_id)
* others are W1C
*/
status = bfin_read16(&reg->status);
+ if (status & 0x3000)
+ ppi->err = true;
bfin_write16(&reg->status, 0xff00);
break;
}
case PPI_TYPE_EPPI:
{
struct bfin_eppi_regs *reg = info->base;
+ unsigned short status;
+
+ status = bfin_read16(&reg->status);
+ if (status & 0x2)
+ ppi->err = true;
bfin_write16(&reg->status, 0xffff);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ unsigned long stat;
+
+ stat = bfin_read32(&reg->stat);
+ if (stat & 0x2)
+ ppi->err = true;
+ bfin_write32(&reg->stat, 0xc0ff);
+ break;
+ }
default:
break;
}
@@ -128,6 +147,12 @@ static int ppi_start(struct ppi_if *ppi)
bfin_write32(&reg->control, ppi->ppi_control);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
default:
return -EINVAL;
}
@@ -155,6 +180,12 @@ static int ppi_stop(struct ppi_if *ppi)
bfin_write32(&reg->control, ppi->ppi_control);
break;
}
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ break;
+ }
default:
return -EINVAL;
}
@@ -171,17 +202,23 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
{
const struct ppi_info *info = ppi->info;
int dma32 = 0;
- int dma_config, bytes_per_line, lines_per_frame;
+ int dma_config, bytes_per_line;
+ int hcount, hdelay, samples_per_line;
bytes_per_line = params->width * params->bpp / 8;
- lines_per_frame = params->height;
+ /* convert parameter units from pixels to samples */
+ hcount = params->width * params->bpp / params->dlen;
+ hdelay = params->hdelay * params->bpp / params->dlen;
+ samples_per_line = params->line * params->bpp / params->dlen;
if (params->int_mask == 0xFFFFFFFF)
ppi->err_int = false;
else
ppi->err_int = true;
- dma_config = (DMA_FLOW_STOP | WNR | RESTART | DMA2D | DI_EN);
+ dma_config = (DMA_FLOW_STOP | RESTART | DMA2D | DI_EN_Y);
ppi->ppi_control = params->ppi_control & ~PORT_EN;
+ if (!(ppi->ppi_control & PORT_DIR))
+ dma_config |= WNR;
switch (info->type) {
case PPI_TYPE_PPI:
{
@@ -191,8 +228,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
dma32 = 1;
bfin_write16(&reg->control, ppi->ppi_control);
- bfin_write16(&reg->count, bytes_per_line - 1);
- bfin_write16(&reg->frame, lines_per_frame);
+ bfin_write16(&reg->count, samples_per_line - 1);
+ bfin_write16(&reg->frame, params->frame);
break;
}
case PPI_TYPE_EPPI:
@@ -204,12 +241,31 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
dma32 = 1;
bfin_write32(&reg->control, ppi->ppi_control);
- bfin_write16(&reg->line, bytes_per_line + params->blank_clocks);
- bfin_write16(&reg->frame, lines_per_frame);
- bfin_write16(&reg->hdelay, 0);
- bfin_write16(&reg->vdelay, 0);
- bfin_write16(&reg->hcount, bytes_per_line);
- bfin_write16(&reg->vcount, lines_per_frame);
+ bfin_write16(&reg->line, samples_per_line);
+ bfin_write16(&reg->frame, params->frame);
+ bfin_write16(&reg->hdelay, hdelay);
+ bfin_write16(&reg->vdelay, params->vdelay);
+ bfin_write16(&reg->hcount, hcount);
+ bfin_write16(&reg->vcount, params->height);
+ break;
+ }
+ case PPI_TYPE_EPPI3:
+ {
+ struct bfin_eppi3_regs *reg = info->base;
+
+ if ((params->ppi_control & PACK_EN)
+ || (params->ppi_control & 0x70000) > DLEN_16)
+ dma32 = 1;
+
+ bfin_write32(&reg->ctl, ppi->ppi_control);
+ bfin_write32(&reg->line, samples_per_line);
+ bfin_write32(&reg->frame, params->frame);
+ bfin_write32(&reg->hdly, hdelay);
+ bfin_write32(&reg->vdly, params->vdelay);
+ bfin_write32(&reg->hcnt, hcount);
+ bfin_write32(&reg->vcnt, params->height);
+ if (params->int_mask)
+ bfin_write32(&reg->imsk, params->int_mask & 0xFF);
break;
}
default:
@@ -217,17 +273,17 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
}
if (dma32) {
- dma_config |= WDSIZE_32;
+ dma_config |= WDSIZE_32 | PSIZE_32;
set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
set_dma_x_modify(info->dma_ch, 4);
set_dma_y_modify(info->dma_ch, 4);
} else {
- dma_config |= WDSIZE_16;
+ dma_config |= WDSIZE_16 | PSIZE_16;
set_dma_x_count(info->dma_ch, bytes_per_line >> 1);
set_dma_x_modify(info->dma_ch, 2);
set_dma_y_modify(info->dma_ch, 2);
}
- set_dma_y_count(info->dma_ch, lines_per_frame);
+ set_dma_y_count(info->dma_ch, params->height);
set_dma_config(info->dma_ch, dma_config);
SSYNC();
@@ -263,9 +319,15 @@ struct ppi_if *ppi_create_instance(const struct ppi_info *info)
pr_info("ppi probe success\n");
return ppi;
}
+EXPORT_SYMBOL(ppi_create_instance);
void ppi_delete_instance(struct ppi_if *ppi)
{
peripheral_free_list(ppi->info->pin_req);
kfree(ppi);
}
+EXPORT_SYMBOL(ppi_delete_instance);
+
+MODULE_DESCRIPTION("Analog Devices PPI driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
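
The ppi.c rework above stops programming the horizontal counters in bytes and instead scales everything from pixels to bus samples by bpp/dlen. As a worked example under the formats added to bfin_capture above: UYVY over an 8-bit bus (bpp = 16, dlen = 8) with 525/60 timing and a 720-pixel active line gives 720 * 16 / 8 = 1440 active samples and 858 * 16 / 8 = 1716 samples per total line. A small sketch of the same conversion; struct my_timing is an illustrative stand-in for the real ppi_params fields:

/* Sketch of the pixel-to-sample scaling done in ppi_set_params() above. */
struct my_timing {
	int width;	/* active pixels per line */
	int line;	/* total pixels per line, including blanking */
	int hdelay;	/* horizontal delay before active video, in pixels */
	int bpp;	/* bits per pixel of the selected pixel format */
	int dlen;	/* PPI data bus width in bits */
};

static void pixels_to_samples(const struct my_timing *t,
			      int *hcount, int *hdelay, int *samples_per_line)
{
	*hcount           = t->width  * t->bpp / t->dlen;
	*hdelay           = t->hdelay * t->bpp / t->dlen;
	*samples_per_line = t->line   * t->bpp / t->dlen;
}

/*
 * Example: UYVY (bpp = 16) on an 8-bit bus (dlen = 8), 525/60 timing with
 * width = 720 and line = 858:
 *   hcount = 1440, samples_per_line = 1716
 */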
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 4a980e029ca7..20827ba168fc 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -178,6 +178,10 @@ struct coda_ctx {
int idx;
};
+static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
+static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+
static inline void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
@@ -944,6 +948,24 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_d
return 0;
}
+static int coda_h264_padding(int size, char *p)
+{
+ int nal_size;
+ int diff;
+
+ diff = size - (size & ~0x7);
+ if (diff == 0)
+ return 0;
+
+ nal_size = coda_filler_size[diff];
+ memcpy(p, coda_filler_nal, nal_size);
+
+ /* Add the RBSP stop bit and trailing bits at the end */
+ *(p + nal_size - 1) = 0x80;
+
+ return nal_size;
+}
+
static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct coda_ctx *ctx = vb2_get_drv_priv(q);
@@ -1171,7 +1193,15 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
memcpy(&ctx->vpu_header[1][0], vb2_plane_vaddr(buf, 0),
ctx->vpu_header_size[1]);
- ctx->vpu_header_size[2] = 0;
+ /*
+ * The length of the H.264 headers is variable and thus they might
+ * not be aligned for the coda to append the encoded frame. If that
+ * is the case, a filler NAL must be added to header 2.
+ */
+ ctx->vpu_header_size[2] = coda_h264_padding(
+ (ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1]),
+ ctx->vpu_header[2]);
break;
case V4L2_PIX_FMT_MPEG4:
/*
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 3c56037c82fc..ccfde4eb626a 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -97,25 +97,15 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
-config VIDEO_DM644X_VPBE
- tristate "DM644X VPBE HW module"
- depends on ARCH_DAVINCI_DM644x
+config VIDEO_DAVINCI_VPBE_DISPLAY
+ tristate "DM644X/DM365/DM355 VPBE HW module"
+ depends on ARCH_DAVINCI_DM644x || ARCH_DAVINCI_DM355 || ARCH_DAVINCI_DM365
select VIDEO_VPSS_SYSTEM
select VIDEOBUF2_DMA_CONTIG
help
- Enables VPBE modules used for display on a DM644x
- SoC.
+ Enables Davinci VPBE module used for display devices.
+ This module is common for following DM644x/DM365/DM355
+ based display devices.
To compile this driver as a module, choose M here: the
module will be called vpbe.
-
-
-config VIDEO_VPBE_DISPLAY
- tristate "VPBE V4L2 Display driver"
- depends on ARCH_DAVINCI_DM644x
- select VIDEO_DM644X_VPBE
- help
- Enables VPBE V4L2 Display driver on a DM644x device
-
- To compile this driver as a module, choose M here: the
- module will be called vpbe_display.
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
index 74ed92d09257..f40f5219ca50 100644
--- a/drivers/media/platform/davinci/Makefile
+++ b/drivers/media/platform/davinci/Makefile
@@ -16,5 +16,5 @@ obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
obj-$(CONFIG_VIDEO_ISIF) += isif.o
-obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
-obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
+obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpbe.o vpbe_osd.o \
+ vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index f263cabade7a..4277e4ad810c 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -557,7 +557,7 @@ static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
*/
static void ccdc_config_csc(struct ccdc_csc *csc)
{
- u32 val1, val2;
+ u32 val1 = 0, val2;
int i;
if (!csc->enable)
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 841b91a3d255..4ca0f9a2ad8a 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -558,9 +558,9 @@ static int platform_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct vpbe_device *vpbe_dev = data;
- if (strcmp("vpbe-osd", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
vpbe_dev->osd_device = platform_get_drvdata(pdev);
- if (strcmp("vpbe-venc", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
return 0;
@@ -584,7 +584,6 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
struct v4l2_subdev **enc_subdev;
struct osd_state *osd_device;
struct i2c_adapter *i2c_adap;
- int output_index;
int num_encoders;
int ret = 0;
int err;
@@ -632,8 +631,10 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
platform_device_get);
- if (err < 0)
- return err;
+ if (err < 0) {
+ ret = err;
+ goto fail_dev_unregister;
+ }
vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
vpbe_dev->cfg->venc.module_name);
@@ -731,7 +732,6 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
/* set the current encoder and output to that of venc by default */
vpbe_dev->current_sd_index = 0;
vpbe_dev->current_out_index = 0;
- output_index = 0;
mutex_unlock(&vpbe_dev->lock);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index e707a6f2325b..5e6b0cab514b 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -791,7 +791,6 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
struct osd_state *osd_device = fh->disp_dev->osd_device;
struct v4l2_rect *rect = &crop->c;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_G_CROP, layer id = %d\n",
@@ -799,7 +798,7 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
- ret = -EINVAL;
+ return -EINVAL;
}
osd_device->ops.get_layer_config(osd_device,
layer->layer_info.id, cfg);
@@ -1393,9 +1392,9 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
}
/* Initialize videobuf queue as per the buffer type */
layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
- if (!layer->alloc_ctx) {
+ if (IS_ERR(layer->alloc_ctx)) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(layer->alloc_ctx);
}
q = &layer->buffer_queue;
memset(q, 0, sizeof(*q));
@@ -1656,7 +1655,7 @@ static int vpbe_device_get(struct device *dev, void *data)
if (strcmp("vpbe_controller", pdev->name) == 0)
vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
- if (strcmp("vpbe-osd", pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-osd") != NULL)
vpbe_disp->osd_device = platform_get_drvdata(pdev);
return 0;
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 707f243f810d..12ad17c52ef3 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -39,7 +39,22 @@
#include <linux/io.h>
#include "vpbe_osd_regs.h"
-#define MODULE_NAME VPBE_OSD_SUBDEV_NAME
+#define MODULE_NAME "davinci-vpbe-osd"
+
+static struct platform_device_id vpbe_osd_devtype[] = {
+ {
+ .name = DM644X_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_OSD_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
/* register access routines */
static inline u32 osd_read(struct osd_state *sd, u32 offset)
@@ -129,7 +144,7 @@ static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
struct osd_platform_data *pdata;
pdata = (struct osd_platform_data *)sd->dev->platform_data;
- if (pdata->field_inv_wa_enable) {
+ if (pdata != NULL && pdata->field_inv_wa_enable) {
if (!field_inversion || !lconfig->interlaced) {
osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
@@ -1526,7 +1541,7 @@ static const struct vpbe_osd_ops osd_ops = {
static int osd_probe(struct platform_device *pdev)
{
- struct osd_platform_data *pdata;
+ const struct platform_device_id *pdev_id;
struct osd_state *osd;
struct resource *res;
int ret = 0;
@@ -1535,16 +1550,15 @@ static int osd_probe(struct platform_device *pdev)
if (osd == NULL)
return -ENOMEM;
- osd->dev = &pdev->dev;
- pdata = (struct osd_platform_data *)pdev->dev.platform_data;
- osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
- if (NULL == pdev->dev.platform_data) {
- dev_err(osd->dev, "No platform data defined for OSD"
- " sub device\n");
- ret = -ENOENT;
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id) {
+ ret = -EINVAL;
goto free_mem;
}
+ osd->dev = &pdev->dev;
+ osd->vpbe_type = pdev_id->driver_data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(osd->dev, "Unable to get OSD register address map\n");
@@ -1595,6 +1609,7 @@ static struct platform_driver osd_driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
},
+ .id_table = vpbe_osd_devtype
};
module_platform_driver(osd_driver);
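
Note: these vpbe_osd.c hunks stop requiring OSD platform data and instead derive the VPBE version from the driver's platform_device_id table via platform_get_device_id(). A minimal sketch of that pattern with hypothetical names; such id tables are conventionally terminated with an empty sentinel entry:

#include <linux/module.h>
#include <linux/platform_device.h>

static const struct platform_device_id foo_devtype[] = {
	{ .name = "foo-v1", .driver_data = 1 },
	{ .name = "foo-v2", .driver_data = 2 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, foo_devtype);

static int foo_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	if (!id)
		return -EINVAL;

	/* .driver_data carries the per-variant version or feature flags */
	dev_info(&pdev->dev, "probed variant %lu\n",
		 (unsigned long)id->driver_data);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.driver		= { .name = "foo", .owner = THIS_MODULE },
	.id_table	= foo_devtype,
};
module_platform_driver(foo_driver);
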
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index aed7369b962a..bdbebd59df98 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -38,7 +38,22 @@
#include "vpbe_venc_regs.h"
-#define MODULE_NAME VPBE_VENC_SUBDEV_NAME
+#define MODULE_NAME "davinci-vpbe-venc"
+
+static struct platform_device_id vpbe_venc_devtype[] = {
+ {
+ .name = DM644X_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_1,
+ }, {
+ .name = DM365_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_2,
+ }, {
+ .name = DM355_VPBE_VENC_SUBDEV_NAME,
+ .driver_data = VPBE_VERSION_3,
+ },
+};
+
+MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype);
static int debug = 2;
module_param(debug, int, 0644);
@@ -54,6 +69,7 @@ struct venc_state {
spinlock_t lock;
void __iomem *venc_base;
void __iomem *vdaccfg_reg;
+ enum vpbe_version venc_type;
};
static inline struct venc_state *to_state(struct v4l2_subdev *sd)
@@ -127,7 +143,7 @@ static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
{
struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
+
v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
if (benable) {
@@ -159,7 +175,7 @@ static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
/* Disable LCD output control (accepting default polarity) */
venc_write(sd, VENC_LCDOUT, 0);
- if (pdata->venc_type != VPBE_VERSION_3)
+ if (venc->venc_type != VPBE_VERSION_3)
venc_write(sd, VENC_CMPNT, 0x100);
venc_write(sd, VENC_HSPLS, 0);
venc_write(sd, VENC_HINT, 0);
@@ -203,11 +219,11 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_3) {
+ if (venc->venc_type == VPBE_VERSION_3) {
venc_write(sd, VENC_CLKCTL, 0x01);
venc_write(sd, VENC_VIDCTL, 0);
val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (pdata->venc_type == VPBE_VERSION_2) {
+ } else if (venc->venc_type == VPBE_VERSION_2) {
venc_write(sd, VENC_CLKCTL, 0x01);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
@@ -238,7 +254,6 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
static int venc_set_pal(struct v4l2_subdev *sd)
{
struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
@@ -249,11 +264,11 @@ static int venc_set_pal(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_3) {
+ if (venc->venc_type == VPBE_VERSION_3) {
venc_write(sd, VENC_CLKCTL, 0x1);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (pdata->venc_type == VPBE_VERSION_2) {
+ } else if (venc->venc_type == VPBE_VERSION_2) {
venc_write(sd, VENC_CLKCTL, 0x1);
venc_write(sd, VENC_VIDCTL, 0);
vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
@@ -293,8 +308,8 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
- if ((pdata->venc_type != VPBE_VERSION_1) &&
- (pdata->venc_type != VPBE_VERSION_2))
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
@@ -303,12 +318,12 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_2)
+ if (venc->venc_type == VPBE_VERSION_2)
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- if (pdata->venc_type == VPBE_VERSION_1) {
+ if (venc->venc_type == VPBE_VERSION_1) {
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
VENC_VDPRO_DAFRQ);
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
@@ -341,8 +356,8 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
- if ((pdata->venc_type != VPBE_VERSION_1) &&
- (pdata->venc_type != VPBE_VERSION_2))
+ if (venc->venc_type != VPBE_VERSION_1 &&
+ venc->venc_type != VPBE_VERSION_2)
return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
if (pdata->setup_clock(VPBE_ENC_CUSTOM_TIMINGS, 27000000) < 0)
@@ -350,13 +365,13 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- if (pdata->venc_type == VPBE_VERSION_2)
+ if (venc->venc_type == VPBE_VERSION_2)
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- if (pdata->venc_type == VPBE_VERSION_1) {
+ if (venc->venc_type == VPBE_VERSION_1) {
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
VENC_VDPRO_DAFRQ);
venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
@@ -460,14 +475,14 @@ static int venc_s_dv_timings(struct v4l2_subdev *sd,
else if (height == 480)
return venc_set_480p59_94(sd);
else if ((height == 720) &&
- (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ (venc->venc_type == VPBE_VERSION_2)) {
/* TBD setup internal 720p mode here */
ret = venc_set_720p60_internal(sd);
/* for DM365 VPBE, there is DAC inside */
vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
return ret;
} else if ((height == 1080) &&
- (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ (venc->venc_type == VPBE_VERSION_2)) {
/* TBD setup internal 1080i mode here */
ret = venc_set_1080i30_internal(sd);
/* for DM365 VPBE, there is DAC inside */
@@ -556,7 +571,7 @@ static int venc_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct venc_state **venc = data;
- if (strcmp(MODULE_NAME, pdev->name) == 0)
+ if (strstr(pdev->name, "vpbe-venc") != NULL)
*venc = platform_get_drvdata(pdev);
return 0;
@@ -593,6 +608,7 @@ EXPORT_SYMBOL(venc_sub_dev_init);
static int venc_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *pdev_id;
struct venc_state *venc;
struct resource *res;
int ret;
@@ -601,6 +617,12 @@ static int venc_probe(struct platform_device *pdev)
if (venc == NULL)
return -ENOMEM;
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id) {
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ venc->venc_type = pdev_id->driver_data;
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
if (NULL == venc->pdata) {
@@ -630,7 +652,7 @@ static int venc_probe(struct platform_device *pdev)
goto release_venc_mem_region;
}
- if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ if (venc->venc_type != VPBE_VERSION_1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(venc->pdev,
@@ -681,7 +703,7 @@ static int venc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap((void *)venc->venc_base);
release_mem_region(res->start, resource_size(res));
- if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ if (venc->venc_type != VPBE_VERSION_1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
iounmap((void *)venc->vdaccfg_reg);
release_mem_region(res->start, resource_size(res));
@@ -698,6 +720,7 @@ static struct platform_driver venc_driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
},
+ .id_table = vpbe_venc_devtype
};
module_platform_driver(venc_driver);
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index be9d3e1b4868..28d019da4c01 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -560,10 +560,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
{
- struct timeval timevalue;
-
- do_gettimeofday(&timevalue);
- vpfe_dev->cur_frm->ts = timevalue;
+ v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
wake_up_interruptible(&vpfe_dev->cur_frm->done);
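
Note: this hunk, and the vpif, fsl-viu, omap_vout and omap24xxcam hunks below, replace do_gettimeofday() with v4l2_get_timestamp(), which derives the buffer timestamp from the monotonic clock so it no longer jumps when wall time is stepped by settimeofday() or NTP. Roughly, the helper behaves like the following sketch (not the exact in-tree body):

#include <linux/ktime.h>
#include <linux/time.h>

/* Fill a struct timeval from the monotonic clock. */
static void example_monotonic_timestamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
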
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a409ccefb380..5892d2bc8eee 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -411,7 +411,7 @@ static struct vb2_ops video_qops = {
*/
static void vpif_process_buffer_complete(struct common_obj *common)
{
- do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
/* Make curFrm pointing to nextFrm */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 9f2b603be9c9..dd249c96126d 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -402,7 +402,7 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
/* one frame is displayed If next frame is
* available, release cur_frm and move on */
/* Copy frame display time */
- do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
/* Change status of the cur_frm */
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
@@ -462,8 +462,8 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
if (!channel_first_int[i][channel_id]) {
/* Mark status of the cur_frm to
* done and unlock semaphore on it */
- do_gettimeofday(&common->cur_frm->vb.
- v4l2_buf.timestamp);
+ v4l2_get_timestamp(&common->cur_frm->vb.
+ v4l2_buf.timestamp);
vb2_buffer_done(&common->cur_frm->vb,
VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 684e815a81b6..a19c552232d1 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -50,13 +50,29 @@ MODULE_AUTHOR("Texas Instruments");
/* VENCINT - vpss_int8 */
#define DM355_VPSSBL_EVTSEL_DEFAULT 0x4
-#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR 0x04
+#define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0)
+#define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1)
+#define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2)
+#define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3)
+#define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4)
+#define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5)
+#define DM365_ISP5_PCCR_RSV BIT(6)
+
+#define DM365_ISP5_BCR 0x08
+#define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1)
+
#define DM365_ISP5_INTSEL1 0x10
#define DM365_ISP5_INTSEL2 0x14
#define DM365_ISP5_INTSEL3 0x18
#define DM365_ISP5_CCDCMUX 0x20
#define DM365_ISP5_PG_FRAME_SIZE 0x28
#define DM365_VPBE_CLK_CTRL 0x00
+
+#define VPSS_CLK_CTRL 0x01c40044
+#define VPSS_CLK_CTRL_VENCCLKEN BIT(3)
+#define VPSS_CLK_CTRL_DACCLKEN BIT(4)
+
/*
* vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
* AF - vpss_int3
@@ -94,12 +110,19 @@ struct vpss_hw_ops {
void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
/* clear wbl overflow bit */
int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
+ /* set sync polarity */
+ void (*set_sync_pol)(struct vpss_sync_pol);
+ /* set the PG_FRAME_SIZE register */
+ void (*set_pg_frame_size)(struct vpss_pg_frame_size);
+ /* check and clear interrupt if occurred */
+ int (*dma_complete_interrupt)(void);
};
/* vpss configuration */
struct vpss_oper_config {
__iomem void *vpss_regs_base0;
__iomem void *vpss_regs_base1;
+ resource_size_t *vpss_regs_base2;
enum vpss_platform_type platform;
spinlock_t vpss_lock;
struct vpss_hw_ops hw_ops;
@@ -157,6 +180,14 @@ static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
}
+int vpss_dma_complete_interrupt(void)
+{
+ if (!oper_cfg.hw_ops.dma_complete_interrupt)
+ return 2;
+ return oper_cfg.hw_ops.dma_complete_interrupt();
+}
+EXPORT_SYMBOL(vpss_dma_complete_interrupt);
+
int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
{
if (!oper_cfg.hw_ops.select_ccdc_source)
@@ -182,6 +213,15 @@ static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
return 0;
}
+void vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+ if (!oper_cfg.hw_ops.set_sync_pol)
+ return;
+
+ oper_cfg.hw_ops.set_sync_pol(sync);
+}
+EXPORT_SYMBOL(vpss_set_sync_pol);
+
int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
{
if (!oper_cfg.hw_ops.clear_wbl_overflow)
@@ -347,6 +387,15 @@ void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
}
EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
+void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+ if (!oper_cfg.hw_ops.set_pg_frame_size)
+ return;
+
+ oper_cfg.hw_ops.set_pg_frame_size(frame_size);
+}
+EXPORT_SYMBOL(vpss_set_pg_frame_size);
+
void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
{
int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
@@ -425,6 +474,16 @@ static int vpss_probe(struct platform_device *pdev)
oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
/* Setup vpss interrupts */
+ isp5_write((isp5_read(DM365_ISP5_PCCR) |
+ DM365_ISP5_PCCR_BL_CLK_ENABLE |
+ DM365_ISP5_PCCR_ISIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_H3A_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSZ_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPE_CLK_ENABLE |
+ DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE |
+ DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR);
+ isp5_write((isp5_read(DM365_ISP5_BCR) |
+ DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR);
isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
@@ -470,11 +529,20 @@ static struct platform_driver vpss_driver = {
static void vpss_exit(void)
{
+ iounmap(oper_cfg.vpss_regs_base2);
+ release_mem_region(VPSS_CLK_CTRL, 4);
platform_driver_unregister(&vpss_driver);
}
static int __init vpss_init(void)
{
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
return platform_driver_register(&vpss_driver);
}
subsys_initcall(vpss_init);
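
Note: the new vpss_set_sync_pol(), vpss_set_pg_frame_size() and vpss_dma_complete_interrupt() exports follow the existing hw_ops dispatch style: the public helper checks whether the SoC-specific hook was installed at probe time and bails out if not. A stripped-down sketch of that pattern with a hypothetical op:

#include <linux/export.h>
#include <linux/types.h>

struct example_hw_ops {
	/* optional; only filled in on SoCs that have the block */
	void (*set_widget)(u32 val);
};

static struct example_hw_ops example_ops;

void example_set_widget(u32 val)
{
	if (!example_ops.set_widget)	/* hook absent on this SoC */
		return;
	example_ops.set_widget(val);
}
EXPORT_SYMBOL(example_set_widget);
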
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index c1a07133cc56..82d9f6ac12f3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -185,6 +185,15 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CRCB,
.num_planes = 3,
.num_comp = 3,
+ }, {
+ .name = "YUV 4:2:0 n.c. 2p, Y/CbCr tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
}
};
@@ -935,8 +944,8 @@ static struct gsc_variant gsc_v_100_variant = {
.pix_max = &gsc_v_100_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
- .in_buf_cnt = 8,
- .out_buf_cnt = 16,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
@@ -993,12 +1002,8 @@ static void *gsc_get_drv_data(struct platform_device *pdev)
static void gsc_clk_put(struct gsc_dev *gsc)
{
- if (IS_ERR_OR_NULL(gsc->clock))
- return;
-
- clk_unprepare(gsc->clock);
- clk_put(gsc->clock);
- gsc->clock = NULL;
+ if (!IS_ERR(gsc->clock))
+ clk_unprepare(gsc->clock);
}
static int gsc_clk_get(struct gsc_dev *gsc)
@@ -1007,27 +1012,22 @@ static int gsc_clk_get(struct gsc_dev *gsc)
dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
- gsc->clock = clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
- if (IS_ERR(gsc->clock))
- goto err_print;
+ gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
+ if (IS_ERR(gsc->clock)) {
+ dev_err(&gsc->pdev->dev, "failed to get clock: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ return PTR_ERR(gsc->clock);
+ }
ret = clk_prepare(gsc->clock);
if (ret < 0) {
- clk_put(gsc->clock);
- gsc->clock = NULL;
- goto err;
+ dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
+ GSC_CLOCK_GATE_NAME);
+ gsc->clock = ERR_PTR(-EINVAL);
+ return ret;
}
return 0;
-
-err:
- dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
- GSC_CLOCK_GATE_NAME);
- gsc_clk_put(gsc);
-err_print:
- dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
- GSC_CLOCK_GATE_NAME);
- return -ENXIO;
}
static int gsc_m2m_suspend(struct gsc_dev *gsc)
@@ -1096,6 +1096,7 @@ static int gsc_probe(struct platform_device *pdev)
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
+ gsc->clock = ERR_PTR(-EINVAL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gsc->regs = devm_ioremap_resource(dev, res);
@@ -1157,6 +1158,7 @@ static int gsc_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
pm_runtime_disable(&pdev->dev);
+ gsc_clk_put(gsc);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
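
Note: gsc_clk_get() now uses devm_clk_get(), so the clock reference is dropped automatically on unbind and only clk_unprepare() remains in gsc_clk_put(); gsc->clock is primed with ERR_PTR(-EINVAL) in probe so the put path can tell "never acquired" from "acquired". A minimal sketch of the idiom, with a hypothetical device structure:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_dev {
	struct device *dev;
	struct clk *clock;	/* initialised to ERR_PTR(-EINVAL) */
};

static int example_clk_get(struct example_dev *ed, const char *name)
{
	int ret;

	ed->clock = devm_clk_get(ed->dev, name);	/* no clk_put() needed */
	if (IS_ERR(ed->clock))
		return PTR_ERR(ed->clock);

	ret = clk_prepare(ed->clock);
	if (ret)
		ed->clock = ERR_PTR(ret);	/* mark unusable for the put path */
	return ret;
}

static void example_clk_put(struct example_dev *ed)
{
	if (!IS_ERR(ed->clock))		/* only unprepare what was prepared */
		clk_unprepare(ed->clock);
}
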
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 5f157efd24f0..cc19bba09bd1 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -427,6 +427,11 @@ static inline void gsc_ctx_state_lock_clear(u32 state, struct gsc_ctx *ctx)
spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
}
+static inline int is_tiled(const struct gsc_fmt *fmt)
+{
+ return fmt->pixelformat == V4L2_PIX_FMT_NV12MT_16X16;
+}
+
static inline void gsc_hw_enable_control(struct gsc_dev *dev, bool on)
{
u32 cfg = readl(dev->regs + GSC_ENABLE);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index c267c57c76fd..386c0a7a3a52 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -99,22 +99,28 @@ static void gsc_m2m_job_abort(void *priv)
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
-static int gsc_fill_addr(struct gsc_ctx *ctx)
+static int gsc_get_bufs(struct gsc_ctx *ctx)
{
struct gsc_frame *s_frame, *d_frame;
- struct vb2_buffer *vb = NULL;
+ struct vb2_buffer *src_vb, *dst_vb;
int ret;
s_frame = &ctx->s_frame;
d_frame = &ctx->d_frame;
- vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = gsc_prepare_addr(ctx, vb, s_frame, &s_frame->addr);
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
if (ret)
return ret;
- vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- return gsc_prepare_addr(ctx, vb, d_frame, &d_frame->addr);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+ if (ret)
+ return ret;
+
+ dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+
+ return 0;
}
static void gsc_m2m_device_run(void *priv)
@@ -148,7 +154,7 @@ static void gsc_m2m_device_run(void *priv)
goto put_device;
}
- ret = gsc_fill_addr(ctx);
+ ret = gsc_get_bufs(ctx);
if (ret) {
pr_err("Wrong address");
goto put_device;
@@ -367,6 +373,13 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
+static int gsc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct gsc_ctx *ctx = fh_to_ctx(fh);
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
static int gsc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
@@ -548,6 +561,7 @@ static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
.vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_reqbufs = gsc_m2m_reqbufs,
+ .vidioc_expbuf = gsc_m2m_expbuf,
.vidioc_querybuf = gsc_m2m_querybuf,
.vidioc_qbuf = gsc_m2m_qbuf,
.vidioc_dqbuf = gsc_m2m_dqbuf,
@@ -565,7 +579,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &gsc_m2m_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
@@ -577,7 +591,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &gsc_m2m_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
@@ -597,7 +611,7 @@ static int gsc_m2m_open(struct file *file)
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
- ctx = kzalloc(sizeof (*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
ret = -ENOMEM;
goto unlock;
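
Note: with VB2_DMABUF added to io_modes and .vidioc_expbuf wired to v4l2_m2m_expbuf(), user space can export an MMAP buffer plane of this m2m device as a DMABUF file descriptor. A hedged user-space sketch of the ioctl, assuming the queue was already set up with VIDIOC_REQBUFS in MMAP mode:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export plane 0 of buffer 'index'; returns a dma-buf fd or -1. */
static int export_plane(int video_fd, enum v4l2_buf_type type, unsigned index)
{
	struct v4l2_exportbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.type = type;		/* e.g. V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE */
	eb.index = index;
	eb.plane = 0;
	eb.flags = O_CLOEXEC;

	if (ioctl(video_fd, VIDIOC_EXPBUF, &eb) < 0)
		return -1;

	return eb.fd;	/* importable by another V4L2 device or a GPU driver */
}
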
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
index 0146b354dc22..6f5b5a486cf3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.c
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -214,6 +214,9 @@ void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
break;
}
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE;
+
writel(cfg, dev->regs + GSC_IN_CON);
}
@@ -334,6 +337,9 @@ void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
break;
}
+ if (is_tiled(frame->fmt))
+ cfg |= GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE;
+
end_set:
writel(cfg, dev->regs + GSC_OUT_CON);
}
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 9115a2c8d075..5f7db3f1f6f5 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1181,7 +1181,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status)
if (waitqueue_active(&buf->vb.done)) {
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
wake_up(&buf->vb.done);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 05c560f2ef06..6c4db9b98989 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -28,7 +28,7 @@ MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.1");
-static bool debug = true;
+static bool debug;
module_param(debug, bool, 0644);
/* Flags that indicate a format can be used for capture/output */
@@ -917,10 +917,8 @@ static int deinterlace_open(struct file *file)
ctx->xt = kzalloc(sizeof(struct dma_async_tx_descriptor) +
sizeof(struct data_chunk), GFP_KERNEL);
if (!ctx->xt) {
- int ret = PTR_ERR(ctx->xt);
-
kfree(ctx);
- return ret;
+ return -ENOMEM;
}
ctx->colorspace = V4L2_COLORSPACE_REC709;
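
Note: the open() fix above is the inverse of the earlier ERR_PTR change: kzalloc() signals failure with NULL, never an ERR_PTR, so taking PTR_ERR() of the result would have returned 0 ("success") on allocation failure; the path now returns -ENOMEM directly. A tiny sketch:

#include <linux/slab.h>

/* NULL-returning allocators must be checked with !ptr, not IS_ERR(). */
static void *example_alloc_ctx(size_t size, int *err)
{
	void *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx) {
		*err = -ENOMEM;	/* PTR_ERR(NULL) would have been 0 */
		return NULL;
	}

	*err = 0;
	return ctx;
}
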
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index ce2b7b4788d6..92a33f081852 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -22,6 +22,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <media/ov7670.h>
#include <media/videobuf2-vmalloc.h>
@@ -30,13 +31,6 @@
#include "mcam-core.h"
-/*
- * Basic frame stats - to be deleted shortly
- */
-static int frames;
-static int singles;
-static int delivered;
-
#ifdef MCAM_MODE_VMALLOC
/*
* Internal DMA buffer management. Since the controller cannot do S/G I/O,
@@ -367,10 +361,10 @@ static void mcam_frame_tasklet(unsigned long data)
if (!test_bit(bufno, &cam->flags))
continue;
if (list_empty(&cam->buffers)) {
- singles++;
+ cam->frame_state.singles++;
break; /* Leave it valid, hope for better later */
}
- delivered++;
+ cam->frame_state.delivered++;
clear_bit(bufno, &cam->flags);
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
queue);
@@ -452,7 +446,7 @@ static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
set_bit(CF_SINGLE_BUFFER, &cam->flags);
- singles++;
+ cam->frame_state.singles++;
return;
}
/*
@@ -485,7 +479,7 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
- delivered++;
+ cam->frame_state.delivered++;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
mcam_set_contig_buffer(cam, frame);
@@ -578,13 +572,13 @@ static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
*/
} else {
set_bit(CF_SG_RESTART, &cam->flags);
- singles++;
+ cam->frame_state.singles++;
cam->vb_bufs[0] = NULL;
}
/*
* Now we can give the completed frame back to user space.
*/
- delivered++;
+ cam->frame_state.delivered++;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
@@ -1232,47 +1226,6 @@ static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
return ret;
}
-
-
-static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct mcam_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
static int mcam_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -1520,9 +1473,6 @@ static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
.vidioc_dqbuf = mcam_vidioc_dqbuf,
.vidioc_streamon = mcam_vidioc_streamon,
.vidioc_streamoff = mcam_vidioc_streamoff,
- .vidioc_queryctrl = mcam_vidioc_queryctrl,
- .vidioc_g_ctrl = mcam_vidioc_g_ctrl,
- .vidioc_s_ctrl = mcam_vidioc_s_ctrl,
.vidioc_g_parm = mcam_vidioc_g_parm,
.vidioc_s_parm = mcam_vidioc_s_parm,
.vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
@@ -1545,7 +1495,9 @@ static int mcam_v4l_open(struct file *filp)
filp->private_data = cam;
- frames = singles = delivered = 0;
+ cam->frame_state.frames = 0;
+ cam->frame_state.singles = 0;
+ cam->frame_state.delivered = 0;
mutex_lock(&cam->s_mutex);
if (cam->users == 0) {
ret = mcam_setup_vb2(cam);
@@ -1566,8 +1518,9 @@ static int mcam_v4l_release(struct file *filp)
{
struct mcam_camera *cam = filp->private_data;
- cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
- singles, delivered);
+ cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n",
+ cam->frame_state.frames, cam->frame_state.singles,
+ cam->frame_state.delivered);
mutex_lock(&cam->s_mutex);
(cam->users)--;
if (cam->users == 0) {
@@ -1660,7 +1613,7 @@ static void mcam_frame_complete(struct mcam_camera *cam, int frame)
clear_bit(CF_DMA_ACTIVE, &cam->flags);
cam->next_buf = frame;
cam->buf_seq[frame] = ++(cam->sequence);
- frames++;
+ cam->frame_state.frames++;
/*
* "This should never happen"
*/
@@ -1786,14 +1739,19 @@ int mccic_register(struct mcam_camera *cam)
/*
* Get the v4l2 setup done.
*/
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+
mutex_lock(&cam->s_mutex);
cam->vdev = mcam_v4l_template;
cam->vdev.debug = 0;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
if (ret)
goto out;
- video_set_drvdata(&cam->vdev, cam);
/*
* If so requested, try to get our DMA buffers now.
@@ -1805,6 +1763,7 @@ int mccic_register(struct mcam_camera *cam)
}
out:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
mutex_unlock(&cam->s_mutex);
return ret;
out_unregister:
@@ -1829,6 +1788,7 @@ void mccic_shutdown(struct mcam_camera *cam)
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
video_unregister_device(&cam->vdev);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
v4l2_device_unregister(&cam->v4l2_dev);
}
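
Note: these mcam-core.c hunks drop the per-driver queryctrl/g_ctrl/s_ctrl ioctls and hand control handling to the control framework: a v4l2_ctrl_handler is initialised at registration, attached to the v4l2_device, and freed again at shutdown, letting the core route control ioctls (including the sensor subdev's controls) through it. A minimal sketch of that wiring, with a hypothetical camera structure and no custom controls:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

struct example_cam {
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler ctrl_handler;
};

static int example_register_ctrls(struct example_cam *cam)
{
	int ret;

	ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
	if (ret)
		return ret;

	/* The core now answers QUERYCTRL/G_CTRL/S_CTRL via this handler. */
	cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
	return 0;
}

static void example_unregister_ctrls(struct example_cam *cam)
{
	v4l2_ctrl_handler_free(&cam->ctrl_handler);
}
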
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index bd6acba9fb37..01dec9e5fc2b 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>
@@ -15,15 +16,15 @@
* Create our own symbols for the supported buffer modes, but, for now,
* base them entirely on which videobuf2 options have been selected.
*/
-#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_VMALLOC)
#define MCAM_MODE_VMALLOC 1
#endif
-#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_CONTIG)
#define MCAM_MODE_DMA_CONTIG 1
#endif
-#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
+#if IS_ENABLED(CONFIG_VIDEOBUF2_DMA_SG)
#define MCAM_MODE_DMA_SG 1
#endif
@@ -73,6 +74,14 @@ static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
}
}
+/*
+ * Basic frame states
+ */
+struct mcam_frame_state {
+ unsigned int frames;
+ unsigned int singles;
+ unsigned int delivered;
+};
/*
* A description of one of our devices.
@@ -104,10 +113,12 @@ struct mcam_camera {
* should not be touched by the platform code.
*/
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
enum mcam_state state;
unsigned long flags; /* Buffer status, mainly (dev_lock) */
int users; /* How many open FDs */
+ struct mcam_frame_state frame_state; /* Frame state counter */
/*
* Subsystem structures.
*/
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 390ab094f9f2..37ad446b35b3 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_OMAP2_VOUT
depends on ARCH_OMAP2 || ARCH_OMAP3
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
- select OMAP2_DSS
+ select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
default n
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 35cc526e6c93..96c4a17e4280 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -205,19 +205,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- vma = find_vma(mm, virtp);
/* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET) {
- physp = virt_to_phys((void *) virtp);
- } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ if (virtp >= PAGE_OFFSET)
+ return virt_to_phys((void *) virtp);
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(mm, virtp);
+ if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
/* this will catch, kernel-allocated, mmaped-to-usermode
addresses */
physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ up_read(&current->mm->mmap_sem);
} else {
/* otherwise, use get_user_pages() for general userland pages */
int res, nr_pages = 1;
struct page *pages;
- down_read(&current->mm->mmap_sem);
res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
0, &pages, NULL);
@@ -595,7 +597,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
return;
spin_lock(&vout->vbq_lock);
- do_gettimeofday(&timevalue);
+ v4l2_get_timestamp(&timevalue);
switch (cur_display->type) {
case OMAP_DISPLAY_TYPE_DSI:
@@ -1230,21 +1232,6 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
return ret;
}
-static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- int index = fmt->index;
-
- if (index >= NUM_OUTPUT_FORMATS)
- return -EINVAL;
-
- fmt->flags = omap_formats[index].flags;
- strlcpy(fmt->description, omap_formats[index].description,
- sizeof(fmt->description));
- fmt->pixelformat = omap_formats[index].pixelformat;
- return 0;
-}
-
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_format *f)
{
@@ -1858,10 +1845,9 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_s_fbuf = vidioc_s_fbuf,
.vidioc_g_fbuf = vidioc_g_fbuf,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
- .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
- .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
.vidioc_cropcap = vidioc_cropcap,
.vidioc_g_crop = vidioc_g_crop,
.vidioc_s_crop = vidioc_s_crop,
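
Note: the omap_vout_uservirt_to_phys() rework takes mmap_sem before walking the VMA list: find_vma() is only safe with the mm semaphore held, and the kernel direct-mapped case is split out first so it never touches the mm at all. A condensed sketch of the locking rule, using a hypothetical helper:

#include <linux/mm.h>
#include <linux/sched.h>

/* Translate a user virtual address for VM_IO mappings, holding
 * mmap_sem across the VMA lookup (sketch only). */
static unsigned long example_user_virt_to_phys(unsigned long virtp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long physp = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, virtp);	/* only valid under mmap_sem */
	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff)
		physp = (vma->vm_pgoff << PAGE_SHIFT) +
			(virtp - vma->vm_start);
	up_read(&mm->mmap_sem);

	return physp;
}
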
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/media/platform/omap24xxcam.c
index 8b7ccea982e7..debb44ceb185 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/media/platform/omap24xxcam.c
@@ -402,7 +402,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
omap24xxcam_core_disable(cam);
spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count = atomic_add_return(2, &fh->field_count);
if (csr & csr_error) {
vb->state = VIDEOBUF_ERROR;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index e4aaee91201d..6e5ad8ec0a22 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1338,28 +1338,15 @@ static int isp_enable_clocks(struct isp_device *isp)
{
int r;
unsigned long rate;
- int divisor;
-
- /*
- * cam_mclk clock chain:
- * dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk
- *
- * In OMAP3630 dpll4_m5x2 != 2 x dpll4_m5 but both are
- * set to the same value. Hence the rate set for dpll4_m5
- * has to be twice of what is set on OMAP3430 to get
- * the required value for cam_mclk
- */
- divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
if (r) {
dev_err(isp->dev, "failed to enable cam_ick clock\n");
goto out_clk_enable_ick;
}
- r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
- CM_CAM_MCLK_HZ/divisor);
+ r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
if (r) {
- dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
+ dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
goto out_clk_enable_mclk;
}
r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1401,33 +1388,19 @@ static void isp_disable_clocks(struct isp_device *isp)
static const char *isp_clocks[] = {
"cam_ick",
"cam_mclk",
- "dpll4_m5_ck",
"csi2_96m_fck",
"l3_ick",
};
-static void isp_put_clocks(struct isp_device *isp)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
- if (isp->clock[i]) {
- clk_put(isp->clock[i]);
- isp->clock[i] = NULL;
- }
- }
-}
-
static int isp_get_clocks(struct isp_device *isp)
{
struct clk *clk;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
- clk = clk_get(isp->dev, isp_clocks[i]);
+ clk = devm_clk_get(isp->dev, isp_clocks[i]);
if (IS_ERR(clk)) {
dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
- isp_put_clocks(isp);
return PTR_ERR(clk);
}
@@ -1993,7 +1966,6 @@ error_csiphy:
static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
- int i;
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
@@ -2004,26 +1976,6 @@ static int isp_remove(struct platform_device *pdev)
isp->domain = NULL;
omap3isp_put(isp);
- free_irq(isp->irq_num, isp);
- isp_put_clocks(isp);
-
- for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
- if (isp->mmio_base[i]) {
- iounmap(isp->mmio_base[i]);
- isp->mmio_base[i] = NULL;
- }
-
- if (isp->mmio_base_phys[i]) {
- release_mem_region(isp->mmio_base_phys[i],
- isp->mmio_size[i]);
- isp->mmio_base_phys[i] = 0;
- }
- }
-
- regulator_put(isp->isp_csiphy1.vdd);
- regulator_put(isp->isp_csiphy2.vdd);
- kfree(isp);
-
return 0;
}
@@ -2041,7 +1993,8 @@ static int isp_map_mem_resource(struct platform_device *pdev,
return -ENODEV;
}
- if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+ if (!devm_request_mem_region(isp->dev, mem->start, resource_size(mem),
+ pdev->name)) {
dev_err(isp->dev,
"cannot reserve camera register I/O region\n");
return -ENODEV;
@@ -2050,8 +2003,9 @@ static int isp_map_mem_resource(struct platform_device *pdev,
isp->mmio_size[res] = resource_size(mem);
/* map the region */
- isp->mmio_base[res] = ioremap_nocache(isp->mmio_base_phys[res],
- isp->mmio_size[res]);
+ isp->mmio_base[res] = devm_ioremap_nocache(isp->dev,
+ isp->mmio_base_phys[res],
+ isp->mmio_size[res]);
if (!isp->mmio_base[res]) {
dev_err(isp->dev, "cannot map camera register I/O region\n");
return -ENODEV;
@@ -2081,7 +2035,7 @@ static int isp_probe(struct platform_device *pdev)
if (pdata == NULL)
return -EINVAL;
- isp = kzalloc(sizeof(*isp), GFP_KERNEL);
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp) {
dev_err(&pdev->dev, "could not allocate memory\n");
return -ENOMEM;
@@ -2104,8 +2058,8 @@ static int isp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, isp);
/* Regulators */
- isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
- isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
+ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY1");
+ isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "VDD_CSIPHY2");
/* Clocks
*
@@ -2180,7 +2134,8 @@ static int isp_probe(struct platform_device *pdev)
goto detach_dev;
}
- if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
+ if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
+ "OMAP3 ISP", isp)) {
dev_err(isp->dev, "Unable to request IRQ\n");
ret = -EINVAL;
goto detach_dev;
@@ -2189,7 +2144,7 @@ static int isp_probe(struct platform_device *pdev)
/* Entities */
ret = isp_initialize_modules(isp);
if (ret < 0)
- goto error_irq;
+ goto detach_dev;
ret = isp_register_entities(isp);
if (ret < 0)
@@ -2202,8 +2157,6 @@ static int isp_probe(struct platform_device *pdev)
error_modules:
isp_cleanup_modules(isp);
-error_irq:
- free_irq(isp->irq_num, isp);
detach_dev:
iommu_detach_device(isp->domain, &pdev->dev);
free_domain:
@@ -2211,26 +2164,9 @@ free_domain:
error_isp:
omap3isp_put(isp);
error:
- isp_put_clocks(isp);
-
- for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
- if (isp->mmio_base[i]) {
- iounmap(isp->mmio_base[i]);
- isp->mmio_base[i] = NULL;
- }
-
- if (isp->mmio_base_phys[i]) {
- release_mem_region(isp->mmio_base_phys[i],
- isp->mmio_size[i]);
- isp->mmio_base_phys[i] = 0;
- }
- }
- regulator_put(isp->isp_csiphy2.vdd);
- regulator_put(isp->isp_csiphy1.vdd);
platform_set_drvdata(pdev, NULL);
mutex_destroy(&isp->isp_mutex);
- kfree(isp);
return ret;
}
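
Note: most of the isp.c churn is a conversion to managed resources: devm_kzalloc(), devm_clk_get(), devm_regulator_get(), devm_request_mem_region()/devm_ioremap_nocache() and devm_request_irq() are all released automatically on probe failure or unbind, which is why the explicit error_irq/iounmap/release_mem_region/regulator_put/kfree unwinding disappears from probe() and remove(). A bare-bones sketch of a devm-style probe, with a hypothetical device:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_dev {
	void __iomem *base;
};

static irqreturn_t example_isr(int irq, void *priv)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *ed;
	struct resource *res;
	int irq, ret;

	ed = devm_kzalloc(&pdev->dev, sizeof(*ed), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ed->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ed->base))
		return PTR_ERR(ed->base);	/* nothing to unwind by hand */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_isr, 0,
			       dev_name(&pdev->dev), ed);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, ed);
	return 0;	/* remove() only undoes what devm does not manage */
}
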
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 517d348ce32b..c77e1f2ae5ca 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -147,7 +147,6 @@ struct isp_platform_callback {
* @ref_count: Reference count for handling multiple ISP requests.
* @cam_ick: Pointer to camera interface clock structure.
* @cam_mclk: Pointer to camera functional clock structure.
- * @dpll4_m5_ck: Pointer to DPLL4 M5 clock structure.
* @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
* @l3_ick: Pointer to OMAP3 L3 bus interface clock.
* @irq: Currently attached ISP ISR callbacks information structure.
@@ -189,10 +188,9 @@ struct isp_device {
u32 xclk_divisor[2]; /* Two clocks, a and b. */
#define ISP_CLK_CAM_ICK 0
#define ISP_CLK_CAM_MCLK 1
-#define ISP_CLK_DPLL4_M5_CK 2
-#define ISP_CLK_CSI2_FCK 3
-#define ISP_CLK_L3_ICK 4
- struct clk *clock[5];
+#define ISP_CLK_CSI2_FCK 2
+#define ISP_CLK_L3_ICK 3
+ struct clk *clock[4];
/* ISP modules */
struct ispstat isp_af;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index 85f0de85f37c..c5d84c977e29 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1136,7 +1136,7 @@ int omap3isp_ccp2_init(struct isp_device *isp)
* TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
*/
if (isp->revision == ISP_REVISION_2_0) {
- ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
+ ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib");
if (IS_ERR(ccp2->vdds_csib)) {
dev_dbg(isp->dev,
"Could not get regulator vdds_csib\n");
@@ -1147,10 +1147,8 @@ int omap3isp_ccp2_init(struct isp_device *isp)
}
ret = ccp2_init_entities(ccp2);
- if (ret < 0) {
- regulator_put(ccp2->vdds_csib);
+ if (ret < 0)
return ret;
- }
ccp2_reset(ccp2);
return 0;
@@ -1166,6 +1164,4 @@ void omap3isp_ccp2_cleanup(struct isp_device *isp)
omap3isp_video_cleanup(&ccp2->video_in);
media_entity_cleanup(&ccp2->subdev.entity);
-
- regulator_put(ccp2->vdds_csib);
}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 3d56b33f85e8..c09de32f986a 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -32,7 +32,8 @@
#include "ispreg.h"
#include "ispcsiphy.h"
-static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
+ enum isp_interface_type iface,
bool ccp2_strobe)
{
u32 reg = isp_reg_readl(
@@ -40,6 +41,8 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
u32 shift, mode;
switch (iface) {
+ default:
+ /* Should not happen in practice, but let's keep the compiler happy. */
case ISP_INTERFACE_CCP2B_PHY1:
reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
@@ -59,9 +62,8 @@ static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
}
/* Select data/clock or data/strobe mode for CCP2 */
- switch (iface) {
- case ISP_INTERFACE_CCP2B_PHY1:
- case ISP_INTERFACE_CCP2B_PHY2:
+ if (iface == ISP_INTERFACE_CCP2B_PHY1 ||
+ iface == ISP_INTERFACE_CCP2B_PHY2) {
if (ccp2_strobe)
mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
else
@@ -110,7 +112,8 @@ static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
* and 3630, so they will not hold their contents in off-mode. This isn't an
* issue since the MPU power domain is forced on whilst the ISP is in use.
*/
-static void csiphy_routing_cfg(struct isp_csiphy *phy, u32 iface, bool on,
+static void csiphy_routing_cfg(struct isp_csiphy *phy,
+ enum isp_interface_type iface, bool on,
bool ccp2_strobe)
{
if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index 036e9961d027..75fd82b152ba 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -300,13 +300,11 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
struct ispstat *aewb = &isp->isp_aewb;
struct omap3isp_h3a_aewb_config *aewb_cfg;
struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
- int ret;
- aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
+ aewb_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_cfg), GFP_KERNEL);
if (!aewb_cfg)
return -ENOMEM;
- memset(aewb, 0, sizeof(*aewb));
aewb->ops = &h3a_aewb_ops;
aewb->priv = aewb_cfg;
aewb->dma_ch = -1;
@@ -314,12 +312,12 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb->isp = isp;
/* Set recover state configuration */
- aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
+ aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
+ GFP_KERNEL);
if (!aewb_recover_cfg) {
dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
"recover configuration.\n");
- ret = -ENOMEM;
- goto err_recover_alloc;
+ return -ENOMEM;
}
aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
@@ -336,25 +334,13 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
dev_err(aewb->isp->dev, "AEWB: recover configuration is "
"invalid.\n");
- ret = -EINVAL;
- goto err_conf;
+ return -EINVAL;
}
aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
aewb->recover_priv = aewb_recover_cfg;
- ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
- if (ret)
- goto err_conf;
-
- return 0;
-
-err_conf:
- kfree(aewb_recover_cfg);
-err_recover_alloc:
- kfree(aewb_cfg);
-
- return ret;
+ return omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
}
/*
@@ -362,7 +348,5 @@ err_recover_alloc:
*/
void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
{
- kfree(isp->isp_aewb.priv);
- kfree(isp->isp_aewb.recover_priv);
omap3isp_stat_cleanup(&isp->isp_aewb);
}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 42ccce318d5d..a0bf5af32438 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -363,13 +363,11 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
struct ispstat *af = &isp->isp_af;
struct omap3isp_h3a_af_config *af_cfg;
struct omap3isp_h3a_af_config *af_recover_cfg;
- int ret;
- af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
+ af_cfg = devm_kzalloc(isp->dev, sizeof(*af_cfg), GFP_KERNEL);
if (af_cfg == NULL)
return -ENOMEM;
- memset(af, 0, sizeof(*af));
af->ops = &h3a_af_ops;
af->priv = af_cfg;
af->dma_ch = -1;
@@ -377,12 +375,12 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af->isp = isp;
/* Set recover state configuration */
- af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
+ af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
+ GFP_KERNEL);
if (!af_recover_cfg) {
dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
"configuration.\n");
- ret = -ENOMEM;
- goto err_recover_alloc;
+ return -ENOMEM;
}
af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
@@ -394,30 +392,16 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
if (h3a_af_validate_params(af, af_recover_cfg)) {
dev_err(af->isp->dev, "AF: recover configuration is "
"invalid.\n");
- ret = -EINVAL;
- goto err_conf;
+ return -EINVAL;
}
af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
af->recover_priv = af_recover_cfg;
- ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
- if (ret)
- goto err_conf;
-
- return 0;
-
-err_conf:
- kfree(af_recover_cfg);
-err_recover_alloc:
- kfree(af_cfg);
-
- return ret;
+ return omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
}
void omap3isp_h3a_af_cleanup(struct isp_device *isp)
{
- kfree(isp->isp_af.priv);
- kfree(isp->isp_af.recover_priv);
omap3isp_stat_cleanup(&isp->isp_af);
}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 2d759c56f37c..e070c24048ef 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -114,14 +114,14 @@ static void hist_setup_regs(struct ispstat *hist, void *priv)
/* Regions size and position */
for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
if (c < conf->num_regions) {
- reg_hor[c] = conf->region[c].h_start <<
- ISPHIST_REG_START_SHIFT;
- reg_hor[c] = conf->region[c].h_end <<
- ISPHIST_REG_END_SHIFT;
- reg_ver[c] = conf->region[c].v_start <<
- ISPHIST_REG_START_SHIFT;
- reg_ver[c] = conf->region[c].v_end <<
- ISPHIST_REG_END_SHIFT;
+ reg_hor[c] = (conf->region[c].h_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].h_end <<
+ ISPHIST_REG_END_SHIFT);
+ reg_ver[c] = (conf->region[c].v_start <<
+ ISPHIST_REG_START_SHIFT)
+ | (conf->region[c].v_end <<
+ ISPHIST_REG_END_SHIFT);
} else {
reg_hor[c] = 0;
reg_ver[c] = 0;
@@ -477,11 +477,10 @@ int omap3isp_hist_init(struct isp_device *isp)
struct omap3isp_hist_config *hist_cfg;
int ret = -1;
- hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
+ hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
if (hist_cfg == NULL)
return -ENOMEM;
- memset(hist, 0, sizeof(*hist));
hist->isp = isp;
if (HIST_CONFIG_DMA)
@@ -504,7 +503,6 @@ int omap3isp_hist_init(struct isp_device *isp)
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
- kfree(hist_cfg);
if (HIST_USING_DMA(hist))
omap_free_dma(hist->dma_ch);
}
@@ -519,6 +517,5 @@ void omap3isp_hist_cleanup(struct isp_device *isp)
{
if (HIST_USING_DMA(&isp->isp_hist))
omap_free_dma(isp->isp_hist.dma_ch);
- kfree(isp->isp_hist.priv);
omap3isp_stat_cleanup(&isp->isp_hist);
}
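
Note: the hist_setup_regs() hunk fixes region programming: the old code assigned reg_hor[c]/reg_ver[c] twice, so the end-field value overwrote the start-field one; start and end have to be shifted into their bit positions and OR-ed into a single register word. Generic sketch of that packing, with hypothetical shift macros:

#include <linux/types.h>

#define EXAMPLE_START_SHIFT	16
#define EXAMPLE_END_SHIFT	0

/* Pack a start/end pair into one 32-bit region register value. */
static u32 example_pack_region(u32 start, u32 end)
{
	return (start << EXAMPLE_START_SHIFT) | (end << EXAMPLE_END_SHIFT);
}
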
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 691b92a3c3e7..cd8831aebdeb 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -82,8 +82,9 @@ static struct omap3isp_prev_csc flr_prev_csc = {
* The preview engine crops several rows and columns internally depending on
* which filters are enabled. To avoid format changes when the filters are
* enabled or disabled (which would prevent them from being turned on or off
- * during streaming), the driver assumes all the filters are enabled when
- * computing sink crop and source format limits.
+ * during streaming), the driver assumes all filters that can be configured
+ * during streaming are enabled when computing sink crop and source format
+ * limits.
*
* If a filter is disabled, additional cropping is automatically added at the
* preview engine input by the driver to avoid overflow at line and frame end.
@@ -92,25 +93,23 @@ static struct omap3isp_prev_csc flr_prev_csc = {
* Median filter 4 pixels
* Noise filter,
* Faulty pixels correction 4 pixels, 4 lines
- * CFA filter 4 pixels, 4 lines in Bayer mode
- * 2 lines in other modes
* Color suppression 2 pixels
* or luma enhancement
* -------------------------------------------------------------
- * Maximum total 14 pixels, 8 lines
+ * Maximum total 10 pixels, 4 lines
*
* The color suppression and luma enhancement filters are applied after bayer to
* YUV conversion. They thus can crop one pixel on the left and one pixel on the
* right side of the image without changing the color pattern. When both those
* filters are disabled, the driver must crop the two pixels on the same side of
* the image to avoid changing the bayer pattern. The left margin is thus set to
- * 8 pixels and the right margin to 6 pixels.
+ * 6 pixels and the right margin to 4 pixels.
*/
-#define PREV_MARGIN_LEFT 8
-#define PREV_MARGIN_RIGHT 6
-#define PREV_MARGIN_TOP 4
-#define PREV_MARGIN_BOTTOM 4
+#define PREV_MARGIN_LEFT 6
+#define PREV_MARGIN_RIGHT 4
+#define PREV_MARGIN_TOP 2
+#define PREV_MARGIN_BOTTOM 2
#define PREV_MIN_IN_WIDTH 64
#define PREV_MIN_IN_HEIGHT 8
@@ -1080,7 +1079,6 @@ static void preview_config_input_format(struct isp_prev_device *prev,
*/
static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
- const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
@@ -1088,14 +1086,6 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
- format->code != V4L2_MBUS_FMT_Y10_1X10) {
- sph -= 2;
- eph += 2;
- slv -= 2;
- elv += 2;
- }
-
features = (prev->params.params[0].features & active)
| (prev->params.params[1].features & ~active);
@@ -1849,6 +1839,18 @@ static void preview_try_crop(struct isp_prev_device *prev,
right -= 2;
}
+ /* The CFA filter crops 4 lines and 4 columns in Bayer mode, and 2 lines
+ * and no columns in other modes. Increase the margins based on the sink
+ * format.
+ */
+ if (sink->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ sink->code != V4L2_MBUS_FMT_Y10_1X10) {
+ left += 2;
+ right -= 2;
+ top += 2;
+ bottom -= 2;
+ }
+
/* Restrict left/top to even values to keep the Bayer pattern. */
crop->left &= ~1;
crop->top &= ~1;
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index 15bf3eab2224..e15f01342058 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -366,7 +366,7 @@ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
unsigned long this_pfn;
unsigned long start;
unsigned long end;
- dma_addr_t pa;
+ dma_addr_t pa = 0;
int ret = -EFAULT;
start = buf->vbuf.m.userptr;
@@ -419,7 +419,7 @@ done:
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
struct vm_area_struct *vma;
- pgprot_t vm_page_prot;
+ pgprot_t uninitialized_var(vm_page_prot);
unsigned long start;
unsigned long end;
int ret = -EFAULT;
@@ -674,6 +674,7 @@ static int isp_video_queue_alloc(struct isp_video_queue *queue,
buf->vbuf.index = i;
buf->vbuf.length = size;
buf->vbuf.type = queue->type;
+ buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->vbuf.field = V4L2_FIELD_NONE;
buf->vbuf.memory = memory;
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 09a8c9cac5c9..0d0fab1a7b5e 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/version.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index fdb6740248a7..f553cc2a8ee8 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -486,6 +486,7 @@ static struct vb2_ops fimc_capture_qops = {
int fimc_capture_ctrls_create(struct fimc_dev *fimc)
{
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
int ret;
if (WARN_ON(vid_cap->ctx == NULL))
@@ -494,11 +495,13 @@ int fimc_capture_ctrls_create(struct fimc_dev *fimc)
return 0;
ret = fimc_ctrls_create(vid_cap->ctx);
- if (ret || vid_cap->user_subdev_api || !vid_cap->ctx->ctrls.ready)
+
+ if (ret || vid_cap->user_subdev_api || !sensor ||
+ !vid_cap->ctx->ctrls.ready)
return ret;
return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrls.handler,
- fimc->pipeline.subdevs[IDX_SENSOR]->ctrl_handler, NULL);
+ sensor->ctrl_handler, NULL);
}
static int fimc_capture_set_default_format(struct fimc_dev *fimc);
@@ -510,8 +513,8 @@ static int fimc_capture_open(struct file *file)
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
if (fimc_m2m_active(fimc))
goto unlock;
@@ -546,6 +549,7 @@ static int fimc_capture_open(struct file *file)
}
unlock:
mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
return ret;
}
@@ -626,8 +630,8 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
{
bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *var = fimc->variant;
- struct fimc_pix_limit *pl = var->pix_limit;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *dst = &ctx->d_frame;
u32 depth, min_w, max_w, min_h, align_h = 3;
u32 mask = FMT_FLAGS_CAM;
@@ -699,8 +703,8 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
{
bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *var = fimc->variant;
- struct fimc_pix_limit *pl = var->pix_limit;
+ const struct fimc_variant *var = fimc->variant;
+ const struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *sink = &ctx->s_frame;
u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
u32 align_sz = 0, align_h = 4;
@@ -793,6 +797,21 @@ static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
return 0;
}
+static struct media_entity *fimc_pipeline_get_head(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_entity_remote_source(pad);
+ if (!pad)
+ break;
+ me = pad->entity;
+ pad = &me->pads[0];
+ }
+
+ return me;
+}
+
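fimc_pipeline_get_head() above walks upstream from pad 0 of the given entity until it reaches an entity whose pad 0 is a source pad, i.e. the head of the pipeline. Below is a minimal user-space sketch of the same termination logic; the pad/entity structs and the three-entity topology are made up for illustration and only stand in for the media controller objects (pad->remote plays the role of media_entity_remote_source()).

#include <stdio.h>

#define PAD_FL_SINK   (1 << 0)
#define PAD_FL_SOURCE (1 << 1)

struct entity;

struct pad {
        unsigned int flags;
        struct entity *entity;   /* entity owning this pad */
        struct pad *remote;      /* linked upstream source pad, or NULL */
};

struct entity {
        const char *name;
        struct pad pads[2];
};

/* Walk upstream from pad 0 until an entity whose pad 0 is a source pad. */
static struct entity *pipeline_get_head(struct entity *me)
{
        struct pad *pad = &me->pads[0];

        while (!(pad->flags & PAD_FL_SOURCE)) {
                pad = pad->remote;    /* stand-in for media_entity_remote_source() */
                if (!pad)
                        break;
                me = pad->entity;
                pad = &me->pads[0];
        }
        return me;
}

int main(void)
{
        struct entity sensor = { "sensor", { { PAD_FL_SOURCE } } };
        struct entity csis   = { "csis",   { { PAD_FL_SINK }, { PAD_FL_SOURCE } } };
        struct entity fimc   = { "fimc",   { { PAD_FL_SINK }, { PAD_FL_SOURCE } } };

        sensor.pads[0].entity = &sensor;
        csis.pads[0].entity = &csis;   csis.pads[1].entity = &csis;
        fimc.pads[0].entity = &fimc;   fimc.pads[1].entity = &fimc;

        /* sensor:0 -> csis:0, csis:1 -> fimc:0 */
        csis.pads[0].remote = &sensor.pads[0];
        fimc.pads[0].remote = &csis.pads[1];

        printf("pipeline head: %s\n", pipeline_get_head(&fimc)->name);
        return 0;
}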
/**
* fimc_pipeline_try_format - negotiate and/or set formats at pipeline
* elements
@@ -808,19 +827,23 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
- struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
struct v4l2_subdev_format sfmt;
struct v4l2_mbus_framefmt *mf = &sfmt.format;
- struct fimc_fmt *ffmt = NULL;
- int ret, i = 0;
+ struct media_entity *me;
+ struct fimc_fmt *ffmt;
+ struct media_pad *pad;
+ int ret, i = 1;
+ u32 fcc;
if (WARN_ON(!sd || !tfmt))
return -EINVAL;
memset(&sfmt, 0, sizeof(sfmt));
sfmt.format = *tfmt;
-
sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+
+ me = fimc_pipeline_get_head(&sd->entity);
+
while (1) {
ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
FMT_FLAGS_CAM, i++);
@@ -833,40 +856,52 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
}
mf->code = tfmt->code = ffmt->mbus_code;
- ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
- if (ret)
- return ret;
- if (mf->code != tfmt->code) {
- mf->code = 0;
- continue;
+ /* set format on all pipeline subdevs */
+ while (me != &fimc->vid_cap.subdev.entity) {
+ sd = media_entity_to_v4l2_subdev(me);
+
+ sfmt.pad = 0;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+ if (ret)
+ return ret;
+
+ if (me->pads[0].flags & MEDIA_PAD_FL_SINK) {
+ sfmt.pad = me->num_pads - 1;
+ mf->code = tfmt->code;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL,
+ &sfmt);
+ if (ret)
+ return ret;
+ }
+
+ pad = media_entity_remote_source(&me->pads[sfmt.pad]);
+ if (!pad)
+ return -EINVAL;
+ me = pad->entity;
}
- if (mf->width != tfmt->width || mf->height != tfmt->height) {
- u32 fcc = ffmt->fourcc;
- tfmt->width = mf->width;
- tfmt->height = mf->height;
- ffmt = fimc_capture_try_format(ctx,
- &tfmt->width, &tfmt->height,
- NULL, &fcc, FIMC_SD_PAD_SOURCE);
- if (ffmt && ffmt->mbus_code)
- mf->code = ffmt->mbus_code;
- if (mf->width != tfmt->width ||
- mf->height != tfmt->height)
- continue;
- tfmt->code = mf->code;
- }
- if (csis)
- ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
- if (mf->code == tfmt->code &&
- mf->width == tfmt->width && mf->height == tfmt->height)
- break;
+ if (mf->code != tfmt->code)
+ continue;
+
+ fcc = ffmt->fourcc;
+ tfmt->width = mf->width;
+ tfmt->height = mf->height;
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SINK);
+ ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height,
+ NULL, &fcc, FIMC_SD_PAD_SOURCE);
+ if (ffmt && ffmt->mbus_code)
+ mf->code = ffmt->mbus_code;
+ if (mf->width != tfmt->width || mf->height != tfmt->height)
+ continue;
+ tfmt->code = mf->code;
+ break;
}
if (fmt_id && ffmt)
*fmt_id = ffmt;
*tfmt = *mf;
- dbg("code: 0x%x, %dx%d, %p", mf->code, mf->width, mf->height, ffmt);
return 0;
}
@@ -884,14 +919,16 @@ static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
{
struct v4l2_mbus_frame_desc fd;
int i, ret;
+ int pad;
for (i = 0; i < num_planes; i++)
fd.entry[i].length = plane_fmt[i].sizeimage;
+ pad = sensor->entity.num_pads - 1;
if (try)
- ret = v4l2_subdev_call(sensor, pad, set_frame_desc, 0, &fd);
+ ret = v4l2_subdev_call(sensor, pad, set_frame_desc, pad, &fd);
else
- ret = v4l2_subdev_call(sensor, pad, get_frame_desc, 0, &fd);
+ ret = v4l2_subdev_call(sensor, pad, get_frame_desc, pad, &fd);
if (ret < 0)
return ret;
@@ -916,9 +953,9 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- return fimc_fill_format(&ctx->d_frame, f);
+ __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f);
+ return 0;
}
static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
@@ -929,6 +966,10 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
struct v4l2_mbus_framefmt mf;
struct fimc_fmt *ffmt = NULL;
+ int ret = 0;
+
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
if (fimc_jpeg_fourcc(pix->pixelformat)) {
fimc_capture_try_format(ctx, &pix->width, &pix->height,
@@ -940,16 +981,16 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
ffmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
NULL, &pix->pixelformat,
FIMC_SD_PAD_SOURCE);
- if (!ffmt)
- return -EINVAL;
+ if (!ffmt) {
+ ret = -EINVAL;
+ goto unlock;
+ }
if (!fimc->vid_cap.user_subdev_api) {
mf.width = pix->width;
mf.height = pix->height;
mf.code = ffmt->mbus_code;
- fimc_md_graph_lock(fimc);
fimc_pipeline_try_format(ctx, &mf, &ffmt, false);
- fimc_md_graph_unlock(fimc);
pix->width = mf.width;
pix->height = mf.height;
if (ffmt)
@@ -961,8 +1002,11 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
if (ffmt->flags & FMT_FLAGS_COMPRESSED)
fimc_get_sensor_frame_desc(fimc->pipeline.subdevs[IDX_SENSOR],
pix->plane_fmt, ffmt->memplanes, true);
+unlock:
+ mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
- return 0;
+ return ret;
}
static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
@@ -979,7 +1023,8 @@ static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx,
clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
}
-static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
+static int __fimc_capture_set_format(struct fimc_dev *fimc,
+ struct v4l2_format *f)
{
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
@@ -1014,12 +1059,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
mf->code = ff->fmt->mbus_code;
mf->width = pix->width;
mf->height = pix->height;
-
- fimc_md_graph_lock(fimc);
ret = fimc_pipeline_try_format(ctx, mf, &s_fmt, true);
- fimc_md_graph_unlock(fimc);
if (ret)
return ret;
+
pix->width = mf->width;
pix->height = mf->height;
}
@@ -1034,8 +1077,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
return ret;
}
- for (i = 0; i < ff->fmt->memplanes; i++)
+ for (i = 0; i < ff->fmt->memplanes; i++) {
+ ff->bytesperline[i] = pix->plane_fmt[i].bytesperline;
ff->payload[i] = pix->plane_fmt[i].sizeimage;
+ }
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
@@ -1058,8 +1103,23 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
+
+ fimc_md_graph_lock(fimc);
+ mutex_lock(&fimc->lock);
+ /*
+ * The graph is walked within __fimc_capture_set_format() to set
+ * the format at the subdevs, thus the graph mutex needs to be held
+ * at this point and acquired before the video mutex, to avoid an
+ * AB-BA deadlock when fimc_md_link_notify() is called by another
+ * thread. Ideally the graph walking and format setting on the whole
+ * pipeline should be removed from this driver and handled in
+ * userspace only.
+ */
+ ret = __fimc_capture_set_format(fimc, f);
- return fimc_capture_set_format(fimc, f);
+ mutex_unlock(&fimc->lock);
+ fimc_md_graph_unlock(fimc);
+ return ret;
}
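The comment in the hunk above spells out the lock-ordering rule: the graph mutex is always taken before the video mutex (fimc->lock), both here and in fimc_md_link_notify(), so the two paths cannot deadlock against each other. If either path took the video mutex first, two threads could each hold one lock and wait forever for the other (the classic AB-BA case). A standalone sketch of that rule with POSIX mutexes; the thread and lock names are illustrative, not taken from the driver.

#include <pthread.h>
#include <stdio.h>

/* One global acquisition order: graph_mutex first, then video_mutex. */
static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t video_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *set_format_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&graph_mutex);   /* A */
        pthread_mutex_lock(&video_mutex);   /* B */
        puts("s_fmt: walking the graph while holding both locks");
        pthread_mutex_unlock(&video_mutex);
        pthread_mutex_unlock(&graph_mutex);
        return NULL;
}

static void *link_notify_path(void *arg)
{
        (void)arg;
        /* Same A -> B order; taking B before A here could deadlock. */
        pthread_mutex_lock(&graph_mutex);
        pthread_mutex_lock(&video_mutex);
        puts("link_notify: reconfiguring links");
        pthread_mutex_unlock(&video_mutex);
        pthread_mutex_unlock(&graph_mutex);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, set_format_path, NULL);
        pthread_create(&t2, NULL, link_notify_path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}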
static int fimc_cap_enum_input(struct file *file, void *priv,
@@ -1528,6 +1588,10 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
*mf = fmt->format;
return 0;
}
+ /* There must be a bug in the driver if this happens */
+ if (WARN_ON(ffmt == NULL))
+ return -EINVAL;
+
/* Update RGB Alpha control state and value range */
fimc_alpha_ctrl_update(ctx);
@@ -1624,16 +1688,6 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
switch (sel->target) {
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- f = &ctx->d_frame;
- case V4L2_SEL_TGT_CROP_BOUNDS:
- r->width = f->o_width;
- r->height = f->o_height;
- r->left = 0;
- r->top = 0;
- mutex_unlock(&fimc->lock);
- return 0;
-
case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
break;
@@ -1652,9 +1706,9 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
spin_lock_irqsave(&fimc->slock, flags);
set_frame_crop(f, r->left, r->top, r->width, r->height);
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- spin_unlock_irqrestore(&fimc->slock, flags);
if (sel->target == V4L2_SEL_TGT_COMPOSE)
ctx->state |= FIMC_COMPOSE;
+ spin_unlock_irqrestore(&fimc->slock, flags);
}
dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
@@ -1690,7 +1744,7 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc)
},
};
- return fimc_capture_set_format(fimc, &fmt);
+ return __fimc_capture_set_format(fimc, &fmt);
}
/* fimc->lock must be already initialized */
@@ -1752,6 +1806,12 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
if (ret)
goto err_ent;
+ /*
+ * Handle locking in the driver itself so the video and the graph
+ * mutex can be acquired/released in the proper order.
+ */
+ v4l2_disable_ioctl_locking(vfd, VIDIOC_TRY_FMT);
+ v4l2_disable_ioctl_locking(vfd, VIDIOC_S_FMT);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index acc0f84ffa56..e3916bde45cf 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -241,7 +241,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -257,14 +257,14 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
ty = d_frame->height;
}
if (tx <= 0 || ty <= 0) {
- dev_err(dev, "Invalid target size: %dx%d", tx, ty);
+ dev_err(dev, "Invalid target size: %dx%d\n", tx, ty);
return -EINVAL;
}
sx = s_frame->width;
sy = s_frame->height;
if (sx <= 0 || sy <= 0) {
- dev_err(dev, "Invalid source size: %dx%d", sx, sy);
+ dev_err(dev, "Invalid source size: %dx%d\n", sx, sy);
return -EINVAL;
}
sc->real_width = sx;
@@ -440,7 +440,7 @@ void fimc_set_yuv_order(struct fimc_ctx *ctx)
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
u32 i, depth = 0;
for (i = 0; i < f->fmt->colplanes; i++)
@@ -524,8 +524,7 @@ static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx
static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *variant = fimc->variant;
- unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
+ const struct fimc_variant *variant = fimc->variant;
int ret = 0;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
@@ -541,8 +540,7 @@ static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_ROTATE:
- if (fimc_capture_pending(fimc) ||
- (ctx->state & flags) == flags) {
+ if (fimc_capture_pending(fimc)) {
ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
ctx->s_frame.height, ctx->d_frame.width,
ctx->d_frame.height, ctrl->val);
@@ -591,7 +589,7 @@ static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
int fimc_ctrls_create(struct fimc_ctx *ctx)
{
- struct fimc_variant *variant = ctx->fimc_dev->variant;
+ const struct fimc_variant *variant = ctx->fimc_dev->variant;
unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
struct fimc_ctrls *ctrls = &ctx->ctrls;
struct v4l2_ctrl_handler *handler = &ctrls->handler;
@@ -691,7 +689,7 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
v4l2_ctrl_unlock(ctrl);
}
-int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
int i;
@@ -704,35 +702,9 @@ int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
pixm->num_planes = frame->fmt->memplanes;
for (i = 0; i < pixm->num_planes; ++i) {
- int bpl = frame->f_width;
- if (frame->fmt->colplanes == 1) /* packed formats */
- bpl = (bpl * frame->fmt->depth[0]) / 8;
- pixm->plane_fmt[i].bytesperline = bpl;
-
- if (frame->fmt->flags & FMT_FLAGS_COMPRESSED) {
- pixm->plane_fmt[i].sizeimage = frame->payload[i];
- continue;
- }
- pixm->plane_fmt[i].sizeimage = (frame->o_width *
- frame->o_height * frame->fmt->depth[i]) / 8;
+ pixm->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pixm->plane_fmt[i].sizeimage = frame->payload[i];
}
- return 0;
-}
-
-void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
-
- frame->f_width = pixm->plane_fmt[0].bytesperline;
- if (frame->fmt->colplanes == 1)
- frame->f_width = (frame->f_width * 8) / frame->fmt->depth[0];
- frame->f_height = pixm->height;
- frame->width = pixm->width;
- frame->height = pixm->height;
- frame->o_width = pixm->width;
- frame->o_height = pixm->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
}
/**
@@ -765,9 +737,16 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
if (fmt->colplanes == 1 && /* Packed */
(bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
bpl = (pix->width * fmt->depth[0]) / 8;
-
- if (i == 0) /* Same bytesperline for each plane. */
+ /*
+ * Currently bytesperline is the same for each plane, except for
+ * the V4L2_PIX_FMT_YUV420M format. This calculation may need to
+ * be changed when other multi-planar formats are added to the
+ * fimc_formats[] array.
+ */
+ if (i == 0)
bytesperline = bpl;
+ else if (i == 1 && fmt->memplanes == 3)
+ bytesperline /= 2;
plane_fmt->bytesperline = bytesperline;
plane_fmt->sizeimage = max((pix->width * pix->height *
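For reference, the new bytesperline /= 2 branch above reproduces the usual 4:2:0 plane geometry of V4L2_PIX_FMT_YUV420M. A small arithmetic check, assuming a 640x480 frame and the conventional per-plane depths of 8, 2 and 2 bits averaged per pixel; both the resolution and the depth values are examples, not taken from the driver tables.

#include <stdio.h>

/* Per-plane geometry for a 3-plane YUV 4:2:0 image (V4L2_PIX_FMT_YUV420M). */
int main(void)
{
        unsigned int width = 640, height = 480;   /* example frame size */
        unsigned int depth[3] = { 8, 2, 2 };      /* avg bits per pixel per plane */
        unsigned int i;

        for (i = 0; i < 3; i++) {
                unsigned int bpl = (i == 0) ? width : width / 2;
                unsigned int sizeimage = width * height * depth[i] / 8;

                printf("plane %u: bytesperline=%u sizeimage=%u\n",
                       i, bpl, sizeimage);
        }
        return 0;
}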
@@ -811,11 +790,11 @@ static void fimc_clk_put(struct fimc_dev *fimc)
{
int i;
for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
- if (IS_ERR_OR_NULL(fimc->clock[i]))
+ if (IS_ERR(fimc->clock[i]))
continue;
clk_unprepare(fimc->clock[i]);
clk_put(fimc->clock[i]);
- fimc->clock[i] = NULL;
+ fimc->clock[i] = ERR_PTR(-EINVAL);
}
}
@@ -823,14 +802,19 @@ static int fimc_clk_get(struct fimc_dev *fimc)
{
int i, ret;
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++)
+ fimc->clock[i] = ERR_PTR(-EINVAL);
+
for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
- if (IS_ERR(fimc->clock[i]))
+ if (IS_ERR(fimc->clock[i])) {
+ ret = PTR_ERR(fimc->clock[i]);
goto err;
+ }
ret = clk_prepare(fimc->clock[i]);
if (ret < 0) {
clk_put(fimc->clock[i]);
- fimc->clock[i] = NULL;
+ fimc->clock[i] = ERR_PTR(-EINVAL);
goto err;
}
}
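The clock handling above switches the "slot not acquired" sentinel from NULL to ERR_PTR(-EINVAL), so a single IS_ERR() test in fimc_clk_put() covers both never-acquired and failed slots. The sketch below re-implements the pointer-encoded error helpers locally, mirroring the idea of include/linux/err.h rather than quoting its exact definitions, just to show the pattern in a standalone program.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Local re-implementation of the kernel's pointer-encoded error helpers. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *clock[3];
        int dummy, i;

        /* Mark every slot "not acquired" up front, as fimc_clk_get() now does. */
        for (i = 0; i < 3; i++)
                clock[i] = ERR_PTR(-EINVAL);

        clock[1] = &dummy;      /* pretend clk_get() succeeded for one slot */

        for (i = 0; i < 3; i++) {
                if (IS_ERR(clock[i])) {
                        printf("slot %d: skipped (err %ld)\n", i, PTR_ERR(clock[i]));
                        continue;
                }
                printf("slot %d: would clk_unprepare()/clk_put()\n", i);
        }
        return 0;
}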
@@ -881,7 +865,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
static int fimc_probe(struct platform_device *pdev)
{
- struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+ const struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
struct s5p_platform_fimc *pdata;
struct fimc_dev *fimc;
struct resource *res;
@@ -922,8 +906,14 @@ static int fimc_probe(struct platform_device *pdev)
ret = fimc_clk_get(fimc);
if (ret)
return ret;
- clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
- clk_enable(fimc->clock[CLK_BUS]);
+
+ ret = clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(fimc->clock[CLK_BUS]);
+ if (ret < 0)
+ return ret;
ret = devm_request_irq(&pdev->dev, res->start, fimc_irq_handler,
0, dev_name(&pdev->dev), fimc);
@@ -957,6 +947,7 @@ err_pm:
err_sd:
fimc_unregister_capture_subdev(fimc);
err_clk:
+ clk_disable(fimc->clock[CLK_BUS]);
fimc_clk_put(fimc);
return ret;
}
@@ -1051,7 +1042,7 @@ static int fimc_remove(struct platform_device *pdev)
}
/* Image pixel limits, similar across several FIMC HW revisions. */
-static struct fimc_pix_limit s5p_pix_limit[4] = {
+static const struct fimc_pix_limit s5p_pix_limit[4] = {
[0] = {
.scaler_en_w = 3264,
.scaler_dis_w = 8192,
@@ -1086,7 +1077,7 @@ static struct fimc_pix_limit s5p_pix_limit[4] = {
},
};
-static struct fimc_variant fimc0_variant_s5p = {
+static const struct fimc_variant fimc0_variant_s5p = {
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cam_if = 1,
@@ -1098,7 +1089,7 @@ static struct fimc_variant fimc0_variant_s5p = {
.pix_limit = &s5p_pix_limit[0],
};
-static struct fimc_variant fimc2_variant_s5p = {
+static const struct fimc_variant fimc2_variant_s5p = {
.has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
@@ -1108,7 +1099,7 @@ static struct fimc_variant fimc2_variant_s5p = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc0_variant_s5pv210 = {
+static const struct fimc_variant fimc0_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1121,7 +1112,7 @@ static struct fimc_variant fimc0_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc1_variant_s5pv210 = {
+static const struct fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1135,7 +1126,7 @@ static struct fimc_variant fimc1_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct fimc_variant fimc2_variant_s5pv210 = {
+static const struct fimc_variant fimc2_variant_s5pv210 = {
.has_cam_if = 1,
.pix_hoff = 1,
.min_inp_pixsize = 16,
@@ -1146,7 +1137,7 @@ static struct fimc_variant fimc2_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct fimc_variant fimc0_variant_exynos4 = {
+static const struct fimc_variant fimc0_variant_exynos4210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1162,9 +1153,8 @@ static struct fimc_variant fimc0_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct fimc_variant fimc3_variant_exynos4 = {
+static const struct fimc_variant fimc3_variant_exynos4210 = {
.pix_hoff = 1,
- .has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
.has_alpha = 1,
@@ -1176,8 +1166,38 @@ static struct fimc_variant fimc3_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[3],
};
+static const struct fimc_variant fimc0_variant_exynos4x12 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .has_cam_if = 1,
+ .has_isp_wb = 1,
+ .has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
+ .has_alpha = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 2,
+ .min_vsize_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static const struct fimc_variant fimc3_variant_exynos4x12 = {
+ .pix_hoff = 1,
+ .has_cistatus2 = 1,
+ .has_mainscaler_ext = 1,
+ .has_alpha = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 2,
+ .min_vsize_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[3],
+};
+
/* S5PC100 */
-static struct fimc_drvdata fimc_drvdata_s5p = {
+static const struct fimc_drvdata fimc_drvdata_s5p = {
.variant = {
[0] = &fimc0_variant_s5p,
[1] = &fimc0_variant_s5p,
@@ -1188,7 +1208,7 @@ static struct fimc_drvdata fimc_drvdata_s5p = {
};
/* S5PV210, S5PC110 */
-static struct fimc_drvdata fimc_drvdata_s5pv210 = {
+static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
.variant = {
[0] = &fimc0_variant_s5pv210,
[1] = &fimc1_variant_s5pv210,
@@ -1199,18 +1219,30 @@ static struct fimc_drvdata fimc_drvdata_s5pv210 = {
};
/* EXYNOS4210, S5PV310, S5PC210 */
-static struct fimc_drvdata fimc_drvdata_exynos4 = {
+static const struct fimc_drvdata fimc_drvdata_exynos4210 = {
+ .variant = {
+ [0] = &fimc0_variant_exynos4210,
+ [1] = &fimc0_variant_exynos4210,
+ [2] = &fimc0_variant_exynos4210,
+ [3] = &fimc3_variant_exynos4210,
+ },
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
.variant = {
- [0] = &fimc0_variant_exynos4,
- [1] = &fimc0_variant_exynos4,
- [2] = &fimc0_variant_exynos4,
- [3] = &fimc3_variant_exynos4,
+ [0] = &fimc0_variant_exynos4x12,
+ [1] = &fimc0_variant_exynos4x12,
+ [2] = &fimc0_variant_exynos4x12,
+ [3] = &fimc3_variant_exynos4x12,
},
.num_entities = 4,
.lclk_frequency = 166000000UL,
};
-static struct platform_device_id fimc_driver_ids[] = {
+static const struct platform_device_id fimc_driver_ids[] = {
{
.name = "s5p-fimc",
.driver_data = (unsigned long)&fimc_drvdata_s5p,
@@ -1219,7 +1251,10 @@ static struct platform_device_id fimc_driver_ids[] = {
.driver_data = (unsigned long)&fimc_drvdata_s5pv210,
}, {
.name = "exynos4-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_exynos4,
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4210,
+ }, {
+ .name = "exynos4x12-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_exynos4x12,
},
{},
};
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.h b/drivers/media/platform/s5p-fimc/fimc-core.h
index c0040d792499..412d50708f75 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.h
+++ b/drivers/media/platform/s5p-fimc/fimc-core.h
@@ -112,9 +112,7 @@ enum fimc_color_fmt {
/* The hardware context state. */
#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_FMT (1 << 3)
-#define FIMC_DST_FMT (1 << 4)
-#define FIMC_COMPOSE (1 << 5)
+#define FIMC_COMPOSE (1 << 1)
#define FIMC_CTX_M2M (1 << 16)
#define FIMC_CTX_CAP (1 << 17)
#define FIMC_CTX_SHUT (1 << 18)
@@ -265,6 +263,7 @@ struct fimc_vid_buffer {
* @width: image pixel width
* @height: image pixel height
* @payload: image size in bytes (w x h x bpp)
+ * @bytesperline: bytesperline value for each plane
* @paddr: image frame buffer physical addresses
* @dma_offset: DMA offset in bytes
* @fmt: fimc color format pointer
@@ -279,6 +278,7 @@ struct fimc_frame {
u32 width;
u32 height;
unsigned int payload[VIDEO_MAX_PLANES];
+ unsigned int bytesperline[VIDEO_MAX_PLANES];
struct fimc_addr paddr;
struct fimc_dma_offset dma_offset;
struct fimc_fmt *fmt;
@@ -372,6 +372,7 @@ struct fimc_pix_limit {
* @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
* are present in this IP revision
* @has_cam_if: set if this instance has a camera input interface
+ * @has_isp_wb: set if this instance has ISP writeback input
* @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
@@ -386,8 +387,9 @@ struct fimc_variant {
unsigned int has_cistatus2:1;
unsigned int has_mainscaler_ext:1;
unsigned int has_cam_if:1;
+ unsigned int has_isp_wb:1;
unsigned int has_alpha:1;
- struct fimc_pix_limit *pix_limit;
+ const struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
@@ -402,7 +404,7 @@ struct fimc_variant {
* @lclk_frequency: local bus clock frequency
*/
struct fimc_drvdata {
- struct fimc_variant *variant[FIMC_MAX_DEVS];
+ const struct fimc_variant *variant[FIMC_MAX_DEVS];
int num_entities;
unsigned long lclk_frequency;
};
@@ -435,7 +437,7 @@ struct fimc_dev {
struct mutex lock;
struct platform_device *pdev;
struct s5p_platform_fimc *pdata;
- struct fimc_variant *variant;
+ const struct fimc_variant *variant;
u16 id;
struct clk *clock[MAX_FIMC_CLOCKS];
void __iomem *regs;
@@ -635,7 +637,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx);
void fimc_ctrls_delete(struct fimc_ctx *ctx);
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
-int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
+void __fimc_get_format(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
@@ -650,7 +652,6 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
struct fimc_frame *frame, struct fimc_addr *paddr);
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
void fimc_set_yuv_order(struct fimc_ctx *ctx);
-void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
int fimc_register_m2m_device(struct fimc_dev *fimc,
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
index a22d7eb05c82..f0af0754a7b4 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
@@ -65,7 +65,7 @@ void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
u32 cfg, intsrc;
/* Select interrupts to be enabled for each output mode */
- if (dev->out_path == FIMC_IO_DMA) {
+ if (atomic_read(&dev->out_path) == FIMC_IO_DMA) {
intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
FLITE_REG_CIGCTRL_IRQ_LASTEN |
FLITE_REG_CIGCTRL_IRQ_STARTEN;
@@ -187,12 +187,12 @@ static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *s_info)
+ struct fimc_source_info *si)
{
u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
- unsigned int flags = s_info->flags;
+ unsigned int flags = si->flags;
- if (s_info->bus_type != FIMC_MIPI_CSI2) {
+ if (si->sensor_bus_type != FIMC_BUS_TYPE_MIPI_CSI2) {
cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
FLITE_REG_CIGCTRL_INVPOLPCLK |
FLITE_REG_CIGCTRL_INVPOLVSYNC |
@@ -212,7 +212,7 @@ void flite_hw_set_camera_bus(struct fimc_lite *dev,
writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
- flite_hw_set_camera_port(dev, s_info->mux_id);
+ flite_hw_set_camera_port(dev, si->mux_id);
}
static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
@@ -292,9 +292,11 @@ void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
};
u32 i;
- pr_info("--- %s ---\n", label);
+ v4l2_info(&dev->subdev, "--- %s ---\n", label);
+
for (i = 0; i < ARRAY_SIZE(registers); i++) {
u32 cfg = readl(dev->regs + registers[i].offset);
- pr_info("%s: %s:\t0x%08x\n", __func__, registers[i].name, cfg);
+ v4l2_info(&dev->subdev, "%9s: 0x%08x\n",
+ registers[i].name, cfg);
}
}
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.h b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
index adb9e9e6f3c2..0e345844c13a 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
@@ -131,9 +131,9 @@ void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
void flite_hw_capture_start(struct fimc_lite *dev);
void flite_hw_capture_stop(struct fimc_lite *dev);
void flite_hw_set_camera_bus(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *s_info);
+ struct fimc_source_info *s_info);
void flite_hw_set_camera_polarity(struct fimc_lite *dev,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index 67db9f8102e4..bfc4206935c8 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -120,25 +120,29 @@ static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
return def_fmt;
}
-static int fimc_lite_hw_init(struct fimc_lite *fimc)
+static int fimc_lite_hw_init(struct fimc_lite *fimc, bool isp_output)
{
struct fimc_pipeline *pipeline = &fimc->pipeline;
- struct fimc_sensor_info *sensor;
+ struct v4l2_subdev *sensor;
+ struct fimc_sensor_info *si;
unsigned long flags;
- if (pipeline->subdevs[IDX_SENSOR] == NULL)
+ sensor = isp_output ? fimc->sensor : pipeline->subdevs[IDX_SENSOR];
+
+ if (sensor == NULL)
return -ENXIO;
if (fimc->fmt == NULL)
return -EINVAL;
- sensor = v4l2_get_subdev_hostdata(pipeline->subdevs[IDX_SENSOR]);
+ /* Get sensor configuration data from the sensor subdev */
+ si = v4l2_get_subdev_hostdata(sensor);
spin_lock_irqsave(&fimc->slock, flags);
- flite_hw_set_camera_bus(fimc, &sensor->pdata);
+ flite_hw_set_camera_bus(fimc, &si->pdata);
flite_hw_set_source_format(fimc, &fimc->inp_frame);
flite_hw_set_window_offset(fimc, &fimc->inp_frame);
- flite_hw_set_output_dma(fimc, &fimc->out_frame, true);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, !isp_output);
flite_hw_set_interrupt_mask(fimc);
flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
@@ -256,7 +260,7 @@ static irqreturn_t flite_irq_handler(int irq, void *priv)
wake_up(&fimc->irq_queue);
}
- if (fimc->out_path != FIMC_IO_DMA)
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA)
goto done;
if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
@@ -296,7 +300,7 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
fimc->frame_count = 0;
- ret = fimc_lite_hw_init(fimc);
+ ret = fimc_lite_hw_init(fimc, false);
if (ret) {
fimc_lite_reinit(fimc, false);
return ret;
@@ -455,10 +459,16 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
static int fimc_lite_open(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
+ struct media_entity *me = &fimc->vfd.entity;
int ret;
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
+ mutex_lock(&me->parent->graph_mutex);
+
+ mutex_lock(&fimc->lock);
+ if (atomic_read(&fimc->out_path) != FIMC_IO_DMA) {
+ ret = -EBUSY;
+ goto done;
+ }
set_bit(ST_FLITE_IN_USE, &fimc->state);
ret = pm_runtime_get_sync(&fimc->pdev->dev);
@@ -469,7 +479,8 @@ static int fimc_lite_open(struct file *file)
if (ret < 0)
goto done;
- if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+ if (++fimc->ref_count == 1 &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
ret = fimc_pipeline_call(fimc, open, &fimc->pipeline,
&fimc->vfd.entity, true);
if (ret < 0) {
@@ -483,6 +494,7 @@ static int fimc_lite_open(struct file *file)
}
done:
mutex_unlock(&fimc->lock);
+ mutex_unlock(&me->parent->graph_mutex);
return ret;
}
@@ -493,7 +505,8 @@ static int fimc_lite_close(struct file *file)
mutex_lock(&fimc->lock);
- if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
+ if (--fimc->ref_count == 0 &&
+ atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
clear_bit(ST_FLITE_IN_USE, &fimc->state);
fimc_lite_stop_capture(fimc, false);
fimc_pipeline_call(fimc, close, &fimc->pipeline);
@@ -598,7 +611,7 @@ static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->variant->win_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -618,7 +631,7 @@ static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->variant->out_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -962,6 +975,29 @@ static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
.vidioc_streamoff = fimc_lite_streamoff,
};
+/* Called with the media graph mutex held */
+static struct v4l2_subdev *__find_remote_sensor(struct media_entity *me)
+{
+ struct media_pad *pad = &me->pads[0];
+ struct v4l2_subdev *sd;
+
+ while (pad->flags & MEDIA_PAD_FL_SINK) {
+ /* source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR)
+ return sd;
+ /* sink pad */
+ pad = &sd->entity.pads[0];
+ }
+ return NULL;
+}
+
/* Capture subdev media entity operations */
static int fimc_lite_link_setup(struct media_entity *entity,
const struct media_pad *local,
@@ -970,46 +1006,60 @@ static int fimc_lite_link_setup(struct media_entity *entity,
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
unsigned int remote_ent_type = media_entity_type(remote->entity);
+ int ret = 0;
if (WARN_ON(fimc == NULL))
return 0;
- v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x",
- __func__, local->entity->name, remote->entity->name,
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x\n",
+ __func__, remote->entity->name, local->entity->name,
flags, fimc->source_subdev_grp_id);
- switch (local->index) {
- case FIMC_SD_PAD_SINK:
- if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV)
- return -EINVAL;
+ mutex_lock(&fimc->lock);
+ switch (local->index) {
+ case FLITE_SD_PAD_SINK:
+ if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV) {
+ ret = -EINVAL;
+ break;
+ }
if (flags & MEDIA_LNK_FL_ENABLED) {
- if (fimc->source_subdev_grp_id != 0)
- return -EBUSY;
- fimc->source_subdev_grp_id = sd->grp_id;
- return 0;
+ if (fimc->source_subdev_grp_id == 0)
+ fimc->source_subdev_grp_id = sd->grp_id;
+ else
+ ret = -EBUSY;
+ } else {
+ fimc->source_subdev_grp_id = 0;
+ fimc->sensor = NULL;
}
+ break;
- fimc->source_subdev_grp_id = 0;
+ case FLITE_SD_PAD_SOURCE_DMA:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_DEVNODE)
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
+ else
+ ret = -EINVAL;
break;
- case FIMC_SD_PAD_SOURCE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- fimc->out_path = FIMC_IO_NONE;
- return 0;
- }
- if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
- fimc->out_path = FIMC_IO_ISP;
+ case FLITE_SD_PAD_SOURCE_ISP:
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ atomic_set(&fimc->out_path, FIMC_IO_NONE);
+ else if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
else
- fimc->out_path = FIMC_IO_DMA;
+ ret = -EINVAL;
break;
default:
v4l2_err(sd, "Invalid pad index\n");
- return -EINVAL;
+ ret = -EINVAL;
}
+ mb();
- return 0;
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static const struct media_entity_operations fimc_lite_subdev_media_ops = {
@@ -1070,14 +1120,16 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
struct flite_frame *source = &fimc->out_frame;
const struct fimc_fmt *ffmt;
- v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d",
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d\n",
fmt->pad, mf->code, mf->width, mf->height);
mf->colorspace = V4L2_COLORSPACE_JPEG;
mutex_lock(&fimc->lock);
- if ((fimc->out_path == FIMC_IO_ISP && sd->entity.stream_count > 0) ||
- (fimc->out_path == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) {
+ if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
+ sd->entity.stream_count > 0) ||
+ (atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
+ vb2_is_busy(&fimc->vb_queue))) {
mutex_unlock(&fimc->lock);
return -EBUSY;
}
@@ -1144,7 +1196,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1178,7 +1230,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1188,25 +1240,47 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned long flags;
+ int ret;
- if (fimc->out_path == FIMC_IO_DMA)
- return -ENOIOCTLCMD;
-
- /* TODO: */
+ /*
+ * Find the sensor subdev linked to FIMC-LITE directly or through
+ * MIPI-CSIS. This is required for the configuration where FIMC-LITE
+ * is used as a subdev only and feeds data internally to FIMC-IS.
+ * The pipeline links are protected through entity.stream_count,
+ * so there is no need to take the media graph mutex here.
+ */
+ fimc->sensor = __find_remote_sensor(&sd->entity);
- return 0;
-}
+ if (atomic_read(&fimc->out_path) != FIMC_IO_ISP)
+ return -ENOIOCTLCMD;
-static int fimc_lite_subdev_s_power(struct v4l2_subdev *sd, int on)
-{
- struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ mutex_lock(&fimc->lock);
+ if (on) {
+ flite_hw_reset(fimc);
+ ret = fimc_lite_hw_init(fimc, true);
+ if (!ret) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ } else {
+ set_bit(ST_FLITE_OFF, &fimc->state);
- if (fimc->out_path == FIMC_IO_DMA)
- return -ENOIOCTLCMD;
+ spin_lock_irqsave(&fimc->slock, flags);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
- /* TODO: */
+ ret = wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ msecs_to_jiffies(200));
+ if (ret == 0)
+ v4l2_err(sd, "s_stream(0) timeout\n");
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ }
- return 0;
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static int fimc_lite_log_status(struct v4l2_subdev *sd)
@@ -1227,7 +1301,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
memset(vfd, 0, sizeof(*vfd));
fimc->fmt = &fimc_lite_formats[0];
- fimc->out_path = FIMC_IO_DMA;
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
fimc->index);
@@ -1308,7 +1382,6 @@ static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
};
static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
- .s_power = fimc_lite_subdev_s_power,
.log_status = fimc_lite_log_status,
};
@@ -1347,9 +1420,10 @@ static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
- fimc->subdev_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- fimc->subdev_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_DMA].flags = MEDIA_PAD_FL_SOURCE;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE_ISP].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FLITE_SD_PADS_NUM,
fimc->subdev_pads, 0);
if (ret)
return ret;
@@ -1516,7 +1590,7 @@ static int fimc_lite_resume(struct device *dev)
INIT_LIST_HEAD(&fimc->active_buf_q);
fimc_pipeline_call(fimc, open, &fimc->pipeline,
&fimc->vfd.entity, false);
- fimc_lite_hw_init(fimc);
+ fimc_lite_hw_init(fimc, atomic_read(&fimc->out_path) == FIMC_IO_ISP);
clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
for (i = 0; i < fimc->reqbufs_count; i++) {
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.h b/drivers/media/platform/s5p-fimc/fimc-lite.h
index 3081db35c5b0..7085761f8c4b 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.h
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.h
@@ -45,8 +45,9 @@ enum {
};
#define FLITE_SD_PAD_SINK 0
-#define FLITE_SD_PAD_SOURCE 1
-#define FLITE_SD_PADS_NUM 2
+#define FLITE_SD_PAD_SOURCE_DMA 1
+#define FLITE_SD_PAD_SOURCE_ISP 2
+#define FLITE_SD_PADS_NUM 3
struct flite_variant {
unsigned short max_width;
@@ -104,6 +105,7 @@ struct flite_buffer {
* @subdev: FIMC-LITE subdev
* @vd_pad: media (sink) pad for the capture video node
* @subdev_pads: the subdev media pads
+ * @sensor: sensor subdev attached to FIMC-LITE directly or through MIPI-CSIS
* @ctrl_handler: v4l2 control handler
* @test_pattern: test pattern controls
* @index: FIMC-LITE platform device index
@@ -139,6 +141,7 @@ struct fimc_lite {
struct v4l2_subdev subdev;
struct media_pad vd_pad;
struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_subdev *sensor;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *test_pattern;
u32 index;
@@ -156,7 +159,7 @@ struct fimc_lite {
unsigned long payload[FLITE_MAX_PLANES];
struct flite_frame inp_frame;
struct flite_frame out_frame;
- enum fimc_datapath out_path;
+ atomic_t out_path;
unsigned int source_subdev_grp_id;
unsigned long state;
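out_path becomes an atomic_t above because the interrupt handler and flite_hw_set_interrupt_mask() read it without taking fimc->lock, while fimc_lite_link_setup() updates it under the lock. A standalone sketch of the same reader/writer split using C11 atomics; the enum values and function names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

enum datapath { IO_NONE, IO_DMA, IO_ISP };

/* Written from link setup under a mutex, read lock-free from IRQ context. */
static atomic_int out_path = IO_NONE;

static void link_setup(enum datapath p)
{
        /* the driver holds its mutex around this store */
        atomic_store(&out_path, p);
}

static void irq_handler(void)
{
        if (atomic_load(&out_path) != IO_DMA) {
                puts("irq: not in DMA mode, nothing to complete");
                return;
        }
        puts("irq: complete the active DMA buffer");
}

int main(void)
{
        irq_handler();
        link_setup(IO_DMA);
        irq_handler();
        return 0;
}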
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 1d21da4bd24b..f3d535cdd87f 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -1,8 +1,8 @@
/*
* Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
*
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -160,8 +160,7 @@ static void fimc_device_run(void *priv)
fimc_hw_set_output_addr(fimc, &df->paddr, -1);
fimc_activate_capture(ctx);
- ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
- FIMC_SRC_FMT | FIMC_DST_FMT);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
@@ -294,13 +293,14 @@ static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
if (IS_ERR(frame))
return PTR_ERR(frame);
- return fimc_fill_format(frame, f);
+ __fimc_get_format(frame, f);
+ return 0;
}
static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_variant *variant = fimc->variant;
+ const struct fimc_variant *variant = fimc->variant;
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct fimc_fmt *fmt;
u32 max_w, mod_x, mod_y;
@@ -308,8 +308,6 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
if (!IS_M2M(f->type))
return -EINVAL;
- dbg("w: %d, h: %d", pix->width, pix->height);
-
fmt = fimc_find_format(&pix->pixelformat, NULL,
get_m2m_fmt_flags(f->type), 0);
if (WARN(fmt == NULL, "Pixel format lookup failed"))
@@ -349,19 +347,39 @@ static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return fimc_try_fmt_mplane(ctx, f);
}
+static void __set_frame_format(struct fimc_frame *frame, struct fimc_fmt *fmt,
+ struct v4l2_pix_format_mplane *pixm)
+{
+ int i;
+
+ for (i = 0; i < fmt->colplanes; i++) {
+ frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
+ frame->payload[i] = pixm->plane_fmt[i].sizeimage;
+ }
+
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+ frame->o_width = pixm->width;
+ frame->o_height = pixm->height;
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->fmt = fmt;
+}
+
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_fmt *fmt;
struct vb2_queue *vq;
struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pix;
- int i, ret = 0;
+ int ret;
ret = fimc_try_fmt_mplane(ctx, f);
if (ret)
@@ -379,31 +397,16 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
else
frame = &ctx->d_frame;
- pix = &f->fmt.pix_mp;
- frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (!frame->fmt)
+ fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!fmt)
return -EINVAL;
+ __set_frame_format(frame, fmt, &f->fmt.pix_mp);
+
/* Update RGB Alpha control state and value range */
fimc_alpha_ctrl_update(ctx);
- for (i = 0; i < frame->fmt->colplanes; i++) {
- frame->payload[i] =
- (pix->width * pix->height * frame->fmt->depth[i]) / 8;
- }
-
- fimc_fill_frame(frame, f);
-
- ctx->scaler.enabled = 1;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
- else
- fimc_ctx_state_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
-
- dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-
return 0;
}
@@ -411,7 +414,6 @@ static int fimc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -419,7 +421,6 @@ static int fimc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
@@ -427,7 +428,6 @@ static int fimc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
@@ -435,7 +435,6 @@ static int fimc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
@@ -443,7 +442,6 @@ static int fimc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
@@ -452,15 +450,6 @@ static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- /* The source and target color format need to be set */
- if (V4L2_TYPE_IS_OUTPUT(type)) {
- if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
- return -EINVAL;
- } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
- return -EINVAL;
- }
-
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
@@ -468,7 +457,6 @@ static int fimc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
-
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
@@ -576,20 +564,18 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *
&ctx->s_frame : &ctx->d_frame;
/* Check to see if scaling ratio is within supported range */
- if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
- if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(ctx, cr.c.width,
- cr.c.height, ctx->d_frame.width,
- ctx->d_frame.height, ctx->rotation);
- } else {
- ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
- ctx->s_frame.height, cr.c.width,
- cr.c.height, ctx->rotation);
- }
- if (ret) {
- v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
- return -EINVAL;
- }
+ if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr.c.width,
+ cr.c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr.c.width,
+ cr.c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
}
f->offs_h = cr.c.left;
@@ -652,6 +638,29 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return vb2_queue_init(dst_vq);
}
+static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane pixm = {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .width = 800,
+ .height = 600,
+ .plane_fmt[0] = {
+ .bytesperline = 800 * 4,
+ .sizeimage = 800 * 4 * 600,
+ },
+ };
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ if (!fmt)
+ return -EINVAL;
+
+ __set_frame_format(&ctx->s_frame, fmt, &pixm);
+ __set_frame_format(&ctx->d_frame, fmt, &pixm);
+
+ return 0;
+}
+
static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
@@ -696,6 +705,7 @@ static int fimc_m2m_open(struct file *file)
ctx->flags = 0;
ctx->in_path = FIMC_IO_DMA;
ctx->out_path = FIMC_IO_DMA;
+ ctx->scaler.enabled = 1;
ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
@@ -706,9 +716,15 @@ static int fimc_m2m_open(struct file *file)
if (fimc->m2m.refcnt++ == 0)
set_bit(ST_M2M_RUN, &fimc->state);
+ ret = fimc_m2m_set_default_format(ctx);
+ if (ret < 0)
+ goto error_m2m_ctx;
+
mutex_unlock(&fimc->lock);
return 0;
+error_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
error_fh:
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index b4a68ecf0ca7..a17fcb2d5d41 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -1,8 +1,8 @@
/*
* S5P/EXYNOS4 SoC series camera host interface media device driver
*
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -62,16 +62,17 @@ static void fimc_pipeline_prepare(struct fimc_pipeline *p,
sd = media_entity_to_v4l2_subdev(pad->entity);
switch (sd->grp_id) {
- case SENSOR_GROUP_ID:
+ case GRP_ID_FIMC_IS_SENSOR:
+ case GRP_ID_SENSOR:
p->subdevs[IDX_SENSOR] = sd;
break;
- case CSIS_GROUP_ID:
+ case GRP_ID_CSIS:
p->subdevs[IDX_CSIS] = sd;
break;
- case FLITE_GROUP_ID:
+ case GRP_ID_FLITE:
p->subdevs[IDX_FLITE] = sd;
break;
- case FIMC_GROUP_ID:
+ case GRP_ID_FIMC:
/* No need to control FIMC subdev through subdev ops */
break;
default:
@@ -141,7 +142,7 @@ static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state)
* @me: media entity to start graph walk with
* @prep: true to acquire sensor (and csis) subdevs
*
- * This function must be called with the graph mutex held.
+ * Called with the graph mutex held.
*/
static int __fimc_pipeline_open(struct fimc_pipeline *p,
struct media_entity *me, bool prep)
@@ -161,30 +162,19 @@ static int __fimc_pipeline_open(struct fimc_pipeline *p,
return fimc_pipeline_s_power(p, 1);
}
-static int fimc_pipeline_open(struct fimc_pipeline *p,
- struct media_entity *me, bool prep)
-{
- int ret;
-
- mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_open(p, me, prep);
- mutex_unlock(&me->parent->graph_mutex);
-
- return ret;
-}
-
/**
* __fimc_pipeline_close - disable the sensor clock and pipeline power
* @fimc: fimc device terminating the pipeline
*
- * Disable power of all subdevs in the pipeline and turn off the external
- * sensor clock.
- * Called with the graph mutex held.
+ * Disable power of all subdevs and turn the external sensor clock off.
*/
static int __fimc_pipeline_close(struct fimc_pipeline *p)
{
int ret = 0;
+ if (!p || !p->subdevs[IDX_SENSOR])
+ return -EINVAL;
+
if (p->subdevs[IDX_SENSOR]) {
ret = fimc_pipeline_s_power(p, 0);
fimc_md_set_camclk(p->subdevs[IDX_SENSOR], false);
@@ -192,28 +182,12 @@ static int __fimc_pipeline_close(struct fimc_pipeline *p)
return ret == -ENXIO ? 0 : ret;
}
-static int fimc_pipeline_close(struct fimc_pipeline *p)
-{
- struct media_entity *me;
- int ret;
-
- if (!p || !p->subdevs[IDX_SENSOR])
- return -EINVAL;
-
- me = &p->subdevs[IDX_SENSOR]->entity;
- mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_close(p);
- mutex_unlock(&me->parent->graph_mutex);
-
- return ret;
-}
-
/**
- * fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
+ * __fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
* @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+static int __fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
int i, ret;
@@ -235,9 +209,9 @@ static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
static const struct fimc_pipeline_ops fimc_pipeline_ops = {
- .open = fimc_pipeline_open,
- .close = fimc_pipeline_close,
- .set_stream = fimc_pipeline_s_stream,
+ .open = __fimc_pipeline_open,
+ .close = __fimc_pipeline_close,
+ .set_stream = __fimc_pipeline_s_stream,
};
/*
@@ -269,7 +243,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
return ERR_PTR(-EPROBE_DEFER);
}
v4l2_set_subdev_hostdata(sd, s_info);
- sd->grp_id = SENSOR_GROUP_ID;
+ sd->grp_id = GRP_ID_SENSOR;
v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n",
s_info->pdata.board_info->type);
@@ -316,7 +290,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
for (i = 0; i < num_clients; i++) {
struct v4l2_subdev *sd;
- fmd->sensor[i].pdata = pdata->isp_info[i];
+ fmd->sensor[i].pdata = pdata->source_info[i];
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
if (ret)
break;
@@ -338,138 +312,149 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
}
/*
- * MIPI CSIS and FIMC platform devices registration.
+ * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration.
*/
-static int fimc_register_callback(struct device *dev, void *p)
+
+static int register_fimc_lite_entity(struct fimc_md *fmd,
+ struct fimc_lite *fimc_lite)
{
- struct fimc_dev *fimc = dev_get_drvdata(dev);
struct v4l2_subdev *sd;
- struct fimc_md *fmd = p;
int ret;
- if (fimc == NULL || fimc->id >= FIMC_MAX_DEVS)
- return 0;
+ if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS ||
+ fmd->fimc_lite[fimc_lite->index]))
+ return -EBUSY;
- sd = &fimc->vid_cap.subdev;
- sd->grp_id = FIMC_GROUP_ID;
+ sd = &fimc_lite->subdev;
+ sd->grp_id = GRP_ID_FLITE;
v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
- if (ret) {
- v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
- fimc->id, ret);
- return ret;
- }
-
- fmd->fimc[fimc->id] = fimc;
- return 0;
+ if (!ret)
+ fmd->fimc_lite[fimc_lite->index] = fimc_lite;
+ else
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n",
+ fimc_lite->index);
+ return ret;
}
-static int fimc_lite_register_callback(struct device *dev, void *p)
+static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc)
{
- struct fimc_lite *fimc = dev_get_drvdata(dev);
- struct fimc_md *fmd = p;
+ struct v4l2_subdev *sd;
int ret;
- if (fimc == NULL || fimc->index >= FIMC_LITE_MAX_DEVS)
- return 0;
+ if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id]))
+ return -EBUSY;
- fimc->subdev.grp_id = FLITE_GROUP_ID;
- v4l2_set_subdev_hostdata(&fimc->subdev, (void *)&fimc_pipeline_ops);
+ sd = &fimc->vid_cap.subdev;
+ sd->grp_id = GRP_ID_FIMC;
+ v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops);
- ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
- if (ret) {
- v4l2_err(&fmd->v4l2_dev,
- "Failed to register FIMC-LITE.%d (%d)\n",
- fimc->index, ret);
- return ret;
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (!ret) {
+ fmd->fimc[fimc->id] = fimc;
+ fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ } else {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
}
-
- fmd->fimc_lite[fimc->index] = fimc;
- return 0;
+ return ret;
}
-static int csis_register_callback(struct device *dev, void *p)
+static int register_csis_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct platform_device *pdev;
- struct fimc_md *fmd = p;
+ struct device_node *node = pdev->dev.of_node;
int id, ret;
- if (!sd)
- return 0;
- pdev = v4l2_get_subdevdata(sd);
- if (!pdev || pdev->id < 0 || pdev->id >= CSIS_MAX_ENTITIES)
- return 0;
- v4l2_info(sd, "csis%d sd: %s\n", pdev->id, sd->name);
+ id = node ? of_alias_get_id(node, "csis") : max(0, pdev->id);
+
+ if (WARN_ON(id >= CSIS_MAX_ENTITIES || fmd->csis[id].sd))
+ return -EBUSY;
- id = pdev->id < 0 ? 0 : pdev->id;
- sd->grp_id = CSIS_GROUP_ID;
+ if (WARN_ON(id >= CSIS_MAX_ENTITIES))
+ return 0;
+ sd->grp_id = GRP_ID_CSIS;
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (!ret)
fmd->csis[id].sd = sd;
else
v4l2_err(&fmd->v4l2_dev,
- "Failed to register CSIS subdevice: %d\n", ret);
+ "Failed to register MIPI-CSIS.%d (%d)\n", id, ret);
return ret;
}
-/**
- * fimc_md_register_platform_entities - register FIMC and CSIS media entities
- */
-static int fimc_md_register_platform_entities(struct fimc_md *fmd)
+static int fimc_md_register_platform_entity(struct fimc_md *fmd,
+ struct platform_device *pdev,
+ int plat_entity)
{
- struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
- struct device_driver *driver;
- int ret, i;
-
- driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
- if (!driver) {
- v4l2_warn(&fmd->v4l2_dev,
- "%s driver not found, deffering probe\n",
- FIMC_MODULE_NAME);
- return -EPROBE_DEFER;
- }
-
- ret = driver_for_each_device(driver, NULL, fmd,
- fimc_register_callback);
- if (ret)
- return ret;
-
- driver = driver_find(FIMC_LITE_DRV_NAME, &platform_bus_type);
- if (driver && try_module_get(driver->owner)) {
- ret = driver_for_each_device(driver, NULL, fmd,
- fimc_lite_register_callback);
- if (ret)
- return ret;
- module_put(driver->owner);
- }
- /*
- * Check if there is any sensor on the MIPI-CSI2 bus and
- * if not skip the s5p-csis module loading.
- */
- if (pdata == NULL)
- return 0;
- for (i = 0; i < pdata->num_clients; i++) {
- if (pdata->isp_info[i].bus_type == FIMC_MIPI_CSI2) {
- ret = 1;
+ struct device *dev = &pdev->dev;
+ int ret = -EPROBE_DEFER;
+ void *drvdata;
+
+ /* Lock to ensure dev->driver won't change. */
+ device_lock(dev);
+
+ if (!dev->driver || !try_module_get(dev->driver->owner))
+ goto dev_unlock;
+
+ drvdata = dev_get_drvdata(dev);
+ /* If drvdata is NULL the subdev did not probe successfully */
+ if (drvdata) {
+ switch (plat_entity) {
+ case IDX_FIMC:
+ ret = register_fimc_entity(fmd, drvdata);
break;
+ case IDX_FLITE:
+ ret = register_fimc_lite_entity(fmd, drvdata);
+ break;
+ case IDX_CSIS:
+ ret = register_csis_entity(fmd, pdev, drvdata);
+ break;
+ default:
+ ret = -ENODEV;
}
}
- if (!ret)
- return 0;
- driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
- if (!driver || !try_module_get(driver->owner)) {
- v4l2_warn(&fmd->v4l2_dev,
- "%s driver not found, deffering probe\n",
- CSIS_DRIVER_NAME);
- return -EPROBE_DEFER;
+ module_put(dev->driver->owner);
+dev_unlock:
+ device_unlock(dev);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
+ dev_name(dev));
+ else if (ret < 0)
+ dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
+ dev_name(dev), ret);
+ return ret;
+}
+
+static int fimc_md_pdev_match(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int plat_entity = -1;
+ int ret;
+ char *p;
+
+ if (!get_device(dev))
+ return -ENODEV;
+
+ if (!strcmp(pdev->name, CSIS_DRIVER_NAME)) {
+ plat_entity = IDX_CSIS;
+ } else if (!strcmp(pdev->name, FIMC_LITE_DRV_NAME)) {
+ plat_entity = IDX_FLITE;
+ } else {
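+ /* Match FIMC devices: the name ends with "fimc" (this excludes e.g. "fimc-lite") */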
+ p = strstr(pdev->name, "fimc");
+ if (p && *(p + 4) == 0)
+ plat_entity = IDX_FIMC;
}
- return driver_for_each_device(driver, NULL, fmd,
- csis_register_callback);
+ if (plat_entity >= 0)
+ ret = fimc_md_register_platform_entity(data, pdev,
+ plat_entity);
+ put_device(dev);
+ return 0;
}
static void fimc_md_unregister_entities(struct fimc_md *fmd)
@@ -487,7 +472,7 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
if (fmd->fimc_lite[i] == NULL)
continue;
v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev);
- fmd->fimc[i]->pipeline_ops = NULL;
+ fmd->fimc_lite[i]->pipeline_ops = NULL;
fmd->fimc_lite[i] = NULL;
}
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
@@ -503,6 +488,7 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
fimc_md_unregister_sensor(fmd->sensor[i].subdev);
fmd->sensor[i].subdev = NULL;
}
+ v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n");
}
/**
@@ -518,7 +504,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
struct v4l2_subdev *sensor,
int pad, int link_mask)
{
- struct fimc_sensor_info *s_info;
+ struct fimc_sensor_info *s_info = NULL;
struct media_entity *sink;
unsigned int flags = 0;
int ret, i;
@@ -582,7 +568,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (ret)
break;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
}
return 0;
@@ -602,7 +588,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
source = &fimc->subdev.entity;
sink = &fimc->vfd.entity;
/* FIMC-LITE's subdev and video node */
- ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_DMA,
sink, 0, flags);
if (ret)
break;
@@ -626,9 +612,9 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
*/
static int fimc_md_create_links(struct fimc_md *fmd)
{
- struct v4l2_subdev *csi_sensors[2] = { NULL };
+ struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL };
struct v4l2_subdev *sensor, *csis;
- struct s5p_fimc_isp_info *pdata;
+ struct fimc_source_info *pdata;
struct fimc_sensor_info *s_info;
struct media_entity *source, *sink;
int i, pad, fimc_id = 0, ret = 0;
@@ -646,8 +632,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
source = NULL;
pdata = &s_info->pdata;
- switch (pdata->bus_type) {
- case FIMC_MIPI_CSI2:
+ switch (pdata->sensor_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
"Wrong CSI channel id: %d\n", pdata->mux_id))
return -EINVAL;
@@ -658,28 +644,29 @@ static int fimc_md_create_links(struct fimc_md *fmd)
"but s5p-csis module is not loaded!\n"))
return -EINVAL;
- ret = media_entity_create_link(&sensor->entity, 0,
+ pad = sensor->entity.num_pads - 1;
+ ret = media_entity_create_link(&sensor->entity, pad,
&csis->entity, CSIS_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n",
sensor->entity.name, csis->entity.name);
source = NULL;
csi_sensors[pdata->mux_id] = sensor;
break;
- case FIMC_ITU_601...FIMC_ITU_656:
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
source = &sensor->entity;
pad = 0;
break;
default:
v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
- pdata->bus_type);
+ pdata->sensor_bus_type);
return -EINVAL;
}
if (source == NULL)
@@ -690,7 +677,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
pad, link_mask);
}
- for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
+ for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
source = &fmd->csis[i].sd->entity;
@@ -721,42 +708,61 @@ static int fimc_md_create_links(struct fimc_md *fmd)
/*
* The peripheral sensor clock management.
*/
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+ int i = FIMC_MAX_CAMCLKS;
+
+ while (--i >= 0) {
+ if (IS_ERR(fmd->camclk[i].clock))
+ continue;
+ clk_unprepare(fmd->camclk[i].clock);
+ clk_put(fmd->camclk[i].clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ }
+}
+
static int fimc_md_get_clocks(struct fimc_md *fmd)
{
+ struct device *dev = NULL;
char clk_name[32];
struct clk *clock;
- int i;
+ int ret, i;
+
+ for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+
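+ /* On non-dt platforms the sclk_cam clocks are looked up by name only (NULL device) */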
+ if (fmd->pdev->dev.of_node)
+ dev = &fmd->pdev->dev;
for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
- clock = clk_get(NULL, clk_name);
- if (IS_ERR_OR_NULL(clock)) {
- v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s",
- clk_name);
- return -ENXIO;
+ clock = clk_get(dev, clk_name);
+
+ if (IS_ERR(clock)) {
+ dev_err(&fmd->pdev->dev, "Failed to get clock: %s\n",
+ clk_name);
+ ret = PTR_ERR(clock);
+ break;
+ }
+ ret = clk_prepare(clock);
+ if (ret < 0) {
+ clk_put(clock);
+ fmd->camclk[i].clock = ERR_PTR(-EINVAL);
+ break;
}
fmd->camclk[i].clock = clock;
}
- return 0;
-}
-
-static void fimc_md_put_clocks(struct fimc_md *fmd)
-{
- int i = FIMC_MAX_CAMCLKS;
+ if (ret)
+ fimc_md_put_clocks(fmd);
- while (--i >= 0) {
- if (IS_ERR_OR_NULL(fmd->camclk[i].clock))
- continue;
- clk_put(fmd->camclk[i].clock);
- fmd->camclk[i].clock = NULL;
- }
+ return ret;
}
static int __fimc_md_set_camclk(struct fimc_md *fmd,
struct fimc_sensor_info *s_info,
bool on)
{
- struct s5p_fimc_isp_info *pdata = &s_info->pdata;
+ struct fimc_source_info *pdata = &s_info->pdata;
struct fimc_camclk_info *camclk;
int ret = 0;
@@ -820,7 +826,9 @@ static int fimc_md_link_notify(struct media_pad *source,
struct fimc_dev *fimc = NULL;
struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
+ struct mutex *lock;
int ret = 0;
+ int ref_count;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
return 0;
@@ -828,28 +836,33 @@ static int fimc_md_link_notify(struct media_pad *source,
sd = media_entity_to_v4l2_subdev(sink->entity);
switch (sd->grp_id) {
- case FLITE_GROUP_ID:
+ case GRP_ID_FLITE:
fimc_lite = v4l2_get_subdevdata(sd);
+ if (WARN_ON(fimc_lite == NULL))
+ return 0;
pipeline = &fimc_lite->pipeline;
+ lock = &fimc_lite->lock;
break;
- case FIMC_GROUP_ID:
+ case GRP_ID_FIMC:
fimc = v4l2_get_subdevdata(sd);
+ if (WARN_ON(fimc == NULL))
+ return 0;
pipeline = &fimc->pipeline;
+ lock = &fimc->lock;
break;
default:
return 0;
}
if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ int i;
+ mutex_lock(lock);
ret = __fimc_pipeline_close(pipeline);
- pipeline->subdevs[IDX_SENSOR] = NULL;
- pipeline->subdevs[IDX_CSIS] = NULL;
-
- if (fimc) {
- mutex_lock(&fimc->lock);
+ for (i = 0; i < IDX_MAX; i++)
+ pipeline->subdevs[i] = NULL;
+ if (fimc)
fimc_ctrls_delete(fimc->vid_cap.ctx);
- mutex_unlock(&fimc->lock);
- }
+ mutex_unlock(lock);
return ret;
}
/*
@@ -857,23 +870,15 @@ static int fimc_md_link_notify(struct media_pad *source,
* pipeline is already in use, i.e. its video node is opened.
* Recreate the controls destroyed during the link deactivation.
*/
- if (fimc) {
- mutex_lock(&fimc->lock);
- if (fimc->vid_cap.refcnt > 0) {
- ret = __fimc_pipeline_open(pipeline,
- source->entity, true);
- if (!ret)
- ret = fimc_capture_ctrls_create(fimc);
- }
- mutex_unlock(&fimc->lock);
- } else {
- mutex_lock(&fimc_lite->lock);
- if (fimc_lite->ref_count > 0) {
- ret = __fimc_pipeline_open(pipeline,
- source->entity, true);
- }
- mutex_unlock(&fimc_lite->lock);
- }
+ mutex_lock(lock);
+
+ ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
+ if (ref_count > 0)
+ ret = __fimc_pipeline_open(pipeline, source->entity, true);
+ if (!ret && fimc)
+ ret = fimc_capture_ctrls_create(fimc);
+
+ mutex_unlock(lock);
return ret ? -EPIPE : ret;
}
@@ -965,7 +970,8 @@ static int fimc_md_probe(struct platform_device *pdev)
/* Protect the media graph while we're registering entities */
mutex_lock(&fmd->media_dev.graph_mutex);
- ret = fimc_md_register_platform_entities(fmd);
+ ret = bus_for_each_dev(&platform_bus_type, NULL, fmd,
+ fimc_md_pdev_match);
if (ret)
goto err_unlock;
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.h b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
index 2d8d41d82620..06b0d8276fd2 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
@@ -22,11 +22,13 @@
#include "mipi-csis.h"
/* Group IDs of sensor, MIPI-CSIS, FIMC-LITE and the writeback subdevs. */
-#define SENSOR_GROUP_ID (1 << 8)
-#define CSIS_GROUP_ID (1 << 9)
-#define WRITEBACK_GROUP_ID (1 << 10)
-#define FIMC_GROUP_ID (1 << 11)
-#define FLITE_GROUP_ID (1 << 12)
+#define GRP_ID_SENSOR (1 << 8)
+#define GRP_ID_FIMC_IS_SENSOR (1 << 9)
+#define GRP_ID_WRITEBACK (1 << 10)
+#define GRP_ID_CSIS (1 << 11)
+#define GRP_ID_FIMC (1 << 12)
+#define GRP_ID_FLITE (1 << 13)
+#define GRP_ID_FIMC_IS (1 << 14)
#define FIMC_MAX_SENSORS 8
#define FIMC_MAX_CAMCLKS 2
@@ -51,7 +53,7 @@ struct fimc_camclk_info {
* This data structure applies to image sensor and the writeback subdevs.
*/
struct fimc_sensor_info {
- struct s5p_fimc_isp_info pdata;
+ struct fimc_source_info pdata;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
};
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.c b/drivers/media/platform/s5p-fimc/fimc-reg.c
index 2c9d0c06c9e8..50b97c75b956 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.c
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.c
@@ -44,9 +44,9 @@ static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
if (ctx->hflip)
- flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
- if (ctx->vflip)
flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
if (ctx->rotation <= 90)
return flip;
@@ -59,9 +59,9 @@ static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
if (ctx->hflip)
- flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
- if (ctx->vflip)
flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
+ if (ctx->vflip)
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
if (ctx->rotation <= 90)
return flip;
@@ -312,7 +312,7 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct fimc_variant *variant = dev->variant;
+ const struct fimc_variant *variant = dev->variant;
struct fimc_scaler *sc = &ctx->scaler;
u32 cfg;
@@ -344,30 +344,31 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
}
}
-void fimc_hw_en_capture(struct fimc_ctx *ctx)
+void fimc_hw_enable_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
+ u32 cfg;
- u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
-
- if (ctx->out_path == FIMC_IO_DMA) {
- /* one shot mode */
- cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
- FIMC_REG_CIIMGCPT_IMGCPTEN;
- } else {
- /* Continuous frame capture mode (freerun). */
- cfg &= ~(FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
- FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT);
- cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
- }
+ cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE;
if (ctx->scaler.enabled)
cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+ else
+ cfg &= ~FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
}
+void fimc_hw_disable_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN |
+ FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
void fimc_hw_set_effect(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
@@ -553,7 +554,7 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
}
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *cam)
{
u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
@@ -595,14 +596,15 @@ static const struct mbus_pixfmt_desc pix_desc[] = {
};
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *source)
{
struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
- u32 cfg = 0;
- u32 bus_width;
+ u32 bus_width, cfg = 0;
int i;
- if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_ITU_601:
+ case FIMC_BUS_TYPE_ITU_656:
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
cfg = pix_desc[i].cisrcfmt;
@@ -618,15 +620,17 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
return -EINVAL;
}
- if (cam->bus_type == FIMC_ITU_601) {
+ if (source->fimc_bus_type == FIMC_BUS_TYPE_ITU_601) {
if (bus_width == 8)
cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
else if (bus_width == 16)
cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
} /* else defaults to ITU-R BT.656 8-bit */
- } else if (cam->bus_type == FIMC_MIPI_CSI2) {
+ break;
+ case FIMC_BUS_TYPE_MIPI_CSI2:
if (fimc_fmt_is_user_defined(f->fmt->color))
cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ break;
}
cfg |= (f->o_width << 16) | f->o_height;
@@ -654,7 +658,7 @@ void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam)
+ struct fimc_source_info *source)
{
u32 cfg, tmp;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
@@ -667,11 +671,11 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
- switch (cam->bus_type) {
- case FIMC_MIPI_CSI2:
+ switch (source->fimc_bus_type) {
+ case FIMC_BUS_TYPE_MIPI_CSI2:
cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
- if (cam->mux_id == 0)
+ if (source->mux_id == 0)
cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
/* TODO: add remaining supported formats. */
@@ -694,15 +698,16 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
break;
- case FIMC_ITU_601...FIMC_ITU_656:
- if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
+ if (source->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
break;
- case FIMC_LCD_WB:
+ case FIMC_BUS_TYPE_LCD_WRITEBACK_A:
cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
break;
default:
- v4l2_err(&vid_cap->vfd, "Invalid camera bus type selected\n");
+ v4l2_err(&vid_cap->vfd, "Invalid FIMC bus type selected: %d\n",
+ source->fimc_bus_type);
return -EINVAL;
}
writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
@@ -737,13 +742,6 @@ void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
writel(cfg, dev->regs + FIMC_REG_MSCTRL);
}
-void fimc_hw_dis_capture(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
- cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN | FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
- writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
-}
-
/* Return an index to the buffer actually being written. */
s32 fimc_hw_get_frame_index(struct fimc_dev *dev)
{
@@ -776,13 +774,13 @@ s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev)
void fimc_activate_capture(struct fimc_ctx *ctx)
{
fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
- fimc_hw_en_capture(ctx);
+ fimc_hw_enable_capture(ctx);
}
void fimc_deactivate_capture(struct fimc_dev *fimc)
{
fimc_hw_en_lastirq(fimc, true);
- fimc_hw_dis_capture(fimc);
+ fimc_hw_disable_capture(fimc);
fimc_hw_enable_scaler(fimc, false);
fimc_hw_en_lastirq(fimc, false);
}
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.h b/drivers/media/platform/s5p-fimc/fimc-reg.h
index b6abfc7b72ac..1a40df6d1a80 100644
--- a/drivers/media/platform/s5p-fimc/fimc-reg.h
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.h
@@ -287,7 +287,7 @@ void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
-void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_enable_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx);
void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
@@ -297,16 +297,16 @@ void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
int index);
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
+ struct fimc_source_info *cam);
void fimc_hw_clear_irq(struct fimc_dev *dev);
void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
-void fimc_hw_dis_capture(struct fimc_dev *dev);
+void fimc_hw_disable_capture(struct fimc_dev *dev);
s32 fimc_hw_get_frame_index(struct fimc_dev *dev);
s32 fimc_hw_get_prev_frame_index(struct fimc_dev *dev);
void fimc_activate_capture(struct fimc_ctx *ctx);
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
index 7abae012f55e..981863d05aaa 100644
--- a/drivers/media/platform/s5p-fimc/mipi-csis.c
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -187,7 +187,7 @@ struct csis_state {
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format;
- struct spinlock slock;
+ spinlock_t slock;
struct csis_pktbuf pkt_buf;
struct s5pcsis_event events[S5PCSIS_NUM_EVENTS];
};
@@ -220,6 +220,18 @@ static const struct csis_pix_format s5pcsis_formats[] = {
.code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
.data_alignment = 32,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW8,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW10,
+ .data_alignment = 24,
+ }, {
+ .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .fmt_reg = S5PCSIS_CFG_FMT_RAW12,
+ .data_alignment = 24,
}
};
@@ -261,7 +273,8 @@ static void s5pcsis_reset(struct csis_state *state)
static void s5pcsis_system_enable(struct csis_state *state, int on)
{
- u32 val;
+ struct s5p_platform_mipi_csis *pdata = state->pdev->dev.platform_data;
+ u32 val, mask;
val = s5pcsis_read(state, S5PCSIS_CTRL);
if (on)
@@ -271,10 +284,11 @@ static void s5pcsis_system_enable(struct csis_state *state, int on)
s5pcsis_write(state, S5PCSIS_CTRL, val);
val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
- if (on)
- val |= S5PCSIS_DPHYCTRL_ENABLE;
- else
- val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+ if (on) {
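+ /* Enable (lanes + 1) D-PHY lanes: the clock lane plus the configured data lanes */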
+ mask = (1 << (pdata->lanes + 1)) - 1;
+ val |= (mask & S5PCSIS_DPHYCTRL_ENABLE);
+ }
s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
}
@@ -338,11 +352,11 @@ static void s5pcsis_clk_put(struct csis_state *state)
int i;
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
- if (IS_ERR_OR_NULL(state->clock[i]))
+ if (IS_ERR(state->clock[i]))
continue;
clk_unprepare(state->clock[i]);
clk_put(state->clock[i]);
- state->clock[i] = NULL;
+ state->clock[i] = ERR_PTR(-EINVAL);
}
}
@@ -351,14 +365,19 @@ static int s5pcsis_clk_get(struct csis_state *state)
struct device *dev = &state->pdev->dev;
int i, ret;
+ for (i = 0; i < NUM_CSIS_CLOCKS; i++)
+ state->clock[i] = ERR_PTR(-EINVAL);
+
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
state->clock[i] = clk_get(dev, csi_clock_name[i]);
- if (IS_ERR(state->clock[i]))
+ if (IS_ERR(state->clock[i])) {
+ ret = PTR_ERR(state->clock[i]);
goto err;
+ }
ret = clk_prepare(state->clock[i]);
if (ret < 0) {
clk_put(state->clock[i]);
- state->clock[i] = NULL;
+ state->clock[i] = ERR_PTR(-EINVAL);
goto err;
}
}
@@ -366,7 +385,31 @@ static int s5pcsis_clk_get(struct csis_state *state)
err:
s5pcsis_clk_put(state);
dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
- return -ENXIO;
+ return ret;
+}
+
+static void dump_regs(struct csis_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CTRL" },
+ { 0x04, "DPHYCTRL" },
+ { 0x08, "CONFIG" },
+ { 0x0c, "DPHYSTS" },
+ { 0x10, "INTMSK" },
+ { 0x2c, "RESOL" },
+ { 0x38, "SDW_CONFIG" },
+ };
+ u32 i;
+
+ v4l2_info(&state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = s5pcsis_read(state, registers[i].offset);
+ v4l2_info(&state->sd, "%10s: 0x%08x\n", registers[i].name, cfg);
+ }
}
static void s5pcsis_start_stream(struct csis_state *state)
@@ -401,12 +444,12 @@ static void s5pcsis_log_counters(struct csis_state *state, bool non_errors)
spin_lock_irqsave(&state->slock, flags);
- for (i--; i >= 0; i--)
- if (state->events[i].counter >= 0)
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || debug)
v4l2_info(&state->sd, "%s events: %d\n",
state->events[i].name,
state->events[i].counter);
-
+ }
spin_unlock_irqrestore(&state->slock, flags);
}
@@ -569,7 +612,11 @@ static int s5pcsis_log_status(struct v4l2_subdev *sd)
{
struct csis_state *state = sd_to_csis_state(sd);
+ mutex_lock(&state->lock);
s5pcsis_log_counters(state, true);
+ if (debug && (state->flags & ST_POWERED))
+ dump_regs(state, __func__);
+ mutex_unlock(&state->lock);
return 0;
}
@@ -699,26 +746,32 @@ static int s5pcsis_probe(struct platform_device *pdev)
for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
state->supplies[i].supply = csis_supply_name[i];
- ret = regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
+ ret = devm_regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
state->supplies);
if (ret)
return ret;
ret = s5pcsis_clk_get(state);
- if (ret)
- goto e_clkput;
+ if (ret < 0)
+ return ret;
- clk_enable(state->clock[CSIS_CLK_MUX]);
if (pdata->clk_rate)
- clk_set_rate(state->clock[CSIS_CLK_MUX], pdata->clk_rate);
+ ret = clk_set_rate(state->clock[CSIS_CLK_MUX],
+ pdata->clk_rate);
else
dev_WARN(&pdev->dev, "No clock frequency specified!\n");
+ if (ret < 0)
+ goto e_clkput;
+
+ ret = clk_enable(state->clock[CSIS_CLK_MUX]);
+ if (ret < 0)
+ goto e_clkput;
ret = devm_request_irq(&pdev->dev, state->irq, s5pcsis_irq_handler,
0, dev_name(&pdev->dev), state);
if (ret) {
dev_err(&pdev->dev, "Interrupt request failed\n");
- goto e_regput;
+ goto e_clkdis;
}
v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
@@ -736,7 +789,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
ret = media_entity_init(&state->sd.entity,
CSIS_PADS_NUM, state->pads, 0);
if (ret < 0)
- goto e_clkput;
+ goto e_clkdis;
/* This allows to retrieve the platform device id by the host driver */
v4l2_set_subdevdata(&state->sd, pdev);
@@ -749,10 +802,9 @@ static int s5pcsis_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
-e_regput:
- regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
-e_clkput:
+e_clkdis:
clk_disable(state->clock[CSIS_CLK_MUX]);
+e_clkput:
s5pcsis_clk_put(state);
return ret;
}
@@ -859,7 +911,6 @@ static int s5pcsis_remove(struct platform_device *pdev)
clk_disable(state->clock[CSIS_CLK_MUX]);
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
- regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
media_entity_cleanup(&state->sd.entity);
diff --git a/drivers/media/platform/s5p-g2d/g2d-hw.c b/drivers/media/platform/s5p-g2d/g2d-hw.c
index 5b86cbe408e2..e87bd93811d4 100644
--- a/drivers/media/platform/s5p-g2d/g2d-hw.c
+++ b/drivers/media/platform/s5p-g2d/g2d-hw.c
@@ -28,6 +28,7 @@ void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
+ w(0, SRC_SELECT_REG);
w(f->stride & 0xFFFF, SRC_STRIDE_REG);
n = f->o_height & 0xFFF;
@@ -52,6 +53,7 @@ void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
+ w(0, DST_SELECT_REG);
w(f->stride & 0xFFFF, DST_STRIDE_REG);
n = f->o_height & 0xFFF;
@@ -82,10 +84,14 @@ void g2d_set_flip(struct g2d_dev *d, u32 r)
w(r, SRC_MSK_DIRECT_REG);
}
-u32 g2d_cmd_stretch(u32 e)
+void g2d_set_v41_stretch(struct g2d_dev *d, struct g2d_frame *src,
+ struct g2d_frame *dst)
{
- e &= 1;
- return e << 4;
+ w(DEFAULT_SCALE_MODE, SRC_SCALE_CTRL_REG);
+
+ /* inverse scaling factor: src is the numerator */
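+ /* ratio in 16.16 fixed point, e.g. a 2:1 downscale (src 200 -> dst 100) gives 0x20000 */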
+ w((src->c_width << 16) / dst->c_width, SRC_XSCALE_REG);
+ w((src->c_height << 16) / dst->c_height, SRC_YSCALE_REG);
}
void g2d_set_cmd(struct g2d_dev *d, u32 c)
@@ -96,7 +102,9 @@ void g2d_set_cmd(struct g2d_dev *d, u32 c)
void g2d_start(struct g2d_dev *d)
{
/* Clear cache */
- w(0x7, CACHECTL_REG);
+ if (d->variant->hw_rev == TYPE_G2D_3X)
+ w(0x7, CACHECTL_REG);
+
/* Enable interrupt */
w(1, INTEN_REG);
/* Start G2D engine */
diff --git a/drivers/media/platform/s5p-g2d/g2d-regs.h b/drivers/media/platform/s5p-g2d/g2d-regs.h
index 02e1cf50da4e..9bf31ad35d47 100644
--- a/drivers/media/platform/s5p-g2d/g2d-regs.h
+++ b/drivers/media/platform/s5p-g2d/g2d-regs.h
@@ -35,6 +35,9 @@
#define SRC_COLOR_MODE_REG 0x030C /* Src Image Color Mode reg */
#define SRC_LEFT_TOP_REG 0x0310 /* Src Left Top Coordinate reg */
#define SRC_RIGHT_BOTTOM_REG 0x0314 /* Src Right Bottom Coordinate reg */
+#define SRC_SCALE_CTRL_REG 0x0328 /* Src Scaling type select */
+#define SRC_XSCALE_REG 0x032c /* Src X Scaling ratio */
+#define SRC_YSCALE_REG 0x0330 /* Src Y Scaling ratio */
/* Parameter Setting Registers (Dest) */
#define DST_SELECT_REG 0x0400 /* Dest Image Selection reg */
@@ -113,3 +116,7 @@
#define DEFAULT_WIDTH 100
#define DEFAULT_HEIGHT 100
+#define DEFAULT_SCALE_MODE (2 << 0)
+
+/* Command mode register values */
+#define CMD_V3_ENABLE_STRETCH (1 << 4)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 6ed259fb1046..aaaf276a5a6c 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -604,8 +604,13 @@ static void device_run(void *prv)
g2d_set_flip(dev, ctx->flip);
if (ctx->in.c_width != ctx->out.c_width ||
- ctx->in.c_height != ctx->out.c_height)
- cmd |= g2d_cmd_stretch(1);
+ ctx->in.c_height != ctx->out.c_height) {
+ if (dev->variant->hw_rev == TYPE_G2D_3X)
+ cmd |= CMD_V3_ENABLE_STRETCH;
+ else
+ g2d_set_v41_stretch(dev, &ctx->in, &ctx->out);
+ }
+
g2d_set_cmd(dev, cmd);
g2d_start(dev);
@@ -713,7 +718,7 @@ static int g2d_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs);
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
- if (IS_ERR_OR_NULL(dev->clk)) {
+ if (IS_ERR(dev->clk)) {
dev_err(&pdev->dev, "failed to get g2d clock\n");
return -ENXIO;
}
@@ -725,7 +730,7 @@ static int g2d_probe(struct platform_device *pdev)
}
dev->gate = clk_get(&pdev->dev, "fimg2d");
- if (IS_ERR_OR_NULL(dev->gate)) {
+ if (IS_ERR(dev->gate)) {
dev_err(&pdev->dev, "failed to get g2d clock gate\n");
ret = -ENXIO;
goto unprep_clk;
@@ -789,6 +794,7 @@ static int g2d_probe(struct platform_device *pdev)
}
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ dev->variant = g2d_get_drv_data(pdev);
return 0;
@@ -828,9 +834,30 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
+static struct g2d_variant g2d_drvdata_v3x = {
+ .hw_rev = TYPE_G2D_3X,
+};
+
+static struct g2d_variant g2d_drvdata_v4x = {
+ .hw_rev = TYPE_G2D_4X, /* Revision 4.1 for Exynos4X12 and Exynos5 */
+};
+
+static struct platform_device_id g2d_driver_ids[] = {
+ {
+ .name = "s5p-g2d",
+ .driver_data = (unsigned long)&g2d_drvdata_v3x,
+ }, {
+ .name = "s5p-g2d-v4x",
+ .driver_data = (unsigned long)&g2d_drvdata_v4x,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, g2d_driver_ids);
+
static struct platform_driver g2d_pdrv = {
.probe = g2d_probe,
.remove = g2d_remove,
+ .id_table = g2d_driver_ids,
.driver = {
.name = G2D_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index 6b765b0216c5..300ca05ba404 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -10,10 +10,13 @@
* License, or (at your option) any later version
*/
+#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#define G2D_NAME "s5p-g2d"
+#define TYPE_G2D_3X 3
+#define TYPE_G2D_4X 4
struct g2d_dev {
struct v4l2_device v4l2_dev;
@@ -27,6 +30,7 @@ struct g2d_dev {
struct clk *clk;
struct clk *gate;
struct g2d_ctx *curr;
+ struct g2d_variant *variant;
int irq;
wait_queue_head_t irq_queue;
};
@@ -53,7 +57,7 @@ struct g2d_frame {
struct g2d_ctx {
struct v4l2_fh fh;
struct g2d_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_ctx *m2m_ctx;
struct g2d_frame in;
struct g2d_frame out;
struct v4l2_ctrl *ctrl_hflip;
@@ -70,6 +74,9 @@ struct g2d_fmt {
u32 hw;
};
+struct g2d_variant {
+ unsigned short hw_rev;
+};
void g2d_reset(struct g2d_dev *d);
void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f);
@@ -80,7 +87,11 @@ void g2d_start(struct g2d_dev *d);
void g2d_clear_int(struct g2d_dev *d);
void g2d_set_rop4(struct g2d_dev *d, u32 r);
void g2d_set_flip(struct g2d_dev *d, u32 r);
-u32 g2d_cmd_stretch(u32 e);
+void g2d_set_v41_stretch(struct g2d_dev *d,
+ struct g2d_frame *src, struct g2d_frame *dst);
void g2d_set_cmd(struct g2d_dev *d, u32 c);
-
+static inline struct g2d_variant *g2d_get_drv_data(struct platform_device *pdev)
+{
+ return (struct g2d_variant *)platform_get_device_id(pdev)->driver_data;
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 022b9b9baff9..8a4013e3aee7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -62,7 +62,7 @@
*/
struct s5p_jpeg {
struct mutex lock;
- struct spinlock slock;
+ spinlock_t slock;
struct v4l2_device v4l2_dev;
struct video_device *vfd_encoder;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8b7fbc7cc04d..e84703c314ce 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -21,6 +21,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <media/videobuf2-core.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
@@ -273,7 +274,6 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
struct s5p_mfc_buf *dst_buf;
size_t dspl_y_addr;
unsigned int frame_type;
- unsigned int index;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
@@ -310,7 +310,6 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
vb2_buffer_done(dst_buf->b,
err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
- index = dst_buf->b->v4l2_buf.index;
break;
}
}
@@ -326,8 +325,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
unsigned long flags;
unsigned int res_change;
- unsigned int index;
-
dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
@@ -387,7 +384,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
mfc_debug(2, "Running again the same buffer\n");
ctx->after_packed_pb = 1;
} else {
- index = src_buf->b->v4l2_buf.index;
mfc_debug(2, "MFC needs next buffer\n");
ctx->consumed_stream = 0;
list_del(&src_buf->list);
@@ -586,8 +582,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
clear_work_bit(ctx);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- WARN_ON(1);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
wake_up(&ctx->queue);
@@ -676,6 +671,12 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
s5p_mfc_handle_stream_complete(ctx, reason, err);
break;
+ case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_RUNNING;
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
default:
mfc_debug(2, "Unknown int reason\n");
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
@@ -777,14 +778,16 @@ static int s5p_mfc_open(struct file *file)
goto err_pwr_enable;
}
s5p_mfc_clock_on();
- ret = s5p_mfc_alloc_and_load_firmware(dev);
- if (ret)
- goto err_alloc_fw;
+ ret = s5p_mfc_load_firmware(dev);
+ if (ret) {
+ s5p_mfc_clock_off();
+ goto err_load_fw;
+ }
/* Init the FW */
ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
if (ret)
goto err_init_hw;
- s5p_mfc_clock_off();
}
/* Init videobuf2 queue for CAPTURE */
q = &ctx->vq_dst;
@@ -833,21 +836,20 @@ static int s5p_mfc_open(struct file *file)
return ret;
/* Deinit when failure occured */
err_queue_init:
+ if (dev->num_inst == 1)
+ s5p_mfc_deinit_hw(dev);
err_init_hw:
- s5p_mfc_release_firmware(dev);
-err_alloc_fw:
- dev->ctx[ctx->num] = NULL;
- del_timer_sync(&dev->watchdog_timer);
- s5p_mfc_clock_off();
+err_load_fw:
err_pwr_enable:
if (dev->num_inst == 1) {
if (s5p_mfc_power_off() < 0)
mfc_err("power off failed\n");
- s5p_mfc_release_firmware(dev);
+ del_timer_sync(&dev->watchdog_timer);
}
err_ctrls_setup:
s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
+ dev->ctx[ctx->num] = NULL;
err_no_ctx:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -901,11 +903,8 @@ static int s5p_mfc_release(struct file *file)
clear_bit(0, &dev->hw_lock);
dev->num_inst--;
if (dev->num_inst == 0) {
- mfc_debug(2, "Last instance - release firmware\n");
- /* reset <-> F/W release */
- s5p_mfc_reset(dev);
+ mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
- s5p_mfc_release_firmware(dev);
del_timer_sync(&dev->watchdog_timer);
if (s5p_mfc_power_off() < 0)
mfc_err("Power off failed\n");
@@ -1013,6 +1012,48 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}
+static void *mfc_get_drv_data(struct platform_device *pdev);
+
+static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+{
+ unsigned int mem_info[2];
+
+ dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
+ sizeof(struct device), GFP_KERNEL);
+ if (!dev->mem_dev_l) {
+ mfc_err("Not enough memory\n");
+ return -ENOMEM;
+ }
+ device_initialize(dev->mem_dev_l);
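+ /* The "samsung,mfc-l"/"samsung,mfc-r" properties hold the <base size> pair
+ * of the reserved memory region assigned to each memory bank. */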
+ of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ "samsung,mfc-l", mem_info, 2);
+ if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
+ mem_info[0], mem_info[1],
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
+ mfc_err("Failed to declare coherent memory for\n"
+ "MFC device\n");
+ return -ENOMEM;
+ }
+
+ dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
+ sizeof(struct device), GFP_KERNEL);
+ if (!dev->mem_dev_r) {
+ mfc_err("Not enough memory\n");
+ return -ENOMEM;
+ }
+ device_initialize(dev->mem_dev_r);
+ of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ "samsung,mfc-r", mem_info, 2);
+ if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
+ mem_info[0], mem_info[1],
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
+ pr_err("Failed to declare coherent memory for\n"
+ "MFC device\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
@@ -1036,8 +1077,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return -ENODEV;
}
- dev->variant = (struct s5p_mfc_variant *)
- platform_get_device_id(pdev)->driver_data;
+ dev->variant = mfc_get_drv_data(pdev);
ret = s5p_mfc_init_pm(dev);
if (ret < 0) {
@@ -1065,35 +1105,43 @@ static int s5p_mfc_probe(struct platform_device *pdev)
goto err_res;
}
- dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
- match_child);
- if (!dev->mem_dev_l) {
- mfc_err("Mem child (L) device get failed\n");
- ret = -ENODEV;
- goto err_res;
- }
-
- dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
- match_child);
- if (!dev->mem_dev_r) {
- mfc_err("Mem child (R) device get failed\n");
- ret = -ENODEV;
- goto err_res;
+ if (pdev->dev.of_node) {
+ if (s5p_mfc_alloc_memdevs(dev) < 0)
+ goto err_res;
+ } else {
+ dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
+ "s5p-mfc-l", match_child);
+ if (!dev->mem_dev_l) {
+ mfc_err("Mem child (L) device get failed\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
+ dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
+ "s5p-mfc-r", match_child);
+ if (!dev->mem_dev_r) {
+ mfc_err("Mem child (R) device get failed\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
}
dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
- if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
+ if (IS_ERR(dev->alloc_ctx[0])) {
ret = PTR_ERR(dev->alloc_ctx[0]);
goto err_res;
}
dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
- if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
+ if (IS_ERR(dev->alloc_ctx[1])) {
ret = PTR_ERR(dev->alloc_ctx[1]);
goto err_mem_init_ctx_1;
}
mutex_init(&dev->mfc_mutex);
+ ret = s5p_mfc_alloc_firmware(dev);
+ if (ret)
+ goto err_alloc_fw;
+
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
goto err_v4l2_dev_reg;
@@ -1175,6 +1223,8 @@ err_dec_reg:
err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
+ s5p_mfc_release_firmware(dev);
+err_alloc_fw:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
@@ -1200,8 +1250,13 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
+ s5p_mfc_release_firmware(dev);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+ if (pdev->dev.of_node) {
+ put_device(dev->mem_dev_l);
+ put_device(dev->mem_dev_r);
+ }
s5p_mfc_final_pm(dev);
return 0;
@@ -1350,6 +1405,35 @@ static struct platform_device_id mfc_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
+static const struct of_device_id exynos_mfc_match[] = {
+ {
+ .compatible = "samsung,mfc-v5",
+ .data = &mfc_drvdata_v5,
+ }, {
+ .compatible = "samsung,mfc-v6",
+ .data = &mfc_drvdata_v6,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mfc_match);
+
+static void *mfc_get_drv_data(struct platform_device *pdev)
+{
+ struct s5p_mfc_variant *driver_data = NULL;
+
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(of_match_ptr(exynos_mfc_match),
+ pdev->dev.of_node);
+ if (match)
+ driver_data = (struct s5p_mfc_variant *)match->data;
+ } else {
+ driver_data = (struct s5p_mfc_variant *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+ return driver_data;
+}
+
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
.remove = s5p_mfc_remove,
@@ -1357,7 +1441,8 @@ static struct platform_driver s5p_mfc_driver = {
.driver = {
.name = S5P_MFC_NAME,
.owner = THIS_MODULE,
- .pm = &s5p_mfc_pm_ops
+ .pm = &s5p_mfc_pm_ops,
+ .of_match_table = exynos_mfc_match,
},
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index f02e0497ca98..202d1d7a37a8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -145,6 +145,7 @@ enum s5p_mfc_inst_state {
MFCINST_RETURN_INST,
MFCINST_ERROR,
MFCINST_ABORT,
+ MFCINST_FLUSH,
MFCINST_RES_CHANGE_INIT,
MFCINST_RES_CHANGE_FLUSH,
MFCINST_RES_CHANGE_END,
@@ -277,8 +278,9 @@ struct s5p_mfc_priv_buf {
* @int_err: error number for last interrupt
* @queue: waitqueue for waiting for completion of device commands
* @fw_size: size of firmware
- * @bank1: address of the beggining of bank 1 memory
- * @bank2: address of the beggining of bank 2 memory
+ * @fw_virt_addr: virtual firmware address
+ * @bank1: address of the beginning of bank 1 memory
+ * @bank2: address of the beginning of bank 2 memory
* @hw_lock: used for hardware locking
* @ctx: array of driver contexts
* @curr_ctx: number of the currently running context
@@ -317,8 +319,9 @@ struct s5p_mfc_dev {
unsigned int int_err;
wait_queue_head_t queue;
size_t fw_size;
- size_t bank1;
- size_t bank2;
+ void *fw_virt_addr;
+ dma_addr_t bank1;
+ dma_addr_t bank2;
unsigned long hw_lock;
struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
int curr_ctx;
@@ -493,15 +496,9 @@ struct s5p_mfc_codec_ops {
* flushed
* @head_processed: flag mentioning whether the header data is processed
* completely or not
- * @bank1_buf: handle to memory allocated for temporary buffers from
+ * @bank1: handle to memory allocated for temporary buffers from
* memory bank 1
- * @bank1_phys: address of the temporary buffers from memory bank 1
- * @bank1_size: size of the memory allocated for temporary buffers from
- * memory bank 1
- * @bank2_buf: handle to memory allocated for temporary buffers from
- * memory bank 2
- * @bank2_phys: address of the temporary buffers from memory bank 2
- * @bank2_size: size of the memory allocated for temporary buffers from
+ * @bank2: handle to memory allocated for temporary buffers from
* memory bank 2
* @capture_state: state of the capture buffers queue
* @output_state: state of the output buffers queue
@@ -581,14 +578,8 @@ struct s5p_mfc_ctx {
unsigned int dpb_flush_flag;
unsigned int head_processed;
- /* Buffers */
- void *bank1_buf;
- size_t bank1_phys;
- size_t bank1_size;
-
- void *bank2_buf;
- size_t bank2_phys;
- size_t bank2_size;
+ struct s5p_mfc_priv_buf bank1;
+ struct s5p_mfc_priv_buf bank2;
enum s5p_mfc_queue_state capture_state;
enum s5p_mfc_queue_state output_state;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 585b7b0ed8ec..2e5f30b40dea 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -22,16 +22,64 @@
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
-static void *s5p_mfc_bitproc_buf;
-static size_t s5p_mfc_bitproc_phys;
-static unsigned char *s5p_mfc_bitproc_virt;
+/* Allocate memory for firmware */
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
+{
+ void *bank2_virt;
+ dma_addr_t bank2_dma_addr;
+
+ dev->fw_size = dev->variant->buf_size->fw;
+
+ if (dev->fw_virt_addr) {
+ mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+ return -ENOMEM;
+ }
+
+ dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
+ &dev->bank1, GFP_KERNEL);
+
+ if (!dev->fw_virt_addr) {
+ mfc_err("Allocating bitprocessor buffer failed\n");
+ return -ENOMEM;
+ }
+
+ if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
+ bank2_virt = dma_alloc_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
+ &bank2_dma_addr, GFP_KERNEL);
+
+ if (!bank2_virt) {
+ mfc_err("Allocating bank2 base failed\n");
+ dma_free_coherent(dev->mem_dev_l, dev->fw_size,
+ dev->fw_virt_addr, dev->bank1);
+ dev->fw_virt_addr = NULL;
+ return -ENOMEM;
+ }
+
+ /* Valid buffers passed to the MFC encoder with the LAST_FRAME command
+ * should not have the address of bank2 - MFC will treat it as a null frame.
+ * To avoid such a situation we set the bank2 address below the pool address.
+ */
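+ /* Subtracting the full alignment step (1 << MFC_BASE_ALIGN_ORDER) keeps
+ * the bank2 base alignment unchanged. */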
+ dev->bank2 = bank2_dma_addr - (1 << MFC_BASE_ALIGN_ORDER);
+
+ dma_free_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
+ bank2_virt, bank2_dma_addr);
+
+ } else {
+ /* In this case bank2 can point to the same address as bank1.
+ * Firmware will always occupy the beginning of this area, so it is
+ * impossible to have a video frame buffer with a zero address. */
+ dev->bank2 = dev->bank1;
+ }
+ return 0;
+}
-/* Allocate and load firmware */
-int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
+/* Load firmware */
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
{
struct firmware *fw_blob;
- size_t bank2_base_phys;
- void *b_base;
int err;
/* Firmare has to be present as a separate file or compiled
@@ -44,77 +92,17 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
}
- dev->fw_size = dev->variant->buf_size->fw;
if (fw_blob->size > dev->fw_size) {
mfc_err("MFC firmware is too big to be loaded\n");
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf) {
- mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
- release_firmware(fw_blob);
- return -ENOMEM;
- }
- s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
- if (IS_ERR(s5p_mfc_bitproc_buf)) {
- s5p_mfc_bitproc_buf = NULL;
- mfc_err("Allocating bitprocessor buffer failed\n");
+ if (!dev->fw_virt_addr) {
+ mfc_err("MFC firmware is not allocated\n");
release_firmware(fw_blob);
- return -ENOMEM;
- }
- s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
- if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
- mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
- if (!s5p_mfc_bitproc_virt) {
- mfc_err("Bitprocessor memory remap failed\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- dev->bank1 = s5p_mfc_bitproc_phys;
- if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
- b_base = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX],
- 1 << MFC_BASE_ALIGN_ORDER);
- if (IS_ERR(b_base)) {
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- mfc_err("Allocating bank2 base failed\n");
- release_firmware(fw_blob);
- return -ENOMEM;
- }
- bank2_base_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
- vb2_dma_contig_memops.put(b_base);
- if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
- mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
- release_firmware(fw_blob);
- return -EIO;
- }
- /* Valid buffers passed to MFC encoder with LAST_FRAME command
- * should not have address of bank2 - MFC will treat it as a null frame.
- * To avoid such situation we set bank2 address below the pool address.
- */
- dev->bank2 = bank2_base_phys - (1 << MFC_BASE_ALIGN_ORDER);
- } else {
- dev->bank2 = dev->bank1;
+ return -EINVAL;
}
- memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ memcpy(dev->fw_virt_addr, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
@@ -142,12 +130,12 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf == NULL || s5p_mfc_bitproc_phys == 0) {
- mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
+ if (!dev->fw_virt_addr) {
+ mfc_err("MFC firmware is not allocated\n");
release_firmware(fw_blob);
return -EINVAL;
}
- memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ memcpy(dev->fw_virt_addr, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
@@ -159,12 +147,11 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
{
/* Before calling this function one has to make sure
* that MFC is no longer processing */
- if (!s5p_mfc_bitproc_buf)
+ if (!dev->fw_virt_addr)
return -EINVAL;
- vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_virt = NULL;
- s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = NULL;
+ dma_free_coherent(dev->mem_dev_l, dev->fw_size, dev->fw_virt_addr,
+ dev->bank1);
+ dev->fw_virt_addr = NULL;
return 0;
}
@@ -257,8 +244,10 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
int ret;
mfc_debug_enter();
- if (!s5p_mfc_bitproc_buf)
+ if (!dev->fw_virt_addr) {
+ mfc_err("Firmware memory is not allocated.\n");
return -EINVAL;
+ }
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
index 90aa9b9886d5..6a9b6f8606bb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -16,7 +16,8 @@
#include "s5p_mfc_common.h"
int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
-int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 6dad9a74f61c..4582473978ca 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -991,24 +991,35 @@ static int s5p_mfc_stop_streaming(struct vb2_queue *q)
S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0);
aborted = 1;
}
- spin_lock_irqsave(&dev->irqlock, flags);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
&ctx->vq_dst);
INIT_LIST_HEAD(&ctx->dst_queue);
ctx->dst_queue_cnt = 0;
ctx->dpb_flush_flag = 1;
ctx->dec_dst_flag = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if (IS_MFCV6(dev) && (ctx->state == MFCINST_RUNNING)) {
+ ctx->state = MFCINST_FLUSH;
+ set_work_bit_irqsave(ctx);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
+ mfc_err("Err flushing buffers\n");
+ }
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
&ctx->vq_src);
INIT_LIST_HEAD(&ctx->src_queue);
ctx->src_queue_cnt = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
}
if (aborted)
ctx->state = MFCINST_RUNNING;
- spin_unlock_irqrestore(&dev->irqlock, flags);
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index f92f6ddd739f..2356fd52a169 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1534,6 +1534,8 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
if (list_empty(&ctx->src_queue)) {
mfc_debug(2, "EOS: empty src queue, entering finishing state");
ctx->state = MFCINST_FINISHING;
+ if (s5p_mfc_ctx_ready(ctx))
+ set_work_bit_irqsave(ctx);
spin_unlock_irqrestore(&dev->irqlock, flags);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 6932e90d4065..10f8ac37cecd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include "s5p_mfc_debug.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_opr_v5.h"
#include "s5p_mfc_opr_v6.h"
@@ -29,3 +30,32 @@ void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
}
dev->mfc_ops = s5p_mfc_ops;
}
+
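+/* Allocate a DMA-coherent private buffer described by @b; @b->size must be set by the caller */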
+int s5p_mfc_alloc_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+
+ mfc_debug(3, "Allocating priv: %d\n", b->size);
+
+ b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
+
+ if (!b->virt) {
+ mfc_err("Allocating private buffer failed\n");
+ return -ENOMEM;
+ }
+
+ mfc_debug(3, "Allocated addr %p %08x\n", b->virt, b->dma);
+ return 0;
+}
+
+void s5p_mfc_release_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+ if (b->virt) {
+ dma_free_coherent(dev, b->size, b->virt, b->dma);
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
+ }
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 420abecafec0..754c540e7a7e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -80,5 +80,10 @@ struct s5p_mfc_hw_ops {
};
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_priv_buf(struct device *dev,
+ struct s5p_mfc_priv_buf *b);
+
#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index bf7d010a4107..f61dba837899 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -38,39 +38,26 @@ int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
- ctx->dsc.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
- buf_size->dsc);
- if (IS_ERR_VALUE((int)ctx->dsc.alloc)) {
- ctx->dsc.alloc = NULL;
- mfc_err("Allocating DESC buffer failed\n");
- return -ENOMEM;
+ ctx->dsc.size = buf_size->dsc;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc);
+ if (ret) {
+ mfc_err("Failed to allocate temporary buffer\n");
+ return ret;
}
- ctx->dsc.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->dsc.alloc);
+
BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
- ctx->dsc.virt = vb2_dma_contig_memops.vaddr(ctx->dsc.alloc);
- if (ctx->dsc.virt == NULL) {
- vb2_dma_contig_memops.put(ctx->dsc.alloc);
- ctx->dsc.dma = 0;
- ctx->dsc.alloc = NULL;
- mfc_err("Remapping DESC buffer failed\n");
- return -ENOMEM;
- }
- memset(ctx->dsc.virt, 0, buf_size->dsc);
+ memset(ctx->dsc.virt, 0, ctx->dsc.size);
wmb();
return 0;
}
+
/* Release temporary buffers for decoding */
void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->dsc.dma) {
- vb2_dma_contig_memops.put(ctx->dsc.alloc);
- ctx->dsc.alloc = NULL;
- ctx->dsc.dma = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->dsc);
}
/* Allocate codec buffers */
@@ -80,6 +67,7 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
unsigned int enc_ref_y_size = 0;
unsigned int enc_ref_c_size = 0;
unsigned int guard_width, guard_height;
+ int ret;
if (ctx->type == MFCINST_DECODER) {
mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
@@ -113,100 +101,93 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Codecs have different memory requirements */
switch (ctx->codec_mode) {
case S5P_MFC_CODEC_H264_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
S5P_FIMV_DEC_VERT_NB_MV_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
+ ctx->bank2.size = ctx->total_dpb_count * ctx->mv_size;
break;
case S5P_MFC_CODEC_MPEG4_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_STX_PARSER_SIZE +
S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_VC1RCV_DEC:
case S5P_MFC_CODEC_VC1_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_NB_DCAC_SIZE +
3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_MPEG2_DEC:
- ctx->bank1_size = 0;
- ctx->bank2_size = 0;
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H263_DEC:
- ctx->bank1_size =
+ ctx->bank1.size =
ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
S5P_FIMV_DEC_UPNB_MV_SIZE +
S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
S5P_FIMV_DEC_NB_DCAC_SIZE,
S5P_FIMV_DEC_BUF_ALIGN);
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H264_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_COLFLG_SIZE +
S5P_FIMV_ENC_INTRAMD_SIZE +
S5P_FIMV_ENC_NBORINFO_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4) +
S5P_FIMV_ENC_INTRAPRED_SIZE;
break;
case S5P_MFC_CODEC_MPEG4_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_COLFLG_SIZE +
S5P_FIMV_ENC_ACDCCOEF_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4);
break;
case S5P_MFC_CODEC_H263_ENC:
- ctx->bank1_size = (enc_ref_y_size * 2) +
+ ctx->bank1.size = (enc_ref_y_size * 2) +
S5P_FIMV_ENC_UPMV_SIZE +
S5P_FIMV_ENC_ACDCCOEF_SIZE;
- ctx->bank2_size = (enc_ref_y_size * 2) +
+ ctx->bank2.size = (enc_ref_y_size * 2) +
(enc_ref_c_size * 4);
break;
default:
break;
}
/* Allocate only if memory from bank 1 is necessary */
- if (ctx->bank1_size > 0) {
- ctx->bank1_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
- if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = NULL;
- printk(KERN_ERR
- "Buf alloc for decoding failed (port A)\n");
- return -ENOMEM;
+ if (ctx->bank1.size > 0) {
+
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 temporary buffer\n");
+ return ret;
}
- ctx->bank1_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
- BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
}
/* Allocate only if memory from bank 2 is necessary */
- if (ctx->bank2_size > 0) {
- ctx->bank2_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
- if (IS_ERR(ctx->bank2_buf)) {
- ctx->bank2_buf = NULL;
- mfc_err("Buf alloc for decoding failed (port B)\n");
- return -ENOMEM;
+ if (ctx->bank2.size > 0) {
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2);
+ if (ret) {
+ mfc_err("Failed to allocate Bank2 temporary buffer\n");
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ return ret;
}
- ctx->bank2_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
- BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
}
return 0;
}
@@ -214,18 +195,8 @@ int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->bank1_buf) {
- vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = NULL;
- ctx->bank1_phys = 0;
- ctx->bank1_size = 0;
- }
- if (ctx->bank2_buf) {
- vb2_dma_contig_memops.put(ctx->bank2_buf);
- ctx->bank2_buf = NULL;
- ctx->bank2_phys = 0;
- ctx->bank2_size = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_r, &ctx->bank2);
}
/* Allocate memory for instance data buffer */
@@ -233,58 +204,38 @@ int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
+ int ret;
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
ctx->ctx.size = buf_size->h264_ctx;
else
ctx->ctx.size = buf_size->non_h264_ctx;
- ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
- if (IS_ERR(ctx->ctx.alloc)) {
- mfc_err("Allocating context buffer failed\n");
- ctx->ctx.alloc = NULL;
- return -ENOMEM;
+
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
}
- ctx->ctx.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
- BUG_ON(ctx->ctx.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
ctx->ctx.ofs = OFFSETA(ctx->ctx.dma);
- ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
- if (!ctx->ctx.virt) {
- mfc_err("Remapping instance buffer failed\n");
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.ofs = 0;
- ctx->ctx.dma = 0;
- return -ENOMEM;
- }
+
/* Zero content of the allocated memory */
memset(ctx->ctx.virt, 0, ctx->ctx.size);
wmb();
/* Initialize shared memory */
- ctx->shm.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->shm);
- if (IS_ERR(ctx->shm.alloc)) {
- mfc_err("failed to allocate shared memory\n");
- return PTR_ERR(ctx->shm.alloc);
+ ctx->shm.size = buf_size->shm;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
+ if (ret) {
+ mfc_err("Failed to allocate shared memory buffer\n");
+ return ret;
}
+
/* shared memory offset only keeps the offset from base (port a) */
- ctx->shm.ofs = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->shm.alloc)
- - dev->bank1;
+ ctx->shm.ofs = ctx->shm.dma - dev->bank1;
BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
- ctx->shm.virt = vb2_dma_contig_memops.vaddr(ctx->shm.alloc);
- if (!ctx->shm.virt) {
- vb2_dma_contig_memops.put(ctx->shm.alloc);
- ctx->shm.alloc = NULL;
- ctx->shm.ofs = 0;
- mfc_err("failed to virt addr of shared memory\n");
- return -ENOMEM;
- }
- memset((void *)ctx->shm.virt, 0, buf_size->shm);
+ memset(ctx->shm.virt, 0, buf_size->shm);
wmb();
return 0;
}
@@ -292,19 +243,8 @@ int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- if (ctx->ctx.alloc) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.ofs = 0;
- ctx->ctx.virt = NULL;
- ctx->ctx.dma = 0;
- }
- if (ctx->shm.alloc) {
- vb2_dma_contig_memops.put(ctx->shm.alloc);
- ctx->shm.alloc = NULL;
- ctx->shm.ofs = 0;
- ctx->shm.virt = NULL;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->shm);
}
int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
@@ -443,10 +383,10 @@ int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
size_t buf_addr1, buf_addr2;
int buf_size1, buf_size2;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
- buf_addr2 = ctx->bank2_phys;
- buf_size2 = ctx->bank2_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
~S5P_FIMV_DPB_COUNT_MASK;
mfc_write(dev, ctx->total_dpb_count | dpb,
@@ -523,7 +463,6 @@ int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
mfc_err("Unknown codec for decoding (%x)\n",
ctx->codec_mode);
return -EINVAL;
- break;
}
frame_size = ctx->luma_size;
frame_size_ch = ctx->chroma_size;
@@ -607,10 +546,10 @@ int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
unsigned int guard_width, guard_height;
int i;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
- buf_addr2 = ctx->bank2_phys;
- buf_size2 = ctx->bank2_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
+ buf_addr2 = ctx->bank2.dma;
+ buf_size2 = ctx->bank2.size;
enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 3a8cfd9fc1bd..beb6dbacebd9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -73,6 +73,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
unsigned int mb_width, mb_height;
+ int ret;
mb_width = MB_WIDTH(ctx->img_width);
mb_height = MB_HEIGHT(ctx->img_height);
@@ -112,7 +113,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size +
(ctx->mv_count * ctx->mv_size);
break;
@@ -123,7 +124,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_VC1RCV_DEC:
case S5P_MFC_CODEC_VC1_DEC:
@@ -133,11 +134,11 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_MPEG2_DEC:
- ctx->bank1_size = 0;
- ctx->bank2_size = 0;
+ ctx->bank1.size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H263_DEC:
ctx->scratch_buf_size =
@@ -146,7 +147,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_VP8_DEC:
ctx->scratch_buf_size =
@@ -155,7 +156,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size = ctx->scratch_buf_size;
+ ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_H264_ENC:
ctx->scratch_buf_size =
@@ -164,11 +165,11 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
(ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_MPEG4_ENC:
case S5P_MFC_CODEC_H263_ENC:
@@ -178,28 +179,24 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
- ctx->bank1_size =
+ ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
(ctx->dpb_count * (ctx->luma_dpb_size +
ctx->chroma_dpb_size + ctx->me_buffer_size));
- ctx->bank2_size = 0;
+ ctx->bank2.size = 0;
break;
default:
break;
}
/* Allocate only if memory from bank 1 is necessary */
- if (ctx->bank1_size > 0) {
- ctx->bank1_buf = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
- if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = 0;
- pr_err("Buf alloc for decoding failed (port A)\n");
- return -ENOMEM;
+ if (ctx->bank1.size > 0) {
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ if (ret) {
+ mfc_err("Failed to allocate Bank1 memory\n");
+ return ret;
}
- ctx->bank1_phys = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
- BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
}
return 0;
@@ -208,12 +205,7 @@ int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
- if (ctx->bank1_buf) {
- vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = 0;
- ctx->bank1_phys = 0;
- ctx->bank1_size = 0;
- }
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
}
/* Allocate memory for instance data buffer */
@@ -221,6 +213,7 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
mfc_debug_enter();
@@ -250,25 +243,10 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
break;
}
- ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
- if (IS_ERR(ctx->ctx.alloc)) {
- mfc_err("Allocating context buffer failed.\n");
- return PTR_ERR(ctx->ctx.alloc);
- }
-
- ctx->ctx.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
-
- ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
- if (!ctx->ctx.virt) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.dma = 0;
- ctx->ctx.virt = NULL;
-
- mfc_err("Remapping context buffer failed.\n");
- return -ENOMEM;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ if (ret) {
+ mfc_err("Failed to allocate instance buffer\n");
+ return ret;
}
memset(ctx->ctx.virt, 0, ctx->ctx.size);
@@ -282,44 +260,22 @@ int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
- mfc_debug_enter();
-
- if (ctx->ctx.alloc) {
- vb2_dma_contig_memops.put(ctx->ctx.alloc);
- ctx->ctx.alloc = NULL;
- ctx->ctx.dma = 0;
- ctx->ctx.virt = NULL;
- }
-
- mfc_debug_leave();
+ s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
}
/* Allocate context buffers for SYS_INIT */
int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
mfc_debug_enter();
- dev->ctx_buf.alloc = vb2_dma_contig_memops.alloc(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->dev_ctx);
- if (IS_ERR(dev->ctx_buf.alloc)) {
- mfc_err("Allocating DESC buffer failed.\n");
- return PTR_ERR(dev->ctx_buf.alloc);
- }
-
- dev->ctx_buf.dma = s5p_mfc_mem_cookie(
- dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
- dev->ctx_buf.alloc);
-
- dev->ctx_buf.virt = vb2_dma_contig_memops.vaddr(dev->ctx_buf.alloc);
- if (!dev->ctx_buf.virt) {
- vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
- dev->ctx_buf.alloc = NULL;
- dev->ctx_buf.dma = 0;
-
- mfc_err("Remapping DESC buffer failed.\n");
- return -ENOMEM;
+ dev->ctx_buf.size = buf_size->dev_ctx;
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
+ if (ret) {
+ mfc_err("Failed to allocate device context buffer\n");
+ return ret;
}
memset(dev->ctx_buf.virt, 0, buf_size->dev_ctx);
@@ -333,12 +289,7 @@ int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
/* Release context buffers for SYS_INIT */
void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
- if (dev->ctx_buf.alloc) {
- vb2_dma_contig_memops.put(dev->ctx_buf.alloc);
- dev->ctx_buf.alloc = NULL;
- dev->ctx_buf.dma = 0;
- dev->ctx_buf.virt = NULL;
- }
+ s5p_mfc_release_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
}
static int calc_plane(int width, int height)
@@ -417,8 +368,8 @@ int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
int buf_size1;
int align_gap;
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count);
@@ -535,13 +486,13 @@ void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- size_t buf_addr1, buf_size1;
- int i;
+ size_t buf_addr1;
+ int i, buf_size1;
mfc_debug_enter();
- buf_addr1 = ctx->bank1_phys;
- buf_size1 = ctx->bank1_size;
+ buf_addr1 = ctx->bank1.dma;
+ buf_size1 = ctx->bank1.size;
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
@@ -1253,12 +1204,14 @@ int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
{
struct s5p_mfc_dev *dev = ctx->dev;
- unsigned int dpb;
- if (flush)
- dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (1 << 14);
- else
- dpb = READL(S5P_FIMV_SI_CH0_DPB_CONF_CTRL) & ~(1 << 14);
- WRITEL(dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+
+ if (flush) {
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
+ }
}
/* Decode a single frame */
@@ -1408,7 +1361,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_buf *temp_vb;
unsigned long flags;
int last_frame = 0;
- unsigned int index;
spin_lock_irqsave(&dev->irqlock, flags);
@@ -1427,8 +1379,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
- index = temp_vb->b->v4l2_buf.index;
-
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
@@ -1452,7 +1402,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
unsigned int src_y_size, src_c_size;
*/
unsigned int dst_size;
- unsigned int index;
spin_lock_irqsave(&dev->irqlock, flags);
@@ -1487,8 +1436,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
- index = src_mb->b->v4l2_buf.index;
-
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_encode_one_frame_v6(ctx);
@@ -1656,6 +1603,9 @@ void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
case MFCINST_HEAD_PARSED:
ret = s5p_mfc_run_init_dec_buffers(ctx);
break;
+ case MFCINST_FLUSH:
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+ break;
case MFCINST_RES_CHANGE_INIT:
s5p_mfc_run_dec_last_frames(ctx);
break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 2895333866fc..6aa38a56aaf2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -46,7 +46,7 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
ret = clk_prepare(pm->clock_gate);
if (ret) {
- mfc_err("Failed to preapre clock-gating control\n");
+ mfc_err("Failed to prepare clock-gating control\n");
goto err_p_ip_clk;
}
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 7c1116c73bf3..8de1b3dce459 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -656,7 +656,7 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
dev_dbg(hdev->dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
- memset(fmt, 0, sizeof *fmt);
+ memset(fmt, 0, sizeof(*fmt));
fmt->width = t->hact.end - t->hact.beg;
fmt->height = t->vact[0].end - t->vact[0].beg;
fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
@@ -760,7 +760,7 @@ static void hdmi_resources_cleanup(struct hdmi_device *hdev)
clk_put(res->sclk_hdmi);
if (!IS_ERR_OR_NULL(res->hdmi))
clk_put(res->hdmi);
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
}
static int hdmi_resources_init(struct hdmi_device *hdev)
@@ -777,31 +777,31 @@ static int hdmi_resources_init(struct hdmi_device *hdev)
dev_dbg(dev, "HDMI resource init\n");
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
/* get clocks, power */
res->hdmi = clk_get(dev, "hdmi");
- if (IS_ERR_OR_NULL(res->hdmi)) {
+ if (IS_ERR(res->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ if (IS_ERR(res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_pixel = clk_get(dev, "sclk_pixel");
- if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ if (IS_ERR(res->sclk_pixel)) {
dev_err(dev, "failed to get clock 'sclk_pixel'\n");
goto fail;
}
res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
- if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ if (IS_ERR(res->sclk_hdmiphy)) {
dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
res->hdmiphy = clk_get(dev, "hdmiphy");
- if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ if (IS_ERR(res->hdmiphy)) {
dev_err(dev, "failed to get clock 'hdmiphy'\n");
goto fail;
}
@@ -955,7 +955,7 @@ static int hdmi_probe(struct platform_device *pdev)
v4l2_subdev_init(sd, &hdmi_sd_ops);
sd->owner = THIS_MODULE;
- strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
+ strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));
hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
/* FIXME: missing fail preset is not supported */
hdmi_dev->cur_conf = hdmi_preset2timings(hdmi_dev->cur_preset);
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index 06b5d2dbb2d9..80717cec76ae 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -284,7 +284,7 @@ static int hdmiphy_probe(struct i2c_client *client,
{
struct hdmiphy_ctx *ctx;
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index b671e20e9318..04e6490a45be 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -19,6 +19,7 @@
#endif
#include <linux/fb.h>
+#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 02faea03aa7d..5733033a6ead 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -240,27 +240,27 @@ static int mxr_acquire_clocks(struct mxr_device *mdev)
struct device *dev = mdev->dev;
res->mixer = clk_get(dev, "mixer");
- if (IS_ERR_OR_NULL(res->mixer)) {
+ if (IS_ERR(res->mixer)) {
mxr_err(mdev, "failed to get clock 'mixer'\n");
goto fail;
}
res->vp = clk_get(dev, "vp");
- if (IS_ERR_OR_NULL(res->vp)) {
+ if (IS_ERR(res->vp)) {
mxr_err(mdev, "failed to get clock 'vp'\n");
goto fail;
}
res->sclk_mixer = clk_get(dev, "sclk_mixer");
- if (IS_ERR_OR_NULL(res->sclk_mixer)) {
+ if (IS_ERR(res->sclk_mixer)) {
mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ if (IS_ERR(res->sclk_hdmi)) {
mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR_OR_NULL(res->sclk_dac)) {
+ if (IS_ERR(res->sclk_dac)) {
mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
goto fail;
}
@@ -298,7 +298,7 @@ static void mxr_release_resources(struct mxr_device *mdev)
{
mxr_release_clocks(mdev);
mxr_release_plat_resources(mdev);
- memset(&mdev->res, 0, sizeof mdev->res);
+ memset(&mdev->res, 0, sizeof(mdev->res));
}
static void mxr_release_layers(struct mxr_device *mdev)
@@ -382,7 +382,7 @@ static int mxr_probe(struct platform_device *pdev)
/* mdev does not exist yet so no mxr_dbg is used */
dev_info(dev, "probe start\n");
- mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
dev_err(dev, "not enough memory.\n");
ret = -ENOMEM;
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index 3b1670a045f4..b713403024ef 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -470,11 +470,11 @@ static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
- filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
- filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
- filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+ filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 1f3b7436511c..82142a2d6d93 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -19,6 +19,7 @@
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -95,7 +96,7 @@ int mxr_acquire_video(struct mxr_device *mdev,
/* trying to register next output */
if (sd == NULL)
continue;
- out = kzalloc(sizeof *out, GFP_KERNEL);
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
if (out == NULL) {
mxr_err(mdev, "no memory for '%s'\n",
conf->output_name);
@@ -127,7 +128,7 @@ fail_output:
/* kfree is NULL-safe */
for (i = 0; i < mdev->output_cnt; ++i)
kfree(mdev->output[i]);
- memset(mdev->output, 0, sizeof mdev->output);
+ memset(mdev->output, 0, sizeof(mdev->output));
fail_vb2_allocator:
/* freeing allocator context */
@@ -160,8 +161,8 @@ static int mxr_querycap(struct file *file, void *priv,
mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
- strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
+ strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
sprintf(cap->bus_info, "%d", layer->idx);
cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -192,7 +193,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
struct mxr_device *mdev = layer->mdev;
struct v4l2_mbus_framefmt mbus_fmt;
- memset(&layer->geo, 0, sizeof layer->geo);
+ memset(&layer->geo, 0, sizeof(layer->geo));
mxr_get_mbus_fmt(mdev, &mbus_fmt);
@@ -425,7 +426,7 @@ static int mxr_s_selection(struct file *file, void *fh,
struct mxr_geometry tmp;
struct v4l2_rect res;
- memset(&res, 0, sizeof res);
+ memset(&res, 0, sizeof(res));
mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
s->r.width, s->r.height, s->r.left, s->r.top);
@@ -464,7 +465,7 @@ static int mxr_s_selection(struct file *file, void *fh,
/* apply change and update geometry if needed */
if (target) {
/* backup current geometry if setup fails */
- memcpy(&tmp, geo, sizeof tmp);
+ memcpy(&tmp, geo, sizeof(tmp));
/* apply requested selection */
target->x_offset = s->r.left;
@@ -496,7 +497,7 @@ static int mxr_s_selection(struct file *file, void *fh,
fail:
/* restore old geometry, which is not touched if target is NULL */
if (target)
- memcpy(geo, &tmp, sizeof tmp);
+ memcpy(geo, &tmp, sizeof(tmp));
return -ERANGE;
}
@@ -1071,7 +1072,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
{
struct mxr_layer *layer;
- layer = kzalloc(sizeof *layer, GFP_KERNEL);
+ layer = kzalloc(sizeof(*layer), GFP_KERNEL);
if (layer == NULL) {
mxr_err(mdev, "not enough memory for layer.\n");
goto fail;
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 91a6939a270a..ab6f9ef89423 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -301,7 +301,7 @@ static int sdo_probe(struct platform_device *pdev)
struct clk *sclk_vpll;
dev_info(dev, "probe start\n");
- sdev = devm_kzalloc(&pdev->dev, sizeof *sdev, GFP_KERNEL);
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
dev_err(dev, "not enough memory.\n");
ret = -ENOMEM;
@@ -341,47 +341,50 @@ static int sdo_probe(struct platform_device *pdev)
/* acquire clocks */
sdev->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
+ if (IS_ERR(sdev->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->sclk_dac);
goto fail;
}
sdev->dac = clk_get(dev, "dac");
- if (IS_ERR_OR_NULL(sdev->dac)) {
+ if (IS_ERR(sdev->dac)) {
dev_err(dev, "failed to get clock 'dac'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->dac);
goto fail_sclk_dac;
}
sdev->dacphy = clk_get(dev, "dacphy");
- if (IS_ERR_OR_NULL(sdev->dacphy)) {
+ if (IS_ERR(sdev->dacphy)) {
dev_err(dev, "failed to get clock 'dacphy'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sdev->dacphy);
goto fail_dac;
}
sclk_vpll = clk_get(dev, "sclk_vpll");
- if (IS_ERR_OR_NULL(sclk_vpll)) {
+ if (IS_ERR(sclk_vpll)) {
dev_err(dev, "failed to get clock 'sclk_vpll'\n");
- ret = -ENXIO;
+ ret = PTR_ERR(sclk_vpll);
goto fail_dacphy;
}
clk_set_parent(sdev->sclk_dac, sclk_vpll);
clk_put(sclk_vpll);
sdev->fout_vpll = clk_get(dev, "fout_vpll");
- if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
+ if (IS_ERR(sdev->fout_vpll)) {
dev_err(dev, "failed to get clock 'fout_vpll'\n");
+ ret = PTR_ERR(sdev->fout_vpll);
goto fail_dacphy;
}
dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
/* acquire regulator */
sdev->vdac = devm_regulator_get(dev, "vdd33a_dac");
- if (IS_ERR_OR_NULL(sdev->vdac)) {
+ if (IS_ERR(sdev->vdac)) {
dev_err(dev, "failed to get regulator 'vdac'\n");
+ ret = PTR_ERR(sdev->vdac);
goto fail_fout_vpll;
}
sdev->vdet = devm_regulator_get(dev, "vdet");
- if (IS_ERR_OR_NULL(sdev->vdet)) {
+ if (IS_ERR(sdev->vdet)) {
dev_err(dev, "failed to get regulator 'vdet'\n");
+ ret = PTR_ERR(sdev->vdet);
goto fail_fout_vpll;
}
@@ -394,7 +397,7 @@ static int sdo_probe(struct platform_device *pdev)
/* configuration of interface subdevice */
v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
sdev->sd.owner = THIS_MODULE;
- strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
+ strlcpy(sdev->sd.name, "s5p-sdo", sizeof(sdev->sd.name));
/* set default format */
sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index 49191aac9634..d90d2286090b 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -338,7 +338,7 @@ static int sii9234_probe(struct i2c_client *client,
}
ctx->gpio_n_reset = pdata->gpio_n_reset;
- ret = gpio_request(ctx->gpio_n_reset, "MHL_RST");
+ ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST");
if (ret) {
dev_err(dev, "failed to acquire MHL_RST gpio\n");
return ret;
@@ -370,7 +370,6 @@ fail_pm_get:
fail_pm:
pm_runtime_disable(dev);
- gpio_free(ctx->gpio_n_reset);
fail:
dev_err(dev, "probe failed\n");
@@ -381,11 +380,8 @@ fail:
static int sii9234_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct sii9234_context *ctx = sd_to_context(sd);
pm_runtime_disable(dev);
- gpio_free(ctx->gpio_n_reset);
dev_info(dev, "remove successful\n");
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
new file mode 100644
index 000000000000..cb54c69d5748
--- /dev/null
+++ b/drivers/media/platform/sh_veu.c
@@ -0,0 +1,1266 @@
+/*
+ * sh-mobile VEU mem2mem driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define VEU_STR 0x00 /* start register */
+#define VEU_SWR 0x10 /* src: line length */
+#define VEU_SSR 0x14 /* src: image size */
+#define VEU_SAYR 0x18 /* src: y/rgb plane address */
+#define VEU_SACR 0x1c /* src: c plane address */
+#define VEU_BSSR 0x20 /* bundle mode register */
+#define VEU_EDWR 0x30 /* dst: line length */
+#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
+#define VEU_DACR 0x38 /* dst: c plane address */
+#define VEU_TRCR 0x50 /* transform control */
+#define VEU_RFCR 0x54 /* resize scale */
+#define VEU_RFSR 0x58 /* resize clip */
+#define VEU_ENHR 0x5c /* enhance */
+#define VEU_FMCR 0x70 /* filter mode */
+#define VEU_VTCR 0x74 /* lowpass vertical */
+#define VEU_HTCR 0x78 /* lowpass horizontal */
+#define VEU_APCR 0x80 /* color match */
+#define VEU_ECCR 0x84 /* color replace */
+#define VEU_AFXR 0x90 /* fixed mode */
+#define VEU_SWPR 0x94 /* swap */
+#define VEU_EIER 0xa0 /* interrupt mask */
+#define VEU_EVTR 0xa4 /* interrupt event */
+#define VEU_STAR 0xb0 /* status */
+#define VEU_BSRR 0xb4 /* reset */
+
+#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
+#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
+#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
+#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
+#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
+#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
+#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
+#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
+#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
+#define VEU_COFFR 0x224 /* color conversion offset */
+#define VEU_CBR 0x228 /* color conversion clip */
+
+/*
+ * 4092x4092 max size is the normal case. In some cases it can be reduced to
+ * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
+ */
+#define MAX_W 4092
+#define MAX_H 4092
+#define MIN_W 8
+#define MIN_H 8
+#define ALIGN_W 4
+
+/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
+#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
+
+#define MEM2MEM_DEF_TRANSLEN 1
+
+struct sh_veu_dev;
+
+struct sh_veu_file {
+ struct sh_veu_dev *veu_dev;
+ bool cfg_needed;
+};
+
+struct sh_veu_format {
+ char *name;
+ u32 fourcc;
+ unsigned int depth;
+ unsigned int ydepth;
+};
+
+/* video data format */
+struct sh_veu_vfmt {
+ /* Replace with v4l2_rect */
+ struct v4l2_rect frame;
+ unsigned int bytesperline;
+ unsigned int offset_y;
+ unsigned int offset_c;
+ const struct sh_veu_format *fmt;
+};
+
+struct sh_veu_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct sh_veu_vfmt vfmt_out;
+ struct sh_veu_vfmt vfmt_in;
+ /* Only single user per direction so far */
+ struct sh_veu_file *capture;
+ struct sh_veu_file *output;
+ struct mutex fop_lock;
+ void __iomem *base;
+ struct vb2_alloc_ctx *alloc_ctx;
+ spinlock_t lock;
+ bool is_2h;
+ unsigned int xaction;
+ bool aborting;
+};
+
+enum sh_veu_fmt_idx {
+ SH_VEU_FMT_NV12,
+ SH_VEU_FMT_NV16,
+ SH_VEU_FMT_NV24,
+ SH_VEU_FMT_RGB332,
+ SH_VEU_FMT_RGB444,
+ SH_VEU_FMT_RGB565,
+ SH_VEU_FMT_RGB666,
+ SH_VEU_FMT_RGB24,
+};
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#define DEFAULT_IN_WIDTH VGA_WIDTH
+#define DEFAULT_IN_HEIGHT VGA_HEIGHT
+#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
+#define DEFAULT_OUT_WIDTH VGA_WIDTH
+#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
+#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
+
+/*
+ * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
+ * aligned for NV24.
+ */
+static const struct sh_veu_format sh_veu_fmt[] = {
+ [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
+ [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
+ [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
+ [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
+ [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
+ [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
+ [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
+ [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
+};
+
+#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
+}
+
+#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
+ .frame = { \
+ .width = VGA_WIDTH, \
+ .height = VGA_HEIGHT, \
+ }, \
+ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
+ .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
+}
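/*
 * Sanity check of the bytesperline arithmetic in the two DEFAULT_*_VFMT macros
 * above: ydepth appears to be the bit depth of the Y/RGB plane, so the >> 3
 * converts bits per line to bytes per line. Stand-alone computation:
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 640;					/* VGA_WIDTH */

	printf("NV12   bytesperline = %u\n", (width * 8) >> 3);	/* 640  */
	printf("RGB565 bytesperline = %u\n", (width * 16) >> 3);	/* 1280 */
	return 0;
}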
+
+/*
+ * TODO: add support for further output formats:
+ * SH_VEU_FMT_NV12,
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB332,
+ * SH_VEU_FMT_RGB444,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+
+static const int sh_veu_fmt_out[] = {
+ SH_VEU_FMT_RGB565,
+};
+
+/*
+ * TODO: add support for further input formats:
+ * SH_VEU_FMT_NV16,
+ * SH_VEU_FMT_NV24,
+ * SH_VEU_FMT_RGB565,
+ * SH_VEU_FMT_RGB666,
+ * SH_VEU_FMT_RGB24,
+ */
+static const int sh_veu_fmt_in[] = {
+ SH_VEU_FMT_NV12,
+};
+
+static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
+{
+ switch (fourcc) {
+ default:
+ BUG();
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ return V4L2_COLORSPACE_JPEG;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ return V4L2_COLORSPACE_SRGB;
+ }
+}
+
+static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
+{
+ return ioread32(veu->base + reg);
+}
+
+static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
+ u32 value)
+{
+ iowrite32(value, veu->base + reg);
+}
+
+ /* ========== mem2mem callbacks ========== */
+
+static void sh_veu_job_abort(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ veu->aborting = true;
+}
+
+static void sh_veu_lock(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ mutex_lock(&veu->fop_lock);
+}
+
+static void sh_veu_unlock(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+
+ mutex_unlock(&veu->fop_lock);
+}
+
+static void sh_veu_process(struct sh_veu_dev *veu,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+ sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
+ sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
+ addr + veu->vfmt_out.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
+
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
+ sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
+ addr + veu->vfmt_in.offset_c : 0);
+ dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
+ (unsigned long)addr,
+ veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
+
+ sh_veu_reg_write(veu, VEU_STR, 1);
+
+ sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
+}
+
+/**
+ * sh_veu_device_run() - prepares and starts the device
+ *
+ * This will be called by the framework when it decides to schedule a particular
+ * instance.
+ */
+static void sh_veu_device_run(void *priv)
+{
+ struct sh_veu_dev *veu = priv;
+ struct vb2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
+
+ if (src_buf && dst_buf)
+ sh_veu_process(veu, src_buf, dst_buf);
+}
+
+ /* ========== video ioctls ========== */
+
+static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ veu_file == veu->capture) ||
+ (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ veu_file == veu->output);
+}
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+/*
+ * It is not unusual to have video nodes open()ed multiple times. While some
+ * V4L2 operations are non-intrusive, like querying formats and various
+ * parameters, others, like setting formats, starting and stopping streaming,
+ * queuing and dequeuing buffers, directly affect hardware configuration and/or
+ * execution. This function verifies the availability of the requested interface
+ * and, if available, reserves it for the requesting user.
+ */
+static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file **stream;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ stream = &veu->capture;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ stream = &veu->output;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*stream == veu_file)
+ return 0;
+
+ if (*stream)
+ return -EBUSY;
+
+ *stream = veu_file;
+
+ return 0;
+}
+
+static int sh_veu_context_init(struct sh_veu_dev *veu)
+{
+ if (veu->m2m_ctx)
+ return 0;
+
+ veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
+ sh_veu_queue_init);
+
+ if (IS_ERR(veu->m2m_ctx))
+ return PTR_ERR(veu->m2m_ctx);
+
+ return 0;
+}
+
+static int sh_veu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
+ strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
+{
+ if (f->index >= fmt_num)
+ return -EINVAL;
+
+ strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
+ f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
+ return 0;
+}
+
+static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
+}
+
+static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
+}
+
+static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &veu->vfmt_out;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &veu->vfmt_in;
+ default:
+ return NULL;
+ }
+}
+
+static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+
+ pix->width = vfmt->frame.width;
+ pix->height = vfmt->frame.height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = vfmt->fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+ pix->bytesperline = vfmt->bytesperline;
+ pix->sizeimage = vfmt->bytesperline * pix->height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+ pix->priv = 0;
+ dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
+ f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
+
+ return 0;
+}
+
+static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return sh_veu_g_fmt(priv, f);
+}
+
+static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ unsigned int y_bytes_used;
+
+ /*
+	 * The V4L2 specification suggests that the driver should correct the
+ * format struct if any of the dimensions is unsupported
+ */
+ switch (pix->field) {
+ default:
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall through: continue handling V4L2_FIELD_NONE */
+ case V4L2_FIELD_NONE:
+ break;
+ }
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
+ &pix->height, MIN_H, MAX_H, 0, 0);
+
+ y_bytes_used = (pix->width * fmt->ydepth) >> 3;
+
+ if (pix->bytesperline < y_bytes_used)
+ pix->bytesperline = y_bytes_used;
+ pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
+ pix->priv = 0;
+
+ pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
+
+ return 0;
+}
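/*
 * The sizeimage formula above (also used in sh_veu_g_fmt()) scales the Y-plane
 * size by depth/ydepth to cover the chroma plane. For the two default formats:
 */
#include <stdio.h>

int main(void)
{
	/* 640x480 NV12: bytesperline 640, depth 12, ydepth 8 */
	printf("NV12   sizeimage = %u\n", 480 * 640 * 12 / 8);		/* 460800 */
	/* 640x480 RGB565: bytesperline 1280, depth == ydepth == 16 */
	printf("RGB565 sizeimage = %u\n", 480 * 1280 * 16 / 16);	/* 614400 */
	return 0;
}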
+
+static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
+{
+ const int *fmt;
+ int i, n, dflt;
+
+ pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ fmt = sh_veu_fmt_out;
+ n = ARRAY_SIZE(sh_veu_fmt_out);
+ dflt = DEFAULT_OUT_FMTIDX;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ default:
+ fmt = sh_veu_fmt_in;
+ n = ARRAY_SIZE(sh_veu_fmt_in);
+ dflt = DEFAULT_IN_FMTIDX;
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
+ return &sh_veu_fmt[fmt[i]];
+
+ return &sh_veu_fmt[dflt];
+}
+
+static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
+static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct sh_veu_format *fmt;
+
+ fmt = sh_veu_find_fmt(f);
+ if (!fmt)
+ /* wrong buffer type */
+ return -EINVAL;
+
+ return sh_veu_try_fmt(f, fmt);
+}
+
+static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
+{
+ /* dst_left and dst_top validity will be verified in CROP / COMPOSE */
+ unsigned int left = vfmt->frame.left & ~0x03;
+ unsigned int top = vfmt->frame.top;
+ dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) +
+ top * veu->vfmt_out.bytesperline;
+ unsigned int y_line;
+
+ vfmt->offset_y = offset;
+
+ switch (vfmt->fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ y_line = ALIGN(vfmt->frame.width, 16);
+ vfmt->offset_c = offset + y_line * vfmt->frame.height;
+ break;
+ case V4L2_PIX_FMT_RGB332:
+ case V4L2_PIX_FMT_RGB444:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR666:
+ case V4L2_PIX_FMT_RGB24:
+ vfmt->offset_c = 0;
+ break;
+ default:
+ BUG();
+ }
+}
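/*
 * Worked example of the chroma-offset arithmetic above for the default
 * 640x480 NV12 source with no crop (frame.left = frame.top = 0, so offset_y
 * is 0); ALIGN() is the usual round-up-to-multiple macro:
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 640, height = 480;
	unsigned int offset_y = 0;			/* no crop */
	unsigned int y_line = ALIGN(width, 16);		/* 640, already aligned */
	unsigned int offset_c = offset_y + y_line * height;

	/* the C plane starts right after one full Y plane */
	printf("offset_c = %u\n", offset_c);		/* 307200 */
	return 0;
}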
+
+static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ struct sh_veu_vfmt *vfmt;
+ struct vb2_queue *vq;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ vfmt = sh_veu_get_vfmt(veu, f->type);
+ /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */
+
+ vfmt->fmt = sh_veu_find_fmt(f);
+ /* vfmt->fmt != NULL following the same argument as above */
+ vfmt->frame.width = pix->width;
+ vfmt->frame.height = pix->height;
+ vfmt->bytesperline = pix->bytesperline;
+
+ sh_veu_colour_offset(veu, vfmt);
+
+ /*
+ * We could also verify and require configuration only if any parameters
+	 * actually have changed, but it is unlikely that the user requests the
+ * same configuration several times without closing the device.
+ */
+ veu_file->cfg_needed = true;
+
+ dev_dbg(veu->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %x\n",
+ f->type, pix->width, pix->height, vfmt->fmt->fourcc);
+
+ return 0;
+}
+
+static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret = sh_veu_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return sh_veu_s_fmt(priv, f);
+}
+
+static int sh_veu_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct sh_veu_file *veu_file = priv;
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ int ret = sh_veu_context_init(veu);
+ if (ret < 0)
+ return ret;
+
+ ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
+}
+
+static int sh_veu_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
+ return -EBUSY;
+
+ return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
+}
+
+static void sh_veu_calc_scale(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out,
+ u32 *mant, u32 *frac, u32 *rep)
+{
+ u32 fixpoint;
+
+ /* calculate FRAC and MANT */
+ *rep = *mant = *frac = 0;
+
+ if (size_in == size_out) {
+ if (crop_out != size_out)
+ *mant = 1; /* needed for cropping */
+ return;
+ }
+
+ /* VEU2H special upscale */
+ if (veu->is_2h && size_out > size_in) {
+ u32 fixpoint = (4096 * size_in) / size_out;
+ *mant = fixpoint / 4096;
+ *frac = (fixpoint - (*mant * 4096)) & ~0x07;
+
+ switch (*frac) {
+ case 0x800:
+ *rep = 1;
+ break;
+ case 0x400:
+ *rep = 3;
+ break;
+ case 0x200:
+ *rep = 7;
+ break;
+ }
+ if (*rep)
+ return;
+ }
+
+ fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
+ *mant = fixpoint / 4096;
+ *frac = fixpoint - (*mant * 4096);
+
+ if (*frac & 0x07) {
+ /*
+ * FIXME: do we really have to round down twice in the
+ * up-scaling case?
+ */
+ *frac &= ~0x07;
+ if (size_out > size_in)
+ *frac -= 8; /* round down if scaling up */
+ else
+ *frac += 8; /* round up if scaling down */
+ }
+}
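/*
 * To make the 4096-based fixed point above concrete: MANT/FRAC for a plain
 * 640 -> 480 downscale (the VEU2H special upscale branch does not apply),
 * including the rounding of FRAC to a multiple of 8:
 */
#include <stdio.h>

int main(void)
{
	unsigned int size_in = 640, size_out = 480;
	unsigned int fixpoint, mant, frac;

	fixpoint = (4096 * (size_in - 1)) / (size_out + 1);	/* 4096 * 639 / 481 = 5441 */
	mant = fixpoint / 4096;					/* 1 */
	frac = fixpoint - mant * 4096;				/* 1345 */
	if (frac & 0x07)
		frac = (frac & ~0x07) + 8;			/* scaling down: round up to 1352 */

	printf("mant = %u, frac = %u\n", mant, frac);
	return 0;
}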
+
+static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
+ (((mant << 12) | frac) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
+ (((rep << 12) | crop_out) << 16);
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
+static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
+ int size_in, int size_out, int crop_out)
+{
+ u32 mant, frac, value, rep;
+
+ sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
+
+ /* set scale */
+ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
+ (mant << 12) | frac;
+
+ sh_veu_reg_write(veu, VEU_RFCR, value);
+
+ /* set clip */
+ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
+ (rep << 12) | crop_out;
+
+ sh_veu_reg_write(veu, VEU_RFSR, value);
+
+ return ALIGN((size_in * crop_out) / size_out, 4);
+}
+
+static void sh_veu_configure(struct sh_veu_dev *veu)
+{
+ u32 src_width, src_stride, src_height;
+ u32 dst_width, dst_stride, dst_height;
+ u32 real_w, real_h;
+
+ /* reset VEU */
+ sh_veu_reg_write(veu, VEU_BSRR, 0x100);
+
+ src_width = veu->vfmt_in.frame.width;
+ src_height = veu->vfmt_in.frame.height;
+ src_stride = ALIGN(veu->vfmt_in.frame.width, 16);
+
+ dst_width = real_w = veu->vfmt_out.frame.width;
+ dst_height = real_h = veu->vfmt_out.frame.height;
+	/* The datasheet is unclear on whether this is always a number of bytes */
+ dst_stride = veu->vfmt_out.bytesperline;
+
+ /*
+ * So far real_w == dst_width && real_h == dst_height, but it wasn't
+	 * necessarily the case in the original vidix driver, so it may change
+ * here in the future too.
+ */
+ src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
+ src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);
+
+ sh_veu_reg_write(veu, VEU_SWR, src_stride);
+ sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
+ sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
+
+ sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
+ sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */
+
+ sh_veu_reg_write(veu, VEU_SWPR, 0x67);
+ sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);
+
+ if (veu->is_2h) {
+ sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
+ sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR02, 0x0000);
+
+ sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
+ sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);
+
+ sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
+ sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
+ sh_veu_reg_write(veu, VEU_MCR22, 0x1023);
+
+ sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
+ }
+}
+
+static int sh_veu_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ if (veu_file->cfg_needed) {
+ struct sh_veu_dev *veu = veu_file->veu_dev;
+ veu_file->cfg_needed = false;
+ sh_veu_configure(veu_file->veu_dev);
+ veu->xaction = 0;
+ veu->aborting = false;
+ }
+
+ return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static int sh_veu_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct sh_veu_file *veu_file = priv;
+
+ if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
+ return -EBUSY;
+
+ return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
+ .vidioc_querycap = sh_veu_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out,
+
+ .vidioc_reqbufs = sh_veu_reqbufs,
+ .vidioc_querybuf = sh_veu_querybuf,
+
+ .vidioc_qbuf = sh_veu_qbuf,
+ .vidioc_dqbuf = sh_veu_dqbuf,
+
+ .vidioc_streamon = sh_veu_streamon,
+ .vidioc_streamoff = sh_veu_streamoff,
+};
+
+ /* ========== Queue operations ========== */
+
+static int sh_veu_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *f,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
+ struct sh_veu_vfmt *vfmt;
+ unsigned int size, count = *nbuffers;
+
+ if (f) {
+ const struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct sh_veu_format *fmt = sh_veu_find_fmt(f);
+ struct v4l2_format ftmp = *f;
+
+ if (fmt->fourcc != pix->pixelformat)
+ return -EINVAL;
+ sh_veu_try_fmt(&ftmp, fmt);
+ if (ftmp.fmt.pix.width != pix->width ||
+ ftmp.fmt.pix.height != pix->height)
+ return -EINVAL;
+ size = pix->bytesperline ? pix->bytesperline * pix->height :
+ pix->width * pix->height * fmt->depth >> 3;
+ } else {
+ vfmt = sh_veu_get_vfmt(veu, vq->type);
+ size = vfmt->bytesperline * vfmt->frame.height;
+ }
+
+ if (count < 2)
+ *nbuffers = count = 2;
+
+ if (size * count > VIDEO_MEM_LIMIT) {
+ count = VIDEO_MEM_LIMIT / size;
+ *nbuffers = count;
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = veu->alloc_ctx;
+
+ dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+ return 0;
+}
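/*
 * The memory cap applied above: VIDEO_MEM_LIMIT is ALIGN(2048*1536*2*3, 1M),
 * i.e. exactly 18 MiB, so with the default 1280 x 480 RGB565 capture buffers
 * a hypothetical request for 64 buffers is clamped to 30. Stand-alone version:
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long limit = ALIGN(2048UL * 1536 * 2 * 3, 1024UL * 1024);
	unsigned int size = 1280 * 480;		/* default RGB565 capture buffer */
	unsigned int count = 64;		/* hypothetical REQBUFS request */

	if (count < 2)
		count = 2;
	if ((unsigned long)size * count > limit)
		count = limit / size;

	printf("limit = %lu, count = %u\n", limit, count);	/* 18874368, 30 */
	return 0;
}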
+
+static int sh_veu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_veu_vfmt *vfmt;
+ unsigned int sizeimage;
+
+ vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
+ sizeimage = vfmt->bytesperline * vfmt->frame.height *
+ vfmt->fmt->depth / vfmt->fmt->ydepth;
+
+ if (vb2_plane_size(vb, 0) < sizeimage) {
+ dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, sizeimage);
+
+ return 0;
+}
+
+static void sh_veu_buf_queue(struct vb2_buffer *vb)
+{
+ struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
+ dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
+ v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
+}
+
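+/*
+ * vb2 calls these around blocking waits: drop the driver lock while
+ * sleeping so other file operations are not blocked, and re-take it
+ * afterwards.
+ */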
+static void sh_veu_wait_prepare(struct vb2_queue *q)
+{
+ sh_veu_unlock(vb2_get_drv_priv(q));
+}
+
+static void sh_veu_wait_finish(struct vb2_queue *q)
+{
+ sh_veu_lock(vb2_get_drv_priv(q));
+}
+
+static const struct vb2_ops sh_veu_qops = {
+ .queue_setup = sh_veu_queue_setup,
+ .buf_prepare = sh_veu_buf_prepare,
+ .buf_queue = sh_veu_buf_queue,
+ .wait_prepare = sh_veu_wait_prepare,
+ .wait_finish = sh_veu_wait_finish,
+};
+
+static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = priv;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &sh_veu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret < 0)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = priv;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &sh_veu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(dst_vq);
+}
+
+ /* ========== File operations ========== */
+
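+/*
+ * Each open gets its own sh_veu_file context and a runtime-PM reference;
+ * hardware configuration is deferred until the first STREAMON.
+ */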
+static int sh_veu_open(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file;
+
+ veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
+ if (!veu_file)
+ return -ENOMEM;
+
+ veu_file->veu_dev = veu;
+ veu_file->cfg_needed = true;
+
+ file->private_data = veu_file;
+
+ pm_runtime_get_sync(veu->dev);
+
+ dev_dbg(veu->dev, "Created instance %p\n", veu_file);
+
+ return 0;
+}
+
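+/*
+ * Drop this file handle's queue ownership; the mem2mem context is released
+ * once neither the capture nor the output queue has an owner left.
+ */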
+static int sh_veu_release(struct file *file)
+{
+ struct sh_veu_dev *veu = video_drvdata(file);
+ struct sh_veu_file *veu_file = file->private_data;
+
+ dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
+
+ pm_runtime_put(veu->dev);
+
+ if (veu_file == veu->capture) {
+ veu->capture = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
+ }
+
+ if (veu_file == veu->output) {
+ veu->output = NULL;
+ vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
+ }
+
+ if (!veu->output && !veu->capture && veu->m2m_ctx) {
+ v4l2_m2m_ctx_release(veu->m2m_ctx);
+ veu->m2m_ctx = NULL;
+ }
+
+ kfree(veu_file);
+
+ return 0;
+}
+
+static unsigned int sh_veu_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
+}
+
+static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_veu_file *veu_file = file->private_data;
+
+ return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations sh_veu_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_veu_open,
+ .release = sh_veu_release,
+ .poll = sh_veu_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = sh_veu_mmap,
+};
+
+static const struct video_device sh_veu_videodev = {
+ .name = "sh-veu",
+ .fops = &sh_veu_fops,
+ .ioctl_ops = &sh_veu_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
+ .device_run = sh_veu_device_run,
+ .job_abort = sh_veu_job_abort,
+};
+
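+/*
+ * Threaded half of the VEU interrupt: finish the mem2mem job once enough
+ * transactions have completed or an abort is pending, otherwise kick off
+ * the next conversion.
+ */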
+static irqreturn_t sh_veu_bh(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+
+ if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
+ v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
+ veu->xaction = 0;
+ } else {
+ sh_veu_device_run(veu);
+ }
+
+ return IRQ_HANDLED;
+}
+
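+/*
+ * Hard-IRQ half: acknowledge the "end of transfer" event, stop the VEU,
+ * complete the current source/destination buffer pair and wake the
+ * threaded handler unless the job is being aborted.
+ */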
+static irqreturn_t sh_veu_isr(int irq, void *dev_id)
+{
+ struct sh_veu_dev *veu = dev_id;
+ struct vb2_buffer *dst;
+ struct vb2_buffer *src;
+ u32 status = sh_veu_reg_read(veu, VEU_EVTR);
+
+ /* bundle read mode not used */
+ if (!(status & 1))
+ return IRQ_NONE;
+
+ /* disable interrupt in VEU */
+ sh_veu_reg_write(veu, VEU_EIER, 0);
+ /* halt operation */
+ sh_veu_reg_write(veu, VEU_STR, 0);
+ /* ack int, write 0 to clear bits */
+ sh_veu_reg_write(veu, VEU_EVTR, status & ~1);
+
+ /* conversion completed */
+ dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
+ src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
+ if (!src || !dst)
+ return IRQ_NONE;
+
+ spin_lock(&veu->lock);
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ spin_unlock(&veu->lock);
+
+ veu->xaction++;
+
+ if (!veu->aborting)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
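+/*
+ * Platform probe: the size of the register window identifies the VEU2H
+ * variant (0x22c bytes). Interrupt handling is split between sh_veu_isr
+ * and the threaded sh_veu_bh registered here.
+ */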
+static int sh_veu_probe(struct platform_device *pdev)
+{
+ struct sh_veu_dev *veu;
+ struct resource *reg_res;
+ struct video_device *vdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
+ return -ENODEV;
+ }
+
+ veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
+ if (!veu)
+ return -ENOMEM;
+
+ veu->is_2h = resource_size(reg_res) == 0x22c;
+
+ veu->base = devm_request_and_ioremap(&pdev->dev, reg_res);
+ if (!veu->base)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
+ 0, "veu", veu);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ return ret;
+ }
+
+ vdev = &veu->vdev;
+
+ veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(veu->alloc_ctx)) {
+ ret = PTR_ERR(veu->alloc_ctx);
+ goto einitctx;
+ }
+
+ *vdev = sh_veu_videodev;
+ spin_lock_init(&veu->lock);
+ mutex_init(&veu->fop_lock);
+ vdev->lock = &veu->fop_lock;
+
+ video_set_drvdata(vdev, veu);
+
+ veu->dev = &pdev->dev;
+ veu->vfmt_out = DEFAULT_OUT_VFMT;
+ veu->vfmt_in = DEFAULT_IN_VFMT;
+
+ veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
+ if (IS_ERR(veu->m2m_dev)) {
+ ret = PTR_ERR(veu->m2m_dev);
+ v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
+ goto em2minit;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ pm_runtime_suspend(&pdev->dev);
+ if (ret < 0)
+ goto evidreg;
+
+ return ret;
+
+evidreg:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+em2minit:
+ vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
+einitctx:
+ v4l2_device_unregister(&veu->v4l2_dev);
+ return ret;
+}
+
+static int sh_veu_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_veu_dev *veu = container_of(v4l2_dev,
+ struct sh_veu_dev, v4l2_dev);
+
+ video_unregister_device(&veu->vdev);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_m2m_release(veu->m2m_dev);
+ vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
+ v4l2_device_unregister(&veu->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver __refdata sh_veu_pdrv = {
+ .remove = sh_veu_remove,
+ .driver = {
+ .name = "sh_veu",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_veu_init(void)
+{
+ return platform_driver_probe(&sh_veu_pdrv, sh_veu_probe);
+}
+
+static void __exit sh_veu_exit(void)
+{
+ platform_driver_unregister(&sh_veu_pdrv);
+}
+
+module_init(sh_veu_init);
+module_exit(sh_veu_exit);
+
+MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
+MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index f3c4571ac01e..66c8da18df84 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -207,6 +207,7 @@ static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
#endif
switch (vou_dev->pix.pixelformat) {
+ default:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
row_coeff = 1;
@@ -253,7 +254,8 @@ static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
*count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
- dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): count=%d, size=%d\n", __func__,
+ *count, *size);
return 0;
}
@@ -269,7 +271,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
int ret;
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (vb->width != pix->width ||
vb->height != pix->height ||
@@ -299,7 +301,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_PREPARED;
}
- dev_dbg(vq->dev,
+ dev_dbg(vou_dev->v4l2_dev.dev,
"%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
__func__, vou_dev->pix_idx, bytes_per_line,
videobuf_to_dma_contig(vb), vb->memory, vb->state);
@@ -314,7 +316,7 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
struct video_device *vdev = vq->priv_data;
struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
vb->state = VIDEOBUF_QUEUED;
list_add_tail(&vb->queue, &vou_dev->queue);
@@ -325,8 +327,8 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
vou_dev->active = vb;
/* Start from side A: we use mirror addresses, so, set B */
sh_vou_reg_a_write(vou_dev, VOURPR, 1);
- dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
- sh_vou_reg_a_read(vou_dev, VOUSTR));
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
sh_vou_schedule_next(vou_dev, vb);
/* Only activate VOU after the second buffer */
} else if (vou_dev->active->queue.next == &vb->queue) {
@@ -336,8 +338,8 @@ static void sh_vou_buf_queue(struct videobuf_queue *vq,
/* Register side switching with frame VSYNC */
sh_vou_reg_a_write(vou_dev, VOURCR, 5);
- dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
- sh_vou_reg_a_read(vou_dev, VOUSTR));
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: second buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
/* Enable End-of-Frame (VSYNC) interrupts */
sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
@@ -355,7 +357,7 @@ static void sh_vou_buf_release(struct videobuf_queue *vq,
struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
unsigned long flags;
- dev_dbg(vq->dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
spin_lock_irqsave(&vou_dev->lock, flags);
@@ -388,9 +390,9 @@ static struct videobuf_queue_ops sh_vou_video_qops = {
static int sh_vou_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
@@ -401,12 +403,12 @@ static int sh_vou_querycap(struct file *file, void *priv,
static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
if (fmt->index >= ARRAY_SIZE(vou_fmt))
return -EINVAL;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
strlcpy(fmt->description, vou_fmt[fmt->index].desc,
@@ -419,8 +421,7 @@ static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -595,9 +596,9 @@ static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
*/
static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
{
- unsigned int best_err = UINT_MAX, best, width_max, height_max,
- img_height_max;
- int i, idx;
+ unsigned int best_err = UINT_MAX, best = geo->in_width,
+ width_max, height_max, img_height_max;
+ int i, idx = 0;
if (std & V4L2_STD_525_60) {
width_max = 858;
@@ -671,8 +672,7 @@ static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_pix_format *pix = &fmt->fmt.pix;
unsigned int img_height_max;
int pix_idx;
@@ -764,11 +764,11 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_pix_format *pix = &fmt->fmt.pix;
int i;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
pix->field = V4L2_FIELD_NONE;
@@ -788,9 +788,10 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
static int sh_vou_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *req)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -801,27 +802,30 @@ static int sh_vou_reqbufs(struct file *file, void *priv,
static int sh_vou_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_querybuf(&vou_file->vbq, b);
}
static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_qbuf(&vou_file->vbq, b);
}
static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
}
@@ -829,12 +833,11 @@ static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
static int sh_vou_streamon(struct file *file, void *priv,
enum v4l2_buf_type buftype)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
int ret;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
video, s_stream, 1);
@@ -848,11 +851,10 @@ static int sh_vou_streamon(struct file *file, void *priv,
static int sh_vou_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buftype)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = priv;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
/*
* This calls buf_release from host driver's videobuf_queue_ops for all
@@ -881,13 +883,12 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
int ret;
dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
- if (*std_id & ~vdev->tvnorms)
+ if (*std_id & ~vou_dev->vdev->tvnorms)
return -EINVAL;
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
@@ -909,8 +910,7 @@ static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -921,8 +921,7 @@ static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
@@ -936,8 +935,7 @@ static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
{
struct v4l2_crop a_writable = *a;
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_rect *rect = &a_writable.c;
struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -1028,9 +1026,9 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
static int sh_vou_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *a)
{
- struct sh_vou_file *vou_file = priv;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
a->bounds.left = 0;
@@ -1091,7 +1089,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
list_del(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
@@ -1160,8 +1158,7 @@ static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
/* File operations */
static int sh_vou_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
GFP_KERNEL);
@@ -1178,11 +1175,11 @@ static int sh_vou_open(struct file *file)
int ret;
/* First open */
vou_dev->status = SH_VOU_INITIALISING;
- pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
ret = sh_vou_hw_init(vou_dev);
if (ret < 0) {
atomic_dec(&vou_dev->use_count);
- pm_runtime_put(vdev->v4l2_dev->dev);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
vou_dev->status = SH_VOU_IDLE;
mutex_unlock(&vou_dev->fop_lock);
return ret;
@@ -1193,8 +1190,8 @@ static int sh_vou_open(struct file *file)
vou_dev->v4l2_dev.dev, &vou_dev->lock,
V4L2_BUF_TYPE_VIDEO_OUTPUT,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vdev,
- &vou_dev->fop_lock);
+ sizeof(struct videobuf_buffer),
+ vou_dev->vdev, &vou_dev->fop_lock);
mutex_unlock(&vou_dev->fop_lock);
return 0;
@@ -1202,18 +1199,17 @@ static int sh_vou_open(struct file *file)
static int sh_vou_release(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (!atomic_dec_return(&vou_dev->use_count)) {
mutex_lock(&vou_dev->fop_lock);
/* Last close */
vou_dev->status = SH_VOU_IDLE;
sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
- pm_runtime_put(vdev->v4l2_dev->dev);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
mutex_unlock(&vou_dev->fop_lock);
}
@@ -1225,12 +1221,11 @@ static int sh_vou_release(struct file *file)
static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
int ret;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
if (mutex_lock_interruptible(&vou_dev->fop_lock))
return -ERESTARTSYS;
@@ -1241,12 +1236,11 @@ static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
struct sh_vou_file *vou_file = file->private_data;
unsigned int res;
- dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
mutex_lock(&vou_dev->fop_lock);
res = videobuf_poll_stream(file, &vou_file->vbq, wait);
@@ -1257,8 +1251,7 @@ static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
static int sh_vou_g_chip_ident(struct file *file, void *fh,
struct v4l2_dbg_chip_ident *id)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
}
@@ -1267,8 +1260,7 @@ static int sh_vou_g_chip_ident(struct file *file, void *fh,
static int sh_vou_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
}
@@ -1276,8 +1268,7 @@ static int sh_vou_g_register(struct file *file, void *fh,
static int sh_vou_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct video_device *vdev = video_devdata(file);
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = video_drvdata(file);
return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
}
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index cb6791e62bd4..b139b525bb16 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -70,13 +70,12 @@ config VIDEO_MX2_HOSTSUPPORT
bool
config VIDEO_MX2
- tristate "i.MX27/i.MX25 Camera Sensor Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && (MACH_MX27 || (ARCH_MX25 && BROKEN))
+ tristate "i.MX27 Camera Sensor Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && MACH_MX27
select VIDEOBUF2_DMA_CONTIG
select VIDEO_MX2_HOSTSUPPORT
---help---
- This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
- Interface
+ This is a v4l2 driver for the i.MX27 Camera Sensor Interface
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index d96c8c7e01d9..82dbf99d347c 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -166,7 +166,7 @@ static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
struct frame_buffer *buf = isi->active;
list_del_init(&buf->list);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.sequence = isi->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
@@ -745,7 +745,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
return formats;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int isi_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -770,7 +770,7 @@ static int isi_camera_add_device(struct soc_camera_device *icd)
icd->devnum);
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void isi_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
diff --git a/drivers/media/platform/soc_camera/mx1_camera.c b/drivers/media/platform/soc_camera/mx1_camera.c
index 032b8c9097f9..25b2a285dc86 100644
--- a/drivers/media/platform/soc_camera/mx1_camera.c
+++ b/drivers/media/platform/soc_camera/mx1_camera.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -307,7 +306,7 @@ static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
/* _init is used to debug races, see comment in mx1_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
@@ -373,7 +372,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx1_buffer), icd, &icd->video_lock);
+ sizeof(struct mx1_buffer), icd, &ici->host_lock);
}
static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 4a574f3cfb2f..ffba7d91f413 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1,5 +1,5 @@
/*
- * V4L2 Driver for i.MX27/i.MX25 camera host
+ * V4L2 Driver for i.MX27 camera host
*
* Copyright (C) 2008, Sascha Hauer, Pengutronix
* Copyright (C) 2010, Baruch Siach, Orex Computed Radiography
@@ -28,7 +28,6 @@
#include <linux/time.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/mutex.h>
#include <linux/clk.h>
#include <media/v4l2-common.h>
@@ -64,9 +63,7 @@
#define CSICR1_RF_OR_INTEN (1 << 24)
#define CSICR1_STATFF_LEVEL (3 << 22)
#define CSICR1_STATFF_INTEN (1 << 21)
-#define CSICR1_RXFF_LEVEL(l) (((l) & 3) << 19) /* MX27 */
-#define CSICR1_FB2_DMA_INTEN (1 << 20) /* MX25 */
-#define CSICR1_FB1_DMA_INTEN (1 << 19) /* MX25 */
+#define CSICR1_RXFF_LEVEL(l) (((l) & 3) << 19)
#define CSICR1_RXFF_INTEN (1 << 18)
#define CSICR1_SOF_POL (1 << 17)
#define CSICR1_SOF_INTEN (1 << 16)
@@ -88,45 +85,15 @@
#define SHIFT_RXFF_LEVEL 19
#define SHIFT_MCLKDIV 12
-/* control reg 3 */
-#define CSICR3_FRMCNT (0xFFFF << 16)
-#define CSICR3_FRMCNT_RST (1 << 15)
-#define CSICR3_DMA_REFLASH_RFF (1 << 14)
-#define CSICR3_DMA_REFLASH_SFF (1 << 13)
-#define CSICR3_DMA_REQ_EN_RFF (1 << 12)
-#define CSICR3_DMA_REQ_EN_SFF (1 << 11)
-#define CSICR3_RXFF_LEVEL(l) (((l) & 7) << 4) /* MX25 */
-#define CSICR3_CSI_SUP (1 << 3)
-#define CSICR3_ZERO_PACK_EN (1 << 2)
-#define CSICR3_ECC_INT_EN (1 << 1)
-#define CSICR3_ECC_AUTO_EN (1 << 0)
-
#define SHIFT_FRMCNT 16
-/* csi status reg */
-#define CSISR_SFF_OR_INT (1 << 25)
-#define CSISR_RFF_OR_INT (1 << 24)
-#define CSISR_STATFF_INT (1 << 21)
-#define CSISR_DMA_TSF_FB2_INT (1 << 20) /* MX25 */
-#define CSISR_DMA_TSF_FB1_INT (1 << 19) /* MX25 */
-#define CSISR_RXFF_INT (1 << 18)
-#define CSISR_EOF_INT (1 << 17)
-#define CSISR_SOF_INT (1 << 16)
-#define CSISR_F2_INT (1 << 15)
-#define CSISR_F1_INT (1 << 14)
-#define CSISR_COF_INT (1 << 13)
-#define CSISR_ECC_INT (1 << 1)
-#define CSISR_DRDY (1 << 0)
-
#define CSICR1 0x00
#define CSICR2 0x04
-#define CSISR_IMX25 0x18
-#define CSISR_IMX27 0x08
+#define CSISR 0x08
#define CSISTATFIFO 0x0c
#define CSIRFIFO 0x10
#define CSIRXCNT 0x14
-#define CSICR3_IMX25 0x08
-#define CSICR3_IMX27 0x1c
+#define CSICR3 0x1c
#define CSIDMASA_STATFIFO 0x20
#define CSIDMATA_STATFIFO 0x24
#define CSIDMASA_FB1 0x28
@@ -249,12 +216,6 @@ struct mx2_fmt_cfg {
struct mx2_prp_cfg cfg;
};
-enum mx2_buffer_state {
- MX2_STATE_QUEUED,
- MX2_STATE_ACTIVE,
- MX2_STATE_DONE,
-};
-
struct mx2_buf_internal {
struct list_head queue;
int bufnum;
@@ -265,12 +226,10 @@ struct mx2_buf_internal {
struct mx2_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
- enum mx2_buffer_state state;
struct mx2_buf_internal internal;
};
enum mx2_camera_type {
- IMX25_CAMERA,
IMX27_CAMERA,
};
@@ -298,8 +257,6 @@ struct mx2_camera_dev {
struct mx2_buffer *fb2_active;
u32 csicr1;
- u32 reg_csisr;
- u32 reg_csicr3;
enum mx2_camera_type devtype;
struct mx2_buf_internal buf_discard[2];
@@ -315,9 +272,6 @@ struct mx2_camera_dev {
static struct platform_device_id mx2_camera_devtype[] = {
{
- .name = "imx25-camera",
- .driver_data = IMX25_CAMERA,
- }, {
.name = "imx27-camera",
.driver_data = IMX27_CAMERA,
}, {
@@ -326,16 +280,6 @@ static struct platform_device_id mx2_camera_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, mx2_camera_devtype);
-static inline int is_imx25_camera(struct mx2_camera_dev *pcdev)
-{
- return pcdev->devtype == IMX25_CAMERA;
-}
-
-static inline int is_imx27_camera(struct mx2_camera_dev *pcdev)
-{
- return pcdev->devtype == IMX27_CAMERA;
-}
-
static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
{
return container_of(int_buf, struct mx2_buffer, internal);
@@ -463,21 +407,10 @@ static void mx27_update_emma_buf(struct mx2_camera_dev *pcdev,
static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
{
- unsigned long flags;
-
clk_disable_unprepare(pcdev->clk_csi_ahb);
clk_disable_unprepare(pcdev->clk_csi_per);
writel(0, pcdev->base_csi + CSICR1);
- if (is_imx27_camera(pcdev)) {
- writel(0, pcdev->base_emma + PRP_CNTL);
- } else if (is_imx25_camera(pcdev)) {
- spin_lock_irqsave(&pcdev->lock, flags);
- pcdev->fb1_active = NULL;
- pcdev->fb2_active = NULL;
- writel(0, pcdev->base_csi + CSIDMASA_FB1);
- writel(0, pcdev->base_csi + CSIDMASA_FB2);
- spin_unlock_irqrestore(&pcdev->lock, flags);
- }
+ writel(0, pcdev->base_emma + PRP_CNTL);
}
/*
@@ -502,11 +435,8 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
if (ret < 0)
goto exit_csi_ahb;
- csicr1 = CSICR1_MCLKEN;
-
- if (is_imx27_camera(pcdev))
- csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
- CSICR1_RXFF_LEVEL(0);
+ csicr1 = CSICR1_MCLKEN | CSICR1_PRP_IF_EN | CSICR1_FCC |
+ CSICR1_RXFF_LEVEL(0);
pcdev->csicr1 = csicr1;
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
@@ -540,65 +470,6 @@ static void mx2_camera_remove_device(struct soc_camera_device *icd)
pcdev->icd = NULL;
}
-static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
- int state)
-{
- struct vb2_buffer *vb;
- struct mx2_buffer *buf;
- struct mx2_buffer **fb_active = fb == 1 ? &pcdev->fb1_active :
- &pcdev->fb2_active;
- u32 fb_reg = fb == 1 ? CSIDMASA_FB1 : CSIDMASA_FB2;
- unsigned long flags;
-
- spin_lock_irqsave(&pcdev->lock, flags);
-
- if (*fb_active == NULL)
- goto out;
-
- vb = &(*fb_active)->vb;
- dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
- do_gettimeofday(&vb->v4l2_buf.timestamp);
- vb->v4l2_buf.sequence++;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-
- if (list_empty(&pcdev->capture)) {
- buf = NULL;
- writel(0, pcdev->base_csi + fb_reg);
- } else {
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- vb = &buf->vb;
- list_del(&buf->internal.queue);
- buf->state = MX2_STATE_ACTIVE;
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + fb_reg);
- }
-
- *fb_active = buf;
-
-out:
- spin_unlock_irqrestore(&pcdev->lock, flags);
-}
-
-static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
-{
- struct mx2_camera_dev *pcdev = data;
- u32 status = readl(pcdev->base_csi + pcdev->reg_csisr);
-
- if (status & CSISR_DMA_TSF_FB1_INT)
- mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
- else if (status & CSISR_DMA_TSF_FB2_INT)
- mx25_camera_frame_done(pcdev, 2, MX2_STATE_DONE);
-
- /* FIXME: handle CSISR_RFF_OR_INT */
-
- writel(status, pcdev->base_csi + pcdev->reg_csisr);
-
- return IRQ_HANDLED;
-}
-
/*
* Videobuf operations
*/
@@ -676,97 +547,8 @@ static void mx2_videobuf_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&pcdev->lock, flags);
- buf->state = MX2_STATE_QUEUED;
list_add_tail(&buf->internal.queue, &pcdev->capture);
- if (is_imx25_camera(pcdev)) {
- u32 csicr3, dma_inten = 0;
-
- if (pcdev->fb1_active == NULL) {
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + CSIDMASA_FB1);
- pcdev->fb1_active = buf;
- dma_inten = CSICR1_FB1_DMA_INTEN;
- } else if (pcdev->fb2_active == NULL) {
- writel(vb2_dma_contig_plane_dma_addr(vb, 0),
- pcdev->base_csi + CSIDMASA_FB2);
- pcdev->fb2_active = buf;
- dma_inten = CSICR1_FB2_DMA_INTEN;
- }
-
- if (dma_inten) {
- list_del(&buf->internal.queue);
- buf->state = MX2_STATE_ACTIVE;
-
- csicr3 = readl(pcdev->base_csi + pcdev->reg_csicr3);
-
- /* Reflash DMA */
- writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
- pcdev->base_csi + pcdev->reg_csicr3);
-
- /* clear & enable interrupts */
- writel(dma_inten, pcdev->base_csi + pcdev->reg_csisr);
- pcdev->csicr1 |= dma_inten;
- writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
-
- /* enable DMA */
- csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
- writel(csicr3, pcdev->base_csi + pcdev->reg_csicr3);
- }
- }
-
- spin_unlock_irqrestore(&pcdev->lock, flags);
-}
-
-static void mx2_videobuf_release(struct vb2_buffer *vb)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct mx2_camera_dev *pcdev = ici->priv;
- struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
- unsigned long flags;
-
-#ifdef DEBUG
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
- switch (buf->state) {
- case MX2_STATE_ACTIVE:
- dev_info(icd->parent, "%s (active)\n", __func__);
- break;
- case MX2_STATE_QUEUED:
- dev_info(icd->parent, "%s (queued)\n", __func__);
- break;
- default:
- dev_info(icd->parent, "%s (unknown) %d\n", __func__,
- buf->state);
- break;
- }
-#endif
-
- /*
- * Terminate only queued but inactive buffers. Active buffers are
- * released when they become inactive after videobuf_waiton().
- *
- * FIXME: implement forced termination of active buffers for mx27 and
- * mx27 eMMA, so that the user won't get stuck in an uninterruptible
- * state. This requires a specific handling for each of the these DMA
- * types.
- */
-
- spin_lock_irqsave(&pcdev->lock, flags);
- if (is_imx25_camera(pcdev) && buf->state == MX2_STATE_ACTIVE) {
- if (pcdev->fb1_active == buf) {
- pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
- writel(0, pcdev->base_csi + CSIDMASA_FB1);
- pcdev->fb1_active = NULL;
- } else if (pcdev->fb2_active == buf) {
- pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
- writel(0, pcdev->base_csi + CSIDMASA_FB2);
- pcdev->fb2_active = NULL;
- }
- writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
- }
spin_unlock_irqrestore(&pcdev->lock, flags);
}
@@ -877,91 +659,87 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
struct mx2_buffer *buf;
unsigned long phys;
int bytesperline;
+ unsigned long flags;
- if (is_imx27_camera(pcdev)) {
- unsigned long flags;
- if (count < 2)
- return -EINVAL;
+ if (count < 2)
+ return -EINVAL;
- spin_lock_irqsave(&pcdev->lock, flags);
+ spin_lock_irqsave(&pcdev->lock, flags);
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- buf->internal.bufnum = 0;
- vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 0;
+ vb = &buf->vb;
- phys = vb2_dma_contig_plane_dma_addr(vb, 0);
- mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
- list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
- buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
- internal.queue);
- buf->internal.bufnum = 1;
- vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 1;
+ vb = &buf->vb;
- phys = vb2_dma_contig_plane_dma_addr(vb, 0);
- mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
- list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
-
- bytesperline = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytesperline < 0) {
- spin_unlock_irqrestore(&pcdev->lock, flags);
- return bytesperline;
- }
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
- /*
- * I didn't manage to properly enable/disable the prp
- * on a per frame basis during running transfers,
- * thus we allocate a buffer here and use it to
- * discard frames when no buffer is available.
- * Feel free to work on this ;)
- */
- pcdev->discard_size = icd->user_height * bytesperline;
- pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
- pcdev->discard_size, &pcdev->discard_buffer_dma,
- GFP_KERNEL);
- if (!pcdev->discard_buffer) {
- spin_unlock_irqrestore(&pcdev->lock, flags);
- return -ENOMEM;
- }
+ bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ if (bytesperline < 0) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return bytesperline;
+ }
- pcdev->buf_discard[0].discard = true;
- list_add_tail(&pcdev->buf_discard[0].queue,
- &pcdev->discard);
+ /*
+ * I didn't manage to properly enable/disable the prp
+ * on a per frame basis during running transfers,
+ * thus we allocate a buffer here and use it to
+ * discard frames when no buffer is available.
+ * Feel free to work on this ;)
+ */
+ pcdev->discard_size = icd->user_height * bytesperline;
+ pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size,
+ &pcdev->discard_buffer_dma, GFP_ATOMIC);
+ if (!pcdev->discard_buffer) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return -ENOMEM;
+ }
- pcdev->buf_discard[1].discard = true;
- list_add_tail(&pcdev->buf_discard[1].queue,
- &pcdev->discard);
+ pcdev->buf_discard[0].discard = true;
+ list_add_tail(&pcdev->buf_discard[0].queue,
+ &pcdev->discard);
- mx2_prp_resize_commit(pcdev);
+ pcdev->buf_discard[1].discard = true;
+ list_add_tail(&pcdev->buf_discard[1].queue,
+ &pcdev->discard);
- mx27_camera_emma_buf_init(icd, bytesperline);
+ mx2_prp_resize_commit(pcdev);
- if (prp->cfg.channel == 1) {
- writel(PRP_CNTL_CH1EN |
- PRP_CNTL_CSIEN |
- prp->cfg.in_fmt |
- prp->cfg.out_fmt |
- PRP_CNTL_CH1_LEN |
- PRP_CNTL_CH1BYP |
- PRP_CNTL_CH1_TSKIP(0) |
- PRP_CNTL_IN_TSKIP(0),
- pcdev->base_emma + PRP_CNTL);
- } else {
- writel(PRP_CNTL_CH2EN |
- PRP_CNTL_CSIEN |
- prp->cfg.in_fmt |
- prp->cfg.out_fmt |
- PRP_CNTL_CH2_LEN |
- PRP_CNTL_CH2_TSKIP(0) |
- PRP_CNTL_IN_TSKIP(0),
- pcdev->base_emma + PRP_CNTL);
- }
- spin_unlock_irqrestore(&pcdev->lock, flags);
+ mx27_camera_emma_buf_init(icd, bytesperline);
+
+ if (prp->cfg.channel == 1) {
+ writel(PRP_CNTL_CH1EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH1_LEN |
+ PRP_CNTL_CH1BYP |
+ PRP_CNTL_CH1_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(PRP_CNTL_CH2EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH2_LEN |
+ PRP_CNTL_CH2_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
}
+ spin_unlock_irqrestore(&pcdev->lock, flags);
return 0;
}
@@ -977,29 +755,27 @@ static int mx2_stop_streaming(struct vb2_queue *q)
void *b;
u32 cntl;
- if (is_imx27_camera(pcdev)) {
- spin_lock_irqsave(&pcdev->lock, flags);
+ spin_lock_irqsave(&pcdev->lock, flags);
- cntl = readl(pcdev->base_emma + PRP_CNTL);
- if (prp->cfg.channel == 1) {
- writel(cntl & ~PRP_CNTL_CH1EN,
- pcdev->base_emma + PRP_CNTL);
- } else {
- writel(cntl & ~PRP_CNTL_CH2EN,
- pcdev->base_emma + PRP_CNTL);
- }
- INIT_LIST_HEAD(&pcdev->capture);
- INIT_LIST_HEAD(&pcdev->active_bufs);
- INIT_LIST_HEAD(&pcdev->discard);
+ cntl = readl(pcdev->base_emma + PRP_CNTL);
+ if (prp->cfg.channel == 1) {
+ writel(cntl & ~PRP_CNTL_CH1EN,
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(cntl & ~PRP_CNTL_CH2EN,
+ pcdev->base_emma + PRP_CNTL);
+ }
+ INIT_LIST_HEAD(&pcdev->capture);
+ INIT_LIST_HEAD(&pcdev->active_bufs);
+ INIT_LIST_HEAD(&pcdev->discard);
- b = pcdev->discard_buffer;
- pcdev->discard_buffer = NULL;
+ b = pcdev->discard_buffer;
+ pcdev->discard_buffer = NULL;
- spin_unlock_irqrestore(&pcdev->lock, flags);
+ spin_unlock_irqrestore(&pcdev->lock, flags);
- dma_free_coherent(ici->v4l2_dev.dev,
- pcdev->discard_size, b, pcdev->discard_buffer_dma);
- }
+ dma_free_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size, b, pcdev->discard_buffer_dma);
return 0;
}
@@ -1008,7 +784,6 @@ static struct vb2_ops mx2_videobuf_ops = {
.queue_setup = mx2_videobuf_setup,
.buf_prepare = mx2_videobuf_prepare,
.buf_queue = mx2_videobuf_queue,
- .buf_cleanup = mx2_videobuf_release,
.start_streaming = mx2_start_streaming,
.stop_streaming = mx2_stop_streaming,
};
@@ -1129,16 +904,9 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
if (bytesperline < 0)
return bytesperline;
- if (is_imx27_camera(pcdev)) {
- ret = mx27_camera_emma_prp_reset(pcdev);
- if (ret)
- return ret;
- } else if (is_imx25_camera(pcdev)) {
- writel((bytesperline * icd->user_height) >> 2,
- pcdev->base_csi + CSIRXCNT);
- writel((bytesperline << 16) | icd->user_height,
- pcdev->base_csi + CSIIMAG_PARA);
- }
+ ret = mx27_camera_emma_prp_reset(pcdev);
+ if (ret)
+ return ret;
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
@@ -1425,7 +1193,6 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_fmt_cfg *emma_prp;
- unsigned int width_limit;
int ret;
dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
@@ -1437,40 +1204,11 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* FIXME: implement MX27 limits */
-
- /* limit to MX25 hardware capabilities */
- if (is_imx25_camera(pcdev)) {
- if (xlate->host_fmt->bits_per_sample <= 8)
- width_limit = 0xffff * 4;
- else
- width_limit = 0xffff * 2;
- /* CSIIMAG_PARA limit */
- if (pix->width > width_limit)
- pix->width = width_limit;
- if (pix->height > 0xffff)
- pix->height = 0xffff;
-
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
- pix->bytesperline, pix->height);
- /* Check against the CSIRXCNT limit */
- if (pix->sizeimage > 4 * 0x3ffff) {
- /* Adjust geometry, preserve aspect ratio */
- unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
- 4 * pix->height, pix->bytesperline));
- pix->width = new_height * pix->width / pix->height;
- pix->height = new_height;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- BUG_ON(pix->bytesperline < 0);
- pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
- pix->bytesperline, pix->height);
- }
- }
+ /*
+ * limit to MX27 hardware capabilities: width must be a multiple of 8 as
+ * requested by the CSI. (Table 39-2 in the i.MX27 Reference Manual).
+ */
+ pix->width &= ~0x7;
/* limit to sensor capabilities */
mf.width = pix->width;
@@ -1488,7 +1226,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
/* If the sensor does not support image size try PrP resizing */
emma_prp = mx27_emma_prp_get_format(xlate->code,
- xlate->host_fmt->fourcc);
+ xlate->host_fmt->fourcc);
if ((mf.width != pix->width || mf.height != pix->height) &&
emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
@@ -1600,7 +1338,7 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
vb2_get_plane_payload(vb, 0));
list_del_init(&buf->internal.queue);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.sequence = pcdev->frame_count;
if (err)
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
@@ -1634,7 +1372,6 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
vb = &buf->vb;
- buf->state = MX2_STATE_ACTIVE;
phys = vb2_dma_contig_plane_dma_addr(vb, 0);
mx27_update_emma_buf(pcdev, phys, bufnum);
@@ -1774,20 +1511,6 @@ static int mx2_camera_probe(struct platform_device *pdev)
goto exit;
}
- pcdev->devtype = pdev->id_entry->driver_data;
- switch (pcdev->devtype) {
- case IMX25_CAMERA:
- pcdev->reg_csisr = CSISR_IMX25;
- pcdev->reg_csicr3 = CSICR3_IMX25;
- break;
- case IMX27_CAMERA:
- pcdev->reg_csisr = CSISR_IMX27;
- pcdev->reg_csicr3 = CSICR3_IMX27;
- break;
- default:
- break;
- }
-
pcdev->clk_csi_ahb = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(pcdev->clk_csi_ahb)) {
dev_err(&pdev->dev, "Could not get csi ahb clock\n");
@@ -1833,20 +1556,9 @@ static int mx2_camera_probe(struct platform_device *pdev)
pcdev->dev = &pdev->dev;
platform_set_drvdata(pdev, pcdev);
- if (is_imx25_camera(pcdev)) {
- err = devm_request_irq(&pdev->dev, irq_csi, mx25_camera_irq, 0,
- MX2_CAM_DRV_NAME, pcdev);
- if (err) {
- dev_err(pcdev->dev, "Camera interrupt register failed \n");
- goto exit;
- }
- }
-
- if (is_imx27_camera(pcdev)) {
- err = mx27_camera_emma_init(pdev);
- if (err)
- goto exit;
- }
+ err = mx27_camera_emma_init(pdev);
+ if (err)
+ goto exit;
/*
* We're done with drvdata here. Clear the pointer so that
@@ -1859,8 +1571,6 @@ static int mx2_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
- if (is_imx25_camera(pcdev))
- pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
@@ -1879,10 +1589,8 @@ static int mx2_camera_probe(struct platform_device *pdev)
exit_free_emma:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
eallocctx:
- if (is_imx27_camera(pcdev)) {
- clk_disable_unprepare(pcdev->clk_emma_ipg);
- clk_disable_unprepare(pcdev->clk_emma_ahb);
- }
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
exit:
return err;
}
@@ -1897,10 +1605,8 @@ static int mx2_camera_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
- if (is_imx27_camera(pcdev)) {
- clk_disable_unprepare(pcdev->clk_emma_ipg);
- clk_disable_unprepare(pcdev->clk_emma_ahb);
- }
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
dev_info(&pdev->dev, "MX2 Camera driver unloaded\n");
@@ -1913,23 +1619,12 @@ static struct platform_driver mx2_camera_driver = {
},
.id_table = mx2_camera_devtype,
.remove = mx2_camera_remove,
+ .probe = mx2_camera_probe,
};
+module_platform_driver(mx2_camera_driver);
-static int __init mx2_camera_init(void)
-{
- return platform_driver_probe(&mx2_camera_driver, &mx2_camera_probe);
-}
-
-static void __exit mx2_camera_exit(void)
-{
- return platform_driver_unregister(&mx2_camera_driver);
-}
-
-module_init(mx2_camera_init);
-module_exit(mx2_camera_exit);
-
-MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
+MODULE_DESCRIPTION("i.MX27 SoC Camera Host driver");
MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(MX2_CAM_VERSION);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 45aef1053a49..f5cbb92db545 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -156,7 +156,7 @@ static void mx3_cam_dma_done(void *arg)
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
list_del_init(&buf->queue);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
vb->v4l2_buf.field = mx3_cam->field;
vb->v4l2_buf.sequence = mx3_cam->sequence++;
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
@@ -510,7 +510,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
clk_set_rate(mx3_cam->clk, rate);
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int mx3_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -530,7 +530,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void mx3_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 39a77f0b8860..2547bf88f79f 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -592,7 +592,7 @@ static void videobuf_done(struct omap1_cam_dev *pcdev,
suspend_capture(pcdev);
}
vb->state = result;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
if (result != VIDEOBUF_ERROR)
vb->field_count++;
wake_up(&vb->done);
@@ -1383,12 +1383,12 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+ sizeof(struct omap1_cam_buf), icd, &ici->host_lock);
else
videobuf_queue_sg_init(q, &omap1_videobuf_ops,
icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+ sizeof(struct omap1_cam_buf), icd, &ici->host_lock);
/* use videobuf mode (auto)selected with the module parameter */
pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 523330d00dee..395e2e043615 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -681,7 +681,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeued buffer (vb=0x%p)\n",
@@ -842,7 +842,7 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
*/
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct pxa_buffer), icd, &icd->video_lock);
+ sizeof(struct pxa_buffer), icd, &ici->host_lock);
}
static u32 mclk_get_divisor(struct platform_device *pdev,
@@ -958,7 +958,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
/*
* The following two functions absolutely depend on the fact, that
* there can be only one camera on PXA quick capture interface
- * Called with .video_lock held
+ * Called with .host_lock held
*/
static int pxa_camera_add_device(struct soc_camera_device *icd)
{
@@ -978,7 +978,7 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void pxa_camera_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -1661,23 +1661,18 @@ static int pxa_camera_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || irq < 0) {
- err = -ENODEV;
- goto exit;
- }
+ if (!res || irq < 0)
+ return -ENODEV;
- pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
- pcdev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(pcdev->clk)) {
- err = PTR_ERR(pcdev->clk);
- goto exit_kfree;
- }
+ pcdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pcdev->clk))
+ return PTR_ERR(pcdev->clk);
pcdev->res = res;
@@ -1715,17 +1710,9 @@ static int pxa_camera_probe(struct platform_device *pdev)
/*
* Request the regions.
*/
- if (!request_mem_region(res->start, resource_size(res),
- PXA_CAM_DRV_NAME)) {
- err = -EBUSY;
- goto exit_clk;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- err = -ENOMEM;
- goto exit_release;
- }
+ base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!base)
+ return -ENOMEM;
pcdev->irq = irq;
pcdev->base = base;
@@ -1734,7 +1721,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pxa_camera_dma_irq_y, pcdev);
if (err < 0) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- goto exit_iounmap;
+ return err;
}
pcdev->dma_chans[0] = err;
dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
@@ -1762,10 +1749,10 @@ static int pxa_camera_probe(struct platform_device *pdev)
DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
/* request irq */
- err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
- pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
+ PXA_CAM_DRV_NAME, pcdev);
if (err) {
- dev_err(&pdev->dev, "Camera interrupt register failed \n");
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
goto exit_free_dma;
}
@@ -1777,27 +1764,16 @@ static int pxa_camera_probe(struct platform_device *pdev)
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
- goto exit_free_irq;
+ goto exit_free_dma;
return 0;
-exit_free_irq:
- free_irq(pcdev->irq, pcdev);
exit_free_dma:
pxa_free_dma(pcdev->dma_chans[2]);
exit_free_dma_u:
pxa_free_dma(pcdev->dma_chans[1]);
exit_free_dma_y:
pxa_free_dma(pcdev->dma_chans[0]);
-exit_iounmap:
- iounmap(base);
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_clk:
- clk_put(pcdev->clk);
-exit_kfree:
- kfree(pcdev);
-exit:
return err;
}
@@ -1806,24 +1782,13 @@ static int pxa_camera_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct pxa_camera_dev *pcdev = container_of(soc_host,
struct pxa_camera_dev, soc_host);
- struct resource *res;
-
- clk_put(pcdev->clk);
pxa_free_dma(pcdev->dma_chans[0]);
pxa_free_dma(pcdev->dma_chans[1]);
pxa_free_dma(pcdev->dma_chans[2]);
- free_irq(pcdev->irq, pcdev);
soc_camera_host_unregister(soc_host);
- iounmap(pcdev->base);
-
- res = pcdev->res;
- release_mem_region(res->start, resource_size(res));
-
- kfree(pcdev);
-
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
return 0;
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index ebbc126e71a6..bb08a46432f4 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -516,7 +516,7 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
pcdev->active = NULL;
ret = sh_mobile_ceu_capture(pcdev);
- do_gettimeofday(&vb->v4l2_buf.timestamp);
+ v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
if (!ret) {
vb->v4l2_buf.field = pcdev->field;
vb->v4l2_buf.sequence = pcdev->sequence++;
@@ -543,7 +543,7 @@ static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
return NULL;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -572,7 +572,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
- pm_runtime_put_sync(ici->v4l2_dev.dev);
+ pm_runtime_put(ici->v4l2_dev.dev);
return ret;
}
@@ -587,7 +587,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
return 0;
}
-/* Called with .video_lock held */
+/* Called with .host_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -612,7 +612,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
}
spin_unlock_irq(&pcdev->lock);
- pm_runtime_put_sync(ici->v4l2_dev.dev);
+ pm_runtime_put(ici->v4l2_dev.dev);
dev_info(icd->parent,
"SuperH Mobile CEU driver detached from camera %d\n",
@@ -1064,7 +1064,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Add our control */
v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
- V4L2_CID_SHARPNESS, 0, 1, 1, 0);
+ V4L2_CID_SHARPNESS, 0, 1, 1, 1);
if (icd->ctrl_handler.error)
return icd->ctrl_handler.error;
@@ -2088,15 +2088,13 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
- err = -ENODEV;
- goto exit;
+ return -ENODEV;
}
- pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&pcdev->capture);
@@ -2105,19 +2103,17 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->pdata = pdev->dev.platform_data;
if (!pcdev->pdata) {
- err = -EINVAL;
dev_err(&pdev->dev, "CEU platform data not set.\n");
- goto exit_kfree;
+ return -EINVAL;
}
pcdev->max_width = pcdev->pdata->max_width ? : 2560;
pcdev->max_height = pcdev->pdata->max_height ? : 1920;
- base = ioremap_nocache(res->start, resource_size(res));
+ base = devm_request_and_ioremap(&pdev->dev, res);
if (!base) {
- err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
- goto exit_kfree;
+ return -ENXIO;
}
pcdev->irq = irq;
@@ -2133,16 +2129,15 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
DMA_MEMORY_EXCLUSIVE);
if (!err) {
dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
- err = -ENXIO;
- goto exit_iounmap;
+ return -ENXIO;
}
pcdev->video_limit = resource_size(res);
}
/* request irq */
- err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
- dev_name(&pdev->dev), pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
+ IRQF_DISABLED, dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
@@ -2246,15 +2241,9 @@ exit_free_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
- free_irq(pcdev->irq, pcdev);
exit_release_mem:
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
-exit_iounmap:
- iounmap(base);
-exit_kfree:
- kfree(pcdev);
-exit:
return err;
}
@@ -2267,10 +2256,8 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev)
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
- free_irq(pcdev->irq, pcdev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
- iounmap(pcdev->base);
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
if (csi2_pdev && csi2_pdev->dev.driver) {
struct module *csi2_drv = csi2_pdev->dev.driver->owner;
@@ -2279,7 +2266,6 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev)
platform_device_put(csi2_pdev);
module_put(csi2_drv);
}
- kfree(pcdev);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index a17aba9a0104..42c559eb4937 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -318,23 +318,16 @@ static int sh_csi2_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct sh_csi2), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_csi2), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->irq = irq;
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "CSI2 register region already claimed\n");
- ret = -EBUSY;
- goto ereqreg;
- }
-
- priv->base = ioremap(res->start, resource_size(res));
+ priv->base = devm_request_and_ioremap(&pdev->dev, res);
if (!priv->base) {
- ret = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CSI2 registers.\n");
- goto eremap;
+ return -ENXIO;
}
priv->pdev = pdev;
@@ -357,11 +350,7 @@ static int sh_csi2_probe(struct platform_device *pdev)
return 0;
esdreg:
- iounmap(priv->base);
-eremap:
- release_mem_region(res->start, resource_size(res));
-ereqreg:
- kfree(priv);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -369,14 +358,10 @@ ereqreg:
static int sh_csi2_remove(struct platform_device *pdev)
{
struct sh_csi2 *priv = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
v4l2_device_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
- iounmap(priv->base);
- release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return 0;
}
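
The pxa_camera, sh_mobile_ceu and sh_csi2 conversions above all follow the same managed-resource pattern: allocate with devm_kzalloc(), map the register window with devm_request_and_ioremap(), and hook the interrupt with devm_request_irq(), which is why the exit_* unwind labels and most of the remove() bodies disappear. A minimal sketch of that pattern for a hypothetical platform driver (the foo names are illustrative, not part of the patch):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct foo_dev {
            void __iomem *base;
            int irq;
    };

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
            /* acknowledge the hardware here */
            return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            int irq = platform_get_irq(pdev, 0);
            struct foo_dev *foo;
            int err;

            if (!res || irq <= 0)
                    return -ENODEV;

            /* freed automatically when the device is unbound */
            foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
            if (!foo)
                    return -ENOMEM;

            /* request_mem_region() + ioremap() in one managed call */
            foo->base = devm_request_and_ioremap(&pdev->dev, res);
            if (!foo->base)
                    return -ENOMEM;

            /* released automatically, so remove() needs no free_irq() */
            err = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                                   dev_name(&pdev->dev), foo);
            if (err)
                    return err;

            foo->irq = irq;
            platform_set_drvdata(pdev, foo);
            return 0;
    }

Anything that is not devm-managed (DMA channels, the soc_camera host registration) still needs explicit unwinding, which is why pxa_camera_probe() keeps its exit_free_dma* labels.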
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 2ec90eae6ba0..8ec98051ea73 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -50,22 +50,22 @@ static LIST_HEAD(hosts);
static LIST_HEAD(devices);
static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
-int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl)
+int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
- int ret = regulator_bulk_enable(icl->num_regulators,
- icl->regulators);
+ int ret = regulator_bulk_enable(ssdd->num_regulators,
+ ssdd->regulators);
if (ret < 0) {
dev_err(dev, "Cannot enable regulators\n");
return ret;
}
- if (icl->power) {
- ret = icl->power(dev, 1);
+ if (ssdd->power) {
+ ret = ssdd->power(dev, 1);
if (ret < 0) {
dev_err(dev,
"Platform failed to power-on the camera.\n");
- regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
+ regulator_bulk_disable(ssdd->num_regulators,
+ ssdd->regulators);
}
}
@@ -73,13 +73,13 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl)
}
EXPORT_SYMBOL(soc_camera_power_on);
-int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl)
+int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
int ret = 0;
int err;
- if (icl->power) {
- err = icl->power(dev, 0);
+ if (ssdd->power) {
+ err = ssdd->power(dev, 0);
if (err < 0) {
dev_err(dev,
"Platform failed to power-off the camera.\n");
@@ -87,8 +87,8 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl)
}
}
- err = regulator_bulk_disable(icl->num_regulators,
- icl->regulators);
+ err = regulator_bulk_disable(ssdd->num_regulators,
+ ssdd->regulators);
if (err < 0) {
dev_err(dev, "Cannot disable regulators\n");
ret = ret ? : err;
@@ -136,29 +136,29 @@ EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
/**
* soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
- * @icl: camera platform parameters
+ * @ssdd: camera platform parameters
* @cfg: media bus configuration
* @return: resulting flags
*/
-unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
const struct v4l2_mbus_config *cfg)
{
unsigned long f, flags = cfg->flags;
/* If only one of the two polarities is supported, switch to the opposite */
- if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_HSYNC) {
f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
}
- if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_VSYNC) {
f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
}
- if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
+ if (ssdd->flags & SOCAM_SENSOR_INVERT_PCLK) {
f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
@@ -383,7 +383,7 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
return vb2_prepare_buf(&icd->vb2_vidq, b);
}
-/* Always entered with .video_lock held */
+/* Always entered with .host_lock held */
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
@@ -450,7 +450,7 @@ egfmt:
return ret;
}
-/* Always entered with .video_lock held */
+/* Always entered with .host_lock held */
static void soc_camera_free_user_formats(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -509,7 +509,7 @@ static int soc_camera_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
struct soc_camera_host *ici;
int ret;
@@ -517,9 +517,16 @@ static int soc_camera_open(struct file *file)
/* No device driver attached */
return -ENODEV;
+ /*
+ * Don't mess with the host during probe: wait until the loop in
+ * scan_add_host() completes
+ */
+ if (mutex_lock_interruptible(&list_lock))
+ return -ERESTARTSYS;
ici = to_soc_camera_host(icd->parent);
+ mutex_unlock(&list_lock);
- if (mutex_lock_interruptible(&icd->video_lock))
+ if (mutex_lock_interruptible(&ici->host_lock))
return -ERESTARTSYS;
if (!try_module_get(ici->ops->owner)) {
dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
@@ -545,13 +552,10 @@ static int soc_camera_open(struct file *file)
};
/* The camera could have been already on, try to reset */
- if (icl->reset)
- icl->reset(icd->pdev);
+ if (sdesc->subdev_desc.reset)
+ sdesc->subdev_desc.reset(icd->pdev);
- /* Don't mess with the host during probe */
- mutex_lock(&ici->host_lock);
ret = ici->ops->add(icd);
- mutex_unlock(&ici->host_lock);
if (ret < 0) {
dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
goto eiciadd;
@@ -570,7 +574,7 @@ static int soc_camera_open(struct file *file)
* Try to configure with default parameters. Notice: this is the
* very first open, so, we cannot race against other calls,
* apart from someone else calling open() simultaneously, but
- * .video_lock is protecting us against it.
+ * .host_lock is protecting us against it.
*/
ret = soc_camera_set_fmt(icd, &f);
if (ret < 0)
@@ -585,7 +589,7 @@ static int soc_camera_open(struct file *file)
}
v4l2_ctrl_handler_setup(&icd->ctrl_handler);
}
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
file->private_data = icd;
dev_dbg(icd->pdev, "camera device open\n");
@@ -593,7 +597,7 @@ static int soc_camera_open(struct file *file)
return 0;
/*
- * First four errors are entered with the .video_lock held
+ * First four errors are entered with the .host_lock held
* and use_count == 1
*/
einitvb:
@@ -607,7 +611,7 @@ eiciadd:
icd->use_count--;
module_put(ici->ops->owner);
emodule:
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return ret;
}
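
The soc_camera hunks above replace the per-device icd->video_lock with the per-host ici->host_lock, and open() now takes the global list_lock only long enough to read the host pointer safely while scan_add_host() may still be iterating. Condensed, the locking sequence introduced in soc_camera_open() looks like this (a sketch of the hunks above, with the body elided):

    if (mutex_lock_interruptible(&list_lock))       /* serialize against scan_add_host() */
            return -ERESTARTSYS;
    ici = to_soc_camera_host(icd->parent);
    mutex_unlock(&list_lock);

    if (mutex_lock_interruptible(&ici->host_lock))  /* replaces icd->video_lock */
            return -ERESTARTSYS;
    /* ... ici->ops->add(icd), soc_camera_set_fmt(), control setup ... */
    mutex_unlock(&ici->host_lock);

Because ici->ops->add() is now called with host_lock already held, the explicit lock/unlock pair around it is dropped, and video_dev_create() points vdev->lock at the same mutex.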
@@ -617,7 +621,7 @@ static int soc_camera_close(struct file *file)
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
icd->use_count--;
if (!icd->use_count) {
pm_runtime_suspend(&icd->vdev->dev);
@@ -632,7 +636,7 @@ static int soc_camera_close(struct file *file)
if (icd->streamer == file)
icd->streamer = NULL;
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
module_put(ici->ops->owner);
@@ -669,13 +673,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
if (icd->streamer != file)
return -EBUSY;
- if (mutex_lock_interruptible(&icd->video_lock))
+ if (mutex_lock_interruptible(&ici->host_lock))
return -ERESTARTSYS;
if (ici->ops->init_videobuf)
err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
else
err = vb2_mmap(&icd->vb2_vidq, vma);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
@@ -694,26 +698,28 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
if (icd->streamer != file)
return POLLERR;
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream))
dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
else
res = ici->ops->poll(file, pt);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return res;
}
void soc_camera_lock(struct vb2_queue *vq)
{
struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- mutex_lock(&icd->video_lock);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ mutex_lock(&ici->host_lock);
}
EXPORT_SYMBOL(soc_camera_lock);
void soc_camera_unlock(struct vb2_queue *vq)
{
struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- mutex_unlock(&icd->video_lock);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ mutex_unlock(&ici->host_lock);
}
EXPORT_SYMBOL(soc_camera_unlock);
@@ -908,6 +914,8 @@ static int soc_camera_s_crop(struct file *file, void *fh,
dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
rect->width, rect->height, rect->left, rect->top);
+ current_crop.type = a->type;
+
/* If get_crop fails, we'll let host and / or client drivers decide */
ret = ici->ops->get_crop(icd, &current_crop);
@@ -1050,7 +1058,7 @@ static void scan_add_host(struct soc_camera_host *ici)
{
struct soc_camera_device *icd;
- mutex_lock(&ici->host_lock);
+ mutex_lock(&list_lock);
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
@@ -1059,28 +1067,29 @@ static void scan_add_host(struct soc_camera_host *ici)
}
}
- mutex_unlock(&ici->host_lock);
+ mutex_unlock(&list_lock);
}
#ifdef CONFIG_I2C_BOARDINFO
static int soc_camera_init_i2c(struct soc_camera_device *icd,
- struct soc_camera_link *icl)
+ struct soc_camera_desc *sdesc)
{
struct i2c_client *client;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct i2c_adapter *adap = i2c_get_adapter(shd->i2c_adapter_id);
struct v4l2_subdev *subdev;
if (!adap) {
dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
- icl->i2c_adapter_id);
+ shd->i2c_adapter_id);
goto ei2cga;
}
- icl->board_info->platform_data = icl;
+ shd->board_info->platform_data = &sdesc->subdev_desc;
subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
- icl->board_info, NULL);
+ shd->board_info, NULL);
if (!subdev)
goto ei2cnd;
@@ -1108,7 +1117,7 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
i2c_put_adapter(adap);
}
#else
-#define soc_camera_init_i2c(icd, icl) (-ENODEV)
+#define soc_camera_init_i2c(icd, sdesc) (-ENODEV)
#define soc_camera_free_i2c(icd) do {} while (0)
#endif
@@ -1118,7 +1127,9 @@ static int video_dev_create(struct soc_camera_device *icd);
static int soc_camera_probe(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
+ struct soc_camera_host_desc *shd = &sdesc->host_desc;
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
struct device *control = NULL;
struct v4l2_subdev *sd;
struct v4l2_mbus_framefmt mf;
@@ -1137,16 +1148,13 @@ static int soc_camera_probe(struct soc_camera_device *icd)
if (ret < 0)
return ret;
- ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
- icl->regulators);
- if (ret < 0)
- goto ereg;
-
/* The camera could have been already on, try to reset */
- if (icl->reset)
- icl->reset(icd->pdev);
+ if (ssdd->reset)
+ ssdd->reset(icd->pdev);
+ mutex_lock(&ici->host_lock);
ret = ici->ops->add(icd);
+ mutex_unlock(&ici->host_lock);
if (ret < 0)
goto eadd;
@@ -1156,18 +1164,18 @@ static int soc_camera_probe(struct soc_camera_device *icd)
goto evdc;
/* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */
- if (icl->board_info) {
- ret = soc_camera_init_i2c(icd, icl);
+ if (shd->board_info) {
+ ret = soc_camera_init_i2c(icd, sdesc);
if (ret < 0)
goto eadddev;
- } else if (!icl->add_device || !icl->del_device) {
+ } else if (!shd->add_device || !shd->del_device) {
ret = -EINVAL;
goto eadddev;
} else {
- if (icl->module_name)
- ret = request_module(icl->module_name);
+ if (shd->module_name)
+ ret = request_module(shd->module_name);
- ret = icl->add_device(icd);
+ ret = shd->add_device(icd);
if (ret < 0)
goto eadddev;
@@ -1178,7 +1186,7 @@ static int soc_camera_probe(struct soc_camera_device *icd)
control = to_soc_camera_control(icd);
if (!control || !control->driver || !dev_get_drvdata(control) ||
!try_module_get(control->driver->owner)) {
- icl->del_device(icd);
+ shd->del_device(icd);
ret = -ENODEV;
goto enodrv;
}
@@ -1204,7 +1212,7 @@ static int soc_camera_probe(struct soc_camera_device *icd)
* itself is protected against concurrent open() calls, but we also have
* to protect our data.
*/
- mutex_lock(&icd->video_lock);
+ mutex_lock(&ici->host_lock);
ret = soc_camera_video_start(icd);
if (ret < 0)
@@ -1220,19 +1228,19 @@ static int soc_camera_probe(struct soc_camera_device *icd)
ici->ops->remove(icd);
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
return 0;
evidstart:
- mutex_unlock(&icd->video_lock);
+ mutex_unlock(&ici->host_lock);
soc_camera_free_user_formats(icd);
eiufmt:
ectrl:
- if (icl->board_info) {
+ if (shd->board_info) {
soc_camera_free_i2c(icd);
} else {
- icl->del_device(icd);
+ shd->del_device(icd);
module_put(control->driver->owner);
}
enodrv:
@@ -1240,10 +1248,10 @@ eadddev:
video_device_release(icd->vdev);
icd->vdev = NULL;
evdc:
+ mutex_lock(&ici->host_lock);
ici->ops->remove(icd);
+ mutex_unlock(&ici->host_lock);
eadd:
- regulator_bulk_free(icl->num_regulators, icl->regulators);
-ereg:
v4l2_ctrl_handler_free(&icd->ctrl_handler);
return ret;
}
@@ -1254,7 +1262,7 @@ ereg:
*/
static int soc_camera_remove(struct soc_camera_device *icd)
{
- struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_desc *sdesc = to_soc_camera_desc(icd);
struct video_device *vdev = icd->vdev;
BUG_ON(!icd->parent);
@@ -1265,19 +1273,17 @@ static int soc_camera_remove(struct soc_camera_device *icd)
icd->vdev = NULL;
}
- if (icl->board_info) {
+ if (sdesc->host_desc.board_info) {
soc_camera_free_i2c(icd);
} else {
struct device_driver *drv = to_soc_camera_control(icd)->driver;
if (drv) {
- icl->del_device(icd);
+ sdesc->host_desc.del_device(icd);
module_put(drv->owner);
}
}
soc_camera_free_user_formats(icd);
- regulator_bulk_free(icl->num_regulators, icl->regulators);
-
return 0;
}
@@ -1442,7 +1448,6 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
icd->devnum = num;
icd->use_count = 0;
icd->host_priv = NULL;
- mutex_init(&icd->video_lock);
list_add_tail(&icd->list, &devices);
@@ -1500,7 +1505,7 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->release = video_device_release;
vdev->tvnorms = V4L2_STD_UNKNOWN;
vdev->ctrl_handler = &icd->ctrl_handler;
- vdev->lock = &icd->video_lock;
+ vdev->lock = &ici->host_lock;
icd->vdev = vdev;
@@ -1508,7 +1513,7 @@ static int video_dev_create(struct soc_camera_device *icd)
}
/*
- * Called from soc_camera_probe() above (with .video_lock held???)
+ * Called from soc_camera_probe() above with .host_lock held
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
@@ -1532,18 +1537,25 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
static int soc_camera_pdrv_probe(struct platform_device *pdev)
{
- struct soc_camera_link *icl = pdev->dev.platform_data;
+ struct soc_camera_desc *sdesc = pdev->dev.platform_data;
+ struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc;
struct soc_camera_device *icd;
+ int ret;
- if (!icl)
+ if (!sdesc)
return -EINVAL;
icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL);
if (!icd)
return -ENOMEM;
- icd->iface = icl->bus_id;
- icd->link = icl;
+ ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators,
+ ssdd->regulators);
+ if (ret < 0)
+ return ret;
+
+ icd->iface = sdesc->host_desc.bus_id;
+ icd->sdesc = sdesc;
icd->pdev = &pdev->dev;
platform_set_drvdata(pdev, icd);
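
With the hunk above, regulator lookup moves from soc_camera_probe() into soc_camera_pdrv_probe() and becomes managed through devm_regulator_bulk_get(), so the regulator_bulk_free() calls in the probe error path and in soc_camera_remove() go away while soc_camera_power_on()/_off() keep doing the bulk enable/disable. A minimal sketch of the same split, with made-up supply names (not from the patch):

    #include <linux/platform_device.h>
    #include <linux/regulator/consumer.h>

    static struct regulator_bulk_data foo_supplies[] = {
            { .supply = "vddio" },          /* illustrative supply names */
            { .supply = "vddcore" },
    };

    /* probe time: look the supplies up once; they are released on unbind */
    static int foo_probe(struct platform_device *pdev)
    {
            return devm_regulator_bulk_get(&pdev->dev,
                                           ARRAY_SIZE(foo_supplies),
                                           foo_supplies);
    }

    /* power on/off time: only enable/disable, never get/free */
    static int foo_power(struct device *dev, int on)
    {
            if (on)
                    return regulator_bulk_enable(ARRAY_SIZE(foo_supplies),
                                                 foo_supplies);
            return regulator_bulk_disable(ARRAY_SIZE(foo_supplies),
                                          foo_supplies);
    }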
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index 7cf7fd16481f..ce3b1d6a4734 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -54,7 +54,7 @@ static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- return soc_camera_set_power(p->icd->control, p->icd->link, on);
+ return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, on);
}
static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
@@ -148,7 +148,7 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -173,7 +173,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
evdrs:
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return ret;
}
@@ -185,7 +184,6 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
p->icd->control = NULL;
v4l2_device_unregister_subdev(&priv->subdev);
platform_set_drvdata(pdev, NULL);
- kfree(priv);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
index a397812635d6..89dce097a827 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -378,9 +378,6 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
- if (mf->fourcc == V4L2_PIX_FMT_JPEG)
- return 0;
-
if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
return width * mf->bits_per_sample / 8;
@@ -403,9 +400,6 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line);
s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height)
{
- if (mf->fourcc == V4L2_PIX_FMT_JPEG)
- return 0;
-
if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
return bytes_per_line * height;
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index d854d08a6c7f..c3a2a4484401 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -130,7 +130,7 @@ static void timblogiw_dma_cb(void *data)
if (vb->state != VIDEOBUF_ERROR) {
list_del(&vb->queue);
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
vb->field_count = fh->frame_count * 2;
vb->state = VIDEOBUF_DONE;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 63e8c3461239..b051c4a28554 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -18,6 +18,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
#include <media/ov7670.h>
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
@@ -63,6 +64,7 @@ enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
struct via_camera {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
struct video_device vdev;
struct v4l2_subdev *sensor;
struct platform_device *platdev;
@@ -818,47 +820,6 @@ static int viacam_g_chip_ident(struct file *file, void *priv,
}
/*
- * Control ops are passed through to the sensor.
- */
-static int viacam_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-
-static int viacam_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-
-static int viacam_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct via_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->lock);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-/*
* Only one input.
*/
static int viacam_enum_input(struct file *filp, void *priv,
@@ -1214,9 +1175,6 @@ static int viacam_enum_frameintervals(struct file *filp, void *priv,
static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
.vidioc_g_chip_ident = viacam_g_chip_ident,
- .vidioc_queryctrl = viacam_queryctrl,
- .vidioc_g_ctrl = viacam_g_ctrl,
- .vidioc_s_ctrl = viacam_s_ctrl,
.vidioc_enum_input = viacam_enum_input,
.vidioc_g_input = viacam_g_input,
.vidioc_s_input = viacam_s_input,
@@ -1418,8 +1376,12 @@ static int viacam_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Unable to register v4l2 device\n");
- return ret;
+ goto out_free;
}
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (ret)
+ goto out_unregister;
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
/*
* Convince the system that we can do DMA.
*/
@@ -1436,7 +1398,7 @@ static int viacam_probe(struct platform_device *pdev)
*/
ret = via_sensor_power_setup(cam);
if (ret)
- goto out_unregister;
+ goto out_ctrl_hdl_free;
via_sensor_power_up(cam);
/*
@@ -1485,8 +1447,12 @@ out_irq:
free_irq(viadev->pdev->irq, cam);
out_power_down:
via_sensor_power_release(cam);
+out_ctrl_hdl_free:
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
out_unregister:
v4l2_device_unregister(&cam->v4l2_dev);
+out_free:
+ kfree(cam);
return ret;
}
@@ -1499,6 +1465,8 @@ static int viacam_remove(struct platform_device *pdev)
v4l2_device_unregister(&cam->v4l2_dev);
free_irq(viadev->pdev->irq, cam);
via_sensor_power_release(cam);
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ kfree(cam);
via_cam_info = NULL;
return 0;
}
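
The via-camera hunks above drop the viacam_queryctrl/g_ctrl/s_ctrl passthroughs in favour of the control framework: a handler is initialised in probe and attached to the v4l2_device, so controls registered by the sensor subdev are inherited automatically. A condensed sketch of the registration step (names other than the v4l2 API are illustrative):

    #include <media/v4l2-ctrls.h>
    #include <media/v4l2-device.h>

    static int foo_init_controls(struct v4l2_device *v4l2_dev,
                                 struct v4l2_ctrl_handler *hdl)
    {
            int ret = v4l2_ctrl_handler_init(hdl, 10);

            if (ret)
                    return ret;
            /* subdevs registered against this v4l2_device get their
             * controls merged into hdl; no per-ioctl passthrough needed */
            v4l2_dev->ctrl_handler = hdl;
            return 0;
    }

On failure and in remove(), v4l2_ctrl_handler_free() releases the handler, matching the new out_ctrl_hdl_free label and the viacam_remove() change above.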
diff --git a/drivers/media/platform/vino.c b/drivers/media/platform/vino.c
index 70b0bf4b2900..eb5d6f955709 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/media/platform/vino.c
@@ -2474,8 +2474,8 @@ static irqreturn_t vino_interrupt(int irq, void *dev_id)
if ((!handled_a) && (done_a || skip_a)) {
if (!skip_a) {
- do_gettimeofday(&vino_drvdata->
- a.int_data.timestamp);
+ v4l2_get_timestamp(
+ &vino_drvdata->a.int_data.timestamp);
vino_drvdata->a.int_data.frame_counter = fc_a;
}
vino_drvdata->a.int_data.skip = skip_a;
@@ -2489,8 +2489,8 @@ static irqreturn_t vino_interrupt(int irq, void *dev_id)
if ((!handled_b) && (done_b || skip_b)) {
if (!skip_b) {
- do_gettimeofday(&vino_drvdata->
- b.int_data.timestamp);
+ v4l2_get_timestamp(
+ &vino_drvdata->b.int_data.timestamp);
vino_drvdata->b.int_data.frame_counter = fc_b;
}
vino_drvdata->b.int_data.skip = skip_b;
@@ -3410,6 +3410,9 @@ static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
if (fb->map_count > 0)
b->flags |= V4L2_BUF_FLAG_MAPPED;
+ b->flags &= ~V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
b->index = fb->id;
b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
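
Several drivers in this series (sh_mobile_ceu, timblogiw, vino, vivi) switch from do_gettimeofday() to v4l2_get_timestamp(), which fills the struct timeval from the monotonic clock, and vino additionally advertises that through the buffer flags. A minimal sketch of the combined pattern (the helper and flag names are from the patch, the surrounding function is illustrative):

    #include <linux/videodev2.h>
    #include <media/v4l2-common.h>

    static void foo_buffer_done(struct v4l2_buffer *b)
    {
            /* monotonic timestamp instead of wall-clock time */
            v4l2_get_timestamp(&b->timestamp);

            /* tell userspace which clock the timestamp comes from */
            b->flags &= ~V4L2_BUF_FLAG_TIMESTAMP_MASK;
            b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    }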
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 0d59b9db83cb..8a33a712f480 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -36,9 +36,17 @@
#define VIVI_MODULE_NAME "vivi"
-/* Wake up at about 30 fps */
-#define WAKE_NUMERATOR 30
-#define WAKE_DENOMINATOR 1001
+/* Maximum allowed frame rate
+ *
+ * Vivi allows setting timeperframe anywhere in the [1/FPS_MAX, FPS_MAX/1] range.
+ *
+ * Ideally FPS_MAX should be infinity, i.e. practically UINT_MAX, but that
+ * might trigger errors in applications that manipulate these values.
+ *
+ * Besides, for tpf < 1ms the image-generation logic would have to change
+ * to avoid producing frames with identical content.
+ */
+#define FPS_MAX 1000
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
@@ -69,6 +77,12 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
/* Global font descriptor */
static const u8 *font8x16;
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_min = {.numerator = 1, .denominator = FPS_MAX},
+ tpf_max = {.numerator = FPS_MAX, .denominator = 1},
+ tpf_default = {.numerator = 1001, .denominator = 30000}; /* NTSC */
+
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -77,13 +91,13 @@ static const u8 *font8x16;
------------------------------------------------------------------*/
struct vivi_fmt {
- char *name;
+ const char *name;
u32 fourcc; /* v4l2 format id */
u8 depth;
bool is_yuv;
};
-static struct vivi_fmt formats[] = {
+static const struct vivi_fmt formats[] = {
{
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
@@ -150,14 +164,14 @@ static struct vivi_fmt formats[] = {
},
};
-static struct vivi_fmt *get_format(struct v4l2_format *f)
+static const struct vivi_fmt *__get_format(u32 pixelformat)
{
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(formats); k++) {
fmt = &formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == pixelformat)
break;
}
@@ -167,12 +181,17 @@ static struct vivi_fmt *get_format(struct v4l2_format *f)
return &formats[k];
}
+static const struct vivi_fmt *get_format(struct v4l2_format *f)
+{
+ return __get_format(f->fmt.pix.pixelformat);
+}
+
/* buffer for one video frame */
struct vivi_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
struct list_head list;
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
};
struct vivi_dmaqueue {
@@ -231,15 +250,17 @@ struct vivi_dev {
int input;
/* video capture */
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
+ struct v4l2_fract timeperframe;
unsigned int width, height;
struct vb2_queue vb_vidq;
unsigned int field_count;
u8 bars[9][3];
- u8 line[MAX_WIDTH * 8];
+ u8 line[MAX_WIDTH * 8] __attribute__((__aligned__(4)));
unsigned int pixelsize;
u8 alpha_component;
+ u32 textfg, textbg;
};
/* ------------------------------------------------------------------
@@ -276,7 +297,7 @@ struct bar_std {
/* Maximum number of bars are 10 - otherwise, the input print code
should be modified */
-static struct bar_std bars[] = {
+static const struct bar_std bars[] = {
{ /* Standard ITU-R color bar sequence */
{ COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
@@ -511,66 +532,100 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
static void precalculate_line(struct vivi_dev *dev)
{
- int w;
-
- for (w = 0; w < dev->width * 2; w++) {
- int colorpos = w / (dev->width / 8) % 8;
-
- gen_twopix(dev, dev->line + w * dev->pixelsize, colorpos, w & 1);
+ unsigned pixsize = dev->pixelsize;
+ unsigned pixsize2 = 2*pixsize;
+ int colorpos;
+ u8 *pos;
+
+ for (colorpos = 0; colorpos < 16; ++colorpos) {
+ u8 pix[8];
+ int wstart = colorpos * dev->width / 8;
+ int wend = (colorpos+1) * dev->width / 8;
+ int w;
+
+ gen_twopix(dev, &pix[0], colorpos % 8, 0);
+ gen_twopix(dev, &pix[pixsize], colorpos % 8, 1);
+
+ for (w = wstart/2*2, pos = dev->line + w*pixsize; w < wend; w += 2, pos += pixsize2)
+ memcpy(pos, pix, pixsize2);
}
}
+/* need this to do rgb24 rendering */
+typedef struct { u16 __; u8 _; } __attribute__((packed)) x24;
+
static void gen_text(struct vivi_dev *dev, char *basep,
int y, int x, char *text)
{
int line;
+ unsigned int width = dev->width;
/* Checks if it is possible to show string */
- if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+ if (y + 16 >= dev->height || x + strlen(text) * 8 >= width)
return;
/* Print stream time */
- for (line = y; line < y + 16; line++) {
- int j = 0;
- char *pos = basep + line * dev->width * dev->pixelsize + x * dev->pixelsize;
- char *s;
-
- for (s = text; *s; s++) {
- u8 chr = font8x16[*s * 16 + line - y];
- int i;
-
- for (i = 0; i < 7; i++, j++) {
- /* Draw white font on black background */
- if (chr & (1 << (7 - i)))
- gen_twopix(dev, pos + j * dev->pixelsize, WHITE, (x+y) & 1);
- else
- gen_twopix(dev, pos + j * dev->pixelsize, TEXT_BLACK, (x+y) & 1);
- }
- }
+#define PRINTSTR(PIXTYPE) do { \
+ PIXTYPE fg; \
+ PIXTYPE bg; \
+ memcpy(&fg, &dev->textfg, sizeof(PIXTYPE)); \
+ memcpy(&bg, &dev->textbg, sizeof(PIXTYPE)); \
+ \
+ for (line = 0; line < 16; line++) { \
+ PIXTYPE *pos = (PIXTYPE *)( basep + ((y + line) * width + x) * sizeof(PIXTYPE) ); \
+ u8 *s; \
+ \
+ for (s = text; *s; s++) { \
+ u8 chr = font8x16[*s * 16 + line]; \
+ \
+ pos[0] = (chr & (0x01 << 7) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 6) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 5) ? fg : bg); \
+ pos[3] = (chr & (0x01 << 4) ? fg : bg); \
+ pos[4] = (chr & (0x01 << 3) ? fg : bg); \
+ pos[5] = (chr & (0x01 << 2) ? fg : bg); \
+ pos[6] = (chr & (0x01 << 1) ? fg : bg); \
+ pos[7] = (chr & (0x01 << 0) ? fg : bg); \
+ \
+ pos += 8; \
+ } \
+ } \
+} while (0)
+
+ switch (dev->pixelsize) {
+ case 2:
+ PRINTSTR(u16); break;
+ case 4:
+ PRINTSTR(u32); break;
+ case 3:
+ PRINTSTR(x24); break;
}
}
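
The gen_text() rewrite above renders a whole character row with plain array assignments, and the packed x24 struct exists so the same PRINTSTR() body can store 3-byte RGB24 pixels with a single assignment. A tiny stand-alone illustration of that trick (plain C, outside the kernel):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* same shape as the driver's x24: a packed 3-byte struct */
    typedef struct { uint16_t hi; uint8_t lo; } __attribute__((packed)) x24;

    int main(void)
    {
            uint32_t rgb = 0x00336699;      /* illustrative pixel value */
            x24 fg, line[8];
            int i;

            /* first three bytes, like memcpy(&fg, &dev->textfg, ...) above */
            memcpy(&fg, &rgb, sizeof(fg));

            for (i = 0; i < 8; i++)         /* one assignment per pixel */
                    line[i] = fg;

            printf("sizeof(x24)=%zu, 8 pixels take %zu bytes\n",
                   sizeof(x24), sizeof(line));      /* 3 and 24 */
            return 0;
    }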
static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- int wmax = dev->width;
+ int stride = dev->width * dev->pixelsize;
int hmax = dev->height;
- struct timeval ts;
void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
unsigned ms;
char str[100];
int h, line = 1;
+ u8 *linestart;
s32 gain;
if (!vbuf)
return;
+ linestart = dev->line + (dev->mv_count % dev->width) * dev->pixelsize;
+
for (h = 0; h < hmax; h++)
- memcpy(vbuf + h * wmax * dev->pixelsize,
- dev->line + (dev->mv_count % wmax) * dev->pixelsize,
- wmax * dev->pixelsize);
+ memcpy(vbuf + h * stride, linestart, stride);
/* Updates stream time */
+ gen_twopix(dev, (u8 *)&dev->textbg, TEXT_BLACK, /*odd=*/ 0);
+ gen_twopix(dev, (u8 *)&dev->textfg, WHITE, /*odd=*/ 0);
+
dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
dev->jiffies = jiffies;
ms = dev->ms;
@@ -622,8 +677,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
dev->field_count++;
buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
- do_gettimeofday(&ts);
- buf->vb.v4l2_buf.timestamp = ts;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
}
static void vivi_thread_tick(struct vivi_dev *dev)
@@ -645,7 +699,7 @@ static void vivi_thread_tick(struct vivi_dev *dev)
list_del(&buf->list);
spin_unlock_irqrestore(&dev->slock, flags);
- do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
/* Fill buffer */
vivi_fillbuff(dev, buf);
@@ -655,8 +709,8 @@ static void vivi_thread_tick(struct vivi_dev *dev)
dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
}
-#define frames_to_ms(frames) \
- ((frames * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
+#define frames_to_ms(dev, frames) \
+ ((frames * dev->timeperframe.numerator * 1000) / dev->timeperframe.denominator)
static void vivi_sleep(struct vivi_dev *dev)
{
@@ -672,7 +726,7 @@ static void vivi_sleep(struct vivi_dev *dev)
goto stop_task;
/* Calculate time to wake up */
- timeout = msecs_to_jiffies(frames_to_ms(1));
+ timeout = msecs_to_jiffies(frames_to_ms(dev, 1));
vivi_thread_tick(dev);
@@ -872,7 +926,7 @@ static void vivi_unlock(struct vb2_queue *vq)
}
-static struct vb2_ops vivi_video_qops = {
+static const struct vb2_ops vivi_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -903,7 +957,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
@@ -939,7 +993,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vivi_dev *dev = video_drvdata(file);
- struct vivi_fmt *fmt;
+ const struct vivi_fmt *fmt;
fmt = get_format(f);
if (!fmt) {
@@ -1044,6 +1098,70 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
+/* timeperframe is arbitrary and continuous */
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ const struct vivi_fmt *fmt;
+
+ if (fival->index)
+ return -EINVAL;
+
+ fmt = __get_format(fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ /* regarding width & height - we support any */
+
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+
+ /* fill in stepwise (step=1.0 is required by the V4L2 spec) */
+ fival->stepwise.min = tpf_min;
+ fival->stepwise.max = tpf_max;
+ fival->stepwise.step = (struct v4l2_fract) {1, 1};
+
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->timeperframe;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
+#define FRACT_CMP(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_fract tpf;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ tpf = parm->parm.capture.timeperframe;
+
+ /* tpf: {*, 0} resets timing; clip to [min, max]*/
+ tpf = tpf.denominator ? tpf : tpf_default;
+ tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+
+ dev->timeperframe = tpf;
+ parm->parm.capture.timeperframe = tpf;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
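
vidioc_s_parm() above clips the requested timeperframe to [tpf_min, tpf_max] with FRACT_CMP, which compares two fractions by cross-multiplying in 64 bits so no floating point is needed. A small self-contained illustration of the same arithmetic (plain C, outside the kernel):

    #include <stdint.h>
    #include <stdio.h>

    struct fract { uint32_t numerator, denominator; };

    /* a/b OP c/d  <=>  a*d OP c*b, done in 64 bits to avoid overflow */
    #define FRACT_CMP(a, OP, b) \
            ((uint64_t)(a).numerator * (b).denominator OP \
             (uint64_t)(b).numerator * (a).denominator)

    int main(void)
    {
            const struct fract tpf_min = { 1, 1000 };  /* FPS_MAX = 1000 */
            const struct fract tpf_max = { 1000, 1 };
            struct fract tpf = { 1, 100000 };          /* absurd 100000 fps request */

            if (FRACT_CMP(tpf, <, tpf_min))
                    tpf = tpf_min;
            if (FRACT_CMP(tpf, >, tpf_max))
                    tpf = tpf_max;

            printf("clipped to %u/%u\n", tpf.numerator, tpf.denominator);  /* 1/1000 */
            return 0;
    }

With the NTSC default of 1001/30000 that the driver keeps, frames_to_ms(dev, 1) above evaluates to 1001 * 1000 / 30000 = 33 ms per frame.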
+
/* --- controls ---------------------------------------------- */
static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -1202,6 +1320,9 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
@@ -1209,7 +1330,7 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device vivi_template = {
+static const struct video_device vivi_template = {
.name = "vivi",
.fops = &vivi_fops,
.ioctl_ops = &vivi_ioctl_ops,
@@ -1260,6 +1381,7 @@ static int __init vivi_create_instance(int inst)
goto free_dev;
dev->fmt = &formats[0];
+ dev->timeperframe = tpf_default;
dev->width = 640;
dev->height = 480;
dev->pixelsize = dev->fmt->depth / 8;
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 9e580166161a..24e64a09884c 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -124,6 +124,18 @@ config USB_KEENE
To compile this driver as a module, choose M here: the
module will be called radio-keene.
+config USB_MA901
+ tristate "Masterkit MA901 USB FM radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers or headphones.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-ma901.
+
config RADIO_TEA5764
tristate "TEA5764 I2C FM radio support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index c03ce4fe74e9..303eaebdb85a 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_DSBR) += dsbr100.o
obj-$(CONFIG_RADIO_SI470X) += si470x/
obj-$(CONFIG_USB_MR800) += radio-mr800.o
obj-$(CONFIG_USB_KEENE) += radio-keene.o
+obj-$(CONFIG_USB_MA901) += radio-ma901.o
obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c
new file mode 100644
index 000000000000..c61f590029ad
--- /dev/null
+++ b/drivers/media/radio/radio-ma901.c
@@ -0,0 +1,460 @@
+/*
+ * Driver for the MasterKit MA901 USB FM radio. This device plugs
+ * into the USB port and an analog audio input or headphones, so this thing
+ * only deals with initialization, frequency setting, volume.
+ *
+ * Copyright (c) 2012 Alexey Klimov <klimov.linux@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+
+#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
+#define DRIVER_DESC "Masterkit MA901 USB FM radio driver"
+#define DRIVER_VERSION "0.0.1"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+#define USB_MA901_VENDOR 0x16c0
+#define USB_MA901_PRODUCT 0x05df
+
+/* dev_warn macro with driver name */
+#define MA901_DRIVER_NAME "radio-ma901"
+#define ma901radio_dev_warn(dev, fmt, arg...) \
+ dev_warn(dev, MA901_DRIVER_NAME " - " fmt, ##arg)
+
+#define ma901radio_dev_err(dev, fmt, arg...) \
+ dev_err(dev, MA901_DRIVER_NAME " - " fmt, ##arg)
+
+/* Probably USB_TIMEOUT should be configurable via a module parameter */
+#define BUFFER_LENGTH 8
+#define USB_TIMEOUT 500
+
+#define FREQ_MIN 87.5
+#define FREQ_MAX 108.0
+#define FREQ_MUL 16000
+
+#define MA901_VOLUME_MAX 16
+#define MA901_VOLUME_MIN 0
+
+/* Commands that device should understand
+ * List isn't full and will be updated with implementation of new functions
+ */
+#define MA901_RADIO_SET_FREQ 0x03
+#define MA901_RADIO_SET_VOLUME 0x04
+#define MA901_RADIO_SET_MONO_STEREO 0x05
+
+/* Comfortable defines for ma901radio_set_stereo */
+#define MA901_WANT_STEREO 0x50
+#define MA901_WANT_MONO 0xd0
+
+/* module parameter */
+static int radio_nr = -1;
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr, "Radio file number");
+
+/* Data for one (physical) device */
+struct ma901radio_device {
+ /* reference to USB and video device */
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+
+ u8 *buffer;
+ struct mutex lock; /* buffer locking */
+ int curfreq;
+ u16 volume;
+ int stereo;
+ bool muted;
+};
+
+static inline struct ma901radio_device *to_ma901radio_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct ma901radio_device, v4l2_dev);
+}
+
+/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
+static int ma901radio_set_freq(struct ma901radio_device *radio, int freq)
+{
+ unsigned int freq_send = 0x300 + (freq >> 5) / 25;
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_FREQ;
+ radio->buffer[2] = ((freq_send >> 8) & 0xff) + 0x80;
+ radio->buffer[3] = freq_send & 0xff;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ radio->curfreq = freq;
+ return 0;
+}
+
+static int ma901radio_set_volume(struct ma901radio_device *radio, u16 vol_to_set)
+{
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_VOLUME;
+ radio->buffer[2] = 0xc2;
+ radio->buffer[3] = vol_to_set + 0x20;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ radio->volume = vol_to_set;
+ return retval;
+}
+
+static int ma901_set_stereo(struct ma901radio_device *radio, u8 stereo)
+{
+ int retval;
+
+ radio->buffer[0] = 0x0a;
+ radio->buffer[1] = MA901_RADIO_SET_MONO_STEREO;
+ radio->buffer[2] = stereo;
+ radio->buffer[3] = 0x00;
+ radio->buffer[4] = 0x00;
+ radio->buffer[5] = 0x00;
+ radio->buffer[6] = 0x00;
+ radio->buffer[7] = 0x00;
+
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 9, 0x21, 0x0300, 0,
+ radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+
+ if (retval < 0)
+ return retval;
+
+ if (stereo == MA901_WANT_STEREO)
+ radio->stereo = V4L2_TUNER_MODE_STEREO;
+ else
+ radio->stereo = V4L2_TUNER_MODE_MONO;
+
+ return retval;
+}
+
+/* Handle unplugging the device.
+ * We call video_unregister_device in any case.
+ * The last function called in this procedure is
+ * usb_ma901radio_device_release.
+ */
+static void usb_ma901radio_disconnect(struct usb_interface *intf)
+{
+ struct ma901radio_device *radio = to_ma901radio_dev(usb_get_intfdata(intf));
+
+ mutex_lock(&radio->lock);
+ video_unregister_device(&radio->vdev);
+ usb_set_intfdata(intf, NULL);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/* vidioc_querycap - query device capabilities */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-ma901", sizeof(v->driver));
+ strlcpy(v->card, "Masterkit MA901 USB FM Radio", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* vidioc_g_tuner - get tuner attributes */
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ v->signal = 0;
+
+ /* TODO: the same notes as in _probe() apply here.
+ * Once reading of stats is implemented we can call
+ * ma901radio_get_stat().
+ * retval = ma901radio_get_stat(radio, &is_stereo, &v->signal);
+ */
+
+ strcpy(v->name, "FM");
+ v->type = V4L2_TUNER_RADIO;
+ v->rangelow = FREQ_MIN * FREQ_MUL;
+ v->rangehigh = FREQ_MAX * FREQ_MUL;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ /* v->rxsubchans = is_stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; */
+ v->audmode = radio->stereo ?
+ V4L2_TUNER_MODE_STEREO : V4L2_TUNER_MODE_MONO;
+ return 0;
+}
+
+/* vidioc_s_tuner - set tuner attributes */
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ /* mono/stereo selector */
+ switch (v->audmode) {
+ case V4L2_TUNER_MODE_MONO:
+ return ma901_set_stereo(radio, MA901_WANT_MONO);
+ default:
+ return ma901_set_stereo(radio, MA901_WANT_STEREO);
+ }
+}
+
+/* vidioc_s_frequency - set tuner radio frequency */
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ return ma901radio_set_freq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
+}
+
+/* vidioc_g_frequency - get tuner radio frequency */
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct ma901radio_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+ f->frequency = radio->curfreq;
+
+ return 0;
+}
+
+static int usb_ma901radio_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ma901radio_device *radio =
+ container_of(ctrl->handler, struct ma901radio_device, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME: /* set volume */
+ return ma901radio_set_volume(radio, (u16)ctrl->val);
+ }
+
+ return -EINVAL;
+}
+
+/* TODO: Do we really need to implement suspend and resume functions?
+ * The radio has its own memory and will continue playing if power is present
+ * on the USB port, and on resume it will start playing again based on the
+ * freq and volume values in device memory.
+ */
+static int usb_ma901radio_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ return 0;
+}
+
+static int usb_ma901radio_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops usb_ma901radio_ctrl_ops = {
+ .s_ctrl = usb_ma901radio_s_ctrl,
+};
+
+/* File system interface */
+static const struct v4l2_file_operations usb_ma901radio_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops usb_ma901radio_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
+{
+ struct ma901radio_device *radio = to_ma901radio_dev(v4l2_dev);
+
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_ma901radio_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct ma901radio_device *radio;
+ int retval = 0;
+
+ radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
+ if (!radio) {
+ dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+ if (!radio->buffer) {
+ dev_err(&intf->dev, "kmalloc for radio->buffer failed\n");
+ retval = -ENOMEM;
+ goto err_nobuf;
+ }
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ goto err_v4l2;
+ }
+
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+
+ /* TODO: It looks like this radio doesn't have a mute/unmute control
+ * and the Windows program just emulates it using the volume control.
+ * Let's plan to do the same in this driver.
+ *
+ * v4l2_ctrl_new_std(&radio->hdl, &usb_ma901radio_ctrl_ops,
+ * V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ */
+
+ v4l2_ctrl_new_std(&radio->hdl, &usb_ma901radio_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, MA901_VOLUME_MIN,
+ MA901_VOLUME_MAX, 1, MA901_VOLUME_MAX);
+
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_ctrl;
+ }
+ mutex_init(&radio->lock);
+
+ radio->v4l2_dev.ctrl_handler = &radio->hdl;
+ radio->v4l2_dev.release = usb_ma901radio_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_ma901radio_fops;
+ radio->vdev.ioctl_ops = &usb_ma901radio_ioctl_ops;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.lock = &radio->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+ radio->curfreq = 95.21 * FREQ_MUL;
+
+ video_set_drvdata(&radio->vdev, radio);
+
+ /* TODO: we can get some statistics (freq, volume) from the device,
+ * but that isn't implemented yet. After insertion into the USB port the
+ * radio sets its frequency and starts playing without any initialization.
+ * So we don't call usb_ma901radio_init/get_stat() here.
+ * retval = usb_ma901radio_init(radio);
+ */
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO,
+ radio_nr);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register video device\n");
+ goto err_vdev;
+ }
+
+ return 0;
+
+err_vdev:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_ctrl:
+ v4l2_device_unregister(&radio->v4l2_dev);
+err_v4l2:
+ kfree(radio->buffer);
+err_nobuf:
+ kfree(radio);
+err:
+ return retval;
+}
+
+/* USB Device ID List */
+static struct usb_device_id usb_ma901radio_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(USB_MA901_VENDOR, USB_MA901_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_ma901radio_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_ma901radio_driver = {
+ .name = MA901_DRIVER_NAME,
+ .probe = usb_ma901radio_probe,
+ .disconnect = usb_ma901radio_disconnect,
+ .suspend = usb_ma901radio_suspend,
+ .resume = usb_ma901radio_resume,
+ .reset_resume = usb_ma901radio_resume,
+ .id_table = usb_ma901radio_device_table,
+};
+
+module_usb_driver(usb_ma901radio_driver);
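
The register encoding in ma901radio_set_freq() above is terse: the frequency arrives in V4L2 TUNER_LOW units (1/16 kHz), is divided down and offset by 0x300, and the high byte is sent with 0x80 added. A small worked example for the 95.21 MHz default that probe programs (plain C, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned freq = 1523360;        /* 95.21 MHz in 1/16 kHz units (95.21 * 16000) */
            unsigned freq_send = 0x300 + (freq >> 5) / 25;  /* 768 + 47605 / 25 = 2672 */
            unsigned b2 = ((freq_send >> 8) & 0xff) + 0x80;
            unsigned b3 = freq_send & 0xff;

            /* prints: freq_send=0xa70 buffer[2]=0x8a buffer[3]=0x70 */
            printf("freq_send=0x%x buffer[2]=0x%02x buffer[3]=0x%02x\n",
                   freq_send, b2, b3);
            return 0;
    }

The two computed bytes land in radio->buffer[2] and radio->buffer[3] of the 8-byte control message that usb_control_msg() sends above.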
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 11f76ed4c6fb..3d0ff4404d12 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -17,49 +17,36 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <sound/aci.h>
static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-static bool mono;
-module_param(mono, bool, 0);
-MODULE_PARM_DESC(mono, "Force tuner into mono mode.");
-
struct pcm20 {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
unsigned long freq;
- int muted;
+ u32 audmode;
struct snd_miro_aci *aci;
struct mutex lock;
};
static struct pcm20 pcm20_card = {
- .freq = 87*16000,
- .muted = 1,
+ .freq = 87 * 16000,
+ .audmode = V4L2_TUNER_MODE_STEREO,
};
-static int pcm20_mute(struct pcm20 *dev, unsigned char mute)
-{
- dev->muted = mute;
- return snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, mute, -1);
-}
-
-static int pcm20_stereo(struct pcm20 *dev, unsigned char stereo)
-{
- return snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO, !stereo, -1);
-}
-
static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
{
unsigned char freql;
unsigned char freqh;
struct snd_miro_aci *aci = dev->aci;
- dev->freq = freq;
-
freq /= 160;
if (!(aci->aci_version == 0x07 || aci->aci_version >= 0xb0))
freq /= 10; /* I don't know exactly which version
@@ -67,46 +54,66 @@ static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
freql = freq & 0xff;
freqh = freq >> 8;
- pcm20_stereo(dev, !mono);
return snd_aci_cmd(aci, ACI_WRITE_TUNE, freql, freqh);
}
static const struct v4l2_file_operations pcm20_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .poll = v4l2_ctrl_poll,
+ .release = v4l2_fh_release,
.unlocked_ioctl = video_ioctl2,
};
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
+ struct pcm20 *dev = video_drvdata(file);
+
strlcpy(v->driver, "Miro PCM20", sizeof(v->driver));
strlcpy(v->card, "Miro PCM20", sizeof(v->card));
- strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = 0x1;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev->v4l2_dev.name);
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- if (v->index) /* Only 1 tuner */
+ struct pcm20 *dev = video_drvdata(file);
+ int res;
+
+ if (v->index)
return -EINVAL;
strlcpy(v->name, "FM", sizeof(v->name));
v->type = V4L2_TUNER_RADIO;
v->rangelow = 87*16000;
v->rangehigh = 108*16000;
- v->signal = 0xffff;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_MONO;
+ res = snd_aci_cmd(dev->aci, ACI_READ_TUNERSTATION, -1, -1);
+ v->signal = (res & 0x80) ? 0 : 0xffff;
+ /* Note: stereo detection does not work while the audio is muted;
+ it will default to mono in that case. */
+ res = snd_aci_cmd(dev->aci, ACI_READ_TUNERSTEREO, -1, -1);
+ v->rxsubchans = (res & 0x40) ? V4L2_TUNER_SUB_MONO :
+ V4L2_TUNER_SUB_STEREO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->audmode = dev->audmode;
return 0;
}
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- return v->index ? -EINVAL : 0;
+ struct pcm20 *dev = video_drvdata(file);
+
+ if (v->index)
+ return -EINVAL;
+ if (v->audmode > V4L2_TUNER_MODE_STEREO)
+ v->audmode = V4L2_TUNER_MODE_STEREO;
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO,
+ v->audmode == V4L2_TUNER_MODE_MONO, -1);
+ return 0;
}
static int vidioc_g_frequency(struct file *file, void *priv,
@@ -131,75 +138,21 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
return -EINVAL;
- dev->freq = f->frequency;
- pcm20_setfreq(dev, f->frequency);
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct pcm20 *dev = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = dev->muted;
- break;
- default:
- return -EINVAL;
- }
+ dev->freq = clamp(f->frequency, 87 * 16000U, 108 * 16000U);
+ pcm20_setfreq(dev, dev->freq);
return 0;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int pcm20_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct pcm20 *dev = video_drvdata(file);
+ struct pcm20 *dev = container_of(ctrl->handler, struct pcm20, ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- pcm20_mute(dev, ctrl->value);
- break;
- default:
- return -EINVAL;
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, ctrl->val, -1);
+ return 0;
}
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
+ return -EINVAL;
}
static const struct v4l2_ioctl_ops pcm20_ioctl_ops = {
@@ -208,19 +161,20 @@ static const struct v4l2_ioctl_ops pcm20_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops pcm20_ctrl_ops = {
+ .s_ctrl = pcm20_s_ctrl,
};
static int __init pcm20_init(void)
{
struct pcm20 *dev = &pcm20_card;
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct v4l2_ctrl_handler *hdl;
int res;
dev->aci = snd_aci_get_aci();
@@ -229,7 +183,7 @@ static int __init pcm20_init(void)
"you must load the snd-miro driver first!\n");
return -ENODEV;
}
- strlcpy(v4l2_dev->name, "miropcm20", sizeof(v4l2_dev->name));
+ strlcpy(v4l2_dev->name, "radio-miropcm20", sizeof(v4l2_dev->name));
mutex_init(&dev->lock);
res = v4l2_device_register(NULL, v4l2_dev);
@@ -238,20 +192,35 @@ static int __init pcm20_init(void)
return -EINVAL;
}
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_std(hdl, &pcm20_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_dev->ctrl_handler = hdl;
+ if (hdl->error) {
+ res = hdl->error;
+ v4l2_err(v4l2_dev, "Could not register control\n");
+ goto err_hdl;
+ }
strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
dev->vdev.v4l2_dev = v4l2_dev;
dev->vdev.fops = &pcm20_fops;
dev->vdev.ioctl_ops = &pcm20_ioctl_ops;
dev->vdev.release = video_device_release_empty;
dev->vdev.lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO,
+ dev->audmode == V4L2_TUNER_MODE_MONO, -1);
+ pcm20_setfreq(dev, dev->freq);
if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
- goto fail;
+ goto err_hdl;
v4l2_info(v4l2_dev, "Mirosound PCM20 Radio tuner\n");
return 0;
-fail:
+err_hdl:
+ v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(v4l2_dev);
return -EINVAL;
}
@@ -265,6 +234,8 @@ static void __exit pcm20_cleanup(void)
struct pcm20 *dev = &pcm20_card;
video_unregister_device(&dev->vdev);
+ snd_aci_cmd(dev->aci, ACI_SET_TUNERMUTE, 1, -1);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(&dev->v4l2_dev);
}
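
pcm20_s_ctrl() above recovers the driver state from the embedded control handler with container_of(). For readers new to that idiom, a stand-alone illustration (the macro here is a simplified copy for demonstration; the kernel's definition in <linux/kernel.h> additionally type-checks the pointer):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_dev {
            unsigned long freq;
            int ctrl_handler;                   /* stands in for the embedded struct */
    };

    static void demo_s_ctrl(int *handler)       /* a callback only receives the member... */
    {
            struct demo_dev *dev = container_of(handler, struct demo_dev, ctrl_handler);

            printf("freq = %lu\n", dev->freq);  /* ...yet can reach the whole structure */
    }

    int main(void)
    {
            struct demo_dev dev = { .freq = 87 * 16000 };

            demo_s_ctrl(&dev.ctrl_handler);     /* prints "freq = 1392000" */
            return 0;
    }
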
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index cabbe3adf435..02151e0e6e63 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2085,8 +2085,7 @@ static int wl1273_fm_radio_probe(struct platform_device *pdev)
}
/* V4L2 configuration */
- memcpy(&radio->videodev, &wl1273_viddev_template,
- sizeof(wl1273_viddev_template));
+ radio->videodev = wl1273_viddev_template;
radio->videodev.v4l2_dev = &radio->v4l2dev;
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 2f089b4252df..467e95575488 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -163,7 +163,7 @@ struct si470x_device {
struct completion completion;
bool status_rssi_auto_update; /* Does RSSI get updated automatic? */
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
+#if IS_ENABLED(CONFIG_USB_SI470X)
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
@@ -179,7 +179,7 @@ struct si470x_device {
unsigned char hardware_version;
#endif
-#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
+#if IS_ENABLED(CONFIG_I2C_SI470X)
struct i2c_client *client;
#endif
};
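
The si470x.h hunks above (and several tuner headers later in this section) replace the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test with IS_ENABLED(CONFIG_FOO), which is true whether the option is built in (=y) or modular (=m). Roughly how the macro achieves that, in a simplified form (the real macros live in include/linux/kconfig.h):

    /* Kconfig puts one of these into autoconf.h, depending on the .config:
     *   #define CONFIG_USB_SI470X 1          built in  (=y)
     *   #define CONFIG_USB_SI470X_MODULE 1   modular   (=m)
     */
    #define PLACEHOLDER_1 0,
    #define TAKE_SECOND(ignored, val, ...) val
    #define IS_SET3(arg_or_junk) TAKE_SECOND(arg_or_junk 1, 0, 0)
    #define IS_SET2(val) IS_SET3(PLACEHOLDER_##val)
    #define IS_SET(cfg) IS_SET2(cfg)
    #define MY_IS_ENABLED(cfg) (IS_SET(cfg) || IS_SET(cfg##_MODULE))

    #define CONFIG_DEMO_MODULE 1                /* pretend the driver is built as =m */

    #if MY_IS_ENABLED(CONFIG_DEMO)              /* expands to (0 || 1) */
    int demo_enabled = 1;                       /* this branch is compiled */
    #endif
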
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 602ef7ac8c24..a002234ed5de 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1563,8 +1563,7 @@ int fmc_prepare(struct fmdev *fmdev)
fmdev->irq_info.mask = FM_MAL_EVENT;
/* Region info */
- memcpy(&fmdev->rx.region, &region_configs[default_radio_region],
- sizeof(struct region_info));
+ fmdev->rx.region = region_configs[default_radio_region];
fmdev->rx.mute_mode = FM_MUTE_OFF;
fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 2d6fb26a0170..4d6a63fe6c5e 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -872,11 +872,11 @@ static int ati_remote_probe(struct usb_interface *interface,
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
rc_dev = rc_allocate_device();
if (!ati_remote || !rc_dev)
- goto fail1;
+ goto exit_free_dev_rdev;
/* Allocate URB buffers, URBs */
if (ati_remote_alloc_buffers(udev, ati_remote))
- goto fail2;
+ goto exit_free_buffers;
ati_remote->endpoint_in = endpoint_in;
ati_remote->endpoint_out = endpoint_out;
@@ -924,12 +924,12 @@ static int ati_remote_probe(struct usb_interface *interface,
/* Device Hardware Initialization - fills in ati_remote->idev from udev. */
err = ati_remote_initialize(ati_remote);
if (err)
- goto fail3;
+ goto exit_kill_urbs;
/* Set up and register rc device */
err = rc_register_device(ati_remote->rdev);
if (err)
- goto fail3;
+ goto exit_kill_urbs;
/* use our delay for rc_dev */
ati_remote->rdev->input_dev->rep[REP_DELAY] = repeat_delay;
@@ -939,7 +939,7 @@ static int ati_remote_probe(struct usb_interface *interface,
input_dev = input_allocate_device();
if (!input_dev) {
err = -ENOMEM;
- goto fail4;
+ goto exit_unregister_device;
}
ati_remote->idev = input_dev;
@@ -947,19 +947,24 @@ static int ati_remote_probe(struct usb_interface *interface,
err = input_register_device(input_dev);
if (err)
- goto fail5;
+ goto exit_free_input_device;
}
usb_set_intfdata(interface, ati_remote);
return 0;
- fail5: input_free_device(input_dev);
- fail4: rc_unregister_device(rc_dev);
+ exit_free_input_device:
+ input_free_device(input_dev);
+ exit_unregister_device:
+ rc_unregister_device(rc_dev);
rc_dev = NULL;
- fail3: usb_kill_urb(ati_remote->irq_urb);
+ exit_kill_urbs:
+ usb_kill_urb(ati_remote->irq_urb);
usb_kill_urb(ati_remote->out_urb);
- fail2: ati_remote_free_buffers(ati_remote);
- fail1: rc_free_device(rc_dev);
+ exit_free_buffers:
+ ati_remote_free_buffers(ati_remote);
+ exit_free_dev_rdev:
+ rc_free_device(rc_dev);
kfree(ati_remote);
return err;
}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cef04786b52f..ee6c984cade2 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1003,7 +1003,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
- goto failure;
+ goto exit_free_dev_rdev;
/* validate resources */
error = -ENODEV;
@@ -1014,10 +1014,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
- goto failure;
+ goto exit_free_dev_rdev;
if (!pnp_irq_valid(pnp_dev, 0))
- goto failure;
+ goto exit_free_dev_rdev;
spin_lock_init(&dev->hw_lock);
@@ -1033,7 +1033,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
- goto failure;
+ goto exit_free_dev_rdev;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
@@ -1075,30 +1075,30 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
device_set_wakeup_capable(&pnp_dev->dev, true);
device_set_wakeup_enable(&pnp_dev->dev, true);
+ error = rc_register_device(rdev);
+ if (error < 0)
+ goto exit_free_dev_rdev;
+
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
- goto failure;
+ goto exit_unregister_device;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
- goto failure2;
+ goto exit_release_hw_io;
}
- error = rc_register_device(rdev);
- if (error < 0)
- goto failure3;
-
pr_notice("driver has been successfully loaded\n");
return 0;
-failure3:
- free_irq(dev->irq, dev);
-failure2:
+exit_release_hw_io:
release_region(dev->hw_io, ENE_IO_SIZE);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(dev);
return error;
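
The ene_ir hunks above, and the fintek, ite and nuvoton hunks that follow, all move rc_register_device() ahead of request_region()/request_irq() and add a matching rc_unregister_device() to the unwind path. The likely motivation: the interrupt handler may run as soon as the IRQ is requested, and it feeds data into the rc device, so that device must already be fully registered. A schematic ISR, with placeholder names rather than a literal driver:

    #include <linux/interrupt.h>
    #include <media/rc-core.h>

    struct sketch_dev {
            struct rc_dev *rdev;                /* placeholder driver state */
    };

    void sketch_feed_rc_core(struct rc_dev *rdev);      /* placeholder, not a real API */

    /* May be entered the moment request_irq() succeeds ... */
    static irqreturn_t sketch_isr(int irq, void *priv)
    {
            struct sketch_dev *dev = priv;

            sketch_feed_rc_core(dev->rdev);     /* ... so rdev must be registered first */
            return IRQ_HANDLED;
    }
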
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 1df410e13688..d6fa441655d2 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -500,18 +500,18 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
ret = -ENODEV;
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
fintek->cir_addr = pnp_port_start(pdev, 0);
@@ -528,7 +528,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
ret = fintek_hw_detect(fintek);
if (ret)
- goto failure;
+ goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
fintek_config_mode_enable(fintek);
@@ -557,33 +557,35 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+ fintek->rdev = rdev;
+
ret = -EBUSY;
/* now claim resources */
if (!request_region(fintek->cir_addr,
fintek->cir_port_len, FINTEK_DRIVER_NAME))
- goto failure;
+ goto exit_free_dev_rdev;
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
- goto failure2;
+ goto exit_free_cir_addr;
ret = rc_register_device(rdev);
if (ret)
- goto failure3;
+ goto exit_free_irq;
device_init_wakeup(&pdev->dev, true);
- fintek->rdev = rdev;
+
fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
if (debug)
cir_dump_regs(fintek);
return 0;
-failure3:
+exit_free_irq:
free_irq(fintek->cir_irq, fintek);
-failure2:
+exit_free_cir_addr:
release_region(fintek->cir_addr, fintek->cir_port_len);
-failure:
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(fintek);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 4f71a7d1f019..8b82ae9bd686 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
@@ -30,6 +31,45 @@ struct gpio_rc_dev {
bool active_low;
};
+#ifdef CONFIG_OF
+/*
+ * Translate OpenFirmware node properties into platform_data
+ */
+static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
+ struct gpio_ir_recv_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ enum of_gpio_flags flags;
+ int gpio;
+
+ gpio = of_get_gpio_flags(np, 0, &flags);
+ if (gpio < 0) {
+ if (gpio != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
+ return gpio;
+ }
+
+ pdata->gpio_nr = gpio;
+ pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
+ /* probe() takes care of map_name == NULL or allowed_protos == 0 */
+ pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
+ pdata->allowed_protos = 0;
+
+ return 0;
+}
+
+static struct of_device_id gpio_ir_recv_of_match[] = {
+ { .compatible = "gpio-ir-receiver", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+
+#else /* !CONFIG_OF */
+
+#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
+
+#endif
+
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
struct gpio_rc_dev *gpio_dev = dev_id;
@@ -66,6 +106,17 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
pdev->dev.platform_data;
int rc;
+ if (pdev->dev.of_node) {
+ struct gpio_ir_recv_platform_data *dtpdata =
+ devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
+ if (!dtpdata)
+ return -ENOMEM;
+ rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
+ if (rc)
+ return rc;
+ pdata = dtpdata;
+ }
+
if (!pdata)
return -EINVAL;
@@ -129,12 +180,12 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
err_request_irq:
platform_set_drvdata(pdev, NULL);
rc_unregister_device(rcdev);
+ rcdev = NULL;
err_register_rc_device:
err_gpio_direction_input:
gpio_free(pdata->gpio_nr);
err_gpio_request:
rc_free_device(rcdev);
- rcdev = NULL;
err_allocate_device:
kfree(gpio_dev);
return rc;
@@ -148,7 +199,6 @@ static int gpio_ir_recv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
rc_unregister_device(gpio_dev->rcdev);
gpio_free(gpio_dev->gpio_nr);
- rc_free_device(gpio_dev->rcdev);
kfree(gpio_dev);
return 0;
}
@@ -192,6 +242,7 @@ static struct platform_driver gpio_ir_recv_driver = {
.driver = {
.name = GPIO_IR_DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
#endif
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index b99b096d8a8f..a4ab2e6b3f82 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -58,6 +58,7 @@ struct iguanair {
char phys[64];
};
+#define CMD_NOP 0x00
#define CMD_GET_VERSION 0x01
#define CMD_GET_BUFSIZE 0x11
#define CMD_GET_FEATURES 0x10
@@ -196,6 +197,10 @@ static void iguanair_irq_out(struct urb *urb)
if (urb->status)
dev_dbg(ir->dev, "Error: out urb status = %d\n", urb->status);
+
+ /* if we sent a NOP packet, do not expect a response */
+ if (urb->status == 0 && ir->packet->header.cmd == CMD_NOP)
+ complete(&ir->completion);
}
static int iguanair_send(struct iguanair *ir, unsigned size)
@@ -219,10 +224,17 @@ static int iguanair_get_features(struct iguanair *ir)
{
int rc;
+ /*
+ * On cold boot, the iguanair initializes on the first packet
+ * received but does not process that packet. Send an empty
+ * packet.
+ */
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
- ir->packet->header.cmd = CMD_GET_VERSION;
+ ir->packet->header.cmd = CMD_NOP;
+ iguanair_send(ir, sizeof(ir->packet->header));
+ ir->packet->header.cmd = CMD_GET_VERSION;
rc = iguanair_send(ir, sizeof(ir->packet->header));
if (rc) {
dev_info(ir->dev, "failed to get version\n");
@@ -255,19 +267,14 @@ static int iguanair_get_features(struct iguanair *ir)
ir->packet->header.cmd = CMD_GET_FEATURES;
rc = iguanair_send(ir, sizeof(ir->packet->header));
- if (rc) {
+ if (rc)
dev_info(ir->dev, "failed to get features\n");
- goto out;
- }
-
out:
return rc;
}
static int iguanair_receiver(struct iguanair *ir, bool enable)
{
- int rc;
-
ir->packet->header.start = 0;
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;
@@ -275,9 +282,7 @@ static int iguanair_receiver(struct iguanair *ir, bool enable)
if (enable)
ir_raw_event_reset(ir->rc);
- rc = iguanair_send(ir, sizeof(ir->packet->header));
-
- return rc;
+ return iguanair_send(ir, sizeof(ir->packet->header));
}
/*
@@ -512,6 +517,7 @@ static int iguanair_probe(struct usb_interface *intf,
rc->rx_resolution = RX_RESOLUTION;
iguanair_set_tx_carrier(rc, 38000);
+ iguanair_set_tx_mask(rc, 0);
ret = rc_register_device(rc);
if (ret < 0) {
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 78d109b978dd..dec203bb06f6 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1221,7 +1221,7 @@ static u32 imon_panel_key_lookup(u64 code)
static bool imon_mouse_event(struct imon_context *ictx,
unsigned char *buf, int len)
{
- char rel_x = 0x00, rel_y = 0x00;
+ signed char rel_x = 0x00, rel_y = 0x00;
u8 right_shift = 1;
bool mouse_input = true;
int dir = 0;
@@ -1297,7 +1297,7 @@ static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
{
int dir = 0;
- char rel_x = 0x00, rel_y = 0x00;
+ signed char rel_x = 0x00, rel_y = 0x00;
u16 timeout, threshold;
u32 scancode = KEY_RESERVED;
unsigned long flags;
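
The two imon hunks above turn plain char into signed char for the relative mouse deltas. Whether plain char is signed is implementation-defined, and on some architectures Linux supports (ARM and PowerPC, for instance) it is unsigned, so a negative delta reported by the remote would be misread there. A small user-space illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned char raw = 0xff;                   /* the device reports a delta of -1 */
            char plain = (char)raw;                     /* -1 or 255, depending on the ABI */
            signed char fixed = (signed char)raw;       /* -1 on every Linux target */

            printf("plain char: %d, signed char: %d\n", (int)plain, (int)fixed);
            return 0;
    }
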
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 97dc8d13b06b..17c94be9f24c 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -31,11 +31,6 @@ static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-#ifdef MODULE
-/* Used to load the decoders */
-static struct work_struct wq_load;
-#endif
-
static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
@@ -347,8 +342,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
-#ifdef MODULE
-static void init_decoders(struct work_struct *work)
+void ir_raw_init(void)
{
/* Load the decoder modules */
@@ -365,12 +359,3 @@ static void init_decoders(struct work_struct *work)
it is needed to change the CONFIG_MODULE test at rc-core.h
*/
}
-#endif
-
-void ir_raw_init(void)
-{
-#ifdef MODULE
- INIT_WORK(&wq_load, init_decoders);
- schedule_work(&wq_load);
-#endif
-}
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 1b8669b6d042..dd8237324c09 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1472,7 +1472,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
itdev->rdev = rdev;
ret = -ENODEV;
@@ -1498,12 +1498,12 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (!pnp_port_valid(pdev, io_rsrc_no) ||
pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
/* store resource values */
@@ -1591,29 +1591,29 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->driver_name = ITE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto exit_free_dev_rdev;
+
ret = -EBUSY;
/* now claim resources */
if (!request_region(itdev->cir_addr,
dev_desc->io_region_size, ITE_DRIVER_NAME))
- goto failure;
+ goto exit_unregister_device;
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
- goto failure2;
-
- ret = rc_register_device(rdev);
- if (ret)
- goto failure3;
+ goto exit_release_cir_addr;
ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
return 0;
-failure3:
- free_irq(itdev->cir_irq, itdev);
-failure2:
+exit_release_cir_addr:
release_region(itdev->cir_addr, itdev->params.io_region_size);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(itdev);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index ab84d66c67c1..778661971aed 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tevii-nec.o \
rc-tivo.o \
rc-total-media-in-hand.o \
+ rc-total-media-in-hand-02.o \
rc-trekstor.o \
rc-tt-1500.o \
rc-twinhan1027.o \
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
new file mode 100644
index 000000000000..47270f72ebf0
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
@@ -0,0 +1,86 @@
+/*
+ * Total Media In Hand_02 remote controller keytable for Mygica X8507
+ *
+ * Copyright (C) 2012 Alfredo J. Delaiti <alfredodelaiti@netscape.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+
+static struct rc_map_table total_media_in_hand_02[] = {
+ { 0x0000, KEY_0 },
+ { 0x0001, KEY_1 },
+ { 0x0002, KEY_2 },
+ { 0x0003, KEY_3 },
+ { 0x0004, KEY_4 },
+ { 0x0005, KEY_5 },
+ { 0x0006, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0008, KEY_8 },
+ { 0x0009, KEY_9 },
+ { 0x000a, KEY_MUTE },
+ { 0x000b, KEY_STOP }, /* Stop */
+ { 0x000c, KEY_POWER2 }, /* Turn on/off application */
+ { 0x000d, KEY_OK }, /* OK */
+ { 0x000e, KEY_CAMERA }, /* Snapshot */
+ { 0x000f, KEY_ZOOM }, /* Full Screen/Restore */
+ { 0x0010, KEY_RIGHT }, /* Right arrow */
+ { 0x0011, KEY_LEFT }, /* Left arrow */
+ { 0x0012, KEY_CHANNELUP },
+ { 0x0013, KEY_CHANNELDOWN },
+ { 0x0014, KEY_SHUFFLE },
+ { 0x0016, KEY_PAUSE },
+ { 0x0017, KEY_PLAY }, /* Play */
+ { 0x001e, KEY_TIME }, /* Time Shift */
+ { 0x001f, KEY_RECORD },
+ { 0x0020, KEY_UP },
+ { 0x0021, KEY_DOWN },
+ { 0x0025, KEY_POWER }, /* Turn off computer */
+ { 0x0026, KEY_REWIND }, /* FR << */
+ { 0x0027, KEY_FASTFORWARD }, /* FF >> */
+ { 0x0029, KEY_ESC },
+ { 0x002b, KEY_VOLUMEUP },
+ { 0x002c, KEY_VOLUMEDOWN },
+ { 0x002d, KEY_CHANNEL }, /* CH Surfing */
+ { 0x0038, KEY_VIDEO }, /* TV/AV/S-Video/YPbPr */
+};
+
+static struct rc_map_list total_media_in_hand_02_map = {
+ .map = {
+ .scan = total_media_in_hand_02,
+ .size = ARRAY_SIZE(total_media_in_hand_02),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
+ }
+};
+
+static int __init init_rc_map_total_media_in_hand_02(void)
+{
+ return rc_map_register(&total_media_in_hand_02_map);
+}
+
+static void __exit exit_rc_map_total_media_in_hand_02(void)
+{
+ rc_map_unregister(&total_media_in_hand_02_map);
+}
+
+module_init(init_rc_map_total_media_in_hand_02)
+module_exit(exit_rc_map_total_media_in_hand_02)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(" Alfredo J. Delaiti <alfredodelaiti@netscape.net>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index ca12d3289bfe..5247d94fea29 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
unsigned int ret;
if (!ir) {
@@ -565,7 +565,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
__u32 mode;
int result = 0;
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
@@ -650,7 +650,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
size_t length,
loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
unsigned char *buf;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
@@ -752,16 +752,7 @@ EXPORT_SYMBOL(lirc_dev_fop_read);
void *lirc_get_pdata(struct file *file)
{
- void *data = NULL;
-
- if (file && file->f_dentry && file->f_dentry->d_inode &&
- file->f_dentry->d_inode->i_rdev) {
- struct irctl *ir;
- ir = irctls[iminor(file->f_dentry->d_inode)];
- data = ir->d.data;
- }
-
- return data;
+ return irctls[iminor(file_inode(file))]->d.data;
}
EXPORT_SYMBOL(lirc_get_pdata);
@@ -769,7 +760,7 @@ EXPORT_SYMBOL(lirc_get_pdata);
ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
size_t length, loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 9afb9331217d..5b5b6e6f79e8 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -62,7 +62,6 @@
#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
-#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
@@ -291,7 +290,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Philips IR transceiver (Dell branded) */
- { USB_DEVICE(VENDOR_PHILIPS, 0x2093) },
+ { USB_DEVICE(VENDOR_PHILIPS, 0x2093),
+ .driver_info = MCE_GEN2_TX_INV },
/* Realtek MCE IR Receiver and card reader */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
.driver_info = MULTIFUNCTION },
@@ -365,7 +365,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Formosa Industrial Computing */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe042) },
/* Fintek eHome Infrared Transceiver (HP branded) */
- { USB_DEVICE(VENDOR_FINTEK, 0x5168) },
+ { USB_DEVICE(VENDOR_FINTEK, 0x5168),
+ .driver_info = MCE_GEN2_TX_INV },
/* Fintek eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FINTEK, 0x0602) },
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
@@ -788,19 +789,19 @@ static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
- int i, ret = 0;
+ int i, length, ret = 0;
int cmdcount = 0;
- unsigned char *cmdbuf; /* MCE command buffer */
-
- cmdbuf = kzalloc(sizeof(unsigned) * MCE_CMDBUF_SIZE, GFP_KERNEL);
- if (!cmdbuf)
- return -ENOMEM;
+ unsigned char cmdbuf[MCE_CMDBUF_SIZE];
/* MCE tx init header */
cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
cmdbuf[cmdcount++] = ir->tx_mask;
+ /* Send the set TX ports command */
+ mce_async_out(ir, cmdbuf, cmdcount);
+ cmdcount = 0;
+
/* Generate mce packet data */
for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
@@ -809,8 +810,7 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
/* Insert mce packet header every 4th entry */
if ((cmdcount < MCE_CMDBUF_SIZE) &&
- (cmdcount - MCE_TX_HEADER_LENGTH) %
- MCE_CODE_LENGTH == 0)
+ (cmdcount % MCE_CODE_LENGTH) == 0)
cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
/* Insert mce packet data */
@@ -828,17 +828,16 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
(txbuf[i] -= MCE_MAX_PULSE_LENGTH));
}
- /* Fix packet length in last header */
- cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] =
- MCE_COMMAND_IRDATA + (cmdcount - MCE_TX_HEADER_LENGTH) %
- MCE_CODE_LENGTH - 1;
-
/* Check if we have room for the empty packet at the end */
if (cmdcount >= MCE_CMDBUF_SIZE) {
ret = -EINVAL;
goto out;
}
+ /* Fix packet length in last header */
+ length = cmdcount % MCE_CODE_LENGTH;
+ cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
+
/* All mce commands end with an empty packet (0x80) */
cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
@@ -846,7 +845,6 @@ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
mce_async_out(ir, cmdbuf, cmdcount);
out:
- kfree(cmdbuf);
return ret ? ret : count;
}
@@ -1121,16 +1119,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
kfree(data);
-};
+}
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
/* device resume */
mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
- /* get hw/sw revision? */
- mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
/* get wake version (protocol, key, address) */
mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index b8aa9abb31ff..40125d779049 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -986,25 +986,25 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* input device for IR remote (and tx) */
rdev = rc_allocate_device();
if (!rdev)
- goto failure;
+ goto exit_free_dev_rdev;
ret = -ENODEV;
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
if (!pnp_port_valid(pdev, 1) ||
pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
- goto failure;
+ goto exit_free_dev_rdev;
}
nvt->cir_addr = pnp_port_start(pdev, 0);
@@ -1027,7 +1027,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
ret = nvt_hw_detect(nvt);
if (ret)
- goto failure;
+ goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
@@ -1065,31 +1065,32 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* tx bits */
rdev->tx_resolution = XYZ;
#endif
+ nvt->rdev = rdev;
+
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto exit_free_dev_rdev;
ret = -EBUSY;
/* now claim resources */
if (!request_region(nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure;
+ goto exit_unregister_device;
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure2;
+ goto exit_release_cir_addr;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure3;
+ goto exit_free_irq;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure4;
-
- ret = rc_register_device(rdev);
- if (ret)
- goto failure5;
+ goto exit_release_cir_wake_addr;
device_init_wakeup(&pdev->dev, true);
- nvt->rdev = rdev;
+
nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
if (debug) {
cir_dump_regs(nvt);
@@ -1098,15 +1099,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
return 0;
-failure5:
- free_irq(nvt->cir_wake_irq, nvt);
-failure4:
+exit_release_cir_wake_addr:
release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
-failure3:
+exit_free_irq:
free_irq(nvt->cir_irq, nvt);
-failure2:
+exit_release_cir_addr:
release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
-failure:
+exit_unregister_device:
+ rc_unregister_device(rdev);
+exit_free_dev_rdev:
rc_free_device(rdev);
kfree(nvt);
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 96f0a8bb39ea..5d87287ed372 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -165,56 +165,56 @@ void ir_raw_init(void);
/* from ir-nec-decoder.c */
#ifdef CONFIG_IR_NEC_DECODER_MODULE
-#define load_nec_decode() request_module("ir-nec-decoder")
+#define load_nec_decode() request_module_nowait("ir-nec-decoder")
#else
static inline void load_nec_decode(void) { }
#endif
/* from ir-rc5-decoder.c */
#ifdef CONFIG_IR_RC5_DECODER_MODULE
-#define load_rc5_decode() request_module("ir-rc5-decoder")
+#define load_rc5_decode() request_module_nowait("ir-rc5-decoder")
#else
static inline void load_rc5_decode(void) { }
#endif
/* from ir-rc6-decoder.c */
#ifdef CONFIG_IR_RC6_DECODER_MODULE
-#define load_rc6_decode() request_module("ir-rc6-decoder")
+#define load_rc6_decode() request_module_nowait("ir-rc6-decoder")
#else
static inline void load_rc6_decode(void) { }
#endif
/* from ir-jvc-decoder.c */
#ifdef CONFIG_IR_JVC_DECODER_MODULE
-#define load_jvc_decode() request_module("ir-jvc-decoder")
+#define load_jvc_decode() request_module_nowait("ir-jvc-decoder")
#else
static inline void load_jvc_decode(void) { }
#endif
/* from ir-sony-decoder.c */
#ifdef CONFIG_IR_SONY_DECODER_MODULE
-#define load_sony_decode() request_module("ir-sony-decoder")
+#define load_sony_decode() request_module_nowait("ir-sony-decoder")
#else
static inline void load_sony_decode(void) { }
#endif
/* from ir-sanyo-decoder.c */
#ifdef CONFIG_IR_SANYO_DECODER_MODULE
-#define load_sanyo_decode() request_module("ir-sanyo-decoder")
+#define load_sanyo_decode() request_module_nowait("ir-sanyo-decoder")
#else
static inline void load_sanyo_decode(void) { }
#endif
/* from ir-mce_kbd-decoder.c */
#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
-#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
+#define load_mce_kbd_decode() request_module_nowait("ir-mce_kbd-decoder")
#else
static inline void load_mce_kbd_decode(void) { }
#endif
/* from ir-lirc-codec.c */
#ifdef CONFIG_IR_LIRC_CODEC_MODULE
-#define load_lirc_codec() request_module("ir-lirc-codec")
+#define load_lirc_codec() request_module_nowait("ir-lirc-codec")
#else
static inline void load_lirc_codec(void) { }
#endif
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 601d1ac1c688..759a40a42eaa 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -789,8 +789,10 @@ static ssize_t show_protocols(struct device *device,
} else if (dev->raw) {
enabled = dev->raw->enabled_protocols;
allowed = ir_raw_get_allowed_protocols();
- } else
+ } else {
+ mutex_unlock(&dev->lock);
return -ENODEV;
+ }
IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
(long long)allowed,
@@ -890,7 +892,8 @@ static ssize_t store_protocols(struct device *device,
if (i == ARRAY_SIZE(proto_names)) {
IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
count++;
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 1800326f93e6..1b37fe2779f8 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -195,9 +195,6 @@ struct redrat3_dev {
dma_addr_t dma_in;
dma_addr_t dma_out;
- /* locks this structure */
- struct mutex lock;
-
/* rx signal timeout timer */
struct timer_list rx_timeout;
u32 hw_timeout;
@@ -922,8 +919,7 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
return -EAGAIN;
}
- if (count > (RR3_DRIVER_MAXLENS * 2))
- return -EINVAL;
+ count = min_t(unsigned, count, RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN);
/* rr3 will disable rc detector on transmit */
rr3->det_enabled = false;
@@ -936,24 +932,22 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
}
for (i = 0; i < count; i++) {
+ cur_sample_len = redrat3_us_to_len(txbuf[i]);
for (lencheck = 0; lencheck < curlencheck; lencheck++) {
- cur_sample_len = redrat3_us_to_len(txbuf[i]);
if (sample_lens[lencheck] == cur_sample_len)
break;
}
if (lencheck == curlencheck) {
- cur_sample_len = redrat3_us_to_len(txbuf[i]);
rr3_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n",
i, txbuf[i], curlencheck, cur_sample_len);
- if (curlencheck < 255) {
+ if (curlencheck < RR3_DRIVER_MAXLENS) {
/* now convert the value to a proper
* rr3 value.. */
sample_lens[curlencheck] = cur_sample_len;
curlencheck++;
} else {
- dev_err(dev, "signal too long\n");
- ret = -EINVAL;
- goto out;
+ count = i - 1;
+ break;
}
}
}
@@ -1087,6 +1081,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
+ rc->rx_resolution = US_TO_NS(2);
rc->map_name = RC_MAP_HAUPPAUGE;
ret = rc_register_device(rc);
@@ -1202,7 +1197,6 @@ static int redrat3_dev_probe(struct usb_interface *intf,
rr3->bulk_out_buf, ep_out->wMaxPacketSize,
(usb_complete_t)redrat3_write_bulk_callback, rr3);
- mutex_init(&rr3->lock);
rr3->udev = udev;
redrat3_reset(rr3);
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 78be8a914225..cf0d47f57fb2 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -213,19 +213,20 @@ static int ttusbir_probe(struct usb_interface *intf,
/* find the correct alt setting */
for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
- int bulk_out_endp = -1, iso_in_endp = -1;
+ int max_packet, bulk_out_endp = -1, iso_in_endp = -1;
idesc = &intf->altsetting[i].desc;
for (j = 0; j < idesc->bNumEndpoints; j++) {
desc = &intf->altsetting[i].endpoint[j].desc;
+ max_packet = le16_to_cpu(desc->wMaxPacketSize);
if (usb_endpoint_dir_in(desc) &&
usb_endpoint_xfer_isoc(desc) &&
- desc->wMaxPacketSize == 0x10)
+ max_packet == 0x10)
iso_in_endp = j;
else if (usb_endpoint_dir_out(desc) &&
usb_endpoint_xfer_bulk(desc) &&
- desc->wMaxPacketSize == 0x20)
+ max_packet == 0x20)
bulk_out_endp = j;
if (bulk_out_endp != -1 && iso_in_endp != -1) {
@@ -408,9 +409,8 @@ static int ttusbir_resume(struct usb_interface *intf)
struct ttusbir *tt = usb_get_intfdata(intf);
int i, rc;
- led_classdev_resume(&tt->led);
tt->is_led_on = true;
- ttusbir_set_led(tt);
+ led_classdev_resume(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
rc = usb_submit_urb(tt->urb[i], GFP_KERNEL);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 930c61499037..535a18dccbd0 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -154,6 +154,8 @@
#define WBCIR_CNTR_R 0x02
/* Invert TX */
#define WBCIR_IRTX_INV 0x04
+/* Receiver oversampling */
+#define WBCIR_RX_T_OV 0x40
/* Valid banks for the SP3 UART */
enum wbcir_bank {
@@ -394,7 +396,8 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
- duration = ((irdata & 0x7F) + 1) * 2;
+ duration = ((irdata & 0x7F) + 1) *
+ (data->carrier_report_enabled ? 2 : 10);
rawir.pulse = irdata & 0x80 ? false : true;
rawir.duration = US_TO_NS(duration);
@@ -550,6 +553,17 @@ wbcir_set_carrier_report(struct rc_dev *dev, int enable)
wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
+ /* Set a higher sampling resolution if carrier reports are enabled */
+ wbcir_select_bank(data, WBCIR_BANK_2);
+ data->dev->rx_resolution = US_TO_NS(enable ? 2 : 10);
+ outb(enable ? 0x03 : 0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
+ outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
+
+ /* Enable oversampling if carrier reports are enabled */
+ wbcir_select_bank(data, WBCIR_BANK_7);
+ wbcir_set_bits(data->sbase + WBCIR_REG_SP3_RCCFG,
+ enable ? WBCIR_RX_T_OV : 0, WBCIR_RX_T_OV);
+
data->carrier_report_enabled = enable;
spin_unlock_irqrestore(&data->spinlock, flags);
@@ -931,8 +945,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to sample every 2 ns */
- outb(0x03, data->sbase + WBCIR_REG_SP3_BGDL);
+ /* Set baud divisor to sample every 10 us */
+ outb(0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -941,12 +955,9 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /*
- * Disable RX demod, enable run-length enc/dec, set freq span and
- * enable over-sampling
- */
+ /* Disable RX demod, enable run-length enc/dec, set freq span */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0xd0, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
@@ -1093,11 +1104,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->rx_resolution = US_TO_NS(2);
data->dev->allowed_protos = RC_BIT_ALL;
+ err = rc_register_device(data->dev);
+ if (err)
+ goto exit_free_rc;
+
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
err = -EBUSY;
- goto exit_free_rc;
+ goto exit_unregister_device;
}
if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
@@ -1122,24 +1137,20 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
goto exit_release_sbase;
}
- err = rc_register_device(data->dev);
- if (err)
- goto exit_free_irq;
-
device_init_wakeup(&device->dev, 1);
wbcir_init_hw(data);
return 0;
-exit_free_irq:
- free_irq(data->irq, device);
exit_release_sbase:
release_region(data->sbase, SP_IOMEM_LEN);
exit_release_ebase:
release_region(data->ebase, EHFUNC_IOMEM_LEN);
exit_release_wbase:
release_region(data->wbase, WAKEUP_IOMEM_LEN);
+exit_unregister_device:
+ rc_unregister_device(data->dev);
exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index e4882546c283..3932aa81e18c 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -183,8 +183,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
unsigned int i, vco_retries;
u32 freq = p->frequency / 1000;
u32 bandwidth = p->bandwidth_hz / 1000;
- u32 fvco, xin, xdiv, xdivr;
- u16 frac;
+ u32 fvco, xin, frac, xdiv, xdivr;
u8 fa, fp, vco_sel, vco_cal;
u8 regs[FC11_NR_REGS] = { };
@@ -221,18 +220,15 @@ static int fc0011_set_params(struct dvb_frontend *fe)
/* Calc XIN. The PLL reference frequency is 18 MHz. */
xdiv = fvco / 18000;
+ WARN_ON(xdiv > 0xFF);
frac = fvco - xdiv * 18000;
frac = (frac << 15) / 18000;
if (frac >= 16384)
frac += 32786;
if (!frac)
xin = 0;
- else if (frac < 511)
- xin = 512;
- else if (frac < 65026)
- xin = frac;
else
- xin = 65024;
+ xin = clamp_t(u32, frac, 512, 65024);
regs[FC11_REG_XINHI] = xin >> 8;
regs[FC11_REG_XINLO] = xin;
@@ -247,8 +243,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
fa += 8;
}
if (fp > 0x1F) {
- fp &= 0x1F;
- fa &= 0xF;
+ fp = 0x1F;
+ fa = 0xF;
}
if (fa >= fp) {
dev_warn(&priv->i2c->dev,
@@ -351,6 +347,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
vco_cal &= FC11_VCOCAL_VALUEMASK;
switch (vco_sel) {
+ default:
+ WARN_ON(1);
case 0:
if (vco_cal < 8) {
regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
@@ -432,7 +430,8 @@ static int fc0011_set_params(struct dvb_frontend *fe)
err = fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
if (err)
return err;
- err = fc0011_writereg(priv, FC11_REG_16, 0xB);
+ regs[FC11_REG_16] = 0xB;
+ err = fc0011_writereg(priv, FC11_REG_16, regs[FC11_REG_16]);
if (err)
return err;
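
The fc0011 hunk above folds the xin range checks into clamp_t(), the type-explicit variant of the kernel's clamp() helper, which forces a value into a closed interval. Spelled out, the clamped branch is simply (sketch, with frac already computed as above):

    u32 xin;

    if (frac < 512)
            xin = 512;                          /* below the lower bound */
    else if (frac > 65024)
            xin = 65024;                        /* above the upper bound */
    else
            xin = frac;                         /* already in range */
    /* identical to:  xin = clamp_t(u32, frac, 512, 65024);  */
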
diff --git a/drivers/media/tuners/fc0012-priv.h b/drivers/media/tuners/fc0012-priv.h
index 4577c917e616..1a86ce1d3fcf 100644
--- a/drivers/media/tuners/fc0012-priv.h
+++ b/drivers/media/tuners/fc0012-priv.h
@@ -21,20 +21,9 @@
#ifndef _FC0012_PRIV_H_
#define _FC0012_PRIV_H_
-#define LOG_PREFIX "fc0012"
-
-#undef err
-#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
-#undef info
-#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef warn
-#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
-
struct fc0012_priv {
struct i2c_adapter *i2c;
- u8 addr;
- u8 dual_master;
- u8 xtal_freq;
+ const struct fc0012_config *cfg;
u32 frequency;
u32 bandwidth;
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 308135abd54c..f4d0e797a6cc 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -25,11 +25,13 @@ static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
{
u8 buf[2] = {reg, val};
struct i2c_msg msg = {
- .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
};
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
- err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ dev_err(&priv->i2c->dev,
+ "%s: I2C write reg failed, reg: %02x, val: %02x\n",
+ KBUILD_MODNAME, reg, val);
return -EREMOTEIO;
}
return 0;
@@ -38,12 +40,16 @@ static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
- { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
- { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD,
+ .buf = val, .len = 1 },
};
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
- err("I2C read reg failed, reg: %02x", reg);
+ dev_err(&priv->i2c->dev,
+ "%s: I2C read reg failed, reg: %02x\n",
+ KBUILD_MODNAME, reg);
return -EREMOTEIO;
}
return 0;
@@ -88,7 +94,7 @@ static int fc0012_init(struct dvb_frontend *fe)
0x04, /* reg. 0x15: Enable LNA COMPS */
};
- switch (priv->xtal_freq) {
+ switch (priv->cfg->xtal_freq) {
case FC_XTAL_27_MHZ:
case FC_XTAL_28_8_MHZ:
reg[0x07] |= 0x20;
@@ -98,9 +104,12 @@ static int fc0012_init(struct dvb_frontend *fe)
break;
}
- if (priv->dual_master)
+ if (priv->cfg->dual_master)
reg[0x0c] |= 0x02;
+ if (priv->cfg->loop_through)
+ reg[0x09] |= 0x01;
+
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
@@ -114,17 +123,12 @@ static int fc0012_init(struct dvb_frontend *fe)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
if (ret)
- err("fc0012_writereg failed: %d", ret);
+ dev_err(&priv->i2c->dev, "%s: fc0012_writereg failed: %d\n",
+ KBUILD_MODNAME, ret);
return ret;
}
-static int fc0012_sleep(struct dvb_frontend *fe)
-{
- /* nothing to do here */
- return 0;
-}
-
static int fc0012_set_params(struct dvb_frontend *fe)
{
struct fc0012_priv *priv = fe->tuner_priv;
@@ -144,7 +148,7 @@ static int fc0012_set_params(struct dvb_frontend *fe)
goto exit;
}
- switch (priv->xtal_freq) {
+ switch (priv->cfg->xtal_freq) {
case FC_XTAL_27_MHZ:
xtal_freq_khz_2 = 27000 / 2;
break;
@@ -256,7 +260,8 @@ static int fc0012_set_params(struct dvb_frontend *fe)
break;
}
} else {
- err("%s: modulation type not supported!", __func__);
+ dev_err(&priv->i2c->dev, "%s: modulation type not supported!\n",
+ KBUILD_MODNAME);
return -EINVAL;
}
@@ -318,7 +323,8 @@ exit:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
if (ret)
- warn("%s: failed: %d", __func__, ret);
+ dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n",
+ KBUILD_MODNAME, __func__, ret);
return ret;
}
@@ -331,8 +337,7 @@ static int fc0012_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static int fc0012_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- /* CHECK: always ? */
- *frequency = 0;
+ *frequency = 0; /* Zero-IF */
return 0;
}
@@ -408,7 +413,8 @@ err:
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
exit:
if (ret)
- warn("%s: failed: %d", __func__, ret);
+ dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n",
+ KBUILD_MODNAME, __func__, ret);
return ret;
}
@@ -424,7 +430,6 @@ static const struct dvb_tuner_ops fc0012_tuner_ops = {
.release = fc0012_release,
.init = fc0012_init,
- .sleep = fc0012_sleep,
.set_params = fc0012_set_params,
@@ -436,27 +441,73 @@ static const struct dvb_tuner_ops fc0012_tuner_ops = {
};
struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq)
+ struct i2c_adapter *i2c, const struct fc0012_config *cfg)
{
- struct fc0012_priv *priv = NULL;
+ struct fc0012_priv *priv;
+ int ret;
+ u8 chip_id;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
priv = kzalloc(sizeof(struct fc0012_priv), GFP_KERNEL);
- if (priv == NULL)
- return NULL;
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+ priv->cfg = cfg;
priv->i2c = i2c;
- priv->dual_master = dual_master;
- priv->addr = i2c_address;
- priv->xtal_freq = xtal_freq;
- info("Fitipower FC0012 successfully attached.");
+ /* check if the tuner is there */
+ ret = fc0012_readreg(priv, 0x00, &chip_id);
+ if (ret < 0)
+ goto err;
- fe->tuner_priv = priv;
+ dev_dbg(&i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xa1:
+ break;
+ default:
+ ret = -ENODEV;
+ goto err;
+ }
+
+ dev_info(&i2c->dev, "%s: Fitipower FC0012 successfully identified\n",
+ KBUILD_MODNAME);
+
+ if (priv->cfg->loop_through) {
+ ret = fc0012_writereg(priv, 0x09, 0x6f);
+ if (ret < 0)
+ goto err;
+ }
+
+ /*
+ * TODO: Clock output enable or divider?
+ * For dual tuner configurations, clearing bit [0] is required.
+ */
+ if (priv->cfg->clock_out) {
+ ret = fc0012_writereg(priv, 0x0b, 0x82);
+ if (ret < 0)
+ goto err;
+ }
+ fe->tuner_priv = priv;
memcpy(&fe->ops.tuner_ops, &fc0012_tuner_ops,
sizeof(struct dvb_tuner_ops));
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (ret) {
+ dev_dbg(&i2c->dev, "%s: failed: %d\n", __func__, ret);
+ kfree(priv);
+ return NULL;
+ }
+
return fe;
}
EXPORT_SYMBOL(fc0012_attach);
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 4dbd5efe8845..54508fcc3469 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -24,19 +24,41 @@
#include "dvb_frontend.h"
#include "fc001x-common.h"
+struct fc0012_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_address;
+
+ /*
+ * clock
+ */
+ enum fc001x_xtal_freq xtal_freq;
+
+ bool dual_master;
+
+ /*
+ * RF loop-through
+ */
+ bool loop_through;
+
+ /*
+ * clock output
+ */
+ bool clock_out;
+};
+
#if defined(CONFIG_MEDIA_TUNER_FC0012) || \
(defined(CONFIG_MEDIA_TUNER_FC0012_MODULE) && defined(MODULE))
extern struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq);
+ const struct fc0012_config *cfg);
#else
static inline struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- u8 i2c_address, int dual_master,
- enum fc001x_xtal_freq xtal_freq)
+ const struct fc0012_config *cfg)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/media/tuners/mt2060.h b/drivers/media/tuners/mt2060.h
index cb60caffb6b6..c64fc19cb278 100644
--- a/drivers/media/tuners/mt2060.h
+++ b/drivers/media/tuners/mt2060.h
@@ -30,7 +30,7 @@ struct mt2060_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if defined(CONFIG_MEDIA_TUNER_MT2060) || (defined(CONFIG_MEDIA_TUNER_MT2060_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2060)
extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1);
#else
static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1)
diff --git a/drivers/media/tuners/mt2063.h b/drivers/media/tuners/mt2063.h
index ab24170c1571..e1acfc8e7ae3 100644
--- a/drivers/media/tuners/mt2063.h
+++ b/drivers/media/tuners/mt2063.h
@@ -8,7 +8,7 @@ struct mt2063_config {
u32 refclock;
};
-#if defined(CONFIG_MEDIA_TUNER_MT2063) || (defined(CONFIG_MEDIA_TUNER_MT2063_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2063)
struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
struct mt2063_config *config,
struct i2c_adapter *i2c);
diff --git a/drivers/media/tuners/mt20xx.h b/drivers/media/tuners/mt20xx.h
index 259553a24903..f56241ccaa00 100644
--- a/drivers/media/tuners/mt20xx.h
+++ b/drivers/media/tuners/mt20xx.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_MT20XX) || (defined(CONFIG_MEDIA_TUNER_MT20XX_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT20XX)
extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
struct i2c_adapter* i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/mt2131.h b/drivers/media/tuners/mt2131.h
index 6632de640df0..09ceaf68e47c 100644
--- a/drivers/media/tuners/mt2131.h
+++ b/drivers/media/tuners/mt2131.h
@@ -30,7 +30,7 @@ struct mt2131_config {
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
};
-#if defined(CONFIG_MEDIA_TUNER_MT2131) || (defined(CONFIG_MEDIA_TUNER_MT2131_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2131)
extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct mt2131_config *cfg,
diff --git a/drivers/media/tuners/mt2266.h b/drivers/media/tuners/mt2266.h
index 4d083882d044..fad6dd657d77 100644
--- a/drivers/media/tuners/mt2266.h
+++ b/drivers/media/tuners/mt2266.h
@@ -24,7 +24,7 @@ struct mt2266_config {
u8 i2c_address;
};
-#if defined(CONFIG_MEDIA_TUNER_MT2266) || (defined(CONFIG_MEDIA_TUNER_MT2266_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MT2266)
extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg);
#else
static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg)
diff --git a/drivers/media/tuners/mxl5007t.h b/drivers/media/tuners/mxl5007t.h
index aa3eea0b5262..37b0942e2385 100644
--- a/drivers/media/tuners/mxl5007t.h
+++ b/drivers/media/tuners/mxl5007t.h
@@ -77,7 +77,7 @@ struct mxl5007t_config {
unsigned int clk_out_enable:1;
};
-#if defined(CONFIG_MEDIA_TUNER_MXL5007T) || (defined(CONFIG_MEDIA_TUNER_MXL5007T_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_MXL5007T)
extern struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 addr,
struct mxl5007t_config *cfg);
diff --git a/drivers/media/tuners/qt1010.h b/drivers/media/tuners/qt1010.h
index 807fb7b6146b..8ab5d479749f 100644
--- a/drivers/media/tuners/qt1010.h
+++ b/drivers/media/tuners/qt1010.h
@@ -36,7 +36,7 @@ struct qt1010_config {
* @param cfg tuner hw based configuration
* @return fe pointer on success, NULL on failure
*/
-#if defined(CONFIG_MEDIA_TUNER_QT1010) || (defined(CONFIG_MEDIA_TUNER_QT1010_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_QT1010)
extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct qt1010_config *cfg);
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 5d9f02842501..e4a84ee231cf 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -277,7 +277,7 @@ struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
{
struct tda18212_priv *priv = NULL;
int ret;
- u8 uninitialized_var(val);
+ u8 val;
priv = kzalloc(sizeof(struct tda18212_priv), GFP_KERNEL);
if (priv == NULL)
@@ -296,8 +296,8 @@ struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
- dev_dbg(&priv->i2c->dev, "%s: ret=%d chip id=%02x\n", __func__, ret,
- val);
+ if (!ret)
+ dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, val);
if (ret || val != 0xc7) {
kfree(priv);
return NULL;
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 18198537be9f..2d31aeb6b088 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -277,7 +277,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, struct tda18218_config *cfg)
{
struct tda18218_priv *priv = NULL;
- u8 uninitialized_var(val);
+ u8 val;
int ret;
/* chip default registers values */
static u8 def_regs[] = {
@@ -302,8 +302,8 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
/* check if the tuner is there */
ret = tda18218_rd_reg(priv, R00_ID, &val);
- dev_dbg(&priv->i2c->dev, "%s: ret=%d chip id=%02x\n", __func__, ret,
- val);
+ if (!ret)
+ dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, val);
if (ret || val != def_regs[R00_ID]) {
kfree(priv);
return NULL;
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 72c26fd77922..e7786862dab2 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -1122,6 +1122,7 @@ static int tda18271_dump_std_map(struct dvb_frontend *fe)
tda18271_dump_std_item(dvbt_7, "dvbt 7");
tda18271_dump_std_item(dvbt_8, "dvbt 8");
tda18271_dump_std_item(qam_6, "qam 6 ");
+ tda18271_dump_std_item(qam_7, "qam 7 ");
tda18271_dump_std_item(qam_8, "qam 8 ");
return 0;
@@ -1149,6 +1150,7 @@ static int tda18271_update_std_map(struct dvb_frontend *fe,
tda18271_update_std(dvbt_7, "dvbt 7");
tda18271_update_std(dvbt_8, "dvbt 8");
tda18271_update_std(qam_6, "qam 6");
+ tda18271_update_std(qam_7, "qam 7");
tda18271_update_std(qam_8, "qam 8");
return 0;
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index fb881c667c94..b62e925f643f 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1290,13 +1290,11 @@ int tda18271_assign_map_layout(struct dvb_frontend *fe)
switch (priv->id) {
case TDA18271HDC1:
priv->maps = &tda18271c1_map_layout;
- memcpy(&priv->std, &tda18271c1_std_map,
- sizeof(struct tda18271_std_map));
+ priv->std = tda18271c1_std_map;
break;
case TDA18271HDC2:
priv->maps = &tda18271c2_map_layout;
- memcpy(&priv->std, &tda18271c2_std_map,
- sizeof(struct tda18271_std_map));
+ priv->std = tda18271c2_std_map;
break;
default:
ret = -EINVAL;
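
Several hunks in this series (tda18271-maps.c here, and later au0828-i2c.c, cx231xx-cards.c, tuner-xc2028.c, xc4000.c) replace memcpy() of whole structures with plain assignment. The generated code is equivalent, but the compiler now checks that both sides really have the same type and there is no hand-written sizeof expression that can go stale. A tiny illustration with invented struct and field names:

#include <stdio.h>
#include <string.h>

struct std_map {
        int if_freq;
        int std_bits;
};

int main(void)
{
        static const struct std_map defaults = { .if_freq = 4000000, .std_bits = 0x1e };
        struct std_map a, b;

        /* Old style: the size must be spelled out and kept in sync by hand. */
        memcpy(&a, &defaults, sizeof(struct std_map));

        /* New style: plain assignment, fully type-checked by the compiler. */
        b = defaults;

        printf("%d %d\n", a.if_freq, b.std_bits);
        return 0;
}
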
diff --git a/drivers/media/tuners/tda18271.h b/drivers/media/tuners/tda18271.h
index 89b6c6d93fec..4c418d63f540 100644
--- a/drivers/media/tuners/tda18271.h
+++ b/drivers/media/tuners/tda18271.h
@@ -121,7 +121,7 @@ enum tda18271_mode {
TDA18271_DIGITAL,
};
-#if defined(CONFIG_MEDIA_TUNER_TDA18271) || (defined(CONFIG_MEDIA_TUNER_TDA18271_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18271)
extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
struct i2c_adapter *i2c,
struct tda18271_config *cfg);
diff --git a/drivers/media/tuners/tda827x.h b/drivers/media/tuners/tda827x.h
index 7d72ce0a0c2d..9432b5b6121b 100644
--- a/drivers/media/tuners/tda827x.h
+++ b/drivers/media/tuners/tda827x.h
@@ -50,7 +50,7 @@ struct tda827x_config
* @param cfg optional callback function pointers.
* @return FE pointer on success, NULL on failure.
*/
-#if defined(CONFIG_MEDIA_TUNER_TDA827X) || (defined(CONFIG_MEDIA_TUNER_TDA827X_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA827X)
extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
struct i2c_adapter *i2c,
struct tda827x_config *cfg);
diff --git a/drivers/media/tuners/tda8290.h b/drivers/media/tuners/tda8290.h
index 7e288b26fcc3..e12ecbaa35a4 100644
--- a/drivers/media/tuners/tda8290.h
+++ b/drivers/media/tuners/tda8290.h
@@ -28,7 +28,7 @@ struct tda829x_config {
#define TDA829X_DONT_PROBE 1
};
-#if defined(CONFIG_MEDIA_TUNER_TDA8290) || (defined(CONFIG_MEDIA_TUNER_TDA8290_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA8290)
extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tda9887.h b/drivers/media/tuners/tda9887.h
index acc419e8c4fc..37a4a1123e0c 100644
--- a/drivers/media/tuners/tda9887.h
+++ b/drivers/media/tuners/tda9887.h
@@ -21,7 +21,7 @@
#include "dvb_frontend.h"
/* ------------------------------------------------------------------------ */
-#if defined(CONFIG_MEDIA_TUNER_TDA9887) || (defined(CONFIG_MEDIA_TUNER_TDA9887_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA9887)
extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr);
diff --git a/drivers/media/tuners/tea5761.h b/drivers/media/tuners/tea5761.h
index 2e2ff82c95a4..933228ffb509 100644
--- a/drivers/media/tuners/tea5761.h
+++ b/drivers/media/tuners/tea5761.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tea5767.h b/drivers/media/tuners/tea5767.h
index d30ab1b483de..c39101199383 100644
--- a/drivers/media/tuners/tea5767.h
+++ b/drivers/media/tuners/tea5767.h
@@ -39,7 +39,7 @@ struct tea5767_ctrl {
enum tea5767_xtal xtal_freq;
};
-#if defined(CONFIG_MEDIA_TUNER_TEA5767) || (defined(CONFIG_MEDIA_TUNER_TEA5767_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5767)
extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/tuner-simple.h b/drivers/media/tuners/tuner-simple.h
index 381fa5d35a9b..ffd12cfe650b 100644
--- a/drivers/media/tuners/tuner-simple.h
+++ b/drivers/media/tuners/tuner-simple.h
@@ -20,7 +20,7 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_SIMPLE) || (defined(CONFIG_MEDIA_TUNER_SIMPLE_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_SIMPLE)
extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr,
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 7bcb6b0ff1df..09451737c77e 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -870,7 +870,7 @@ check_device:
}
read_not_reliable:
- memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+ priv->cur_fw = new_fw;
/*
* By setting BASE in cur_fw.type only after successfully loading all
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 9ebfb2d0ff14..181d087faec4 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -56,7 +56,7 @@ struct xc2028_config {
#define XC2028_RESET_CLK 1
#define XC2028_I2C_FLUSH 2
-#if defined(CONFIG_MEDIA_TUNER_XC2028) || (defined(CONFIG_MEDIA_TUNER_XC2028_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC2028)
extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
struct xc2028_config *cfg);
#else
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 5c0fd787cc8f..2018befabb5a 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1066,7 +1066,7 @@ check_device:
goto fail;
}
- memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+ priv->cur_fw = new_fw;
/*
* By setting BASE in cur_fw.type only after successfully loading all
diff --git a/drivers/media/tuners/xc4000.h b/drivers/media/tuners/xc4000.h
index e6a44d151cbd..97c23de5296c 100644
--- a/drivers/media/tuners/xc4000.h
+++ b/drivers/media/tuners/xc4000.h
@@ -50,7 +50,7 @@ struct xc4000_config {
* it's passed back to a bridge during tuner_callback().
*/
-#if defined(CONFIG_MEDIA_TUNER_XC4000) || (defined(CONFIG_MEDIA_TUNER_XC4000_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_XC4000)
extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct xc4000_config *cfg);
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index dc93cf338f36..d6be1b613c52 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -785,6 +785,7 @@ static int xc5000_set_params(struct dvb_frontend *fe)
return -EINVAL;
}
priv->rf_mode = XC_RF_MODE_AIR;
+ break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
dprintk(1, "%s() QAM modulation\n", __func__);
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 6746994d03fe..0a7d520636a9 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -21,7 +21,6 @@ endif
if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
-source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
source "drivers/media/usb/tlg2300/Kconfig"
@@ -31,6 +30,7 @@ endif
if (MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
comment "Analog/digital TV USB devices"
+source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/cx231xx/Kconfig"
source "drivers/media/usb/tm6000/Kconfig"
endif
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 1766c0ce93be..953a37c613b1 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -1,17 +1,28 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
- depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
+ depends on I2C && INPUT && DVB_CORE && USB
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF_VMALLOC
select DVB_AU8522_DTV if MEDIA_SUBDRV_AUTOSELECT
- select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
---help---
- This is a video4linux driver for Auvitek's USB device.
+ This is a hybrid analog/digital TV capture driver for
+ Auvitek's AU0828 USB device.
To compile this driver as a module, choose M here: the
module will be called au0828
+
+config VIDEO_AU0828_V4L2
+ bool "Auvitek AU0828 v4l2 analog video support"
+ depends on VIDEO_AU0828 && VIDEO_V4L2
+ select DVB_AU8522_V4L if MEDIA_SUBDRV_AUTOSELECT
+ default y
+ ---help---
+ This is a video4linux driver for Auvitek's USB device.
+
+ Choose Y here to include support for v4l2 analog video
+ capture within the au0828 driver.
diff --git a/drivers/media/usb/au0828/Makefile b/drivers/media/usb/au0828/Makefile
index 98cc20cc0ffb..be3bdf698022 100644
--- a/drivers/media/usb/au0828/Makefile
+++ b/drivers/media/usb/au0828/Makefile
@@ -1,4 +1,8 @@
-au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o au0828-video.o au0828-vbi.o
+au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o
+
+ifeq ($(CONFIG_VIDEO_AU0828_V4L2),y)
+ au0828-objs += au0828-video.o au0828-vbi.o
+endif
obj-$(CONFIG_VIDEO_AU0828) += au0828.o
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 0cb7c28dcb17..dd32decb237d 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -169,7 +169,9 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
- case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
+ case 72271: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
+ case 72281: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -183,16 +185,15 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
__func__, tv.model);
}
+void au0828_card_analog_fe_setup(struct au0828_dev *dev);
+
void au0828_card_setup(struct au0828_dev *dev)
{
static u8 eeprom[256];
- struct tuner_setup tun_setup;
- struct v4l2_subdev *sd;
- unsigned int mode_mask = T_ANALOG_TV;
dprintk(1, "%s()\n", __func__);
- memcpy(&dev->board, &au0828_boards[dev->boardnr], sizeof(dev->board));
+ dev->board = au0828_boards[dev->boardnr];
if (dev->i2c_rc == 0) {
dev->i2c_client.addr = 0xa0 >> 1;
@@ -209,6 +210,16 @@ void au0828_card_setup(struct au0828_dev *dev)
break;
}
+ au0828_card_analog_fe_setup(dev);
+}
+
+void au0828_card_analog_fe_setup(struct au0828_dev *dev)
+{
+#ifdef CONFIG_VIDEO_AU0828_V4L2
+ struct tuner_setup tun_setup;
+ struct v4l2_subdev *sd;
+ unsigned int mode_mask = T_ANALOG_TV;
+
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) {
/* Load the analog demodulator driver (note this would need to
be abstracted out if we ever need to support a different
@@ -234,6 +245,7 @@ void au0828_card_setup(struct au0828_dev *dev)
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr,
&tun_setup);
}
+#endif
}
/*
@@ -333,6 +345,8 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ USB_DEVICE(0x2040, 0x7213),
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7270),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 745a80a798c8..1e6f40ef1c6b 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -134,13 +134,17 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
/* Digital TV */
au0828_dvb_unregister(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED)
au0828_analog_unregister(dev);
+#endif
/* I2C */
au0828_i2c_unregister(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
v4l2_device_unregister(&dev->v4l2_dev);
+#endif
usb_set_intfdata(interface, NULL);
@@ -155,7 +159,10 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
static int au0828_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- int ifnum, retval;
+ int ifnum;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
+ int retval;
+#endif
struct au0828_dev *dev;
struct usb_device *usbdev = interface_to_usbdev(interface);
@@ -194,6 +201,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
dev->usbdev = usbdev;
dev->boardnr = id->driver_info;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Create the v4l2_device */
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
@@ -203,6 +211,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
kfree(dev);
return -EIO;
}
+#endif
/* Power Up the bridge */
au0828_write(dev, REG_600, 1 << 4);
@@ -216,9 +225,11 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog TV */
if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED)
au0828_analog_register(dev, interface);
+#endif
/* Digital TV */
au0828_dvb_register(dev);
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index 4ded17fe1957..17ec3651b10e 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -364,12 +364,9 @@ int au0828_i2c_register(struct au0828_dev *dev)
{
dprintk(1, "%s()\n", __func__);
- memcpy(&dev->i2c_adap, &au0828_i2c_adap_template,
- sizeof(dev->i2c_adap));
- memcpy(&dev->i2c_algo, &au0828_i2c_algo_template,
- sizeof(dev->i2c_algo));
- memcpy(&dev->i2c_client, &au0828_i2c_client_template,
- sizeof(dev->i2c_client));
+ dev->i2c_adap = au0828_i2c_adap_template;
+ dev->i2c_algo = au0828_i2c_algo_template;
+ dev->i2c_client = au0828_i2c_client_template;
dev->i2c_adap.dev.parent = &dev->usbdev->dev;
@@ -378,7 +375,11 @@ int au0828_i2c_register(struct au0828_dev *dev)
dev->i2c_adap.algo = &dev->i2c_algo;
dev->i2c_adap.algo_data = dev;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
+#else
+ i2c_set_adapdata(&dev->i2c_adap, dev);
+#endif
i2c_add_adapter(&dev->i2c_adap);
dev->i2c_client.adapter = &dev->i2c_adap;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 45387aab10c7..8b9e8268e911 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -304,7 +304,7 @@ static inline void buffer_filled(struct au0828_dev *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->isoc_ctl.buf = NULL;
@@ -321,7 +321,7 @@ static inline void vbi_buffer_filled(struct au0828_dev *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->isoc_ctl.vbi_buf = NULL;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 66a56ef7bbe4..e579ff69ca4a 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -199,8 +199,10 @@ struct au0828_dev {
struct au0828_dvb dvb;
struct work_struct restart_streaming;
+#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog */
struct v4l2_device v4l2_dev;
+#endif
int users;
unsigned int resources; /* resources in use */
struct video_device *vdev;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 95b5d6e7cdc4..be1719283609 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -328,7 +328,7 @@ static void cpia2_usb_complete(struct urb *urb)
continue;
}
DBG("Start of frame pattern found\n");
- do_gettimeofday(&cam->workbuff->timestamp);
+ v4l2_get_timestamp(&cam->workbuff->timestamp);
cam->workbuff->seq = cam->frame_count++;
cam->workbuff->data[0] = 0xFF;
cam->workbuff->data[1] = 0xD8;
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index aeb9d2275725..d5d42b6e94be 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -825,6 +825,8 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
else
buf->flags = 0;
+ buf->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
switch (cam->buffers[buf->index].status) {
case FRAME_EMPTY:
case FRAME_ERROR:
@@ -943,7 +945,8 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->index = frame;
buf->bytesused = cam->buffers[buf->index].length;
- buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
+ buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
+ | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
buf->timestamp = cam->buffers[buf->index].timestamp;
buf->sequence = cam->buffers[buf->index].seq;
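
The switch from do_gettimeofday() to v4l2_get_timestamp(), together with the new V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC flag reported to user space, moves buffer timestamps from wall-clock time to the monotonic clock, which cannot jump when NTP or the administrator adjusts the system time. A user-space sketch of the difference using plain POSIX calls (not the kernel helpers):

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
        struct timeval wall;
        struct timespec mono;

        /* Wall clock: can be stepped backwards, so frame deltas may go negative. */
        gettimeofday(&wall, NULL);

        /* Monotonic clock: only moves forward, suitable for frame timestamps. */
        clock_gettime(CLOCK_MONOTONIC, &mono);

        printf("wall: %ld.%06ld  monotonic: %ld.%09ld\n",
               (long)wall.tv_sec, (long)wall.tv_usec,
               (long)mono.tv_sec, mono.tv_nsec);
        return 0;
}
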
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index b024e5197a75..28688dbcb609 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1291,7 +1291,7 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
dma_q->mpeg_buffer_completed = 0;
@@ -1327,7 +1327,7 @@ static void buffer_filled(char *data, int len, struct urb *urb,
memcpy(vbuf, data, len);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index bbed1e40eeda..8d529565f163 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -603,6 +603,33 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2] = {
+ .name = "Elgato Video Capture V2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_NTSC,
+ .no_alt_vanc = 1,
+ .external_av = 1,
+ .dont_use_port_3 = 1,
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -642,6 +669,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_KWORLD_UB430_USB_HYBRID},
{USB_DEVICE(0x1f4d, 0x0237),
.driver_info = CX231XX_BOARD_ICONBIT_U100},
+ {USB_DEVICE(0x0fd9, 0x0037),
+ .driver_info = CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2},
{},
};
@@ -707,7 +736,7 @@ static void cx231xx_sleep_s5h1432(struct cx231xx *dev)
static inline void cx231xx_set_model(struct cx231xx *dev)
{
- memcpy(&dev->board, &cx231xx_boards[dev->model], sizeof(dev->board));
+ dev->board = cx231xx_boards[dev->model];
}
/* Since cx231xx_pre_card_setup() requires a proper dev->model,
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index ac7db52f404f..46e3892557c2 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -530,7 +530,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
dev->vbi_mode.bulk_ctl.buf = NULL;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index fedf7852a355..06376d904c9f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -235,7 +235,7 @@ static inline void buffer_filled(struct cx231xx *dev,
cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
@@ -1751,6 +1751,7 @@ static int vidioc_s_register(struct file *file, void *priv,
0x02,
(u16)reg->reg, 1,
value, 1, 2);
+ break;
case 0x322:
ret =
cx231xx_write_i2c_master(dev,
@@ -2627,8 +2628,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->name, video_device_node_name(dev->vdev));
/* Initialize VBI template */
- memcpy(&cx231xx_vbi_template, &cx231xx_video_template,
- sizeof(cx231xx_vbi_template));
+ cx231xx_vbi_template = cx231xx_video_template;
strcpy(cx231xx_vbi_template.name, "cx231xx-vbi");
/* Allocate and fill vbi video_device struct */
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index a89d020de948..3e11462be0d0 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -68,6 +68,7 @@
#define CX231XX_BOARD_ICONBIT_U100 13
#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14
#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
+#define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 7a622dbe9b6d..692224d97d06 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -1,6 +1,6 @@
config DVB_USB_V2
tristate "Support for various USB DVB devices v2"
- depends on DVB_CORE && USB && I2C && RC_CORE
+ depends on DVB_CORE && USB && I2C
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
@@ -113,6 +113,7 @@ config DVB_USB_IT913X
config DVB_USB_LME2510
tristate "LME DM04/QQBOX DVB-S USB2.0 support"
depends on DVB_USB_V2
+ depends on RC_CORE
select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
@@ -120,6 +121,7 @@ config DVB_USB_LME2510
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_M88RS2000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the LME DM04/QQBOX DVB-S USB2.0
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 943d93423705..b86d0f27a398 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1156,6 +1156,7 @@ error:
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
struct af9015_rc_setup {
unsigned int id;
char *rc_codes;
@@ -1312,6 +1313,9 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
+#else
+ #define af9015_get_rc_config NULL
+#endif
/* interface 0 is used by DVB-T receiver and
interface 1 is for remote controller (HID) */
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 61ae7f9d0b27..f11cc42454f0 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -209,10 +209,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].len > 40 || msg[1].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
- /* integrated demod */
+ } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
+ (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ /* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
+
+ if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ reg |= 0x100000;
+
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
msg[1].len);
} else {
@@ -220,6 +225,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
u8 buf[5 + msg[0].len];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
buf, msg[1].len, msg[1].buf };
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
@@ -232,10 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
- /* integrated demod */
+ } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
+ (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ /* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
+
+ if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ reg |= 0x100000;
+
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
msg[0].len - 3);
} else {
@@ -243,6 +254,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
u8 buf[5 + msg[0].len];
struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
0, NULL };
+ req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
buf[1] = msg[0].addr << 1;
buf[2] = 0x00; /* reg addr len */
@@ -313,12 +325,57 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL };
struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf } ;
- u8 hdr_core;
+ u8 hdr_core, tmp;
u16 hdr_addr, hdr_data_len, hdr_checksum;
#define MAX_DATA 58
#define HDR_SIZE 7
/*
+ * In a dual tuner configuration we need to do some extra
+ * initialization in order to download firmware to the slave demod too,
+ * which is done by the master demod.
+ * The master also feeds the clock and controls power via GPIO.
+ */
+ ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+
+ if (tmp) {
+ /* configure gpioh1, reset & power slave demod */
+ ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0x00d8b1, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0x00d8af, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 50000);
+
+ ret = af9035_wr_reg_mask(d, 0x00d8af, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* tell the slave I2C address */
+ ret = af9035_rd_reg(d, EEPROM_2ND_DEMOD_ADDR, &tmp);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00417f, tmp);
+ if (ret < 0)
+ goto err;
+
+ /* enable clock out */
+ ret = af9035_wr_reg_mask(d, 0x00d81a, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ /*
* Thanks to Daniel Glöckner <daniel-gl@gmx.net> about that info!
*
* byte 0: MCS 51 core
@@ -380,6 +437,10 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
__func__, fw->size - i);
}
+ /* print a warning if the firmware is bad, then continue and see what happens */
+ if (i)
+ dev_warn(&d->udev->dev, "%s: bad firmware\n", KBUILD_MODNAME);
+
/* firmware loaded, request boot */
req.cmd = CMD_FW_BOOT;
ret = af9035_ctrl_msg(d, &req);
@@ -489,14 +550,28 @@ static int af9035_read_config(struct dvb_usb_device *d)
u8 tmp;
u16 tmp16;
+ /* demod I2C "address" */
+ state->af9033_config[0].i2c_addr = 0x38;
+
/* check if there is dual tuners */
ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
if (ret < 0)
goto err;
state->dual_mode = tmp;
- dev_dbg(&d->udev->dev, "%s: dual mode=%d\n",
- __func__, state->dual_mode);
+ dev_dbg(&d->udev->dev, "%s: dual mode=%d\n", __func__,
+ state->dual_mode);
+
+ if (state->dual_mode) {
+ /* read 2nd demodulator I2C address */
+ ret = af9035_rd_reg(d, EEPROM_2ND_DEMOD_ADDR, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->af9033_config[1].i2c_addr = tmp;
+ dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
+ __func__, tmp);
+ }
for (i = 0; i < state->dual_mode + 1; i++) {
/* tuner */
@@ -514,6 +589,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
case AF9033_TUNER_MXL5007T:
case AF9033_TUNER_TDA18218:
case AF9033_TUNER_FC2580:
+ case AF9033_TUNER_FC0012:
state->af9033_config[i].spec_inv = 1;
break;
default:
@@ -522,6 +598,18 @@ static int af9035_read_config(struct dvb_usb_device *d)
KBUILD_MODNAME, tmp);
}
+ /* disable dual mode if driver does not support it */
+ if (i == 1)
+ switch (tmp) {
+ case AF9033_TUNER_FC0012:
+ break;
+ default:
+ state->dual_mode = false;
+ dev_info(&d->udev->dev, "%s: driver does not " \
+ "support 2nd tuner and will " \
+ "disable it", KBUILD_MODNAME);
+ }
+
/* tuner IF frequency */
ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp);
if (ret < 0)
@@ -730,6 +818,12 @@ static int af9035_frontend_callback(void *adapter_priv, int component,
return 0;
}
+static int af9035_get_adapter_count(struct dvb_usb_device *d)
+{
+ struct state *state = d_to_priv(d);
+ return state->dual_mode + 1;
+}
+
static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -751,15 +845,14 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
if (ret < 0)
goto err;
- ret = af9035_wr_reg(d, 0x00d81a,
- state->dual_mode);
+ ret = af9035_wr_reg(d, 0x00d81a, state->dual_mode);
if (ret < 0)
goto err;
}
/* attach demodulator */
- adap->fe[0] = dvb_attach(af9033_attach,
- &state->af9033_config[adap->id], &d->i2c_adap);
+ adap->fe[0] = dvb_attach(af9033_attach, &state->af9033_config[adap->id],
+ &d->i2c_adap);
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
@@ -785,13 +878,22 @@ static const struct fc0011_config af9035_fc0011_config = {
.i2c_address = 0x60,
};
-static struct mxl5007t_config af9035_mxl5007t_config = {
- .xtal_freq_hz = MxL_XTAL_24_MHZ,
- .if_freq_hz = MxL_IF_4_57_MHZ,
- .invert_if = 0,
- .loop_thru_enable = 0,
- .clk_out_enable = 0,
- .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+static struct mxl5007t_config af9035_mxl5007t_config[] = {
+ {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 0,
+ .clk_out_enable = 0,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+ }, {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 1,
+ .clk_out_enable = 1,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+ }
};
static struct tda18218_config af9035_tda18218_config = {
@@ -804,12 +906,32 @@ static const struct fc2580_config af9035_fc2580_config = {
.clock = 16384000,
};
+static const struct fc0012_config af9035_fc0012_config[] = {
+ {
+ .i2c_address = 0x63,
+ .xtal_freq = FC_XTAL_36_MHZ,
+ .dual_master = true,
+ .loop_through = true,
+ .clock_out = true,
+ }, {
+ .i2c_address = 0x63 | 0x80, /* I2C bus select hack */
+ .xtal_freq = FC_XTAL_36_MHZ,
+ .dual_master = true,
+ }
+};
+
static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
struct dvb_frontend *fe;
+ struct i2c_msg msg[1];
+ u8 tuner_addr;
+ /*
+ * XXX: Hack used in this function: we abuse the unused I2C address bit [7]
+ * to carry info about which I2C bus is used in the dual tuner configuration.
+ */
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_TUA9001:
@@ -842,46 +964,59 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
&d->i2c_adap, &af9035_fc0011_config);
break;
case AF9033_TUNER_MXL5007T:
- ret = af9035_wr_reg(d, 0x00d8e0, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8e1, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8df, 0);
- if (ret < 0)
- goto err;
+ if (adap->id == 0) {
+ ret = af9035_wr_reg(d, 0x00d8e0, 1);
+ if (ret < 0)
+ goto err;
- msleep(30);
+ ret = af9035_wr_reg(d, 0x00d8e1, 1);
+ if (ret < 0)
+ goto err;
- ret = af9035_wr_reg(d, 0x00d8df, 1);
- if (ret < 0)
- goto err;
+ ret = af9035_wr_reg(d, 0x00d8df, 0);
+ if (ret < 0)
+ goto err;
- msleep(300);
+ msleep(30);
- ret = af9035_wr_reg(d, 0x00d8c0, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8c1, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8bf, 0);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b4, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b5, 1);
- if (ret < 0)
- goto err;
- ret = af9035_wr_reg(d, 0x00d8b3, 1);
- if (ret < 0)
- goto err;
+ ret = af9035_wr_reg(d, 0x00d8df, 1);
+ if (ret < 0)
+ goto err;
+
+ msleep(300);
+
+ ret = af9035_wr_reg(d, 0x00d8c0, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8c1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8bf, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b4, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b5, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0x00d8b3, 1);
+ if (ret < 0)
+ goto err;
+
+ tuner_addr = 0x60;
+ } else {
+ tuner_addr = 0x60 | 0x80; /* I2C bus hack */
+ }
/* attach tuner */
- fe = dvb_attach(mxl5007t_attach, adap->fe[0],
- &d->i2c_adap, 0x60, &af9035_mxl5007t_config);
+ fe = dvb_attach(mxl5007t_attach, adap->fe[0], &d->i2c_adap,
+ tuner_addr, &af9035_mxl5007t_config[adap->id]);
break;
case AF9033_TUNER_TDA18218:
/* attach tuner */
@@ -907,6 +1042,46 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
fe = dvb_attach(fc2580_attach, adap->fe[0],
&d->i2c_adap, &af9035_fc2580_config);
break;
+ case AF9033_TUNER_FC0012:
+ /*
+ * AF9035 gpiot2 = FC0012 enable
+ * XXX: there seems to be something on gpioh8 too, but in my
+ * tests I didn't find any difference.
+ */
+
+ if (adap->id == 0) {
+ /* configure gpiot2 as output and high */
+ ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ } else {
+ /*
+ * FIXME: This belongs in the FC0012 driver.
+ * Write 02 to FC0012 master tuner register 0d directly
+ * in order to make the slave tuner work.
+ */
+ msg[0].addr = 0x63;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = "\x0d\x02";
+ ret = i2c_transfer(&d->i2c_adap, msg, 1);
+ if (ret < 0)
+ goto err;
+ }
+
+ usleep_range(10000, 50000);
+
+ fe = dvb_attach(fc0012_attach, adap->fe[0], &d->i2c_adap,
+ &af9035_fc0012_config[adap->id]);
+ break;
default:
fe = NULL;
}
@@ -945,8 +1120,8 @@ static int af9035_init(struct dvb_usb_device *d)
{ 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
{ 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
{ 0x00dd0d, packet_size, 0xff },
- { 0x80f9a3, 0x00, 0x01 },
- { 0x80f9cd, 0x00, 0x01 },
+ { 0x80f9a3, state->dual_mode, 0x01 },
+ { 0x80f9cd, state->dual_mode, 0x01 },
{ 0x80f99d, 0x00, 0x01 },
{ 0x80f9a4, 0x00, 0x01 },
};
@@ -971,6 +1146,7 @@ err:
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static int af9035_rc_query(struct dvb_usb_device *d)
{
unsigned int key;
@@ -1045,6 +1221,9 @@ err:
return ret;
}
+#else
+ #define af9035_get_rc_config NULL
+#endif
/* interface 0 is used by DVB-T receiver and
interface 1 is for remote controller (HID) */
@@ -1068,7 +1247,7 @@ static const struct dvb_usb_device_properties af9035_props = {
.init = af9035_init,
.get_rc_config = af9035_get_rc_config,
- .num_adapters = 1,
+ .get_adapter_count = af9035_get_adapter_count,
.adapter = {
{
.stream = DVB_USB_STREAM_BULK(0x84, 6, 87 * 188),
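
The af9035.c changes above add a second demodulator/tuner pair behind the same USB bridge by smuggling a bus-select flag into bit 7 of the 7-bit I2C address, which valid 7-bit addresses never use; the driver later folds that bit into its firmware command mailbox. The sketch below only illustrates the generic encode/decode idea, with helper names invented for the example:

#include <stdio.h>

#define BUS_SELECT_BIT 0x80     /* bit 7 is unused by 7-bit I2C addresses */

/* Encode: mark an address as belonging to the second bus/demod. */
static unsigned char addr_on_second_bus(unsigned char addr7)
{
        return addr7 | BUS_SELECT_BIT;
}

/* Decode: recover the real address and which bus was meant. */
static void split_addr(unsigned char cooked, unsigned char *addr7, int *bus)
{
        *bus = (cooked & BUS_SELECT_BIT) ? 1 : 0;
        *addr7 = cooked & 0x7f;
}

int main(void)
{
        unsigned char cooked = addr_on_second_bus(0x63);    /* e.g. slave FC0012 */
        unsigned char addr;
        int bus;

        split_addr(cooked, &addr, &bus);
        printf("wire address 0x%02x on bus %d\n", addr, bus);
        return 0;
}
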
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index 75ef1ec13fbf..29f3eec22c2c 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -26,6 +26,7 @@
#include "af9033.h"
#include "tua9001.h"
#include "fc0011.h"
+#include "fc0012.h"
#include "mxl5007t.h"
#include "tda18218.h"
#include "fc2580.h"
@@ -53,7 +54,6 @@ struct usb_req {
struct state {
u8 seq; /* packet sequence number */
bool dual_mode;
-
struct af9033_config af9033_config[2];
};
@@ -91,6 +91,7 @@ u32 clock_lut_it9135[] = {
/* EEPROM locations */
#define EEPROM_IR_MODE 0x430d
#define EEPROM_DUAL_MODE 0x4326
+#define EEPROM_2ND_DEMOD_ADDR 0x4327
#define EEPROM_IR_TYPE 0x4329
#define EEPROM_1_IFFREQ_L 0x432d
#define EEPROM_1_IFFREQ_H 0x432e
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index d05c5b563dac..a20d691d0b63 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -1019,6 +1019,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
return ret;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static int anysee_rc_query(struct dvb_usb_device *d)
{
u8 buf[] = {CMD_GET_IR_CODE};
@@ -1054,6 +1055,9 @@ static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
+#else
+ #define anysee_get_rc_config NULL
+#endif
static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
int addr)
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index d75dbf27e99e..70ec80d8be71 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -189,6 +189,7 @@ static int az6007_streaming_ctrl(struct dvb_frontend *fe, int onoff)
return az6007_write(d, 0xbc, onoff, 0, NULL, 0);
}
+#if IS_ENABLED(CONFIG_RC_CORE)
/* remote control stuff (does not work with my box) */
static int az6007_rc_query(struct dvb_usb_device *d)
{
@@ -215,6 +216,20 @@ static int az6007_rc_query(struct dvb_usb_device *d)
return 0;
}
+static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
+{
+ pr_debug("Getting az6007 Remote Control properties\n");
+
+ rc->allowed_protos = RC_BIT_NEC;
+ rc->query = az6007_rc_query;
+ rc->interval = 400;
+
+ return 0;
+}
+#else
+ #define az6007_get_rc_config NULL
+#endif
+
static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot,
int address)
@@ -822,17 +837,6 @@ static void az6007_usb_disconnect(struct usb_interface *intf)
dvb_usbv2_disconnect(intf);
}
-static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
-{
- pr_debug("Getting az6007 Remote Control properties\n");
-
- rc->allowed_protos = RC_BIT_NEC;
- rc->query = az6007_rc_query;
- rc->interval = 400;
-
- return 0;
-}
-
static int az6007_download_firmware(struct dvb_usb_device *d,
const struct firmware *fw)
{
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 059291b892b8..3cac8bd0b116 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -347,6 +347,7 @@ struct dvb_usb_adapter {
* @props: device properties
* @name: device name
* @rc_map: name of rc codes table
+ * @rc_polling_active: set when RC polling is active
* @udev: pointer to the device's struct usb_device
* @intf: pointer to the device's usb interface
* @rc: remote controller configuration
@@ -364,7 +365,7 @@ struct dvb_usb_device {
const struct dvb_usb_device_properties *props;
const char *name;
const char *rc_map;
-
+ bool rc_polling_active;
struct usb_device *udev;
struct usb_interface *intf;
struct dvb_usb_rc rc;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 671b4fa232b4..086792055912 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -102,6 +102,7 @@ static int dvb_usbv2_i2c_exit(struct dvb_usb_device *d)
return 0;
}
+#if IS_ENABLED(CONFIG_RC_CORE)
static void dvb_usb_read_remote_control(struct work_struct *work)
{
struct dvb_usb_device *d = container_of(work,
@@ -112,13 +113,16 @@ static void dvb_usb_read_remote_control(struct work_struct *work)
* When the parameter has been set to 1 via sysfs while the
* driver was running, or when bulk mode is enabled after IR init.
*/
- if (dvb_usbv2_disable_rc_polling || d->rc.bulk_mode)
+ if (dvb_usbv2_disable_rc_polling || d->rc.bulk_mode) {
+ d->rc_polling_active = false;
return;
+ }
ret = d->rc.query(d);
if (ret < 0) {
dev_err(&d->udev->dev, "%s: rc.query() failed=%d\n",
KBUILD_MODNAME, ret);
+ d->rc_polling_active = false;
return; /* stop polling */
}
@@ -182,6 +186,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
d->rc.interval);
schedule_delayed_work(&d->rc_query_work,
msecs_to_jiffies(d->rc.interval));
+ d->rc_polling_active = true;
}
return 0;
@@ -202,6 +207,10 @@ static int dvb_usbv2_remote_exit(struct dvb_usb_device *d)
return 0;
}
+#else
+ #define dvb_usbv2_remote_init(args...) 0
+ #define dvb_usbv2_remote_exit(args...)
+#endif
static void dvb_usb_data_complete(struct usb_data_stream *stream, u8 *buf,
size_t len)
@@ -959,7 +968,7 @@ int dvb_usbv2_suspend(struct usb_interface *intf, pm_message_t msg)
dev_dbg(&d->udev->dev, "%s:\n", __func__);
/* stop remote controller poll */
- if (d->rc.query && !d->rc.bulk_mode)
+ if (d->rc_polling_active)
cancel_delayed_work_sync(&d->rc_query_work);
for (i = MAX_NO_OF_ADAPTER_PER_DEVICE - 1; i >= 0; i--) {
@@ -1006,7 +1015,7 @@ static int dvb_usbv2_resume_common(struct dvb_usb_device *d)
}
/* start remote controller poll */
- if (d->rc.query && !d->rc.bulk_mode)
+ if (d->rc_polling_active)
schedule_delayed_work(&d->rc_query_work,
msecs_to_jiffies(d->rc.interval));
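
Several drivers in this series wrap their remote-control code in #if IS_ENABLED(CONFIG_RC_CORE) and, when the RC subsystem is not built, define the get_rc_config hook as NULL so the dvb-usb-v2 core simply skips remote setup. A toy sketch of this compile-out-to-NULL-hook idea; the names and the DEMO_RC_CORE switch are invented stand-ins for the Kconfig symbol:

#include <stdio.h>

/* Toggle this to mimic CONFIG_RC_CORE being enabled or disabled. */
#define DEMO_RC_CORE 0

struct demo_device { const char *name; };

#if DEMO_RC_CORE
static int demo_get_rc_config(struct demo_device *d)
{
        printf("%s: polling remote control\n", d->name);
        return 0;
}
#else
/* Compiled out: the core sees a NULL hook and skips remote setup entirely. */
#define demo_get_rc_config NULL
#endif

typedef int (*rc_hook_t)(struct demo_device *);

int main(void)
{
        struct demo_device dev = { .name = "demo" };
        rc_hook_t hook = demo_get_rc_config;

        if (hook)
                hook(&dev);
        else
                printf("%s: no remote control support built in\n", dev.name);
        return 0;
}
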
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 47204280b8b3..833847995c65 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm it913x_i2c_algo = {
};
/* Callbacks for DVB USB */
-#define IT913X_POLL 250
+#if IS_ENABLED(CONFIG_RC_CORE)
static int it913x_rc_query(struct dvb_usb_device *d)
{
u8 ibuf[4];
@@ -334,6 +334,25 @@ static int it913x_rc_query(struct dvb_usb_device *d)
return ret;
}
+static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
+{
+ struct it913x_state *st = d->priv;
+
+ if (st->proprietary_ir == false) {
+ rc->map_name = NULL;
+ return 0;
+ }
+
+ rc->allowed_protos = RC_BIT_NEC;
+ rc->query = it913x_rc_query;
+ rc->interval = 250;
+
+ return 0;
+}
+#else
+ #define it913x_get_rc_config NULL
+#endif
+
/* Firmware sets raw */
static const char fw_it9135_v1[] = FW_IT9135_V1;
static const char fw_it9135_v2[] = FW_IT9135_V2;
@@ -643,7 +662,8 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
struct it913x_state *st = d->priv;
int ret = 0;
u8 adap_addr = I2C_BASE_ADDR + (adap->id << 5);
- u16 ep_size = adap->stream.buf_size / 4;
+ u16 ep_size = (adap->pid_filtering) ? TS_BUFFER_SIZE_PID / 4 :
+ TS_BUFFER_SIZE_MAX / 4;
u8 pkt_size = 0x80;
if (d->udev->speed != USB_SPEED_HIGH)
@@ -695,22 +715,6 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
}
/* DVB USB Driver */
-static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
-{
- struct it913x_state *st = d->priv;
-
- if (st->proprietary_ir == false) {
- rc->map_name = NULL;
- return 0;
- }
-
- rc->allowed_protos = RC_BIT_NEC;
- rc->query = it913x_rc_query;
- rc->interval = 250;
-
- return 0;
-}
-
static int it913x_get_adapter_count(struct dvb_usb_device *d)
{
struct it913x_state *st = d->priv;
@@ -779,6 +783,18 @@ static const struct usb_device_id it913x_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006,
&it913x_properties, "ITE 9135(9006) Generic",
RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835,
+ &it913x_properties, "Avermedia A835B(1835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835,
+ &it913x_properties, "Avermedia A835B(2835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835,
+ &it913x_properties, "Avermedia A835B(3835)",
+ RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
+ &it913x_properties, "Avermedia A835B(4835)",
+ RC_MAP_IT913X_V2) },
{} /* Terminating entry */
};
@@ -797,7 +813,7 @@ module_usb_driver(it913x_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.32");
+MODULE_VERSION("1.33");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_IT9135_V1);
MODULE_FIRMWARE(FW_IT9135_V2);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 6427ac359f21..f30c58cecbba 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -81,6 +81,7 @@
#include "dvb-pll.h"
#include "z0194a.h"
#include "m88rs2000.h"
+#include "ts2020.h"
#define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw";
@@ -626,8 +627,8 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
gate = 5;
for (i = 0; i < num; i++) {
- read_o = 1 & (msg[i].flags & I2C_M_RD);
- read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ read_o = msg[i].flags & I2C_M_RD;
+ read = i + 1 < num && msg[i + 1].flags & I2C_M_RD;
read |= read_o;
gate = (msg[i].addr == st->i2c_tuner_addr)
? (read) ? st->i2c_tuner_gate_r
@@ -640,7 +641,8 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
else
obuf[1] = msg[i].len + read + 1;
- obuf[2] = msg[i].addr;
+ obuf[2] = msg[i].addr << 1;
+
if (read) {
if (read_o)
len = 3;
@@ -894,27 +896,27 @@ static int lme2510_kill_urb(struct usb_data_stream *stream)
}
static struct tda10086_config tda10086_config = {
- .demod_address = 0x1c,
+ .demod_address = 0x0e,
.invert = 0,
.diseqc_tone = 1,
.xtal_freq = TDA10086_XTAL_16M,
};
static struct stv0288_config lme_config = {
- .demod_address = 0xd0,
+ .demod_address = 0x68,
.min_delay_ms = 15,
.inittab = s7395_inittab,
};
static struct ix2505v_config lme_tuner = {
- .tuner_address = 0xc0,
+ .tuner_address = 0x60,
.min_delay_ms = 100,
.tuner_gain = 0x0,
.tuner_chargepump = 0x3,
};
static struct stv0299_config sharp_z0194_config = {
- .demod_address = 0xd0,
+ .demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
.invert = 0,
@@ -943,11 +945,15 @@ static int dm04_rs2000_set_ts_param(struct dvb_frontend *fe,
}
static struct m88rs2000_config m88rs2000_config = {
- .demod_addr = 0xd0,
- .tuner_addr = 0xc0,
+ .demod_addr = 0x68,
.set_ts_params = dm04_rs2000_set_ts_param,
};
+static struct ts2020_config ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 7,
+};
+
static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
@@ -1049,7 +1055,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("TUN Found Frontend TDA10086");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 4;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_LG;
if (st->dvb_usb_lme2510_firmware != TUNER_LG) {
st->dvb_usb_lme2510_firmware = TUNER_LG;
@@ -1065,7 +1071,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("FE Found Stv0299");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_S0194;
if (st->dvb_usb_lme2510_firmware != TUNER_S0194) {
st->dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1082,7 +1088,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
info("FE Found Stv0288");
st->i2c_tuner_gate_w = 4;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_S7395;
if (st->dvb_usb_lme2510_firmware != TUNER_S7395) {
st->dvb_usb_lme2510_firmware = TUNER_S7395;
@@ -1097,9 +1103,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
if (adap->fe[0]) {
info("FE Found M88RS2000");
+ dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
+ &d->i2c_adap);
st->i2c_tuner_gate_w = 5;
st->i2c_tuner_gate_r = 5;
- st->i2c_tuner_addr = 0xc0;
+ st->i2c_tuner_addr = 0x60;
st->tuner_config = TUNER_RS2000;
st->fe_set_voltage =
adap->fe[0]->ops.set_voltage;
@@ -1144,7 +1152,7 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
switch (st->tuner_config) {
case TUNER_LG:
- if (dvb_attach(tda826x_attach, adap->fe[0], 0xc0,
+ if (dvb_attach(tda826x_attach, adap->fe[0], 0x60,
&d->i2c_adap, 1))
ret = st->tuner_config;
break;
@@ -1154,7 +1162,7 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
ret = st->tuner_config;
break;
case TUNER_S0194:
- if (dvb_attach(dvb_pll_attach , adap->fe[0], 0xc0,
+ if (dvb_attach(dvb_pll_attach , adap->fe[0], 0x60,
&d->i2c_adap, DVB_PLL_OPERA1))
ret = st->tuner_config;
break;
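
The lmedm04.c hunks convert the demodulator and tuner addresses from the 8-bit "write address" notation (0xd0, 0xc0) to the 7-bit notation Linux I2C uses everywhere else (0x68, 0x60); the driver now shifts left by one only at the point where it builds the byte that actually goes on the wire. A two-line illustration of the relationship:

#include <stdio.h>

int main(void)
{
        unsigned char addr7 = 0x60;              /* 7-bit tuner address, as Linux I2C expects */
        unsigned char wire_write = addr7 << 1;   /* 0xc0: 7-bit address with R/W bit = 0 */

        printf("7-bit 0x%02x -> 8-bit write byte 0x%02x (0xd0 >> 1 = 0x%02x)\n",
               addr7, wire_write, 0xd0 >> 1);
        return 0;
}
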
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index a4c302d0aa37..d98387a3c95e 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -835,6 +835,11 @@ static struct tua9001_config rtl2832u_tua9001_config = {
.i2c_addr = 0x60,
};
+static const struct fc0012_config rtl2832u_fc0012_config = {
+ .i2c_address = 0x63, /* 0xc6 >> 1 */
+ .xtal_freq = FC_XTAL_28_8_MHZ,
+};
+
static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
{
int ret;
@@ -847,7 +852,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
switch (priv->tuner) {
case TUNER_RTL2832_FC0012:
fe = dvb_attach(fc0012_attach, adap->fe[0],
- &d->i2c_adap, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
+ &d->i2c_adap, &rtl2832u_fc0012_config);
/* since fc0012 includes reading the signal strength delegate
* that to the tuner driver */
@@ -1120,7 +1125,7 @@ err:
return ret;
}
-
+#if IS_ENABLED(CONFIG_RC_CORE)
static int rtl2831u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
@@ -1203,7 +1208,11 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
return 0;
}
+#else
+ #define rtl2831u_get_rc_config NULL
+#endif
+#if IS_ENABLED(CONFIG_RC_CORE)
static int rtl2832u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
@@ -1275,6 +1284,9 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
return 0;
}
+#else
+ #define rtl2832u_get_rc_config NULL
+#endif
static const struct dvb_usb_device_properties rtl2831u_props = {
.driver_name = KBUILD_MODNAME,
@@ -1333,13 +1345,13 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_REALTEK, 0x2838,
&rtl2832u_props, "Realtek RTL2832U reference design", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1,
- &rtl2832u_props, "Terratec Cinergy T Stick Black", NULL) },
+ &rtl2832u_props, "TerraTec Cinergy T Stick Black", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_DELOCK_USB2_DVBT,
&rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
- &rtl2832u_props, "NOXON DAB/DAB+ USB dongle", NULL) },
+ &rtl2832u_props, "TerraTec NOXON DAB Stick", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
- &rtl2832u_props, "NOXON DAB/DAB+ USB dongle (rev 2)", NULL) },
+ &rtl2832u_props, "TerraTec NOXON DAB Stick (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
@@ -1352,6 +1364,14 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Dexatek DK mini DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d7,
&rtl2832u_props, "TerraTec Cinergy T Stick+", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd3a8,
+ &rtl2832u_props, "ASUS My Cinema-U3100Mini Plus V2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd393,
+ &rtl2832u_props, "GIGABYTE U7300", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1104,
+ &rtl2832u_props, "Digivox Micro Hd", NULL) },
+ { DVB_USB_DEVICE(USB_VID_COMPRO, 0x0620,
+ &rtl2832u_props, "Compro VideoMate U620F", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index fa0b2931d305..c5d95662e2e1 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -202,8 +202,12 @@ config DVB_USB_TTUSB2
select DVB_TDA10086 if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA826X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA827X if MEDIA_SUBDRV_AUTOSELECT
help
- Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The
+ Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver and
+ the TechnoTrend CT-3650 CI DVB-C/T USB2.0 receiver. The
firmware protocol used by this module is similar to the one used by the
old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
@@ -267,9 +271,11 @@ config DVB_USB_DW2102
select DVB_MT312 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10039 if MEDIA_SUBDRV_AUTOSELECT
select DVB_DS3000 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88RS2000 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the DvbWorld, TeVii, Prof DVB-S/S2 USB2.0
receivers.
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 19b5ed2825d7..bf2a908d74cf 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -561,10 +561,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
}
- if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
- err("could not acquire lock");
- return -EINTR;
- }
+ mutex_lock(&adap->dev->usb_mutex);
st->buf[0] = REQUEST_ENABLE_VIDEO;
/* this bit gives a kind of command,
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 169196ec2d4e..1adf325012f7 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -38,41 +38,41 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
memcpy(&adap->props, &d->props.adapter[n], sizeof(struct dvb_usb_adapter_properties));
- for (o = 0; o < adap->props.num_frontends; o++) {
- struct dvb_usb_adapter_fe_properties *props = &adap->props.fe[o];
- /* speed - when running at FULL speed we need a HW PID filter */
- if (d->udev->speed == USB_SPEED_FULL && !(props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
- err("This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)");
- return -ENODEV;
- }
+ for (o = 0; o < adap->props.num_frontends; o++) {
+ struct dvb_usb_adapter_fe_properties *props = &adap->props.fe[o];
+ /* speed - when running at FULL speed we need a HW PID filter */
+ if (d->udev->speed == USB_SPEED_FULL && !(props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
+ err("This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)");
+ return -ENODEV;
+ }
- if ((d->udev->speed == USB_SPEED_FULL && props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
- (props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
- info("will use the device's hardware PID filter (table count: %d).", props->pid_filter_count);
- adap->fe_adap[o].pid_filtering = 1;
- adap->fe_adap[o].max_feed_count = props->pid_filter_count;
- } else {
- info("will pass the complete MPEG2 transport stream to the software demuxer.");
- adap->fe_adap[o].pid_filtering = 0;
- adap->fe_adap[o].max_feed_count = 255;
- }
+ if ((d->udev->speed == USB_SPEED_FULL && props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
+ (props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
+ info("will use the device's hardware PID filter (table count: %d).", props->pid_filter_count);
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
+ } else {
+ info("will pass the complete MPEG2 transport stream to the software demuxer.");
+ adap->fe_adap[o].pid_filtering = 0;
+ adap->fe_adap[o].max_feed_count = 255;
+ }
- if (!adap->fe_adap[o].pid_filtering &&
- dvb_usb_force_pid_filter_usage &&
- props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
- info("pid filter enabled by module option.");
- adap->fe_adap[o].pid_filtering = 1;
- adap->fe_adap[o].max_feed_count = props->pid_filter_count;
- }
+ if (!adap->fe_adap[o].pid_filtering &&
+ dvb_usb_force_pid_filter_usage &&
+ props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
+ info("pid filter enabled by module option.");
+ adap->fe_adap[o].pid_filtering = 1;
+ adap->fe_adap[o].max_feed_count = props->pid_filter_count;
+ }
- if (props->size_of_priv > 0) {
- adap->fe_adap[o].priv = kzalloc(props->size_of_priv, GFP_KERNEL);
- if (adap->fe_adap[o].priv == NULL) {
- err("no memory for priv for adapter %d fe %d.", n, o);
- return -ENOMEM;
+ if (props->size_of_priv > 0) {
+ adap->fe_adap[o].priv = kzalloc(props->size_of_priv, GFP_KERNEL);
+ if (adap->fe_adap[o].priv == NULL) {
+ err("no memory for priv for adapter %d fe %d.", n, o);
+ return -ENOMEM;
+ }
}
}
- }
if (adap->props.size_of_priv > 0) {
adap->priv = kzalloc(adap->props.size_of_priv, GFP_KERNEL);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 9382895b1b88..9578a6761f1b 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1,9 +1,9 @@
/* DVB USB framework compliant Linux driver for the
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
- * TeVii S600, S630, S650, S660, S480,
+ * TeVii S600, S630, S650, S660, S480, S421, S632
* Prof 1100, 7500,
* Geniatech SU3000 Cards
- * Copyright (C) 2008-2011 Igor M. Liplianin (liplianin@me.by)
+ * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -22,11 +22,13 @@
#include "tda1002x.h"
#include "mt312.h"
#include "zl10039.h"
+#include "ts2020.h"
#include "ds3000.h"
#include "stv0900.h"
#include "stv6110.h"
#include "stb6100.h"
#include "stb6100_proc.h"
+#include "m88rs2000.h"
+#include "ts2020.h"
#ifndef USB_PID_DW2102
#define USB_PID_DW2102 0x2102
@@ -68,6 +71,14 @@
#define USB_PID_PROF_1100 0xb012
#endif
+#ifndef USB_PID_TEVII_S421
+#define USB_PID_TEVII_S421 0xd421
+#endif
+
+#ifndef USB_PID_TEVII_S632
+#define USB_PID_TEVII_S632 0xd632
+#endif
+
#define DW210X_READ_MSG 0
#define DW210X_WRITE_MSG 1
@@ -80,6 +91,15 @@
#define DW2102_RC_QUERY (0x1a00)
#define DW2102_LED_CTRL (0x1b00)
+#define DW2101_FIRMWARE "dvb-usb-dw2101.fw"
+#define DW2102_FIRMWARE "dvb-usb-dw2102.fw"
+#define DW2104_FIRMWARE "dvb-usb-dw2104.fw"
+#define DW3101_FIRMWARE "dvb-usb-dw3101.fw"
+#define S630_FIRMWARE "dvb-usb-s630.fw"
+#define S660_FIRMWARE "dvb-usb-s660.fw"
+#define P1100_FIRMWARE "dvb-usb-p1100.fw"
+#define P7500_FIRMWARE "dvb-usb-p7500.fw"
+
#define err_str "did not find the firmware file. (%s) " \
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
@@ -534,7 +554,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
/*case 0x55: cx24116
case 0x6a: stv0903
- case 0x68: ds3000, stv0903
+ case 0x68: ds3000, stv0903, rs2000
case 0x60: ts2020, stv6110, stb6100
case 0xa0: eeprom */
default: {
@@ -932,6 +952,17 @@ static struct ds3000_config dw2104_ds3000_config = {
.demod_address = 0x68,
};
+static struct ts2020_config dw2104_ts2020_config = {
+ .tuner_address = 0x60,
+ .clk_out_div = 1,
+};
+
+static struct ds3000_config s660_ds3000_config = {
+ .demod_address = 0x68,
+ .ci_mode = 1,
+ .set_lock_led = dw210x_led_ctrl,
+};
+
static struct stv0900_config dw2104a_stv0900_config = {
.demod_address = 0x6a,
.demod_mode = 0,
@@ -981,6 +1012,30 @@ static struct stv0900_config prof_7500_stv0900_config = {
static struct ds3000_config su3000_ds3000_config = {
.demod_address = 0x68,
.ci_mode = 1,
+ .set_lock_led = dw210x_led_ctrl,
+};
+
+static u8 m88rs2000_inittab[] = {
+ DEMOD_WRITE, 0x9a, 0x30,
+ DEMOD_WRITE, 0x00, 0x01,
+ WRITE_DELAY, 0x19, 0x00,
+ DEMOD_WRITE, 0x00, 0x00,
+ DEMOD_WRITE, 0x9a, 0xb0,
+ DEMOD_WRITE, 0x81, 0xc1,
+ DEMOD_WRITE, 0x81, 0x81,
+ DEMOD_WRITE, 0x86, 0xc6,
+ DEMOD_WRITE, 0x9a, 0x30,
+ DEMOD_WRITE, 0xf0, 0x80,
+ DEMOD_WRITE, 0xf1, 0xbf,
+ DEMOD_WRITE, 0xb0, 0x45,
+ DEMOD_WRITE, 0xb2, 0x01,
+ DEMOD_WRITE, 0x9a, 0xb0,
+ 0xff, 0xaa, 0xff
+};
+
+static struct m88rs2000_config s421_m88rs2000_config = {
+ .demod_addr = 0x68,
+ .inittab = m88rs2000_inittab,
};
static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
@@ -1033,6 +1088,8 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe != NULL) {
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config, &d->dev->i2c_adap);
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
info("Attached DS3000!\n");
return 0;
@@ -1139,12 +1196,15 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
struct s6x0_state *st = (struct s6x0_state *)d->dev->priv;
u8 obuf[] = {7, 1};
- d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe == NULL)
return -EIO;
+ dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config,
+ &d->dev->i2c_adap);
+
st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage;
@@ -1182,6 +1242,14 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
+ obuf[1] = 0x02;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+ msleep(300);
+
+ obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 0;
@@ -1205,9 +1273,40 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
if (d->fe_adap[0].fe == NULL)
return -EIO;
- info("Attached DS3000!\n");
+ if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config,
+ &d->dev->i2c_adap)) {
+ info("Attached DS3000/TS2020!\n");
+ return 0;
+ }
- return 0;
+ info("Failed to attach DS3000/TS2020!\n");
+ return -EIO;
+}
+
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+{
+ u8 obuf[] = { 0x51 };
+ u8 ibuf[] = { 0 };
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ err("command 0x51 transfer failed.");
+
+ d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
+ &d->dev->i2c_adap);
+
+ if (d->fe_adap[0].fe == NULL)
+ return -EIO;
+
+ if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ &dw2104_ts2020_config,
+ &d->dev->i2c_adap)) {
+ info("Attached RS2000/TS2020!\n");
+ return 0;
+ }
+
+ info("Failed to attach RS2000/TS2020!\n");
+ return -EIO;
}
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
@@ -1447,6 +1546,8 @@ enum dw2102_table_entry {
TEVII_S480_1,
TEVII_S480_2,
X3M_SPC1400HD,
+ TEVII_S421,
+ TEVII_S632,
};
static struct usb_device_id dw2102_table[] = {
@@ -1465,6 +1566,8 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
[TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
[X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
+ [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
+ [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
{ }
};
@@ -1478,13 +1581,12 @@ static int dw2102_load_firmware(struct usb_device *dev,
u8 reset;
u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
const struct firmware *fw;
- const char *fw_2101 = "dvb-usb-dw2101.fw";
switch (dev->descriptor.idProduct) {
case 0x2101:
- ret = request_firmware(&fw, fw_2101, &dev->dev);
+ ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev);
if (ret != 0) {
- err(err_str, fw_2101);
+ err(err_str, DW2101_FIRMWARE);
return ret;
}
break;
@@ -1586,7 +1688,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
static struct dvb_usb_device_properties dw2102_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw2102.fw",
+ .firmware = DW2102_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
@@ -1641,7 +1743,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
static struct dvb_usb_device_properties dw2104_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw2104.fw",
+ .firmware = DW2104_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
@@ -1691,7 +1793,7 @@ static struct dvb_usb_device_properties dw2104_properties = {
static struct dvb_usb_device_properties dw3101_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .firmware = "dvb-usb-dw3101.fw",
+ .firmware = DW3101_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
@@ -1739,7 +1841,7 @@ static struct dvb_usb_device_properties s6x0_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.size_of_priv = sizeof(struct s6x0_state),
- .firmware = "dvb-usb-s630.fw",
+ .firmware = S630_FIRMWARE,
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
@@ -1814,6 +1916,19 @@ static struct dvb_usb_device_description d7500 = {
{NULL},
};
+struct dvb_usb_device_properties *s421;
+static struct dvb_usb_device_description d421 = {
+ "TeVii S421 PCI",
+ {&dw2102_table[TEVII_S421], NULL},
+ {NULL},
+};
+
+static struct dvb_usb_device_description d632 = {
+ "TeVii S632 USB",
+ {&dw2102_table[TEVII_S632], NULL},
+ {NULL},
+};
+
static struct dvb_usb_device_properties su3000_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
@@ -1879,7 +1994,7 @@ static int dw2102_probe(struct usb_interface *intf,
return -ENOMEM;
/* copy default structure */
/* fill only different fields */
- p1100->firmware = "dvb-usb-p1100.fw";
+ p1100->firmware = P1100_FIRMWARE;
p1100->devices[0] = d1100;
p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
@@ -1891,7 +2006,7 @@ static int dw2102_probe(struct usb_interface *intf,
kfree(p1100);
return -ENOMEM;
}
- s660->firmware = "dvb-usb-s660.fw";
+ s660->firmware = S660_FIRMWARE;
s660->num_device_descs = 3;
s660->devices[0] = d660;
s660->devices[1] = d480_1;
@@ -1905,12 +2020,26 @@ static int dw2102_probe(struct usb_interface *intf,
kfree(s660);
return -ENOMEM;
}
- p7500->firmware = "dvb-usb-p7500.fw";
+ p7500->firmware = P7500_FIRMWARE;
p7500->devices[0] = d7500;
p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
+
+ s421 = kmemdup(&su3000_properties,
+ sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ if (!s421) {
+ kfree(p1100);
+ kfree(s660);
+ kfree(p7500);
+ return -ENOMEM;
+ }
+ s421->num_device_descs = 2;
+ s421->devices[0] = d421;
+ s421->devices[1] = d632;
+ s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach;
+
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &dw2104_properties,
@@ -1925,6 +2054,8 @@ static int dw2102_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, p7500,
THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, s421,
+ THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &su3000_properties,
THIS_MODULE, NULL, adapter_nr))
return 0;
@@ -1943,9 +2074,17 @@ module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
- " DVB-C 3101 USB2.0,"
- " TeVii S600, S630, S650, S660, S480,"
- " Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000 devices");
+ " DVB-C 3101 USB2.0,"
+ " TeVii S600, S630, S650, S660, S480, S421, S632"
+ " Prof 1100, 7500 USB2.0,"
+ " Geniatech SU3000 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DW2101_FIRMWARE);
+MODULE_FIRMWARE(DW2102_FIRMWARE);
+MODULE_FIRMWARE(DW2104_FIRMWARE);
+MODULE_FIRMWARE(DW3101_FIRMWARE);
+MODULE_FIRMWARE(S630_FIRMWARE);
+MODULE_FIRMWARE(S660_FIRMWARE);
+MODULE_FIRMWARE(P1100_FIRMWARE);
+MODULE_FIRMWARE(P7500_FIRMWARE);
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 90a70c66a96e..d56f927fc31a 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -421,11 +421,10 @@ struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
/* setup the state */
state->i2c = &d->i2c_adap;
- memcpy(&state->config, &friio_fe_config, sizeof(friio_fe_config));
+ state->config = friio_fe_config;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &jdvbt90502_ops,
- sizeof(jdvbt90502_ops));
+ state->frontend.ops = jdvbt90502_ops;
state->frontend.demodulator_priv = state;
if (jdvbt90502_init(&state->frontend) < 0)
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 661bb75be955..92afeb20650f 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -16,6 +16,7 @@
#include "qt1010.h"
#include "tda1004x.h"
#include "tda827x.h"
+#include "mt2060.h"
#include <media/tuner.h>
#include "tuner-simple.h"
@@ -63,23 +64,33 @@ static inline int m920x_write(struct usb_device *udev, u8 request,
return ret;
}
+static inline int m920x_write_seq(struct usb_device *udev, u8 request,
+ struct m920x_inits *seq)
+{
+ int ret = 0;
+ while (seq->address) {
+ ret = m920x_write(udev, request, seq->data, seq->address);
+ if (ret != 0)
+ return ret;
+
+ seq++;
+ }
+
+ return ret;
+}
+
static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
{
int ret = 0, i, epi, flags = 0;
int adap_enabled[M9206_MAX_ADAPTERS] = { 0 };
/* Remote controller init. */
- if (d->props.rc.legacy.rc_query) {
+ if (d->props.rc.legacy.rc_query || d->props.rc.core.rc_query) {
deb("Initialising remote control\n");
- while (rc_seq->address) {
- if ((ret = m920x_write(d->udev, M9206_CORE,
- rc_seq->data,
- rc_seq->address)) != 0) {
- deb("Initialising remote control failed\n");
- return ret;
- }
-
- rc_seq++;
+ ret = m920x_write_seq(d->udev, M9206_CORE, rc_seq);
+ if (ret != 0) {
+ deb("Initialising remote control failed\n");
+ return ret;
}
deb("Initialising remote control success\n");
@@ -130,9 +141,50 @@ static int m920x_init_ep(struct usb_interface *intf)
alt->desc.bAlternateSetting);
}
-static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static inline void m920x_parse_rc_state(struct dvb_usb_device *d, u8 rc_state,
+ int *state)
{
struct m920x_state *m = d->priv;
+
+ switch (rc_state) {
+ case 0x80:
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+
+ case 0x88: /* framing error or "invalid code" */
+ case 0x99:
+ case 0xc0:
+ case 0xd8:
+ *state = REMOTE_NO_KEY_PRESSED;
+ m->rep_count = 0;
+ break;
+
+ case 0x93:
+ case 0x92:
+ case 0x83: /* pinnacle PCTV310e */
+ case 0x82:
+ m->rep_count = 0;
+ *state = REMOTE_KEY_PRESSED;
+ break;
+
+ case 0x91:
+ case 0x81: /* pinnacle PCTV310e */
+ /* prevent immediate auto-repeat */
+ if (++m->rep_count > 2)
+ *state = REMOTE_KEY_REPEAT;
+ else
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+
+ default:
+ deb("Unexpected rc state %02x\n", rc_state);
+ *state = REMOTE_NO_KEY_PRESSED;
+ break;
+ }
+}
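The 0x91/0x81 branch above implements a small debounce: the first two "key still held" reports after a press are reported as no key before repeats are forwarded. A throwaway sketch of that counter behaviour (illustrative only, plain C, not driver code):

#include <stdio.h>

/* Illustrative only: models the rep_count debounce used by
 * m920x_parse_rc_state() for the "key held" codes (0x91/0x81).
 * rep_count is reset to 0 whenever a "key pressed" code arrives. */
int main(void)
{
	int rep_count = 0;
	int i;

	for (i = 0; i < 5; i++) {
		if (++rep_count > 2)
			printf("report %d: REMOTE_KEY_REPEAT\n", i);
		else
			printf("report %d: REMOTE_NO_KEY_PRESSED (debounced)\n", i);
	}
	return 0;
}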
+
+static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+{
int i, ret = 0;
u8 *rc_state;
@@ -140,51 +192,22 @@ static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
if (!rc_state)
return -ENOMEM;
- if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0)
+ ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE,
+ rc_state, 1);
+ if (ret != 0)
goto out;
- if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0)
+ ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY,
+ rc_state + 1, 1);
+ if (ret != 0)
goto out;
+ m920x_parse_rc_state(d, rc_state[0], state);
+
for (i = 0; i < d->props.rc.legacy.rc_map_size; i++)
if (rc5_data(&d->props.rc.legacy.rc_map_table[i]) == rc_state[1]) {
*event = d->props.rc.legacy.rc_map_table[i].keycode;
-
- switch(rc_state[0]) {
- case 0x80:
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
-
- case 0x88: /* framing error or "invalid code" */
- case 0x99:
- case 0xc0:
- case 0xd8:
- *state = REMOTE_NO_KEY_PRESSED;
- m->rep_count = 0;
- goto out;
-
- case 0x93:
- case 0x92:
- case 0x83: /* pinnacle PCTV310e */
- case 0x82:
- m->rep_count = 0;
- *state = REMOTE_KEY_PRESSED;
- goto out;
-
- case 0x91:
- case 0x81: /* pinnacle PCTV310e */
- /* prevent immediate auto-repeat */
- if (++m->rep_count > 2)
- *state = REMOTE_KEY_REPEAT;
- else
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
-
- default:
- deb("Unexpected rc state %02x\n", rc_state[0]);
- *state = REMOTE_NO_KEY_PRESSED;
- goto out;
- }
+ goto out;
}
if (rc_state[1] != 0)
@@ -197,6 +220,38 @@ static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return ret;
}
+static int m920x_rc_core_query(struct dvb_usb_device *d)
+{
+ int ret = 0;
+ u8 *rc_state;
+ int state;
+
+ rc_state = kmalloc(2, GFP_KERNEL);
+ if (!rc_state)
+ return -ENOMEM;
+
+ if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, &rc_state[0], 1)) != 0)
+ goto out;
+
+ if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, &rc_state[1], 1)) != 0)
+ goto out;
+
+ deb("state=0x%02x keycode=0x%02x\n", rc_state[0], rc_state[1]);
+
+ m920x_parse_rc_state(d, rc_state[0], &state);
+
+ if (state == REMOTE_NO_KEY_PRESSED)
+ rc_keyup(d->rc_dev);
+ else if (state == REMOTE_KEY_REPEAT)
+ rc_repeat(d->rc_dev);
+ else
+ rc_keydown(d->rc_dev, rc_state[1], 0);
+
+out:
+ kfree(rc_state);
+ return ret;
+}
+
/* I2C */
static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
@@ -496,6 +551,12 @@ static struct qt1010_config m920x_qt1010_config = {
.i2c_address = 0x62
};
+static struct mt2060_config m920x_mt2060_config = {
+ .i2c_address = 0x60, /* 0xc0 */
+ .clock_out = 0,
+};
+
+
/* Callbacks for DVB USB */
static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap)
{
@@ -510,6 +571,37 @@ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int m920x_mt352_frontend_attach_vp7049(struct dvb_usb_adapter *adap)
+{
+ struct m920x_inits vp7049_fe_init_seq[] = {
+ /* XXX without these commands the frontend cannot be detected,
+ * they must be sent BEFORE the frontend is attached */
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff21, 0x20 },
+ { 0xff21, 0x60 },
+ { 0xff28, 0x00 },
+ { 0xff22, 0x00 },
+ { 0xff20, 0x30 },
+ { 0xff20, 0x20 },
+ { 0xff20, 0x30 },
+ { } /* terminating entry */
+ };
+ int ret;
+
+ deb("%s\n", __func__);
+
+ ret = m920x_write_seq(adap->dev->udev, M9206_CORE, vp7049_fe_init_seq);
+ if (ret != 0) {
+ deb("Initialization of vp7049 frontend failed.");
+ return ret;
+ }
+
+ return m920x_mt352_frontend_attach(adap);
+}
+
static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap)
{
deb("%s\n",__func__);
@@ -574,6 +666,18 @@ static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int m920x_mt2060_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ deb("%s\n", __func__);
+
+ if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
+ &m920x_mt2060_config, 1220) == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+
/* device-specific initialization */
static struct m920x_inits megasky_rc_init [] = {
{ M9206_RC_INIT2, 0xa8 },
@@ -591,7 +695,7 @@ static struct m920x_inits tvwalkertwin_rc_init [] = {
};
static struct m920x_inits pinnacle310e_init[] = {
- /* without these the tuner don't work */
+ /* without these the tuner doesn't work */
{ 0xff20, 0x9b },
{ 0xff22, 0x70 },
@@ -602,6 +706,15 @@ static struct m920x_inits pinnacle310e_init[] = {
{ } /* terminating entry */
};
+static struct m920x_inits vp7049_rc_init[] = {
+ { 0xff28, 0x00 },
+ { 0xff23, 0x00 },
+ { 0xff21, 0x70 },
+ { M9206_RC_INIT2, 0x00 },
+ { M9206_RC_INIT1, 0xff },
+ { } /* terminating entry */
+};
+
/* ir keymaps */
static struct rc_map_table rc_map_megasky_table[] = {
{ 0x0012, KEY_POWER },
@@ -704,6 +817,7 @@ static struct dvb_usb_device_properties digivox_mini_ii_properties;
static struct dvb_usb_device_properties tvwalkertwin_properties;
static struct dvb_usb_device_properties dposh_properties;
static struct dvb_usb_device_properties pinnacle_pctv310e_properties;
+static struct dvb_usb_device_properties vp7049_properties;
static int m920x_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -756,6 +870,13 @@ static int m920x_probe(struct usb_interface *intf,
goto found;
}
+ ret = dvb_usb_device_init(intf, &vp7049_properties,
+ THIS_MODULE, &d, adapter_nr);
+ if (ret == 0) {
+ rc_init_seq = vp7049_rc_init;
+ goto found;
+ }
+
return ret;
} else {
/* Another interface on a multi-tuner device */
@@ -787,6 +908,7 @@ static struct usb_device_id m920x_table [] = {
{ USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_COLD) },
{ USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_WARM) },
{ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_PINNACLE_PCTV310E) },
+ { USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_TWINHAN_VP7049) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, m920x_table);
@@ -1079,6 +1201,61 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
}
};
+static struct dvb_usb_device_properties vp7049_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-vp7049-0.95.fw",
+ .download_firmware = m920x_firmware_download,
+
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
+ .rc_query = m920x_rc_core_query,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ },
+
+ .size_of_priv = sizeof(struct m920x_state),
+
+ .identify_state = m920x_identify_state,
+ .num_adapters = 1,
+ .adapter = {{
+ .num_frontends = 1,
+ .fe = {{
+
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 8,
+ .pid_filter = m920x_pid_filter,
+ .pid_filter_ctrl = m920x_pid_filter_ctrl,
+
+ .frontend_attach = m920x_mt352_frontend_attach_vp7049,
+ .tuner_attach = m920x_mt2060_tuner_attach,
+
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x81,
+ .u = {
+ .bulk = {
+ .buffersize = 512,
+ }
+ }
+ },
+ } },
+ } },
+ .i2c_algo = &m920x_i2c_algo,
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DTV-DVB UDTT7049",
+ { &m920x_table[7], NULL },
+ { NULL },
+ }
+ }
+};
+
static struct usb_driver m920x_driver = {
.name = "dvb_usb_m920x",
.probe = m920x_probe,
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index bcdac225ebe1..2ce3d19c58ef 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -620,6 +620,8 @@ static struct usb_device_id ttusb2_table [] = {
USB_PID_TECHNOTREND_CONNECT_S2400) },
{ USB_DEVICE(USB_VID_TECHNOTREND,
USB_PID_TECHNOTREND_CONNECT_CT3650) },
+ { USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, ttusb2_table);
@@ -721,12 +723,16 @@ static struct dvb_usb_device_properties ttusb2_properties_s2400 = {
.generic_bulk_ctrl_endpoint = 0x01,
- .num_device_descs = 1,
+ .num_device_descs = 2,
.devices = {
{ "Technotrend TT-connect S-2400",
{ &ttusb2_table[2], NULL },
{ NULL },
},
+ { "Technotrend TT-connect S-2400 (8kB EEPROM)",
+ { &ttusb2_table[4], NULL },
+ { NULL },
+ },
}
};
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index 7a5bd61bd3bb..c754a80a8d8b 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_EM28XX
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
@@ -34,6 +34,7 @@ config VIDEO_EM28XX_DVB
tristate "DVB/ATSC Support for em28xx based TV cards"
depends on VIDEO_EM28XX && DVB_CORE
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S921 if MEDIA_SUBDRV_AUTOSELECT
@@ -43,7 +44,10 @@ config VIDEO_EM28XX_DVB
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10071 if MEDIA_SUBDRV_AUTOSELECT
select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF_DVB
+ select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 619bffbab3bc..54a03b20de6e 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -6,6 +6,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -56,10 +57,16 @@ module_param(disable_usb_speed_check, int, 0444);
MODULE_PARM_DESC(disable_usb_speed_check,
"override min bandwidth requirement of 480M bps");
-static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+static int usb_xfer_mode = -1;
+module_param(usb_xfer_mode, int, 0444);
+MODULE_PARM_DESC(usb_xfer_mode,
+ "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)");
+
+
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
static unsigned long em28xx_devused;
@@ -486,7 +493,7 @@ struct em28xx_board em28xx_boards[] = {
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = SAA7115_COMPOSITE2,
- .amux = EM28XX_AMUX_LINE_IN,
+ .amux = EM28XX_AMUX_VIDEO,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = SAA7115_COMPOSITE0,
@@ -2073,6 +2080,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
@@ -2905,7 +2914,7 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.ir_codes && !disable_ir)
+ if ((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir)
request_module("em28xx-rc");
#endif /* CONFIG_MODULES */
}
@@ -2934,6 +2943,8 @@ void em28xx_release_resources(struct em28xx *dev)
em28xx_i2c_unregister(dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
v4l2_device_unregister(&dev->v4l2_dev);
usb_put_dev(dev->udev);
@@ -2950,9 +2961,14 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
struct usb_interface *interface,
int minor)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
int retval;
+ static const char *default_chip_name = "em28xx";
+ const char *chip_name = default_chip_name;
dev->udev = udev;
+ mutex_init(&dev->vb_queue_lock);
+ mutex_init(&dev->vb_vbi_queue_lock);
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
@@ -2978,51 +2994,62 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
switch (dev->chip_id) {
case CHIP_ID_EM2800:
- em28xx_info("chip ID is em2800\n");
+ chip_name = "em2800";
break;
case CHIP_ID_EM2710:
- em28xx_info("chip ID is em2710\n");
+ chip_name = "em2710";
break;
case CHIP_ID_EM2750:
- em28xx_info("chip ID is em2750\n");
+ chip_name = "em2750";
break;
case CHIP_ID_EM2820:
- em28xx_info("chip ID is em2820 (or em2710)\n");
+ chip_name = "em2710/2820";
break;
case CHIP_ID_EM2840:
- em28xx_info("chip ID is em2840\n");
+ chip_name = "em2840";
break;
case CHIP_ID_EM2860:
- em28xx_info("chip ID is em2860\n");
+ chip_name = "em2860";
break;
case CHIP_ID_EM2870:
- em28xx_info("chip ID is em2870\n");
+ chip_name = "em2870";
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2874:
- em28xx_info("chip ID is em2874\n");
+ chip_name = "em2874";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
case CHIP_ID_EM28174:
- em28xx_info("chip ID is em28174\n");
+ chip_name = "em28174";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2883:
- em28xx_info("chip ID is em2882/em2883\n");
+ chip_name = "em2882/3";
dev->wait_after_write = 0;
break;
case CHIP_ID_EM2884:
- em28xx_info("chip ID is em2884\n");
+ chip_name = "em2884";
dev->reg_gpio_num = EM2874_R80_GPIO;
dev->wait_after_write = 0;
break;
default:
- em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
+ printk(KERN_INFO DRIVER_NAME
+ ": unknown em28xx chip ID (%d)\n", dev->chip_id);
}
}
+ if (chip_name != default_chip_name)
+ printk(KERN_INFO DRIVER_NAME
+ ": chip ID is %s\n", chip_name);
+
+ /*
+ * For em2820/em2710, the name may change later, after checking
+ * whether the device has a sensor (i.e., it is an em2710) or not.
+ */
+ snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+
if (dev->is_audio_only) {
retval = em28xx_audio_setup(dev);
if (retval)
@@ -3039,6 +3066,14 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
em28xx_pre_card_setup(dev);
+ if (dev->chip_id == CHIP_ID_EM2820) {
+ if (dev->board.is_webcam)
+ chip_name = "em2710";
+ else
+ chip_name = "em2820";
+ snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+ }
+
if (!dev->board.is_em2800) {
/* Resets I2C speed */
retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
@@ -3056,6 +3091,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
return retval;
}
+ v4l2_ctrl_handler_init(hdl, 4);
+ dev->v4l2_dev.ctrl_handler = hdl;
+
/* register i2c bus */
retval = em28xx_i2c_register(dev);
if (retval < 0) {
@@ -3081,6 +3119,18 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
__func__, retval);
goto fail;
}
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+ } else {
+ /* install the em28xx notify callback */
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
+ em28xx_ctrl_notify, dev);
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
+ em28xx_ctrl_notify, dev);
+ }
/* wake i2c devices */
em28xx_wake_i2c(dev);
@@ -3110,6 +3160,11 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
msleep(3);
}
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
+ retval = dev->ctrl_handler.error;
+ if (retval)
+ goto fail;
+
retval = em28xx_register_analog_devices(dev);
if (retval < 0) {
goto fail;
@@ -3122,6 +3177,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
fail:
em28xx_i2c_unregister(dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
unregister_dev:
v4l2_device_unregister(&dev->v4l2_dev);
@@ -3143,7 +3199,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
struct em28xx *dev = NULL;
int retval;
bool has_audio = false, has_video = false, has_dvb = false;
- int i, nr;
+ int i, nr, try_bulk;
const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
@@ -3183,9 +3239,10 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
/* compute alternate max packet sizes */
- dev->alt_max_pkt_size = kmalloc(sizeof(dev->alt_max_pkt_size[0]) *
+ dev->alt_max_pkt_size_isoc =
+ kmalloc(sizeof(dev->alt_max_pkt_size_isoc[0]) *
interface->num_altsetting, GFP_KERNEL);
- if (dev->alt_max_pkt_size == NULL) {
+ if (dev->alt_max_pkt_size_isoc == NULL) {
em28xx_errdev("out of memory!\n");
kfree(dev);
retval = -ENOMEM;
@@ -3208,25 +3265,67 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (udev->speed == USB_SPEED_HIGH)
size = size * hb_mult(sizedescr);
- if (usb_endpoint_xfer_isoc(e) &&
- usb_endpoint_dir_in(e)) {
+ if (usb_endpoint_dir_in(e)) {
switch (e->bEndpointAddress) {
- case EM28XX_EP_AUDIO:
- has_audio = true;
- break;
- case EM28XX_EP_ANALOG:
+ case 0x82:
has_video = true;
- dev->alt_max_pkt_size[i] = size;
+ if (usb_endpoint_xfer_isoc(e)) {
+ dev->analog_ep_isoc =
+ e->bEndpointAddress;
+ dev->alt_max_pkt_size_isoc[i] = size;
+ } else if (usb_endpoint_xfer_bulk(e)) {
+ dev->analog_ep_bulk =
+ e->bEndpointAddress;
+ }
break;
- case EM28XX_EP_DIGITAL:
- has_dvb = true;
- if (size > dev->dvb_max_pkt_size) {
- dev->dvb_max_pkt_size = size;
- dev->dvb_alt = i;
+ case 0x83:
+ if (usb_endpoint_xfer_isoc(e)) {
+ has_audio = true;
+ } else {
+ printk(KERN_INFO DRIVER_NAME
+ ": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
+ }
+ break;
+ case 0x84:
+ if (has_video &&
+ (usb_endpoint_xfer_bulk(e))) {
+ dev->analog_ep_bulk =
+ e->bEndpointAddress;
+ } else {
+ has_dvb = true;
+ if (usb_endpoint_xfer_isoc(e)) {
+ dev->dvb_ep_isoc = e->bEndpointAddress;
+ if (size > dev->dvb_max_pkt_size_isoc) {
+ dev->dvb_max_pkt_size_isoc = size;
+ dev->dvb_alt_isoc = i;
+ }
+ } else {
+ dev->dvb_ep_bulk = e->bEndpointAddress;
+ }
}
break;
}
}
+ /* NOTE:
+ * Old logic with support for isoc transfers only was:
+ * 0x82 isoc => analog
+ * 0x83 isoc => audio
+ * 0x84 isoc => digital
+ *
+ * New logic with support for bulk transfers
+ * 0x82 isoc => analog
+ * 0x82 bulk => analog
+ * 0x83 isoc* => audio
+ * 0x84 isoc => digital
+ * 0x84 bulk => analog or digital**
+ * (*: audio should always be isoc)
+ * (**: analog, if ep 0x82 is isoc, otherwise digital)
+ *
+ * The new logic preserves backwards compatibility and
+ * reflects the endpoint configurations we have seen
+ * so far. But there might be devices for which this
+ * logic is not sufficient...
+ */
}
}
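The NOTE above is the only prose statement of the endpoint mapping that the probe loop implements. As a compact illustration of those rules (a standalone sketch, not part of the patch; the enum, the helper name and the has_isoc_video flag are hypothetical and simply mirror the NOTE):

#include <stdint.h>

/* Hypothetical sketch of the endpoint rules from the NOTE above;
 * has_isoc_video says whether ep 0x82 offered an isoc alt setting. */
enum ep_role { EP_ANALOG, EP_AUDIO, EP_DIGITAL, EP_IGNORED };

static enum ep_role classify_ep(uint8_t addr, int is_isoc, int has_isoc_video)
{
	switch (addr) {
	case 0x82:				/* analog video, isoc or bulk */
		return EP_ANALOG;
	case 0x83:				/* audio must be isoc */
		return is_isoc ? EP_AUDIO : EP_IGNORED;
	case 0x84:				/* bulk ep 0x84 backs analog only
						 * when ep 0x82 is isoc */
		if (!is_isoc && has_isoc_video)
			return EP_ANALOG;
		return EP_DIGITAL;		/* isoc or bulk DVB */
	default:
		return EP_IGNORED;
	}
}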
@@ -3261,19 +3360,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting->desc.bInterfaceNumber);
- if (has_audio)
- printk(KERN_INFO DRIVER_NAME
- ": Audio Vendor Class interface %i found\n",
- ifnum);
- if (has_video)
- printk(KERN_INFO DRIVER_NAME
- ": Video interface %i found\n",
- ifnum);
- if (has_dvb)
- printk(KERN_INFO DRIVER_NAME
- ": DVB interface %i found\n",
- ifnum);
-
/*
* Make sure we have 480 Mbps of bandwidth, otherwise things like
* video stream wouldn't likely work, since 12 Mbps is generally
@@ -3287,7 +3373,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err_free;
}
- snprintf(dev->name, sizeof(dev->name), "em28xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
@@ -3304,6 +3389,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
}
+ if (has_audio)
+ printk(KERN_INFO DRIVER_NAME
+ ": Audio interface %i found %s\n",
+ ifnum,
+ dev->has_audio_class ? "(USB Audio Class)" : "(Vendor Class)");
+ if (has_video)
+ printk(KERN_INFO DRIVER_NAME
+ ": Video interface %i found:%s%s\n",
+ ifnum,
+ dev->analog_ep_bulk ? " bulk" : "",
+ dev->analog_ep_isoc ? " isoc" : "");
+ if (has_dvb)
+ printk(KERN_INFO DRIVER_NAME
+ ": DVB interface %i found:%s%s\n",
+ ifnum,
+ dev->dvb_ep_bulk ? " bulk" : "",
+ dev->dvb_ep_isoc ? " isoc" : "");
+
dev->num_alt = interface->num_altsetting;
if ((unsigned)card[nr] < em28xx_bcount)
@@ -3312,6 +3415,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
+ /* initialize videobuf2 stuff */
+ em28xx_vb2_setup(dev);
+
/* allocate device struct */
mutex_init(&dev->lock);
mutex_lock(&dev->lock);
@@ -3320,13 +3426,46 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto unlock_and_free;
}
+ if (usb_xfer_mode < 0) {
+ if (dev->board.is_webcam)
+ try_bulk = 1;
+ else
+ try_bulk = 0;
+ } else {
+ try_bulk = usb_xfer_mode > 0;
+ }
+
+ /* Select USB transfer types to use */
+ if (has_video) {
+ if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
+ dev->analog_xfer_bulk = 1;
+ em28xx_info("analog set to %s mode.\n",
+ dev->analog_xfer_bulk ? "bulk" : "isoc");
+ }
if (has_dvb) {
- /* pre-allocate DVB isoc transfer buffers */
- retval = em28xx_alloc_isoc(dev, EM28XX_DIGITAL_MODE,
- EM28XX_DVB_MAX_PACKETS,
- EM28XX_DVB_NUM_BUFS,
- dev->dvb_max_pkt_size);
+ if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
+ dev->dvb_xfer_bulk = 1;
+
+ em28xx_info("dvb set to %s mode.\n",
+ dev->dvb_xfer_bulk ? "bulk" : "isoc");
+
+ /* pre-allocate DVB usb transfer buffers */
+ if (dev->dvb_xfer_bulk) {
+ retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ 512,
+ EM28XX_DVB_BULK_PACKET_MULTIPLIER);
+ } else {
+ retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dev->dvb_max_pkt_size_isoc,
+ EM28XX_DVB_NUM_ISOC_PACKETS);
+ }
if (retval) {
+ printk(DRIVER_NAME
+ ": Failed to pre-allocate USB transfer buffers for DVB.\n");
goto unlock_and_free;
}
}
@@ -3344,7 +3483,7 @@ unlock_and_free:
mutex_unlock(&dev->lock);
err_free:
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
err:
@@ -3370,6 +3509,8 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
if (!dev)
return;
+ dev->disconnected = 1;
+
if (dev->is_audio_only) {
mutex_lock(&dev->lock);
em28xx_close_extension(dev);
@@ -3381,35 +3522,28 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
flush_request_modules(dev);
- /* wait until all current v4l2 io is finished then deallocate
- resources */
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
if (dev->users) {
- em28xx_warn
- ("device %s is open! Deregistration and memory "
- "deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
-
- dev->state |= DEV_MISCONFIGURED;
- em28xx_uninit_isoc(dev, dev->mode);
- dev->state |= DEV_DISCONNECTED;
- } else {
- dev->state |= DEV_DISCONNECTED;
- em28xx_release_resources(dev);
+ em28xx_warn("device %s is open! Deregistration and memory deallocation are deferred on close.\n",
+ video_device_node_name(dev->vdev));
+
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
}
- /* free DVB isoc buffers */
- em28xx_uninit_isoc(dev, EM28XX_DIGITAL_MODE);
+ em28xx_close_extension(dev);
+ /* NOTE: must be called BEFORE the resources are released */
- mutex_unlock(&dev->lock);
+ if (!dev->users)
+ em28xx_release_resources(dev);
- em28xx_close_extension(dev);
+ mutex_unlock(&dev->lock);
if (!dev->users) {
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
}
}
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index bed07a6c33f8..aaedd11791f2 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -5,6 +5,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -76,7 +77,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
int ret;
int pipe = usb_rcvctrlpipe(dev->udev, 0);
- if (dev->state & DEV_DISCONNECTED)
+ if (dev->disconnected)
return -ENODEV;
if (len > URB_MAX_CTRL_SIZE)
@@ -100,7 +101,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
if (reg_debug)
printk(" failed!\n");
mutex_unlock(&dev->ctrl_urb_lock);
- return ret;
+ return usb_translate_errors(ret);
}
if (len)
@@ -152,7 +153,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int ret;
int pipe = usb_sndctrlpipe(dev->udev, 0);
- if (dev->state & DEV_DISCONNECTED)
+ if (dev->disconnected)
return -ENODEV;
if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
@@ -181,6 +182,9 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
0x0000, reg, dev->urb_buf, len, HZ);
mutex_unlock(&dev->ctrl_urb_lock);
+ if (ret < 0)
+ return usb_translate_errors(ret);
+
if (dev->wait_after_write)
msleep(dev->wait_after_write);
@@ -729,22 +733,24 @@ static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
}
-static int em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
+static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
u16 width, u16 height)
{
- u8 cwidth = width;
- u8 cheight = height;
- u8 overflow = (height >> 7 & 0x02) | (width >> 8 & 0x01);
+ u8 cwidth = width >> 2;
+ u8 cheight = height >> 2;
+ u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
+ /* NOTE: size limit: 2047x1023 = 2MPix */
- em28xx_coredbg("em28xx Area Set: (%d,%d)\n",
- (width | (overflow & 2) << 7),
- (height | (overflow & 1) << 8));
+ em28xx_coredbg("capture area set to (%d,%d): %dx%d\n",
+ hstart, vstart,
+ ((overflow & 2) << 9 | cwidth << 2),
+ ((overflow & 1) << 10 | cheight << 2));
em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
- return em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
+ em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
}
static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
@@ -797,28 +803,30 @@ int em28xx_resolution_set(struct em28xx *dev)
it out, we end up with the same format as the rest of the VBI
region */
if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 2, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 2, width, height);
else
- em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 0, width, height);
return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
}
+/* Set USB alternate setting for analog video */
int em28xx_set_alternate(struct em28xx *dev)
{
- int errCode, prev_alt = dev->alt;
+ int errCode;
int i;
unsigned int min_pkt_size = dev->width * 2 + 4;
- /*
- * alt = 0 is used only for control messages, so, only values
- * greater than 0 can be used for streaming.
- */
- if (alt && alt < dev->num_alt) {
+ /* NOTE: for isoc transfers, only alt settings > 0 are allowed;
+ bulk transfers seem to work only with alt=0! */
+ dev->alt = 0;
+ if ((alt > 0) && (alt < dev->num_alt)) {
em28xx_coredbg("alternate forced to %d\n", dev->alt);
dev->alt = alt;
goto set_alt;
}
+ if (dev->analog_xfer_bulk)
+ goto set_alt;
/* When image size is bigger than a certain value,
the frame size should be increased, otherwise, only
@@ -829,30 +837,38 @@ int em28xx_set_alternate(struct em28xx *dev)
for (i = 0; i < dev->num_alt; i++) {
/* stop when the selected alt setting offers enough bandwidth */
- if (dev->alt_max_pkt_size[i] >= min_pkt_size) {
+ if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
dev->alt = i;
break;
/* otherwise make sure that we end up with the maximum bandwidth
because the min_pkt_size equation might be wrong...
*/
- } else if (dev->alt_max_pkt_size[i] >
- dev->alt_max_pkt_size[dev->alt])
+ } else if (dev->alt_max_pkt_size_isoc[i] >
+ dev->alt_max_pkt_size_isoc[dev->alt])
dev->alt = i;
}
set_alt:
- if (dev->alt != prev_alt) {
+ /* NOTE: for bulk transfers, we need to call usb_set_interface()
+ * even if the previous settings were the same. Otherwise streaming
+ * fails with all urbs having status = -EOVERFLOW! */
+ if (dev->analog_xfer_bulk) {
+ dev->max_pkt_size = 512; /* USB 2.0 spec */
+ dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
+ } else { /* isoc */
em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
- min_pkt_size, dev->alt);
- dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt];
- em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
- dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, 0, dev->alt);
- if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
- return errCode;
- }
+ min_pkt_size, dev->alt);
+ dev->max_pkt_size =
+ dev->alt_max_pkt_size_isoc[dev->alt];
+ dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
+ }
+ em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
+ dev->alt, dev->max_pkt_size);
+ errCode = usb_set_interface(dev->udev, 0, dev->alt);
+ if (errCode < 0) {
+ em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
+ return errCode;
}
return 0;
}
@@ -919,7 +935,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
------------------------------------------------------------------*/
/*
- * IRQ callback, called by URB callback
+ * URB completion handler for isoc/bulk transfers
*/
static void em28xx_irq_callback(struct urb *urb)
{
@@ -941,11 +957,12 @@ static void em28xx_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->slock);
- dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->usb_ctl.urb_data_copy(dev, urb);
spin_unlock(&dev->slock);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
+ /* isoc only (bulk: number_of_packets = 0) */
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
@@ -961,49 +978,50 @@ static void em28xx_irq_callback(struct urb *urb)
/*
* Stop and Deallocate URBs
*/
-void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
+void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
{
struct urb *urb;
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
- em28xx_isocdbg("em28xx: called em28xx_uninit_isoc in mode %d\n", mode);
+ em28xx_isocdbg("em28xx: called em28xx_uninit_usb_xfer in mode %d\n",
+ mode);
if (mode == EM28XX_DIGITAL_MODE)
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
+ usb_bufs = &dev->usb_ctl.digital_bufs;
else
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ usb_bufs = &dev->usb_ctl.analog_bufs;
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- urb = isoc_bufs->urb[i];
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ urb = usb_bufs->urb[i];
if (urb) {
if (!irqs_disabled())
usb_kill_urb(urb);
else
usb_unlink_urb(urb);
- if (isoc_bufs->transfer_buffer[i]) {
+ if (usb_bufs->transfer_buffer[i]) {
usb_free_coherent(dev->udev,
urb->transfer_buffer_length,
- isoc_bufs->transfer_buffer[i],
+ usb_bufs->transfer_buffer[i],
urb->transfer_dma);
}
usb_free_urb(urb);
- isoc_bufs->urb[i] = NULL;
+ usb_bufs->urb[i] = NULL;
}
- isoc_bufs->transfer_buffer[i] = NULL;
+ usb_bufs->transfer_buffer[i] = NULL;
}
- kfree(isoc_bufs->urb);
- kfree(isoc_bufs->transfer_buffer);
+ kfree(usb_bufs->urb);
+ kfree(usb_bufs->transfer_buffer);
- isoc_bufs->urb = NULL;
- isoc_bufs->transfer_buffer = NULL;
- isoc_bufs->num_bufs = 0;
+ usb_bufs->urb = NULL;
+ usb_bufs->transfer_buffer = NULL;
+ usb_bufs->num_bufs = 0;
em28xx_capture_start(dev, 0);
}
-EXPORT_SYMBOL_GPL(em28xx_uninit_isoc);
+EXPORT_SYMBOL_GPL(em28xx_uninit_usb_xfer);
/*
* Stop URBs
@@ -1012,7 +1030,7 @@ void em28xx_stop_urbs(struct em28xx *dev)
{
int i;
struct urb *urb;
- struct em28xx_usb_isoc_bufs *isoc_bufs = &dev->isoc_ctl.digital_bufs;
+ struct em28xx_usb_bufs *isoc_bufs = &dev->usb_ctl.digital_bufs;
em28xx_isocdbg("em28xx: called em28xx_stop_urbs\n");
@@ -1033,10 +1051,10 @@ EXPORT_SYMBOL_GPL(em28xx_stop_urbs);
/*
* Allocate URBs
*/
-int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size)
+int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier)
{
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
int sb_size, pipe;
struct urb *urb;
@@ -1044,140 +1062,180 @@ int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
em28xx_isocdbg("em28xx: called em28xx_alloc_isoc in mode %d\n", mode);
- if (mode == EM28XX_DIGITAL_MODE)
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
- else
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ /* Check mode and if we have an endpoint for the selected
+ transfer type, select buffer */
+ if (mode == EM28XX_DIGITAL_MODE) {
+ if ((xfer_bulk && !dev->dvb_ep_bulk) ||
+ (!xfer_bulk && !dev->dvb_ep_isoc)) {
+ em28xx_errdev("no endpoint for DVB mode and transfer type %d\n",
+ xfer_bulk > 0);
+ return -EINVAL;
+ }
+ usb_bufs = &dev->usb_ctl.digital_bufs;
+ } else if (mode == EM28XX_ANALOG_MODE) {
+ if ((xfer_bulk && !dev->analog_ep_bulk) ||
+ (!xfer_bulk && !dev->analog_ep_isoc)) {
+ em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
+ xfer_bulk > 0);
+ return -EINVAL;
+ }
+ usb_bufs = &dev->usb_ctl.analog_bufs;
+ } else {
+ em28xx_errdev("invalid mode selected\n");
+ return -EINVAL;
+ }
/* De-allocates all pending stuff */
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
- isoc_bufs->num_bufs = num_bufs;
+ usb_bufs->num_bufs = num_bufs;
- isoc_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!isoc_bufs->urb) {
+ usb_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!usb_bufs->urb) {
em28xx_errdev("cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
- isoc_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
+ usb_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
- if (!isoc_bufs->transfer_buffer) {
+ if (!usb_bufs->transfer_buffer) {
em28xx_errdev("cannot allocate memory for usb transfer\n");
- kfree(isoc_bufs->urb);
+ kfree(usb_bufs->urb);
return -ENOMEM;
}
- isoc_bufs->max_pkt_size = max_pkt_size;
- isoc_bufs->num_packets = max_packets;
- dev->isoc_ctl.vid_buf = NULL;
- dev->isoc_ctl.vbi_buf = NULL;
+ usb_bufs->max_pkt_size = max_pkt_size;
+ if (xfer_bulk)
+ usb_bufs->num_packets = 0;
+ else
+ usb_bufs->num_packets = packet_multiplier;
+ dev->usb_ctl.vid_buf = NULL;
+ dev->usb_ctl.vbi_buf = NULL;
- sb_size = isoc_bufs->num_packets * isoc_bufs->max_pkt_size;
+ sb_size = packet_multiplier * usb_bufs->max_pkt_size;
/* allocate urbs and transfer buffers */
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- urb = usb_alloc_urb(isoc_bufs->num_packets, GFP_KERNEL);
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ urb = usb_alloc_urb(usb_bufs->num_packets, GFP_KERNEL);
if (!urb) {
- em28xx_err("cannot alloc isoc_ctl.urb %i\n", i);
- em28xx_uninit_isoc(dev, mode);
+ em28xx_err("cannot alloc usb_ctl.urb %i\n", i);
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
- isoc_bufs->urb[i] = urb;
+ usb_bufs->urb[i] = urb;
- isoc_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ usb_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
- if (!isoc_bufs->transfer_buffer[i]) {
+ if (!usb_bufs->transfer_buffer[i]) {
em28xx_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
sb_size, i,
in_interrupt() ? " while in int" : "");
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
- memset(isoc_bufs->transfer_buffer[i], 0, sb_size);
-
- /* FIXME: this is a hack - should be
- 'desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK'
- should also be using 'desc.bInterval'
- */
- pipe = usb_rcvisocpipe(dev->udev,
- mode == EM28XX_ANALOG_MODE ?
- EM28XX_EP_ANALOG : EM28XX_EP_DIGITAL);
-
- usb_fill_int_urb(urb, dev->udev, pipe,
- isoc_bufs->transfer_buffer[i], sb_size,
- em28xx_irq_callback, dev, 1);
-
- urb->number_of_packets = isoc_bufs->num_packets;
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
-
- k = 0;
- for (j = 0; j < isoc_bufs->num_packets; j++) {
- urb->iso_frame_desc[j].offset = k;
- urb->iso_frame_desc[j].length =
- isoc_bufs->max_pkt_size;
- k += isoc_bufs->max_pkt_size;
+ memset(usb_bufs->transfer_buffer[i], 0, sb_size);
+
+ if (xfer_bulk) { /* bulk */
+ pipe = usb_rcvbulkpipe(dev->udev,
+ mode == EM28XX_ANALOG_MODE ?
+ dev->analog_ep_bulk :
+ dev->dvb_ep_bulk);
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ usb_bufs->transfer_buffer[i], sb_size,
+ em28xx_irq_callback, dev);
+ urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ } else { /* isoc */
+ pipe = usb_rcvisocpipe(dev->udev,
+ mode == EM28XX_ANALOG_MODE ?
+ dev->analog_ep_isoc :
+ dev->dvb_ep_isoc);
+ usb_fill_int_urb(urb, dev->udev, pipe,
+ usb_bufs->transfer_buffer[i], sb_size,
+ em28xx_irq_callback, dev, 1);
+ urb->transfer_flags = URB_ISO_ASAP |
+ URB_NO_TRANSFER_DMA_MAP;
+ k = 0;
+ for (j = 0; j < usb_bufs->num_packets; j++) {
+ urb->iso_frame_desc[j].offset = k;
+ urb->iso_frame_desc[j].length =
+ usb_bufs->max_pkt_size;
+ k += usb_bufs->max_pkt_size;
+ }
}
+
+ urb->number_of_packets = usb_bufs->num_packets;
}
return 0;
}
-EXPORT_SYMBOL_GPL(em28xx_alloc_isoc);
+EXPORT_SYMBOL_GPL(em28xx_alloc_urbs);
/*
* Allocate URBs and start IRQ
*/
-int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb))
+int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
+ int xfer_bulk, int num_bufs, int max_pkt_size,
+ int packet_multiplier,
+ int (*urb_data_copy) (struct em28xx *dev, struct urb *urb))
{
struct em28xx_dmaqueue *dma_q = &dev->vidq;
struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- struct em28xx_usb_isoc_bufs *isoc_bufs;
+ struct em28xx_usb_bufs *usb_bufs;
int i;
int rc;
int alloc;
- em28xx_isocdbg("em28xx: called em28xx_init_isoc in mode %d\n", mode);
+ em28xx_isocdbg("em28xx: called em28xx_init_usb_xfer in mode %d\n",
+ mode);
- dev->isoc_ctl.isoc_copy = isoc_copy;
+ dev->usb_ctl.urb_data_copy = urb_data_copy;
if (mode == EM28XX_DIGITAL_MODE) {
- isoc_bufs = &dev->isoc_ctl.digital_bufs;
- /* no need to free/alloc isoc buffers in digital mode */
+ usb_bufs = &dev->usb_ctl.digital_bufs;
+ /* no need to free/alloc usb buffers in digital mode */
alloc = 0;
} else {
- isoc_bufs = &dev->isoc_ctl.analog_bufs;
+ usb_bufs = &dev->usb_ctl.analog_bufs;
alloc = 1;
}
if (alloc) {
- rc = em28xx_alloc_isoc(dev, mode, max_packets,
- num_bufs, max_pkt_size);
+ rc = em28xx_alloc_urbs(dev, mode, xfer_bulk, num_bufs,
+ max_pkt_size, packet_multiplier);
if (rc)
return rc;
}
+ if (xfer_bulk) {
+ rc = usb_clear_halt(dev->udev, usb_bufs->urb[0]->pipe);
+ if (rc < 0) {
+ em28xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+ rc);
+ em28xx_uninit_usb_xfer(dev, mode);
+ return rc;
+ }
+ }
+
init_waitqueue_head(&dma_q->wq);
init_waitqueue_head(&vbi_dma_q->wq);
em28xx_capture_start(dev, 1);
/* submit urbs and enables IRQ */
- for (i = 0; i < isoc_bufs->num_bufs; i++) {
- rc = usb_submit_urb(isoc_bufs->urb[i], GFP_ATOMIC);
+ for (i = 0; i < usb_bufs->num_bufs; i++) {
+ rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
if (rc) {
em28xx_err("submit of urb %i failed (error=%i)\n", i,
rc);
- em28xx_uninit_isoc(dev, mode);
+ em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
}
return 0;
}
-EXPORT_SYMBOL_GPL(em28xx_init_isoc);
+EXPORT_SYMBOL_GPL(em28xx_init_usb_xfer);
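Aside: for comparison with the DVB call further below, a minimal sketch of how the analog path might invoke the renamed helper. EM28XX_NUM_BUFS, EM28XX_NUM_PACKETS and the em28xx_urb_data_copy callback are assumptions taken from elsewhere in this series, not from this hunk.

	/* Sketch only (assumed constants/names): analog capture over isoc endpoints */
	ret = em28xx_init_usb_xfer(dev, EM28XX_ANALOG_MODE,
				   0,			/* xfer_bulk = 0 -> isoc */
				   EM28XX_NUM_BUFS,	/* assumed constant */
				   dev->max_pkt_size,
				   EM28XX_NUM_PACKETS,	/* assumed constant */
				   em28xx_urb_data_copy);
	if (ret < 0)
		em28xx_err("failed to start USB transfers (error=%i)\n", ret);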
/*
* em28xx_wake_i2c()
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 63f2e7070c00..a81ec2e8cc9b 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -10,6 +10,8 @@
(c) 2008 Aidan Thornton <makosoft@googlemail.com>
+ (c) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
+
Based on cx88-dvb, saa7134-dvb and videobuf-dvb originally written by:
(c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
@@ -25,7 +27,9 @@
#include "em28xx.h"
#include <media/v4l2-common.h>
-#include <media/videobuf-vmalloc.h>
+#include <dvb_demux.h>
+#include <dvb_net.h>
+#include <dmxdev.h>
#include <media/tuner.h>
#include "tuner-simple.h"
#include <linux/gpio.h>
@@ -124,34 +128,47 @@ static inline void print_err_status(struct em28xx *dev,
}
}
-static inline int em28xx_dvb_isoc_copy(struct em28xx *dev, struct urb *urb)
+static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
{
- int i;
+ int xfer_bulk, num_packets, i;
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->disconnected)
return 0;
- if (urb->status < 0) {
+ if (urb->status < 0)
print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ xfer_bulk = usb_pipebulk(urb->pipe);
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
- continue;
- }
+ if (xfer_bulk) /* bulk */
+ num_packets = 1;
+ else /* isoc */
+ num_packets = urb->number_of_packets;
- dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer +
- urb->iso_frame_desc[i].offset,
- urb->iso_frame_desc[i].actual_length);
+ for (i = 0; i < num_packets; i++) {
+ if (xfer_bulk) {
+ if (urb->status < 0) {
+ print_err_status(dev, i, urb->status);
+ if (urb->status != -EPROTO)
+ continue;
+ }
+ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
+ urb->actual_length);
+ } else {
+ if (urb->iso_frame_desc[i].status < 0) {
+ print_err_status(dev, i,
+ urb->iso_frame_desc[i].status);
+ if (urb->iso_frame_desc[i].status != -EPROTO)
+ continue;
+ }
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].actual_length);
+ }
}
return 0;
@@ -161,24 +178,40 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
{
int rc;
struct em28xx *dev = dvb->adapter.priv;
- int max_dvb_packet_size;
+ int dvb_max_packet_size, packet_multiplier, dvb_alt;
+
+ if (dev->dvb_xfer_bulk) {
+ if (!dev->dvb_ep_bulk)
+ return -ENODEV;
+ dvb_max_packet_size = 512; /* USB 2.0 spec */
+ packet_multiplier = EM28XX_DVB_BULK_PACKET_MULTIPLIER;
+ dvb_alt = 0;
+ } else { /* isoc */
+ if (!dev->dvb_ep_isoc)
+ return -ENODEV;
+ dvb_max_packet_size = dev->dvb_max_pkt_size_isoc;
+ if (dvb_max_packet_size < 0)
+ return dvb_max_packet_size;
+ packet_multiplier = EM28XX_DVB_NUM_ISOC_PACKETS;
+ dvb_alt = dev->dvb_alt_isoc;
+ }
- usb_set_interface(dev->udev, 0, dev->dvb_alt);
+ usb_set_interface(dev->udev, 0, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
- max_dvb_packet_size = dev->dvb_max_pkt_size;
- if (max_dvb_packet_size < 0)
- return max_dvb_packet_size;
dprintk(1, "Using %d buffers each with %d x %d bytes\n",
EM28XX_DVB_NUM_BUFS,
- EM28XX_DVB_MAX_PACKETS,
- max_dvb_packet_size);
-
- return em28xx_init_isoc(dev, EM28XX_DIGITAL_MODE,
- EM28XX_DVB_MAX_PACKETS, EM28XX_DVB_NUM_BUFS,
- max_dvb_packet_size, em28xx_dvb_isoc_copy);
+ packet_multiplier,
+ dvb_max_packet_size);
+
+ return em28xx_init_usb_xfer(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dvb_max_packet_size,
+ packet_multiplier,
+ em28xx_dvb_urb_data_copy);
}
static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
@@ -714,7 +747,8 @@ static struct tda18271_config em28xx_cxd2820r_tda18271_config = {
};
static const struct tda10071_config em28xx_tda10071_config = {
- .i2c_address = 0x55, /* (0xaa >> 1) */
+ .demod_i2c_addr = 0x55, /* (0xaa >> 1) */
+ .tuner_i2c_addr = 0x14,
.i2c_wr_max = 64,
.ts_mode = TDA10071_TS_SERIAL,
.spec_inv = 0,
@@ -1288,7 +1322,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
/* We cannot tell the device to sleep
* once it has been unplugged. */
if (dvb->fe[0])
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1683bd9d51ee..8532c1d4fd46 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -50,15 +50,18 @@ do { \
} while (0)
/*
- * em2800_i2c_send_max4()
- * send up to 4 bytes to the i2c device
+ * em2800_i2c_send_bytes()
+ * send up to 4 bytes to the em2800 i2c device
*/
-static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
int ret;
int write_timeout;
- unsigned char b2[6];
+ u8 b2[6];
+
+ if (len < 1 || len > 4)
+ return -EOPNOTSUPP;
+
BUG_ON(len < 1 || len > 4);
b2[5] = 0x80 + len - 1;
b2[4] = addr;
@@ -70,165 +73,212 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
if (len > 3)
b2[0] = buf[3];
+ /* trigger write */
ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
if (ret != 2 + len) {
- em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
- return -EIO;
+ em28xx_warn("failed to trigger write to i2c address 0x%x "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
}
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ /* wait for completion */
+ for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
write_timeout -= 5) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x80 + len - 1)
+ if (ret == 0x80 + len - 1) {
return len;
+ } else if (ret == 0x94 + len - 1) {
+ return -ENODEV;
+ } else if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from "
+ "bridge register (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
}
- em28xx_warn("i2c write timed out\n");
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
return -EIO;
}
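Aside: em2800_i2c_send_bytes() above transfers at most 4 bytes per bridge operation, so longer payloads have to be chunked by the caller. A minimal sketch of that chunking, with a hypothetical helper name and assuming the em28xx driver headers:

static int em2800_i2c_send_all(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
	u16 done = 0;
	int ret;

	while (done < len) {
		u16 chunk = min_t(u16, len - done, 4);	/* em2800 bridge limit */

		ret = em2800_i2c_send_bytes(dev, addr, buf + done, chunk);
		if (ret < 0)
			return ret;		/* propagate bridge/i2c error */
		done += chunk;
	}
	return done;				/* number of bytes written */
}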
/*
- * em2800_i2c_send_bytes()
- */
-static int em2800_i2c_send_bytes(void *data, unsigned char addr, char *buf,
- short len)
-{
- char *bufPtr = buf;
- int ret;
- int wrcount = 0;
- int count;
- int maxLen = 4;
- struct em28xx *dev = (struct em28xx *)data;
- while (len > 0) {
- count = (len > maxLen) ? maxLen : len;
- ret = em2800_i2c_send_max4(dev, addr, bufPtr, count);
- if (ret > 0) {
- len -= count;
- bufPtr += count;
- wrcount += count;
- } else
- return (ret < 0) ? ret : -EFAULT;
- }
- return wrcount;
-}
-
-/*
- * em2800_i2c_check_for_device()
- * check if there is a i2c_device at the supplied address
+ * em2800_i2c_recv_bytes()
+ * read up to 4 bytes from the em2800 i2c device
*/
-static int em2800_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
+static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
- char msg;
+ u8 buf2[4];
int ret;
- int write_timeout;
- msg = addr;
- ret = dev->em28xx_write_regs(dev, 0x04, &msg, 1);
- if (ret < 0) {
- em28xx_warn("setting i2c device address failed (error=%i)\n",
- ret);
- return ret;
- }
- msg = 0x84;
- ret = dev->em28xx_write_regs(dev, 0x05, &msg, 1);
- if (ret < 0) {
- em28xx_warn("preparing i2c read failed (error=%i)\n", ret);
- return ret;
+ int read_timeout;
+ int i;
+
+ if (len < 1 || len > 4)
+ return -EOPNOTSUPP;
+
+ /* trigger read */
+ buf2[1] = 0x84 + len - 1;
+ buf2[0] = addr;
+ ret = dev->em28xx_write_regs(dev, 0x04, buf2, 2);
+ if (ret != 2) {
+ em28xx_warn("failed to trigger read from i2c address 0x%x "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
}
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
- unsigned reg = dev->em28xx_read_reg(dev, 0x5);
- if (reg == 0x94)
+ /* wait for completion */
+ for (read_timeout = EM2800_I2C_XFER_TIMEOUT; read_timeout > 0;
+ read_timeout -= 5) {
+ ret = dev->em28xx_read_reg(dev, 0x05);
+ if (ret == 0x84 + len - 1) {
+ break;
+ } else if (ret == 0x94 + len - 1) {
return -ENODEV;
- else if (reg == 0x84)
- return 0;
+ } else if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from "
+ "bridge register (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
}
- return -ENODEV;
+ if (ret != 0x84 + len - 1)
+ em28xx_warn("read from i2c device at 0x%x timed out\n", addr);
+
+ /* get the received message */
+ ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
+ if (ret != len) {
+ em28xx_warn("reading from i2c device at 0x%x failed: "
+ "couldn't get the received message from the bridge "
+ "(error=%i)\n", addr, ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+ for (i = 0; i < len; i++)
+ buf[i] = buf2[len - 1 - i];
+
+ return ret;
}
/*
- * em2800_i2c_recv_bytes()
- * read from the i2c device
+ * em2800_i2c_check_for_device()
+ * check if there is an i2c device at the supplied address
*/
-static int em2800_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em2800_i2c_check_for_device(struct em28xx *dev, u8 addr)
{
+ u8 buf;
int ret;
- /* check for the device and set i2c read address */
- ret = em2800_i2c_check_for_device(dev, addr);
- if (ret) {
- em28xx_warn
- ("preparing read at i2c address 0x%x failed (error=%i)\n",
- addr, ret);
- return ret;
- }
- ret = dev->em28xx_read_reg_req_len(dev, 0x0, 0x3, buf, len);
- if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)",
- addr, ret);
- return ret;
- }
- return ret;
+
+ ret = em2800_i2c_recv_bytes(dev, addr, &buf, 1);
+ if (ret == 1)
+ return 0;
+ return (ret < 0) ? ret : -EIO;
}
/*
* em28xx_i2c_send_bytes()
*/
-static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf,
- short len, int stop)
+static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
+ u16 len, int stop)
{
- int wrcount = 0;
- struct em28xx *dev = (struct em28xx *)data;
int write_timeout, ret;
- wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ if (len < 1 || len > 64)
+ return -EOPNOTSUPP;
+ /* NOTE: limited by the USB ctrl message constraints
+ * Zero length reads always succeed, even if no device is connected */
+
+ /* Write to i2c device */
+ ret = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ if (ret != len) {
+ if (ret < 0) {
+ em28xx_warn("writing to i2c device at 0x%x failed "
+ "(error=%i)\n", addr, ret);
+ return ret;
+ } else {
+ em28xx_warn("%i bytes write to i2c device at 0x%x "
+ "requested, but %i bytes written\n",
+ len, addr, ret);
+ return -EIO;
+ }
+ }
- /* Seems to be required after a write */
- for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ /* Check success of the i2c operation */
+ for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
write_timeout -= 5) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (!ret)
- break;
+ if (ret == 0) { /* success */
+ return len;
+ } else if (ret == 0x10) {
+ return -ENODEV;
+ } else if (ret < 0) {
+ em28xx_warn("failed to read i2c transfer status from "
+ "bridge (error=%i)\n", ret);
+ return ret;
+ }
msleep(5);
+ /* NOTE: do we really have to wait for success ?
+ Never seen anything else than 0x00 or 0x10
+ (even with high payload) ... */
}
-
- return wrcount;
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ return -EIO;
}
/*
* em28xx_i2c_recv_bytes()
* read a byte from the i2c device
*/
-static int em28xx_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
- char *buf, int len)
+static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
{
int ret;
+
+ if (len < 1 || len > 64)
+ return -EOPNOTSUPP;
+ /* NOTE: limited by the USB ctrl message constraints
+ * Zero length reads always succeed, even if no device is connected */
+
+ /* Read data from i2c device */
ret = dev->em28xx_read_reg_req_len(dev, 2, addr, buf, len);
+ if (ret != len) {
+ if (ret < 0) {
+ em28xx_warn("reading from i2c device at 0x%x failed "
+ "(error=%i)\n", addr, ret);
+ return ret;
+ } else {
+ em28xx_warn("%i bytes requested from i2c device at "
+ "0x%x, but %i bytes received\n",
+ len, addr, ret);
+ return -EIO;
+ }
+ }
+
+ /* Check success of the i2c operation */
+ ret = dev->em28xx_read_reg(dev, 0x05);
if (ret < 0) {
- em28xx_warn("reading i2c device failed (error=%i)\n", ret);
+ em28xx_warn("failed to read i2c transfer status from "
+ "bridge (error=%i)\n", ret);
return ret;
}
- if (dev->em28xx_read_reg(dev, 0x5) != 0)
- return -ENODEV;
- return ret;
+ if (ret > 0) {
+ if (ret == 0x10) {
+ return -ENODEV;
+ } else {
+ em28xx_warn("unknown i2c error (status=%i)\n", ret);
+ return -EIO;
+ }
+ }
+ return len;
}
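Aside: both transfer directions above interpret bridge register 0x05 the same way (0x00 = success, 0x10 = slave did not ACK). A minimal sketch of that mapping as a standalone helper, name assumed:

static int em28xx_i2c_status_to_errno(int status)
{
	if (status < 0)
		return status;	/* reading the status register itself failed */
	if (status == 0x00)
		return 0;	/* transfer completed successfully */
	if (status == 0x10)
		return -ENODEV;	/* no ACK from the addressed i2c slave */
	return -EIO;		/* unexpected status value */
}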
/*
* em28xx_i2c_check_for_device()
* check if there is a i2c_device at the supplied address
*/
-static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
+static int em28xx_i2c_check_for_device(struct em28xx *dev, u16 addr)
{
int ret;
+ u8 buf;
- ret = dev->em28xx_read_reg_req(dev, 2, addr);
- if (ret < 0) {
- em28xx_warn("reading from i2c device failed (error=%i)\n", ret);
- return ret;
- }
- if (dev->em28xx_read_reg(dev, 0x5) != 0)
- return -ENODEV;
- return 0;
+ ret = em28xx_i2c_recv_bytes(dev, addr, &buf, 1);
+ if (ret == 1)
+ return 0;
+ return (ret < 0) ? ret : -EIO;
}
/*
@@ -253,11 +303,11 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = em2800_i2c_check_for_device(dev, addr);
else
rc = em28xx_i2c_check_for_device(dev, addr);
- if (rc < 0) {
- dprintk2(2, " no device\n");
+ if (rc == -ENODEV) {
+ if (i2c_debug >= 2)
+ printk(" no device\n");
return rc;
}
-
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
if (dev->board.is_em2800)
@@ -288,16 +338,16 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
msgs[i].len,
i == num - 1);
}
- if (rc < 0)
- goto err;
+ if (rc < 0) {
+ if (i2c_debug >= 2)
+ printk(" ERROR: %i\n", rc);
+ return rc;
+ }
if (i2c_debug >= 2)
printk("\n");
}
return num;
-err:
- dprintk2(2, " ERROR: %i\n", rc);
- return rc;
}
/* based on linux/sunrpc/svcauth.h and linux/hash.h
@@ -329,7 +379,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
{
unsigned char buf, *p = eedata;
struct em28xx_eeprom *em_eeprom = (void *)eedata;
- int i, err, size = len, block;
+ int i, err, size = len, block, block_max;
if (dev->chip_id == CHIP_ID_EM2874 ||
dev->chip_id == CHIP_ID_EM28174 ||
@@ -362,9 +412,15 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
dev->name, err);
return err;
}
+
+ if (dev->board.is_em2800)
+ block_max = 4;
+ else
+ block_max = 64;
+
while (size > 0) {
- if (size > 16)
- block = 16;
+ if (size > block_max)
+ block = block_max;
else
block = size;
@@ -449,7 +505,11 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
*/
static u32 functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_SMBUS_EMUL;
+ struct em28xx *dev = adap->algo_data;
+ u32 func_flags = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ if (dev->board.is_em2800)
+ func_flags &= ~I2C_FUNC_SMBUS_WRITE_BLOCK_DATA;
+ return func_flags;
}
static struct i2c_algorithm em28xx_algo = {
@@ -474,6 +534,7 @@ static struct i2c_client em28xx_client_template = {
* incomplete list of known devices
*/
static char *i2c_devs[128] = {
+ [0x3e >> 1] = "remote IR sensor",
[0x4a >> 1] = "saa7113h",
[0x52 >> 1] = "drxk",
[0x60 >> 1] = "remote IR sensor",
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 660bf803c9e4..1bef990b3f18 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -40,11 +40,6 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define MODULE_NAME "em28xx"
-#define i2cdprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
#define dprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -57,8 +52,8 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
struct em28xx_ir_poll_result {
unsigned int toggle_bit:1;
unsigned int read_count:7;
- u8 rc_address;
- u8 rc_data[4]; /* 1 byte on em2860/2880, 4 on em2874 */
+
+ u32 scancode;
};
struct em28xx_IR {
@@ -67,12 +62,17 @@ struct em28xx_IR {
char name[32];
char phys[32];
- /* poll external decoder */
+ /* poll decoder */
int polling;
struct delayed_work work;
unsigned int full_code:1;
unsigned int last_readcount;
+ u64 rc_type;
+ /* i2c slave address of external device (if used) */
+ u16 i2c_dev_addr;
+
+ int (*get_key_i2c)(struct i2c_client *, u32 *);
int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
};
@@ -80,21 +80,16 @@ struct em28xx_IR {
I2C IR based get keycodes - should be used with ir-kbd-i2c
**********************************************************/
-static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_terratec(struct i2c_client *i2c_dev, u32 *ir_key)
{
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(ir->c, &b, 1)) {
- i2cdprintk("read error\n");
+ if (1 != i2c_master_recv(i2c_dev, &b, 1))
return -EIO;
- }
/* it seems that 0xFE indicates that a button is still hold
- down, while 0xff indicates that no button is hold
- down. 0xfe sequences are sometimes interrupted by 0xFF */
-
- i2cdprintk("key %02x\n", b);
+ down, while 0xff indicates that no button is hold down. */
if (b == 0xff)
return 0;
@@ -104,18 +99,17 @@ static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
*ir_key = b;
- *ir_raw = b;
return 1;
}
-static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_em_haup(struct i2c_client *i2c_dev, u32 *ir_key)
{
unsigned char buf[2];
u16 code;
int size;
/* poll IR chip */
- size = i2c_master_recv(ir->c, buf, sizeof(buf));
+ size = i2c_master_recv(i2c_dev, buf, sizeof(buf));
if (size != 2)
return -EIO;
@@ -124,8 +118,6 @@ static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
if (buf[1] == 0xff)
return 0;
- ir->old = buf[1];
-
/*
* Rearranges bits to the right order.
* The bit order were determined experimentally by using
@@ -148,65 +140,51 @@ static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
((buf[1] & 0x40) ? 0x0200 : 0) | /* 0000 0010 */
((buf[1] & 0x80) ? 0x0100 : 0); /* 0000 0001 */
- i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x%02x)\n",
- code, buf[1], buf[0]);
-
/* return key */
*ir_key = code;
- *ir_raw = code;
return 1;
}
-static int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw)
+static int em28xx_get_key_pinnacle_usb_grey(struct i2c_client *i2c_dev,
+ u32 *ir_key)
{
unsigned char buf[3];
/* poll IR chip */
- if (3 != i2c_master_recv(ir->c, buf, 3)) {
- i2cdprintk("read error\n");
+ if (3 != i2c_master_recv(i2c_dev, buf, 3))
return -EIO;
- }
- i2cdprintk("key %02x\n", buf[2]&0x3f);
if (buf[0] != 0x00)
return 0;
*ir_key = buf[2]&0x3f;
- *ir_raw = buf[2]&0x3f;
return 1;
}
-static int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw)
+static int em28xx_get_key_winfast_usbii_deluxe(struct i2c_client *i2c_dev,
+ u32 *ir_key)
{
unsigned char subaddr, keydetect, key;
- struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0, .buf = &subaddr, .len = 1},
-
- { .addr = ir->c->addr, .flags = I2C_M_RD, .buf = &keydetect, .len = 1} };
+ struct i2c_msg msg[] = { { .addr = i2c_dev->addr, .flags = 0, .buf = &subaddr, .len = 1},
+ { .addr = i2c_dev->addr, .flags = I2C_M_RD, .buf = &keydetect, .len = 1} };
subaddr = 0x10;
- if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
- i2cdprintk("read error\n");
+ if (2 != i2c_transfer(i2c_dev->adapter, msg, 2))
return -EIO;
- }
if (keydetect == 0x00)
return 0;
subaddr = 0x00;
msg[1].buf = &key;
- if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
- i2cdprintk("read error\n");
- return -EIO;
- }
+ if (2 != i2c_transfer(i2c_dev->adapter, msg, 2))
+ return -EIO;
if (key == 0x00)
return 0;
*ir_key = key;
- *ir_raw = key;
return 1;
}
@@ -236,11 +214,8 @@ static int default_polling_getkey(struct em28xx_IR *ir,
/* Infrared read count (Reg 0x45[6:0] */
poll_result->read_count = (msg[0] & 0x7f);
- /* Remote Control Address (Reg 0x46) */
- poll_result->rc_address = msg[1];
-
- /* Remote Control Data (Reg 0x47) */
- poll_result->rc_data[0] = msg[2];
+ /* Remote Control Address/Data (Regs 0x46/0x47) */
+ poll_result->scancode = msg[1] << 8 | msg[2];
return 0;
}
@@ -266,13 +241,35 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
/* Infrared read count (Reg 0x51[6:0] */
poll_result->read_count = (msg[0] & 0x7f);
- /* Remote Control Address (Reg 0x52) */
- poll_result->rc_address = msg[1];
-
- /* Remote Control Data (Reg 0x53-55) */
- poll_result->rc_data[0] = msg[2];
- poll_result->rc_data[1] = msg[3];
- poll_result->rc_data[2] = msg[4];
+ /*
+ * Remote Control Address (Reg 0x52)
+ * Remote Control Data (Reg 0x53-0x55)
+ */
+ switch (ir->rc_type) {
+ case RC_BIT_RC5:
+ poll_result->scancode = msg[1] << 8 | msg[2];
+ break;
+ case RC_BIT_NEC:
+ if ((msg[3] ^ msg[4]) != 0xff) /* 32 bits NEC */
+ poll_result->scancode = (msg[1] << 24) |
+ (msg[2] << 16) |
+ (msg[3] << 8) |
+ msg[4];
+ else if ((msg[1] ^ msg[2]) != 0xff) /* 24 bits NEC */
+ poll_result->scancode = (msg[1] << 16) |
+ (msg[2] << 8) |
+ msg[3];
+ else /* Normal NEC */
+ poll_result->scancode = msg[1] << 8 | msg[3];
+ break;
+ case RC_BIT_RC6_0:
+ poll_result->scancode = msg[1] << 8 | msg[2];
+ break;
+ default:
+ poll_result->scancode = (msg[1] << 24) | (msg[2] << 16) |
+ (msg[3] << 8) | msg[4];
+ break;
+ }
return 0;
}
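Aside: the NEC branch above distinguishes the three NEC variants solely by checking whether the complement bytes match. The same decision isolated into a small helper (name assumed), with the arguments corresponding to msg[1]..msg[4] read from registers 0x52-0x55:

static u32 em2874_nec_scancode(u8 addr, u8 naddr, u8 data, u8 ndata)
{
	if ((data ^ ndata) != 0xff)		/* 32-bit NEC */
		return ((u32)addr << 24) | (naddr << 16) | (data << 8) | ndata;
	if ((addr ^ naddr) != 0xff)		/* 24-bit NEC */
		return (addr << 16) | (naddr << 8) | data;
	return (addr << 8) | data;		/* normal 16-bit NEC */
}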
@@ -281,6 +278,28 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
Polling code for em28xx
**********************************************************/
+static int em28xx_i2c_ir_handle_key(struct em28xx_IR *ir)
+{
+ static u32 ir_key;
+ int rc;
+ struct i2c_client client;
+
+ client.adapter = &ir->dev->i2c_adap;
+ client.addr = ir->i2c_dev_addr;
+
+ rc = ir->get_key_i2c(&client, &ir_key);
+ if (rc < 0) {
+ dprintk("ir->get_key_i2c() failed: %d\n", rc);
+ return rc;
+ }
+
+ if (rc) {
+ dprintk("%s: keycode = 0x%04x\n", __func__, ir_key);
+ rc_keydown(ir->rc, ir_key, 0);
+ }
+ return 0;
+}
+
static void em28xx_ir_handle_key(struct em28xx_IR *ir)
{
int result;
@@ -289,22 +308,21 @@ static void em28xx_ir_handle_key(struct em28xx_IR *ir)
/* read the registers containing the IR status */
result = ir->get_key(ir, &poll_result);
if (unlikely(result < 0)) {
- dprintk("ir->get_key() failed %d\n", result);
+ dprintk("ir->get_key() failed: %d\n", result);
return;
}
if (unlikely(poll_result.read_count != ir->last_readcount)) {
- dprintk("%s: toggle: %d, count: %d, key 0x%02x%02x\n", __func__,
+ dprintk("%s: toggle: %d, count: %d, key 0x%04x\n", __func__,
poll_result.toggle_bit, poll_result.read_count,
- poll_result.rc_address, poll_result.rc_data[0]);
+ poll_result.scancode);
if (ir->full_code)
rc_keydown(ir->rc,
- poll_result.rc_address << 8 |
- poll_result.rc_data[0],
+ poll_result.scancode,
poll_result.toggle_bit);
else
rc_keydown(ir->rc,
- poll_result.rc_data[0],
+ poll_result.scancode & 0xff,
poll_result.toggle_bit);
if (ir->dev->chip_id == CHIP_ID_EM2874 ||
@@ -324,7 +342,10 @@ static void em28xx_ir_work(struct work_struct *work)
{
struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work);
- em28xx_ir_handle_key(ir);
+ if (ir->i2c_dev_addr) /* external i2c device */
+ em28xx_i2c_ir_handle_key(ir);
+ else /* internal device */
+ em28xx_ir_handle_key(ir);
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
@@ -345,93 +366,107 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+static int em2860_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
{
- int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
struct em28xx *dev = ir->dev;
- u8 ir_config = EM2874_IR_RC5;
-
- /* Adjust xclk based o IR table for RC5/NEC tables */
+ /* Adjust xclk based on IR table for RC5/NEC tables */
if (*rc_type & RC_BIT_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
*rc_type = RC_BIT_RC5;
} else if (*rc_type & RC_BIT_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
- ir_config = EM2874_IR_NEC;
ir->full_code = 1;
*rc_type = RC_BIT_NEC;
- } else if (*rc_type != RC_BIT_UNKNOWN)
- rc = -EINVAL;
+ } else if (*rc_type & RC_BIT_UNKNOWN) {
+ *rc_type = RC_BIT_UNKNOWN;
+ } else {
+ *rc_type = ir->rc_type;
+ return -EINVAL;
+ }
+ em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
+ EM28XX_XCLK_IR_RC5_MODE);
+
+ ir->rc_type = *rc_type;
+ return 0;
+}
+
+static int em2874_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+{
+ struct em28xx_IR *ir = rc_dev->priv;
+ struct em28xx *dev = ir->dev;
+ u8 ir_config = EM2874_IR_RC5;
+
+ /* Adjust xclk and set type based on IR table for RC5/NEC/RC6 tables */
+ if (*rc_type & RC_BIT_RC5) {
+ dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
+ dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
+ ir_config = EM2874_IR_NEC | EM2874_IR_NEC_NO_PARITY;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type & RC_BIT_RC6_0) {
+ dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
+ ir_config = EM2874_IR_RC6_MODE_0;
+ ir->full_code = 1;
+ *rc_type = RC_BIT_RC6_0;
+ } else if (*rc_type & RC_BIT_UNKNOWN) {
+ *rc_type = RC_BIT_UNKNOWN;
+ } else {
+ *rc_type = ir->rc_type;
+ return -EINVAL;
+ }
+ em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
+ ir->rc_type = *rc_type;
+
+ return 0;
+}
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
+{
+ struct em28xx_IR *ir = rc_dev->priv;
+ struct em28xx *dev = ir->dev;
+
/* Setup the proper handler based on the chip */
switch (dev->chip_id) {
case CHIP_ID_EM2860:
case CHIP_ID_EM2883:
- ir->get_key = default_polling_getkey;
- break;
+ return em2860_ir_change_protocol(rc_dev, rc_type);
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
- ir->get_key = em2874_polling_getkey;
- em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
- break;
+ return em2874_ir_change_protocol(rc_dev, rc_type);
default:
printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
dev->chip_id);
- rc = -EINVAL;
+ return -EINVAL;
}
-
- return rc;
}
-static void em28xx_register_i2c_ir(struct em28xx *dev)
+static int em28xx_probe_i2c_ir(struct em28xx *dev)
{
+ int i = 0;
/* Leadtek winfast tv USBII deluxe can find a non working IR-device */
/* at address 0x18, so if that address is needed for another board in */
/* the future, please put it after 0x1f. */
- struct i2c_board_info info;
const unsigned short addr_list[] = {
0x1f, 0x30, 0x47, I2C_CLIENT_END
};
- memset(&info, 0, sizeof(struct i2c_board_info));
- memset(&dev->init_data, 0, sizeof(dev->init_data));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-
- /* detect & configure */
- switch (dev->model) {
- case EM2800_BOARD_TERRATEC_CINERGY_200:
- case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
- dev->init_data.get_key = em28xx_get_key_terratec;
- dev->init_data.name = "i2c IR (EM28XX Terratec)";
- break;
- case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
- dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
- dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
- break;
- case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- dev->init_data.get_key = em28xx_get_key_em_haup;
- dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
- break;
- case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
- dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
- dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
- break;
+ while (addr_list[i] != I2C_CLIENT_END) {
+ if (i2c_probe_func_quick_read(&dev->i2c_adap, addr_list[i]) == 1)
+ return addr_list[i];
+ i++;
}
- if (dev->init_data.name)
- info.platform_data = &dev->init_data;
- i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
+ return -ENODEV;
}
/**********************************************************
@@ -527,8 +562,21 @@ static int em28xx_ir_init(struct em28xx *dev)
struct rc_dev *rc;
int err = -ENOMEM;
u64 rc_type;
+ u16 i2c_rc_dev_addr = 0;
+
+ if (dev->board.has_snapshot_button)
+ em28xx_register_snapshot_button(dev);
+
+ if (dev->board.has_ir_i2c) {
+ i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
+ if (!i2c_rc_dev_addr) {
+ dev->board.has_ir_i2c = 0;
+ em28xx_warn("No i2c IR remote control device found.\n");
+ return -ENODEV;
+ }
+ }
- if (dev->board.ir_codes == NULL) {
+ if (dev->board.ir_codes == NULL && !dev->board.has_ir_i2c) {
/* No remote control support */
em28xx_warn("Remote control support is not available for "
"this card.\n");
@@ -538,35 +586,77 @@ static int em28xx_ir_init(struct em28xx *dev)
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc)
- goto err_out_free;
+ goto error;
/* record handles to ourself */
ir->dev = dev;
dev->ir = ir;
ir->rc = rc;
- /*
- * em2874 supports more protocols. For now, let's just announce
- * the two protocols that were already tested
- */
- rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
rc->priv = ir;
- rc->change_protocol = em28xx_ir_change_protocol;
rc->open = em28xx_ir_start;
rc->close = em28xx_ir_stop;
- /* By default, keep protocol field untouched */
- rc_type = RC_BIT_UNKNOWN;
- err = em28xx_ir_change_protocol(rc, &rc_type);
- if (err)
- goto err_out_free;
+ if (dev->board.has_ir_i2c) { /* external i2c device */
+ switch (dev->model) {
+ case EM2800_BOARD_TERRATEC_CINERGY_200:
+ case EM2820_BOARD_TERRATEC_CINERGY_250:
+ rc->map_name = RC_MAP_EM_TERRATEC;
+ ir->get_key_i2c = em28xx_get_key_terratec;
+ break;
+ case EM2820_BOARD_PINNACLE_USB_2:
+ rc->map_name = RC_MAP_PINNACLE_GREY;
+ ir->get_key_i2c = em28xx_get_key_pinnacle_usb_grey;
+ break;
+ case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
+ rc->map_name = RC_MAP_HAUPPAUGE;
+ ir->get_key_i2c = em28xx_get_key_em_haup;
+ rc->allowed_protos = RC_BIT_RC5;
+ break;
+ case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
+ rc->map_name = RC_MAP_WINFAST_USBII_DELUXE;
+ ir->get_key_i2c = em28xx_get_key_winfast_usbii_deluxe;
+ break;
+ default:
+ err = -ENODEV;
+ goto error;
+ }
+
+ ir->i2c_dev_addr = i2c_rc_dev_addr;
+ } else { /* internal device */
+ switch (dev->chip_id) {
+ case CHIP_ID_EM2860:
+ case CHIP_ID_EM2883:
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
+ ir->get_key = default_polling_getkey;
+ break;
+ case CHIP_ID_EM2884:
+ case CHIP_ID_EM2874:
+ case CHIP_ID_EM28174:
+ ir->get_key = em2874_polling_getkey;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
+ RC_BIT_RC6_0;
+ break;
+ default:
+ err = -ENODEV;
+ goto error;
+ }
+
+ rc->change_protocol = em28xx_ir_change_protocol;
+ rc->map_name = dev->board.ir_codes;
+
+ /* By default, keep protocol field untouched */
+ rc_type = RC_BIT_UNKNOWN;
+ err = em28xx_ir_change_protocol(rc, &rc_type);
+ if (err)
+ goto error;
+ }
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
/* init input device */
- snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)",
- dev->name);
+ snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
@@ -578,28 +668,17 @@ static int em28xx_ir_init(struct em28xx *dev)
rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
rc->dev.parent = &dev->udev->dev;
- rc->map_name = dev->board.ir_codes;
rc->driver_name = MODULE_NAME;
/* all done */
err = rc_register_device(rc);
if (err)
- goto err_out_stop;
-
- em28xx_register_i2c_ir(dev);
-
-#if defined(CONFIG_MODULES) && defined(MODULE)
- if (dev->board.has_ir_i2c)
- request_module("ir-kbd-i2c");
-#endif
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
+ goto error;
return 0;
- err_out_stop:
+error:
dev->ir = NULL;
- err_out_free:
rc_free_device(rc);
kfree(ir);
return err;
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 6ff368297f6e..885089e22bcd 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -13,9 +13,9 @@
#define EM_GPO_3 (1 << 3)
/* em28xx endpoints */
-#define EM28XX_EP_ANALOG 0x82
+/* 0x82: (always ?) analog */
#define EM28XX_EP_AUDIO 0x83
-#define EM28XX_EP_DIGITAL 0x84
+/* 0x84: digital or analog */
/* em2800 registers */
#define EM2800_R08_AUDIOSRC 0x08
@@ -177,6 +177,7 @@
/* em2874 IR config register (0x50) */
#define EM2874_IR_NEC 0x00
+#define EM2874_IR_NEC_NO_PARITY 0x01
#define EM2874_IR_RC5 0x04
#define EM2874_IR_RC6_MODE_0 0x08
#define EM2874_IR_RC6_MODE_6A 0x0b
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 2b4c9cba2d67..39f39c527c13 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -41,105 +41,72 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
/* ------------------------------------------------------------------ */
-static void
-free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf)
+static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.vbi_buf == buf)
- dev->isoc_ctl.vbi_buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
+ if (fmt)
+ size = fmt->fmt.pix.sizeimage;
+ else
+ size = dev->vbi_width * dev->vbi_height * 2;
-static int
-vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-{
- struct em28xx_fh *fh = q->priv_data;
- struct em28xx *dev = fh->dev;
+ if (0 == *nbuffers)
+ *nbuffers = 32;
+ if (*nbuffers < 2)
+ *nbuffers = 2;
+ if (*nbuffers > 32)
+ *nbuffers = 32;
- *size = dev->vbi_width * dev->vbi_height * 2;
+ *nplanes = 1;
+ sizes[0] = size;
- if (0 == *count)
- *count = vbibufs;
- if (*count < 2)
- *count = 2;
- if (*count > 32)
- *count = 32;
return 0;
}
-static int
-vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
- struct em28xx_fh *fh = q->priv_data;
- struct em28xx *dev = fh->dev;
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- int rc = 0;
+ unsigned long size;
- buf->vb.size = dev->vbi_width * dev->vbi_height * 2;
+ size = dev->vbi_width * dev->vbi_height * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size) {
+ printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
-
- buf->vb.width = dev->vbi_width;
- buf->vb.height = dev->vbi_height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
}
+ vb2_set_plane_payload(&buf->vb, 0, size);
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
-
-fail:
- free_buffer(q, buf);
- return rc;
}
static void
-vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct em28xx_dmaqueue *vbiq = &dev->vbiq;
-
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vbiq->active);
-}
-
-static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+vbi_buffer_queue(struct vb2_buffer *vb)
{
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- free_buffer(q, buf);
+ struct em28xx_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
+
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vbiq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-struct videobuf_queue_ops em28xx_vbi_qops = {
- .buf_setup = vbi_setup,
- .buf_prepare = vbi_prepare,
- .buf_queue = vbi_queue,
- .buf_release = vbi_release,
+
+struct vb2_ops em28xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buffer_prepare,
+ .buf_queue = vbi_buffer_queue,
+ .start_streaming = em28xx_start_analog_streaming,
+ .stop_streaming = em28xx_stop_vbi_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
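Aside: a minimal sketch of how a vb2_queue could be bound to em28xx_vbi_qops. The helper name and the chosen io_modes are assumptions; only the ops/drv_priv/buf_struct_size wiring follows from the code above.

#include <media/videobuf2-vmalloc.h>

static int em28xx_vbi_queue_init_sketch(struct em28xx *dev, struct vb2_queue *q)
{
	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;			/* what vb2_get_drv_priv() returns */
	q->buf_struct_size = sizeof(struct em28xx_buffer);
	q->ops = &em28xx_vbi_qops;
	q->mem_ops = &vb2_vmalloc_memops;	/* buffers are vmalloc'ed */

	return vb2_queue_init(q);
}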
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1e553d357380..32bd7de5dec1 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -6,6 +6,7 @@
Markus Rechberger <mrechberger@gmail.com>
Mauro Carvalho Chehab <mchehab@infradead.org>
Sascha Sommer <saschasommer@freenet.de>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
Some parts based on SN9C10x PC Camera Controllers GPL driver made
by Luca Risolia <luca.risolia@studio.unibo.it>
@@ -39,6 +40,7 @@
#include "em28xx.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
#include <media/msp3400.h>
#include <media/tuner.h>
@@ -74,9 +76,9 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(EM28XX_VERSION);
-static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
@@ -124,101 +126,50 @@ static struct em28xx_fmt format[] = {
},
};
-/* supported controls */
-/* Common to all boards */
-static struct v4l2_queryctrl ac97_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Volume",
- .minimum = 0x0,
- .maximum = 0x1f,
- .step = 0x1,
- .default_value = 0x1f,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- }
-};
-
/* ------------------------------------------------------------------
DMA and thread functions
------------------------------------------------------------------*/
/*
- * Announces that a buffer were filled and request the next
+ * Finish the current buffer
*/
-static inline void buffer_filled(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf)
+static inline void finish_buffer(struct em28xx *dev,
+ struct em28xx_buffer *buf)
{
- /* Advice that buffer was filled */
- em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
- dev->isoc_ctl.vid_buf = NULL;
+ buf->vb.v4l2_buf.sequence = dev->field_count++;
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
-}
-
-static inline void vbi_buffer_filled(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf)
-{
- /* Advice that buffer was filled */
- em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
-
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
-
- dev->isoc_ctl.vbi_buf = NULL;
-
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
/*
- * Identify the buffer header type and properly handles
+ * Copy picture data from USB buffer to videobuf buffer
*/
static void em28xx_copy_video(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
struct em28xx_buffer *buf,
- unsigned char *p,
- unsigned char *outp, unsigned long len)
+ unsigned char *usb_buf,
+ unsigned long len)
{
void *fieldstart, *startwrite, *startread;
int linesdone, currlinedone, offset, lencopy, remain;
int bytesperline = dev->width << 1;
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
+ if (buf->pos + len > buf->length)
+ len = buf->length - buf->pos;
- startread = p;
+ startread = usb_buf;
remain = len;
- if (dev->progressive)
- fieldstart = outp;
- else {
- /* Interlaces two half frames */
- if (buf->top_field)
- fieldstart = outp;
- else
- fieldstart = outp + bytesperline;
- }
+ if (dev->progressive || buf->top_field)
+ fieldstart = buf->vb_buf;
+ else /* interlaced mode, even nr. of lines */
+ fieldstart = buf->vb_buf + bytesperline;
- linesdone = dma_q->pos / bytesperline;
- currlinedone = dma_q->pos % bytesperline;
+ linesdone = buf->pos / bytesperline;
+ currlinedone = buf->pos % bytesperline;
if (dev->progressive)
offset = linesdone * bytesperline + currlinedone;
@@ -229,11 +180,12 @@ static void em28xx_copy_video(struct em28xx *dev,
lencopy = bytesperline - currlinedone;
lencopy = lencopy > remain ? remain : lencopy;
- if ((char *)startwrite + lencopy > (char *)outp + buf->vb.size) {
+ if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) {
em28xx_isocdbg("Overflow of %zi bytes past buffer end (1)\n",
- ((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- remain = (char *)outp + buf->vb.size - (char *)startwrite;
+ ((char *)startwrite + lencopy) -
+ ((char *)buf->vb_buf + buf->length));
+ remain = (char *)buf->vb_buf + buf->length -
+ (char *)startwrite;
lencopy = remain;
}
if (lencopy <= 0)
@@ -243,21 +195,24 @@ static void em28xx_copy_video(struct em28xx *dev,
remain -= lencopy;
while (remain > 0) {
- startwrite += lencopy + bytesperline;
+ if (dev->progressive)
+ startwrite += lencopy;
+ else
+ startwrite += lencopy + bytesperline;
startread += lencopy;
if (bytesperline > remain)
lencopy = remain;
else
lencopy = bytesperline;
- if ((char *)startwrite + lencopy > (char *)outp +
- buf->vb.size) {
+ if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
+ buf->length) {
em28xx_isocdbg("Overflow of %zi bytes past buffer end"
"(2)\n",
((char *)startwrite + lencopy) -
- ((char *)outp + buf->vb.size));
- lencopy = remain = (char *)outp + buf->vb.size -
- (char *)startwrite;
+ ((char *)buf->vb_buf + buf->length));
+ lencopy = remain = (char *)buf->vb_buf + buf->length -
+ (char *)startwrite;
}
if (lencopy <= 0)
break;
@@ -267,57 +222,29 @@ static void em28xx_copy_video(struct em28xx *dev,
remain -= lencopy;
}
- dma_q->pos += len;
+ buf->pos += len;
}
+/*
+ * Copy VBI data from USB buffer to videobuf buffer
+ */
static void em28xx_copy_vbi(struct em28xx *dev,
- struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer *buf,
- unsigned char *p,
- unsigned char *outp, unsigned long len)
+ struct em28xx_buffer *buf,
+ unsigned char *usb_buf,
+ unsigned long len)
{
- void *startwrite, *startread;
- int offset;
- int bytesperline;
-
- if (dev == NULL) {
- em28xx_isocdbg("dev is null\n");
- return;
- }
- bytesperline = dev->vbi_width;
+ unsigned int offset;
- if (dma_q == NULL) {
- em28xx_isocdbg("dma_q is null\n");
- return;
- }
- if (buf == NULL) {
- return;
- }
- if (p == NULL) {
- em28xx_isocdbg("p is null\n");
- return;
- }
- if (outp == NULL) {
- em28xx_isocdbg("outp is null\n");
- return;
- }
-
- if (dma_q->pos + len > buf->vb.size)
- len = buf->vb.size - dma_q->pos;
-
- startread = p;
-
- startwrite = outp + dma_q->pos;
- offset = dma_q->pos;
+ if (buf->pos + len > buf->length)
+ len = buf->length - buf->pos;
+ offset = buf->pos;
/* Make sure the bottom field populates the second half of the frame */
- if (buf->top_field == 0) {
- startwrite += bytesperline * dev->vbi_height;
- offset += bytesperline * dev->vbi_height;
- }
+ if (buf->top_field == 0)
+ offset += dev->vbi_width * dev->vbi_height;
- memcpy(startwrite, startread, len);
- dma_q->pos += len;
+ memcpy(buf->vb_buf + offset, usb_buf, len);
+ buf->pos += len;
}
static inline void print_err_status(struct em28xx *dev,
@@ -360,470 +287,444 @@ static inline void print_err_status(struct em28xx *dev,
}
/*
- * video-buf generic routine to get the next available buffer
+ * get the next available buffer from dma queue
*/
-static inline void get_next_buf(struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer **buf)
+static inline struct em28xx_buffer *get_next_buf(struct em28xx *dev,
+ struct em28xx_dmaqueue *dma_q)
{
- struct em28xx *dev = container_of(dma_q, struct em28xx, vidq);
- char *outp;
+ struct em28xx_buffer *buf;
if (list_empty(&dma_q->active)) {
em28xx_isocdbg("No active queue to serve\n");
- dev->isoc_ctl.vid_buf = NULL;
- *buf = NULL;
- return;
+ return NULL;
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue);
-
+ buf = list_entry(dma_q->active.next, struct em28xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ list_del(&buf->list);
+ buf->pos = 0;
+ buf->vb_buf = buf->mem;
- dev->isoc_ctl.vid_buf = *buf;
-
- return;
+ return buf;
}
/*
- * video-buf generic routine to get the next available VBI buffer
+ * Finish the current buffer if completed and prepare for the next field
*/
-static inline void vbi_get_next_buf(struct em28xx_dmaqueue *dma_q,
- struct em28xx_buffer **buf)
-{
- struct em28xx *dev = container_of(dma_q, struct em28xx, vbiq);
- char *outp;
-
- if (list_empty(&dma_q->active)) {
- em28xx_isocdbg("No active queue to serve\n");
- dev->isoc_ctl.vbi_buf = NULL;
- *buf = NULL;
- return;
+static struct em28xx_buffer *
+finish_field_prepare_next(struct em28xx *dev,
+ struct em28xx_buffer *buf,
+ struct em28xx_dmaqueue *dma_q)
+{
+ if (dev->progressive || dev->top_field) { /* Brand new frame */
+ if (buf != NULL)
+ finish_buffer(dev, buf);
+ buf = get_next_buf(dev, dma_q);
+ }
+ if (buf != NULL) {
+ buf->top_field = dev->top_field;
+ buf->pos = 0;
}
- /* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct em28xx_buffer, vb.queue);
- /* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0x00, (*buf)->vb.size);
-
- dev->isoc_ctl.vbi_buf = *buf;
-
- return;
+ return buf;
}
/*
- * Controls the isoc copy of each urb packet
+ * Process data packet according to the em2710/em2750/em28xx frame data format
*/
-static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
+static inline void process_frame_data_em28xx(struct em28xx *dev,
+ unsigned char *data_pkt,
+ unsigned int data_len)
{
- struct em28xx_buffer *buf;
+ struct em28xx_buffer *buf = dev->usb_ctl.vid_buf;
+ struct em28xx_buffer *vbi_buf = dev->usb_ctl.vbi_buf;
struct em28xx_dmaqueue *dma_q = &dev->vidq;
- unsigned char *outp = NULL;
- int i, len = 0, rc = 1;
- unsigned char *p;
-
- if (!dev)
- return 0;
-
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
- return 0;
-
- if (urb->status < 0) {
- print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
-
- buf = dev->isoc_ctl.vid_buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
- continue;
+ /* capture type 0 = vbi start
+ capture type 1 = vbi in progress
+ capture type 2 = video start
+ capture type 3 = video in progress */
+ if (data_len >= 4) {
+ /* NOTE: Headers are always 4 bytes and
+ * never split across packets */
+ if (data_pkt[0] == 0x88 && data_pkt[1] == 0x88 &&
+ data_pkt[2] == 0x88 && data_pkt[3] == 0x88) {
+ /* Continuation */
+ data_pkt += 4;
+ data_len -= 4;
+ } else if (data_pkt[0] == 0x33 && data_pkt[1] == 0x95) {
+ /* Field start (VBI mode) */
+ dev->capture_type = 0;
+ dev->vbi_read = 0;
+ em28xx_isocdbg("VBI START HEADER !!!\n");
+ dev->top_field = !(data_pkt[2] & 1);
+ data_pkt += 4;
+ data_len -= 4;
+ } else if (data_pkt[0] == 0x22 && data_pkt[1] == 0x5a) {
+ /* Field start (VBI disabled) */
+ dev->capture_type = 2;
+ em28xx_isocdbg("VIDEO START HEADER !!!\n");
+ dev->top_field = !(data_pkt[2] & 1);
+ data_pkt += 4;
+ data_len -= 4;
}
+ }
+ /* NOTE: With bulk transfers, intermediate data packets
+ * have no continuation header */
- len = urb->iso_frame_desc[i].actual_length - 4;
+ if (dev->capture_type == 0) {
+ vbi_buf = finish_field_prepare_next(dev, vbi_buf, vbi_dma_q);
+ dev->usb_ctl.vbi_buf = vbi_buf;
+ dev->capture_type = 1;
+ }
- if (urb->iso_frame_desc[i].actual_length <= 0) {
- /* em28xx_isocdbg("packet %d is empty",i); - spammy */
- continue;
- }
- if (urb->iso_frame_desc[i].actual_length >
- dev->max_pkt_size) {
- em28xx_isocdbg("packet bigger than packet size");
- continue;
- }
+ if (dev->capture_type == 1) {
+ int vbi_size = dev->vbi_width * dev->vbi_height;
+ int vbi_data_len = ((dev->vbi_read + data_len) > vbi_size) ?
+ (vbi_size - dev->vbi_read) : data_len;
- p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ /* Copy VBI data */
+ if (vbi_buf != NULL)
+ em28xx_copy_vbi(dev, vbi_buf, data_pkt, vbi_data_len);
+ dev->vbi_read += vbi_data_len;
- /* FIXME: incomplete buffer checks where removed to make
- logic simpler. Impacts of those changes should be evaluated
- */
- if (p[0] == 0x33 && p[1] == 0x95 && p[2] == 0x00) {
- em28xx_isocdbg("VBI HEADER!!!\n");
- /* FIXME: Should add vbi copy */
- continue;
+ if (vbi_data_len < data_len) {
+ /* Continue with copying video data */
+ dev->capture_type = 2;
+ data_pkt += vbi_data_len;
+ data_len -= vbi_data_len;
}
- if (p[0] == 0x22 && p[1] == 0x5a) {
- em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
- len, (p[2] & 1) ? "odd" : "even");
-
- if (dev->progressive || !(p[2] & 1)) {
- if (buf != NULL)
- buffer_filled(dev, dma_q, buf);
- get_next_buf(dma_q, &buf);
- if (buf == NULL)
- outp = NULL;
- else
- outp = videobuf_to_vmalloc(&buf->vb);
- }
-
- if (buf != NULL) {
- if (p[2] & 1)
- buf->top_field = 0;
- else
- buf->top_field = 1;
- }
+ }
- dma_q->pos = 0;
- }
- if (buf != NULL) {
- if (p[0] != 0x88 && p[0] != 0x22) {
- em28xx_isocdbg("frame is not complete\n");
- len += 4;
- } else {
- p += 4;
- }
- em28xx_copy_video(dev, dma_q, buf, p, outp, len);
- }
+ if (dev->capture_type == 2) {
+ buf = finish_field_prepare_next(dev, buf, dma_q);
+ dev->usb_ctl.vid_buf = buf;
+ dev->capture_type = 3;
}
- return rc;
+
+ if (dev->capture_type == 3 && buf != NULL && data_len > 0)
+ em28xx_copy_video(dev, buf, data_pkt, data_len);
}
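Aside: the header handling above reduces to three magic 4-byte patterns (with the field parity carried in bit 0 of the third byte of a field-start header). A sketch of the same classification in isolation; the enum and helper names are assumptions:

enum em28xx_pkt_header {
	EM28XX_HDR_NONE,	/* no header, e.g. intermediate bulk packet */
	EM28XX_HDR_CONT,	/* 0x88 0x88 0x88 0x88: continuation */
	EM28XX_HDR_VBI_START,	/* 0x33 0x95: field start, VBI enabled */
	EM28XX_HDR_VIDEO_START,	/* 0x22 0x5a: field start, VBI disabled */
};

static enum em28xx_pkt_header classify_header(const u8 *p, unsigned int len)
{
	if (len < 4)		/* headers are never split across packets */
		return EM28XX_HDR_NONE;
	if (p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88)
		return EM28XX_HDR_CONT;
	if (p[0] == 0x33 && p[1] == 0x95)
		return EM28XX_HDR_VBI_START;
	if (p[0] == 0x22 && p[1] == 0x5a)
		return EM28XX_HDR_VIDEO_START;
	return EM28XX_HDR_NONE;
}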
-/* Version of isoc handler that takes into account a mixture of video and
- VBI data */
-static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
+/* Processes and copies the URB data content (video and VBI data) */
+static inline int em28xx_urb_data_copy(struct em28xx *dev, struct urb *urb)
{
- struct em28xx_buffer *buf, *vbi_buf;
- struct em28xx_dmaqueue *dma_q = &dev->vidq;
- struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
- unsigned char *outp = NULL;
- unsigned char *vbioutp = NULL;
- int i, len = 0, rc = 1;
- unsigned char *p;
- int vbi_size;
+ int xfer_bulk, num_packets, i;
+ unsigned char *usb_data_pkt;
+ unsigned int usb_data_len;
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->disconnected)
return 0;
- if (urb->status < 0) {
+ if (urb->status < 0)
print_err_status(dev, -1, urb->status);
- if (urb->status == -ENOENT)
- return 0;
- }
- buf = dev->isoc_ctl.vid_buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
+ xfer_bulk = usb_pipebulk(urb->pipe);
- vbi_buf = dev->isoc_ctl.vbi_buf;
- if (vbi_buf != NULL)
- vbioutp = videobuf_to_vmalloc(&vbi_buf->vb);
+ if (xfer_bulk) /* bulk */
+ num_packets = 1;
+ else /* isoc */
+ num_packets = urb->number_of_packets;
- for (i = 0; i < urb->number_of_packets; i++) {
- int status = urb->iso_frame_desc[i].status;
+ for (i = 0; i < num_packets; i++) {
+ if (xfer_bulk) { /* bulk */
+ usb_data_len = urb->actual_length;
- if (status < 0) {
- print_err_status(dev, i, status);
- if (urb->iso_frame_desc[i].status != -EPROTO)
+ usb_data_pkt = urb->transfer_buffer;
+ } else { /* isoc */
+ if (urb->iso_frame_desc[i].status < 0) {
+ print_err_status(dev, i,
+ urb->iso_frame_desc[i].status);
+ if (urb->iso_frame_desc[i].status != -EPROTO)
+ continue;
+ }
+
+ usb_data_len = urb->iso_frame_desc[i].actual_length;
+ if (usb_data_len > dev->max_pkt_size) {
+ em28xx_isocdbg("packet bigger than packet size");
continue;
- }
+ }
- len = urb->iso_frame_desc[i].actual_length;
- if (urb->iso_frame_desc[i].actual_length <= 0) {
- /* em28xx_isocdbg("packet %d is empty",i); - spammy */
- continue;
- }
- if (urb->iso_frame_desc[i].actual_length >
- dev->max_pkt_size) {
- em28xx_isocdbg("packet bigger than packet size");
- continue;
+ usb_data_pkt = urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset;
}
- p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
-
- /* capture type 0 = vbi start
- capture type 1 = video start
- capture type 2 = video in progress */
- if (p[0] == 0x33 && p[1] == 0x95) {
- dev->capture_type = 0;
- dev->vbi_read = 0;
- em28xx_isocdbg("VBI START HEADER!!!\n");
- dev->cur_field = p[2];
- p += 4;
- len -= 4;
- } else if (p[0] == 0x88 && p[1] == 0x88 &&
- p[2] == 0x88 && p[3] == 0x88) {
- /* continuation */
- p += 4;
- len -= 4;
- } else if (p[0] == 0x22 && p[1] == 0x5a) {
- /* start video */
- p += 4;
- len -= 4;
+ if (usb_data_len == 0) {
+ /* NOTE: happens very often with isoc transfers */
+ /* em28xx_usbdbg("packet %d is empty",i); - spammy */
+ continue;
}
- vbi_size = dev->vbi_width * dev->vbi_height;
-
- if (dev->capture_type == 0) {
- if (dev->vbi_read >= vbi_size) {
- /* We've already read all the VBI data, so
- treat the rest as video */
- em28xx_isocdbg("dev->vbi_read > vbi_size\n");
- } else if ((dev->vbi_read + len) < vbi_size) {
- /* This entire frame is VBI data */
- if (dev->vbi_read == 0 &&
- (!(dev->cur_field & 1))) {
- /* Brand new frame */
- if (vbi_buf != NULL)
- vbi_buffer_filled(dev,
- vbi_dma_q,
- vbi_buf);
- vbi_get_next_buf(vbi_dma_q, &vbi_buf);
- if (vbi_buf == NULL)
- vbioutp = NULL;
- else
- vbioutp = videobuf_to_vmalloc(
- &vbi_buf->vb);
- }
-
- if (dev->vbi_read == 0) {
- vbi_dma_q->pos = 0;
- if (vbi_buf != NULL) {
- if (dev->cur_field & 1)
- vbi_buf->top_field = 0;
- else
- vbi_buf->top_field = 1;
- }
- }
-
- dev->vbi_read += len;
- em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
- vbioutp, len);
- } else {
- /* Some of this frame is VBI data and some is
- video data */
- int vbi_data_len = vbi_size - dev->vbi_read;
- dev->vbi_read += vbi_data_len;
- em28xx_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
- vbioutp, vbi_data_len);
- dev->capture_type = 1;
- p += vbi_data_len;
- len -= vbi_data_len;
- }
- }
+ process_frame_data_em28xx(dev, usb_data_pkt, usb_data_len);
+ }
+ return 1;
+}
- if (dev->capture_type == 1) {
- dev->capture_type = 2;
- if (dev->progressive || !(dev->cur_field & 1)) {
- if (buf != NULL)
- buffer_filled(dev, dma_q, buf);
- get_next_buf(dma_q, &buf);
- if (buf == NULL)
- outp = NULL;
- else
- outp = videobuf_to_vmalloc(&buf->vb);
- }
- if (buf != NULL) {
- if (dev->cur_field & 1)
- buf->top_field = 0;
- else
- buf->top_field = 1;
- }
- dma_q->pos = 0;
- }
+static int get_ressource(enum v4l2_buf_type f_type)
+{
+ switch (f_type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return EM28XX_RESOURCE_VIDEO;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ return EM28XX_RESOURCE_VBI;
+ default:
+ BUG();
+ return 0;
+ }
+}
- if (buf != NULL && dev->capture_type == 2) {
- if (len >= 4 && p[0] == 0x88 && p[1] == 0x88 &&
- p[2] == 0x88 && p[3] == 0x88) {
- p += 4;
- len -= 4;
- }
- if (len >= 4 && p[0] == 0x22 && p[1] == 0x5a) {
- em28xx_isocdbg("Video frame %d, len=%i, %s\n",
- p[2], len, (p[2] & 1) ?
- "odd" : "even");
- p += 4;
- len -= 4;
- }
+/* Usage lock check functions */
+static int res_get(struct em28xx *dev, enum v4l2_buf_type f_type)
+{
+ int res_type = get_ressource(f_type);
- if (len > 0)
- em28xx_copy_video(dev, dma_q, buf, p, outp,
- len);
- }
+ /* is it free? */
+ if (dev->resources & res_type) {
+ /* no, someone else uses it */
+ return -EBUSY;
}
- return rc;
+
+ /* it's free, grab it */
+ dev->resources |= res_type;
+ em28xx_videodbg("res: get %d\n", res_type);
+ return 0;
}
+static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
+{
+ int res_type = get_ressource(f_type);
+
+ dev->resources &= ~res_type;
+ em28xx_videodbg("res: put %d\n", res_type);
+}
/* ------------------------------------------------------------------
- Videobuf operations
+ Videobuf2 operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct v4l2_frequency f;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)
- >> 3;
-
- if (0 == *count)
- *count = EM28XX_DEF_BUF;
+ if (fmt)
+ size = fmt->fmt.pix.sizeimage;
+ else
+ size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < EM28XX_MIN_BUF)
- *count = EM28XX_MIN_BUF;
+ if (size == 0)
+ return -EINVAL;
- /* Ask tuner to go to analog or radio mode */
- memset(&f, 0, sizeof(f));
- f.frequency = dev->ctl_freq;
- f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (0 == *nbuffers)
+ *nbuffers = 32;
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+ *nplanes = 1;
+ sizes[0] = size;
return 0;
}
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct em28xx_buffer *buf)
+static int
+buffer_prepare(struct vb2_buffer *vb)
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- unsigned long flags = 0;
- if (in_interrupt())
- BUG();
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ unsigned long size;
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ em28xx_videodbg("%s, field=%d\n", __func__, vb->v4l2_buf.field);
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->slock, flags);
- if (dev->isoc_ctl.vid_buf == buf)
- dev->isoc_ctl.vid_buf = NULL;
- spin_unlock_irqrestore(&dev->slock, flags);
+ size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ if (vb2_plane_size(vb, 0) < size) {
+ em28xx_videodbg("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ return 0;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
- struct em28xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct v4l2_frequency f;
+ int rc = 0;
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
+ em28xx_videodbg("%s\n", __func__);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
+ /* Make sure streaming is not already in progress for this type
+ of filehandle (e.g. video, vbi) */
+ rc = res_get(dev, vq->type);
+ if (rc)
+ return rc;
+
+ if (dev->streaming_users++ == 0) {
+ /* First active streaming user, so allocate all the URBs */
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
+ /* Allocate the USB bandwidth */
+ em28xx_set_alternate(dev);
+
+ /* Needed, since GPIO might have disabled power of
+ some i2c device
+ */
+ em28xx_wake_i2c(dev);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
+ dev->capture_type = -1;
+ rc = em28xx_init_usb_xfer(dev, EM28XX_ANALOG_MODE,
+ dev->analog_xfer_bulk,
+ EM28XX_NUM_BUFS,
+ dev->max_pkt_size,
+ dev->packet_multiplier,
+ em28xx_urb_data_copy);
if (rc < 0)
goto fail;
- }
- if (!dev->isoc_ctl.analog_bufs.num_bufs)
- urb_init = 1;
+ /*
+ * djh: it's not clear whether this code is still needed. I'm
+ * leaving it in here for now entirely out of concern for
+ * backward compatibility (the old code did it)
+ */
- if (urb_init) {
- if (em28xx_vbi_supported(dev) == 1)
- rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE,
- EM28XX_NUM_PACKETS,
- EM28XX_NUM_BUFS,
- dev->max_pkt_size,
- em28xx_isoc_copy_vbi);
+ /* Ask tuner to go to analog or radio mode */
+ memset(&f, 0, sizeof(f));
+ f.frequency = dev->ctl_freq;
+ if (vq->owner && vq->owner->vdev->vfl_type == VFL_TYPE_RADIO)
+ f.type = V4L2_TUNER_RADIO;
else
- rc = em28xx_init_isoc(dev, EM28XX_ANALOG_MODE,
- EM28XX_NUM_PACKETS,
- EM28XX_NUM_BUFS,
- dev->max_pkt_size,
- em28xx_isoc_copy);
- if (rc < 0)
- goto fail;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
}
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
-
fail:
- free_buffer(vq, buf);
return rc;
}
-static void
-buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int em28xx_stop_streaming(struct vb2_queue *vq)
{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = fh->dev;
- struct em28xx_dmaqueue *vidq = &dev->vidq;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct em28xx_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
+
+ res_free(dev, vq->type);
+
+ if (dev->streaming_users-- == 1) {
+ /* Last active user, so shut down all the URBs */
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ }
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&vidq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ dev->usb_ctl.vid_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return 0;
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
{
- struct em28xx_buffer *buf = container_of(vb,
- struct em28xx_buffer,
- vb);
- struct em28xx_fh *fh = vq->priv_data;
- struct em28xx *dev = (struct em28xx *)fh->dev;
+ struct em28xx *dev = vb2_get_drv_priv(vq);
+ struct em28xx_dmaqueue *vbiq = &dev->vbiq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
- em28xx_isocdbg("em28xx: called buffer_release\n");
+ res_free(dev, vq->type);
- free_buffer(vq, buf);
+ if (dev->streaming_users-- == 1) {
+ /* Last active user, so shut down all the URBs */
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&vbiq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ dev->usb_ctl.vbi_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
}
-static struct videobuf_queue_ops em28xx_video_qops = {
- .buf_setup = buffer_setup,
+static void
+buffer_queue(struct vb2_buffer *vb)
+{
+ struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+ struct em28xx_dmaqueue *vidq = &dev->vidq;
+ unsigned long flags = 0;
+
+ em28xx_videodbg("%s\n", __func__);
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static struct vb2_ops em28xx_video_qops = {
+ .queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .start_streaming = em28xx_start_analog_streaming,
+ .stop_streaming = em28xx_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
+int em28xx_vb2_setup(struct em28xx *dev)
+{
+ int rc;
+ struct vb2_queue *q;
+
+ /* Setup Videobuf2 for Video capture */
+ q = &dev->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct em28xx_buffer);
+ q->ops = &em28xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ /* Setup Videobuf2 for VBI capture */
+ q = &dev->vb_vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct em28xx_buffer);
+ q->ops = &em28xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+
+ rc = vb2_queue_init(q);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
/********************* v4l2 interface **************************************/
static void video_mux(struct em28xx *dev, int index)
@@ -856,143 +757,54 @@ static void video_mux(struct em28xx *dev, int index)
em28xx_audio_analog_set(dev);
}
-/* Usage lock check functions */
-static int res_get(struct em28xx_fh *fh, unsigned int bit)
-{
- struct em28xx *dev = fh->dev;
-
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
-
- /* is it free? */
- if (dev->resources & bit) {
- /* no, someone else uses it */
- return 0;
- }
- /* it's free, grab it */
- fh->resources |= bit;
- dev->resources |= bit;
- em28xx_videodbg("res: get %d\n", bit);
- return 1;
-}
-
-static int res_check(struct em28xx_fh *fh, unsigned int bit)
-{
- return fh->resources & bit;
-}
-
-static int res_locked(struct em28xx *dev, unsigned int bit)
-{
- return dev->resources & bit;
-}
-
-static void res_free(struct em28xx_fh *fh, unsigned int bits)
+void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
{
- struct em28xx *dev = fh->dev;
-
- BUG_ON((fh->resources & bits) != bits);
-
- fh->resources &= ~bits;
- dev->resources &= ~bits;
- em28xx_videodbg("res: put %d\n", bits);
-}
-
-static int get_ressource(struct em28xx_fh *fh)
-{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return EM28XX_RESOURCE_VIDEO;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return EM28XX_RESOURCE_VBI;
- default:
- BUG();
- return 0;
- }
-}
-
-/*
- * ac97_queryctrl()
- * return the ac97 supported controls
- */
-static int ac97_queryctrl(struct v4l2_queryctrl *qc)
-{
- int i;
+ struct em28xx *dev = priv;
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) {
- if (qc->id && qc->id == ac97_qctrl[i].id) {
- memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc));
- return 0;
- }
- }
-
- /* Control is not ac97 related */
- return 1;
-}
-
-/*
- * ac97_get_ctrl()
- * return the current values for ac97 mute and volume
- */
-static int ac97_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
-{
+ /*
+ * In the case of non-AC97 volume controls, we still need
+ * to do some setups at em28xx, in order to mute/unmute
+ * and to adjust audio volume. However, the value ranges
+ * should be checked by the corresponding V4L subdriver.
+ */
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = dev->mute;
- return 0;
+ dev->mute = ctrl->val;
+ em28xx_audio_analog_set(dev);
+ break;
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = dev->volume;
- return 0;
- default:
- /* Control is not ac97 related */
- return 1;
+ dev->volume = ctrl->val;
+ em28xx_audio_analog_set(dev);
+ break;
}
}
-/*
- * ac97_set_ctrl()
- * set values for ac97 mute and volume
- */
-static int ac97_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
+static int em28xx_s_ctrl(struct v4l2_ctrl *ctrl)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++)
- if (ctrl->id == ac97_qctrl[i].id)
- goto handle;
-
- /* Announce that hasn't handle it */
- return 1;
-
-handle:
- if (ctrl->value < ac97_qctrl[i].minimum ||
- ctrl->value > ac97_qctrl[i].maximum)
- return -ERANGE;
+ struct em28xx *dev = container_of(ctrl->handler, struct em28xx, ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- dev->mute = ctrl->value;
+ dev->mute = ctrl->val;
break;
case V4L2_CID_AUDIO_VOLUME:
- dev->volume = ctrl->value;
+ dev->volume = ctrl->val;
break;
}
return em28xx_audio_analog_set(dev);
}
+const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
+ .s_ctrl = em28xx_s_ctrl,
+};
+
static int check_dev(struct em28xx *dev)
{
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
em28xx_errdev("v4l2 ioctl: device not present\n");
return -ENODEV;
}
-
- if (dev->state & DEV_MISCONFIGURED) {
- em28xx_errdev("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
- return -EIO;
- }
return 0;
}
@@ -1072,8 +884,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/* the em2800 can only scale down to 50% */
height = height > (3 * maxh / 4) ? maxh : maxh / 2;
width = width > (3 * maxw / 4) ? maxw : maxw / 2;
- /* MaxPacketSize for em2800 is too small to capture at full resolution
- * use half of maxw as the scaler can only scale to 50% */
+ /*
+ * MaxPacketSize for em2800 is too small to capture at full
+ * resolution use half of maxw as the scaler can only scale
+ * to 50%
+ */
if (width == maxw && height == maxh)
width /= 2;
} else {
@@ -1091,7 +906,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = width;
f->fmt.pix.height = height;
f->fmt.pix.pixelformat = fmt->fourcc;
- f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3;
+ f->fmt.pix.bytesperline = (width * fmt->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
if (dev->progressive)
@@ -1119,7 +934,6 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
/* set new image size */
get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
- em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
return 0;
@@ -1128,21 +942,13 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
+ struct em28xx *dev = video_drvdata(file);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ if (dev->streaming_users > 0)
+ return -EBUSY;
vidioc_try_fmt_vid_cap(file, priv, f);
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- em28xx_errdev("%s queue busy\n", __func__);
- return -EBUSY;
- }
-
return em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
f->fmt.pix.width, f->fmt.pix.height);
}
@@ -1153,6 +959,8 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
struct em28xx *dev = fh->dev;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1168,6 +976,8 @@ static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm)
struct em28xx *dev = fh->dev;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1184,15 +994,22 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
struct v4l2_format f;
int rc;
+ if (dev->board.is_webcam)
+ return -ENOTTY;
+ if (*norm == dev->norm)
+ return 0;
rc = check_dev(dev);
if (rc < 0)
return rc;
+ if (dev->streaming_users > 0)
+ return -EBUSY;
+
dev->norm = *norm;
/* Adjusts width/height, if needed */
- f.fmt.pix.width = dev->width;
- f.fmt.pix.height = dev->height;
+ f.fmt.pix.width = 720;
+ f.fmt.pix.height = (*norm & V4L2_STD_525_60) ? 480 : 576;
vidioc_try_fmt_vid_cap(file, priv, &f);
/* set new image size */
@@ -1216,6 +1033,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ p->parm.capture.readbuffers = EM28XX_MIN_BUF;
if (dev->board.is_webcam)
rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0,
video, g_parm, p);
@@ -1233,11 +1051,12 @@ static int vidioc_s_parm(struct file *file, void *priv,
struct em28xx *dev = fh->dev;
if (!dev->board.is_webcam)
- return -EINVAL;
+ return -ENOTTY;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ p->parm.capture.readbuffers = EM28XX_MIN_BUF;
return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p);
}
@@ -1276,6 +1095,9 @@ static int vidioc_enum_input(struct file *file, void *priv,
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = dev->vdev->tvnorms;
+ /* webcams do not have the STD API */
+ if (dev->board.is_webcam)
+ i->capabilities = 0;
return 0;
}
@@ -1375,131 +1197,6 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int id = qc->id;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- memset(qc, 0, sizeof(*qc));
-
- qc->id = id;
-
- /* enumerate AC97 controls */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
- rc = ac97_queryctrl(qc);
- if (!rc)
- return 0;
- }
-
- /* enumerate V4L2 device controls */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc);
-
- if (qc->type)
- return 0;
- else
- return -EINVAL;
-}
-
-/*
- * FIXME: This is an indirect way to check if a control exists at a
- * subdev. Instead of that hack, maybe the better would be to change all
- * subdevs to return -ENOIOCTLCMD, if an ioctl is not supported.
- */
-static int check_subdev_ctrl(struct em28xx *dev, int id)
-{
- struct v4l2_queryctrl qc;
-
- memset(&qc, 0, sizeof(qc));
- qc.id = id;
-
- /* enumerate V4L2 device controls */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, &qc);
-
- if (qc.type)
- return 0;
- else
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
- rc = 0;
-
- /* Set an AC97 control */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
- rc = ac97_get_ctrl(dev, ctrl);
- else
- rc = 1;
-
- /* It were not an AC97 control. Sends it to the v4l2 dev interface */
- if (rc == 1) {
- if (check_subdev_ctrl(dev, ctrl->id))
- return -EINVAL;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_ctrl, ctrl);
- rc = 0;
- }
-
- return rc;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- /* Set an AC97 control */
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
- rc = ac97_set_ctrl(dev, ctrl);
- else
- rc = 1;
-
- /* It isn't an AC97 control. Sends it to the v4l2 dev interface */
- if (rc == 1) {
- rc = check_subdev_ctrl(dev, ctrl->id);
- if (!rc)
- v4l2_device_call_all(&dev->v4l2_dev, 0,
- core, s_ctrl, ctrl);
- /*
- * In the case of non-AC97 volume controls, we still need
- * to do some setups at em28xx, in order to mute/unmute
- * and to adjust audio volume. However, the value ranges
- * should be checked by the corresponding V4L subdriver.
- */
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- dev->mute = ctrl->value;
- rc = em28xx_audio_analog_set(dev);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- dev->volume = ctrl->value;
- rc = em28xx_audio_analog_set(dev);
- }
- }
- return (rc < 0) ? rc : 0;
-}
-
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1515,7 +1212,6 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
return 0;
@@ -1545,7 +1241,9 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (0 != f->tuner)
+ return -EINVAL;
+
f->frequency = dev->ctl_freq;
return 0;
}
@@ -1564,13 +1262,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (0 != f->tuner)
return -EINVAL;
- if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
- return -EINVAL;
- if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
- return -EINVAL;
-
- dev->ctl_freq = f->frequency;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
+ dev->ctl_freq = f->frequency;
return 0;
}
@@ -1596,6 +1290,14 @@ static int vidioc_g_chip_ident(struct file *file, void *priv,
chip->ident = V4L2_IDENT_NONE;
chip->revision = 0;
+ if (chip->match.type == V4L2_CHIP_MATCH_HOST) {
+ if (v4l2_chip_match_host(&chip->match))
+ chip->ident = V4L2_IDENT_NONE;
+ return 0;
+ }
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, g_chip_ident, chip);
@@ -1704,72 +1406,10 @@ static int vidioc_cropcap(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc = -EINVAL;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (unlikely(type != fh->type))
- return -EINVAL;
-
- em28xx_videodbg("vidioc_streamon fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (unlikely(!res_get(fh, get_ressource(fh))))
- return -EBUSY;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_streamon(&fh->vb_vidq);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_streamon(&fh->vb_vbiq);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- if (type != fh->type)
- return -EINVAL;
-
- em28xx_videodbg("vidioc_streamoff fh=%p t=%d fh->res=%d dev->res=%d\n",
- fh, type, fh->resources, dev->resources);
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_check(fh, EM28XX_RESOURCE_VIDEO)) {
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh, EM28XX_RESOURCE_VIDEO);
- }
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (res_check(fh, EM28XX_RESOURCE_VBI)) {
- videobuf_streamoff(&fh->vb_vbiq);
- res_free(fh, EM28XX_RESOURCE_VBI);
- }
- }
-
- return 0;
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
+ struct video_device *vdev = video_devdata(file);
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
@@ -1777,20 +1417,26 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->capabilities =
- V4L2_CAP_SLICED_VBI_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
-
- if (dev->vbi_dev)
- cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps = V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else if (vdev->vfl_type == VFL_TYPE_RADIO)
+ cap->device_caps = V4L2_CAP_RADIO;
+ else
+ cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
if (dev->audio_mode.has_audio)
- cap->capabilities |= V4L2_CAP_AUDIO;
+ cap->device_caps |= V4L2_CAP_AUDIO;
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
+ V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ if (dev->vbi_dev)
+ cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
+ if (dev->radio_dev)
+ cap->capabilities |= V4L2_CAP_RADIO;
return 0;
}
@@ -1845,46 +1491,6 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return 0;
}
-/* Sliced VBI ioctls */
-static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- f->fmt.sliced.service_set = 0;
- v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
-
- if (f->fmt.sliced.service_set == 0)
- rc = -EINVAL;
-
- return rc;
-}
-
-static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
-
- if (f->fmt.sliced.service_set == 0)
- return -EINVAL;
-
- return 0;
-}
-
/* RAW VBI ioctls */
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
@@ -1900,6 +1506,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
format->fmt.vbi.sampling_rate = 6750000 * 4 / 2;
format->fmt.vbi.count[0] = dev->vbi_height;
format->fmt.vbi.count[1] = dev->vbi_height;
+ memset(format->fmt.vbi.reserved, 0, sizeof(format->fmt.vbi.reserved));
/* Varies by video standard (NTSC, PAL, etc.) */
if (dev->norm & V4L2_STD_525_60) {
@@ -1928,6 +1535,7 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
format->fmt.vbi.sampling_rate = 6750000 * 4 / 2;
format->fmt.vbi.count[0] = dev->vbi_height;
format->fmt.vbi.count[1] = dev->vbi_height;
+ memset(format->fmt.vbi.reserved, 0, sizeof(format->fmt.vbi.reserved));
/* Varies by video standard (NTSC, PAL, etc.) */
if (dev->norm & V4L2_STD_525_60) {
@@ -1943,100 +1551,10 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_reqbufs(&fh->vb_vidq, rb);
- else
- return videobuf_reqbufs(&fh->vb_vbiq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_querybuf(&fh->vb_vidq, b);
- else {
- /* FIXME: I'm not sure yet whether this is a bug in zvbi or
- the videobuf framework, but we probably shouldn't be
- returning a buffer larger than that which was asked for.
- At a minimum, it causes a crash in zvbi since it does
- a memcpy based on the source buffer length */
- int result = videobuf_querybuf(&fh->vb_vbiq, b);
- b->length = dev->vbi_width * dev->vbi_height * 2;
-
- return result;
- }
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_qbuf(&fh->vb_vidq, b);
- else
- return videobuf_qbuf(&fh->vb_vbiq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct em28xx_fh *fh = priv;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags &
- O_NONBLOCK);
- else
- return videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags &
- O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
-static int radio_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct em28xx *dev = ((struct em28xx_fh *)priv)->dev;
-
- strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
- strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
- usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
-
- cap->capabilities = V4L2_CAP_TUNER;
- return 0;
-}
-
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -2053,26 +1571,6 @@ static int radio_g_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
-{
- if (unlikely(a->index))
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- return 0;
-}
-
static int radio_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -2086,48 +1584,16 @@ static int radio_s_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_s_audio(struct file *file, void *fh,
- const struct v4l2_audio *a)
-{
- return 0;
-}
-
-static int radio_s_input(struct file *file, void *fh, unsigned int i)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- if (qc->id < V4L2_CID_BASE ||
- qc->id >= V4L2_CID_LASTP1)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ac97_qctrl); i++) {
- if (qc->id && qc->id == ac97_qctrl[i].id) {
- memcpy(qc, &(ac97_qctrl[i]), sizeof(*qc));
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
/*
* em28xx_v4l2_open()
* inits the device and starts isoc transfer
*/
static int em28xx_v4l2_open(struct file *filp)
{
- int errCode = 0, radio = 0;
struct video_device *vdev = video_devdata(filp);
struct em28xx *dev = video_drvdata(filp);
enum v4l2_buf_type fh_type = 0;
struct em28xx_fh *fh;
- enum v4l2_field field;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
@@ -2136,9 +1602,6 @@ static int em28xx_v4l2_open(struct file *filp)
case VFL_TYPE_VBI:
fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
}
em28xx_videodbg("open dev=%s type=%s users=%d\n",
@@ -2154,14 +1617,13 @@ static int em28xx_v4l2_open(struct file *filp)
mutex_unlock(&dev->lock);
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
fh->dev = dev;
- fh->radio = radio;
fh->type = fh_type;
filp->private_data = fh;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
- em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
/* Needed, since GPIO might have disabled power of
@@ -2170,31 +1632,18 @@ static int em28xx_v4l2_open(struct file *filp)
em28xx_wake_i2c(dev);
}
- if (fh->radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
em28xx_videodbg("video_open: setting radio device\n");
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio);
}
dev->users++;
- if (dev->progressive)
- field = V4L2_FIELD_NONE;
- else
- field = V4L2_FIELD_INTERLACED;
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, field,
- sizeof(struct em28xx_buffer), fh, &dev->lock);
-
- videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops,
- NULL, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct em28xx_buffer), fh, &dev->lock);
mutex_unlock(&dev->lock);
+ v4l2_fh_add(&fh->fh);
- return errCode;
+ return 0;
}
/*
@@ -2248,25 +1697,16 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_videodbg("users=%d\n", dev->users);
mutex_lock(&dev->lock);
- if (res_check(fh, EM28XX_RESOURCE_VIDEO)) {
- videobuf_stop(&fh->vb_vidq);
- res_free(fh, EM28XX_RESOURCE_VIDEO);
- }
-
- if (res_check(fh, EM28XX_RESOURCE_VBI)) {
- videobuf_stop(&fh->vb_vbiq);
- res_free(fh, EM28XX_RESOURCE_VBI);
- }
+ vb2_fop_release(filp);
if (dev->users == 1) {
/* the device is already disconnected,
 free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
+ if (dev->disconnected) {
em28xx_release_resources(dev);
- kfree(dev->alt_max_pkt_size);
+ kfree(dev->alt_max_pkt_size_isoc);
mutex_unlock(&dev->lock);
kfree(dev);
- kfree(fh);
return 0;
}
@@ -2274,7 +1714,6 @@ static int em28xx_v4l2_close(struct file *filp)
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
/* do this before setting alternate! */
- em28xx_uninit_isoc(dev, EM28XX_ANALOG_MODE);
em28xx_set_mode(dev, EM28XX_SUSPEND);
/* set alternate 0 */
@@ -2287,129 +1726,18 @@ static int em28xx_v4l2_close(struct file *filp)
}
}
- videobuf_mmap_free(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vbiq);
- kfree(fh);
dev->users--;
mutex_unlock(&dev->lock);
return 0;
}
-/*
- * em28xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-em28xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- /* FIXME: read() is not prepared to allow changing the video
- resolution while streaming. Seems a bug at em28xx_set_fmt
- */
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_locked(dev, EM28XX_RESOURCE_VIDEO))
- rc = -EBUSY;
- else
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VBI))
- rc = -EBUSY;
- else
- rc = videobuf_read_stream(&fh->vb_vbiq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- }
- mutex_unlock(&dev->lock);
-
- return rc;
-}
-
-/*
- * em28xx_poll()
- * will allocate buffers when called for the first time
- */
-static unsigned int em28xx_poll(struct file *filp, poll_table *wait)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VIDEO))
- return POLLERR;
- return videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (!res_get(fh, EM28XX_RESOURCE_VBI))
- return POLLERR;
- return videobuf_poll_stream(filp, &fh->vb_vbiq, wait);
- } else {
- return POLLERR;
- }
-}
-
-static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- unsigned int res;
-
- mutex_lock(&dev->lock);
- res = em28xx_poll(filp, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-/*
- * em28xx_v4l2_mmap()
- */
-static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct em28xx_fh *fh = filp->private_data;
- struct em28xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- rc = videobuf_mmap_mapper(&fh->vb_vbiq, vma);
- mutex_unlock(&dev->lock);
-
- em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
- rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations em28xx_v4l_fops = {
.owner = THIS_MODULE,
.open = em28xx_v4l2_open,
.release = em28xx_v4l2_close,
- .read = em28xx_v4l2_read,
- .poll = em28xx_v4l2_poll,
- .mmap = em28xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2420,19 +1748,20 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_cropcap = vidioc_cropcap,
- .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
- .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
- .vidioc_s_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
-
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
.vidioc_g_std = vidioc_g_std,
.vidioc_querystd = vidioc_querystd,
.vidioc_s_std = vidioc_s_std,
@@ -2441,15 +1770,14 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -2459,11 +1787,10 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device em28xx_video_template = {
.fops = &em28xx_v4l_fops,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
- .current_norm = V4L2_STD_PAL,
};
static const struct v4l2_file_operations radio_fops = {
@@ -2474,18 +1801,13 @@ static const struct v4l2_file_operations radio_fops = {
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
- .vidioc_querycap = radio_querycap,
+ .vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
- .vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_audio = radio_s_audio,
- .vidioc_s_input = radio_s_input,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -2514,9 +1836,11 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+ if (dev->board.is_webcam)
+ vfd->tvnorms = 0;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2527,7 +1851,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
int em28xx_register_analog_devices(struct em28xx *dev)
{
- u8 val;
+ u8 val;
int ret;
unsigned int maxw;
@@ -2535,7 +1859,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->name, EM28XX_VERSION);
/* set default norm */
- dev->norm = em28xx_video_template.current_norm;
+ dev->norm = V4L2_STD_PAL;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
@@ -2543,10 +1867,10 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->format = &format[0];
maxw = norm_maxw(dev);
- /* MaxPacketSize for em2800 is too small to capture at full resolution
- * use half of maxw as the scaler can only scale to 50% */
- if (dev->board.is_em2800)
- maxw /= 2;
+ /* MaxPacketSize for em2800 is too small to capture at full resolution
+ * use half of maxw as the scaler can only scale to 50% */
+ if (dev->board.is_em2800)
+ maxw /= 2;
em28xx_set_video_format(dev, format[0].fourcc,
maxw, norm_maxh(dev));
@@ -2572,6 +1896,8 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_errdev("cannot allocate video_device.\n");
return -ENODEV;
}
+ dev->vdev->queue = &dev->vb_vidq;
+ dev->vdev->queue->lock = &dev->vb_queue_lock;
/* register v4l2 video video_device */
ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
@@ -2587,6 +1913,9 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->vbi_dev = em28xx_vdev_init(dev, &em28xx_video_template,
"vbi");
+ dev->vbi_dev->queue = &dev->vb_vbiq;
+ dev->vbi_dev->queue->lock = &dev->vb_vbi_queue_lock;
+
/* register v4l2 vbi video_device */
ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 86e90d86da6d..5f0b2c59e846 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -4,6 +4,7 @@
Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com>
Ludovico Cavedon <cavedon@sssup.it>
Mauro Carvalho Chehab <mchehab@infradead.org>
+ Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de>
@@ -30,13 +31,12 @@
#include <linux/mutex.h>
#include <linux/videodev2.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
#include <media/ir-kbd-i2c.h>
#include <media/rc-core.h>
-#if defined(CONFIG_VIDEO_EM28XX_DVB) || defined(CONFIG_VIDEO_EM28XX_DVB_MODULE)
-#include <media/videobuf-dvb.h>
-#endif
#include "tuner-xc2028.h"
#include "xc5000.h"
#include "em28xx-reg.h"
@@ -157,12 +157,18 @@
#define EM28XX_NUM_BUFS 5
#define EM28XX_DVB_NUM_BUFS 5
-/* number of packets for each buffer
+/* isoc transfers: number of packets for each buffer
windows requests only 64 packets .. so we better do the same
this is what I found out for all alternate numbers there!
*/
-#define EM28XX_NUM_PACKETS 64
-#define EM28XX_DVB_MAX_PACKETS 64
+#define EM28XX_NUM_ISOC_PACKETS 64
+#define EM28XX_DVB_NUM_ISOC_PACKETS 64
+
+/* bulk transfers: transfer buffer size = packet size * packet multiplier
+ USB 2.0 spec says bulk packet size is always 512 bytes
+ */
+#define EM28XX_BULK_PACKET_MULTIPLIER 384
+#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
#define EM28XX_INTERLACED_DEFAULT 1
@@ -187,12 +193,8 @@
Interval: 125us
*/
-/* time to wait when stopping the isoc transfer */
-#define EM28XX_URB_TIMEOUT \
- msecs_to_jiffies(EM28XX_NUM_BUFS * EM28XX_NUM_PACKETS)
-
/* time in msecs to wait for i2c writes to finish */
-#define EM2800_I2C_WRITE_TIMEOUT 20
+#define EM2800_I2C_XFER_TIMEOUT 20
enum em28xx_mode {
EM28XX_SUSPEND,
@@ -203,7 +205,7 @@ enum em28xx_mode {
struct em28xx;
-struct em28xx_usb_isoc_bufs {
+struct em28xx_usb_bufs {
/* max packet size of isoc transaction */
int max_pkt_size;
@@ -213,26 +215,26 @@ struct em28xx_usb_isoc_bufs {
/* number of allocated urbs */
int num_bufs;
- /* urb for isoc transfers */
+ /* urb for isoc/bulk transfers */
struct urb **urb;
- /* transfer buffers for isoc transfer */
+ /* transfer buffers for isoc/bulk transfer */
char **transfer_buffer;
};
-struct em28xx_usb_isoc_ctl {
- /* isoc transfer buffers for analog mode */
- struct em28xx_usb_isoc_bufs analog_bufs;
+struct em28xx_usb_ctl {
+ /* isoc/bulk transfer buffers for analog mode */
+ struct em28xx_usb_bufs analog_bufs;
- /* isoc transfer buffers for digital mode */
- struct em28xx_usb_isoc_bufs digital_bufs;
+ /* isoc/bulk transfer buffers for digital mode */
+ struct em28xx_usb_bufs digital_bufs;
/* Stores already requested buffers */
struct em28xx_buffer *vid_buf;
struct em28xx_buffer *vbi_buf;
- /* isoc urb callback */
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb);
+ /* copy data from URB */
+ int (*urb_data_copy) (struct em28xx *dev, struct urb *urb);
};
@@ -247,19 +249,26 @@ struct em28xx_fmt {
/* buffer for one video frame */
struct em28xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head list;
- struct list_head frame;
+ void *mem;
+ unsigned int length;
int top_field;
+
+ /* counter to control buffer fill */
+ unsigned int pos;
+ /* NOTE: in interlaced mode, this value is reset to zero at
+ * the start of each new field (not frame!) */
+
+ /* pointer to vmalloc memory address in vb */
+ char *vb_buf;
};
struct em28xx_dmaqueue {
struct list_head active;
wait_queue_head_t wq;
-
- /* Counters to control buffer fill */
- int pos;
};
/* inputs */
@@ -430,13 +439,6 @@ struct em28xx_eeprom {
u8 string_idx_table;
};
-/* device states */
-enum em28xx_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04,
-};
-
#define EM28XX_AUDIO_BUFS 5
#define EM28XX_NUM_AUDIO_PACKETS 64
#define EM28XX_AUDIO_MAX_PACKET_SIZE 196 /* static value */
@@ -469,12 +471,8 @@ struct em28xx_audio {
struct em28xx;
struct em28xx_fh {
+ struct v4l2_fh fh;
struct em28xx *dev;
- int radio;
- unsigned int resources;
-
- struct videobuf_queue vb_vidq;
- struct videobuf_queue vb_vbiq;
enum v4l2_buf_type type;
};
@@ -487,9 +485,14 @@ struct em28xx {
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
+ unsigned char disconnected:1; /* device has been disconnected */
+
int audio_ifnum;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* provides ac97 mute and volume overrides */
+ struct v4l2_ctrl_handler ac97_ctrl_handler;
struct em28xx_board board;
/* Webcam specific fields */
@@ -497,7 +500,7 @@ struct em28xx {
int sensor_xres, sensor_yres;
int sensor_xtal;
- /* Allows progressive (e. g. non-interlaced) mode */
+ /* Progressive (non-interlaced) mode */
int progressive;
/* Vinmode/Vinctl used at the driver */
@@ -532,6 +535,7 @@ struct em28xx {
struct i2c_client i2c_client;
/* video for linux */
int users; /* user count for exclusive use */
+ int streaming_users; /* Number of actively streaming users */
struct video_device *vdev; /* video for linux device struct */
v4l2_std_id norm; /* selected tv norm */
int ctl_freq; /* selected frequency */
@@ -554,13 +558,10 @@ struct em28xx {
struct em28xx_audio adev;
- /* states */
- enum em28xx_dev_state state;
-
- /* vbi related state tracking */
+ /* capture state tracking */
int capture_type;
+ unsigned char top_field:1;
int vbi_read;
- unsigned char cur_field;
unsigned int vbi_width;
unsigned int vbi_height; /* lines per field */
@@ -574,6 +575,12 @@ struct em28xx {
struct video_device *vbi_dev;
struct video_device *radio_dev;
+ /* Videobuf2 */
+ struct vb2_queue vb_vidq;
+ struct vb2_queue vb_vbiq;
+ struct mutex vb_queue_lock;
+ struct mutex vb_vbi_queue_lock;
+
/* resources in use */
unsigned int resources;
@@ -582,17 +589,31 @@ struct em28xx {
/* Isoc control struct */
struct em28xx_dmaqueue vidq;
struct em28xx_dmaqueue vbiq;
- struct em28xx_usb_isoc_ctl isoc_ctl;
+ struct em28xx_usb_ctl usb_ctl;
spinlock_t slock;
+ unsigned int field_count;
+ unsigned int vbi_field_count;
+
/* usb transfer */
struct usb_device *udev; /* the usb device */
- int alt; /* alternate */
- int max_pkt_size; /* max packet size of isoc transaction */
- int num_alt; /* Number of alternative settings */
- unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
- int dvb_alt; /* alternate for DVB */
- unsigned int dvb_max_pkt_size; /* wMaxPacketSize for DVB */
+ u8 analog_ep_isoc; /* address of isoc endpoint for analog */
+ u8 analog_ep_bulk; /* address of bulk endpoint for analog */
+ u8 dvb_ep_isoc; /* address of isoc endpoint for DVB */
+ u8 dvb_ep_bulk; /* address of bulk endpoint for DVB */
+ int alt; /* alternate setting */
+ int max_pkt_size; /* max packet size of the selected ep at alt */
+ int packet_multiplier; /* multiplier for wMaxPacketSize, used for
+ URB buffer size definition */
+ int num_alt; /* number of alternative settings */
+ unsigned int *alt_max_pkt_size_isoc; /* array of isoc wMaxPacketSize */
+ unsigned int analog_xfer_bulk:1; /* use bulk instead of isoc
+ transfers for analog */
+ int dvb_alt_isoc; /* alternate setting for DVB isoc transfers */
+ unsigned int dvb_max_pkt_size_isoc; /* isoc max packet size of the
+ selected DVB ep at dvb_alt */
+ unsigned int dvb_xfer_bulk:1; /* use bulk instead of isoc
+ transfers for DVB */
char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
/* helper funcs that call usb_control_msg */
@@ -619,9 +640,6 @@ struct em28xx {
struct delayed_work sbutton_query_work;
struct em28xx_dvb *dvb;
-
- /* I2C keyboard data */
- struct IR_i2c_init_data init_data;
};
struct em28xx_ops {
@@ -666,12 +684,14 @@ int em28xx_vbi_supported(struct em28xx *dev);
int em28xx_set_outfmt(struct em28xx *dev);
int em28xx_resolution_set(struct em28xx *dev);
int em28xx_set_alternate(struct em28xx *dev);
-int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size);
-int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
- int max_packets, int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
-void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode);
+int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier);
+int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
+ int xfer_bulk,
+ int num_bufs, int max_pkt_size, int packet_multiplier,
+ int (*urb_data_copy)
+ (struct em28xx *dev, struct urb *urb));
+void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode);
void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
@@ -683,8 +703,13 @@ void em28xx_init_extension(struct em28xx *dev);
void em28xx_close_extension(struct em28xx *dev);
/* Provided by em28xx-video.c */
+int em28xx_vb2_setup(struct em28xx *dev);
int em28xx_register_analog_devices(struct em28xx *dev);
void em28xx_release_analog_resources(struct em28xx *dev);
+void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv);
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
+extern const struct v4l2_ctrl_ops em28xx_ctrl_ops;
/* Provided by em28xx-cards.c */
extern int em2800_variant_detect(struct usb_device *udev, int model);
@@ -695,7 +720,7 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
/* Provided by em28xx-vbi.c */
-extern struct videobuf_queue_ops em28xx_vbi_qops;
+extern struct vb2_ops em28xx_vbi_qops;
/* printk macros */
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index b3ba47d4d6a2..1dcdd9f95f1c 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -541,7 +541,7 @@ static int do_command(struct gspca_dev *gspca_dev, u16 command,
/* test button press */
a = ((gspca_dev->usb_buf[1] & 0x02) == 0);
if (a != sd->params.qx3.button) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, a);
input_sync(gspca_dev->input_dev);
#endif
@@ -1640,7 +1640,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* Update the camera status */
do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->params.qx3.button) {
/* The camera latch will hold the pressed state until we reset
@@ -1869,7 +1869,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index e0a431bb0d42..3564bdbb2ea3 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -44,7 +44,7 @@
#include "gspca.h"
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif
@@ -118,7 +118,7 @@ static const struct vm_operations_struct gspca_vm_ops = {
/*
* Input and interrupt endpoint handling functions
*/
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static void int_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
@@ -2303,7 +2303,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
return 0;
out:
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
@@ -2348,7 +2348,7 @@ EXPORT_SYMBOL(gspca_dev_probe);
void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct input_dev *input_dev;
#endif
@@ -2360,7 +2360,7 @@ void gspca_disconnect(struct usb_interface *intf)
gspca_dev->present = 0;
destroy_urbs(gspca_dev);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
gspca_input_destroy_urb(gspca_dev);
input_dev = gspca_dev->input_dev;
if (input_dev) {
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index 352317d7acdb..5559932bf2f5 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -138,7 +138,7 @@ struct sd_desc {
cam_reg_op get_register;
#endif
cam_ident_op get_chip_ident;
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
cam_int_pkt_op int_pkt_scan;
/* other_input makes the gspca core create gspca_dev->input even when
int_pkt_scan is NULL, for cams with non-interrupt-driven buttons */
@@ -167,7 +167,7 @@ struct gspca_dev {
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
/* protected by queue_lock */
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
#endif
@@ -190,7 +190,7 @@ struct gspca_dev {
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
struct urb *urb[MAX_NURBS];
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
struct urb *int_urb;
#endif
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index 62ba80d9b998..fdaeeb14453f 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -536,20 +536,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- return 0;
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index bbf91e07e38b..61e25dbf2447 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -246,7 +246,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
konica_stream_off(gspca_dev);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* Don't keep the button in the pressed state "forever" if it was
pressed when streaming is stopped */
if (sd->snapshot_pressed) {
@@ -345,7 +345,7 @@ static void sd_isoc_irq(struct urb *urb)
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
} else {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
u8 button_state = st & 0x40 ? 1 : 0;
if (sd->snapshot_pressed != button_state) {
input_report_key(gspca_dev->input_dev,
@@ -452,7 +452,7 @@ static const struct sd_desc sd_desc = {
.init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 9aa09f845ce4..9ad19a7ef81b 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -4238,7 +4238,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
if (sd->bridge == BRIDGE_W9968CF)
w9968cf_stop0(sd);
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->snapshot_pressed) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -4255,7 +4255,7 @@ static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
struct sd *sd = (struct sd *) gspca_dev;
if (sd->snapshot_pressed != state) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
input_sync(gspca_dev->input_dev);
#endif
@@ -4924,7 +4924,7 @@ static const struct sd_desc sd_desc = {
.dq_callback = sd_reset_snapshot,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index d236d1791f78..3b75097dd34e 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -55,6 +55,11 @@ MODULE_LICENSE("GPL");
#define PAC207_AUTOGAIN_DEADZONE 30
+/* global parameters */
+static int led_invert;
+module_param(led_invert, int, 0644);
+MODULE_PARM_DESC(led_invert, "Invert LED");
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -187,10 +192,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- pac207_write_reg(gspca_dev, 0x41, 0x00);
- /* Bit_0=Image Format,
- * Bit_1=LED,
- * Bit_2=Compression test mode enable */
+ u8 mode;
+
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x02;
+ else
+ mode = 0x00;
+ pac207_write_reg(gspca_dev, 0x41, mode);
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
return gspca_dev->usb_err;
@@ -303,7 +312,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
pac207_write_reg(gspca_dev, 0x02,
v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* PXCK = 12MHz /n */
- mode = 0x02; /* Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x00;
+ else
+ mode = 0x02;
if (gspca_dev->width == 176) { /* 176x144 */
mode |= 0x01;
PDEBUG(D_STREAM, "pac207_start mode 176x144");
@@ -325,8 +338,15 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ u8 mode;
+
+ /* mode: Image Format (Bit 0), LED (1), Compr. test mode (2) */
+ if (led_invert)
+ mode = 0x02;
+ else
+ mode = 0x00;
pac207_write_reg(gspca_dev, 0x40, 0x00); /* Stop ISO pipe */
- pac207_write_reg(gspca_dev, 0x41, 0x00); /* Turn of LED */
+ pac207_write_reg(gspca_dev, 0x41, mode); /* Turn off LED */
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
}
@@ -393,7 +413,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -422,7 +442,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 4f5869a98082..add6f725ba50 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -890,7 +890,7 @@ static int sd_chip_ident(struct gspca_dev *gspca_dev,
}
#endif
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -936,7 +936,7 @@ static const struct sd_desc sd_desc = {
.set_register = sd_dbg_s_register,
.get_chip_ident = sd_chip_ident,
#endif
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c
index ba3558d3f017..a12dfbf6e051 100644
--- a/drivers/media/usb/gspca/pac7311.c
+++ b/drivers/media/usb/gspca/pac7311.c
@@ -621,7 +621,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -661,7 +661,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index a33cb78a839c..5f729b8aa2bd 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -594,7 +594,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
sd_pkt_scan_janggu(gspca_dev, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
{
struct sd *sd = (struct sd *)gspca_dev;
@@ -688,7 +688,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 41f769fe340c..4ec544f4a845 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -2205,7 +2205,7 @@ static void qual_upd(struct work_struct *work)
mutex_unlock(&gspca_dev->usb_lock);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet */
int len) /* interrupt packet length */
@@ -2349,7 +2349,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
.dq_callback = sd_dqcallback,
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 1220340e7602..104ae25275b4 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1400,7 +1400,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -1430,7 +1430,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.querymenu = sd_querymenu,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1448,7 +1448,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+#if !IS_ENABLED(CONFIG_USB_SN9C102)
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 36307a9028a9..671d0c6dece3 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -3077,7 +3077,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -3109,7 +3109,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
.querymenu = sd_querymenu,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/spca561.c b/drivers/media/usb/gspca/spca561.c
index cfe71dd6747d..d1db3d8f6522 100644
--- a/drivers/media/usb/gspca/spca561.c
+++ b/drivers/media/usb/gspca/spca561.c
@@ -741,7 +741,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
return;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (data[0] & 0x20) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
input_sync(gspca_dev->input_dev);
@@ -866,7 +866,7 @@ static const struct sd_desc sd_desc_12a = {
.start = sd_start_12a,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
@@ -879,7 +879,7 @@ static const struct sd_desc sd_desc_72a = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 999ec7764449..657160b4a1f7 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -492,7 +492,7 @@ frame_data:
}
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -529,7 +529,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = stv06xx_pkt_scan,
.isoc_init = stv06xx_isoc_init,
.isoc_nego = stv06xx_isoc_nego,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
index 748e1421d6d8..e95fa8997d22 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
@@ -52,9 +52,13 @@ static int vv6410_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_HFLIP:
+ if (!gspca_dev->streaming)
+ return 0;
err = vv6410_set_hflip(gspca_dev, ctrl->val);
break;
case V4L2_CID_VFLIP:
+ if (!gspca_dev->streaming)
+ return 0;
err = vv6410_set_vflip(gspca_dev, ctrl->val);
break;
case V4L2_CID_GAIN:
@@ -94,11 +98,14 @@ static int vv6410_init_controls(struct sd *sd)
{
struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
- v4l2_ctrl_handler_init(hdl, 4);
- v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_handler_init(hdl, 2);
+ /* Disable the hardware VFLIP and HFLIP as we currently lack a
+ mechanism to adjust the image offset in such a way that
+ we don't need to renegotiate the announced format */
+ /* v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops, */
+ /* V4L2_CID_HFLIP, 0, 1, 1, 0); */
+ /* v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops, */
+ /* V4L2_CID_VFLIP, 0, 1, 1, 0); */
v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
V4L2_CID_EXPOSURE, 0, 32768, 1, 20000);
v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
index 8bc6c3ceec2c..e2cc4e5a0ccb 100644
--- a/drivers/media/usb/gspca/t613.c
+++ b/drivers/media/usb/gspca/t613.c
@@ -494,7 +494,7 @@ static void setcolors(struct gspca_dev *gspca_dev, s32 val)
static void setgamma(struct gspca_dev *gspca_dev, s32 val)
{
- PDEBUG(D_CONF, "Gamma: %d", sd->gamma);
+ PDEBUG(D_CONF, "Gamma: %d", val);
reg_w_ixbuf(gspca_dev, 0x90,
gamma_table[val], sizeof gamma_table[0]);
}
@@ -823,7 +823,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
msleep(20);
reg_w(gspca_dev, 0x0309);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->button_pressed) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -841,7 +841,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
int pkt_type;
if (data[0] == 0x5a) {
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
if (len > 20) {
u8 state = (data[20] & 0x80) ? 1 : 0;
if (sd->button_pressed != state) {
@@ -1019,7 +1019,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.other_input = 1,
#endif
};
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index d4b23c9bf90c..7eaf64eb867c 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -2759,7 +2759,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
break;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
/* If the last button state is pressed, release it now! */
if (sd->button_state) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
@@ -2914,7 +2914,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static void cit_check_button(struct gspca_dev *gspca_dev)
{
int new_button_state;
@@ -3062,7 +3062,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.dq_callback = cit_check_button,
.other_input = 1,
#endif
@@ -3079,7 +3079,7 @@ static const struct sd_desc sd_desc_isoc_nego = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.dq_callback = cit_check_button,
.other_input = 1,
#endif
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 77c57755e7b4..a8dc421f9f1f 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6902,7 +6902,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
return 0;
}
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -6929,7 +6929,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 84dc26fe80ee..5c6193536399 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -391,7 +391,7 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
@@ -419,7 +419,7 @@ static int hdpvr_probe(struct usb_interface *interface,
return 0;
reg_fail:
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
i2c_del_adapter(&dev->i2c_adapter);
#endif
error:
@@ -451,7 +451,7 @@ static void hdpvr_disconnect(struct usb_interface *interface)
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
i2c_del_adapter(&dev->i2c_adapter);
#endif
video_unregister_device(dev->video_dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 031cf024304c..a38f58c4c6bf 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -13,7 +13,7 @@
*
*/
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -217,8 +217,7 @@ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
hdpvr_activate_ir(dev);
- memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
- sizeof(struct i2c_adapter));
+ dev->i2c_adapter = hdpvr_i2c_adapter_template;
dev->i2c_adapter.dev.parent = &dev->udev->dev;
i2c_set_adapdata(&dev->i2c_adapter, dev);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index e046fdaec5ae..f7702aeeda3f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -422,8 +422,7 @@ int pvr2_encoder_adjust(struct pvr2_hdw *hdw)
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Error from cx2341x module code=%d",ret);
} else {
- memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
- sizeof(struct cx2341x_mpeg_params));
+ hdw->enc_cur_state = hdw->enc_ctl_state;
hdw->enc_cur_valid = !0;
}
return ret;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 9ab596c78a4e..b5e929f1bf82 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -649,8 +649,8 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
}
// Configure the adapter and set up everything else related to it.
- memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
- memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
+ hdw->i2c_adap = pvr2_i2c_adap_template;
+ hdw->i2c_algo = pvr2_i2c_algo_template;
strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
hdw->i2c_adap.dev.parent = &hdw->usb_dev->dev;
hdw->i2c_adap.algo = &hdw->i2c_algo;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 6930676051e7..34c3b6e80e86 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1339,7 +1339,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
return;
}
- memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template));
+ dip->devbase = vdev_template;
dip->devbase.release = pvr2_video_device_release;
dip->devbase.ioctl_ops = &pvr2_ioctl_ops;
{
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 5210239cbaee..5ec15cb1ed26 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -316,7 +316,8 @@ static void pwc_isoc_handler(struct urb *urb)
struct pwc_frame_buf *fbuf = pdev->fill_buf;
if (pdev->vsync == 1) {
- do_gettimeofday(&fbuf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(
+ &fbuf->vb.v4l2_buf.timestamp);
pdev->vsync = 2;
}
@@ -1007,7 +1008,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
/* Init video_device structure */
- memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+ pdev->vdev = pwc_template;
strcpy(pdev->vdev.name, name);
pdev->vdev.queue = &pdev->vb_queue;
pdev->vdev.queue->lock = &pdev->vb_queue_lock;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 545e9bbdeede..aa7449eaca08 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -434,19 +434,18 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
case V4L2_PIX_FMT_PWC1:
if (DEVICE_USE_CODEC23(pdev->type)) {
PWC_DEBUG_IOCTL("codec1 is only supported for old pwc webcam\n");
- return -EINVAL;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
break;
case V4L2_PIX_FMT_PWC2:
if (DEVICE_USE_CODEC1(pdev->type)) {
PWC_DEBUG_IOCTL("codec23 is only supported for new pwc webcam\n");
- return -EINVAL;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
break;
default:
PWC_DEBUG_IOCTL("Unsupported pixel format\n");
- return -EINVAL;
-
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
}
size = pwc_get_size(pdev, f->fmt.pix.width, f->fmt.pix.height);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 8ebec0d7bf59..498c57ea5d32 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -593,7 +593,7 @@ static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
s2255_fillbuff(channel, buf, jpgsize);
wake_up(&buf->vb.done);
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
@@ -629,7 +629,6 @@ static void s2255_fillbuff(struct s2255_channel *channel,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
- struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
@@ -674,8 +673,7 @@ static void s2255_fillbuff(struct s2255_channel *channel,
/* tell v4l buffer was filled */
buf->vb.field_count = channel->frame_count * 2;
- do_gettimeofday(&ts);
- buf->vb.ts = ts;
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
}
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 73605864fffa..c957e9aa6077 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -173,7 +173,7 @@ sn9c102_request_buffers(struct sn9c102_device* cam, u32 count,
cam->frame[i].buf.sequence = 0;
cam->frame[i].buf.field = V4L2_FIELD_NONE;
cam->frame[i].buf.memory = V4L2_MEMORY_MMAP;
- cam->frame[i].buf.flags = 0;
+ cam->frame[i].buf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}
return cam->nbuffers;
@@ -773,7 +773,8 @@ end_of_frame:
img);
if ((*f)->buf.bytesused == 0)
- do_gettimeofday(&(*f)->buf.timestamp);
+ v4l2_get_timestamp(
+ &(*f)->buf.timestamp);
(*f)->buf.bytesused += img;
@@ -2826,7 +2827,7 @@ sn9c102_vidioc_querybuf(struct sn9c102_device* cam, void __user * arg)
b.index >= cam->nbuffers || cam->io != IO_MMAP)
return -EINVAL;
- memcpy(&b, &cam->frame[b.index].buf, sizeof(b));
+ b = cam->frame[b.index].buf;
if (cam->frame[b.index].vma_use_count)
b.flags |= V4L2_BUF_FLAG_MAPPED;
@@ -2929,7 +2930,7 @@ sn9c102_vidioc_dqbuf(struct sn9c102_device* cam, struct file* filp,
f->state = F_UNUSED;
- memcpy(&b, &f->buf, sizeof(b));
+ b = f->buf;
if (f->vma_use_count)
b.flags |= V4L2_BUF_FLAG_MAPPED;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index fa3671de02aa..39f1aae209bc 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -78,7 +78,7 @@ struct stk1160_buffer *stk1160_next_buffer(struct stk1160 *dev)
unsigned long flags = 0;
/* Current buffer must be NULL when this function gets called */
- BUG_ON(dev->isoc_ctl.buf);
+ WARN_ON(dev->isoc_ctl.buf);
spin_lock_irqsave(&dev->buf_lock, flags);
if (!list_empty(&dev->avail_bufs)) {
@@ -101,7 +101,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.bytesused = buf->bytesused;
- do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_set_plane_payload(&buf->vb, 0, buf->bytesused);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 5d3c032d733c..4cbab085e348 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <linux/usb.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
@@ -38,12 +39,12 @@
#include "stk-webcam.h"
-static bool hflip;
-module_param(hflip, bool, 0444);
+static int hflip = -1;
+module_param(hflip, int, 0444);
MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0");
-static bool vflip;
-module_param(vflip, bool, 0444);
+static int vflip = -1;
+module_param(vflip, int, 0444);
MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0");
static int debug;
@@ -62,6 +63,19 @@ static struct usb_device_id stkwebcam_table[] = {
};
MODULE_DEVICE_TABLE(usb, stkwebcam_table);
+/* The stk webcam laptop module is mounted upside down in some laptops :( */
+static const struct dmi_system_id stk_upside_down_dmi_table[] = {
+ {
+ .ident = "ASUS G1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1")
+ }
+ },
+ {}
+};
+
+
/*
* Basic stuff
*/
@@ -466,6 +480,7 @@ static int stk_setup_siobuf(struct stk_camera *dev, int index)
buf->dev = dev;
buf->v4lbuf.index = index;
buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf->v4lbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->v4lbuf.field = V4L2_FIELD_NONE;
buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
@@ -816,10 +831,16 @@ static int stk_vidioc_g_ctrl(struct file *filp,
c->value = dev->vsettings.brightness;
break;
case V4L2_CID_HFLIP:
- c->value = dev->vsettings.hflip;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ c->value = !dev->vsettings.hflip;
+ else
+ c->value = dev->vsettings.hflip;
break;
case V4L2_CID_VFLIP:
- c->value = dev->vsettings.vflip;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ c->value = !dev->vsettings.vflip;
+ else
+ c->value = dev->vsettings.vflip;
break;
default:
return -EINVAL;
@@ -836,10 +857,16 @@ static int stk_vidioc_s_ctrl(struct file *filp,
dev->vsettings.brightness = c->value;
return stk_sensor_set_brightness(dev, c->value >> 8);
case V4L2_CID_HFLIP:
- dev->vsettings.hflip = c->value;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.hflip = !c->value;
+ else
+ dev->vsettings.hflip = c->value;
return 0;
case V4L2_CID_VFLIP:
- dev->vsettings.vflip = c->value;
+ if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.vflip = !c->value;
+ else
+ dev->vsettings.vflip = c->value;
return 0;
default:
return -EINVAL;
@@ -1113,7 +1140,7 @@ static int stk_vidioc_dqbuf(struct file *filp,
sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
sbuf->v4lbuf.sequence = ++dev->sequence;
- do_gettimeofday(&sbuf->v4lbuf.timestamp);
+ v4l2_get_timestamp(&sbuf->v4lbuf.timestamp);
*buf = sbuf->v4lbuf;
return 0;
@@ -1275,8 +1302,18 @@ static int stk_camera_probe(struct usb_interface *interface,
dev->interface = interface;
usb_get_intf(interface);
- dev->vsettings.vflip = vflip;
- dev->vsettings.hflip = hflip;
+ if (hflip != -1)
+ dev->vsettings.hflip = hflip;
+ else if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.hflip = 1;
+ else
+ dev->vsettings.hflip = 0;
+ if (vflip != -1)
+ dev->vsettings.vflip = vflip;
+ else if (dmi_check_system(stk_upside_down_dmi_table))
+ dev->vsettings.vflip = 1;
+ else
+ dev->vsettings.vflip = 0;
dev->n_sbufs = 0;
set_present(dev);
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/media/usb/tlg2300/pd-video.c
index 3082bfa9b2c5..21723378bb8f 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/media/usb/tlg2300/pd-video.c
@@ -212,7 +212,7 @@ static void submit_frame(struct front_face *front)
front->curr_frame = NULL;
vb->state = VIDEOBUF_DONE;
vb->field_count++;
- do_gettimeofday(&vb->ts);
+ v4l2_get_timestamp(&vb->ts);
wake_up(&vb->done);
}
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 22cc0116deb6..7c32353c59db 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -40,10 +40,13 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
u8 *data = NULL;
int delay = 5000;
- mutex_lock(&dev->usb_lock);
-
- if (len)
+ if (len) {
data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->usb_lock);
if (req_type & USB_DIR_IN)
pipe = usb_rcvctrlpipe(dev->udev, 0);
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index e1f3f66e1e63..9fc1e940a82b 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -360,8 +360,8 @@ dvb_dmx_err:
dvb_dmx_release(&dvb->demux);
frontend_err:
if (dvb->frontend) {
- dvb_frontend_detach(dvb->frontend);
dvb_unregister_frontend(dvb->frontend);
+ dvb_frontend_detach(dvb->frontend);
}
adapter_err:
dvb_unregister_adapter(&dvb->adapter);
@@ -384,8 +384,8 @@ static void unregister_dvb(struct tm6000_core *dev)
/* mutex_lock(&tm6000_driver.open_close_mutex); */
if (dvb->frontend) {
- dvb_frontend_detach(dvb->frontend);
dvb_unregister_frontend(dvb->frontend);
+ dvb_frontend_detach(dvb->frontend);
}
dvb_dmxdev_release(&dvb->dmxdev);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index f656fd7a39a2..1a6857929c15 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -34,6 +34,7 @@
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/tuner.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
@@ -49,82 +50,20 @@
#define TM6000_MIN_BUF 4
#define TM6000_DEF_BUF 8
+#define TM6000_NUM_URB_BUF 8
+
#define TM6000_MAX_ISO_PACKETS 46 /* Max number of ISO packets */
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
static int radio_nr = -1; /* /dev/radioN, -1 for autodetect */
+static bool keep_urb; /* keep urb buffers allocated */
/* Debug level */
int tm6000_debug;
EXPORT_SYMBOL_GPL(tm6000_debug);
-static const struct v4l2_queryctrl no_ctrl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
-};
-
-/* supported controls */
-static struct v4l2_queryctrl tm6000_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 54,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 119,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 112,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = 0,
- },
- /* --- audio --- */
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- }, {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = -15,
- .maximum = 15,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }
-};
-
-static const unsigned int CTRLS = ARRAY_SIZE(tm6000_qctrl);
-static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)];
-
static struct tm6000_fmt format[] = {
{
.name = "4:2:2, packed, YVY2",
@@ -141,16 +80,6 @@ static struct tm6000_fmt format[] = {
}
};
-static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id)
-{
- unsigned int i;
-
- for (i = 0; i < CTRLS; i++)
- if (tm6000_qctrl[i].id == id)
- return tm6000_qctrl+i;
- return NULL;
-}
-
/* ------------------------------------------------------------------
* DMA and thread functions
* ------------------------------------------------------------------
@@ -191,7 +120,7 @@ static inline void buffer_filled(struct tm6000_core *dev,
dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -538,6 +467,71 @@ static void tm6000_irq_callback(struct urb *urb)
}
/*
+ * Allocate URB buffers
+ */
+static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
+{
+ int num_bufs = TM6000_NUM_URB_BUF;
+ int i;
+
+ if (dev->urb_buffer != NULL)
+ return 0;
+
+ dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!dev->urb_buffer) {
+ tm6000_err("cannot allocate memory for urb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->urb_dma = kmalloc(sizeof(dma_addr_t *)*num_bufs, GFP_KERNEL);
+ if (!dev->urb_dma) {
+ tm6000_err("cannot allocate memory for urb dma pointers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ dev->urb_buffer[i] = usb_alloc_coherent(
+ dev->udev, dev->urb_size,
+ GFP_KERNEL, &dev->urb_dma[i]);
+ if (!dev->urb_buffer[i]) {
+ tm6000_err("unable to allocate %i bytes for transfer buffer %i\n",
+ dev->urb_size, i);
+ return -ENOMEM;
+ }
+ memset(dev->urb_buffer[i], 0, dev->urb_size);
+ }
+
+ return 0;
+}
+
+/*
+ * Free URB buffers
+ */
+static int tm6000_free_urb_buffers(struct tm6000_core *dev)
+{
+ int i;
+
+ if (dev->urb_buffer == NULL)
+ return 0;
+
+ for (i = 0; i < TM6000_NUM_URB_BUF; i++) {
+ if (dev->urb_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ dev->urb_size,
+ dev->urb_buffer[i],
+ dev->urb_dma[i]);
+ dev->urb_buffer[i] = NULL;
+ }
+ }
+ kfree(dev->urb_buffer);
+ kfree(dev->urb_dma);
+ dev->urb_buffer = NULL;
+ dev->urb_dma = NULL;
+
+ return 0;
+}
+
+/*
* Stop and Deallocate URBs
*/
static void tm6000_uninit_isoc(struct tm6000_core *dev)
@@ -551,18 +545,15 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
if (urb) {
usb_kill_urb(urb);
usb_unlink_urb(urb);
- if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_free_coherent(dev->udev,
- urb->transfer_buffer_length,
- dev->isoc_ctl.transfer_buffer[i],
- urb->transfer_dma);
- }
usb_free_urb(urb);
dev->isoc_ctl.urb[i] = NULL;
}
dev->isoc_ctl.transfer_buffer[i] = NULL;
}
+ if (!keep_urb)
+ tm6000_free_urb_buffers(dev);
+
kfree(dev->isoc_ctl.urb);
kfree(dev->isoc_ctl.transfer_buffer);
@@ -572,12 +563,13 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
}
/*
- * Allocate URBs and start IRQ
+ * Assign URBs and start IRQ
*/
static int tm6000_prepare_isoc(struct tm6000_core *dev)
{
struct tm6000_dmaqueue *dma_q = &dev->vidq;
- int i, j, sb_size, pipe, size, max_packets, num_bufs = 8;
+ int i, j, sb_size, pipe, size, max_packets;
+ int num_bufs = TM6000_NUM_URB_BUF;
struct urb *urb;
/* De-allocates all pending stuff */
@@ -605,6 +597,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
max_packets = TM6000_MAX_ISO_PACKETS;
sb_size = max_packets * size;
+ dev->urb_size = sb_size;
dev->isoc_ctl.num_bufs = num_bufs;
@@ -627,6 +620,17 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
max_packets, num_bufs, sb_size,
dev->isoc_in.maxsize, size);
+
+ if (!dev->urb_buffer && tm6000_alloc_urb_buffers(dev) < 0) {
+ tm6000_err("cannot allocate memory for urb buffers\n");
+
+ /* call free, as some buffers might have been allocated */
+ tm6000_free_urb_buffers(dev);
+ kfree(dev->isoc_ctl.urb);
+ kfree(dev->isoc_ctl.transfer_buffer);
+ return -ENOMEM;
+ }
+
/* allocate urbs and transfer buffers */
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
@@ -638,17 +642,8 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
- sb_size, GFP_KERNEL, &urb->transfer_dma);
- if (!dev->isoc_ctl.transfer_buffer[i]) {
- tm6000_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
- tm6000_uninit_isoc(dev);
- return -ENOMEM;
- }
- memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+ urb->transfer_dma = dev->urb_dma[i];
+ dev->isoc_ctl.transfer_buffer[i] = dev->urb_buffer[i];
usb_fill_bulk_urb(urb, dev->udev, pipe,
dev->isoc_ctl.transfer_buffer[i], sb_size,
@@ -879,16 +874,21 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+ struct video_device *vdev = video_devdata(file);
strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE;
-
+ usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ else
+ cap->device_caps |= V4L2_CAP_RADIO;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
+ V4L2_CAP_RADIO | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -896,7 +896,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (unlikely(f->index >= ARRAY_SIZE(format)))
+ if (f->index >= ARRAY_SIZE(format))
return -EINVAL;
strlcpy(f->description, format[f->index].name, sizeof(f->description));
@@ -913,10 +913,12 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = fh->height;
f->fmt.pix.field = fh->vb_vidq.field;
f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fh->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.priv = 0;
return 0;
}
@@ -947,12 +949,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
field = f->fmt.pix.field;
- if (field == V4L2_FIELD_ANY)
- field = V4L2_FIELD_SEQ_TB;
- else if (V4L2_FIELD_INTERLACED != field) {
- dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
- return -EINVAL;
- }
+ field = V4L2_FIELD_INTERLACED;
tm6000_get_std_res(dev);
@@ -962,11 +959,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width &= ~0x01;
f->fmt.pix.field = field;
+ f->fmt.pix.priv = 0;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1141,79 +1140,40 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
/* --- controls ---------------------------------------------- */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
- if (qc->id && qc->id == tm6000_qctrl[i].id) {
- memcpy(qc, &(tm6000_qctrl[i]),
- sizeof(*qc));
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int tm6000_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
- int val;
+ struct tm6000_core *dev = container_of(ctrl->handler, struct tm6000_core, ctrl_handler);
+ u8 val = ctrl->val;
- /* FIXME: Probably, those won't work! Maybe we need shadow regs */
switch (ctrl->id) {
case V4L2_CID_CONTRAST:
- val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0);
- break;
+ tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
+ return 0;
case V4L2_CID_BRIGHTNESS:
- val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0);
+ tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
return 0;
case V4L2_CID_SATURATION:
- val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0);
+ tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
return 0;
case V4L2_CID_HUE:
- val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0);
- return 0;
- case V4L2_CID_AUDIO_MUTE:
- val = dev->ctl_mute;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- val = dev->ctl_volume;
+ tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
return 0;
- default:
- return -EINVAL;
}
+ return -EINVAL;
+}
- if (val < 0)
- return val;
-
- ctrl->value = val;
+static const struct v4l2_ctrl_ops tm6000_ctrl_ops = {
+ .s_ctrl = tm6000_s_ctrl,
+};
- return 0;
-}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int tm6000_radio_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
- u8 val = ctrl->value;
+ struct tm6000_core *dev = container_of(ctrl->handler,
+ struct tm6000_core, radio_ctrl_handler);
+ u8 val = ctrl->val;
switch (ctrl->id) {
- case V4L2_CID_CONTRAST:
- tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
- return 0;
- case V4L2_CID_BRIGHTNESS:
- tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
- return 0;
- case V4L2_CID_SATURATION:
- tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
- return 0;
- case V4L2_CID_HUE:
- tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
- return 0;
case V4L2_CID_AUDIO_MUTE:
dev->ctl_mute = val;
tm6000_tvaudio_set_mute(dev, val);
@@ -1226,20 +1186,24 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
return -EINVAL;
}
+static const struct v4l2_ctrl_ops tm6000_radio_ctrl_ops = {
+ .s_ctrl = tm6000_radio_s_ctrl,
+};
+
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
- return -EINVAL;
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
t->rangehigh = 0xffffffffUL;
t->rxsubchans = V4L2_TUNER_SUB_STEREO;
@@ -1257,11 +1221,14 @@ static int vidioc_s_tuner(struct file *file, void *priv,
struct tm6000_core *dev = fh->dev;
if (UNSET == dev->tuner_type)
- return -EINVAL;
+ return -ENOTTY;
if (0 != t->index)
return -EINVAL;
- dev->amode = t->audmode;
+ if (t->audmode > V4L2_TUNER_MODE_STEREO)
+ dev->amode = V4L2_TUNER_MODE_STEREO;
+ else
+ dev->amode = t->audmode;
dprintk(dev, 3, "audio mode: %x\n", t->audmode);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
@@ -1275,10 +1242,11 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
+ if (f->tuner)
return -EINVAL;
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->freq;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
@@ -1292,13 +1260,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (unlikely(UNSET == dev->tuner_type))
- return -EINVAL;
- if (unlikely(f->tuner != 0))
- return -EINVAL;
- if (0 == fh->radio && V4L2_TUNER_ANALOG_TV != f->type)
- return -EINVAL;
- if (1 == fh->radio && V4L2_TUNER_RADIO != f->type)
+ if (UNSET == dev->tuner_type)
+ return -ENOTTY;
+ if (f->tuner != 0)
return -EINVAL;
dev->freq = f->frequency;
@@ -1307,27 +1271,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return 0;
}
-static int radio_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct tm6000_fh *fh = file->private_data;
- struct tm6000_core *dev = fh->dev;
-
- strcpy(cap->driver, "tm6000");
- strlcpy(cap->card, dev->name, sizeof(dev->name));
- sprintf(cap->bus_info, "USB%04x:%04x",
- le16_to_cpu(dev->udev->descriptor.idVendor),
- le16_to_cpu(dev->udev->descriptor.idProduct));
- cap->version = dev->dev_type;
- cap->capabilities = V4L2_CAP_TUNER |
- V4L2_CAP_AUDIO |
- V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
-
- return 0;
-}
-
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1340,7 +1283,9 @@ static int radio_g_tuner(struct file *file, void *priv,
memset(t, 0, sizeof(*t));
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
+ t->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
t->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
@@ -1355,95 +1300,14 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
+ if (t->audmode > V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (i->index != 0)
- return -EINVAL;
-
- if (!dev->rinput.type)
- return -EINVAL;
-
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (dev->input != 5)
- return -EINVAL;
-
- *i = dev->input - 5;
-
- return 0;
-}
-
-static int radio_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- memset(a, 0, sizeof(*a));
- strcpy(a->name, "Radio");
- return 0;
-}
-
-static int radio_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *a)
-{
- return 0;
-}
-
-static int radio_s_input(struct file *filp, void *priv, unsigned int i)
-{
- struct tm6000_fh *fh = priv;
- struct tm6000_core *dev = fh->dev;
-
- if (i)
- return -EINVAL;
-
- if (!dev->rinput.type)
- return -EINVAL;
-
- dev->input = i + 5;
-
- return 0;
-}
-
-static int radio_s_std(struct file *file, void *fh, v4l2_std_id *norm)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if (c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
- ctrl = ctrl_by_id(c->id);
- *c = *ctrl;
- } else
- *c = no_ctrl;
-
- return 0;
-}
-
/* ------------------------------------------------------------------
File operations for the device
------------------------------------------------------------------*/
@@ -1454,7 +1318,7 @@ static int __tm6000_open(struct file *file)
struct tm6000_core *dev = video_drvdata(file);
struct tm6000_fh *fh;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- int i, rc;
+ int rc;
int radio = 0;
dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
@@ -1486,6 +1350,7 @@ static int __tm6000_open(struct file *file)
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -1514,13 +1379,7 @@ static int __tm6000_open(struct file *file)
if (rc < 0)
return rc;
- if (dev->mode != TM6000_MODE_ANALOG) {
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
- qctl_regs[i] = tm6000_qctrl[i].default_value;
-
- dev->mode = TM6000_MODE_ANALOG;
- }
+ dev->mode = TM6000_MODE_ANALOG;
if (!fh->radio) {
videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
@@ -1530,12 +1389,12 @@ static int __tm6000_open(struct file *file)
sizeof(struct tm6000_buffer), fh, &dev->lock);
} else {
dprintk(dev, V4L2_DEBUG_OPEN, "video_open: setting radio device\n");
- dev->input = 5;
tm6000_set_audio_rinput(dev);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_radio);
tm6000_prepare_isoc(dev);
tm6000_start_thread(dev);
}
+ v4l2_fh_add(&fh->fh);
return 0;
}
@@ -1576,29 +1435,35 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
static unsigned int
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
+ int res = 0;
+ if (v4l2_event_pending(&fh->fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
+ return res | POLLERR;
if (!!is_res_streaming(fh->dev, fh))
- return POLLERR;
+ return res | POLLERR;
if (!is_res_read(fh->dev, fh)) {
/* streaming capture */
if (list_empty(&fh->vb_vidq.stream))
- return POLLERR;
+ return res | POLLERR;
buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream);
- } else {
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return res | POLLIN | POLLRDNORM;
+ } else if (req_events & (POLLIN | POLLRDNORM)) {
/* read() capture */
- return videobuf_poll_stream(file, &fh->vb_vidq, wait);
+ return res | videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN | POLLRDNORM;
- return 0;
+ return res;
}
static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
@@ -1648,7 +1513,8 @@ static int tm6000_release(struct file *file)
if (!fh->radio)
videobuf_mmap_free(&fh->vb_vidq);
}
-
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
kfree(fh);
mutex_unlock(&dev->lock);
@@ -1688,9 +1554,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1701,6 +1564,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device tm6000_template = {
@@ -1715,25 +1580,19 @@ static struct video_device tm6000_template = {
static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = tm6000_open,
+ .poll = v4l2_ctrl_poll,
.release = tm6000_release,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
- .vidioc_querycap = radio_querycap,
+ .vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
- .vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_audio = radio_s_audio,
- .vidioc_s_input = radio_s_input,
- .vidioc_s_std = radio_s_std,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_input = radio_g_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device tm6000_radio_template = {
@@ -1762,6 +1621,7 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
vfd->release = video_device_release;
vfd->debug = tm6000_debug;
vfd->lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
@@ -1771,15 +1631,41 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
int tm6000_v4l2_register(struct tm6000_core *dev)
{
- int ret = -1;
+ int ret = 0;
+
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, 6);
+ v4l2_ctrl_handler_init(&dev->radio_ctrl_handler, 2);
+ v4l2_ctrl_new_std(&dev->radio_ctrl_handler, &tm6000_radio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&dev->radio_ctrl_handler, &tm6000_radio_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -15, 15, 1, 0);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 54);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 119);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 112);
+ v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_add_handler(&dev->ctrl_handler,
+ &dev->radio_ctrl_handler, NULL);
+
+ if (dev->radio_ctrl_handler.error)
+ ret = dev->radio_ctrl_handler.error;
+ if (!ret && dev->ctrl_handler.error)
+ ret = dev->ctrl_handler.error;
+ if (ret)
+ goto free_ctrl;
dev->vfd = vdev_init(dev, &tm6000_template, "video");
if (!dev->vfd) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_ctrl;
}
+ dev->vfd->ctrl_handler = &dev->ctrl_handler;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
@@ -1790,7 +1676,9 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (ret < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- return ret;
+ video_device_release(dev->vfd);
+ dev->vfd = NULL;
+ goto free_ctrl;
}
printk(KERN_INFO "%s: registered device %s\n",
@@ -1803,15 +1691,17 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
ret = -ENXIO;
- return ret; /* FIXME release resource */
+ goto unreg_video;
}
+ dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr);
if (ret < 0) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
- return ret; /* FIXME release resource */
+ video_device_release(dev->radio_dev);
+ goto unreg_video;
}
printk(KERN_INFO "%s: registered device %s\n",
@@ -1820,12 +1710,22 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
return ret;
+
+unreg_video:
+ video_unregister_device(dev->vfd);
+free_ctrl:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
+ return ret;
}
int tm6000_v4l2_unregister(struct tm6000_core *dev)
{
video_unregister_device(dev->vfd);
+ /* if URB buffers are still allocated free them now */
+ tm6000_free_urb_buffers(dev);
+
if (dev->radio_dev) {
if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
@@ -1851,3 +1751,5 @@ MODULE_PARM_DESC(debug, "activates debug info");
module_param(vid_limit, int, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
+module_param(keep_urb, bool, 0);
+MODULE_PARM_DESC(keep_urb, "Keep urb buffers allocated even when the device is closed by the user");
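The tm6000 hunks above follow the usual conversion to the in-kernel control framework: initialize a handler with a hint of how many controls it will hold, add the standard controls, check the handler's accumulated error field once, and point the video_device at the handler so the core ioctls can serve the controls. A minimal sketch of that pattern for a hypothetical foo driver; struct foo_dev, foo_ctrl_ops and the control ranges are illustrative assumptions, only the v4l2_ctrl_* calls are real API.

#include <linux/errno.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>

struct foo_dev {                               /* hypothetical driver state */
	struct v4l2_ctrl_handler hdl;
	struct video_device vdev;
};

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	return 0;                              /* program the hardware here */
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_init_controls(struct foo_dev *dev)
{
	v4l2_ctrl_handler_init(&dev->hdl, 2);  /* hint: two controls follow */
	v4l2_ctrl_new_std(&dev->hdl, &foo_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(&dev->hdl, &foo_ctrl_ops,
			  V4L2_CID_CONTRAST, 0, 255, 1, 128);

	/* failures accumulate in hdl.error, so a single check suffices */
	if (dev->hdl.error) {
		int err = dev->hdl.error;

		v4l2_ctrl_handler_free(&dev->hdl);
		return err;
	}
	dev->vdev.ctrl_handler = &dev->hdl;
	return 0;
}
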
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index 6df418658c9c..08bd0740dd23 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -27,6 +27,8 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
#include <linux/dvb/frontend.h>
#include "dvb_demux.h"
@@ -222,6 +224,8 @@ struct tm6000_core {
struct video_device *radio_dev;
struct tm6000_dmaqueue vidq;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl_handler radio_ctrl_handler;
int input;
struct tm6000_input vinput[3]; /* video input */
@@ -264,6 +268,11 @@ struct tm6000_core {
spinlock_t slock;
+ /* urb dma buffers */
+ char **urb_buffer;
+ dma_addr_t *urb_dma;
+ unsigned int urb_size;
+
unsigned long quirks;
};
@@ -282,6 +291,7 @@ struct tm6000_ops {
};
struct tm6000_fh {
+ struct v4l2_fh fh;
struct tm6000_core *dev;
unsigned int radio;
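The header change above embeds a struct v4l2_fh at the start of tm6000_fh; that is what lets file->private_data serve as the v4l2_fh that v4l2_ctrl_poll(), the priority checks and the event code expect. A sketch of the matching open/release pairing for a hypothetical driver follows; foo_fh, foo_open and foo_release are illustrative, the v4l2_fh_* helpers are real.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct foo_fh {
	struct v4l2_fh fh;              /* kept as the first member */
	/* ... per-open driver state ... */
};

static int foo_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct foo_fh *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return -ENOMEM;
	v4l2_fh_init(&h->fh, vdev);
	file->private_data = &h->fh;    /* the core casts this back to v4l2_fh */
	v4l2_fh_add(&h->fh);
	return 0;
}

static int foo_release(struct file *file)
{
	struct foo_fh *h = container_of(file->private_data,
					struct foo_fh, fh);

	v4l2_fh_del(&h->fh);
	v4l2_fh_exit(&h->fh);
	kfree(h);
	return 0;
}
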
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index 5b682cc4c814..e40718552850 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -561,6 +561,13 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
{
u16 csum = 0, cc;
int i;
+
+ if (len < 4 || len & 0x1) {
+ pr_warn("%s: muxpack has invalid len %d\n", __func__, len);
+ numinvalid++;
+ return;
+ }
+
for (i = 0; i < len; i += 2)
csum ^= le16_to_cpup((__le16 *) (muxpack + i));
if (csum) {
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index c9b2042f8bdf..816b1cffab7d 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1169,7 +1169,7 @@ static void usbvision_parse_data(struct usb_usbvision *usbvision)
if (newstate == parse_state_next_frame) {
frame->grabstate = frame_state_done;
- do_gettimeofday(&(frame->timestamp));
+ v4l2_get_timestamp(&(frame->timestamp));
frame->sequence = usbvision->frame_num;
spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 89fec029e924..ba262a32bd3a 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -189,8 +189,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
if (usbvision->registered_i2c)
return 0;
- memcpy(&usbvision->i2c_adap, &i2c_adap_template,
- sizeof(struct i2c_adapter));
+ usbvision->i2c_adap = i2c_adap_template;
sprintf(usbvision->i2c_adap.name, "%s-%d-%s", i2c_adap_template.name,
usbvision->dev->bus->busnum, usbvision->dev->devpath);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index ad7f7448072e..cd1fe78a5532 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -761,7 +761,7 @@ static int vidioc_querybuf(struct file *file,
if (vb->index >= usbvision->num_frames)
return -EINVAL;
/* Updating the corresponding frame state */
- vb->flags = 0;
+ vb->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
frame = &usbvision->frame[vb->index];
if (frame->grabstate >= frame_state_ready)
vb->flags |= V4L2_BUF_FLAG_QUEUED;
@@ -843,7 +843,8 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
vb->memory = V4L2_MEMORY_MMAP;
vb->flags = V4L2_BUF_FLAG_MAPPED |
V4L2_BUF_FLAG_QUEUED |
- V4L2_BUF_FLAG_DONE;
+ V4L2_BUF_FLAG_DONE |
+ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vb->index = f->index;
vb->sequence = f->sequence;
vb->timestamp = f->timestamp;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index d5baab17a5ef..61e28dec991d 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1838,7 +1838,7 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
{
int ret = 0;
- memcpy(&ctrl->info, info, sizeof(*info));
+ ctrl->info = *info;
INIT_LIST_HEAD(&ctrl->info.mappings);
/* Allocate an array to save control values (cur, def, max, etc.) */
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 778addc5caff..6c233a54ce40 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -115,11 +115,27 @@ static int uvc_buffer_finish(struct vb2_buffer *vb)
return 0;
}
+static void uvc_wait_prepare(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&queue->mutex);
+}
+
+static void uvc_wait_finish(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+ mutex_lock(&queue->mutex);
+}
+
static struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
.buf_finish = uvc_buffer_finish,
+ .wait_prepare = uvc_wait_prepare,
+ .wait_finish = uvc_wait_finish,
};
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 68d59b527492..b2dc32623a71 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -315,7 +315,7 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
goto done;
}
- memcpy(&stream->ctrl, &probe, sizeof probe);
+ stream->ctrl = probe;
stream->cur_format = format;
stream->cur_frame = frame;
@@ -387,7 +387,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
return -EBUSY;
}
- memcpy(&probe, &stream->ctrl, sizeof probe);
+ probe = stream->ctrl;
probe.dwFrameInterval =
uvc_try_frame_interval(stream->cur_frame, interval);
@@ -398,7 +398,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
return ret;
}
- memcpy(&stream->ctrl, &probe, sizeof probe);
+ stream->ctrl = probe;
mutex_unlock(&stream->mutex);
/* Return the actual frame period. */
@@ -501,8 +501,8 @@ static int uvc_v4l2_open(struct file *file)
if (atomic_inc_return(&stream->dev->users) == 1) {
ret = uvc_status_start(stream->dev);
if (ret < 0) {
- usb_autopm_put_interface(stream->dev->intf);
atomic_dec(&stream->dev->users);
+ usb_autopm_put_interface(stream->dev->intf);
kfree(handle);
return ret;
}
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 39edd4442932..74d56df3347f 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -501,7 +501,6 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
int jpgsize)
{
int pos = 0;
- struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
@@ -530,8 +529,7 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
/* tell v4l buffer was filled */
buf->vb.field_count = cam->frame_count * 2;
- do_gettimeofday(&ts);
- buf->vb.ts = ts;
+ v4l2_get_timestamp(&buf->vb.ts);
buf->vb.state = VIDEOBUF_DONE;
}
@@ -559,7 +557,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize)
goto unlock;
}
list_del(&buf->vb.queue);
- do_gettimeofday(&buf->vb.ts);
+ v4l2_get_timestamp(&buf->vb.ts);
DBG("[%p/%d] wakeup\n", buf, buf->vb.i);
zr364xx_fillbuff(cam, buf, jpgsize);
wake_up(&buf->vb.done);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 65875c3aba1b..976d029e9925 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -82,3 +82,14 @@ config VIDEOBUF2_DMA_SG
#depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+
+config VIDEO_V4L2_INT_DEVICE
+ tristate "V4L2 int device (DEPRECATED)"
+ depends on VIDEO_V4L2
+ ---help---
+ An early framework for a hardware-independent interface to
+ image sensors, bridges, etc. Currently used by the omap24xxcam and
+ tcm825x drivers, which should be converted to V4L2 subdev.
+
+ Do not use for new developments.
+
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index c2d61d4f03d1..a9d355230e8e 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -10,7 +10,8 @@ ifeq ($(CONFIG_COMPAT),y)
videodev-objs += v4l2-compat-ioctl32.o
endif
-obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
+obj-$(CONFIG_VIDEO_DEV) += videodev.o
+obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index b5a819af2b8c..b5a8aac2e126 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1013,6 +1013,11 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
t->standby = false;
analog_ops->set_params(&t->fe, &params);
+ /*
+ * The tuner driver might decide to change the audmode if it only
+ * supports stereo, so update t->audmode.
+ */
+ t->audmode = params.audmode;
}
/*
@@ -1235,8 +1240,18 @@ static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (set_mode(t, vt->type))
return 0;
- if (t->mode == V4L2_TUNER_RADIO)
+ if (t->mode == V4L2_TUNER_RADIO) {
t->audmode = vt->audmode;
+ /*
+ * For radio, audmode can only be mono or stereo. Map any
+ * other values to stereo. The actual tuner driver that is
+ * called in set_radio_freq can decide to limit the audmode to
+ * mono if only mono is supported.
+ */
+ if (t->audmode != V4L2_TUNER_MODE_MONO &&
+ t->audmode != V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ }
set_freq(t, 0);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 380ddd89fa4c..aa044f491666 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -238,7 +238,7 @@ int v4l2_chip_match_host(const struct v4l2_dbg_match *match)
}
EXPORT_SYMBOL(v4l2_chip_match_host);
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_I2C)
int v4l2_chip_match_i2c_client(struct i2c_client *c, const struct v4l2_dbg_match *match)
{
int len;
@@ -384,7 +384,7 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
static const unsigned short radio_addrs[] = {
-#if defined(CONFIG_MEDIA_TUNER_TEA5761) || defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE)
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
0x10,
#endif
0x60,
@@ -978,3 +978,13 @@ const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
return best;
}
EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
+
+void v4l2_get_timestamp(struct timeval *tv)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
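v4l2_get_timestamp() above reads the monotonic clock via ktime_get_ts(), so drivers that adopt it (as usbvision and zr364xx do earlier in this diff) should also advertise V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC in the buffer flags, as the videobuf and videobuf2 hunks further down do. A minimal sketch of a hypothetical buffer-completion path, assuming the prototype this series adds to media/v4l2-common.h:

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

/* hypothetical completion helper filling in a v4l2_buffer */
static void foo_buffer_done(struct v4l2_buffer *b, u32 sequence)
{
	v4l2_get_timestamp(&b->timestamp);      /* CLOCK_MONOTONIC based */
	b->sequence = sequence;
	b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}
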
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f6ee201d9347..6b28b5800500 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -577,8 +577,6 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_GAIN: return "Gain";
case V4L2_CID_HFLIP: return "Horizontal Flip";
case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_HCENTER: return "Horizontal Center";
- case V4L2_CID_VCENTER: return "Vertical Center";
case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
@@ -1160,8 +1158,7 @@ static int new_to_user(struct v4l2_ext_control *c,
}
/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- bool update_inactive)
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
bool changed = false;
@@ -1185,8 +1182,8 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
ctrl->cur.val = ctrl->val;
break;
}
- if (update_inactive) {
- /* Note: update_inactive can only be true for auto clusters. */
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
ctrl->flags &=
~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
if (!is_cur_manual(ctrl->cluster[0])) {
@@ -1196,14 +1193,15 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
}
fh = NULL;
}
- if (changed || update_inactive) {
+ if (changed || ch_flags) {
/* If a control was changed that was not one of the controls
modified by the application, then send the event to all. */
if (!ctrl->is_new)
fh = NULL;
send_event(fh, ctrl,
- (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
- (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
}
}
@@ -1257,6 +1255,41 @@ static int cluster_changed(struct v4l2_ctrl *master)
return diff;
}
+/* Control range checking */
+static int check_range(enum v4l2_ctrl_type type,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ /* fall through */
+ case V4L2_CTRL_TYPE_INTEGER:
+ if (step <= 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /* Note: step == menu_skip_mask for menu controls.
+ So here we check if the default value is masked out. */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
/* Validate a new control */
static int validate_new(const struct v4l2_ctrl *ctrl,
struct v4l2_ext_control *c)
@@ -1529,30 +1562,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra = 0;
+ int err;
if (hdl->error)
return NULL;
/* Sanity checks */
if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
- (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
- (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
- (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
- if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
- if ((type == V4L2_CTRL_TYPE_INTEGER ||
- type == V4L2_CTRL_TYPE_MENU ||
- type == V4L2_CTRL_TYPE_INTEGER_MENU ||
- type == V4L2_CTRL_TYPE_BOOLEAN) &&
- (def < min || def > max)) {
- handler_set_err(hdl, -ERANGE);
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
return NULL;
}
if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
@@ -1980,6 +2004,13 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
/* Call s_ctrl for all controls owned by the handler */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
@@ -2426,8 +2457,8 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
/* Core function that calls try/s_ctrl and ensures that the new value is
copied to the current value on a set.
Must be called with ctrl->handler->lock held. */
-static int try_or_set_cluster(struct v4l2_fh *fh,
- struct v4l2_ctrl *master, bool set)
+static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
{
bool update_flag;
int ret;
@@ -2465,7 +2496,8 @@ static int try_or_set_cluster(struct v4l2_fh *fh,
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
for (i = 0; i < master->ncontrols; i++)
- new_to_cur(fh, master->cluster[i], update_flag && i > 0);
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
return 0;
}
@@ -2592,7 +2624,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set);
+ ret = try_or_set_cluster(fh, master, set, 0);
/* Copy the new values back to userspace. */
if (!ret) {
@@ -2638,10 +2670,9 @@ EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
+ struct v4l2_ext_control *c, u32 ch_flags)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret;
int i;
/* String controls are not supported. The user_to_new() and
@@ -2651,12 +2682,6 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
if (ctrl->type == V4L2_CTRL_TYPE_STRING)
return -EINVAL;
- ret = validate_new(ctrl, c);
- if (ret)
- return ret;
-
- v4l2_ctrl_lock(ctrl);
-
/* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
@@ -2670,10 +2695,22 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
update_from_auto_cluster(master);
user_to_new(c, ctrl);
- ret = try_or_set_cluster(fh, master, true);
- cur_to_user(c, ctrl);
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
- v4l2_ctrl_unlock(ctrl);
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret = validate_new(ctrl, c);
+
+ if (!ret) {
+ v4l2_ctrl_lock(ctrl);
+ ret = set_ctrl(fh, ctrl, c, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ }
return ret;
}
@@ -2691,7 +2728,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return -EACCES;
c.value = control->value;
- ret = set_ctrl(fh, ctrl, &c);
+ ret = set_ctrl_lock(fh, ctrl, &c);
control->value = c.value;
return ret;
}
@@ -2710,7 +2747,7 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
/* It's a driver bug if this happens. */
WARN_ON(!type_is_int(ctrl));
c.value = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
@@ -2721,10 +2758,61 @@ int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
c.value64 = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl_int64);
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (ctrl == NULL)
+ return;
+ if (notify == NULL) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
+int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ int ret = check_range(ctrl->type, min, max, step, def);
+ struct v4l2_ext_control c;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ v4l2_ctrl_lock(ctrl);
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ c.value = ctrl->cur.val;
+ if (validate_new(ctrl, &c))
+ c.value = def;
+ if (c.value != ctrl->cur.val)
+ ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
+ else
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_modify_range);
+
static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
@@ -2804,6 +2892,15 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
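v4l2_ctrl_notify() and v4l2_ctrl_modify_range() are the two new exported helpers in the hunk above. A sketch of how a bridge driver might use them, assuming a hypothetical foo_bridge that watches a gain control owned by a sub-device; only the two v4l2_ctrl_* calls are real API.

#include <media/v4l2-ctrls.h>

struct foo_bridge {
	struct v4l2_ctrl *gain;         /* control owned by a subdev handler */
};

static void foo_ctrl_changed(struct v4l2_ctrl *ctrl, void *priv)
{
	struct foo_bridge *foo = priv;

	/* react to the control changing value, e.g. reprogram scaling */
	(void)foo;
}

static int foo_hook_gain(struct foo_bridge *foo)
{
	/* get called back whenever the gain control changes value */
	v4l2_ctrl_notify(foo->gain, foo_ctrl_changed, foo);

	/*
	 * Shrink the exposed range at runtime, e.g. after a mode switch;
	 * subscribers receive a V4L2_EVENT_CTRL_CH_RANGE event.
	 */
	return v4l2_ctrl_modify_range(foo->gain, 0, 64, 1, 16);
}
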
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 98dcad9c8a3b..de1e9ab7db99 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -222,7 +222,7 @@ static struct class video_class = {
struct video_device *video_devdata(struct file *file)
{
- return video_device[iminor(file->f_path.dentry->d_inode)];
+ return video_device[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);
@@ -568,11 +568,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_s_priority ||
test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
- SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
- SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
- SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
- SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
/* Note: the control handler can also be passed through the filehandle,
@@ -605,8 +600,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
- SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
- SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
@@ -672,6 +665,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
}
if (!is_radio) {
/* ioctls valid for video or vbi */
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
if (ops->vidioc_g_std || vdev->current_norm)
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 513969fa695d..8ed5da2170bf 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -112,7 +112,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_I2C)
if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
if (sd->internal_ops && sd->internal_ops->registered) {
err = sd->internal_ops->registered(sd);
- if (err) {
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_module;
}
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
- if (err) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_unregister;
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
err = media_device_register_entity(v4l2_dev->mdev, entity);
- if (err < 0) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err < 0)
+ goto error_unregister;
}
#endif
@@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
spin_unlock(&v4l2_dev->lock);
return 0;
+
+error_unregister:
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+error_module:
+ module_put(sd->owner);
+ sd->v4l2_dev = NULL;
+ return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index c72009218152..86dcb5483c42 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -311,3 +311,10 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
+
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 438ea45d1074..da99cf727162 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -62,7 +62,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
- struct v4l2_m2m_ops *m2m_ops;
+ const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(v4l2_m2m_mmap);
*
* Usually called from driver's probe() function.
*/
-struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 5449e8aa984a..fb5ee5dd8fe9 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -340,7 +340,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
break;
}
- b->flags = 0;
+ b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (vb->map)
b->flags |= V4L2_BUF_FLAG_MAPPED;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index e02c4797b1c6..db1235dcb328 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -40,9 +40,10 @@ module_param(debug, int, 0644);
#define call_qop(q, op, args...) \
(((q)->ops->op) ? ((q)->ops->op(args)) : 0)
-#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
- V4L2_BUF_FLAG_PREPARED)
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
/**
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
@@ -401,7 +402,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
/*
* Clear any buffer state related flags.
*/
- b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
switch (vb->state) {
case VB2_BUF_STATE_QUEUED:
@@ -941,7 +943,7 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
vb->v4l2_buf.field = b->field;
vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
+ vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
}
/**
@@ -1963,6 +1965,11 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
+ return res;
+
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
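The vb2_poll() change above stops the read/write I/O emulator from being started as a side effect when the caller did not request POLLIN/POLLOUT-class events. A small userspace sketch of the pattern this enables, waiting for control events only; the function name is illustrative, poll(2) and VIDIOC_DQEVENT are standard.

#include <poll.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_ctrl_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	struct v4l2_event ev;

	/* no POLLIN/POLLRDNORM requested, so vb2_poll() no longer
	 * starts the file I/O emulator behind the application's back */
	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	return ioctl(fd, VIDIOC_DQEVENT, &ev);
}
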
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 56ff19cdc2ad..ffcb10ac4341 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -512,18 +512,17 @@ int memstick_add_host(struct memstick_host *host)
{
int rc;
- while (1) {
- if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
+ spin_lock(&memstick_host_lock);
- spin_lock(&memstick_host_lock);
- rc = idr_get_new(&memstick_host_idr, host, &host->id);
- spin_unlock(&memstick_host_lock);
- if (!rc)
- break;
- else if (rc != -EAGAIN)
- return rc;
- }
+ rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (rc >= 0)
+ host->id = rc;
+
+ spin_unlock(&memstick_host_lock);
+ idr_preload_end();
+ if (rc < 0)
+ return rc;
dev_set_name(&host->dev, "memstick%u", host->id);
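The memstick and mspro_block hunks here are straight conversions from the old idr_pre_get()/idr_get_new() retry loop to the idr_preload()/idr_alloc() API. A generic sketch of the locked variant used in memstick_add_host() above, with an illustrative idr and lock:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(foo_idr);             /* illustrative idr and lock */
static DEFINE_SPINLOCK(foo_lock);

static int foo_assign_id(void *obj)
{
	int id;

	/* preload outside the lock, then allocate with GFP_NOWAIT inside */
	idr_preload(GFP_KERNEL);
	spin_lock(&foo_lock);
	id = idr_alloc(&foo_idr, obj, 0, 0, GFP_NOWAIT); /* end == 0: no limit */
	spin_unlock(&foo_lock);
	idr_preload_end();

	return id;      /* >= 0 on success, -ENOMEM/-ENOSPC on failure */
}
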
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 9729b92fbfdd..f12b78dbce04 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1213,21 +1213,10 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->page_size = be16_to_cpu(sys_info->unit_size);
mutex_lock(&mspro_block_disk_lock);
- if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
- mutex_unlock(&mspro_block_disk_lock);
- return -ENOMEM;
- }
-
- rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
+ disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL);
mutex_unlock(&mspro_block_disk_lock);
-
- if (rc)
- return rc;
-
- if ((disk_id << MSPRO_BLOCK_PART_SHIFT) > 255) {
- rc = -ENOSPC;
- goto out_release_id;
- }
+ if (disk_id < 0)
+ return disk_id;
msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT);
if (!msb->disk) {
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 29b2172ae18f..a7c5b31c0d50 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
/* Executes one TPC (data is read/written from small or large fifo) */
static void r592_execute_tpc(struct r592_device *dev)
{
- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ bool is_write;
int len, error;
u32 status, reg;
@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
return;
}
+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
len = dev->req->long_data ?
dev->req->sg.length : dev->req->data_len;
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index f5ddb82dadb7..64a779c58a74 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -426,6 +426,9 @@ static void rtsx_pci_ms_request(struct memstick_host *msh)
dev_dbg(ms_dev(host), "--> %s\n", __func__);
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD))
+ return;
+
schedule_work(&host->handle_req);
}
@@ -441,6 +444,10 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD);
+ if (err)
+ return err;
+
switch (param) {
case MEMSTICK_POWER:
if (value == MEMSTICK_POWER_ON)
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 391e23e6a647..582bda543520 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -531,7 +531,7 @@ static int pm800_probe(struct i2c_client *client,
ret = device_800_init(chip, pdata);
if (ret) {
dev_err(chip->dev, "%s id 0x%x failed!\n", __func__, chip->id);
- goto err_800_init;
+ goto err_subchip_alloc;
}
ret = pm800_pages_init(chip);
@@ -546,10 +546,8 @@ static int pm800_probe(struct i2c_client *client,
err_page_init:
mfd_remove_devices(chip->dev);
device_irq_exit_800(chip);
-err_800_init:
- devm_kfree(&client->dev, subchip);
err_subchip_alloc:
- pm80x_deinit(client);
+ pm80x_deinit();
out_init:
return ret;
}
@@ -562,9 +560,7 @@ static int pm800_remove(struct i2c_client *client)
device_irq_exit_800(chip);
pm800_pages_exit(chip);
- devm_kfree(&client->dev, chip->subchip);
-
- pm80x_deinit(client);
+ pm80x_deinit();
return 0;
}
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e671230be2b1..65d7ac099b20 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -257,7 +257,7 @@ static int pm805_probe(struct i2c_client *client,
pdata->plat_config(chip, pdata);
err_805_init:
- pm80x_deinit(client);
+ pm80x_deinit();
out_init:
return ret;
}
@@ -269,7 +269,7 @@ static int pm805_remove(struct i2c_client *client)
mfd_remove_devices(chip->dev);
device_irq_exit_805(chip);
- pm80x_deinit(client);
+ pm80x_deinit();
return 0;
}
diff --git a/drivers/mfd/88pm80x.c b/drivers/mfd/88pm80x.c
index 1adb355d86d1..f736a46eb8c0 100644
--- a/drivers/mfd/88pm80x.c
+++ b/drivers/mfd/88pm80x.c
@@ -48,14 +48,12 @@ int pm80x_init(struct i2c_client *client,
ret = PTR_ERR(map);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
- goto err_regmap_init;
+ return ret;
}
chip->id = id->driver_data;
- if (chip->id < CHIP_PM800 || chip->id > CHIP_PM805) {
- ret = -EINVAL;
- goto err_chip_id;
- }
+ if (chip->id < CHIP_PM800 || chip->id > CHIP_PM805)
+ return -EINVAL;
chip->client = client;
chip->regmap = map;
@@ -82,19 +80,11 @@ int pm80x_init(struct i2c_client *client,
}
return 0;
-
-err_chip_id:
- regmap_exit(map);
-err_regmap_init:
- devm_kfree(&client->dev, chip);
- return ret;
}
EXPORT_SYMBOL_GPL(pm80x_init);
-int pm80x_deinit(struct i2c_client *client)
+int pm80x_deinit(void)
{
- struct pm80x_chip *chip = i2c_get_clientdata(client);
-
/*
* workaround: clear the dependency between pm800 and pm805.
* would remove it after HW chip fixes the issue.
@@ -103,10 +93,6 @@ int pm80x_deinit(struct i2c_client *client)
g_pm80x_chip->companion = NULL;
else
g_pm80x_chip = NULL;
-
- regmap_exit(chip->regmap);
- devm_kfree(&client->dev, chip);
-
return 0;
}
EXPORT_SYMBOL_GPL(pm80x_deinit);
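The 88pm800/88pm805/88pm80x cleanups above drop the manual devm_kfree() and regmap_exit() calls from the error and remove paths; assuming the chip structure and regmap are devm-managed, which the removed devm_kfree() pairs imply, the driver core releases them automatically. A sketch of the resulting probe shape for a hypothetical I2C driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct foo_chip {                               /* hypothetical chip state */
	struct regmap *map;
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	struct foo_chip *chip;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->map = devm_regmap_init_i2c(client, &foo_regmap_config);
	if (IS_ERR(chip->map))
		return PTR_ERR(chip->map);      /* no regmap_exit()/kfree() */

	i2c_set_clientdata(client, chip);
	/* devm releases both resources on probe failure or unbind */
	return 0;
}
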
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ff553babf455..671f5b171c73 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -65,7 +65,7 @@ config MFD_SM501_GPIO
config MFD_RTSX_PCI
tristate "Support for Realtek PCI-E card reader"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
This supports for Realtek PCI-Express card reader including rts5209,
@@ -95,7 +95,7 @@ config MFD_DM355EVM_MSP
config MFD_TI_SSP
tristate "TI Sequencer Serial Port support"
- depends on ARCH_DAVINCI_TNETV107X
+ depends on ARCH_DAVINCI_TNETV107X && GENERIC_HARDIRQS
select MFD_CORE
---help---
Say Y here if you want support for the Sequencer Serial Port
@@ -109,6 +109,7 @@ config MFD_TI_AM335X_TSCADC
select MFD_CORE
select REGMAP
select REGMAP_MMIO
+ depends on GENERIC_HARDIRQS
help
If you say yes here you get support for Texas Instruments series
of Touch Screen /ADC chips.
@@ -126,6 +127,7 @@ config HTC_EGPIO
config HTC_PASIC3
tristate "HTC PASIC3 LED/DS1WM chip support"
select MFD_CORE
+ depends on GENERIC_HARDIRQS
help
This core driver provides register access for the LED/DS1WM
chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
@@ -157,6 +159,7 @@ config MFD_LM3533
depends on I2C
select MFD_CORE
select REGMAP_I2C
+ depends on GENERIC_HARDIRQS
help
Say yes here to enable support for National Semiconductor / TI
LM3533 Lighting Power chips.
@@ -171,6 +174,7 @@ config TPS6105X
select REGULATOR
select MFD_CORE
select REGULATOR_FIXED_VOLTAGE
+ depends on GENERIC_HARDIRQS
help
This option enables a driver for the TP61050/TPS61052
high-power "white LED driver". This boost converter is
@@ -193,7 +197,7 @@ config TPS65010
config TPS6507X
tristate "TPS6507x Power Management / Touch Screen chips"
select MFD_CORE
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS6507x series of
Power Management / Touch Screen chips. These include voltage
@@ -204,7 +208,7 @@ config TPS6507X
config MFD_TPS65217
tristate "TPS65217 Power Management / White LED chips"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
help
@@ -234,7 +238,7 @@ config MFD_TPS6586X
config MFD_TPS65910
bool "TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -251,7 +255,7 @@ config MFD_TPS65912_I2C
bool "TPS65912 Power Management chip with I2C"
select MFD_CORE
select MFD_TPS65912
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS65912 series of
PM chips with I2C interface.
@@ -260,7 +264,7 @@ config MFD_TPS65912_SPI
bool "TPS65912 Power Management chip with SPI"
select MFD_CORE
select MFD_TPS65912
- depends on SPI_MASTER && GPIOLIB
+ depends on SPI_MASTER && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
@@ -330,13 +334,13 @@ config TWL4030_POWER
config MFD_TWL4030_AUDIO
bool
- depends on TWL4030_CORE
+ depends on TWL4030_CORE && GENERIC_HARDIRQS
select MFD_CORE
default n
config TWL6040_CORE
bool "Support for TWL6040 audio codec"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -405,7 +409,7 @@ config MFD_TMIO
config MFD_T7L66XB
bool "Support Toshiba T7L66XB"
- depends on ARM && HAVE_CLK
+ depends on ARM && HAVE_CLK && GENERIC_HARDIRQS
select MFD_CORE
select MFD_TMIO
help
@@ -413,7 +417,7 @@ config MFD_T7L66XB
config MFD_SMSC
bool "Support for the SMSC ECE1099 series chips"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
help
@@ -460,7 +464,7 @@ config MFD_DA9052_SPI
select REGMAP_SPI
select REGMAP_IRQ
select PMIC_DA9052
- depends on SPI_MASTER=y
+ depends on SPI_MASTER=y && GENERIC_HARDIRQS
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using SPI. This driver provides common support
@@ -472,7 +476,7 @@ config MFD_DA9052_I2C
select REGMAP_I2C
select REGMAP_IRQ
select PMIC_DA9052
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Dialog Semiconductor DA9052 PMIC
when controlled using I2C. This driver provides common support
@@ -485,7 +489,7 @@ config MFD_DA9055
select REGMAP_IRQ
select PMIC_DA9055
select MFD_CORE
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Say yes here for support of Dialog Semiconductor DA9055. This is
a Power Management IC. This driver provides common support for
@@ -508,7 +512,7 @@ config PMIC_ADP5520
config MFD_LP8788
bool "Texas Instruments LP8788 Power Management Unit Driver"
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -611,7 +615,7 @@ config MFD_ARIZONA_I2C
select MFD_ARIZONA
select MFD_CORE
select REGMAP_I2C
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -621,7 +625,7 @@ config MFD_ARIZONA_SPI
select MFD_ARIZONA
select MFD_CORE
select REGMAP_SPI
- depends on SPI_MASTER
+ depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics Arizona platform audio SoC
core functionality controlled via I2C.
@@ -641,7 +645,7 @@ config MFD_WM5110
config MFD_WM8400
bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
select REGMAP_I2C
help
Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -785,7 +789,7 @@ config MFD_MC13783
config MFD_MC13XXX
tristate
- depends on SPI_MASTER || I2C
+ depends on (SPI_MASTER || I2C) && GENERIC_HARDIRQS
select MFD_CORE
select MFD_MC13783
help
@@ -796,7 +800,7 @@ config MFD_MC13XXX
config MFD_MC13XXX_SPI
tristate "Freescale MC13783 and MC13892 SPI interface"
- depends on SPI_MASTER
+ depends on SPI_MASTER && GENERIC_HARDIRQS
select REGMAP_SPI
select MFD_MC13XXX
help
@@ -804,7 +808,7 @@ config MFD_MC13XXX_SPI
config MFD_MC13XXX_I2C
tristate "Freescale MC13892 I2C interface"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select REGMAP_I2C
select MFD_MC13XXX
help
@@ -822,7 +826,7 @@ config ABX500_CORE
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y && ABX500_CORE
+ depends on I2C=y && ABX500_CORE && GENERIC_HARDIRQS
select MFD_CORE
default y if ARCH_U300
help
@@ -909,7 +913,7 @@ config MFD_TIMBERDALE
config LPC_SCH
tristate "Intel SCH LPC"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
LPC bridge function of the Intel SCH provides support for
@@ -917,7 +921,7 @@ config LPC_SCH
config LPC_ICH
tristate "Intel ICH LPC"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
The LPC bridge function of the Intel ICH provides support for
@@ -928,7 +932,7 @@ config LPC_ICH
config MFD_RDC321X
tristate "Support for RDC-R321x southbridge"
select MFD_CORE
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
Say yes here if you want to have support for the RDC R-321x SoC
southbridge which provides access to GPIOs and Watchdog using the
@@ -937,7 +941,7 @@ config MFD_RDC321X
config MFD_JANZ_CMODIO
tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
select MFD_CORE
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This is the core driver for the Janz CMOD-IO PCI MODULbus
carrier board. This device is a PCI to MODULbus bridge which may
@@ -955,7 +959,7 @@ config MFD_JZ4740_ADC
config MFD_VX855
tristate "Support for VIA VX855/VX875 integrated south bridge"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
help
Say yes here to enable support for various functions of the
@@ -964,7 +968,7 @@ config MFD_VX855
config MFD_WL1273_CORE
tristate "Support for TI WL1273 FM radio."
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select MFD_CORE
default n
help
@@ -1028,7 +1032,7 @@ config MFD_TPS65090
config MFD_AAT2870_CORE
bool "Support for the AnalogicTech AAT2870"
select MFD_CORE
- depends on I2C=y && GPIOLIB
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
help
If you say yes here you get support for the AAT2870.
This driver provides common support for accessing the device,
@@ -1060,7 +1064,7 @@ config MFD_RC5T583
config MFD_STA2X11
bool "STA2X11 multi function device support"
- depends on STA2X11
+ depends on STA2X11 && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_MMIO
@@ -1077,7 +1081,7 @@ config MFD_PALMAS
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
@@ -1085,7 +1089,7 @@ config MFD_PALMAS
config MFD_VIPERBOARD
tristate "Support for Nano River Technologies Viperboard"
select MFD_CORE
- depends on USB
+ depends on USB && GENERIC_HARDIRQS
default n
help
Say yes here if you want support for Nano River Technologies
@@ -1099,7 +1103,7 @@ config MFD_VIPERBOARD
config MFD_RETU
tristate "Support for Retu multi-function device"
select MFD_CORE
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select REGMAP_IRQ
help
Retu is a multi-function device found on Nokia Internet Tablets
@@ -1110,7 +1114,7 @@ config MFD_AS3711
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C=y && GENERIC_HARDIRQS
help
Support for the AS3711 PMIC from AMS
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8b977f8045ae..b90409c23664 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 8b5d685ab980..7c84ced2e01b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -320,6 +320,7 @@ static struct abx500_ops ab8500_ops = {
.mask_and_set_register = ab8500_mask_and_set_register,
.event_registers_startup_state_get = NULL,
.startup_irq_enabled = NULL,
+ .dump_all_banks = ab8500_dump_all_banks,
};
static void ab8500_irq_lock(struct irq_data *data)
@@ -368,16 +369,48 @@ static void ab8500_irq_mask(struct irq_data *data)
int mask = 1 << (offset % 8);
ab8500->mask[index] |= mask;
+
+ /* The AB8500 GPIOs have two interrupts each (rising & falling). */
+ if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R)
+ ab8500->mask[index + 2] |= mask;
+ if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R)
+ ab8500->mask[index + 1] |= mask;
+ if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R)
+ /* Here the falling IRQ is one bit lower */
+ ab8500->mask[index] |= (mask << 1);
}
static void ab8500_irq_unmask(struct irq_data *data)
{
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+ unsigned int type = irqd_get_trigger_type(data);
int offset = data->hwirq;
int index = offset / 8;
int mask = 1 << (offset % 8);
- ab8500->mask[index] &= ~mask;
+ if (type & IRQ_TYPE_EDGE_RISING)
+ ab8500->mask[index] &= ~mask;
+
+ /* The AB8500 GPIOs have two interrupts each (rising & falling). */
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R)
+ ab8500->mask[index + 2] &= ~mask;
+ else if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R)
+ ab8500->mask[index + 1] &= ~mask;
+ else if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R)
+ /* Here the falling IRQ is one bit lower */
+ ab8500->mask[index] &= ~(mask << 1);
+ else
+ ab8500->mask[index] &= ~mask;
+ } else {
+ /* Satisfies the case where type is not set. */
+ ab8500->mask[index] &= ~mask;
+ }
+}
+
+static int ab8500_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ return 0;
}
static struct irq_chip ab8500_irq_chip = {
@@ -387,6 +420,7 @@ static struct irq_chip ab8500_irq_chip = {
.irq_mask = ab8500_irq_mask,
.irq_disable = ab8500_irq_mask,
.irq_unmask = ab8500_irq_unmask,
+ .irq_set_type = ab8500_irq_set_type,
};
static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
@@ -411,6 +445,19 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
line = (i << 3) + int_bit;
latch_val &= ~(1 << int_bit);
+ /*
+ * This handles the falling edge hwirqs from the GPIO
+ * lines. Route them back to the line registered for the
+ * rising IRQ, as this is merely a flag for the same IRQ
+ * in linux terms.
+ */
+ if (line >= AB8500_INT_GPIO6F && line <= AB8500_INT_GPIO41F)
+ line -= 16;
+ if (line >= AB9540_INT_GPIO50F && line <= AB9540_INT_GPIO54F)
+ line -= 8;
+ if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
+ line += 1;
+
handle_nested_irq(ab8500->irq_base + line);
} while (latch_val);
@@ -521,6 +568,7 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
int virq = ab8500_irq_get_virq(ab8500, line);
handle_nested_irq(virq);
+ ab8500_debug_register_interrupt(line);
value &= ~(1 << bit);
} while (value);
@@ -929,7 +977,7 @@ static struct resource ab8505_iddet_resources[] = {
static struct resource ab8500_temp_resources[] = {
{
- .name = "AB8500_TEMP_WARM",
+ .name = "ABX500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
.end = AB8500_INT_TEMP_WARM,
.flags = IORESOURCE_IRQ,
@@ -1005,8 +1053,8 @@ static struct mfd_cell abx500_common_devs[] = {
.of_compatible = "stericsson,ab8500-denc",
},
{
- .name = "ab8500-temp",
- .of_compatible = "stericsson,ab8500-temp",
+ .name = "abx500-temp",
+ .of_compatible = "stericsson,abx500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
@@ -1049,7 +1097,7 @@ static struct mfd_cell ab8500_bm_devs[] = {
static struct mfd_cell ab8500_devs[] = {
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab8500",
.of_compatible = "stericsson,ab8500-gpio",
},
{
@@ -1066,7 +1114,8 @@ static struct mfd_cell ab8500_devs[] = {
static struct mfd_cell ab9540_devs[] = {
{
- .name = "ab8500-gpio",
+ .name = "pinctrl-ab9540",
+ .of_compatible = "stericsson,ab9540-gpio",
},
{
.name = "ab9540-usb",
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 5a8e707bc038..45fe3c50eb03 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -4,6 +4,72 @@
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
* License Terms: GNU General Public License v2
*/
+/*
+ * AB8500 register access
+ * ======================
+ *
+ * read:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # cat <debugfs>/ab8500/register-value
+ *
+ * write:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # echo VALUE > <debugfs>/ab8500/register-value
+ *
+ * read all registers from a bank:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # cat <debugfs>/ab8500/all-bank-register
+ *
+ * BANK target AB8500 register bank
+ * ADDR target AB8500 register address
+ * VALUE decimal or 0x-prefixed hexadecimal
+ *
+ *
+ * User Space notification on AB8500 IRQ
+ * =====================================
+ *
+ * Allows a user space entity to be notified when the target AB8500 IRQ occurs.
+ * When subscribed, a sysfs entry is created in the ab8500.i2c platform device.
+ * One can poll this file to get information about occurrences of the target IRQ.
+ *
+ * subscribe to an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-subscribe
+ *
+ * unsubscribe from an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe
+ *
+ *
+ * AB8500 register formatted read/write access
+ * ===========================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg
+ *
+ * CMD      read     read access
+ *          write    write access
+ *
+ * BANK target reg bank
+ * ADDRESS target reg address
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hexa (read) output in 0x-hexa (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right, write: left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: the bit shift operation is applied to the bit mask.
+ * Warning: the bit shift direction depends on whether the command is a read or a write.
+ */
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -11,13 +77,30 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/string.h>
+#include <linux/ctype.h>
+#endif
static u32 debug_bank;
static u32 debug_address;
+static int irq_first;
+static int irq_last;
+static u32 *irq_count;
+static int num_irqs;
+
+static struct device_attribute **dev_attr;
+static char **event_name;
+
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -42,15 +125,35 @@ struct ab8500_prcmu_ranges {
const struct ab8500_reg_range *range;
};
+/* hwreg "mask" and "shift" entry resources */
+struct hwreg_cfg {
+ u32 bank; /* target bank */
+ u32 addr; /* target address */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+ int shift; /* bit shift (read: right shift, write: left shift) */
+};
+/* fmt bit #0: 0=hexa, 1=dec */
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c))
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_NUM_BANKS 22
+#define AB8500_ADC_NAME_STRING "gpadc"
+#define AB8500_NUM_BANKS 24
#define AB8500_REV_REG 0x80
static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
- .range = 0,
+ .range = NULL,
},
[AB8500_SYS_CTRL1_BLOCK] = {
.num_ranges = 3,
@@ -215,7 +318,7 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
[AB8500_CHARGER] = {
- .num_ranges = 8,
+ .num_ranges = 9,
.range = (struct ab8500_reg_range[]) {
{
.first = 0x00,
@@ -249,6 +352,10 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
.first = 0xC0,
.last = 0xC2,
},
+ {
+ .first = 0xf5,
+ .last = 0xf6,
+ },
},
},
[AB8500_GAS_GAUGE] = {
@@ -268,6 +375,24 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
+ },
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
+ },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -354,15 +479,30 @@ static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
};
-static int ab8500_registers_print(struct seq_file *s, void *p)
+static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
- struct device *dev = s->private;
- unsigned int i;
- u32 bank = debug_bank;
+ char buf[16];
+ struct kobject *kobj = (struct kobject *)data;
+ unsigned int irq_abb = irq - irq_first;
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
+ if (irq_abb < num_irqs)
+ irq_count[irq_abb]++;
+ /*
+ * This makes it possible to use poll for events (POLLPRI | POLLERR)
+ * from userspace on the sysfs file named <irq-nr>
+ */
+ sprintf(buf, "%d", irq);
+ sysfs_notify(kobj, NULL, buf);
+
+ return IRQ_HANDLED;
+}
+
+/* Prints to seq_file or log_buf */
+static int ab8500_registers_print(struct device *dev, u32 bank,
+ struct seq_file *s)
+{
+ unsigned int i;
- seq_printf(s, " bank %u:\n", bank);
for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
u32 reg;
@@ -379,22 +519,42 @@ static int ab8500_registers_print(struct seq_file *s, void *p)
return err;
}
- err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
- reg, value);
- if (err < 0) {
- dev_err(dev, "seq_printf overflow\n");
- /* Error is not returned here since
- * the output is wanted in any case */
- return 0;
+ if (s) {
+ err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n",
+ bank, reg, value);
+ if (err < 0) {
+ dev_err(dev,
+ "seq_printf overflow bank=%d reg=%d\n",
+ bank, reg);
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ return 0;
+ }
+ } else {
+ printk(KERN_INFO" [%u/0x%02X]: 0x%02X\n", bank,
+ reg, value);
}
}
}
return 0;
}
+static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ u32 bank = debug_bank;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ seq_printf(s, " bank %u:\n", bank);
+
+ ab8500_registers_print(dev, bank, s);
+ return 0;
+}
+
static int ab8500_registers_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_registers_print, inode->i_private);
+ return single_open(file, ab8500_print_bank_registers, inode->i_private);
}
static const struct file_operations ab8500_registers_fops = {
@@ -405,6 +565,64 @@ static const struct file_operations ab8500_registers_fops = {
.owner = THIS_MODULE,
};
+static int ab8500_print_all_banks(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ unsigned int i;
+ int err;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ err = seq_printf(s, " bank %u:\n", i);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow, bank=%d\n", i);
+
+ ab8500_registers_print(dev, i, s);
+ }
+ return 0;
+}
+
+/* Dump registers to kernel log */
+void ab8500_dump_all_banks(struct device *dev)
+{
+ unsigned int i;
+
+ printk(KERN_INFO"ab8500 register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ printk(KERN_INFO" bank %u:\n", i);
+ ab8500_registers_print(dev, i, NULL);
+ }
+}
+
+static int ab8500_all_banks_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *s;
+ int err;
+
+ err = single_open(file, ab8500_print_all_banks, inode->i_private);
+ if (!err) {
+ /* Default buf size in seq_read is not enough */
+ s = (struct seq_file *)file->private_data;
+ s->size = (PAGE_SIZE * 2);
+ s->buf = kmalloc(s->size, GFP_KERNEL);
+ if (!s->buf) {
+ single_release(inode, file);
+ err = -ENOMEM;
+ }
+ }
+ return err;
+}
+
+static const struct file_operations ab8500_all_banks_fops = {
+ .open = ab8500_all_banks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int ab8500_bank_print(struct seq_file *s, void *p)
{
return seq_printf(s, "%d\n", debug_bank);
@@ -519,6 +737,761 @@ static ssize_t ab8500_val_write(struct file *file,
return count;
}
+/*
+ * Interrupt status
+ */
+static u32 num_interrupts[AB8500_MAX_NR_IRQS];
+static int num_interrupt_lines;
+
+void ab8500_debug_register_interrupt(int line)
+{
+ if (line < num_interrupt_lines)
+ num_interrupts[line]++;
+}
+
+static int ab8500_interrupts_print(struct seq_file *s, void *p)
+{
+ int line;
+
+ seq_printf(s, "irq: number of\n");
+
+ for (line = 0; line < num_interrupt_lines; line++)
+ seq_printf(s, "%3i: %6i\n", line, num_interrupts[line]);
+
+ return 0;
+}
+
+static int ab8500_interrupts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_interrupts_print, inode->i_private);
+}
+
+/*
+ * - HWREG DB8500 formatted routines
+ */
+static int ab8500_hwreg_print(struct seq_file *s, void *d)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (hwreg_cfg.shift >= 0)
+ regvalue >>= hwreg_cfg.shift;
+ else
+ regvalue <<= -hwreg_cfg.shift;
+ regvalue &= hwreg_cfg.mask;
+
+ if (REG_FMT_DEC(&hwreg_cfg))
+ seq_printf(s, "%d\n", regvalue);
+ else
+ seq_printf(s, "0x%02X\n", regvalue);
+ return 0;
+}
+
+static int ab8500_hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_hwreg_print, inode->i_private);
+}
+
+static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+{
+ int bat_ctrl_raw;
+ int bat_ctrl_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+ bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BAT_CTRL, bat_ctrl_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw);
+}
+
+static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
+ .open = ab8500_gpadc_bat_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+{
+ int btemp_ball_raw;
+ int btemp_ball_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+ btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+ btemp_ball_raw);
+
+ return seq_printf(s,
+ "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+}
+
+static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
+ .open = ab8500_gpadc_btemp_ball_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+{
+ int main_charger_v_raw;
+ int main_charger_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+ main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_V, main_charger_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_v_convert, main_charger_v_raw);
+}
+
+static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_v_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
+ .open = ab8500_gpadc_main_charger_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+{
+ int acc_detect1_raw;
+ int acc_detect1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+ acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
+ acc_detect1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect1_convert, acc_detect1_raw);
+}
+
+static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect1_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
+ .open = ab8500_gpadc_acc_detect1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+{
+ int acc_detect2_raw;
+ int acc_detect2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+ acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ ACC_DETECT2, acc_detect2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect2_convert, acc_detect2_raw);
+}
+
+static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect2_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
+ .open = ab8500_gpadc_acc_detect2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+{
+ int aux1_raw;
+ int aux1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+ aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
+ aux1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux1_convert, aux1_raw);
+}
+
+static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux1_fops = {
+ .open = ab8500_gpadc_aux1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+{
+ int aux2_raw;
+ int aux2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+ aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
+ aux2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux2_convert, aux2_raw);
+}
+
+static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux2_fops = {
+ .open = ab8500_gpadc_aux2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+{
+ int main_bat_v_raw;
+ int main_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+ main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+ main_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_bat_v_convert, main_bat_v_raw);
+}
+
+static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
+ .open = ab8500_gpadc_main_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+{
+ int vbus_v_raw;
+ int vbus_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+ vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
+ vbus_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ vbus_v_convert, vbus_v_raw);
+}
+
+static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_vbus_v_fops = {
+ .open = ab8500_gpadc_vbus_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+{
+ int main_charger_c_raw;
+ int main_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+ main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_C, main_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_c_convert, main_charger_c_raw);
+}
+
+static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
+ .open = ab8500_gpadc_main_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+{
+ int usb_charger_c_raw;
+ int usb_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+ usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ USB_CHARGER_C, usb_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ usb_charger_c_convert, usb_charger_c_raw);
+}
+
+static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_usb_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
+ .open = ab8500_gpadc_usb_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+{
+ int bk_bat_v_raw;
+ int bk_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+ bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BK_BAT_V, bk_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bk_bat_v_convert, bk_bat_v_raw);
+}
+
+static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
+ .open = ab8500_gpadc_bk_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+{
+ int die_temp_raw;
+ int die_temp_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+ die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
+ die_temp_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ die_temp_convert, die_temp_raw);
+}
+
+static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_die_temp_fops = {
+ .open = ab8500_gpadc_die_temp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is
+ * not a numerical value.
+ * The string shall start at the value's first character and may only be
+ * terminated by '\0', space or newline characters.
+ * value can be decimal or hexadecimal (prefixed 0x or 0X).
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
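A hedged, userspace re-implementation of the strval_len() contract so it can be compiled and tried directly; the helper name and the test strings are illustrative only.

#include <ctype.h>
#include <stdio.h>

/* Userspace copy of strval_len() above, for illustration. */
static int strval_len_demo(const char *b)
{
	const char *s = b;

	if ((*s == '0') && ((*(s + 1) == 'x') || (*(s + 1) == 'X'))) {
		s += 2;
		for (; *s && (*s != ' ') && (*s != '\n'); s++)
			if (!isxdigit((unsigned char)*s))
				return 0;
	} else {
		if (*s == '-')
			s++;
		for (; *s && (*s != ' ') && (*s != '\n'); s++)
			if (!isdigit((unsigned char)*s))
				return 0;
	}
	return (int)(s - b);
}

int main(void)
{
	printf("%d\n", strval_len_demo("0x1F rest"));	/* 4: "0x" prefix counted */
	printf("%d\n", strval_len_demo("-42\n"));	/* 3: sign plus digits */
	printf("%d\n", strval_len_demo("12ab"));	/* 0: not a plain number */
	return 0;
}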
+
+/*
+ * Parse hwreg input data.
+ * Update the global hwreg_cfg only if the input syntax is valid.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
+ struct device *dev)
+{
+ uint write, val = 0;
+ u8 regvalue;
+ int ret;
+ struct hwreg_cfg loc = {
+ .bank = 0, /* default: invalid bank */
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else
+ return -EINVAL;
+
+ /* OPTIONS: -d|-dec, -h|-hex, -m|-mask <val>, -s|-shift <val> */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) ||
+ (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) ||
+ (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) ||
+ (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) ||
+ (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg BANK and ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.bank = simple_strtoul(b, &b, 0);
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+
+ if (write) {
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef ABB_HWREG_DEBUG
+ pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
+ "value=0x%X\n", (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (!write)
+ return 0;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (cfg->shift >= 0) {
+ regvalue &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ regvalue &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | regvalue;
+
+ ret = abx500_set_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, (u8)val);
+ if (ret < 0) {
+ pr_err("abx500_set_reg failed %d, %d", ret, __LINE__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
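The write branch above merges the user value into the current register contents under mask and shift; a compact, self-contained sketch of the same read-modify-write arithmetic (the register contents and cfg values are invented for the example, not taken from the hardware).

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x07;	/* three-bit field */
	int shift = 3;			/* field starts at bit 3 */
	unsigned char regvalue = 0xFF;	/* pretend current register contents */
	unsigned int val = 0x2;		/* new field value from the user */

	if (shift >= 0) {
		regvalue &= ~(mask << shift);	/* clear the target field */
		val = (val & mask) << shift;	/* align the new value */
	} else {
		regvalue &= ~(mask >> -shift);
		val = (val & mask) >> -shift;
	}
	val |= regvalue;		/* merged value to write back */

	printf("0x%02X\n", val);	/* prints 0xD7 */
	return 0;
}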
+
+static ssize_t ab8500_hwreg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg, dev);
+ return (ret) ? ret : buf_size;
+}
+
+/*
+ * - irq subscribe/unsubscribe stuff
+ */
+static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", irq_first);
+
+ return 0;
+}
+
+static int ab8500_subscribe_unsubscribe_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_subscribe_unsubscribe_print,
+ inode->i_private);
+}
+
+/*
+ * Userspace should use poll() on this file. When an event occurs,
+ * the blocking poll will be released.
+ */
+static ssize_t show_irq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long name;
+ unsigned int irq_index;
+ int err;
+
+ err = strict_strtoul(attr->attr.name, 0, &name);
+ if (err)
+ return err;
+
+ irq_index = name - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+ else
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
+}
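As the comment above notes, the per-IRQ sysfs file is meant to be poll()ed from userspace; a minimal sketch of such a client follows, assuming a hypothetical sysfs path and IRQ number (the real path depends on where the ab8500 debug device's kobject sits and on the IRQ that was subscribed).

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path: <sysfs dir of the ab8500 debug device>/<irq-nr> */
	const char *path = "/sys/devices/platform/ab8500-debug.0/40";
	char buf[16];
	struct pollfd pfd;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI | POLLERR;

	/* Dummy read so the next sysfs_notify() wakes us up. */
	read(pfd.fd, buf, sizeof(buf));

	while (poll(&pfd, 1, -1) > 0) {
		/* Rewind and re-read the attribute after each notification. */
		lseek(pfd.fd, 0, SEEK_SET);
		ssize_t n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("event count: %s", buf);
		}
	}
	close(pfd.fd);
	return 0;
}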
+
+static ssize_t ab8500_subscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /*
+ * This will create a sysfs file named <irq-nr> which userspace can
+ * poll() or select() on to receive the AB8500 events.
+ */
+ dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
+ GFP_KERNEL);
+ event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL);
+ sprintf(event_name[irq_index], "%lu", user_val);
+ dev_attr[irq_index]->show = show_irq;
+ dev_attr[irq_index]->store = NULL;
+ dev_attr[irq_index]->attr.name = event_name[irq_index];
+ dev_attr[irq_index]->attr.mode = S_IRUGO;
+ err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ return err;
+ }
+
+ err = request_threaded_irq(user_val, NULL, ab8500_debug_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "ab8500-debug", &dev->kobj);
+ if (err < 0) {
+ printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
+ err, user_val);
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ return err;
+ }
+
+ return buf_size;
+}
+
+static ssize_t ab8500_unsubscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /* Reset the irq count to 0 on unsubscribe */
+ irq_count[irq_index] = 0;
+
+ if (dev_attr[irq_index])
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+
+
+ free_irq(user_val, &dev->kobj);
+ kfree(event_name[irq_index]);
+ kfree(dev_attr[irq_index]);
+
+ return buf_size;
+}
+
+/*
+ * - fops for the various debugfs nodes
+ */
+
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
@@ -546,64 +1519,231 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations ab8500_interrupts_fops = {
+ .open = ab8500_interrupts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_subscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_subscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_unsubscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_unsubscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_hwreg_fops = {
+ .open = ab8500_hwreg_open,
+ .write = ab8500_hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static struct dentry *ab8500_dir;
-static struct dentry *ab8500_reg_file;
-static struct dentry *ab8500_bank_file;
-static struct dentry *ab8500_address_file;
-static struct dentry *ab8500_val_file;
+static struct dentry *ab8500_gpadc_dir;
static int ab8500_debug_probe(struct platform_device *plf)
{
+ struct dentry *file;
+ int ret = -ENOMEM;
+ struct ab8500 *ab8500;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
+ ab8500 = dev_get_drvdata(plf->dev.parent);
+ num_irqs = ab8500->mask_size;
+
+ irq_count = kzalloc(sizeof(*irq_count)*num_irqs, GFP_KERNEL);
+ if (!irq_count)
+ return -ENOMEM;
+
+ dev_attr = kzalloc(sizeof(*dev_attr)*num_irqs,GFP_KERNEL);
+ if (!dev_attr)
+ goto out_freeirq_count;
+
+ event_name = kzalloc(sizeof(*event_name)*num_irqs, GFP_KERNEL);
+ if (!event_name)
+ goto out_freedev_attr;
+
+ irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
+ if (irq_first < 0) {
+ dev_err(&plf->dev, "First irq not found, err %d\n",
+ irq_first);
+ ret = irq_first;
+ goto out_freeevent_name;
+ }
+
+ irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
+ if (irq_last < 0) {
+ dev_err(&plf->dev, "Last irq not found, err %d\n",
+ irq_last);
+ ret = irq_last;
+ goto out_freeevent_name;
+ }
+
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
if (!ab8500_dir)
- goto exit_no_debugfs;
+ goto err;
+
+ ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
+ ab8500_dir);
+ if (!ab8500_gpadc_dir)
+ goto err;
+
+ file = debugfs_create_file("all-bank-registers", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("all-banks", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!file)
+ goto err;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ if (!file)
+ goto err;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ if (is_ab8500(ab8500))
+ num_interrupt_lines = AB8500_NR_IRQS;
+ else if (is_ab8505(ab8500))
+ num_interrupt_lines = AB8505_NR_IRQS;
+ else if (is_ab9540(ab8500))
+ num_interrupt_lines = AB9540_NR_IRQS;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ file = debugfs_create_file("interrupts", (S_IRUGO),
+ ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
+ if (!file)
+ goto err;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR),
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ if (!file)
+ goto err;
return 0;
-exit_destroy_address:
- debugfs_remove(ab8500_address_file);
-exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab8500_dir);
-exit_no_debugfs:
+err:
+ if (ab8500_dir)
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
- return -ENOMEM;
+out_freeevent_name:
+ kfree(event_name);
+out_freedev_attr:
+ kfree(dev_attr);
+out_freeirq_count:
+ kfree(irq_count);
+
+ return ret;
}
static int ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
+ debugfs_remove_recursive(ab8500_dir);
+ kfree(event_name);
+ kfree(dev_attr);
+ kfree(irq_count);
return 0;
}
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 3fb1f40d6389..b1f3561b023f 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
@@ -82,6 +83,11 @@
/* This is used to not lose precision when dividing to get gain and offset */
#define CALIB_SCALE 1000
+/* Time in ms before disabling regulator */
+#define GPADC_AUTOSUSPEND_DELAY 1
+
+#define CONVERSION_TIME 500 /* ms */
+
enum cal_channels {
ADC_INPUT_VMAIN = 0,
ADC_INPUT_BTEMP,
@@ -102,10 +108,10 @@ struct adc_cal_data {
/**
* struct ab8500_gpadc - AB8500 GPADC device information
- * @chip_id ABB chip id
* @dev: pointer to the struct device
* @node: a list of AB8500 GPADCs, hence prepared for
reentrance
+ * @parent: pointer to the struct ab8500
* @ab8500_gpadc_complete: pointer to the struct completion, to indicate
* the completion of gpadc conversion
* @ab8500_gpadc_lock: structure of type mutex
@@ -114,9 +120,9 @@ struct adc_cal_data {
* @cal_data array of ADC calibration data structs
*/
struct ab8500_gpadc {
- u8 chip_id;
struct device *dev;
struct list_head node;
+ struct ab8500 *parent;
struct completion ab8500_gpadc_complete;
struct mutex ab8500_gpadc_lock;
struct regulator *regu;
@@ -282,8 +288,9 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
return -ENODEV;
mutex_lock(&gpadc->ab8500_gpadc_lock);
+
/* Enable VTVout LDO this is required for GPADC */
- regulator_enable(gpadc->regu);
+ pm_runtime_get_sync(gpadc->dev);
/* Check if ADC is not busy, lock and proceed */
do {
@@ -332,7 +339,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
EN_BUF | EN_ICHAR);
break;
case BTEMP_BALL:
- if (gpadc->chip_id >= AB8500_CUT3P0) {
+ if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
/* Turn on btemp pull-up on ABB 3.0 */
ret = abx500_mask_and_set_register_interruptible(
gpadc->dev,
@@ -344,7 +351,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
* Delay might be needed for ABB8500 cut 3.0, if not, remove
* when hardware will be available
*/
- msleep(1);
+ usleep_range(1000, 1000);
break;
}
/* Intentional fallthrough */
@@ -367,7 +374,8 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
goto out;
}
/* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) {
+ if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
+ msecs_to_jiffies(CONVERSION_TIME))) {
dev_err(gpadc->dev,
"timeout: didn't receive GPADC conversion interrupt\n");
ret = -EINVAL;
@@ -397,8 +405,10 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
goto out;
}
- /* Disable VTVout LDO this is required for GPADC */
- regulator_disable(gpadc->regu);
+
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
return (high_data << 8) | low_data;
@@ -412,7 +422,9 @@ out:
*/
(void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- regulator_disable(gpadc->regu);
+
+ pm_runtime_put(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
dev_err(gpadc->dev,
"gpadc_conversion: Failed to AD convert channel %d\n", channel);
@@ -571,6 +583,28 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
gpadc->cal_data[ADC_INPUT_VBAT].offset);
}
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_disable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_enable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_idle(struct device *dev)
+{
+ pm_runtime_suspend(dev);
+ return 0;
+}
+
static int ab8500_gpadc_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -591,6 +625,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
}
gpadc->dev = &pdev->dev;
+ gpadc->parent = dev_get_drvdata(pdev->dev.parent);
mutex_init(&gpadc->ab8500_gpadc_lock);
/* Initialize completion used to notify completion of conversion */
@@ -607,14 +642,6 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
goto fail;
}
- /* Get Chip ID of the ABB ASIC */
- ret = abx500_get_chip_id(gpadc->dev);
- if (ret < 0) {
- dev_err(gpadc->dev, "failed to get chip ID\n");
- goto fail_irq;
- }
- gpadc->chip_id = (u8) ret;
-
/* VTVout LDO used to power up ab8500-GPADC */
gpadc->regu = regulator_get(&pdev->dev, "vddadc");
if (IS_ERR(gpadc->regu)) {
@@ -622,6 +649,16 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
dev_err(gpadc->dev, "failed to get vtvout LDO\n");
goto fail_irq;
}
+
+ platform_set_drvdata(pdev, gpadc);
+
+ regulator_enable(gpadc->regu);
+
+ pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gpadc->dev);
+ pm_runtime_set_active(gpadc->dev);
+ pm_runtime_enable(gpadc->dev);
+
ab8500_gpadc_read_calibration_data(gpadc);
list_add_tail(&gpadc->node, &ab8500_gpadc_list);
dev_dbg(gpadc->dev, "probe success\n");
@@ -642,19 +679,34 @@ static int ab8500_gpadc_remove(struct platform_device *pdev)
list_del(&gpadc->node);
/* remove interrupt - completion of Sw ADC conversion */
free_irq(gpadc->irq, gpadc);
- /* disable VTVout LDO that is being used by GPADC */
- regulator_put(gpadc->regu);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+
+ regulator_disable(gpadc->regu);
+
+ pm_runtime_set_suspended(gpadc->dev);
+
+ pm_runtime_put_noidle(gpadc->dev);
+
kfree(gpadc);
gpadc = NULL;
return 0;
}
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ ab8500_gpadc_runtime_idle)
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = ab8500_gpadc_remove,
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .pm = &ab8500_gpadc_pm_ops,
},
};
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 8a33b2c7eead..108fd86552f0 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,12 +7,73 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/signal.h>
+#include <linux/power_supply.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
static struct device *sysctrl_dev;
+void ab8500_power_off(void)
+{
+ sigset_t old;
+ sigset_t all;
+ static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+ int i;
+ bool charger_present = false;
+ union power_supply_propval val;
+ struct power_supply *psy;
+ int ret;
+
+ /*
+ * If we have a charger connected and we're powering off,
+ * reboot into charge-only mode.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(pss); i++) {
+ psy = power_supply_get_by_name(pss[i]);
+ if (!psy)
+ continue;
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ if (!ret && val.intval) {
+ charger_present = true;
+ break;
+ }
+ }
+
+ if (!charger_present)
+ goto shutdown;
+
+ /* Check if battery is known */
+ psy = power_supply_get_by_name("ab8500_btemp");
+ if (psy) {
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY,
+ &val);
+ if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) {
+ printk(KERN_INFO
+ "Charger \"%s\" is connected with known battery."
+ " Rebooting.\n",
+ pss[i]);
+ machine_restart("charging");
+ }
+ }
+
+shutdown:
+ sigfillset(&all);
+
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ }
+}
+
static inline bool valid_bank(u8 bank)
{
return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -33,6 +94,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
return abx500_get_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_read);
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
{
@@ -48,10 +110,40 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_write);
static int ab8500_sysctrl_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *plat;
+ struct ab8500_sysctrl_platform_data *pdata;
+
sysctrl_dev = &pdev->dev;
+ plat = dev_get_platdata(pdev->dev.parent);
+ if (plat->pm_power_off)
+ pm_power_off = ab8500_power_off;
+
+ pdata = plat->sysctrl;
+
+ if (pdata) {
+ int ret, i, j;
+
+ for (i = AB8500_SYSCLKREQ1RFCLKBUF;
+ i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) {
+ j = i - AB8500_SYSCLKREQ1RFCLKBUF;
+ ret = ab8500_sysctrl_write(i, 0xff,
+ pdata->initial_req_buf_config[j]);
+ dev_dbg(&pdev->dev,
+ "Setting SysClkReq%dRfClkBuf 0x%X\n",
+ j + 1,
+ pdata->initial_req_buf_config[j]);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "unable to set sysClkReq%dRfClkBuf: "
+ "%d\n", j + 1, ret);
+ }
+ }
+ }
+
return 0;
}
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index 7ce65f49480f..9818afba2515 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -153,6 +153,22 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
}
EXPORT_SYMBOL(abx500_startup_irq_enabled);
+void abx500_dump_all_banks(void)
+{
+ struct abx500_ops *ops;
+ struct device dummy_child = {0};
+ struct abx500_device_entry *dev_entry;
+
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ dummy_child.parent = dev_entry->dev;
+ ops = &dev_entry->ops;
+
+ if ((ops != NULL) && (ops->dump_all_banks != NULL))
+ ops->dump_all_banks(&dummy_child);
+ }
+}
+EXPORT_SYMBOL(abx500_dump_all_banks);
+
MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
MODULE_DESCRIPTION("ABX500 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 222c03a5ddc0..b562c7bf8a46 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -115,7 +115,7 @@ static irqreturn_t arizona_underclocked(int irq, void *data)
if (val & ARIZONA_ADC_UNDERCLOCKED_STS)
dev_err(arizona->dev, "ADC underclocked\n");
if (val & ARIZONA_MIXER_UNDERCLOCKED_STS)
- dev_err(arizona->dev, "Mixer underclocked\n");
+ dev_err(arizona->dev, "Mixer dropped sample\n");
return IRQ_HANDLED;
}
@@ -263,10 +263,36 @@ static int arizona_runtime_suspend(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int arizona_resume_noirq(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ dev_dbg(arizona->dev, "Early resume, disabling IRQ\n");
+ disable_irq(arizona->irq);
+
+ return 0;
+}
+
+static int arizona_resume(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ dev_dbg(arizona->dev, "Late resume, reenabling IRQ\n");
+ enable_irq(arizona->irq);
+
+ return 0;
+}
+#endif
+
const struct dev_pm_ops arizona_pm_ops = {
SET_RUNTIME_PM_OPS(arizona_runtime_suspend,
arizona_runtime_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, arizona_resume)
+#ifdef CONFIG_PM_SLEEP
+ .resume_noirq = arizona_resume_noirq,
+#endif
};
EXPORT_SYMBOL_GPL(arizona_pm_ops);
@@ -275,19 +301,19 @@ static struct mfd_cell early_devs[] = {
};
static struct mfd_cell wm5102_devs[] = {
+ { .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
- { .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5102-codec" },
};
static struct mfd_cell wm5110_devs[] = {
+ { .name = "arizona-micsupp" },
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
- { .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5110-codec" },
};
@@ -484,6 +510,29 @@ int arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ for (i = 0; i < ARIZONA_MAX_MICBIAS; i++) {
+ if (!arizona->pdata.micbias[i].mV)
+ continue;
+
+ val = (arizona->pdata.micbias[i].mV - 1500) / 100;
+ val <<= ARIZONA_MICB1_LVL_SHIFT;
+
+ if (arizona->pdata.micbias[i].ext_cap)
+ val |= ARIZONA_MICB1_EXT_CAP;
+
+ if (arizona->pdata.micbias[i].discharge)
+ val |= ARIZONA_MICB1_DISCH;
+
+ if (arizona->pdata.micbias[i].fast_start)
+ val |= ARIZONA_MICB1_RATE;
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MIC_BIAS_CTRL_1 + i,
+ ARIZONA_MICB1_LVL_MASK |
+ ARIZONA_MICB1_DISCH |
+ ARIZONA_MICB1_RATE, val);
+ }
+
for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
/* Default for both is 0 so noop with defaults */
val = arizona->pdata.dmic_ref[i]
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 885e56780358..6a9fec40d018 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -60,7 +60,7 @@ static inline bool i2c_safe_reg(unsigned char reg)
* This fix is to follow any read or write with a dummy read to a safe
* register.
*/
-int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+static int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
{
int val;
@@ -85,7 +85,6 @@ int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
return 0;
}
-EXPORT_SYMBOL(da9052_i2c_fix);
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a2bacf95b59e..21f261bf9e95 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -33,6 +33,7 @@
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
+#include <linux/platform_data/ux500_wdt.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
@@ -2207,21 +2208,25 @@ int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
A9WDOG_AUTO_OFF_DIS);
}
+EXPORT_SYMBOL(db8500_prcmu_config_a9wdog);
int db8500_prcmu_enable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_enable_a9wdog);
int db8500_prcmu_disable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_disable_a9wdog);
int db8500_prcmu_kick_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
+EXPORT_SYMBOL(db8500_prcmu_kick_a9wdog);
/*
* timeout is 28 bit, in ms.
@@ -2239,6 +2244,7 @@ int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
(u8)((timeout >> 12) & 0xff),
(u8)((timeout >> 20) & 0xff));
}
+EXPORT_SYMBOL(db8500_prcmu_load_a9wdog);
/**
* prcmu_abb_read() - Read register value(s) from the ABB.
@@ -3094,6 +3100,11 @@ static struct resource ab8500_resources[] = {
}
};
+static struct ux500_wdt_data db8500_wdt_pdata = {
+ .timeout = 600, /* 10 minutes */
+ .has_28_bits_resolution = true,
+};
+
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
@@ -3108,6 +3119,12 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_cpufreq_table),
},
{
+ .name = "ux500_wdt",
+ .platform_data = &db8500_wdt_pdata,
+ .pdata_size = sizeof(db8500_wdt_pdata),
+ .id = -1,
+ },
+ {
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.num_resources = ARRAY_SIZE(ab8500_resources),
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index d9d930302e98..9f12f91d6296 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -50,6 +50,7 @@
* document number TBD : Panther Point
* document number TBD : Lynx Point
* document number TBD : Lynx Point-LP
+ * document number TBD : Wellsburg
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,8 +76,10 @@
#define ACPIBASE_GCS_OFF 0x3410
#define ACPIBASE_GCS_END 0x3414
-#define GPIOBASE 0x48
-#define GPIOCTRL 0x4C
+#define GPIOBASE_ICH0 0x58
+#define GPIOCTRL_ICH0 0x5C
+#define GPIOBASE_ICH6 0x48
+#define GPIOCTRL_ICH6 0x4C
#define RCBABASE 0xf0
@@ -84,8 +87,17 @@
#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
-static int lpc_ich_acpi_save = -1;
-static int lpc_ich_gpio_save = -1;
+struct lpc_ich_cfg {
+ int base;
+ int ctrl;
+ int save;
+};
+
+struct lpc_ich_priv {
+ int chipset;
+ struct lpc_ich_cfg acpi;
+ struct lpc_ich_cfg gpio;
+};
static struct resource wdt_ich_res[] = {
/* ACPI - TCO */
@@ -194,6 +206,7 @@ enum lpc_chipsets {
LPC_PPT, /* Panther Point */
LPC_LPT, /* Lynx Point */
LPC_LPT_LP, /* Lynx Point-LP */
+ LPC_WBG, /* Wellsburg */
};
struct lpc_ich_info lpc_chipset_info[] = {
@@ -474,6 +487,10 @@ struct lpc_ich_info lpc_chipset_info[] = {
.name = "Lynx Point_LP",
.iTCO_version = 2,
},
+ [LPC_WBG] = {
+ .name = "Wellsburg",
+ .iTCO_version = 2,
+ },
};
/*
@@ -655,45 +672,82 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
{ PCI_VDEVICE(INTEL, 0x9c45), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c46), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c47), LPC_LPT_LP},
+ { PCI_VDEVICE(INTEL, 0x8d40), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d41), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d42), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d43), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d44), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d45), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d46), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d47), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d48), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d49), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4a), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4b), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4c), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4d), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4e), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d4f), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d50), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d51), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d52), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d53), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d54), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d55), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d56), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d57), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d58), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d59), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5a), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5b), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5c), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
+ { PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
static void lpc_ich_restore_config_space(struct pci_dev *dev)
{
- if (lpc_ich_acpi_save >= 0) {
- pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
- lpc_ich_acpi_save = -1;
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+
+ if (priv->acpi.save >= 0) {
+ pci_write_config_byte(dev, priv->acpi.ctrl, priv->acpi.save);
+ priv->acpi.save = -1;
}
- if (lpc_ich_gpio_save >= 0) {
- pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
- lpc_ich_gpio_save = -1;
+ if (priv->gpio.save >= 0) {
+ pci_write_config_byte(dev, priv->gpio.ctrl, priv->gpio.save);
+ priv->gpio.save = -1;
}
}
static void lpc_ich_enable_acpi_space(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, ACPICTRL, &reg_save);
- pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
- lpc_ich_acpi_save = reg_save;
+ pci_read_config_byte(dev, priv->acpi.ctrl, &reg_save);
+ pci_write_config_byte(dev, priv->acpi.ctrl, reg_save | 0x10);
+ priv->acpi.save = reg_save;
}
static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u8 reg_save;
- pci_read_config_byte(dev, GPIOCTRL, &reg_save);
- pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
- lpc_ich_gpio_save = reg_save;
+ pci_read_config_byte(dev, priv->gpio.ctrl, &reg_save);
+ pci_write_config_byte(dev, priv->gpio.ctrl, reg_save | 0x10);
+ priv->gpio.save = reg_save;
}
-static void lpc_ich_finalize_cell(struct mfd_cell *cell,
- const struct pci_device_id *id)
+static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
{
- cell->platform_data = &lpc_chipset_info[id->driver_data];
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+
+ cell->platform_data = &lpc_chipset_info[priv->chipset];
cell->pdata_size = sizeof(struct lpc_ich_info);
}
@@ -721,9 +775,9 @@ static int lpc_ich_check_conflict_gpio(struct resource *res)
return use_gpio ? use_gpio : ret;
}
-static int lpc_ich_init_gpio(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int lpc_ich_init_gpio(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u32 base_addr_cfg;
u32 base_addr;
int ret;
@@ -731,7 +785,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -757,7 +811,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
gpe0_done:
/* Setup GPIO base register */
- pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->gpio.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
@@ -768,7 +822,7 @@ gpe0_done:
/* Older devices provide fewer GPIO and have a smaller resource size. */
res = &gpio_ich_res[ICH_RES_GPIO];
res->start = base_addr;
- switch (lpc_chipset_info[id->driver_data].gpio_version) {
+ switch (lpc_chipset_info[priv->chipset].gpio_version) {
case ICH_V5_GPIO:
case ICH_V10CORP_GPIO:
res->end = res->start + 128 - 1;
@@ -784,10 +838,10 @@ gpe0_done:
acpi_conflict = true;
goto gpio_done;
}
- lpc_chipset_info[id->driver_data].use_gpio = ret;
+ lpc_chipset_info[priv->chipset].use_gpio = ret;
lpc_ich_enable_gpio_space(dev);
- lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+ lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
1, NULL, 0, NULL);
@@ -798,16 +852,16 @@ gpio_done:
return ret;
}
-static int lpc_ich_init_wdt(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int lpc_ich_init_wdt(struct pci_dev *dev)
{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
u32 base_addr_cfg;
u32 base_addr;
int ret;
struct resource *res;
/* Setup power management base register */
- pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ pci_read_config_dword(dev, priv->acpi.base, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
@@ -830,7 +884,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
*/
- if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+ if (lpc_chipset_info[priv->chipset].iTCO_version == 1) {
/* Don't register iomem for TCO ver 1 */
lpc_ich_cells[LPC_WDT].num_resources--;
} else {
@@ -847,7 +901,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
res->end = base_addr + ACPIBASE_GCS_END;
}
- lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+ lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
1, NULL, 0, NULL);
@@ -858,14 +912,36 @@ wdt_done:
static int lpc_ich_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
+ struct lpc_ich_priv *priv;
int ret;
bool cell_added = false;
- ret = lpc_ich_init_wdt(dev, id);
+ priv = devm_kzalloc(&dev->dev,
+ sizeof(struct lpc_ich_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->chipset = id->driver_data;
+ priv->acpi.save = -1;
+ priv->acpi.base = ACPIBASE;
+ priv->acpi.ctrl = ACPICTRL;
+
+ priv->gpio.save = -1;
+ if (priv->chipset <= LPC_ICH5) {
+ priv->gpio.base = GPIOBASE_ICH0;
+ priv->gpio.ctrl = GPIOCTRL_ICH0;
+ } else {
+ priv->gpio.base = GPIOBASE_ICH6;
+ priv->gpio.ctrl = GPIOCTRL_ICH6;
+ }
+
+ pci_set_drvdata(dev, priv);
+
+ ret = lpc_ich_init_wdt(dev);
if (!ret)
cell_added = true;
- ret = lpc_ich_init_gpio(dev, id);
+ ret = lpc_ich_init_gpio(dev);
if (!ret)
cell_added = true;
@@ -876,6 +952,7 @@ static int lpc_ich_probe(struct pci_dev *dev,
if (!cell_added) {
dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
+ pci_set_drvdata(dev, NULL);
return -ENODEV;
}
@@ -886,6 +963,7 @@ static void lpc_ich_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
lpc_ich_restore_config_space(dev);
+ pci_set_drvdata(dev, NULL);
}
static struct pci_driver lpc_ich_driver = {
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index 5624fcbba69b..8cc6aac27cb2 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -45,34 +45,32 @@ static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
-
static struct resource gpio_sch_resource = {
.flags = IORESOURCE_IO,
};
-static struct mfd_cell lpc_sch_cells[] = {
- {
- .name = "isch_smbus",
- .num_resources = 1,
- .resources = &smbus_sch_resource,
- },
- {
- .name = "sch_gpio",
- .num_resources = 1,
- .resources = &gpio_sch_resource,
- },
-};
-
static struct resource wdt_sch_resource = {
.flags = IORESOURCE_IO,
};
-static struct mfd_cell tunnelcreek_cells[] = {
- {
- .name = "ie6xx_wdt",
- .num_resources = 1,
- .resources = &wdt_sch_resource,
- },
+static struct mfd_cell lpc_sch_cells[3];
+
+static struct mfd_cell isch_smbus_cell = {
+ .name = "isch_smbus",
+ .num_resources = 1,
+ .resources = &smbus_sch_resource,
+};
+
+static struct mfd_cell sch_gpio_cell = {
+ .name = "sch_gpio",
+ .num_resources = 1,
+ .resources = &gpio_sch_resource,
+};
+
+static struct mfd_cell wdt_sch_cell = {
+ .name = "ie6xx_wdt",
+ .num_resources = 1,
+ .resources = &wdt_sch_resource,
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
@@ -88,79 +86,76 @@ static int lpc_sch_probe(struct pci_dev *dev,
{
unsigned int base_addr_cfg;
unsigned short base_addr;
- int i;
+ int i, cells = 0;
int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the SMBus I/O range disabled\n");
- return -ENODEV;
- }
- base_addr = (unsigned short)base_addr_cfg;
- if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for SMBus uninitialized\n");
- return -ENODEV;
- }
-
- smbus_sch_resource.start = base_addr;
- smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the SMBus I/O range disabled\n");
+ else
+ base_addr = (unsigned short)base_addr_cfg;
- pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the GPIO I/O range disabled\n");
- return -ENODEV;
- }
- base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
- return -ENODEV;
+ dev_warn(&dev->dev, "I/O space for SMBus uninitialized\n");
+ } else {
+ lpc_sch_cells[cells++] = isch_smbus_cell;
+ smbus_sch_resource.start = base_addr;
+ smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
}
- gpio_sch_resource.start = base_addr;
-
- if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the GPIO I/O range disabled\n");
else
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
-
- for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
- lpc_sch_cells[i].id = id->device;
+ base_addr = (unsigned short)base_addr_cfg;
- ret = mfd_add_devices(&dev->dev, 0,
- lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL,
- 0, NULL);
- if (ret)
- goto out_dev;
+ if (base_addr == 0) {
+ dev_warn(&dev->dev, "I/O space for GPIO uninitialized\n");
+ } else {
+ lpc_sch_cells[cells++] = sch_gpio_cell;
+ gpio_sch_resource.start = base_addr;
+ if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ else
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+ }
if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
- || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
+ || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
- if (!(base_addr_cfg & (1 << 31))) {
- dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
- ret = -ENODEV;
- goto out_dev;
+ base_addr = 0;
+ if (!(base_addr_cfg & (1 << 31)))
+ dev_warn(&dev->dev, "Decode of the WDT I/O range disabled\n");
+ else
+ base_addr = (unsigned short)base_addr_cfg;
+ if (base_addr == 0)
+ dev_warn(&dev->dev, "I/O space for WDT uninitialized\n");
+ else {
+ lpc_sch_cells[cells++] = wdt_sch_cell;
+ wdt_sch_resource.start = base_addr;
+ wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
}
- base_addr = (unsigned short)base_addr_cfg;
- if (base_addr == 0) {
- dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
- ret = -ENODEV;
- goto out_dev;
- }
-
- wdt_sch_resource.start = base_addr;
- wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
+ }
- for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
- tunnelcreek_cells[i].id = id->device;
+ if (WARN_ON(cells > ARRAY_SIZE(lpc_sch_cells))) {
+ dev_err(&dev->dev, "Cell count exceeds array size");
+ return -ENODEV;
+ }
- ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
- ARRAY_SIZE(tunnelcreek_cells), NULL,
- 0, NULL);
+ if (cells == 0) {
+ dev_err(&dev->dev, "All decode registers disabled.\n");
+ return -ENODEV;
}
- return ret;
-out_dev:
- mfd_remove_devices(&dev->dev);
+ for (i = 0; i < cells; i++)
+ lpc_sch_cells[i].id = id->device;
+
+ ret = mfd_add_devices(&dev->dev, 0, lpc_sch_cells, cells, NULL, 0, NULL);
+ if (ret)
+ mfd_remove_devices(&dev->dev);
+
return ret;
}
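The lpc_sch probe above now assembles a scratch lpc_sch_cells[] array from whichever decode ranges are actually enabled and registers only those, instead of failing the whole probe on the first disabled range. The fragment below is a minimal, stand-alone sketch of that pattern; the cell name "demo-func", the 0x40 region size and the demo_* identifiers are placeholders, not part of the driver.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>

static struct resource demo_res = {
        .flags = IORESOURCE_IO,
};

static const struct mfd_cell demo_cell = {
        .name           = "demo-func",  /* hypothetical sub-device */
        .num_resources  = 1,
        .resources      = &demo_res,
};

static struct mfd_cell demo_cells[1];

/* Register only the functions whose I/O decode turned out to be usable. */
static int demo_add_cells(struct device *dev, unsigned short iobase)
{
        int n = 0;

        if (iobase) {
                demo_res.start = iobase;
                demo_res.end   = iobase + 0x40 - 1;     /* 0x40: made-up size */
                demo_cells[n++] = demo_cell;
        }

        if (!n)
                return -ENODEV;

        return mfd_add_devices(dev, 0, demo_cells, n, NULL, 0, NULL);
}
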
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index e32466e865b9..f0cc40296d8c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -14,10 +14,13 @@
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
static struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
@@ -639,17 +642,33 @@ static struct irq_chip max8925_irq_chip = {
.irq_disable = max8925_irq_disable,
};
+static int max8925_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max8925_irq_domain_ops = {
+ .map = max8925_irq_domain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+
static int max8925_irq_init(struct max8925_chip *chip, int irq,
struct max8925_platform_data *pdata)
{
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
- int i, ret;
- int __irq;
+ int ret;
+ struct device_node *node = chip->dev->of_node;
- if (!pdata || !pdata->irq_base) {
- dev_warn(chip->dev, "No interrupt support on IRQ base\n");
- return -EINVAL;
- }
/* clear all interrupts */
max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ1);
max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ2);
@@ -667,35 +686,30 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
mutex_init(&chip->irq_lock);
- chip->core_irq = irq;
- chip->irq_base = pdata->irq_base;
-
- /* register with genirq */
- for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
- __irq = i + chip->irq_base;
- irq_set_chip_data(__irq, chip);
- irq_set_chip_and_handler(__irq, &max8925_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#else
- irq_set_noprobe(__irq);
-#endif
- }
- if (!irq) {
- dev_warn(chip->dev, "No interrupt support on core IRQ\n");
- goto tsc_irq;
+ chip->irq_base = irq_alloc_descs(-1, 0, MAX8925_NR_IRQS, 0);
+ if (chip->irq_base < 0) {
+ dev_err(chip->dev, "Failed to allocate interrupts, ret:%d\n",
+ chip->irq_base);
+ return -EBUSY;
}
+ irq_domain_add_legacy(node, MAX8925_NR_IRQS, chip->irq_base, 0,
+ &max8925_irq_domain_ops, chip);
+
+ /* request irq handler for pmic main irq */
+ chip->core_irq = irq;
+ if (!chip->core_irq)
+ return -EBUSY;
ret = request_threaded_irq(irq, NULL, max8925_irq, flags | IRQF_ONESHOT,
"max8925", chip);
if (ret) {
dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
chip->core_irq = 0;
+ return -EBUSY;
}
-tsc_irq:
+ /* request irq handler for pmic tsc irq */
+
/* mask TSC interrupt */
max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
@@ -704,7 +718,6 @@ tsc_irq:
return 0;
}
chip->tsc_irq = pdata->tsc_irq;
-
ret = request_threaded_irq(chip->tsc_irq, NULL, max8925_tsc_irq,
flags | IRQF_ONESHOT, "max8925-tsc", chip);
if (ret) {
@@ -846,7 +859,7 @@ int max8925_device_init(struct max8925_chip *chip,
ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
ARRAY_SIZE(rtc_devs),
- &rtc_resources[0], chip->irq_base, NULL);
+ NULL, chip->irq_base, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add rtc subdev\n");
goto out;
@@ -854,7 +867,7 @@ int max8925_device_init(struct max8925_chip *chip,
ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
ARRAY_SIZE(onkey_devs),
- &onkey_resources[0], 0, NULL);
+ NULL, chip->irq_base, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add onkey subdev\n");
goto out_dev;
@@ -873,21 +886,19 @@ int max8925_device_init(struct max8925_chip *chip,
goto out_dev;
}
- if (pdata && pdata->power) {
- ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
- ARRAY_SIZE(power_devs),
- &power_supply_resources[0], 0, NULL);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add power supply "
- "subdev\n");
- goto out_dev;
- }
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
+ ARRAY_SIZE(power_devs),
+ NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to add power supply subdev, err = %d\n", ret);
+ goto out_dev;
}
if (pdata && pdata->touch) {
ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
ARRAY_SIZE(touch_devs),
- &touch_resources[0], 0, NULL);
+ NULL, chip->tsc_irq, NULL);
if (ret < 0) {
dev_err(chip->dev, "Failed to add touch subdev\n");
goto out_dev;
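The max8925 interrupt rework above drops the pdata->irq_base requirement: the driver allocates a block of descriptors itself and registers them through a legacy irq_domain, so sub-devices can be handed domain-relative IRQ resources. A rough sketch of that allocate-then-map sequence follows; NR_DEMO_IRQS, the demo_* names and the use of dummy_irq_chip in place of the driver's own irq_chip are all placeholders.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define NR_DEMO_IRQS    32      /* hypothetical interrupt count */

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
                        irq_hw_number_t hw)
{
        irq_set_chip_data(virq, d->host_data);
        /* dummy_irq_chip stands in for the driver's own irq_chip */
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_edge_irq);
        irq_set_nested_thread(virq, 1);
        return 0;
}

static const struct irq_domain_ops demo_irq_domain_ops = {
        .map    = demo_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

static int demo_irq_init(struct device *dev, void *chip)
{
        int base = irq_alloc_descs(-1, 0, NR_DEMO_IRQS, 0);

        if (base < 0)
                return base;

        /* legacy domain: hwirq n maps to Linux irq (base + n) */
        if (!irq_domain_add_legacy(dev->of_node, NR_DEMO_IRQS, base, 0,
                                   &demo_irq_domain_ops, chip))
                return -ENOMEM;

        return 0;
}
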
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 00b5b456063d..92bbebd31598 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -135,13 +135,37 @@ static const struct i2c_device_id max8925_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, max8925_id_table);
+static int max8925_dt_init(struct device_node *np, struct device *dev,
+ struct max8925_platform_data *pdata)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "maxim,tsc-irq", &pdata->tsc_irq);
+ if (ret) {
+ dev_err(dev, "Not found maxim,tsc-irq property\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int max8925_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8925_platform_data *pdata = client->dev.platform_data;
static struct max8925_chip *chip;
-
- if (!pdata) {
+ struct device_node *node = client->dev.of_node;
+
+ if (node && !pdata) {
+ /* parse DT to get platform data */
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct max8925_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (max8925_dt_init(node, &client->dev, pdata))
+ return -EINVAL;
+ } else if (!pdata) {
pr_info("%s: platform data is missing\n", __func__);
return -EINVAL;
}
@@ -203,11 +227,18 @@ static int max8925_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(max8925_pm_ops, max8925_suspend, max8925_resume);
+static const struct of_device_id max8925_dt_ids[] = {
+ { .compatible = "maxim,max8925", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max8925_dt_ids);
+
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
.owner = THIS_MODULE,
.pm = &max8925_pm_ops,
+ .of_match_table = of_match_ptr(max8925_dt_ids),
},
.probe = max8925_probe,
.remove = max8925_remove,
@@ -217,7 +248,6 @@ static struct i2c_driver max8925_driver = {
static int __init max8925_i2c_init(void)
{
int ret;
-
ret = i2c_add_driver(&max8925_driver);
if (ret != 0)
pr_err("Failed to register MAX8925 I2C driver: %d\n", ret);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 05164d7f054b..6b5edf64de2b 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
@@ -91,21 +90,23 @@
struct usbhs_hcd_omap {
+ int nports;
+ struct clk **utmi_clk;
+ struct clk **hsic60m_clk;
+ struct clk **hsic480m_clk;
+
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
- struct clk *utmi_p1_fck;
- struct clk *usbhost_p1_fck;
- struct clk *utmi_p2_fck;
- struct clk *usbhost_p2_fck;
+ struct clk *utmi_p1_gfclk;
+ struct clk *utmi_p2_gfclk;
struct clk *init_60m_fclk;
struct clk *ehci_logic_fck;
void __iomem *uhh_base;
- struct usbhs_omap_platform_data platdata;
+ struct usbhs_omap_platform_data *pdata;
u32 usbhs_rev;
- spinlock_t lock;
};
/*-------------------------------------------------------------------------*/
@@ -184,19 +185,13 @@ err_end:
static int omap_usbhs_alloc_children(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_hcd_omap *omap;
- struct ehci_hcd_omap_platform_data *ehci_data;
- struct ohci_hcd_omap_platform_data *ohci_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
struct platform_device *ehci;
struct platform_device *ohci;
struct resource *res;
struct resource resources[2];
int ret;
- omap = platform_get_drvdata(pdev);
- ehci_data = omap->platdata.ehci_data;
- ohci_data = omap->platdata.ohci_data;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ehci");
if (!res) {
dev_err(dev, "EHCI get resource IORESOURCE_MEM failed\n");
@@ -213,8 +208,8 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
}
resources[1] = *res;
- ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2, ehci_data,
- sizeof(*ehci_data), dev);
+ ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2, pdata,
+ sizeof(*pdata), dev);
if (!ehci) {
dev_err(dev, "omap_usbhs_alloc_child failed\n");
@@ -238,8 +233,8 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
}
resources[1] = *res;
- ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2, ohci_data,
- sizeof(*ohci_data), dev);
+ ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2, pdata,
+ sizeof(*pdata), dev);
if (!ohci) {
dev_err(dev, "omap_usbhs_alloc_child failed\n");
ret = -ENOMEM;
@@ -278,31 +273,52 @@ static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i, r;
dev_dbg(dev, "usbhs_runtime_resume\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
omap_tll_enable();
- spin_lock_irqsave(&omap->lock, flags);
- if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ if (!IS_ERR(omap->ehci_logic_fck))
clk_enable(omap->ehci_logic_fck);
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbhost_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbhost_p2_fck);
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_EHCI_PORT_MODE_HSIC:
+ if (!IS_ERR(omap->hsic60m_clk[i])) {
+ r = clk_enable(omap->hsic60m_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d hsic60m clk:%d\n",
+ i, r);
+ }
+ }
- spin_unlock_irqrestore(&omap->lock, flags);
+ if (!IS_ERR(omap->hsic480m_clk[i])) {
+ r = clk_enable(omap->hsic480m_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d hsic480m clk:%d\n",
+ i, r);
+ }
+ }
+ /* Fall through as HSIC mode needs utmi_clk */
+
+ case OMAP_EHCI_PORT_MODE_TLL:
+ if (!IS_ERR(omap->utmi_clk[i])) {
+ r = clk_enable(omap->utmi_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Can't enable port %d clk : %d\n",
+ i, r);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
@@ -310,51 +326,122 @@ static int usbhs_runtime_resume(struct device *dev)
static int usbhs_runtime_suspend(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
dev_dbg(dev, "usbhs_runtime_suspend\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&omap->lock, flags);
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_EHCI_PORT_MODE_HSIC:
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_disable(omap->hsic60m_clk[i]);
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(omap->usbhost_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(omap->usbhost_p2_fck);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_disable(omap->hsic480m_clk[i]);
+ /* Fall through as utmi_clks were used in HSIC mode */
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
+ case OMAP_EHCI_PORT_MODE_TLL:
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_disable(omap->utmi_clk[i]);
+ break;
+ default:
+ break;
+ }
+ }
- if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ if (!IS_ERR(omap->ehci_logic_fck))
clk_disable(omap->ehci_logic_fck);
- spin_unlock_irqrestore(&omap->lock, flags);
omap_tll_disable();
return 0;
}
+static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
+ unsigned reg)
+{
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
+
+ for (i = 0; i < omap->nports; i++) {
+ switch (pdata->port_mode[i]) {
+ case OMAP_USBHS_PORT_MODE_UNUSED:
+ reg &= ~(OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS << i);
+ break;
+ case OMAP_EHCI_PORT_MODE_PHY:
+ if (pdata->single_ulpi_bypass)
+ break;
+
+ if (i == 0)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg &= ~(OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS
+ << (i-1));
+ break;
+ default:
+ if (pdata->single_ulpi_bypass)
+ break;
+
+ if (i == 0)
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS
+ << (i-1);
+ break;
+ }
+ }
+
+ if (pdata->single_ulpi_bypass) {
+ /* bypass ULPI only if none of the ports use PHY mode */
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(pdata->port_mode[i])) {
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ break;
+ }
+ }
+ }
+
+ return reg;
+}
+
+static unsigned omap_usbhs_rev2_hostconfig(struct usbhs_hcd_omap *omap,
+ unsigned reg)
+{
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
+ int i;
+
+ for (i = 0; i < omap->nports; i++) {
+ /* Clear port mode fields for PHY mode */
+ reg &= ~(OMAP4_P1_MODE_CLEAR << 2 * i);
+
+ if (is_ehci_tll_mode(pdata->port_mode[i]) ||
+ (is_ohci_port(pdata->port_mode[i])))
+ reg |= OMAP4_P1_MODE_TLL << 2 * i;
+ else if (is_ehci_hsic_mode(pdata->port_mode[i]))
+ reg |= OMAP4_P1_MODE_HSIC << 2 * i;
+ }
+
+ return reg;
+}
+
static void omap_usbhs_init(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_request_one(pdata->reset_gpio_port[0],
GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_request_one(pdata->reset_gpio_port[1],
GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
/* Hold the PHY in RESET for enough time till DIR is high */
@@ -362,9 +449,6 @@ static void omap_usbhs_init(struct device *dev)
}
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&omap->lock, flags);
- omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
@@ -374,89 +458,51 @@ static void omap_usbhs_init(struct device *dev)
reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK;
reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
- if (is_omap_usbhs_rev1(omap)) {
- if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- /* Bypass the TLL module for PHY mode operation */
- if (pdata->single_ulpi_bypass) {
- dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
- if (is_ehci_phy_mode(pdata->port_mode[0]) ||
- is_ehci_phy_mode(pdata->port_mode[1]) ||
- is_ehci_phy_mode(pdata->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(dev, "OMAP3 ES version > ES2.1\n");
- if (is_ehci_phy_mode(pdata->port_mode[0]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- if (is_ehci_phy_mode(pdata->port_mode[1]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- if (is_ehci_phy_mode(pdata->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- }
- } else if (is_omap_usbhs_rev2(omap)) {
- /* Clear port mode fields for PHY mode*/
- reg &= ~OMAP4_P1_MODE_CLEAR;
- reg &= ~OMAP4_P2_MODE_CLEAR;
+ switch (omap->usbhs_rev) {
+ case OMAP_USBHS_REV1:
+ reg = omap_usbhs_rev1_hostconfig(omap, reg);
+ break;
- if (is_ehci_tll_mode(pdata->port_mode[0]) ||
- (is_ohci_port(pdata->port_mode[0])))
- reg |= OMAP4_P1_MODE_TLL;
- else if (is_ehci_hsic_mode(pdata->port_mode[0]))
- reg |= OMAP4_P1_MODE_HSIC;
+ case OMAP_USBHS_REV2:
+ reg = omap_usbhs_rev2_hostconfig(omap, reg);
+ break;
- if (is_ehci_tll_mode(pdata->port_mode[1]) ||
- (is_ohci_port(pdata->port_mode[1])))
- reg |= OMAP4_P2_MODE_TLL;
- else if (is_ehci_hsic_mode(pdata->port_mode[1]))
- reg |= OMAP4_P2_MODE_HSIC;
+ default: /* newer revisions */
+ reg = omap_usbhs_rev2_hostconfig(omap, reg);
+ break;
}
usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
- spin_unlock_irqrestore(&omap->lock, flags);
-
pm_runtime_put_sync(dev);
- if (pdata->ehci_data->phy_reset) {
+ if (pdata->phy_reset) {
/* Hold the PHY in RESET for enough time till
* PHY is settled and ready
*/
udelay(10);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
gpio_set_value_cansleep
- (pdata->ehci_data->reset_gpio_port[0], 1);
+ (pdata->reset_gpio_port[0], 1);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
gpio_set_value_cansleep
- (pdata->ehci_data->reset_gpio_port[1], 1);
+ (pdata->reset_gpio_port[1], 1);
}
}
static void omap_usbhs_deinit(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ struct usbhs_omap_platform_data *pdata = omap->pdata;
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_free(pdata->reset_gpio_port[0]);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_free(pdata->reset_gpio_port[1]);
}
}
@@ -474,137 +520,185 @@ static int usbhs_omap_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
int i;
+ bool need_logic_fck;
if (!pdata) {
dev_err(dev, "Missing platform data\n");
- ret = -ENOMEM;
- goto end_probe;
+ return -ENODEV;
}
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end_probe;
+ return -ENOMEM;
}
- spin_lock_init(&omap->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- omap->platdata.port_mode[i] = pdata->port_mode[i];
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ omap->uhh_base = devm_request_and_ioremap(dev, res);
+ if (!omap->uhh_base) {
+ dev_err(dev, "Resource request/ioremap failed\n");
+ return -EADDRNOTAVAIL;
+ }
- omap->platdata.ehci_data = pdata->ehci_data;
- omap->platdata.ohci_data = pdata->ohci_data;
+ omap->pdata = pdata;
pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, omap);
+ pm_runtime_get_sync(dev);
+
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
- is_ehci_hsic_mode(i)) {
- omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
- if (IS_ERR(omap->ehci_logic_fck)) {
- ret = PTR_ERR(omap->ehci_logic_fck);
- dev_warn(dev, "ehci_logic_fck failed:%d\n",
- ret);
- }
+ /* we need to call runtime suspend before we update omap->nports
+ * to prevent unbalanced clk_disable()
+ */
+ pm_runtime_put_sync(dev);
+
+ /*
+ * If platform data supplies nports then use that,
+ * else derive the number of ports from the USBHS revision
+ */
+ if (pdata->nports) {
+ omap->nports = pdata->nports;
+ } else {
+ switch (omap->usbhs_rev) {
+ case OMAP_USBHS_REV1:
+ omap->nports = 3;
+ break;
+ case OMAP_USBHS_REV2:
+ omap->nports = 2;
+ break;
+ default:
+ omap->nports = OMAP3_HS_USB_PORTS;
+ dev_dbg(dev,
+ "USB HOST Rev:0x%d not recognized, assuming %d ports\n",
+ omap->usbhs_rev, omap->nports);
break;
}
+ }
- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_end;
+ i = sizeof(struct clk *) * omap->nports;
+ omap->utmi_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+ omap->hsic480m_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+ omap->hsic60m_clk = devm_kzalloc(dev, i, GFP_KERNEL);
+
+ if (!omap->utmi_clk || !omap->hsic480m_clk || !omap->hsic60m_clk) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ need_logic_fck = false;
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
+ is_ehci_hsic_mode(i))
+ need_logic_fck |= true;
+ }
+
+ omap->ehci_logic_fck = ERR_PTR(-EINVAL);
+ if (need_logic_fck) {
+ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_dbg(dev, "ehci_logic_fck failed:%d\n", ret);
+ }
+ }
+
+ omap->utmi_p1_gfclk = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_gfclk)) {
+ ret = PTR_ERR(omap->utmi_p1_gfclk);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_p1_gfclk;
+ }
+
+ omap->utmi_p2_gfclk = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_gfclk)) {
+ ret = PTR_ERR(omap->utmi_p2_gfclk);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_p2_gfclk;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
if (IS_ERR(omap->xclk60mhsp1_ck)) {
ret = PTR_ERR(omap->xclk60mhsp1_ck);
dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
+ goto err_xclk60mhsp1;
}
omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
if (IS_ERR(omap->xclk60mhsp2_ck)) {
ret = PTR_ERR(omap->xclk60mhsp2_ck);
dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(omap->usbhost_p1_fck)) {
- ret = PTR_ERR(omap->usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(omap->usbhost_p2_fck)) {
- ret = PTR_ERR(omap->usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
+ goto err_xclk60mhsp2;
}
omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
if (IS_ERR(omap->init_60m_fclk)) {
ret = PTR_ERR(omap->init_60m_fclk);
dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
+ goto err_init60m;
+ }
+
+ for (i = 0; i < omap->nports; i++) {
+ char clkname[30];
+
+ /* clock names are indexed from 1 */
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_utmi_p%d_clk", i + 1);
+
+ /* If a clock is not found we won't bail out as not all
+ * platforms have all clocks and we can function without
+ * them
+ */
+ omap->utmi_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->utmi_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->utmi_clk[i]));
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_hsic480m_p%d_clk", i + 1);
+ omap->hsic480m_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic480m_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->hsic480m_clk[i]));
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_host_hs_hsic60m_p%d_clk", i + 1);
+ omap->hsic60m_clk[i] = clk_get(dev, clkname);
+ if (IS_ERR(omap->hsic60m_clk[i]))
+ dev_dbg(dev, "Failed to get clock : %s : %ld\n",
+ clkname, PTR_ERR(omap->hsic60m_clk[i]));
}
if (is_ehci_phy_mode(pdata->port_mode[0])) {
- /* for OMAP3 , the clk set paretn fails */
- ret = clk_set_parent(omap->utmi_p1_fck,
+ /* for OMAP3, clk_set_parent fails */
+ ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->xclk60mhsp1_ck);
if (ret != 0)
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "xclk60mhsp1_ck set parent failed: %d\n",
+ ret);
} else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
+ ret = clk_set_parent(omap->utmi_p1_gfclk,
omap->init_60m_fclk);
if (ret != 0)
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "P0 init_60m_fclk set parent failed: %d\n",
+ ret);
}
if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
+ ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->xclk60mhsp2_ck);
if (ret != 0)
- dev_err(dev, "xclk60mhsp2_ck set parent"
- "failed error:%d\n", ret);
+ dev_dbg(dev, "xclk60mhsp2_ck set parent failed: %d\n",
+ ret);
} else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
+ ret = clk_set_parent(omap->utmi_p2_gfclk,
omap->init_60m_fclk);
if (ret != 0)
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_init_60m_fclk;
- }
-
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_init_60m_fclk;
+ dev_dbg(dev, "P1 init_60m_fclk set parent failed: %d\n",
+ ret);
}
- platform_set_drvdata(pdev, omap);
-
omap_usbhs_init(dev);
ret = omap_usbhs_alloc_children(pdev);
if (ret) {
@@ -612,39 +706,41 @@ static int usbhs_omap_probe(struct platform_device *pdev)
goto err_alloc;
}
- goto end_probe;
+ return 0;
err_alloc:
omap_usbhs_deinit(&pdev->dev);
- iounmap(omap->uhh_base);
-
-err_init_60m_fclk:
- clk_put(omap->init_60m_fclk);
-err_usbhost_p2_fck:
- clk_put(omap->usbhost_p2_fck);
+ for (i = 0; i < omap->nports; i++) {
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_put(omap->utmi_clk[i]);
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_put(omap->hsic60m_clk[i]);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_put(omap->hsic480m_clk[i]);
+ }
-err_usbhost_p1_fck:
- clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->init_60m_fclk);
-err_xclk60mhsp2_ck:
+err_init60m:
clk_put(omap->xclk60mhsp2_ck);
-err_utmi_p2_fck:
- clk_put(omap->utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
+err_xclk60mhsp2:
clk_put(omap->xclk60mhsp1_ck);
-err_utmi_p1_fck:
- clk_put(omap->utmi_p1_fck);
+err_xclk60mhsp1:
+ clk_put(omap->utmi_p2_gfclk);
-err_end:
- clk_put(omap->ehci_logic_fck);
+err_p2_gfclk:
+ clk_put(omap->utmi_p1_gfclk);
+
+err_p1_gfclk:
+ if (!IS_ERR(omap->ehci_logic_fck))
+ clk_put(omap->ehci_logic_fck);
+
+err_mem:
pm_runtime_disable(dev);
- kfree(omap);
-end_probe:
return ret;
}
@@ -657,19 +753,29 @@ end_probe:
static int usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+ int i;
omap_usbhs_deinit(&pdev->dev);
- iounmap(omap->uhh_base);
+
+ for (i = 0; i < omap->nports; i++) {
+ if (!IS_ERR(omap->utmi_clk[i]))
+ clk_put(omap->utmi_clk[i]);
+ if (!IS_ERR(omap->hsic60m_clk[i]))
+ clk_put(omap->hsic60m_clk[i]);
+ if (!IS_ERR(omap->hsic480m_clk[i]))
+ clk_put(omap->hsic480m_clk[i]);
+ }
+
clk_put(omap->init_60m_fclk);
- clk_put(omap->usbhost_p2_fck);
- clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->utmi_p1_gfclk);
+ clk_put(omap->utmi_p2_gfclk);
clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
- clk_put(omap->utmi_p1_fck);
- clk_put(omap->ehci_logic_fck);
+
+ if (!IS_ERR(omap->ehci_logic_fck))
+ clk_put(omap->ehci_logic_fck);
+
pm_runtime_disable(&pdev->dev);
- kfree(omap);
return 0;
}
@@ -685,7 +791,7 @@ static struct platform_driver usbhs_omap_driver = {
.owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
},
- .remove = __exit_p(usbhs_omap_remove),
+ .remove = usbhs_omap_remove,
};
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index eb869153206d..0aef1a768880 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -54,10 +54,13 @@
#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
+#define OMAP_TLL_CHANNEL_CONF_DRVVBUS (1 << 16)
+#define OMAP_TLL_CHANNEL_CONF_CHRGVBUS (1 << 15)
#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI (2 << 1)
#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
@@ -92,21 +95,25 @@
#define OMAP_USBTLL_REV1 0x00000015 /* OMAP3 */
#define OMAP_USBTLL_REV2 0x00000018 /* OMAP 3630 */
#define OMAP_USBTLL_REV3 0x00000004 /* OMAP4 */
+#define OMAP_USBTLL_REV4 0x00000006 /* OMAP5 */
#define is_ehci_tll_mode(x) (x == OMAP_EHCI_PORT_MODE_TLL)
+/* only PHY and UNUSED modes don't need TLL */
+#define omap_usb_mode_needs_tll(x) ((x) != OMAP_USBHS_PORT_MODE_UNUSED &&\
+ (x) != OMAP_EHCI_PORT_MODE_PHY)
+
struct usbtll_omap {
- struct clk *usbtll_p1_fck;
- struct clk *usbtll_p2_fck;
- struct usbtll_omap_platform_data platdata;
- /* secure the register updates */
- spinlock_t lock;
+ int nch; /* num. of channels */
+ struct usbhs_omap_platform_data *pdata;
+ struct clk **ch_clk;
};
/*-------------------------------------------------------------------------*/
-const char usbtll_driver_name[] = USBTLL_DRIVER_NAME;
-struct platform_device *tll_pdev;
+static const char usbtll_driver_name[] = USBTLL_DRIVER_NAME;
+static struct device *tll_dev;
+static DEFINE_SPINLOCK(tll_lock); /* serialize access to tll_dev */
/*-------------------------------------------------------------------------*/
@@ -203,84 +210,84 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbtll_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
void __iomem *base;
struct resource *res;
struct usbtll_omap *tll;
unsigned reg;
- unsigned long flags;
int ret = 0;
- int i, ver, count;
+ int i, ver;
+ bool needs_tll;
dev_dbg(dev, "starting TI HSUSB TLL Controller\n");
- tll = kzalloc(sizeof(struct usbtll_omap), GFP_KERNEL);
+ tll = devm_kzalloc(dev, sizeof(struct usbtll_omap), GFP_KERNEL);
if (!tll) {
dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end;
+ return -ENOMEM;
}
- spin_lock_init(&tll->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- tll->platdata.port_mode[i] = pdata->port_mode[i];
-
- tll->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
- if (IS_ERR(tll->usbtll_p1_fck)) {
- ret = PTR_ERR(tll->usbtll_p1_fck);
- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
- goto err_tll;
+ if (!pdata) {
+ dev_err(dev, "Platform data missing\n");
+ return -ENODEV;
}
- tll->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
- if (IS_ERR(tll->usbtll_p2_fck)) {
- ret = PTR_ERR(tll->usbtll_p2_fck);
- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
- goto err_usbtll_p1_fck;
- }
+ tll->pdata = pdata;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "usb tll get resource failed\n");
- ret = -ENODEV;
- goto err_usbtll_p2_fck;
- }
-
- base = ioremap(res->start, resource_size(res));
+ base = devm_request_and_ioremap(dev, res);
if (!base) {
- dev_err(dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_usbtll_p2_fck;
+ ret = -EADDRNOTAVAIL;
+ dev_err(dev, "Resource request/ioremap failed:%d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, tll);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&tll->lock, flags);
-
ver = usbtll_read(base, OMAP_USBTLL_REVISION);
switch (ver) {
case OMAP_USBTLL_REV1:
- case OMAP_USBTLL_REV2:
- count = OMAP_TLL_CHANNEL_COUNT;
+ case OMAP_USBTLL_REV4:
+ tll->nch = OMAP_TLL_CHANNEL_COUNT;
break;
+ case OMAP_USBTLL_REV2:
case OMAP_USBTLL_REV3:
- count = OMAP_REV2_TLL_CHANNEL_COUNT;
+ tll->nch = OMAP_REV2_TLL_CHANNEL_COUNT;
break;
default:
- dev_err(dev, "TLL version failed\n");
- ret = -ENODEV;
- goto err_ioremap;
+ tll->nch = OMAP_TLL_CHANNEL_COUNT;
+ dev_dbg(dev,
+ "USB TLL Rev : 0x%x not recognized, assuming %d channels\n",
+ ver, tll->nch);
+ break;
}
- if (is_ehci_tll_mode(pdata->port_mode[0]) ||
- is_ehci_tll_mode(pdata->port_mode[1]) ||
- is_ehci_tll_mode(pdata->port_mode[2]) ||
- is_ohci_port(pdata->port_mode[0]) ||
- is_ohci_port(pdata->port_mode[1]) ||
- is_ohci_port(pdata->port_mode[2])) {
+ tll->ch_clk = devm_kzalloc(dev, sizeof(struct clk * [tll->nch]),
+ GFP_KERNEL);
+ if (!tll->ch_clk) {
+ ret = -ENOMEM;
+ dev_err(dev, "Couldn't allocate memory for channel clocks\n");
+ goto err_clk_alloc;
+ }
+
+ for (i = 0; i < tll->nch; i++) {
+ char clkname[] = "usb_tll_hs_usb_chx_clk";
+
+ snprintf(clkname, sizeof(clkname),
+ "usb_tll_hs_usb_ch%d_clk", i);
+ tll->ch_clk[i] = clk_get(dev, clkname);
+
+ if (IS_ERR(tll->ch_clk[i]))
+ dev_dbg(dev, "can't get clock : %s\n", clkname);
+ }
+
+ needs_tll = false;
+ for (i = 0; i < tll->nch; i++)
+ needs_tll |= omap_usb_mode_needs_tll(pdata->port_mode[i]);
+
+ if (needs_tll) {
/* Program Common TLL register */
reg = usbtll_read(base, OMAP_TLL_SHARED_CONF);
@@ -292,7 +299,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
usbtll_write(base, OMAP_TLL_SHARED_CONF, reg);
/* Enable channels now */
- for (i = 0; i < count; i++) {
+ for (i = 0; i < tll->nch; i++) {
reg = usbtll_read(base, OMAP_TLL_CHANNEL_CONF(i));
if (is_ohci_port(pdata->port_mode[i])) {
@@ -308,6 +315,15 @@ static int usbtll_omap_probe(struct platform_device *pdev)
reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ } else if (pdata->port_mode[i] ==
+ OMAP_EHCI_PORT_MODE_HSIC) {
+ /*
+ * HSIC Mode requires UTMI port configurations
+ */
+ reg |= OMAP_TLL_CHANNEL_CONF_DRVVBUS
+ | OMAP_TLL_CHANNEL_CONF_CHRGVBUS
+ | OMAP_TLL_CHANNEL_CONF_MODE_TRANSPARENT_UTMI
+ | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
} else {
continue;
}
@@ -320,25 +336,18 @@ static int usbtll_omap_probe(struct platform_device *pdev)
}
}
-err_ioremap:
- spin_unlock_irqrestore(&tll->lock, flags);
- iounmap(base);
pm_runtime_put_sync(dev);
- tll_pdev = pdev;
- if (!ret)
- goto end;
- pm_runtime_disable(dev);
+ /* only after this can omap_tll_enable/disable work */
+ spin_lock(&tll_lock);
+ tll_dev = dev;
+ spin_unlock(&tll_lock);
-err_usbtll_p2_fck:
- clk_put(tll->usbtll_p2_fck);
-
-err_usbtll_p1_fck:
- clk_put(tll->usbtll_p1_fck);
+ return 0;
-err_tll:
- kfree(tll);
+err_clk_alloc:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
-end:
return ret;
}
@@ -351,36 +360,42 @@ end:
static int usbtll_omap_remove(struct platform_device *pdev)
{
struct usbtll_omap *tll = platform_get_drvdata(pdev);
+ int i;
+
+ spin_lock(&tll_lock);
+ tll_dev = NULL;
+ spin_unlock(&tll_lock);
+
+ for (i = 0; i < tll->nch; i++)
+ if (!IS_ERR(tll->ch_clk[i]))
+ clk_put(tll->ch_clk[i]);
- clk_put(tll->usbtll_p2_fck);
- clk_put(tll->usbtll_p1_fck);
pm_runtime_disable(&pdev->dev);
- kfree(tll);
return 0;
}
static int usbtll_runtime_resume(struct device *dev)
{
struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbtll_omap_platform_data *pdata = &tll->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = tll->pdata;
+ int i;
dev_dbg(dev, "usbtll_runtime_resume\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&tll->lock, flags);
+ for (i = 0; i < tll->nch; i++) {
+ if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ int r;
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(tll->usbtll_p1_fck);
-
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(tll->usbtll_p2_fck);
+ if (IS_ERR(tll->ch_clk[i]))
+ continue;
- spin_unlock_irqrestore(&tll->lock, flags);
+ r = clk_enable(tll->ch_clk[i]);
+ if (r) {
+ dev_err(dev,
+ "Error enabling ch %d clock: %d\n", i, r);
+ }
+ }
+ }
return 0;
}
@@ -388,26 +403,18 @@ static int usbtll_runtime_resume(struct device *dev)
static int usbtll_runtime_suspend(struct device *dev)
{
struct usbtll_omap *tll = dev_get_drvdata(dev);
- struct usbtll_omap_platform_data *pdata = &tll->platdata;
- unsigned long flags;
+ struct usbhs_omap_platform_data *pdata = tll->pdata;
+ int i;
dev_dbg(dev, "usbtll_runtime_suspend\n");
- if (!pdata) {
- dev_dbg(dev, "missing platform_data\n");
- return -ENODEV;
+ for (i = 0; i < tll->nch; i++) {
+ if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ if (!IS_ERR(tll->ch_clk[i]))
+ clk_disable(tll->ch_clk[i]);
+ }
}
- spin_lock_irqsave(&tll->lock, flags);
-
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(tll->usbtll_p1_fck);
-
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(tll->usbtll_p2_fck);
-
- spin_unlock_irqrestore(&tll->lock, flags);
-
return 0;
}
@@ -429,21 +436,39 @@ static struct platform_driver usbtll_omap_driver = {
int omap_tll_enable(void)
{
- if (!tll_pdev) {
- pr_err("missing omap usbhs tll platform_data\n");
- return -ENODEV;
+ int ret;
+
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ pr_err("%s: OMAP USB TLL not initialized\n", __func__);
+ ret = -ENODEV;
+ } else {
+ ret = pm_runtime_get_sync(tll_dev);
}
- return pm_runtime_get_sync(&tll_pdev->dev);
+
+ spin_unlock(&tll_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(omap_tll_enable);
int omap_tll_disable(void)
{
- if (!tll_pdev) {
- pr_err("missing omap usbhs tll platform_data\n");
- return -ENODEV;
+ int ret;
+
+ spin_lock(&tll_lock);
+
+ if (!tll_dev) {
+ pr_err("%s: OMAP USB TLL not initialized\n", __func__);
+ ret = -ENODEV;
+ } else {
+ ret = pm_runtime_put_sync(tll_dev);
}
- return pm_runtime_put_sync(&tll_pdev->dev);
+
+ spin_unlock(&tll_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(omap_tll_disable);
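omap_tll_enable() and omap_tll_disable() above now consult a file-scope tll_dev pointer under a spinlock, so callers see -ENODEV until the TLL driver has probed (and again once it is removed). The sketch below restates just that guard pattern with hypothetical demo_* names; the runtime-PM call the real helpers forward to is left out to keep the fragment self-contained.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static struct device *demo_core_dev;            /* set when the core probes */
static DEFINE_SPINLOCK(demo_core_lock);         /* serializes demo_core_dev */

/* Called from the core driver's probe (dev) and remove (NULL). */
static void demo_core_register(struct device *dev)
{
        spin_lock(&demo_core_lock);
        demo_core_dev = dev;
        spin_unlock(&demo_core_lock);
}

/* Helper other drivers may call at any time. */
int demo_core_poke(void)
{
        int ret = 0;

        spin_lock(&demo_core_lock);
        if (!demo_core_dev)
                ret = -ENODEV;          /* core not probed, or already gone */
        else
                dev_dbg(demo_core_dev, "poked\n");
        spin_unlock(&demo_core_lock);

        return ret;
}
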
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 6ffd7a2affdc..bbdbc50a3cca 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -39,6 +39,14 @@ enum palmas_ids {
PALMAS_USB_ID,
};
+static struct resource palmas_rtc_resources[] = {
+ {
+ .start = PALMAS_RTC_ALARM_IRQ,
+ .end = PALMAS_RTC_ALARM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static const struct mfd_cell palmas_children[] = {
{
.name = "palmas-pmic",
@@ -59,6 +67,8 @@ static const struct mfd_cell palmas_children[] = {
{
.name = "palmas-rtc",
.id = PALMAS_RTC_ID,
+ .resources = &palmas_rtc_resources[0],
+ .num_resources = ARRAY_SIZE(palmas_rtc_resources),
},
{
.name = "palmas-pwrbutton",
@@ -456,8 +466,8 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
ret = mfd_add_devices(palmas->dev, -1,
children, ARRAY_SIZE(palmas_children),
- NULL, regmap_irq_chip_get_base(palmas->irq_data),
- NULL);
+ NULL, 0,
+ regmap_irq_get_domain(palmas->irq_data));
kfree(children);
if (ret < 0)
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 3d3b4addf81a..2a2d31687b72 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -115,14 +115,24 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
u8 mask, val;
+ int err;
mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
- if (voltage == OUTPUT_3V3)
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
- else if (voltage == OUTPUT_1V8)
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
- else
+ } else {
return -EINVAL;
+ }
return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
}
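The rtl8411, rts5209 and rts5229 hunks in this series all add the same step to switch_output_voltage(): program the SD30 drive selector for the target signalling level first, bail out on any write error, and only then change the pad voltage itself. The sketch below shows that ordering in isolation; demo_switch_output_voltage() and the stubbed demo_set_pad_voltage() are illustrations, not code from any of these chips.

#include <linux/errno.h>
#include <linux/mfd/rtsx_pci.h>

/* Stub for the chip-specific pad/LDO switch, so the sketch stands alone. */
static int demo_set_pad_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
        return 0;
}

static int demo_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
        u8 drive;
        int err;

        if (voltage == OUTPUT_3V3)
                drive = DRIVER_TYPE_D;
        else if (voltage == OUTPUT_1V8)
                drive = DRIVER_TYPE_B;
        else
                return -EINVAL;

        /* 1. drive strength for the new signalling level */
        err = rtsx_pci_write_register(pcr, SD30_DRIVE_SEL, 0x07, drive);
        if (err < 0)
                return err;

        /* 2. only then switch the pad voltage itself */
        return demo_set_pad_voltage(pcr, voltage);
}
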
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index 98fe0f39463e..ec78d9fb0879 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -149,10 +149,18 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
int err;
if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
new file mode 100644
index 000000000000..fc831dcb1480
--- /dev/null
+++ b/drivers/mfd/rts5227.c
@@ -0,0 +1,234 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ *
+ * Roger Tseng <rogerable@realtek.com>
+ * No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ u16 cap;
+
+ rtsx_pci_init_cmd(pcr);
+
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Switch LDO3318 source from DV33 to card_3v3 */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+ /* Configure LTR */
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
+ if (cap & PCI_EXP_LTR_EN)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
+ /* Configure OBFF */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
+ /* Configure force_clock_req
+ * Maybe we should give 0xFF03 a proper name
+ */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 0xFF03, 0x08, 0x08);
+ /* Correct driving */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CLK_DRIVE_SEL, 0xFF, 0x96);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_CMD_DRIVE_SEL, 0xFF, 0x96);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD30_DAT_DRIVE_SEL, 0xFF, 0x96);
+
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
+{
+ /* Optimize RX sensitivity */
+ return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
+}
+
+static int rts5227_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5227_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x02);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* To avoid too large in-rush current */
+ udelay(150);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x06);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK | PMOS_STRG_MASK,
+ SD_POWER_OFF | PMOS_STRG_400mA);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0X00);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u8 drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ drive_sel = 0x96;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
+ if (err < 0)
+ return err;
+ drive_sel = 0xB3;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, drive_sel);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, drive_sel);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, drive_sel);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static const struct pcr_ops rts5227_pcr_ops = {
+ .extra_init_hw = rts5227_extra_init_hw,
+ .optimize_phy = rts5227_optimize_phy,
+ .turn_on_led = rts5227_turn_on_led,
+ .turn_off_led = rts5227_turn_off_led,
+ .enable_auto_blink = rts5227_enable_auto_blink,
+ .disable_auto_blink = rts5227_disable_auto_blink,
+ .card_power_on = rts5227_card_power_on,
+ .card_power_off = rts5227_card_power_off,
+ .switch_output_voltage = rts5227_switch_output_voltage,
+ .cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5227_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5227_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+void rts5227_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+ pcr->ops = &rts5227_pcr_ops;
+
+ pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index 29d889cbb9c5..58af4dbe3586 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -119,10 +119,18 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
int err;
if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 9fc57009e228..2f12cc13489a 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -55,6 +55,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -325,7 +326,6 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
val = ((u64)addr << 32) | ((u64)len << 12) | option;
put_unaligned_le64(val, ptr);
- ptr++;
pcr->sgi++;
}
@@ -591,8 +591,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
- u8 N, min_N, max_N, clk_divider;
- u8 mcu_cnt, div, max_div;
+ u8 n, clk_divider, mcu_cnt, div;
u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
@@ -616,10 +615,6 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
card_clock /= 1000000;
dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock);
- min_N = 80;
- max_N = 208;
- max_div = CLK_DIV_8;
-
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
@@ -631,30 +626,30 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return 0;
if (pcr->ops->conv_clk_and_div_n)
- N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
- N = (u8)(clk - 2);
- if ((clk <= 2) || (N > max_N))
+ n = (u8)(clk - 2);
+ if ((clk <= 2) || (n > MAX_DIV_N_PCR))
return -EINVAL;
mcu_cnt = (u8)(125/clk + 3);
if (mcu_cnt > 15)
mcu_cnt = 15;
- /* Make sure that the SSC clock div_n is equal or greater than min_N */
+ /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
div = CLK_DIV_1;
- while ((N < min_N) && (div < max_div)) {
+ while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
if (pcr->ops->conv_clk_and_div_n) {
- int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
- N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
- N = (N + 2) * 2 - 2;
+ n = (n + 2) * 2 - 2;
}
div++;
}
- dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
+ dev_dbg(&(pcr->pci->dev), "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
@@ -671,7 +666,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
@@ -713,6 +708,25 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
+{
+ unsigned int cd_mask[] = {
+ [RTSX_SD_CARD] = SD_EXIST,
+ [RTSX_MS_CARD] = MS_EXIST
+ };
+
+ if (!pcr->ms_pmos) {
+ /* When using single PMOS, accessing card is not permitted
+ * if the existing card is not the designated one.
+ */
+ if (pcr->card_exist & (~cd_mask[card]))
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
+
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
if (pcr->ops->switch_output_voltage)
@@ -758,7 +772,7 @@ static void rtsx_pci_card_detect(struct work_struct *work)
struct delayed_work *dwork;
struct rtsx_pcr *pcr;
unsigned long flags;
- unsigned int card_detect = 0;
+ unsigned int card_detect = 0, card_inserted, card_removed;
u32 irq_status;
dwork = to_delayed_work(work);
@@ -766,25 +780,35 @@ static void rtsx_pci_card_detect(struct work_struct *work)
dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+ mutex_lock(&pcr->pcr_mutex);
spin_lock_irqsave(&pcr->lock, flags);
irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);
- if (pcr->card_inserted || pcr->card_removed) {
+ irq_status &= CARD_EXIST;
+ card_inserted = pcr->card_inserted & irq_status;
+ card_removed = pcr->card_removed;
+ pcr->card_inserted = 0;
+ pcr->card_removed = 0;
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ if (card_inserted || card_removed) {
dev_dbg(&(pcr->pci->dev),
"card_inserted: 0x%x, card_removed: 0x%x\n",
- pcr->card_inserted, pcr->card_removed);
+ card_inserted, card_removed);
if (pcr->ops->cd_deglitch)
- pcr->card_inserted = pcr->ops->cd_deglitch(pcr);
+ card_inserted = pcr->ops->cd_deglitch(pcr);
+
+ card_detect = card_inserted | card_removed;
- card_detect = pcr->card_inserted | pcr->card_removed;
- pcr->card_inserted = 0;
- pcr->card_removed = 0;
+ pcr->card_exist |= card_inserted;
+ pcr->card_exist &= ~card_removed;
}
- spin_unlock_irqrestore(&pcr->lock, flags);
+ mutex_unlock(&pcr->pcr_mutex);
if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
pcr->slots[RTSX_SD_CARD].card_event(
@@ -836,10 +860,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
- if (pcr->card_inserted || pcr->card_removed)
- schedule_delayed_work(&pcr->carddet_work,
- msecs_to_jiffies(200));
-
if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
pcr->trans_result = TRANS_RESULT_FAIL;
@@ -852,6 +872,10 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
+ if (pcr->card_inserted || pcr->card_removed)
+ schedule_delayed_work(&pcr->carddet_work,
+ msecs_to_jiffies(200));
+
spin_unlock(&pcr->lock);
return IRQ_HANDLED;
}
@@ -974,6 +998,14 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
+ /* No CD interrupt is generated if the driver is probed with a card
+ * already inserted, so we need to initialize pcr->card_exist here.
+ */
+ if (pcr->ops->cd_deglitch)
+ pcr->card_exist = pcr->ops->cd_deglitch(pcr);
+ else
+ pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
+
return 0;
}
@@ -997,6 +1029,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5289:
rtl8411_init_params(pcr);
break;
+
+ case 0x5227:
+ rts5227_init_params(pcr);
+ break;
}
dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1030,6 +1066,10 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ return ret;
+
ret = pci_enable_device(pcidev);
if (ret)
return ret;
@@ -1051,15 +1091,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
}
handle->pcr = pcr;
- if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto free_handle;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&rtsx_pci_lock);
- ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
+ ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ pcr->id = ret;
spin_unlock(&rtsx_pci_lock);
- if (ret)
+ idr_preload_end();
+ if (ret < 0)
goto free_handle;
pcr->pci = pcidev;
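
For reference, the idr_preload()/idr_alloc() sequence introduced above (the same conversion appears again in the c2port and tifm_core hunks below) can be sketched roughly as follows; the example_idr, example_lock and example_alloc_id names are placeholders for illustration and are not taken from this patch.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);		/* placeholder IDR, not part of this patch */
static DEFINE_SPINLOCK(example_lock);	/* placeholder lock protecting the IDR */

/* Allocate an ID for @obj; returns the new ID (>= 0) or a negative errno. */
static int example_alloc_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while sleeping is still allowed */
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;
}

An ID obtained this way is later released with idr_remove(&example_idr, id) under the same lock, as the error paths in the hunks above do.
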
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 12462c1df1a9..2b3ab8a04823 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -25,8 +25,12 @@
#include <linux/mfd/rtsx_pci.h>
+#define MIN_DIV_N_PCR 80
+#define MAX_DIV_N_PCR 208
+
void rts5209_init_params(struct rtsx_pcr *pcr);
void rts5229_init_params(struct rtsx_pcr *pcr);
void rtl8411_init_params(struct rtsx_pcr *pcr);
+void rts5227_init_params(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 3f10591ea94e..61aea6381cdf 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -20,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
static struct platform_driver syscon_driver;
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 409afa23d5dc..5ad4b772b097 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
@@ -116,11 +117,19 @@ static const struct i2c_device_id tps6507x_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
+#ifdef CONFIG_OF
+static struct of_device_id tps6507x_of_match[] = {
+ {.compatible = "ti,tps6507x", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps6507x_of_match);
+#endif
static struct i2c_driver tps6507x_i2c_driver = {
.driver = {
.name = "tps6507x",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps6507x_of_match),
},
.probe = tps6507x_i2c_probe,
.remove = tps6507x_i2c_remove,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 8d12a8e00d9c..98edb5be85c6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -25,6 +25,8 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/err.h>
#define NUM_INT_REG 2
@@ -148,18 +150,31 @@ static const struct regmap_config tps65090_regmap_config = {
.volatile_reg = is_volatile_reg,
};
+#ifdef CONFIG_OF
+static const struct of_device_id tps65090_of_match[] = {
+ { .compatible = "ti,tps65090",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tps65090_of_match);
+#endif
+
static int tps65090_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps65090_platform_data *pdata = client->dev.platform_data;
+ int irq_base = 0;
struct tps65090 *tps65090;
int ret;
- if (!pdata) {
- dev_err(&client->dev, "tps65090 requires platform data\n");
+ if (!pdata && !client->dev.of_node) {
+ dev_err(&client->dev,
+ "tps65090 requires platform data or of_node\n");
return -EINVAL;
}
+ if (pdata)
+ irq_base = pdata->irq_base;
+
tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
if (!tps65090) {
dev_err(&client->dev, "mem alloc for tps65090 failed\n");
@@ -178,7 +193,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
if (client->irq) {
ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW, irq_base,
&tps65090_irq_chip, &tps65090->irq_data);
if (ret) {
dev_err(&client->dev,
@@ -189,7 +204,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
ARRAY_SIZE(tps65090s), NULL,
- regmap_irq_chip_get_base(tps65090->irq_data), NULL);
+ 0, regmap_irq_get_domain(tps65090->irq_data));
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
@@ -215,28 +230,6 @@ static int tps65090_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tps65090_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- if (client->irq)
- disable_irq(client->irq);
- return 0;
-}
-
-static int tps65090_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- if (client->irq)
- enable_irq(client->irq);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tps65090_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
-};
-
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
@@ -247,7 +240,7 @@ static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
- .pm = &tps65090_pm_ops,
+ .of_match_table = of_match_ptr(tps65090_of_match),
},
.probe = tps65090_i2c_probe,
.remove = tps65090_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4f3baadd0038..89ab4d970643 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -66,16 +66,6 @@
/* Triton Core internal information (BEGIN) */
-#define TWL_NUM_SLAVES 4
-
-#define SUB_CHIP_ID0 0
-#define SUB_CHIP_ID1 1
-#define SUB_CHIP_ID2 2
-#define SUB_CHIP_ID3 3
-#define SUB_CHIP_ID_INVAL 0xff
-
-#define TWL_MODULE_LAST TWL4030_MODULE_LAST
-
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -94,10 +84,7 @@
#define TWL4030_BASEADD_MADC 0x0000
#define TWL4030_BASEADD_MAIN_CHARGE 0x0074
#define TWL4030_BASEADD_PRECHARGE 0x00AA
-#define TWL4030_BASEADD_PWM0 0x00F8
-#define TWL4030_BASEADD_PWM1 0x00FB
-#define TWL4030_BASEADD_PWMA 0x00EF
-#define TWL4030_BASEADD_PWMB 0x00F1
+#define TWL4030_BASEADD_PWM 0x00F8
#define TWL4030_BASEADD_KEYPAD 0x00D2
#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */
@@ -117,7 +104,7 @@
/* subchip/slave 0 0x48 - POWER */
#define TWL6030_BASEADD_RTC 0x0000
-#define TWL6030_BASEADD_MEM 0x0017
+#define TWL6030_BASEADD_SECURED_REG 0x0017
#define TWL6030_BASEADD_PM_MASTER 0x001F
#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */
#define TWL6030_BASEADD_PM_MISC 0x00E2
@@ -132,6 +119,7 @@
#define TWL6030_BASEADD_PIH 0x00D0
#define TWL6030_BASEADD_CHARGER 0x00E0
#define TWL6025_BASEADD_CHARGER 0x00DA
+#define TWL6030_BASEADD_LED 0x00F4
/* subchip/slave 2 0x4A - DFT */
#define TWL6030_BASEADD_DIEID 0x00C0
@@ -153,33 +141,28 @@
/*----------------------------------------------------------------------*/
-/* is driver active, bound to a chip? */
-static bool inuse;
-
-/* TWL IDCODE Register value */
-static u32 twl_idcode;
-
-static unsigned int twl_id;
-unsigned int twl_rev(void)
-{
- return twl_id;
-}
-EXPORT_SYMBOL(twl_rev);
-
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
struct regmap *regmap;
};
-static struct twl_client twl_modules[TWL_NUM_SLAVES];
-
/* mapping the module id to slave id and base address */
struct twl_mapping {
unsigned char sid; /* Slave ID */
unsigned char base; /* base address */
};
-static struct twl_mapping *twl_map;
+
+struct twl_private {
+ bool ready; /* The core driver is ready to be used */
+ u32 twl_idcode; /* TWL IDCODE Register value */
+ unsigned int twl_id;
+
+ struct twl_mapping *twl_map;
+ struct twl_client *twl_modules;
+};
+
+static struct twl_private *twl_priv;
static struct twl_mapping twl4030_map[] = {
/*
@@ -188,34 +171,33 @@ static struct twl_mapping twl4030_map[] = {
* so they continue to match the order in this table.
*/
+ /* Common IPs */
{ 0, TWL4030_BASEADD_USB },
+ { 1, TWL4030_BASEADD_PIH },
+ { 2, TWL4030_BASEADD_MAIN_CHARGE },
+ { 3, TWL4030_BASEADD_PM_MASTER },
+ { 3, TWL4030_BASEADD_PM_RECEIVER },
+
+ { 3, TWL4030_BASEADD_RTC },
+ { 2, TWL4030_BASEADD_PWM },
+ { 2, TWL4030_BASEADD_LED },
+ { 3, TWL4030_BASEADD_SECURED_REG },
+
+ /* TWL4030 specific IPs */
{ 1, TWL4030_BASEADD_AUDIO_VOICE },
{ 1, TWL4030_BASEADD_GPIO },
{ 1, TWL4030_BASEADD_INTBR },
- { 1, TWL4030_BASEADD_PIH },
-
{ 1, TWL4030_BASEADD_TEST },
{ 2, TWL4030_BASEADD_KEYPAD },
+
{ 2, TWL4030_BASEADD_MADC },
{ 2, TWL4030_BASEADD_INTERRUPTS },
- { 2, TWL4030_BASEADD_LED },
-
- { 2, TWL4030_BASEADD_MAIN_CHARGE },
{ 2, TWL4030_BASEADD_PRECHARGE },
- { 2, TWL4030_BASEADD_PWM0 },
- { 2, TWL4030_BASEADD_PWM1 },
- { 2, TWL4030_BASEADD_PWMA },
-
- { 2, TWL4030_BASEADD_PWMB },
- { 2, TWL5031_BASEADD_ACCESSORY },
- { 2, TWL5031_BASEADD_INTERRUPTS },
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
- { 3, TWL4030_BASEADD_PM_MASTER },
- { 3, TWL4030_BASEADD_PM_RECEIVER },
- { 3, TWL4030_BASEADD_RTC },
- { 3, TWL4030_BASEADD_SECURED_REG },
+ { 2, TWL5031_BASEADD_ACCESSORY },
+ { 2, TWL5031_BASEADD_INTERRUPTS },
};
static struct regmap_config twl4030_regmap_config[4] = {
@@ -251,35 +233,25 @@ static struct twl_mapping twl6030_map[] = {
* <linux/i2c/twl.h> defines for TWL4030_MODULE_*
* so they continue to match the order in this table.
*/
- { SUB_CHIP_ID1, TWL6030_BASEADD_USB },
- { SUB_CHIP_ID_INVAL, TWL6030_BASEADD_AUDIO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID1, TWL6030_BASEADD_PIH },
-
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
-
- { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER },
- { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE },
- { SUB_CHIP_ID1, TWL6030_BASEADD_PWM },
- { SUB_CHIP_ID0, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID1, TWL6030_BASEADD_ZERO },
-
- { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
- { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
-
- { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
- { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
- { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
- { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
- { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
+
+ /* Common IPs */
+ { 1, TWL6030_BASEADD_USB },
+ { 1, TWL6030_BASEADD_PIH },
+ { 1, TWL6030_BASEADD_CHARGER },
+ { 0, TWL6030_BASEADD_PM_MASTER },
+ { 0, TWL6030_BASEADD_PM_SLAVE_MISC },
+
+ { 0, TWL6030_BASEADD_RTC },
+ { 1, TWL6030_BASEADD_PWM },
+ { 1, TWL6030_BASEADD_LED },
+ { 0, TWL6030_BASEADD_SECURED_REG },
+
+ /* TWL6030 specific IPs */
+ { 0, TWL6030_BASEADD_ZERO },
+ { 1, TWL6030_BASEADD_ZERO },
+ { 2, TWL6030_BASEADD_ZERO },
+ { 1, TWL6030_BASEADD_GPADC_CTRL },
+ { 1, TWL6030_BASEADD_GASGAUGE },
};
static struct regmap_config twl6030_regmap_config[3] = {
@@ -305,8 +277,30 @@ static struct regmap_config twl6030_regmap_config[3] = {
/*----------------------------------------------------------------------*/
+static inline int twl_get_num_slaves(void)
+{
+ if (twl_class_is_4030())
+ return 4; /* the TWL4030 class has four slave addresses */
+ else
+ return 3; /* the TWL6030 class has three slave addresses */
+}
+
+static inline int twl_get_last_module(void)
+{
+ if (twl_class_is_4030())
+ return TWL4030_MODULE_LAST;
+ else
+ return TWL6030_MODULE_LAST;
+}
+
/* Exported Functions */
+unsigned int twl_rev(void)
+{
+ return twl_priv ? twl_priv->twl_id : 0;
+}
+EXPORT_SYMBOL(twl_rev);
+
/**
* twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
@@ -314,9 +308,6 @@ static struct regmap_config twl6030_regmap_config[3] = {
* @reg: register address (just offset will do)
* @num_bytes: number of bytes to transfer
*
- * IMPORTANT: for 'value' parameter: Allocate value num_bytes+1 and
- * valid data starts at Offset 1.
- *
* Returns the result of operation - 0 is success
*/
int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
@@ -325,24 +316,21 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int sid;
struct twl_client *twl;
- if (unlikely(mod_no >= TWL_MODULE_LAST)) {
- pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
- return -EPERM;
- }
- if (unlikely(!inuse)) {
+ if (unlikely(!twl_priv || !twl_priv->ready)) {
pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- if (unlikely(sid == SUB_CHIP_ID_INVAL)) {
- pr_err("%s: module %d is not part of the pmic\n",
- DRIVER_NAME, mod_no);
- return -EINVAL;
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
}
- twl = &twl_modules[sid];
- ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
- value, num_bytes);
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
+
+ ret = regmap_bulk_write(twl->regmap,
+ twl_priv->twl_map[mod_no].base + reg, value,
+ num_bytes);
if (ret)
pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
@@ -367,24 +355,21 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int sid;
struct twl_client *twl;
- if (unlikely(mod_no >= TWL_MODULE_LAST)) {
- pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
- return -EPERM;
- }
- if (unlikely(!inuse)) {
+ if (unlikely(!twl_priv || !twl_priv->ready)) {
pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- if (unlikely(sid == SUB_CHIP_ID_INVAL)) {
- pr_err("%s: module %d is not part of the pmic\n",
- DRIVER_NAME, mod_no);
- return -EINVAL;
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
}
- twl = &twl_modules[sid];
- ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
- value, num_bytes);
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
+
+ ret = regmap_bulk_read(twl->regmap,
+ twl_priv->twl_map[mod_no].base + reg, value,
+ num_bytes);
if (ret)
pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
@@ -394,34 +379,6 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
EXPORT_SYMBOL(twl_i2c_read);
-/**
- * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0
- * @mod_no: module number
- * @value: the value to be written 8 bit
- * @reg: register address (just offset will do)
- *
- * Returns result of operation - 0 is success
- */
-int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
-{
- return twl_i2c_write(mod_no, &value, reg, 1);
-}
-EXPORT_SYMBOL(twl_i2c_write_u8);
-
-/**
- * twl_i2c_read_u8 - Reads a 8 bit register from TWL4030/TWL5030/TWL60X0
- * @mod_no: module number
- * @value: the value read 8 bit
- * @reg: register address (just offset will do)
- *
- * Returns result of operation - 0 is success
- */
-int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
-{
- return twl_i2c_read(mod_no, value, reg, 1);
-}
-EXPORT_SYMBOL(twl_i2c_read_u8);
-
/*----------------------------------------------------------------------*/
/**
@@ -440,7 +397,7 @@ static int twl_read_idcode_register(void)
goto fail;
}
- err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_idcode),
+ err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_priv->twl_idcode),
REG_IDCODE_7_0, 4);
if (err) {
pr_err("TWL4030: unable to read IDCODE -%d\n", err);
@@ -461,7 +418,7 @@ fail:
*/
int twl_get_type(void)
{
- return TWL_SIL_TYPE(twl_idcode);
+ return TWL_SIL_TYPE(twl_priv->twl_idcode);
}
EXPORT_SYMBOL_GPL(twl_get_type);
@@ -472,7 +429,7 @@ EXPORT_SYMBOL_GPL(twl_get_type);
*/
int twl_get_version(void)
{
- return TWL_SIL_REV(twl_idcode);
+ return TWL_SIL_REV(twl_priv->twl_idcode);
}
EXPORT_SYMBOL_GPL(twl_get_version);
@@ -509,13 +466,20 @@ int twl_get_hfclk_rate(void)
EXPORT_SYMBOL_GPL(twl_get_hfclk_rate);
static struct device *
-add_numbered_child(unsigned chip, const char *name, int num,
+add_numbered_child(unsigned mod_no, const char *name, int num,
void *pdata, unsigned pdata_len,
bool can_wakeup, int irq0, int irq1)
{
struct platform_device *pdev;
- struct twl_client *twl = &twl_modules[chip];
- int status;
+ struct twl_client *twl;
+ int status, sid;
+
+ if (unlikely(mod_no >= twl_get_last_module())) {
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return ERR_PTR(-EPERM);
+ }
+ sid = twl_priv->twl_map[mod_no].sid;
+ twl = &twl_priv->twl_modules[sid];
pdev = platform_device_alloc(name, num);
if (!pdev) {
@@ -560,11 +524,11 @@ err:
return &pdev->dev;
}
-static inline struct device *add_child(unsigned chip, const char *name,
+static inline struct device *add_child(unsigned mod_no, const char *name,
void *pdata, unsigned pdata_len,
bool can_wakeup, int irq0, int irq1)
{
- return add_numbered_child(chip, name, -1, pdata, pdata_len,
+ return add_numbered_child(mod_no, name, -1, pdata, pdata_len,
can_wakeup, irq0, irq1);
}
@@ -573,7 +537,6 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
struct regulator_consumer_supply *consumers,
unsigned num_consumers, unsigned long features)
{
- unsigned sub_chip_id;
struct twl_regulator_driver_data drv_data;
/* regulator framework demands init_data ... */
@@ -600,8 +563,7 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
}
/* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
- return add_numbered_child(sub_chip_id, "twl_reg", num,
+ return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
pdata, sizeof(*pdata), false, 0, 0);
}
@@ -623,10 +585,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
unsigned long features)
{
struct device *child;
- unsigned sub_chip_id;
if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(SUB_CHIP_ID1, "twl4030_gpio",
+ child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
pdata->gpio, sizeof(*pdata->gpio),
false, irq_base + GPIO_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -634,7 +595,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(SUB_CHIP_ID2, "twl4030_keypad",
+ child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
pdata->keypad, sizeof(*pdata->keypad),
true, irq_base + KEYPAD_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -643,7 +604,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID2, "twl4030_madc",
+ child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -658,22 +619,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* Eventually, Linux might become more aware of such
* HW security concerns, and "least privilege".
*/
- sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
- child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
+ child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
true, irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+ child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
+ child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -725,7 +685,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
- child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+ child = add_child(TWL_MODULE_USB, "twl4030_usb",
pdata->usb, sizeof(*pdata->usb), true,
/* irq0 = USB_PRES, irq1 = USB */
irq_base + USB_PRES_INTR_OFFSET,
@@ -774,7 +734,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
pdata->usb->features = features;
- child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+ child = add_child(TWL_MODULE_USB, "twl6030_usb",
pdata->usb, sizeof(*pdata->usb), true,
/* irq1 = VBUS_PRES, irq0 = USB ID */
irq_base + USBOTG_INTR_OFFSET,
@@ -799,22 +759,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
- false, 0, 0);
+ child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
+ 0, false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
- true, irq_base + 8 + 0, 0);
+ child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
+ NULL, 0, true, irq_base + 8 + 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
twl_class_is_4030()) {
- child = add_child(SUB_CHIP_ID1, "twl4030-audio",
+ child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
@@ -1054,7 +1014,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(SUB_CHIP_ID3, "twl4030_bci",
+ child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci), false,
/* irq0 = CHG_PRES, irq1 = BCI */
irq_base + BCI_PRES_INTR_OFFSET,
@@ -1145,25 +1105,23 @@ static int twl_remove(struct i2c_client *client)
unsigned i, num_slaves;
int status;
- if (twl_class_is_4030()) {
+ if (twl_class_is_4030())
status = twl4030_exit_irq();
- num_slaves = TWL_NUM_SLAVES;
- } else {
+ else
status = twl6030_exit_irq();
- num_slaves = TWL_NUM_SLAVES - 1;
- }
if (status < 0)
return status;
+ num_slaves = twl_get_num_slaves();
for (i = 0; i < num_slaves; i++) {
- struct twl_client *twl = &twl_modules[i];
+ struct twl_client *twl = &twl_priv->twl_modules[i];
if (twl->client && twl->client != client)
i2c_unregister_device(twl->client);
- twl_modules[i].client = NULL;
+ twl->client = NULL;
}
- inuse = false;
+ twl_priv->ready = false;
return 0;
}
@@ -1179,6 +1137,17 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
+ if (!node && !pdata) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (twl_priv) {
+ dev_dbg(&client->dev, "only one instance of %s allowed\n",
+ DRIVER_NAME);
+ return -EBUSY;
+ }
+
pdev = platform_device_alloc(DRIVER_NAME, -1);
if (!pdev) {
dev_err(&client->dev, "can't alloc pdev\n");
@@ -1191,54 +1160,44 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
return status;
}
- if (node && !pdata) {
- /*
- * XXX: Temporary pdata until the information is correctly
- * retrieved by every TWL modules from DT.
- */
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct twl4030_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- status = -ENOMEM;
- goto free;
- }
- }
-
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data?\n");
- status = -EINVAL;
- goto free;
- }
-
- platform_set_drvdata(pdev, pdata);
-
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
status = -EIO;
goto free;
}
- if (inuse) {
- dev_dbg(&client->dev, "driver is already in use\n");
- status = -EBUSY;
+ twl_priv = devm_kzalloc(&client->dev, sizeof(struct twl_private),
+ GFP_KERNEL);
+ if (!twl_priv) {
+ status = -ENOMEM;
goto free;
}
if ((id->driver_data) & TWL6030_CLASS) {
- twl_id = TWL6030_CLASS_ID;
- twl_map = &twl6030_map[0];
+ twl_priv->twl_id = TWL6030_CLASS_ID;
+ twl_priv->twl_map = &twl6030_map[0];
+ /* The charger base address is different in twl6025 */
+ if ((id->driver_data) & TWL6025_SUBCLASS)
+ twl_priv->twl_map[TWL_MODULE_MAIN_CHARGE].base =
+ TWL6025_BASEADD_CHARGER;
twl_regmap_config = twl6030_regmap_config;
- num_slaves = TWL_NUM_SLAVES - 1;
} else {
- twl_id = TWL4030_CLASS_ID;
- twl_map = &twl4030_map[0];
+ twl_priv->twl_id = TWL4030_CLASS_ID;
+ twl_priv->twl_map = &twl4030_map[0];
twl_regmap_config = twl4030_regmap_config;
- num_slaves = TWL_NUM_SLAVES;
+ }
+
+ num_slaves = twl_get_num_slaves();
+ twl_priv->twl_modules = devm_kzalloc(&client->dev,
+ sizeof(struct twl_client) * num_slaves,
+ GFP_KERNEL);
+ if (!twl_priv->twl_modules) {
+ status = -ENOMEM;
+ goto free;
}
for (i = 0; i < num_slaves; i++) {
- struct twl_client *twl = &twl_modules[i];
+ struct twl_client *twl = &twl_priv->twl_modules[i];
if (i == 0) {
twl->client = client;
@@ -1264,19 +1223,19 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- inuse = true;
+ twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&pdev->dev, pdata->clock);
+ clocks_init(&pdev->dev, pdata ? pdata->clock : NULL);
/* read TWL IDCODE Register */
- if (twl_id == TWL4030_CLASS_ID) {
+ if (twl_class_is_4030()) {
status = twl_read_idcode_register();
WARN(status < 0, "Error: reading twl_idcode register value\n");
}
/* load power event scripts */
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power)
+ if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata && pdata->power)
twl4030_power_init(pdata->power);
/* Maybe init the T2 Interrupt subsystem */
@@ -1308,10 +1267,9 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
- status = -ENODEV;
if (node)
status = of_platform_populate(node, NULL, NULL, &client->dev);
- if (status)
+ else
status = add_children(pdata, irq_base, id->driver_data);
fail:
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 558c2928f261..bf75e967a1f3 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -49,6 +49,8 @@
#define SYS_ID_HBI_SHIFT 16
#define SYS_PROCIDx_HBI_SHIFT 0
+#define SYS_LED_LED(n) (1 << (n))
+
#define SYS_MCI_CARDIN (1 << 0)
#define SYS_MCI_WPROT (1 << 1)
@@ -336,34 +338,40 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
void __init vexpress_sysreg_of_early_init(void)
{
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
+ struct device_node *node;
+
+ if (vexpress_sysreg_base)
+ return;
+ node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg");
if (node) {
vexpress_sysreg_base = of_iomap(node, 0);
vexpress_sysreg_setup(node);
- } else {
- pr_info("vexpress-sysreg: No Device Tree node found.");
}
}
+#define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \
+ [VEXPRESS_GPIO_##_name] = { \
+ .reg = _reg, \
+ .value = _reg##_##_value, \
+ }
+
static struct vexpress_sysreg_gpio {
unsigned long reg;
u32 value;
} vexpress_sysreg_gpios[] = {
- [VEXPRESS_GPIO_MMC_CARDIN] = {
- .reg = SYS_MCI,
- .value = SYS_MCI_CARDIN,
- },
- [VEXPRESS_GPIO_MMC_WPROT] = {
- .reg = SYS_MCI,
- .value = SYS_MCI_WPROT,
- },
- [VEXPRESS_GPIO_FLASH_WPn] = {
- .reg = SYS_FLASH,
- .value = SYS_FLASH_WPn,
- },
+ VEXPRESS_SYSREG_GPIO(MMC_CARDIN, SYS_MCI, CARDIN),
+ VEXPRESS_SYSREG_GPIO(MMC_WPROT, SYS_MCI, WPROT),
+ VEXPRESS_SYSREG_GPIO(FLASH_WPn, SYS_FLASH, WPn),
+ VEXPRESS_SYSREG_GPIO(LED0, SYS_LED, LED(0)),
+ VEXPRESS_SYSREG_GPIO(LED1, SYS_LED, LED(1)),
+ VEXPRESS_SYSREG_GPIO(LED2, SYS_LED, LED(2)),
+ VEXPRESS_SYSREG_GPIO(LED3, SYS_LED, LED(3)),
+ VEXPRESS_SYSREG_GPIO(LED4, SYS_LED, LED(4)),
+ VEXPRESS_SYSREG_GPIO(LED5, SYS_LED, LED(5)),
+ VEXPRESS_SYSREG_GPIO(LED6, SYS_LED, LED(6)),
+ VEXPRESS_SYSREG_GPIO(LED7, SYS_LED, LED(7)),
};
static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
@@ -372,12 +380,6 @@ static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- return 0;
-}
-
static int vexpress_sysreg_gpio_get(struct gpio_chip *chip,
unsigned offset)
{
@@ -401,6 +403,14 @@ static void vexpress_sysreg_gpio_set(struct gpio_chip *chip,
writel(reg_value, vexpress_sysreg_base + gpio->reg);
}
+static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ vexpress_sysreg_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
static struct gpio_chip vexpress_sysreg_gpio_chip = {
.label = "vexpress-sysreg",
.direction_input = vexpress_sysreg_gpio_direction_input,
@@ -412,6 +422,30 @@ static struct gpio_chip vexpress_sysreg_gpio_chip = {
};
+#define VEXPRESS_SYSREG_GREEN_LED(_name, _default_trigger, _gpio) \
+ { \
+ .name = "v2m:green:"_name, \
+ .default_trigger = _default_trigger, \
+ .gpio = VEXPRESS_GPIO_##_gpio, \
+ }
+
+struct gpio_led vexpress_sysreg_leds[] = {
+ VEXPRESS_SYSREG_GREEN_LED("user1", "heartbeat", LED0),
+ VEXPRESS_SYSREG_GREEN_LED("user2", "mmc0", LED1),
+ VEXPRESS_SYSREG_GREEN_LED("user3", "cpu0", LED2),
+ VEXPRESS_SYSREG_GREEN_LED("user4", "cpu1", LED3),
+ VEXPRESS_SYSREG_GREEN_LED("user5", "cpu2", LED4),
+ VEXPRESS_SYSREG_GREEN_LED("user6", "cpu3", LED5),
+ VEXPRESS_SYSREG_GREEN_LED("user7", "cpu4", LED6),
+ VEXPRESS_SYSREG_GREEN_LED("user8", "cpu5", LED7),
+};
+
+struct gpio_led_platform_data vexpress_sysreg_leds_pdata = {
+ .num_leds = ARRAY_SIZE(vexpress_sysreg_leds),
+ .leds = vexpress_sysreg_leds,
+};
+
+
static ssize_t vexpress_sysreg_sys_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -456,6 +490,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
return err;
}
+ platform_device_register_data(vexpress_sysreg_dev, "leds-gpio",
+ PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata,
+ sizeof(vexpress_sysreg_leds_pdata));
+
vexpress_sysreg_dev = &pdev->dev;
device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
@@ -478,6 +516,7 @@ static struct platform_driver vexpress_sysreg_driver = {
static int __init vexpress_sysreg_init(void)
{
+ vexpress_sysreg_of_early_init();
return platform_driver_register(&vexpress_sysreg_driver);
}
core_initcall(vexpress_sysreg_init);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index a9d9d41d95d3..a433f580aa4c 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -59,12 +59,13 @@ static const struct reg_default wm5102_reva_patch[] = {
static const struct reg_default wm5102_revb_patch[] = {
{ 0x80, 0x0003 },
{ 0x081, 0xE022 },
- { 0x410, 0x6080 },
- { 0x418, 0x6080 },
- { 0x420, 0x6080 },
+ { 0x410, 0x4080 },
+ { 0x418, 0x4080 },
+ { 0x420, 0x4080 },
{ 0x428, 0xC000 },
- { 0x441, 0x8014 },
+ { 0x4B0, 0x0066 },
{ 0x458, 0x000b },
+ { 0x212, 0x0000 },
{ 0x80, 0x0000 },
};
@@ -231,11 +232,9 @@ const struct regmap_irq_chip wm5102_irq = {
static const struct reg_default wm5102_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
- { 0x0000000D, 0x0000 }, /* R13 - Ctrl IF Status 1 */
{ 0x00000016, 0x0000 }, /* R22 - Write Sequencer Ctrl 0 */
{ 0x00000017, 0x0000 }, /* R23 - Write Sequencer Ctrl 1 */
{ 0x00000018, 0x0000 }, /* R24 - Write Sequencer Ctrl 2 */
- { 0x0000001A, 0x0000 }, /* R26 - Write Sequencer PROM */
{ 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
@@ -250,12 +249,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
{ 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
- { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */
- { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */
- { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */
- { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */
- { 0x0000006C, 0x01FF }, /* R108 - Always On Triggers Sequence Select 5 */
- { 0x0000006D, 0x01FF }, /* R109 - Always On Triggers Sequence Select 6 */
+ { 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
+ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
+ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
+ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
+ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
+ { 0x0000006E, 0x01FF }, /* R110 - Trigger Sequence Select 32 */
+ { 0x0000006F, 0x01FF }, /* R111 - Trigger Sequence Select 33 */
{ 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
{ 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
{ 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
@@ -265,13 +266,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
{ 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
{ 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
- { 0x00000100, 0x0001 }, /* R256 - Clock 32k 1 */
+ { 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
{ 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
{ 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
{ 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
{ 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
{ 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
{ 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
{ 0x00000149, 0x0000 }, /* R329 - Output system clock */
{ 0x0000014A, 0x0000 }, /* R330 - Output async clock */
{ 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
@@ -280,13 +282,14 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
{ 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
- { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
+ { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
@@ -302,6 +305,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
+ { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
{ 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
@@ -317,8 +321,12 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000225, 0x0400 }, /* R549 - HP Ctrl 1L */
+ { 0x00000226, 0x0400 }, /* R550 - HP Ctrl 1R */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
+ { 0x0000029F, 0x0000 }, /* R671 - Headphone Detect Test */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
@@ -350,53 +358,44 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
{ 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
- { 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000410, 0x4080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
- { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
- { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
- { 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000418, 0x4080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
- { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
{ 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
- { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
- { 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000420, 0x4080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
- { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
- { 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
- { 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
- { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
- { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000428, 0xC000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
- { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
- { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
{ 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
- { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
{ 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
{ 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
- { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
{ 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
- { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
- { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
- { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
+ { 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
- { 0x000004DC, 0x0000 }, /* R1244 - DAC comp 1 */
- { 0x000004DD, 0x0000 }, /* R1245 - DAC comp 2 */
- { 0x000004DE, 0x0000 }, /* R1246 - DAC comp 3 */
- { 0x000004DF, 0x0000 }, /* R1247 - DAC comp 4 */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
@@ -424,7 +423,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
{ 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
{ 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
- { 0x0000051B, 0x0000 }, /* R1307 - AIF1 Force Write */
{ 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
{ 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
{ 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
@@ -440,7 +438,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
{ 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
{ 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
- { 0x0000055B, 0x0000 }, /* R1371 - AIF2 Force Write */
{ 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
{ 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
{ 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
@@ -456,7 +453,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
{ 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
{ 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
- { 0x0000059B, 0x0000 }, /* R1435 - AIF3 Force Write */
{ 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
{ 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
{ 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
@@ -780,22 +776,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
{ 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
{ 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
- { 0x000008D0, 0x0000 }, /* R2256 - DRC2LMIX Input 1 Source */
- { 0x000008D1, 0x0080 }, /* R2257 - DRC2LMIX Input 1 Volume */
- { 0x000008D2, 0x0000 }, /* R2258 - DRC2LMIX Input 2 Source */
- { 0x000008D3, 0x0080 }, /* R2259 - DRC2LMIX Input 2 Volume */
- { 0x000008D4, 0x0000 }, /* R2260 - DRC2LMIX Input 3 Source */
- { 0x000008D5, 0x0080 }, /* R2261 - DRC2LMIX Input 3 Volume */
- { 0x000008D6, 0x0000 }, /* R2262 - DRC2LMIX Input 4 Source */
- { 0x000008D7, 0x0080 }, /* R2263 - DRC2LMIX Input 4 Volume */
- { 0x000008D8, 0x0000 }, /* R2264 - DRC2RMIX Input 1 Source */
- { 0x000008D9, 0x0080 }, /* R2265 - DRC2RMIX Input 1 Volume */
- { 0x000008DA, 0x0000 }, /* R2266 - DRC2RMIX Input 2 Source */
- { 0x000008DB, 0x0080 }, /* R2267 - DRC2RMIX Input 2 Volume */
- { 0x000008DC, 0x0000 }, /* R2268 - DRC2RMIX Input 3 Source */
- { 0x000008DD, 0x0080 }, /* R2269 - DRC2RMIX Input 3 Volume */
- { 0x000008DE, 0x0000 }, /* R2270 - DRC2RMIX Input 4 Source */
- { 0x000008DF, 0x0080 }, /* R2271 - DRC2RMIX Input 4 Volume */
{ 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
{ 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
{ 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
@@ -887,7 +867,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
- { 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
+ { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
@@ -982,11 +962,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
{ 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
{ 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
- { 0x00000E89, 0x0018 }, /* R3721 - DRC2 ctrl1 */
- { 0x00000E8A, 0x0933 }, /* R3722 - DRC2 ctrl2 */
- { 0x00000E8B, 0x0018 }, /* R3723 - DRC2 ctrl3 */
- { 0x00000E8C, 0x0000 }, /* R3724 - DRC2 ctrl4 */
- { 0x00000E8D, 0x0000 }, /* R3725 - DRC2 ctrl5 */
{ 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
{ 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
{ 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
@@ -997,16 +972,12 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
- { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00000EF6, 0x0000 }, /* R3830 - ISRC 3 CTRL 1 */
- { 0x00000EF7, 0x0000 }, /* R3831 - ISRC 3 CTRL 2 */
- { 0x00000EF8, 0x0000 }, /* R3832 - ISRC 3 CTRL 3 */
{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
{ 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
};
@@ -1833,17 +1804,24 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
return true;
default:
- return false;
+ if ((reg >= 0x100000 && reg < 0x106000) ||
+ (reg >= 0x180000 && reg < 0x180800) ||
+ (reg >= 0x190000 && reg < 0x194800) ||
+ (reg >= 0x1a8000 && reg < 0x1a9800))
+ return true;
+ else
+ return false;
}
}
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
- if (reg > 0xffff)
- return true;
-
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
@@ -1884,12 +1862,22 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
+ case ARIZONA_DSP1_SCRATCH_0:
+ case ARIZONA_DSP1_SCRATCH_1:
+ case ARIZONA_DSP1_SCRATCH_2:
+ case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:
- return false;
+ if ((reg >= 0x100000 && reg < 0x106000) ||
+ (reg >= 0x180000 && reg < 0x180800) ||
+ (reg >= 0x190000 && reg < 0x194800) ||
+ (reg >= 0x1a8000 && reg < 0x1a9800))
+ return true;
+ else
+ return false;
}
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 57c488d42d3e..803e93fae56a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -467,7 +467,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err;
}
- ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
+ ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
@@ -478,7 +478,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
- goto err_get;
+ goto err;
}
ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
@@ -658,8 +658,6 @@ err_irq:
err_enable:
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
-err_get:
- regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err:
mfd_remove_devices(wm8994->dev);
return ret;
@@ -672,7 +670,6 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
wm8994_irq_exit(wm8994);
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
- regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
}
static const struct of_device_id wm8994_of_match[] = {
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f428d86bfc10..f32550a74bdd 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -885,7 +885,7 @@ struct c2port_device *c2port_device_register(char *name,
struct c2port_ops *ops, void *devdata)
{
struct c2port_device *c2dev;
- int id, ret;
+ int ret;
if (unlikely(!ops) || unlikely(!ops->access) || \
unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
@@ -897,22 +897,18 @@ struct c2port_device *c2port_device_register(char *name,
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
- ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto error_idr_get_new;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock_irq(&c2port_idr_lock);
- ret = idr_get_new(&c2port_idr, c2dev, &id);
+ ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT);
spin_unlock_irq(&c2port_idr_lock);
+ idr_preload_end();
if (ret < 0)
- goto error_idr_get_new;
- c2dev->id = id;
+ goto error_idr_alloc;
+ c2dev->id = ret;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
- "c2port%d", id);
+ "c2port%d", c2dev->id);
if (unlikely(IS_ERR(c2dev->dev))) {
ret = PTR_ERR(c2dev->dev);
goto error_device_create;
@@ -946,10 +942,10 @@ error_device_create_bin_file:
error_device_create:
spin_lock_irq(&c2port_idr_lock);
- idr_remove(&c2port_idr, id);
+ idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
-error_idr_get_new:
+error_idr_alloc:
kfree(c2dev);
return ERR_PTR(ret);
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eaddfe9db149..736c7714f565 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -546,7 +546,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
goto out_dma_unmap;
}
- dma_async_memcpy_issue_pending(chan);
+ dma_async_issue_pending(chan);
/* Set the total byte count */
fpga_set_byte_count(priv->regs, priv->bytes);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 8835eabb3b87..7508cafff103 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -631,6 +631,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dst, src;
+ unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
+ DMA_COMPL_SKIP_SRC_UNMAP;
dst_sg = buf->vb.sglist;
dst_nents = buf->vb.sglen;
@@ -666,7 +668,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
src = SYS_FPGA_BLOCK;
tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
REG_BLOCK_SIZE,
- 0);
+ dma_flags);
if (!tx) {
dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
return -ENOMEM;
@@ -749,7 +751,7 @@ static irqreturn_t data_irq(int irq, void *dev_id)
submitted = true;
/* Start the DMA Engine */
- dma_async_memcpy_issue_pending(priv->chan);
+ dma_async_issue_pending(priv->chan);
out:
/* If no DMA was submitted, re-enable interrupts */
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index c9e695ea7c9a..04f2e1fa9dd1 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -1,13 +1,14 @@
menu "EEPROM support"
config EEPROM_AT24
- tristate "I2C EEPROMs from most vendors"
+ tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
help
- Enable this driver to get read/write support to most I2C EEPROMs,
- after you configure the driver to know about each EEPROM on
- your target board. Use these generic chip names, instead of
- vendor-specific ones like at24c64 or 24lc02:
+ Enable this driver to get read/write support to most I2C EEPROMs
+ and compatible devices like FRAMs, SRAMs, ROMs etc., after you
+ configure the driver to know about each chip on your target
+ board. Use these generic chip names, instead of vendor-specific
+ ones like at24c64, 24lc02 or fm24c04:
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a969b373..36f5d52775a9 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+ addr = (unsigned long) dereference_function_descriptor((void *)addr);
return addr;
}
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 240a6d361665..2129274ef7ab 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
const struct mmu_notifier_ops *ops)
{
struct mmu_notifier *mn, *gru_mn = NULL;
- struct hlist_node *n;
if (mm->mmu_notifier_mm) {
rcu_read_lock();
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
hlist)
if (mn->ops == ops) {
gru_mn = mn;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 0bd5349b0422..0ab7c922212c 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -196,13 +196,14 @@ int tifm_add_adapter(struct tifm_adapter *fm)
{
int rc;
- if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&tifm_adapter_lock);
- rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+ rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT);
+ if (rc >= 0)
+ fm->id = rc;
spin_unlock(&tifm_adapter_lock);
- if (rc)
+ idr_preload_end();
+ if (rc < 0)
return rc;
dev_set_name(&fm->dev, "tifm%u", fm->id);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index c3e8397f62ed..a8cee33ae8d2 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -127,9 +127,8 @@ static struct dbell_entry *dbell_index_table_find(u32 idx)
{
u32 bucket = VMCI_DOORBELL_HASH(idx);
struct dbell_entry *dbell;
- struct hlist_node *node;
- hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+ hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
node) {
if (idx == dbell->idx)
return dbell;
@@ -359,12 +358,10 @@ static void dbell_fire_entries(u32 notify_idx)
{
u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
struct dbell_entry *dbell;
- struct hlist_node *node;
spin_lock_bh(&vmci_doorbell_it.lock);
- hlist_for_each_entry(dbell, node,
- &vmci_doorbell_it.entries[bucket], node) {
+ hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
if (dbell->idx == notify_idx &&
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index a196f84a4fd2..9a53a30de445 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -46,11 +46,10 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
enum vmci_resource_type type)
{
struct vmci_resource *r, *resource = NULL;
- struct hlist_node *node;
unsigned int idx = vmci_resource_hash(handle);
rcu_read_lock();
- hlist_for_each_entry_rcu(r, node,
+ hlist_for_each_entry_rcu(r,
&vmci_resource_table.entries[idx], node) {
u32 cid = r->handle.context;
u32 rid = r->handle.resource;
@@ -146,12 +145,11 @@ void vmci_resource_remove(struct vmci_resource *resource)
struct vmci_handle handle = resource->handle;
unsigned int idx = vmci_resource_hash(handle);
struct vmci_resource *r;
- struct hlist_node *node;
/* Remove resource from hash table. */
spin_lock(&vmci_resource_table.lock);
- hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+ hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
if (vmci_handle_is_equal(r->handle, resource->handle)) {
hlist_del_init_rcu(&r->node);
break;
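
Several hunks above (sgi-gru, vmci_doorbell, vmci_resource) drop the separate struct hlist_node cursor because hlist_for_each_entry() now takes only the entry pointer, the list head and the member name. A minimal sketch of the updated iteration, using placeholder example_item and example_find names that do not come from any of these drivers:

#include <linux/list.h>

struct example_item {			/* placeholder structure, for illustration only */
	int key;
	struct hlist_node node;
};

/* Return the first entry in @head whose key matches @key, or NULL. */
static struct example_item *example_find(struct hlist_head *head, int key)
{
	struct example_item *item;

	hlist_for_each_entry(item, head, node) {
		if (item->key == key)
			return item;
	}

	return NULL;
}
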
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_WR 0x02
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
unsigned int usage;
unsigned int read_only;
@@ -113,15 +120,10 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
+enum {
+ MMC_PACKED_NR_IDX = -1,
+ MMC_PACKED_NR_ZERO,
+ MMC_PACKED_NR_SINGLE,
};
module_param(perdev_minors, int, 0444);
@@ -131,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+ struct mmc_packed *packed = mqrq->packed;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ packed->nr_entries = MMC_PACKED_NR_ZERO;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+ packed->retries = 0;
+ packed->blocks = 0;
+}
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -1148,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
+ if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ struct mmc_packed *packed = mq_rq->packed;
+ int err, check, status;
+ u8 *ext_csd;
+
+ BUG_ON(!packed);
+
+ packed->retries--;
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXCEPTION_EVENT) {
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ pr_err("%s: unable to allocate buffer for ext_csd\n",
+ req->rq_disk->disk_name);
+ return -ENOMEM;
+ }
+
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ check = MMC_BLK_ABORT;
+ goto free;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ packed->idx_failure =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ check = MMC_BLK_PARTIAL;
+ }
+ pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+ "failure index: %d\n",
+ req->rq_disk->disk_name, packed->nr_entries,
+ packed->blocks, packed->idx_failure);
+ }
+free:
+ kfree(ext_csd);
+ }
+
+ return check;
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1308,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(q);
+ unsigned int len, nr_segs = 0;
+
+ do {
+ len = min(hdr_sz, max_seg_sz);
+ hdr_sz -= len;
+ nr_segs++;
+ } while (hdr_sz);
+
+ return nr_segs;
+}
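Worked example for the helper above: a large-sector card has a 4096-byte packed header, so a host limited to 1024-byte segments needs four header segments; the loop amounts to a round-up division, as this illustrative (non-patch) fragment shows:

	#include <linux/kernel.h>

	/* nr_segs computed by the loop above == DIV_ROUND_UP(hdr_sz, max_seg_sz) */
	unsigned int hdr_sz = 4096, max_seg_sz = 1024;
	unsigned int nr_segs = DIV_ROUND_UP(hdr_sz, max_seg_sz);	/* == 4 */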
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_queue_req *mqrq = mq->mqrq_cur;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ bool put_back = true;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ if (!(md->flags & MMC_BLK_PACKED_CMD))
+ goto no_packed;
+
+ if ((rq_data_dir(cur) == WRITE) &&
+ mmc_host_packed_wr(card->host))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ goto no_packed;
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(cur), 8))
+ goto no_packed;
+
+ mmc_blk_clear_packed(mqrq);
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += cur->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors += mmc_large_sector(card) ? 8 : 1;
+ phys_segments += mmc_calc_packed_hdr_segs(q, card);
+ }
+
+ do {
+ if (reqs >= max_packed_rw - 1) {
+ put_back = false;
+ break;
+ }
+
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next) {
+ put_back = false;
+ break;
+ }
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(next), 8))
+ break;
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH)
+ break;
+
+ if (rq_data_dir(cur) != rq_data_dir(next))
+ break;
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ break;
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count)
+ break;
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs)
+ break;
+
+ list_add_tail(&next->queuelist, &mqrq->packed->list);
+ cur = next;
+ reqs++;
+ } while (1);
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mqrq->packed->list);
+ mqrq->packed->nr_entries = ++reqs;
+ mqrq->packed->retries = reqs;
+ return reqs;
+ }
+
+no_packed:
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_packed *packed = mqrq->packed;
+ bool do_rel_wr, do_data_tag;
+ u32 *packed_cmd_hdr;
+ u8 hdr_blocks;
+ u8 i = 1;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_WRITE;
+ packed->blocks = 0;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+
+ packed_cmd_hdr = packed->cmd_hdr;
+ memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+ packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+ /*
+ * Arguments for each entry of the packed group
+ */
+ list_for_each_entry(prq, &packed->list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (prq->cmd_flags & REQ_META) &&
+ (rq_data_dir(prq) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] =
+ (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] =
+ mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ packed->blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ brq->data.blocks = packed->blocks + hdr_blocks;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
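To illustrate the header built above: word 0 packs the entry count, the write direction and the version, and each queued request then contributes a pair of words starting at index 2 (the CMD23 argument carrying block count and flags, followed by the CMD25 start address). A standalone plain-C sketch with hypothetical sizes and addresses, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define PACKED_CMD_VER	0x01
	#define PACKED_CMD_WR	0x02

	int main(void)
	{
		/* Hypothetical packed write of two requests: 8 and 16 sectors */
		uint32_t hdr[8] = { 0 };
		uint32_t nr_entries = 2;

		hdr[0] = (nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
		hdr[2] = 8;		/* CMD23 arg: block count, no rel-wr/tag flags */
		hdr[3] = 0x1000;	/* CMD25 arg: start sector (block-addressed card) */
		hdr[4] = 16;
		hdr[5] = 0x2000;

		printf("hdr[0] = 0x%08x\n", hdr[0]);	/* prints 0x00020201 */
		return 0;
	}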
+
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1328,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+ if (!mmc_packed_cmd(mq_rq->cmd_type))
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
return ret;
}
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+ int idx = packed->idx_failure, i = 0;
+ int ret = 0;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ if (idx == i) {
+ /* retry from error index */
+ packed->nr_entries -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+
+ if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+ list_del_init(&prq->queuelist);
+ mmc_blk_clear_packed(mq_rq);
+ }
+ return ret;
+ }
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, 0, blk_rq_bytes(prq));
+ i++;
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+ return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct request_queue *q = mq->queue;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.prev);
+ if (prq->queuelist.prev != &packed->list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(q->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
@@ -1343,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_queue_req *mq_rq;
struct request *req = rqc;
struct mmc_async_req *areq;
+ const u8 packed_nr = 2;
+ u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
do {
if (rqc) {
/*
@@ -1357,15 +1727,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
(card->ext_csd.data_sector_size == 4096)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
req->rq_disk->disk_name);
+ mq_rq = mq->mqrq_cur;
goto cmd_abort;
}
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+ if (reqs >= packed_nr)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+ card, mq);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
- if (!areq)
+ if (!areq) {
+ if (status == MMC_BLK_NEW_REQUEST)
+ mq->flags |= MMC_QUEUE_NEW_REQUEST;
return 0;
+ }
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
@@ -1380,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- ret = blk_end_request(req, 0,
+
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ ret = mmc_blk_end_packed_req(mq_rq);
+ break;
+ } else {
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
+ }
+
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1414,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV)
+ if (err == -ENODEV ||
+ mmc_packed_cmd(mq_rq->cmd_type))
goto cmd_abort;
/* Fall through */
}
@@ -1438,30 +1825,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_NOMEDIUM:
goto cmd_abort;
+ default:
+ pr_err("%s: Unhandled return value (%d)",
+ req->rq_disk->disk_name, status);
+ goto cmd_abort;
}
if (ret) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
- mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ if (!mq_rq->packed->retries)
+ goto cmd_abort;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ } else {
+
+ /*
+ * In case of an incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ }
}
} while (ret);
return 1;
cmd_abort:
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ mmc_blk_abort_packed_req(mq_rq);
+ } else {
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ }
start_new_req:
if (rqc) {
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ if (mmc_card_removed(card)) {
+ rqc->cmd_flags |= REQ_QUIET;
+ blk_end_request_all(rqc, -EIO);
+ } else {
+ /*
+ * If the current request is packed, it needs to be put back.
+ */
+ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+ mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host,
+ &mq->mqrq_cur->mmc_active, NULL);
+ }
}
return 0;
@@ -1472,6 +1891,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -1486,6 +1907,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
@@ -1501,11 +1923,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
+ if (!req && host->areq) {
+ spin_lock_irqsave(&host->context_info.lock, flags);
+ host->context_info.is_waiting_last_req = true;
+ spin_unlock_irqrestore(&host->context_info.lock, flags);
+ }
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- if (!req)
+ if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
/* release host only when there are no more requests */
mmc_release_host(card->host);
return ret;
@@ -1624,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
}
+ if (mmc_card_mmc(card) &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+ (md->flags & MMC_BLK_CMD23) &&
+ card->ext_csd.packed_event_en) {
+ if (!mmc_packed_init(&md->queue, card))
+ md->flags |= MMC_BLK_PACKED_CMD;
+ }
+
return md;
err_putdisk:
@@ -1732,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
/* Then flush out any already in there */
mmc_cleanup_queue(&md->queue);
+ if (md->flags & MMC_BLK_PACKED_CMD)
+ mmc_packed_clean(&md->queue);
mmc_blk_put(md);
}
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@
#define MMC_QUEUE_BOUNCESZ 65536
-#define MMC_QUEUE_SUSPENDED (1 << 0)
+
+#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
/*
* Prepare a MMC request. This just filters out odd stuff.
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
do {
struct request *req = NULL;
struct mmc_queue_req *tmp;
+ unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
+ cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
+ if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ continue; /* fetch again */
+ }
/*
* Current request becomes previous request
* and vice versa.
+ * In case of special requests, the current request
+ * has already been finished. Do not assign it to the
+ * previous request.
*/
+ if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ mq->mqrq_cur->req = NULL;
+
mq->mqrq_prev->brq.mrq.data = NULL;
mq->mqrq_prev->req = NULL;
tmp = mq->mqrq_prev;
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
+ unsigned long flags;
+ struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
return;
}
- if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+ cntx = &mq->card->host->context_info;
+ if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+ /*
+ * A new MMC request arrived while the MMC thread may be
+ * blocked waiting for the previous request to complete,
+ * with no current request fetched
+ */
+ spin_lock_irqsave(&cntx->lock, flags);
+ if (cntx->is_waiting_last_req) {
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ }
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
@@ -334,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
}
EXPORT_SYMBOL(mmc_cleanup_queue);
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ int ret = 0;
+
+
+ mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_cur->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+ mmc_card_name(card));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_prev->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mqrq_cur->packed->list);
+ INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+ return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ kfree(mqrq_prev->packed);
+ mqrq_prev->packed = NULL;
+}
+
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
@@ -378,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_packed *packed,
+ struct scatterlist *sg,
+ enum mmc_packed_type cmd_type)
+{
+ struct scatterlist *__sg = sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+
+ if (mmc_packed_wr(cmd_type)) {
+ unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+ unsigned int len, remain, offset = 0;
+ u8 *buf = (u8 *)packed->cmd_hdr;
+
+ remain = hdr_sz;
+ do {
+ len = min(remain, max_seg_sz);
+ sg_set_buf(__sg, buf + offset, len);
+ offset += len;
+ remain -= len;
+ (__sg++)->page_link &= ~0x02;
+ sg_len++;
+ } while (remain);
+ }
+
+ list_for_each_entry(req, &packed->list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
/*
* Prepare the sg list(s) to be handed off to the host driver
*/
@@ -386,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
+ enum mmc_packed_type cmd_type;
int i;
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ cmd_type = mqrq->cmd_type;
+
+ if (!mqrq->bounce_buf) {
+ if (mmc_packed_cmd(cmd_type))
+ return mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->sg, cmd_type);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ if (mmc_packed_cmd(cmd_type))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->bounce_sg, cmd_type);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
struct mmc_data data;
};
+enum mmc_packed_type {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+ struct list_head list;
+ u32 cmd_hdr[1024];
+ unsigned int blocks;
+ u8 nr_entries;
+ u8 retries;
+ s16 idx_failure;
+};
+
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req mmc_active;
+ enum mmc_packed_type cmd_type;
+ struct mmc_packed *packed;
};
struct mmc_queue {
@@ -27,6 +46,9 @@ struct mmc_queue {
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
@@ -46,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb6753c1e..e219c97a02a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ mmc_init_context_info(card->host);
ret = device_add(&card->dev);
if (ret)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..08a3cf2a7610 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -319,11 +319,45 @@ out:
}
EXPORT_SYMBOL(mmc_start_bkops);
+/*
+ * mmc_wait_data_done() - done callback for data request
+ * @mrq: done data request
+ *
+ * Wakes up the mmc context; passed as the done callback to the host controller driver
+ */
+static void mmc_wait_data_done(struct mmc_request *mrq)
+{
+ mrq->host->context_info.is_done_rcv = true;
+ wake_up_interruptible(&mrq->host->context_info.wait);
+}
+
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
+/*
+ *__mmc_start_data_req() - starts data request
+ * @host: MMC host to start the request
+ * @mrq: data request to start
+ *
+ * Sets the done callback to be called when the request is completed by the
+ * card, then starts execution of the data mmc request.
+ */
+static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_wait_data_done;
+ mrq->host = host;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_wait_data_done(mrq);
+ return -ENOMEDIUM;
+ }
+ mmc_start_request(host, mrq);
+
+ return 0;
+}
+
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
@@ -337,6 +371,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+/*
+ * mmc_wait_for_data_req_done() - wait for the request to complete
+ * @host: MMC host to prepare the command.
+ * @mrq: MMC request to wait for
+ *
+ * Blocks the MMC context until the host controller acknowledges the end of
+ * the data request execution, or until a new request notification arrives
+ * from the block layer. Handles command retries.
+ *
+ * Returns enum mmc_blk_status after checking errors.
+ */
+static int mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq,
+ struct mmc_async_req *next_req)
+{
+ struct mmc_command *cmd;
+ struct mmc_context_info *context_info = &host->context_info;
+ int err;
+ unsigned long flags;
+
+ while (1) {
+ wait_event_interruptible(context_info->wait,
+ (context_info->is_done_rcv ||
+ context_info->is_new_req));
+ spin_lock_irqsave(&context_info->lock, flags);
+ context_info->is_waiting_last_req = false;
+ spin_unlock_irqrestore(&context_info->lock, flags);
+ if (context_info->is_done_rcv) {
+ context_info->is_done_rcv = false;
+ context_info->is_new_req = false;
+ cmd = mrq->cmd;
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card)) {
+ err = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return err */
+ } else {
+ pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host),
+ cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ continue; /* wait for done/new event again */
+ }
+ } else if (context_info->is_new_req) {
+ context_info->is_new_req = false;
+ if (!next_req) {
+ err = MMC_BLK_NEW_REQUEST;
+ break; /* return err */
+ }
+ }
+ }
+ return err;
+}
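For orientation, the wait above is fed by two wake-up paths added elsewhere in this series: the host's done callback sets is_done_rcv, and the block layer's request function sets is_new_req when the thread has flagged itself as waiting for the last request. A condensed, illustrative sketch of the two sides (the real code lives in mmc_wait_data_done() above and in mmc_request_fn() in queue.c):

	#include <linux/mmc/host.h>

	/* Completion side: host controller finished the data request */
	static void example_data_done(struct mmc_request *mrq)
	{
		mrq->host->context_info.is_done_rcv = true;
		wake_up_interruptible(&mrq->host->context_info.wait);
	}

	/* Arrival side: block layer queued a new request while we wait */
	static void example_new_request(struct mmc_context_info *cntx)
	{
		unsigned long flags;

		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	}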
+
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
@@ -426,8 +516,16 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) {
- mmc_wait_for_req_done(host, host->areq->mrq);
- err = host->areq->err_check(host->card, host->areq);
+ err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+ if (err == MMC_BLK_NEW_REQUEST) {
+ if (error)
+ *error = err;
+ /*
+ * The previous request was not completed,
+ * nothing to return
+ */
+ return NULL;
+ }
/*
* Check BKOPS urgency for each R1 response
*/
@@ -439,14 +537,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
}
if (!err && areq)
- start_err = __mmc_start_req(host, areq->mrq);
+ start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
/* Cancel a prepared request if it was not started. */
if ((err || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
+ mmc_post_req(host, areq->mrq, -EINVAL);
if (err)
host->areq = NULL;
@@ -1137,7 +1235,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
*/
voltage = regulator_get_voltage(supply);
- if (regulator_count_voltages(supply) == 1)
+ if (!regulator_can_change_voltage(supply))
min_uV = max_uV = voltage;
if (voltage < 0)
@@ -1219,10 +1317,30 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
return ocr;
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+{
+ int err = 0;
+ int old_signal_voltage = host->ios.signal_voltage;
+
+ host->ios.signal_voltage = signal_voltage;
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
+ err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
+
+ if (err)
+ host->ios.signal_voltage = old_signal_voltage;
+
+ return err;
+
+}
+
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
struct mmc_command cmd = {0};
int err = 0;
+ u32 clock;
BUG_ON(!host);
@@ -1230,27 +1348,81 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
* Send CMD11 only if the request is to switch the card to
* 1.8V signalling.
*/
- if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
- cmd.opcode = SD_SWITCH_VOLTAGE;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return __mmc_set_signal_voltage(host, signal_voltage);
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- return err;
+ /*
+ * If we cannot switch voltages, return failure so the caller
+ * can continue without UHS mode
+ */
+ if (!host->ops->start_signal_voltage_switch)
+ return -EPERM;
+ if (!host->ops->card_busy)
+ pr_warning("%s: cannot verify signal voltage switch\n",
+ mmc_hostname(host));
+
+ cmd.opcode = SD_SWITCH_VOLTAGE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
- return -EIO;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ return -EIO;
+
+ mmc_host_clk_hold(host);
+ /*
+ * The card should drive cmd and dat[0:3] low immediately
+ * after the response of cmd11, but wait 1 ms to be sure
+ */
+ mmc_delay(1);
+ if (host->ops->card_busy && !host->ops->card_busy(host)) {
+ err = -EAGAIN;
+ goto power_cycle;
}
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
- host->ios.signal_voltage = signal_voltage;
+ if (__mmc_set_signal_voltage(host, signal_voltage)) {
+ /*
+ * Voltages may not have been switched, but we've already
+ * sent CMD11, so a power cycle is required anyway
+ */
+ err = -EAGAIN;
+ goto power_cycle;
+ }
- if (host->ops->start_signal_voltage_switch) {
- mmc_host_clk_hold(host);
- err = host->ops->start_signal_voltage_switch(host, &host->ios);
- mmc_host_clk_release(host);
+ /* Keep clock gated for at least 5 ms */
+ mmc_delay(5);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ /* Wait for at least 1 ms according to spec */
+ mmc_delay(1);
+
+ /*
+ * Failure to switch is indicated by the card holding
+ * dat[0:3] low
+ */
+ if (host->ops->card_busy && host->ops->card_busy(host))
+ err = -EAGAIN;
+
+power_cycle:
+ if (err) {
+ pr_debug("%s: Signal voltage switch failed, "
+ "power cycling card\n", mmc_hostname(host));
+ mmc_power_cycle(host);
}
+ mmc_host_clk_release(host);
+
return err;
}
@@ -1314,7 +1486,7 @@ static void mmc_power_up(struct mmc_host *host)
mmc_set_ios(host);
/* Set signal voltage to 3.3V */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
+ __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
/*
* This delay should be sufficient to allow the power supply
@@ -1372,6 +1544,14 @@ void mmc_power_off(struct mmc_host *host)
mmc_host_clk_release(host);
}
+void mmc_power_cycle(struct mmc_host *host)
+{
+ mmc_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+ mmc_delay(1);
+ mmc_power_up(host);
+}
+
/*
* Cleanup when the last reference to the bus operator is dropped.
*/
@@ -2388,6 +2568,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
* Turn the cache ON/OFF.
* Turning the cache OFF shall trigger flushing of the data
* to the non-volatile storage.
+ * This function should be called with the host claimed.
*/
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
@@ -2399,7 +2580,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
mmc_card_is_removable(host))
return err;
- mmc_claim_host(host);
if (card && mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0)) {
enable = !!enable;
@@ -2417,7 +2597,6 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
card->ext_csd.cache_ctrl = enable;
}
}
- mmc_release_host(host);
return err;
}
@@ -2436,10 +2615,6 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
- err = mmc_cache_ctrl(host, 0);
- if (err)
- goto out;
-
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend) {
@@ -2581,6 +2756,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+/**
+ * mmc_init_context_info() - init synchronization context
+ * @host: mmc host
+ *
+ * Initialize the struct context_info needed to implement the asynchronous
+ * request mechanism, used by the mmc core, the host driver and the mmc
+ * request supplier.
+ */
+void mmc_init_context_info(struct mmc_host *host)
+{
+ spin_lock_init(&host->context_info.lock);
+ host->context_info.is_new_req = false;
+ host->context_info.is_done_rcv = false;
+ host->context_info.is_waiting_last_req = false;
+ init_waitqueue_head(&host->context_info.wait);
+}
+
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354f..b9f18a2a8874 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -40,11 +40,12 @@ void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
- bool cmd11);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
void mmc_power_off(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
@@ -76,5 +77,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
+void mmc_init_context_info(struct mmc_host *host);
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ee2e16b17017..2a3593d9f87d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
@@ -23,6 +25,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "host.h"
@@ -295,6 +298,126 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
#endif
/**
+ * mmc_of_parse() - parse host's device-tree node
+ * @host: host whose node should be parsed.
+ *
+ * To keep the rest of the MMC subsystem unaware of whether DT has been
+ * used to instantiate and configure this host instance or not, we
+ * parse the properties and set respective generic mmc-host flags and
+ * parameters.
+ */
+void mmc_of_parse(struct mmc_host *host)
+{
+ struct device_node *np;
+ u32 bus_width;
+ bool explicit_inv_wp, gpio_inv_wp = false;
+ enum of_gpio_flags flags;
+ int len, ret, gpio;
+
+ if (!host->parent || !host->parent->of_node)
+ return;
+
+ np = host->parent->of_node;
+
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+ if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+ }
+
+ switch (bus_width) {
+ case 8:
+ host->caps |= MMC_CAP_8_BIT_DATA;
+ /* Hosts capable of 8-bit transfers can also do 4 bits */
+ case 4:
+ host->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(host->parent,
+ "Invalid \"bus-width\" value %u!\n", bus_width);
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+ of_property_read_u32(np, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+ * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
+ * mmc-gpio helpers are used to attach, configure and use them. If
+ * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
+ * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
+ * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
+ * is set. If the "non-removable" property is found, the
+ * MMC_CAP_NONREMOVABLE capability is set and no card-detection
+ * configuration is performed.
+ */
+
+ /* Parse Card Detection */
+ if (of_find_property(np, "non-removable", &len)) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+ bool explicit_inv_cd, gpio_inv_cd = false;
+
+ explicit_inv_cd = of_property_read_bool(np, "cd-inverted");
+
+ if (of_find_property(np, "broken-cd", &len))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+ if (gpio_is_valid(gpio)) {
+ if (!(flags & OF_GPIO_ACTIVE_LOW))
+ gpio_inv_cd = true;
+
+ ret = mmc_gpio_request_cd(host, gpio);
+ if (ret < 0)
+ dev_err(host->parent,
+ "Failed to request CD GPIO #%d: %d!\n",
+ gpio, ret);
+ else
+ dev_info(host->parent, "Got CD GPIO #%d.\n",
+ gpio);
+ }
+
+ if (explicit_inv_cd ^ gpio_inv_cd)
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+
+ /* Parse Write Protection */
+ explicit_inv_wp = of_property_read_bool(np, "wp-inverted");
+
+ gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
+ if (gpio_is_valid(gpio)) {
+ if (!(flags & OF_GPIO_ACTIVE_LOW))
+ gpio_inv_wp = true;
+
+ ret = mmc_gpio_request_ro(host, gpio);
+ if (ret < 0)
+ dev_err(host->parent,
+ "Failed to request WP GPIO: %d!\n", ret);
+ }
+ if (explicit_inv_wp ^ gpio_inv_wp)
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ if (of_find_property(np, "cap-sd-highspeed", &len))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+ if (of_find_property(np, "cap-mmc-highspeed", &len))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_find_property(np, "cap-power-off-card", &len))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+ if (of_find_property(np, "cap-sdio-irq", &len))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+ if (of_find_property(np, "keep-power-in-suspend", &len))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+ if (of_find_property(np, "enable-sdio-wakeup", &len))
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+}
+
+EXPORT_SYMBOL(mmc_of_parse);
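A minimal sketch of how a platform host driver is expected to use the new helper from its probe path; the driver shape and names are hypothetical, and only the mmc_of_parse() call itself comes from the code above (in this version it returns void):

	#include <linux/errno.h>
	#include <linux/mmc/host.h>
	#include <linux/platform_device.h>

	static int example_mmc_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;

		mmc = mmc_alloc_host(0, &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/* Picks up bus-width, max-frequency, cd-gpios, wp-gpios, ... from DT */
		mmc_of_parse(mmc);

		/* driver-specific ops, caps and resources would be set up here */

		return mmc_add_host(mmc);
	}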
+
+/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
* @dev: pointer to host device model structure
@@ -306,19 +429,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
int err;
struct mmc_host *host;
- if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
- return NULL;
-
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
if (!host)
return NULL;
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
+ idr_preload(GFP_KERNEL);
spin_lock(&mmc_host_lock);
- err = idr_get_new(&mmc_host_idr, host, &host->index);
+ err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (err >= 0)
+ host->index = err;
spin_unlock(&mmc_host_lock);
- if (err)
+ idr_preload_end();
+ if (err < 0)
goto free;
dev_set_name(&host->class_dev, "mmc%d", host->index);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e6e39111e05b..c8f3d6e0684e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -496,7 +496,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
- if (ext_csd[EXT_CSD_RPMB_MULT]) {
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
"rpmb", 0, false,
@@ -538,6 +538,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
} else {
card->ext_csd.data_sector_size = 512;
}
@@ -769,11 +774,11 @@ static int mmc_select_hs200(struct mmc_card *card)
if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */
if (err)
@@ -1221,8 +1226,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* WARNING: eMMC rules are NOT the same as SD DDR
*/
if (ddr == MMC_1_2V_DDR_MODE) {
- err = mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_120, 0);
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120);
if (err)
goto err;
}
@@ -1275,6 +1280,29 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ /*
+ * The spec defines mandatory minimum values for packed commands:
+ * read: 5, write: 3
+ */
+ if (card->ext_csd.max_packed_writes >= 3 &&
+ card->ext_csd.max_packed_reads >= 5 &&
+ host->caps2 & MMC_CAP2_PACKED_CMD) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1379,6 +1407,11 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ err = mmc_cache_ctrl(host, 0);
+ if (err)
+ goto out;
+
if (mmc_can_poweroff_notify(host->card))
err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
else if (mmc_card_can_sleep(host))
@@ -1386,8 +1419,9 @@ static int mmc_suspend(struct mmc_host *host)
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
- mmc_release_host(host);
+out:
+ mmc_release_host(host);
return err;
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 6d8f7012d73a..49f04bc9d0eb 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -363,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 74972c241dff..9e645e19cec6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -444,8 +444,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+ if (!mmc_host_uhs(card->host)) {
card->sd_bus_speed = 0;
return;
}
@@ -713,6 +712,14 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
{
int err;
u32 max_current;
+ int retries = 10;
+
+try_again:
+ if (!retries) {
+ ocr &= ~SD_OCR_S18R;
+ pr_warning("%s: Skipping voltage switch\n",
+ mmc_hostname(host));
+ }
/*
* Since we're changing the OCR value, we seem to
@@ -734,10 +741,10 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
/*
* If the host supports one of UHS-I modes, request the card
- * to switch to 1.8V signaling level.
+ * to switch to 1.8V signaling level. If the card has repeatedly
+ * failed to switch, however, skip this.
*/
- if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
+ if (retries && mmc_host_uhs(host))
ocr |= SD_OCR_S18R;
/*
@@ -748,7 +755,6 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
if (max_current > 150)
ocr |= SD_OCR_XPC;
-try_again:
err = mmc_send_app_op_cond(host, ocr, rocr);
if (err)
return err;
@@ -759,9 +765,12 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
- if (err) {
- ocr &= ~SD_OCR_S18R;
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err == -EAGAIN) {
+ retries--;
+ goto try_again;
+ } else if (err) {
+ retries = 0;
goto try_again;
}
}
@@ -960,16 +969,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
/* Card is an ultra-high-speed card */
mmc_card_set_uhs(card);
-
- /*
- * Since initialization is now complete, enable preset
- * value registers for UHS-I cards.
- */
- if (host->ops->enable_preset_value) {
- mmc_host_clk_hold(card->host);
- host->ops->enable_preset_value(host, true);
- mmc_host_clk_release(card->host);
- }
} else {
/*
* Attempt to change to high-speed (if supported)
@@ -1148,13 +1147,6 @@ int mmc_attach_sd(struct mmc_host *host)
BUG_ON(!host);
WARN_ON(!host->claimed);
- /* Disable preset value enable if already set since last time */
- if (host->ops->enable_preset_value) {
- mmc_host_clk_hold(host);
- host->ops->enable_preset_value(host, false);
- mmc_host_clk_release(host);
- }
-
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
return err;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2273ce6b6c1a..aa0719a4dfd1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
if (ret)
goto out;
- if (card->host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50)) {
+ if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50;
@@ -478,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@@ -489,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
@@ -513,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
@@ -583,10 +584,19 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
{
struct mmc_card *card;
int err;
+ int retries = 10;
BUG_ON(!host);
WARN_ON(!host->claimed);
+try_again:
+ if (!retries) {
+ pr_warning("%s: Skipping voltage switch\n",
+ mmc_hostname(host));
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+
/*
* Inform the card of the voltage
*/
@@ -645,14 +655,16 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* systems that claim 1.8v signalling in fact do not support
* it.
*/
- if ((ocr & R4_18V_PRESENT) &&
- (host->caps &
- (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
- MMC_CAP_UHS_DDR50))) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- true);
- if (err) {
+ if (!powered_resume && (ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err == -EAGAIN) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+ mmc_remove_card(card);
+ retries--;
+ goto try_again;
+ } else if (err) {
ocr &= ~R4_18V_PRESENT;
host->ocr &= ~R4_18V_PRESENT;
}
@@ -937,10 +949,12 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
/* No need to reinitialize powered-resumed nonremovable cards */
- if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
+ if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
+ sdio_reset(host);
+ mmc_go_idle(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
- else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
if (err > 0) {
@@ -1020,6 +1034,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
goto out;
}
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
@@ -1085,6 +1103,10 @@ int mmc_attach_sdio(struct mmc_host *host)
/*
* Detect and init the card.
*/
+ if (mmc_host_uhs(host))
+ /* to query card if 1.8V signalling is supported */
+ host->ocr |= R4_18V_PRESENT;
+
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err) {
if (err == -EAGAIN) {
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 16a1c0b6f264..324235105519 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -92,6 +92,20 @@ int mmc_gpio_get_cd(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
+/**
+ * mmc_gpio_request_ro - request a gpio for write-protection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * Since devm_* managed functions are used in mmc_gpio_request_ro(), client
+ * drivers do not need to call mmc_gpio_free_ro() explicitly when the gpio is
+ * only requested at probe time and released at unbind time. However, if
+ * client drivers do something special such as switching write-protection at
+ * runtime, they are responsible for calling mmc_gpio_request_ro() and
+ * mmc_gpio_free_ro() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@@ -106,7 +120,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+ ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ctx->ro_label);
if (ret < 0)
return ret;
@@ -116,6 +131,20 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
+/**
+ * mmc_gpio_request_cd - request a gpio for card-detection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * Since devm_* managed functions are used in mmc_gpio_request_cd(), client
+ * drivers do not need to call mmc_gpio_free_cd() explicitly when the gpio is
+ * only requested at probe time and released at unbind time. However, if
+ * client drivers do something special such as switching card-detection at
+ * runtime, they are responsible for calling mmc_gpio_request_cd() and
+ * mmc_gpio_free_cd() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
{
struct mmc_gpio *ctx;
@@ -128,7 +157,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
+ ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+ ctx->cd_label);
if (ret < 0)
/*
* don't bother freeing memory. It might still get used by other
@@ -146,7 +176,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
irq = -EINVAL;
if (irq >= 0) {
- ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
+ ret = devm_request_threaded_irq(&host->class_dev, irq,
+ NULL, mmc_gpio_cd_irqt,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
ctx->cd_label, host);
if (ret < 0)
@@ -164,6 +195,13 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
}
EXPORT_SYMBOL(mmc_gpio_request_cd);
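A minimal sketch of a host driver using the slot-gpio helpers documented above from its probe path; the gpio numbers come from the caller and are hypothetical, and with the devm_* conversion no matching free calls are required on the normal unbind path:

	#include <linux/device.h>
	#include <linux/mmc/host.h>
	#include <linux/mmc/slot-gpio.h>

	static int example_request_slot_gpios(struct mmc_host *mmc,
					      unsigned int cd_gpio,
					      unsigned int ro_gpio)
	{
		int ret;

		ret = mmc_gpio_request_cd(mmc, cd_gpio);
		if (ret)
			return ret;

		/* A write-protect line is optional on many boards */
		ret = mmc_gpio_request_ro(mmc, ro_gpio);
		if (ret)
			dev_warn(mmc->parent, "no WP gpio: %d\n", ret);

		return 0;
	}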
+/**
+ * mmc_gpio_free_ro - free the write-protection gpio
+ * @host: mmc host
+ *
+ * It is provided only for cases where client drivers need to manually free
+ * the write-protection gpio requested by mmc_gpio_request_ro().
+ */
void mmc_gpio_free_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -175,10 +213,17 @@ void mmc_gpio_free_ro(struct mmc_host *host)
gpio = ctx->ro_gpio;
ctx->ro_gpio = -EINVAL;
- gpio_free(gpio);
+ devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_ro);
+/**
+ * mmc_gpio_free_cd - free the card-detection gpio
+ * @host: mmc host
+ *
+ * It is provided only for cases where client drivers need to manually free
+ * the card-detection gpio requested by mmc_gpio_request_cd().
+ */
void mmc_gpio_free_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -188,13 +233,13 @@ void mmc_gpio_free_cd(struct mmc_host *host)
return;
if (host->slot.cd_irq >= 0) {
- free_irq(host->slot.cd_irq, host);
+ devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
host->slot.cd_irq = -EINVAL;
}
gpio = ctx->cd_gpio;
ctx->cd_gpio = -EINVAL;
- gpio_free(gpio);
+ devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_cd);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 3be8b94d7914..d88219e1d86e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -238,6 +238,17 @@ config MMC_SDHCI_S3C_DMA
YMMV.
+config MMC_SDHCI_BCM2835
+ tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+ depends on ARCH_BCM2835
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
@@ -361,6 +372,13 @@ config MMC_DAVINCI
If you have an DAVINCI board with a Multimedia Card slot,
say Y or M here. If unsure, say N.
+config MMC_GOLDFISH
+ tristate "goldfish qemu Multimedia Card Interface support"
+ depends on GOLDFISH
+ help
+ This selects the Goldfish Multimedia Card Interface emulation
+ found on the Goldfish Android virtual device.
+
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
depends on SPI_MASTER && !HIGHMEM && HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e4e218c930bd..c380e3cf0a3b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_MSM) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
+obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
new file mode 100644
index 000000000000..ef3aef0f376d
--- /dev/null
+++ b/drivers/mmc/host/android-goldfish.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2007, Google Inc.
+ * Copyright 2012, Intel Inc.
+ *
+ * based on omap.c driver, which was
+ * Copyright (C) 2004 Nokia Corporation
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ * Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/major.h>
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#define DRIVER_NAME "goldfish_mmc"
+
+#define BUFFER_SIZE 16384
+
+#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr))
+#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
+
+enum {
+ /* status register */
+ MMC_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ MMC_INT_ENABLE = 0x04,
+ /* set this to specify buffer address */
+ MMC_SET_BUFFER = 0x08,
+
+ /* MMC command number */
+ MMC_CMD = 0x0C,
+
+ /* MMC argument */
+ MMC_ARG = 0x10,
+
+ /* MMC response (or R2 bits 0 - 31) */
+ MMC_RESP_0 = 0x14,
+
+ /* MMC R2 response bits 32 - 63 */
+ MMC_RESP_1 = 0x18,
+
+ /* MMC R2 response bits 64 - 95 */
+ MMC_RESP_2 = 0x1C,
+
+ /* MMC R2 response bits 96 - 127 */
+ MMC_RESP_3 = 0x20,
+
+ MMC_BLOCK_LENGTH = 0x24,
+ MMC_BLOCK_COUNT = 0x28,
+
+ /* MMC state flags */
+ MMC_STATE = 0x2C,
+
+ /* MMC_INT_STATUS bits */
+
+ MMC_STAT_END_OF_CMD = 1U << 0,
+ MMC_STAT_END_OF_DATA = 1U << 1,
+ MMC_STAT_STATE_CHANGE = 1U << 2,
+ MMC_STAT_CMD_TIMEOUT = 1U << 3,
+
+ /* MMC_STATE bits */
+ MMC_STATE_INSERTED = 1U << 0,
+ MMC_STATE_READ_ONLY = 1U << 1,
+};
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC 0
+#define OMAP_MMC_CMDTYPE_BCR 1
+#define OMAP_MMC_CMDTYPE_AC 2
+#define OMAP_MMC_CMDTYPE_ADTC 3
+
+
+struct goldfish_mmc_host {
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct device *dev;
+ unsigned char id; /* 16xx chips have 2 MMC blocks */
+ void __iomem *virt_base;
+ unsigned int phys_base;
+ int irq;
+ unsigned char bus_mode;
+ unsigned char hw_bus_mode;
+
+ unsigned int sg_len;
+ unsigned dma_done:1;
+ unsigned dma_in_use:1;
+
+ void __iomem *reg_base;
+};
+
+static inline int
+goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
+{
+ return 0;
+}
+
+static ssize_t
+goldfish_mmc_show_cover_switch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct goldfish_mmc_host *host = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
+ "closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
+
+static void
+goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
+{
+ u32 cmdreg;
+ u32 resptype;
+ u32 cmdtype;
+
+ host->cmd = cmd;
+
+ resptype = 0;
+ cmdtype = 0;
+
+ /* Our hardware needs to know the exact type */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ /* resp 1, 1b, 6, 7 */
+ resptype = 1;
+ break;
+ case MMC_RSP_R2:
+ resptype = 2;
+ break;
+ case MMC_RSP_R3:
+ resptype = 3;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc),
+ "Invalid response type: %04x\n", mmc_resp_type(cmd));
+ break;
+ }
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
+ cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
+ cmdtype = OMAP_MMC_CMDTYPE_BC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
+ cmdtype = OMAP_MMC_CMDTYPE_BCR;
+ else
+ cmdtype = OMAP_MMC_CMDTYPE_AC;
+
+ cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+ if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdreg |= 1 << 6;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= 1 << 11;
+
+ if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+ cmdreg |= 1 << 15;
+
+ GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
+ GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
+}
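To make the command-register encoding above concrete, a worked example (illustrative only) for a multi-block read such as CMD18 with an R1 response:

	/*
	 * Illustrative only, derived from the code above:
	 *   opcode 18 (READ_MULTIPLE_BLOCK)    -> low bits     = 0x12
	 *   resptype 1 (R1)                    -> bits [9:8]   = 0x0100
	 *   cmdtype OMAP_MMC_CMDTYPE_ADTC (3)  -> bits [13:12] = 0x3000
	 *   data transfer that is a read       -> bit 15       = 0x8000
	 * giving cmdreg = 0x12 | 0x0100 | 0x3000 | 0x8000 = 0xb112.
	 */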
+
+static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (host->dma_in_use) {
+ enum dma_data_direction dma_data_dir;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ if (dma_data_dir == DMA_FROM_DEVICE) {
+ /*
+ * We don't really have DMA, so we need
+ * to copy from our platform driver buffer
+ */
+ uint8_t *dest = (uint8_t *)sg_virt(data->sg);
+ memcpy(dest, host->virt_base, data->sg->length);
+ }
+ host->data->bytes_xfered += data->sg->length;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ dma_data_dir);
+ }
+
+ host->data = NULL;
+ host->sg_len = 0;
+
+ /*
+ * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
+ * dozens of requests until the card finishes writing data.
+ * It'd be cheaper to just wait till an EOFB interrupt arrives...
+ */
+
+ if (!data->stop) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, data->mrq);
+ return;
+ }
+
+ goldfish_mmc_start_command(host, data->stop);
+}
+
+static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (!host->dma_in_use) {
+ goldfish_mmc_xfer_done(host, data);
+ return;
+ }
+ if (host->dma_done)
+ goldfish_mmc_xfer_done(host, data);
+}
+
+static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ cmd->resp[2] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_1);
+ cmd->resp[1] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_2);
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_3);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, cmd->mrq);
+ }
+}
+
+static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
+{
+ struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
+ u16 status;
+ int end_command = 0;
+ int end_transfer = 0;
+ int transfer_error = 0;
+ int state_changed = 0;
+ int cmd_timeout = 0;
+
+ while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+
+ if (status & MMC_STAT_END_OF_CMD)
+ end_command = 1;
+
+ if (status & MMC_STAT_END_OF_DATA)
+ end_transfer = 1;
+
+ if (status & MMC_STAT_STATE_CHANGE)
+ state_changed = 1;
+
+ if (status & MMC_STAT_CMD_TIMEOUT) {
+ end_command = 0;
+ cmd_timeout = 1;
+ }
+ }
+
+ if (cmd_timeout) {
+ struct mmc_request *mrq = host->mrq;
+ mrq->cmd->error = -ETIMEDOUT;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ }
+
+ if (end_command)
+ goldfish_mmc_cmd_done(host, host->cmd);
+
+ if (transfer_error)
+ goldfish_mmc_xfer_done(host, host->data);
+ else if (end_transfer) {
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ } else if (host->data != NULL) {
+ /*
+ * WORKAROUND -- after porting this driver from 2.6 to 3.4,
+ * during device initialization, cases where host->data is
+ * non-null but end_transfer is false would occur. Doing
+ * nothing in such cases results in no further interrupts,
+ * and initialization failure.
+ * TODO -- find the real cause.
+ */
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ }
+
+ if (state_changed) {
+ u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ pr_info("%s: Card detect now %d\n", __func__,
+ (state & MMC_STATE_INSERTED));
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ if (!end_command && !end_transfer &&
+ !transfer_error && !state_changed && !cmd_timeout) {
+ status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
+ dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
+ if (status != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
+ struct mmc_request *req)
+{
+ struct mmc_data *data = req->data;
+ int block_size;
+ unsigned sg_len;
+ enum dma_data_direction dma_data_dir;
+
+ host->data = data;
+ if (data == NULL) {
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
+ host->dma_in_use = 0;
+ return;
+ }
+
+ block_size = data->blksz;
+
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);
+
+ /*
+ * Cope with calling layer confusion; it issues "single
+ * block" writes using multi-block scatterlists.
+ */
+ sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ sg_len, dma_data_dir);
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+
+ if (dma_data_dir == DMA_TO_DEVICE) {
+ /*
+ * We don't really have DMA, so we need to copy to our
+ * platform driver buffer
+ */
+ const uint8_t *src = (uint8_t *)sg_virt(data->sg);
+ memcpy(host->virt_base, src, data->sg->length);
+ }
+}
+
+static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = req;
+ goldfish_mmc_prepare_data(host, req);
+ goldfish_mmc_start_command(host, req->cmd);
+
+ /*
+ * This is to avoid accidentally being detected as an SDIO card
+ * in mmc_attach_sdio().
+ */
+ if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
+ req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
+ req->cmd->error = -EINVAL;
+}
+
+static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ host->bus_mode = ios->bus_mode;
+ host->hw_bus_mode = host->bus_mode;
+}
+
+static int goldfish_mmc_get_ro(struct mmc_host *mmc)
+{
+ uint32_t state;
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ return ((state & MMC_STATE_READ_ONLY) != 0);
+}
+
+static const struct mmc_host_ops goldfish_mmc_ops = {
+ .request = goldfish_mmc_request,
+ .set_ios = goldfish_mmc_set_ios,
+ .get_ro = goldfish_mmc_get_ro,
+};
+
+static int goldfish_mmc_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct goldfish_mmc_host *host = NULL;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+ dma_addr_t buf_addr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (res == NULL || irq < 0)
+ return -ENXIO;
+
+ mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
+ if (mmc == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_host_failed;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
+ host->reg_base = ioremap(res->start, res->end - res->start + 1);
+ if (host->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto ioremap_failed;
+ }
+ host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
+ &buf_addr, GFP_KERNEL);
+
+ if (host->virt_base == 0) {
+ ret = -ENOMEM;
+ goto dma_alloc_failed;
+ }
+ host->phys_base = buf_addr;
+
+ host->id = pdev->id;
+ host->irq = irq;
+
+ mmc->ops = &goldfish_mmc_ops;
+ mmc->f_min = 400000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ /* Use scatterlist DMA to reduce per-transfer costs.
+ * NOTE: max_seg_size assumes that small blocks aren't
+ * normally used (except e.g. for reading SD registers).
+ */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */
+ mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */
+ mmc->max_req_size = BUFFER_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
+ goto err_request_irq_failed;
+ }
+
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
+ ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
+ if (ret)
+ dev_warn(mmc_dev(host->mmc),
+ "Unable to create sysfs attributes\n");
+
+ GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
+ MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
+ MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);
+
+ mmc_add_host(mmc);
+ return 0;
+
+err_request_irq_failed:
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
+ host->phys_base);
+dma_alloc_failed:
+ iounmap(host->reg_base);
+ioremap_failed:
+ mmc_free_host(host->mmc);
+err_alloc_host_failed:
+ return ret;
+}
+
+static int goldfish_mmc_remove(struct platform_device *pdev)
+{
+ struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ BUG_ON(host == NULL);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
+ iounmap(host->reg_base);
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static struct platform_driver goldfish_mmc_driver = {
+ .probe = goldfish_mmc_probe,
+ .remove = goldfish_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(goldfish_mmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 4d50da618166..72fd0f2c9013 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -175,16 +175,6 @@ static int dw_mci_exynos_setup_bus(struct dw_mci *host,
}
}
- gpio = of_get_named_gpio(slot_np, "wp-gpios", 0);
- if (gpio_is_valid(gpio)) {
- if (devm_gpio_request(host->dev, gpio, "dw-mci-wp"))
- dev_info(host->dev, "gpio [%d] request failed\n",
- gpio);
- } else {
- dev_info(host->dev, "wp gpio not available");
- host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT;
- }
-
if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
return 0;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 323c5022c2ca..98342213ed21 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -34,6 +34,7 @@
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include "dw_mmc.h"
@@ -74,6 +75,8 @@ struct idmac_desc {
* struct dw_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
+ * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
+ * @wp_gpio: If gpio_is_valid(), we'll use this to read the write-protect state.
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
@@ -88,6 +91,9 @@ struct dw_mci_slot {
struct mmc_host *mmc;
struct dw_mci *host;
+ int quirks;
+ int wp_gpio;
+
u32 ctype;
struct mmc_request *mrq;
@@ -825,10 +831,12 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
- if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
+ if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
read_only = 0;
else if (brd->get_ro)
read_only = brd->get_ro(slot->id);
+ else if (gpio_is_valid(slot->wp_gpio))
+ read_only = gpio_get_value(slot->wp_gpio);
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
@@ -1445,7 +1453,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
if (!sg_miter_next(sg_miter))
goto done;
- host->sg = sg_miter->__sg;
+ host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
@@ -1500,7 +1508,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
if (!sg_miter_next(sg_miter))
goto done;
- host->sg = sg_miter->__sg;
+ host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
@@ -1785,6 +1793,30 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
return NULL;
}
+static struct dw_mci_of_slot_quirks {
+ char *quirk;
+ int id;
+} of_slot_quirks[] = {
+ {
+ .quirk = "disable-wp",
+ .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
+ },
+};
+
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int quirks = 0;
+ int idx;
+
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
+ if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
+ quirks |= of_slot_quirks[idx].id;
+
+ return quirks;
+}
+
/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
@@ -1799,7 +1831,34 @@ static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
" as 1\n");
return bus_wd;
}
+
+/* find the write-protect gpio for a given slot; or -EINVAL if none specified */
+static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int gpio;
+
+ if (!np)
+ return -EINVAL;
+
+ gpio = of_get_named_gpio(np, "wp-gpios", 0);
+
+ /* Having a missing entry is valid; return silently */
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
+ dev_warn(dev, "gpio [%d] request failed\n", gpio);
+ return -EINVAL;
+ }
+
+ return gpio;
+}
#else /* CONFIG_OF */
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ return 0;
+}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
return 1;
@@ -1808,6 +1867,10 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
return NULL;
}
+static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -1828,6 +1891,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->host = host;
host->slot[id] = slot;
+ slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
+
mmc->ops = &dw_mci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
mmc->f_max = host->bus_hz;
@@ -1923,6 +1988,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
+
mmc_add_host(mmc);
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index f8dd36102949..145cdaf000d1 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -21,7 +21,11 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
@@ -51,8 +55,6 @@ struct mvsd_host {
struct mmc_host *mmc;
struct device *dev;
struct clk *clk;
- int gpio_card_detect;
- int gpio_write_protect;
};
#define mvsd_write(offs, val) writel(val, iobase + (offs))
@@ -538,13 +540,6 @@ static void mvsd_timeout_timer(unsigned long data)
mmc_request_done(host->mmc, mrq);
}
-static irqreturn_t mvsd_card_detect_irq(int irq, void *dev)
-{
- struct mvsd_host *host = dev;
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- return IRQ_HANDLED;
-}
-
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mvsd_host *host = mmc_priv(mmc);
@@ -564,20 +559,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
-static int mvsd_get_ro(struct mmc_host *mmc)
-{
- struct mvsd_host *host = mmc_priv(mmc);
-
- if (host->gpio_write_protect)
- return gpio_get_value(host->gpio_write_protect);
-
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
static void mvsd_power_up(struct mvsd_host *host)
{
void __iomem *iobase = host->base;
@@ -674,7 +655,7 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static const struct mmc_host_ops mvsd_ops = {
.request = mvsd_request,
- .get_ro = mvsd_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.set_ios = mvsd_set_ios,
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
@@ -703,17 +684,18 @@ mv_conf_mbus_windows(struct mvsd_host *host,
static int __init mvsd_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
- const struct mvsdio_platform_data *mvsd_data;
const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
+ int gpio_card_detect, gpio_write_protect;
+ struct pinctrl *pinctrl;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- mvsd_data = pdev->dev.platform_data;
- if (!r || irq < 0 || !mvsd_data)
+ if (!r || irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -725,8 +707,43 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
- host->base_clock = mvsd_data->clock / 2;
- host->clk = ERR_PTR(-EINVAL);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "no pins associated\n");
+
+ /*
+ * Some non-DT platforms do not pass a clock, and the clock
+ * frequency is passed through platform_data. On DT platforms,
+ * a clock must always be passed, even if there is no gateable
+ * clock associated with the SDIO interface (it can simply be a
+ * fixed-rate clock).
+ */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
+ clk_prepare_enable(host->clk);
+
+ if (np) {
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ host->base_clock = clk_get_rate(host->clk) / 2;
+ gpio_card_detect = of_get_named_gpio(np, "cd-gpios", 0);
+ gpio_write_protect = of_get_named_gpio(np, "wp-gpios", 0);
+ } else {
+ const struct mvsdio_platform_data *mvsd_data;
+ mvsd_data = pdev->dev.platform_data;
+ if (!mvsd_data) {
+ ret = -ENXIO;
+ goto out;
+ }
+ host->base_clock = mvsd_data->clock / 2;
+ gpio_card_detect = mvsd_data->gpio_card_detect;
+ gpio_write_protect = mvsd_data->gpio_write_protect;
+ }
mmc->ops = &mvsd_ops;
@@ -765,43 +782,14 @@ static int __init mvsd_probe(struct platform_device *pdev)
goto out;
}
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- host->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(host->clk))
- clk_prepare_enable(host->clk);
-
- if (mvsd_data->gpio_card_detect) {
- ret = devm_gpio_request_one(&pdev->dev,
- mvsd_data->gpio_card_detect,
- GPIOF_IN, DRIVER_NAME " cd");
- if (ret == 0) {
- irq = gpio_to_irq(mvsd_data->gpio_card_detect);
- ret = devm_request_irq(&pdev->dev, irq,
- mvsd_card_detect_irq,
- IRQ_TYPE_EDGE_RISING |
- IRQ_TYPE_EDGE_FALLING,
- DRIVER_NAME " cd", host);
- if (ret == 0)
- host->gpio_card_detect =
- mvsd_data->gpio_card_detect;
- else
- devm_gpio_free(&pdev->dev,
- mvsd_data->gpio_card_detect);
- }
- }
- if (!host->gpio_card_detect)
+ if (gpio_is_valid(gpio_card_detect)) {
+ ret = mmc_gpio_request_cd(mmc, gpio_card_detect);
+ if (ret)
+ goto out;
+ } else
mmc->caps |= MMC_CAP_NEEDS_POLL;
- if (mvsd_data->gpio_write_protect) {
- ret = devm_gpio_request_one(&pdev->dev,
- mvsd_data->gpio_write_protect,
- GPIOF_IN, DRIVER_NAME " wp");
- if (ret == 0) {
- host->gpio_write_protect =
- mvsd_data->gpio_write_protect;
- }
- }
+ mmc_gpio_request_ro(mmc, gpio_write_protect);
setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
platform_set_drvdata(pdev, mmc);
@@ -811,15 +799,17 @@ static int __init mvsd_probe(struct platform_device *pdev)
pr_notice("%s: %s driver initialized, ",
mmc_hostname(mmc), DRIVER_NAME);
- if (host->gpio_card_detect)
+ if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
printk("using GPIO %d for card detection\n",
- host->gpio_card_detect);
+ gpio_card_detect);
else
printk("lacking card detect (fall back to polling)\n");
return 0;
out:
if (mmc) {
+ mmc_gpio_free_cd(mmc);
+ mmc_gpio_free_ro(mmc);
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
@@ -834,6 +824,8 @@ static int __exit mvsd_remove(struct platform_device *pdev)
struct mvsd_host *host = mmc_priv(mmc);
+ mmc_gpio_free_cd(mmc);
+ mmc_gpio_free_ro(mmc);
mmc_remove_host(mmc);
del_timer_sync(&host->timer);
mvsd_power_down(host);
@@ -873,12 +865,19 @@ static int mvsd_resume(struct platform_device *dev)
#define mvsd_resume NULL
#endif
+static const struct of_device_id mvsdio_dt_ids[] = {
+ { .compatible = "marvell,orion-sdio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
+
static struct platform_driver mvsd_driver = {
.remove = __exit_p(mvsd_remove),
.suspend = mvsd_suspend,
.resume = mvsd_resume,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = mvsdio_dt_ids,
},
};
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 5b665551a6f3..4efe3021b217 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -354,7 +354,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl = data->sg, *sg;
unsigned int sg_len = data->sg_len;
- int i;
+ unsigned int i;
unsigned short dma_data_dir, timeout;
enum dma_transfer_direction slave_dirn;
@@ -804,3 +804,4 @@ module_platform_driver(mxs_mmc_driver);
MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 1534b582c419..d720b5e05b9c 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -146,7 +146,7 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
oms->pdata.get_ro = of_mmc_spi_get_ro;
oms->detect_irq = irq_of_parse_and_map(np, 0);
- if (oms->detect_irq != NO_IRQ) {
+ if (oms->detect_irq != 0) {
oms->pdata.init = of_mmc_spi_init;
oms->pdata.exit = of_mmc_spi_exit;
} else {
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index f74b5adca642..f981f7d1f6e3 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -678,12 +678,19 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
unsigned int data_size = 0;
+ int err;
if (host->eject) {
cmd->error = -ENOMEDIUM;
goto finish;
}
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err) {
+ cmd->error = err;
+ goto finish;
+ }
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -901,6 +908,9 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->eject)
return;
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD))
+ return;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -1073,6 +1083,10 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->eject)
return -ENOMEDIUM;
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
@@ -1083,11 +1097,6 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
voltage = OUTPUT_1V8;
if (voltage == OUTPUT_1V8) {
- err = rtsx_pci_write_register(pcr,
- SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
- if (err < 0)
- goto out;
-
err = sd_wait_voltage_stable_1(host);
if (err < 0)
goto out;
@@ -1122,6 +1131,10 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->eject)
return -ENOMEDIUM;
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_start_run(pcr);
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
new file mode 100644
index 000000000000..8ffea05152c6
--- /dev/null
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -0,0 +1,210 @@
+/*
+ * BCM2835 SDHCI
+ * Copyright (C) 2012 Stephen Warren
+ * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
+ * Portions of the code there were obviously based on the Linux kernel at:
+ * git://github.com/raspberrypi/linux.git rpi-3.6.y
+ * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+
+/*
+ * 400KHz is max freq for card ID etc. Use that as min card clock. We need to
+ * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
+ */
+#define MIN_FREQ 400000
+
+/*
+ * The Arasan has a bugette whereby it may lose the content of successive
+ * writes to registers that are within two SD-card clock cycles of each other
+ * (a clock domain crossing problem). It seems, however, that the data
+ * register does not have this problem, which is just as well - otherwise we'd
+ * have to nobble the DMA engine too.
+ *
+ * This should probably be dynamically calculated based on the actual card
+ * frequency. However, this is the longest we'll have to wait, and doesn't
+ * seem to slow access down too much, so the added complexity doesn't seem
+ * worth it for now.
+ *
+ * 1/MIN_FREQ is (max) time per tick of eMMC clock.
+ * 2/MIN_FREQ is time for two ticks.
+ * Multiply by 1000000 to get microseconds per two ticks.
+ * +1 for hack rounding.
+ */
+#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1)
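With the MIN_FREQ chosen above this works out to a small fixed delay (illustrative arithmetic only):

	/*
	 * Illustrative only: with MIN_FREQ = 400000,
	 *   (2 * 1000000) / 400000 = 5 us for two SD clock ticks,
	 *   plus 1 for rounding, so BCM2835_SDHCI_WRITE_DELAY = 6 us per register write.
	 */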
+
+struct bcm2835_sdhci {
+ u32 shadow;
+};
+
+static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+
+ udelay(BCM2835_SDHCI_WRITE_DELAY);
+}
+
+static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (reg == SDHCI_CAPABILITIES)
+ val |= SDHCI_CAN_VDD_330;
+
+ return val;
+}
+
+static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+ u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
+ bcm2835_sdhci_readl(host, reg & ~3);
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 mask = 0xffff << word_shift;
+ u32 newval = (oldval & ~mask) | (val << word_shift);
+
+ if (reg == SDHCI_TRANSFER_MODE)
+ bcm2835_host->shadow = newval;
+ else
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 word = (val >> word_shift) & 0xffff;
+
+ return word;
+}
+
+static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 mask = 0xff << byte_shift;
+ u32 newval = (oldval & ~mask) | (val << byte_shift);
+
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 byte = (val >> byte_shift) & 0xff;
+
+ return byte;
+}
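A worked example (illustrative only) of the lane arithmetic shared by these accessors, using the standard SDHCI byte register at offset 0x28 (SDHCI_HOST_CONTROL):

	/*
	 * Illustrative only: bcm2835_sdhci_readb(host, 0x28)
	 *   reg & ~3  = 0x28          (32-bit read of the containing word)
	 *   byte_num  = 0x28 & 3 = 0  -> byte_shift = 0, byte = val & 0xff
	 * For offset 0x29 (SDHCI_POWER_CONTROL) byte_num = 1, so the byte comes
	 * from bits [15:8] of the same 32-bit word.
	 */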
+
+unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+{
+ return MIN_FREQ;
+}
+
+static struct sdhci_ops bcm2835_sdhci_ops = {
+ .write_l = bcm2835_sdhci_writel,
+ .write_w = bcm2835_sdhci_writew,
+ .write_b = bcm2835_sdhci_writeb,
+ .read_l = bcm2835_sdhci_readl,
+ .read_w = bcm2835_sdhci_readw,
+ .read_b = bcm2835_sdhci_readb,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = bcm2835_sdhci_get_min_clock,
+};
+
+static struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .ops = &bcm2835_sdhci_ops,
+};
+
+static int bcm2835_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct bcm2835_sdhci *bcm2835_host;
+ struct sdhci_pltfm_host *pltfm_host;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
+ GFP_KERNEL);
+ if (!bcm2835_host) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate bcm2835_sdhci\n");
+ return -ENOMEM;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = bcm2835_host;
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err;
+ }
+
+ return sdhci_add_host(host);
+
+err:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int bcm2835_sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_sdhci_of_match[] = {
+ { .compatible = "brcm,bcm2835-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
+
+static struct platform_driver bcm2835_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-bcm2835",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_sdhci_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = bcm2835_sdhci_probe,
+ .remove = bcm2835_sdhci_remove,
+};
+module_platform_driver(bcm2835_sdhci_driver);
+
+MODULE_DESCRIPTION("BCM2835 SDHCI driver");
+MODULE_AUTHOR("Stephen Warren");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index e07df812ff1e..78ac00227c1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -21,6 +21,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -29,12 +30,22 @@
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
-#define SDHCI_CTRL_D3CD 0x08
+#define ESDHC_CTRL_D3CD 0x08
/* VENDOR SPEC register */
-#define SDHCI_VENDOR_SPEC 0xC0
-#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
-#define SDHCI_WTMK_LVL 0x44
-#define SDHCI_MIX_CTRL 0x48
+#define ESDHC_VENDOR_SPEC 0xc0
+#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
+#define ESDHC_WTMK_LVL 0x44
+#define ESDHC_MIX_CTRL 0x48
+#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
+/* Bits 3 and 6 are not SDHCI standard definitions */
+#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+
+/*
+ * Our interpretation of the SDHCI_HOST_CONTROL register
+ */
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
/*
* There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC:
@@ -42,7 +53,7 @@
* but bit28 is used as the INT DMA ERR in fsl eSDHC design.
* Define this macro DMA error INT for fsl eSDHC
*/
-#define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000
+#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
@@ -143,23 +154,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
-
- /* fake CARD_PRESENT flag */
u32 val = readl(host->ioaddr + reg);
- if (unlikely((reg == SDHCI_PRESENT_STATE)
- && gpio_is_valid(boarddata->cd_gpio))) {
- if (gpio_get_value(boarddata->cd_gpio))
- /* no card, if a valid gpio says so... */
- val &= ~SDHCI_CARD_PRESENT;
- else
- /* ... in all other cases assume card is present */
- val |= SDHCI_CARD_PRESENT;
- }
-
if (unlikely(reg == SDHCI_CAPABILITIES)) {
/* In FSL esdhc IC module, only bit20 is used to indicate the
* ADMA2 capability of esdhc, but this bit is messed up on
@@ -175,8 +171,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
}
if (unlikely(reg == SDHCI_INT_STATUS)) {
- if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) {
- val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
+ val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
val |= SDHCI_INT_ADMA_ERROR;
}
}
@@ -188,17 +184,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
- if (boarddata->cd_type == ESDHC_CD_GPIO)
- /*
- * These interrupts won't work with a custom
- * card_detect gpio (only applied to mx25/35)
- */
- val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
-
if (val & SDHCI_INT_CARD_INT) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -209,9 +197,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
* re-sample it by the following steps.
*/
data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
- data &= ~SDHCI_CTRL_D3CD;
+ data &= ~ESDHC_CTRL_D3CD;
writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
- data |= SDHCI_CTRL_D3CD;
+ data |= ESDHC_CTRL_D3CD;
writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
}
}
@@ -220,15 +208,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
&& (reg == SDHCI_INT_STATUS)
&& (val & SDHCI_INT_DATA_END))) {
u32 v;
- v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
- v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
- writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
}
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
if (val & SDHCI_INT_ADMA_ERROR) {
val &= ~SDHCI_INT_ADMA_ERROR;
- val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR;
+ val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
}
}
@@ -237,15 +225,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
if (unlikely(reg == SDHCI_HOST_VERSION)) {
- u16 val = readw(host->ioaddr + (reg ^ 2));
- /*
- * uSDHC supports SDHCI v3.0, but it's encoded as value
- * 0x3 in host controller version register, which violates
- * SDHCI_SPEC_300 definition. Work it around here.
- */
- if ((val & SDHCI_SPEC_VER_MASK) == 3)
- return --val;
+ reg ^= 2;
+ if (is_imx6q_usdhc(imx_data)) {
+ /*
+ * The usdhc register returns a wrong host version.
+ * Correct it here.
+ */
+ return SDHCI_SPEC_300;
+ }
}
return readw(host->ioaddr + reg);
@@ -258,20 +249,32 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
switch (reg) {
case SDHCI_TRANSFER_MODE:
- /*
- * Postpone this write, we must do it together with a
- * command write that is down below.
- */
if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
&& (host->cmd->data->blocks > 1)
&& (host->cmd->data->flags & MMC_DATA_READ)) {
u32 v;
- v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
- v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
- writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ if (is_imx6q_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ /* Swap AC23 bit */
+ if (val & SDHCI_TRNS_AUTO_CMD23) {
+ val &= ~SDHCI_TRNS_AUTO_CMD23;
+ val |= ESDHC_MIX_CTRL_AC23EN;
+ }
+ m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ } else {
+ /*
+ * Postpone this write, we must do it together with a
+ * command write that is down below.
+ */
+ imx_data->scratchpad = val;
}
- imx_data->scratchpad = val;
return;
case SDHCI_COMMAND:
if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
@@ -279,16 +282,12 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
(imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
val |= SDHCI_CMD_ABORTCMD;
- if (is_imx6q_usdhc(imx_data)) {
- u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL);
- m = imx_data->scratchpad | (m & 0xffff0000);
- writel(m, host->ioaddr + SDHCI_MIX_CTRL);
+ if (is_imx6q_usdhc(imx_data))
writel(val << 16,
host->ioaddr + SDHCI_TRANSFER_MODE);
- } else {
+ else
writel(val << 16 | imx_data->scratchpad,
host->ioaddr + SDHCI_TRANSFER_MODE);
- }
return;
case SDHCI_BLOCK_SIZE:
val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
@@ -302,6 +301,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 new_val;
+ u32 mask;
switch (reg) {
case SDHCI_POWER_CONTROL:
@@ -311,10 +311,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
*/
return;
case SDHCI_HOST_CONTROL:
- /* FSL messed up here, so we can just keep those three */
- new_val = val & (SDHCI_CTRL_LED | \
- SDHCI_CTRL_4BITBUS | \
- SDHCI_CTRL_D3CD);
+ /* FSL messed up here, so we need to manually compose it. */
+ new_val = val & SDHCI_CTRL_LED;
/* ensure the endianness */
new_val |= ESDHC_HOST_CONTROL_LE;
/* bits 8&9 are reserved on mx25 */
@@ -323,7 +321,13 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
}
- esdhc_clrset_le(host, 0xffff, new_val, reg);
+ /*
+ * Do not touch buswidth bits here. This is done in
+ * esdhc_pltfm_bus_width.
+ */
+ mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK;
+
+ esdhc_clrset_le(host, mask, new_val, reg);
return;
}
esdhc_clrset_le(host, 0xff, val, reg);
@@ -336,15 +340,15 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* circuit relies on. To work around it, we turn the clocks on back
* to keep card detection circuit functional.
*/
- if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
+ if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) {
esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
-}
-
-static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
+ /*
+ * The reset on usdhc fails to clear MIX_CTRL register.
+ * Do it manually here.
+ */
+ if (is_imx6q_usdhc(imx_data))
+ writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ }
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -362,8 +366,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
switch (boarddata->wp_type) {
case ESDHC_WP_GPIO:
- if (gpio_is_valid(boarddata->wp_gpio))
- return gpio_get_value(boarddata->wp_gpio);
+ return mmc_gpio_get_ro(host->mmc);
case ESDHC_WP_CONTROLLER:
return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
SDHCI_WRITE_PROTECT);
@@ -374,6 +377,28 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
return -ENOSYS;
}
+static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
+{
+ u32 ctrl;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl = ESDHC_CTRL_8BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl = ESDHC_CTRL_4BITBUS;
+ break;
+ default:
+ ctrl = 0;
+ break;
+ }
+
+ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+ SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -381,9 +406,10 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
.set_clock = esdhc_set_clock,
- .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_ro = esdhc_pltfm_get_ro,
+ .platform_bus_width = esdhc_pltfm_bus_width,
};
static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -394,14 +420,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
-static irqreturn_t cd_irq(int irq, void *data)
-{
- struct sdhci_host *sdhost = (struct sdhci_host *)data;
-
- tasklet_schedule(&sdhost->card_tasklet);
- return IRQ_HANDLED;
-};
-
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -429,6 +447,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
+ of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
+
return 0;
}
#else
@@ -512,7 +532,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to something insane. Change it back here.
*/
if (is_imx6q_usdhc(imx_data))
- writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
+ writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
@@ -527,37 +547,22 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio,
- GPIOF_IN, "ESDHC_WP");
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
if (err) {
- dev_warn(mmc_dev(host->mmc),
- "no write-protect pin available!\n");
- boarddata->wp_gpio = -EINVAL;
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ goto disable_clk;
}
- } else {
- boarddata->wp_gpio = -EINVAL;
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
- if (boarddata->cd_type != ESDHC_CD_GPIO)
- boarddata->cd_gpio = -EINVAL;
-
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio,
- GPIOF_IN, "ESDHC_CD");
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
if (err) {
dev_err(mmc_dev(host->mmc),
- "no card-detect pin available!\n");
- goto disable_clk;
- }
-
- err = devm_request_irq(&pdev->dev,
- gpio_to_irq(boarddata->cd_gpio), cd_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- mmc_hostname(host->mmc), host);
- if (err) {
- dev_err(mmc_dev(host->mmc), "request irq error\n");
+ "failed to request card-detect gpio!\n");
goto disable_clk;
}
/* fall through */
@@ -575,6 +580,19 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
}
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index c7dd0cbc99de..c7ccf3034dad 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -935,7 +935,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
+static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -977,7 +977,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
static struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
- .platform_8bit_width = sdhci_pci_8bit_width,
+ .platform_bus_width = sdhci_pci_bus_width,
.hw_reset = sdhci_pci_hw_reset,
};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d4283ef5917a..3145a780b035 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -36,6 +36,14 @@
#endif
#include "sdhci-pltfm.h"
+unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
static struct sdhci_ops sdhci_pltfm_ops = {
};
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 37e0e184a0bb..153b6c509ebe 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -98,6 +98,8 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
struct sdhci_pltfm_data *pdata);
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
+
#ifdef CONFIG_PM
extern const struct dev_pm_ops sdhci_pltfm_pmops;
#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
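The helper declared here is intended to be plugged straight into a host driver's sdhci_ops, as the pxav2/pxav3, esdhc-imx and bcm2835 changes in this series do; a minimal sketch (illustrative only):

	/* Illustrative only: let the common clk-based helper report the max clock. */
	static struct sdhci_ops example_sdhci_ops = {
		.get_max_clock = sdhci_pltfm_clk_get_max_clock,
	};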
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index ac854aa192a8..eeb7d439db1d 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -111,17 +111,10 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
return 0;
}
-static u32 pxav2_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
-}
-
static struct sdhci_ops pxav2_sdhci_ops = {
- .get_max_clock = pxav2_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
.platform_reset_exit = pxav2_set_private_registers,
- .platform_8bit_width = pxav2_mmc_set_width,
+ .platform_bus_width = pxav2_mmc_set_width,
};
#ifdef CONFIG_OF
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index fad0966427fd..a0cdbc570a83 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -32,10 +32,14 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
+#define PXAV3_RPM_DELAY_MS 50
+
#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
#define SDCLK_SEL 0x100
#define SDCLK_DELAY_SHIFT 9
@@ -163,18 +167,11 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
return 0;
}
-static u32 pxav3_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-
- return clk_get_rate(pltfm_host->clk);
-}
-
static struct sdhci_ops pxav3_sdhci_ops = {
.platform_reset_exit = pxav3_set_private_registers,
.set_uhs_signaling = pxav3_set_uhs_signaling,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
- .get_max_clock = pxav3_get_max_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
};
#ifdef CONFIG_OF
@@ -303,20 +300,37 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+ pm_runtime_get_noresume(&pdev->dev);
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(&pdev->dev, "failed to add host\n");
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
goto err_add_host;
}
platform_set_drvdata(pdev, host);
+ if (pdata->pm_caps & MMC_PM_KEEP_POWER) {
+ device_init_wakeup(&pdev->dev, 1);
+ host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ device_init_wakeup(&pdev->dev, 0);
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_add_host:
clk_disable_unprepare(clk);
clk_put(clk);
- mmc_gpio_free_cd(host->mmc);
err_cd_req:
err_clk_get:
sdhci_pltfm_free(pdev);
@@ -329,16 +343,14 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
- struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ pm_runtime_get_sync(&pdev->dev);
sdhci_remove_host(host, 1);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
- if (gpio_is_valid(pdata->ext_cd_gpio))
- mmc_gpio_free_cd(host->mmc);
-
sdhci_pltfm_free(pdev);
kfree(pxa);
@@ -347,6 +359,83 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pxav3_suspend(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_suspend_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int sdhci_pxav3_resume(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_resume_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_pxav3_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ unsigned long flags;
+
+ if (pltfm_host->clk) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ clk_disable_unprepare(pltfm_host->clk);
+ }
+
+ return 0;
+}
+
+static int sdhci_pxav3_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ unsigned long flags;
+
+ if (pltfm_host->clk) {
+ clk_prepare_enable(pltfm_host->clk);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops sdhci_pxav3_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
+ SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
+ sdhci_pxav3_runtime_resume, NULL)
+};
+
+#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
+
+#else
+#define SDHCI_PXAV3_PMOPS NULL
+#endif
+
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
@@ -354,7 +443,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.of_match_table = sdhci_pxav3_of_match,
#endif
.owner = THIS_MODULE,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = SDHCI_PXAV3_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index a0c621421ee8..7363efe72287 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -332,14 +332,14 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
}
/**
- * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * sdhci_s3c_platform_bus_width - support 8bit buswidth
* @host: The SDHCI host being queried
* @width: MMC_BUS_WIDTH_ macro for the bus width being requested
*
* We have 8-bit width support but is not a v3 controller.
- * So we add platform_8bit_width() and support 8bit width.
+ * So we add platform_bus_width() and support 8bit width.
*/
-static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -369,7 +369,7 @@ static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
- .platform_8bit_width = sdhci_s3c_platform_8bit_width,
+ .platform_bus_width = sdhci_s3c_platform_bus_width,
};
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
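Together with the sdhci.c and sdhci.h hunks further down, this renames the host op from platform_8bit_width() to platform_bus_width(); the callback now receives whatever MMC_BUS_WIDTH_* value the core selected instead of being 8-bit specific. A hedged sketch of what such a callback typically looks like, using the generic SDHCI host-control bits (illustrative, not the s3c implementation, which has its own register quirks):

/* Sketch of a platform_bus_width() op, assuming the standard
 * SDHCI_HOST_CONTROL bus-width bits are usable on this controller.
 */
static int example_platform_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
		break;
	case MMC_BUS_WIDTH_4:
		ctrl &= ~SDHCI_CTRL_8BITBUS;
		ctrl |= SDHCI_CTRL_4BITBUS;
		break;
	default:
		ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_CTRL_8BITBUS);
		break;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	return 0;
}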
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 3695b2e0cbd2..08b06e9a3a21 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -27,8 +27,6 @@
#include <asm/gpio.h>
-#include <linux/platform_data/mmc-sdhci-tegra.h>
-
#include "sdhci-pltfm.h"
/* Tegra SDHOST controller vendor register definitions */
@@ -45,8 +43,11 @@ struct sdhci_tegra_soc_data {
};
struct sdhci_tegra {
- const struct tegra_sdhci_platform_data *plat;
const struct sdhci_tegra_soc_data *soc_data;
+ int cd_gpio;
+ int wp_gpio;
+ int power_gpio;
+ int is_8bit;
};
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
@@ -108,12 +109,11 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
- if (!gpio_is_valid(plat->wp_gpio))
+ if (!gpio_is_valid(tegra_host->wp_gpio))
return -1;
- return gpio_get_value(plat->wp_gpio);
+ return gpio_get_value(tegra_host->wp_gpio);
}
static irqreturn_t carddetect_irq(int irq, void *data)
@@ -143,15 +143,14 @@ static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
}
}
-static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
u32 ctrl;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+ if (tegra_host->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
} else {
@@ -170,7 +169,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
- .platform_8bit_width = tegra_sdhci_8bit,
+ .platform_bus_width = tegra_sdhci_buswidth,
.platform_reset_exit = tegra_sdhci_reset_exit,
};
@@ -217,31 +216,19 @@ static const struct of_device_id sdhci_tegra_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
-static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
- struct platform_device *pdev)
+static void sdhci_tegra_parse_dt(struct device *dev,
+ struct sdhci_tegra *tegra_host)
{
- struct tegra_sdhci_platform_data *plat;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
u32 bus_width;
- if (!np)
- return NULL;
-
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
- if (!plat) {
- dev_err(&pdev->dev, "Can't allocate platform data\n");
- return NULL;
- }
-
- plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
- plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
+ tegra_host->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ tegra_host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
bus_width == 8)
- plat->is_8bit = 1;
-
- return plat;
+ tegra_host->is_8bit = 1;
}
static int sdhci_tegra_probe(struct platform_device *pdev)
@@ -250,7 +237,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
const struct sdhci_tegra_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
- struct tegra_sdhci_platform_data *plat;
struct sdhci_tegra *tegra_host;
struct clk *clk;
int rc;
@@ -263,52 +249,40 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
host = sdhci_pltfm_init(pdev, soc_data->pdata);
if (IS_ERR(host))
return PTR_ERR(host);
-
pltfm_host = sdhci_priv(host);
- plat = pdev->dev.platform_data;
-
- if (plat == NULL)
- plat = sdhci_tegra_dt_parse_pdata(pdev);
-
- if (plat == NULL) {
- dev_err(mmc_dev(host->mmc), "missing platform data\n");
- rc = -ENXIO;
- goto err_no_plat;
- }
-
tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
if (!tegra_host) {
dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
rc = -ENOMEM;
- goto err_no_plat;
+ goto err_alloc_tegra_host;
}
-
- tegra_host->plat = plat;
tegra_host->soc_data = soc_data;
-
pltfm_host->priv = tegra_host;
- if (gpio_is_valid(plat->power_gpio)) {
- rc = gpio_request(plat->power_gpio, "sdhci_power");
+ sdhci_tegra_parse_dt(&pdev->dev, tegra_host);
+
+ if (gpio_is_valid(tegra_host->power_gpio)) {
+ rc = gpio_request(tegra_host->power_gpio, "sdhci_power");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
goto err_power_req;
}
- gpio_direction_output(plat->power_gpio, 1);
+ gpio_direction_output(tegra_host->power_gpio, 1);
}
- if (gpio_is_valid(plat->cd_gpio)) {
- rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+ if (gpio_is_valid(tegra_host->cd_gpio)) {
+ rc = gpio_request(tegra_host->cd_gpio, "sdhci_cd");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
goto err_cd_req;
}
- gpio_direction_input(plat->cd_gpio);
+ gpio_direction_input(tegra_host->cd_gpio);
- rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+ rc = request_irq(gpio_to_irq(tegra_host->cd_gpio),
+ carddetect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
@@ -319,14 +293,14 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
}
- if (gpio_is_valid(plat->wp_gpio)) {
- rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+ if (gpio_is_valid(tegra_host->wp_gpio)) {
+ rc = gpio_request(tegra_host->wp_gpio, "sdhci_wp");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
goto err_wp_req;
}
- gpio_direction_input(plat->wp_gpio);
+ gpio_direction_input(tegra_host->wp_gpio);
}
clk = clk_get(mmc_dev(host->mmc), NULL);
@@ -338,9 +312,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pltfm_host->clk = clk;
- host->mmc->pm_caps = plat->pm_flags;
-
- if (plat->is_8bit)
+ if (tegra_host->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
rc = sdhci_add_host(host);
@@ -353,19 +325,19 @@ err_add_host:
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
- if (gpio_is_valid(plat->wp_gpio))
- gpio_free(plat->wp_gpio);
+ if (gpio_is_valid(tegra_host->wp_gpio))
+ gpio_free(tegra_host->wp_gpio);
err_wp_req:
- if (gpio_is_valid(plat->cd_gpio))
- free_irq(gpio_to_irq(plat->cd_gpio), host);
+ if (gpio_is_valid(tegra_host->cd_gpio))
+ free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
err_cd_irq_req:
- if (gpio_is_valid(plat->cd_gpio))
- gpio_free(plat->cd_gpio);
+ if (gpio_is_valid(tegra_host->cd_gpio))
+ gpio_free(tegra_host->cd_gpio);
err_cd_req:
- if (gpio_is_valid(plat->power_gpio))
- gpio_free(plat->power_gpio);
+ if (gpio_is_valid(tegra_host->power_gpio))
+ gpio_free(tegra_host->power_gpio);
err_power_req:
-err_no_plat:
+err_alloc_tegra_host:
sdhci_pltfm_free(pdev);
return rc;
}
@@ -375,21 +347,20 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
- const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- if (gpio_is_valid(plat->wp_gpio))
- gpio_free(plat->wp_gpio);
+ if (gpio_is_valid(tegra_host->wp_gpio))
+ gpio_free(tegra_host->wp_gpio);
- if (gpio_is_valid(plat->cd_gpio)) {
- free_irq(gpio_to_irq(plat->cd_gpio), host);
- gpio_free(plat->cd_gpio);
+ if (gpio_is_valid(tegra_host->cd_gpio)) {
+ free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
+ gpio_free(tegra_host->cd_gpio);
}
- if (gpio_is_valid(plat->power_gpio))
- gpio_free(plat->power_gpio);
+ if (gpio_is_valid(tegra_host->power_gpio))
+ gpio_free(tegra_host->power_gpio);
clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6f0bfc0c8c9c..51bbba486f38 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -53,6 +53,7 @@ static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
#ifdef CONFIG_PM_RUNTIME
static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -1082,6 +1083,37 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
+static u16 sdhci_get_preset_value(struct sdhci_host *host)
+{
+ u16 ctrl, preset = 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ switch (ctrl & SDHCI_CTRL_UHS_MASK) {
+ case SDHCI_CTRL_UHS_SDR12:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ case SDHCI_CTRL_UHS_SDR25:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
+ break;
+ case SDHCI_CTRL_UHS_SDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
+ break;
+ case SDHCI_CTRL_UHS_SDR104:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ break;
+ case SDHCI_CTRL_UHS_DDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ break;
+ default:
+ pr_warn("%s: Invalid UHS-I mode selected\n",
+ mmc_hostname(host->mmc));
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ }
+ return preset;
+}
+
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
@@ -1106,35 +1138,43 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
goto out;
if (host->version >= SDHCI_SPEC_300) {
+ if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+ SDHCI_CTRL_PRESET_VAL_ENABLE) {
+ u16 pre_val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ pre_val = sdhci_get_preset_value(host);
+ div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
+ >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+ if (host->clk_mul &&
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div + 1;
+ clk_mul = host->clk_mul;
+ } else {
+ real_div = max_t(int, 1, div << 1);
+ }
+ goto clock_set;
+ }
+
/*
* Check if the Host Controller supports Programmable Clock
* Mode.
*/
if (host->clk_mul) {
- u16 ctrl;
-
+ for (div = 1; div <= 1024; div++) {
+ if ((host->max_clk * host->clk_mul / div)
+ <= clock)
+ break;
+ }
/*
- * We need to figure out whether the Host Driver needs
- * to select Programmable Clock Mode, or the value can
- * be set automatically by the Host Controller based on
- * the Preset Value registers.
+ * Set Programmable Clock Mode in the Clock
+ * Control register.
*/
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
- for (div = 1; div <= 1024; div++) {
- if (((host->max_clk * host->clk_mul) /
- div) <= clock)
- break;
- }
- /*
- * Set Programmable Clock Mode in the Clock
- * Control register.
- */
- clk = SDHCI_PROG_CLOCK_MODE;
- real_div = div;
- clk_mul = host->clk_mul;
- div--;
- }
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
+ div--;
} else {
/* Version 3.00 divisors must be a multiple of 2. */
if (host->max_clk <= clock)
@@ -1159,6 +1199,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
div >>= 1;
}
+clock_set:
if (real_div)
host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
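With preset values enabled the divisor is taken straight from the preset register instead of the search loop: bits 9:0 give the SDCLK frequency select and bit 10 chooses the programmable clock generator. A worked example with assumed numbers (none of these values come from the patch), taking the divided-clock path where clk_mul stays at its default of 1:

	/* assumed preset value 0x0002: CLKGEN_SEL clear, SDCLK field = 2 */
	div      = 0x0002 & SDHCI_PRESET_SDCLK_FREQ_MASK;	/* 2 */
	real_div = max_t(int, 1, div << 1);			/* 4 */
	/* with max_clk = 200 MHz: actual_clock = 200 MHz / 4 = 50 MHz */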
@@ -1189,6 +1230,15 @@ out:
host->clock = clock;
}
+static inline void sdhci_update_clock(struct sdhci_host *host)
+{
+ unsigned int clock;
+
+ clock = host->clock;
+ host->clock = 0;
+ sdhci_set_clock(host, clock);
+}
+
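sdhci_update_clock() simply zeroes the cached host->clock so that the following sdhci_set_clock() call is forced to reprogram the divider; the later hunks substitute it for every open-coded "save, zero, restore" sequence. Minimal usage, as seen after a timing change or controller reset (illustrative placement):

	/* force the clock divider to be rewritten with the cached rate */
	sdhci_update_clock(host);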
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@@ -1258,7 +1308,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
- bool present;
+ int present;
unsigned long flags;
u32 tuning_opcode;
@@ -1287,18 +1337,21 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
- /* If polling, assume that the card is always present. */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
- present = true;
- else
- present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT;
-
- /* If we're using a cd-gpio, testing the presence bit might fail. */
- if (!present) {
- int ret = mmc_gpio_get_cd(host->mmc);
- if (ret > 0)
- present = true;
+ /*
+ * First, check card presence via cd-gpio. The return value can
+ * be one of the following:
+ * negative: cd-gpio is not available
+ * zero: cd-gpio is used, and card is removed
+ * one: cd-gpio is used, and card is present
+ */
+ present = mmc_gpio_get_cd(host->mmc);
+ if (present < 0) {
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
}
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
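The request path now asks the slot-gpio layer first and only falls back to the controller's PRESENT_STATE bit (or to the broken-card-detection quirk) when no cd-gpio is wired up. The tri-state convention in isolation, reduced to a sketch (read_present_state_bit() is a hypothetical stand-in for the register fallback):

	int cd = mmc_gpio_get_cd(host->mmc);	/* <0: no gpio, 0: removed, >0: present */

	if (cd < 0)
		present = read_present_state_bit(host);	/* hypothetical fallback */
	else
		present = cd;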
@@ -1364,6 +1417,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_reinit(host);
}
+ if (host->version >= SDHCI_SPEC_300 &&
+ (ios->power_mode == MMC_POWER_UP))
+ sdhci_enable_preset_value(host, false);
+
sdhci_set_clock(host, ios->clock);
if (ios->power_mode == MMC_POWER_OFF)
@@ -1383,11 +1440,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
/*
* If your platform has 8-bit width support but is not a v3 controller,
* or if it requires special setup code, you should implement that in
- * platform_8bit_width().
+ * platform_bus_width().
*/
- if (host->ops->platform_8bit_width)
- host->ops->platform_8bit_width(host, ios->bus_width);
- else {
+ if (host->ops->platform_bus_width) {
+ host->ops->platform_bus_width(host, ios->bus_width);
+ } else {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (ios->bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
@@ -1415,7 +1472,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
- unsigned int clock;
/* In case of UHS-I modes, set High Speed Enable */
if ((ios->timing == MMC_TIMING_MMC_HS200) ||
@@ -1455,9 +1511,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ sdhci_update_clock(host);
}
@@ -1487,10 +1541,22 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
+ (ios->timing == MMC_TIMING_UHS_SDR25) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ (ios->timing == MMC_TIMING_UHS_DDR50))) {
+ u16 preset;
+
+ sdhci_enable_preset_value(host, true);
+ preset = sdhci_get_preset_value(host);
+ ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
+ >> SDHCI_PRESET_DRV_SHIFT;
+ }
+
/* Re-enable SD Clock */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
+ sdhci_update_clock(host);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -1608,141 +1674,91 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
-static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host,
- u16 ctrl)
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ struct mmc_ios *ios)
{
+ u16 ctrl;
int ret;
- /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
- ctrl &= ~SDHCI_CTRL_VDD_180;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- if (host->vqmmc) {
- ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
- if (ret) {
- pr_warning("%s: Switching to 3.3V signalling voltage "
- " failed\n", mmc_hostname(host->mmc));
- return -EIO;
- }
- }
- /* Wait for 5ms */
- usleep_range(5000, 5500);
+ /*
+ * Signal Voltage Switching is only applicable for Host Controllers
+ * v3.00 and above.
+ */
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
- /* 3.3V regulator output should be stable within 5 ms */
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_VDD_180))
- return 0;
- pr_warning("%s: 3.3V regulator output did not became stable\n",
- mmc_hostname(host->mmc));
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+ ctrl &= ~SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- return -EIO;
-}
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
+ if (ret) {
+ pr_warning("%s: Switching to 3.3V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+ }
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
-static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host,
- u16 ctrl)
-{
- u8 pwr;
- u16 clk;
- u32 present_state;
- int ret;
+ /* 3.3V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_VDD_180))
+ return 0;
- /* Stop SDCLK */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ pr_warning("%s: 3.3V regulator output did not became stable\n",
+ mmc_hostname(host->mmc));
+
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_180:
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc,
+ 1700000, 1950000);
+ if (ret) {
+ pr_warning("%s: Switching to 1.8V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+ }
- /* Check whether DAT[3:0] is 0000 */
- present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- if (!((present_state & SDHCI_DATA_LVL_MASK) >>
- SDHCI_DATA_LVL_SHIFT)) {
/*
* Enable 1.8V Signal Enable in the Host Control2
* register
*/
- if (host->vqmmc)
- ret = regulator_set_voltage(host->vqmmc,
- 1700000, 1950000);
- else
- ret = 0;
+ ctrl |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- if (!ret) {
- ctrl |= SDHCI_CTRL_VDD_180;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
- /* Wait for 5ms */
- usleep_range(5000, 5500);
+ /* 1.8V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ctrl & SDHCI_CTRL_VDD_180)
+ return 0;
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (ctrl & SDHCI_CTRL_VDD_180) {
- /* Provide SDCLK again and wait for 1ms */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk |= SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
- usleep_range(1000, 1500);
+ pr_warning("%s: 1.8V regulator output did not became stable\n",
+ mmc_hostname(host->mmc));
- /*
- * If DAT[3:0] level is 1111b, then the card
- * was successfully switched to 1.8V signaling.
- */
- present_state = sdhci_readl(host,
- SDHCI_PRESENT_STATE);
- if ((present_state & SDHCI_DATA_LVL_MASK) ==
- SDHCI_DATA_LVL_MASK)
- return 0;
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_120:
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
+ if (ret) {
+ pr_warning("%s: Switching to 1.2V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
}
}
- }
-
- /*
- * If we are here, that means the switch to 1.8V signaling
- * failed. We power cycle the card, and retry initialization
- * sequence by setting S18R to 0.
- */
- pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
- pwr &= ~SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_disable(host->vmmc);
-
- /* Wait for 1ms as per the spec */
- usleep_range(1000, 1500);
- pwr |= SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_enable(host->vmmc);
-
- pr_warning("%s: Switching to 1.8V signalling voltage failed, "
- "retrying with S18R set to 0\n", mmc_hostname(host->mmc));
-
- return -EAGAIN;
-}
-
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
- struct mmc_ios *ios)
-{
- u16 ctrl;
-
- /*
- * Signal Voltage Switching is only applicable for Host Controllers
- * v3.00 and above.
- */
- if (host->version < SDHCI_SPEC_300)
return 0;
-
- /*
- * We first check whether the request is to set signalling voltage
- * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
- */
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- return sdhci_do_3_3v_signal_voltage_switch(host, ctrl);
- else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
- (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180))
- return sdhci_do_1_8v_signal_voltage_switch(host, ctrl);
- else
+ default:
/* No signal voltage switch required */
return 0;
+ }
}
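The rewritten helper folds the old 3.3 V and 1.8 V paths into a single switch over ios->signal_voltage and adds a 1.2 V case; returning -EAGAIN tells the core the switch did not stick so it can retry (the removed code did this itself by power-cycling and clearing S18R). Each branch programs vqmmc to a window around the nominal level; pulled out of the hunk above for reference (values copied, not new):

	/* nominal signalling level -> vqmmc window for regulator_set_voltage() */
	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:	/* 2.7 V .. 3.6 V */
		ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
		break;
	case MMC_SIGNAL_VOLTAGE_180:	/* 1.7 V .. 1.95 V */
		ret = regulator_set_voltage(host->vqmmc, 1700000, 1950000);
		break;
	case MMC_SIGNAL_VOLTAGE_120:	/* 1.1 V .. 1.3 V */
		ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
		break;
	}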
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -1759,6 +1775,19 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return err;
}
+static int sdhci_card_busy(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 present_state;
+
+ sdhci_runtime_pm_get(host);
+ /* Check whether DAT[3:0] is 0000 */
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ sdhci_runtime_pm_put(host);
+
+ return !(present_state & SDHCI_DATA_LVL_MASK);
+}
+
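The new .card_busy op reports whether the card is still holding DAT[3:0] low, which lets the core poll for busy instead of relying on fixed delays, for example while waiting for the 1.8 V switch to complete. A hedged sketch of such a poll, not the core's actual implementation:

	/* illustrative: poll .card_busy with a 100 ms ceiling */
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (mmc->ops->card_busy(mmc)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}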
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
@@ -1955,17 +1984,15 @@ out:
return err;
}
-static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
+
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
u16 ctrl;
- unsigned long flags;
/* Host Controller v3.00 defines preset value registers */
if (host->version < SDHCI_SPEC_300)
return;
- spin_lock_irqsave(&host->lock, flags);
-
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/*
@@ -1981,17 +2008,6 @@ static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable)
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
host->flags &= ~SDHCI_PV_ENABLED;
}
-
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
-{
- struct sdhci_host *host = mmc_priv(mmc);
-
- sdhci_runtime_pm_get(host);
- sdhci_do_enable_preset_value(host, enable);
- sdhci_runtime_pm_put(host);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2027,8 +2043,8 @@ static const struct mmc_host_ops sdhci_ops = {
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
- .enable_preset_value = sdhci_enable_preset_value,
.card_event = sdhci_card_event,
+ .card_busy = sdhci_card_busy,
};
/*****************************************************************************\
@@ -2080,14 +2096,9 @@ static void sdhci_tasklet_finish(unsigned long param)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
- if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
- unsigned int clock;
-
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
- }
+ sdhci_update_clock(host);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
@@ -2455,6 +2466,32 @@ out:
\*****************************************************************************/
#ifdef CONFIG_PM
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val |= mask;
+ /* Avoid spurious wake-ups */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
+void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
@@ -2484,8 +2521,13 @@ int sdhci_suspend_host(struct sdhci_host *host)
return ret;
}
- free_irq(host->irq, host);
-
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ free_irq(host->irq, host);
+ } else {
+ sdhci_enable_irq_wakeups(host);
+ enable_irq_wake(host->irq);
+ }
return ret;
}
@@ -2500,10 +2542,15 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+ mmc_hostname(host->mmc), host);
+ if (ret)
+ return ret;
+ } else {
+ sdhci_disable_irq_wakeups(host);
+ disable_irq_wake(host->irq);
+ }
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
@@ -2531,17 +2578,6 @@ int sdhci_resume_host(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
-
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
-{
- u8 val;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= SDHCI_WAKE_ON_INT;
- sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
-}
-
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
@@ -2600,8 +2636,12 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_do_set_ios(host, &host->mmc->ios);
sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
- if (host_flags & SDHCI_PV_ENABLED)
- sdhci_do_enable_preset_value(host, true);
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
/* Set the re-tuning expiration flag */
if (host->flags & SDHCI_USING_RETUNING_TIMER)
@@ -2936,7 +2976,11 @@ int sdhci_add_host(struct sdhci_host *host)
}
#ifdef CONFIG_REGULATOR
- if (host->vmmc) {
+ /*
+ * The voltage range check only makes sense if the regulator
+ * reports a voltage value.
+ */
+ if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
ret = regulator_is_supported_voltage(host->vmmc, 2700000,
3600000);
if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
@@ -3139,6 +3183,7 @@ int sdhci_add_host(struct sdhci_host *host)
#ifdef SDHCI_USE_LEDS_CLASS
reset:
sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
#endif
untasklet:
@@ -3181,6 +3226,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (!dead)
sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
del_timer_sync(&host->timer);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a6d69b7bdea2..379e09d9f3c1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -229,6 +229,18 @@
/* 60-FB reserved */
+#define SDHCI_PRESET_FOR_SDR12 0x66
+#define SDHCI_PRESET_FOR_SDR25 0x68
+#define SDHCI_PRESET_FOR_SDR50 0x6A
+#define SDHCI_PRESET_FOR_SDR104 0x6C
+#define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_DRV_MASK 0xC000
+#define SDHCI_PRESET_DRV_SHIFT 14
+#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
+#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
+#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
+#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
+
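Each of the new 0x66-0x6E registers holds one 16-bit preset value per UHS mode: bits 15:14 carry the driver strength, bit 10 the clock-generator select, and bits 9:0 the SDCLK frequency select. A small decode using the masks above, with an assumed register value (purely illustrative):

	u16 preset   = 0x4002;	/* assumed value, not from the patch */
	u8  drv_type = (preset & SDHCI_PRESET_DRV_MASK)
			>> SDHCI_PRESET_DRV_SHIFT;		/* 1 */
	bool clkgen  = preset & SDHCI_PRESET_CLKGEN_SEL_MASK;	/* false */
	u16 sdclk    = (preset & SDHCI_PRESET_SDCLK_FREQ_MASK)
			>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;	/* 2 */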
#define SDHCI_SLOT_INT_STATUS 0xFC
#define SDHCI_HOST_VERSION 0xFE
@@ -269,7 +281,7 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
- int (*platform_8bit_width)(struct sdhci_host *host,
+ int (*platform_bus_width)(struct sdhci_host *host,
int width);
void (*platform_send_init_74_clocks)(struct sdhci_host *host,
u8 power_mode);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9a4c151067dd..ba76a532ae30 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -56,6 +56,7 @@
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -88,6 +89,7 @@
#define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */
#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
@@ -127,6 +129,10 @@
INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
+ INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
+ INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@@ -158,6 +164,11 @@
MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
+ MASK_MBUFREN | MASK_MBUFWEN | \
+ MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
+ MASK_MCMD12RBE | MASK_MCMD12CRE)
+
/* CE_HOST_STS1 */
#define STS1_CMDSEQ (1 << 31)
@@ -195,6 +206,7 @@ enum mmcif_state {
STATE_IDLE,
STATE_REQUEST,
STATE_IOS,
+ STATE_TIMEOUT,
};
enum mmcif_wait_for {
@@ -216,6 +228,7 @@ struct sh_mmcif_host {
struct clk *hclk;
unsigned int clk;
int bus_width;
+ unsigned char timing;
bool sd_error;
bool dying;
long timeout;
@@ -230,6 +243,7 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
+ struct mutex thread_lock;
/* DMA support */
struct dma_chan *chan_rx;
@@ -253,23 +267,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
static void mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
- struct mmc_data *data = host->mrq->data;
+ struct mmc_request *mrq = host->mrq;
dev_dbg(&host->pd->dev, "Command completed\n");
- if (WARN(!data, "%s: NULL data in DMA completion!\n",
+ if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
dev_name(&host->pd->dev)))
return;
- if (data->flags & MMC_DATA_READ)
- dma_unmap_sg(host->chan_rx->device->dev,
- data->sg, data->sg_len,
- DMA_FROM_DEVICE);
- else
- dma_unmap_sg(host->chan_tx->device->dev,
- data->sg, data->sg_len,
- DMA_TO_DEVICE);
-
complete(&host->dma_complete);
}
@@ -423,8 +428,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgrx;
- init_completion(&host->dma_complete);
-
return;
ecfgrx:
@@ -520,13 +523,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
}
if (state2 & STS2_CRC_ERR) {
- dev_dbg(&host->pd->dev, ": CRC error\n");
+ dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
- dev_dbg(&host->pd->dev, ": Timeout\n");
+ dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -ETIMEDOUT;
} else {
- dev_dbg(&host->pd->dev, ": End/Index error\n");
+ dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
+ host->state, host->wait_for);
ret = -EIO;
}
return ret;
@@ -549,10 +555,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
host->pio_ptr = p;
}
- if (host->sg_idx == data->sg_len)
- return false;
-
- return true;
+ return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
@@ -562,7 +565,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_READ;
- schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf read enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@@ -576,6 +578,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -604,7 +607,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
- schedule_delayed_work(&host->timeout_work, host->timeout);
+
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
@@ -616,6 +619,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -627,7 +631,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
- schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
return true;
@@ -640,7 +643,6 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK) + 3;
host->wait_for = MMCIF_WAIT_FOR_WRITE;
- schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf write enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
@@ -654,6 +656,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -682,7 +685,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
- schedule_delayed_work(&host->timeout_work, host->timeout);
+
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
@@ -694,6 +697,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -705,7 +709,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
if (!sh_mmcif_next_block(host, p))
return false;
- schedule_delayed_work(&host->timeout_work, host->timeout);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
return true;
@@ -756,6 +759,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
}
switch (opc) {
/* RBSY */
+ case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@@ -781,6 +785,17 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
dev_err(&host->pd->dev, "Unsupported bus width.\n");
break;
}
+ switch (host->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ /*
+ * The MMC core will only set this timing if the host
+ * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
+ * implementations with this capability, e.g. sh73a0,
+ * will have to set it in their platform data.
+ */
+ tmp |= CMD_SET_DARS;
+ break;
+ }
}
/* DWEN */
if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
@@ -824,7 +839,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
sh_mmcif_single_read(host, mrq);
return 0;
default:
- dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
+ dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
return -EINVAL;
}
}
@@ -838,6 +853,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
switch (opc) {
/* response busy check */
+ case MMC_SLEEP_AWAKE:
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
@@ -885,7 +901,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
}
host->wait_for = MMCIF_WAIT_FOR_STOP;
- schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -895,6 +910,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
mrq->cmd->error = -EAGAIN;
mmc_request_done(mmc, mrq);
@@ -911,6 +927,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
break;
case MMC_APP_CMD:
+ case SD_IO_RW_DIRECT:
host->state = STATE_IDLE;
mrq->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, mrq);
@@ -957,6 +974,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@@ -981,7 +999,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
if (host->power) {
- pm_runtime_put(&host->pd->dev);
+ pm_runtime_put_sync(&host->pd->dev);
clk_disable(host->hclk);
host->power = false;
if (ios->power_mode == MMC_POWER_OFF)
@@ -1001,6 +1019,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sh_mmcif_clock_control(host, ios->clock);
}
+ host->timing = ios->timing;
host->bus_width = ios->bus_width;
host->state = STATE_IDLE;
}
@@ -1038,14 +1057,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
case MMC_SELECT_CARD:
case MMC_APP_CMD:
cmd->error = -ETIMEDOUT;
- host->sd_error = false;
break;
default:
cmd->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
- cmd->opcode, cmd->error);
break;
}
+ dev_dbg(&host->pd->dev, "CMD%d error %d\n",
+ cmd->opcode, cmd->error);
+ host->sd_error = false;
return false;
}
if (!(cmd->flags & MMC_RSP_PRESENT)) {
@@ -1058,6 +1077,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!data)
return false;
+ /*
+ * Completion can be signalled from the DMA callback or an error IRQ, so
+ * it has to be re-initialised here, before setting .dma_active
+ */
+ init_completion(&host->dma_complete);
+
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
sh_mmcif_start_dma_rx(host);
@@ -1068,34 +1093,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
if (!host->dma_active) {
data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
- if (!data->error)
- return true;
- return false;
+ return !data->error;
}
/* Running in the IRQ thread, can sleep */
time = wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
+
+ if (data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev,
+ data->sg, data->sg_len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev,
+ data->sg, data->sg_len,
+ DMA_TO_DEVICE);
+
if (host->sd_error) {
dev_err(host->mmc->parent,
"Error IRQ while waiting for DMA completion!\n");
/* Woken up by an error IRQ: abort DMA */
- if (data->flags & MMC_DATA_READ)
- dmaengine_terminate_all(host->chan_rx);
- else
- dmaengine_terminate_all(host->chan_tx);
data->error = sh_mmcif_error_manage(host);
} else if (!time) {
+ dev_err(host->mmc->parent, "DMA timeout!\n");
data->error = -ETIMEDOUT;
} else if (time < 0) {
+ dev_err(host->mmc->parent,
+ "wait_for_completion_...() error %ld!\n", time);
data->error = time;
}
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
host->dma_active = false;
- if (data->error)
+ if (data->error) {
data->bytes_xfered = 0;
+ /* Abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+ }
return false;
}
@@ -1103,10 +1141,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
- struct mmc_request *mrq = host->mrq;
+ struct mmc_request *mrq;
+ bool wait = false;
cancel_delayed_work_sync(&host->timeout_work);
+ mutex_lock(&host->thread_lock);
+
+ mrq = host->mrq;
+ if (!mrq) {
+ dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+ host->state, host->wait_for);
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
/*
* All handlers return true, if processing continues, and false, if the
* request has to be completed - successfully or not
@@ -1114,35 +1163,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
switch (host->wait_for) {
case MMCIF_WAIT_FOR_REQUEST:
/* We're too late, the timeout has already kicked in */
+ mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
case MMCIF_WAIT_FOR_CMD:
- if (sh_mmcif_end_cmd(host))
- /* Wait for data */
- return IRQ_HANDLED;
+ /* Wait for data? */
+ wait = sh_mmcif_end_cmd(host);
break;
case MMCIF_WAIT_FOR_MREAD:
- if (sh_mmcif_mread_block(host))
- /* Wait for more data */
- return IRQ_HANDLED;
+ /* Wait for more data? */
+ wait = sh_mmcif_mread_block(host);
break;
case MMCIF_WAIT_FOR_READ:
- if (sh_mmcif_read_block(host))
- /* Wait for data end */
- return IRQ_HANDLED;
+ /* Wait for data end? */
+ wait = sh_mmcif_read_block(host);
break;
case MMCIF_WAIT_FOR_MWRITE:
- if (sh_mmcif_mwrite_block(host))
- /* Wait data to write */
- return IRQ_HANDLED;
+ /* Wait for data to write? */
+ wait = sh_mmcif_mwrite_block(host);
break;
case MMCIF_WAIT_FOR_WRITE:
- if (sh_mmcif_write_block(host))
- /* Wait for data end */
- return IRQ_HANDLED;
+ /* Wait for data end? */
+ wait = sh_mmcif_write_block(host);
break;
case MMCIF_WAIT_FOR_STOP:
if (host->sd_error) {
mrq->stop->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
break;
}
sh_mmcif_get_cmd12response(host, mrq->stop);
@@ -1150,13 +1196,22 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
break;
case MMCIF_WAIT_FOR_READ_END:
case MMCIF_WAIT_FOR_WRITE_END:
- if (host->sd_error)
+ if (host->sd_error) {
mrq->data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
+ }
break;
default:
BUG();
}
+ if (wait) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ /* Wait for more data */
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
struct mmc_data *data = mrq->data;
if (!mrq->cmd->error && data && !data->error)
@@ -1165,8 +1220,11 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
sh_mmcif_stop_cmd(host, mrq);
- if (!mrq->stop->error)
+ if (!mrq->stop->error) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
+ }
}
}
@@ -1175,6 +1233,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
+ mutex_unlock(&host->thread_lock);
+
return IRQ_HANDLED;
}
@@ -1182,56 +1242,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
u32 state;
- int err = 0;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
- if (state & INT_ERR_STS) {
- /* error interrupts - process first */
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
- } else if (state & INT_RBSYE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_RBSYE | INT_CRSPE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
- } else if (state & INT_CRSPE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
- } else if (state & INT_BUFREN) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- } else if (state & INT_BUFWEN) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
- } else if (state & INT_CMD12DRE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12DRE | INT_CMD12RBE |
- INT_CMD12CRE | INT_BUFRE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
- } else if (state & INT_BUFRE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
- } else if (state & INT_DTRANE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12DRE | INT_CMD12RBE |
- INT_CMD12CRE | INT_DTRANE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
- } else if (state & INT_CMD12RBE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(INT_CMD12RBE | INT_CMD12CRE));
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- } else {
- dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
- }
- if (err) {
+ if (state & ~MASK_CLEAN)
+ dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
+ state);
+
+ if (state & INT_ERR_STS || state & ~INT_ALL) {
host->sd_error = true;
- dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
}
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+ if (!host->mrq)
+ dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
if (!host->dma_active)
return IRQ_WAKE_THREAD;
else if (host->sd_error)
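The per-bit if/else ladder in sh_mmcif_intr() collapses into three steps: acknowledge every status bit that fired, mask the normal-completion sources collected in MASK_CLEAN, and treat error bits or anything outside INT_ALL as an SD error for the threaded handler. The same read/ack/selective-mask shape, reduced to a sketch (the REG_* and *_BITS names are placeholders, not MMCIF register names):

	u32 state = readl(base + REG_INT_STATUS);

	writel(~state, base + REG_INT_STATUS);		/* ack what fired */
	writel(readl(base + REG_INT_MASK) & ~(state & HANDLED_BITS),
	       base + REG_INT_MASK);			/* mask handled sources */
	if ((state & ERROR_BITS) || (state & ~KNOWN_BITS))
		sd_error = true;			/* let the IRQ thread sort it out */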
@@ -1248,11 +1274,24 @@ static void mmcif_timeout_work(struct work_struct *work)
struct delayed_work *d = container_of(work, struct delayed_work, work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
+ unsigned long flags;
if (host->dying)
/* Don't run after mmc_remove_host() */
return;
+ dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
+ host->wait_for, mrq->cmd->opcode);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->state == STATE_IDLE) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ host->state = STATE_TIMEOUT;
+ spin_unlock_irqrestore(&host->lock, flags);
+
/*
* Handle races with cancel_delayed_work(), unless
* cancel_delayed_work_sync() is used
@@ -1306,10 +1345,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
struct resource *res;
void __iomem *reg;
+ const char *name;
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0 || irq[1] < 0) {
+ if (irq[0] < 0) {
dev_err(&pdev->dev, "Get irq error\n");
return -ENXIO;
}
@@ -1329,10 +1369,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto ealloch;
}
+ mmc_of_parse(mmc);
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
- host->timeout = 1000;
+ host->timeout = msecs_to_jiffies(1000);
host->pd = pdev;
@@ -1341,7 +1382,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->ops = &sh_mmcif_ops;
sh_mmcif_init_ocr(host);
- mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
@@ -1374,15 +1415,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
+ name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
+ ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
if (ret) {
- dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
+ dev_err(&pdev->dev, "request_irq error (%s)\n", name);
goto ereqirq0;
}
- ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
- if (ret) {
- dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto ereqirq1;
+ if (irq[1] >= 0) {
+ ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
+ 0, "sh_mmc:int", host);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
+ goto ereqirq1;
+ }
}
if (pd && pd->use_cd_gpio) {
@@ -1391,6 +1436,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
goto erqcd;
}
+ mutex_init(&host->thread_lock);
+
clk_disable(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
@@ -1404,10 +1451,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
return ret;
emmcaddh:
- if (pd && pd->use_cd_gpio)
- mmc_gpio_free_cd(mmc);
erqcd:
- free_irq(irq[1], host);
+ if (irq[1] >= 0)
+ free_irq(irq[1], host);
ereqirq1:
free_irq(irq[0], host);
ereqirq0:
@@ -1427,7 +1473,6 @@ ealloch:
static int sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
- struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
int irq[2];
host->dying = true;
@@ -1436,9 +1481,6 @@ static int sh_mmcif_remove(struct platform_device *pdev)
dev_pm_qos_hide_latency_limit(&pdev->dev);
- if (pd && pd->use_cd_gpio)
- mmc_gpio_free_cd(host->mmc);
-
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
@@ -1456,7 +1498,8 @@ static int sh_mmcif_remove(struct platform_device *pdev)
irq[1] = platform_get_irq(pdev, 1);
free_irq(irq[0], host);
- free_irq(irq[1], host);
+ if (irq[1] >= 0)
+ free_irq(irq[1], host);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 524a7f773820..fe90853900b4 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
@@ -32,6 +33,16 @@
#include "tmio_mmc.h"
+struct sh_mobile_sdhi_of_data {
+ unsigned long tmio_flags;
+};
+
+static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
+ {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+ },
+};
+
struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
@@ -117,8 +128,18 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
.cd_wakeup = sh_mobile_sdhi_cd_wakeup,
};
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,shmobile-sdhi" },
+ { .compatible = "renesas,sh7372-sdhi" },
+ { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
@@ -126,7 +147,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
int irq, ret, i = 0;
bool multiplexed_isr = true;
- priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) {
dev_err(&pdev->dev, "kzalloc failed\n");
return -ENOMEM;
@@ -135,15 +156,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data = &priv->mmc_data;
if (p) {
- p->pdata = mmc_data;
if (p->init) {
ret = p->init(pdev, &sdhi_ops);
if (ret)
- goto einit;
+ return ret;
}
}
- priv->clk = clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
@@ -153,10 +173,9 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->clk_enable = sh_mobile_sdhi_clk_enable;
mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
if (p) {
mmc_data->flags = p->tmio_flags;
- if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
- mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
mmc_data->capabilities2 |= p->tmio_caps2;
@@ -187,6 +206,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+ mmc_data->flags |= of_data->tmio_flags;
+ }
+
ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
if (ret < 0)
goto eprobe;
@@ -199,33 +223,33 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_card_detect_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_card_detect;
+ goto eirq;
}
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_sdio_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_sdio;
+ goto eirq;
}
irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
if (irq >= 0) {
multiplexed_isr = false;
- ret = request_irq(irq, tmio_mmc_sdcard_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_sdcard;
+ goto eirq;
} else if (!multiplexed_isr) {
dev_err(&pdev->dev,
"Principal SD-card IRQ is missing among named interrupts\n");
ret = irq;
- goto eirq_sdcard;
+ goto eirq;
}
if (multiplexed_isr) {
@@ -234,15 +258,15 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (irq < 0)
break;
i++;
- ret = request_irq(irq, tmio_mmc_irq, 0,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq_multiplexed;
+ goto eirq;
}
/* There must be at least one IRQ source */
if (!i)
- goto eirq_multiplexed;
+ goto eirq;
}
dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
@@ -252,28 +276,12 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
return ret;
-eirq_multiplexed:
- while (i--) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, host);
- }
-eirq_sdcard:
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
- if (irq >= 0)
- free_irq(irq, host);
-eirq_sdio:
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
- if (irq >= 0)
- free_irq(irq, host);
-eirq_card_detect:
+eirq:
tmio_mmc_host_remove(host);
eprobe:
- clk_put(priv->clk);
eclkget:
if (p && p->cleanup)
p->cleanup(pdev);
-einit:
- kfree(priv);
return ret;
}
@@ -281,29 +289,13 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- int i = 0, irq;
-
- if (p)
- p->pdata = NULL;
tmio_mmc_host_remove(host);
- while (1) {
- irq = platform_get_irq(pdev, i++);
- if (irq < 0)
- break;
- free_irq(irq, host);
- }
-
- clk_put(priv->clk);
-
if (p && p->cleanup)
p->cleanup(pdev);
- kfree(priv);
-
return 0;
}
@@ -314,12 +306,6 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
.runtime_resume = tmio_mmc_host_runtime_resume,
};
-static const struct of_device_id sh_mobile_sdhi_of_match[] = {
- { .compatible = "renesas,shmobile-sdhi" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
-
static struct platform_driver sh_mobile_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 50bf495a988b..f508ecb5b8a7 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -43,6 +43,7 @@
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -155,6 +156,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
host->set_clk_div(host->pdev, (clk>>22) & 1);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
+ msleep(10);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
@@ -768,16 +770,48 @@ static int tmio_mmc_clk_update(struct mmc_host *mmc)
return ret;
}
-static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios)
+static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
+ int ret = 0;
+
+ /* .set_ios() returns void, so there is no way to report an error */
if (host->set_pwr)
- host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF);
+ host->set_pwr(host->pdev, 1);
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ /*
+ * Attention: empirical value. With a b43 WiFi SDIO card this
+ * delay proved necessary for reliable card-insertion probing.
+ * 100us was not enough. Is this the same 140us delay as in
+ * tmio_mmc_set_ios()?
+ */
+ udelay(200);
+ }
+ /*
+ * It seems VccQ should be switched on after Vcc; this is also what the
+ * omap_hsmmc.c driver does.
+ */
+ if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
+ regulator_enable(mmc->supply.vqmmc);
+ udelay(200);
+ }
+}
+
+static void tmio_mmc_power_off(struct tmio_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
if (!IS_ERR(mmc->supply.vmmc))
- /* Errors ignored... */
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->power_mode ? ios->vdd : 0);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
}
/* Set MMC clock / power.
@@ -828,18 +862,20 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!host->power) {
tmio_mmc_clk_update(mmc);
pm_runtime_get_sync(dev);
- host->power = true;
}
tmio_mmc_set_clock(host, ios->clock);
- /* power up SD bus */
- tmio_mmc_set_power(host, ios);
+ if (!host->power) {
+ /* power up SD card and the bus */
+ tmio_mmc_power_on(host, ios->vdd);
+ host->power = true;
+ }
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (ios->power_mode == MMC_POWER_OFF)
- tmio_mmc_set_power(host, ios);
if (host->power) {
struct tmio_mmc_data *pdata = host->pdata;
+ if (ios->power_mode == MMC_POWER_OFF)
+ tmio_mmc_power_off(host);
tmio_mmc_clk_stop(host);
host->power = false;
pm_runtime_put(dev);
@@ -918,6 +954,17 @@ static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
+static void tmio_mmc_of_parse(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
+{
+ const struct device_node *np = pdev->dev.of_node;
+ if (!np)
+ return;
+
+ if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
+ pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
+}
+
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
@@ -928,6 +975,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
int ret;
u32 irq_mask = TMIO_MASK_CMD;
+ tmio_mmc_of_parse(pdev, pdata);
+
+ if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
+ pdata->write16_hook = NULL;
+
res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_ctl)
return -EINVAL;
@@ -936,6 +988,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (!mmc)
return -ENOMEM;
+ mmc_of_parse(mmc);
+
pdata->dev = &pdev->dev;
_host = mmc_priv(mmc);
_host->pdata = pdata;
@@ -956,7 +1010,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
}
mmc->ops = &tmio_mmc_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 = pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
@@ -968,7 +1022,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- mmc->caps & MMC_CAP_NONREMOVABLE);
+ mmc->caps & MMC_CAP_NONREMOVABLE ||
+ mmc->slot.cd_irq >= 0);
_host->power = false;
pm_runtime_enable(&pdev->dev);
@@ -1060,16 +1115,8 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
struct platform_device *pdev = host->pdev;
- struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD)
- /*
- * This means we can miss a card-eject, but this is anyway
- * possible, because of delayed processing of hotplug events.
- */
- mmc_gpio_free_cd(mmc);
-
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 154f0e8e931c..c6d001509e5a 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -1012,7 +1012,7 @@ static const struct dev_pm_ops wmt_mci_pm = {
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
- .remove = __exit_p(wmt_mci_remove),
+ .remove = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 03f2eb5627ec..557bec599f4f 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -74,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
- bool "Command line partition table parsing"
- depends on MTD = "y"
+ tristate "Command line partition table parsing"
+ depends on MTD
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 7c057a05adb6..ddc0a4287a4b 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -142,7 +142,13 @@ static int __init ar7_parser_init(void)
return register_mtd_parser(&ar7_parser);
}
+static void __exit ar7_parser_exit(void)
+{
+ deregister_mtd_parser(&ar7_parser);
+}
+
module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index e06d782489a6..63feb75cc8e0 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -14,17 +14,11 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
/* 10 parts were found on sflash on Netgear WNDR4500 */
#define BCM47XXPART_MAX_PARTS 12
-/*
- * Amount of bytes we read when analyzing each block of flash memory.
- * Set it big enough to allow detecting partition and reading important data.
- */
-#define BCM47XXPART_BYTES_TO_READ 0x404
-
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define POT_MAGIC1 0x54544f50 /* POTT */
@@ -59,13 +53,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t *buf;
size_t bytes_read;
uint32_t offset;
- uint32_t blocksize = 0x10000;
+ uint32_t blocksize = master->erasesize;
struct trx_header *trx;
+ int trx_part = -1;
+ int last_trx_part = -1;
+ int max_bytes_to_read = 0x8004;
+
+ if (blocksize <= 0x10000)
+ blocksize = 0x10000;
+ if (blocksize == 0x20000)
+ max_bytes_to_read = 0x18004;
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
- buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -80,7 +82,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ if (mtd_read(master, offset, max_bytes_to_read,
&bytes_read, (uint8_t *)buf) < 0) {
pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
offset);
@@ -95,9 +97,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Standard NVRAM */
- if (buf[0x000 / 4] == NVRAM_HEADER) {
+ if (buf[0x000 / 4] == NVRAM_HEADER ||
+ buf[0x1000 / 4] == NVRAM_HEADER ||
+ buf[0x8000 / 4] == NVRAM_HEADER ||
+ (blocksize == 0x20000 && (
+ buf[0x10000 / 4] == NVRAM_HEADER ||
+ buf[0x11000 / 4] == NVRAM_HEADER ||
+ buf[0x18000 / 4] == NVRAM_HEADER))) {
bcm47xxpart_add_part(&parts[curr_part++], "nvram",
offset, 0);
+ offset = rounddown(offset, blocksize);
continue;
}
@@ -131,6 +140,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
if (buf[0x000 / 4] == TRX_MAGIC) {
trx = (struct trx_header *)buf;
+ trx_part = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
i = 0;
/* We have LZMA loader if offset[2] points to sth */
if (trx->offset[2]) {
@@ -154,6 +167,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset + trx->offset[i], 0);
i++;
+ last_trx_part = curr_part - 1;
+
/*
* We have the whole TRX scanned, skip to the next part. Use
* rounddown (not roundup), as the loop will increase
@@ -169,11 +184,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
* Assume that partitions end at the beginning of the one they are
* followed by.
*/
- for (i = 0; i < curr_part - 1; i++)
- parts[i].size = parts[i + 1].offset - parts[i].offset;
- if (curr_part > 0)
- parts[curr_part - 1].size =
- master->size - parts[curr_part - 1].offset;
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ if (i == last_trx_part && trx_part >= 0)
+ parts[trx_part].size = next_part_offset -
+ parts[trx_part].offset;
+ }
*pparts = parts;
return curr_part;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index b86197286f24..fff665d59a0d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
struct mtd_info *mtd;
int i;
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_tell_features(extp);
#endif
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+ struct flchip *chip;
+ loff_t offset;
+ int locked;
+};
+
+#define MAX_SECTORS 512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), chip->start + adr);
+ map_write(map, CMD(0x00), chip->start + adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+ /* Return locked status: 0->locked, 1->unlocked */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+ /*
+ * Wait for some time as unlocking of all sectors takes quite long
+ */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /* Exit BC commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors. So let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+ sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+ * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((adr < ofs) || (adr >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].offset = offset;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= MAX_SECTORS) {
+ printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+ MAX_SECTORS);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
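The new cfi_ppb_lock()/cfi_ppb_unlock()/cfi_ppb_is_locked() handlers are installed as mtd->_lock/_unlock/_is_locked when a map opts in via "use-advanced-sector-protection", so they end up being driven by the generic MEMLOCK/MEMUNLOCK ioctls on the MTD character device. A minimal userspace sketch of that path, assuming a hypothetical /dev/mtd2 and a made-up 128 KiB region (neither comes from this patch):

/* Illustrative only: exercises the new PPB path through the generic
 * MTD ioctls; the device node and region size are invented examples. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct erase_info_user region = { .start = 0, .length = 128 * 1024 };
	int fd = open("/dev/mtd2", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, MEMUNLOCK, &region))	/* reaches cfi_ppb_unlock() */
		perror("MEMUNLOCK");
	/* ... erase/program the now-unprotected region ... */
	if (ioctl(fd, MEMLOCK, &region))	/* reaches cfi_ppb_lock() */
		perror("MEMLOCK");
	return 0;
}

As the comment in do_ppb_xxlock() notes, a PPB unlock clears protection for the whole chip, which is why cfi_ppb_unlock() first records the lock state of every sector outside the requested range and re-locks those sectors afterwards.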
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index c533f27d863f..721caebbc5cc 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -22,11 +22,22 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at end of device
* if the specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or 0 if the first part
* <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
*
* Examples:
*
@@ -70,6 +81,7 @@ struct cmdline_mtd_partition {
static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
static char *cmdline;
static int cmdline_parsed;
@@ -330,6 +342,14 @@ static int parse_cmdline_partitions(struct mtd_info *master,
if (part->parts[i].size == SIZE_REMAINING)
part->parts[i].size = master->size - offset;
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
if (part->parts[i].size == 0) {
printk(KERN_WARNING ERRP
"%s: skipping zero sized partition\n",
@@ -337,16 +357,8 @@ static int parse_cmdline_partitions(struct mtd_info *master,
part->num_parts--;
memmove(&part->parts[i], &part->parts[i + 1],
sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
+ i--;
}
- offset += part->parts[i].size;
}
*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
@@ -365,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
*
* This function needs to be visible for bootloaders.
*/
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
{
cmdline = s;
return 1;
@@ -381,10 +393,21 @@ static struct mtd_part_parser cmdline_parser = {
static int __init cmdline_parser_init(void)
{
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
return register_mtd_parser(&cmdline_parser);
}
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 395733a30ef4..369a1943ca25 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,8 +17,10 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
-CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 4714584aa993..95266285acb1 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -5,6 +5,8 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
+#include "bcm47xxsflash.h"
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
@@ -13,26 +15,28 @@ static const char *probes[] = { "bcm47xxpart", NULL };
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct bcma_sflash *sflash = mtd->priv;
+ struct bcm47xxsflash *b47s = mtd->priv;
/* Check address range */
if ((from + len) > mtd->size)
return -EINVAL;
- memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
len);
+ *retlen = len;
return len;
}
-static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
- struct mtd_info *mtd)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
{
- mtd->priv = sflash;
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
mtd->name = "bcm47xxsflash";
mtd->owner = THIS_MODULE;
mtd->type = MTD_ROM;
- mtd->size = sflash->size;
+ mtd->size = b47s->size;
mtd->_read = bcm47xxsflash_read;
/* TODO: implement writing support and verify/change following code */
@@ -40,19 +44,30 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
mtd->writebufsize = mtd->writesize = 1;
}
-static int bcm47xxsflash_probe(struct platform_device *pdev)
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s;
int err;
- sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!sflash->mtd) {
+ b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
+ if (!b47s) {
err = -ENOMEM;
goto out;
}
- bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+ sflash->priv = b47s;
- err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+ b47s->window = sflash->window;
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
goto err_dev_reg;
@@ -61,34 +76,40 @@ static int bcm47xxsflash_probe(struct platform_device *pdev)
return 0;
err_dev_reg:
- kfree(sflash->mtd);
+ kfree(b47s);
out:
return err;
}
-static int bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s = sflash->priv;
- mtd_device_unregister(sflash->mtd);
- kfree(sflash->mtd);
+ mtd_device_unregister(&b47s->mtd);
+ kfree(b47s);
return 0;
}
static struct platform_driver bcma_sflash_driver = {
- .remove = bcm47xxsflash_remove,
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
},
};
+/**************************************************
+ * Init
+ **************************************************/
+
static int __init bcm47xxsflash_init(void)
{
int err;
- err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+ err = platform_driver_register(&bcma_sflash_driver);
if (err)
pr_err("Failed to register BCMA serial flash driver: %d\n",
err);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 000000000000..ebf6f710e23c
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,15 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+struct bcm47xxsflash {
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644
index 000000000000..2ec5da9ee248
--- /dev/null
+++ b/drivers/mtd/devices/elm.c
@@ -0,0 +1,404 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ */
+void elm_config(struct device *dev, enum bch_ecc bch_type)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+
+ /* BCH8 */
+ if (info->bch_type) {
+
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = cpu_to_be32(*(u32 *) &ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ } else {
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+ }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the syndrome valid bit for those syndrome fragment registers that
+ * have been loaded. This tells the ELM module to start processing the
+ * corresponding syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Mark the syndrome vectors valid so that the ELM module
+ * processes those vectors for which an error is reported
+ */
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Once the ELM module has finished processing, the error location status
+ * register holds the correctable/uncorrectable error information.
+ * For correctable errors, the number of errors is read from the ELM
+ * location status register and the error positions from the ELM error
+ * location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more vectors that have an error reported; the caller
+ * flags those vectors in err_vec[].error_reported
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->elm_base)
+ return -EADDRNOTAVAIL;
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev)) {
+ ret = -EINVAL;
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = "elm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(elm_of_match),
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4eeeb2d7f6ea..5b6b0728be21 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -565,6 +565,96 @@ time_out:
return ret;
}
+static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset < flash->mtd.size-(flash->mtd.size/2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset < flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/64))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
+static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset+len > flash->mtd.size-(flash->mtd.size/64))
+ status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
/****************************************************************************/
/*
@@ -642,6 +732,10 @@ static const struct spi_device_id m25p_ids[] = {
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
@@ -899,6 +993,12 @@ static int m25p_probe(struct spi_device *spi)
flash->mtd._erase = m25p80_erase;
flash->mtd._read = m25p80_read;
+ /* flash protection support for STmicro chips */
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ flash->mtd._lock = m25p80_lock;
+ flash->mtd._unlock = m25p80_unlock;
+ }
+
/* sst flash chips use AAI word program */
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd._write = sst_write;
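The new m25p80_lock()/m25p80_unlock() handlers treat BP2..BP0 as selecting a top-anchored power-of-two fraction of the array (from the top 1/64 up to the whole chip, the usual ST block-protection scheme), and only rewrite the status register when the protected fraction strictly grows (lock) or shrinks (unlock), so neighbouring areas are never unlocked or locked as a side effect. As a worked example on a hypothetical 16 MiB part: locking from offset 12 MiB to the end hits the "offset < size - size/8" branch and programs SR_BP2|SR_BP0, i.e. exactly the top quarter (12-16 MiB); unlocking a range that reaches into the top 1/64 clears all three BP bits and unprotects the whole array.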
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 62ba82c396c2..3ed17c4d4358 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -429,7 +429,7 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_RAM=y && (!MMU || COLDFIRE)
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7901d72c9242..363939dfad05 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -68,9 +68,6 @@ static int of_flash_remove(struct platform_device *dev)
kfree(info->list[i].res);
}
}
-
- kfree(info);
-
return 0;
}
@@ -199,8 +196,9 @@ static int of_flash_probe(struct platform_device *dev)
map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
err = -ENOMEM;
- info = kzalloc(sizeof(struct of_flash) +
- sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev,
+ sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
if (!info)
goto err_flash_remove;
@@ -241,6 +239,7 @@ static int of_flash_probe(struct platform_device *dev)
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
+ info->list[i].map.device_node = dp;
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 299bf88a6f41..c1af83db5202 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -23,12 +23,26 @@
/****************************************************************************/
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
struct map_info uclinux_ram_map = {
- .name = "RAM",
- .phys = (unsigned long)__bss_stop,
+ .name = MAP_NAME,
.size = 0,
};
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
@@ -60,11 +74,17 @@ static int __init uclinux_mtd_init(void)
struct map_info *mapp;
mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
if (!mapp->size)
mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
- printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
/*
@@ -82,7 +102,7 @@ static int __init uclinux_mtd_init(void)
simple_map_init(mapp);
- mtd = do_map_probe("map_ram", mapp);
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
return(-ENXIO);
@@ -118,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
/****************************************************************************/
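The new physaddr parameter replaces the hard-coded __bss_stop probe base. Since MTD_UCLINUX is a bool option the driver is always built in, so the override goes on the kernel command line, e.g. (address invented for illustration):

    uclinux.physaddr=0x10200000

When the parameter is left at its default of -1, the old behaviour of probing at __bss_stop is kept.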
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975d..61d5f56473e1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -349,13 +349,8 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- do {
- if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
- goto fail_locked;
- error = idr_get_new(&mtd_idr, mtd, &i);
- } while (error == -EAGAIN);
-
- if (error)
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0)
goto fail_locked;
mtd->index = i;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c516a9408087..ffcbcca2fd2d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -101,6 +101,8 @@ struct atmel_nand_host {
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
u32 pmecc_lookup_table_offset;
+ u32 pmecc_lookup_table_offset_512;
+ u32 pmecc_lookup_table_offset_1024;
int pmecc_bytes_per_sector;
int pmecc_sector_number;
@@ -908,6 +910,84 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
}
+/*
+ * Get the ECC requirement from the ONFI parameters. Returns -1 if the
+ * ONFI parameters are not supported, 0 if the ECC requirement was
+ * read successfully.
+ */
+static int get_onfi_ecc_param(struct nand_chip *chip,
+ int *ecc_bits, int *sector_size)
+{
+ *ecc_bits = *sector_size = 0;
+
+ if (chip->onfi_params.ecc_bits == 0xff)
+ /* TODO: the sector_size and ecc_bits need to be found in the
+ * extended ECC parameters; this is not supported yet.
+ */
+ return -1;
+
+ *ecc_bits = chip->onfi_params.ecc_bits;
+
+ /* The default sector size (ecc codeword size) is 512 */
+ *sector_size = 512;
+
+ return 0;
+}
+
+/*
+ * Get the ECC requirement from the ONFI parameters.
+ * If pmecc-cap or pmecc-sector-size is not specified in the DTS, this
+ * function sets it according to the ONFI ECC requirement; otherwise the
+ * value from the DTS file is used.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ int *cap, int *sector_size)
+{
+ /* Get ECC requirement from ONFI parameters */
+ *cap = *sector_size = 0;
+ if (host->nand_chip.onfi_version) {
+ if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
+ dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ else
+ dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
+ } else {
+ dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
+ }
+ if (*cap == 0 && *sector_size == 0) {
+ *cap = 2;
+ *sector_size = 512;
+ }
+
+ /* If dts file doesn't specify then use the one in ONFI parameters */
+ if (host->pmecc_corr_cap == 0) {
+ /* use the most suitable ECC strength (the nearest larger supported value) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap < 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap < 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap < 24)
+ host->pmecc_corr_cap = 24;
+ else
+ return -EINVAL;
+ }
+ if (host->pmecc_sector_size == 0) {
+ /* use the most suitable sector size (the nearest smaller supported value) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
@@ -916,8 +996,22 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct resource *regs, *regs_pmerr, *regs_rom;
int cap, sector_size, err_no;
+ err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+ if (err_no) {
+ dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
+ return err_no;
+ }
+
+ if (cap != host->pmecc_corr_cap ||
+ sector_size != host->pmecc_sector_size)
+ dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
+
cap = host->pmecc_corr_cap;
sector_size = host->pmecc_sector_size;
+ host->pmecc_lookup_table_offset = (sector_size == 512) ?
+ host->pmecc_lookup_table_offset_512 :
+ host->pmecc_lookup_table_offset_1024;
+
dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
cap, sector_size);
@@ -1215,7 +1309,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
- u32 val, table_offset;
+ u32 val;
u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
@@ -1259,42 +1353,41 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
/* use PMECC, get correction capability, sector size and lookup
* table offset.
+ * If correction bits and sector size are not specified, then find
+ * them from NAND ONFI parameters.
*/
- if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Capability\n");
- return -EINVAL;
- } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
- (val != 24)) {
- dev_err(host->dev,
- "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+ if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
}
- host->pmecc_corr_cap = (u8)val;
- if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
- return -EINVAL;
- } else if ((val != 512) && (val != 1024)) {
- dev_err(host->dev,
- "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+ if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
}
- host->pmecc_sector_size = (u16)val;
if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
offset, 2) != 0) {
dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
return -EINVAL;
}
- table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
-
- if (!table_offset) {
+ if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
return -EINVAL;
}
- host->pmecc_lookup_table_offset = table_offset;
+ host->pmecc_lookup_table_offset_512 = offset[0];
+ host->pmecc_lookup_table_offset_1024 = offset[1];
return 0;
}
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
index 0bdb2ce4da75..c005a62330b1 100644
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -1,6 +1,10 @@
#ifndef __BCM47XXNFLASH_H
#define __BCM47XXNFLASH_H
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 8363a9a5fa3f..7bae569fdc79 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -9,14 +9,14 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rafał Miłecki");
@@ -77,6 +77,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
.remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
@@ -88,13 +89,10 @@ static int __init bcm47xxnflash_init(void)
{
int err;
- /*
- * Platform device "bcma_nflash" exists on SoCs and is registered very
- * early, it won't be added during runtime (use platform_driver_probe).
- */
- err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ err = platform_driver_register(&bcm47xxnflash_driver);
if (err)
- pr_err("Failed to register serial flash driver: %d\n", err);
+ pr_err("Failed to register bcm47xx nand flash driver: %d\n",
+ err);
return err;
}
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 595de4012e71..b2ab373c9eef 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -9,13 +9,13 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have
* shown ~1000 retries as the maximum. */
#define NFLASH_READY_RETRIES 10000
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feae55c7b880..94e17af8e450 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "unable to allocate memory\n");
ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, resource_size(res1));
- base = ioremap(res2->start, resource_size(res2));
+ vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+ base = devm_request_and_ioremap(&pdev->dev, res2);
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EINVAL;
+ ret = -EADDRNOTAVAIL;
goto err_ioremap;
}
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "aemif");
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ err_timing:
clk_disable_unprepare(info->clk);
err_clk_enable:
- clk_put(info->clk);
-
spin_lock_irq(&davinci_nand_lock);
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
@@ -855,13 +853,7 @@ err_clk_enable:
err_ecc:
err_clk:
err_ioremap:
- if (base)
- iounmap(base);
- if (vaddr)
- iounmap(vaddr);
-
err_nomem:
- kfree(info);
return ret;
}
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- iounmap(info->base);
- iounmap(info->vaddr);
-
nand_release(&info->mtd);
clk_disable_unprepare(info->clk);
- clk_put(info->clk);
-
- kfree(info);
return 0;
}
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index ad6222627fed..f1f7f12ab501 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -176,8 +176,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, page_addr);
- out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+ iowrite32be(page_addr, &ifc->ifc_nand.row0);
+ iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
@@ -239,18 +239,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int i;
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
- in_be32(&ifc->ifc_nand.nand_fir0),
- in_be32(&ifc->ifc_nand.nand_fcr0));
+ ioread32be(&ifc->ifc_nand.nand_fir0),
+ ioread32be(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -273,7 +274,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int sector_end = sector + chip->ecc.steps - 1;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -313,31 +314,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
-
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
} else {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
else
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
}
}
@@ -357,7 +360,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
switch (command) {
/* READ0 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -372,7 +375,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -388,19 +391,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (command == NAND_CMD_PARAM)
timing = IFC_FIR_OP_RBCD;
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (timing << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- command << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, column);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(column, &ifc->ifc_nand.row3);
/*
* although currently it's 8 bytes for READID, we always read
* the maximum 256 bytes(for PARAM)
*/
- out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+ iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
@@ -415,16 +418,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
@@ -440,26 +443,28 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
IFC_NAND_FCR0_CMD2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1,
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -474,7 +479,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
- out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
@@ -482,10 +487,11 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
- out_be32(&ifc->ifc_nand.nand_fbcr,
- ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+ iowrite32be(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
} else {
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
@@ -493,12 +499,12 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
case NAND_CMD_STATUS:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
@@ -512,10 +518,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
return;
case NAND_CMD_RESET:
- out_be32(&ifc->ifc_nand.nand_fir0,
- IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
@@ -639,18 +645,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
- IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
- nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+ nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
/*
* The chip always seems to report that it is
@@ -744,34 +750,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = in_be32(&ifc->csor_cs[cs].csor);
- csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+ csor = ioread32be(&ifc->csor_cs[cs].csor);
+ csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
/* change PageSize to 8K and SpareSize to 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- out_be32(&ifc->csor_cs[cs].csor, csor_8k);
- out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+ iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+ iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
/* READID */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(0x0, &ifc->ifc_nand.row3);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, 0x0);
- out_be32(&ifc->ifc_nand.col0, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.row0);
+ iowrite32be(0x0, &ifc->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
/* start read seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -781,8 +787,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- out_be32(&ifc->csor_cs[cs].csor, csor);
- out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+ iowrite32be(csor, &ifc->csor_cs[cs].csor);
+ iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -799,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -813,13 +819,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
- if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -832,7 +838,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+ csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
@@ -884,7 +890,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
- ver = in_be32(&ifc->ifc_rev);
+ ver = ioread32be(&ifc->ifc_rev);
if (ver == FSL_IFC_V1_1_0)
fsl_ifc_sram_init(priv);
@@ -910,7 +916,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
phys_addr_t addr)
{
- u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -997,17 +1003,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
dev_set_drvdata(priv->dev, priv);
- out_be32(&ifc->ifc_nand.nand_evter_en,
- IFC_NAND_EVTER_EN_OPC_EN |
- IFC_NAND_EVTER_EN_FTOER_EN |
- IFC_NAND_EVTER_EN_WPER_EN);
+ iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
- out_be32(&ifc->ifc_nand.nand_evter_intr_en,
- IFC_NAND_EVTER_INTR_OPCIR_EN |
- IFC_NAND_EVTER_INTR_FTOERIR_EN |
- IFC_NAND_EVTER_INTR_WPERIR_EN);
-
+ iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
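
The fsl_ifc_nand.c hunks above replace the PowerPC-only out_be32()/in_be32()
accessors with the generic iowrite32be()/ioread32be() helpers, which keep the
register block big-endian while allowing the driver to build on other
architectures. A minimal sketch of the pattern, with a hypothetical register
layout (not the real IFC one):

#include <linux/io.h>

struct example_regs {
        __be32 ctrl;
        __be32 status;
};

static void start_op(struct example_regs __iomem *regs, u32 cmd)
{
        /* was: out_be32(&regs->ctrl, cmd); note the swapped argument order */
        iowrite32be(cmd, &regs->ctrl);
}

static u32 read_status(struct example_regs __iomem *regs)
{
        /* was: in_be32(&regs->status) */
        return ioread32be(&regs->status);
}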
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 09af555408b7..05ba3f0c2d19 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+ flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;
- flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
} else {
dma_src = host->data_pa;
dma_dst = dma_addr;
- flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
}
tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
len, flags);
-
if (!tx) {
dev_err(host->dev, "device_prep_dma_memcpy error\n");
- dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
- return -EIO;
+ ret = -EIO;
+ goto unmap_dma;
}
tx->callback = dma_complete;
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
ret = dma_submit_error(cookie);
if (ret) {
dev_err(host->dev, "dma_submit_error %d\n", cookie);
- return ret;
+ goto unmap_dma;
}
dma_async_issue_pending(chan);
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
if (ret <= 0) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
dev_err(host->dev, "wait_for_completion_timeout\n");
- return ret ? ret : -ETIMEDOUT;
+ if (!ret)
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
}
- return 0;
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
}
/*
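
The fsmc_nand.c change above sets DMA_COMPL_SKIP_SRC_UNMAP and
DMA_COMPL_SKIP_DEST_UNMAP so the dmaengine core no longer unmaps the buffer,
and funnels every failure path through one unmap_dma label so the buffer
mapped with dma_map_single() is unmapped exactly once. A simplified sketch of
that single-exit pattern; the helper names below are made up:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int issue_descriptor(dma_addr_t addr, size_t len);       /* hypothetical */
static int wait_for_completion_example(void);                   /* hypothetical */

static int xfer_example(struct device *dev, void *buf, size_t len,
                        enum dma_data_direction dir)
{
        dma_addr_t addr;
        int ret;

        addr = dma_map_single(dev, buf, len, dir);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        ret = issue_descriptor(addr, len);
        if (ret)
                goto unmap_dma;

        ret = wait_for_completion_example();

unmap_dma:
        /* every path after the mapping ends up here, so the unmap happens once */
        dma_unmap_single(dev, addr, len, dir);
        return ret;
}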
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index a0924515c396..588f5374047c 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -61,6 +61,16 @@
& BM_BCH_FLASH0LAYOUT0_ECC0) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
@@ -93,6 +103,16 @@
& BM_BCH_FLASH0LAYOUT1_ECCN) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
@@ -103,4 +123,6 @@
? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
)
+
+#define HW_BCH_VERSION 0x00000160
#endif
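
The two GF_13_14 macros added above contribute a single bit (bit 10) to the
BCH layout registers, and only when the chip is an i.MX6Q and the Galois
field length is 14; in every other case the field stays 0. A standalone
illustration of the resulting field value (plain C, not kernel code):

#include <stdio.h>

#define GF_13_14_BIT    (1u << 10)

static unsigned int gf_field(int is_mx6q, unsigned int gf_len)
{
        return (is_mx6q && gf_len == 14) ? GF_13_14_BIT : 0;
}

int main(void)
{
        printf("mx28, gf=13 -> 0x%x\n", gf_field(0, 13));       /* 0x0 */
        printf("mx6q, gf=13 -> 0x%x\n", gf_field(1, 13));       /* 0x0 */
        printf("mx6q, gf=14 -> 0x%x\n", gf_field(1, 14));       /* 0x400 */
        return 0;
}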
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index d84699c7968e..4f8857fa48a7 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -208,6 +208,11 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
}
/* start to print out the BCH info */
+ pr_err("Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
pr_err("BCH Geometry :\n");
pr_err("GF length : %u\n", geo->gf_len);
pr_err("ECC Strength : %u\n", geo->ecc_strength);
@@ -232,6 +237,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
unsigned int metadata_size;
unsigned int ecc_strength;
unsigned int page_size;
+ unsigned int gf_len;
int ret;
if (common_nfc_set_geometry(this))
@@ -242,6 +248,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
metadata_size = bch_geo->metadata_size;
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
+ gf_len = bch_geo->gf_len;
ret = gpmi_enable_clk(this);
if (ret)
@@ -263,11 +270,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);
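
The gf_len value fed into these writel() calls comes from the geometry code in
gpmi-nand.c further down, which switches from GF(2^13) to GF(2^14) whenever
the ECC chunk grows past 512 bytes: a 1024-byte chunk alone is 8192 data bits,
which no longer fits a GF(2^13) codeword of at most 2^13 - 1 = 8191 bits.
A standalone illustration of that selection (plain C, values are examples):

#include <stdio.h>

static void pick_geometry(unsigned int oobsize,
                          unsigned int *chunk, unsigned int *gf_len)
{
        *chunk = 512;
        *gf_len = 13;
        while (*chunk < oobsize) {              /* keep chunk >= oobsize */
                *chunk *= 2;
                *gf_len = 14;                   /* >512-byte chunks need GF(2^14) */
        }
}

int main(void)
{
        unsigned int chunk, gf;

        pick_geometry(224, &chunk, &gf);        /* e.g. 4KiB page, 224B OOB */
        printf("oob=224 -> chunk=%u gf=%u\n", chunk, gf);       /* 512, 13 */
        pick_geometry(640, &chunk, &gf);        /* e.g. 8KiB page, 640B OOB */
        printf("oob=640 -> chunk=%u gf=%u\n", chunk, gf);       /* 1024, 14 */
        return 0;
}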
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index e9b1c47e3cf9..717881a3d1b8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -94,6 +94,25 @@ static inline int get_ecc_strength(struct gpmi_nand_data *this)
return round_down(ecc_strength, 2);
}
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ /* The mx23/mx28 support only GF(13). */
+ if (geo->gf_len == 14)
+ return false;
+
+ if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
+ return false;
+ } else if (GPMI_IS_MX6Q(this)) {
+ if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
+ return false;
+ }
+ return true;
+}
+
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
@@ -112,17 +131,24 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
/* The default for the length of Galois Field. */
geo->gf_len = 13;
- /* The default for chunk size. There is no oobsize greater then 512. */
+ /* The default for chunk size. */
geo->ecc_chunk_size = 512;
- while (geo->ecc_chunk_size < mtd->oobsize)
+ while (geo->ecc_chunk_size < mtd->oobsize) {
geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
/* We use the same ECC strength for all chunks. */
geo->ecc_strength = get_ecc_strength(this);
- if (!geo->ecc_strength) {
- pr_err("wrong ECC strength.\n");
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+ "unsupported NAND chip: required ECC strength %d exceeds the controller maximum of %d\n",
+ geo->ecc_strength,
+ (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
+ : MXS_ECC_STRENGTH_MAX));
return -EINVAL;
}
@@ -920,8 +946,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
dma_addr_t auxiliary_phys;
unsigned int i;
unsigned char *status;
- unsigned int failed;
- unsigned int corrected;
+ unsigned int max_bitflips = 0;
int ret;
pr_debug("page number is : %d\n", page);
@@ -945,35 +970,25 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
payload_virt, payload_phys);
if (ret) {
pr_err("Error in ECC-based read: %d\n", ret);
- goto exit_nfc;
+ return ret;
}
/* handle the block mark swapping */
block_mark_swapping(this, payload_virt, auxiliary_virt);
/* Loop over status bytes, accumulating ECC status. */
- failed = 0;
- corrected = 0;
- status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
- failed++;
+ mtd->ecc_stats.failed++;
continue;
}
- corrected += *status;
- }
-
- /*
- * Propagate ECC status to the owning MTD only when failed or
- * corrected times nearly reaches our ECC correction threshold.
- */
- if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
- mtd->ecc_stats.failed += failed;
- mtd->ecc_stats.corrected += corrected;
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
if (oob_required) {
@@ -995,8 +1010,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
payload_virt, payload_phys);
-exit_nfc:
- return ret;
+
+ return max_bitflips;
}
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1668,8 +1683,8 @@ exit_nfc_init:
release_resources(this);
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
- kfree(this);
dev_err(this->dev, "driver registration failed: %d\n", ret);
+ kfree(this);
return ret;
}
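
The gpmi_ecc_read_page() rework above drops the old thresholding heuristic:
every corrected chunk is now accounted to mtd->ecc_stats, and the function
returns the worst per-chunk bitflip count so the MTD core can compare it
against mtd->bitflip_threshold. A simplified sketch of that accumulation; the
helper is hypothetical and the status values mirror gpmi-nand.h:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

#define STATUS_GOOD             0x00
#define STATUS_ERASED           0xff
#define STATUS_UNCORRECTABLE    0xfe

static int accumulate_ecc_status(struct mtd_info *mtd, const u8 *status,
                                 unsigned int chunks)
{
        unsigned int i, max_bitflips = 0;

        for (i = 0; i < chunks; i++) {
                if (status[i] == STATUS_GOOD || status[i] == STATUS_ERASED)
                        continue;
                if (status[i] == STATUS_UNCORRECTABLE) {
                        mtd->ecc_stats.failed++;
                        continue;
                }
                /* any other value is the number of corrected bitflips */
                mtd->ecc_stats.corrected += status[i];
                max_bitflips = max_t(unsigned int, max_bitflips, status[i]);
        }
        return max_bitflips;
}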
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 3d93a5e39090..072947731277 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -284,6 +284,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_ERASED 0xff
#define STATUS_UNCORRECTABLE 0xfe
+/* BCH's bit correction capability. */
+#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */
+#define MX6_ECC_STRENGTH_MAX 40
+
/* Use the platform_id to distinguish different Archs. */
#define IS_MX23 0x0
#define IS_MX28 0x1
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 60ac5b98b718..07e5784e5cd3 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -530,12 +530,23 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
+ struct nand_chip *this = &host->nand;
+
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
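
On 16-bit parts the controller appears to store each READID byte in its own
16-bit slot, so after the memcpy32_fromio() above the meaningful bytes sit at
even offsets of the raw buffer; the added code folds them back into a
contiguous ID. A standalone illustration of that compaction (the raw bytes
below are made up):

#include <stdio.h>

int main(void)
{
        /* raw buffer as read over a 16-bit bus: one ID byte per 16-bit slot */
        unsigned char raw[12] = { 0x2c, 0x00, 0xda, 0x00, 0x90, 0x00,
                                  0x95, 0x00, 0x06, 0x00, 0x00, 0x00 };
        unsigned char id[6];
        int i;

        for (i = 0; i < 6; i++)
                id[i] = raw[i * 2];     /* same effect as the compaction above */

        for (i = 0; i < 6; i++)
                printf("%02x ", id[i]);
        printf("\n");                   /* prints: 2c da 90 95 06 00 */
        return 0;
}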
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3766682a0289..43214151b882 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -825,13 +825,8 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- unsigned long timeo = jiffies;
int status, state = chip->state;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
+ unsigned long timeo = (state == FL_ERASING ? 400 : 20);
led_trigger_event(nand_led_trigger, LED_FULL);
@@ -849,6 +844,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
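
The nand_wait() change above keeps the timeout in milliseconds and converts it
with msecs_to_jiffies() only on the normal polling path, so the same value can
still be handed to panic_nand_wait() as a millisecond count. A condensed
sketch of a polling loop under that convention; the callback and names are
illustrative only:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int wait_ready_example(bool erasing, bool (*dev_ready)(void))
{
        unsigned long timeo = erasing ? 400 : 20;       /* milliseconds */

        timeo = jiffies + msecs_to_jiffies(timeo);      /* convert once, here */
        while (time_before(jiffies, timeo)) {
                if (dev_ready())
                        return 0;
                cond_resched();
        }
        return -ETIMEDOUT;
}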
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index b7cfe0d37121..053c9a2d47c3 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -55,8 +55,7 @@ struct mtd_info;
#define MODULE_AUTHOR(x) /* x */
#define MODULE_DESCRIPTION(x) /* x */
-#define printk printf
-#define KERN_ERR ""
+#define pr_err printf
#endif
/*
@@ -507,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
- printk(KERN_ERR "uncorrectable error : ");
+ pr_err("%s: uncorrectable ECC error", __func__);
return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 818b65c85d12..891c52a30e6a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1408,40 +1408,32 @@ static void clear_memalloc(int memalloc)
current->flags &= ~PF_MEMALLOC;
}
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_read(file, (char __user *)buf, count, pos);
+ tx = kernel_read(file, pos, buf, count);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
- mm_segment_t old_fs;
ssize_t tx;
int err, memalloc;
- err = get_pages(ns, file, count, *pos);
+ err = get_pages(ns, file, count, pos);
if (err)
return err;
- old_fs = get_fs();
- set_fs(get_ds());
memalloc = set_memalloc();
- tx = vfs_write(file, (char __user *)buf, count, pos);
+ tx = kernel_write(file, buf, count, pos);
clear_memalloc(memalloc);
- set_fs(old_fs);
put_pages(ns);
return tx;
}
@@ -1476,12 +1468,12 @@ int do_read_error(struct nandsim *ns, int num)
void do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && random32() < (1 << 22)) {
+ if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
if (bitflips > 1)
- flips = (random32() % (int) bitflips) + 1;
+ flips = (prandom_u32() % (int) bitflips) + 1;
while (flips--) {
- int pos = random32() % (num * 8);
+ int pos = prandom_u32() % (num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
@@ -1511,7 +1503,7 @@ static void read_page(struct nandsim *ns, int num)
if (do_read_error(ns, num))
return;
pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
- tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
@@ -1573,7 +1565,7 @@ static int prog_page(struct nandsim *ns, int num)
u_char *pg_off;
if (ns->cfile) {
- loff_t off, pos;
+ loff_t off;
ssize_t tx;
int all;
@@ -1585,8 +1577,7 @@ static int prog_page(struct nandsim *ns, int num)
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
- pos = off;
- tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = read_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1595,16 +1586,15 @@ static int prog_page(struct nandsim *ns, int num)
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
if (all) {
- pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
- tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
ns->pages_written[ns->regs.row] = 1;
} else {
- pos = off;
- tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+ tx = write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
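
The nandsim conversion above passes the file position by value and uses
kernel_read()/kernel_write(), which accept kernel pointers directly, so the
get_fs()/set_fs(get_ds()) address-limit switching around vfs_read()/vfs_write()
disappears. A minimal sketch of the resulting helpers, with the error handling
and page-reservation calls omitted:

#include <linux/fs.h>

static ssize_t cache_read(struct file *file, void *buf, size_t count,
                          loff_t pos)
{
        /* position is a plain value; no mm_segment_t juggling required */
        return kernel_read(file, pos, buf, count);
}

static ssize_t cache_write(struct file *file, const void *buf, size_t count,
                           loff_t pos)
{
        return kernel_write(file, buf, count, pos);
}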
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1d333497cfcb..8e820ddf4e08 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -22,9 +22,12 @@
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#ifdef CONFIG_MTD_NAND_OMAP_BCH
#include <linux/bch.h>
+#include <linux/platform_data/elm.h>
#endif
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -117,6 +120,33 @@
#define OMAP24XX_DMA_GPMC 4
+#define BCH8_MAX_ERROR 8 /* up to 8 bits correctable */
+#define BCH4_MAX_ERROR 4 /* up to 4 bits correctable */
+
+#define SECTOR_BYTES 512
+/* 4-bit padding to make the 52-bit BCH4 syndrome byte-aligned (56 = 52 + 4) */
+#define BCH4_BIT_PAD 4
+#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
+#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+#endif
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -156,6 +186,9 @@ struct omap_nand_info {
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
struct nand_ecclayout ecclayout;
+ bool is_elm_used;
+ struct device *elm_dev;
+ struct device_node *of_node;
#endif
};
@@ -1031,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
* omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
* @mtd: MTD device structure
* @mode: Read/Write mode
+ *
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Wrapping mode 6 is used for both reading and writing when the ELM
+ * module is not used for error correction.
+ * On writing,
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
@@ -1039,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_chip *chip = mtd->priv;
- u32 val;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* Using wrapping mode 6 for writing */
+ wr_mode = BCH_WRAPMODE_6;
- nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
- dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
- nsectors = 1;
/*
- * Program GPMC to perform correction on one 512-byte sector at a time.
- * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
- * gives a slight (5%) performance gain (but requires additional code).
+ * ECC engine enabled for valid ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
*/
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+
+ /* Perform ecc calculation on 512-byte sector */
+ nsectors = 1;
+
+ /* Set the number of correctable errors */
+ nerrors = info->nand.ecc.strength;
+
+ /* Multi sector reading/writing for NAND flash with page size < 4096 */
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+ if (mode == NAND_ECC_READ) {
+ /* Using wrapping mode 1 for reading */
+ wr_mode = BCH_WRAPMODE_1;
+
+ /*
+ * ECC engine enabled for ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
+ */
+ ecc_size0 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
+ ecc_size1 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
+ }
+
+ /* Perform ecc calculation for one page (< 4096) */
+ nsectors = info->nand.ecc.steps;
+ }
writel(ECC1, info->reg.gpmc_ecc_control);
- /*
- * When using BCH, sector size is hardcoded to 512 bytes.
- * Here we are using wrapping mode 6 both for reading and writing, with:
- * size0 = 0 (no additional protected byte in spare area)
- * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
- */
- val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
writel(val, info->reg.gpmc_ecc_size_config);
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
/* BCH configuration */
val = ((1 << 16) | /* enable BCH */
(((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
- (0x06 << 8) | /* wrap mode = 6 */
+ (wr_mode << 8) | /* wrap mode */
(dev_width << 7) | /* bus width */
(((nsectors-1) & 0x7) << 4) | /* number of sectors */
(info->gpmc_cs << 1) | /* ECC CS */
@@ -1072,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
writel(val, info->reg.gpmc_ecc_config);
- /* clear ecc and enable bits */
+ /* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
@@ -1162,6 +1227,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
}
/**
+ * omap3_calculate_ecc_bch - Generate BCH ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculating BCH4/BCH8 ECC vectors for the whole page
+ */
+static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ int i, eccbchtsel;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ /*
+ * find BCH scheme used
+ * 0 -> BCH4
+ * 1 -> BCH8
+ */
+ eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
+
+ for (i = 0; i < nsectors; i++) {
+
+ /* Read hw-computed remainder */
+ bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
+ bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
+ if (eccbchtsel) {
+ bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
+ bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
+ }
+
+ if (eccbchtsel) {
+ /* BCH8 ecc scheme */
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ /*
+ * Setting 14th byte to zero to handle
+ * erased page & maintain compatibility
+ * with RBL
+ */
+ *ecc_code++ = 0x0;
+ } else {
+ /* BCH4 ecc scheme */
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ /*
+ * Setting 8th byte to zero to handle
+ * erased page
+ */
+ *ecc_code++ = 0x0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the bit flips in an erased page fall below the
+ * correctable level. If they do, report the page as erased with
+ * correctable bit flips; otherwise report the page as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * The bit flips fall within the correctable level;
+ * fill the data and OOB areas with 0xFF.
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * For error-free pages the calculated ECC vector is reported as zero.
+ * For pages with errors (and for erased pages) a non-zero vector is
+ * reported. When the vector is non-zero, read_ecc is checked at a fixed
+ * offset (x = 13 for BCH8, x = 7 for BCH4) to decide whether the page is
+ * programmed: the number of 0 bits in read_ecc[x] is counted and compared
+ * against a threshold; if the count reaches the threshold the page is
+ * treated as programmed, otherwise as erased.
+ *
+ * 1. If the page is erased, it is compared against the ECC vector expected
+ * for an erased page. If the comparison fails, bit flips are present in
+ * the erased page; they are counted and, if they fall within the
+ * correctable level, the page is reported as all 0xFF and the
+ * corrected-bit statistics are updated.
+ * 2. If an error is reported on a programmed page, the ELM error vector is
+ * updated and the page is corrected with the ELM error correction routine.
+ *
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int eccsteps = info->nand.ecc.steps;
+ int i , j, stat = 0;
+ int eccsize, eccflag, ecc_vector_size;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ enum bch_ecc type;
+ bool is_error_reported = false;
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
+ type = BCH8_ECC;
+ erased_ecc_vec = bch8_vector;
+ } else {
+ type = BCH4_ECC;
+ erased_ecc_vec = bch4_vector;
+ }
+
+ ecc_vector_size = info->nand.ecc.bytes;
+
+ /*
+ * Remove extra byte padding for BCH8 RBL
+ * compatibility and erased page handling
+ */
+ eccsize = ecc_vector_size - 1;
+
+ for (i = 0; i < eccsteps ; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check whether any error was reported; on error the
+ * calculated ECC is non-zero.
+ */
+
+ for (j = 0; (j < eccsize); j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ /*
+ * Set the threshold to min(4, ecc.strength / 2) so that
+ * up to 4 bit flips in the checked byte are tolerated.
+ */
+ unsigned int threshold = min_t(unsigned int, 4,
+ info->nand.ecc.strength / 2);
+
+ /*
+ * Decide whether the data area is programmed by counting
+ * the number of 0 bits in the spare-area byte at the fixed
+ * offset and comparing the count against the threshold.
+ * A programmed page is expected to have at least
+ * 'threshold' zero bits in that byte; otherwise the page
+ * is treated as erased, and any bit flips beyond the
+ * correctable level are reported as uncorrectable.
+ */
+ if (hweight8(~read_ecc[eccsize]) >= threshold) {
+ /*
+ * Update elm error vector as
+ * data area is programmed
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ } else {
+ /* Error reported in erased page */
+ int bitflip_count;
+ u_char *buf = &data[info->nand.ecc.size * i];
+
+ if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+
+ if (bitflip_count)
+ stat += bitflip_count;
+ else
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc_vector_size;
+ read_ecc += ecc_vector_size;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return 0;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ u32 bit_pos, byte_pos, error_max, pos;
+
+ if (type == BCH8_ECC)
+ error_max = BCH8_ECC_MAX;
+ else
+ error_max = BCH4_ECC_MAX;
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR)
+ pos = err_vec[i].error_loc[j];
+ else
+ /* Add 4 to account for the 4-bit padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512)
+ data[byte_pos] ^= 1 << bit_pos;
+ else
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ /* else, errors beyond error_max are not corrected */
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat += err_vec[i].error_count;
+
+ /* Update page data with sector size */
+ data += info->nand.ecc.size;
+ spare_ecc += ecc_vector_size;
+ }
+
+ for (i = 0; i < eccsteps; i++)
+ /* Return error if uncorrectable error present */
+ if (err_vec[i].error_uncorrectable)
+ return -EINVAL;
+
+ return stat;
+}
+
+/**
* omap3_correct_data_bch - Decode received data and correct errors
* @mtd: MTD device structure
* @data: page data
@@ -1194,6 +1551,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
}
/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Custom write-page method added to support multi-sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* Write ecc vector to OOB area */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ECC scheme the GPMC is used for syndrome calculation and the
+ * ELM module for error correction.
+ * This custom method supports ELM error correction and multi-sector
+ * reading: the page data area is read together with the OOB data while the
+ * ECC engine is enabled, and the ECC vector is updated after the OOB read.
+ * For error-free pages the ECC vector is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = &chip->oob_poi[eccpos[0]];
+ uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read data */
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, chip->ecc.total);
+
+ /* Calculate ecc bytes */
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+ stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/**
* omap3_free_bch - Release BCH ecc resources
* @mtd: MTD device structure
*/
@@ -1218,43 +1661,86 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
- const int hw_errors = 8;
+ const int hw_errors = BCH8_MAX_ERROR;
#else
- const int hw_errors = 4;
+ const int hw_errors = BCH4_MAX_ERROR;
#endif
+ enum bch_ecc bch_type;
+ const __be32 *parp;
+ int lenp;
+ struct device_node *elm_node;
+
info->bch = NULL;
- max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+ max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
+ BCH8_MAX_ERROR : BCH4_MAX_ERROR;
if (max_errors != hw_errors) {
pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
max_errors, hw_errors);
goto fail;
}
- /* software bch library is only used to detect and locate errors */
- info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
- if (!info->bch)
- goto fail;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.ecc.strength = max_errors;
- info->nand.ecc.size = 512;
- info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
- info->nand.ecc.correct = omap3_correct_data_bch;
- info->nand.ecc.mode = NAND_ECC_HW;
+ if (hw_errors == BCH8_MAX_ERROR)
+ bch_type = BCH8_ECC;
+ else
+ bch_type = BCH4_ECC;
- /*
- * The number of corrected errors in an ecc block that will trigger
- * block scrubbing defaults to the ecc strength (4 or 8).
- * Set mtd->bitflip_threshold here to define a custom threshold.
- */
+ /* Detect availability of ELM module */
+ parp = of_get_property(info->of_node, "elm_id", &lenp);
+ if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+ pr_err("Missing elm_id property, fall back to Software BCH\n");
+ info->is_elm_used = false;
+ } else {
+ struct platform_device *pdev;
- if (max_errors == 8) {
- info->nand.ecc.strength = 8;
- info->nand.ecc.bytes = 13;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ pdev = of_find_device_by_node(elm_node);
+ info->elm_dev = &pdev->dev;
+ elm_config(info->elm_dev, bch_type);
+ info->is_elm_used = true;
+ }
+
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+
+ if (hw_errors == BCH8_MAX_ERROR)
+ info->nand.ecc.bytes = BCH8_SIZE;
+ else
+ info->nand.ecc.bytes = BCH4_SIZE;
+
+ info->nand.ecc.correct = omap_elm_correct_data;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch;
+ info->nand.ecc.read_page = omap_read_page_bch;
+ info->nand.ecc.write_page = omap_write_page_bch;
} else {
- info->nand.ecc.strength = 4;
- info->nand.ecc.bytes = 7;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ /*
+ * software bch library is only used to detect and
+ * locate errors
+ */
+ info->bch = init_bch(13, max_errors,
+ 0x201b /* hw polynomial */);
+ if (!info->bch)
+ goto fail;
+
+ info->nand.ecc.correct = omap3_correct_data_bch;
+
+ /*
+ * The number of corrected errors in an ecc block that will
+ * trigger block scrubbing defaults to the ecc strength (4 or 8)
+ * Set mtd->bitflip_threshold here to define a custom threshold.
+ */
+
+ if (max_errors == 8) {
+ info->nand.ecc.bytes = 13;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ } else {
+ info->nand.ecc.bytes = 7;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ }
}
pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
@@ -1270,7 +1756,7 @@ fail:
*/
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
- int i, steps;
+ int i, steps, offset;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_ecclayout *layout = &info->ecclayout;
@@ -1292,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd)
goto fail;
}
+ /* ECC layout compatible with RBL for BCH8 */
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ offset = 2;
+ else
+ offset = mtd->oobsize - layout->eccbytes;
+
/* put ecc bytes at oob tail */
for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+ layout->eccpos[i] = offset + i;
+
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
+ else
+ layout->oobfree[0].offset = 2;
- layout->oobfree[0].offset = 2;
layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
info->nand.ecc.layout = layout;
@@ -1360,6 +1856,9 @@ static int omap_nand_probe(struct platform_device *pdev)
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ info->of_node = pdata->of_node;
+#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
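
Applying an ELM-reported error in omap_elm_correct_data() above boils down to
turning the reported bit position inside the (sector + ECC) codeword into a
byte/bit offset and flipping that bit either in the 512-byte data area or in
the spare ECC bytes. A standalone illustration of the arithmetic for BCH8;
the BCH8_ECC_OOB_BYTES value of 13 is an assumption for this sketch:

#include <stdio.h>

#define SECTOR_BYTES            512
#define BCH8_ECC_OOB_BYTES      13      /* assumed value for this illustration */
#define BCH8_ECC_MAX            ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)

int main(void)
{
        unsigned int pos = 1000;        /* example error location from the ELM */
        unsigned int bit_pos = pos % 8;
        unsigned int byte_pos = (BCH8_ECC_MAX - pos - 1) / 8;

        if (pos >= BCH8_ECC_MAX)
                printf("position outside the codeword, not corrected\n");
        else if (byte_pos < SECTOR_BYTES)
                printf("flip bit %u of data byte %u\n", bit_pos, byte_pos);
        else
                printf("flip bit %u of spare byte %u\n", bit_pos,
                       byte_pos - SECTOR_BYTES);
        return 0;
}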
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index dbd3aa574eaf..30bd907a260a 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -174,7 +174,14 @@ out:
return rc;
}
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 1eee264509a8..70106607c247 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -44,7 +44,7 @@ struct nand_ecc_test {
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
- unsigned int offset = random32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
@@ -55,9 +55,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
{
unsigned int offset[2];
- offset[0] = random32() % (size * BITS_PER_BYTE);
+ offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
do {
- offset[1] = random32() % (size * BITS_PER_BYTE);
+ offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
@@ -68,7 +68,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
static unsigned int random_ecc_bit(size_t size)
{
- unsigned int offset = random32() % (3 * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
if (size == 256) {
/*
@@ -76,7 +76,7 @@ static unsigned int random_ecc_bit(size_t size)
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
- offset = random32() % (3 * BITS_PER_BYTE);
+ offset = prandom_u32() % (3 * BITS_PER_BYTE);
}
return offset;
@@ -256,7 +256,7 @@ static int nand_ecc_test_run(const size_t size)
goto error;
}
- get_random_bytes(correct_data, size);
+ prandom_bytes(correct_data, size);
__nand_calculate_ecc(correct_data, size, correct_ecc);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
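
The mtd_nandecctest.c conversion above swaps random32()/get_random_bytes()
for prandom_u32()/prandom_bytes(): the self-test only needs pseudo-random
data, so there is no reason to consume the entropy pool. A small sketch of
the resulting pattern; the helper name is made up:

#include <linux/random.h>
#include <linux/types.h>

static void fill_test_pattern(unsigned char *buf, size_t size)
{
        unsigned int offset = prandom_u32() % (size * 8);       /* a random bit */

        prandom_bytes(buf, size);               /* pseudo-random payload */
        buf[offset / 8] ^= 1 << (offset % 8);   /* inject a single bit error */
}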
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index e827fa8cd844..3e24b379ffa4 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -29,6 +29,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -46,26 +47,7 @@ static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static int erase_eraseblock(int ebnum)
{
@@ -129,7 +111,7 @@ static int write_eraseblock(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
+ prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -182,7 +164,7 @@ static int verify_eraseblock(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- set_random_data(writebuf, use_len);
+ prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -273,7 +255,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
- set_random_data(writebuf, len);
+ prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -424,12 +406,12 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = write_whole_device();
if (err)
goto out;
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -444,13 +426,13 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -479,7 +461,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = write_whole_device();
if (err)
@@ -490,7 +472,7 @@ static int __init mtd_oobtest_init(void)
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
- simple_srand(5);
+ prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
if (err)
goto out;
@@ -649,7 +631,7 @@ static int __init mtd_oobtest_init(void)
goto out;
/* Write all eraseblocks */
- simple_srand(11);
+ prandom_seed_state(&rnd_state, 11);
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
@@ -659,7 +641,7 @@ static int __init mtd_oobtest_init(void)
continue;
addr = (i + 1) * mtd->erasesize - mtd->writesize;
for (pg = 0; pg < cnt; ++pg) {
- set_random_data(writebuf, sz);
+ prandom_bytes_state(&rnd_state, writebuf, sz);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -680,12 +662,13 @@ static int __init mtd_oobtest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(11);
+ prandom_seed_state(&rnd_state, 11);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
- set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+ prandom_bytes_state(&rnd_state, writebuf,
+ mtd->ecclayout->oobavail * 2);
addr = (i + 1) * mtd->erasesize - mtd->writesize;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
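
The oobtest/pagetest changes above replace the hand-rolled simple_rand() with
a private struct rnd_state: re-seeding the state with the same seed regenerates
the exact byte sequence, so the verify pass can recompute what the write pass
stored without keeping a copy. A sketch of that idiom; the helper and buffer
names are illustrative only:

#include <linux/random.h>
#include <linux/types.h>

static struct rnd_state example_state;

static void write_then_verify_pattern(unsigned char *scratch, size_t len)
{
        prandom_seed_state(&example_state, 1);
        prandom_bytes_state(&example_state, scratch, len);      /* pattern to write */

        /* ... write scratch to the device here ... */

        prandom_seed_state(&example_state, 1);                  /* same seed */
        prandom_bytes_state(&example_state, scratch, len);      /* identical bytes */

        /* ... compare against the data read back from the device ... */
}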
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index f93a76f88113..0c1140b6c286 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -29,6 +29,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -45,26 +46,7 @@ static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static int erase_eraseblock(int ebnum)
{
@@ -98,7 +80,7 @@ static int write_eraseblock(int ebnum)
size_t written;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, mtd->erasesize);
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
cond_resched();
err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
@@ -124,7 +106,7 @@ static int verify_eraseblock(int ebnum)
for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
- set_random_data(writebuf, mtd->erasesize);
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
err = mtd_read(mtd, addr0, bufsize, &read, twopages);
@@ -160,7 +142,8 @@ static int verify_eraseblock(int ebnum)
}
/* Check boundary between eraseblocks */
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
- unsigned long oldnext = next;
+ struct rnd_state old_state = rnd_state;
+
/* Do a read to set the internal dataRAMs to different data */
err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
@@ -188,13 +171,13 @@ static int verify_eraseblock(int ebnum)
return err;
}
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
- set_random_data(boundary + pgsize, pgsize);
+ prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
- next = oldnext;
+ rnd_state = old_state;
}
return err;
}
@@ -326,7 +309,7 @@ static int erasecrosstest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
@@ -359,7 +342,7 @@ static int erasecrosstest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
@@ -417,7 +400,7 @@ static int erasetest(void)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
- set_random_data(writebuf, pgsize);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
pr_err("error: write failed at %#llx\n",
@@ -565,7 +548,7 @@ static int __init mtd_pagetest_init(void)
pr_info("erased %u eraseblocks\n", i);
/* Write all eraseblocks */
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -580,7 +563,7 @@ static int __init mtd_pagetest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 596cbea8df4c..a6ce9c1fa6c5 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -49,13 +49,6 @@ static int pgcnt;
static int goodebcnt;
static struct timeval start, finish;
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = random32();
-}
static int erase_eraseblock(int ebnum)
{
@@ -396,7 +389,7 @@ static int __init mtd_speedtest_init(void)
goto out;
}
- set_random_data(iobuf, mtd->erasesize);
+ prandom_bytes(iobuf, mtd->erasesize);
err = scan_for_bad_eraseblocks();
if (err)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3729f679ae5d..787f539d16ca 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -55,7 +55,7 @@ static int rand_eb(void)
unsigned int eb;
again:
- eb = random32();
+ eb = prandom_u32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb %= (ebcnt - 1);
if (bbt[eb])
@@ -67,7 +67,7 @@ static int rand_offs(void)
{
unsigned int offs;
- offs = random32();
+ offs = prandom_u32();
offs %= bufsize;
return offs;
}
@@ -76,7 +76,7 @@ static int rand_len(int offs)
{
unsigned int len;
- len = random32();
+ len = prandom_u32();
len %= (bufsize - offs);
return len;
}
@@ -191,7 +191,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (random32() & 1)
+ if (prandom_u32() & 1)
return do_read();
else
return do_write();
@@ -282,8 +282,7 @@ static int __init mtd_stresstest_init(void)
}
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- for (i = 0; i < bufsize; i++)
- writebuf[i] = random32();
+ prandom_bytes(writebuf, bufsize);
err = scan_for_bad_eraseblocks();
if (err)
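
For the speed and stress tests, reproducibility across passes is not required, so the patch uses the global generator instead: prandom_bytes() fills a buffer and prandom_u32() replaces random32() one for one. A minimal sketch, with the bound and buffer purely illustrative:

	#include <linux/random.h>

	static unsigned int pick_offset(unsigned int bufsize)
	{
		return prandom_u32() % bufsize;	/* pseudo-random offset in [0, bufsize) */
	}

	static void fill_buffer(unsigned char *buf, size_t len)
	{
		prandom_bytes(buf, len);	/* global state, no seeding needed */
	}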
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index c880c2229c59..aade56f27945 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -28,6 +28,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
@@ -43,26 +44,7 @@ static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
static inline void clear_data(unsigned char *buf, size_t len)
{
@@ -119,7 +101,7 @@ static int write_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
@@ -133,7 +115,7 @@ static int write_eraseblock(int ebnum)
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
@@ -157,7 +139,7 @@ static int write_eraseblock2(int ebnum)
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
pr_err("error: write failed at %#llx\n",
@@ -193,7 +175,7 @@ static int verify_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
@@ -220,7 +202,7 @@ static int verify_eraseblock(int ebnum)
addr += subpgsize;
- set_random_data(writebuf, subpgsize);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
@@ -257,7 +239,7 @@ static int verify_eraseblock2(int ebnum)
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
- set_random_data(writebuf, subpgsize * k);
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
@@ -430,7 +412,7 @@ static int __init mtd_subpagetest_init(void)
goto out;
pr_info("writing whole device\n");
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -443,7 +425,7 @@ static int __init mtd_subpagetest_init(void)
}
pr_info("written %u eraseblocks\n", i);
- simple_srand(1);
+ prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -466,7 +448,7 @@ static int __init mtd_subpagetest_init(void)
goto out;
/* Write all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -481,7 +463,7 @@ static int __init mtd_subpagetest_init(void)
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
- simple_srand(3);
+ prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index c4cde1e9eddb..3a9f6a6a79f9 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -208,7 +208,7 @@ static inline int write_pattern(int ebnum, void *buf)
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
- int bad_ebs[ebcnt];
+ int *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
@@ -250,28 +250,24 @@ static int __init tort_init(void)
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_5A5) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_5A5)
goto out_mtd;
- }
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_A5A) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_A5A)
goto out_patt_5A5;
- }
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_FF) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_FF)
goto out_patt_A5A;
- }
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!check_buf) {
- pr_err("error: cannot allocate memory\n");
+ if (!check_buf)
goto out_patt_FF;
- }
+
+ bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
err = 0;
@@ -290,7 +286,6 @@ static int __init tort_init(void)
/*
* Check if there is a bad eraseblock among those we are going to test.
*/
- memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
@@ -394,6 +389,8 @@ out:
pr_info("finished after %u erase cycles\n",
erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
kfree(check_buf);
out_patt_FF:
kfree(patt_FF);
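
The torture test hunk above drops the on-stack variable-length array in favour of kcalloc(), which also zero-fills and therefore makes the old memset() redundant. A minimal sketch of that allocation pattern (the helper name is illustrative, not a function from the patch):

	#include <linux/slab.h>

	static int alloc_bad_ebs(int ebcnt, int **out)
	{
		int *bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);

		if (!bad_ebs)
			return -ENOMEM;	/* caller unwinds via its error label */

		*out = bad_ebs;		/* caller kfree()s this on its way out */
		return 0;
	}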
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index dfcc65b33e99..4f02848bb2bc 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -194,7 +194,7 @@ static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err;
mutex_lock(&inode->i_mutex);
err = ubi_sync(ubi->ubi_num);
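
file_inode() is the accessor this hunk (and the cxgb4 hunk further down) switches to in place of the open-coded file->f_path.dentry->d_inode chain. A minimal sketch, with the helper name illustrative:

	#include <linux/fs.h>

	static loff_t sketch_file_size(struct file *file)
	{
		struct inode *inode = file_inode(file);

		return i_size_read(inode);	/* e.g. the size cxgb4's mem_read() needs */
	}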
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 33f8f3b2c9b2..cba89fcd1587 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -86,7 +86,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_bitflips)
- return !(random32() % 200);
+ return !(prandom_u32() % 200);
return 0;
}
@@ -100,7 +100,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 500);
+ return !(prandom_u32() % 500);
return 0;
}
@@ -114,7 +114,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 400);
+ return !(prandom_u32() % 400);
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 56c2d75a63d4..87f1d39ca551 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -150,7 +150,7 @@ config MACVTAP
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
- depends on EXPERIMENTAL && INET
+ depends on INET
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a7efec293037..9b017d9c58e9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -381,7 +381,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
}
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
@@ -393,7 +393,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+ if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 3fd32880e526..639049d7e92d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -15,7 +15,7 @@
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -436,6 +436,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
}
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ int j;
+
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
@@ -458,8 +460,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
/* Alloc RX slots */
- for (i = 0; i < ring->num_slots; i++) {
- err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]);
+ for (j = 0; j < ring->num_slots; j++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
if (err) {
bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
goto err_dma_free;
@@ -496,6 +498,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
}
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ int j;
+
ring = &bgmac->rx_ring[i];
/* We don't implement unaligned addressing, so enable first */
@@ -505,11 +509,11 @@ static void bgmac_dma_init(struct bgmac *bgmac)
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
upper_32_bits(ring->dma_base));
- for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
- i++, dma_desc++) {
+ for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+ j++, dma_desc++) {
ctl0 = ctl1 = 0;
- if (i == ring->num_slots - 1)
+ if (j == ring->num_slots - 1)
ctl0 |= BGMAC_DESC_CTL0_EOT;
ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
/* Is there any BGMAC device that requires extension? */
@@ -517,8 +521,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
* B43_DMA64_DCTL1_ADDREXT_MASK;
*/
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr));
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
@@ -904,7 +908,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CHIPCTL_1_IF_TYPE_RMII;
char buf[2];
- if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
@@ -1382,7 +1386,7 @@ static int bgmac_probe(struct bcma_device *core)
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
- if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
/* TODO: reset the external phy. Specs are needed */
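
The bgmac ring-setup changes above are a plain loop-index fix: the inner slot loops reused the outer ring index i, clobbering it, and now get their own j. A minimal sketch of the fixed shape (array names and bounds are illustrative):

	static void fill_all_rings(int filled[], int nr_rings, int nr_slots)
	{
		int i;

		for (i = 0; i < nr_rings; i++) {
			int j;	/* separate inner index, as in the fixed driver */

			for (j = 0; j < nr_slots; j++)
				filled[i]++;	/* outer index stays valid */
		}
	}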
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index c6da77fa9d07..1663e0b6b5a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13013,64 +13013,6 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
return 0;
}
-static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u8 port)
-{
- u16 val, cnt;
- /* Wait for FW completing its initialization. */
- for (cnt = 0; cnt < 1500; cnt++) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &val);
- if (!(val & (1<<15)))
- break;
- usleep_range(1000, 2000);
- }
- if (cnt >= 1500) {
- DP(NETIF_MSG_LINK, "84833 reset timeout\n");
- return -EINVAL;
- }
-
- /* Put the port in super isolate mode. */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val |= MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, port);
- return 0;
-}
-
-int bnx2x_pre_init_phy(struct bnx2x *bp,
- u32 shmem_base,
- u32 shmem2_base,
- u32 chip_id,
- u8 port)
-{
- int rc = 0;
- struct bnx2x_phy phy;
- if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
- port, &phy) != 0) {
- DP(NETIF_MSG_LINK, "populate_phy failed\n");
- return -EINVAL;
- }
- bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
- switch (phy.type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
- rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
- break;
- default:
- break;
- }
- return rc;
-}
-
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 36246129864c..531eebf40d60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -98,7 +98,7 @@ static inline int bnx2x_pfvf_status_codes(int rc)
}
}
-int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
struct cstorm_vf_zone_data __iomem *zone_data =
REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
@@ -141,7 +141,7 @@ int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
return 0;
}
-int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
u32 me_reg;
int tout = 10, interval = 100; /* Wait for 1 sec */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c6c05bfef0e0..e707e31abd81 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2347,7 +2347,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = file->f_path.dentry->d_inode->i_size;
+ loff_t avail = file_inode(file)->i_size;
unsigned int mem = (uintptr_t)file->private_data & 3;
struct adapter *adap = file->private_data - mem;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 29d82cf1528e..fccc3bf2141d 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1782,24 +1782,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- if (i)
- break;
- ret = irq;
- goto failed_irq;
- }
- ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
- if (ret) {
- while (--i >= 0) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, ndev);
- }
- goto failed_irq;
- }
- }
-
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -1850,6 +1832,24 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
+ ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
+ if (ret) {
+ while (--i >= 0) {
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, ndev);
+ }
+ goto failed_irq;
+ }
+ }
+
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
@@ -1867,6 +1867,12 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+failed_irq:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
@@ -1874,12 +1880,6 @@ failed_regulator:
clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
-failed_irq:
iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
@@ -1899,17 +1899,17 @@ fec_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- int irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
iounmap(fep->hwp);
free_netdev(ndev);
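
The fec probe hunks move request_irq() after the earlier init steps and relocate the matching failure label so that resources are still released in reverse order of acquisition. A minimal sketch of that goto-unwind shape; every helper here is an illustrative stub, not a fec function:

	static int acquire_clocks(void) { return 0; }
	static void release_clocks(void) { }
	static int acquire_irqs(void) { return 0; }
	static void release_irqs(void) { }
	static int register_device(void) { return 0; }

	static int sketch_probe(void)
	{
		int err;

		err = acquire_clocks();
		if (err)
			goto out;

		err = acquire_irqs();		/* moved after earlier init steps */
		if (err)
			goto err_clocks;

		err = register_device();
		if (err)
			goto err_irqs;

		return 0;

	err_irqs:
		release_irqs();
	err_clocks:
		release_clocks();
	out:
		return err;
	}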
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b5e8a692481..d2c5441d1bf0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2906,21 +2906,23 @@ static void gfar_netpoll(struct net_device *dev)
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
for (i = 0; i < priv->num_grps; i++) {
- disable_irq(priv->gfargrp[i].interruptTransmit);
- disable_irq(priv->gfargrp[i].interruptReceive);
- disable_irq(priv->gfargrp[i].interruptError);
- gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
- enable_irq(priv->gfargrp[i].interruptError);
- enable_irq(priv->gfargrp[i].interruptReceive);
- enable_irq(priv->gfargrp[i].interruptTransmit);
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ disable_irq(gfar_irq(grp, RX)->irq);
+ disable_irq(gfar_irq(grp, ER)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, ER)->irq);
+ enable_irq(gfar_irq(grp, RX)->irq);
+ enable_irq(gfar_irq(grp, TX)->irq);
}
} else {
for (i = 0; i < priv->num_grps; i++) {
- disable_irq(priv->gfargrp[i].interruptTransmit);
- gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
- enable_irq(priv->gfargrp[i].interruptTransmit);
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, TX)->irq);
}
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f4d2e9e3c6d5..c3f1afd86906 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
union ixgbe_atr_input *mask = &adapter->fdir_mask;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *rule = NULL;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->sw_idx)
break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *rule;
int cnt = 0;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
u16 sw_idx)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hlist_node *node, *node2, *parent;
- struct ixgbe_fdir_filter *rule;
+ struct hlist_node *node2;
+ struct ixgbe_fdir_filter *rule, *parent;
int err = -EINVAL;
parent = NULL;
rule = NULL;
- hlist_for_each_entry_safe(rule, node, node2,
+ hlist_for_each_entry_safe(rule, node2,
&adapter->fdir_filter_list, fdir_node) {
/* hash found, or no matching entry */
if (rule->sw_idx >= sw_idx)
break;
- parent = node;
+ parent = rule;
}
/* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_after(parent, &input->fdir_node);
+ hlist_add_after(&parent->fdir_node, &input->fdir_node);
else
hlist_add_head(&input->fdir_node,
&adapter->fdir_filter_list);
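
These ixgbe hunks, like the mlx4, qlcnic, sunvnet, macvlan and macvtap ones below, are part of the tree-wide hlist iterator conversion: the separate struct hlist_node cursor argument is gone, and only the _safe variant still takes a temporary so entries can be deleted while walking. A minimal sketch of the new signatures (the item structure and table are illustrative):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		int key;
		struct hlist_node node;
	};

	static void drop_key(struct hlist_head *table, int key)
	{
		struct item *it;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(it, tmp, table, node) {
			if (it->key == key) {
				hlist_del(&it->node);
				kfree(it);
			}
		}
	}

	static struct item *find_key(struct hlist_head *table, int key)
	{
		struct item *it;

		hlist_for_each_entry(it, table, node)	/* no node cursor anymore */
			if (it->key == key)
				return it;
		return NULL;
	}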
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 68478d6dfa2d..db5611ae407e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
if (!hlist_empty(&adapter->fdir_filter_list))
ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
- hlist_for_each_entry_safe(filter, node, node2,
+ hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
- hlist_for_each_entry_safe(filter, node, node2,
+ hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
hlist_del(&filter->fdir_node);
kfree(filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index b2cca58de910..fc27800e9c38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -198,7 +198,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
- mlx4_mr_free(dev, &mdev->mr);
+ (void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
@@ -303,7 +303,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
return mdev;
err_mr:
- mlx4_mr_free(dev, &mdev->mr);
+ (void) mlx4_mr_free(dev, &mdev->mr);
err_map:
if (!mdev->uar_map)
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5088dc5c3d1a..bb4d8d99f36d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
__be16 src_port, __be16 dst_port)
{
- struct hlist_node *elem;
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
- hlist_for_each_entry(filter, elem,
+ hlist_for_each_entry(filter,
filter_hash_bucket(priv, src_ip, dst_ip,
src_port, dst_port),
filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
struct hlist_head *bucket;
unsigned int mac_hash;
mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
priv->dev->dev_addr)) {
en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
{
struct netdev_hw_addr *ha;
struct mlx4_mac_entry *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
bool found;
u64 mac;
int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
/* find what to remove */
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
found = false;
netdev_for_each_uc_addr(ha, dev) {
if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
netdev_for_each_uc_addr(ha, dev) {
found = false;
bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
- hlist_for_each_entry(entry, n, bucket, hlist) {
+ hlist_for_each_entry(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
found = true;
break;
@@ -1829,7 +1828,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
if (!priv->dev->rx_cpu_rmap)
goto err;
#endif
@@ -2067,7 +2066,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ce38654bbdd0..c7f856308e1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
+#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
- struct hlist_node *n;
struct hlist_head *bucket;
unsigned int mac_hash;
@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
rcu_read_lock();
- hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ hlist_for_each_entry_rcu(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source)) {
rcu_read_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 38b62c78d5da..50917eb3013e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -762,15 +762,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
u64 flags;
int err = 0;
u8 field;
+ u32 bmme_flags;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
- /* add port mng change event capability unconditionally to slaves */
+ /* add port mng change event capability and disable mw type 1
+ * unconditionally to slaves
+ */
MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+ flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
/* For guests, report Blueflame disabled */
@@ -778,6 +782,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+ /* For guests, disable mw type 2 */
+ MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+ MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+
return 0;
}
@@ -1203,6 +1212,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
+#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
@@ -1319,6 +1329,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* TPT attributes */
MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
@@ -1415,6 +1426,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 3af33ff669cc..151c2bb380a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -170,6 +170,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 mw_enabled; /* Enable memory windows */
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b9dde139dac5..d180bc46826a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1431,6 +1431,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ init_hca.mw_enabled = 0;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+ dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
+ init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ed4a6959e828..cf883345af88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,6 +60,8 @@
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
#define MLX4_FS_NUM_MCG (1 << 17)
+#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -113,10 +115,10 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
-enum mlx4_mr_state {
- MLX4_MR_DISABLED = 0,
- MLX4_MR_EN_HW,
- MLX4_MR_EN_SW
+enum mlx4_mpt_state {
+ MLX4_MPT_DISABLED = 0,
+ MLX4_MPT_EN_HW,
+ MLX4_MPT_EN_SW
};
#define MLX4_COMM_TIME 10000
@@ -263,6 +265,22 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
+#define MLX4_MPT_FLAG_REGION (1 << 8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
+#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7)
+
+#define MLX4_MPT_STATUS_SW 0xF0
+#define MLX4_MPT_STATUS_HW 0x00
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -863,10 +881,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
-int __mlx4_mr_reserve(struct mlx4_dev *dev);
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_reserve(struct mlx4_dev *dev);
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c202d3ad2a0e..602ca9bf78e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -44,20 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
-#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
-#define MLX4_MPT_FLAG_MIO (1 << 17)
-#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
-#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
-#define MLX4_MPT_FLAG_REGION (1 << 8)
-
-#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
-#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
-#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
-
-#define MLX4_MPT_STATUS_SW 0xF0
-#define MLX4_MPT_STATUS_HW 0x00
-
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
int o;
@@ -321,7 +307,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = MLX4_MR_DISABLED;
+ mr->enabled = MLX4_MPT_DISABLED;
mr->key = hw_index_to_key(mridx);
return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
@@ -335,14 +321,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int __mlx4_mr_reserve(struct mlx4_dev *dev)
+int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
-static int mlx4_mr_reserve(struct mlx4_dev *dev)
+static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
u64 out_param;
@@ -353,17 +339,17 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev)
return -1;
return get_param_l(&out_param);
}
- return __mlx4_mr_reserve(dev);
+ return __mlx4_mpt_reserve(dev);
}
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}
-static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
u64 in_param;
@@ -376,17 +362,17 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
index);
return;
}
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
}
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
-static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param;
@@ -397,17 +383,17 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mr_alloc_icm(dev, index);
+ return __mlx4_mpt_alloc_icm(dev, index);
}
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
-static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
u64 in_param;
@@ -420,7 +406,7 @@ static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
index);
return;
}
- return __mlx4_mr_free_icm(dev, index);
+ return __mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
@@ -429,41 +415,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
u32 index;
int err;
- index = mlx4_mr_reserve(dev);
+ index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
access, npages, page_shift, mr);
if (err)
- mlx4_mr_release(dev, index);
+ mlx4_mpt_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int err;
- if (mr->enabled == MLX4_MR_EN_HW) {
+ if (mr->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
- if (err)
- mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ if (err) {
+ mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
+ mlx4_warn(dev, "MR has MWs bound to it.\n");
+ return err;
+ }
- mr->enabled = MLX4_MR_EN_SW;
+ mr->enabled = MLX4_MPT_EN_SW;
}
mlx4_mtt_cleanup(dev, &mr->mtt);
+
+ return 0;
}
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- mlx4_mr_free_reserved(dev, mr);
+ int ret;
+
+ ret = mlx4_mr_free_reserved(dev, mr);
+ if (ret)
+ return ret;
if (mr->enabled)
- mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
- mlx4_mr_release(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_release(dev, key_to_hw_index(mr->key));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
@@ -473,7 +470,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -520,7 +517,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
- mr->enabled = MLX4_MR_EN_HW;
+ mr->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -530,7 +527,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -657,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+ struct mlx4_mw *mw)
+{
+ u32 index;
+
+ if ((type == MLX4_MW_TYPE_1 &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
+ (type == MLX4_MW_TYPE_2 &&
+ !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
+ return -ENOTSUPP;
+
+ index = mlx4_mpt_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ mw->key = hw_index_to_key(index);
+ mw->pd = pd;
+ mw->type = type;
+ mw->enabled = MLX4_MPT_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
+
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mpt_entry *mpt_entry;
+ int err;
+
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
+ if (err)
+ return err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_table;
+ }
+ mpt_entry = mailbox->buf;
+
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
+ * off, thus creating a memory window and not a memory region.
+ */
+ mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
+ mpt_entry->pd_flags = cpu_to_be32(mw->pd);
+ if (mw->type == MLX4_MW_TYPE_2) {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
+ }
+
+ err = mlx4_SW2HW_MPT(dev, mailbox,
+ key_to_hw_index(mw->key) &
+ (dev->caps.num_mpts - 1));
+ if (err) {
+ mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+ goto err_cmd;
+ }
+ mw->enabled = MLX4_MPT_EN_HW;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return 0;
+
+err_cmd:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_enable);
+
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+ int err;
+
+ if (mw->enabled == MLX4_MPT_EN_HW) {
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(mw->key) &
+ (dev->caps.num_mpts - 1));
+ if (err)
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+
+ mw->enabled = MLX4_MPT_EN_SW;
+ }
+ if (mw->enabled)
+ mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+ mlx4_mpt_release(dev, key_to_hw_index(mw->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_free);
+
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -831,7 +923,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
return 0;
err_free:
- mlx4_mr_free(dev, &fmr->mr);
+ (void) mlx4_mr_free(dev, &fmr->mr);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
@@ -882,17 +974,21 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
err);
return;
}
- fmr->mr.enabled = MLX4_MR_EN_SW;
+ fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
+ int ret;
+
if (fmr->maps)
return -EBUSY;
- mlx4_mr_free(dev, &fmr->mr);
- fmr->mr.enabled = MLX4_MR_DISABLED;
+ ret = mlx4_mr_free(dev, &fmr->mr);
+ if (ret)
+ return ret;
+ fmr->mr.enabled = MLX4_MPT_DISABLED;
return 0;
}
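
The mr.c additions above introduce a memory-window API next to the renamed MPT helpers, and mlx4_mr_free() now returns an error so callers can notice an MR that still has windows bound. A minimal sketch of how a caller might pair the new calls, based only on the signatures visible in this hunk and assuming the mlx4 device header this series extends; the pd value and the type-2 choice are illustrative:

	#include <linux/mlx4/device.h>

	static int sketch_bind_window(struct mlx4_dev *dev, u32 pd)
	{
		struct mlx4_mw mw;
		int err;

		err = mlx4_mw_alloc(dev, pd, MLX4_MW_TYPE_2, &mw);
		if (err)
			return err;	/* -ENOTSUPP if the HCA lacks type-2 windows */

		err = mlx4_mw_enable(dev, &mw);	/* SW2HW_MPT: hand the MPT to hardware */
		if (err) {
			mlx4_mw_free(dev, &mw);
			return err;
		}

		/* ... post bind/invalidate work requests against mw.key ... */

		mlx4_mw_free(dev, &mw);
		return 0;
	}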
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5997adc943d0..083fb48dc3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1231,14 +1231,14 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
- index = __mlx4_mr_reserve(dev);
+ index = __mlx4_mpt_reserve(dev);
if (index == -1)
break;
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
break;
}
set_param_l(out_param, index);
@@ -1251,7 +1251,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
@@ -1586,7 +1586,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
- __mlx4_mr_release(dev, index);
+ __mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
@@ -1596,7 +1596,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- __mlx4_mr_free_icm(dev, mpt->key);
+ __mlx4_mpt_free_icm(dev, mpt->key);
res_end_move(dev, slave, RES_MPT, id);
return err;
break;
@@ -1796,6 +1796,26 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->mtt_sz);
}
+static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
+}
+
+static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
+}
+
+static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
+}
+
+static int mr_is_region(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
+}
+
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1856,12 +1876,41 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
+ u32 pd;
+ int pd_slave;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
if (err)
return err;
+ /* Disable memory windows for VFs. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ /* Make sure that the PD bits related to the slave id are zeros. */
+ pd = mr_get_pd(inbox->buf);
+ pd_slave = (pd >> 17) & 0x7f;
+ if (pd_slave != 0 && pd_slave != slave) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ if (mr_is_fmr(inbox->buf)) {
+ /* FMR and Bind Enable are forbidden in slave devices. */
+ if (mr_is_bind_enabled(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ /* FMR and Memory Windows are also forbidden. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ }
+
phys = mr_phys_mpt(inbox->buf);
if (!phys) {
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3480,7 +3529,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
while (state != 0) {
switch (state) {
case RES_MPT_RESERVED:
- __mlx4_mr_release(dev, mpt->key);
+ __mlx4_mpt_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mpt->com.node,
&tracker->res_tree[RES_MPT]);
@@ -3491,7 +3540,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
break;
case RES_MPT_MAPPED:
- __mlx4_mr_free_icm(dev, mpt->key);
+ __mlx4_mpt_free_icm(dev, mpt->key);
state = RES_MPT_RESERVED;
break;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 0be5844d6372..b1cfbb75ff1e 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -579,8 +579,9 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
(TX_RING_SIZE-1)].dma;
freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
info->skb, dmas);
- } else
+ } else {
freed = 2;
+ }
}
kfree(txring->ring_info);
@@ -808,8 +809,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
XCT_MACRX_CSUM_S;
- } else
+ } else {
skb_checksum_none_assert(skb);
+ }
packets++;
tot_bytes += len;
@@ -1829,10 +1831,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
err);
goto out;
- } else if netif_msg_probe(mac)
+ } else if (netif_msg_probe(mac)) {
printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
mac->dma_if, dev->dev_addr);
+ }
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 11c3db6daffd..ba3c72fce1f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 1
-#define _QLCNIC_LINUX_SUBVERSION 34
-#define QLCNIC_LINUX_VERSIONID "5.1.34"
+#define _QLCNIC_LINUX_SUBVERSION 35
+#define QLCNIC_LINUX_VERSIONID "5.1.35"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1755,7 +1755,7 @@ static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+ return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
}
static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index c53832b02b3e..5c033f268ca5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -589,13 +589,6 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_register_nic_idc_func(adapter, 1);
qlcnic_83xx_enable_mbx_intrpt(adapter);
- if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
- if (qlcnic_83xx_config_intrpt(adapter, 1)) {
- netdev_err(adapter->netdev,
- "Failed to enable mbx intr\n");
- return -EIO;
- }
- }
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 325e11e1ce0f..f89cc7a3fe6c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
head = &(adapter->rx_fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
{
time = tmp_fil->ftime;
if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
u8 cmd;
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
qlcnic_sre_macaddr_change(adapter,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6387e0cc3ea9..0e630061bff3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
unsigned long time;
u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
(adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
spin_lock(&adapter->rx_mac_learn_lock);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct sk_buff *skb)
{
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
struct net_device *netdev = adapter->netdev;
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 289b4eefb42f..1df0ff3839e8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
- struct hlist_node *n;
struct vnet_port *port;
- hlist_for_each_entry(port, n, hp, hash) {
+ hlist_for_each_entry(port, hp, hash) {
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index defcd8a85744..417b2af1aa80 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
struct macvlan_dev *vlan;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
return vlan;
}
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
{
const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
- struct hlist_node *n;
struct sk_buff *nskb;
unsigned int i;
int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
return;
for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
if (vlan->dev == src || !(vlan->mode & mode))
continue;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 97243011d319..a449439bd653 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -279,28 +279,17 @@ static int macvtap_receive(struct sk_buff *skb)
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
int retval = -ENOMEM;
- int id;
mutex_lock(&minor_lock);
- if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
- goto exit;
-
- retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
- if (retval < 0) {
- if (retval == -EAGAIN)
- retval = -ENOMEM;
- goto exit;
- }
- if (id < MACVTAP_NUM_DEVS) {
- vlan->minor = id;
- } else {
+ retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
+ if (retval >= 0) {
+ vlan->minor = retval;
+ } else if (retval == -ENOSPC) {
printk(KERN_ERR "too many macvtap devices\n");
retval = -EINVAL;
- idr_remove(&minor_idr, id);
}
-exit:
mutex_unlock(&minor_lock);
- return retval;
+ return retval < 0 ? retval : 0;
}
static void macvtap_free_minor(struct macvlan_dev *vlan)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3db9131e9229..72ff14b811c6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2953,46 +2953,21 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-static int __unit_alloc(struct idr *p, void *ptr, int n)
-{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- pr_err("PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, n, &unit);
- if (err < 0) {
- if (err == -EAGAIN)
- goto again;
- return err;
- }
-
- return unit;
-}
-
/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
int unit;
- unit = __unit_alloc(p, ptr, n);
- if (unit < 0)
- return unit;
- else if (unit != n) {
- idr_remove(p, unit);
- return -EINVAL;
- }
-
+ unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
+ if (unit == -ENOSPC)
+ unit = -EINVAL;
return unit;
}
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- return __unit_alloc(p, ptr, 0);
+ return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
}
/* put unit number back to a pool */
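Both idr conversions above (macvtap and ppp) replace the idr_pre_get()/idr_get_new_above() two-step with idr_alloc(), which takes the half-open range [start, end) plus a gfp mask and returns either the allocated id or a negative errno: -ENOMEM on allocation failure, -ENOSPC when the range is exhausted. An end of 0 removes the upper bound, which is how unit_get() asks for any free unit. A hedged sketch with hypothetical names:

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDR(my_minor_idr);

    /* returns the allocated minor (>= 1) or a negative errno */
    static int my_get_minor(void *owner, int max_minors)
    {
            /* ids come from [1, max_minors); -ENOSPC once that range is full */
            return idr_alloc(&my_minor_idr, owner, 1, max_minors, GFP_KERNEL);
    }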
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b6f45c5d84d5..2c6a22e278ea 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
struct tun_flow_entry *e;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(e, n, head, hash_link) {
+ hlist_for_each_entry_rcu(e, head, hash_link) {
if (e->rxhash == rxhash)
return e;
}
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
tun_flow_delete(tun, e);
}
spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
if (e->queue_index == queue_index)
tun_flow_delete(tun, e);
}
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
unsigned long this_timer;
count++;
this_timer = e->updated + delay;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index ff4fa37dfd1d..e6d2dea1373c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -53,7 +53,7 @@
#define FEATURE_8_WAKEUP_FILTERS (0x01)
#define FEATURE_PHY_NLP_CROSSOVER (0x02)
-#define FEATURE_AUTOSUSPEND (0x04)
+#define FEATURE_REMOTE_WAKEUP (0x04)
#define SUSPEND_SUSPEND0 (0x01)
#define SUSPEND_SUSPEND1 (0x02)
@@ -1146,7 +1146,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
pdata->features = (FEATURE_8_WAKEUP_FILTERS |
FEATURE_PHY_NLP_CROSSOVER |
- FEATURE_AUTOSUSPEND);
+ FEATURE_REMOTE_WAKEUP);
else if (val == ID_REV_CHIP_ID_9512_)
pdata->features = FEATURE_8_WAKEUP_FILTERS;
@@ -1247,10 +1247,12 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
/* read back PM_CTRL */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND0;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend1(struct usbnet *dev)
@@ -1293,10 +1295,12 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND1;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend2(struct usbnet *dev)
@@ -1313,10 +1317,12 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
val |= PM_CTL_SUS_MODE_2;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
pdata->suspend_flags |= SUSPEND_SUSPEND2;
- return ret;
+ return 0;
}
static int smsc95xx_enter_suspend3(struct usbnet *dev)
@@ -1372,7 +1378,7 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
if (!link_up) {
/* link is down so enter EDPD mode, but only if device can
* reliably resume from it. This check should be redundant
- * as current FEATURE_AUTOSUSPEND parts also support
+ * as current FEATURE_REMOTE_WAKEUP parts also support
* FEATURE_PHY_NLP_CROSSOVER but it's included for clarity */
if (!(pdata->features & FEATURE_PHY_NLP_CROSSOVER)) {
netdev_warn(dev->net, "EDPD not supported\n");
@@ -1412,15 +1418,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
- /* TODO: don't indicate this feature to usb framework if
- * our current hardware doesn't have the capability
- */
- if ((message.event == PM_EVENT_AUTO_SUSPEND) &&
- (!(pdata->features & FEATURE_AUTOSUSPEND))) {
- netdev_warn(dev->net, "autosuspend not supported\n");
- return -EBUSY;
- }
-
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
@@ -1435,7 +1432,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
/* determine if link is up using only _nopm functions */
link_up = smsc95xx_link_ok_nopm(dev);
- if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ if (message.event == PM_EVENT_AUTO_SUSPEND &&
+ (pdata->features & FEATURE_REMOTE_WAKEUP)) {
ret = smsc95xx_autosuspend(dev, link_up);
goto done;
}
@@ -1872,11 +1870,11 @@ static int smsc95xx_manage_power(struct usbnet *dev, int on)
dev->intf->needs_remote_wakeup = on;
- if (pdata->features & FEATURE_AUTOSUSPEND)
+ if (pdata->features & FEATURE_REMOTE_WAKEUP)
return 0;
- /* this chip revision doesn't support autosuspend */
- netdev_info(dev->net, "hardware doesn't support USB autosuspend\n");
+ /* this chip revision isn't capable of remote wakeup */
+ netdev_info(dev->net, "hardware isn't capable of remote wakeup\n");
if (on)
usb_autopm_get_interface_no_resume(dev->intf);
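Alongside the FEATURE_AUTOSUSPEND to FEATURE_REMOTE_WAKEUP rename (the bit describes a hardware wakeup capability, not the autosuspend policy), the suspend helpers above now check the final register access before recording a SUSPENDx flag, so a failed write is no longer reported as success with stale driver state. The pattern, sketched with made-up my_* names:

    #include <linux/types.h>

    /* hedged sketch: only record driver state after the hardware write succeeds */
    struct my_priv {
            u32 suspend_flags;
    };

    static int my_enter_suspend(struct my_priv *pdata,
                                int (*write_pm_ctrl)(u32 val), u32 val)
    {
            int ret = write_pm_ctrl(val);   /* hypothetical register writer */

            if (ret < 0)
                    return ret;             /* leave suspend_flags untouched */

            pdata->suspend_flags |= 0x01;   /* e.g. SUSPEND_SUSPEND0 */
            return 0;
    }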
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 192c91c8e799..57ac4b0294bc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1736,17 +1736,7 @@ static struct virtio_driver virtio_net_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_net_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_net_driver);
-}
-module_init(init);
-module_exit(fini);
+module_virtio_driver(virtio_net_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
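The virtio_net hunk drops the hand-written init/exit pair in favour of module_virtio_driver(), one of the module_driver()-style helpers that generates the same boilerplate. Roughly what the macro expands to for a hypothetical foo_driver (the exact expansion lives in include/linux/virtio.h):

    /* approximate expansion of module_virtio_driver(foo_driver) */
    static int __init foo_driver_init(void)
    {
            return register_virtio_driver(&foo_driver);
    }
    module_init(foo_driver_init);

    static void __exit foo_driver_exit(void)
    {
            unregister_virtio_driver(&foo_driver);
    }
    module_exit(foo_driver_exit);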
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ffb97b2a15a0..4aad350e4dae 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1385,8 +1385,8 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
}
-void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
- struct vmxnet3_adapter *adapter)
+static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
{
int i;
int j;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9bc542be2937..a0feb17a0238 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -448,10 +448,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
- adapter->num_rx_queues;
- param->tx_pending = adapter->tx_queue[0].tx_ring.size *
- adapter->num_tx_queues;
+ param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
+ param->tx_pending = adapter->tx_queue[0].tx_ring.size;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9d70421cf3a0..f10e58ac9c1b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -33,6 +33,7 @@
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
+#include <net/ipip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
@@ -144,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
struct vxlan_dev *vxlan;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+ hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
if (vxlan->vni == id)
return vxlan;
}
@@ -291,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(f, node, head, hlist) {
+ hlist_for_each_entry_rcu(f, head, hlist) {
if (compare_ether_addr(mac, f->eth_addr) == 0)
return f;
}
@@ -421,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
- struct hlist_node *n;
int err;
- hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
if (idx < cb->args[0])
goto skip;
@@ -482,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
const struct vxlan_dev *this)
{
const struct vxlan_dev *vxlan;
- struct hlist_node *node;
unsigned h;
for (h = 0; h < VNI_HASH_SIZE; ++h)
- hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+ hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
if (vxlan == this)
continue;
@@ -962,13 +959,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ tunnel_ip_select_ident(skb, old_iph, &rt->dst);
vxlan_set_owner(dev, skb);
/* See iptunnel_xmit() */
if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(iph, &rt->dst, NULL);
err = ip_local_out(skb);
if (likely(net_xmit_eval(err) == 0)) {
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 0179cefae438..84734a805092 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -938,14 +938,14 @@ static int cosa_open(struct inode *inode, struct file *file)
int ret = 0;
mutex_lock(&cosa_chardev_mutex);
- if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
+ if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS)
>= nr_cards) {
ret = -ENODEV;
goto out;
}
cosa = cosa_cards+n;
- if ((n=iminor(file->f_path.dentry->d_inode)
+ if ((n=iminor(file_inode(file))
& ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
ret = -ENODEV;
goto out;
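cosa.c here, and ray_cs.c and parisc/led.c further down, switch from open-coding file->f_path.dentry->d_inode to the file_inode() accessor; the helper centralizes that dereference so the VFS is free to change how a file's inode is reached. A trivial sketch with a hypothetical helper name:

    #include <linux/fs.h>

    /* hypothetical helper: minor number of the device a file refers to */
    static unsigned int my_file_minor(struct file *file)
    {
            return iminor(file_inode(file));        /* replaces f_path.dentry->d_inode */
    }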
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 806e34c19281..05682736e466 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4214,7 +4214,6 @@ redo:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
- cancel_work_sync(&wl->firmware_load);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED) {
@@ -5434,6 +5433,7 @@ static void b43_bcma_remove(struct bcma_device *core)
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
@@ -5510,6 +5510,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
+ cancel_work_sync(&wl->firmware_load);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index cecc3eff72e9..2af9c0f0798d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4615,8 +4615,10 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
switch (ifevent->action) {
case BRCMF_E_IF_ADD:
/* waiting process may have timed out */
- if (!cfg->vif_event.vif)
+ if (!cfg->vif_event.vif) {
+ mutex_unlock(&event->vif_event_lock);
return -EBADF;
+ }
ifp->vif = vif;
vif->ifp = ifp;
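The brcmfmac change is a lock-balance fix: the early return taken when the waiting process has already timed out now drops vif_event_lock before bailing out, instead of leaving the mutex held on the error path. The general shape, with hypothetical my_* types:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct my_event {
            struct mutex lock;
            void *vif;
    };

    static int my_handle_if_add(struct my_event *event)
    {
            mutex_lock(&event->lock);
            if (!event->vif) {
                    mutex_unlock(&event->lock);     /* never return with the lock held */
                    return -EBADF;
            }
            /* ... normal processing under the lock ... */
            mutex_unlock(&event->lock);
            return 0;
    }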
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 4b54bcf382f3..35c79722c361 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -171,7 +171,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
- int hs_actived, i;
+ int hs_actived;
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -191,9 +191,6 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
/* Indicate device suspended */
adapter->is_suspended = true;
- for (i = 0; i < adapter->priv_num; i++)
- netif_carrier_off(adapter->priv[i]->netdev);
-
return 0;
}
@@ -209,7 +206,6 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
- int i;
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -231,10 +227,6 @@ static int mwifiex_pcie_resume(struct pci_dev *pdev)
adapter->is_suspended = false;
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]->media_connected)
- netif_carrier_on(adapter->priv[i]->netdev);
-
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -916,17 +908,8 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- u32 rdptr;
-
- /* Read the TX ring read pointer set by firmware */
- if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
- dev_err(adapter->dev,
- "Flush TXBD: failed to read reg->tx_rdptr\n");
- return -1;
- }
- if (!mwifiex_pcie_txbd_empty(card, rdptr)) {
+ if (!mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) {
card->txbd_flush = 1;
/* write pointer already set at last send
* send dnld-rdy intr again, wait for completion.
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e63f646a260e..363ba31b58bf 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int i;
int ret = 0;
if (func) {
@@ -198,9 +197,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
- for (i = 0; i < adapter->priv_num; i++)
- netif_carrier_off(adapter->priv[i]->netdev);
-
return ret;
}
@@ -220,7 +216,6 @@ static int mwifiex_sdio_resume(struct device *dev)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int i;
if (func) {
pm_flag = sdio_get_host_pm_caps(func);
@@ -243,10 +238,6 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter->is_suspended = false;
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]->media_connected)
- netif_carrier_on(adapter->priv[i]->netdev);
-
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e7cf37f550d1..3109c0db66e1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2778,7 +2778,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE(file->f_path.dentry->d_inode)->data = nr;
+ *(int *)PDE(file_inode(file))->data = nr;
return count;
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 48273dd05b63..4941f201d6c8 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
int datalen = urb->actual_length-1;
unsigned short len, fc, seq;
- struct hlist_node *node;
len = ntohs(*(__be16 *)&data[datalen-2]);
if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
hlist_add_head(&frag->fnode, &zd->fraglist);
goto resubmit;
}
- hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+ hlist_for_each_entry(frag, &zd->fraglist, fnode)
if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
break;
if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
static void zd1201_disconnect(struct usb_interface *interface)
{
struct zd1201 *zd = usb_get_intfdata(interface);
- struct hlist_node *node, *node2;
+ struct hlist_node *node2;
struct zd1201_frag *frag;
if (!zd)
return;
usb_set_intfdata(interface, NULL);
- hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+ hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
hlist_del_init(&frag->fnode);
kfree_skb(frag->skb);
kfree(frag);
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 849357c1045c..445ffda715ad 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -139,17 +139,22 @@ static int __oprofilefs_create_file(struct super_block *sb,
struct dentry *dentry;
struct inode *inode;
+ mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, name);
- if (!dentry)
+ if (!dentry) {
+ mutex_unlock(&root->d_inode->i_mutex);
return -ENOMEM;
+ }
inode = oprofilefs_get_inode(sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
+ mutex_unlock(&root->d_inode->i_mutex);
return -ENOMEM;
}
inode->i_fop = fops;
+ inode->i_private = priv;
d_add(dentry, inode);
- dentry->d_inode->i_private = priv;
+ mutex_unlock(&root->d_inode->i_mutex);
return 0;
}
@@ -212,17 +217,22 @@ struct dentry *oprofilefs_mkdir(struct super_block *sb,
struct dentry *dentry;
struct inode *inode;
+ mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, name);
- if (!dentry)
+ if (!dentry) {
+ mutex_unlock(&root->d_inode->i_mutex);
return NULL;
+ }
inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
+ mutex_unlock(&root->d_inode->i_mutex);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
+ mutex_unlock(&root->d_inode->i_mutex);
return dentry;
}
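The oprofilefs hunks take the parent directory's i_mutex around d_alloc_name()/d_add() so concurrent creation cannot race on the dcache, and they set inode->i_private before the dentry is published rather than after, so a lookup never sees a half-initialized node. A hedged sketch of the locking shape (the make_inode callback is hypothetical):

    #include <linux/fs.h>
    #include <linux/dcache.h>

    static int my_create_node(struct dentry *dir, const char *name,
                              struct inode *(*make_inode)(struct super_block *sb),
                              void *priv)
    {
            struct dentry *dentry;
            struct inode *inode;

            mutex_lock(&dir->d_inode->i_mutex);
            dentry = d_alloc_name(dir, name);
            if (!dentry) {
                    mutex_unlock(&dir->d_inode->i_mutex);
                    return -ENOMEM;
            }
            inode = make_inode(dir->d_sb);
            if (!inode) {
                    dput(dentry);
                    mutex_unlock(&dir->d_inode->i_mutex);
                    return -ENOMEM;
            }
            inode->i_private = priv;        /* initialize before publishing */
            d_add(dentry, inode);
            mutex_unlock(&dir->d_inode->i_mutex);
            return 0;
    }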
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index f2f501e5b6a0..d4d800c54d86 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -179,7 +179,7 @@ static int led_proc_open(struct inode *inode, struct file *file)
static ssize_t led_proc_write(struct file *file, const char *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file->f_path.dentry->d_inode)->data;
+ void *data = PDE(file_inode(file))->data;
char *cur, lbuf[32];
int d;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 3af0478c057b..1cc23661f79b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -472,7 +472,7 @@ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
static inline int pcie_cap_version(const struct pci_dev *dev)
{
- return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
+ return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
@@ -497,7 +497,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
- dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT);
}
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
@@ -515,7 +515,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
return false;
switch (pos) {
- case PCI_EXP_FLAGS_TYPE:
+ case PCI_EXP_FLAGS:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
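Two fixes in pci/access.c: reads of the cached PCI Express Capabilities Register now go through the pcie_caps_reg() accessor instead of touching dev->pcie_flags_reg directly, and the switch in pcie_capability_reg_implemented() compares against PCI_EXP_FLAGS (a register offset, which is what pos holds) rather than the PCI_EXP_FLAGS_TYPE bit mask. A small sketch of the accessor in use; my_cap_is_v2() is a made-up caller:

    #include <linux/pci.h>

    /* the capability version is the low nibble of the capabilities register */
    static bool my_cap_is_v2(const struct pci_dev *dev)
    {
            return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS) >= 2;
    }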
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index ad6a8b635692..8647dc6f52d0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -158,69 +158,38 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
return ret;
}
+void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
+
/**
- * pci_bus_add_device - add a single device
+ * pci_bus_add_device - start driver for a single device
* @dev: device to add
*
- * This adds a single pci device to the global
- * device list and adds sysfs and procfs entries
+ * This adds sysfs entries and starts the device driver
*/
int pci_bus_add_device(struct pci_dev *dev)
{
int retval;
- pci_fixup_device(pci_fixup_final, dev);
-
- retval = pcibios_add_device(dev);
- if (retval)
- return retval;
-
- retval = device_add(&dev->dev);
- if (retval)
- return retval;
-
- dev->is_added = 1;
- pci_proc_attach_device(dev);
+ /*
+ * Cannot do this in pci_device_add() yet because resources
+ * are not assigned yet for some devices.
+ */
pci_create_sysfs_dev_files(dev);
- return 0;
-}
-
-/**
- * pci_bus_add_child - add a child bus
- * @bus: bus to add
- *
- * This adds sysfs entries for a single bus
- */
-int pci_bus_add_child(struct pci_bus *bus)
-{
- int retval;
-
- if (bus->bridge)
- bus->dev.parent = bus->bridge;
- retval = device_register(&bus->dev);
- if (retval)
- return retval;
+ dev->match_driver = true;
+ retval = device_attach(&dev->dev);
+ WARN_ON(retval < 0);
- bus->is_added = 1;
-
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(bus);
+ dev->is_added = 1;
- return retval;
+ return 0;
}
/**
- * pci_bus_add_devices - insert newly discovered PCI devices
+ * pci_bus_add_devices - start driver for PCI devices
* @bus: bus to check for new devices
*
- * Add newly discovered PCI devices (which are on the bus->devices
- * list) to the global PCI device list, add the sysfs and procfs
- * entries. Where a bridge is found, add the discovered bus to
- * the parents list of child buses, and recurse (breadth-first
- * to be compatible with 2.4)
- *
- * Call hotplug for each new devices.
+ * Start drivers for the PCI devices on the bus and add sysfs entries.
*/
void pci_bus_add_devices(const struct pci_bus *bus)
{
@@ -233,36 +202,20 @@ void pci_bus_add_devices(const struct pci_bus *bus)
if (dev->is_added)
continue;
retval = pci_bus_add_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error adding device, continuing\n");
}
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
child = dev->subordinate;
- /*
- * If there is an unattached subordinate bus, attach
- * it and then scan for unattached PCI devices.
- */
+
if (!child)
continue;
- if (list_empty(&child->node)) {
- down_write(&pci_bus_sem);
- list_add_tail(&child->node, &dev->bus->children);
- up_write(&pci_bus_sem);
- }
pci_bus_add_devices(child);
- /*
- * register the bus with sysfs as the parent is now
- * properly registered.
- */
if (child->is_added)
continue;
- retval = pci_bus_add_child(child);
- if (retval)
- dev_err(&dev->dev, "Error adding bus, continuing\n");
+ child->is_added = 1;
}
}
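With registration against the driver core now done earlier during enumeration, pci_bus_add_device() is reduced to creating the sysfs files (which still cannot happen in pci_device_add() because resources may not be assigned yet), setting dev->match_driver and letting device_attach() bind a driver; that is why pci_bus_add_devices() no longer has an error leg. A hedged sketch of the intended host-side ordering, with a hypothetical my_probe_root():

    #include <linux/pci.h>

    static void my_probe_root(struct pci_ops *ops, void *sysdata)
    {
            struct pci_bus *bus;

            bus = pci_scan_bus(0, ops, sysdata);    /* enumerate: devices exist, no drivers bound */
            if (!bus)
                    return;

            pci_bus_assign_resources(bus);          /* resources first ... */
            pci_bus_add_devices(bus);               /* ... then drivers are matched and attached */
    }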
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index a1afb5b39ad4..b70ac00a117e 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -79,7 +79,6 @@ struct acpiphp_bridge {
/* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
struct acpiphp_func *func;
- int type;
int nr_slots;
u32 flags;
@@ -146,10 +145,6 @@ struct acpiphp_attention_info
/* PCI bus bridge HID */
#define ACPI_PCI_HOST_HID "PNP0A03"
-/* PCI BRIDGE type */
-#define BRIDGE_TYPE_HOST 0
-#define BRIDGE_TYPE_P2P 1
-
/* ACPI _STA method value (ignore bit 4; battery present) */
#define ACPI_STA_PRESENT (0x00000001)
#define ACPI_STA_ENABLED (0x00000002)
@@ -158,13 +153,7 @@ struct acpiphp_attention_info
#define ACPI_STA_ALL (0x0000000f)
/* bridge flags */
-#define BRIDGE_HAS_STA (0x00000001)
-#define BRIDGE_HAS_EJ0 (0x00000002)
-#define BRIDGE_HAS_HPP (0x00000004)
-#define BRIDGE_HAS_PS0 (0x00000010)
-#define BRIDGE_HAS_PS1 (0x00000020)
-#define BRIDGE_HAS_PS2 (0x00000040)
-#define BRIDGE_HAS_PS3 (0x00000080)
+#define BRIDGE_HAS_EJ0 (0x00000001)
/* slot flags */
@@ -193,7 +182,6 @@ extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
extern int acpiphp_glue_init (void);
extern void acpiphp_glue_exit (void);
-extern int acpiphp_get_num_slots (void);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 96316b74969f..c2fd3095701f 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -50,7 +50,6 @@
bool acpiphp_debug;
/* local variables */
-static int num_slots;
static struct acpiphp_attention_info *attention_info;
#define DRIVER_VERSION "0.5"
@@ -272,25 +271,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int __init init_acpi(void)
-{
- int retval;
-
- /* initialize internal data structure etc. */
- retval = acpiphp_glue_init();
-
- /* read initial number of slots */
- if (!retval) {
- num_slots = acpiphp_get_num_slots();
- if (num_slots == 0) {
- acpiphp_glue_exit();
- retval = -ENODEV;
- }
- }
-
- return retval;
-}
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -379,7 +359,8 @@ static int __init acpiphp_init(void)
return 0;
/* read all the ACPI info from the system */
- return init_acpi();
+ /* initialize internal data structure etc. */
+ return acpiphp_glue_init();
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a951c22921d1..270fdbadc19c 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -325,8 +325,8 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
return;
}
- /* install notify handler */
- if (bridge->type != BRIDGE_TYPE_HOST) {
+ /* install notify handler for P2P bridges */
+ if (!pci_is_root_bus(bridge->pci_bus)) {
if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
status = acpi_remove_notify_handler(bridge->func->handle,
ACPI_SYSTEM_NOTIFY,
@@ -369,27 +369,12 @@ static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle
static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
{
acpi_handle dummy_handle;
+ struct acpiphp_func *func;
if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_STA", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_STA;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle)))
+ "_EJ0", &dummy_handle))) {
bridge->flags |= BRIDGE_HAS_EJ0;
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS0", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS0;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_PS3", &dummy_handle)))
- bridge->flags |= BRIDGE_HAS_PS3;
-
- /* is this ejectable p2p bridge? */
- if (bridge->flags & BRIDGE_HAS_EJ0) {
- struct acpiphp_func *func;
-
dbg("found ejectable p2p bridge\n");
/* make link between PCI bridge and PCI function */
@@ -412,7 +397,6 @@ static void add_host_bridge(struct acpi_pci_root *root)
if (bridge == NULL)
return;
- bridge->type = BRIDGE_TYPE_HOST;
bridge->handle = handle;
bridge->pci_bus = root->bus;
@@ -432,7 +416,6 @@ static void add_p2p_bridge(acpi_handle *handle)
return;
}
- bridge->type = BRIDGE_TYPE_P2P;
bridge->handle = handle;
config_p2p_bridge_flags(bridge);
@@ -543,13 +526,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
acpi_status status;
acpi_handle handle = bridge->handle;
- status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ if (!pci_is_root_bus(bridge->pci_bus)) {
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
+ if (ACPI_FAILURE(status))
+ err("failed to remove notify handler\n");
+ }
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)) {
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
status = acpi_install_notify_handler(bridge->func->handle,
ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_func,
@@ -630,9 +615,6 @@ static void remove_bridge(struct acpi_pci_root *root)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge)
cleanup_bridge(bridge);
- else
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge);
}
static int power_on_slot(struct acpiphp_slot *slot)
@@ -793,6 +775,29 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
}
}
+static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
+{
+ struct acpiphp_func *func;
+
+ if (!dev->subordinate)
+ return;
+
+ /* a PCI quirk or the PCIe port code may have set it already */
+ if (dev->is_hotplug_bridge)
+ return;
+
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ return;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ if (PCI_FUNC(dev->devfn) == func->function) {
+ /* check if this bridge has ejectable slots */
+ if ((detect_ejectable_slots(func->handle) > 0))
+ dev->is_hotplug_bridge = 1;
+ break;
+ }
+ }
+}
/**
* enable_device - enable, configure a slot
* @slot: slot to be enabled
@@ -812,6 +817,9 @@ static int __ref enable_device(struct acpiphp_slot *slot)
if (slot->flags & SLOT_ENABLED)
goto err_exit;
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpiphp_bus_add(func);
+
num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
if (num == 0) {
/* Maybe only part of funcs are added. */
@@ -827,15 +835,14 @@ static int __ref enable_device(struct acpiphp_slot *slot)
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
pci_bus_size_bridges(dev->subordinate);
+ }
}
}
}
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func);
-
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
@@ -1093,70 +1100,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
}
}
-/* Program resources in newly inserted bridge */
-static int acpiphp_configure_bridge (acpi_handle handle)
-{
- struct pci_bus *bus;
-
- if (acpi_is_root_bridge(handle)) {
- struct acpi_pci_root *root = acpi_pci_find_root(handle);
- bus = root->bus;
- } else {
- struct pci_dev *pdev = acpi_get_pci_dev(handle);
- bus = pdev->subordinate;
- pci_dev_put(pdev);
- }
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(bus);
- pci_enable_bridges(bus);
- return 0;
-}
-
-static void handle_bridge_insertion(acpi_handle handle, u32 type)
-{
- struct acpi_device *device;
-
- if ((type != ACPI_NOTIFY_BUS_CHECK) &&
- (type != ACPI_NOTIFY_DEVICE_CHECK)) {
- err("unexpected notification type %d\n", type);
- return;
- }
-
- if (acpi_bus_scan(handle)) {
- err("cannot add bridge to acpi list\n");
- return;
- }
- if (acpi_bus_get_device(handle, &device)) {
- err("ACPI device object missing\n");
- return;
- }
- if (!acpiphp_configure_bridge(handle))
- add_bridge(handle);
- else
- err("cannot configure and start bridge\n");
-
-}
-
/*
* ACPI event handlers
*/
static acpi_status
-count_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
- struct acpiphp_bridge *bridge;
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- (*count)++;
- return AE_OK ;
-}
-
-static acpi_status
check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
struct acpiphp_bridge *bridge;
@@ -1174,83 +1122,33 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK ;
}
-struct acpiphp_hp_work {
- struct work_struct work;
- acpi_handle handle;
- u32 type;
- void *context;
-};
-
-static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type,
- void *context,
- void (*func)(struct work_struct *work))
-{
- struct acpiphp_hp_work *hp_work;
- int ret;
-
- hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
- if (!hp_work)
- return;
-
- hp_work->handle = handle;
- hp_work->type = type;
- hp_work->context = context;
-
- INIT_WORK(&hp_work->work, func);
- ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
- if (!ret)
- kfree(hp_work);
-}
-
static void _handle_hotplug_event_bridge(struct work_struct *work)
{
struct acpiphp_bridge *bridge;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
- struct acpi_device *device;
- int num_sub_bridges = 0;
- struct acpiphp_hp_work *hp_work;
+ struct acpi_hp_work *hp_work;
acpi_handle handle;
u32 type;
- hp_work = container_of(work, struct acpiphp_hp_work, work);
+ hp_work = container_of(work, struct acpi_hp_work, work);
handle = hp_work->handle;
type = hp_work->type;
+ bridge = (struct acpiphp_bridge *)hp_work->context;
acpi_scan_lock_acquire();
- if (acpi_bus_get_device(handle, &device)) {
- /* This bridge must have just been physically inserted */
- handle_bridge_insertion(handle, type);
- goto out;
- }
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (type == ACPI_NOTIFY_BUS_CHECK) {
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX,
- count_sub_bridges, NULL, &num_sub_bridges, NULL);
- }
-
- if (!bridge && !num_sub_bridges) {
- err("cannot get bridge info\n");
- goto out;
- }
-
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
dbg("%s: Bus check notify on %s\n", __func__, objname);
- if (bridge) {
- dbg("%s: re-enumerating slots under %s\n",
- __func__, objname);
- acpiphp_check_bridge(bridge);
- }
- if (num_sub_bridges)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+ dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+ acpiphp_check_bridge(bridge);
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
break;
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -1267,8 +1165,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
dbg("%s: Device eject notify on %s\n", __func__, objname);
- if ((bridge->type != BRIDGE_TYPE_HOST) &&
- (bridge->flags & BRIDGE_HAS_EJ0)) {
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
struct acpiphp_slot *slot;
slot = bridge->func->slot;
if (!acpiphp_disable_slot(slot))
@@ -1296,7 +1193,6 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
break;
}
-out:
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
@@ -1320,8 +1216,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
- alloc_acpiphp_hp_work(handle, type, context,
- _handle_hotplug_event_bridge);
+ alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
static void _handle_hotplug_event_func(struct work_struct *work)
@@ -1330,22 +1225,19 @@ static void _handle_hotplug_event_func(struct work_struct *work)
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
- struct acpiphp_hp_work *hp_work;
+ struct acpi_hp_work *hp_work;
acpi_handle handle;
u32 type;
- void *context;
- hp_work = container_of(work, struct acpiphp_hp_work, work);
+ hp_work = container_of(work, struct acpi_hp_work, work);
handle = hp_work->handle;
type = hp_work->type;
- context = hp_work->context;
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-
- func = (struct acpiphp_func *)context;
+ func = (struct acpiphp_func *)hp_work->context;
acpi_scan_lock_acquire();
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
@@ -1399,23 +1291,7 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
- alloc_acpiphp_hp_work(handle, type, context,
- _handle_hotplug_event_func);
-}
-
-static acpi_status
-find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
-
- if (!acpi_is_root_bridge(handle))
- return AE_OK;
-
- (*count)++;
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
-
- return AE_OK ;
+ alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
}
static struct acpi_pci_driver acpi_pci_hp_driver = {
@@ -1428,15 +1304,7 @@ static struct acpi_pci_driver acpi_pci_hp_driver = {
*/
int __init acpiphp_glue_init(void)
{
- int num = 0;
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
-
- if (num <= 0)
- return -1;
- else
- acpi_pci_register_driver(&acpi_pci_hp_driver);
+ acpi_pci_register_driver(&acpi_pci_hp_driver);
return 0;
}
@@ -1452,28 +1320,6 @@ void acpiphp_glue_exit(void)
acpi_pci_unregister_driver(&acpi_pci_hp_driver);
}
-
-/**
- * acpiphp_get_num_slots - count number of slots in a system
- */
-int __init acpiphp_get_num_slots(void)
-{
- struct acpiphp_bridge *bridge;
- int num_slots = 0;
-
- list_for_each_entry(bridge, &bridge_list, list) {
- dbg("Bus %04x:%02x has %d slot%s\n",
- pci_domain_nr(bridge->pci_bus),
- bridge->pci_bus->number, bridge->nr_slots,
- bridge->nr_slots == 1 ? "" : "s");
- num_slots += bridge->nr_slots;
- }
-
- dbg("Total %d slots\n", num_slots);
- return num_slots;
-}
-
-
/**
* acpiphp_enable_slot - power on slot
* @slot: ACPI PHP slot
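acpiphp no longer records a host-versus-P2P bridge type of its own; the places that cared now ask the PCI core directly with pci_is_root_bus(), which simply tests whether the bus has a parent. A one-line sketch (my_bridge_needs_notify_handler is a made-up name):

    #include <linux/pci.h>

    static bool my_bridge_needs_notify_handler(struct pci_bus *bus)
    {
            /* only non-root (P2P) bridge buses get a per-bridge notify handler here */
            return !pci_is_root_bus(bus);
    }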
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index dcc75c785443..d8add34177f2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -252,8 +252,8 @@ int cpci_led_off(struct slot* slot)
int __ref cpci_configure_slot(struct slot *slot)
{
+ struct pci_dev *dev;
struct pci_bus *parent;
- int fn;
dbg("%s - enter", __func__);
@@ -282,18 +282,13 @@ int __ref cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- for (fn = 0; fn < 8; fn++) {
- struct pci_dev *dev;
-
- dev = pci_get_slot(parent,
- PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
- }
+
pci_assign_unassigned_bridge_resources(parent->self);
@@ -305,8 +300,7 @@ int __ref cpci_configure_slot(struct slot *slot)
int cpci_unconfigure_slot(struct slot* slot)
{
- int i;
- struct pci_dev *dev;
+ struct pci_dev *dev, *temp;
dbg("%s - enter", __func__);
if (!slot->dev) {
@@ -314,13 +308,12 @@ int cpci_unconfigure_slot(struct slot* slot)
return -ENODEV;
}
- for (i = 0; i < 8; i++) {
- dev = pci_get_slot(slot->bus,
- PCI_DEVFN(PCI_SLOT(slot->devfn), i));
- if (dev) {
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ continue;
+ pci_dev_get(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
pci_dev_put(slot->dev);
slot->dev = NULL;
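cpci here, and the pciehp, sgi_hotplug and shpchp hunks below, stop probing all eight possible functions with pci_get_slot() and instead walk the bus's devices list, filtering on PCI_SLOT(). Removal paths use the _safe iterator plus an explicit pci_dev_get()/pci_dev_put(), since pci_stop_and_remove_bus_device() unlinks the entry being visited. A hedged sketch of that removal pattern (any locking of the bus topology is assumed to be handled by the caller, as in the hotplug core):

    #include <linux/pci.h>

    static void my_remove_slot_functions(struct pci_bus *bus, unsigned int slot_nr)
    {
            struct pci_dev *dev, *tmp;

            list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
                    if (PCI_SLOT(dev->devfn) != slot_nr)
                            continue;
                    pci_dev_get(dev);                       /* hold it across removal */
                    pci_stop_and_remove_bus_device(dev);    /* drops it from bus->devices */
                    pci_dev_put(dev);
            }
    }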
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 36112fe212d3..d282019cda5f 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1900,8 +1900,7 @@ static void interrupt_event_handler(struct controller *ctrl)
dbg("power fault\n");
} else {
/* refresh notification */
- if (p_slot)
- update_slot_info(ctrl, p_slot);
+ update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -2520,44 +2519,28 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* If we have IO resources copy them and fill in the bridge's
* IO range registers */
- if (io_node) {
- memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
- io_node->next = NULL;
+ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
+ io_node->next = NULL;
- /* set IO base and Limit registers */
- temp_byte = io_node->base >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
+ /* set IO base and Limit registers */
+ temp_byte = io_node->base >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
- temp_byte = (io_node->base + io_node->length - 1) >> 8;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- } else {
- kfree(hold_IO_node);
- hold_IO_node = NULL;
- }
-
- /* If we have memory resources copy them and fill in the
- * bridge's memory range registers. Otherwise, fill in the
- * range registers with values that disable them. */
- if (mem_node) {
- memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
- mem_node->next = NULL;
-
- /* set Mem base and Limit registers */
- temp_word = mem_node->base >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ temp_byte = (io_node->base + io_node->length - 1) >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- temp_word = (mem_node->base + mem_node->length - 1) >> 16;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
- } else {
- temp_word = 0xFFFF;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+ /* Copy the memory resources and fill in the bridge's memory
+ * range registers.
+ */
+ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
+ mem_node->next = NULL;
- temp_word = 0x0000;
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+ /* set Mem base and Limit registers */
+ temp_word = mem_node->base >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
- kfree(hold_mem_node);
- hold_mem_node = NULL;
- }
+ temp_word = (mem_node->base + mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
p_mem_node->next = NULL;
@@ -2627,7 +2610,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
/* Return unused bus resources
* First use the temporary node to store information for
* the board */
- if (hold_bus_node && bus_node && temp_resources.bus_head) {
+ if (bus_node && temp_resources.bus_head) {
hold_bus_node->length = bus_node->base - hold_bus_node->base;
hold_bus_node->next = func->bus_head;
@@ -2751,7 +2734,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
}
/* If we have prefetchable memory space available and there
* is some left at the end, return the unused portion */
- if (hold_p_mem_node && temp_resources.p_mem_head) {
+ if (temp_resources.p_mem_head) {
p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head),
&hold_p_mem_node, 0x100000);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 939bd1d4b5b1..7d72c5e2eba9 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -293,7 +293,6 @@ static void pciehp_remove(struct pcie_device *dev)
#ifdef CONFIG_PM
static int pciehp_suspend (struct pcie_device *dev)
{
- dev_info(&dev->device, "%s ENTRY\n", __func__);
return 0;
}
@@ -303,7 +302,6 @@ static int pciehp_resume (struct pcie_device *dev)
struct slot *slot;
u8 status;
- dev_info(&dev->device, "%s ENTRY\n", __func__);
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 09cecaf450c5..aac7a40e4a4a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -39,7 +39,7 @@ int pciehp_configure_device(struct slot *p_slot)
struct pci_dev *dev;
struct pci_dev *bridge = p_slot->ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
- int num, fn;
+ int num;
struct controller *ctrl = p_slot->ctrl;
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
@@ -57,28 +57,18 @@ int pciehp_configure_device(struct slot *p_slot)
return -ENODEV;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
- continue;
+ list_for_each_entry(dev, &parent->devices, bus_list)
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
- }
pci_assign_unassigned_bridge_resources(bridge);
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- pci_dev_put(dev);
- continue;
- }
+
pci_configure_slot(dev);
- pci_dev_put(dev);
}
pci_bus_add_devices(parent);
@@ -89,9 +79,9 @@ int pciehp_configure_device(struct slot *p_slot)
int pciehp_unconfigure_device(struct slot *p_slot)
{
int ret, rc = 0;
- int j;
u8 bctl = 0;
u8 presence = 0;
+ struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
u16 command;
struct controller *ctrl = p_slot->ctrl;
@@ -102,33 +92,31 @@ int pciehp_unconfigure_device(struct slot *p_slot)
if (ret)
presence = 0;
- for (j = 0; j < 8; j++) {
- struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j));
- if (!temp)
- continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_stop_and_remove_bus_device(temp);
+ pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
* the device.
*/
if (presence) {
- pci_read_config_word(temp, PCI_COMMAND, &command);
+ pci_read_config_word(dev, PCI_COMMAND, &command);
command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
command |= PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(temp, PCI_COMMAND, command);
+ pci_write_config_word(dev, PCI_COMMAND, command);
}
- pci_dev_put(temp);
+ pci_dev_put(dev);
}
return rc;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 574421bc2fa6..b2781dfe60e9 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -334,7 +334,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
struct slot *slot = bss_hotplug_slot->private;
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
- int func, num_funcs;
+ int num_funcs;
int new_ppb = 0;
int rc;
char *ssdt = NULL;
@@ -381,29 +381,26 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
* to the Linux PCI interface and tell the drivers
* about them.
*/
- for (func = 0; func < num_funcs; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- /* Need to do slot fixup on PPB before fixup of children
- * (PPB's pcidev_info needs to be in pcidev_info list
- * before child's SN_PCIDEV_INFO() call to setup
- * pdi_host_pcidev_info).
- */
- pcibios_fixup_device_resources(dev);
- if (SN_ACPI_BASE_SUPPORT())
- sn_acpi_slot_fixup(dev);
- else
- sn_io_slot_fixup(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_hp_add_bridge(dev);
- if (dev->subordinate) {
- new_bus = dev->subordinate;
- new_ppb = 1;
- }
+ list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ /* Need to do slot fixup on PPB before fixup of children
+ * (PPB's pcidev_info needs to be in pcidev_info list
+ * before child's SN_PCIDEV_INFO() call to setup
+ * pdi_host_pcidev_info).
+ */
+ pcibios_fixup_device_resources(dev);
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_slot_fixup(dev);
+ else
+ sn_io_slot_fixup(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_hp_add_bridge(dev);
+ if (dev->subordinate) {
+ new_bus = dev->subordinate;
+ new_ppb = 1;
}
- pci_dev_put(dev);
}
}
@@ -483,8 +480,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
- struct pci_dev *dev;
- int func;
+ struct pci_dev *dev, *temp;
int rc;
acpi_owner_id ssdt_id = 0;
@@ -545,15 +541,14 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
}
/* Free the SN resources assigned to the Linux device.*/
- for (func = 0; func < 8; func++) {
- dev = pci_get_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1,
- PCI_FUNC(func)));
- if (dev) {
- sn_bus_free_data(dev);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ pci_dev_get(dev);
+ sn_bus_free_data(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
/* Remove the SSDT for the slot from the ACPI namespace */
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index c627ed9957d1..b0e83132542e 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -40,7 +40,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
struct controller *ctrl = p_slot->ctrl;
struct pci_dev *bridge = ctrl->pci_dev;
struct pci_bus *parent = bridge->subordinate;
- int num, fn;
+ int num;
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
@@ -57,24 +57,20 @@ int __ref shpchp_configure_device(struct slot *p_slot)
return -ENODEV;
}
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
pci_hp_add_bridge(dev);
- pci_dev_put(dev);
}
pci_assign_unassigned_bridge_resources(bridge);
- for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
- if (!dev)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
pci_configure_slot(dev);
- pci_dev_put(dev);
}
pci_bus_add_devices(parent);
@@ -85,32 +81,32 @@ int __ref shpchp_configure_device(struct slot *p_slot)
int shpchp_unconfigure_device(struct slot *p_slot)
{
int rc = 0;
- int j;
u8 bctl = 0;
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_dev *dev, *temp;
struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- for (j = 0; j < 8 ; j++) {
- struct pci_dev *temp = pci_get_slot(parent,
- (p_slot->device << 3) | j);
- if (!temp)
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
continue;
- if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
ctrl_err(ctrl,
"Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
+ pci_name(dev));
+ pci_dev_put(dev);
rc = -EINVAL;
break;
}
}
- pci_stop_and_remove_bus_device(temp);
- pci_dev_put(temp);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
return rc;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c18e5bf444fa..ee599f274f05 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -33,7 +33,6 @@ static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
- int rc;
struct pci_bus *child;
if (bus->number == busnr)
@@ -48,12 +47,7 @@ static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
return NULL;
pci_bus_insert_busn_res(child, busnr, busnr);
- child->dev.parent = bus->bridge;
- rc = pci_bus_add_child(child);
- if (rc) {
- pci_remove_bus(child);
- return NULL;
- }
+ bus->is_added = 1;
return child;
}
@@ -123,8 +117,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->is_virtfn = 1;
rc = pci_bus_add_device(virtfn);
- if (rc)
- goto failed1;
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e407c61559ca..39c937f9b426 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -302,48 +302,11 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
return 0;
}
-static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
-{
- int num;
- unsigned int seg, bus;
-
- /*
- * The string should be the same as root bridge's name
- * Please look at 'pci_scan_bus_parented'
- */
- num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
- if (num != 2)
- return -ENODEV;
- *handle = acpi_get_pci_rootbridge_handle(seg, bus);
- if (!*handle)
- return -ENODEV;
- return 0;
-}
-
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
- acpi_status status;
- acpi_handle dummy;
-
- /*
- * Evaluate and parse _PRT, if exists. This code allows parsing of
- * _PRT objects within the scope of non-bridge devices. Note that
- * _PRTs within the scope of a PCI bridge assume the bridge's
- * subordinate bus number.
- *
- * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
- */
- status = acpi_get_handle(handle, METHOD_NAME__PRT, &dummy);
- if (ACPI_SUCCESS(status)) {
- unsigned char bus;
-
- bus = pci_dev->subordinate ?
- pci_dev->subordinate->number : pci_dev->bus->number;
- acpi_pci_irq_add_prt(handle, pci_domain_nr(pci_dev->bus), bus);
- }
if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
return;
@@ -358,7 +321,6 @@ static void pci_acpi_setup(struct device *dev)
static void pci_acpi_cleanup(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
@@ -367,16 +329,11 @@ static void pci_acpi_cleanup(struct device *dev)
device_set_run_wake(dev, false);
pci_acpi_remove_pm_notifier(adev);
}
-
- if (pci_dev->subordinate)
- acpi_pci_irq_del_prt(pci_domain_nr(pci_dev->bus),
- pci_dev->subordinate->number);
}
static struct acpi_bus_type acpi_pci_bus = {
.bus = &pci_bus_type,
.find_device = acpi_pci_find_device,
- .find_bridge = acpi_pci_find_root_bridge,
.setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup,
};
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f79cbcd3944b..1fa1e482a999 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -392,7 +392,7 @@ static void pci_device_shutdown(struct device *dev)
* Turn off Bus Master bit on the device to tell it to not
* continue to do DMA
*/
- pci_disable_device(pci_dev);
+ pci_clear_master(pci_dev);
}
#ifdef CONFIG_PM
@@ -628,6 +628,7 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -774,6 +775,7 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -862,6 +864,7 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -987,6 +990,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
+ pci_dev->state_saved = false;
pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
@@ -1186,9 +1190,13 @@ pci_dev_driver(const struct pci_dev *dev)
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *pci_drv = to_pci_driver(drv);
+ struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
+ if (!pci_dev->match_driver)
+ return 0;
+
+ pci_drv = to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0c4f641b7be1..b099e0025d2b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
struct pci_dev *pci_dev, char cap)
{
struct pci_cap_saved_state *tmp;
- struct hlist_node *pos;
- hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+ hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
if (tmp->cap.cap_nr == cap)
return tmp;
}
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
struct pci_saved_state *state;
struct pci_cap_saved_state *tmp;
struct pci_cap_saved_data *cap;
- struct hlist_node *pos;
size_t size;
if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
- hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
sizeof(state->config_space));
cap = state->cap;
- hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
memcpy(cap, &tmp->cap, len);
cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -1151,8 +1149,7 @@ int pci_reenable_device(struct pci_dev *dev)
return 0;
}
-static int __pci_enable_device_flags(struct pci_dev *dev,
- resource_size_t flags)
+static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
int err;
int i, bars = 0;
@@ -1169,7 +1166,7 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
- if (atomic_add_return(1, &dev->enable_cnt) > 1)
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
/* only skip sriov related */
@@ -1196,7 +1193,7 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
*/
int pci_enable_device_io(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_IO);
}
/**
@@ -1209,7 +1206,7 @@ int pci_enable_device_io(struct pci_dev *dev)
*/
int pci_enable_device_mem(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
/**
@@ -1225,7 +1222,7 @@ int pci_enable_device_mem(struct pci_dev *dev)
*/
int pci_enable_device(struct pci_dev *dev)
{
- return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
+ return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
/*
@@ -1396,7 +1393,10 @@ pci_disable_device(struct pci_dev *dev)
if (dr)
dr->enabled = 0;
- if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+ dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
+ "disabling already-disabled device");
+
+ if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
do_pci_disable_device(dev);
@@ -2036,17 +2036,20 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
struct pci_cap_saved_state *tmp;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
+ hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
kfree(tmp);
}
/**
- * pci_enable_ari - enable ARI forwarding if hardware support it
+ * pci_configure_ari - enable or disable ARI forwarding
* @dev: the PCI device
+ *
+ * If @dev and its upstream bridge both support ARI, enable ARI in the
+ * bridge. Otherwise, disable ARI in the bridge.
*/
-void pci_enable_ari(struct pci_dev *dev)
+void pci_configure_ari(struct pci_dev *dev)
{
u32 cap;
struct pci_dev *bridge;
@@ -2054,9 +2057,6 @@ void pci_enable_ari(struct pci_dev *dev)
if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
return;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
- return;
-
bridge = dev->bus->self;
if (!bridge)
return;
@@ -2065,8 +2065,15 @@ void pci_enable_ari(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
- bridge->ari_enabled = 1;
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 1;
+ } else {
+ pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 0;
+ }
}
/**
@@ -3742,18 +3749,6 @@ resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
return align;
}
-/**
- * pci_is_reassigndev - check if specified PCI is target device to reassign
- * @dev: the PCI device to check
- *
- * RETURNS: non-zero for PCI device is a target device to reassign,
- * or zero is not.
- */
-int pci_is_reassigndev(struct pci_dev *dev)
-{
- return (pci_specified_resource_alignment(dev) != 0);
-}
-
/*
* This function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -3768,7 +3763,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
resource_size_t align, size;
u16 command;
- if (!pci_is_reassigndev(dev))
+ /* check if specified PCI is target device to reassign */
+ align = pci_specified_resource_alignment(dev);
+ if (!align)
return;
if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
@@ -3784,7 +3781,6 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
- align = pci_specified_resource_alignment(dev);
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index adfd172c5b9b..7346ee68f47d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -203,8 +203,8 @@ extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
extern int pci_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
-extern int pci_bus_add_child(struct pci_bus *bus);
-extern void pci_enable_ari(struct pci_dev *dev);
+extern void pci_configure_ari(struct pci_dev *dev);
+
/**
* pci_ari_enabled - query ARI forwarding status
* @bus: the PCI bus
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8474b6a4fc9b..d320df6375a2 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -556,6 +556,9 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
+ if (!aspm_support_enabled)
+ return;
+
if (!pci_is_pcie(pdev) || pdev->link_state)
return;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
@@ -634,10 +637,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
- return;
- if ((pci_pcie_type(parent) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM))
+ if (!parent || !parent->link_state)
return;
down_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index b42133afca98..31063ac30992 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -272,7 +272,7 @@ static int get_port_device_capability(struct pci_dev *dev)
/* Hot-Plug Capable */
if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
- dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) {
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2dcd22d9c816..b494066ef32f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -623,6 +623,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
{
struct pci_bus *child;
int i;
+ int ret;
/*
* Allocate a new bus, and inherit stuff from the parent..
@@ -637,8 +638,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->bus_flags = parent->bus_flags;
/* initialize some portions of the bus device, but don't register it
- * now as the parent is not properly set up yet. This device will get
- * registered later in pci_bus_add_devices()
+ * now as the parent is not properly set up yet.
*/
child->dev.class = &pcibus_class;
dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
@@ -651,11 +651,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->primary = parent->busn_res.start;
child->busn_res.end = 0xff;
- if (!bridge)
- return child;
+ if (!bridge) {
+ child->dev.parent = parent->bridge;
+ goto add_dev;
+ }
child->self = bridge;
child->bridge = get_device(&bridge->dev);
+ child->dev.parent = child->bridge;
pci_set_bus_of_node(child);
pci_set_bus_speed(child);
@@ -666,6 +669,13 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
}
bridge->subordinate = child;
+add_dev:
+ ret = device_register(&child->dev);
+ WARN_ON(ret < 0);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(child);
+
return child;
}
@@ -1285,7 +1295,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_vpd_pci22_init(dev);
/* Alternative Routing-ID Forwarding */
- pci_enable_ari(dev);
+ pci_configure_ari(dev);
/* Single Root I/O Virtualization */
pci_iov_init(dev);
@@ -1296,10 +1306,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
+ int ret;
+
device_initialize(&dev->dev);
dev->dev.release = pci_release_dev;
- pci_dev_get(dev);
+ set_dev_node(&dev->dev, pcibus_to_node(bus));
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
@@ -1326,6 +1338,17 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
down_write(&pci_bus_sem);
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
+
+ pci_fixup_device(pci_fixup_final, dev);
+ ret = pcibios_add_device(dev);
+ WARN_ON(ret < 0);
+
+ /* Notifier could use PCI capabilities */
+ dev->match_driver = false;
+ ret = device_add(&dev->dev);
+ WARN_ON(ret < 0);
+
+ pci_proc_attach_device(dev);
}
struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -1348,31 +1371,31 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
+static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
- u16 cap;
- unsigned pos, next_fn;
+ int pos;
+ u16 cap = 0;
+ unsigned next_fn;
- if (!dev)
- return 0;
+ if (pci_ari_enabled(bus)) {
+ if (!dev)
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
- pci_read_config_word(dev, pos + 4, &cap);
- next_fn = cap >> 8;
- if (next_fn <= fn)
- return 0;
- return next_fn;
-}
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return 0; /* protect against malformed list */
-static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
-{
- return (fn + 1) % 8;
-}
+ return next_fn;
+ }
+
+ /* dev may be NULL for non-contiguous multifunction devices */
+ if (!dev || dev->multifunction)
+ return (fn + 1) % 8;
-static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
-{
return 0;
}
@@ -1405,7 +1428,6 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
{
unsigned fn, nr = 0;
struct pci_dev *dev;
- unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
@@ -1416,12 +1438,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (!dev->is_added)
nr++;
- if (pci_ari_enabled(bus))
- next_fn = next_ari_fn;
- else if (dev->multifunction)
- next_fn = next_trad_fn;
-
- for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
+ for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
@@ -1632,6 +1649,18 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
return max;
}
+/**
+ * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
+ * @bridge: Host bridge to set up.
+ *
+ * Default empty implementation. Replace with an architecture-specific setup
+ * routine, if necessary.
+ */
+int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ return 0;
+}
+
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
@@ -1644,13 +1673,13 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
char bus_addr[64];
char *fmt;
-
b = pci_alloc_bus();
if (!b)
return NULL;
b->sysdata = sysdata;
b->ops = ops;
+ b->number = b->busn_res.start = bus;
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
@@ -1665,6 +1694,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
bridge->dev.parent = parent;
bridge->dev.release = pci_release_bus_bridge_dev;
dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+ error = pcibios_root_bridge_prepare(bridge);
+ if (error)
+ goto bridge_dev_reg_err;
+
error = device_register(&bridge->dev);
if (error)
goto bridge_dev_reg_err;
@@ -1685,8 +1718,6 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
- b->number = b->busn_res.start = bus;
-
if (parent)
dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
else
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9b8505ccc56d..0b009470e6db 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -21,7 +21,7 @@ static loff_t
proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -46,7 +46,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- const struct inode *ino = file->f_path.dentry->d_inode;
+ const struct inode *ino = file_inode(file);
const struct proc_dir_entry *dp = PDE(ino);
struct pci_dev *dev = dp->data;
unsigned int pos = *ppos;
@@ -132,7 +132,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
static ssize_t
proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
const struct proc_dir_entry *dp = PDE(ino);
struct pci_dev *dev = dp->data;
int pos = *ppos;
@@ -212,7 +212,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ const struct proc_dir_entry *dp = PDE(file_inode(file));
struct pci_dev *dev = dp->data;
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
@@ -253,7 +253,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
const struct proc_dir_entry *dp = PDE(inode);
struct pci_dev *dev = dp->data;
struct pci_filp_private *fpriv = file->private_data;
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 84954a726a94..cc875e6ed159 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_unregister(&dev->dev);
+ device_del(&dev->dev);
dev->is_added = 0;
}
@@ -39,7 +39,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_free_resources(dev);
- pci_dev_put(dev);
+ put_device(&dev->dev);
}
void pci_remove_bus(struct pci_bus *bus)
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bf969ba58e59..d0627fa9f368 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -319,13 +319,13 @@ int pci_dev_present(const struct pci_device_id *ids)
WARN_ON(in_interrupt());
while (ids->vendor || ids->subvendor || ids->class_mask) {
found = pci_get_dev_by_id(ids, NULL);
- if (found)
- goto exit;
+ if (found) {
+ pci_dev_put(found);
+ return 1;
+ }
ids++;
}
-exit:
- if (found)
- return 1;
+
return 0;
}
EXPORT_SYMBOL(pci_dev_present);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6d3591d57ea0..7e8739e25b9e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -283,7 +283,7 @@ static void assign_requested_resources_sorted(struct list_head *head,
idx = res - &dev_res->dev->resource[0];
if (resource_size(res) &&
pci_assign_resource(dev_res->dev, idx)) {
- if (fail_head && !pci_is_root_bus(dev_res->dev->bus)) {
+ if (fail_head) {
/*
* if the failed res is for ROM BAR, and it will
* be enabled later, don't add it to the list
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9557414954fa..14d4dced1def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -3689,7 +3689,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
}
if (ret > 0) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
inode->i_atime = current_fs_time(inode->i_sb);
}
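
Every f_path.dentry->d_inode chain replaced in the proc and platform-driver hunks above and below is the same mechanical conversion to the new file_inode() helper from <linux/fs.h>. A minimal sketch of the equivalence; example_lookup() is a made-up name used only for illustration.

    #include <linux/fs.h>

    static struct inode *example_lookup(struct file *file)
    {
            /* New helper ... */
            struct inode *inode = file_inode(file);

            /* ... replacing the old open-coded chain:
             *      file->f_path.dentry->d_inode
             */
            return inode;
    }
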
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index be9379923a99..9a907567f41e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -851,7 +851,7 @@ static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
+ struct ibm_struct *ibm = PDE(file_inode(file))->data;
char *kernbuf;
int ret;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 904476b2fa8f..242abac62d8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -583,7 +583,7 @@ static int set_lcd_status(struct backlight_device *bd)
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
@@ -650,7 +650,7 @@ static int video_proc_open(struct inode *inode, struct file *file)
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char *cmd, *buffer;
int ret;
int value;
@@ -750,7 +750,7 @@ static int fan_proc_open(struct inode *inode, struct file *file)
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
@@ -822,7 +822,7 @@ static int keys_proc_open(struct inode *inode, struct file *file)
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+ struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
char cmd[42];
size_t len;
int value;
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 315b3112aca8..65f735ac6b3b 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -30,7 +30,7 @@ static struct proc_dir_entry *isapnp_proc_bus_dir = NULL;
static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -55,7 +55,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
struct proc_dir_entry *dp = PDE(ino);
struct pnp_dev *dev = dp->data;
int pos = *ppos;
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index bc89f392a629..63ddb0173456 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -244,7 +244,7 @@ static int pnpbios_proc_open(struct inode *inode, struct file *file)
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE(file->f_path.dentry->d_inode)->data;
+ void *data = PDE(file_inode(file))->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index ca91396fc48e..0727f9256138 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1515,16 +1515,11 @@ static int bq2415x_probe(struct i2c_client *client,
}
/* Get new ID for the new device */
- ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
- if (ret == 0)
- return -ENOMEM;
-
mutex_lock(&bq2415x_id_mutex);
- ret = idr_get_new(&bq2415x_id, client, &num);
+ num = idr_alloc(&bq2415x_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&bq2415x_id_mutex);
-
- if (ret < 0)
- return ret;
+ if (num < 0)
+ return num;
name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 8ccf5d7d0add..26037ca7efb4 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -791,14 +791,11 @@ static int bq27x00_battery_probe(struct i2c_client *client,
int retval = 0;
/* Get new ID for the new battery device */
- retval = idr_pre_get(&battery_id, GFP_KERNEL);
- if (retval == 0)
- return -ENOMEM;
mutex_lock(&battery_mutex);
- retval = idr_get_new(&battery_id, client, &num);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&battery_mutex);
- if (retval < 0)
- return retval;
+ if (num < 0)
+ return num;
name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index e7301b3ed623..c09e7726c96c 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -395,17 +395,12 @@ static int ds278x_battery_probe(struct i2c_client *client,
}
/* Get an ID for this battery */
- ret = idr_pre_get(&battery_id, GFP_KERNEL);
- if (ret == 0) {
- ret = -ENOMEM;
- goto fail_id;
- }
-
mutex_lock(&battery_lock);
- ret = idr_get_new(&battery_id, client, &num);
+ ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
mutex_unlock(&battery_lock);
if (ret < 0)
goto fail_id;
+ num = ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 2bf0c1b608dd..d3db26e46489 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -128,7 +128,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
}
/* allocate space for device info */
- data = kzalloc(sizeof(struct pps_gpio_device_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
+ GFP_KERNEL);
if (data == NULL) {
err = -ENOMEM;
goto return_error;
@@ -150,7 +151,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
data->pps = pps_register_source(&data->info, pps_default_params);
if (data->pps == NULL) {
- kfree(data);
pr_err("failed to register IRQ %d as PPS source\n", irq);
err = -EINVAL;
goto return_error;
@@ -164,7 +164,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
get_irqf_trigger_flags(pdata), data->info.name, data);
if (ret) {
pps_unregister_source(data->pps);
- kfree(data);
pr_err("failed to acquire IRQ %d\n", irq);
err = -EINVAL;
goto return_error;
@@ -190,7 +189,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_pin);
pps_unregister_source(data->pps);
pr_info("removed IRQ %d as PPS source\n", data->irq);
- kfree(data);
return 0;
}
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index f197e8ea185c..cdad4d95b20e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -102,7 +102,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
goto pps_register_source_exit;
}
- /* These initializations must be done before calling idr_get_new()
+ /* These initializations must be done before calling idr_alloc()
 * in order to avoid races with pps_event().
*/
pps->params.api_version = PPS_API_VERS;
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6437703eb10f..7173e3ad475d 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -295,29 +295,21 @@ int pps_register_cdev(struct pps_device *pps)
dev_t devt;
mutex_lock(&pps_idr_lock);
- /* Get new ID for the new PPS source */
- if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
- mutex_unlock(&pps_idr_lock);
- return -ENOMEM;
- }
-
- /* Now really allocate the PPS source.
- * After idr_get_new() calling the new source will be freely available
- * into the kernel.
+ /*
+ * Get a new ID for the new PPS source. After idr_alloc() returns,
+ * the new source will be freely available to the rest of the kernel.
*/
- err = idr_get_new(&pps_idr, pps, &pps->id);
- mutex_unlock(&pps_idr_lock);
-
- if (err < 0)
- return err;
-
- pps->id &= MAX_IDR_MASK;
- if (pps->id >= PPS_MAX_SOURCES) {
- pr_err("%s: too many PPS sources in the system\n",
- pps->info.name);
- err = -EBUSY;
- goto free_idr;
+ err = idr_alloc(&pps_idr, pps, 0, PPS_MAX_SOURCES, GFP_KERNEL);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ pr_err("%s: too many PPS sources in the system\n",
+ pps->info.name);
+ err = -EBUSY;
+ }
+ goto out_unlock;
}
+ pps->id = err;
+ mutex_unlock(&pps_idr_lock);
devt = MKDEV(MAJOR(pps_devt), pps->id);
@@ -351,8 +343,8 @@ del_cdev:
free_idr:
mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
+out_unlock:
mutex_unlock(&pps_idr_lock);
-
return err;
}
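
The bq2415x, bq27x00, ds278x and pps hunks above are all instances of the same API migration: the two-step idr_pre_get()/idr_get_new() protocol becomes a single idr_alloc() call that returns either the new ID or a negative errno. A minimal sketch of the new pattern; my_idr, my_lock and my_register() are illustrative names, not part of the patch.

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/mutex.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_MUTEX(my_lock);

    static int my_register(void *client)
    {
            int id;

            mutex_lock(&my_lock);
            /* start = 0, end = 0: allocate any available ID */
            id = idr_alloc(&my_idr, client, 0, 0, GFP_KERNEL);
            mutex_unlock(&my_lock);

            return id;      /* >= 0 on success, -ENOMEM or -ENOSPC on failure */
    }

    static void my_unregister(int id)
    {
            mutex_lock(&my_lock);
            idr_remove(&my_idr, id);
            mutex_unlock(&my_lock);
    }

When the ID range has to be bounded, a positive end argument caps it; that is how the pps hunk above limits IDs to PPS_MAX_SOURCES and then remaps an exhausted range (-ENOSPC) onto its historical -EBUSY return value.
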
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e513cd998170..0e0bfa035083 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -37,6 +37,18 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_ATMEL_TCB
+ tristate "Atmel TC Block PWM support"
+ depends on ATMEL_TCLIB && OF
+ help
+ Generic PWM framework driver for Atmel Timer Counter Block.
+
+ A Timer Counter Block provides 6 PWM devices grouped by 2.
+ Devices in a given group must have the same period.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel-tcb.
+
config PWM_BFIN
tristate "Blackfin PWM support"
depends on BFIN_GPTIMERS
@@ -47,7 +59,7 @@ config PWM_BFIN
will be called pwm-bfin.
config PWM_IMX
- tristate "i.MX pwm support"
+ tristate "i.MX PWM support"
depends on ARCH_MXC
help
Generic PWM framework driver for i.MX.
@@ -104,7 +116,7 @@ config PWM_PXA
will be called pwm-pxa.
config PWM_SAMSUNG
- tristate "Samsung pwm support"
+ tristate "Samsung PWM support"
depends on PLAT_SAMSUNG
help
Generic PWM framework driver for Samsung.
@@ -183,7 +195,7 @@ config PWM_TWL_LED
will be called pwm-twl-led.
config PWM_VT8500
- tristate "vt8500 pwm support"
+ tristate "vt8500 PWM support"
depends on ARCH_VT8500
help
Generic PWM framework driver for vt8500.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 62a2963cfe58..94ba21e24bd6 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 903138b18842..32221cb0cbe7 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -211,6 +211,7 @@ int pwm_set_chip_data(struct pwm_device *pwm, void *data)
return 0;
}
+EXPORT_SYMBOL_GPL(pwm_set_chip_data);
/**
* pwm_get_chip_data() - get private chip data for a PWM
@@ -220,6 +221,7 @@ void *pwm_get_chip_data(struct pwm_device *pwm)
{
return pwm ? pwm->chip_data : NULL;
}
+EXPORT_SYMBOL_GPL(pwm_get_chip_data);
/**
* pwmchip_add() - register a new PWM chip
@@ -471,7 +473,7 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
}
/**
- * of_pwm_request() - request a PWM via the PWM framework
+ * of_pwm_get() - request a PWM via the PWM framework
* @np: device node to get the PWM from
* @con_id: consumer name
*
@@ -486,8 +488,7 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
* becomes mandatory for devices that look up the PWM device via the con_id
* parameter.
*/
-static struct pwm_device *of_pwm_request(struct device_node *np,
- const char *con_id)
+struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
{
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
@@ -545,6 +546,7 @@ put:
return pwm;
}
+EXPORT_SYMBOL_GPL(of_pwm_get);
/**
* pwm_add_table() - register PWM device consumers
@@ -587,7 +589,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
/* look up via DT first */
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
- return of_pwm_request(dev->of_node, con_id);
+ return of_pwm_get(dev->of_node, con_id);
/*
* We look up the provider in the static table typically provided by
@@ -708,6 +710,36 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
}
EXPORT_SYMBOL_GPL(devm_pwm_get);
+/**
+ * devm_of_pwm_get() - resource managed of_pwm_get()
+ * @dev: device for PWM consumer
+ * @np: device node to get the PWM from
+ * @con_id: consumer name
+ *
+ * This function performs like of_pwm_get() but the acquired PWM device will
+ * automatically be released on driver detach.
+ */
+struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id)
+{
+ struct pwm_device **ptr, *pwm;
+
+ ptr = devres_alloc(devm_pwm_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ pwm = of_pwm_get(np, con_id);
+ if (!IS_ERR(pwm)) {
+ *ptr = pwm;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(devm_of_pwm_get);
+
static int devm_pwm_match(struct device *dev, void *res, void *data)
{
struct pwm_device **p = res;
@@ -733,6 +765,18 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(devm_pwm_put);
+/**
+ * pwm_can_sleep() - report whether PWM access will sleep
+ * @pwm: PWM device
+ *
+ * It returns true if accessing the PWM can sleep, false otherwise.
+ */
+bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return pwm->chip->can_sleep;
+}
+EXPORT_SYMBOL_GPL(pwm_can_sleep);
+
#ifdef CONFIG_DEBUG_FS
static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
{
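
The devm_of_pwm_get() and pwm_can_sleep() additions above are consumer-facing. A hedged sketch of how a hypothetical consumer probe could use them follows; my_consumer_probe(), the NULL con_id and the 1 ms period are assumptions made for illustration, not taken from the patch.

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/pwm.h>

    static int my_consumer_probe(struct platform_device *pdev)
    {
            struct pwm_device *pwm;
            int ret;

            /* Resource-managed lookup via this device's DT node. */
            pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
            if (IS_ERR(pwm))
                    return PTR_ERR(pwm);

            /*
             * Chips that set can_sleep (for example the I2C-based
             * pwm-twl devices in the hunks below) must not be touched
             * from atomic context; pwm_can_sleep() lets a consumer
             * check this up front.
             */
            if (pwm_can_sleep(pwm))
                    dev_info(&pdev->dev, "PWM accesses may sleep\n");

            ret = pwm_config(pwm, 500000, 1000000); /* 50% duty, 1 ms period */
            if (ret)
                    return ret;

            return pwm_enable(pwm);
    }
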
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
new file mode 100644
index 000000000000..16cb53092857
--- /dev/null
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) Overkiz SAS 2012
+ *
+ * Author: Boris BREZILLON <b.brezillon@overkiz.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/atmel_tc.h>
+#include <linux/pwm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#define NPWM 6
+
+#define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \
+ ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG)
+
+#define ATMEL_TC_BCMR_MASK (ATMEL_TC_BCPB | ATMEL_TC_BCPC | \
+ ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
+
+struct atmel_tcb_pwm_device {
+ enum pwm_polarity polarity; /* PWM polarity */
+ unsigned div; /* PWM clock divider */
+ unsigned duty; /* PWM duty expressed in clk cycles */
+ unsigned period; /* PWM period expressed in clk cycles */
+};
+
+struct atmel_tcb_pwm_chip {
+ struct pwm_chip chip;
+ spinlock_t lock;
+ struct atmel_tc *tc;
+ struct atmel_tcb_pwm_device *pwms[NPWM];
+};
+
+static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_tcb_pwm_chip, chip);
+}
+
+static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+
+ tcbpwm->polarity = polarity;
+
+ return 0;
+}
+
+static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm;
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ unsigned cmr;
+ int ret;
+
+ tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (!tcbpwm)
+ return -ENOMEM;
+
+ ret = clk_enable(tc->clk[group]);
+ if (ret) {
+ devm_kfree(chip->dev, tcbpwm);
+ return ret;
+ }
+
+ pwm_set_chip_data(pwm, tcbpwm);
+ tcbpwm->polarity = PWM_POLARITY_NORMAL;
+ tcbpwm->duty = 0;
+ tcbpwm->period = 0;
+ tcbpwm->div = 0;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ /*
+ * Get init config from Timer Counter registers if
+ * Timer Counter is already configured as a PWM generator.
+ */
+ if (cmr & ATMEL_TC_WAVE) {
+ if (index == 0)
+ tcbpwm->duty =
+ __raw_readl(regs + ATMEL_TC_REG(group, RA));
+ else
+ tcbpwm->duty =
+ __raw_readl(regs + ATMEL_TC_REG(group, RB));
+
+ tcbpwm->div = cmr & ATMEL_TC_TCCLKS;
+ tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC));
+ cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK |
+ ATMEL_TC_BCMR_MASK);
+ } else
+ cmr = 0;
+
+ cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ spin_unlock(&tcbpwmc->lock);
+
+ tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
+
+ return 0;
+}
+
+static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+
+ clk_disable(tc->clk[pwm->hwpwm / 2]);
+ tcbpwmc->pwms[pwm->hwpwm] = NULL;
+ devm_kfree(chip->dev, tcbpwm);
+}
+
+static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ unsigned cmr;
+ enum pwm_polarity polarity = tcbpwm->polarity;
+
+ /*
+ * If duty is 0 the timer will be stopped and we have to
+ * configure the output correctly on software trigger:
+ * - set output to high if PWM_POLARITY_INVERSED
+ * - set output to low if PWM_POLARITY_NORMAL
+ *
+ * This is why we invert the polarity in this case.
+ */
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+
+ /* flush old setting and set the new one */
+ if (index == 0) {
+ cmr &= ~ATMEL_TC_ACMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ASWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_ASWTRG_SET;
+ } else {
+ cmr &= ~ATMEL_TC_BCMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BSWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_BSWTRG_SET;
+ }
+
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+
+ /*
+ * Use software trigger to apply the new setting.
+ * If both PWM devices in this group are disabled we stop the clock.
+ */
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
+ regs + ATMEL_TC_REG(group, CCR));
+ else
+ __raw_writel(ATMEL_TC_SWTRG, regs +
+ ATMEL_TC_REG(group, CCR));
+
+ spin_unlock(&tcbpwmc->lock);
+}
+
+static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ struct atmel_tc *tc = tcbpwmc->tc;
+ void __iomem *regs = tc->regs;
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ u32 cmr;
+ enum pwm_polarity polarity = tcbpwm->polarity;
+
+ /*
+ * If duty is 0 the timer will be stopped and we have to
+ * configure the output correctly on software trigger:
+ * - set output to high if PWM_POLARITY_INVERSED
+ * - set output to low if PWM_POLARITY_NORMAL
+ *
+ * This is why we invert the polarity in this case.
+ */
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+ spin_lock(&tcbpwmc->lock);
+ cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+
+ /* flush old setting and set the new one */
+ cmr &= ~ATMEL_TC_TCCLKS;
+
+ if (index == 0) {
+ cmr &= ~ATMEL_TC_ACMR_MASK;
+
+ /* Set CMR flags according to given polarity */
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ASWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_ASWTRG_SET;
+ } else {
+ cmr &= ~ATMEL_TC_BCMR_MASK;
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BSWTRG_CLEAR;
+ else
+ cmr |= ATMEL_TC_BSWTRG_SET;
+ }
+
+ /*
+ * If duty is 0 or equal to period there's no need to register
+ * a specific action on RA/RB and RC compare.
+ * The output will be configured on software trigger and will keep
+ * this configuration until the next call to config.
+ */
+ if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) {
+ if (index == 0) {
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR;
+ else
+ cmr |= ATMEL_TC_ACPA_CLEAR | ATMEL_TC_ACPC_SET;
+ } else {
+ if (polarity == PWM_POLARITY_INVERSED)
+ cmr |= ATMEL_TC_BCPB_SET | ATMEL_TC_BCPC_CLEAR;
+ else
+ cmr |= ATMEL_TC_BCPB_CLEAR | ATMEL_TC_BCPC_SET;
+ }
+ }
+
+ __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+
+ if (index == 0)
+ __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA));
+ else
+ __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB));
+
+ __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC));
+
+ /* Use software trigger to apply the new setting */
+ __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ regs + ATMEL_TC_REG(group, CCR));
+ spin_unlock(&tcbpwmc->lock);
+ return 0;
+}
+
+static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+ unsigned group = pwm->hwpwm / 2;
+ unsigned index = pwm->hwpwm % 2;
+ struct atmel_tcb_pwm_device *atcbpwm = NULL;
+ struct atmel_tc *tc = tcbpwmc->tc;
+ int i;
+ int slowclk = 0;
+ unsigned period;
+ unsigned duty;
+ unsigned rate = clk_get_rate(tc->clk[group]);
+ unsigned long long min;
+ unsigned long long max;
+
+ /*
+ * Find best clk divisor:
+ * the smallest divisor which can fulfill the period_ns requirements.
+ */
+ for (i = 0; i < 5; ++i) {
+ if (atmel_tc_divisors[i] == 0) {
+ slowclk = i;
+ continue;
+ }
+ min = div_u64((u64)NSEC_PER_SEC * atmel_tc_divisors[i], rate);
+ max = min << tc->tcb_config->counter_width;
+ if (max >= period_ns)
+ break;
+ }
+
+ /*
+ * If none of the divisors can represent period_ns,
+ * fall back to the slow clock (32 kHz).
+ */
+ if (i == 5) {
+ i = slowclk;
+ rate = 32768;
+ min = div_u64(NSEC_PER_SEC, rate);
+ max = min << 16;
+
+ /* If period is too big return ERANGE error */
+ if (max < period_ns)
+ return -ERANGE;
+ }
+
+ duty = div_u64(duty_ns, min);
+ period = div_u64(period_ns, min);
+
+ if (index == 0)
+ atcbpwm = tcbpwmc->pwms[pwm->hwpwm + 1];
+ else
+ atcbpwm = tcbpwmc->pwms[pwm->hwpwm - 1];
+
+ /*
+ * PWM devices provided by TCB driver are grouped by 2:
+ * - group 0: PWM 0 & 1
+ * - group 1: PWM 2 & 3
+ * - group 2: PWM 4 & 5
+ *
+ * PWM devices in a given group must be configured with the
+ * same period_ns.
+ *
+ * We're checking the period value of the second PWM device
+ * in this group before applying the new config.
+ */
+ if ((atcbpwm && atcbpwm->duty > 0 &&
+ atcbpwm->duty != atcbpwm->period) &&
+ (atcbpwm->div != i || atcbpwm->period != period)) {
+ dev_err(chip->dev,
+ "failed to configure period_ns: PWM group already configured with a different value\n");
+ return -EINVAL;
+ }
+
+ tcbpwm->period = period;
+ tcbpwm->div = i;
+ tcbpwm->duty = duty;
+
+ /* If the PWM is enabled, call enable to apply the new conf */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ atmel_tcb_pwm_enable(chip, pwm);
+
+ return 0;
+}
+
+static const struct pwm_ops atmel_tcb_pwm_ops = {
+ .request = atmel_tcb_pwm_request,
+ .free = atmel_tcb_pwm_free,
+ .config = atmel_tcb_pwm_config,
+ .set_polarity = atmel_tcb_pwm_set_polarity,
+ .enable = atmel_tcb_pwm_enable,
+ .disable = atmel_tcb_pwm_disable,
+};
+
+static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+{
+ struct atmel_tcb_pwm_chip *tcbpwm;
+ struct device_node *np = pdev->dev.of_node;
+ struct atmel_tc *tc;
+ int err;
+ int tcblock;
+
+ err = of_property_read_u32(np, "tc-block", &tcblock);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to get Timer Counter Block number from device tree (error: %d)\n",
+ err);
+ return err;
+ }
+
+ tc = atmel_tc_alloc(tcblock, "tcb-pwm");
+ if (tc == NULL) {
+ dev_err(&pdev->dev, "failed to allocate Timer Counter Block\n");
+ return -ENOMEM;
+ }
+
+ tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (tcbpwm == NULL) {
+ atmel_tc_free(tc);
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ tcbpwm->chip.dev = &pdev->dev;
+ tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
+ tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ tcbpwm->chip.of_pwm_n_cells = 3;
+ tcbpwm->chip.base = -1;
+ tcbpwm->chip.npwm = NPWM;
+ tcbpwm->tc = tc;
+
+ spin_lock_init(&tcbpwm->lock);
+
+ err = pwmchip_add(&tcbpwm->chip);
+ if (err < 0) {
+ atmel_tc_free(tc);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tcbpwm);
+
+ return 0;
+}
+
+static int atmel_tcb_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ int err;
+
+ err = pwmchip_remove(&tcbpwm->chip);
+ if (err < 0)
+ return err;
+
+ atmel_tc_free(tcbpwm->tc);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
+ { .compatible = "atmel,tcb-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+
+static struct platform_driver atmel_tcb_pwm_driver = {
+ .driver = {
+ .name = "atmel-tcb-pwm",
+ .of_match_table = atmel_tcb_pwm_dt_ids,
+ },
+ .probe = atmel_tcb_pwm_probe,
+ .remove = atmel_tcb_pwm_remove,
+};
+module_platform_driver(atmel_tcb_pwm_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <b.brezillon@overkiz.com>");
+MODULE_DESCRIPTION("Atmel Timer Counter Pulse Width Modulation Driver");
+MODULE_LICENSE("GPL v2");
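
The divisor search at the top of atmel_tcb_pwm_config() above picks the smallest timer divisor whose full counter range still covers the requested period, then converts duty_ns and period_ns into counter ticks. A worked example under assumed numbers; the 132 MHz peripheral clock, 16-bit counter and 1 ms period are illustrative values, not taken from the driver.

    /*
     * Assumed: rate = 132 MHz, counter_width = 16, period_ns = 1 000 000,
     * duty_ns = 500 000, atmel_tc_divisors = {2, 8, 32, 128, 0}.
     *
     *   div = 2:  tick = 1e9 * 2 / 132e6 ~= 15.2 ns
     *             max  = 15.2 ns << 16   ~= 0.99 ms  (< 1 ms, keep looking)
     *   div = 8:  tick = 1e9 * 8 / 132e6 ~= 60.6 ns
     *             max  = 60.6 ns << 16   ~= 3.97 ms  (>= 1 ms, use this one)
     *
     * With div = 8: period = 1 000 000 / 60.6 ~= 16500 ticks goes to RC,
     * and duty = 500 000 / 60.6 ~= 8250 ticks goes to RA (or RB for the
     * second device in the group).
     */
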
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 71900e8cd3d1..af3ab48cb7ef 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -233,7 +233,6 @@ static int tegra_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
-#ifdef CONFIG_OF
static struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra20-pwm" },
{ .compatible = "nvidia,tegra30-pwm" },
@@ -241,12 +240,11 @@ static struct of_device_id tegra_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
-#endif
static struct platform_driver tegra_pwm_driver = {
.driver = {
.name = "tegra-pwm",
- .of_match_table = of_match_ptr(tegra_pwm_of_match),
+ .of_match_table = tegra_pwm_of_match,
},
.probe = tegra_pwm_probe,
.remove = tegra_pwm_remove,
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 27a67d6b27c1..22e96e2bffd3 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -41,10 +41,17 @@
#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6))
#define ECCTL2_TSCTR_FREERUN BIT(4)
+struct ecap_context {
+ u32 cap3;
+ u32 cap4;
+ u16 ecctl2;
+};
+
struct ecap_pwm_chip {
struct pwm_chip chip;
unsigned int clk_rate;
void __iomem *mmio_base;
+ struct ecap_context ctx;
};
static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
@@ -288,11 +295,57 @@ static int ecap_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
+{
+ pm_runtime_get_sync(pc->chip.dev);
+ pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2);
+ pc->ctx.cap4 = readl(pc->mmio_base + CAP4);
+ pc->ctx.cap3 = readl(pc->mmio_base + CAP3);
+ pm_runtime_put_sync(pc->chip.dev);
+}
+
+void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
+{
+ writel(pc->ctx.cap3, pc->mmio_base + CAP3);
+ writel(pc->ctx.cap4, pc->mmio_base + CAP4);
+ writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
+}
+
+static int ecap_pwm_suspend(struct device *dev)
+{
+ struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_device *pwm = pc->chip.pwms;
+
+ ecap_pwm_save_context(pc);
+
+ /* Disable explicitly if PWM is running */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int ecap_pwm_resume(struct device *dev)
+{
+ struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_device *pwm = pc->chip.pwms;
+
+ /* Enable explicitly if PWM was running */
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ pm_runtime_get_sync(dev);
+
+ ecap_pwm_restore_context(pc);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
+
static struct platform_driver ecap_pwm_driver = {
.driver = {
.name = "ecap",
.owner = THIS_MODULE,
.of_match_table = ecap_of_match,
+ .pm = &ecap_pwm_pm_ops,
},
.probe = ecap_pwm_probe,
.remove = ecap_pwm_remove,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 5a1399580533..8b4c86fa99c8 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -113,6 +113,17 @@
#define NUM_PWM_CHANNEL 2 /* EHRPWM channels */
+struct ehrpwm_context {
+ u16 tbctl;
+ u16 tbprd;
+ u16 cmpa;
+ u16 cmpb;
+ u16 aqctla;
+ u16 aqctlb;
+ u16 aqsfrc;
+ u16 aqcsfrc;
+};
+
struct ehrpwm_pwm_chip {
struct pwm_chip chip;
unsigned int clk_rate;
@@ -120,6 +131,7 @@ struct ehrpwm_pwm_chip {
unsigned long period_cycles[NUM_PWM_CHANNEL];
enum pwm_polarity polarity[NUM_PWM_CHANNEL];
struct clk *tbclk;
+ struct ehrpwm_context ctx;
};
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -127,6 +139,11 @@ static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct ehrpwm_pwm_chip, chip);
}
+static u16 ehrpwm_read(void *base, int offset)
+{
+ return readw(base + offset);
+}
+
static void ehrpwm_write(void *base, int offset, unsigned int val)
{
writew(val & 0xFFFF, base + offset);
@@ -318,6 +335,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
unsigned short aqcsfrc_val, aqcsfrc_mask;
+ int ret;
/* Leave clock enabled on enabling PWM */
pm_runtime_get_sync(chip->dev);
@@ -341,7 +359,12 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
configure_polarity(pc, pwm->hwpwm);
/* Enable TBCLK before enabling PWM device */
- clk_enable(pc->tbclk);
+ ret = clk_prepare_enable(pc->tbclk);
+ if (ret) {
+ pr_err("Failed to enable TBCLK for %s\n",
+ dev_name(pc->chip.dev));
+ return ret;
+ }
/* Enable time counter for free_run */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
@@ -372,7 +395,7 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/* Disabling TBCLK on PWM disable */
- clk_disable(pc->tbclk);
+ clk_disable_unprepare(pc->tbclk);
/* Stop Time base counter */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
@@ -510,11 +533,77 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
+{
+ pm_runtime_get_sync(pc->chip.dev);
+ pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL);
+ pc->ctx.tbprd = ehrpwm_read(pc->mmio_base, TBPRD);
+ pc->ctx.cmpa = ehrpwm_read(pc->mmio_base, CMPA);
+ pc->ctx.cmpb = ehrpwm_read(pc->mmio_base, CMPB);
+ pc->ctx.aqctla = ehrpwm_read(pc->mmio_base, AQCTLA);
+ pc->ctx.aqctlb = ehrpwm_read(pc->mmio_base, AQCTLB);
+ pc->ctx.aqsfrc = ehrpwm_read(pc->mmio_base, AQSFRC);
+ pc->ctx.aqcsfrc = ehrpwm_read(pc->mmio_base, AQCSFRC);
+ pm_runtime_put_sync(pc->chip.dev);
+}
+
+void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
+{
+ ehrpwm_write(pc->mmio_base, TBPRD, pc->ctx.tbprd);
+ ehrpwm_write(pc->mmio_base, CMPA, pc->ctx.cmpa);
+ ehrpwm_write(pc->mmio_base, CMPB, pc->ctx.cmpb);
+ ehrpwm_write(pc->mmio_base, AQCTLA, pc->ctx.aqctla);
+ ehrpwm_write(pc->mmio_base, AQCTLB, pc->ctx.aqctlb);
+ ehrpwm_write(pc->mmio_base, AQSFRC, pc->ctx.aqsfrc);
+ ehrpwm_write(pc->mmio_base, AQCSFRC, pc->ctx.aqcsfrc);
+ ehrpwm_write(pc->mmio_base, TBCTL, pc->ctx.tbctl);
+}
+
+static int ehrpwm_pwm_suspend(struct device *dev)
+{
+ struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ int i;
+
+ ehrpwm_pwm_save_context(pc);
+ for (i = 0; i < pc->chip.npwm; i++) {
+ struct pwm_device *pwm = &pc->chip.pwms[i];
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ continue;
+
+ /* Disable explicitly if PWM is running */
+ pm_runtime_put_sync(dev);
+ }
+ return 0;
+}
+
+static int ehrpwm_pwm_resume(struct device *dev)
+{
+ struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < pc->chip.npwm; i++) {
+ struct pwm_device *pwm = &pc->chip.pwms[i];
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ continue;
+
+ /* Enable explicitly if PWM was running */
+ pm_runtime_get_sync(dev);
+ }
+ ehrpwm_pwm_restore_context(pc);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
+ ehrpwm_pwm_resume);
+
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
.name = "ehrpwm",
.owner = THIS_MODULE,
.of_match_table = ehrpwm_of_match,
+ .pm = &ehrpwm_pwm_pm_ops,
},
.probe = ehrpwm_pwm_probe,
.remove = ehrpwm_pwm_remove,
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 9dfa0f3eca30..83e25d45d640 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -300,6 +300,7 @@ static int twl_pwmled_probe(struct platform_device *pdev)
twl->chip.dev = &pdev->dev;
twl->chip.base = -1;
+ twl->chip.can_sleep = true;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index e65db95d5e59..bf3fda294223 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -200,8 +200,7 @@ out:
static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val, mask;
@@ -231,8 +230,7 @@ out:
static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val;
@@ -255,8 +253,7 @@ out:
static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
- chip);
+ struct twl_pwm_chip *twl = to_twl(chip);
int ret;
u8 val;
@@ -315,6 +312,7 @@ static int twl_pwm_probe(struct platform_device *pdev)
twl->chip.dev = &pdev->dev;
twl->chip.base = -1;
twl->chip.npwm = 2;
+ twl->chip.can_sleep = true;
mutex_init(&twl->mutex);
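Editor's note: the can_sleep flag set in the two TWL hunks above tells the PWM core that this chip's callbacks may sleep (they go over I2C), so such PWMs must only be driven from process context. A hedged consumer-side sketch using the generic PWM API of this era follows; my_backlight_setup and the NULL con_id are made up for the example, the pwm_* calls are the stock framework functions.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int my_backlight_setup(struct device *dev)
{
        struct pwm_device *pwm;
        int ret;

        pwm = pwm_get(dev, NULL);               /* may sleep */
        if (IS_ERR(pwm))
                return PTR_ERR(pwm);

        ret = pwm_config(pwm, 500000, 1000000); /* 50% duty, 1 ms period */
        if (ret < 0) {
                pwm_put(pwm);
                return ret;
        }

        return pwm_enable(pwm);                 /* may sleep on can_sleep chips */
}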
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index f9de9b28e46e..69effd19afc7 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -36,6 +36,25 @@
*/
#define VT8500_NR_PWMS 2
+#define REG_CTRL(pwm) (((pwm) << 4) + 0x00)
+#define REG_SCALAR(pwm) (((pwm) << 4) + 0x04)
+#define REG_PERIOD(pwm) (((pwm) << 4) + 0x08)
+#define REG_DUTY(pwm) (((pwm) << 4) + 0x0C)
+#define REG_STATUS 0x40
+
+#define CTRL_ENABLE BIT(0)
+#define CTRL_INVERT BIT(1)
+#define CTRL_AUTOLOAD BIT(2)
+#define CTRL_STOP_IMM BIT(3)
+#define CTRL_LOAD_PRESCALE BIT(4)
+#define CTRL_LOAD_PERIOD BIT(5)
+
+#define STATUS_CTRL_UPDATE BIT(0)
+#define STATUS_SCALAR_UPDATE BIT(1)
+#define STATUS_PERIOD_UPDATE BIT(2)
+#define STATUS_DUTY_UPDATE BIT(3)
+#define STATUS_ALL_UPDATE 0x0F
+
struct vt8500_chip {
struct pwm_chip chip;
void __iomem *base;
@@ -45,15 +64,17 @@ struct vt8500_chip {
#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
+static inline void pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
{
int loops = msecs_to_loops(10);
- while ((readb(reg) & bitmask) && --loops)
+ u32 mask = bitmask << (nr << 8);
+
+ while ((readl(vt8500->base + REG_STATUS) & mask) && --loops)
cpu_relax();
if (unlikely(!loops))
- pr_warn("Waiting for status bits 0x%x to clear timed out\n",
- bitmask);
+ dev_warn(vt8500->chip.dev, "Waiting for status bits 0x%x to clear timed out\n",
+ mask);
}
static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -63,6 +84,7 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long c;
unsigned long period_cycles, prescale, pv, dc;
int err;
+ u32 val;
err = clk_enable(vt8500->clk);
if (err < 0) {
@@ -91,14 +113,19 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(c, period_ns);
dc = c;
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 1));
- writel(prescale, vt8500->base + 0x4 + (pwm->hwpwm << 4));
+ writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 2));
- writel(pv, vt8500->base + 0x8 + (pwm->hwpwm << 4));
+ writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
- writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
+ writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
+
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val |= CTRL_AUTOLOAD;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
return 0;
@@ -106,8 +133,9 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- int err;
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ int err;
+ u32 val;
err = clk_enable(vt8500->clk);
if (err < 0) {
@@ -115,25 +143,52 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
- writel(5, vt8500->base + (pwm->hwpwm << 4));
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val |= CTRL_ENABLE;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+
return 0;
}
static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ u32 val;
- pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
- writel(0, vt8500->base + (pwm->hwpwm << 4));
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+ val &= ~CTRL_ENABLE;
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
}
+static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ u32 val;
+
+ val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ val |= CTRL_INVERT;
+ else
+ val &= ~CTRL_INVERT;
+
+ writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
+ pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+
+ return 0;
+}
+
static struct pwm_ops vt8500_pwm_ops = {
.enable = vt8500_pwm_enable,
.disable = vt8500_pwm_disable,
.config = vt8500_pwm_config,
+ .set_polarity = vt8500_pwm_set_polarity,
.owner = THIS_MODULE,
};
@@ -163,6 +218,8 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
chip->chip.dev = &pdev->dev;
chip->chip.ops = &vt8500_pwm_ops;
+ chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ chip->chip.of_pwm_n_cells = 3;
chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
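Editor's note: with of_pwm_xlate_with_flags and of_pwm_n_cells = 3, the third cell of a PWM specifier in the device tree can carry a polarity flag, and the new .set_polarity callback above is what makes that request reach the VT8500 hardware. A consumer can also ask for inverted output from C; a minimal hedged sketch (my_invert_pwm is invented, return-value checks trimmed, and in this API generation the polarity is expected to be set while the PWM is disabled):

#include <linux/pwm.h>

static void my_invert_pwm(struct pwm_device *pwm)
{
        pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
        pwm_config(pwm, 250000, 1000000);
        pwm_enable(pwm);
}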
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index dd3bfaf1ad40..29387df4bfc9 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -199,11 +199,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
- if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
- dev_err(dev, "idr_pre_get failed\n");
- return -ENOMEM;
- }
-
/*
* Allocate non-cacheable memory for the vring. In the future
* this call will also configure the IOMMU for us
@@ -221,12 +216,13 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
* TODO: let the rproc know the notifyid of this vring
* TODO: support predefined notifyids (via resource table)
*/
- ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
+ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
if (ret) {
- dev_err(dev, "idr_get_new failed: %d\n", ret);
+ dev_err(dev, "idr_alloc failed: %d\n", ret);
dma_free_coherent(dev->parent, size, va, dma);
return ret;
}
+ notifyid = ret;
/* Store largest notifyid */
rproc->max_notifyid = max(rproc->max_notifyid, notifyid);
@@ -1180,7 +1176,6 @@ static void rproc_type_release(struct device *dev)
rproc_delete_debug_dir(rproc);
- idr_remove_all(&rproc->notifyids);
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
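Editor's note: the remoteproc hunks above are part of the tree-wide move from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which reserves and returns the id in a single call (negative errno on failure); idr_remove_all() disappears because, as of this conversion, idr_destroy() also disposes of any ids still installed. A minimal hedged sketch of the new-style lifecycle, independent of remoteproc (my_* names are invented):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/* Reserve an id and bind it to 'object' in one step. */
static int my_track(void *object)
{
        /* start = 0, end = 0 means "any non-negative id". */
        return idr_alloc(&my_idr, object, 0, 0, GFP_KERNEL);
}

static void my_untrack(int id)
{
        idr_remove(&my_idr, id);
}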
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 9e198e590675..afed9b7731c4 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -222,7 +222,7 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
rvdev->gfeatures = vdev->features[0];
}
-static struct virtio_config_ops rproc_virtio_config_ops = {
+static const struct virtio_config_ops rproc_virtio_config_ops = {
.get_features = rproc_virtio_get_features,
.finalize_features = rproc_virtio_finalize_features,
.find_vqs = rproc_virtio_find_vqs,
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index d85446021ddb..a59684b5fc68 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -213,13 +213,10 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
void *priv, u32 addr)
{
- int err, tmpaddr, request;
+ int id_min, id_max, id;
struct rpmsg_endpoint *ept;
struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
- if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
- return NULL;
-
ept = kzalloc(sizeof(*ept), GFP_KERNEL);
if (!ept) {
dev_err(dev, "failed to kzalloc a new ept\n");
@@ -234,31 +231,28 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
ept->priv = priv;
/* do we need to allocate a local address ? */
- request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;
+ if (addr == RPMSG_ADDR_ANY) {
+ id_min = RPMSG_RESERVED_ADDRESSES;
+ id_max = 0;
+ } else {
+ id_min = addr;
+ id_max = addr + 1;
+ }
mutex_lock(&vrp->endpoints_lock);
/* bind the endpoint to an rpmsg address (and allocate one if needed) */
- err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
- if (err) {
- dev_err(dev, "idr_get_new_above failed: %d\n", err);
+ id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "idr_alloc failed: %d\n", id);
goto free_ept;
}
-
- /* make sure the user's address request is fulfilled, if relevant */
- if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
- dev_err(dev, "address 0x%x already in use\n", addr);
- goto rem_idr;
- }
-
- ept->addr = tmpaddr;
+ ept->addr = id;
mutex_unlock(&vrp->endpoints_lock);
return ept;
-rem_idr:
- idr_remove(&vrp->endpoints, request);
free_ept:
mutex_unlock(&vrp->endpoints_lock);
kref_put(&ept->refcount, __ept_release);
@@ -1036,7 +1030,6 @@ static void rpmsg_remove(struct virtio_device *vdev)
if (vrp->ns_ept)
__rpmsg_destroy_ept(vrp, vrp->ns_ept);
- idr_remove_all(&vrp->endpoints);
idr_destroy(&vrp->endpoints);
vdev->config->del_vqs(vrp->vdev);
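Editor's note: one detail of the rpmsg conversion above that is easy to miss is that idr_alloc()'s upper bound is exclusive, which is why claiming one specific endpoint address uses the window [addr, addr + 1). A short stand-alone sketch of that idiom (claim_fixed_id is an invented helper):

#include <linux/gfp.h>
#include <linux/idr.h>

/* Returns 'wanted' on success, -ENOSPC if that id is already in use. */
static int claim_fixed_id(struct idr *idr, void *ptr, int wanted)
{
        return idr_alloc(idr, ptr, wanted, wanted + 1, GFP_KERNEL);
}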
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e6ab071fb6fd..79fbe3832dfc 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -305,6 +305,16 @@ config RTC_DRV_X1205
This driver can also be built as a module. If so, the module
will be called rtc-x1205.
+config RTC_DRV_PALMAS
+ tristate "TI Palmas RTC driver"
+ depends on MFD_PALMAS
+ help
+ If you say yes here you get support for the RTC of TI Palmas series PMIC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-palmas.
+
config RTC_DRV_PCF8523
tristate "NXP PCF8523"
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index e8f2e2fee06f..c33f86f1a69b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
+obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
new file mode 100644
index 000000000000..59c42986254e
--- /dev/null
+++ b/drivers/rtc/rtc-palmas.c
@@ -0,0 +1,339 @@
+/*
+ * rtc-palmas.c -- Palmas Real Time Clock driver.
+ *
+ * RTC driver for TI Palmas series devices like TPS65913,
+ * TPS65914 power management IC.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/bcd.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/palmas.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+struct palmas_rtc {
+ struct rtc_device *rtc;
+ struct device *dev;
+ unsigned int irq;
+};
+
+/* Total number of RTC registers needed to set time */
+#define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1)
+
+static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ /* Copy RTC counting registers to static registers or latches */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_GET_TIME, PALMAS_RTC_CTRL_REG_GET_TIME);
+ if (ret < 0) {
+ dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
+ rtc_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_mday = bcd2bin(rtc_data[3]);
+ tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+
+ return ret;
+}
+
+static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
+
+ /* Stop RTC while updating the RTC time registers */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC, 0);
+ if (ret < 0) {
+ dev_err(dev, "RTC stop failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
+ rtc_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Restart the RTC */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC, PALMAS_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0)
+ dev_err(dev, "RTC start failed, err = %d\n", ret);
+ return ret;
+}
+
+static int palmas_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
+{
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ u8 val;
+
+ val = enabled ? PALMAS_RTC_INTERRUPTS_REG_IT_ALARM : 0;
+ return palmas_write(palmas, PALMAS_RTC_BASE,
+ PALMAS_RTC_INTERRUPTS_REG, val);
+}
+
+static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
+ u32 int_val;
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE,
+ PALMAS_ALARM_SECONDS_REG,
+ alarm_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_ALARM_SECONDS read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ alm->time.tm_sec = bcd2bin(alarm_data[0]);
+ alm->time.tm_min = bcd2bin(alarm_data[1]);
+ alm->time.tm_hour = bcd2bin(alarm_data[2]);
+ alm->time.tm_mday = bcd2bin(alarm_data[3]);
+ alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
+ alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+
+ ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG,
+ &int_val);
+ if (ret < 0) {
+ dev_err(dev, "RTC_INTERRUPTS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ if (int_val & PALMAS_RTC_INTERRUPTS_REG_IT_ALARM)
+ alm->enabled = 1;
+ return ret;
+}
+
+static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = palmas_rtc_alarm_irq_enable(dev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Disable RTC alarm failed\n");
+ return ret;
+ }
+
+ alarm_data[0] = bin2bcd(alm->time.tm_sec);
+ alarm_data[1] = bin2bcd(alm->time.tm_min);
+ alarm_data[2] = bin2bcd(alm->time.tm_hour);
+ alarm_data[3] = bin2bcd(alm->time.tm_mday);
+ alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+
+ ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE,
+ PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "ALARM_SECONDS_REG write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ if (alm->enabled)
+ ret = palmas_rtc_alarm_irq_enable(dev, 1);
+ return ret;
+}
+
+static int palmas_clear_interrupts(struct device *dev)
+{
+ struct palmas *palmas = dev_get_drvdata(dev->parent);
+ unsigned int rtc_reg;
+ int ret;
+
+ ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
+ &rtc_reg);
+ if (ret < 0) {
+ dev_err(dev, "RTC_STATUS read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
+ rtc_reg);
+ if (ret < 0) {
+ dev_err(dev, "RTC_STATUS write failed, err = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
+{
+ struct palmas_rtc *palmas_rtc = context;
+ struct device *dev = palmas_rtc->dev;
+ int ret;
+
+ ret = palmas_clear_interrupts(dev);
+ if (ret < 0) {
+ dev_err(dev, "RTC interrupt clear failed, err = %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ rtc_update_irq(palmas_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static struct rtc_class_ops palmas_rtc_ops = {
+ .read_time = palmas_rtc_read_time,
+ .set_time = palmas_rtc_set_time,
+ .read_alarm = palmas_rtc_read_alarm,
+ .set_alarm = palmas_rtc_set_alarm,
+ .alarm_irq_enable = palmas_rtc_alarm_irq_enable,
+};
+
+static int palmas_rtc_probe(struct platform_device *pdev)
+{
+ struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+ struct palmas_rtc *palmas_rtc = NULL;
+ int ret;
+
+ palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
+ GFP_KERNEL);
+ if (!palmas_rtc)
+ return -ENOMEM;
+
+ /* Clear pending interrupts */
+ ret = palmas_clear_interrupts(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clear RTC int failed, err = %d\n", ret);
+ return ret;
+ }
+
+ palmas_rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, palmas_rtc);
+
+ /* Start RTC */
+ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
+ PALMAS_RTC_CTRL_REG_STOP_RTC,
+ PALMAS_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RTC_CTRL write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ palmas_rtc->irq = platform_get_irq(pdev, 0);
+
+ palmas_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &palmas_rtc_ops, THIS_MODULE);
+ if (IS_ERR(palmas_rtc->rtc)) {
+ ret = PTR_ERR(palmas_rtc->rtc);
+ dev_err(&pdev->dev, "RTC register failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = request_threaded_irq(palmas_rtc->irq, NULL,
+ palmas_rtc_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_EARLY_RESUME,
+ dev_name(&pdev->dev), palmas_rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret);
+ rtc_device_unregister(palmas_rtc->rtc);
+ return ret;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ return 0;
+}
+
+static int palmas_rtc_remove(struct platform_device *pdev)
+{
+ struct palmas_rtc *palmas_rtc = platform_get_drvdata(pdev);
+
+ palmas_rtc_alarm_irq_enable(&pdev->dev, 0);
+ free_irq(palmas_rtc->irq, palmas_rtc);
+ rtc_device_unregister(palmas_rtc->rtc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int palmas_rtc_suspend(struct device *dev)
+{
+ struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(palmas_rtc->irq);
+ return 0;
+}
+
+static int palmas_rtc_resume(struct device *dev)
+{
+ struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(palmas_rtc->irq);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops palmas_rtc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(palmas_rtc_suspend, palmas_rtc_resume)
+};
+
+static struct platform_driver palmas_rtc_driver = {
+ .probe = palmas_rtc_probe,
+ .remove = palmas_rtc_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "palmas-rtc",
+ .pm = &palmas_rtc_pm_ops,
+ },
+};
+
+module_platform_driver(palmas_rtc_driver);
+
+MODULE_ALIAS("platform:palmas_rtc");
+MODULE_DESCRIPTION("TI PALMAS series RTC driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
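Editor's note: the Palmas RTC above stores each time field as packed BCD and counts years from 2000, while struct rtc_time counts years from 1900, hence the bcd2bin()/+100 and -100/bin2bcd() pairs in read_time/set_time. The standalone illustration below re-implements the helpers the way <linux/bcd.h> defines them so it runs in user space; the 0x13 register value is just an example.

#include <stdio.h>

static unsigned int bcd2bin(unsigned char val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
        return ((val / 10) << 4) + val % 10;
}

int main(void)
{
        unsigned char year_reg = 0x13;          /* RTC register value for 2013 */
        int tm_year = bcd2bin(year_reg) + 100;  /* years since 1900 -> 113 */

        printf("tm_year = %d (i.e. %d)\n", tm_year, 1900 + tm_year);
        printf("back to register: 0x%02x\n", bin2bcd(tm_year - 100));
        return 0;
}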
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 230697aac94b..96e52bf75930 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -433,9 +433,9 @@ fs3270_open(struct inode *inode, struct file *filp)
struct idal_buffer *ib;
int minor, rc = 0;
- if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
+ if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
return -ENODEV;
- minor = iminor(filp->f_path.dentry->d_inode);
+ minor = iminor(file_inode(filp));
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
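Editor's note: the fs3270 hunk above, and the tape_char, vmur, qdio_debug, display7seg and 3ware hunks that follow, all make the same mechanical substitution: file->f_path.dentry->d_inode (or the older f_dentry spelling) becomes file_inode(file), the accessor introduced for exactly this purpose. A hedged sketch of the post-conversion idiom in a character-device open handler; my_char_major and my_open are invented for the example.

#include <linux/fs.h>

static int my_char_major = 240;     /* hypothetical major number for the sketch */

static int my_open(struct inode *inode, struct file *filp)
{
        int minor;

        /* file_inode() replaces filp->f_path.dentry->d_inode. */
        if (imajor(file_inode(filp)) != my_char_major)
                return -ENODEV;

        minor = iminor(file_inode(filp));
        filp->private_data = (void *)(long)minor;
        return 0;
}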
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 2d61db3fc62a..6dc60725de92 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -273,13 +273,13 @@ tapechar_open (struct inode *inode, struct file *filp)
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
- imajor(filp->f_path.dentry->d_inode),
- iminor(filp->f_path.dentry->d_inode));
+ imajor(file_inode(filp)),
+ iminor(file_inode(filp)));
- if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
+ if (imajor(file_inode(filp)) != tapechar_major)
return -ENODEV;
- minor = iminor(filp->f_path.dentry->d_inode);
+ minor = iminor(file_inode(filp));
device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 483f72ba030d..c180e3135b3b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -703,7 +703,7 @@ static int ur_open(struct inode *inode, struct file *file)
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
*/
- devno = MINOR(file->f_dentry->d_inode->i_rdev);
+ devno = MINOR(file_inode(file)->i_rdev);
urd = urdev_get_from_devno(devno);
if (!urd) {
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e6e0d31c02ac..749b72739c4a 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -128,7 +128,7 @@ static int qstat_show(struct seq_file *m, void *v)
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static const struct file_operations debugfs_fops = {
@@ -221,7 +221,7 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
static int qperf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qperf_show,
- filp->f_path.dentry->d_inode->i_private);
+ file_inode(filp)->i_private);
}
static struct file_operations debugfs_perf_fops = {
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
index 0815690ac1e0..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/kvm/Makefile
@@ -6,4 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
+obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 8491111aec12..6711e65764b5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -275,7 +275,7 @@ static const char *kvm_bus_name(struct virtio_device *vdev)
/*
* The config ops structure as defined by virtio config
*/
-static struct virtio_config_ops kvm_vq_configspace_ops = {
+static const struct virtio_config_ops kvm_vq_configspace_ops = {
.get_features = kvm_get_features,
.finalize_features = kvm_finalize_features,
.get = kvm_get,
@@ -422,6 +422,26 @@ static void kvm_extint_handler(struct ext_code ext_code,
}
/*
+ * For s390-virtio, we expect a page above main storage containing
+ * the virtio configuration. Try to actually load from this area
+ * in order to figure out if the host provides this page.
+ */
+static int __init test_devices_support(unsigned long addr)
+{
+ int ret = -EIO;
+
+ asm volatile(
+ "0: lura 0,%1\n"
+ "1: xgr %0,%0\n"
+ "2:\n"
+ EX_TABLE(0b,2b)
+ EX_TABLE(1b,2b)
+ : "+d" (ret)
+ : "a" (addr)
+ : "0", "cc");
+ return ret;
+}
+/*
* Init function for virtio
* devices are in a single page above top of "normal" mem
*/
@@ -432,21 +452,23 @@ static int __init kvm_devices_init(void)
if (!MACHINE_IS_KVM)
return -ENODEV;
+ if (test_devices_support(real_memory_size) < 0)
+ return -ENODEV;
+
+ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+ if (rc)
+ return rc;
+
+ kvm_devices = (void *) real_memory_size;
+
kvm_root = root_device_register("kvm_s390");
if (IS_ERR(kvm_root)) {
rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
+ vmem_remove_mapping(real_memory_size, PAGE_SIZE);
return rc;
}
- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
- if (rc) {
- root_device_unregister(kvm_root);
- return rc;
- }
-
- kvm_devices = (void *) real_memory_size;
-
INIT_WORK(&hotplug_work, hotplug_devices);
service_subclass_irq_register();
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
new file mode 100644
index 000000000000..2029b6caa595
--- /dev/null
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -0,0 +1,926 @@
+/*
+ * ccw based virtio transport
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/pfn.h>
+#include <linux/async.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/kvm_para.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+
+/*
+ * virtio related functions
+ */
+
+struct vq_config_block {
+ __u16 index;
+ __u16 num;
+} __packed;
+
+#define VIRTIO_CCW_CONFIG_SIZE 0x100
+/* same as PCI config space size, should be enough for all drivers */
+
+struct virtio_ccw_device {
+ struct virtio_device vdev;
+ __u8 *status;
+ __u8 config[VIRTIO_CCW_CONFIG_SIZE];
+ struct ccw_device *cdev;
+ __u32 curr_io;
+ int err;
+ wait_queue_head_t wait_q;
+ spinlock_t lock;
+ struct list_head virtqueues;
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block *config_block;
+};
+
+struct vq_info_block {
+ __u64 queue;
+ __u32 align;
+ __u16 index;
+ __u16 num;
+} __packed;
+
+struct virtio_feature_desc {
+ __u32 features;
+ __u8 index;
+} __packed;
+
+struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+ int num;
+ void *queue;
+ struct vq_info_block *info_block;
+ struct list_head node;
+};
+
+#define KVM_VIRTIO_CCW_RING_ALIGN 4096
+
+#define KVM_S390_VIRTIO_CCW_NOTIFY 3
+
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_READ_VQ_CONF 0x32
+
+#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
+#define VIRTIO_CCW_DOING_RESET 0x00040000
+#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
+#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
+#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
+#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
+#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
+#define VIRTIO_CCW_DOING_SET_IND 0x01000000
+#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
+#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
+
+static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
+{
+ return container_of(vdev, struct virtio_ccw_device, vdev);
+}
+
+static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
+{
+ unsigned long flags;
+ __u32 ret;
+
+ spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ if (vcdev->err)
+ ret = 0;
+ else
+ ret = vcdev->curr_io & flag;
+ spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+ return ret;
+}
+
+static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw, __u32 intparm)
+{
+ int ret;
+ unsigned long flags;
+ int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
+ do {
+ spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+ if (!ret)
+ vcdev->curr_io |= flag;
+ spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+ cpu_relax();
+ } while (ret == -EBUSY);
+ wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+ return ret ? ret : vcdev->err;
+}
+
+static inline long do_kvm_notify(struct subchannel_id schid,
+ unsigned long queue_index)
+{
+ register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
+ register struct subchannel_id __schid asm("2") = schid;
+ register unsigned long __index asm("3") = queue_index;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index)
+ : "memory", "cc");
+ return __rc;
+}
+
+static void virtio_ccw_kvm_notify(struct virtqueue *vq)
+{
+ struct virtio_ccw_vq_info *info = vq->priv;
+ struct virtio_ccw_device *vcdev;
+ struct subchannel_id schid;
+
+ vcdev = to_vc_device(info->vq->vdev);
+ ccw_device_get_schid(vcdev->cdev, &schid);
+ do_kvm_notify(schid, virtqueue_get_queue_index(vq));
+}
+
+static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw, int index)
+{
+ vcdev->config_block->index = index;
+ ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
+ ccw->flags = 0;
+ ccw->count = sizeof(struct vq_config_block);
+ ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+ return vcdev->config_block->num;
+}
+
+static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
+ struct virtio_ccw_vq_info *info = vq->priv;
+ unsigned long flags;
+ unsigned long size;
+ int ret;
+ unsigned int index = virtqueue_get_queue_index(vq);
+
+ /* Remove from our list. */
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+
+ /* Release from host. */
+ info->info_block->queue = 0;
+ info->info_block->align = 0;
+ info->info_block->index = index;
+ info->info_block->num = 0;
+ ccw->cmd_code = CCW_CMD_SET_VQ;
+ ccw->flags = 0;
+ ccw->count = sizeof(*info->info_block);
+ ccw->cda = (__u32)(unsigned long)(info->info_block);
+ ret = ccw_io_helper(vcdev, ccw,
+ VIRTIO_CCW_DOING_SET_VQ | index);
+ /*
+ * -ENODEV isn't considered an error: The device is gone anyway.
+ * This may happen on device detach.
+ */
+ if (ret && (ret != -ENODEV))
+ dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+ ret, index);
+
+ vring_del_virtqueue(vq);
+ size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+ free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ kfree(info);
+}
+
+static void virtio_ccw_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_ccw_del_vq(vq, ccw);
+
+ kfree(ccw);
+}
+
+static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
+ int i, vq_callback_t *callback,
+ const char *name,
+ struct ccw1 *ccw)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ int err;
+ struct virtqueue *vq = NULL;
+ struct virtio_ccw_vq_info *info;
+ unsigned long size = 0; /* silence the compiler */
+ unsigned long flags;
+
+ /* Allocate queue. */
+ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
+ if (!info) {
+ dev_warn(&vcdev->cdev->dev, "no info\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+ info->info_block = kzalloc(sizeof(*info->info_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!info->info_block) {
+ dev_warn(&vcdev->cdev->dev, "no info block\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+ info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+ size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+ info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+ if (info->queue == NULL) {
+ dev_warn(&vcdev->cdev->dev, "no queue\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
+ true, info->queue, virtio_ccw_kvm_notify,
+ callback, name);
+ if (!vq) {
+ /* For now, we fail if we can't get the requested size. */
+ dev_warn(&vcdev->cdev->dev, "no vq\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Register it with the host. */
+ info->info_block->queue = (__u64)info->queue;
+ info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
+ info->info_block->index = i;
+ info->info_block->num = info->num;
+ ccw->cmd_code = CCW_CMD_SET_VQ;
+ ccw->flags = 0;
+ ccw->count = sizeof(*info->info_block);
+ ccw->cda = (__u32)(unsigned long)(info->info_block);
+ err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
+ if (err) {
+ dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
+ goto out_err;
+ }
+
+ info->vq = vq;
+ vq->priv = info;
+
+ /* Save it to our list. */
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_add(&info->node, &vcdev->virtqueues);
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+
+ return vq;
+
+out_err:
+ if (vq)
+ vring_del_virtqueue(vq);
+ if (info) {
+ if (info->queue)
+ free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ }
+ kfree(info);
+ return ERR_PTR(err);
+}
+
+static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ unsigned long *indicatorp = NULL;
+ int ret, i;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return -ENOMEM;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
+ ccw);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ vqs[i] = NULL;
+ goto out;
+ }
+ }
+ ret = -ENOMEM;
+ /* We need a data area under 2G to communicate. */
+ indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ goto out;
+ *indicatorp = (unsigned long) &vcdev->indicators;
+ /* Register queue indicators with host. */
+ vcdev->indicators = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+ if (ret)
+ goto out;
+ /* Register indicators2 with host for config changes */
+ *indicatorp = (unsigned long) &vcdev->indicators2;
+ vcdev->indicators2 = 0;
+ ccw->cmd_code = CCW_CMD_SET_CONF_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators2);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
+ if (ret)
+ goto out;
+
+ kfree(indicatorp);
+ kfree(ccw);
+ return 0;
+out:
+ kfree(indicatorp);
+ kfree(ccw);
+ virtio_ccw_del_vqs(vdev);
+ return ret;
+}
+
+static void virtio_ccw_reset(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ /* Zero status bits. */
+ *vcdev->status = 0;
+
+ /* Send a reset ccw on device. */
+ ccw->cmd_code = CCW_CMD_VDEV_RESET;
+ ccw->flags = 0;
+ ccw->count = 0;
+ ccw->cda = 0;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
+ kfree(ccw);
+}
+
+static u32 virtio_ccw_get_features(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct virtio_feature_desc *features;
+ int ret, rc;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return 0;
+
+ features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ if (!features) {
+ rc = 0;
+ goto out_free;
+ }
+ /* Read the feature bits from the host. */
+ /* TODO: Features > 32 bits */
+ features->index = 0;
+ ccw->cmd_code = CCW_CMD_READ_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+ if (ret) {
+ rc = 0;
+ goto out_free;
+ }
+
+ rc = le32_to_cpu(features->features);
+
+out_free:
+ kfree(features);
+ kfree(ccw);
+ return rc;
+}
+
+static void virtio_ccw_finalize_features(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct virtio_feature_desc *features;
+ int i;
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ if (!features)
+ goto out_free;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
+ i++) {
+ int highbits = i % 2 ? 32 : 0;
+ features->index = i;
+ features->features = cpu_to_le32(vdev->features[i / 2]
+ >> highbits);
+ /* Write the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+ }
+out_free:
+ kfree(features);
+ kfree(ccw);
+}
+
+static void virtio_ccw_get_config(struct virtio_device *vdev,
+ unsigned int offset, void *buf, unsigned len)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ int ret;
+ struct ccw1 *ccw;
+ void *config_area;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!config_area)
+ goto out_free;
+
+ /* Read the config area from the host. */
+ ccw->cmd_code = CCW_CMD_READ_CONF;
+ ccw->flags = 0;
+ ccw->count = offset + len;
+ ccw->cda = (__u32)(unsigned long)config_area;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
+ if (ret)
+ goto out_free;
+
+ memcpy(vcdev->config, config_area, sizeof(vcdev->config));
+ memcpy(buf, &vcdev->config[offset], len);
+
+out_free:
+ kfree(config_area);
+ kfree(ccw);
+}
+
+static void virtio_ccw_set_config(struct virtio_device *vdev,
+ unsigned int offset, const void *buf,
+ unsigned len)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+ void *config_area;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!config_area)
+ goto out_free;
+
+ memcpy(&vcdev->config[offset], buf, len);
+ /* Write the config area to the host. */
+ memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+ ccw->cmd_code = CCW_CMD_WRITE_CONF;
+ ccw->flags = 0;
+ ccw->count = offset + len;
+ ccw->cda = (__u32)(unsigned long)config_area;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
+
+out_free:
+ kfree(config_area);
+ kfree(ccw);
+}
+
+static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+ return *vcdev->status;
+}
+
+static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return;
+
+ /* Write the status to the host. */
+ *vcdev->status = status;
+ ccw->cmd_code = CCW_CMD_WRITE_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(status);
+ ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ kfree(ccw);
+}
+
+static struct virtio_config_ops virtio_ccw_config_ops = {
+ .get_features = virtio_ccw_get_features,
+ .finalize_features = virtio_ccw_finalize_features,
+ .get = virtio_ccw_get_config,
+ .set = virtio_ccw_set_config,
+ .get_status = virtio_ccw_get_status,
+ .set_status = virtio_ccw_set_status,
+ .reset = virtio_ccw_reset,
+ .find_vqs = virtio_ccw_find_vqs,
+ .del_vqs = virtio_ccw_del_vqs,
+};
+
+
+/*
+ * ccw bus driver related functions
+ */
+
+static void virtio_ccw_release_dev(struct device *_d)
+{
+ struct virtio_device *dev = container_of(_d, struct virtio_device,
+ dev);
+ struct virtio_ccw_device *vcdev = to_vc_device(dev);
+
+ kfree(vcdev->status);
+ kfree(vcdev->config_block);
+ kfree(vcdev);
+}
+
+static int irb_is_error(struct irb *irb)
+{
+ if (scsw_cstat(&irb->scsw) != 0)
+ return 1;
+ if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return 1;
+ if (scsw_cc(&irb->scsw) != 0)
+ return 1;
+ return 0;
+}
+
+static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
+ int index)
+{
+ struct virtio_ccw_vq_info *info;
+ unsigned long flags;
+ struct virtqueue *vq;
+
+ vq = NULL;
+ spin_lock_irqsave(&vcdev->lock, flags);
+ list_for_each_entry(info, &vcdev->virtqueues, node) {
+ if (virtqueue_get_queue_index(info->vq) == index) {
+ vq = info->vq;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+ return vq;
+}
+
+static void virtio_ccw_int_handler(struct ccw_device *cdev,
+ unsigned long intparm,
+ struct irb *irb)
+{
+ __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ int i;
+ struct virtqueue *vq;
+ struct virtio_driver *drv;
+
+ /* Check if it's a notification from the host. */
+ if ((intparm == 0) &&
+ (scsw_stctl(&irb->scsw) ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
+ /* OK */
+ }
+ if (irb_is_error(irb))
+ vcdev->err = -EIO; /* XXX - use real error */
+ if (vcdev->curr_io & activity) {
+ switch (activity) {
+ case VIRTIO_CCW_DOING_READ_FEAT:
+ case VIRTIO_CCW_DOING_WRITE_FEAT:
+ case VIRTIO_CCW_DOING_READ_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_SET_VQ:
+ case VIRTIO_CCW_DOING_SET_IND:
+ case VIRTIO_CCW_DOING_SET_CONF_IND:
+ case VIRTIO_CCW_DOING_RESET:
+ case VIRTIO_CCW_DOING_READ_VQ_CONF:
+ vcdev->curr_io &= ~activity;
+ wake_up(&vcdev->wait_q);
+ break;
+ default:
+ /* don't know what to do... */
+ dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
+ activity);
+ WARN_ON(1);
+ break;
+ }
+ }
+ for_each_set_bit(i, &vcdev->indicators,
+ sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ /* The bit clear must happen before the vring kick. */
+ clear_bit(i, &vcdev->indicators);
+ barrier();
+ vq = virtio_ccw_vq_by_ind(vcdev, i);
+ vring_interrupt(0, vq);
+ }
+ if (test_bit(0, &vcdev->indicators2)) {
+ drv = container_of(vcdev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv && drv->config_changed)
+ drv->config_changed(&vcdev->vdev);
+ clear_bit(0, &vcdev->indicators2);
+ }
+}
+
+/*
+ * We usually want to auto-online all devices, but give the admin
+ * a way to exempt devices from this.
+ */
+#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+ (8*sizeof(long)))
+static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
+
+static char *no_auto = "";
+
+module_param(no_auto, charp, 0444);
+MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
+
+static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
+{
+ struct ccw_dev_id id;
+
+ ccw_device_get_id(cdev, &id);
+ if (test_bit(id.devno, devs_no_auto[id.ssid]))
+ return 0;
+ return 1;
+}
+
+static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
+{
+ struct ccw_device *cdev = data;
+ int ret;
+
+ ret = ccw_device_set_online(cdev);
+ if (ret)
+ dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
+}
+
+static int virtio_ccw_probe(struct ccw_device *cdev)
+{
+ cdev->handler = virtio_ccw_int_handler;
+
+ if (virtio_ccw_check_autoonline(cdev))
+ async_schedule(virtio_ccw_auto_online, cdev);
+ return 0;
+}
+
+static void virtio_ccw_remove(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ if (cdev->online) {
+ unregister_virtio_device(&vcdev->vdev);
+ dev_set_drvdata(&cdev->dev, NULL);
+ }
+ cdev->handler = NULL;
+}
+
+static int virtio_ccw_offline(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ unregister_virtio_device(&vcdev->vdev);
+ dev_set_drvdata(&cdev->dev, NULL);
+ return 0;
+}
+
+
+static int virtio_ccw_online(struct ccw_device *cdev)
+{
+ int ret;
+ struct virtio_ccw_device *vcdev;
+
+ vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
+ if (!vcdev) {
+ dev_warn(&cdev->dev, "Could not get memory for virtio\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!vcdev->config_block) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
+ if (!vcdev->status) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ vcdev->vdev.dev.parent = &cdev->dev;
+ vcdev->vdev.dev.release = virtio_ccw_release_dev;
+ vcdev->vdev.config = &virtio_ccw_config_ops;
+ vcdev->cdev = cdev;
+ init_waitqueue_head(&vcdev->wait_q);
+ INIT_LIST_HEAD(&vcdev->virtqueues);
+ spin_lock_init(&vcdev->lock);
+
+ dev_set_drvdata(&cdev->dev, vcdev);
+ vcdev->vdev.id.vendor = cdev->id.cu_type;
+ vcdev->vdev.id.device = cdev->id.cu_model;
+ ret = register_virtio_device(&vcdev->vdev);
+ if (ret) {
+ dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
+ ret);
+ goto out_put;
+ }
+ return 0;
+out_put:
+ dev_set_drvdata(&cdev->dev, NULL);
+ put_device(&vcdev->vdev.dev);
+ return ret;
+out_free:
+ if (vcdev) {
+ kfree(vcdev->status);
+ kfree(vcdev->config_block);
+ }
+ kfree(vcdev);
+ return ret;
+}
+
+static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
+{
+ /* TODO: Check whether we need special handling here. */
+ return 0;
+}
+
+static struct ccw_device_id virtio_ids[] = {
+ { CCW_DEVICE(0x3832, 0) },
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, virtio_ids);
+
+static struct ccw_driver virtio_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "virtio_ccw",
+ },
+ .ids = virtio_ids,
+ .probe = virtio_ccw_probe,
+ .remove = virtio_ccw_remove,
+ .set_offline = virtio_ccw_offline,
+ .set_online = virtio_ccw_online,
+ .notify = virtio_ccw_cio_notify,
+ .int_class = IRQIO_VIR,
+};
+
+static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
+ int max_digit, int max_val)
+{
+ int diff;
+
+ diff = 0;
+ *val = 0;
+
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
+
+ if (value < 0)
+ break;
+ *val = *val * 16 + value;
+ (*cp)++;
+ diff++;
+ }
+
+ if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+ return 1;
+
+ return 0;
+}
+
+static int __init parse_busid(char *str, unsigned int *cssid,
+ unsigned int *ssid, unsigned int *devno)
+{
+ char *str_work;
+ int rc, ret;
+
+ rc = 1;
+
+ if (*str == '\0')
+ goto out;
+
+ str_work = str;
+ ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+ if (ret || (str_work[0] != '\0'))
+ goto out;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static void __init no_auto_parse(void)
+{
+ unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+ char *parm, *str;
+ int rc;
+
+ str = no_auto;
+ while ((parm = strsep(&str, ","))) {
+ rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+ &from_ssid, &from);
+ if (rc)
+ continue;
+ if (parm != NULL) {
+ rc = parse_busid(parm, &to_cssid,
+ &to_ssid, &to);
+ if ((from_ssid > to_ssid) ||
+ ((from_ssid == to_ssid) && (from > to)))
+ rc = -EINVAL;
+ } else {
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ }
+ if (rc)
+ continue;
+ while ((from_ssid < to_ssid) ||
+ ((from_ssid == to_ssid) && (from <= to))) {
+ set_bit(from, devs_no_auto[from_ssid]);
+ from++;
+ if (from > __MAX_SUBCHANNEL) {
+ from_ssid++;
+ from = 0;
+ }
+ }
+ }
+}
+
+static int __init virtio_ccw_init(void)
+{
+ /* parse no_auto string before we do anything further */
+ no_auto_parse();
+ return ccw_driver_register(&virtio_ccw_driver);
+}
+module_init(virtio_ccw_init);
+
+static void __exit virtio_ccw_exit(void)
+{
+ ccw_driver_unregister(&virtio_ccw_driver);
+}
+module_exit(virtio_ccw_exit);
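Editor's note: ccw_io_helper() above starts the channel program asynchronously, records a VIRTIO_CCW_DOING_* bit under the ccw device lock, and then sleeps in wait_event() until virtio_ccw_int_handler() clears that bit and calls wake_up(). Stripped of the s390 specifics, the same "flag under a lock plus waitqueue" shape looks roughly like the sketch below; all foo_* names are invented, and the structure is assumed to have been set up with spin_lock_init() and init_waitqueue_head().

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct foo_dev {
        spinlock_t lock;
        unsigned long busy;             /* one bit per outstanding operation */
        wait_queue_head_t wait_q;
};

static bool foo_idle(struct foo_dev *fd, unsigned long flag)
{
        unsigned long flags;
        bool idle;

        spin_lock_irqsave(&fd->lock, flags);
        idle = !(fd->busy & flag);
        spin_unlock_irqrestore(&fd->lock, flags);
        return idle;
}

/* Submission path: mark the operation pending, then sleep until it is done. */
static void foo_run(struct foo_dev *fd, unsigned long flag)
{
        unsigned long flags;

        spin_lock_irqsave(&fd->lock, flags);
        fd->busy |= flag;
        /* ...kick the hardware here... */
        spin_unlock_irqrestore(&fd->lock, flags);

        wait_event(fd->wait_q, foo_idle(fd, flag));
}

/* Interrupt path: clear the flag and wake the submitter. */
static void foo_irq(struct foo_dev *fd, unsigned long flag)
{
        unsigned long flags;

        spin_lock_irqsave(&fd->lock, flags);
        fd->busy &= ~flag;
        spin_unlock_irqrestore(&fd->lock, flags);
        wake_up(&fd->wait_q);
}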
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index e85c803b30cd..fc1339cf91ac 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -107,7 +107,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int error = 0;
u8 ireg = 0;
- if (D7S_MINOR != iminor(file->f_path.dentry->d_inode))
+ if (D7S_MINOR != iminor(file_inode(file)))
return -ENODEV;
mutex_lock(&d7s_mutex);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index d1f0120cdb98..5e1e12c0cf42 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -640,7 +640,7 @@ out:
/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
long timeout;
unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
dma_addr_t dma_handle;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 52a2f0580d97..c845bdbeb6c0 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
dma_addr_t dma_handle;
int request_id = 0;
TW_Ioctl_Driver_Command driver_command;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
TW_Ioctl_Buf_Apache *tw_ioctl;
TW_Command_Full *full_command_packet;
TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 62071d2fc1ce..56662ae03dea 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -889,7 +889,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
unsigned long flags;
unsigned int data_buffer_length = 0;
unsigned long data_buffer_length_adjusted = 0;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned long *cpu_addr;
long timeout;
TW_New_Ioctl *tw_ioctl;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 742f5d7eb0f5..a6f7190c09a4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,13 +12,13 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29801
+# define AAC_DRIVER_BUILD 30000
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
#define AAC_NUM_MGT_FIB 8
-#define AAC_NUM_IO_FIB (512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
#define AAC_MAX_LUN (8)
@@ -36,6 +36,10 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define PMC_DEVICE_S7 0x28c
+#define PMC_DEVICE_S8 0x28d
+#define PMC_DEVICE_S9 0x28f
+
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8e5d3be16127..3f759957f4b4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+ (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+ else
+ host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
dev->max_num_aif = status[4] & 0xFFFF;
/*
* NOTE:
@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
+ if (host->can_queue > AAC_NUM_IO_FIB)
+ host->can_queue = AAC_NUM_IO_FIB;
+
/*
* Ok now init the communication subsystem
*/
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b021ec63255..e2e349204e7d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
fib->hw_fib_va->header.StructType = FIB_MAGIC2;
fib->hw_fib_va->header.SenderFibAddress = (u32)address;
fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON((u32)(address >> 32) != 0L);
+ BUG_ON(upper_32_bits(address) != 0L);
address |= fibsize;
} else {
/* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
address |= fibsize;
}
- src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
return 0;
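Editor's note: the aacraid change above replaces open-coded "(address >> 32)" with upper_32_bits(), which also stays well defined if the value ever happens to be a 32-bit type. A hedged illustration of splitting a 64-bit DMA address across a high/low register pair; the reg_hi/reg_lo parameters and write_dma_addr name are invented, upper_32_bits()/lower_32_bits() are the stock helpers from <linux/kernel.h>.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void write_dma_addr(void __iomem *reg_hi, void __iomem *reg_lo, u64 addr)
{
        /* The helpers avoid an undefined >> 32 on narrower types. */
        writel(upper_32_bits(addr), reg_hi);
        writel(lower_32_bits(addr), reg_lo);
}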
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e6bf12675db8..a5f7690e819e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
sizeof(driver_info.host_os_patch) - 1);
strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name - 1));
+ sizeof(driver_info.os_device_name) - 1);
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
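Editor's note: the bfad fix above is worth dwelling on. sizeof(driver_info.os_device_name - 1) measures the pointer the array decays to (typically 8 bytes), not the buffer, so the strncpy bound was wrong; sizeof(driver_info.os_device_name) - 1 is the intended length. A small standalone program showing the difference; the 128-byte buffer size is made up for the sketch.

#include <stdio.h>
#include <string.h>

struct driver_info {
        char os_device_name[128];
};

int main(void)
{
        struct driver_info d;

        printf("sizeof(d.os_device_name) - 1 = %zu\n",
               sizeof(d.os_device_name) - 1);   /* 127: the intended bound */
        printf("sizeof(d.os_device_name - 1) = %zu\n",
               sizeof(d.os_device_name - 1));   /* size of a char *, e.g. 8 */

        strncpy(d.os_device_name, "pci-0000:03:00.0",
                sizeof(d.os_device_name) - 1);
        d.os_device_name[sizeof(d.os_device_name) - 1] = '\0';
        return 0;
}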
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 8f92732655c7..5864f987f206 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -523,20 +523,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
int error = 1;
mutex_lock(&bfad_mutex);
- if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+ error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
+ if (error < 0) {
mutex_unlock(&bfad_mutex);
- printk(KERN_WARNING "idr_pre_get failure\n");
+ printk(KERN_WARNING "idr_alloc failure\n");
goto out;
}
-
- error = idr_get_new(&bfad_im_port_index, im_port,
- &im_port->idr_id);
- if (error) {
- mutex_unlock(&bfad_mutex);
- printk(KERN_WARNING "idr_get_new failure\n");
- goto out;
- }
-
+ im_port->idr_id = error;
mutex_unlock(&bfad_mutex);
im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3486845ba301..50fcd018d14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.12"
+#define BNX2FC_VERSION "1.0.13"
#define PFX "bnx2fc: "
@@ -156,6 +156,18 @@
#define BNX2FC_RELOGIN_WAIT_TIME 200
#define BNX2FC_RELOGIN_WAIT_CNT 10
+#define BNX2FC_STATS(hba, stat, cnt) \
+ do { \
+ u32 val; \
+ \
+ val = fw_stats->stat.cnt; \
+ if (hba->prev_stats.stat.cnt <= val) \
+ val -= hba->prev_stats.stat.cnt; \
+ else \
+ val += (0xfffffff - hba->prev_stats.stat.cnt); \
+ hba->bfw_stats.cnt += val; \
+ } while (0)
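Editor's note: BNX2FC_STATS() above accumulates deltas of 32-bit firmware counters into 64-bit totals so the host-side statistics survive counter wrap between polls. For reference, a common alternative formulation of the same idea relies on plain unsigned 32-bit subtraction, whose modular arithmetic already yields the correct delta across a wrap; the standalone program below (accumulate() is an invented helper) demonstrates it.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit hardware counter that may wrap into a 64-bit running total. */
static void accumulate(uint64_t *total, uint32_t *prev, uint32_t cur)
{
        uint32_t delta = cur - *prev;   /* modular arithmetic handles the wrap */

        *total += delta;
        *prev = cur;
}

int main(void)
{
        uint64_t total = 0;
        uint32_t prev = 0;

        accumulate(&total, &prev, 0xfffffff0u); /* first poll */
        accumulate(&total, &prev, 0x00000010u); /* counter wrapped since then */
        printf("total = %llu\n", (unsigned long long)total); /* 4294967312 */
        return 0;
}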
+
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
spinlock_t fp_work_lock;
};
+struct bnx2fc_fw_stats {
+ u64 fc_crc_cnt;
+ u64 fcoe_tx_pkt_cnt;
+ u64 fcoe_rx_pkt_cnt;
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_rx_byte_cnt;
+};
+
struct bnx2fc_hba {
struct list_head list;
struct cnic_dev *cnic;
@@ -207,6 +227,8 @@ struct bnx2fc_hba {
struct bnx2fc_rport **tgt_ofld_list;
/* statistics */
+ struct bnx2fc_fw_stats bfw_stats;
+ struct fcoe_statistics_params prev_stats;
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
@@ -280,6 +302,7 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
#define BNX2FC_FLAG_EXPL_LOGO 0x8
#define BNX2FC_FLAG_DISABLE_FAILED 0x9
+#define BNX2FC_FLAG_ENABLED 0xa
u8 src_addr[ETH_ALEN];
u32 max_sqes;
@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 70ecd953a579..2daf4b0da434 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jun 04, 2012"
+#define DRV_MODULE_RELDATE "Dec 21, 2012"
static char version[] =
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device);
static int bnx2fc_enable(struct net_device *netdev);
static int bnx2fc_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
static void bnx2fc_recv_frame(struct sk_buff *skb);
static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = bnx2fc_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = bnx2fc_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
@@ -687,11 +652,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
return bnx2fc_stats;
}
- bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
- bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
- bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
- bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
- bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+ BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+ bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+ bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+ bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+ bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+ bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
bnx2fc_stats->dumped_frames = 0;
bnx2fc_stats->lip_count = 0;
@@ -700,6 +670,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
bnx2fc_stats->loss_of_signal_count = 0;
bnx2fc_stats->prim_seq_protocol_err_count = 0;
+ memcpy(&hba->prev_stats, hba->stats_buffer,
+ sizeof(struct fcoe_statistics_params));
return bnx2fc_stats;
}
@@ -734,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_interface *interface = port->priv;
- struct net_device *netdev = interface->netdev;
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_2500:
- lport->link_speed = FC_PORTSPEED_2GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- }
-}
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
@@ -820,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
@@ -864,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
@@ -923,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
if (link_possible && !bnx2fc_link_ok(lport)) {
- /* Reset max recv frame size to default */
- fc_set_mfs(lport, BNX2FC_MFS);
- /*
- * ctlr link up will only be handled during
- * enable to avoid sending discovery solicitation
- * on a stale vlan
- */
- if (interface->enabled)
- fcoe_ctlr_link_up(ctlr);
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery
+ * solicitation on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(ctlr);
+ };
} else if (fcoe_ctlr_link_down(ctlr)) {
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) =
- FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
- wait_for_upload = 1;
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ };
}
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1477,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port = lport_priv(lport);
port->lport = lport;
port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
@@ -1996,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2022,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev)
return rc;
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2048,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev)
}
/**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls bnx2fc_enable or bnx2fc_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = bnx2fc_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return bnx2fc_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return bnx2fc_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+enum bnx2fc_create_link_state {
+ BNX2FC_CREATE_LINK_DOWN,
+ BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * @buffer: The name of Ethernet interface to create on
- * @kp: The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*
* Returns: 0 for success
*/
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+ enum fip_state fip_mode,
+ enum bnx2fc_create_link_state link_state)
{
+ struct fcoe_ctlr_device *cdev;
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
@@ -2153,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this master N_port */
ctlr->lp = lport;
- if (!bnx2fc_link_ok(lport)) {
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ cdev->enabled = FCOE_CTLR_ENABLED;
+ else
+ cdev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP &&
+ !bnx2fc_link_ok(lport)) {
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
@@ -2161,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
BNX2FC_HBA_DBG(lport, "create: START DISC\n");
bnx2fc_start_disc(interface);
- interface->enabled = true;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ interface->enabled = true;
+
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
@@ -2187,6 +2204,37 @@ mod_err:
}
/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+ return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+ BNX2FC_CREATE_LINK_DOWN);
+}
+
+/**
* bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
* @cnic: Pointer to cnic device instance
@@ -2311,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = {
.name = {"bnx2fc"},
.attached = false,
.list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .alloc = bnx2fc_ctlr_alloc,
.match = bnx2fc_match,
.create = bnx2fc_create,
.destroy = bnx2fc_destroy,
@@ -2555,13 +2604,13 @@ module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
- .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+ .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
@@ -2660,7 +2709,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.can_queue = BNX2FC_CAN_QUEUE,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
- .max_sectors = 512,
+ .max_sectors = 1024,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
@@ -2668,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
.elsct_send = bnx2fc_elsct_send,
.fcp_abort_io = bnx2fc_abort_io,
.fcp_cleanup = bnx2fc_cleanup,
- .get_lesb = bnx2fc_get_lesb,
+ .get_lesb = fcoe_get_lesb,
.rport_event_callback = bnx2fc_rport_event_handler,
};
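
Both bnx2fc_indicate_netevent() above and fcoe_device_notification() later in this series gate link events on the fcoe_ctlr_device's enabled state: a DISABLED controller only logs the event, while ENABLED/UNUSED controllers propagate it to the FIP controller. A trivial userspace sketch of that gating, with illustrative names only:

```c
#include <stdio.h>

/* Mirror of the enabled-state gate; names are illustrative. */
enum ctlr_enabled_state { CTLR_UNUSED, CTLR_ENABLED, CTLR_DISABLED };

static void handle_link_up(enum ctlr_enabled_state enabled)
{
	switch (enabled) {
	case CTLR_DISABLED:
		/* a disabled controller only logs the event */
		printf("Link up while interface is disabled.\n");
		break;
	case CTLR_ENABLED:
	case CTLR_UNUSED:
		/* enabled (or legacy) controllers act on it */
		printf("propagating link up to the FIP controller\n");
		break;
	}
}

int main(void)
{
	handle_link_up(CTLR_DISABLED);
	handle_link_up(CTLR_ENABLED);
	return 0;
}
```
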
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index ef60afa94d0e..85ea98a80f40 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
* @port: port structure pointer
* @tgt: bnx2fc_rport structure pointer
*/
-static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct kwqe *kwqe_arr[2];
@@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
xid);
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
if (!test_bit(BNX2FC_FLAG_SRR_SENT,
@@ -847,8 +845,6 @@ ret_err_rqe:
goto ret_warn_rqe;
}
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
@@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
- int rc;
conn_id = ofld_kcqe->fcoe_conn_id;
context_id = ofld_kcqe->fcoe_conn_context_id;
@@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
"resources\n");
set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
}
- goto ofld_cmpl_err;
} else {
-
- /* now enable the session */
- rc = bnx2fc_send_session_enable_req(port, tgt);
- if (rc) {
- printk(KERN_ERR PFX "enable session failed\n");
- goto ofld_cmpl_err;
- }
+ /* FW offload request successfully completed */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
}
- return;
ofld_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
@@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
goto enbl_cmpl_err;
}
- if (ofld_kcqe->completion_status)
- goto enbl_cmpl_err;
- else {
+ if (!ofld_kcqe->completion_status)
/* enable successful - rport ready for issuing IOs */
- set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
- set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
- wake_up_interruptible(&tgt->ofld_wait);
- }
- return;
+ set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
enbl_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
/* disable successful */
BNX2FC_TGT_DBG(tgt, "disable successful\n");
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8d4626c07a12..60798e829de6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
&mp_req->mp_resp_bd_dma,
GFP_ATOMIC);
- if (!mp_req->mp_req_bd) {
+ if (!mp_req->mp_resp_bd) {
printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
bnx2fc_free_mp_resc(io_req);
return FAILED;
@@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
struct fc_lport *lport;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
@@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
unsigned long start = jiffies;
lport = shost_priv(host);
+ rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
rc = FAILED;
goto tmf_err;
}
+ rp = rport->dd_data;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index b9d0d9cb17f9..c57a3bb8a9fb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
}
@@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
* resources are freed up in bnx2fc_offload_session
*/
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
}
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->ofld_timer);
+}
+
static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
@@ -103,17 +119,7 @@ retry_ofld:
* wait for the session is offloaded and enabled. 3 Secs
* should be ample time for this process to complete.
*/
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
- mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->ofld_wait,
- (test_bit(
- BNX2FC_FLAG_OFLD_REQ_CMPL,
- &tgt->flags)));
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->ofld_timer);
+ bnx2fc_ofld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
@@ -131,14 +137,23 @@ retry_ofld:
}
if (bnx2fc_map_doorbell(tgt)) {
printk(KERN_ERR PFX "map doorbell failed - no mem\n");
- /* upload will take care of cleaning up sess resc */
- lport->tt.rport_logoff(rdata);
+ goto ofld_err;
}
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_enable_req(port, tgt);
+ if (rval) {
+ pr_err(PFX "enable session failed\n");
+ goto ofld_err;
+ }
+ bnx2fc_ofld_wait(tgt);
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+ goto ofld_err;
return;
ofld_err:
/* couldn't offload the session. log off from this rport */
BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
/* Free session resources */
bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
@@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
spin_unlock_bh(&tgt->tgt_lock);
}
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->upld_timer);
+}
+
static void bnx2fc_upload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
@@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
* wait for upload to complete. 3 Secs
* should be sufficient time for this process to complete.
*/
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
-
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
+ bnx2fc_upld_wait(tgt);
/*
* traverse thru the active_q and tmf_q and cleanup
@@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
bnx2fc_send_session_destroy_req(hba, tgt);
/* wait for destroy to complete */
- setup_timer(&tgt->upld_timer,
- bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
+ bnx2fc_upld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
tgt->flags);
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
@@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
- if (rdata->flags & FC_RP_FLAGS_RETRY) {
+ if (rdata->flags & FC_RP_FLAGS_RETRY &&
+ rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+ !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
tgt->dev_type = TYPE_TAPE;
tgt->io_timeout = 0; /* use default ULP timeout */
} else {
@@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
tgt = (struct bnx2fc_rport *)&rp[1];
/* This can happen when ADISC finds the same target */
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
BNX2FC_TGT_DBG(tgt, "already offloaded\n");
mutex_unlock(&hba->hba_mutex);
return;
@@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
hba->num_ofld_sess);
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
- /*
- * Session is offloaded and enabled. Map
- * doorbell register for this target
- */
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+ /* Session is offloaded and enabled. */
BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
/* This counter is protected with hba mutex */
hba->num_ofld_sess++;
@@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
*/
tgt = (struct bnx2fc_rport *)&rp[1];
- if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
mutex_unlock(&hba->hba_mutex);
break;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91eec60252ee..a28b03e5a5f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
if (error_mask1) {
iscsi_init2.error_bit_map[0] = error_mask1;
- mask64 &= (u32)(~mask64);
+ mask64 ^= (u32)(mask64);
mask64 |= error_mask1;
} else
iscsi_init2.error_bit_map[0] = (u32) mask64;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index a15474eef5f7..2a323742ce04 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -895,7 +895,7 @@ static int ch_probe(struct device *dev)
{
struct scsi_device *sd = to_scsi_device(dev);
struct device *class_dev;
- int minor, ret = -ENOMEM;
+ int ret;
scsi_changer *ch;
if (sd->type != TYPE_MEDIUM_CHANGER)
@@ -905,22 +905,19 @@ static int ch_probe(struct device *dev)
if (NULL == ch)
return -ENOMEM;
- if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
- goto free_ch;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&ch_index_lock);
- ret = idr_get_new(&ch_index_idr, ch, &minor);
+ ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
spin_unlock(&ch_index_lock);
+ idr_preload_end();
- if (ret)
+ if (ret < 0) {
+ if (ret == -ENOSPC)
+ ret = -ENODEV;
goto free_ch;
-
- if (minor > CH_MAX_DEVS) {
- ret = -ENODEV;
- goto remove_idr;
}
- ch->minor = minor;
+ ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
class_dev = device_create(ch_sysfs_class, dev,
@@ -944,7 +941,7 @@ static int ch_probe(struct device *dev)
return 0;
remove_idr:
- idr_remove(&ch_index_idr, minor);
+ idr_remove(&ch_index_idr, ch->minor);
free_ch:
kfree(ch);
return ret;
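
The ch_probe() hunk above (like the bfad_im_scsi_host_alloc() hunk earlier) converts the old idr_pre_get()/idr_get_new() pair to the idr_preload()/idr_alloc() API: preallocate outside the spinlock, allocate with GFP_NOWAIT inside it, and treat -ENOSPC as "no device slot". The kernel-style sketch below shows that pattern; my_idr, my_lock, MY_MAX_DEVS and my_alloc_minor are hypothetical names, not part of either driver.

```c
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);		/* hypothetical index space */
static DEFINE_SPINLOCK(my_lock);
#define MY_MAX_DEVS 128			/* hypothetical upper bound */

/*
 * Allocate an index for 'dev' with the idr_alloc() API the hunks above
 * convert to: preload outside the spinlock, allocate with GFP_NOWAIT
 * inside it, and map -ENOSPC to -ENODEV the way ch_probe() now does.
 */
static int my_alloc_minor(void *dev)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, dev, 0, MY_MAX_DEVS + 1, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	if (id == -ENOSPC)
		return -ENODEV;
	return id;			/* >= 0 on success, else -errno */
}
```
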
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 8ecdb94a59f4..bdd78fb4fc70 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2131,13 +2131,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
value_to_add = 4 - (cf->size % 4);
cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
- if (cfg_data == NULL)
- return -ENOMEM;
+ if (cfg_data == NULL) {
+ ret = -ENOMEM;
+ goto leave;
+ }
memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
-
- if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)
- return -EINVAL;
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ ret = -EINVAL;
+ goto leave;
+ }
mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
@@ -2149,9 +2152,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
}
+leave:
kfree(cfg_data);
release_firmware(cf);
-
return ret;
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index b42cbbd3d92d..0604b5ff3638 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -60,18 +60,11 @@ static struct scsi_transport_template *csio_fcoe_transport_vport;
/*
* debugfs support
*/
-static int
-csio_mem_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = file->f_path.dentry->d_inode->i_size;
+ loff_t avail = file_inode(file)->i_size;
unsigned int mem = (uintptr_t)file->private_data & 3;
struct csio_hw *hw = file->private_data - mem;
@@ -110,7 +103,7 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static const struct file_operations csio_mem_debugfs_fops = {
.owner = THIS_MODULE,
- .open = csio_mem_open,
+ .open = simple_open,
.read = csio_mem_read,
.llseek = default_llseek,
};
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index f924b3c3720e..3fecf35ba292 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
break;
case CXGB4_STATE_DETACH:
pr_info("cdev 0x%p, DETACH.\n", cdev);
+ cxgbi_device_unregister(cdev);
break;
default:
pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 865c64fa923c..fed486bfd3f4 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
dcb->max_command = 1;
dcb->target_id = target;
dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
dcb->identify_msg =
IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
dcb->identify_msg = IDENTIFY(0, lun);
#endif
- dcb->dev_mode = eeprom->target[target].cfg0;
dcb->inquiry7 = 0;
dcb->sync_mode = 0;
dcb->min_nego_period = clock_period[period_index];
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b4f6c9a84e71..b6e2700ec1c6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2161,7 +2161,7 @@ static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
struct inode *inode;
long ret;
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
mutex_lock(&adpt_mutex);
ret = adpt_ioctl(inode, file, cmd, arg);
@@ -2177,7 +2177,7 @@ static long compat_adpt_ioctl(struct file *file,
struct inode *inode;
long ret;
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
mutex_lock(&adpt_mutex);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 666b7ac4475f..b5d92fc93c70 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
unsigned int op,
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
port = lport_priv(lport);
port->lport = lport;
port->priv = fcoe;
+ port->get_netdev = fcoe_netdev;
port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
+ /*
+ * Need to add the lport to the hostlist
+ * so we catch NETDEV_CHANGE events.
+ */
+ fcoe_hostlist_add(lport);
+
/* configure a fc_lport including the exchange manager */
rc = fcoe_lport_config(lport);
if (rc) {
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
out_lp_destroy:
fc_exch_mgr_free(lport);
out_host_put:
+ fcoe_hostlist_del(lport);
scsi_host_put(lport->host);
out:
return ERR_PTR(rc);
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
static int fcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
struct fcoe_ctlr *ctlr;
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
- if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(ctlr);
- else if (fcoe_ctlr_link_down(ctlr)) {
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !fcoe_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ fcoe_ctlr_link_up(ctlr);
+ };
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ };
}
out:
return rc;
@@ -2039,6 +2067,8 @@ out:
* Called from fcoe transport.
*
* Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
*/
static int fcoe_disable(struct net_device *netdev)
{
@@ -2098,6 +2128,33 @@ out:
}
/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return fcoe_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return fcoe_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @netdev : The net_device object the Ethernet interface to create on
*
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
+ shost = port->lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+
mutex_lock(&fcoe_config_mutex);
fcoe = port->priv;
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
#endif
}
+enum fcoe_create_link_state {
+ FCOE_CREATE_LINK_DOWN,
+ FCOE_CREATE_LINK_UP,
+};
+
/**
- * fcoe_create() - Create a fcoe interface
- * @netdev : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*/
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+ enum fcoe_create_link_state link_state)
{
int rc = 0;
struct fcoe_ctlr_device *ctlr_dev;
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
- /* add to lports list */
- fcoe_hostlist_add(lport);
-
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport)) {
+
+ /*
+ * If the fcoe_ctlr_device is to be set to DISABLED
+ * it must be done after the lport is added to the
+ * hostlist, but before the rtnl_lock is released.
+ * This is because the rtnl_lock protects the
+ * hostlist that fcoe_device_notification uses. If
+ * the FCoE Controller is intended to be created
+ * DISABLED then 'enabled' needs to be considered when
+ * handling link events. 'enabled' must be set
+ * before the lport can be found in the hostlist
+ * when a link up event is received.
+ */
+ if (link_state == FCOE_CREATE_LINK_UP)
+ ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+ else
+ ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == FCOE_CREATE_LINK_UP &&
+ !fcoe_link_ok(lport)) {
rtnl_unlock();
fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
@@ -2275,37 +2381,34 @@ out_nortnl:
}
/**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
*
- * Returns: 0 if the ethtool query was successful
- * -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
*/
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
{
- struct net_device *netdev = fcoe_netdev(lport);
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |=
- FC_PORTSPEED_10GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- return 0;
- }
- return -1;
+ return _fcoe_create(netdev, FIP_MODE_FABRIC,
+ FCOE_CREATE_LINK_DOWN);
}
/**
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
- if (!fcoe_link_ok(ctlr->lp))
+
+ if (cdev->enabled != FCOE_CTLR_DISABLED &&
+ !fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
return 0;
}
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ * port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_del(&fcoe->list);
+ return;
+}
static struct fcoe_transport fcoe_sw_transport = {
.name = {FCOE_TRANSPORT_DEFAULT},
.attached = false,
.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
.match = fcoe_match,
+ .alloc = fcoe_ctlr_alloc,
.create = fcoe_create,
.destroy = fcoe_destroy,
.enable = fcoe_enable,
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void)
/* releases the associated fcoe hosts */
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
- list_del(&fcoe->list);
ctlr = fcoe_to_ctlr(fcoe);
port = lport_priv(ctlr->lp);
+ fcoe_hostlist_del(port->lport);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
NULL, NULL, 3 * lport->r_a_tov);
}
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = fcoe_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = fcoe_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index b42dc32cb5eb..2b53672bf932 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -55,12 +55,12 @@ do { \
#define FCOE_DBG(fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_LOGGING, \
- printk(KERN_INFO "fcoe: " fmt, ##args);)
+ pr_info("fcoe: " fmt, ##args);)
#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
- printk(KERN_INFO "fcoe: %s: " fmt, \
- netdev->name, ##args);)
+ pr_info("fcoe: %s: " fmt, \
+ netdev->name, ##args);)
/**
* struct fcoe_interface - A FCoE interface
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 4a909d7cfde1..08c3bc398da2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id)
+ if (!fcf || !lport->port_id) {
+ /*
+ * We have yet to select the best FCF, but we got a CVL in the
+ * meantime. Reset the ctlr and let it rediscover the FCF.
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
return;
+ }
/*
* mask of required descriptors. Validating each one clears its bit.
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
fcf->flogi_sent, fcf->pri);
- if (fcf->fabric_name != first->fabric_name ||
- fcf->vfid != first->vfid ||
- fcf->fc_map != first->fc_map) {
- LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
- "or FC-MAP\n");
- return NULL;
- }
- if (fcf->flogi_sent)
- continue;
if (!fcoe_ctlr_fcf_usable(fcf)) {
LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
"map %x %svalid %savailable\n",
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
"" : "un");
continue;
}
+ if (fcf->fabric_name != first->fabric_name ||
+ fcf->vfid != first->vfid ||
+ fcf->fc_map != first->fc_map) {
+ LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+ "or FC-MAP\n");
+ return NULL;
+ }
+ if (fcf->flogi_sent)
+ continue;
if (!best || fcf->pri < best->pri || best->flogi_sent)
best = fcf;
}
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
}
EXPORT_SYMBOL(fcoe_fcf_get_selected);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
mutex_lock(&ctlr->ctlr_mutex);
- switch (ctlr->mode) {
- case FIP_MODE_FABRIC:
- ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
- break;
- case FIP_MODE_VN2VN:
- ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ switch (ctlr_dev->mode) {
+ case FIP_CONN_TYPE_VN2VN:
+ ctlr->mode = FIP_MODE_VN2VN;
break;
+ case FIP_CONN_TYPE_FABRIC:
default:
- ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ ctlr->mode = FIP_MODE_FABRIC;
break;
}
+
mutex_unlock(&ctlr->ctlr_mutex);
}
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5e751689a089..8c05ae017f5b 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -21,8 +21,17 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/ctype.h>
#include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
static atomic_t ctlr_num;
static atomic_t fcf_num;
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x) \
((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x) \
+ ((x)->enabled)
#define fcoe_fcf_state(x) \
((x)->state)
#define fcoe_fcf_fabric_name(x) \
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
#define fcoe_enum_name_search(title, table_type, table) \
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{ \
- int i; \
- char *name = NULL; \
- \
- for (i = 0; i < ARRAY_SIZE(table); i++) { \
- if (table[i].value == table_key) { \
- name = table[i].name; \
- break; \
- } \
- } \
- return name; \
+ if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
+ return NULL; \
+ return table[table_key]; \
+}
+
+static char *fip_conn_type_names[] = {
+ [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+ [ FIP_CONN_TYPE_FABRIC ] = "Fabric",
+ [ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+ if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+ return i;
+ }
+
+ return FIP_CONN_TYPE_UNKNOWN;
}
-static struct {
- enum fcf_state value;
- char *name;
-} fcf_state_names[] = {
- { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
- { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
- { FCOE_FCF_STATE_CONNECTED, "Connected" },
+static char *fcf_state_names[] = {
+ [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
+ [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+ [ FCOE_FCF_STATE_CONNECTED ] = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev,
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
-static struct {
- enum fip_conn_type value;
- char *name;
-} fip_conn_type_names[] = {
- { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
- { FIP_CONN_TYPE_FABRIC, "Fabric" },
- { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev,
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
const char *name;
- if (ctlr->f->get_fcoe_ctlr_mode)
- ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
name = get_fcoe_ctlr_mode_name(ctlr->mode);
if (!name)
return -EINVAL;
- return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+ "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+ if (count > FCOE_MAX_MODENAME_LEN)
+ return -EINVAL;
+
+ strncpy(mode, buf, count);
+
+ if (mode[count - 1] == '\n')
+ mode[count - 1] = '\0';
+ else
+ mode[count] = '\0';
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+ return -EBUSY;
+ case FCOE_CTLR_DISABLED:
+ if (!ctlr->f->set_fcoe_ctlr_mode) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Mode change not supported by LLD.");
+ return -ENOTSUPP;
+ }
+
+ ctlr->mode = fcoe_parse_mode(mode);
+ if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Unknown mode %s provided.", buf);
+ return -EINVAL;
+ }
+
+ ctlr->f->set_fcoe_ctlr_mode(ctlr);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+ return count;
+ case FCOE_CTLR_UNUSED:
+ default:
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+ return -ENOTSUPP;
+ };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+ show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int rc;
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ if (*buf == '1')
+ return count;
+ ctlr->enabled = FCOE_CTLR_DISABLED;
+ break;
+ case FCOE_CTLR_DISABLED:
+ if (*buf == '0')
+ return count;
+ ctlr->enabled = FCOE_CTLR_ENABLED;
+ break;
+ case FCOE_CTLR_UNUSED:
+ return -ENOTSUPP;
+ };
+
+ rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+ [ FCOE_CTLR_ENABLED ] = "1",
+ [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+ ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
"%s\n", name);
}
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
- show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+ show_ctlr_enabled_state,
+ store_ctlr_enabled);
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
};
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
+struct bus_attribute fcoe_bus_attr_group[] = {
+ __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+ __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+ __ATTR_NULL
+};
+
struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
+ .bus_attrs = fcoe_bus_attr_group,
};
/**
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
ctlr->id = atomic_inc_return(&ctlr_num) - 1;
ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
INIT_LIST_HEAD(&ctlr->fcfs);
mutex_init(&ctlr->lock);
ctlr->dev.parent = parent;
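
The fcoe_sysfs.c changes above replace the value/name pair tables with designated-initializer arrays indexed by the enum value, turn fcoe_enum_name_search() into a bounds-checked array lookup, and add fcoe_parse_mode() for the writable 'mode' attribute. The standalone userspace sketch below mirrors that lookup/parse pattern; the type and function names are illustrative, not the kernel's.

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strcasecmp() */

enum conn_type { CONN_UNKNOWN, CONN_FABRIC, CONN_VN2VN };

/* Index-keyed name table, in the style the patch switches to. */
static const char *conn_type_names[] = {
	[CONN_UNKNOWN] = "Unknown",
	[CONN_FABRIC]  = "Fabric",
	[CONN_VN2VN]   = "VN2VN",
};
#define N_NAMES (sizeof(conn_type_names) / sizeof(conn_type_names[0]))

/* Bounds-checked enum -> name lookup, as the reworked
 * fcoe_enum_name_search() macro now expands to. */
static const char *conn_type_name(int key)
{
	if (key < 0 || key >= (int)N_NAMES)
		return NULL;
	return conn_type_names[key];
}

/* Case-insensitive name -> enum parse, as in fcoe_parse_mode(). */
static enum conn_type parse_conn_type(const char *buf)
{
	size_t i;

	for (i = 0; i < N_NAMES; i++)
		if (strcasecmp(buf, conn_type_names[i]) == 0)
			return (enum conn_type)i;
	return CONN_UNKNOWN;
}

int main(void)
{
	printf("%s\n", conn_type_name(parse_conn_type("vn2vn")));	/* VN2VN */
	return 0;
}
```
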
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index ac76d8a042d7..f3a5a53e8631 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = {
.notifier_call = libfcoe_device_notification,
};
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |=
+ FC_PORTSPEED_10GBIT;
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_1000:
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case SPEED_10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port whose LESB is to be filled up
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
void __fcoe_get_lesb(struct fc_lport *lport,
struct fc_els_lesb *fc_lesb,
struct net_device *netdev)
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport,
}
EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_get_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
{
u8 wwpn[8];
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
return NOTIFY_OK;
}
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_ctlr_device *ctlr_dev = NULL;
+ int rc = 0;
+ int err;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+ if (err) {
+ fcoe_del_netdev_mapping(netdev);
+ rc = -ENOMEM;
+ goto out_putdev;
+ }
+
+ err = fcoe_add_netdev_mapping(netdev, ft);
+ if (err) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (ctlr_dev) ? "succeeded" : "failed",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy(netdev);
+ if (rc)
+ goto out_putdev;
+
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+ rc = count; /* required for successful return */
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
/**
* fcoe_transport_create() - Create a fcoe interface
@@ -769,11 +962,7 @@ out_putdev:
dev_put(netdev);
out_nodev:
mutex_unlock(&ft_mutex);
-
- if (rc == -ERESTARTSYS)
- return restart_syscall();
- else
- return rc;
+ return rc;
}
/**
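The ctlr_create and ctlr_destroy attributes added to fcoe_bus_type above appear as write-only files directly under the fcoe bus directory in sysfs, and fcoe_ctlr_create_store()/fcoe_ctlr_destroy_store() interpret the written string as a network interface name. Below is only a minimal user-space sketch of exercising that interface, assuming the conventional /sys/bus/fcoe location and a hypothetical interface name "eth0":

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write an interface name to one of the new fcoe bus attributes. */
static int fcoe_bus_write(const char *attr, const char *ifname)
{
	char path[128];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/sys/bus/fcoe/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, ifname, strlen(ifname));	/* the store routines return count on success */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* "eth0" is only an example; use a real FCoE-capable interface. */
	if (fcoe_bus_write("ctlr_create", "eth0"))
		return 1;
	return fcoe_bus_write("ctlr_destroy", "eth0") ? 1 : 0;
}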
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
index 6af5fc3a17d8..d3bb16d11401 100644
--- a/drivers/scsi/fcoe/libfcoe.h
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -2,9 +2,10 @@
#define _FCOE_LIBFCOE_H_
extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
do { \
@@ -16,16 +17,19 @@ do { \
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
+ pr_info("libfcoe: " fmt, ##args);)
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
+ pr_info("host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
- printk(KERN_INFO "%s: " fmt, \
- __func__, ##args);)
+ pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
+ pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
#endif /* _FCOE_LIBFCOE_H_ */
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 37c3440bc17c..383598fadf04 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -7,6 +7,8 @@ fnic-y := \
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 95a5ba29320d..98436c363035 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -26,6 +26,7 @@
#include <scsi/libfcoe.h>
#include "fnic_io.h"
#include "fnic_res.h"
+#include "fnic_trace.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -56,6 +57,34 @@
#define FNIC_NO_TAG -1
/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
+/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
*/
@@ -64,6 +93,7 @@
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
@@ -71,9 +101,28 @@
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
#endif /* _FNIC_H_ */
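The new state_flags bits and the in_flight counter gate the I/O path during a firmware reset: the reset path sets FNIC_FLAGS_FWRESET and waits for in_flight to drain, the queuecommand path refuses new commands while FNIC_FLAGS_IO_BLOCKED is set, and the reset-completion handler clears the flags again (see the fnic_scsi.c hunks below). The following is only a stand-alone user-space model of that hand-off, not driver code; the driver serializes the flag updates under the SCSI host lock and sleeps via schedule_timeout() rather than spinning:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int in_flight;		/* models fnic->in_flight */
static atomic_uint state_flags;		/* models fnic->state_flags */

#define MODEL_FLAGS_FWRESET	0x3u	/* FWRESET | BLOCK_IO, as in the header above */
#define MODEL_FLAGS_BLOCKED	0x2u	/* BLOCK_IO alone */

static bool queue_io(void)
{
	/* mirrors the new check at the top of fnic_queuecommand_lck() */
	if (atomic_load(&state_flags) & MODEL_FLAGS_BLOCKED)
		return false;		/* the driver returns SCSI_MLQUEUE_HOST_BUSY */
	atomic_fetch_add(&in_flight, 1);
	/* ... build and post the copy WQ descriptor ... */
	atomic_fetch_sub(&in_flight, 1);
	return true;
}

static void fw_reset(void)
{
	/* mirrors fnic_fw_reset_handler(): block new I/O, drain, then reset */
	atomic_fetch_or(&state_flags, MODEL_FLAGS_FWRESET);
	while (atomic_load(&in_flight) > 0)
		;			/* the driver sleeps via schedule_timeout() */
	/* ... issue the firmware reset; its completion clears the flags ... */
	atomic_fetch_and(&state_flags, ~MODEL_FLAGS_FWRESET);
}

int main(void)
{
	printf("queued: %d\n", queue_io());
	fw_reset();
	printf("queued after reset: %d\n", queue_io());
	return 0;
}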
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644
index 000000000000..adc1f7f471f5
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens the trace_enable debugfs file.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of fnic_tracing_enabled into a local
+ * buffer and then copies up to @cnt bytes of it to @ubuf, starting at
+ * the file position @ppos.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into a local
+ * buffer and sets fnic_tracing_enabled according to the user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic_tracing_enabled = val;
+ (*ppos)++;
+
+ return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ (3*(trace_max_pages * PAGE_SIZE)));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ file->private_data = fnic_dbg_prt;
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ loff_t pos = -1;
+
+ switch (howto) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos = file->f_pos + offset;
+ break;
+ case 2:
+ pos = fnic_dbg_prt->buffer_len - offset;
+ }
+ return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+ -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory. It creates a trace file that exposes the fnic trace
+ * buffer through debugfs and a tracing_enable file that controls whether
+ * trace logging into the buffer is enabled.
+ */
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL, &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+ printk(KERN_DEBUG "Cannot create trace_enable file"
+ " under debugfs");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL,
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ if (fnic_trace_debugfs_file) {
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+ }
+ if (fnic_trace_enable) {
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+ }
+ if (fnic_trace_debugfs_root) {
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+ }
+}
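The new fnic_debugfs.c above creates a fnic directory containing tracing_enable (backed by the fnic_trace_ctrl_* handlers and the fnic_tracing_enabled flag) and trace (a snapshot built by fnic_get_trace_data() at open time). A small user-space sketch that enables tracing and dumps one snapshot, assuming debugfs is mounted at the usual /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define FNIC_DEBUGFS "/sys/kernel/debug/fnic"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn tracing on; the file accepts a decimal value parsed by kstrtoul(). */
	fd = open(FNIC_DEBUGFS "/tracing_enable", O_WRONLY);
	if (fd < 0 || write(fd, "1\n", 2) < 0) {
		perror("tracing_enable");
		return 1;
	}
	close(fd);

	/* Dump the current trace snapshot to stdout. */
	fd = open(FNIC_DEBUGFS "/trace", O_RDONLY);
	if (fd < 0) {
		perror("trace");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}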
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 3c53c3478ee7..483eb9dbe663 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- fnic->vlan_hw_insert, fnic->vlan_id, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
@@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f0b896988cd5..c35b8f1889ea 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -21,7 +21,7 @@
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
-#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
};
enum fnic_ioreq_state {
- FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fbf3ac6e0c55..d601ac543c52 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fnic->state = FNIC_IN_FC_MODE;
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Trace buffer initialization Failed "
+ "Fnic Tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab:
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
return err;
}
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
}
module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c40ce52ed7c6..be99e7549d89 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
};
static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
+
+
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
int ret = 0;
unsigned long flags;
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret)
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else
+ else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
+ }
+
return ret;
}
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
- struct fc_lport *lp;
+ struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
- struct fnic_io_req *io_req;
- struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
struct vnic_wq_copy *wq;
int ret;
- int sg_count;
+ u64 cmd_trace;
+ int sg_count = 0;
unsigned long flags;
unsigned long ptr;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
return 0;
}
- lp = shost_priv(sc->device->host);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
+ atomic_inc(&fnic->in_flight);
+
/*
* Release host lock, use driver resource specific locks from here.
* Don't re-enable interrupts in case they were disabled prior to the
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
- fnic = lport_priv(lp);
-
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* initialize rest of io_req */
io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
+ } else {
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+ (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
return ret;
}
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
fnic->fw_ack_recd[0] = 1;
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
}
/*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
return;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
return;
}
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
-
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct fnic_io_req *io_req;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
return;
-
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
- if (id & FNIC_TAG_ABORT) {
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
sc->scsi_done(sc);
+ }
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
- int ret = 0;
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
- ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
- ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
}
- return ret;
+ return 0;
}
/*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will get here only when the firmware completes a reset
+ * without sending completions for outstanding IOs.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
" DID_TRANSPORT_DISRUPTED\n");
/* Complete the command to SCSI */
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
}
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
unsigned long flags;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
fnic->config.ra_tov, fnic->config.ed_tov);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
return 0;
}
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "fnic_rport_reset_exch called portid 0x%06x\n",
+ "fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
- rport->port_name, rport->node_name,
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
int ret = SUCCESS;
- u32 task_req;
+ u32 task_req = 0;
struct scsi_lun fc_lun;
+ int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic = lport_priv(lp);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
- rport->port_id, sc->device->lun, sc->request->tag);
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ if (task_req == FCPIO_ITMF_ABT_TASK)
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
/*
* We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from abort cmd %s\n",
+ "Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
return ret;
}
@@ -1412,7 +1773,7 @@ lr_io_req_end:
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc)
{
- int tag;
+ int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
- BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+ * Any pending IO issued prior to the reset is expected to be
+ * in the abts-pending state; if it is not, set
+ * FNIC_IOREQ_ABTS_PENDING to mark the IO as abort pending.
+ * When the IO completes, it will be handed over and handled
+ * in this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
}
io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* if abort is still pending with fw, fail */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
}
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
clean_pending_aborts_end:
return ret;
}
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x\n",
- rport->port_id, sc->device->lun);
+ "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport))
goto fnic_device_reset_end;
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
- sc->request->tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = CMD_LR_STATUS(sc);
- spin_unlock_irqrestore(io_lock, flags);
/*
* If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status == FCPIO_INVALID_CODE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- goto fnic_device_reset_end;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on the device reset request.
+ * If queuing the abort fails, retry it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
fc_exch_mgr_reset(lp, sid, did);
}
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through the tag map to check if any IOs are pending. If there is one,
+ * it returns 1 (true), otherwise 0 (false).
+ * If @lr_sc is non-NULL, it checks IOs specific to that particular LUN,
+ * otherwise it checks all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+ /* walk the tag map again to check if IOs are still pending in fw */
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ continue;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644
index 000000000000..23a60e3d8527
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Give a buffer pointer to the caller to fill in trace information
+ *
+ * Description:
+ * This routine gets the next available trace buffer entry location @wr_idx
+ * from the allocated trace buffer pages and gives that memory location
+ * to the caller to store the trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry,
+ * @fnic_buf_head, for the caller to fill with trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get next available memory location for writing trace information
+ * at @wr_idx and increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+ * If the write index has reached the end of the trace buffer,
+ * wrap wr_idx back to zero
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+ * If write index @wr_idx has caught up with read index @rd_idx,
+ * advance @rd_idx so the oldest entry is overwritten next
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
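
The function above implements an overwrite-oldest ring: the writer never fails, and when wr_idx catches up with rd_idx the oldest record is sacrificed. The index arithmetic alone, as a small sketch with an assumed entry count N:

/* Advance the write index of an overwrite-oldest ring of N entries. */
static void ring_advance(int *wr_idx, int *rd_idx, int N)
{
	*wr_idx = (*wr_idx + 1) % N;		/* the writer always moves forward */
	if (*wr_idx == *rd_idx)			/* caught up with the reader? */
		*rd_idx = (*rd_idx + 1) % N;	/* drop the oldest entry */
}
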
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * If rd_idx has reached the maximum number of trace entries,
+ * wrap rd_idx back to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file till rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file till rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize the trace buffer data structure by allocating the required memory
+ * and setting the page_offset of every trace entry by adding the trace entry
+ * length to the previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set page_offset field of fnic_trace_entries struct by
+ * calculating memory location for every trace entry using
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
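
As a sizing example: with, say, trace_max_pages = 16 and 4 KiB pages, fnic_max_trace_entries works out to (16 * 4096) / 64 = 1024, so the ring holds the last 1024 trace records before the write index wraps and old entries start being overwritten.
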
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644
index 000000000000..cef42b4c4d6c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \
+ if (unlikely(fnic_tracing_enabled)) { \
+ fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+ if (trace_buf) { \
+ if (sizeof(unsigned long) < 8) { \
+ trace_buf->timestamp.low = jiffies; \
+ trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+ } else { \
+ trace_buf->timestamp.val = jiffies; \
+ trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+ } \
+ trace_buf->host_no = _hn; \
+ trace_buf->tag = _t; \
+ trace_buf->data[0] = (u64)(unsigned long)_a; \
+ trace_buf->data[1] = (u64)(unsigned long)_b; \
+ trace_buf->data[2] = (u64)(unsigned long)_c; \
+ trace_buf->data[3] = (u64)(unsigned long)_d; \
+ trace_buf->data[4] = (u64)(unsigned long)_e; \
+ } \
+ }
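
For reference, the device-reset path earlier in this patch shows how the macro is meant to be called: the first argument becomes the symbolized function address, the next two fill host_no and tag, and the remaining five land in data[0] through data[4]:

	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
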
+
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 599790e41a98..59bceac51a4c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -1107,14 +1107,8 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
pci_read_config_word(pdev, PCI_COMMAND, &command);
command |= 6;
pci_write_config_word(pdev, PCI_COMMAND, command);
- if (pci_resource_start(pdev, 8) == 1UL)
- pci_resource_start(pdev, 8) = 0UL;
- i = 0xFEFF0001UL;
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
- gdth_delay(1);
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
- pci_resource_start(pdev, 8));
-
+ gdth_delay(1);
+
dp6m_ptr = ha->brd;
/* Ensure that it is safe to access the non HW portions of DPMEM.
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f338061b5c3..7f4f790a3d71 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -1131,7 +1131,7 @@ clean:
return -ENOMEM;
}
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr.lower = 0;
+ chain_sg->Addr.upper = 0;
+ return -1;
+ }
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
}
}
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
- return;
+ return 0;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return -1;
+ }
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ return 0;
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
return -ENOMEM;
}
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+ NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
- fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
- buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
- hpsa_map_sg_chain_block(h, cp);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
return 0;
}
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
- /* Send the Test Unit Ready */
- fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
+ int rc = 0;
if (!argp)
return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_kfree;
}
} else {
memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c = cmd_special_alloc(h);
if (c == NULL) {
- kfree(buff);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_kfree;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[0].Addr.lower = 0;
+ c->SG[0].Addr.upper = 0;
+ c->SG[0].Len = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
}
- kfree(buff);
+out:
cmd_special_free(h, c);
- return 0;
+out_kfree:
+ kfree(buff);
+ return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[i].Addr.lower = 0;
+ c->SG[i].Addr.upper = 0;
+ c->SG[i].Len = 0;
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup1;
+ }
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
return 0;
}
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
- return;
+ return -1;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
-
- hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
- return;
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
}
/*
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
- fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
- RAID_CTLR_LUNID, TYPE_CMD);
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
+out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1d7da3f41ebb..f328089a1060 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 1000,
.cache_line_size = 0x20,
.clear_isr = 0,
+ .iopoll_weight = 64,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+ trace_entry = &ioa_cfg->trace[atomic_add_return
+ (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
+ wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
+ int hrrq_id;
+ hrrq_id = ioarcb->cmd_pkt.hrrq_id;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->cmd_pkt.hrrq_id = hrrq_id;
ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd = NULL;
+
+ if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+ ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+ struct ipr_cmnd, queue);
+ list_del(&ipr_cmd->queue);
+ }
- ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
- list_del(&ipr_cmd->queue);
return ipr_cmd;
}
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
- struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ struct ipr_cmnd *ipr_cmd =
+ __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
return ipr_cmd;
}
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
u32 clr_ints)
{
volatile u32 int_reg;
+ int i;
/* Stop new interrupts */
- ioa_cfg->allow_interrupts = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
**/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
}
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd, *temp;
+ struct ipr_hrr_queue *hrrq;
ENTER;
- list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
- list_del(&ipr_cmd->queue);
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry_safe(ipr_cmd,
+ temp, &hrrq->hrrq_pending_q, queue) {
+ list_del(&ipr_cmd->queue);
- ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
- ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+ ipr_cmd->s.ioasa.hdr.ioasc =
+ cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid =
+ cpu_to_be32(IPR_DRIVER_ILID);
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+ IPR_IOASC_IOA_WAS_RESET);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->done(ipr_cmd);
+ }
+ spin_unlock(&hrrq->_lock);
}
-
LEAVE;
}
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
void (*done) (struct ipr_cmnd *),
void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = done;
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
spin_lock_irq(ioa_cfg->host->host_lock);
}
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+ if (ioa_cfg->hrrq_num == 1)
+ return 0;
+ else
+ return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
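
ipr_get_hrrq_index() keeps HRRQ 0 aside (the rest of the patch uses it as IPR_INIT_HRRQ for internal commands) and spreads everything else round-robin over queues 1 .. hrrq_num-1. A tiny user-space illustration of the arithmetic, with an assumed hrrq_num of 4:

#include <stdio.h>

int main(void)
{
	int hrrq_num = 4;		/* assumed number of HRR queues */
	unsigned int counter = 0;	/* stands in for the atomic hrrq_index */
	int i;

	for (i = 0; i < 6; i++)
		/* mirrors (atomic_add_return(1, ...) % (hrrq_num - 1)) + 1 */
		printf("%u ", (++counter % (hrrq_num - 1)) + 1);
	printf("\n");			/* prints: 2 3 1 2 3 1 */
	return 0;
}
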
/**
* ipr_send_hcam - Send an HCAM to the adapter.
* @ioa_cfg: ioa config struct
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
- if (ioa_cfg->allow_cmds) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
}
/**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
* @res_path: resource path
* @buf: buffer
+ * @len: length of buffer provided
*
* Return value:
* pointer to buffer
**/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
int i;
char *p = buffer;
@@ -1187,6 +1222,27 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
}
/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg: ioa config struct
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+ u8 *res_path, char *buffer, int len)
+{
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (buffer - p));
+ return buffer;
+}
+
+/**
* ipr_update_res_entry - Update the resource entry.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
if (res->sdev && new_path)
sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(res->ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
} else {
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err_separator;
ipr_err("Device %d : %s", i + 1,
- ipr_format_res_path(dev_entry->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(dev_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_log_ext_vpd(&dev_entry->vpd);
ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
path_active_desc[i].desc, path_state_desc[j].desc,
- ipr_format_res_path(fabric->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ fabric->res_path,
+ buffer, sizeof(buffer)));
return;
}
}
ipr_err("Path state=%02X Resource Path=%s\n", path_state,
- ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+ buffer, sizeof(buffer)));
}
static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
- ipr_format_res_path(cfg->res_path, buffer,
- sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]),
+ be32_to_cpu(cfg->wwid[1]));
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status,
- ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("RAID %s Array Configuration: %s\n",
error->protection_level,
- ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, error->last_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("Array Member %d:\n", i);
ipr_log_ext_vpd(&array_entry->vpd);
ipr_err("Current Location: %s\n",
- ipr_format_res_path(array_entry->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, array_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_err("Expected Location: %s\n",
- ipr_format_res_path(array_entry->expected_res_path,
- buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ array_entry->expected_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
}
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -2491,36 +2553,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_reset_reload - Reset/Reload the IOA
- * @ioa_cfg: ioa config struct
- * @shutdown_type: shutdown type
- *
- * This function resets the adapter and re-initializes it.
- * This function assumes that all new host commands have been stopped.
- * Return value:
- * SUCCESS / FAILED
- **/
-static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
- enum ipr_shutdown_type shutdown_type)
-{
- if (!ioa_cfg->in_reset_reload)
- ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irq(ioa_cfg->host->host_lock);
-
- /* If we got hit with a host reset while we were already resetting
- the adapter for some reason, and the reset failed. */
- if (ioa_cfg->ioa_is_dead) {
- ipr_trace;
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
* ipr_find_ses_entry - Find matching SES in SES table
* @res: resource entry struct of SES
*
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
restart:
do {
did_work = 0;
- if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ !ioa_cfg->allow_ml_add_del) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
int len;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
len = snprintf(buf, PAGE_SIZE, "offline\n");
else
len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
- int result = count;
+ int result = count, i;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
- ioa_cfg->ioa_is_dead = 0;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+ !strncmp(buf, "online", 6)) {
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ioa_cfg->reset_retries = 0;
ioa_cfg->in_ioa_bringdown = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
.store = ipr_store_reset_adapter
};
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+ /**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long user_iopoll_weight;
+ unsigned long lock_flags = 0;
+ int i;
+
+ if (!ioa_cfg->sis64) {
+ dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+ return -EINVAL;
+ }
+ if (kstrtoul(buf, 10, &user_iopoll_weight))
+ return -EINVAL;
+
+ if (user_iopoll_weight > 256) {
+ dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+ return -EINVAL;
+ }
+
+ if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+ dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+ return strlen(buf);
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ ioa_cfg->iopoll_weight = user_iopoll_weight;
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+ .attr = {
+ .name = "iopoll_weight",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_iopoll_weight,
+ .store = ipr_store_iopoll_weight
+};
+
/**
* ipr_alloc_ucode_buffer - Allocates a microcode download buffer
* @buf_len: buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
&ipr_ioa_fw_type_attr,
+ &ipr_iopoll_weight_attr,
NULL,
};
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->dump = dump;
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
- if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
ioa_cfg->dump_taken = 1;
schedule_work(&ioa_cfg->work_q);
}
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ioa_cfg->sis64)
len = snprintf(buf, PAGE_SIZE, "%s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(res->res_path, buffer,
+ sizeof(buffer)));
else if (res)
len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
-/**
- * ipr_eh_host_reset - Reset the host adapter
- * @scsi_cmd: scsi command struct
- *
- * Return value:
- * SUCCESS / FAILED
- **/
-static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ unsigned long lock_flags = 0;
+ int rc = SUCCESS;
ENTER;
- ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->in_reset_reload) {
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
ioa_cfg->sdt_state = GET_DUMP;
}
- rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
-
- LEAVE;
- return rc;
-}
-
-static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
-{
- int rc;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __ipr_eh_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ /* If we got hit with a host reset while we were already resetting
+ the adapter for some reason, and the reset failed. */
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_trace;
+ rc = FAILED;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
return rc;
}
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ata_port *ap;
int rc = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
*/
if (ioa_cfg->in_reset_reload)
return FAILED;
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->qc &&
+ !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ }
}
}
+ spin_unlock(&hrrq->_lock);
}
-
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- rc = -EIO;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd,
+ &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle ==
+ res->res_handle) {
+ rc = -EIO;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
} else
rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
else
ipr_cmd->sibling->done(ipr_cmd->sibling);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
LEAVE;
}
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
int op_found = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* This will force the mid-layer to call ipr_eh_host_reset,
* which will then go to sleep and wait for the reset to complete
*/
- if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->in_reset_reload ||
+ ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
if (!res)
return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
if (!ipr_is_gscsi(res))
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->scsi_cmd == scsi_cmd) {
+ ipr_cmd->done = ipr_scsi_eh_done;
+ op_found = 1;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+ dev_err(&ioa_cfg->pdev->dev,
+ "No Host RRQ. 0x%08X\n", int_reg);
else
dev_err(&ioa_cfg->pdev->dev,
"Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* none
**/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+ dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+ struct list_head *doneq)
+{
+ u32 ioasc;
+ u16 cmd_index;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+ int num_hrrq = 0;
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrr_queue->allow_interrupts)
+ return 0;
+
+ while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrr_queue->toggle_bit) {
+
+ cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+ IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+ IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+ if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+ cmd_index < hrr_queue->min_cmd_id)) {
+ ipr_isr_eh(ioa_cfg,
+ "Invalid response handle from IOA: ",
+ cmd_index);
+ break;
+ }
+
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+ list_move_tail(&ipr_cmd->queue, doneq);
+
+ if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+ hrr_queue->hrrq_curr++;
+ } else {
+ hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+ hrr_queue->toggle_bit ^= 1u;
+ }
+ num_hrrq++;
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ return num_hrrq;
+}
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ unsigned long hrrq_flags;
+ int completed_ops;
+ LIST_HEAD(doneq);
+
+ hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+ ioa_cfg = hrrq->ioa_cfg;
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+ if (completed_ops < budget)
+ blk_iopoll_complete(iop);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
+ return completed_ops;
+}
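
The poll routine above only runs after the interrupt side schedules it; in the blk-iopoll scheme that hand-off usually looks roughly like the sketch below (the actual multi-HRRQ ISR continues past this excerpt, so this is an illustration rather than the patch's exact code):

#include <linux/interrupt.h>
#include <linux/blk-iopoll.h>

/* Rough sketch of an ISR deferring completion work to blk-iopoll. */
static irqreturn_t my_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = devp;	/* per-queue context, as in this driver */

	/* sched_prep returns 0 when we won the right to schedule the poller */
	if (!blk_iopoll_sched_prep(&hrrq->iopoll))
		blk_iopoll_sched(&hrrq->iopoll);

	return IRQ_HANDLED;
}
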
+
/**
* ipr_isr - Interrupt service routine
* @irq: irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
**/
static irqreturn_t ipr_isr(int irq, void *devp)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
- unsigned long lock_flags = 0;
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
u32 int_reg = 0;
- u32 ioasc;
- u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
LIST_HEAD(doneq);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/* If interrupts are disabled, ignore the interrupt */
- if (!ioa_cfg->allow_interrupts) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return IRQ_NONE;
}
while (1) {
- ipr_cmd = NULL;
-
- while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
- ioa_cfg->toggle_bit) {
-
- cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
- IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
- if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- rc = IRQ_HANDLED;
- goto unlock_out;
- }
-
- ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
- ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
- list_move_tail(&ipr_cmd->queue, &doneq);
-
- rc = IRQ_HANDLED;
-
- if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
- ioa_cfg->hrrq_curr++;
- } else {
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit ^= 1u;
- }
- }
+ if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+ rc = IRQ_HANDLED;
- if (ipr_cmd && !ioa_cfg->clear_isr)
- break;
+ if (!ioa_cfg->clear_isr)
+ break;
- if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
do {
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ writel(IPR_PCII_HRRQ_UPDATED,
+ ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
- num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ ipr_isr_eh(ioa_cfg,
+ "Error clearing HRRQ: ", num_hrrq);
rc = IRQ_HANDLED;
- goto unlock_out;
+ break;
} else
break;
}
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
-unlock_out:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
+ return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit) {
+ if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+ blk_iopoll_sched(&hrrq->iopoll);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+ } else {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit)
+
+ if (ipr_process_hrrq(hrrq, -1, &doneq))
+ rc = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
return rc;
}
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long lock_flags;
+ unsigned long hrrq_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
} else {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
}
}
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long lock_flags;
+ unsigned long hrrq_flags, lock_flags;
int rc;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
+ if (ipr_is_gata(res) && res->sata_port) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+ }
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
- if (ipr_is_gata(res) && res->sata_port) {
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
- return rc;
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
}
if (scsi_cmd->cmnd[0] >= 0xC0 &&
- (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+ (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ }
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- spin_lock_irqsave(shost->host_lock, lock_flags);
- if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
if (!rc)
scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (unlikely(ioa_cfg->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
scsi_dma_unmap(scsi_cmd);
goto err_nodev;
}
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
err_nodev:
- spin_lock_irqsave(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
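ipr_queuecommand() now picks a reply queue with ipr_get_hrrq_index(), whose body is not part of this hunk. Given the initialization later in the patch (hrrq_index is seeded to 1 when more than one queue exists, reserving hrrq[0] for internal commands), a plausible round-robin selector would look roughly like the sketch below; treat it as an assumption, not the driver's actual code:

	/* Hypothetical sketch of a per-command reply-queue selector. */
	static int demo_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
	{
		unsigned int hrrq;

		if (ioa_cfg->hrrq_num == 1)
			return 0;		/* single queue: always hrrq[0] */

		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		return (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;	/* round-robin over 1..hrrq_num-1 */
	}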
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
goto out_unlock;
rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
struct ipr_resource_entry *res = sata_port->res;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ spin_lock(&ipr_cmd->hrrq->_lock);
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
else
qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
ata_qc_complete(qc);
}
@@ -6244,6 +6502,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
}
/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ qc->lldd_task = NULL;
+ spin_lock(&hrrq->_lock);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ spin_unlock(&hrrq->_lock);
+ return 0;
+ }
+
+ if (unlikely(!hrrq->allow_cmds)) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ qc->lldd_task = ipr_cmd;
+ spin_unlock(&hrrq->_lock);
+ return 0;
+}
+
+/**
* ipr_qc_issue - Issue a SATA qc to a device
* @qc: queued command
*
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
struct ipr_ioarcb *ioarcb;
struct ipr_ioarcb_ata_regs *regs;
- if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+ if (qc->lldd_task == NULL)
+ ipr_qc_defer(qc);
+
+ ipr_cmd = qc->lldd_task;
+ if (ipr_cmd == NULL)
return AC_ERR_SYSTEM;
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ qc->lldd_task = NULL;
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+ ipr_cmd->hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_SYSTEM;
+ }
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
ioarcb = &ipr_cmd->ioarcb;
if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
memset(regs, 0, sizeof(*regs));
ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->qc = qc;
ipr_cmd->done = ipr_sata_done;
ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
default:
WARN_ON(1);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return AC_ERR_INVALID;
}
ipr_send_command(ipr_cmd);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return 0;
}
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
.hardreset = ipr_sata_reset,
.post_internal_cmd = ipr_ata_post_internal,
.qc_prep = ata_noop_qc_prep,
+ .qc_defer = ipr_qc_defer,
.qc_issue = ipr_qc_issue,
.qc_fill_rtf = ipr_qc_fill_rtf,
.port_start = ata_sas_port_start,
@@ -6425,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -6454,11 +6773,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_hostrcb *hostrcb, *temp;
- int i = 0;
+ int i = 0, j;
ENTER;
ioa_cfg->in_reset_reload = 0;
- ioa_cfg->allow_cmds = 1;
+ for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+ spin_lock(&ioa_cfg->hrrq[j]._lock);
+ ioa_cfg->hrrq[j].allow_cmds = 1;
+ spin_unlock(&ioa_cfg->hrrq[j]._lock);
+ }
+ wmb();
ioa_cfg->reset_cmd = NULL;
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
@@ -6482,14 +6806,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
spin_lock(ioa_cfg->host->host_lock);
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
scsi_block_requests(ioa_cfg->host);
LEAVE;
@@ -6560,9 +6884,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
if (!ioa_cfg->sis64)
ipr_cmd->job_step = ipr_set_supported_devs;
+ LEAVE;
return IPR_RC_JOB_RETURN;
}
+ LEAVE;
return IPR_RC_JOB_CONTINUE;
}
@@ -6820,7 +7146,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
return IPR_RC_JOB_RETURN;
}
@@ -7278,46 +7604,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_hrr_queue *hrrq;
ENTER;
+ ipr_cmd->job_step = ipr_ioafp_std_inquiry;
dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
- ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
- ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+ hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (ioa_cfg->sis64)
- ioarcb->cmd_pkt.cdb[1] = 0x1;
- ioarcb->cmd_pkt.cdb[2] =
- ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
- ioarcb->cmd_pkt.cdb[3] =
- ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
- ioarcb->cmd_pkt.cdb[4] =
- ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[5] =
- ((u64) ioa_cfg->host_rrq_dma) & 0xff;
- ioarcb->cmd_pkt.cdb[7] =
- ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[8] =
- (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+ ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
- if (ioa_cfg->sis64) {
- ioarcb->cmd_pkt.cdb[10] =
- ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
- ioarcb->cmd_pkt.cdb[11] =
- ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
- ioarcb->cmd_pkt.cdb[12] =
- ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
- ioarcb->cmd_pkt.cdb[13] =
- ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
- }
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
- ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+ if (ioa_cfg->nvectors == 1)
+ ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+ else
+ ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+ ioarcb->cmd_pkt.cdb[2] =
+ ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+ ioarcb->cmd_pkt.cdb[3] =
+ ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[4] =
+ ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[5] =
+ ((u64) hrrq->host_rrq_dma) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] =
+ ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] =
+ (sizeof(u32) * hrrq->size) & 0xff;
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[9] =
+ ioa_cfg->identify_hrrq_index;
- ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+ }
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[14] =
+ ioa_cfg->identify_hrrq_index;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_INTERNAL_TIMEOUT);
+
+ if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
LEAVE;
- return IPR_RC_JOB_RETURN;
+ return IPR_RC_JOB_CONTINUE;
}
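The Identify Host RRQ CDB built above packs the queue's DMA address and length as big-endian byte strings: bytes 2-5 carry the low 32 bits of host_rrq_dma, bytes 10-13 the high 32 bits on SIS-64, bytes 7-8 the queue length in bytes, and bytes 9 and 14 the queue index once IPR_ID_HRRQ_SELE_ENABLE is set in byte 1. A worked example with purely illustrative values:

	/*
	 * Assume host_rrq_dma = 0x0000001234567800 and hrrq->size = 256:
	 *
	 *   length = sizeof(u32) * 256 = 1024 = 0x0400
	 *     cdb[7] = 0x04, cdb[8] = 0x00
	 *
	 *   low 32 bits  = 0x34567800
	 *     cdb[2] = 0x34, cdb[3] = 0x56, cdb[4] = 0x78, cdb[5] = 0x00
	 *
	 *   high 32 bits = 0x00000012 (SIS-64 only)
	 *     cdb[10] = 0x00, cdb[11] = 0x00, cdb[12] = 0x00, cdb[13] = 0x12
	 *
	 *   cdb[9] = cdb[14] = identify_hrrq_index when more than one
	 *   vector is in use (IPR_ID_HRRQ_SELE_ENABLE set in cdb[1]).
	 */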
/**
@@ -7365,7 +7716,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
unsigned long timeout)
{
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+
+ ENTER;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7736,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
**/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
- memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+ struct ipr_hrr_queue *hrrq;
- /* Initialize Host RRQ pointers */
- ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
- ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit = 1;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+ /* Initialize Host RRQ pointers */
+ hrrq->hrrq_start = hrrq->host_rrq;
+ hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+ hrrq->hrrq_curr = hrrq->hrrq_start;
+ hrrq->toggle_bit = 1;
+ spin_unlock(&hrrq->_lock);
+ }
+ wmb();
+
+ ioa_cfg->identify_hrrq_index = 0;
+ if (ioa_cfg->hrrq_num == 1)
+ atomic_set(&ioa_cfg->hrrq_index, 0);
+ else
+ atomic_set(&ioa_cfg->hrrq_index, 1);
/* Zero out config table */
memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7446,7 +7812,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
return IPR_RC_JOB_RETURN;
}
@@ -7466,12 +7833,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
volatile u64 maskval;
+ int i;
ENTER;
ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
- ioa_cfg->allow_interrupts = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7884,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -8030,7 +8403,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+ if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8452,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
* We are doing nested adapter resets and this is
* not the current reset job.
*/
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue,
+ &ipr_cmd->hrrq->hrrq_free_q);
return;
}
@@ -8113,10 +8488,17 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
struct ipr_cmnd *ipr_cmd;
+ int i;
ioa_cfg->in_reset_reload = 1;
- ioa_cfg->allow_cmds = 0;
- scsi_block_requests(ioa_cfg->host);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
@@ -8141,7 +8523,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
- if (ioa_cfg->ioa_is_dead)
+ int i;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return;
if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8540,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
"IOA taken offline - error recovery failed\n");
ioa_cfg->reset_retries = 0;
- ioa_cfg->ioa_is_dead = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg->reset_cmd = NULL;
@@ -8164,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_fail_all_ops(ioa_cfg);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
return;
} else {
ioa_cfg->in_ioa_bringdown = 1;
@@ -8188,9 +8579,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
*/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
/* Disallow new interrupts, avoid loop */
- ipr_cmd->ioa_cfg->allow_interrupts = 0;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
return IPR_RC_JOB_RETURN;
}
@@ -8247,13 +8646,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
unsigned long flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
- ioa_cfg->allow_cmds = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8310,12 +8715,11 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
} else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
-
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- if (ioa_cfg->ioa_is_dead) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
rc = -EIO;
} else if (ipr_invalid_adapter(ioa_cfg)) {
if (!ipr_testmode)
@@ -8376,8 +8780,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ipr_free_cmd_blks(ioa_cfg);
- pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++)
+ pci_free_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+
pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
@@ -8408,8 +8817,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- free_irq(pdev->irq, ioa_cfg);
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ int i;
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ } else
+ free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ pci_disable_msi(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ pci_disable_msix(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ }
+
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -8430,7 +8854,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
dma_addr_t dma_addr;
- int i;
+ int i, entries_each_hrrq, hrrq_id = 0;
ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8870,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
return -ENOMEM;
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ if (ioa_cfg->hrrq_num > 1) {
+ if (i == 0) {
+ entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
+ } else {
+ entries_each_hrrq =
+ IPR_NUM_BASE_CMD_BLKS/
+ (ioa_cfg->hrrq_num - 1);
+ ioa_cfg->hrrq[i].min_cmd_id =
+ IPR_NUM_INTERNAL_CMD_BLKS +
+ (i - 1) * entries_each_hrrq;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (IPR_NUM_INTERNAL_CMD_BLKS +
+ i * entries_each_hrrq - 1);
+ }
+ } else {
+ entries_each_hrrq = IPR_NUM_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+ }
+ ioa_cfg->hrrq[i].size = entries_each_hrrq;
+ }
+
+ BUG_ON(ioa_cfg->hrrq_num == 0);
+
+ i = IPR_NUM_CMD_BLKS -
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+ if (i > 0) {
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+ }
+
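When several queues are configured, hrrq[0] is sized for internal commands only and the remaining command blocks are split evenly across the other queues, with any integer-division remainder folded into the last one. A worked example with assumed constants (the real IPR_NUM_* values are defined in ipr.h and may differ):

	/*
	 * Assume IPR_NUM_INTERNAL_CMD_BLKS = 5, IPR_NUM_BASE_CMD_BLKS = 100
	 * (so IPR_NUM_CMD_BLKS = 105) and hrrq_num = 4:
	 *
	 *   hrrq[0]: min 0,  max 4    (5 internal entries)
	 *   entries_each_hrrq = 100 / 3 = 33
	 *   hrrq[1]: min 5,  max 37   (33 entries)
	 *   hrrq[2]: min 38, max 70   (33 entries)
	 *   hrrq[3]: min 71, max 103  (33 entries)
	 *
	 *   leftover = 105 - 103 - 1 = 1, folded into the last queue:
	 *   hrrq[3]: max 104, size 34 -- all 105 blocks accounted for.
	 */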
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8484,7 +8943,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ipr_cmd->sense_buffer_dma = dma_addr +
offsetof(struct ipr_cmnd, sense_buffer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+ ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+ hrrq_id++;
}
return 0;
@@ -8516,6 +8979,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+ if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+ || !ioa_cfg->vset_ids)
+ goto out_free_res_entries;
}
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8997,34 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+ }
+
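Note the lock aliasing set up just above: hrrq[0].lock points at the Scsi_Host lock, so single-queue and legacy code paths keep their old locking, while every additional queue gets its own _lock. The rest of the patch then uses two patterns, sketched here for reference (declarations are shown only to keep the fragment self-contained):

	struct ipr_hrr_queue *hrrq;
	unsigned long flags;

	/* Fast path: lock only the queue that owns the command.  For hrrq[0]
	 * this takes the host lock, for the others the private _lock. */
	spin_lock_irqsave(ipr_cmd->hrrq->lock, flags);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock_irqrestore(ipr_cmd->hrrq->lock, flags);

	/* Slow path (EH, reset, bringdown): with the host lock already held,
	 * walk every queue and nest its private _lock underneath. */
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		/* ... inspect hrrq_pending_q, flip allow_cmds, etc. ... */
		spin_unlock(&hrrq->_lock);
	}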
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
- ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
- sizeof(u32) * IPR_NUM_CMD_BLKS,
- &ioa_cfg->host_rrq_dma);
-
- if (!ioa_cfg->host_rrq)
- goto out_ipr_free_cmd_blocks;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ &ioa_cfg->hrrq[i].host_rrq_dma);
+
+ if (!ioa_cfg->hrrq[i].host_rrq) {
+ while (--i > 0)
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ goto out_ipr_free_cmd_blocks;
+ }
+ ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+ }
ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
ioa_cfg->cfg_table_size,
@@ -8582,8 +9068,12 @@ out_free_hostrcb_dma:
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
out_free_host_rrq:
- pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ }
out_ipr_free_cmd_blocks:
ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
@@ -8591,6 +9081,9 @@ out_free_vpd_cbs:
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
+ kfree(ioa_cfg->target_ids);
+ kfree(ioa_cfg->array_ids);
+ kfree(ioa_cfg->vset_ids);
goto out;
}
@@ -8638,15 +9131,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->doorbell = IPR_DOORBELL;
sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
- sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
- sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
- INIT_LIST_HEAD(&ioa_cfg->free_q);
- INIT_LIST_HEAD(&ioa_cfg->pending_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8724,6 +9213,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+ int i, err, vectors;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msix(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, err, vectors;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msi(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
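Both helpers above rely on the 3.x-era negotiation idiom: pci_enable_msix() and pci_enable_msi_block() return a positive count when fewer vectors are available than requested, so the request is shrunk and retried until it either succeeds (0) or fails (< 0). The same idiom in isolation (pdev and NVEC are placeholders for this sketch):

	struct msix_entry entries[NVEC];
	int i, err, vectors = NVEC;

	for (i = 0; i < NVEC; i++)
		entries[i].entry = i;

	while ((err = pci_enable_msix(pdev, entries, vectors)) > 0)
		vectors = err;		/* shrink the request and retry */

	if (err < 0) {
		/* give up: fall back to single MSI or legacy INTx */
	} else {
		/* entries[0..vectors-1].vector now hold the IRQ numbers */
	}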
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+ for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+ snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+ "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+ ioa_cfg->vectors_info[vec_idx].
+ desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+ }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, rc;
+
+ for (i = 1; i < ioa_cfg->nvectors; i++) {
+ rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ ipr_isr_mhrrq,
+ 0,
+ ioa_cfg->vectors_info[i].desc,
+ &ioa_cfg->hrrq[i]);
+ if (rc) {
+ while (--i >= 0)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
/**
* ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
* @pdev: PCI device struct
@@ -8740,6 +9311,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
unsigned long lock_flags = 0;
irqreturn_t rc = IRQ_HANDLED;
+ dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg->msi_received = 1;
@@ -8787,9 +9359,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->msi_received) {
/* MSI test failed */
dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -8806,8 +9378,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
return rc;
}
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+ /* ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
*
@@ -8823,6 +9394,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
+ unsigned long lock_flags;
ENTER;
@@ -8918,17 +9490,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
- /* Enable MSI style interrupts if they are supported. */
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+ dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+ IPR_MAX_MSIX_VECTORS);
+ ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+ }
+
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msix(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSIX;
+ else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msi(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSI;
+ else {
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ }
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP)
- pci_disable_msi(pdev);
+ if (rc == -EOPNOTSUPP) {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ pci_disable_msi(pdev);
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ pci_disable_msix(pdev);
+ }
+
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ }
else if (rc)
goto out_msi_disable;
- else
- dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
- } else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ else {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ dev_info(&pdev->dev,
+ "Request for %d MSIs succeeded with starting IRQ: %d\n",
+ ioa_cfg->nvectors, pdev->irq);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ dev_info(&pdev->dev,
+ "Request for %d MSIXs succeeded.",
+ ioa_cfg->nvectors);
+ }
+ }
+
+ ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+ (unsigned int)num_online_cpus(),
+ (unsigned int)IPR_MAX_HRRQ_NUM);
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
@@ -8975,11 +9586,24 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr,
- ioa_cfg->msi_received ? 0 : IRQF_SHARED,
- IPR_NAME, ioa_cfg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI
+ || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ name_msi_vectors(ioa_cfg);
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+ 0,
+ ioa_cfg->vectors_info[0].desc,
+ &ioa_cfg->hrrq[0]);
+ if (!rc)
+ rc = ipr_request_other_msi_irqs(ioa_cfg);
+ } else {
+ rc = request_irq(pdev->irq, ipr_isr,
+ IRQF_SHARED,
+ IPR_NAME, &ioa_cfg->hrrq[0]);
+ }
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
pdev->irq, rc);
@@ -9004,7 +9628,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ pci_disable_msi(pdev);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_release_regions:
@@ -9074,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
{
unsigned long host_lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9083,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9138,7 +9772,7 @@ static void ipr_remove(struct pci_dev *pdev)
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -9185,6 +9819,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
ioa_cfg->allow_ml_add_del = 1;
ioa_cfg->host->max_channel = IPR_VSET_BUS;
+ ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+
schedule_work(&ioa_cfg->work_q);
return 0;
}
@@ -9203,8 +9848,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ ioa_cfg->iopoll_weight = 0;
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9277,6 +9930,8 @@ static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
@@ -9290,6 +9945,14 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9316,9 +9979,7 @@ static struct pci_driver ipr_driver = {
**/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -9340,7 +10001,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c8a137f83bb1..21a6ff1ed5c6 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -32,14 +32,15 @@
#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/blk-iopoll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.4"
-#define IPR_DRIVER_DATE "(July 11, 2012)"
+#define IPR_DRIVER_VERSION "2.6.0"
+#define IPR_DRIVER_DATE "(November 16, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -82,6 +83,7 @@
#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C0 0x0352
#define IPR_SUBS_DEV_ID_57C3 0x0353
#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
@@ -94,6 +96,10 @@
#define IPR_SUBS_DEV_ID_574D 0x0356
#define IPR_SUBS_DEV_ID_57C8 0x035D
+#define IPR_SUBS_DEV_ID_57D5 0x03FB
+#define IPR_SUBS_DEV_ID_57D6 0x03FC
+#define IPR_SUBS_DEV_ID_57D7 0x03FF
+#define IPR_SUBS_DEV_ID_57D8 0x03FE
#define IPR_NAME "ipr"
/*
@@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
* Misc literals
*/
#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS 0x5
+#define IPR_MAX_HRRQ_NUM 0x10
+#define IPR_INIT_HRRQ 0x0
/*
* Adapter interface types
@@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
__be64 dev_id;
__be64 lun;
__be64 lun_wwn[2];
-#define IPR_MAX_RES_PATH_LENGTH 24
+#define IPR_MAX_RES_PATH_LENGTH 48
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];
@@ -459,9 +468,40 @@ struct ipr_supported_device {
u8 reserved2[16];
}__attribute__((packed, aligned (4)));
+struct ipr_hrr_queue {
+ struct ipr_ioa_cfg *ioa_cfg;
+ __be32 *host_rrq;
+ dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET 0x00000002
+#define IPR_HRRQ_TOGGLE_BIT 0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+#define IPR_ID_HRRQ_SELE_ENABLE 0x02
+ volatile __be32 *hrrq_start;
+ volatile __be32 *hrrq_end;
+ volatile __be32 *hrrq_curr;
+
+ struct list_head hrrq_free_q;
+ struct list_head hrrq_pending_q;
+ spinlock_t _lock;
+ spinlock_t *lock;
+
+ volatile u32 toggle_bit;
+ u32 size;
+ u32 min_cmd_id;
+ u32 max_cmd_id;
+ u8 allow_interrupts:1;
+ u8 ioa_is_dead:1;
+ u8 allow_cmds:1;
+ u8 removing_ioa:1;
+
+ struct blk_iopoll iopoll;
+};
+
/* Command packet structure */
struct ipr_cmd_pkt {
- __be16 reserved; /* Reserved by IOA */
+ u8 reserved; /* Reserved by IOA */
+ u8 hrrq_id;
u8 request_type;
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
@@ -1022,6 +1062,10 @@ struct ipr_hostrcb64_fabric_desc {
struct ipr_hostrcb64_config_element elem[1];
}__attribute__((packed, aligned (8)));
+#define for_each_hrrq(hrrq, ioa_cfg) \
+ for (hrrq = (ioa_cfg)->hrrq; \
+ hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
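for_each_hrrq() simply walks the hrrq[] array up to hrrq_num entries; typical use in the ipr.c hunks above nests each queue's private lock inside whatever outer lock the caller already holds:

	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		/* ... walk hrrq->hrrq_pending_q or hrrq->hrrq_free_q ... */
		spin_unlock(&hrrq->_lock);
	}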
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -1308,6 +1352,7 @@ struct ipr_chip_cfg_t {
u16 max_cmds;
u8 cache_line_size;
u8 clear_isr;
+ u32 iopoll_weight;
struct ipr_interrupt_offsets regs;
};
@@ -1317,6 +1362,7 @@ struct ipr_chip_t {
u16 intr_type;
#define IPR_USE_LSI 0x00
#define IPR_USE_MSI 0x01
+#define IPR_USE_MSIX 0x02
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1375,13 +1421,10 @@ struct ipr_ioa_cfg {
struct list_head queue;
- u8 allow_interrupts:1;
u8 in_reset_reload:1;
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
- u8 ioa_is_dead:1;
u8 dump_taken:1;
- u8 allow_cmds:1;
u8 allow_ml_add_del:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
@@ -1413,21 +1456,7 @@ struct ipr_ioa_cfg {
char trace_start[8];
#define IPR_TRACE_START_LABEL "trace"
struct ipr_trace_entry *trace;
- u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
-
- /*
- * Queue for free command blocks
- */
- char ipr_free_label[8];
-#define IPR_FREEQ_LABEL "free-q"
- struct list_head free_q;
-
- /*
- * Queue for command blocks outstanding to the adapter
- */
- char ipr_pending_label[8];
-#define IPR_PENDQ_LABEL "pend-q"
- struct list_head pending_q;
+ atomic_t trace_index;
char cfg_table_start[8];
#define IPR_CFG_TBL_START "cfg"
@@ -1452,16 +1481,10 @@ struct ipr_ioa_cfg {
struct list_head hostrcb_free_q;
struct list_head hostrcb_pending_q;
- __be32 *host_rrq;
- dma_addr_t host_rrq_dma;
-#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
-#define IPR_HRRQ_RESP_BIT_SET 0x00000002
-#define IPR_HRRQ_TOGGLE_BIT 0x00000001
-#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
- volatile __be32 *hrrq_start;
- volatile __be32 *hrrq_end;
- volatile __be32 *hrrq_curr;
- volatile u32 toggle_bit;
+ struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+ u32 hrrq_num;
+ atomic_t hrrq_index;
+ u16 identify_hrrq_index;
struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
@@ -1507,6 +1530,17 @@ struct ipr_ioa_cfg {
u32 max_cmds;
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
+
+ u16 intr_flag;
+ unsigned int nvectors;
+
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } vectors_info[IPR_MAX_MSIX_VECTORS];
+
+ u32 iopoll_weight;
+
}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
@@ -1544,6 +1578,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
@@ -1717,7 +1752,8 @@ struct ipr_ucode_image_header {
if (ipr_is_device(hostrcb)) { \
if ((hostrcb)->ioa_cfg->sis64) { \
printk(KERN_ERR IPR_NAME ": %s: " fmt, \
- ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+ ipr_format_res_path(hostrcb->ioa_cfg, \
+ hostrcb->hcam.u.error64.fd_res_path, \
hostrcb->rp_buffer, \
sizeof(hostrcb->rp_buffer)), \
__VA_ARGS__); \
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index fcb9d0b20ee4..09c81b2f2169 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data)
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
- if (fsp->state & FC_SRB_RCV_STATUS)
- fc_fcp_complete_locked(fsp);
- else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c2830cc66d6a..b74189d89322 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging;
#define FC_LIBFC_DBG(fmt, args...) \
FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
- printk(KERN_INFO "libfc: " fmt, ##args))
+ pr_info("libfc: " fmt, ##args))
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (lport)->port_id, ##args))
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
-#define FC_DISC_DBG(disc, fmt, args...) \
- FC_CHECK_LOGGING(FC_DISC_LOGGING, \
- printk(KERN_INFO "host%u: disc: " fmt, \
- fc_disc_lport(disc)->host->host_no, \
- ##args))
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (port_id), ##args))
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
#define FC_RPORT_DBG(rdata, fmt, args...) \
FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging;
if ((pkt)->seq_ptr) { \
struct fc_exch *_ep = NULL; \
_ep = fc_seq_exch((pkt)->seq_ptr); \
- printk(KERN_INFO "host%u: fcp: %6.6x: " \
+ pr_info("host%u: fcp: %6.6x: " \
"xid %04x-%04x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, \
(_ep)->oxid, (_ep)->rxid, ##args); \
} else { \
- printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, ##args); \
} \
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging;
#define FC_EXCH_DBG(exch, fmt, args...) \
FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
- printk(KERN_INFO "host%u: xid %4x: " fmt, \
- (exch)->lp->host->host_no, \
- exch->xid, ##args))
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
#define FC_SCSI_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
- printk(KERN_INFO "host%u: scsi: " fmt, \
- (lport)->host->host_no, ##args))
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
/*
* FC-4 Providers.
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 83aa1efec875..d518d17e940f 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
struct fc_frame *fp)
{
- unsigned long delay = FC_DEF_E_D_TOV;
+ unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
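The libfc change above matters because FC_DEF_E_D_TOV is a millisecond value (nominally 2000 ms) while the retry delay is later consumed as a jiffies count, so without the conversion the actual delay scales with HZ. A quick illustration, assuming HZ = 250:

	/* Illustrative only -- why the conversion matters when HZ != 1000. */
	unsigned long wrong = FC_DEF_E_D_TOV;			/* 2000 jiffies = 8 s at HZ=250 */
	unsigned long right = msecs_to_jiffies(FC_DEF_E_D_TOV);	/*  500 jiffies = 2 s at HZ=250 */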
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df4c13a5534c..7706c99ec8bb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -466,11 +466,13 @@ enum intr_type_t {
MSIX,
};
+#define LPFC_CT_CTX_MAX 64
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
- uint32_t flags;
-#define UNSOL_VALID 0x00000001
+ uint32_t valid;
+#define UNSOL_INVALID 0
+#define UNSOL_VALID 1
uint16_t oxid;
uint16_t rxid;
};
@@ -750,6 +752,15 @@ struct lpfc_hba {
void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
PCI BAR2 */
+ void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 with dual-ULP support */
+ void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2 with dual-ULP support */
+ void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+ PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0 0
+#define PCI_64BIT_BAR2 2
+#define PCI_64BIT_BAR4 4
void __iomem *MBslimaddr; /* virtual address for mbox cmds */
void __iomem *HAregaddr; /* virtual address for host attn reg */
void __iomem *CAregaddr; /* virtual address for chip attn reg */
@@ -938,7 +949,7 @@ struct lpfc_hba {
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
- struct unsol_rcv_ct_ctx ct_ctx[64];
+ struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
uint8_t menlo_flag; /* menlo generic flags */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f7368eb80415..32d5683e6181 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) {
evt_dat->immed_dat = phba->ctx_idx;
- phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+ phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
/* Provide warning for over-run of the ct_ctx array */
- if (phba->ct_ctx[evt_dat->immed_dat].flags &
+ if (phba->ct_ctx[evt_dat->immed_dat].valid ==
UNSOL_VALID)
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
- phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+ phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
evt_dat->immed_dat = piocbq->iocb.ulpContext;
@@ -1013,6 +1013,47 @@ error_ct_unsol_exit:
}
/**
+ * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an abort of a CT command toward the management plane
+ * for an SLI4 port.
+ *
+ * If a pending context for a CT command to the management plane is present,
+ * it clears that context and returns 1 (handled); otherwise it returns 0 to
+ * indicate that no such context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header fc_hdr;
+ struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+ int ctx_idx, handled = 0;
+ uint16_t oxid, rxid;
+ uint32_t sid;
+
+ memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+ oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+ for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+ if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+ continue;
+ if (phba->ct_ctx[ctx_idx].rxid != rxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].oxid != oxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].SID != sid)
+ continue;
+ phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+ handled = 1;
+ }
+ return handled;
+}
+
+/**
* lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
**/
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
- if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+ if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
- phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+ phba->ct_ctx[tag].valid = UNSOL_INVALID;
} else
icmd->ulpContext = (ushort) tag;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 69d66e3662cb..76ca65dae781 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 65f9fb6862e6..7bff3a19af56 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/**
- * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
* @phba: Pointer to HBA context object.
- * @pring: Pointer to the driver internal I/O ring.
- * @piocbq: Pointer to the IOCBQ.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
*
- * This function serves as the default handler for the sli4 unsolicited
- * abort event. It shall be invoked when there is no application interface
- * registered unsolicited abort handler. This handler does nothing but
- * just simply releases the dma buffer used by the unsol abort event.
+ * This function serves as the upper level protocol abort handler for the CT
+ * protocol.
+ *
+ * Returns 1 if the abort has been handled, 0 otherwise.
**/
-void
-lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
- IOCB_t *icmd = &piocbq->iocb;
- struct lpfc_dmabuf *bdeBuf;
- uint32_t size;
+ int handled;
- /* Forward abort event to any process registered to receive ct event */
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ /* CT upper level goes through BSG */
+ handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
- /* If there is no BDE associated with IOCB, there is nothing to do */
- if (icmd->ulpBdeCount == 0)
- return;
- bdeBuf = piocbq->context2;
- piocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
+ return handled;
}
static void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b9440deaad45..08d156a9094f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
+ if (cmd == ELS_CMD_PLOGI &&
+ did == NameServer_DID) {
+ /* Continue forever if plogi to */
+ /* the nameserver fails */
+ maxretry = 0;
+ delay = 100;
+ }
retry = 1;
break;
}
@@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload;
- uint32_t cmd, did, newnode, rjt_err = 0;
+ uint32_t cmd, did, newnode;
+ uint8_t rjt_exp, rjt_err = 0;
IOCB_t *icmd = &elsiocb->iocb;
if (!vport || !(elsiocb->context2))
@@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* If Nport discovery is delayed, reject PLOGIs */
if (vport->fc_flag & FC_DISC_DELAYED) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
/* We get here, and drop thru, if we are PT2PT with
@@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPDISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPRLI++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_REC:
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unsupported ELS command, reject */
rjt_err = LSRJT_CMD_UNSUPPORTED;
+ rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (rjt_err) {
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
NULL);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7398ca862e97..e8c476031703 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -538,6 +538,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10000000
#define ELS_CMD_TEST 0x11000000
#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_REC 0x13000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10
#define ELS_CMD_TEST 0x11
#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_REC 0x13
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index a47cfbdd05f2..6e93b886cd4d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
#define LPFC_SLI4_MB_WORD_COUNT 64
#define LPFC_MAX_MQ_PAGE 8
+#define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8
@@ -703,24 +704,41 @@ struct lpfc_register {
* BAR0. The offsets are the same so the driver must account for
* any base address difference.
*/
-#define LPFC_RQ_DOORBELL 0x00A0
-#define lpfc_rq_doorbell_num_posted_SHIFT 16
-#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
-#define lpfc_rq_doorbell_num_posted_WORD word0
-#define lpfc_rq_doorbell_id_SHIFT 0
-#define lpfc_rq_doorbell_id_MASK 0xFFFF
-#define lpfc_rq_doorbell_id_WORD word0
-
-#define LPFC_WQ_DOORBELL 0x0040
-#define lpfc_wq_doorbell_num_posted_SHIFT 24
-#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
-#define lpfc_wq_doorbell_num_posted_WORD word0
-#define lpfc_wq_doorbell_index_SHIFT 16
-#define lpfc_wq_doorbell_index_MASK 0x00FF
-#define lpfc_wq_doorbell_index_WORD word0
-#define lpfc_wq_doorbell_id_SHIFT 0
-#define lpfc_wq_doorbell_id_MASK 0xFFFF
-#define lpfc_wq_doorbell_id_WORD word0
+#define LPFC_ULP0_RQ_DOORBELL 0x00A0
+#define LPFC_ULP1_RQ_DOORBELL 0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD word0
+#define lpfc_rq_db_list_fm_index_SHIFT 16
+#define lpfc_rq_db_list_fm_index_MASK 0x00FF
+#define lpfc_rq_db_list_fm_index_WORD word0
+#define lpfc_rq_db_list_fm_id_SHIFT 0
+#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD word0
+#define lpfc_rq_db_ring_fm_id_SHIFT 0
+#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD word0
+
+#define LPFC_ULP0_WQ_DOORBELL 0x0040
+#define LPFC_ULP1_WQ_DOORBELL 0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD word0
+#define lpfc_wq_db_list_fm_index_SHIFT 16
+#define lpfc_wq_db_list_fm_index_MASK 0x00FF
+#define lpfc_wq_db_list_fm_index_WORD word0
+#define lpfc_wq_db_list_fm_id_SHIFT 0
+#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD word0
+#define lpfc_wq_db_ring_fm_id_SHIFT 0
+#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD word0
#define LPFC_EQCQ_DOORBELL 0x0120
#define lpfc_eqcq_doorbell_se_SHIFT 31
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
struct { /* Version 0 Request */
uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
-#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
#define lpfc_mbx_wq_create_num_pages_WORD word0
+#define lpfc_mbx_wq_create_dua_SHIFT 8
+#define lpfc_mbx_wq_create_dua_MASK 0x00000001
+#define lpfc_mbx_wq_create_dua_WORD word0
#define lpfc_mbx_wq_create_cq_id_SHIFT 16
#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_cq_id_WORD word0
- struct dma_address page[LPFC_MAX_WQ_PAGE];
+ struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+ uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT 0
+#define lpfc_mbx_wq_create_bua_MASK 0x00000001
+#define lpfc_mbx_wq_create_bua_WORD word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
+#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD word9
} request;
struct { /* Version 1 Request */
uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_q_id_SHIFT 0
#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT 0
+#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD word2
+#define WQ_PCI_BAR_0_AND_1 0x00
+#define WQ_PCI_BAR_2_AND_3 0x01
+#define WQ_PCI_BAR_4_AND_5 0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT 16
+#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD word2
} response;
} u;
};
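A note on the register definitions above: every field is described by a <name>_SHIFT/_MASK/_WORD triple that the driver's bf_set()/bf_get() accessors turn into read-modify-write accesses on the named word. A simplified, self-contained illustration of that convention (the demo_* names and macros are invented for illustration, not the driver's own):

	#include <stdint.h>

	struct demo_doorbell { uint32_t word0; };

	#define demo_db_id_SHIFT	0
	#define demo_db_id_MASK		0xFFFF
	#define demo_db_id_WORD		word0

	#define demo_bf_set(name, ptr, value)					\
		((ptr)->name##_WORD = (((ptr)->name##_WORD &			\
			~((uint32_t)name##_MASK << name##_SHIFT)) |		\
			(((uint32_t)(value) & name##_MASK) << name##_SHIFT)))

	#define demo_bf_get(name, ptr)						\
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* usage: demo_bf_set(demo_db_id, &db, queue_id);
	 *        id = demo_bf_get(demo_db_id, &db); */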
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
#define lpfc_mbx_rq_create_num_pages_SHIFT 0
#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_num_pages_WORD word0
+#define lpfc_mbx_rq_create_dua_SHIFT 16
+#define lpfc_mbx_rq_create_dua_MASK 0x00000001
+#define lpfc_mbx_rq_create_dua_WORD word0
+#define lpfc_mbx_rq_create_bqu_SHIFT 17
+#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
+#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD word0
struct rq_context context;
struct dma_address page[LPFC_MAX_WQ_PAGE];
} request;
struct {
uint32_t word0;
-#define lpfc_mbx_rq_create_q_id_SHIFT 0
-#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
-#define lpfc_mbx_rq_create_q_id_WORD word0
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT 0
+#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD word2
+#define lpfc_mbx_rq_create_db_format_SHIFT 16
+#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD word2
} response;
} u;
};
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
} u;
};
+struct lpfc_mbx_query_fw_config {
+ struct mbox_header header;
+ struct {
+ uint32_t config_number;
+#define LPFC_FC_FCOE 0x00000007
+ uint32_t asic_revision;
+ uint32_t physical_port;
+ uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE 0x00000040
+#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_DUA_MODE 0x00000800
+ uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
+ uint32_t ulp0_nap_words[12];
+ uint32_t ulp1_mode;
+ uint32_t ulp1_nap_words[12];
+ uint32_t function_capabilities;
+ uint32_t cqid_base;
+ uint32_t cqid_tot;
+ uint32_t eqid_base;
+ uint32_t eqid_tot;
+ uint32_t ulp0_nap2_words[2];
+ uint32_t ulp1_nap2_words[2];
+ } rsp;
+};
+
struct lpfc_id_range {
uint32_t word5;
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define lpfc_mbx_redisc_fcf_index_WORD word12
};
-struct lpfc_mbx_query_fw_cfg {
- struct mbox_header header;
- uint32_t config_number;
- uint32_t asic_rev;
- uint32_t phys_port;
- uint32_t function_mode;
-/* firmware Function Mode */
-#define lpfc_function_mode_toe_SHIFT 0
-#define lpfc_function_mode_toe_MASK 0x00000001
-#define lpfc_function_mode_toe_WORD function_mode
-#define lpfc_function_mode_nic_SHIFT 1
-#define lpfc_function_mode_nic_MASK 0x00000001
-#define lpfc_function_mode_nic_WORD function_mode
-#define lpfc_function_mode_rdma_SHIFT 2
-#define lpfc_function_mode_rdma_MASK 0x00000001
-#define lpfc_function_mode_rdma_WORD function_mode
-#define lpfc_function_mode_vm_SHIFT 3
-#define lpfc_function_mode_vm_MASK 0x00000001
-#define lpfc_function_mode_vm_WORD function_mode
-#define lpfc_function_mode_iscsi_i_SHIFT 4
-#define lpfc_function_mode_iscsi_i_MASK 0x00000001
-#define lpfc_function_mode_iscsi_i_WORD function_mode
-#define lpfc_function_mode_iscsi_t_SHIFT 5
-#define lpfc_function_mode_iscsi_t_MASK 0x00000001
-#define lpfc_function_mode_iscsi_t_WORD function_mode
-#define lpfc_function_mode_fcoe_i_SHIFT 6
-#define lpfc_function_mode_fcoe_i_MASK 0x00000001
-#define lpfc_function_mode_fcoe_i_WORD function_mode
-#define lpfc_function_mode_fcoe_t_SHIFT 7
-#define lpfc_function_mode_fcoe_t_MASK 0x00000001
-#define lpfc_function_mode_fcoe_t_WORD function_mode
-#define lpfc_function_mode_dal_SHIFT 8
-#define lpfc_function_mode_dal_MASK 0x00000001
-#define lpfc_function_mode_dal_WORD function_mode
-#define lpfc_function_mode_lro_SHIFT 9
-#define lpfc_function_mode_lro_MASK 0x00000001
-#define lpfc_function_mode_lro_WORD function_mode
-#define lpfc_function_mode_flex10_SHIFT 10
-#define lpfc_function_mode_flex10_MASK 0x00000001
-#define lpfc_function_mode_flex10_WORD function_mode
-#define lpfc_function_mode_ncsi_SHIFT 11
-#define lpfc_function_mode_ncsi_MASK 0x00000001
-#define lpfc_function_mode_ncsi_WORD function_mode
-};
-
/* Status field for embedded SLI_CONFIG mailbox command */
#define STATUS_SUCCESS 0x0
#define STATUS_FAILED 0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
struct lpfc_mbx_read_config rd_config;
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
- struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+ struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 89ad55807012..314b4f61b9e3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3165,14 +3165,10 @@ destroy_port(struct lpfc_vport *vport)
int
lpfc_get_instance(void)
{
- int instance = 0;
+ int ret;
- /* Assign an unused number */
- if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
- return -1;
- if (idr_get_new(&lpfc_hba_index, NULL, &instance))
- return -1;
- return instance;
+ ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
+ return ret < 0 ? -1 : ret;
}
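For context: the idr_pre_get()/idr_get_new() pair was replaced tree-wide by idr_alloc(), which allocates and returns the new ID in one call, or a negative errno on failure. A minimal sketch of the new pattern, with hypothetical names:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(my_index_idr);

	static int my_get_instance(void)
	{
		/* start = 0, end = 0 means "any non-negative ID" */
		int id = idr_alloc(&my_index_idr, NULL, 0, 0, GFP_KERNEL);

		return id < 0 ? -1 : id;	/* keep the old -1 error convention */
	}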
/**
@@ -6233,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_WQ_DOORBELL;
phba->sli4_hba.EQCQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
phba->sli4_hba.MQDBregaddr =
@@ -6289,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
return -ENODEV;
phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_RQ_DOORBELL);
phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_WQ_DOORBELL);
phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6987,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL;
}
+ if (phba->pci_bar0_memmap_p) {
+ iounmap(phba->pci_bar0_memmap_p);
+ phba->pci_bar0_memmap_p = NULL;
+ }
+ if (phba->pci_bar2_memmap_p) {
+ iounmap(phba->pci_bar2_memmap_p);
+ phba->pci_bar2_memmap_p = NULL;
+ }
+ if (phba->pci_bar4_memmap_p) {
+ iounmap(phba->pci_bar4_memmap_p);
+ phba->pci_bar4_memmap_p = NULL;
+ }
+
/* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map);
@@ -7050,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
int rc = -ENOMEM;
int fcp_eqidx, fcp_cqidx, fcp_wqidx;
int fcp_cq_index = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t length;
+
+ /* Check for dual-ULP support */
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3249 Unable to allocate memory for "
+ "QUERY_FW_CFG mailbox command\n");
+ return -ENOMEM;
+ }
+ length = (sizeof(struct lpfc_mbx_query_fw_config) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3250 QUERY_FW_CFG mailbox failed with status "
+ "x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -ENXIO;
+ goto out_error;
+ }
+
+ phba->sli4_hba.fw_func_mode =
+ mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+ phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+ phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+ "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
/*
* Set up HBA Event Queues (EQs)
@@ -7664,78 +7724,6 @@ out:
}
/**
- * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
- * @phba: pointer to lpfc hba data structure.
- * @cnt: number of nop mailbox commands to send.
- *
- * This routine is invoked to send a number @cnt of NOP mailbox command and
- * wait for each command to complete.
- *
- * Return: the number of NOP mailbox command completed.
- **/
-static int
-lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
-{
- LPFC_MBOXQ_t *mboxq;
- int length, cmdsent;
- uint32_t mbox_tmo;
- uint32_t rc = 0;
- uint32_t shdr_status, shdr_add_status;
- union lpfc_sli4_cfg_shdr *shdr;
-
- if (cnt == 0) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2518 Requested to send 0 NOP mailbox cmd\n");
- return cnt;
- }
-
- mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2519 Unable to allocate memory for issuing "
- "NOP mailbox command\n");
- return 0;
- }
-
- /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
- length = (sizeof(struct lpfc_mbx_nop) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
-
- for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_NOP, length,
- LPFC_SLI4_MBX_EMBED);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- }
- if (rc == MBX_TIMEOUT)
- break;
- /* Check return status */
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2520 NOP mailbox command failed "
- "status x%x add_status x%x mbx "
- "status x%x\n", shdr_status,
- shdr_add_status, rc);
- break;
- }
- }
-
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
-
- return cmdsent;
-}
-
-/**
* lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
* @phba: pointer to lpfc hba data structure.
*
@@ -8503,37 +8491,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to unset the HBA device initialization steps to
- * a device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_unset_hba(struct lpfc_hba *phba)
-{
- struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
-
- phba->pport->work_port_events = 0;
-
- /* Stop the SLI4 device port */
- lpfc_stop_port(phba);
-
- lpfc_sli4_disable_intr(phba);
-
- /* Reset SLI4 HBA FCoE function */
- lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
-
- return;
-}
-
-/**
* lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
* @phba: Pointer to HBA context object.
*
@@ -9595,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct Scsi_Host *shost = NULL;
int error, ret;
uint32_t cfg_mode, intr_mode;
- int mcnt;
int adjusted_fcp_io_channel;
/* Allocate memory for HBA structure */
@@ -9684,58 +9640,35 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
/* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;
- while (true) {
- /* Put device to a known state before enabling interrupt */
- lpfc_stop_port(phba);
- /* Configure and enable interrupt */
- intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
- if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0426 Failed to enable interrupt.\n");
- error = -ENODEV;
- goto out_free_sysfs_attr;
- }
- /* Default to single EQ for non-MSI-X */
- if (phba->intr_type != MSIX)
- adjusted_fcp_io_channel = 1;
- else
- adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
- phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
- /* Set up SLI-4 HBA */
- if (lpfc_sli4_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1421 Failed to set up hba\n");
- error = -ENODEV;
- goto out_disable_intr;
- }
- /* Send NOP mbx cmds for non-INTx mode active interrupt test */
- if (intr_mode != 0)
- mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
- LPFC_ACT_INTR_CNT);
-
- /* Check active interrupts received only for MSI/MSI-X */
- if (intr_mode == 0 ||
- phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
- /* Log the current active interrupt mode */
- phba->intr_mode = intr_mode;
- lpfc_log_intr_mode(phba, intr_mode);
- break;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0451 Configure interrupt mode (%d) "
- "failed active interrupt test.\n",
- intr_mode);
- /* Unset the previous SLI-4 HBA setup. */
- /*
- * TODO: Is this operation compatible with IF TYPE 2
- * devices? All port state is deleted and cleared.
- */
- lpfc_sli4_unset_hba(phba);
- /* Try next level of interrupt mode */
- cfg_mode = --intr_mode;
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
+ }
+ /* Default to single EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ adjusted_fcp_io_channel = 1;
+ else
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+ /* Set up SLI-4 HBA */
+ if (lpfc_sli4_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1421 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_disable_intr;
}
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d8fadcb2db73..46128c679202 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1115,6 +1115,13 @@ out:
"0261 Cannot Register NameServer login\n");
}
+ /*
+ ** In case the node reference counter does not go to zero, ensure that
+ ** the stale state for the node is not processed.
+ */
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 60e5a177644c..98af07c6e300 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -288,6 +288,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
}
/**
+ * lpfc_change_queue_type() - Change a device's scsi tag queuing type
+ * @sdev: Pointer to the scsi device whose queue type is to change
+ * @tag_type: Identifier for queue tag type
+ */
+static int
+lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
break;
}
} else
- fcp_cmnd->fcpCntl1 = 0;
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
struct scsi_host_template lpfc_vport_template = {
@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 624eab370396..74b67d98e952 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* Ring Doorbell */
doorbell.word0 = 0;
- bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
- bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
- writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ if (q->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+ bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+ } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, q->db_regaddr);
return 0;
}
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % hq->entry_repost)) {
doorbell.word0 = 0;
- bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
- hq->entry_repost);
- bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
- writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ if (hq->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+ } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+ hq->host_index);
+ bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, hq->db_regaddr);
}
return put_index;
}
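Illustrative note on the two doorbell hunks above: with dual-ULP capable firmware each queue reports its own doorbell layout and register, so the put routines now build word0 according to the reported format and write it to the per-queue db_regaddr instead of a single global doorbell. A condensed kernel-style sketch of that dispatch, using invented demo_* names but the same bit positions as the defines earlier in this patch:

	#include <linux/io.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	enum demo_db_format { DEMO_DB_RING, DEMO_DB_LIST };

	struct demo_queue {
		enum demo_db_format db_format;	/* from the queue-create response */
		u32 queue_id;
		void __iomem *db_regaddr;	/* BAR mapping + doorbell_offset */
	};

	static int demo_ring_doorbell(struct demo_queue *q, u32 posted, u32 host_index)
	{
		u32 word0;

		if (q->db_format == DEMO_DB_LIST)
			word0 = (posted << 24) | (host_index << 16) | q->queue_id;
		else if (q->db_format == DEMO_DB_RING)
			word0 = (posted << 16) | q->queue_id;
		else
			return -EINVAL;

		writel(word0, q->db_regaddr);
		return 0;
	}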
@@ -4939,7 +4956,7 @@ out_free_mboxq:
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
- uint8_t fcp_eqidx;
+ int fcp_eqidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
}
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3279 Invalid provisioning of "
+ "rpi:%d\n", count);
+ rc = -EINVAL;
+ goto err_exit;
+ }
base = phba->sli4_hba.max_cfg_param.rpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3280 Invalid provisioning of "
+ "vpi:%d\n", count);
+ rc = -EINVAL;
+ goto free_rpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* XRIs. */
count = phba->sli4_hba.max_cfg_param.max_xri;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3281 Invalid provisioning of "
+ "xri:%d\n", count);
+ rc = -EINVAL;
+ goto free_vpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.xri_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VFIs. */
count = phba->sli4_hba.max_cfg_param.max_vfi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3282 Invalid provisioning of "
+ "vfi:%d\n", count);
+ rc = -EINVAL;
+ goto free_xri_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vfi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -6599,7 +6644,7 @@ static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
@@ -6653,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6668,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6722,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -6732,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
@@ -6751,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
@@ -6773,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
@@ -6797,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
@@ -6814,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
@@ -6828,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
@@ -6920,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
@@ -6947,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -6956,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -6971,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* This is a continuation of a command (CX) so this
* sglq is on the active list
*/
- sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
if (!sglq)
return IOCB_ERROR;
}
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->prt[3].type = FC_TYPE_CT;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
- /* abort unsolicited sequence */
- pring->prt[4].profile = 0; /* Mask 4 */
- pring->prt[4].rctl = FC_RCTL_BA_ABTS;
- pring->prt[4].type = FC_TYPE_BLS;
- pring->prt[4].lpfc_sli_rcv_unsol_event =
- lpfc_sli4_ct_abort_unsol_event;
break;
}
totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
irqreturn_t hba_irq_rc;
bool hba_handled = false;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
@@ -12097,6 +12136,54 @@ out_fail:
}
/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function maps the specified PCI BAR into host memory, if it has not
+ * already been mapped, and returns the mapped address. The returned address
+ * may be NULL if the mapping fails.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+ struct pci_dev *pdev;
+ unsigned long bar_map, bar_map_len;
+
+ if (!phba->pcidev)
+ return NULL;
+ else
+ pdev = phba->pcidev;
+
+ switch (pci_barset) {
+ case WQ_PCI_BAR_0_AND_1:
+ if (!phba->pci_bar0_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+ phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar0_memmap_p;
+ case WQ_PCI_BAR_2_AND_3:
+ if (!phba->pci_bar2_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar2_memmap_p;
+ case WQ_PCI_BAR_4_AND_5:
+ if (!phba->pci_bar4_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar4_memmap_p;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+/**
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
* @phba: HBA structure that indicates port to create a queue on.
* @startq: The starting FCP EQ to modify
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
cq->queue_id);
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
+
if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
status = -ENXIO;
goto out;
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+ &wq_create->u.response);
+ if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (wq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3265 WQ[%d] doorbell format not "
+ "supported: x%x\n", wq->queue_id,
+ wq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+ pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+ &wq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3263 WQ[%d] failed to memmap pci "
+ "barset:x%x\n", wq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+ db_offset = wq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3252 WQ[%d] doorbell offset not "
+ "supported: x%x\n", wq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ wq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3264 WQ[%d]: barset:x%x, offset:x%x\n",
+ wq->queue_id, pci_barset, db_offset);
+ } else {
+ wq->db_format = LPFC_DB_LIST_FORMAT;
+ wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+ }
wq->type = LPFC_WQ;
wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -ENXIO;
goto out;
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+ &rq_create->u.response);
+ if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3262 RQ [%d] doorbell format not "
+ "supported: x%x\n", hrq->queue_id,
+ hrq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+
+ pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+ &rq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3269 RQ[%d] failed to memmap pci "
+ "barset:x%x\n", hrq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ db_offset = rq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3270 RQ[%d] doorbell offset not "
+ "supported: x%x\n", hrq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ hrq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
+ hrq->queue_id, pci_barset, db_offset);
+ } else {
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ }
hrq->type = LPFC_HRQ;
hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14063,6 +14251,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the assembled sequence at the upper level
+ * protocol, described by the information in the basic abort @dmabuf. It
+ * checks whether a matching pending context exists at the upper level
+ * protocol and, if so, cleans it up.
+ *
+ * Return
+ * true -- if a matching pending context of the sequence was found and
+ * cleaned up at the ulp;
+ * false -- if no matching pending context of the sequence is present
+ * at the ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int handled;
+
+ /* Accepting abort at ulp with SLI4 only */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return false;
+
+ /* Give each interested upper level protocol a chance to handle the abort */
+ handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+ if (handled)
+ return true;
+
+ return false;
+}
+
+/**
* lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- if (cmd_iocbq)
+ struct lpfc_nodelist *ndlp;
+
+ if (cmd_iocbq) {
+ ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ }
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
- struct fc_frame_header *fc_hdr)
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *ctiocb = NULL;
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
rxid = be16_to_cpu(fc_hdr->fh_rx_id);
- ndlp = lpfc_findnode_did(phba->pport, sid);
+ ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "1268 Find ndlp returned NULL for oxid:x%x "
- "SID:x%x\n", oxid, sid);
- return;
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1268 Failed to allocate ndlp for "
+ "oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, sid);
+ /* Put ndlp onto pport node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "3275 Failed to active ndlp found "
+ "for oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
}
/* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = ndlp;
+ ctiocb->context1 = lpfc_nlp_get(ndlp);
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
if (lxri != NO_XRI)
lpfc_set_rrq_active(phba, ndlp, lxri,
(xri == oxid) ? rxid : oxid, 0);
- /* If the oxid maps to the FCP XRI range or if it is out of range,
- * send a BLS_RJT. The driver no longer has that exchange.
- * Override the IOCB for a BA_RJT.
+ /* For BA_ABTS from exchange responder, if the logical xri with
+ * the oxid maps to the FCP XRI range, the port no longer has
+ * that exchange context, send a BLS_RJT. Override the IOCB for
+ * a BA_RJT.
*/
- if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
- phba->sli4_hba.max_cfg_param.xri_base) ||
- xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
- phba->sli4_hba.max_cfg_param.xri_base)) {
+ if ((fctl & FC_FC_EX_CTX) &&
+ (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ /* If BA_ABTS failed to abort a partially assembled receive sequence,
+ * the driver no longer has that exchange, send a BLS_RJT. Override
+ * the IOCB for a BA_RJT.
+ */
+ if (aborted == false) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2925 Failed to issue CT ABTS RSP x%x on "
- "xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
- phba->link_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_nlp_put(ndlp);
+ ctiocb->context1 = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct fc_frame_header fc_hdr;
uint32_t fctl;
- bool abts_par;
+ bool aborted;
/* Make a copy of fc_hdr before the dmabuf being released */
memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
if (fctl & FC_FC_EX_CTX) {
- /*
- * ABTS sent by responder to exchange, just free the buffer
- */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by responder to exchange, no cleanup needed */
+ aborted = true;
} else {
- /*
- * ABTS sent by initiator to exchange, need to do cleanup
- */
- /* Try to abort partially assembled seq */
- abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
-
- /* Send abort to ULP if partially seq abort failed */
- if (abts_par == false)
- lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
- else
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by initiator to exchange, need to do cleanup */
+ aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+ if (aborted == false)
+ aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
}
- /* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+ /* Respond with BA_ACC or BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
uint16_t next_fcf_index;
+initial_priority:
/* Search start from next bit of currently registered FCF index */
+ next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
next_priority:
- next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
- LPFC_SLI4_FCF_TBL_INDX_MAX;
+ /* Determine the next fcf index to check */
+ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX,
next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
* at that level and continue the selection process.
*/
if (lpfc_check_next_fcf_pri_level(phba))
- goto next_priority;
+ goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
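
As a rough standalone illustration of the wraparound round-robin index scan the hunk above reworks (not the driver code itself), the sketch below searches a plain 64-entry eligibility bitmap starting just after the current index and retries from 0 when it runs off the end; find_next_set, next_rr_index and TBL_MAX are invented stand-ins for find_next_bit, lpfc_sli4_fcf_rr_next_index_get and LPFC_SLI4_FCF_TBL_INDX_MAX.

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX 64      /* stand-in for LPFC_SLI4_FCF_TBL_INDX_MAX */

/* Return the first set bit at or after 'start', or 'size' if none is set. */
static unsigned int find_next_set(uint64_t bmask, unsigned int size,
                                  unsigned int start)
{
    unsigned int i;

    for (i = start; i < size; i++)
        if (bmask & (1ULL << i))
            return i;
    return size;
}

/* Pick the next eligible index after 'current', wrapping back to the
 * start of the table when the forward scan finds nothing. */
static unsigned int next_rr_index(uint64_t bmask, unsigned int current)
{
    unsigned int next = (current + 1) % TBL_MAX;

    next = find_next_set(bmask, TBL_MAX, next);
    if (next >= TBL_MAX)                /* ran off the end, wrap to 0 */
        next = find_next_set(bmask, TBL_MAX, 0);
    return next;                        /* TBL_MAX means nothing eligible */
}

int main(void)
{
    uint64_t eligible = (1ULL << 3) | (1ULL << 7) | (1ULL << 40);

    printf("after 7  -> %u\n", next_rr_index(eligible, 7));   /* 40 */
    printf("after 40 -> %u\n", next_rr_index(eligible, 40));  /* 3, wraps */
    return 0;
}
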
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44c427a45d66..be02b59ea279 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -139,6 +139,10 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+ uint16_t db_format;
+#define LPFC_DB_RING_FORMAT 0x01
+#define LPFC_DB_LIST_FORMAT 0x02
+ void __iomem *db_regaddr;
/* For q stats */
uint32_t q_cnt_1;
uint32_t q_cnt_2;
@@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+ uint8_t fw_func_mode; /* FW function protocol mode */
+ uint32_t ulp0_mode; /* ULP0 protocol mode */
+ uint32_t ulp1_mode; /* ULP1 protocol mode */
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ba596e854bbc..f3b7795a296b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.36"
+#define LPFC_DRIVER_VERSION "8.3.37"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3b2365c8eab2..408d2548a748 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.504.01.00-rc1"
-#define MEGASAS_RELDATE "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "06.506.00.00-rc1"
+#define MEGASAS_RELDATE "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 66a0fec0437b..9d53540207ec 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v06.504.01.00-rc1
+ * Version : v06.506.00.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 74030aff69ad..a7d56687bfca 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+ io_request->EEDPBlockSize = scp->device->sector_size;
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
- io_request->DevHandle =
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index a7c64f051996..f68a3cd11d5d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -61,7 +61,6 @@
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MEGASAS_EEDPBLOCKSIZE 512
/*
* Raid context flags
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ffd85c511c8e..bcb23d28b3e8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
ioc->name, __func__);
+ /* It may be possible that EEH recovery can resolve some of the
+ * PCI bus failure issues rather than the driver removing the dead
+ * ioc function on the assumption that the controller is in a
+ * non-operational state. So priority is given to EEH recovery
+ * here. If that does not resolve the issue, the mpt2sas driver
+ * will consider the controller non-operational and remove the
+ * dead ioc function.
+ */
+ if (ioc->non_operational_loop++ < 5) {
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ goto rearm_timer;
+ }
+
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
* pending commands back to the OS. This call is required to avoid
@@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
+ ioc->non_operational_loop = 0;
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -2007,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
default:
break;
}
@@ -4386,6 +4410,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
+ ioc->non_operational_loop = 0;
return 0;
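
The non_operational_loop counter added above gives EEH/PCI recovery a few poll intervals before the IOC is declared dead. The fragment below is a hedged, self-contained sketch of that counting pattern only; fake_ioc, give_recovery_a_chance and MAX_NONOP_POLLS are invented names, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NONOP_POLLS 5   /* same bound the hunk uses for non_operational_loop */

struct fake_ioc {
    int non_operational_loop;
};

/* Called from a periodic fault-poll worker.  Returns true if the caller
 * should simply re-arm its timer and wait for recovery, false if the
 * controller should now be treated as dead and torn down. */
static bool give_recovery_a_chance(struct fake_ioc *ioc, bool non_operational)
{
    if (!non_operational) {
        ioc->non_operational_loop = 0;   /* healthy again, reset the count */
        return true;
    }
    if (ioc->non_operational_loop++ < MAX_NONOP_POLLS)
        return true;                     /* keep waiting for recovery */
    return false;                        /* give up, remove the dead ioc */
}

int main(void)
{
    struct fake_ioc ioc = { 0 };
    int poll;

    for (poll = 0; poll < 8; poll++)
        printf("poll %d: %s\n", poll,
               give_recovery_a_chance(&ioc, true) ? "wait" : "remove");
    return 0;
}
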
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 543d8d637479..4caaac13682f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -165,6 +165,10 @@
"Intel(R) Integrated RAID Module RMS25KB080"
#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
"Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
@@ -180,6 +184,8 @@
#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
@@ -835,6 +841,7 @@ struct MPT2SAS_ADAPTER {
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+ u32 non_operational_loop;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 04f8010f0770..18360032a520 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -1310,7 +1309,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
void *sg_local, *chain;
u32 chain_offset;
u32 chain_length;
- u32 chain_flags;
int sges_left;
u32 sges_in_segment;
u8 simple_sgl_flags;
@@ -1356,8 +1354,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- /* initializing the chain flags and pointers */
- chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+ /* initializing the pointers */
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
if (!chain_req)
return -1;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index ce7e59b2fc08..1df9ed4f371d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 8af944d7d13d..054d5231c974 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -3136,7 +3135,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
memset(&ioc->diag_trigger_mpi, 0,
- sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6421a06c4ce2..dcbf7c880cb2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -41,7 +41,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -2755,13 +2754,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
int i;
u16 handle;
u16 reason_code;
- u8 phy_number;
for (i = 0; i < event_data->NumEntries; i++) {
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
if (!handle)
continue;
- phy_number = event_data->StartPhyNum + i;
reason_code = event_data->PHY[i].PhyStatus &
MPI2_EVENT_SAS_TOPO_RC_MASK;
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index da6c5f25749c..6f8d6213040b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -42,7 +42,6 @@
* USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 078c63913b55..532110f4562a 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct domain_device *dev = task->dev;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct scatterlist *sg_req, *sg_resp;
u32 req_len, resp_len, tag = tei->tag;
void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
hdr->flags |= flags;
hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 2ae77a0394b2..9f3cc13a5ce7 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache;
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
+#define MVS_PHY_ID (1U << sas_phy->id)
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index c06b8e5aa2cf..d8293f25ca33 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
odi->osdname_len = get_attrs[a].len;
/* Avoid NULL for memcmp optimization 0-length is good enough */
odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (odi->osdname_len)
memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4c9fe733fe88..3d5e522e00fc 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
- pm8001_ha->memoryMap.region[i].element_size,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 83d798428c10..1d82eef4e1eb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval = QLA_FUNCTION_FAILED;
- uint16_t temp, frac;
+ uint16_t temp = 0;
- if (!vha->hw->flags.thermal_supported)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (!vha->hw->thermal_support) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Thermal not supported by this card.\n");
+ goto done;
+ }
- temp = frac = 0;
- if (qla2x00_reset_active(vha))
- ql_log(ql_log_warn, vha, 0x707b,
- "ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
- if (rval != QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9f34dedcdad7..ad54099cb805 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -27,7 +27,7 @@ void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- mempool_free(sp, vha->hw->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
}
int
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -531,6 +531,75 @@ done_unmap_sg:
done:
return rval;
}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0, sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
/*
* Set the port configuration to enable the internal or external loopback
* depending on the loopback mode.
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+ if (!wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
- "State change notification not received.\n");
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work, take an FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
rval = -EINVAL;
} else {
if (ha->flags.idc_compl_status) {
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
ha->flags.idc_compl_status = 0;
} else
ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ "DCBX completion received.\n");
}
ha->notify_dcbx_comp = 0;
@@ -587,57 +666,6 @@ done_set_internal:
return rval;
}
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
- int wait)
-{
- int ret = 0;
- int rval = 0;
- uint16_t new_config[4];
- struct qla_hw_data *ha = vha->hw;
-
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- goto done_reset_internal;
-
- memset(new_config, 0 , sizeof(new_config));
- if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK ||
- (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_EXTERNAL_LOOPBACK) {
- new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
- ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
- (new_config[0] & INTERNAL_LOOPBACK_MASK));
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
- ha->notify_dcbx_comp = wait;
- ret = qla81xx_set_port_config(vha, new_config);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7025,
- "Set port config failed.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- }
-
- /* Wait for DCBX complete event */
- if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
- (20 * HZ))) {
- ql_dbg(ql_dbg_user, vha, 0x7026,
- "State change notification not received.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- } else
- ql_dbg(ql_dbg_user, vha, 0x7027,
- "State change received.\n");
-
- ha->notify_dcbx_comp = 0;
- }
-done_reset_internal:
- return rval;
-}
-
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
+
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
goto done_free_dma_rsp;
}
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
+ }
+
ql_dbg(ql_dbg_user, vha, 0x70c0,
"elreq.options=%04x\n", elreq.options);
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
config, new_config, elreq.options);
else
rval = qla81xx_reset_loopback_mode(vha,
- config, 1);
+ config, 1, 0);
else
rval = qla81xx_set_loopback_mode(vha, config,
new_config, elreq.options);
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
- if (new_config[0]) {
- /* Revert back to original port config
- * Also clear internal loopback
- */
- qla81xx_reset_loopback_mode(vha,
- new_config, 0);
- }
-
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
ql_log(ql_log_warn, vha, 0x7029,
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
- if (qla81xx_restart_mpi_firmware(vha) !=
- QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x702a,
- "MPI reset failed.\n");
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
}
rval = -EIO;
goto done_free_dma_rsp;
}
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work, take an FCoE dump and
+ * then reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
+ }
+
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
@@ -1985,6 +2038,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 37b8b7ba7421..e9f6b9bbf29a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 53f9e492f9dc..1626de52e32a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -11,20 +11,21 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x114f | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
@@ -35,11 +36,11 @@
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe06f | |
- * | Target Mode Management | 0xf071 | |
+ * | Target Mode | 0xe070 | |
+ * | Target Mode Management | 0xf072 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8f911c0b1e74..35e20b4f8b6c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6e7727f46d43..c6509911772b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -37,6 +37,7 @@
#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -253,8 +254,8 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
uint32_t req_q_out;
uint32_t rsp_q_in;
uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
};
typedef union {
@@ -563,6 +566,9 @@ typedef union {
&(reg)->u.isp2100.mailbox5 : \
&(reg)->u.isp2300.rsp_q_out)
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
#define MAILBOX_REG(ha, reg, num) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
(num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
-#define MBC_DATA_RATE 0x5d /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
/* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
uint8_t scan_state;
} fc_port_t;
-#define QLA_FCPORT_SCAN_NONE 0
-#define QLA_FCPORT_SCAN_FOUND 1
-
/*
* Fibre channel port/lun states.
*/
@@ -2533,8 +2540,10 @@ struct req_que {
uint16_t qos;
uint16_t vp_idx;
struct rsp_que *rsp;
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+#define MAX_Q_DEPTH 32
int max_q_depth;
};
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
- struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
- uint32_t thermal_supported:1;
uint32_t nic_core_reset_hdlr_active:1;
uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
/* HBA serial number */
uint8_t serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
int notify_dcbx_comp;
+ int notify_lb_portup_comp;
struct mutex selflogin_lock;
/* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
+#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
+ unsigned long host_last_rampdown_time;
+ unsigned long host_last_rampup_time;
+ int cfg_lun_q_depth;
+
struct qlt_hw_data tgt;
+ uint16_t thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
+#define HOST_RAMP_UP_QUEUE_DEPTH 23
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
#define NVRAM_DELAY() udelay(10)
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
/*
* Flash support definitions
*/
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 706c4f7bc7c9..792a29294b62 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index be6d61a89edc..1ac2b0e3a0e1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -300,7 +300,8 @@ struct init_cb_24xx {
uint32_t prio_request_q_address[2];
uint16_t msix;
- uint8_t reserved_2[6];
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
@@ -1387,9 +1388,7 @@ struct qla_flt_header {
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
#define FLT_REG_FCOE_FW 0xA4
-#define FLT_REG_FCOE_VPD_0 0xA9
#define FLT_REG_FCOE_NVRAM_0 0xAA
-#define FLT_REG_FCOE_VPD_1 0xAB
#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2411d1a12b26..eb3ca21a7f17 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
extern void qla83xx_reset_ownership(scsi_qla_host_t *);
extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -278,6 +282,9 @@ extern int
qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
qla2x00_lip_reset(scsi_qla_host_t *);
extern int
@@ -351,6 +358,9 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 01efc0e9cc36..9b455250c101 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
- strcpy(eiter->a.manufacturer, "QLogic Corporation");
- alen = strlen(eiter->a.manufacturer);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
- alen = strlen(eiter->a.os_dev_name);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 563eee3fa924..edf4d14a1335 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- mempool_free(sp, vha->hw->srb_mempool);
-
- QLA_VHA_MARK_NOT_BUSY(vha);
+ qla2x00_rel_sp(vha, sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->flags.thermal_supported = 1;
+ ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1559,6 +1559,47 @@ done:
return rval;
}
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
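
qla2x00_alloc_outstanding_cmds above sizes the outstanding-command array from the firmware-reported resource counts and falls back to a minimal table when the full allocation fails. The snippet below sketches that allocate-with-fallback shape in isolation, using invented names (cmd_table, alloc_cmd_table, DEFAULT_CMDS, MIN_CMDS) and plain calloc in place of kzalloc.

#include <stdlib.h>
#include <stdio.h>

#define DEFAULT_CMDS 1024   /* mirrors DEFAULT_OUTSTANDING_COMMANDS */
#define MIN_CMDS     128    /* mirrors MIN_OUTSTANDING_COMMANDS */

struct cmd_table {
    void   **slots;
    size_t   num_slots;
};

/* Size the table from what the firmware reports, then fall back to a
 * minimal table if the full allocation fails, much as a best-effort
 * init path would.  Returns 0 on success, -1 if even the minimum fails. */
static int alloc_cmd_table(struct cmd_table *t, size_t fw_limit)
{
    t->num_slots = fw_limit ? fw_limit : DEFAULT_CMDS;
    t->slots = calloc(t->num_slots, sizeof(*t->slots));
    if (t->slots)
        return 0;

    t->num_slots = MIN_CMDS;                 /* degraded but usable */
    t->slots = calloc(t->num_slots, sizeof(*t->slots));
    if (t->slots)
        return 0;

    t->num_slots = 0;
    return -1;
}

int main(void)
{
    struct cmd_table t;

    if (alloc_cmd_table(&t, 2048) == 0)
        printf("allocated %zu command slots\n", t.num_slots);
    free(t.slots);
    return 0;
}
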
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
@@ -1628,9 +1669,18 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
- qlt_24xx_config_rings(vha, reg);
+ qlt_24xx_config_rings(vha);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
} else {
ql_log(ql_log_warn, vha, 0x2009,
"Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -3103,7 +3160,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport;
+ fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- break;
+ return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /* Add new ports to existing port list */
- list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- /* Logout lost/gone fabric devices (non-FCP2) */
- if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
}
- continue;
}
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
- /* Login fabric devices that need a login */
- if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
- atomic_read(&vha->loop_down_timer) == 0) {
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- continue;
- }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
}
}
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ }
+
+ /* Exit if out of loop IDs. */
+ if (rval != QLA_SUCCESS) {
+ break;
+ }
+
+ /*
+ * Login and add the new devices to our port list.
+ */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
int first_dev, last_dev;
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
rval = QLA_SUCCESS;
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
continue;
/* Bypass virtual ports of the same host. */
- found = 0;
- if (ha->num_vhosts) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (new_fcport->d_id.b24 == vp->d_id.b24) {
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- if (found)
- continue;
- }
+ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+ continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return rval;
}
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (IS_T10_PI_CAPABLE(ha))
nv->frame_payload_size &= ~7;
+ qlt_81xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLE8XXX");
+ qlt_81xx_config_nvram_stage2(vha, icb);
+
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c0462c04c885..68e2c4afc134 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -198,6 +198,13 @@ done:
}
static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+ mempool_free(sp, vha->hw->srb_mempool);
+ QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
init_timer(&sp->u.iocb_cmd.timer);
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+ if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+ if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+ HOST_QUEUE_RAMPDOWN_INTERVAL)))
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+ if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+ HOST_QUEUE_RAMPUP_INTERVAL)))
+ return;
+
+ set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
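
qla2x00_do_host_ramp_up above only requests a queue-depth ramp-up when the configured depth is below the module cap and both hold-off windows have expired. The standalone sketch below mirrors that triple gate with wall-clock seconds instead of jiffies; ramp_state, should_ramp_up, RAMPDOWN_HOLDOFF and RAMPUP_SPACING are illustrative names only.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RAMPDOWN_HOLDOFF 60   /* seconds, analogous to HOST_QUEUE_RAMPDOWN_INTERVAL */
#define RAMPUP_SPACING   30   /* seconds, analogous to HOST_QUEUE_RAMPUP_INTERVAL */

struct ramp_state {
    time_t last_rampdown;
    time_t last_rampup;
    int    cur_depth;
    int    max_depth;
};

/* Decide whether a queue-depth ramp-up should be scheduled now: only if
 * we are below the cap, the last ramp-down is old enough, and ramp-ups
 * themselves are spaced out (the same three gates as the inline above). */
static bool should_ramp_up(const struct ramp_state *s, time_t now)
{
    if (s->cur_depth >= s->max_depth)
        return false;
    if (now - s->last_rampdown < RAMPDOWN_HOLDOFF)
        return false;
    if (now - s->last_rampup < RAMPUP_SPACING)
        return false;
    return true;
}

int main(void)
{
    time_t now = time(NULL);
    struct ramp_state s = {
        .last_rampdown = now - 120,
        .last_rampup   = now - 45,
        .cur_depth     = 16,
        .max_depth     = 32,
    };

    printf("ramp up now? %s\n", should_ramp_up(&s, now) ? "yes" : "no");
    return 0;
}
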
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a481684479c1..d2630317cce8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
- }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}
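
Several hunks above replace MAX_OUTSTANDING_COMMANDS with req->num_outstanding_cmds in the same circular free-handle search. As a loose illustration of that search (not the driver loop itself), the sketch below scans a 1-based handle table starting just after the most recently used slot and wraps past the end; find_free_handle and NUM_CMDS are invented names.

#include <stddef.h>
#include <stdio.h>

#define NUM_CMDS 8   /* stand-in for req->num_outstanding_cmds */

/* Find a free slot in a 1-based circular handle table, starting just
 * after the most recently used handle.  Returns the free handle, or 0
 * if the table is full (the queuing-error case in the hunks above). */
static size_t find_free_handle(void *slots[], size_t num, size_t current)
{
    size_t handle = current;
    size_t tries;

    for (tries = 1; tries < num; tries++) {
        handle++;
        if (handle == num)
            handle = 1;            /* index 0 is reserved, wrap to 1 */
        if (!slots[handle])
            return handle;
    }
    return 0;                      /* no room */
}

int main(void)
{
    void *slots[NUM_CMDS] = { 0 };
    int dummy = 1;

    slots[3] = &dummy;             /* pretend handle 3 is busy */
    printf("next free after 2: %zu\n", find_free_handle(slots, NUM_CMDS, 2));
    printf("next free after 7: %zu\n", find_free_handle(slots, NUM_CMDS, 7));
    return 0;
}
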
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 873c82014b16..e9dbd74c20d3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -13,6 +13,8 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
+#include "qla_target.h"
+
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
ql_log(ql_log_info, vha, 0x506a,
"IDC Device-State changed = 0x%x.\n", mb[4]);
+ if (ha->flags.nic_core_reset_owner)
+ return;
qla83xx_schedule_work(vha, MBA_IDC_AEN);
}
}
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ uint32_t vp_did;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ha->num_vhosts)
+ return ret;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ vp_did = vp->d_id.b24;
+ if (vp_did == rscn_entry) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return ret;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -899,6 +928,10 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+ /* Skip RSCNs for virtual ports on the same physical port */
+ if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ break;
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -983,14 +1016,25 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- /* See if we need to quiesce any I/O */
- if (IS_QLA8031(vha->hw))
- if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
- (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+ if (IS_QLA8031(vha->hw)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+ (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ /*
+ * Extend loop down timer since port is active.
+ */
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
qla2xxx_wake_dpc(vha);
}
+ }
case MBA_IDC_COMPLETE:
+ if (ha->notify_lb_portup_comp)
+ complete(&ha->lb_portup_comp);
+ /* Fallthru */
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
uint16_t index;
index = LSW(pkt->handle);
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
sts24 = (struct sts_entry_24xx *) pkt;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x70af,
"Invalid SCSI completion handle 0x%x.\n", index);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
req = ha->req_q_map[que];
/* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS) {
+ if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
- } else
+ else
sp = NULL;
if (sp == NULL) {
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
@@ -2193,6 +2238,9 @@ out:
cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
+ if (!res)
+ qla2x00_do_host_ramp_up(vha);
+
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2827,9 +2881,13 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- if (IS_QLA82XX(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[i].handler,
+ 0, qla83xx_msix_entries[i].name, rsp);
+ } else if (IS_QLA82XX(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 68c55eaa318c..186dd59ce4fa 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* command not found */
return QLA_FUNCTION_FAILED;
}
@@ -1633,6 +1633,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
}
/*
+ * qla24xx_link_initialize
+ * Issue link initialization mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_LINK_INITIALIZATION;
+ mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
* qla2x00_lip_reset
* Issue LIP reset mailbox command.
*
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
}
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags;
+ int found;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ found = 0;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list)
- if (vp_idx == vp->vp_idx)
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp_idx == vp->vp_idx) {
+ found = 1;
break;
+ }
+ }
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!vp)
+ if (!found)
return;
vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3814,6 +3867,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
}
int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ *temp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
- int rval;
- uint8_t byte;
+ int rval = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = vha->hw;
+ uint8_t byte;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
"Entered %s.\n", __func__);
- /* Integer part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+ *temp = byte;
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x10c9,
+ "Thermal not supported by I2C.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
- *temp = byte;
- /* Fraction part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+ rval = qla2x00_read_asic_temperature(vha, temp);
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x1019,
+ "Thermal not supported by ISP.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}
- *frac = (byte >> 6) * 25;
+ ql_log(ql_log_warn, vha, 0x1150,
+ "Thermal not supported by this card "
+ "(ignoring further requests).\n");
+ return rval;
+
+done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
"Done %s.\n", __func__);
-fail:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 20fd974f903a..f868a9f98afe 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
+ kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
+ ret = qla2x00_alloc_outstanding_cmds(ha, req);
+ if (ret != QLA_SUCCESS)
+ goto que_failed;
+
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 3e3f593bada3..10754f518303 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -847,14 +847,21 @@ static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla82xx_rom_lock_timeout)
+ if (timeout >= qla82xx_rom_lock_timeout) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_dbg(ql_dbg_p3p, vha, 0xb085,
+ "Failed to acquire rom lock, acquired by %d.\n",
+ lock_owner);
return -1;
+ }
timeout++;
}
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (!sp->u.scmd.ctx ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6c953e8c08f0..d268e8406fdb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -897,7 +897,7 @@ struct ct6_dsd {
#define FLT_REG_BOOT_CODE_82XX 0x78
#define FLT_REG_FW_82XX 0x74
#define FLT_REG_GOLD_FW_82XX 0x75
-#define FLT_REG_VPD_82XX 0x81
+#define FLT_REG_VPD_8XXX 0x81
#define FA_VPD_SIZE_82XX 0x400
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 10d23f8b7036..2c6dd3dfe0f4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
-#define MAX_Q_DEPTH 32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
+ if (req)
+ kfree(req->outstanding_cmds);
+
kfree(req);
req = NULL;
}
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(sp->fcport->vha, sp);
}
static void
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
- if (!sp)
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp) {
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy;
+ }
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy_free_sp;
}
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
spin_lock_irqsave(&ha->hardware_lock, flags);
req = vha->req;
for (cnt = 1; status == QLA_SUCCESS &&
- cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
"full_login_lip=%d.\n", ret);
}
- atomic_set(&vha->loop_state, LOOP_DOWN);
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
}
if (ha->flags.enable_lip_reset) {
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
return tag_type;
}
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampdown_time = jiffies;
+
+ if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+ return;
+
+ if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+ ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+ else
+ ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+ /*
+ * Geometrically ramp down the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > shost->cmd_per_lun) {
+ if (sdev->queue_depth < ha->cfg_lun_q_depth)
+ continue;
+ ql_log(ql_log_warn, vp, 0x3031,
+ "%ld:%d:%d: Ramping down queue depth to %d",
+ vp->host_no, sdev->id, sdev->lun,
+ ha->cfg_lun_q_depth);
+ qla2x00_change_queue_depth(sdev,
+ ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampup_time = jiffies;
+ ha->cfg_lun_q_depth++;
+
+ /*
+ * Linearly ramp up the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > ha->cfg_lun_q_depth)
+ continue;
+ qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+ SCSI_QDEPTH_RAMP_UP);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+
+ qlt_83xx_iospace_config(ha);
+
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count:%d.\n", ha->msix_count);
return 0;
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->cfg_lun_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->dcbx_comp);
+ init_completion(&ha->lb_portup_comp);
set_bit(0, (unsigned long *) ha->vp_idx_map);
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
vha = pci_get_drvdata(pdev);
ha = vha->hw;
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+ "Trying force recovery of the IDC lock.\n");
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
if (rval)
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
uint16_t options = (requester_id << 15) | BIT_6;
uint32_t data;
+ uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
/* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4076,8 +4171,11 @@ retry_lock:
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
ha->portnum);
} else {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ &lock_owner);
ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
- "Failed to acquire IDC lock. retrying...\n");
+ "Failed to acquire IDC lock, acquired by %d, "
+ "retrying...\n", lock_owner);
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data)
qla2xxx_flash_npiv_conf(base_vha);
}
+ if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+ &base_vha->dpc_flags)) {
+ /* Prevents simultaneous ramp up and down */
+ clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags);
+ qla2x00_host_ramp_down_queuedepth(base_vha);
+ }
+
+ if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags))
+ qla2x00_host_ramp_up_queuedepth(base_vha);
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
cpu_flags);
req = ha->req_q_map[0];
for (index = 1;
- index < MAX_OUTSTANDING_COMMANDS;
+ index < req->num_outstanding_cmds;
index++) {
fc_port_t *sfcp;
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, host_ramp_down_needed=%d "
+ "host_ramp_up_needed=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags),
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 892a81e457bc..46ef0ac48f44 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 32fdc2a66dd1..3bef6736d885 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOTLOAD_82XX:
ha->flt_region_bootload = start;
break;
- case FLT_REG_VPD_82XX:
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_0:
- if (!IS_QLA8031(ha))
- break;
- ha->flt_region_vpd_nvram = start;
- if (ha->flags.port0)
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_1:
- if (!IS_QLA8031(ha))
- break;
- if (!ha->flags.port0)
+ case FLT_REG_VPD_8XXX:
+ if (IS_CNA_CAPABLE(ha))
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 80f4b849e2b0..61b5d8c2b5da 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
"\"disabled\" - initiator mode will never be enabled; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
/*
* From scsi/fc/fc_fcp.h
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
/* always increment cmd handle */
do {
++h;
- if (h > MAX_OUTSTANDING_COMMANDS)
+ if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
/* handle-1 is actually used */
- if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
+ if (!IS_TGT_MODE_CAPABLE(ha)) {
+ ql_log(ql_log_warn, base_vha, 0xe070,
+ "This adapter does not support target mode.\n");
+ return 0;
+ }
+
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
"Registering target for host %ld(%p)", base_vha->host_no, ha);
@@ -4666,7 +4673,6 @@ void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct atio_from_isp *pkt;
int cnt, i;
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
}
/* Adjust ring index */
- WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
- if (ha->mqenable) {
-#if 0
- WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
- } else {
- /* Setup APTIO registers for target mode */
- WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp24.atio_q_out);
+ WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
}
}
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |=
+ __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_81xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ ha->msix_count += 1; /* For ATIO Q */
+}
+
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt)
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+ ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+ }
+
mutex_init(&ha->tgt.tgt_mutex);
mutex_init(&ha->tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+ struct rsp_que *rsp;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bad749561ec2..ff9ccb9fd036 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -60,8 +60,9 @@
* multi-complete should come to the tgt driver or be handled there by qla2xxx
*/
#define CTIO_COMPLETION_HANDLE_MARK BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+ "DEFAULT_OUTSTANDING_COMMANDS"
#endif
#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
- device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);
extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_81xx *);
extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
extern void qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 49697ca41e78..2b6e478d9e33 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 6e9af20be12f..5d8fe4f75650 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -538,7 +538,7 @@ struct device_info {
int port_num;
};
-static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
uint32_t drv_active;
uint32_t dev_part, dev_part1, dev_part2;
@@ -1351,31 +1351,58 @@ exit_start_fw:
/*----------------------Interrupt Related functions ---------------------*/
-void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+ qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int, ret;
- if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ ret = readl(&ha->qla4_83xx_reg->mbox_int);
+ mb_int = ret & ~INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+ }
+}
- ret = readl(&ha->qla4_83xx_reg->mbox_int);
- mb_int = ret & ~INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_disable_mbox_intrs(ha);
+ qla4_83xx_disable_iocb_intrs(ha);
}
-void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+ qla4_8xxx_intr_enable(ha);
+ set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+ }
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int;
- qla4_8xxx_mbx_intr_enable(ha);
- mb_int = INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ mb_int = INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+ }
+}
- set_bit(AF_INTERRUPTS_ON, &ha->flags);
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_enable_mbox_intrs(ha);
+ qla4_83xx_enable_iocb_intrs(ha);
}
+
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount)
{
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 76819b71ada7..19ee55a6226c 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
}
break;
case 2:
- /* Reset HBA */
+ /* Reset HBA and collect FW dump */
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
if (dev_state == QLA8XXX_DEV_READY) {
- ql4_printk(KERN_INFO, ha,
- "%s: Setting Need reset, reset_owner is 0x%x.\n",
- __func__, ha->func_num);
+ ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+ __func__);
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
- set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ if (is_qla8022(ha) ||
+ (is_qla8032(ha) &&
+ qla4_83xx_can_perform_reset(ha))) {
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+ __func__, ha->func_num);
+ }
} else
ql4_printk(KERN_INFO, ha,
"%s: Reset not performed as device state is 0x%x\n",
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 329d553eae94..129f5dd02822 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -136,6 +136,7 @@
#define RESPONSE_QUEUE_DEPTH 64
#define QUEUE_SIZE 64
#define DMA_BUFFER_SIZE 512
+#define IOCB_HIWAT_CUSHION 4
/*
* Misc
@@ -180,6 +181,7 @@
#define DISABLE_ACB_TOV 30
#define IP_CONFIG_TOV 30
#define LOGIN_TOV 12
+#define BOOT_LOGIN_RESP_TOV 60
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
@@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
+#define DF_BOOT_TGT 1 /* Boot target entry */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
@@ -501,6 +504,7 @@ struct scsi_qla_host {
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_LOOPBACK 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
@@ -516,6 +520,8 @@ struct scsi_qla_host {
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
+#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
unsigned long dpc_flags;
@@ -537,6 +543,7 @@ struct scsi_qla_host {
uint32_t tot_ddbs;
uint16_t iocb_cnt;
+ uint16_t iocb_hiwat;
/* SRB cache. */
#define SRB_MIN_REQ 128
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
- (test_bit(AF_LINK_UP, &ha->flags) != 0);
+ (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+ (!test_bit(AF_LOOPBACK, &ha->flags));
}
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1c4795020357..ad9d2e2d370f 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -495,7 +495,7 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_IDC_COMPLETE 0x8100
-#define MBOX_ASTS_IDC_NOTIFY 0x8101
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -522,6 +522,10 @@ struct qla_flt_region {
#define FLASH_OPT_COMMIT 2
#define FLASH_OPT_RMW_COMMIT 3
+/* Loopback type */
+#define ENABLE_INTERNAL_LOOPBACK 0x04
+#define ENABLE_EXTERNAL_LOOPBACK 0x08
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 57a5a3cf5770..982293edf02c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1aca1b4f70b8..8fc8548ba4ba 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -195,12 +195,10 @@ exit_get_sys_info_no_free:
* @ha: pointer to host adapter structure.
*
**/
-static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
{
/* Initialize aen queue */
ha->aen_q_count = MAX_AEN_ENTRIES;
-
- return qla4xxx_get_firmware_status(ha);
}
static uint8_t
@@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
+ /*
+ * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+ * Mailbox interrupts must be enabled prior to issuing any mailbox
+ * command in order to prevent the possibility of losing interrupts
+ * while switching from polling to interrupt mode. IOCB interrupts are
+ * enabled via isp_ops->enable_intrs.
+ */
+ if (is_qla8032(ha))
+ qla4_83xx_enable_mbox_intrs(ha);
+
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_init_local_data(ha) == QLA_ERROR)
- goto exit_init_hba;
+ qla4xxx_init_local_data(ha);
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index f48f37a281d1..14fec976f634 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
goto queuing_error;
/* total iocbs active */
- if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
+ if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
goto queuing_error;
/* Build command packet */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 15ea81465ce4..1b83dc283d2e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -582,6 +582,33 @@ exit_prq_error:
}
/**
+ * qla4_83xx_loopback_in_progress: Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1 = loopback in progress, 0 = loopback not in progress
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+ (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics in progress\n",
+ __func__));
+ rval = 1;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics not in progress\n",
+ __func__));
+ rval = 0;
+ }
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
* @ha: Pointer to host adapter structure.
* @mailbox_status: Mailbox status.
@@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_LINK_DOWN:
clear_bit(AF_LINK_UP, &ha->flags);
- if (test_bit(AF_INIT_DONE, &ha->flags))
+ if (test_bit(AF_INIT_DONE, &ha->flags)) {
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ }
ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
@@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
" removed\n", ha->host_no, mbox_sts[0]));
break;
- case MBOX_ASTS_IDC_NOTIFY:
+ case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
{
uint32_t opcode;
if (is_qla8032(ha)) {
@@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
+
+ if (qla4_83xx_loopback_in_progress(ha))
+ set_bit(AF_LOOPBACK, &ha->flags);
+ else
+ clear_bit(AF_LOOPBACK, &ha->flags);
}
break;
@@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
- ql4_printk(KERN_ERR, ha,
- "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
- __func__);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+ __func__));
return IRQ_NONE;
}
/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
- ql4_printk(KERN_ERR, ha,
- "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
- __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+ __func__, (leg_int_ptr & PF_BITS_MASK),
+ ha->pf_bit));
return IRQ_NONE;
}
@@ -1437,11 +1472,14 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_bit(AF_MSIX_ENABLED, &ha->flags))
- qla4_8xxx_disable_msix(ha);
- else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
- free_irq(ha->pdev->irq, ha);
+ if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
+ qla4_8xxx_disable_msix(ha);
+ } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ pci_disable_msi(ha->pdev);
+ } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ }
+ }
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 3d41034191f0..160d33697216 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -44,6 +44,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
/**
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1=polling mode, 0=non-polling mode
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+ rval = 0;
+ } else {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_ONLINE, &ha->flags) &&
+ !test_bit(AF_HA_REMOVAL, &ha->flags))
+ rval = 0;
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_mailbox_command - issues mailbox commands
* @ha: Pointer to host adapter structure.
* @inCount: number of mailbox registers to load.
@@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/*
* Wait for completion: Poll or completion queue
*/
- if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
- test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_ONLINE, &ha->flags) &&
- !test_bit(AF_HA_REMOVAL, &ha->flags)) {
- /* Do not poll for completion. Use completion queue */
- set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
- clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- } else {
+ if (qla4xxx_is_intr_poll_mode(ha)) {
/* Poll for command to complete */
wait_count = jiffies + MBOX_TOV * HZ;
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
-
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
* to a temporary storage location in the adapter
* structure.
*/
-
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops->process_mailbox_interrupt(ha, outCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
+ } else {
+ /* Do not poll for completion. Use completion queue */
+ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+ wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+ clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
}
/* Check for mailbox timeout. */
@@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_ERROR;
}
- ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
- ha->host_no, mbox_sts[2]);
+ /* High-water mark of IOCBs */
+ ha->iocb_hiwat = mbox_sts[2];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: firmware IOCBs available = %d\n", __func__,
+ ha->iocb_hiwat));
+
+ if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+ ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+ /* Ideally, we should not enter this code, as the # of firmware
+ * IOCBs is hard-coded in the firmware. We set a default
+ * iocb_hiwat here just in case */
+ if (ha->iocb_hiwat == 0) {
+ ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: Setting IOCB's to = %d\n", __func__,
+ ha->iocb_hiwat));
+ }
return QLA_SUCCESS;
}
@@ -1385,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
- if (chap_table == NULL) {
- ret = -ENOMEM;
- goto exit_get_chap;
- }
+ if (chap_table == NULL)
+ return -ENOMEM;
chap_size = sizeof(struct ql4_chap_table);
memset(chap_table, 0, chap_size);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 499a92db1cf6..71d3d234f526 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);
- if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
+ if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);
return retval;
@@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Make sure we receive the minimum required data to cache internally */
- if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
+ if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+ offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82;
-
}
/* Save M.A.C. address & serial_number */
@@ -3463,7 +3463,7 @@ exit_validate_mac82:
/* Interrupt handling helpers. */
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
- qla4_8xxx_mbx_intr_enable(ha);
+ qla4_8xxx_intr_enable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - reset */
@@ -3522,7 +3522,7 @@ void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ qla4_8xxx_intr_disable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 4cec123a6a6a..6142729167f4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
sess->password_in, BIDI_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password, LOCAL_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
default:
return iscsi_session_get_param(cls_sess, param, buf);
@@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
+ test_bit(AF_LOOPBACK, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -3479,7 +3481,8 @@ dpc_post_reset_ha:
}
/* ---- link change? --- */
- if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+ test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
qla4xxx_mark_all_devices_missing(ha);
@@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
- if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
- /* Turn-off interrupts on the card. */
- ha->isp_ops->disable_intrs(ha);
- }
+ /* Turn-off interrupts on the card. */
+ ha->isp_ops->disable_intrs(ha);
if (is_qla40XX(ha)) {
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
@@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
}
/* Detach interrupts */
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
- qla4xxx_free_irqs(ha);
+ qla4xxx_free_irqs(ha);
/* free extra memory */
qla4xxx_mem_free(ha);
@@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
struct iscsi_endpoint *ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- struct sockaddr *dst_addr;
+ struct sockaddr *t_addr;
+ struct sockaddr_storage *dst_addr;
char *ip;
/* TODO: need to destroy on unload iscsi_endpoint*/
@@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
return NULL;
if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
- dst_addr->sa_family = AF_INET6;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET6;
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
} else {
- dst_addr->sa_family = AF_INET;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET;
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
}
- ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
return ep;
}
@@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
}
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry)
+ struct ddb_entry *ddb_entry,
+ uint16_t idx)
{
uint16_t def_timeout;
@@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+ if (ql4xdisablesysfsboot &&
+ (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+ set_bit(DF_BOOT_TGT, &ddb_entry->flags);
}
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
@@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
- int is_reset)
+ int is_reset, uint16_t idx)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
@@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
goto continue_next_nt;
}
- ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
if (ret == QLA_ERROR)
goto exit_nt_list;
@@ -5116,6 +5124,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
}
/**
+ * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
+ * response.
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
+ * set in the DDB and we wait for the login response of the boot targets
+ * during probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int max_ddbs, idx, ret;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_login_resp;
+ }
+
+ wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB index [%d]\n", __func__,
+ ddb_entry->fw_ddb_index));
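+ /* Poll the DDB state once a second until the login completes,
+ * fails, or the BOOT_LOGIN_RESP_TOV timer expires. */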
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto exit_login_resp;
+
+ if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ break;
+
+ schedule_timeout_uninterruptible(HZ);
+
+ } while ((time_after(wtime, jiffies)));
+
+ if (!time_after(wtime, jiffies)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Login response wait timer expired\n",
+ __func__));
+ goto exit_login_resp;
+ }
+ }
+ }
+
+exit_login_resp:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
* @pci_device_id: pointer to pci_device entry
@@ -5270,7 +5350,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha)) {
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha,
- QLA82XX_CRB_DEV_STATE);
+ QLA8XXX_CRB_DEV_STATE);
ha->isp_ops->idc_unlock(ha);
if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "
@@ -5368,6 +5448,7 @@ skip_retry_init:
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+ qla4xxx_wait_login_resp_boot_tgt(ha);
qla4xxx_create_chap_list(ha);
@@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
goto exit_host_reset;
}
- rval = qla4xxx_wait_for_hba_online(ha);
- if (rval != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
- "adapter\n", __func__));
- rval = -EIO;
- goto exit_host_reset;
- }
-
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
goto recover_adapter;
@@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
- uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;
@@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to "
- "reserve interrupt %d already in use.\n",
- ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
+ ha->isp_ops->enable_intrs(ha);
}
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
@@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
QLA8XXX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
- if (rval == QLA_SUCCESS) {
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to"
- " reserve interrupt %d already in"
- " use.\n", ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
- }
+ if (rval == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+
ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f6df2ea91ab5..6775a45af315 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1bf5aff68ed..765398c063c7 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2617,3 +2617,17 @@ void scsi_kunmap_atomic_sg(void *virt)
kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
+
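+/*
+ * sdev_disable_disk_events() / sdev_enable_disk_events() - block and
+ * unblock disk-events polling for a SCSI device.  The disable depth is
+ * a nesting counter, so every disable must be balanced by an enable.
+ */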
+void sdev_disable_disk_events(struct scsi_device *sdev)
+{
+ atomic_inc(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_disable_disk_events);
+
+void sdev_enable_disk_events(struct scsi_device *sdev)
+{
+ if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
+ return;
+ atomic_dec(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_enable_disk_events);
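For reference, a minimal sketch of how a low-level or upper-level driver might pair these new exports around a power transition. This is hypothetical illustration only, assuming matching declarations in scsi_device.h; it is not part of this patch:

#include <linux/device.h>
#include <scsi/scsi_device.h>

/* Hypothetical suspend/resume callbacks of a SCSI upper-level driver. */
static int example_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* Stop media-change polling before powering the device down. */
	sdev_disable_disk_events(sdev);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	/* Balance the earlier disable so polling resumes. */
	sdev_enable_disk_events(sdev);
	return 0;
}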
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 59d427bf08e2..0a74b975efdf 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
+static ssize_t
+show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->target_id);
+}
+static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+ show_priv_session_target_id, NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_priv_sess_creator.attr,
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
+ &dev_attr_priv_sess_target_id.attr,
NULL,
};
@@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_creator.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_id.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index be2c9a6561ff..9f0c46547459 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1391,24 +1391,23 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
return ERR_PTR(-ENOMEM);
}
- if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
- printk(KERN_WARNING "idr expansion Sg_device failure\n");
- error = -ENOMEM;
- goto out;
- }
-
+ idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
- error = idr_get_new(&sg_index_idr, sdp, &k);
- if (error) {
- write_unlock_irqrestore(&sg_index_lock, iflags);
- printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
- error);
- goto out;
+ error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
+ if (error < 0) {
+ if (error == -ENOSPC) {
+ sdev_printk(KERN_WARNING, scsidp,
+ "Unable to attach sg device type=%d, minor number exceeds %d\n",
+ scsidp->type, SG_MAX_DEVS - 1);
+ error = -ENODEV;
+ } else {
+ printk(KERN_WARNING
+ "idr allocation Sg_device failure: %d\n", error);
+ }
+ goto out_unlock;
}
-
- if (unlikely(k >= SG_MAX_DEVS))
- goto overflow;
+ k = error;
SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
@@ -1420,25 +1419,17 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
+ error = 0;
+out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
+ idr_preload_end();
- error = 0;
- out:
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
-
- overflow:
- idr_remove(&sg_index_idr, k);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- sdev_printk(KERN_WARNING, scsidp,
- "Unable to attach sg device type=%d, minor "
- "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
- error = -ENODEV;
- goto out;
}
static int
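The sg conversion above and the st conversion later in this patch follow the same idr allocation pattern; a generic sketch of that pattern, with illustrative names that are not taken from the patch:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

/* Illustrative sketch of the idr_preload()/idr_alloc() pattern. */
static int example_alloc_index(struct idr *idr, spinlock_t *lock,
			       void *ptr, int max_ids)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; preloads per-CPU nodes */
	spin_lock(lock);
	/* GFP_NOWAIT because the lock is held; the preload covers allocation. */
	id = idr_alloc(idr, ptr, 0, max_ids, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* >= 0: new id; -ENOSPC: range full; -ENOMEM: no memory */
}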
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5fc97d2ba2fd..f2884ee90710 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -45,6 +45,7 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@@ -79,6 +80,11 @@ static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
static int sr_done(struct scsi_cmnd *);
+static int sr_runtime_suspend(struct device *dev);
+
+static struct dev_pm_ops sr_pm_ops = {
+ .runtime_suspend = sr_runtime_suspend,
+};
static struct scsi_driver sr_template = {
.owner = THIS_MODULE,
@@ -86,6 +92,7 @@ static struct scsi_driver sr_template = {
.name = "sr",
.probe = sr_probe,
.remove = sr_remove,
+ .pm = &sr_pm_ops,
},
.done = sr_done,
};
@@ -131,6 +138,16 @@ static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
return container_of(disk->private_data, struct scsi_cd, driver);
}
+static int sr_runtime_suspend(struct device *dev)
+{
+ struct scsi_cd *cd = dev_get_drvdata(dev);
+
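+	/* Refuse runtime suspend while a disc is loaded */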
+ if (cd->media_present)
+ return -EBUSY;
+ else
+ return 0;
+}
+
/*
* The get and put routines for the struct scsi_cd. Note this entity
* has a scsi_device pointer and owns a reference to this.
@@ -146,7 +163,8 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
kref_get(&cd->kref);
if (scsi_device_get(cd->device))
goto out_put;
- goto out;
+ if (!scsi_autopm_get_device(cd->device))
+ goto out;
out_put:
kref_put(&cd->kref, sr_kref_release);
@@ -162,6 +180,7 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
+ scsi_autopm_put_device(sdev);
scsi_device_put(sdev);
mutex_unlock(&sr_ref_mutex);
}
@@ -540,6 +559,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
+ scsi_autopm_get_device(cd->device);
+
mutex_lock(&sr_mutex);
/*
@@ -571,6 +592,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
out:
mutex_unlock(&sr_mutex);
+ scsi_autopm_put_device(cd->device);
return ret;
}
@@ -578,7 +600,17 @@ static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct scsi_cd *cd = scsi_cd(disk);
- return cdrom_check_events(&cd->cdi, clearing);
+ unsigned int ret;
+
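+	/* Only poll the drive while disk-events checking is enabled */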
+ if (atomic_read(&cd->device->disk_events_disable_depth) == 0) {
+ scsi_autopm_get_device(cd->device);
+ ret = cdrom_check_events(&cd->cdi, clearing);
+ scsi_autopm_put_device(cd->device);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
}
static int sr_block_revalidate_disk(struct gendisk *disk)
@@ -586,12 +618,16 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
struct scsi_cd *cd = scsi_cd(disk);
struct scsi_sense_hdr sshdr;
+ scsi_autopm_get_device(cd->device);
+
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
- return 0;
+ goto out;
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
+out:
+ scsi_autopm_put_device(cd->device);
return 0;
}
@@ -718,6 +754,8 @@ static int sr_probe(struct device *dev)
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
+ scsi_autopm_put_device(cd->device);
+
return 0;
fail_put:
@@ -965,6 +1003,8 @@ static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ scsi_autopm_get_device(cd->device);
+
blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
del_gendisk(cd->disk);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 98156a97c472..86974471af68 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -977,7 +977,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
struct st_modedef *STm;
struct st_partstat *STps;
char *name = tape_name(STp);
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int mode = TAPE_MODE(inode);
STp->ready = ST_READY;
@@ -4076,7 +4076,7 @@ static int st_probe(struct device *dev)
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *buffer;
- int i, dev_num, error;
+ int i, error;
char *stp;
if (SDp->type != TYPE_TAPE)
@@ -4178,27 +4178,17 @@ static int st_probe(struct device *dev)
tpnt->blksize_changed = 0;
mutex_init(&tpnt->lock);
- if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
- pr_warn("st: idr expansion failed\n");
- error = -ENOMEM;
- goto out_put_disk;
- }
-
+ idr_preload(GFP_KERNEL);
spin_lock(&st_index_lock);
- error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+ error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
spin_unlock(&st_index_lock);
- if (error) {
+ idr_preload_end();
+ if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
goto out_put_disk;
}
-
- if (dev_num > ST_MAX_TAPES) {
- pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
- goto out_put_index;
- }
-
- tpnt->index = dev_num;
- sprintf(disk->disk_name, "st%d", dev_num);
+ tpnt->index = error;
+ sprintf(disk->disk_name, "st%d", tpnt->index);
dev_set_drvdata(dev, tpnt);
@@ -4218,9 +4208,8 @@ static int st_probe(struct device *dev)
out_remove_devs:
remove_cdevs(tpnt);
-out_put_index:
spin_lock(&st_index_lock);
- idr_remove(&st_index_idr, dev_num);
+ idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
out_put_disk:
put_disk(disk);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 270b3cf6f372..16a3a0cc9672 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -201,6 +201,7 @@ enum storvsc_request_type {
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
/*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
uint lun;
};
+static void storvsc_device_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ uint lun;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ lun = wrk->lun;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ if (!sdev)
+ goto done;
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+
+done:
+ kfree(wrk);
+}
+
static void storvsc_bus_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
if (!bounce_sgl)
return NULL;
+ sg_init_table(bounce_sgl, num_pages);
for (i = 0; i < num_pages; i++) {
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
@@ -760,6 +781,66 @@ cleanup:
return ret;
}
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ struct scsi_cmnd *scmnd,
+ struct Scsi_Host *host,
+ u8 asc, u8 ascq)
+{
+ struct storvsc_scan_work *wrk;
+ void (*process_err_fn)(struct work_struct *work);
+ bool do_work = false;
+
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ /*
+ * If there is an error; offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side. However, if the command
+ * were a pass-through command, deal with it appropriately.
+ */
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ break;
+ case SRB_STATUS_INVALID_LUN:
+ do_work = true;
+ process_err_fn = storvsc_remove_lun;
+ break;
+ case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
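+		/* Sense 0x2a/0x09: "capacity data has changed" */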
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ do_work = true;
+ process_err_fn = storvsc_device_scan;
+ /*
+			 * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ }
+ break;
+ }
+
+ if (!do_work)
+ return;
+
+ /*
+	 * This error needs handling in process context; schedule a work item.
+ */
+ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!wrk) {
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ return;
+ }
+
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, process_err_fn);
+ schedule_work(&wrk->work);
+}
+
static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
struct stor_mem_pools *memp = scmnd->device->hostdata;
+ struct Scsi_Host *host;
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
cmd_request->bounce_sgl_count);
}
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side. However, if the command
- * were a pass-through command deal with it appropriately.
- */
scmnd->result = vm_srb->scsi_status;
- if (vm_srb->srb_status == SRB_STATUS_ERROR) {
- switch (scmnd->cmnd[0]) {
- case ATA_16:
- case ATA_12:
- set_host_byte(scmnd, DID_PASSTHROUGH);
- break;
- default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
- }
- }
-
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ sense_hdr.ascq);
+
scsi_set_resid(scmnd,
cmd_request->data_buffer.len -
vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+ sdevice->no_write_same = 1;
+
return 0;
}
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
+	/* the host does not handle WRITE_SAME, log any accidental usage */
+ case WRITE_SAME:
/*
* smartd sends this command and the host does not handle
* this. So, don't send it.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8f27f9d6f91d..0371047c5922 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -2,48 +2,58 @@
# Kernel configuration file for the UFS Host Controller
#
# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011 Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
#
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller Driver Core"
+ depends on SCSI
+ ---help---
+	  This selects support for UFS devices in Linux. Say Y here if you
+	  know the name of your UFS host adapter (the card inside your
+	  computer that "speaks" the UFS protocol, also called a UFS Host
+	  Controller), because you will be asked for it.
+	  The module will be called ufshcd.
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
-# USA.
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.txt>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
-config SCSI_UFSHCD
- tristate "Universal Flash Storage host controller driver"
- depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on SCSI_UFSHCD && PCI
---help---
- This is a generic driver which supports PCIe UFS Host controllers.
+ This selects the PCI UFS Host Controller Interface. Select this if
+	  you have a UFS Host Controller with a PCI interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index adf7895a6a91..9eda0dfbd6df 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,2 +1,3 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b207529f8d54..139bc0647b41 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644
index 000000000000..5cb1d75f5868
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ * data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ disable_irq(pdev->irq);
+ free_irq(pdev->irq, hba);
+ ufshcd_remove(hba);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+ int err;
+
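+	/* Prefer 64-bit DMA; fall back to 32-bit if the device cannot do it */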
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 0;
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ err = ufshcd_set_dma_mask(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 91a4046ca9ba..60fd40c4e4c2 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1,77 +1,39 @@
/*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
*
* This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -102,121 +64,6 @@ enum {
};
/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct pci_dev *pdev;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
-
- struct uic_command active_uic_cmd;
- wait_queue_head_t ufshcd_tm_wait_queue;
- unsigned long tm_condition;
-
- u32 ufshcd_state;
- u32 int_enable_mask;
-
- /* Work Queues */
- struct work_struct uic_workq;
- struct work_struct feh_workq;
-
- /* HBA Errors */
- u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- unsigned int lun;
-};
-
-/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba - Pointer to adapter instance
*
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
if (hba->utmrdl_base_addr) {
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ dma_free_coherent(hba->dev, utmrdl_size,
hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
}
if (hba->utrdl_base_addr) {
utrdl_size =
(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ dma_free_coherent(hba->dev, utrdl_size,
hba->utrdl_base_addr, hba->utrdl_dma_addr);
}
if (hba->ucdl_base_addr) {
ucdl_size =
(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ dma_free_coherent(hba->dev, ucdl_size,
hba->ucdl_base_addr, hba->ucdl_dma_addr);
}
}
@@ -429,15 +276,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
}
/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
-/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for UTP command descriptors */
ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
*/
if (!hba->ucdl_base_addr ||
WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
}
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
utrdl_size,
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
utmrdl_size,
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for local reference block */
hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
if (!hba->lrb) {
- dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
goto out;
}
return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
/* check if controller is ready to accept UIC commands */
if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
UIC_COMMAND_READY) == 0x0) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller not ready"
" to accept UIC commands\n");
return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
if (!ufshcd_is_device_present(reg)) {
- dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ dev_err(hba->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
}
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
if (retry) {
retry--;
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller enable failed\n");
return -EIO;
}
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
/* start the initialization process */
if (ufshcd_initialize_hba(hba)) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Reset: Controller initialization failed\n");
return FAILED;
}
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_result = SUCCESS;
} else {
task_result = FAILED;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"trc: Invalid ocs = %x\n", ocs_value);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* check if the returned transfer response is valid */
result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
if (result) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Invalid response = %x\n", result);
break;
}
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_FATAL_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"OCS error from controller = %x\n", ocs);
break;
} /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
!(ufshcd_get_uic_cmd_result(hba))) {
if (ufshcd_make_hba_operational(hba))
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"cc: hba not operational state\n");
return;
}
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
free_slot = ufshcd_get_tm_free_slot(hba);
if (free_slot >= hba->nutmrs) {
spin_unlock_irqrestore(host->host_lock, flags);
- dev_err(&hba->pdev->dev, "Task management queue full\n");
+ dev_err(hba->dev, "Task management queue full\n");
err = FAILED;
goto out;
}
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
&hba->tm_condition) != 0),
60 * HZ);
if (!err) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task management command timed-out\n");
err = FAILED;
goto out;
@@ -1688,23 +1526,13 @@ static struct scsi_host_template ufshcd_driver_template = {
};
/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
- ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
-/**
* ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
* @state: power state
*
* Returns -ENOSYS
*/
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
/*
* TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
/**
* ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
*
* Returns -ENOSYS
*/
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
{
/*
* TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
return -ENOSYS;
}
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
/**
* ufshcd_hba_free - free allocated memory for
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
{
iounmap(hba->mmio_base);
ufshcd_free_hba_memory(hba);
- pci_release_regions(hba->pdev);
}
/**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
*/
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
/* disable interrupts */
ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
- free_irq(pdev->irq, hba);
ufshcd_hba_stop(hba);
ufshcd_hba_free(hba);
scsi_remove_host(hba->host);
scsi_host_put(hba->host);
- pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- int err;
- u64 dma_mask;
-
- /*
- * If controller supports 64 bit addressing mode, then set the DMA
- * mask to 64-bit, else set the DMA mask to 32-bit
- */
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
- dma_mask = DMA_BIT_MASK(64);
- else
- dma_mask = DMA_BIT_MASK(32);
-
- err = pci_set_dma_mask(hba->pdev, dma_mask);
- if (err)
- return err;
-
- err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
- return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+ void __iomem *mmio_base, unsigned int irq)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
int err;
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
goto out_error;
}
- pci_set_master(pdev);
+ if (!mmio_base) {
+ dev_err(dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
- dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ dev_err(dev, "scsi_host_alloc failed\n");
err = -ENOMEM;
- goto out_disable;
+ goto out_error;
}
hba = shost_priv(host);
-
- err = pci_request_regions(pdev, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_host_put;
- }
-
- hba->mmio_base = pci_ioremap_bar(pdev, 0);
- if (!hba->mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
-
hba->host = host;
- hba->pdev = pdev;
+ hba->dev = dev;
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
/* Read capabilities registers */
ufshcd_hba_capabilities(hba);
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
- }
-
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
- dev_err(&pdev->dev, "Memory allocation failed\n");
- goto out_iounmap;
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
}
/* Configure LRB */
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
/* IRQ registration */
- err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
- dev_err(&pdev->dev, "request irq failed\n");
+ dev_err(hba->dev, "request irq failed\n");
goto out_lrb_free;
}
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
- dev_err(&pdev->dev, "init shared queue failed\n");
+ dev_err(hba->dev, "init shared queue failed\n");
goto out_free_irq;
}
- pci_set_drvdata(pdev, hba);
-
- err = scsi_add_host(host, &pdev->dev);
+ err = scsi_add_host(host, hba->dev);
if (err) {
- dev_err(&pdev->dev, "scsi_add_host failed\n");
+ dev_err(hba->dev, "scsi_add_host failed\n");
goto out_free_irq;
}
/* Initialization routine */
err = ufshcd_initialize_hba(hba);
if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- goto out_free_irq;
+ dev_err(hba->dev, "Initialization failed\n");
+ goto out_remove_scsi_host;
}
+ *hba_handle = hba;
return 0;
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
out_free_irq:
- free_irq(pdev->irq, hba);
+ free_irq(irq, hba);
out_lrb_free:
ufshcd_free_hba_memory(hba);
-out_iounmap:
- iounmap(hba->mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_host_put:
- scsi_host_put(host);
out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
+ scsi_host_put(host);
out_error:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_init);
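
With the PCI specifics removed, a bus glue layer is expected to map the
registers, look up the interrupt line and hand both to the core. The sketch
below is only an illustration of such a caller; the platform-driver shape,
driver name and resource handling are assumptions, not part of this patch.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include "ufshcd.h"

static int my_ufs_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	struct resource *res;
	void __iomem *mmio;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	mmio = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mmio)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Hand the mapped registers and interrupt line to the core. */
	err = ufshcd_init(&pdev->dev, &hba, mmio, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}

static int my_ufs_remove(struct platform_device *pdev)
{
	ufshcd_remove(platform_get_drvdata(pdev));
	return 0;
}

static struct platform_driver my_ufs_driver = {
	.probe	= my_ufs_probe,
	.remove	= my_ufs_remove,
	.driver	= {
		.name = "my-ufshcd",	/* illustrative name */
	},
};
module_platform_driver(my_ufs_driver);

MODULE_LICENSE("GPL");
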
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_probe,
- .remove = ufshcd_remove,
- .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_suspend,
- .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
- "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644
index 000000000000..6b99a42f5819
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+ unsigned int irq;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *,
+		unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6e3510f71167..0c164847a3ef 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFSHCI_H
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index a43415a7fbed..4c0f6d883dd3 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
@@ -322,7 +322,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
if (bus->bustype == SSB_BUSTYPE_SSB) {
#ifdef CONFIG_BCM47XX
char buf[20];
- if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
+ if (bcm47xx_nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
crystalfreq = simple_strtoul(buf, NULL, 0);
#endif
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3a7965d6ac28..093f10c88cce 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,8 +112,6 @@ source "drivers/staging/media/Kconfig"
source "drivers/staging/net/Kconfig"
-source "drivers/staging/omapdrm/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/ozwpan/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5971865d0c61..fa41b04cf4cb 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_USB_G_CCG) += ccg/
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 538ebe213129..24456a0de6b2 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
static void binder_deferred_release(struct binder_proc *proc)
{
- struct hlist_node *pos;
struct binder_transaction *t;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
node->local_weak_refs = 0;
hlist_add_head(&node->dead_node, &binder_dead_nodes);
- hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
incoming_refs++;
if (ref->death) {
death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
struct binder_ref *ref;
- struct hlist_node *pos;
struct binder_work *w;
int count;
count = 0;
- hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
node->internal_strong_refs, count);
if (count) {
seq_puts(m, " proc");
- hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
struct binder_node *node;
int do_lock = !binder_debug_no_lock;
@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
print_binder_node(m, node);
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
int do_lock = !binder_debug_no_lock;
if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
print_binder_stats(m, "", &binder_stats);
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- struct hlist_node *pos;
int do_lock = !binder_debug_no_lock;
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
- hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(__func__);
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index b5c2c4c15f92..d23eeeb95064 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -185,7 +185,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
return -ENOENT;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc);
do_gettimeofday(&tv);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
diff --git a/drivers/staging/ccg/f_mass_storage.c b/drivers/staging/ccg/f_mass_storage.c
index 4f1142efa6d1..20bc2b454ac2 100644
--- a/drivers/staging/ccg/f_mass_storage.c
+++ b/drivers/staging/ccg/f_mass_storage.c
@@ -998,7 +998,7 @@ static int do_synchronize_cache(struct fsg_common *common)
static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
diff --git a/drivers/staging/ccg/rndis.c b/drivers/staging/ccg/rndis.c
index e4192b887de9..d9297eebbf73 100644
--- a/drivers/staging/ccg/rndis.c
+++ b/drivers/staging/ccg/rndis.c
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+ rndis_params *p = PDE(file_inode(file))->data;
u32 speed = 0;
int i, fl_speed = 0;
diff --git a/drivers/staging/ccg/storage_common.c b/drivers/staging/ccg/storage_common.c
index 8d9bcd8207c8..abb01ac74cec 100644
--- a/drivers/staging/ccg/storage_common.c
+++ b/drivers/staging/ccg/storage_common.c
@@ -656,7 +656,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
diff --git a/drivers/staging/dgrp/dgrp_specproc.c b/drivers/staging/dgrp/dgrp_specproc.c
index 13c7ccf163c5..73f287f96604 100644
--- a/drivers/staging/dgrp/dgrp_specproc.c
+++ b/drivers/staging/dgrp/dgrp_specproc.c
@@ -357,7 +357,7 @@ static int dgrp_gen_proc_open(struct inode *inode, struct file *file)
struct dgrp_proc_entry *entry;
int ret = 0;
- de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+ de = (struct proc_dir_entry *) PDE(file_inode(file));
if (!de || !de->data) {
ret = -ENXIO;
goto done;
@@ -387,7 +387,7 @@ static int dgrp_gen_proc_close(struct inode *inode, struct file *file)
struct proc_dir_entry *de;
struct dgrp_proc_entry *entry;
- de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+ de = (struct proc_dir_entry *) PDE(file_inode(file));
if (!de || !de->data)
goto done;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 427218b8b10f..ae0abc350e34 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -23,6 +23,8 @@ source "drivers/staging/media/as102/Kconfig"
source "drivers/staging/media/cxd2099/Kconfig"
+source "drivers/staging/media/davinci_vpfe/Kconfig"
+
source "drivers/staging/media/dt3155v4l/Kconfig"
source "drivers/staging/media/go7007/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index aec6eb963940..2b97cae99499 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index aaf1bc2ad1b2..9f275f020150 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -374,10 +374,8 @@ static int as102_usb_probe(struct usb_interface *intf,
}
as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
- if (as102_dev == NULL) {
- dev_err(&intf->dev, "%s: kzalloc failed\n", __func__);
+ if (as102_dev == NULL)
return -ENOMEM;
- }
/* Assign the user-friendly device name */
for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) {
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index d2a4bce89623..4a2bbd766655 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -197,7 +197,7 @@ out:
* @prsp: pointer to AS10x command response buffer
* @proc_id: id of the command
*
- * Since the contex command reponse does not follow the common
+ * Since the context command response does not follow the common
* response, a specific parse function is required.
* Return 0 on success or negative value in case of error.
*/
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 0ff19724992f..822c487592a4 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -66,8 +66,9 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = m, .len = 2};
if (i2c_transfer(adapter, &msg, 1) != 1) {
- printk(KERN_ERR "Failed to write to I2C register %02x@%02x!\n",
- reg, adr);
+ dev_err(&adapter->dev,
+ "Failed to write to I2C register %02x@%02x!\n",
+ reg, adr);
return -1;
}
return 0;
@@ -79,7 +80,7 @@ static int i2c_write(struct i2c_adapter *adapter, u8 adr,
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
if (i2c_transfer(adapter, &msg, 1) != 1) {
- printk(KERN_ERR "Failed to write to I2C!\n");
+ dev_err(&adapter->dev, "Failed to write to I2C!\n");
return -1;
}
return 0;
@@ -94,7 +95,7 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
.buf = val, .len = 1} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
- printk(KERN_ERR "error in i2c_read_reg\n");
+ dev_err(&adapter->dev, "error in i2c_read_reg\n");
return -1;
}
return 0;
@@ -109,7 +110,7 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
.buf = data, .len = n} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
- printk(KERN_ERR "error in i2c_read\n");
+ dev_err(&adapter->dev, "error in i2c_read\n");
return -1;
}
return 0;
@@ -277,7 +278,7 @@ static void cam_mode(struct cxd *ci, int mode)
#ifdef BUFFER_MODE
if (!ci->en.read_data)
return;
- printk(KERN_INFO "enable cam buffer mode\n");
+ dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
/* write_reg(ci, 0x0d, 0x00); */
/* write_reg(ci, 0x0e, 0x01); */
write_regm(ci, 0x08, 0x40, 0x40);
@@ -524,7 +525,7 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
msleep(10);
#if 0
read_reg(ci, 0x06, &val);
- printk(KERN_INFO "%d:%02x\n", i, val);
+ dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
if (!(val&0x10))
break;
#else
@@ -542,7 +543,7 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct cxd *ci = ca->data;
- printk(KERN_INFO "slot_shutdown\n");
+ dev_info(&ci->i2c->dev, "slot_shutdown\n");
mutex_lock(&ci->lock);
write_regm(ci, 0x09, 0x08, 0x08);
write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
@@ -578,10 +579,10 @@ static int campoll(struct cxd *ci)
if (istat&0x40) {
ci->dr = 1;
- printk(KERN_INFO "DR\n");
+ dev_info(&ci->i2c->dev, "DR\n");
}
if (istat&0x20)
- printk(KERN_INFO "WC\n");
+ dev_info(&ci->i2c->dev, "WC\n");
if (istat&2) {
u8 slotstat;
@@ -597,7 +598,7 @@ static int campoll(struct cxd *ci)
if (ci->slot_stat) {
ci->slot_stat = 0;
write_regm(ci, 0x03, 0x00, 0x08);
- printk(KERN_INFO "NO CAM\n");
+ dev_info(&ci->i2c->dev, "NO CAM\n");
ci->ready = 0;
}
}
@@ -634,7 +635,7 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
campoll(ci);
mutex_unlock(&ci->lock);
- printk(KERN_INFO "read_data\n");
+ dev_info(&ci->i2c->dev, "read_data\n");
if (!ci->dr)
return 0;
@@ -687,7 +688,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
u8 val;
if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
- printk(KERN_INFO "No CXD2099 detected at %02x\n", cfg->adr);
+ dev_info(&i2c->dev, "No CXD2099 detected at %02x\n", cfg->adr);
return NULL;
}
@@ -705,7 +706,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
ci->en = en_templ;
ci->en.data = ci;
init(ci);
- printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->cfg.adr);
+ dev_info(&i2c->dev, "Attached CXD2099AR at %02x\n", ci->cfg.adr);
return &ci->en;
}
EXPORT_SYMBOL(cxd2099_attach);
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 19c588a59588..0eb607c5b423 100644
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
@@ -43,7 +43,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
void *priv, struct i2c_adapter *i2c)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
new file mode 100644
index 000000000000..2e4a28b018e8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_DM365_VPFE
+ tristate "DM365 VPFE Media Controller Capture Driver"
+ depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for DM365 VPFE based Media Controller Capture driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpfe-mc-capture.
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
new file mode 100644
index 000000000000..c64515c644cd
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_DM365_VPFE) += \
+ dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
+ dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/TODO b/drivers/staging/media/davinci_vpfe/TODO
new file mode 100644
index 000000000000..7015ab35ded5
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/TODO
@@ -0,0 +1,37 @@
+TODO (general):
+==================================
+
+- User space interface refinement
+ - Controls should be used when possible rather than private ioctl
+ - No enums should be used
+ - Use of MC and V4L2 subdev APIs when applicable
+ - Single interface header might suffice
+ - Current interface forces to configure everything at once
+- Get rid of the dm365_ipipe_hw.[ch] layer
+- Active external sub-devices defined by link configuration; no strcmp
+ needed
+- More generic platform data (i2c adapters)
+- The driver should have no knowledge of possible external subdevs; see
+ struct vpfe_subdev_id
+- Some of the hardware control should be refactored
+- Check proper serialisation (through mutexes and spinlocks)
+- Names that are visible in kernel global namespace should have a common
+ prefix (or a few)
+- While replacing the older driver in the media folder, provide a compatibility
+  layer and compatibility tests that warrant (using libv4l's LD_PRELOAD
+  approach) that there is no regression for users of the older driver.
+
+Building of uImage and Applications:
+==================================
+
+As of now, since the interface will undergo a few changes, all the include
+files are present in staging itself. To build for dm365, follow the steps
+below:
+
+- copy vpfe.h from drivers/staging/media/davinci_vpfe/ to
+ include/media/davinci/ folder for building the uImage.
+- copy davinci_vpfe_user.h from drivers/staging/media/davinci_vpfe/ to
+  include/uapi/linux/davinci_vpfe.h, and add an entry in Kbuild (required
+  for building applications).
+- copy dm365_ipipeif_user.h from drivers/staging/media/davinci_vpfe/ to
+  include/uapi/linux/dm365_ipipeif.h, and add an entry in Kbuild (required
+  for building applications).
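
As a rough illustration only (not part of this patch), the Kbuild entries
referred to above would look like:

  # in include/uapi/linux/Kbuild
  header-y += davinci_vpfe.h
  header-y += dm365_ipipeif.h
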
diff --git a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
new file mode 100644
index 000000000000..1dbd56418704
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
@@ -0,0 +1,154 @@
+Davinci Video processing Front End (VPFE) driver
+
+Copyright (C) 2012 Texas Instruments Inc
+
+Contacts: Manjunath Hadli <manjunath.hadli@ti.com>
+ Prabhakar Lad <prabhakar.lad@ti.com>
+
+
+Introduction
+============
+
+This file documents the Texas Instruments Davinci Video processing Front End
+(VPFE) driver. The original driver, located under drivers/media/platform/davinci,
+is now being converted to the Media Controller framework.
+
+Currently the driver has been successfully used on the following
+version of Davinci:
+
+ DM365/DM368
+
+The driver implements V4L2, Media controller and v4l2_subdev interfaces. Sensor,
+lens and flash drivers using the v4l2_subdev interface in the kernel are
+supported.
+
+
+Split to subdevs
+================
+
+The Davinci VPFE is split into V4L2 subdevs, with one subdev representing each
+of the blocks inside the VPFE. Each subdev provides a V4L2 subdev interface to
+userspace.
+
+ DAVINCI ISIF
+ DAVINCI IPIPEIF
+ DAVINCI IPIPE
+ DAVINCI CROP RESIZER
+ DAVINCI RESIZER A
+ DAVINCI RESIZER B
+
+Each possible link in the VPFE is modeled by a link in the Media controller
+interface. For an example program see [1].
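
As a rough illustration of what the example program in [1] does, the entities
exposed by the driver can be enumerated through the media controller API; the
/dev/media0 node name below is an assumption for this sketch.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int main(void)
{
	struct media_entity_desc ent;
	int fd = open("/dev/media0", O_RDONLY);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&ent, 0, sizeof(ent));
	/* MEDIA_ENT_ID_FLAG_NEXT asks for the entity following 'id'. */
	for (ent.id = MEDIA_ENT_ID_FLAG_NEXT;
	     ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &ent) == 0;
	     ent.id |= MEDIA_ENT_ID_FLAG_NEXT)
		printf("entity %u: %s (%u pads, %u links)\n",
		       ent.id, ent.name, ent.pads, ent.links);

	close(fd);
	return 0;
}
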
+
+
+ISIF, IPIPE, and RESIZER block IOCTLs
+======================================
+
+The Davinci Video processing Front End (VPFE) driver supports standard V4L2
+IOCTLs and controls where possible and practical. Much of the functionality
+provided by the VPFE, however, does not fall under the standard IOCTLs.
+
+In general, there is a private ioctl for configuring each of the blocks
+containing hardware-dependent functions.
+
+The following private IOCTLs are supported:
+
+ VIDIOC_VPFE_ISIF_[S/G]_RAW_PARAMS
+ VIDIOC_VPFE_IPIPE_[S/G]_CONFIG
+ VIDIOC_VPFE_RSZ_[S/G]_CONFIG
+
+The parameter structures used by these ioctls are described in
+include/uapi/linux/davinci_vpfe.h.
+
+VIDIOC_VPFE_ISIF_S_RAW_PARAMS, VIDIOC_VPFE_IPIPE_S_CONFIG and
+VIDIOC_VPFE_RSZ_S_CONFIG are used to configure, enable and disable functions
+in the isif, ipipe and resizer blocks respectively; each of them controls
+several functions in its block. VIDIOC_VPFE_ISIF_S_RAW_PARAMS takes a pointer
+to struct vpfe_isif_raw_config as its argument, VIDIOC_VPFE_IPIPE_S_CONFIG a
+pointer to struct vpfe_ipipe_config, and VIDIOC_VPFE_RSZ_S_CONFIG a pointer
+to struct vpfe_rsz_config. The corresponding VIDIOC_VPFE_ISIF_G_RAW_PARAMS,
+VIDIOC_VPFE_IPIPE_G_CONFIG and VIDIOC_VPFE_RSZ_G_CONFIG ioctls return the
+configuration currently set in the isif, ipipe and resizer blocks
+respectively.
+
+The detailed functionality of each VPFE block is described in the Technical
+Reference Manuals (TRMs); see the end of this document for references.
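
A hedged sketch of how user space might use the ISIF ioctls described above;
the subdev node path is an assumption, and only the defect-correction enable
bit is touched for illustration.

#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "davinci_vpfe_user.h"

/* Fetch the current ISIF raw configuration, disable the vertical defect
 * correction stage and write the whole block back.
 */
static int isif_disable_dfc(const char *isif_subdev)
{
	struct vpfe_isif_raw_config raw;
	int fd = open(isif_subdev, O_RDWR);	/* e.g. "/dev/v4l-subdev2" (assumed) */

	if (fd < 0)
		return -1;

	memset(&raw, 0, sizeof(raw));
	if (ioctl(fd, VIDIOC_VPFE_ISIF_G_RAW_PARAMS, &raw) < 0)
		return -1;

	raw.dfc.en = 0;
	return ioctl(fd, VIDIOC_VPFE_ISIF_S_RAW_PARAMS, &raw);
}
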
+
+
+IPIPEIF block IOCTLs
+======================================
+
+The following private IOCTLs are supported:
+
+ VIDIOC_VPFE_IPIPEIF_[S/G]_CONFIG
+
+The parameter structures used by these ioctls are described in
+include/uapi/linux/dm365_ipipeif.h.
+
+VIDIOC_VPFE_IPIPEIF_S_CONFIG is used to configure the ipipeif hardware
+block. Both VIDIOC_VPFE_IPIPEIF_S_CONFIG and VIDIOC_VPFE_IPIPEIF_G_CONFIG
+take a pointer to struct ipipeif_params as their argument.
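
The ipipeif ioctls follow the same pattern as the sketch above; a minimal
fragment (file descriptor handling as in the previous sketch, structure
members left untouched) might look like:

	struct ipipeif_params p;

	/* Read back the current ipipeif configuration and re-apply it. */
	memset(&p, 0, sizeof(p));
	if (ioctl(fd, VIDIOC_VPFE_IPIPEIF_G_CONFIG, &p) == 0)
		ioctl(fd, VIDIOC_VPFE_IPIPEIF_S_CONFIG, &p);
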
+
+
+VPFE Operating Modes
+==========================================
+
+a: Continuous Modes
+------------------------
+
+1: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> SDRAM
+
+2: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
+ |
+ <--------------------<----------------<---------------------<---|
+ |
+ V
+ DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
+
+3: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
+ |
+ <--------------------<----------------<---------------------<---|
+ |
+ V
+ DAVINCI IPIPE---> DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
+
+b: Single Shot Modes
+------------------------
+
+1: SDRAM---> DAVINCI IPIPEIF---> DAVINCI IPIPE---> DAVINCI CROP RESIZER--->|
+ |
+ <----------------<----------------<------------------<---------------<--|
+ |
+ V
+DAVINCI RESIZER [A/B]---> SDRAM
+
+2: SDRAM---> DAVINCI IPIPEIF---> DAVINCI CROP RESIZER--->|
+ |
+ <----------------<----------------<---------------<---|
+ |
+ V
+DAVINCI RESIZER [A/B]---> SDRAM
+
+
+Technical reference manuals (TRMs) and other documentation
+==========================================================
+
+Davinci DM365 TRM:
+<URL:http://www.ti.com/lit/ds/sprs457e/sprs457e.pdf>
+Referenced MARCH 2009-REVISED JUNE 2011
+
+Davinci DM368 TRM:
+<URL:http://www.ti.com/lit/ds/sprs668c/sprs668c.pdf>
+Referenced APRIL 2010-REVISED JUNE 2011
+
+Davinci Video Processing Front End (VPFE) DM36x
+<URL:http://www.ti.com/lit/ug/sprufg8c/sprufg8c.pdf>
+
+
+References
+==========
+
+[1] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
new file mode 100644
index 000000000000..7b7e7b26c1e8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -0,0 +1,1290 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_USER_H
+#define _DAVINCI_VPFE_USER_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Private IOCTL
+ *
+ * VIDIOC_VPFE_ISIF_S_RAW_PARAMS: Set raw params in isif
+ * VIDIOC_VPFE_ISIF_G_RAW_PARAMS: Get raw params from isif
+ * VIDIOC_VPFE_IPIPE_S_CONFIG: Set ipipe engine configuration
+ * VIDIOC_VPFE_IPIPE_G_CONFIG: Get ipipe engine configuration
+ * VIDIOC_VPFE_RSZ_S_CONFIG: Set resizer engine configuration
+ * VIDIOC_VPFE_RSZ_G_CONFIG: Get resizer engine configuration
+ */
+
+#define VIDIOC_VPFE_ISIF_S_RAW_PARAMS \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 1, struct vpfe_isif_raw_config)
+#define VIDIOC_VPFE_ISIF_G_RAW_PARAMS \
+ _IOR('V', BASE_VIDIOC_PRIVATE + 2, struct vpfe_isif_raw_config)
+#define VIDIOC_VPFE_IPIPE_S_CONFIG \
+ _IOWR('P', BASE_VIDIOC_PRIVATE + 3, struct vpfe_ipipe_config)
+#define VIDIOC_VPFE_IPIPE_G_CONFIG \
+ _IOWR('P', BASE_VIDIOC_PRIVATE + 4, struct vpfe_ipipe_config)
+#define VIDIOC_VPFE_RSZ_S_CONFIG \
+ _IOWR('R', BASE_VIDIOC_PRIVATE + 5, struct vpfe_rsz_config)
+#define VIDIOC_VPFE_RSZ_G_CONFIG \
+ _IOWR('R', BASE_VIDIOC_PRIVATE + 6, struct vpfe_rsz_config)
+
+/*
+ * Private controls for ISIF
+ */
+#define VPFE_ISIF_CID_CRGAIN (V4L2_CID_USER_BASE | 0xa001)
+#define VPFE_ISIF_CID_CGRGAIN (V4L2_CID_USER_BASE | 0xa002)
+#define VPFE_ISIF_CID_CGBGAIN (V4L2_CID_USER_BASE | 0xa003)
+#define VPFE_ISIF_CID_CBGAIN (V4L2_CID_USER_BASE | 0xa004)
+#define VPFE_ISIF_CID_GAIN_OFFSET (V4L2_CID_USER_BASE | 0xa005)
+
+/*
+ * Private controls for ISIF and IPIPEIF
+ */
+#define VPFE_CID_DPCM_PREDICTOR (V4L2_CID_USER_BASE | 0xa006)
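
As an aside, a hypothetical user-space fragment using one of these private
controls through the standard V4L2 control interface might look as follows;
the control value and the file descriptor are placeholders, not values taken
from this patch.

	/* Program the ISIF Cr gain via the regular V4L2 control ioctl;
	 * 256 is only a placeholder, see the TRM for the gain format.
	 */
	struct v4l2_control ctrl = {
		.id	= VPFE_ISIF_CID_CRGAIN,
		.value	= 256,
	};

	ioctl(isif_fd, VIDIOC_S_CTRL, &ctrl);
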
+
+/************************************************************************
+ * Vertical Defect Correction parameters
+ ***********************************************************************/
+
+/**
+ * vertical defect correction methods
+ */
+enum vpfe_isif_vdfc_corr_mode {
+ /* Defect level subtraction. Just fed through if saturating */
+ VPFE_ISIF_VDFC_NORMAL,
+ /**
+ * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
+ * if data saturating
+ */
+ VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT,
+ /* Horizontal interpolation (((i-2)+(i+2))/2) */
+ VPFE_ISIF_VDFC_HORZ_INTERPOL
+};
+
+/**
+ * Max Size of the Vertical Defect Correction table
+ */
+#define VPFE_ISIF_VDFC_TABLE_SIZE 8
+
+/**
+ * Values used for shifting up the vdfc defect level
+ */
+enum vpfe_isif_vdfc_shift {
+ /* No Shift */
+ VPFE_ISIF_VDFC_NO_SHIFT,
+ /* Shift by 1 bit */
+ VPFE_ISIF_VDFC_SHIFT_1,
+ /* Shift by 2 bit */
+ VPFE_ISIF_VDFC_SHIFT_2,
+ /* Shift by 3 bit */
+ VPFE_ISIF_VDFC_SHIFT_3,
+ /* Shift by 4 bit */
+ VPFE_ISIF_VDFC_SHIFT_4
+};
+
+/**
+ * Defect Correction (DFC) table entry
+ */
+struct vpfe_isif_vdfc_entry {
+ /* vertical position of defect */
+ unsigned short pos_vert;
+ /* horizontal position of defect */
+ unsigned short pos_horz;
+ /**
+ * Defect level of Vertical line defect position. This is subtracted
+ * from the data at the defect position
+ */
+ unsigned char level_at_pos;
+ /**
+ * Defect level of the pixels upper than the vertical line defect.
+ * This is subtracted from the data
+ */
+ unsigned char level_up_pixels;
+ /**
+ * Defect level of the pixels lower than the vertical line defect.
+ * This is subtracted from the data
+ */
+ unsigned char level_low_pixels;
+};
+
+/**
+ * Structure for Defect Correction (DFC) parameter
+ */
+struct vpfe_isif_dfc {
+ /* enable vertical defect correction */
+ unsigned char en;
+ /* Correction methods */
+ enum vpfe_isif_vdfc_corr_mode corr_mode;
+ /**
+ * 0 - whole line corrected, 1 - not
+ * pixels upper than the defect
+ */
+ unsigned char corr_whole_line;
+ /**
+ * defect level shift value. level_at_pos, level_upper_pos,
+ * and level_lower_pos can be shifted up by this value
+ */
+ enum vpfe_isif_vdfc_shift def_level_shift;
+ /* defect saturation level */
+ unsigned short def_sat_level;
+ /* number of vertical defects. Max is VPFE_ISIF_VDFC_TABLE_SIZE */
+ short num_vdefects;
+ /* VDFC table ptr */
+ struct vpfe_isif_vdfc_entry table[VPFE_ISIF_VDFC_TABLE_SIZE];
+};
+
+/************************************************************************
+* Digital/Black clamp or DC Subtract parameters
+************************************************************************/
+/**
+ * Horizontal Black Clamp modes
+ */
+enum vpfe_isif_horz_bc_mode {
+ /**
+ * Horizontal clamp disabled. Only vertical clamp
+ * value is subtracted
+ */
+ VPFE_ISIF_HORZ_BC_DISABLE,
+ /**
+ * Horizontal clamp value is calculated and subtracted
+ * from image data along with vertical clamp value
+ */
+ VPFE_ISIF_HORZ_BC_CLAMP_CALC_ENABLED,
+ /**
+ * Horizontal clamp value calculated from previous image
+ * is subtracted from image data along with vertical clamp
+ * value. How the horizontal clamp value for the first image
+ * is calculated in this case ???
+ */
+ VPFE_ISIF_HORZ_BC_CLAMP_NOT_UPDATED
+};
+
+/**
+ * Base window selection for Horizontal Black Clamp calculations
+ */
+enum vpfe_isif_horz_bc_base_win_sel {
+ /* Select Most left window for bc calculation */
+ VPFE_ISIF_SEL_MOST_LEFT_WIN,
+
+ /* Select Most right window for bc calculation */
+ VPFE_ISIF_SEL_MOST_RIGHT_WIN,
+};
+
+/* Size of window in horizontal direction for horizontal bc */
+enum vpfe_isif_horz_bc_sz_h {
+ VPFE_ISIF_HORZ_BC_SZ_H_2PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_4PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_8PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_16PIXELS
+};
+
+/* Size of window in vertical direction for vertical bc */
+enum vpfe_isif_horz_bc_sz_v {
+ VPFE_ISIF_HORZ_BC_SZ_H_32PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_64PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_128PIXELS,
+ VPFE_ISIF_HORZ_BC_SZ_H_256PIXELS
+};
+
+/**
+ * Structure for Horizontal Black Clamp config params
+ */
+struct vpfe_isif_horz_bclamp {
+ /* horizontal clamp mode */
+ enum vpfe_isif_horz_bc_mode mode;
+ /**
+ * pixel value limit enable.
+ * 0 - limit disabled
+ * 1 - pixel value limited to 1023
+ */
+ unsigned char clamp_pix_limit;
+ /**
+ * Select most left or right window for clamp val
+ * calculation
+ */
+ enum vpfe_isif_horz_bc_base_win_sel base_win_sel_calc;
+ /* Window count per color for calculation. range 1-32 */
+ unsigned char win_count_calc;
+ /* Window start position - horizontal for calculation. 0 - 8191 */
+ unsigned short win_start_h_calc;
+ /* Window start position - vertical for calculation 0 - 8191 */
+ unsigned short win_start_v_calc;
+ /* Width of the sample window in pixels for calculation */
+ enum vpfe_isif_horz_bc_sz_h win_h_sz_calc;
+ /* Height of the sample window in pixels for calculation */
+ enum vpfe_isif_horz_bc_sz_v win_v_sz_calc;
+};
+
+/**
+ * Black Clamp vertical reset values
+ */
+enum vpfe_isif_vert_bc_reset_val_sel {
+ /* Reset value used is the clamp value calculated */
+ VPFE_ISIF_VERT_BC_USE_HORZ_CLAMP_VAL,
+ /* Reset value used is reset_clamp_val configured */
+ VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL,
+ /* No update, previous image value is used */
+ VPFE_ISIF_VERT_BC_NO_UPDATE
+};
+
+enum vpfe_isif_vert_bc_sz_h {
+ VPFE_ISIF_VERT_BC_SZ_H_2PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_4PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_8PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_16PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_32PIXELS,
+ VPFE_ISIF_VERT_BC_SZ_H_64PIXELS
+};
+
+/**
+ * Structure for Vertical Black Clamp configuration params
+ */
+struct vpfe_isif_vert_bclamp {
+ /* Reset value selection for vertical clamp calculation */
+ enum vpfe_isif_vert_bc_reset_val_sel reset_val_sel;
+ /* U12 value if reset_sel = ISIF_BC_VERT_USE_CONFIG_CLAMP_VAL */
+ unsigned short reset_clamp_val;
+ /**
+ * U8Q8. Line average coefficient used in vertical clamp
+ * calculation
+ */
+ unsigned char line_ave_coef;
+ /* Width in pixels of the optical black region used for calculation. */
+ enum vpfe_isif_vert_bc_sz_h ob_h_sz_calc;
+ /* Height of the optical black region for calculation */
+ unsigned short ob_v_sz_calc;
+ /* Optical black region start position - horizontal. 0 - 8191 */
+ unsigned short ob_start_h;
+ /* Optical black region start position - vertical 0 - 8191 */
+ unsigned short ob_start_v;
+};
+
+/**
+ * Structure for Black Clamp configuration params
+ */
+struct vpfe_isif_black_clamp {
+ /**
+ * this offset value is added irrespective of the clamp
+ * enable status. S13
+ */
+ unsigned short dc_offset;
+ /**
+ * Enable black/digital clamp value to be subtracted
+ * from the image data
+ */
+ unsigned char en;
+ /**
+ * black clamp mode. same/separate clamp for 4 colors
+ * 0 - disable - same clamp value for all colors
+ * 1 - clamp value calculated separately for all colors
+ */
+ unsigned char bc_mode_color;
+ /* Vertical start position for bc subtraction */
+ unsigned short vert_start_sub;
+ /* Black clamp for horizontal direction */
+ struct vpfe_isif_horz_bclamp horz;
+ /* Black clamp for vertical direction */
+ struct vpfe_isif_vert_bclamp vert;
+};
+
+/*************************************************************************
+** Color Space Conversion (CSC)
+*************************************************************************/
+/**
+ * Number of Coefficient values used for CSC
+ */
+#define VPFE_ISIF_CSC_NUM_COEFF 16
+
+struct float_8_bit {
+ /* 8 bit integer part */
+ __u8 integer;
+ /* 8 bit decimal part */
+ __u8 decimal;
+};
+
+struct float_16_bit {
+ /* 16 bit integer part */
+ __u16 integer;
+ /* 16 bit decimal part */
+ __u16 decimal;
+};
+
+/*************************************************************************
+** Color Space Conversion parameters
+*************************************************************************/
+/**
+ * Structure used for CSC config params
+ */
+struct vpfe_isif_color_space_conv {
+ /* Enable color space conversion */
+ unsigned char en;
+ /**
+ * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
+ * so forth
+ */
+ struct float_8_bit coeff[VPFE_ISIF_CSC_NUM_COEFF];
+};
+
+enum vpfe_isif_datasft {
+ /* No Shift */
+ VPFE_ISIF_NO_SHIFT,
+ /* 1 bit Shift */
+ VPFE_ISIF_1BIT_SHIFT,
+ /* 2 bit Shift */
+ VPFE_ISIF_2BIT_SHIFT,
+ /* 3 bit Shift */
+ VPFE_ISIF_3BIT_SHIFT,
+ /* 4 bit Shift */
+ VPFE_ISIF_4BIT_SHIFT,
+ /* 5 bit Shift */
+ VPFE_ISIF_5BIT_SHIFT,
+ /* 6 bit Shift */
+ VPFE_ISIF_6BIT_SHIFT
+};
+
+#define VPFE_ISIF_LINEAR_TAB_SIZE 192
+/*************************************************************************
+** Linearization parameters
+*************************************************************************/
+/**
+ * Structure for Sensor data linearization
+ */
+struct vpfe_isif_linearize {
+ /* Enable or Disable linearization of data */
+ unsigned char en;
+ /* Shift value applied */
+ enum vpfe_isif_datasft corr_shft;
+ /* scale factor applied U11Q10 */
+ struct float_16_bit scale_fact;
+ /* Size of the linear table */
+ unsigned short table[VPFE_ISIF_LINEAR_TAB_SIZE];
+};
+
+/*************************************************************************
+** ISIF Raw configuration parameters
+*************************************************************************/
+enum vpfe_isif_fmt_mode {
+ VPFE_ISIF_SPLIT,
+ VPFE_ISIF_COMBINE
+};
+
+enum vpfe_isif_lnum {
+ VPFE_ISIF_1LINE,
+ VPFE_ISIF_2LINES,
+ VPFE_ISIF_3LINES,
+ VPFE_ISIF_4LINES
+};
+
+enum vpfe_isif_line {
+ VPFE_ISIF_1STLINE,
+ VPFE_ISIF_2NDLINE,
+ VPFE_ISIF_3RDLINE,
+ VPFE_ISIF_4THLINE
+};
+
+struct vpfe_isif_fmtplen {
+ /**
+ * number of program entries for SET0, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen0;
+ /**
+ * number of program entries for SET1, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen1;
+ /**
+ * number of program entries for SET2, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen2;
+ /**
+ * number of program entries for SET3, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ unsigned short plen3;
+};
+
+struct vpfe_isif_fmt_cfg {
+ /* Split or combine or line alternate */
+ enum vpfe_isif_fmt_mode fmtmode;
+ /* enable or disable line alternating mode */
+ unsigned char ln_alter_en;
+ /* Split/combine line number */
+ enum vpfe_isif_lnum lnum;
+ /* Address increment Range 1 - 16 */
+ unsigned int addrinc;
+};
+
+struct vpfe_isif_fmt_addr_ptr {
+ /* Initial address */
+ unsigned int init_addr;
+ /* output line number */
+ enum vpfe_isif_line out_line;
+};
+
+struct vpfe_isif_fmtpgm_ap {
+ /* program address pointer */
+ unsigned char pgm_aptr;
+ /* program address increment or decrement */
+ unsigned char pgmupdt;
+};
+
+struct vpfe_isif_data_formatter {
+ /* Enable/Disable data formatter */
+ unsigned char en;
+ /* data formatter configuration */
+ struct vpfe_isif_fmt_cfg cfg;
+ /* Formatter program entries length */
+ struct vpfe_isif_fmtplen plen;
+ /* first pixel in a line fed to formatter */
+ unsigned short fmtrlen;
+ /* HD interval for output line. Only valid when split line */
+ unsigned short fmthcnt;
+ /* formatter address pointers */
+ struct vpfe_isif_fmt_addr_ptr fmtaddr_ptr[16];
+ /* program enable/disable */
+ unsigned char pgm_en[32];
+ /* program address pointers */
+ struct vpfe_isif_fmtpgm_ap fmtpgm_ap[32];
+};
+
+struct vpfe_isif_df_csc {
+ /* Color Space Conversion configuration, 0 - csc, 1 - df */
+ unsigned int df_or_csc;
+ /* csc configuration valid if df_or_csc is 0 */
+ struct vpfe_isif_color_space_conv csc;
+ /* data formatter configuration valid if df_or_csc is 1 */
+ struct vpfe_isif_data_formatter df;
+ /* start pixel in a line at the input */
+ unsigned int start_pix;
+ /* number of pixels in input line */
+ unsigned int num_pixels;
+ /* start line at the input */
+ unsigned int start_line;
+ /* number of lines at the input */
+ unsigned int num_lines;
+};
+
+struct vpfe_isif_gain_offsets_adj {
+ /* Enable or Disable Gain adjustment for SDRAM data */
+ unsigned char gain_sdram_en;
+ /* Enable or Disable Gain adjustment for IPIPE data */
+ unsigned char gain_ipipe_en;
+ /* Enable or Disable Gain adjustment for H3A data */
+ unsigned char gain_h3a_en;
+ /* Enable or Disable Gain adjustment for SDRAM data */
+ unsigned char offset_sdram_en;
+ /* Enable or Disable Gain adjustment for IPIPE data */
+ unsigned char offset_ipipe_en;
+ /* Enable or Disable Gain adjustment for H3A data */
+ unsigned char offset_h3a_en;
+};
+
+struct vpfe_isif_cul {
+ /* Horizontal Cull pattern for odd lines */
+ unsigned char hcpat_odd;
+ /* Horizontal Cull pattern for even lines */
+ unsigned char hcpat_even;
+ /* Vertical Cull pattern */
+ unsigned char vcpat;
+ /* Enable or disable lpf. Apply when cull is enabled */
+ unsigned char en_lpf;
+};
+
+/* all the stuff in this struct will be provided by userland */
+struct vpfe_isif_raw_config {
+ /* Linearization parameters for image sensor data input */
+ struct vpfe_isif_linearize linearize;
+ /* Data formatter or CSC */
+ struct vpfe_isif_df_csc df_csc;
+ /* Defect Pixel Correction (DFC) configuration */
+ struct vpfe_isif_dfc dfc;
+ /* Black/Digital Clamp configuration */
+ struct vpfe_isif_black_clamp bclamp;
+ /* Gain, offset adjustments */
+ struct vpfe_isif_gain_offsets_adj gain_offset;
+ /* Culling */
+ struct vpfe_isif_cul culling;
+ /* horizontal offset for Gain/LSC/DFC */
+ unsigned short horz_offset;
+ /* vertical offset for Gain/LSC/DFC */
+ unsigned short vert_offset;
+};
+
+/**********************************************************************
+ IPIPE API Structures
+**********************************************************************/
+
+/* IPIPE module configurations */
+
+/* IPIPE input configuration */
+#define VPFE_IPIPE_INPUT_CONFIG (1 << 0)
+/* LUT based Defect Pixel Correction */
+#define VPFE_IPIPE_LUTDPC (1 << 1)
+/* On the fly (OTF) Defect Pixel Correction */
+#define VPFE_IPIPE_OTFDPC (1 << 2)
+/* Noise Filter - 1 */
+#define VPFE_IPIPE_NF1 (1 << 3)
+/* Noise Filter - 2 */
+#define VPFE_IPIPE_NF2 (1 << 4)
+/* White Balance. Also a control ID */
+#define VPFE_IPIPE_WB (1 << 5)
+/* 1st RGB to RGB Blend module */
+#define VPFE_IPIPE_RGB2RGB_1 (1 << 6)
+/* 2nd RGB to RGB Blend module */
+#define VPFE_IPIPE_RGB2RGB_2 (1 << 7)
+/* Gamma Correction */
+#define VPFE_IPIPE_GAMMA (1 << 8)
+/* 3D LUT color conversion */
+#define VPFE_IPIPE_3D_LUT (1 << 9)
+/* RGB to YCbCr module */
+#define VPFE_IPIPE_RGB2YUV (1 << 10)
+/* YUV 422 conversion module */
+#define VPFE_IPIPE_YUV422_CONV (1 << 11)
+/* Edge Enhancement */
+#define VPFE_IPIPE_YEE (1 << 12)
+/* Green Imbalance Correction */
+#define VPFE_IPIPE_GIC (1 << 13)
+/* CFA Interpolation */
+#define VPFE_IPIPE_CFA (1 << 14)
+/* Chroma Artifact Reduction */
+#define VPFE_IPIPE_CAR (1 << 15)
+/* Chroma Gain Suppression */
+#define VPFE_IPIPE_CGS (1 << 16)
+/* Global brightness and contrast control */
+#define VPFE_IPIPE_GBCE (1 << 17)
+
+#define VPFE_IPIPE_MAX_MODULES 18
+
+struct ipipe_float_u16 {
+ unsigned short integer;
+ unsigned short decimal;
+};
+
+struct ipipe_float_s16 {
+ short integer;
+ unsigned short decimal;
+};
+
+struct ipipe_float_u8 {
+ unsigned char integer;
+ unsigned char decimal;
+};
+
+/* Copy method selection for vertical correction
+ * Used when ipipe_dfc_corr_meth is IPIPE_DPC_CTORB_AFTER_HINT
+ */
+enum vpfe_ipipe_dpc_corr_meth {
+ /* replace by black or white dot specified by repl_white */
+ VPFE_IPIPE_DPC_REPL_BY_DOT = 0,
+ /* Copy from left */
+ VPFE_IPIPE_DPC_CL = 1,
+ /* Copy from right */
+ VPFE_IPIPE_DPC_CR = 2,
+ /* Horizontal interpolation */
+ VPFE_IPIPE_DPC_H_INTP = 3,
+ /* Vertical interpolation */
+ VPFE_IPIPE_DPC_V_INTP = 4,
+ /* Copy from top */
+ VPFE_IPIPE_DPC_CT = 5,
+ /* Copy from bottom */
+ VPFE_IPIPE_DPC_CB = 6,
+ /* 2D interpolation */
+ VPFE_IPIPE_DPC_2D_INTP = 7,
+};
+
+struct vpfe_ipipe_lutdpc_entry {
+ /* Horizontal position */
+ unsigned short horz_pos;
+ /* vertical position */
+ unsigned short vert_pos;
+ enum vpfe_ipipe_dpc_corr_meth method;
+};
+
+#define VPFE_IPIPE_MAX_SIZE_DPC 256
+
+/* Structure for configuring DPC module */
+struct vpfe_ipipe_lutdpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* 0 - replace with black dot, 1 - white dot when correction
+ * method is IPIPE_DFC_REPL_BY_DOT=0,
+ */
+ unsigned char repl_white;
+ /* number of entries in the correction table. Currently only
+ * support up-to 256 entries. infinite mode is not supported
+ */
+ unsigned short dpc_size;
+ struct vpfe_ipipe_lutdpc_entry table[VPFE_IPIPE_MAX_SIZE_DPC];
+};
+
+enum vpfe_ipipe_otfdpc_det_meth {
+ VPFE_IPIPE_DPC_OTF_MIN_MAX,
+ VPFE_IPIPE_DPC_OTF_MIN_MAX2
+};
+
+struct vpfe_ipipe_otfdpc_thr {
+ unsigned short r;
+ unsigned short gr;
+ unsigned short gb;
+ unsigned short b;
+};
+
+enum vpfe_ipipe_otfdpc_alg {
+ VPFE_IPIPE_OTFDPC_2_0,
+ VPFE_IPIPE_OTFDPC_3_0
+};
+
+struct vpfe_ipipe_otfdpc_2_0_cfg {
+ /* defect detection threshold for MIN_MAX2 method (DPC 2.0 alg) */
+ struct vpfe_ipipe_otfdpc_thr det_thr;
+ /* defect correction threshold for MIN_MAX2 method (DPC 2.0 alg) or
+ * maximum value for MIN_MAX method
+ */
+ struct vpfe_ipipe_otfdpc_thr corr_thr;
+};
+
+struct vpfe_ipipe_otfdpc_3_0_cfg {
+ /* DPC3.0 activity adj shf. activity = (max2-min2) >> (6 -shf)
+ */
+ unsigned char act_adj_shf;
+ /* DPC3.0 detection threshold, THR */
+ unsigned short det_thr;
+ /* DPC3.0 detection threshold slope, SLP */
+ unsigned short det_slp;
+ /* DPC3.0 detection threshold min, MIN */
+ unsigned short det_thr_min;
+ /* DPC3.0 detection threshold max, MAX */
+ unsigned short det_thr_max;
+ /* DPC3.0 correction threshold, THR */
+ unsigned short corr_thr;
+ /* DPC3.0 correction threshold slope, SLP */
+ unsigned short corr_slp;
+ /* DPC3.0 correction threshold min, MIN */
+ unsigned short corr_thr_min;
+ /* DPC3.0 correction threshold max, MAX */
+ unsigned short corr_thr_max;
+};
+
+struct vpfe_ipipe_otfdpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ union {
+ /* if alg is IPIPE_OTFDPC_2_0 */
+ struct vpfe_ipipe_otfdpc_2_0_cfg dpc_2_0;
+ /* if alg is IPIPE_OTFDPC_3_0 */
+ struct vpfe_ipipe_otfdpc_3_0_cfg dpc_3_0;
+ } alg_cfg;
+};
+
+/* Threshold values table size */
+#define VPFE_IPIPE_NF_THR_TABLE_SIZE 8
+/* Intensity values table size */
+#define VPFE_IPIPE_NF_STR_TABLE_SIZE 8
+
+/* NF, sampling method for green pixels */
+enum vpfe_ipipe_nf_sampl_meth {
+ /* Same as R or B */
+ VPFE_IPIPE_NF_BOX,
+ /* Diamond mode */
+ VPFE_IPIPE_NF_DIAMOND
+};
+
+/* Structure for configuring NF module */
+struct vpfe_ipipe_nf {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* Sampling method for green pixels */
+ enum vpfe_ipipe_nf_sampl_meth gr_sample_meth;
+ /* Down shift value in LUT reference address
+ */
+ unsigned char shft_val;
+ /* Spread value in NF algorithm
+ */
+ unsigned char spread_val;
+ /* Apply LSC gain to threshold. Enable this only if
+ * LSC is enabled in ISIF
+ */
+ unsigned char apply_lsc_gain;
+ /* Threshold values table */
+ unsigned short thr[VPFE_IPIPE_NF_THR_TABLE_SIZE];
+ /* intensity values table */
+ unsigned char str[VPFE_IPIPE_NF_STR_TABLE_SIZE];
+ /* Edge detection minimum threshold */
+ unsigned short edge_det_min_thr;
+ /* Edge detection maximum threshold */
+ unsigned short edge_det_max_thr;
+};
+
+enum vpfe_ipipe_gic_alg {
+ VPFE_IPIPE_GIC_ALG_CONST_GAIN,
+ VPFE_IPIPE_GIC_ALG_ADAPT_GAIN
+};
+
+enum vpfe_ipipe_gic_thr_sel {
+ VPFE_IPIPE_GIC_THR_REG,
+ VPFE_IPIPE_GIC_THR_NF
+};
+
+enum vpfe_ipipe_gic_wt_fn_type {
+ /* Use difference as index */
+ VPFE_IPIPE_GIC_WT_FN_TYP_DIF,
+ /* Use weight function as index */
+ VPFE_IPIPE_GIC_WT_FN_TYP_HP_VAL
+};
+
+/* structure for Green Imbalance Correction */
+struct vpfe_ipipe_gic {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* 0 - Constant gain , 1 - Adaptive gain algorithm */
+ enum vpfe_ipipe_gic_alg gic_alg;
+ /* GIC gain or weight. Used for Constant gain and Adaptive algorithms
+ */
+ unsigned short gain;
+ /* Threshold selection. GIC register values or NF2 thr table */
+ enum vpfe_ipipe_gic_thr_sel thr_sel;
+ /* thr1. Used when thr_sel is IPIPE_GIC_THR_REG */
+ unsigned short thr;
+ /* This value is used for thr2-thr1, thr3-thr2 or
+ * thr4-thr3 when wt_fn_type uses the difference as index.
+ * Otherwise it is the slope of the weight function.
+ */
+ unsigned short slope;
+ /* Apply LSC gain to threshold. Enable this only if
+ * LSC is enabled in ISIF & thr_sel is IPIPE_GIC_THR_REG
+ */
+ unsigned char apply_lsc_gain;
+ /* Multiply Nf2 threshold by this gain. Use this when thr_sel
+ * is IPIPE_GIC_THR_NF
+ */
+ struct ipipe_float_u8 nf2_thr_gain;
+ /* Weight function uses difference as index or high pass value.
+ * Used for adaptive gain algorithm
+ */
+ enum vpfe_ipipe_gic_wt_fn_type wt_fn_type;
+};
+
+/* Structure for configuring WB module */
+struct vpfe_ipipe_wb {
+ /* Offset (S12) for R */
+ short ofst_r;
+ /* Offset (S12) for Gr */
+ short ofst_gr;
+ /* Offset (S12) for Gb */
+ short ofst_gb;
+ /* Offset (S12) for B */
+ short ofst_b;
+ /* Gain (U13Q9) for Red */
+ struct ipipe_float_u16 gain_r;
+ /* Gain (U13Q9) for Gr */
+ struct ipipe_float_u16 gain_gr;
+ /* Gain (U13Q9) for Gb */
+ struct ipipe_float_u16 gain_gb;
+ /* Gain (U13Q9) for Blue */
+ struct ipipe_float_u16 gain_b;
+};
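The U13Q9 gains are split into an integer part and a fractional part; assuming the .decimal member of struct ipipe_float_u16 holds the 9 fractional bits (consistent with the WB_GAIN_INT/DECI masks used by the validation code in dm365_ipipe.c), a gain of 1.5 is encoded as 1 + 256/512:

/* Example only: zero offsets, red gain 1.5, blue gain 1.25.
 * Assumption: .integer = integer bits, .decimal = 9 fractional bits (Q9).
 */
static const struct vpfe_ipipe_wb example_wb = {
	.gain_r  = { .integer = 1, .decimal = 256 },	/* 1.5  */
	.gain_gr = { .integer = 1, .decimal = 0 },	/* 1.0  */
	.gain_gb = { .integer = 1, .decimal = 0 },	/* 1.0  */
	.gain_b  = { .integer = 1, .decimal = 128 },	/* 1.25 */
};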
+
+enum vpfe_ipipe_cfa_alg {
+ /* Algorithm is 2DirAC */
+ VPFE_IPIPE_CFA_ALG_2DIRAC,
+ /* Algorithm is 2DirAC + Digital Antialiasing (DAA) */
+ VPFE_IPIPE_CFA_ALG_2DIRAC_DAA,
+ /* Algorithm is DAA */
+ VPFE_IPIPE_CFA_ALG_DAA
+};
+
+/* Structure for CFA Interpolation */
+struct vpfe_ipipe_cfa {
+ /* 2DirAC or 2DirAC + DAA */
+ enum vpfe_ipipe_cfa_alg alg;
+ /* 2Dir CFA HP value Low Threshold */
+ unsigned short hpf_thr_2dir;
+ /* 2Dir CFA HP value slope */
+ unsigned short hpf_slp_2dir;
+ /* 2Dir CFA HP mix threshold */
+ unsigned short hp_mix_thr_2dir;
+ /* 2Dir CFA HP mix slope */
+ unsigned short hp_mix_slope_2dir;
+ /* 2Dir Direction threshold */
+ unsigned short dir_thr_2dir;
+ /* 2Dir Direction slope */
+ unsigned short dir_slope_2dir;
+ /* 2Dir Non Directional Weight */
+ unsigned short nd_wt_2dir;
+ /* DAA Mono Hue Fraction */
+ unsigned short hue_fract_daa;
+ /* DAA Mono Edge threshold */
+ unsigned short edge_thr_daa;
+ /* DAA Mono threshold minimum */
+ unsigned short thr_min_daa;
+ /* DAA Mono threshold slope */
+ unsigned short thr_slope_daa;
+ /* DAA Mono slope minimum */
+ unsigned short slope_min_daa;
+ /* DAA Mono slope slope */
+ unsigned short slope_slope_daa;
+ /* DAA Mono LP weight */
+ unsigned short lp_wt_daa;
+};
+
+/* Struct for configuring RGB2RGB blending module */
+struct vpfe_ipipe_rgb2rgb {
+ /* Matrix coefficient for RR S12Q8 for ID = 1 and S11Q8 for ID = 2 */
+ struct ipipe_float_s16 coef_rr;
+ /* Matrix coefficient for GR S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gr;
+ /* Matrix coefficient for BR S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_br;
+ /* Matrix coefficient for RG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_rg;
+ /* Matrix coefficient for GG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gg;
+ /* Matrix coefficient for BG S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_bg;
+ /* Matrix coefficient for RB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_rb;
+ /* Matrix coefficient for GB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_gb;
+ /* Matrix coefficient for BB S12Q8/S11Q8 */
+ struct ipipe_float_s16 coef_bb;
+ /* Output offset for R S13/S11 */
+ int out_ofst_r;
+ /* Output offset for G S13/S11 */
+ int out_ofst_g;
+ /* Output offset for B S13/S11 */
+ int out_ofst_b;
+};
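In the S12Q8/S11Q8 coefficient format, {1, 0} encodes 1.0 (256 in Q8), so a unity blend matrix looks as follows; it matches the defaults that ipipe_set_rgb2rgb_params() in dm365_ipipe.c programs when no parameters are supplied:

/* Example only: identity RGB2RGB matrix, output = input. */
static const struct vpfe_ipipe_rgb2rgb example_rgb2rgb = {
	.coef_rr = {1, 0},	/* 1.0 */
	.coef_gg = {1, 0},	/* 1.0 */
	.coef_bb = {1, 0},	/* 1.0 */
	/* all other coefficients and output offsets stay 0 */
};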
+
+#define VPFE_IPIPE_MAX_SIZE_GAMMA 512
+
+enum vpfe_ipipe_gamma_tbl_size {
+ VPFE_IPIPE_GAMMA_TBL_SZ_64 = 64,
+ VPFE_IPIPE_GAMMA_TBL_SZ_128 = 128,
+ VPFE_IPIPE_GAMMA_TBL_SZ_256 = 256,
+ VPFE_IPIPE_GAMMA_TBL_SZ_512 = 512,
+};
+
+enum vpfe_ipipe_gamma_tbl_sel {
+ VPFE_IPIPE_GAMMA_TBL_RAM = 0,
+ VPFE_IPIPE_GAMMA_TBL_ROM = 1,
+};
+
+struct vpfe_ipipe_gamma_entry {
+ /* 10 bit slope */
+ short slope;
+ /* 10 bit offset */
+ unsigned short offset;
+};
+
+/* Structure for configuring Gamma correction module */
+struct vpfe_ipipe_gamma {
+ /* 0 - Enable Gamma correction for Red
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_r;
+ /* 0 - Enable Gamma correction for Blue
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_b;
+ /* 0 - Enable Gamma correction for Green
+ * 1 - bypass Gamma correction. Data is divided by 16
+ */
+ unsigned char bypass_g;
+ /* IPIPE_GAMMA_TBL_RAM or IPIPE_GAMMA_TBL_ROM */
+ enum vpfe_ipipe_gamma_tbl_sel tbl_sel;
+ /* Table size for RAM gamma table.
+ */
+ enum vpfe_ipipe_gamma_tbl_size tbl_size;
+ /* R table */
+ struct vpfe_ipipe_gamma_entry table_r[VPFE_IPIPE_MAX_SIZE_GAMMA];
+ /* Blue table */
+ struct vpfe_ipipe_gamma_entry table_b[VPFE_IPIPE_MAX_SIZE_GAMMA];
+ /* Green table */
+ struct vpfe_ipipe_gamma_entry table_g[VPFE_IPIPE_MAX_SIZE_GAMMA];
+};
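If the RAM table is selected, each {slope, offset} pair presumably describes one piecewise-linear segment; that interpretation is an assumption, the header does not spell it out. A sketch of filling a straight-line (no-op) curve under that assumption:

/* Example only. Assumption: 'offset' is the output value at the start of
 * segment i and 'slope' the increment across the segment; both 10-bit.
 */
static void example_fill_linear_gamma(struct vpfe_ipipe_gamma *g)
{
	int i;

	g->tbl_sel = VPFE_IPIPE_GAMMA_TBL_RAM;
	g->tbl_size = VPFE_IPIPE_GAMMA_TBL_SZ_512;
	for (i = 0; i < VPFE_IPIPE_GAMMA_TBL_SZ_512; i++) {
		g->table_r[i].offset = i * 2;	/* 0..1022 over 512 segments */
		g->table_r[i].slope = 2;
		g->table_g[i] = g->table_r[i];
		g->table_b[i] = g->table_r[i];
	}
}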
+
+#define VPFE_IPIPE_MAX_SIZE_3D_LUT 729
+
+struct vpfe_ipipe_3d_lut_entry {
+ /* 10 bit entry for red */
+ unsigned short r;
+ /* 10 bit entry for green */
+ unsigned short g;
+ /* 10 bit entry for blue */
+ unsigned short b;
+};
+
+/* structure for 3D-LUT */
+struct vpfe_ipipe_3d_lut {
+ /* enable/disable 3D lut */
+ unsigned char en;
+ /* 3D - LUT table entry */
+ struct vpfe_ipipe_3d_lut_entry table[VPFE_IPIPE_MAX_SIZE_3D_LUT];
+};
+
+/* Struct for configuring rgb2ycbcr module */
+struct vpfe_ipipe_rgb2yuv {
+ /* Matrix coefficient for RY S12Q8 */
+ struct ipipe_float_s16 coef_ry;
+ /* Matrix coefficient for GY S12Q8 */
+ struct ipipe_float_s16 coef_gy;
+ /* Matrix coefficient for BY S12Q8 */
+ struct ipipe_float_s16 coef_by;
+ /* Matrix coefficient for RCb S12Q8 */
+ struct ipipe_float_s16 coef_rcb;
+ /* Matrix coefficient for GCb S12Q8 */
+ struct ipipe_float_s16 coef_gcb;
+ /* Matrix coefficient for BCb S12Q8 */
+ struct ipipe_float_s16 coef_bcb;
+ /* Matrix coefficient for RCr S12Q8 */
+ struct ipipe_float_s16 coef_rcr;
+ /* Matrix coefficient for GCr S12Q8 */
+ struct ipipe_float_s16 coef_gcr;
+ /* Matrix coefficient for BCr S12Q8 */
+ struct ipipe_float_s16 coef_bcr;
+ /* Output offset for Y S11 */
+ int out_ofst_y;
+ /* Output offset for Cb S11 */
+ int out_ofst_cb;
+ /* Output offset for Cr S11 */
+ int out_ofst_cr;
+};
+
+enum vpfe_ipipe_gbce_type {
+ VPFE_IPIPE_GBCE_Y_VAL_TBL = 0,
+ VPFE_IPIPE_GBCE_GAIN_TBL = 1,
+};
+
+#define VPFE_IPIPE_MAX_SIZE_GBCE_LUT 1024
+
+/* structure for Global brightness and Contrast */
+struct vpfe_ipipe_gbce {
+ /* enable/disable GBCE */
+ unsigned char en;
+ /* Y - value table or Gain table */
+ enum vpfe_ipipe_gbce_type type;
+ /* LUT for GBCE with 1024 entries */
+ unsigned short table[VPFE_IPIPE_MAX_SIZE_GBCE_LUT];
+};
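For the Y-value table type, an identity (pass-through) curve would map every input level to itself; whether the hardware interprets the entries exactly this way is an assumption, so treat this as a sketch only:

/* Example only. Assumption: table[i] is the output Y for input level i. */
static void example_fill_identity_gbce(struct vpfe_ipipe_gbce *gbce)
{
	int i;

	gbce->en = 1;
	gbce->type = VPFE_IPIPE_GBCE_Y_VAL_TBL;
	for (i = 0; i < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; i++)
		gbce->table[i] = i;
}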
+
+/* Chrominance position. Applicable only for YCbCr input
+ * Applied after edge enhancement
+ */
+enum vpfe_chr_pos {
+ /* Co-siting, same position with luminance */
+ VPFE_IPIPE_YUV422_CHR_POS_COSITE = 0,
+ /* Centering, In the middle of luminance */
+ VPFE_IPIPE_YUV422_CHR_POS_CENTRE = 1,
+};
+
+/* Structure for configuring yuv422 conversion module */
+struct vpfe_ipipe_yuv422_conv {
+ /* 1 - enable LPF for chrominance, 0 - disable */
+ unsigned char en_chrom_lpf;
+ /* Chrominance position, co-sited or centred */
+ enum vpfe_chr_pos chrom_pos;
+};
+
+#define VPFE_IPIPE_MAX_SIZE_YEE_LUT 1024
+
+enum vpfe_ipipe_yee_merge_meth {
+ VPFE_IPIPE_YEE_ABS_MAX = 0,
+ VPFE_IPIPE_YEE_EE_ES = 1,
+};
+
+/* Structure for configuring YUV Edge Enhancement module */
+struct vpfe_ipipe_yee {
+ /* 1 - enable enhancement, 0 - disable */
+ unsigned char en;
+ /* enable/disable halo reduction in the edge sharpener */
+ unsigned char en_halo_red;
+ /* Merge method between Edge Enhancer and Edge Sharpener */
+ enum vpfe_ipipe_yee_merge_meth merge_meth;
+ /* HPF Shift length */
+ unsigned char hpf_shft;
+ /* HPF Coefficient 00, S10 */
+ short hpf_coef_00;
+ /* HPF Coefficient 01, S10 */
+ short hpf_coef_01;
+ /* HPF Coefficient 02, S10 */
+ short hpf_coef_02;
+ /* HPF Coefficient 10, S10 */
+ short hpf_coef_10;
+ /* HPF Coefficient 11, S10 */
+ short hpf_coef_11;
+ /* HPF Coefficient 12, S10 */
+ short hpf_coef_12;
+ /* HPF Coefficient 20, S10 */
+ short hpf_coef_20;
+ /* HPF Coefficient 21, S10 */
+ short hpf_coef_21;
+ /* HPF Coefficient 22, S10 */
+ short hpf_coef_22;
+ /* Lower threshold before referring to LUT */
+ unsigned short yee_thr;
+ /* Edge sharpener Gain */
+ unsigned short es_gain;
+ /* Edge sharpener lower threshold */
+ unsigned short es_thr1;
+ /* Edge sharpener upper threshold */
+ unsigned short es_thr2;
+ /* Edge sharpener gain on gradient */
+ unsigned short es_gain_grad;
+ /* Edge sharpener offset on gradient */
+ unsigned short es_ofst_grad;
+ /* EE table. Must have 1024 entries */
+ short table[VPFE_IPIPE_MAX_SIZE_YEE_LUT];
+};
+
+enum vpfe_ipipe_car_meth {
+ /* Chromatic Gain Control */
+ VPFE_IPIPE_CAR_CHR_GAIN_CTRL = 0,
+ /* Dynamic switching between CHR_GAIN_CTRL
+ * and MED_FLTR
+ */
+ VPFE_IPIPE_CAR_DYN_SWITCH = 1,
+ /* Median Filter */
+ VPFE_IPIPE_CAR_MED_FLTR = 2,
+};
+
+enum vpfe_ipipe_car_hpf_type {
+ VPFE_IPIPE_CAR_HPF_Y = 0,
+ VPFE_IPIPE_CAR_HPF_H = 1,
+ VPFE_IPIPE_CAR_HPF_V = 2,
+ VPFE_IPIPE_CAR_HPF_2D = 3,
+ /* 2D HPF from YUV Edge Enhancement */
+ VPFE_IPIPE_CAR_HPF_2D_YEE = 4,
+};
+
+struct vpfe_ipipe_car_gain {
+ /* csup_gain */
+ unsigned char gain;
+ /* csup_shf. */
+ unsigned char shft;
+ /* gain minimum */
+ unsigned short gain_min;
+};
+
+/* Structure for Chromatic Artifact Reduction */
+struct vpfe_ipipe_car {
+ /* enable/disable */
+ unsigned char en;
+ /* Gain control or Dynamic switching */
+ enum vpfe_ipipe_car_meth meth;
+ /* Gain1 function configuration for Gain control */
+ struct vpfe_ipipe_car_gain gain1;
+ /* Gain2 function configuration for Gain control */
+ struct vpfe_ipipe_car_gain gain2;
+ /* HPF type used for CAR */
+ enum vpfe_ipipe_car_hpf_type hpf;
+ /* csup_thr: HPF threshold for Gain control */
+ unsigned char hpf_thr;
+ /* Down shift value for hpf. 2 bits */
+ unsigned char hpf_shft;
+ /* switch limit for median filter */
+ unsigned char sw0;
+ /* switch coefficient for Gain control */
+ unsigned char sw1;
+};
+
+/* structure for Chromatic Gain Suppression */
+struct vpfe_ipipe_cgs {
+ /* enable/disable */
+ unsigned char en;
+ /* gain1 bright side threshold */
+ unsigned char h_thr;
+ /* gain1 bright side slope */
+ unsigned char h_slope;
+ /* gain1 down shift value for bright side */
+ unsigned char h_shft;
+ /* gain1 bright side minimum gain */
+ unsigned char h_min;
+};
+
+/* Max pixels allowed in the input. If the input is wider, either
+ * decimation or frame division mode must be enabled.
+ */
+#define VPFE_IPIPE_MAX_INPUT_WIDTH 2600
+
+struct vpfe_ipipe_input_config {
+ /* Vertical start position of the image data */
+ unsigned int vst;
+ /* Horizontal start position of the image data */
+ unsigned int hst;
+};
+
+/**
+ * struct vpfe_ipipe_config - IPIPE engine configuration (user)
+ * @flag: Specifies which ISP IPIPE functions should be enabled.
+ * @input_config: Pointer to structure for ipipe input configuration.
+ * @lutdpc: Pointer to structure for LUT-based defect pixel correction.
+ * @otfdpc: Pointer to structure for on-the-fly defect pixel correction.
+ * @nf1: Pointer to structure for the first Noise Filter.
+ * @nf2: Pointer to structure for the second Noise Filter.
+ * @gic: Pointer to structure for Green Imbalance Correction.
+ * @wbal: Pointer to structure for White Balance.
+ * @cfa: Pointer to structure for CFA interpolation.
+ * @rgb2rgb1: Pointer to structure for the first RGB to RGB Blending.
+ * @rgb2rgb2: Pointer to structure for the second RGB to RGB Blending.
+ * @gamma: Pointer to structure for Gamma correction.
+ * @lut: Pointer to structure for 3D LUT.
+ * @rgb2yuv: Pointer to structure for RGB to YCbCr conversion.
+ * @gbce: Pointer to structure for Global Brightness and Contrast Control.
+ * @yuv422_conv: Pointer to structure for YUV 422 conversion.
+ * @yee: Pointer to structure for Edge Enhancement.
+ * @car: Pointer to structure for Chromatic Artifact Reduction.
+ * @cgs: Pointer to structure for Chromatic Gain Suppression.
+ */
+struct vpfe_ipipe_config {
+ __u32 flag;
+ struct vpfe_ipipe_input_config __user *input_config;
+ struct vpfe_ipipe_lutdpc __user *lutdpc;
+ struct vpfe_ipipe_otfdpc __user *otfdpc;
+ struct vpfe_ipipe_nf __user *nf1;
+ struct vpfe_ipipe_nf __user *nf2;
+ struct vpfe_ipipe_gic __user *gic;
+ struct vpfe_ipipe_wb __user *wbal;
+ struct vpfe_ipipe_cfa __user *cfa;
+ struct vpfe_ipipe_rgb2rgb __user *rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb __user *rgb2rgb2;
+ struct vpfe_ipipe_gamma __user *gamma;
+ struct vpfe_ipipe_3d_lut __user *lut;
+ struct vpfe_ipipe_rgb2yuv __user *rgb2yuv;
+ struct vpfe_ipipe_gbce __user *gbce;
+ struct vpfe_ipipe_yuv422_conv __user *yuv422_conv;
+ struct vpfe_ipipe_yee __user *yee;
+ struct vpfe_ipipe_car __user *car;
+ struct vpfe_ipipe_cgs __user *cgs;
+};
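From user space, only the pointers of modules whose bit is set in @flag are dereferenced (see ipipe_s_config() later in this patch). A hedged sketch of updating just the white balance block; the VPFE_IPIPE_WB bit name is assumed from the module indices used in dm365_ipipe.c, and the usual <sys/ioctl.h> plus this header are taken as included:

/* Example only (user space): program white balance on the ipipe subdev. */
static int example_set_wb(int subdev_fd)
{
	struct vpfe_ipipe_wb wb = {
		.gain_r  = { 2, 0 },	/* gain 2.0, as in the driver defaults */
		.gain_gr = { 2, 0 },
		.gain_gb = { 2, 0 },
		.gain_b  = { 2, 0 },
	};
	struct vpfe_ipipe_config cfg = {
		.flag = 1 << VPFE_IPIPE_WB,	/* assumed flag bit name */
		.wbal = &wb,
	};

	return ioctl(subdev_fd, VIDIOC_VPFE_IPIPE_S_CONFIG, &cfg);
}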
+
+/*******************************************************************
+** Resizer API structures
+*******************************************************************/
+/* Interpolation types used for horizontal rescale */
+enum vpfe_rsz_intp_t {
+ VPFE_RSZ_INTP_CUBIC,
+ VPFE_RSZ_INTP_LINEAR
+};
+
+/* Horizontal LPF intensity selection */
+enum vpfe_rsz_h_lpf_lse_t {
+ VPFE_RSZ_H_LPF_LSE_INTERN,
+ VPFE_RSZ_H_LPF_LSE_USER_VAL
+};
+
+enum vpfe_rsz_down_scale_ave_sz {
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_4,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_8,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_16,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_32,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_64,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_128,
+ VPFE_IPIPE_DWN_SCALE_1_OVER_256
+};
+
+struct vpfe_rsz_output_spec {
+ /* enable horizontal flip */
+ unsigned char h_flip;
+ /* enable vertical flip */
+ unsigned char v_flip;
+ /* line start offset for y. */
+ unsigned int vst_y;
+ /* line start offset for c. Only for 420 */
+ unsigned int vst_c;
+ /* vertical rescale interpolation type, YCbCr or Luminance */
+ enum vpfe_rsz_intp_t v_typ_y;
+ /* vertical rescale interpolation type for Chrominance */
+ enum vpfe_rsz_intp_t v_typ_c;
+ /* vertical lpf intensity - Luminance */
+ unsigned char v_lpf_int_y;
+ /* vertical lpf intensity - Chrominance */
+ unsigned char v_lpf_int_c;
+ /* horizontal rescale interpolation type, YCbCr or Luminance */
+ enum vpfe_rsz_intp_t h_typ_y;
+ /* horizontal rescale interpolation type, Chrominance */
+ enum vpfe_rsz_intp_t h_typ_c;
+ /* horizontal lpf intensity - Luminance */
+ unsigned char h_lpf_int_y;
+ /* horizontal lpf intensity - Chrominance */
+ unsigned char h_lpf_int_c;
+ /* Use down scale mode for scale down */
+ unsigned char en_down_scale;
+ /* if downscaling, set the downscale averaging size for the
+ * horizontal direction. Used only if the output width and height
+ * are less than the input sizes
+ */
+ enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
+ /* if downscaling, set the downscale averaging size for the
+ * vertical direction. Used only if the output width and height
+ * are less than the input sizes
+ */
+ enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
+ /* Y offset. If set, the offset would be added to the base address
+ */
+ unsigned int user_y_ofst;
+ /* C offset. If set, the offset would be added to the base address
+ */
+ unsigned int user_c_ofst;
+};
+
+struct vpfe_rsz_config_params {
+ /* vertical start position of the image
+ * data to IPIPE
+ */
+ unsigned int vst;
+ /* horizontal start position of the image
+ * data to IPIPE
+ */
+ unsigned int hst;
+ /* output spec of the image data coming out of resizer - 0(UYVY).
+ */
+ struct vpfe_rsz_output_spec output1;
+ /* output spec of the image data coming out of resizer - 1(UYVY).
+ */
+ struct vpfe_rsz_output_spec output2;
+ /* 0 - chroma sample at odd pixel, 1 - at even pixel */
+ unsigned char chroma_sample_even;
+ unsigned char frame_div_mode_en;
+ unsigned char yuv_y_min;
+ unsigned char yuv_y_max;
+ unsigned char yuv_c_min;
+ unsigned char yuv_c_max;
+ enum vpfe_chr_pos out_chr_pos;
+ unsigned char bypass;
+};
+
+/* Structure for VIDIOC_VPFE_RSZ_[S/G]_CONFIG IOCTLs */
+struct vpfe_rsz_config {
+ struct vpfe_rsz_config_params *config;
+};
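A comparable user-space sketch for the resizer, using the VIDIOC_VPFE_RSZ_S_CONFIG ioctl named in the comment above; the parameter block is assumed to be filled by the caller:

/* Example only (user space): hand a filled parameter block to the resizer. */
static int example_set_rsz(int rsz_fd, struct vpfe_rsz_config_params *params)
{
	struct vpfe_rsz_config cfg = {
		.config = params,
	};

	return ioctl(rsz_fd, VIDIOC_VPFE_RSZ_S_CONFIG, &cfg);
}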
+
+#endif /* _DAVINCI_VPFE_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
new file mode 100644
index 000000000000..92853539cc0a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -0,0 +1,1863 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * IPIPE allows fine tuning of the input image using different
+ * tuning modules in IPIPE. Some examples: noise filter, defect
+ * pixel correction, etc. It essentially operates on Bayer raw data
+ * or YUV raw data. To do image tuning, applications call the
+ * VIDIOC_VPFE_IPIPE_S_CONFIG ioctl on the ipipe subdevice.
+ */
+
+#include <linux/slab.h>
+
+#include "dm365_ipipe.h"
+#include "dm365_ipipe_hw.h"
+#include "vpfe_mc_capture.h"
+
+#define MIN_OUT_WIDTH 32
+#define MIN_OUT_HEIGHT 32
+
+/* ipipe input formats */
+static const unsigned int ipipe_input_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+};
+
+/* ipipe output formats */
+static const unsigned int ipipe_output_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
+{
+ int i;
+
+ if (lutdpc->en > 1 || lutdpc->repl_white > 1 ||
+ lutdpc->dpc_size > LUT_DPC_MAX_SIZE)
+ return -EINVAL;
+
+ if (lutdpc->en && !lutdpc->table)
+ return -EINVAL;
+
+ for (i = 0; i < lutdpc->dpc_size; i++)
+ if (lutdpc->table[i].horz_pos > LUT_DPC_H_POS_MASK ||
+ lutdpc->table[i].vert_pos > LUT_DPC_V_POS_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
+ struct vpfe_ipipe_lutdpc *dpc_param;
+ struct device *dev;
+
+ if (!param) {
+ memset((void *)lutdpc, 0, sizeof(struct vpfe_ipipe_lutdpc));
+ goto success;
+ }
+
+ dev = ipipe->subdev.v4l2_dev->dev;
+ dpc_param = (struct vpfe_ipipe_lutdpc *)param;
+ lutdpc->en = dpc_param->en;
+ lutdpc->repl_white = dpc_param->repl_white;
+ lutdpc->dpc_size = dpc_param->dpc_size;
+ memcpy(&lutdpc->table, &dpc_param->table,
+ (dpc_param->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
+ if (ipipe_validate_lutdpc_params(lutdpc) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_lutdpc_regs(ipipe->base_addr, ipipe->isp5_base_addr, lutdpc);
+
+ return 0;
+}
+
+static int ipipe_get_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_lutdpc *lut_param = (struct vpfe_ipipe_lutdpc *)param;
+ struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
+
+ lut_param->en = lutdpc->en;
+ lut_param->repl_white = lutdpc->repl_white;
+ lut_param->dpc_size = lutdpc->dpc_size;
+ memcpy(&lut_param->table, &lutdpc->table,
+ (lutdpc->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
+
+ return 0;
+}
+
+static int ipipe_set_input_config(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+
+ if (!param)
+ memset(config, 0, sizeof(struct vpfe_ipipe_input_config));
+ else
+ memcpy(config, param, sizeof(struct vpfe_ipipe_input_config));
+ return 0;
+}
+
+static int ipipe_get_input_config(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+
+ if (!param)
+ return -EINVAL;
+
+ memcpy(param, config, sizeof(struct vpfe_ipipe_input_config));
+
+ return 0;
+}
+
+static int ipipe_validate_otfdpc_params(struct vpfe_ipipe_otfdpc *dpc_param)
+{
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0;
+ struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0;
+
+ if (dpc_param->en > 1)
+ return -EINVAL;
+
+ if (dpc_param->alg == VPFE_IPIPE_OTFDPC_2_0) {
+ dpc_2_0 = &dpc_param->alg_cfg.dpc_2_0;
+ if (dpc_2_0->det_thr.r > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.gr > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.gb > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->det_thr.b > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.r > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.gr > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.gb > OTFDPC_DPC2_THR_MASK ||
+ dpc_2_0->corr_thr.b > OTFDPC_DPC2_THR_MASK)
+ return -EINVAL;
+ return 0;
+ }
+
+ dpc_3_0 = &dpc_param->alg_cfg.dpc_3_0;
+
+ if (dpc_3_0->act_adj_shf > OTF_DPC3_0_SHF_MASK ||
+ dpc_3_0->det_thr > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->det_slp > OTF_DPC3_0_SLP_MASK ||
+ dpc_3_0->det_thr_min > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->det_thr_max > OTF_DPC3_0_DET_MASK ||
+ dpc_3_0->corr_thr > OTF_DPC3_0_CORR_MASK ||
+ dpc_3_0->corr_slp > OTF_DPC3_0_SLP_MASK ||
+ dpc_3_0->corr_thr_min > OTF_DPC3_0_CORR_MASK ||
+ dpc_3_0->corr_thr_max > OTF_DPC3_0_CORR_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
+ struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
+ struct device *dev;
+
+ if (!param) {
+ memset((void *)otfdpc, 0, sizeof(struct vpfe_ipipe_otfdpc));
+ goto success;
+ }
+ dev = ipipe->subdev.v4l2_dev->dev;
+ memcpy(otfdpc, dpc_param, sizeof(struct vpfe_ipipe_otfdpc));
+ if (ipipe_validate_otfdpc_params(otfdpc) < 0) {
+ dev_err(dev, "Invalid otfdpc params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_otfdpc_regs(ipipe->base_addr, otfdpc);
+
+ return 0;
+}
+
+static int ipipe_get_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
+ struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
+
+ memcpy(dpc_param, otfdpc, sizeof(struct vpfe_ipipe_otfdpc));
+ return 0;
+}
+
+static int ipipe_validate_nf_params(struct vpfe_ipipe_nf *nf_param)
+{
+ int i;
+
+ if (nf_param->en > 1 || nf_param->shft_val > D2F_SHFT_VAL_MASK ||
+ nf_param->spread_val > D2F_SPR_VAL_MASK ||
+ nf_param->apply_lsc_gain > 1 ||
+ nf_param->edge_det_min_thr > D2F_EDGE_DET_THR_MASK ||
+ nf_param->edge_det_max_thr > D2F_EDGE_DET_THR_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_NF_THR_TABLE_SIZE; i++)
+ if (nf_param->thr[i] > D2F_THR_VAL_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_NF_STR_TABLE_SIZE; i++)
+ if (nf_param->str[i] > D2F_STR_VAL_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_nf_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
+ struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
+ struct device *dev;
+
+ if (id == IPIPE_D2F_2ND)
+ nf = &ipipe->config.nf2;
+
+ if (!nf_param) {
+ memset((void *)nf, 0, sizeof(struct vpfe_ipipe_nf));
+ goto success;
+ }
+
+ dev = ipipe->subdev.v4l2_dev->dev;
+ memcpy(nf, nf_param, sizeof(struct vpfe_ipipe_nf));
+ if (ipipe_validate_nf_params(nf) < 0) {
+ dev_err(dev, "Invalid nf params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_d2f_regs(ipipe->base_addr, id, nf);
+
+ return 0;
+}
+
+static int ipipe_set_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_nf_params(ipipe, IPIPE_D2F_1ST, param);
+}
+
+static int ipipe_set_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_nf_params(ipipe, IPIPE_D2F_2ND, param);
+}
+
+static int ipipe_get_nf_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
+ struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
+
+ if (id == IPIPE_D2F_2ND)
+ nf = &ipipe->config.nf2;
+
+ memcpy(nf_param, nf, sizeof(struct vpfe_ipipe_nf));
+
+ return 0;
+}
+
+static int ipipe_get_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_nf_params(ipipe, IPIPE_D2F_1ST, param);
+}
+
+static int ipipe_get_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_nf_params(ipipe, IPIPE_D2F_2ND, param);
+}
+
+static int ipipe_validate_gic_params(struct vpfe_ipipe_gic *gic)
+{
+ if (gic->en > 1 || gic->gain > GIC_GAIN_MASK ||
+ gic->thr > GIC_THR_MASK || gic->slope > GIC_SLOPE_MASK ||
+ gic->apply_lsc_gain > 1 ||
+ gic->nf2_thr_gain.integer > GIC_NFGAN_INT_MASK ||
+ gic->nf2_thr_gain.decimal > GIC_NFGAN_DECI_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
+
+ if (!gic_param) {
+ memset((void *)gic, 0, sizeof(struct vpfe_ipipe_gic));
+ goto success;
+ }
+
+ memcpy(gic, gic_param, sizeof(struct vpfe_ipipe_gic));
+ if (ipipe_validate_gic_params(gic) < 0) {
+ dev_err(dev, "Invalid gic params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_gic_regs(ipipe->base_addr, gic);
+
+ return 0;
+}
+
+static int ipipe_get_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
+ struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
+
+ memcpy(gic_param, gic, sizeof(struct vpfe_ipipe_gic));
+
+ return 0;
+}
+
+static int ipipe_validate_wb_params(struct vpfe_ipipe_wb *wbal)
+{
+ if (wbal->ofst_r > WB_OFFSET_MASK ||
+ wbal->ofst_gr > WB_OFFSET_MASK ||
+ wbal->ofst_gb > WB_OFFSET_MASK ||
+ wbal->ofst_b > WB_OFFSET_MASK ||
+ wbal->gain_r.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_r.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_gr.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_gr.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_gb.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_gb.decimal > WB_GAIN_DECI_MASK ||
+ wbal->gain_b.integer > WB_GAIN_INT_MASK ||
+ wbal->gain_b.decimal > WB_GAIN_DECI_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
+ struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
+
+ if (!wb_param) {
+ const struct vpfe_ipipe_wb wb_defaults = {
+ .gain_r = {2, 0x0},
+ .gain_gr = {2, 0x0},
+ .gain_gb = {2, 0x0},
+ .gain_b = {2, 0x0}
+ };
+ memcpy(wbal, &wb_defaults, sizeof(struct vpfe_ipipe_wb));
+ goto success;
+ }
+
+ memcpy(wbal, wb_param, sizeof(struct vpfe_ipipe_wb));
+ if (ipipe_validate_wb_params(wbal) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_wb_regs(ipipe->base_addr, wbal);
+
+ return 0;
+}
+
+static int ipipe_get_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
+ struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
+
+ memcpy(wb_param, wbal, sizeof(struct vpfe_ipipe_wb));
+ return 0;
+}
+
+static int ipipe_validate_cfa_params(struct vpfe_ipipe_cfa *cfa)
+{
+ if (cfa->hpf_thr_2dir > CFA_HPF_THR_2DIR_MASK ||
+ cfa->hpf_slp_2dir > CFA_HPF_SLOPE_2DIR_MASK ||
+ cfa->hp_mix_thr_2dir > CFA_HPF_MIX_THR_2DIR_MASK ||
+ cfa->hp_mix_slope_2dir > CFA_HPF_MIX_SLP_2DIR_MASK ||
+ cfa->dir_thr_2dir > CFA_DIR_THR_2DIR_MASK ||
+ cfa->dir_slope_2dir > CFA_DIR_SLP_2DIR_MASK ||
+ cfa->nd_wt_2dir > CFA_ND_WT_2DIR_MASK ||
+ cfa->hue_fract_daa > CFA_DAA_HUE_FRA_MASK ||
+ cfa->edge_thr_daa > CFA_DAA_EDG_THR_MASK ||
+ cfa->thr_min_daa > CFA_DAA_THR_MIN_MASK ||
+ cfa->thr_slope_daa > CFA_DAA_THR_SLP_MASK ||
+ cfa->slope_min_daa > CFA_DAA_SLP_MIN_MASK ||
+ cfa->slope_slope_daa > CFA_DAA_SLP_SLP_MASK ||
+ cfa->lp_wt_daa > CFA_DAA_LP_WT_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
+ struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
+
+ if (!cfa_param) {
+ memset(cfa, 0, sizeof(struct vpfe_ipipe_cfa));
+ cfa->alg = VPFE_IPIPE_CFA_ALG_2DIRAC;
+ goto success;
+ }
+
+ memcpy(cfa, cfa_param, sizeof(struct vpfe_ipipe_cfa));
+ if (ipipe_validate_cfa_params(cfa) < 0)
+ return -EINVAL;
+
+success:
+ ipipe_set_cfa_regs(ipipe->base_addr, cfa);
+
+ return 0;
+}
+
+static int ipipe_get_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
+ struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
+
+ memcpy(cfa_param, cfa, sizeof(struct vpfe_ipipe_cfa));
+ return 0;
+}
+
+static int
+ipipe_validate_rgb2rgb_params(struct vpfe_ipipe_rgb2rgb *rgb2rgb,
+ unsigned int id)
+{
+ u32 gain_int_upper = RGB2RGB_1_GAIN_INT_MASK;
+ u32 offset_upper = RGB2RGB_1_OFST_MASK;
+
+ if (id == IPIPE_RGB2RGB_2) {
+ offset_upper = RGB2RGB_2_OFST_MASK;
+ gain_int_upper = RGB2RGB_2_GAIN_INT_MASK;
+ }
+
+ if (rgb2rgb->coef_rr.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rr.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gr.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gr.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_br.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_br.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_rg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_bg.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_bg.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_rb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_rb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_gb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_gb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->coef_bb.decimal > RGB2RGB_GAIN_DECI_MASK ||
+ rgb2rgb->coef_bb.integer > gain_int_upper)
+ return -EINVAL;
+
+ if (rgb2rgb->out_ofst_r > offset_upper ||
+ rgb2rgb->out_ofst_g > offset_upper ||
+ rgb2rgb->out_ofst_b > offset_upper)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
+
+ rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
+
+ if (id == IPIPE_RGB2RGB_2)
+ rgb2rgb = &ipipe->config.rgb2rgb2;
+
+ if (!rgb2rgb_param) {
+ const struct vpfe_ipipe_rgb2rgb rgb2rgb_defaults = {
+ .coef_rr = {1, 0}, /* 256 */
+ .coef_gr = {0, 0},
+ .coef_br = {0, 0},
+ .coef_rg = {0, 0},
+ .coef_gg = {1, 0}, /* 256 */
+ .coef_bg = {0, 0},
+ .coef_rb = {0, 0},
+ .coef_gb = {0, 0},
+ .coef_bb = {1, 0}, /* 256 */
+ };
+ /* Copy defaults for rgb2rgb conversion */
+ memcpy(rgb2rgb, &rgb2rgb_defaults,
+ sizeof(struct vpfe_ipipe_rgb2rgb));
+ goto success;
+ }
+
+ memcpy(rgb2rgb, rgb2rgb_param, sizeof(struct vpfe_ipipe_rgb2rgb));
+ if (ipipe_validate_rgb2rgb_params(rgb2rgb, id) < 0) {
+ dev_err(dev, "Invalid rgb2rgb params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_rgb2rgb_regs(ipipe->base_addr, id, rgb2rgb);
+
+ return 0;
+}
+
+static int
+ipipe_set_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
+}
+
+static int
+ipipe_set_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
+}
+
+static int ipipe_get_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
+ unsigned int id, void *param)
+{
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
+
+ rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
+
+ if (id == IPIPE_RGB2RGB_2)
+ rgb2rgb = &ipipe->config.rgb2rgb2;
+
+ memcpy(rgb2rgb_param, rgb2rgb, sizeof(struct vpfe_ipipe_rgb2rgb));
+
+ return 0;
+}
+
+static int
+ipipe_get_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
+}
+
+static int
+ipipe_get_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
+}
+
+static int
+ipipe_validate_gamma_entry(struct vpfe_ipipe_gamma_entry *table, int size)
+{
+ int i;
+
+ if (!table)
+ return -EINVAL;
+
+ for (i = 0; i < size; i++)
+ if (table[i].slope > GAMMA_MASK ||
+ table[i].offset > GAMMA_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_validate_gamma_params(struct vpfe_ipipe_gamma *gamma, struct device *dev)
+{
+ int table_size;
+ int err;
+
+ if (gamma->bypass_r > 1 ||
+ gamma->bypass_b > 1 ||
+ gamma->bypass_g > 1)
+ return -EINVAL;
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return 0;
+
+ table_size = gamma->tbl_size;
+ if (!gamma->bypass_r) {
+ err = ipipe_validate_gamma_entry(gamma->table_r, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA R - table entry invalid\n");
+ return err;
+ }
+ }
+
+ if (!gamma->bypass_b) {
+ err = ipipe_validate_gamma_entry(gamma->table_b, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA B - table entry invalid\n");
+ return err;
+ }
+ }
+
+ if (!gamma->bypass_g) {
+ err = ipipe_validate_gamma_entry(gamma->table_g, table_size);
+ if (err) {
+ dev_err(dev, "GAMMA G - table entry invalid\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ipipe_set_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
+ struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ int table_size;
+
+ if (!gamma_param) {
+ memset(gamma, 0, sizeof(struct vpfe_ipipe_gamma));
+ gamma->tbl_sel = VPFE_IPIPE_GAMMA_TBL_ROM;
+ goto success;
+ }
+
+ gamma->bypass_r = gamma_param->bypass_r;
+ gamma->bypass_b = gamma_param->bypass_b;
+ gamma->bypass_g = gamma_param->bypass_g;
+ gamma->tbl_sel = gamma_param->tbl_sel;
+ gamma->tbl_size = gamma_param->tbl_size;
+
+ if (ipipe_validate_gamma_params(gamma, dev) < 0)
+ return -EINVAL;
+
+ if (gamma_param->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ goto success;
+
+ table_size = gamma->tbl_size;
+ if (!gamma_param->bypass_r)
+ memcpy(&gamma->table_r, &gamma_param->table_r,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma_param->bypass_b)
+ memcpy(&gamma->table_b, &gamma_param->table_b,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma_param->bypass_g)
+ memcpy(&gamma->table_g, &gamma_param->table_g,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+success:
+ ipipe_set_gamma_regs(ipipe->base_addr, ipipe->isp5_base_addr, gamma);
+
+ return 0;
+}
+
+static int ipipe_get_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
+ struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ int table_size;
+
+ gamma_param->bypass_r = gamma->bypass_r;
+ gamma_param->bypass_g = gamma->bypass_g;
+ gamma_param->bypass_b = gamma->bypass_b;
+ gamma_param->tbl_sel = gamma->tbl_sel;
+ gamma_param->tbl_size = gamma->tbl_size;
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return 0;
+
+ table_size = gamma->tbl_size;
+
+ if (!gamma->bypass_r && !gamma_param->table_r) {
+ dev_err(dev,
+ "ipipe_get_gamma_params: table ptr empty for R\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_r, gamma->table_r,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma->bypass_g && !gamma_param->table_g) {
+ dev_err(dev, "ipipe_get_gamma_params: table ptr empty for G\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_g, gamma->table_g,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ if (!gamma->bypass_b && !gamma_param->table_b) {
+ dev_err(dev, "ipipe_get_gamma_params: table ptr empty for B\n");
+ return -EINVAL;
+ }
+ memcpy(gamma_param->table_b, gamma->table_b,
+ (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
+
+ return 0;
+}
+
+static int ipipe_validate_3d_lut_params(struct vpfe_ipipe_3d_lut *lut)
+{
+ int i;
+
+ if (!lut->en)
+ return 0;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++)
+ if (lut->table[i].r > D3_LUT_ENTRY_MASK ||
+ lut->table[i].g > D3_LUT_ENTRY_MASK ||
+ lut->table[i].b > D3_LUT_ENTRY_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_get_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
+ struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ lut_param->en = lut->en;
+ if (!lut_param->table) {
+ dev_err(dev, "ipipe_get_3d_lut_params: Invalid table ptr\n");
+ return -EINVAL;
+ }
+
+ memcpy(lut_param->table, &lut->table,
+ (VPFE_IPIPE_MAX_SIZE_3D_LUT *
+ sizeof(struct vpfe_ipipe_3d_lut_entry)));
+
+ return 0;
+}
+
+static int
+ipipe_set_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
+ struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ if (!lut_param) {
+ memset(lut, 0, sizeof(struct vpfe_ipipe_3d_lut));
+ goto success;
+ }
+
+ memcpy(lut, lut_param, sizeof(struct vpfe_ipipe_3d_lut));
+ if (ipipe_validate_3d_lut_params(lut) < 0) {
+ dev_err(dev, "Invalid 3D-LUT Params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_3d_lut_regs(ipipe->base_addr, ipipe->isp5_base_addr, lut);
+
+ return 0;
+}
+
+static int ipipe_validate_rgb2yuv_params(struct vpfe_ipipe_rgb2yuv *rgb2yuv)
+{
+ if (rgb2yuv->coef_ry.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_ry.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gy.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gy.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_by.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_by.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_rcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_rcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_bcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_bcb.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_rcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_rcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_gcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_gcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->coef_bcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
+ rgb2yuv->coef_bcr.integer > RGB2YCBCR_COEF_INT_MASK)
+ return -EINVAL;
+
+ if (rgb2yuv->out_ofst_y > RGB2YCBCR_OFST_MASK ||
+ rgb2yuv->out_ofst_cb > RGB2YCBCR_OFST_MASK ||
+ rgb2yuv->out_ofst_cr > RGB2YCBCR_OFST_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_set_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
+
+ rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
+ if (!rgb2yuv_param) {
+ /* Defaults for rgb2yuv conversion */
+ const struct vpfe_ipipe_rgb2yuv rgb2yuv_defaults = {
+ .coef_ry = {0, 0x4d},
+ .coef_gy = {0, 0x96},
+ .coef_by = {0, 0x1d},
+ .coef_rcb = {0xf, 0xd5},
+ .coef_gcb = {0xf, 0xab},
+ .coef_bcb = {0, 0x80},
+ .coef_rcr = {0, 0x80},
+ .coef_gcr = {0xf, 0x95},
+ .coef_bcr = {0xf, 0xeb},
+ .out_ofst_cb = 0x80,
+ .out_ofst_cr = 0x80,
+ };
+ /* Copy defaults for rgb2yuv conversion */
+ memcpy(rgb2yuv, &rgb2yuv_defaults,
+ sizeof(struct vpfe_ipipe_rgb2yuv));
+ goto success;
+ }
+
+ memcpy(rgb2yuv, rgb2yuv_param, sizeof(struct vpfe_ipipe_rgb2yuv));
+ if (ipipe_validate_rgb2yuv_params(rgb2yuv) < 0) {
+ dev_err(dev, "Invalid rgb2yuv params\n");
+ return -EINVAL;
+ }
+
+success:
+ ipipe_set_rgb2ycbcr_regs(ipipe->base_addr, rgb2yuv);
+
+ return 0;
+}
+
+static int
+ipipe_get_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
+ struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
+
+ rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
+ memcpy(rgb2yuv_param, rgb2yuv, sizeof(struct vpfe_ipipe_rgb2yuv));
+ return 0;
+}
+
+static int ipipe_validate_gbce_params(struct vpfe_ipipe_gbce *gbce)
+{
+ u32 max = GBCE_Y_VAL_MASK;
+ int i;
+
+ if (!gbce->en)
+ return 0;
+
+ if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
+ max = GBCE_GAIN_VAL_MASK;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; i++)
+ if (gbce->table[i] > max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
+ struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ if (!gbce_param) {
+ memset(gbce, 0, sizeof(struct vpfe_ipipe_gbce));
+ } else {
+ memcpy(gbce, gbce_param, sizeof(struct vpfe_ipipe_gbce));
+ if (ipipe_validate_gbce_params(gbce) < 0) {
+ dev_err(dev, "Invalid gbce params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_gbce_regs(ipipe->base_addr, ipipe->isp5_base_addr, gbce);
+
+ return 0;
+}
+
+static int ipipe_get_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
+ struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ gbce_param->en = gbce->en;
+ gbce_param->type = gbce->type;
+ if (!gbce_param->table) {
+ dev_err(dev, "ipipe_get_gbce_params: Invalid table ptr\n");
+ return -EINVAL;
+ }
+
+ memcpy(gbce_param->table, gbce->table,
+ (VPFE_IPIPE_MAX_SIZE_GBCE_LUT * sizeof(unsigned short)));
+
+ return 0;
+}
+
+static int
+ipipe_validate_yuv422_conv_params(struct vpfe_ipipe_yuv422_conv *yuv422_conv)
+{
+ if (yuv422_conv->en_chrom_lpf > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ipipe_set_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+
+ yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
+ if (!yuv422_conv_param) {
+ memset(yuv422_conv, 0, sizeof(struct vpfe_ipipe_yuv422_conv));
+ yuv422_conv->chrom_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE;
+ } else {
+ memcpy(yuv422_conv, yuv422_conv_param,
+ sizeof(struct vpfe_ipipe_yuv422_conv));
+ if (ipipe_validate_yuv422_conv_params(yuv422_conv) < 0) {
+ dev_err(dev, "Invalid yuv422 params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_yuv422_conv_regs(ipipe->base_addr, yuv422_conv);
+
+ return 0;
+}
+
+static int
+ipipe_get_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
+ struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
+
+ yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
+ memcpy(yuv422_conv_param, yuv422_conv,
+ sizeof(struct vpfe_ipipe_yuv422_conv));
+
+ return 0;
+}
+
+static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
+{
+ int i;
+
+ if (yee->en > 1 ||
+ yee->en_halo_red > 1 ||
+ yee->hpf_shft > YEE_HPF_SHIFT_MASK)
+ return -EINVAL;
+
+ if (yee->hpf_coef_00 > YEE_COEF_MASK ||
+ yee->hpf_coef_01 > YEE_COEF_MASK ||
+ yee->hpf_coef_02 > YEE_COEF_MASK ||
+ yee->hpf_coef_10 > YEE_COEF_MASK ||
+ yee->hpf_coef_11 > YEE_COEF_MASK ||
+ yee->hpf_coef_12 > YEE_COEF_MASK ||
+ yee->hpf_coef_20 > YEE_COEF_MASK ||
+ yee->hpf_coef_21 > YEE_COEF_MASK ||
+ yee->hpf_coef_22 > YEE_COEF_MASK)
+ return -EINVAL;
+
+ if (yee->yee_thr > YEE_THR_MASK ||
+ yee->es_gain > YEE_ES_GAIN_MASK ||
+ yee->es_thr1 > YEE_ES_THR1_MASK ||
+ yee->es_thr2 > YEE_THR_MASK ||
+ yee->es_gain_grad > YEE_THR_MASK ||
+ yee->es_ofst_grad > YEE_THR_MASK)
+ return -EINVAL;
+
+ for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
+ if (yee->table[i] > YEE_ENTRY_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
+
+ if (!yee_param) {
+ memset(yee, 0, sizeof(struct vpfe_ipipe_yee));
+ } else {
+ memcpy(yee, yee_param, sizeof(struct vpfe_ipipe_yee));
+ if (ipipe_validate_yee_params(yee) < 0) {
+ dev_err(dev, "Invalid yee params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_ee_regs(ipipe->base_addr, ipipe->isp5_base_addr, yee);
+
+ return 0;
+}
+
+static int ipipe_get_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
+ struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
+
+ yee_param->en = yee->en;
+ yee_param->en_halo_red = yee->en_halo_red;
+ yee_param->merge_meth = yee->merge_meth;
+ yee_param->hpf_shft = yee->hpf_shft;
+ yee_param->hpf_coef_00 = yee->hpf_coef_00;
+ yee_param->hpf_coef_01 = yee->hpf_coef_01;
+ yee_param->hpf_coef_02 = yee->hpf_coef_02;
+ yee_param->hpf_coef_10 = yee->hpf_coef_10;
+ yee_param->hpf_coef_11 = yee->hpf_coef_11;
+ yee_param->hpf_coef_12 = yee->hpf_coef_12;
+ yee_param->hpf_coef_20 = yee->hpf_coef_20;
+ yee_param->hpf_coef_21 = yee->hpf_coef_21;
+ yee_param->hpf_coef_22 = yee->hpf_coef_22;
+ yee_param->yee_thr = yee->yee_thr;
+ yee_param->es_gain = yee->es_gain;
+ yee_param->es_thr1 = yee->es_thr1;
+ yee_param->es_thr2 = yee->es_thr2;
+ yee_param->es_gain_grad = yee->es_gain_grad;
+ yee_param->es_ofst_grad = yee->es_ofst_grad;
+ memcpy(yee_param->table, &yee->table,
+ (VPFE_IPIPE_MAX_SIZE_YEE_LUT * sizeof(short)));
+
+ return 0;
+}
+
+static int ipipe_validate_car_params(struct vpfe_ipipe_car *car)
+{
+ if (car->en > 1 || car->hpf_shft > CAR_HPF_SHIFT_MASK ||
+ car->gain1.shft > CAR_GAIN1_SHFT_MASK ||
+ car->gain1.gain_min > CAR_GAIN_MIN_MASK ||
+ car->gain2.shft > CAR_GAIN2_SHFT_MASK ||
+ car->gain2.gain_min > CAR_GAIN_MIN_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_car_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_car *car = &ipipe->config.car;
+
+ if (!car_param) {
+ memset(car, 0, sizeof(struct vpfe_ipipe_car));
+ } else {
+ memcpy(car, car_param, sizeof(struct vpfe_ipipe_car));
+ if (ipipe_validate_car_params(car) < 0) {
+ dev_err(dev, "Invalid car params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_car_regs(ipipe->base_addr, car);
+
+ return 0;
+}
+
+static int ipipe_get_car_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
+ struct vpfe_ipipe_car *car = &ipipe->config.car;
+
+ memcpy(car_param, car, sizeof(struct vpfe_ipipe_car));
+ return 0;
+}
+
+static int ipipe_validate_cgs_params(struct vpfe_ipipe_cgs *cgs)
+{
+ if (cgs->en > 1 || cgs->h_shft > CAR_SHIFT_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipipe_set_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
+ struct device *dev = ipipe->subdev.v4l2_dev->dev;
+ struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
+
+ if (!cgs_param) {
+ memset(cgs, 0, sizeof(struct vpfe_ipipe_cgs));
+ } else {
+ memcpy(cgs, cgs_param, sizeof(struct vpfe_ipipe_cgs));
+ if (ipipe_validate_cgs_params(cgs) < 0) {
+ dev_err(dev, "Invalid cgs params\n");
+ return -EINVAL;
+ }
+ }
+
+ ipipe_set_cgs_regs(ipipe->base_addr, cgs);
+
+ return 0;
+}
+
+static int ipipe_get_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
+{
+ struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
+ struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
+
+ memcpy(cgs_param, cgs, sizeof(struct vpfe_ipipe_cgs));
+
+ return 0;
+}
+
+static const struct ipipe_module_if ipipe_modules[VPFE_IPIPE_MAX_MODULES] = {
+ /* VPFE_IPIPE_INPUT_CONFIG */ {
+ offsetof(struct ipipe_module_params, input_config),
+ FIELD_SIZEOF(struct ipipe_module_params, input_config),
+ offsetof(struct vpfe_ipipe_config, input_config),
+ ipipe_set_input_config,
+ ipipe_get_input_config,
+ }, /* VPFE_IPIPE_LUTDPC */ {
+ offsetof(struct ipipe_module_params, lutdpc),
+ FIELD_SIZEOF(struct ipipe_module_params, lutdpc),
+ offsetof(struct vpfe_ipipe_config, lutdpc),
+ ipipe_set_lutdpc_params,
+ ipipe_get_lutdpc_params,
+ }, /* VPFE_IPIPE_OTFDPC */ {
+ offsetof(struct ipipe_module_params, otfdpc),
+ FIELD_SIZEOF(struct ipipe_module_params, otfdpc),
+ offsetof(struct vpfe_ipipe_config, otfdpc),
+ ipipe_set_otfdpc_params,
+ ipipe_get_otfdpc_params,
+ }, /* VPFE_IPIPE_NF1 */ {
+ offsetof(struct ipipe_module_params, nf1),
+ FIELD_SIZEOF(struct ipipe_module_params, nf1),
+ offsetof(struct vpfe_ipipe_config, nf1),
+ ipipe_set_nf1_params,
+ ipipe_get_nf1_params,
+ }, /* VPFE_IPIPE_NF2 */ {
+ offsetof(struct ipipe_module_params, nf2),
+ FIELD_SIZEOF(struct ipipe_module_params, nf2),
+ offsetof(struct vpfe_ipipe_config, nf2),
+ ipipe_set_nf2_params,
+ ipipe_get_nf2_params,
+ }, /* VPFE_IPIPE_WB */ {
+ offsetof(struct ipipe_module_params, wbal),
+ FIELD_SIZEOF(struct ipipe_module_params, wbal),
+ offsetof(struct vpfe_ipipe_config, wbal),
+ ipipe_set_wb_params,
+ ipipe_get_wb_params,
+ }, /* VPFE_IPIPE_RGB2RGB_1 */ {
+ offsetof(struct ipipe_module_params, rgb2rgb1),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb1),
+ offsetof(struct vpfe_ipipe_config, rgb2rgb1),
+ ipipe_set_rgb2rgb_1_params,
+ ipipe_get_rgb2rgb_1_params,
+ }, /* VPFE_IPIPE_RGB2RGB_2 */ {
+ offsetof(struct ipipe_module_params, rgb2rgb2),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb2),
+ offsetof(struct vpfe_ipipe_config, rgb2rgb2),
+ ipipe_set_rgb2rgb_2_params,
+ ipipe_get_rgb2rgb_2_params,
+ }, /* VPFE_IPIPE_GAMMA */ {
+ offsetof(struct ipipe_module_params, gamma),
+ FIELD_SIZEOF(struct ipipe_module_params, gamma),
+ offsetof(struct vpfe_ipipe_config, gamma),
+ ipipe_set_gamma_params,
+ ipipe_get_gamma_params,
+ }, /* VPFE_IPIPE_3D_LUT */ {
+ offsetof(struct ipipe_module_params, lut),
+ FIELD_SIZEOF(struct ipipe_module_params, lut),
+ offsetof(struct vpfe_ipipe_config, lut),
+ ipipe_set_3d_lut_params,
+ ipipe_get_3d_lut_params,
+ }, /* VPFE_IPIPE_RGB2YUV */ {
+ offsetof(struct ipipe_module_params, rgb2yuv),
+ FIELD_SIZEOF(struct ipipe_module_params, rgb2yuv),
+ offsetof(struct vpfe_ipipe_config, rgb2yuv),
+ ipipe_set_rgb2yuv_params,
+ ipipe_get_rgb2yuv_params,
+ }, /* VPFE_IPIPE_YUV422_CONV */ {
+ offsetof(struct ipipe_module_params, yuv422_conv),
+ FIELD_SIZEOF(struct ipipe_module_params, yuv422_conv),
+ offsetof(struct vpfe_ipipe_config, yuv422_conv),
+ ipipe_set_yuv422_conv_params,
+ ipipe_get_yuv422_conv_params,
+ }, /* VPFE_IPIPE_YEE */ {
+ offsetof(struct ipipe_module_params, yee),
+ FIELD_SIZEOF(struct ipipe_module_params, yee),
+ offsetof(struct vpfe_ipipe_config, yee),
+ ipipe_set_yee_params,
+ ipipe_get_yee_params,
+ }, /* VPFE_IPIPE_GIC */ {
+ offsetof(struct ipipe_module_params, gic),
+ FIELD_SIZEOF(struct ipipe_module_params, gic),
+ offsetof(struct vpfe_ipipe_config, gic),
+ ipipe_set_gic_params,
+ ipipe_get_gic_params,
+ }, /* VPFE_IPIPE_CFA */ {
+ offsetof(struct ipipe_module_params, cfa),
+ FIELD_SIZEOF(struct ipipe_module_params, cfa),
+ offsetof(struct vpfe_ipipe_config, cfa),
+ ipipe_set_cfa_params,
+ ipipe_get_cfa_params,
+ }, /* VPFE_IPIPE_CAR */ {
+ offsetof(struct ipipe_module_params, car),
+ FIELD_SIZEOF(struct ipipe_module_params, car),
+ offsetof(struct vpfe_ipipe_config, car),
+ ipipe_set_car_params,
+ ipipe_get_car_params,
+ }, /* VPFE_IPIPE_CGS */ {
+ offsetof(struct ipipe_module_params, cgs),
+ FIELD_SIZEOF(struct ipipe_module_params, cgs),
+ offsetof(struct vpfe_ipipe_config, cgs),
+ ipipe_set_cgs_params,
+ ipipe_get_cgs_params,
+ }, /* VPFE_IPIPE_GBCE */ {
+ offsetof(struct ipipe_module_params, gbce),
+ FIELD_SIZEOF(struct ipipe_module_params, gbce),
+ offsetof(struct vpfe_ipipe_config, gbce),
+ ipipe_set_gbce_params,
+ ipipe_get_gbce_params,
+ },
+};
+
+static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ int rval = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
+ unsigned int bit = 1 << i;
+ if (cfg->flag & bit) {
+ const struct ipipe_module_if *module_if =
+ &ipipe_modules[i];
+ struct ipipe_module_params *params;
+ void __user *from = *(void * __user *)
+ ((void *)cfg + module_if->config_offset);
+ size_t size;
+ void *to;
+
+ params = kmalloc(sizeof(struct ipipe_module_params),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ to = (void *)params + module_if->param_offset;
+ size = module_if->param_size;
+
+ if (to && from && size) {
+ if (copy_from_user(to, from, size)) {
+ kfree(params);
+ rval = -EFAULT;
+ break;
+ }
+ rval = module_if->set(ipipe, to);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ } else if (to && !from && size) {
+ rval = module_if->set(ipipe, NULL);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ }
+ kfree(params);
+ }
+ }
+error:
+ return rval;
+}
+
+static int ipipe_g_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ int rval = 0;
+
+ for (i = 1; i < ARRAY_SIZE(ipipe_modules); i++) {
+ unsigned int bit = 1 << i;
+ if (cfg->flag & bit) {
+ const struct ipipe_module_if *module_if =
+ &ipipe_modules[i];
+ struct ipipe_module_params *params;
+ void __user *to = *(void * __user *)
+ ((void *)cfg + module_if->config_offset);
+ size_t size;
+ void *from;
+
+ params = kmalloc(sizeof(struct ipipe_module_params),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ from = (void *)params + module_if->param_offset;
+ size = module_if->param_size;
+
+ if (to && from && size) {
+ rval = module_if->get(ipipe, from);
+ if (rval) {
+ kfree(params);
+ goto error;
+ }
+ if (copy_to_user(to, from, size)) {
+ kfree(params);
+ rval = -EFAULT;
+ break;
+ }
+ }
+ kfree(params);
+ }
+ }
+error:
+ return rval;
+}
+
+/*
+ * ipipe_ioctl() - Handle ipipe module private ioctls
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_IPIPE_S_CONFIG:
+ ret = ipipe_s_config(sd, arg);
+ break;
+
+ case VIDIOC_VPFE_IPIPE_G_CONFIG:
+ ret = ipipe_g_config(sd, arg);
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ struct vpfe_ipipe_device *ipipe = &vpfe_dev->vpfe_ipipe;
+ unsigned char val;
+
+ if (ipipe->input == IPIPE_INPUT_NONE)
+ return;
+
+ /* ipipe is set to single shot */
+ if (ipipeif->input == IPIPEIF_INPUT_MEMORY && en) {
+ /* for single-shot mode, need to wait for h/w to
+ * reset many register bits
+ */
+ do {
+ val = regr_ip(vpfe_dev->vpfe_ipipe.base_addr,
+ IPIPE_SRC_EN);
+ } while (val);
+ }
+ regw_ip(vpfe_dev->vpfe_ipipe.base_addr, en, IPIPE_SRC_EN);
+}
+
+/*
+ * ipipe_set_stream() - Enable/Disable streaming on the ipipe subdevice
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+
+ if (enable && ipipe->input != IPIPE_INPUT_NONE &&
+ ipipe->output != IPIPE_OUTPUT_NONE) {
+ if (config_ipipe_hw(ipipe) < 0)
+ return -EINVAL;
+ }
+
+ vpfe_ipipe_enable(vpfe_dev, enable);
+
+ return 0;
+}
+
+/*
+ * __ipipe_get_format() - helper function for getting ipipe format
+ * @ipipe: pointer to ipipe private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ *
+ */
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+
+ return &ipipe->formats[pad];
+}
+
+/*
+ * ipipe_try_format() - Handle try format by pad subdev method
+ * @ipipe: VPFE ipipe device.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which : wanted subdev format
+ */
+static void
+ipipe_try_format(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ if (pad == IPIPE_PAD_SINK) {
+ for (i = 0; i < ARRAY_SIZE(ipipe_input_fmts); i++)
+ if (fmt->code == ipipe_input_fmts[i])
+ break;
+
+ /* If not found, use SGRBG12 as default */
+ if (i >= ARRAY_SIZE(ipipe_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ } else if (pad == IPIPE_PAD_SOURCE) {
+ for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
+ if (fmt->code == ipipe_output_fmts[i])
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(ipipe_output_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
+}
+
+/*
+ * ipipe_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int
+ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == IPIPE_PAD_SINK &&
+ (ipipe->input == IPIPE_INPUT_CCDC ||
+ ipipe->input == IPIPE_INPUT_MEMORY))
+ ipipe->formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == IPIPE_PAD_SOURCE &&
+ ipipe->output == IPIPE_OUTPUT_RESIZER)
+ ipipe->formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * ipipe_get_format() - Handle get format by pads subdev method.
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fmt: pointer to v4l2 subdev format structure.
+ */
+static int
+ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ fmt->format = ipipe->formats[fmt->pad];
+ else
+ fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+
+ return 0;
+}
+
+/*
+ * ipipe_enum_frame_size() - enum frame sizes on pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure.
+ */
+static int
+ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipe_enum_mbus_code() - enum mbus codes for pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int
+ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPE_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipe_input_fmts))
+ return -EINVAL;
+ code->code = ipipe_input_fmts[code->index];
+ break;
+
+ case IPIPE_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(ipipe_output_fmts))
+ return -EINVAL;
+ code->code = ipipe_output_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipe_s_ctrl() - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int ipipe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_ipipe_device *ipipe =
+ container_of(ctrl->handler, struct vpfe_ipipe_device, ctrls);
+ struct ipipe_lum_adj *lum_adj = &ipipe->config.lum_adj;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ lum_adj->brightness = ctrl->val;
+ ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ lum_adj->contrast = ctrl->val;
+ ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipe_init_formats() - Initialize formats on all pads
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipe_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipe_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops ipipe_v4l2_core_ops = {
+ .ioctl = ipipe_ioctl,
+};
+
+static const struct v4l2_ctrl_ops ipipe_ctrl_ops = {
+ .s_ctrl = ipipe_s_ctrl,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
+ .open = ipipe_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
+ .s_stream = ipipe_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
+ .enum_mbus_code = ipipe_enum_mbus_code,
+ .enum_frame_size = ipipe_enum_frame_size,
+ .get_fmt = ipipe_get_format,
+ .set_fmt = ipipe_set_format,
+};
+
+/* v4l2 subdev operation */
+static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
+ .core = &ipipe_v4l2_core_ops,
+ .video = &ipipe_v4l2_video_ops,
+ .pad = &ipipe_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * ipipe_link_setup() - Setup ipipe connections
+ * @entity: ipipe media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int
+ipipe_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipe->input = IPIPE_INPUT_NONE;
+ break;
+ }
+ if (ipipe->input != IPIPE_INPUT_NONE)
+ return -EBUSY;
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
+ ipipe->input = IPIPE_INPUT_MEMORY;
+ else
+ ipipe->input = IPIPE_INPUT_CCDC;
+ break;
+
+ case IPIPE_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* out to RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ ipipe->output = IPIPE_OUTPUT_RESIZER;
+ else
+ ipipe->output = IPIPE_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations ipipe_media_ops = {
+ .link_setup = ipipe_link_setup,
+};
+
+/*
+ * vpfe_ipipe_unregister_entities() - ipipe unregister entity
+ * @vpfe_ipipe: pointer to ipipe subdevice structure.
+ */
+void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *vpfe_ipipe)
+{
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_ipipe->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&vpfe_ipipe->subdev);
+}
+
+/*
+ * vpfe_ipipe_register_entities() - ipipe register entity
+ * @ipipe: pointer to ipipe subdevice structure.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int
+vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
+ if (ret) {
+ pr_err("Failed to register ipipe as v4l2 subdevice\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+#define IPIPE_CONTRAST_HIGH 0xff
+#define IPIPE_BRIGHT_HIGH 0xff
+
+/*
+ * vpfe_ipipe_init() - ipipe module initialization.
+ * @ipipe: pointer to ipipe subdevice structure.
+ * @pdev: platform device pointer.
+ */
+int
+vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
+{
+ struct media_pad *pads = &ipipe->pads[0];
+ struct v4l2_subdev *sd = &ipipe->subdev;
+ struct media_entity *me = &sd->entity;
+ static resource_size_t res_len;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+ ipipe->base_addr = ioremap_nocache(res->start, res_len);
+ if (!ipipe->base_addr)
+ return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
+ if (!res)
+ return -ENOENT;
+ ipipe->isp5_base_addr = ioremap_nocache(res->start, res_len);
+ if (!ipipe->isp5_base_addr)
+ return -EBUSY;
+
+ v4l2_subdev_init(sd, &ipipe_v4l2_ops);
+ sd->internal_ops = &ipipe_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI IPIPE", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, ipipe);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ipipe->input = IPIPE_INPUT_NONE;
+ ipipe->output = IPIPE_OUTPUT_NONE;
+
+ me->ops = &ipipe_media_ops;
+ v4l2_ctrl_handler_init(&ipipe->ctrls, 2);
+ v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0,
+ IPIPE_BRIGHT_HIGH, 1, 16);
+ v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
+ V4L2_CID_CONTRAST, 0,
+ IPIPE_CONTRAST_HIGH, 1, 16);
+
+ v4l2_ctrl_handler_setup(&ipipe->ctrls);
+ sd->ctrl_handler = &ipipe->ctrls;
+
+ return media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
+}
+
+/*
+ * vpfe_ipipe_cleanup() - ipipe subdevice cleanup.
+ * @ipipe: pointer to ipipe subdevice
+ * @pdev: pointer to platform device
+ */
+void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ v4l2_ctrl_handler_free(&ipipe->ctrls);
+
+ iounmap(ipipe->base_addr);
+ iounmap(ipipe->isp5_base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (res)
+ release_mem_region(res->start, res->end - res->start + 1);
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
new file mode 100644
index 000000000000..cf4204603eb8
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPE_H
+#define _DAVINCI_VPFE_DM365_IPIPE_H
+
+#include <linux/platform_device.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "davinci_vpfe_user.h"
+#include "vpfe_video.h"
+
+#define CEIL(a, b) (((a) + (b-1)) / (b))
+
+enum ipipe_noise_filter {
+ IPIPE_D2F_1ST = 0,
+ IPIPE_D2F_2ND = 1,
+};
+
+/* Used for driver storage */
+struct ipipe_otfdpc_2_0 {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ struct vpfe_ipipe_otfdpc_2_0_cfg otfdpc_2_0;
+};
+
+struct ipipe_otfdpc_3_0 {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* defect detection method */
+ enum vpfe_ipipe_otfdpc_det_meth det_method;
+ /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
+ * used
+ */
+ enum vpfe_ipipe_otfdpc_alg alg;
+ struct vpfe_ipipe_otfdpc_3_0_cfg otfdpc_3_0;
+};
+
+/* Structure for configuring Luminance Adjustment module */
+struct ipipe_lum_adj {
+ /* Brightness adjustments */
+ unsigned char brightness;
+ /* contrast adjustments */
+ unsigned char contrast;
+};
+
+enum ipipe_rgb2rgb {
+ IPIPE_RGB2RGB_1 = 0,
+ IPIPE_RGB2RGB_2 = 1,
+};
+
+struct ipipe_module_params {
+ __u32 flag;
+ struct vpfe_ipipe_input_config input_config;
+ struct vpfe_ipipe_lutdpc lutdpc;
+ struct vpfe_ipipe_otfdpc otfdpc;
+ struct vpfe_ipipe_nf nf1;
+ struct vpfe_ipipe_nf nf2;
+ struct vpfe_ipipe_gic gic;
+ struct vpfe_ipipe_wb wbal;
+ struct vpfe_ipipe_cfa cfa;
+ struct vpfe_ipipe_rgb2rgb rgb2rgb1;
+ struct vpfe_ipipe_rgb2rgb rgb2rgb2;
+ struct vpfe_ipipe_gamma gamma;
+ struct vpfe_ipipe_3d_lut lut;
+ struct vpfe_ipipe_rgb2yuv rgb2yuv;
+ struct vpfe_ipipe_gbce gbce;
+ struct vpfe_ipipe_yuv422_conv yuv422_conv;
+ struct vpfe_ipipe_yee yee;
+ struct vpfe_ipipe_car car;
+ struct vpfe_ipipe_cgs cgs;
+ struct ipipe_lum_adj lum_adj;
+};
+
+#define IPIPE_PAD_SINK 0
+#define IPIPE_PAD_SOURCE 1
+
+#define IPIPE_PADS_NUM 2
+
+#define IPIPE_OUTPUT_NONE 0
+#define IPIPE_OUTPUT_RESIZER (1 << 0)
+
+enum ipipe_input_entity {
+ IPIPE_INPUT_NONE = 0,
+ IPIPE_INPUT_MEMORY = 1,
+ IPIPE_INPUT_CCDC = 2,
+};
+
+
+struct vpfe_ipipe_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPE_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
+ enum ipipe_input_entity input;
+ unsigned int output;
+ struct v4l2_ctrl_handler ctrls;
+ void *__iomem base_addr;
+ void *__iomem isp5_base_addr;
+ struct ipipe_module_params config;
+};
+
+struct ipipe_module_if {
+ unsigned int param_offset;
+ unsigned int param_size;
+ unsigned int config_offset;
+ int (*set)(struct vpfe_ipipe_device *ipipe, void *param);
+ int (*get)(struct vpfe_ipipe_device *ipipe, void *param);
+};
+
+/* data paths */
+enum ipipe_data_paths {
+ IPIPE_RAW2YUV,
+ /* Bayer RAW input to YCbCr output */
+ IPIPE_RAW2RAW,
+ /* Bayer Raw to Bayer output */
+ IPIPE_RAW2BOX,
+ /* Bayer Raw to Boxcar output */
+ IPIPE_YUV2YUV
+ /* YUV Raw to YUV Raw output */
+};
+
+#define IPIPE_COLPTN_R_Ye 0x0
+#define IPIPE_COLPTN_Gr_Cy 0x1
+#define IPIPE_COLPTN_Gb_G 0x2
+#define IPIPE_COLPTN_B_Mg 0x3
+
+#define COLPAT_EE_SHIFT 0
+#define COLPAT_EO_SHIFT 2
+#define COLPAT_OE_SHIFT 4
+#define COLPAT_OO_SHIFT 6
+
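+/*
+ * Bayer colour patterns: two bits per pixel position, placed with the
+ * COLPAT_xy_SHIFT values for the (even/odd line, even/odd pixel)
+ * combinations.
+ */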
+#define ipipe_sgrbg_pattern \
+ (IPIPE_COLPTN_Gr_Cy << COLPAT_EE_SHIFT | \
+ IPIPE_COLPTN_R_Ye << COLPAT_EO_SHIFT | \
+ IPIPE_COLPTN_B_Mg << COLPAT_OE_SHIFT | \
+ IPIPE_COLPTN_Gb_G << COLPAT_OO_SHIFT)
+
+#define ipipe_srggb_pattern \
+ (IPIPE_COLPTN_R_Ye << COLPAT_EE_SHIFT | \
+ IPIPE_COLPTN_Gr_Cy << COLPAT_EO_SHIFT | \
+ IPIPE_COLPTN_Gb_G << COLPAT_OE_SHIFT | \
+ IPIPE_COLPTN_B_Mg << COLPAT_OO_SHIFT)
+
+int vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
+ struct v4l2_device *v4l2_dev);
+int vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev);
+void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *ipipe);
+void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
+ struct platform_device *pdev);
+void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPE_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
new file mode 100644
index 000000000000..e027b92b54ef
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_ipipe_hw.h"
+
+#define IPIPE_MODE_CONTINUOUS 0
+#define IPIPE_MODE_SINGLE_SHOT 1
+
+static void ipipe_clock_enable(void *__iomem base_addr)
+{
+ /* enable IPIPE MMR for register write access */
+ regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
+
+ /* enable the clock wb,cfa,dfc,d2f,pre modules */
+ regw_ip(base_addr, IPIPE_GCK_PIX_DEFAULT, IPIPE_GCK_PIX);
+}
+
+static void
+rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
+{
+ struct rsz_common_params *rsz_common = &params->rsz_common;
+ u32 val;
+
+ /* Set mode */
+ regw_rsz(rsz_base, params->oper_mode, RSZ_SRC_MODE);
+
+ /* data source selection and bypass */
+ val = (rsz_common->passthrough << RSZ_BYPASS_SHIFT) |
+ rsz_common->source;
+ regw_rsz(rsz_base, val, RSZ_SRC_FMT0);
+
+ /* src image selection */
+ val = (rsz_common->raw_flip & 1) |
+ (rsz_common->src_img_fmt << RSZ_SRC_IMG_FMT_SHIFT) |
+ ((rsz_common->y_c & 1) << RSZ_SRC_Y_C_SEL_SHIFT);
+ regw_rsz(rsz_base, val, RSZ_SRC_FMT1);
+
+ regw_rsz(rsz_base, rsz_common->vps & IPIPE_RSZ_VPS_MASK, RSZ_SRC_VPS);
+ regw_rsz(rsz_base, rsz_common->hps & IPIPE_RSZ_HPS_MASK, RSZ_SRC_HPS);
+ regw_rsz(rsz_base, rsz_common->vsz & IPIPE_RSZ_VSZ_MASK, RSZ_SRC_VSZ);
+ regw_rsz(rsz_base, rsz_common->hsz & IPIPE_RSZ_HSZ_MASK, RSZ_SRC_HSZ);
+ regw_rsz(rsz_base, rsz_common->yuv_y_min, RSZ_YUV_Y_MIN);
+ regw_rsz(rsz_base, rsz_common->yuv_y_max, RSZ_YUV_Y_MAX);
+ regw_rsz(rsz_base, rsz_common->yuv_c_min, RSZ_YUV_C_MIN);
+ regw_rsz(rsz_base, rsz_common->yuv_c_max, RSZ_YUV_C_MAX);
+ /* chromatic position */
+ regw_rsz(rsz_base, rsz_common->out_chr_pos, RSZ_YUV_PHS);
+}
+
+static void
+rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
+ struct resizer_params *params)
+{
+ struct resizer_scale_param *rsc_params;
+ struct rsz_ext_mem_param *ext_mem;
+ struct resizer_rgb *rgb;
+ u32 reg_base;
+ u32 val;
+
+ rsc_params = &params->rsz_rsc_param[rsz_id];
+ rgb = &params->rsz2rgb[rsz_id];
+ ext_mem = &params->ext_mem_param[rsz_id];
+
+ if (rsz_id == RSZ_A) {
+ val = rsc_params->h_flip << RSZA_H_FLIP_SHIFT;
+ val |= rsc_params->v_flip << RSZA_V_FLIP_SHIFT;
+ reg_base = RSZ_EN_A;
+ } else {
+ val = rsc_params->h_flip << RSZB_H_FLIP_SHIFT;
+ val |= rsc_params->v_flip << RSZB_V_FLIP_SHIFT;
+ reg_base = RSZ_EN_B;
+ }
+ /* update flip settings */
+ regw_rsz(rsz_base, val, RSZ_SEQ);
+
+ regw_rsz(rsz_base, params->oper_mode, reg_base + RSZ_MODE);
+
+ val = (rsc_params->cen << RSZ_CEN_SHIFT) | rsc_params->yen;
+ regw_rsz(rsz_base, val, reg_base + RSZ_420);
+
+ regw_rsz(rsz_base, rsc_params->i_vps & RSZ_VPS_MASK,
+ reg_base + RSZ_I_VPS);
+ regw_rsz(rsz_base, rsc_params->i_hps & RSZ_HPS_MASK,
+ reg_base + RSZ_I_HPS);
+ regw_rsz(rsz_base, rsc_params->o_vsz & RSZ_O_VSZ_MASK,
+ reg_base + RSZ_O_VSZ);
+ regw_rsz(rsz_base, rsc_params->o_hsz & RSZ_O_HSZ_MASK,
+ reg_base + RSZ_O_HSZ);
+ regw_rsz(rsz_base, rsc_params->v_phs_y & RSZ_V_PHS_MASK,
+ reg_base + RSZ_V_PHS_Y);
+ regw_rsz(rsz_base, rsc_params->v_phs_c & RSZ_V_PHS_MASK,
+ reg_base + RSZ_V_PHS_C);
+
+ /* keep this additional adjustment to zero for now */
+ regw_rsz(rsz_base, rsc_params->v_dif & RSZ_V_DIF_MASK,
+ reg_base + RSZ_V_DIF);
+
+ val = (rsc_params->v_typ_y & 1) |
+ ((rsc_params->v_typ_c & 1) << RSZ_TYP_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_V_TYP);
+
+ val = (rsc_params->v_lpf_int_y & RSZ_LPF_INT_MASK) |
+ ((rsc_params->v_lpf_int_c & RSZ_LPF_INT_MASK) <<
+ RSZ_LPF_INT_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_V_LPF);
+
+ regw_rsz(rsz_base, rsc_params->h_phs &
+ RSZ_H_PHS_MASK, reg_base + RSZ_H_PHS);
+
+ regw_rsz(rsz_base, 0, reg_base + RSZ_H_PHS_ADJ);
+ regw_rsz(rsz_base, rsc_params->h_dif &
+ RSZ_H_DIF_MASK, reg_base + RSZ_H_DIF);
+
+ val = (rsc_params->h_typ_y & 1) |
+ ((rsc_params->h_typ_c & 1) << RSZ_TYP_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_H_TYP);
+
+ val = (rsc_params->h_lpf_int_y & RSZ_LPF_INT_MASK) |
+ ((rsc_params->h_lpf_int_c & RSZ_LPF_INT_MASK) <<
+ RSZ_LPF_INT_C_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_H_LPF);
+
+ regw_rsz(rsz_base, rsc_params->dscale_en & 1, reg_base + RSZ_DWN_EN);
+
+ val = (rsc_params->h_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) |
+ ((rsc_params->v_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) <<
+ RSZ_DWN_SCALE_AV_SZ_V_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_DWN_AV);
+
+ /* setting rgb conversion parameters */
+ regw_rsz(rsz_base, rgb->rgb_en, reg_base + RSZ_RGB_EN);
+
+ val = (rgb->rgb_typ << RSZ_RGB_TYP_SHIFT) |
+ (rgb->rgb_msk0 << RSZ_RGB_MSK0_SHIFT) |
+ (rgb->rgb_msk1 << RSZ_RGB_MSK1_SHIFT);
+ regw_rsz(rsz_base, val, reg_base + RSZ_RGB_TYP);
+
+ regw_rsz(rsz_base, rgb->rgb_alpha_val & RSZ_RGB_ALPHA_MASK,
+ reg_base + RSZ_RGB_BLD);
+
+ /* setting external memory parameters */
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_y, reg_base + RSZ_SDR_Y_OFT);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_y,
+ reg_base + RSZ_SDR_Y_PTR_S);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_e_y,
+ reg_base + RSZ_SDR_Y_PTR_E);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_c, reg_base + RSZ_SDR_C_OFT);
+ regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_c,
+ reg_base + RSZ_SDR_C_PTR_S);
+ regw_rsz(rsz_base, (ext_mem->rsz_sdr_ptr_e_c >> 1),
+ reg_base + RSZ_SDR_C_PTR_E);
+}
+
+/* set the registers of either RSZ0 or RSZ1 */
+static void
+ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
+{
+ /* enable MMR gate to write to Resizer */
+ regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
+
+ /* Enable resizer if it is not in bypass mode */
+ if (params->rsz_common.passthrough)
+ regw_rsz(rsz_base, 0, RSZ_GCK_SDR);
+ else
+ regw_rsz(rsz_base, 1, RSZ_GCK_SDR);
+
+ rsz_set_common_params(rsz_base, params);
+
+ regw_rsz(rsz_base, params->rsz_en[RSZ_A], RSZ_EN_A);
+
+ if (params->rsz_en[RSZ_A])
+ /*setting rescale parameters */
+ rsz_set_rsz_regs(rsz_base, RSZ_A, params);
+
+ regw_rsz(rsz_base, params->rsz_en[RSZ_B], RSZ_EN_B);
+
+ if (params->rsz_en[RSZ_B])
+ rsz_set_rsz_regs(rsz_base, RSZ_B, params);
+}
+
+static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix)
+{
+ switch (pix) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return ipipe_sgrbg_pattern;
+
+ default:
+ return ipipe_srggb_pattern;
+ }
+}
+
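+/* derive the IPIPE data path from the sink and source pad mbus codes */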
+static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
+{
+ enum v4l2_mbus_pixelcode temp_pix_fmt;
+
+ switch (ipipe->formats[IPIPE_PAD_SINK].code) {
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ temp_pix_fmt = V4L2_MBUS_FMT_SGRBG12_1X12;
+ break;
+
+ default:
+ temp_pix_fmt = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+ if (temp_pix_fmt == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
+ V4L2_MBUS_FMT_SGRBG12_1X12)
+ return IPIPE_RAW2RAW;
+ return IPIPE_RAW2YUV;
+ }
+
+ return IPIPE_YUV2YUV;
+}
+
+static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
+ return IPIPE_MODE_SINGLE_SHOT;
+ else if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
+ return IPIPE_MODE_CONTINUOUS;
+
+ return -EINVAL;
+}
+
+int config_ipipe_hw(struct vpfe_ipipe_device *ipipe)
+{
+ struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
+ void __iomem *ipipe_base = ipipe->base_addr;
+ struct v4l2_mbus_framefmt *outformat;
+ u32 color_pat;
+ int ipipe_mode;
+ u32 data_path;
+
+ /* enable clock to IPIPE */
+ vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
+ ipipe_clock_enable(ipipe_base);
+
+ if (ipipe->input == IPIPE_INPUT_NONE) {
+ regw_ip(ipipe_base, 0, IPIPE_SRC_EN);
+ return 0;
+ }
+
+ ipipe_mode = get_ipipe_mode(ipipe);
+ if (ipipe_mode < 0) {
+ pr_err("Failed to get ipipe mode");
+ return -EINVAL;
+ }
+ regw_ip(ipipe_base, ipipe_mode, IPIPE_SRC_MODE);
+
+ data_path = ipipe_get_data_path(ipipe);
+ regw_ip(ipipe_base, data_path, IPIPE_SRC_FMT);
+
+ regw_ip(ipipe_base, config->vst & IPIPE_RSZ_VPS_MASK, IPIPE_SRC_VPS);
+ regw_ip(ipipe_base, config->hst & IPIPE_RSZ_HPS_MASK, IPIPE_SRC_HPS);
+
+ outformat = &ipipe->formats[IPIPE_PAD_SOURCE];
+ regw_ip(ipipe_base, (outformat->height + 1) & IPIPE_RSZ_VSZ_MASK,
+ IPIPE_SRC_VSZ);
+ regw_ip(ipipe_base, (outformat->width + 1) & IPIPE_RSZ_HSZ_MASK,
+ IPIPE_SRC_HSZ);
+
+ if (data_path == IPIPE_RAW2YUV ||
+ data_path == IPIPE_RAW2RAW) {
+ color_pat =
+ ipipe_get_color_pat(ipipe->formats[IPIPE_PAD_SINK].code);
+ regw_ip(ipipe_base, color_pat, IPIPE_SRC_COL);
+ }
+
+ return 0;
+}
+
+/*
+ * config_rsz_hw() - Performs hardware setup of resizer.
+ */
+int config_rsz_hw(struct vpfe_resizer_device *resizer,
+ struct resizer_params *config)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ void *__iomem ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
+ void *__iomem rsz_base = vpfe_dev->vpfe_resizer.base_addr;
+
+ /* enable VPSS clock */
+ vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
+ ipipe_clock_enable(ipipe_base);
+
+ ipipe_setup_resizer(rsz_base, config);
+
+ return 0;
+}
+
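+/*
+ * The resizer output buffer address is programmed into both the base
+ * (BAD) and start (SAD) address registers, split into low and high
+ * 16-bit halves.
+ */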
+static void
+rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
+ unsigned int offset)
+{
+ u32 val;
+
+ val = address & SET_LOW_ADDR;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_L);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_L);
+
+ val = (address & SET_HIGH_ADDR) >> 16;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_H);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_H);
+}
+
+static void
+rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
+ unsigned int offset)
+{
+ u32 val;
+
+ val = address & SET_LOW_ADDR;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_L);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_L);
+
+ val = (address & SET_HIGH_ADDR) >> 16;
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_H);
+ regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_H);
+}
+
+/*
+ * resizer_set_outaddr() - set the address for given resize_no
+ * @rsz_base: resizer base address
+ * @params: pointer to resizer_params structure
+ * @resize_no: 0 - Resizer-A, 1 - Resizer B
+ * @address: the address to set
+ */
+int
+resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+ int resize_no, unsigned int address)
+{
+ struct resizer_scale_param *rsc_param;
+ struct rsz_ext_mem_param *mem_param;
+ struct rsz_common_params *rsz_common;
+ unsigned int rsz_start_add;
+ unsigned int val;
+
+ if (resize_no != RSZ_A && resize_no != RSZ_B)
+ return -EINVAL;
+
+ mem_param = &params->ext_mem_param[resize_no];
+ rsc_param = &params->rsz_rsc_param[resize_no];
+ rsz_common = &params->rsz_common;
+
+ if (resize_no == RSZ_A)
+ rsz_start_add = RSZ_EN_A;
+ else
+ rsz_start_add = RSZ_EN_B;
+
+ /* y_c = 0 for y, = 1 for c */
+ if (rsz_common->src_img_fmt == RSZ_IMG_420) {
+ if (rsz_common->y_c) {
+ /* C channel */
+ val = address + mem_param->flip_ofst_c;
+ rsz_set_c_address(rsz_base, val, rsz_start_add);
+ } else {
+ val = address + mem_param->flip_ofst_y;
+ rsz_set_y_address(rsz_base, val, rsz_start_add);
+ }
+ } else {
+ if (rsc_param->cen && rsc_param->yen) {
+ /* 420 */
+ val = address + mem_param->c_offset +
+ mem_param->flip_ofst_c +
+ mem_param->user_y_ofst +
+ mem_param->user_c_ofst;
+ if (resize_no == RSZ_B)
+ val +=
+ params->ext_mem_param[RSZ_A].user_y_ofst +
+ params->ext_mem_param[RSZ_A].user_c_ofst;
+ /* set C address */
+ rsz_set_c_address(rsz_base, val, rsz_start_add);
+ }
+ val = address + mem_param->flip_ofst_y + mem_param->user_y_ofst;
+ if (resize_no == RSZ_B)
+ val += params->ext_mem_param[RSZ_A].user_y_ofst +
+ params->ext_mem_param[RSZ_A].user_c_ofst;
+ /* set Y address */
+ rsz_set_y_address(rsz_base, val, rsz_start_add);
+ }
+ /* resizer must be enabled */
+ regw_rsz(rsz_base, params->rsz_en[resize_no], rsz_start_add);
+
+ return 0;
+}
+
+void
+ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_lutdpc *dpc)
+{
+ u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
+ u32 lut_start_addr = DPC_TB0_START_ADDR;
+ u32 val;
+ u32 count;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, dpc->en, DPC_LUT_EN);
+
+ if (dpc->en != 1)
+ return;
+
+ val = LUTDPC_TBL_256_EN | (dpc->repl_white & 1);
+ regw_ip(base_addr, val, DPC_LUT_SEL);
+ regw_ip(base_addr, LUT_DPC_START_ADDR, DPC_LUT_ADR);
+ regw_ip(base_addr, dpc->dpc_size & LUT_DPC_SIZE_MASK, DPC_LUT_SIZ);
+
+ if (dpc->table == NULL)
+ return;
+
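+ /* entries beyond half of LUT_DPC_MAX_SIZE go into the second RAM bank */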
+ for (count = 0; count < dpc->dpc_size; count++) {
+ if (count >= max_tbl_size)
+ lut_start_addr = DPC_TB1_START_ADDR;
+ val = (dpc->table[count].horz_pos & LUT_DPC_H_POS_MASK) |
+ ((dpc->table[count].vert_pos & LUT_DPC_V_POS_MASK) <<
+ LUT_DPC_V_POS_SHIFT) | (dpc->table[count].method <<
+ LUT_DPC_CORR_METH_SHIFT);
+ w_ip_table(isp5_base_addr, val, (lut_start_addr +
+ ((count % max_tbl_size) << 2)));
+ }
+}
+
+static void
+set_dpc_thresholds(void *__iomem base_addr,
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
+{
+ regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_R);
+ regw_ip(base_addr, dpc_thr->corr_thr.gr & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_GR);
+ regw_ip(base_addr, dpc_thr->corr_thr.gb & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_GB);
+ regw_ip(base_addr, dpc_thr->corr_thr.b & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2C_THR_B);
+ regw_ip(base_addr, dpc_thr->det_thr.r & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_R);
+ regw_ip(base_addr, dpc_thr->det_thr.gr & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_GR);
+ regw_ip(base_addr, dpc_thr->det_thr.gb & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_GB);
+ regw_ip(base_addr, dpc_thr->det_thr.b & OTFDPC_DPC2_THR_MASK,
+ DPC_OTF_2D_THR_B);
+}
+
+void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_otfdpc *otfdpc)
+{
+ struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
+ struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0 = &otfdpc->alg_cfg.dpc_3_0;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ regw_ip(base_addr, (otfdpc->en & 1), DPC_OTF_EN);
+ if (!otfdpc->en)
+ return;
+
+ /* dpc enabled */
+ val = (otfdpc->det_method << OTF_DET_METHOD_SHIFT) | otfdpc->alg;
+ regw_ip(base_addr, val, DPC_OTF_TYP);
+
+ if (otfdpc->det_method == VPFE_IPIPE_DPC_OTF_MIN_MAX) {
+ /* ALG= 0, TYP = 0, DPC_OTF_2D_THR_[x]=0
+ * DPC_OTF_2C_THR_[x] = Maximum threshold
+ * MinMax method
+ */
+ dpc_2_0->det_thr.r = dpc_2_0->det_thr.gb =
+ dpc_2_0->det_thr.gr = dpc_2_0->det_thr.b = 0;
+ set_dpc_thresholds(base_addr, dpc_2_0);
+ return;
+ }
+ /* MinMax2 */
+ if (otfdpc->alg == VPFE_IPIPE_OTFDPC_2_0) {
+ set_dpc_thresholds(base_addr, dpc_2_0);
+ return;
+ }
+ regw_ip(base_addr, dpc_3_0->act_adj_shf &
+ OTF_DPC3_0_SHF_MASK, DPC_OTF_3_SHF);
+ /* Detection thresholds */
+ regw_ip(base_addr, ((dpc_3_0->det_thr & OTF_DPC3_0_THR_MASK) <<
+ OTF_DPC3_0_THR_SHIFT), DPC_OTF_3D_THR);
+ regw_ip(base_addr, dpc_3_0->det_slp &
+ OTF_DPC3_0_SLP_MASK, DPC_OTF_3D_SLP);
+ regw_ip(base_addr, dpc_3_0->det_thr_min &
+ OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MIN);
+ regw_ip(base_addr, dpc_3_0->det_thr_max &
+ OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MAX);
+ /* Correction thresholds */
+ regw_ip(base_addr, ((dpc_3_0->corr_thr & OTF_DPC3_0_THR_MASK) <<
+ OTF_DPC3_0_THR_SHIFT), DPC_OTF_3C_THR);
+ regw_ip(base_addr, dpc_3_0->corr_slp &
+ OTF_DPC3_0_SLP_MASK, DPC_OTF_3C_SLP);
+ regw_ip(base_addr, dpc_3_0->corr_thr_min &
+ OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MIN);
+ regw_ip(base_addr, dpc_3_0->corr_thr_max &
+ OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MAX);
+}
+
+/* 2D Noise filter */
+void
+ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_nf *noise_filter)
+{
+
+ u32 offset = D2F_1ST;
+ int count;
+ u32 val;
+
+ if (id == IPIPE_D2F_2ND)
+ offset = D2F_2ND;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, noise_filter->en & 1, offset + D2F_EN);
+ if (!noise_filter->en)
+ return;
+
+ /*noise filter enabled */
+ /* Combine all the fields to make D2F_CFG register of IPIPE */
+ val = ((noise_filter->spread_val & D2F_SPR_VAL_MASK) <<
+ D2F_SPR_VAL_SHIFT) | ((noise_filter->shft_val &
+ D2F_SHFT_VAL_MASK) << D2F_SHFT_VAL_SHIFT) |
+ (noise_filter->gr_sample_meth << D2F_SAMPLE_METH_SHIFT) |
+ ((noise_filter->apply_lsc_gain & 1) <<
+ D2F_APPLY_LSC_GAIN_SHIFT) | D2F_USE_SPR_REG_VAL;
+ regw_ip(base_addr, val, offset + D2F_TYP);
+
+ /* edge detection minimum */
+ regw_ip(base_addr, noise_filter->edge_det_min_thr &
+ D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MIN);
+
+ /* edge detection maximum */
+ regw_ip(base_addr, noise_filter->edge_det_max_thr &
+ D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MAX);
+
+ for (count = 0; count < VPFE_IPIPE_NF_STR_TABLE_SIZE; count++)
+ regw_ip(base_addr,
+ (noise_filter->str[count] & D2F_STR_VAL_MASK),
+ offset + D2F_STR + count * 4);
+
+ for (count = 0; count < VPFE_IPIPE_NF_THR_TABLE_SIZE; count++)
+ regw_ip(base_addr, noise_filter->thr[count] & D2F_THR_VAL_MASK,
+ offset + D2F_THR + count * 4);
+}
+
+#define IPIPE_U8Q5(decimal, integer) \
+ (((decimal & 0x1f) | ((integer & 0x7) << 5)))
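+/* U8Q5: gain = integer + decimal/32, e.g. IPIPE_U8Q5(8, 1) encodes 1.25 */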
+
+/* Green Imbalance Correction */
+void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, gic->en & 1, GIC_EN);
+
+ if (!gic->en)
+ return;
+
+ /*gic enabled */
+ val = (gic->wt_fn_type << GIC_TYP_SHIFT) |
+ (gic->thr_sel << GIC_THR_SEL_SHIFT) |
+ ((gic->apply_lsc_gain & 1) << GIC_APPLY_LSC_GAIN_SHIFT);
+ regw_ip(base_addr, val, GIC_TYP);
+
+ regw_ip(base_addr, gic->gain & GIC_GAIN_MASK, GIC_GAN);
+
+ if (gic->gic_alg != VPFE_IPIPE_GIC_ALG_ADAPT_GAIN) {
+ /* Constant Gain. Set threshold to maximum */
+ regw_ip(base_addr, GIC_THR_MASK, GIC_THR);
+ return;
+ }
+
+ if (gic->thr_sel == VPFE_IPIPE_GIC_THR_REG) {
+ regw_ip(base_addr, gic->thr & GIC_THR_MASK, GIC_THR);
+ regw_ip(base_addr, gic->slope & GIC_SLOPE_MASK, GIC_SLP);
+ } else {
+ /* Use NF thresholds */
+ val = IPIPE_U8Q5(gic->nf2_thr_gain.decimal,
+ gic->nf2_thr_gain.integer);
+ regw_ip(base_addr, val, GIC_NFGAN);
+ }
+}
+
+#define IPIPE_U13Q9(decimal, integer) \
+ (((decimal & 0x1ff) | ((integer & 0xf) << 9)))
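+/* U13Q9: gain = integer + decimal/512, e.g. IPIPE_U13Q9(256, 1) encodes 1.5 */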
+/* White balance */
+void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ /* Offsets. S12 */
+ regw_ip(base_addr, wb->ofst_r & WB_OFFSET_MASK, WB2_OFT_R);
+ regw_ip(base_addr, wb->ofst_gr & WB_OFFSET_MASK, WB2_OFT_GR);
+ regw_ip(base_addr, wb->ofst_gb & WB_OFFSET_MASK, WB2_OFT_GB);
+ regw_ip(base_addr, wb->ofst_b & WB_OFFSET_MASK, WB2_OFT_B);
+
+ /* Gains. U13Q9 */
+ val = IPIPE_U13Q9(wb->gain_r.decimal, wb->gain_r.integer);
+ regw_ip(base_addr, val, WB2_WGN_R);
+
+ val = IPIPE_U13Q9(wb->gain_gr.decimal, wb->gain_gr.integer);
+ regw_ip(base_addr, val, WB2_WGN_GR);
+
+ val = IPIPE_U13Q9(wb->gain_gb.decimal, wb->gain_gb.integer);
+ regw_ip(base_addr, val, WB2_WGN_GB);
+
+ val = IPIPE_U13Q9(wb->gain_b.decimal, wb->gain_b.integer);
+ regw_ip(base_addr, val, WB2_WGN_B);
+}
+
+/* CFA */
+void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
+{
+ ipipe_clock_enable(base_addr);
+
+ regw_ip(base_addr, cfa->alg, CFA_MODE);
+ regw_ip(base_addr, cfa->hpf_thr_2dir & CFA_HPF_THR_2DIR_MASK,
+ CFA_2DIR_HPF_THR);
+ regw_ip(base_addr, cfa->hpf_slp_2dir & CFA_HPF_SLOPE_2DIR_MASK,
+ CFA_2DIR_HPF_SLP);
+ regw_ip(base_addr, cfa->hp_mix_thr_2dir & CFA_HPF_MIX_THR_2DIR_MASK,
+ CFA_2DIR_MIX_THR);
+ regw_ip(base_addr, cfa->hp_mix_slope_2dir & CFA_HPF_MIX_SLP_2DIR_MASK,
+ CFA_2DIR_MIX_SLP);
+ regw_ip(base_addr, cfa->dir_thr_2dir & CFA_DIR_THR_2DIR_MASK,
+ CFA_2DIR_DIR_THR);
+ regw_ip(base_addr, cfa->dir_slope_2dir & CFA_DIR_SLP_2DIR_MASK,
+ CFA_2DIR_DIR_SLP);
+ regw_ip(base_addr, cfa->nd_wt_2dir & CFA_ND_WT_2DIR_MASK,
+ CFA_2DIR_NDWT);
+ regw_ip(base_addr, cfa->hue_fract_daa & CFA_DAA_HUE_FRA_MASK,
+ CFA_MONO_HUE_FRA);
+ regw_ip(base_addr, cfa->edge_thr_daa & CFA_DAA_EDG_THR_MASK,
+ CFA_MONO_EDG_THR);
+ regw_ip(base_addr, cfa->thr_min_daa & CFA_DAA_THR_MIN_MASK,
+ CFA_MONO_THR_MIN);
+ regw_ip(base_addr, cfa->thr_slope_daa & CFA_DAA_THR_SLP_MASK,
+ CFA_MONO_THR_SLP);
+ regw_ip(base_addr, cfa->slope_min_daa & CFA_DAA_SLP_MIN_MASK,
+ CFA_MONO_SLP_MIN);
+ regw_ip(base_addr, cfa->slope_slope_daa & CFA_DAA_SLP_SLP_MASK,
+ CFA_MONO_SLP_SLP);
+ regw_ip(base_addr, cfa->lp_wt_daa & CFA_DAA_LP_WT_MASK,
+ CFA_MONO_LPWT);
+}
+
+void
+ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+ struct vpfe_ipipe_rgb2rgb *rgb)
+{
+ u32 offset_mask = RGB2RGB_1_OFST_MASK;
+ u32 offset = RGB1_MUL_BASE;
+ u32 integ_mask = 0xf;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ if (id == IPIPE_RGB2RGB_2) {
+ /* For the second RGB module, the gain integer is 3 bits instead
+ * of 4 and the offset has 11 bits instead of 13 */
+ offset = RGB2_MUL_BASE;
+ integ_mask = 0x7;
+ offset_mask = RGB2RGB_2_OFST_MASK;
+ }
+ /* Gains */
+ val = (rgb->coef_rr.decimal & 0xff) |
+ ((rgb->coef_rr.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RR);
+ val = (rgb->coef_gr.decimal & 0xff) |
+ ((rgb->coef_gr.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GR);
+ val = (rgb->coef_br.decimal & 0xff) |
+ ((rgb->coef_br.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BR);
+ val = (rgb->coef_rg.decimal & 0xff) |
+ ((rgb->coef_rg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RG);
+ val = (rgb->coef_gg.decimal & 0xff) |
+ ((rgb->coef_gg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GG);
+ val = (rgb->coef_bg.decimal & 0xff) |
+ ((rgb->coef_bg.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BG);
+ val = (rgb->coef_rb.decimal & 0xff) |
+ ((rgb->coef_rb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_RB);
+ val = (rgb->coef_gb.decimal & 0xff) |
+ ((rgb->coef_gb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_GB);
+ val = (rgb->coef_bb.decimal & 0xff) |
+ ((rgb->coef_bb.integer & integ_mask) << 8);
+ regw_ip(base_addr, val, offset + RGB_MUL_BB);
+
+ /* Offsets */
+ regw_ip(base_addr, rgb->out_ofst_r & offset_mask, offset + RGB_OFT_OR);
+ regw_ip(base_addr, rgb->out_ofst_g & offset_mask, offset + RGB_OFT_OG);
+ regw_ip(base_addr, rgb->out_ofst_b & offset_mask, offset + RGB_OFT_OB);
+}
+
+static void
+ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
+{
+ int count;
+ u32 val;
+
+ for (count = 0; count < size; count++) {
+ val = table[count].slope & GAMMA_MASK;
+ val |= (table[count].offset & GAMMA_MASK) << GAMMA_SHIFT;
+ w_ip_table(isp5_base_addr, val, (addr + (count * 4)));
+ }
+}
+
+void
+ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gamma *gamma)
+{
+ int table_size;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ val = (gamma->bypass_r << GAMMA_BYPR_SHIFT) |
+ (gamma->bypass_g << GAMMA_BYPG_SHIFT) |
+ (gamma->bypass_b << GAMMA_BYPB_SHIFT) |
+ (gamma->tbl_sel << GAMMA_TBL_SEL_SHIFT) |
+ (gamma->tbl_size << GAMMA_TBL_SIZE_SHIFT);
+
+ regw_ip(base_addr, val, GMM_CFG);
+
+ if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
+ return;
+
+ table_size = gamma->tbl_size;
+
+ if (!gamma->bypass_r && gamma->table_r != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_r,
+ table_size, GAMMA_R_START_ADDR);
+ if (!gamma->bypass_b && gamma->table_b != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_b,
+ table_size, GAMMA_B_START_ADDR);
+ if (!gamma->bypass_g && gamma->table_g != NULL)
+ ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_g,
+ table_size, GAMMA_G_START_ADDR);
+}
+
+void
+ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_3d_lut *lut_3d)
+{
+ struct vpfe_ipipe_3d_lut_entry *tbl;
+ u32 bnk_index;
+ u32 tbl_index;
+ u32 val;
+ u32 i;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, lut_3d->en, D3LUT_EN);
+
+ if (!lut_3d->en)
+ return;
+
+ /* lut_3d enabled */
+ if (!lut_3d->table)
+ return;
+
+ /* valid table */
+ tbl = lut_3d->table;
+ for (i = 0 ; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
+ /* Each entry has 0-9 (B), 10-19 (G) and
+ 20-29 R values */
+ val = tbl[i].b & D3_LUT_ENTRY_MASK;
+ val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
+ D3_LUT_ENTRY_G_SHIFT;
+ val |= (tbl[i].r & D3_LUT_ENTRY_MASK) <<
+ D3_LUT_ENTRY_R_SHIFT;
+ bnk_index = i % 4;
+ tbl_index = i >> 2;
+ tbl_index <<= 2;
+ if (bnk_index == 0)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB0_START_ADDR);
+ else if (bnk_index == 1)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB1_START_ADDR);
+ else if (bnk_index == 2)
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB2_START_ADDR);
+ else
+ w_ip_table(isp5_base_addr, val,
+ tbl_index + D3L_TB3_START_ADDR);
+ }
+}
+
+/* Luminance adjustments */
+void
+ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ /* combine fields of YUV_ADJ to set brightness and contrast */
+ val = lum_adj->contrast << LUM_ADJ_CONTR_SHIFT |
+ lum_adj->brightness << LUM_ADJ_BRIGHT_SHIFT;
+ regw_ip(base_addr, val, YUV_ADJ);
+}
+
+#define IPIPE_S12Q8(decimal, integer) \
+ (((decimal & 0xff) | ((integer & 0xf) << 8)))
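+/* S12Q8: coefficient = integer + decimal/256, e.g. IPIPE_S12Q8(128, 0) is 0.5 */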
+
+void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_rgb2yuv *yuv)
+{
+ u32 val;
+
+ /* coefficients are S12Q8 */
+ ipipe_clock_enable(base_addr);
+ val = IPIPE_S12Q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
+ regw_ip(base_addr, val, YUV_MUL_RY);
+ val = IPIPE_S12Q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
+ regw_ip(base_addr, val, YUV_MUL_GY);
+ val = IPIPE_S12Q8(yuv->coef_by.decimal, yuv->coef_by.integer);
+ regw_ip(base_addr, val, YUV_MUL_BY);
+ val = IPIPE_S12Q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_RCB);
+ val = IPIPE_S12Q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_GCB);
+ val = IPIPE_S12Q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
+ regw_ip(base_addr, val, YUV_MUL_BCB);
+ val = IPIPE_S12Q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_RCR);
+ val = IPIPE_S12Q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_GCR);
+ val = IPIPE_S12Q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
+ regw_ip(base_addr, val, YUV_MUL_BCR);
+ regw_ip(base_addr, yuv->out_ofst_y & RGB2YCBCR_OFST_MASK, YUV_OFT_Y);
+ regw_ip(base_addr, yuv->out_ofst_cb & RGB2YCBCR_OFST_MASK, YUV_OFT_CB);
+ regw_ip(base_addr, yuv->out_ofst_cr & RGB2YCBCR_OFST_MASK, YUV_OFT_CR);
+}
+
+/* YUV 422 conversion */
+void
+ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+ struct vpfe_ipipe_yuv422_conv *conv)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+
+ /* Combine all the fields to make YUV_PHS register of IPIPE */
+ val = (conv->chrom_pos << 0) | (conv->en_chrom_lpf << 1);
+ regw_ip(base_addr, val, YUV_PHS);
+}
+
+void
+ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_gbce *gbce)
+{
+ unsigned int count;
+ u32 mask = GBCE_Y_VAL_MASK;
+
+ if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
+ mask = GBCE_GAIN_VAL_MASK;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, gbce->en & 1, GBCE_EN);
+
+ if (!gbce->en)
+ return;
+
+ regw_ip(base_addr, gbce->type, GBCE_TYP);
+
+ if (!gbce->table)
+ return;
+
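+ /* two LUT entries are packed into each 32-bit internal table word */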
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT ; count += 2)
+ w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
+ GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
+ ((count/2) << 2) + GBCE_TB_START_ADDR);
+}
+
+void
+ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ struct vpfe_ipipe_yee *ee)
+{
+ unsigned int count;
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, ee->en, YEE_EN);
+
+ if (!ee->en)
+ return;
+
+ val = ee->en_halo_red & 1;
+ val |= ee->merge_meth << YEE_HALO_RED_EN_SHIFT;
+ regw_ip(base_addr, val, YEE_TYP);
+
+ regw_ip(base_addr, ee->hpf_shft, YEE_SHF);
+ regw_ip(base_addr, ee->hpf_coef_00 & YEE_COEF_MASK, YEE_MUL_00);
+ regw_ip(base_addr, ee->hpf_coef_01 & YEE_COEF_MASK, YEE_MUL_01);
+ regw_ip(base_addr, ee->hpf_coef_02 & YEE_COEF_MASK, YEE_MUL_02);
+ regw_ip(base_addr, ee->hpf_coef_10 & YEE_COEF_MASK, YEE_MUL_10);
+ regw_ip(base_addr, ee->hpf_coef_11 & YEE_COEF_MASK, YEE_MUL_11);
+ regw_ip(base_addr, ee->hpf_coef_12 & YEE_COEF_MASK, YEE_MUL_12);
+ regw_ip(base_addr, ee->hpf_coef_20 & YEE_COEF_MASK, YEE_MUL_20);
+ regw_ip(base_addr, ee->hpf_coef_21 & YEE_COEF_MASK, YEE_MUL_21);
+ regw_ip(base_addr, ee->hpf_coef_22 & YEE_COEF_MASK, YEE_MUL_22);
+ regw_ip(base_addr, ee->yee_thr & YEE_THR_MASK, YEE_THR);
+ regw_ip(base_addr, ee->es_gain & YEE_ES_GAIN_MASK, YEE_E_GAN);
+ regw_ip(base_addr, ee->es_thr1 & YEE_ES_THR1_MASK, YEE_E_THR1);
+ regw_ip(base_addr, ee->es_thr2 & YEE_THR_MASK, YEE_E_THR2);
+ regw_ip(base_addr, ee->es_gain_grad & YEE_THR_MASK, YEE_G_GAN);
+ regw_ip(base_addr, ee->es_ofst_grad & YEE_THR_MASK, YEE_G_OFT);
+
+ if (ee->table == NULL)
+ return;
+
+ for (count = 0; count < VPFE_IPIPE_MAX_SIZE_YEE_LUT; count += 2)
+ w_ip_table(isp5_base_addr, ((ee->table[count + 1] &
+ YEE_ENTRY_MASK) << YEE_ENTRY_SHIFT) |
+ (ee->table[count] & YEE_ENTRY_MASK),
+ ((count/2) << 2) + YEE_TB_START_ADDR);
+}
+
+/* Chromatic Artifact Correction. CAR */
+static void ipipe_set_mf(void *__iomem base_addr)
+{
+ /* typ to dynamic switch */
+ regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
+ /* Set SW0 to maximum */
+ regw_ip(base_addr, CAR_MF_THR, CAR_SW);
+}
+
+static void
+ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+{
+ regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
+ regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
+ regw_ip(base_addr, car->hpf_shft & CAR_HPF_SHIFT_MASK, CAR_HPF_SHF);
+ regw_ip(base_addr, car->hpf_thr, CAR_HPF_THR);
+ regw_ip(base_addr, car->gain1.gain, CAR_GN1_GAN);
+ regw_ip(base_addr, car->gain1.shft & CAR_GAIN1_SHFT_MASK, CAR_GN1_SHF);
+ regw_ip(base_addr, car->gain1.gain_min & CAR_GAIN_MIN_MASK,
+ CAR_GN1_MIN);
+ regw_ip(base_addr, car->gain2.gain, CAR_GN2_GAN);
+ regw_ip(base_addr, car->gain2.shft & CAR_GAIN2_SHFT_MASK, CAR_GN2_SHF);
+ regw_ip(base_addr, car->gain2.gain_min & CAR_GAIN_MIN_MASK,
+ CAR_GN2_MIN);
+}
+
+void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+{
+ u32 val;
+
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, car->en, CAR_EN);
+
+ if (!car->en)
+ return;
+
+ switch (car->meth) {
+ case VPFE_IPIPE_CAR_MED_FLTR:
+ ipipe_set_mf(base_addr);
+ break;
+
+ case VPFE_IPIPE_CAR_CHR_GAIN_CTRL:
+ ipipe_set_gain_ctrl(base_addr, car);
+ break;
+
+ default:
+ /* Dynamic switch between MF and Gain Ctrl. */
+ ipipe_set_mf(base_addr);
+ ipipe_set_gain_ctrl(base_addr, car);
+ /* Set the threshold for switching between
+ * the two. Here we overwrite the MF SW0 value.
+ */
+ regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
+ val = car->sw1;
+ val <<= CAR_SW1_SHIFT;
+ val |= car->sw0;
+ regw_ip(base_addr, val, CAR_SW);
+ }
+}
+
+/* Chromatic Gain Suppression */
+void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
+{
+ ipipe_clock_enable(base_addr);
+ regw_ip(base_addr, cgs->en, CGS_EN);
+
+ if (!cgs->en)
+ return;
+
+ /* Set the bright side parameters */
+ regw_ip(base_addr, cgs->h_thr, CGS_GN1_H_THR);
+ regw_ip(base_addr, cgs->h_slope, CGS_GN1_H_GAN);
+ regw_ip(base_addr, cgs->h_shft & CAR_SHIFT_MASK, CGS_GN1_H_SHF);
+ regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
+}
+
+void rsz_src_enable(void *__iomem rsz_base, int enable)
+{
+ regw_rsz(rsz_base, enable, RSZ_SRC_EN);
+}
+
+int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable)
+{
+ if (rsz_id == RSZ_A) {
+ regw_rsz(rsz_base, enable, RSZ_EN_A);
+ /* We always enable RSZ_A. RSZ_B is enabled on request from the
+ * application, so set RSZ_SRC_EN along with RSZ_A
+ */
+ regw_rsz(rsz_base, enable, RSZ_SRC_EN);
+ } else if (rsz_id == RSZ_B) {
+ regw_rsz(rsz_base, enable, RSZ_EN_B);
+ } else {
+ BUG();
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
new file mode 100644
index 000000000000..010fdb247faf
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPE_HW_H
+#define _DAVINCI_VPFE_DM365_IPIPE_HW_H
+
+#include "vpfe_mc_capture.h"
+
+#define SET_LOW_ADDR 0x0000ffff
+#define SET_HIGH_ADDR 0xffff0000
+
+/* Below are the internal tables */
+#define DPC_TB0_START_ADDR 0x8000
+#define DPC_TB1_START_ADDR 0x8400
+
+#define GAMMA_R_START_ADDR 0xa800
+#define GAMMA_G_START_ADDR 0xb000
+#define GAMMA_B_START_ADDR 0xb800
+
+/* RAM table addresses for edge enhancement correction*/
+#define YEE_TB_START_ADDR 0x8800
+
+/* RAM table address for GBCE LUT */
+#define GBCE_TB_START_ADDR 0x9000
+
+/* RAM tables for 3D LUT */
+#define D3L_TB0_START_ADDR 0x9800
+#define D3L_TB1_START_ADDR 0x9c00
+#define D3L_TB2_START_ADDR 0xa000
+#define D3L_TB3_START_ADDR 0xa400
+
+/* IPIPE Register Offsets from the base address */
+#define IPIPE_SRC_EN 0x0000
+#define IPIPE_SRC_MODE 0x0004
+#define IPIPE_SRC_FMT 0x0008
+#define IPIPE_SRC_COL 0x000c
+#define IPIPE_SRC_VPS 0x0010
+#define IPIPE_SRC_VSZ 0x0014
+#define IPIPE_SRC_HPS 0x0018
+#define IPIPE_SRC_HSZ 0x001c
+
+#define IPIPE_SEL_SBU 0x0020
+
+#define IPIPE_DMA_STA 0x0024
+#define IPIPE_GCK_MMR 0x0028
+#define IPIPE_GCK_PIX 0x002c
+#define IPIPE_RESERVED0 0x0030
+
+/* Defect Correction */
+#define DPC_LUT_EN 0x0034
+#define DPC_LUT_SEL 0x0038
+#define DPC_LUT_ADR 0x003c
+#define DPC_LUT_SIZ 0x0040
+#define DPC_OTF_EN 0x0044
+#define DPC_OTF_TYP 0x0048
+#define DPC_OTF_2D_THR_R 0x004c
+#define DPC_OTF_2D_THR_GR 0x0050
+#define DPC_OTF_2D_THR_GB 0x0054
+#define DPC_OTF_2D_THR_B 0x0058
+#define DPC_OTF_2C_THR_R 0x005c
+#define DPC_OTF_2C_THR_GR 0x0060
+#define DPC_OTF_2C_THR_GB 0x0064
+#define DPC_OTF_2C_THR_B 0x0068
+#define DPC_OTF_3_SHF 0x006c
+#define DPC_OTF_3D_THR 0x0070
+#define DPC_OTF_3D_SLP 0x0074
+#define DPC_OTF_3D_MIN 0x0078
+#define DPC_OTF_3D_MAX 0x007c
+#define DPC_OTF_3C_THR 0x0080
+#define DPC_OTF_3C_SLP 0x0084
+#define DPC_OTF_3C_MIN 0x0088
+#define DPC_OTF_3C_MAX 0x008c
+
+/* Lens Shading Correction */
+#define LSC_VOFT 0x90
+#define LSC_VA2 0x94
+#define LSC_VA1 0x98
+#define LSC_VS 0x9c
+#define LSC_HOFT 0xa0
+#define LSC_HA2 0xa4
+#define LSC_HA1 0xa8
+#define LSC_HS 0xac
+#define LSC_GAIN_R 0xb0
+#define LSC_GAIN_GR 0xb4
+#define LSC_GAIN_GB 0xb8
+#define LSC_GAIN_B 0xbc
+#define LSC_OFT_R 0xc0
+#define LSC_OFT_GR 0xc4
+#define LSC_OFT_GB 0xc8
+#define LSC_OFT_B 0xcc
+#define LSC_SHF 0xd0
+#define LSC_MAX 0xd4
+
+/* Noise Filter 1. Offsets from the start address given below */
+#define D2F_1ST 0xd8
+#define D2F_EN 0x0
+#define D2F_TYP 0x4
+#define D2F_THR 0x8
+#define D2F_STR 0x28
+#define D2F_SPR 0x48
+#define D2F_EDG_MIN 0x68
+#define D2F_EDG_MAX 0x6c
+
+/* Noise Filter 2 */
+#define D2F_2ND 0x148
+
+/* GIC */
+#define GIC_EN 0x1b8
+#define GIC_TYP 0x1bc
+#define GIC_GAN 0x1c0
+#define GIC_NFGAN 0x1c4
+#define GIC_THR 0x1c8
+#define GIC_SLP 0x1cc
+
+/* White Balance */
+#define WB2_OFT_R 0x1d0
+#define WB2_OFT_GR 0x1d4
+#define WB2_OFT_GB 0x1d8
+#define WB2_OFT_B 0x1dc
+#define WB2_WGN_R 0x1e0
+#define WB2_WGN_GR 0x1e4
+#define WB2_WGN_GB 0x1e8
+#define WB2_WGN_B 0x1ec
+
+/* CFA interpolation */
+#define CFA_MODE 0x1f0
+#define CFA_2DIR_HPF_THR 0x1f4
+#define CFA_2DIR_HPF_SLP 0x1f8
+#define CFA_2DIR_MIX_THR 0x1fc
+#define CFA_2DIR_MIX_SLP 0x200
+#define CFA_2DIR_DIR_THR 0x204
+#define CFA_2DIR_DIR_SLP 0x208
+#define CFA_2DIR_NDWT 0x20c
+#define CFA_MONO_HUE_FRA 0x210
+#define CFA_MONO_EDG_THR 0x214
+#define CFA_MONO_THR_MIN 0x218
+#define CFA_MONO_THR_SLP 0x21c
+#define CFA_MONO_SLP_MIN 0x220
+#define CFA_MONO_SLP_SLP 0x224
+#define CFA_MONO_LPWT 0x228
+
+/* RGB to RGB conversion - 1st */
+#define RGB1_MUL_BASE 0x22c
+/* Offsets from base */
+#define RGB_MUL_RR 0x0
+#define RGB_MUL_GR 0x4
+#define RGB_MUL_BR 0x8
+#define RGB_MUL_RG 0xc
+#define RGB_MUL_GG 0x10
+#define RGB_MUL_BG 0x14
+#define RGB_MUL_RB 0x18
+#define RGB_MUL_GB 0x1c
+#define RGB_MUL_BB 0x20
+#define RGB_OFT_OR 0x24
+#define RGB_OFT_OG 0x28
+#define RGB_OFT_OB 0x2c
+
+/* Gamma */
+#define GMM_CFG 0x25c
+
+/* RGB to RGB conversion - 2nd */
+#define RGB2_MUL_BASE 0x260
+
+/* 3D LUT */
+#define D3LUT_EN 0x290
+
+/* RGB to YUV(YCbCr) conversion */
+#define YUV_ADJ 0x294
+#define YUV_MUL_RY 0x298
+#define YUV_MUL_GY 0x29c
+#define YUV_MUL_BY 0x2a0
+#define YUV_MUL_RCB 0x2a4
+#define YUV_MUL_GCB 0x2a8
+#define YUV_MUL_BCB 0x2ac
+#define YUV_MUL_RCR 0x2b0
+#define YUV_MUL_GCR 0x2b4
+#define YUV_MUL_BCR 0x2b8
+#define YUV_OFT_Y 0x2bc
+#define YUV_OFT_CB 0x2c0
+#define YUV_OFT_CR 0x2c4
+#define YUV_PHS 0x2c8
+
+/* Global Brightness and Contrast */
+#define GBCE_EN 0x2cc
+#define GBCE_TYP 0x2d0
+
+/* Edge Enhancer */
+#define YEE_EN 0x2d4
+#define YEE_TYP 0x2d8
+#define YEE_SHF 0x2dc
+#define YEE_MUL_00 0x2e0
+#define YEE_MUL_01 0x2e4
+#define YEE_MUL_02 0x2e8
+#define YEE_MUL_10 0x2ec
+#define YEE_MUL_11 0x2f0
+#define YEE_MUL_12 0x2f4
+#define YEE_MUL_20 0x2f8
+#define YEE_MUL_21 0x2fc
+#define YEE_MUL_22 0x300
+#define YEE_THR 0x304
+#define YEE_E_GAN 0x308
+#define YEE_E_THR1 0x30c
+#define YEE_E_THR2 0x310
+#define YEE_G_GAN 0x314
+#define YEE_G_OFT 0x318
+
+/* Chroma Artifact Reduction */
+#define CAR_EN 0x31c
+#define CAR_TYP 0x320
+#define CAR_SW 0x324
+#define CAR_HPF_TYP 0x328
+#define CAR_HPF_SHF 0x32c
+#define CAR_HPF_THR 0x330
+#define CAR_GN1_GAN 0x334
+#define CAR_GN1_SHF 0x338
+#define CAR_GN1_MIN 0x33c
+#define CAR_GN2_GAN 0x340
+#define CAR_GN2_SHF 0x344
+#define CAR_GN2_MIN 0x348
+
+/* Chroma Gain Suppression */
+#define CGS_EN 0x34c
+#define CGS_GN1_L_THR 0x350
+#define CGS_GN1_L_GAN 0x354
+#define CGS_GN1_L_SHF 0x358
+#define CGS_GN1_L_MIN 0x35c
+#define CGS_GN1_H_THR 0x360
+#define CGS_GN1_H_GAN 0x364
+#define CGS_GN1_H_SHF 0x368
+#define CGS_GN1_H_MIN 0x36c
+#define CGS_GN2_L_THR 0x370
+#define CGS_GN2_L_GAN 0x374
+#define CGS_GN2_L_SHF 0x378
+#define CGS_GN2_L_MIN 0x37c
+
+/* Resizer */
+#define RSZ_SRC_EN 0x0
+#define RSZ_SRC_MODE 0x4
+#define RSZ_SRC_FMT0 0x8
+#define RSZ_SRC_FMT1 0xc
+#define RSZ_SRC_VPS 0x10
+#define RSZ_SRC_VSZ 0x14
+#define RSZ_SRC_HPS 0x18
+#define RSZ_SRC_HSZ 0x1c
+#define RSZ_DMA_RZA 0x20
+#define RSZ_DMA_RZB 0x24
+#define RSZ_DMA_STA 0x28
+#define RSZ_GCK_MMR 0x2c
+#define RSZ_RESERVED0 0x30
+#define RSZ_GCK_SDR 0x34
+#define RSZ_IRQ_RZA 0x38
+#define RSZ_IRQ_RZB 0x3c
+#define RSZ_YUV_Y_MIN 0x40
+#define RSZ_YUV_Y_MAX 0x44
+#define RSZ_YUV_C_MIN 0x48
+#define RSZ_YUV_C_MAX 0x4c
+#define RSZ_YUV_PHS 0x50
+#define RSZ_SEQ 0x54
+
+/* Resizer Rescale Parameters */
+#define RSZ_EN_A 0x58
+#define RSZ_EN_B 0xe8
+/*
+ * Offsets of the registers below are added to the base register of
+ * either RSZ0 or RSZ1 (see the usage sketch after the SDR offsets)
+ */
+#define RSZ_MODE 0x4
+#define RSZ_420 0x8
+#define RSZ_I_VPS 0xc
+#define RSZ_I_HPS 0x10
+#define RSZ_O_VSZ 0x14
+#define RSZ_O_HSZ 0x18
+#define RSZ_V_PHS_Y 0x1c
+#define RSZ_V_PHS_C 0x20
+#define RSZ_V_DIF 0x24
+#define RSZ_V_TYP 0x28
+#define RSZ_V_LPF 0x2c
+#define RSZ_H_PHS 0x30
+#define RSZ_H_PHS_ADJ 0x34
+#define RSZ_H_DIF 0x38
+#define RSZ_H_TYP 0x3c
+#define RSZ_H_LPF 0x40
+#define RSZ_DWN_EN 0x44
+#define RSZ_DWN_AV 0x48
+
+/* Resizer RGB Conversion Parameters */
+#define RSZ_RGB_EN 0x4c
+#define RSZ_RGB_TYP 0x50
+#define RSZ_RGB_BLD 0x54
+
+/* Resizer External Memory Parameters */
+#define RSZ_SDR_Y_BAD_H 0x58
+#define RSZ_SDR_Y_BAD_L 0x5c
+#define RSZ_SDR_Y_SAD_H 0x60
+#define RSZ_SDR_Y_SAD_L 0x64
+#define RSZ_SDR_Y_OFT 0x68
+#define RSZ_SDR_Y_PTR_S 0x6c
+#define RSZ_SDR_Y_PTR_E 0x70
+#define RSZ_SDR_C_BAD_H 0x74
+#define RSZ_SDR_C_BAD_L 0x78
+#define RSZ_SDR_C_SAD_H 0x7c
+#define RSZ_SDR_C_SAD_L 0x80
+#define RSZ_SDR_C_OFT 0x84
+#define RSZ_SDR_C_PTR_S 0x88
+#define RSZ_SDR_C_PTR_E 0x8c
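+
+/*
+ * For illustration, assuming RSZ_EN_A (0x58) and RSZ_EN_B (0xe8) are the
+ * per-instance register windows, the output horizontal size of resizer A
+ * would be written as (rsz_base and out_width are placeholders)
+ *
+ * regw_rsz(rsz_base, out_width, RSZ_EN_A + RSZ_O_HSZ);
+ *
+ * and the same register of resizer B via RSZ_EN_B + RSZ_O_HSZ.
+ */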
+
+/* Macro for resizer */
+#define RSZ_YUV_Y_MIN 0x40
+#define RSZ_YUV_Y_MAX 0x44
+#define RSZ_YUV_C_MIN 0x48
+#define RSZ_YUV_C_MAX 0x4c
+
+#define IPIPE_GCK_MMR_DEFAULT 1
+#define IPIPE_GCK_PIX_DEFAULT 0xe
+#define RSZ_GCK_MMR_DEFAULT 1
+#define RSZ_GCK_SDR_DEFAULT 1
+
+/* LUTDPC */
+#define LUTDPC_TBL_256_EN 0
+#define LUTDPC_INF_TBL_EN 1
+#define LUT_DPC_START_ADDR 0
+#define LUT_DPC_H_POS_MASK 0x1fff
+#define LUT_DPC_V_POS_MASK 0x1fff
+#define LUT_DPC_V_POS_SHIFT 13
+#define LUT_DPC_CORR_METH_SHIFT 26
+#define LUT_DPC_MAX_SIZE 256
+#define LUT_DPC_SIZE_MASK 0x3ff
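+
+/*
+ * For illustration, the masks above suggest that a LUT entry packs the
+ * horizontal position in bits 12:0, the vertical position in bits 25:13
+ * and the correction method from bit 26, e.g.
+ *
+ * entry = (h_pos & LUT_DPC_H_POS_MASK) |
+ *  ((v_pos & LUT_DPC_V_POS_MASK) << LUT_DPC_V_POS_SHIFT) |
+ *  (corr_meth << LUT_DPC_CORR_METH_SHIFT);
+ */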
+
+/* OTFDPC */
+#define OTFDPC_DPC2_THR_MASK 0xfff
+#define OTF_DET_METHOD_SHIFT 1
+#define OTF_DPC3_0_SHF_MASK 3
+#define OTF_DPC3_0_THR_SHIFT 6
+#define OTF_DPC3_0_THR_MASK 0x3f
+#define OTF_DPC3_0_SLP_MASK 0x3f
+#define OTF_DPC3_0_DET_MASK 0xfff
+#define OTF_DPC3_0_CORR_MASK 0xfff
+
+/* NF (D2F) */
+#define D2F_SPR_VAL_MASK 0x1f
+#define D2F_SPR_VAL_SHIFT 0
+#define D2F_SHFT_VAL_MASK 3
+#define D2F_SHFT_VAL_SHIFT 5
+#define D2F_SAMPLE_METH_SHIFT 7
+#define D2F_APPLY_LSC_GAIN_SHIFT 8
+#define D2F_USE_SPR_REG_VAL 0
+#define D2F_STR_VAL_MASK 0x1f
+#define D2F_THR_VAL_MASK 0x3ff
+#define D2F_EDGE_DET_THR_MASK 0x7ff
+
+/* Green Imbalance Correction */
+#define GIC_TYP_SHIFT 0
+#define GIC_THR_SEL_SHIFT 1
+#define GIC_APPLY_LSC_GAIN_SHIFT 2
+#define GIC_GAIN_MASK 0xff
+#define GIC_THR_MASK 0xfff
+#define GIC_SLOPE_MASK 0xfff
+#define GIC_NFGAN_INT_MASK 7
+#define GIC_NFGAN_DECI_MASK 0x1f
+
+/* WB */
+#define WB_OFFSET_MASK 0xfff
+#define WB_GAIN_INT_MASK 0xf
+#define WB_GAIN_DECI_MASK 0x1ff
+
+/* CFA */
+#define CFA_HPF_THR_2DIR_MASK 0x1fff
+#define CFA_HPF_SLOPE_2DIR_MASK 0x3ff
+#define CFA_HPF_MIX_THR_2DIR_MASK 0x1fff
+#define CFA_HPF_MIX_SLP_2DIR_MASK 0x3ff
+#define CFA_DIR_THR_2DIR_MASK 0x3ff
+#define CFA_DIR_SLP_2DIR_MASK 0x7f
+#define CFA_ND_WT_2DIR_MASK 0x3f
+#define CFA_DAA_HUE_FRA_MASK 0x3f
+#define CFA_DAA_EDG_THR_MASK 0xff
+#define CFA_DAA_THR_MIN_MASK 0x3ff
+#define CFA_DAA_THR_SLP_MASK 0x3ff
+#define CFA_DAA_SLP_MIN_MASK 0x3ff
+#define CFA_DAA_SLP_SLP_MASK 0x3ff
+#define CFA_DAA_LP_WT_MASK 0x3f
+
+/* RGB2RGB */
+#define RGB2RGB_1_OFST_MASK 0x1fff
+#define RGB2RGB_1_GAIN_INT_MASK 0xf
+#define RGB2RGB_GAIN_DECI_MASK 0xff
+#define RGB2RGB_2_OFST_MASK 0x7ff
+#define RGB2RGB_2_GAIN_INT_MASK 0x7
+
+/* Gamma */
+#define GAMMA_BYPR_SHIFT 0
+#define GAMMA_BYPG_SHIFT 1
+#define GAMMA_BYPB_SHIFT 2
+#define GAMMA_TBL_SEL_SHIFT 4
+#define GAMMA_TBL_SIZE_SHIFT 5
+#define GAMMA_MASK 0x3ff
+#define GAMMA_SHIFT 10
+
+/* 3D LUT */
+#define D3_LUT_ENTRY_MASK 0x3ff
+#define D3_LUT_ENTRY_R_SHIFT 20
+#define D3_LUT_ENTRY_G_SHIFT 10
+#define D3_LUT_ENTRY_B_SHIFT 0
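+
+/*
+ * For illustration, each 3D LUT word appears to pack three 10-bit
+ * components, e.g.
+ *
+ * entry = ((r & D3_LUT_ENTRY_MASK) << D3_LUT_ENTRY_R_SHIFT) |
+ *  ((g & D3_LUT_ENTRY_MASK) << D3_LUT_ENTRY_G_SHIFT) |
+ *  ((b & D3_LUT_ENTRY_MASK) << D3_LUT_ENTRY_B_SHIFT);
+ */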
+
+/* Lumina adj */
+#define LUM_ADJ_CONTR_SHIFT 0
+#define LUM_ADJ_BRIGHT_SHIFT 8
+
+/* RGB2YCbCr */
+#define RGB2YCBCR_OFST_MASK 0x7ff
+#define RGB2YCBCR_COEF_INT_MASK 0xf
+#define RGB2YCBCR_COEF_DECI_MASK 0xff
+
+/* GBCE */
+#define GBCE_Y_VAL_MASK 0xff
+#define GBCE_GAIN_VAL_MASK 0x3ff
+#define GBCE_ENTRY_SHIFT 10
+
+/* Edge Enhancements */
+#define YEE_HALO_RED_EN_SHIFT 1
+#define YEE_HPF_SHIFT_MASK 0xf
+#define YEE_COEF_MASK 0x3ff
+#define YEE_THR_MASK 0x3f
+#define YEE_ES_GAIN_MASK 0xfff
+#define YEE_ES_THR1_MASK 0xfff
+#define YEE_ENTRY_SHIFT 9
+#define YEE_ENTRY_MASK 0x1ff
+
+/* CAR */
+#define CAR_MF_THR 0xff
+#define CAR_SW1_SHIFT 8
+#define CAR_GAIN1_SHFT_MASK 7
+#define CAR_GAIN_MIN_MASK 0x1ff
+#define CAR_GAIN2_SHFT_MASK 0xf
+#define CAR_HPF_SHIFT_MASK 3
+
+/* CGS */
+#define CAR_SHIFT_MASK 3
+
+/* Resizer */
+#define RSZ_BYPASS_SHIFT 1
+#define RSZ_SRC_IMG_FMT_SHIFT 1
+#define RSZ_SRC_Y_C_SEL_SHIFT 2
+#define IPIPE_RSZ_VPS_MASK 0xffff
+#define IPIPE_RSZ_HPS_MASK 0xffff
+#define IPIPE_RSZ_VSZ_MASK 0x1fff
+#define IPIPE_RSZ_HSZ_MASK 0x1fff
+#define RSZ_HPS_MASK 0x1fff
+#define RSZ_VPS_MASK 0x1fff
+#define RSZ_O_HSZ_MASK 0x1fff
+#define RSZ_O_VSZ_MASK 0x1fff
+#define RSZ_V_PHS_MASK 0x3fff
+#define RSZ_V_DIF_MASK 0x3fff
+
+#define RSZA_H_FLIP_SHIFT 0
+#define RSZA_V_FLIP_SHIFT 1
+#define RSZB_H_FLIP_SHIFT 2
+#define RSZB_V_FLIP_SHIFT 3
+#define RSZ_A 0
+#define RSZ_B 1
+#define RSZ_CEN_SHIFT 1
+#define RSZ_YEN_SHIFT 0
+#define RSZ_TYP_Y_SHIFT 0
+#define RSZ_TYP_C_SHIFT 1
+#define RSZ_LPF_INT_MASK 0x3f
+#define RSZ_LPF_INT_C_SHIFT 6
+#define RSZ_H_PHS_MASK 0x3fff
+#define RSZ_H_DIF_MASK 0x3fff
+#define RSZ_DIFF_DOWN_THR 256
+#define RSZ_DWN_SCALE_AV_SZ_V_SHIFT 3
+#define RSZ_DWN_SCALE_AV_SZ_MASK 7
+#define RSZ_RGB_MSK1_SHIFT 2
+#define RSZ_RGB_MSK0_SHIFT 1
+#define RSZ_RGB_TYP_SHIFT 0
+#define RSZ_RGB_ALPHA_MASK 0xff
+
+static inline u32 regr_ip(void __iomem *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline void regw_ip(void __iomem *addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+}
+
+static inline u32 w_ip_table(void __iomem *addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+
+ return val;
+}
+
+static inline u32 regr_rsz(void __iomem *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline u32 regw_rsz(void __iomem *addr, u32 val, u32 offset)
+{
+ writel(val, addr + offset);
+
+ return val;
+}
+
+int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
+int resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
+ int resize_no, unsigned int address);
+int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable);
+void rsz_src_enable(void __iomem *rsz_base, int enable);
+void rsz_set_in_pix_format(unsigned char y_c);
+int config_rsz_hw(struct vpfe_resizer_device *resizer,
+ struct resizer_params *config);
+void ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
+ struct vpfe_ipipe_nf *noise_filter);
+void ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
+ struct vpfe_ipipe_rgb2rgb *rgb);
+void ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
+ struct vpfe_ipipe_yuv422_conv *conv);
+void ipipe_set_lum_adj_regs(void __iomem *base_addr,
+ struct ipipe_lum_adj *lum_adj);
+void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
+ struct vpfe_ipipe_rgb2yuv *yuv);
+void ipipe_set_lutdpc_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
+void ipipe_set_otfdpc_regs(void __iomem *base_addr,
+ struct vpfe_ipipe_otfdpc *otfdpc);
+void ipipe_set_3d_lut_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
+void ipipe_set_gamma_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
+void ipipe_set_ee_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_yee *ee);
+void ipipe_set_gbce_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
+void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic);
+void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa);
+void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car);
+void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs);
+void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
new file mode 100644
index 000000000000..c8cae51d3d00
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_ipipeif.h"
+#include "vpfe_mc_capture.h"
+
+static const unsigned int ipipeif_input_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+};
+
+static const unsigned int ipipeif_output_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+};
+
+static int
+ipipeif_get_pack_mode(enum v4l2_mbus_pixelcode in_pix_fmt)
+{
+ switch (in_pix_fmt) {
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ return IPIPEIF_5_1_PACK_8_BIT;
+
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return IPIPEIF_5_1_PACK_16_BIT;
+
+ case V4L2_MBUS_FMT_SBGGR12_1X12:
+ return IPIPEIF_5_1_PACK_12_BIT;
+
+ default:
+ return IPIPEIF_5_1_PACK_16_BIT;
+ }
+}
+
+static inline u32 ipipeif_read(void *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+
+static inline void ipipeif_write(u32 val, void *addr, u32 offset)
+{
+ writel(val, addr + offset);
+}
+
+static void ipipeif_config_dpc(void *addr, struct ipipeif_dpc *dpc)
+{
+ u32 val = 0;
+
+ if (dpc->en) {
+ val = (dpc->en & 1) << IPIPEIF_DPC2_EN_SHIFT;
+ val |= dpc->thr & IPIPEIF_DPC2_THR_MASK;
+ }
+ ipipeif_write(val, addr, IPIPEIF_DPC2);
+}
+
+#define IPIPEIF_MODE_CONTINUOUS 0
+#define IPIPEIF_MODE_ONE_SHOT 1
+
+static int get_oneshot_mode(enum ipipeif_input_entity input)
+{
+ if (input == IPIPEIF_INPUT_MEMORY)
+ return IPIPEIF_MODE_ONE_SHOT;
+ else if (input == IPIPEIF_INPUT_ISIF)
+ return IPIPEIF_MODE_CONTINUOUS;
+
+ return -EINVAL;
+}
+
+static int
+ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
+ (informat->code == V4L2_MBUS_FMT_Y8_1X8 ||
+ informat->code == V4L2_MBUS_FMT_UV8_1X8))
+ return IPIPEIF_CCDC;
+
+ return IPIPEIF_SRC1_PARALLEL_PORT;
+}
+
+static int
+ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+
+ switch (informat->code) {
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return IPIPEIF_5_1_BITS11_0;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ return IPIPEIF_5_1_BITS11_0;
+
+ default:
+ return IPIPEIF_5_1_BITS7_0;
+ }
+}
+
+static enum ipipeif_input_source
+ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ if (ipipeif->input == IPIPEIF_INPUT_ISIF)
+ return IPIPEIF_CCDC;
+
+ if (informat->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ return IPIPEIF_SDRAM_YUV;
+
+ return IPIPEIF_SDRAM_RAW;
+}
+
+void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif)
+{
+ struct vpfe_video_device *video_in = &ipipeif->video_in;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ spin_lock(&video_in->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_in);
+ video_in->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_in);
+ spin_unlock(&video_in->dma_queue_lock);
+}
+
+int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+
+ return ipipeif->config.decimation;
+}
+
+int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+
+ return ipipeif->config.rsz;
+}
+
+#define RD_DATA_15_2 0x7
+
+/*
+ * ipipeif_hw_setup() - This function sets up IPIPEIF
+ * @sd: pointer to v4l2 subdev structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_hw_setup(struct v4l2_subdev *sd)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+ struct ipipeif_params params = ipipeif->config;
+ enum ipipeif_input_source ipipeif_source;
+ enum v4l2_mbus_pixelcode isif_port_if;
+ void *ipipeif_base_addr;
+ unsigned int val;
+ int data_shift;
+ int pack_mode;
+ int source1;
+ int oneshot;
+
+ ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+
+ /* Enable clock to IPIPEIF and IPIPE */
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+
+ informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
+ outformat = &ipipeif->formats[IPIPEIF_PAD_SOURCE];
+
+ /* Combine all the fields to make CFG1 register of IPIPEIF */
+ oneshot = get_oneshot_mode(ipipeif->input);
+ if (oneshot < 0) {
+ pr_err("ipipeif: links setup required\n");
+ return -EINVAL;
+ }
+ val = oneshot << ONESHOT_SHIFT;
+
+ ipipeif_source = ipipeif_get_source(ipipeif);
+ val |= ipipeif_source << INPSRC_SHIFT;
+
+ val |= params.clock_select << CLKSEL_SHIFT;
+ val |= params.avg_filter << AVGFILT_SHIFT;
+ val |= params.decimation << DECIM_SHIFT;
+
+ pack_mode = ipipeif_get_pack_mode(informat->code);
+ val |= pack_mode << PACK8IN_SHIFT;
+
+ source1 = ipipeif_get_cfg_src1(ipipeif);
+ val |= source1 << INPSRC1_SHIFT;
+
+ data_shift = ipipeif_get_data_shift(ipipeif);
+ if (ipipeif_source != IPIPEIF_SDRAM_YUV)
+ val |= data_shift << DATASFT_SHIFT;
+ else
+ val &= ~(RD_DATA_15_2 << DATASFT_SHIFT);
+
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG1);
+
+ switch (ipipeif_source) {
+ case IPIPEIF_CCDC:
+ ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
+ break;
+
+ case IPIPEIF_SDRAM_RAW:
+ case IPIPEIF_CCDC_DARKFM:
+ ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
+ /* fall through */
+ case IPIPEIF_SDRAM_YUV:
+ val |= data_shift << DATASFT_SHIFT;
+ ipipeif_write(params.ppln, ipipeif_base_addr, IPIPEIF_PPLN);
+ ipipeif_write(params.lpfr, ipipeif_base_addr, IPIPEIF_LPFR);
+ ipipeif_write(informat->width, ipipeif_base_addr, IPIPEIF_HNUM);
+ ipipeif_write(informat->height,
+ ipipeif_base_addr, IPIPEIF_VNUM);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* check if decimation is enabled or not */
+ if (params.decimation)
+ ipipeif_write(params.rsz, ipipeif_base_addr, IPIPEIF_RSZ);
+
+ /* Setup sync alignment and initial rsz position */
+ val = params.if_5_1.align_sync & 1;
+ val <<= IPIPEIF_INIRSZ_ALNSYNC_SHIFT;
+ val |= params.if_5_1.rsz_start & IPIPEIF_INIRSZ_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
+ isif_port_if = informat->code;
+
+ if (isif_port_if == V4L2_MBUS_FMT_Y8_1X8)
+ isif_port_if = V4L2_MBUS_FMT_YUYV8_1X16;
+ else if (isif_port_if == V4L2_MBUS_FMT_UV8_1X8)
+ isif_port_if = V4L2_MBUS_FMT_SGRBG12_1X12;
+
+ /* Enable DPCM decompression */
+ switch (ipipeif_source) {
+ case IPIPEIF_SDRAM_RAW:
+ val = 0;
+ if (outformat->code == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) {
+ val = 1;
+ val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
+ IPIPEIF_DPCM_BITS_SHIFT;
+ val |= (ipipeif->dpcm_predictor & 1) <<
+ IPIPEIF_DPCM_PRED_SHIFT;
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DPCM);
+
+ /* set DPC */
+ ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
+
+ ipipeif_write(params.if_5_1.clip,
+ ipipeif_base_addr, IPIPEIF_OCLIP);
+
+ /* fall through for SDRAM YUV mode */
+ /* configure CFG2 */
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
+ switch (isif_port_if) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+
+ default:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ RESETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+ }
+
+ case IPIPEIF_SDRAM_YUV:
+ /* Set clock divider */
+ if (params.clock_select == IPIPEIF_SDRAM_CLK) {
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CLKDIV);
+ val |= (params.if_5_1.clk_div.m - 1) <<
+ IPIPEIF_CLKDIV_M_SHIFT;
+ val |= (params.if_5_1.clk_div.n - 1);
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CLKDIV);
+ }
+ break;
+
+ case IPIPEIF_CCDC:
+ case IPIPEIF_CCDC_DARKFM:
+ /* set DPC */
+ ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
+
+ /* Set DF gain & threshold control */
+ val = 0;
+ if (params.if_5_1.df_gain_en) {
+ val = params.if_5_1.df_gain_thr &
+ IPIPEIF_DF_GAIN_THR_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGTH);
+ val = (params.if_5_1.df_gain_en & 1) <<
+ IPIPEIF_DF_GAIN_EN_SHIFT;
+ val |= params.if_5_1.df_gain &
+ IPIPEIF_DF_GAIN_MASK;
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGVL);
+ /* configure CFG2 */
+ val = VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_HDPOL_SHIFT;
+ val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
+
+ switch (isif_port_if) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
+ SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
+ val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
+ break;
+
+ default:
+ /* Bayer */
+ ipipeif_write(params.if_5_1.clip, ipipeif_base_addr,
+ IPIPEIF_OCLIP);
+ }
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ipipeif_set_config(struct v4l2_subdev *sd, struct ipipeif_params *config)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct device *dev = ipipeif->subdev.v4l2_dev->dev;
+
+ if (!config) {
+ dev_err(dev, "Invalid configuration pointer\n");
+ return -EINVAL;
+ }
+
+ ipipeif->config.clock_select = config->clock_select;
+ ipipeif->config.ppln = config->ppln;
+ ipipeif->config.lpfr = config->lpfr;
+ ipipeif->config.rsz = config->rsz;
+ ipipeif->config.decimation = config->decimation;
+ if (ipipeif->config.decimation &&
+ (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
+ ipipeif->config.rsz > IPIPEIF_RSZ_MAX)) {
+ dev_err(dev, "rsz range is %d to %d\n",
+ IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
+ return -EINVAL;
+ }
+
+ ipipeif->config.avg_filter = config->avg_filter;
+
+ ipipeif->config.if_5_1.df_gain_thr = config->if_5_1.df_gain_thr;
+ ipipeif->config.if_5_1.df_gain = config->if_5_1.df_gain;
+ ipipeif->config.if_5_1.df_gain_en = config->if_5_1.df_gain_en;
+
+ ipipeif->config.if_5_1.rsz_start = config->if_5_1.rsz_start;
+ ipipeif->config.if_5_1.align_sync = config->if_5_1.align_sync;
+ ipipeif->config.if_5_1.clip = config->if_5_1.clip;
+
+ ipipeif->config.if_5_1.dpc.en = config->if_5_1.dpc.en;
+ ipipeif->config.if_5_1.dpc.thr = config->if_5_1.dpc.thr;
+
+ ipipeif->config.if_5_1.clk_div.m = config->if_5_1.clk_div.m;
+ ipipeif->config.if_5_1.clk_div.n = config->if_5_1.clk_div.n;
+
+ return 0;
+}
+
+static int
+ipipeif_get_config(struct v4l2_subdev *sd, void *arg)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct device *dev = ipipeif->subdev.v4l2_dev->dev;
+
+ if (!arg) {
+ dev_err(dev, "Invalid configuration pointer\n");
+ return -EINVAL;
+ }
+
+ config->clock_select = ipipeif->config.clock_select;
+ config->ppln = ipipeif->config.ppln;
+ config->lpfr = ipipeif->config.lpfr;
+ config->rsz = ipipeif->config.rsz;
+ config->decimation = ipipeif->config.decimation;
+ config->avg_filter = ipipeif->config.avg_filter;
+
+ config->if_5_1.df_gain_thr = ipipeif->config.if_5_1.df_gain_thr;
+ config->if_5_1.df_gain = ipipeif->config.if_5_1.df_gain;
+ config->if_5_1.df_gain_en = ipipeif->config.if_5_1.df_gain_en;
+
+ config->if_5_1.rsz_start = ipipeif->config.if_5_1.rsz_start;
+ config->if_5_1.align_sync = ipipeif->config.if_5_1.align_sync;
+ config->if_5_1.clip = ipipeif->config.if_5_1.clip;
+
+ config->if_5_1.dpc.en = ipipeif->config.if_5_1.dpc.en;
+ config->if_5_1.dpc.thr = ipipeif->config.if_5_1.dpc.thr;
+
+ config->if_5_1.clk_div.m = ipipeif->config.if_5_1.clk_div.m;
+ config->if_5_1.clk_div.n = ipipeif->config.if_5_1.clk_div.n;
+
+ return 0;
+}
+
+/*
+ * ipipeif_ioctl() - Handle ipipeif module private ioctl's
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long ipipeif_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ int ret = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_IPIPEIF_S_CONFIG:
+ ret = ipipeif_set_config(sd, config);
+ break;
+
+ case VIDIOC_VPFE_IPIPEIF_G_CONFIG:
+ ret = ipipeif_get_config(sd, arg);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * ipipeif_s_ctrl() - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int ipipeif_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_ipipeif_device *ipipeif =
+ container_of(ctrl->handler, struct vpfe_ipipeif_device, ctrls);
+
+ switch (ctrl->id) {
+ case VPFE_CID_DPCM_PREDICTOR:
+ ipipeif->dpcm_predictor = ctrl->val;
+ break;
+
+ case V4L2_CID_GAIN:
+ ipipeif->gain = ctrl->val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define ENABLE_IPIPEIF 0x1
+
+void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+ unsigned char val;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ do {
+ val = ipipeif_read(ipipeif_base_addr, IPIPEIF_ENABLE);
+ } while (val & 0x1);
+
+ ipipeif_write(ENABLE_IPIPEIF, ipipeif_base_addr, IPIPEIF_ENABLE);
+}
+
+/*
+ * ipipeif_set_stream() - Enable/Disable streaming on ipipeif subdev
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
+ int ret = 0;
+
+ if (!enable)
+ return ret;
+
+ ret = ipipeif_hw_setup(sd);
+ if (!ret)
+ vpfe_ipipeif_enable(vpfe_dev);
+
+ return ret;
+}
+
+/*
+ * ipipeif_enum_mbus_code() - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPEIF_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipeif_input_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_input_fmts[code->index];
+ break;
+
+ case IPIPEIF_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(ipipeif_output_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_output_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ipipeif_get_format() - Handle get format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ */
+static int
+ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ fmt->format = ipipeif->formats[fmt->pad];
+ else
+ fmt->format = *(v4l2_subdev_get_try_format(fh, fmt->pad));
+
+ return 0;
+}
+
+#define MIN_OUT_WIDTH 32
+#define MIN_OUT_HEIGHT 32
+
+/*
+ * ipipeif_try_format() - Handle try format by pad subdev method
+ * @ipipeif: VPFE ipipeif device.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which : wanted subdev format
+ */
+static void
+ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ if (pad == IPIPEIF_PAD_SINK) {
+ for (i = 0; i < ARRAY_SIZE(ipipeif_input_fmts); i++)
+ if (fmt->code == ipipeif_input_fmts[i])
+ break;
+
+ /* If not found, use SGRBG12 as default */
+ if (i >= ARRAY_SIZE(ipipeif_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ } else if (pad == IPIPEIF_PAD_SOURCE) {
+ for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
+ if (fmt->code == ipipeif_output_fmts[i])
+ break;
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(ipipeif_output_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ }
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
+}
+
+static int
+ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * __ipipeif_get_format() - helper function for getting ipipeif format
+ * @ipipeif: pointer to ipipeif private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ *
+ */
+static struct v4l2_mbus_framefmt *
+__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+
+ return &ipipeif->formats[pad];
+}
+
+/*
+ * ipipeif_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int
+ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == IPIPEIF_PAD_SINK &&
+ ipipeif->input != IPIPEIF_INPUT_NONE)
+ ipipeif->formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == IPIPEIF_PAD_SOURCE &&
+ ipipeif->output != IPIPEIF_OUTPUT_NONE)
+ ipipeif->formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ipipeif_set_default_config(struct vpfe_ipipeif_device *ipipeif)
+{
+#define WIDTH_I 640
+#define HEIGHT_I 480
+
+ const struct ipipeif_params ipipeif_defaults = {
+ .clock_select = IPIPEIF_SDRAM_CLK,
+ .ppln = WIDTH_I + 8,
+ .lpfr = HEIGHT_I + 10,
+ .rsz = 16, /* resize ratio 16/rsz */
+ .decimation = IPIPEIF_DECIMATION_OFF,
+ .avg_filter = IPIPEIF_AVG_OFF,
+ .if_5_1 = {
+ .clk_div = {
+ .m = 1, /* clock = sdram clock * (m/n) */
+ .n = 6
+ },
+ .clip = 4095,
+ },
+ };
+ ipipeif->config = ipipeif_defaults;
+}
+
+/*
+ * ipipeif_init_formats() - Initialize formats on all pads
+ * @sd: VPFE ipipeif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipeif_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ ipipeif_set_format(sd, fh, &format);
+
+ ipipeif_set_default_config(ipipeif);
+
+ return 0;
+}
+
+/*
+ * ipipeif_video_in_queue() - ipipeif video in queue
+ * @vpfe_dev: vpfe device pointer
+ * @addr: buffer address
+ */
+static int
+ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
+{
+ struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
+ void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
+ unsigned int adofs;
+ u32 val;
+
+ if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
+ return -EINVAL;
+
+ switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_UV8_1X8:
+ case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
+ break;
+
+ default:
+ adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width << 1;
+ break;
+ }
+
+ /* adjust the line len to be a multiple of 32 */
+ adofs += 31;
+ adofs &= ~0x1f;
+ val = (adofs >> 5) & IPIPEIF_ADOFS_LSB_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADOFS);
+
+ /* lower sixteen bits of the address */
+ val = (addr >> IPIPEIF_ADDRL_SHIFT) & IPIPEIF_ADDRL_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRL);
+
+ /* upper eleven bits of the address */
+ val = (addr >> IPIPEIF_ADDRU_SHIFT) & IPIPEIF_ADDRU_MASK;
+ ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRU);
+
+ return 0;
+}
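+
+/*
+ * For illustration: with the mask/shift values from dm365_ipipeif.h a
+ * 32-byte aligned buffer address such as 0x84100020 is split as
+ *
+ * ADDRL = (0x84100020 >> 5) & 0xffff = 0x8001 (bits 20:5)
+ * ADDRU = (0x84100020 >> 21) & 0x7ff = 0x420 (bits 31:21)
+ *
+ * while ADOFS holds the line offset in units of 32 bytes (adofs >> 5).
+ */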
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops ipipeif_v4l2_core_ops = {
+ .ioctl = ipipeif_ioctl,
+};
+
+static const struct v4l2_ctrl_ops ipipeif_ctrl_ops = {
+ .s_ctrl = ipipeif_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vpfe_ipipeif_dpcm_pred = {
+ .ops = &ipipeif_ctrl_ops,
+ .id = VPFE_CID_DPCM_PREDICTOR,
+ .name = "DPCM Predictor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
+ .open = ipipeif_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
+ .s_stream = ipipeif_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
+ .enum_mbus_code = ipipeif_enum_mbus_code,
+ .enum_frame_size = ipipeif_enum_frame_size,
+ .get_fmt = ipipeif_get_format,
+ .set_fmt = ipipeif_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
+ .core = &ipipeif_v4l2_core_ops,
+ .video = &ipipeif_v4l2_video_ops,
+ .pad = &ipipeif_v4l2_pad_ops,
+};
+
+static const struct vpfe_video_operations video_in_ops = {
+ .queue = ipipeif_video_in_queue,
+};
+
+static int
+ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe = to_vpfe_device(ipipeif);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+ /* Single shot mode */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+ ipipeif->input = IPIPEIF_INPUT_MEMORY;
+ break;
+
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* read from isif */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+ if (ipipeif->input != IPIPEIF_INPUT_NONE)
+ return -EBUSY;
+
+ ipipeif->input = IPIPEIF_INPUT_ISIF;
+ break;
+
+ case IPIPEIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->output = IPIPEIF_OUTPUT_NONE;
+ break;
+ }
+ if (remote->entity == &vpfe->vpfe_ipipe.subdev.entity)
+ /* connected to ipipe */
+ ipipeif->output = IPIPEIF_OUTPUT_IPIPE;
+ else if (remote->entity == &vpfe->vpfe_resizer.
+ crop_resizer.subdev.entity)
+ /* connected to resizer */
+ ipipeif->output = IPIPEIF_OUTPUT_RESIZER;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations ipipeif_media_ops = {
+ .link_setup = ipipeif_link_setup,
+};
+
+/*
+ * vpfe_ipipeif_unregister_entities() - Unregister entity
+ * @ipipeif - pointer to ipipeif subdevice structure.
+ */
+void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif)
+{
+ /* unregister video device */
+ vpfe_video_unregister(&ipipeif->video_in);
+
+ /* cleanup entity */
+ media_entity_cleanup(&ipipeif->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+}
+
+int
+vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
+ unsigned int flags;
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
+ if (ret < 0)
+ return ret;
+
+ ret = vpfe_video_register(&ipipeif->video_in, vdev);
+ if (ret) {
+ pr_err("Failed to register ipipeif video-in device\n");
+ goto fail;
+ }
+ ipipeif->video_in.vpfe_dev = vpfe_dev;
+
+ flags = 0;
+ ret = media_entity_create_link(&ipipeif->video_in.video_dev.entity, 0,
+ &ipipeif->subdev.entity, 0, flags);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+fail:
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+
+ return ret;
+}
+
+#define IPIPEIF_GAIN_HIGH 0x3ff
+#define IPIPEIF_DEFAULT_GAIN 0x200
+
+int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &ipipeif->subdev;
+ struct media_pad *pads = &ipipeif->pads[0];
+ struct media_entity *me = &sd->entity;
+ resource_size_t res_len;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+
+ ipipeif->ipipeif_base_addr = ioremap_nocache(res->start, res_len);
+ if (!ipipeif->ipipeif_base_addr) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
+
+ sd->internal_ops = &ipipeif_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI IPIPEIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+
+ v4l2_set_subdevdata(sd, ipipeif);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPEIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ ipipeif->output = IPIPEIF_OUTPUT_NONE;
+ me->ops = &ipipeif_media_ops;
+
+ ret = media_entity_init(me, IPIPEIF_NUM_PADS, pads, 0);
+ if (ret)
+ goto fail;
+
+ v4l2_ctrl_handler_init(&ipipeif->ctrls, 2);
+ v4l2_ctrl_new_std(&ipipeif->ctrls, &ipipeif_ctrl_ops,
+ V4L2_CID_GAIN, 0,
+ IPIPEIF_GAIN_HIGH, 1, IPIPEIF_DEFAULT_GAIN);
+ v4l2_ctrl_new_custom(&ipipeif->ctrls, &vpfe_ipipeif_dpcm_pred, NULL);
+ v4l2_ctrl_handler_setup(&ipipeif->ctrls);
+ sd->ctrl_handler = &ipipeif->ctrls;
+
+ ipipeif->video_in.ops = &video_in_ops;
+ ipipeif->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = vpfe_video_init(&ipipeif->video_in, "IPIPEIF");
+ if (ret) {
+ pr_err("Failed to init IPIPEIF video-in device\n");
+ goto fail;
+ }
+ ipipeif_set_default_config(ipipeif);
+ return 0;
+fail:
+ release_mem_region(res->start, res_len);
+ return ret;
+}
+
+void
+vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ v4l2_ctrl_handler_free(&ipipeif->ctrls);
+ iounmap(ipipeif->ipipeif_base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
new file mode 100644
index 000000000000..608701fc5fed
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_H
+#define _DAVINCI_VPFE_DM365_IPIPEIF_H
+
+#include <linux/platform_device.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "dm365_ipipeif_user.h"
+#include "vpfe_video.h"
+
+/* IPIPE base specific types */
+enum ipipeif_data_shift {
+ IPIPEIF_BITS15_2 = 0,
+ IPIPEIF_BITS14_1 = 1,
+ IPIPEIF_BITS13_0 = 2,
+ IPIPEIF_BITS12_0 = 3,
+ IPIPEIF_BITS11_0 = 4,
+ IPIPEIF_BITS10_0 = 5,
+ IPIPEIF_BITS9_0 = 6,
+};
+
+enum ipipeif_clkdiv {
+ IPIPEIF_DIVIDE_HALF = 0,
+ IPIPEIF_DIVIDE_THIRD = 1,
+ IPIPEIF_DIVIDE_FOURTH = 2,
+ IPIPEIF_DIVIDE_FIFTH = 3,
+ IPIPEIF_DIVIDE_SIXTH = 4,
+ IPIPEIF_DIVIDE_EIGHTH = 5,
+ IPIPEIF_DIVIDE_SIXTEENTH = 6,
+ IPIPEIF_DIVIDE_THIRTY = 7,
+};
+
+enum ipipeif_pack_mode {
+ IPIPEIF_PACK_16_BIT = 0,
+ IPIPEIF_PACK_8_BIT = 1,
+};
+
+enum ipipeif_5_1_pack_mode {
+ IPIPEIF_5_1_PACK_16_BIT = 0,
+ IPIPEIF_5_1_PACK_8_BIT = 1,
+ IPIPEIF_5_1_PACK_8_BIT_A_LAW = 2,
+ IPIPEIF_5_1_PACK_12_BIT = 3
+};
+
+enum ipipeif_input_source {
+ IPIPEIF_CCDC = 0,
+ IPIPEIF_SDRAM_RAW = 1,
+ IPIPEIF_CCDC_DARKFM = 2,
+ IPIPEIF_SDRAM_YUV = 3,
+};
+
+enum ipipeif_ialaw {
+ IPIPEIF_ALAW_OFF = 0,
+ IPIPEIF_ALAW_ON = 1,
+};
+
+enum ipipeif_input_src1 {
+ IPIPEIF_SRC1_PARALLEL_PORT = 0,
+ IPIPEIF_SRC1_SDRAM_RAW = 1,
+ IPIPEIF_SRC1_ISIF_DARKFM = 2,
+ IPIPEIF_SRC1_SDRAM_YUV = 3,
+};
+
+enum ipipeif_dfs_dir {
+ IPIPEIF_PORT_MINUS_SDRAM = 0,
+ IPIPEIF_SDRAM_MINUS_PORT = 1,
+};
+
+enum ipipeif_chroma_phase {
+ IPIPEIF_CBCR_Y = 0,
+ IPIPEIF_Y_CBCR = 1,
+};
+
+enum ipipeif_dpcm_type {
+ IPIPEIF_DPCM_8BIT_10BIT = 0,
+ IPIPEIF_DPCM_8BIT_12BIT = 1,
+};
+
+/* data shift for IPIPE 5.1 */
+enum ipipeif_5_1_data_shift {
+ IPIPEIF_5_1_BITS11_0 = 0,
+ IPIPEIF_5_1_BITS10_0 = 1,
+ IPIPEIF_5_1_BITS9_0 = 2,
+ IPIPEIF_5_1_BITS8_0 = 3,
+ IPIPEIF_5_1_BITS7_0 = 4,
+ IPIPEIF_5_1_BITS15_4 = 5,
+};
+
+#define IPIPEIF_PAD_SINK 0
+#define IPIPEIF_PAD_SOURCE 1
+
+#define IPIPEIF_NUM_PADS 2
+
+enum ipipeif_input_entity {
+ IPIPEIF_INPUT_NONE = 0,
+ IPIPEIF_INPUT_ISIF = 1,
+ IPIPEIF_INPUT_MEMORY = 2,
+};
+
+enum ipipeif_output_entity {
+ IPIPEIF_OUTPUT_NONE = 0,
+ IPIPEIF_OUTPUT_IPIPE = 1,
+ IPIPEIF_OUTPUT_RESIZER = 2,
+};
+
+struct vpfe_ipipeif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPEIF_NUM_PADS];
+ struct v4l2_mbus_framefmt formats[IPIPEIF_NUM_PADS];
+ enum ipipeif_input_entity input;
+ unsigned int output;
+ struct vpfe_video_device video_in;
+ struct v4l2_ctrl_handler ctrls;
+ void __iomem *ipipeif_base_addr;
+ struct ipipeif_params config;
+ int dpcm_predictor;
+ int gain;
+};
+
+/* IPIPEIF Register Offsets from the base address */
+#define IPIPEIF_ENABLE 0x00
+#define IPIPEIF_CFG1 0x04
+#define IPIPEIF_PPLN 0x08
+#define IPIPEIF_LPFR 0x0c
+#define IPIPEIF_HNUM 0x10
+#define IPIPEIF_VNUM 0x14
+#define IPIPEIF_ADDRU 0x18
+#define IPIPEIF_ADDRL 0x1c
+#define IPIPEIF_ADOFS 0x20
+#define IPIPEIF_RSZ 0x24
+#define IPIPEIF_GAIN 0x28
+
+/* The registers below are available only on IPIPE 5.1 */
+#define IPIPEIF_DPCM 0x2c
+#define IPIPEIF_CFG2 0x30
+#define IPIPEIF_INIRSZ 0x34
+#define IPIPEIF_OCLIP 0x38
+#define IPIPEIF_DTUDF 0x3c
+#define IPIPEIF_CLKDIV 0x40
+#define IPIPEIF_DPC1 0x44
+#define IPIPEIF_DPC2 0x48
+#define IPIPEIF_DFSGVL 0x4c
+#define IPIPEIF_DFSGTH 0x50
+#define IPIPEIF_RSZ3A 0x54
+#define IPIPEIF_INIRSZ3A 0x58
+#define IPIPEIF_RSZ_MIN 16
+#define IPIPEIF_RSZ_MAX 112
+#define IPIPEIF_RSZ_CONST 16
+#define SETBIT(reg, bit) (reg = ((reg) | ((0x00000001)<<(bit))))
+#define RESETBIT(reg, bit) (reg = ((reg) & (~(0x00000001<<(bit)))))
+
+#define IPIPEIF_ADOFS_LSB_MASK 0x1ff
+#define IPIPEIF_ADOFS_LSB_SHIFT 5
+#define IPIPEIF_ADOFS_MSB_MASK 0x200
+#define IPIPEIF_ADDRU_MASK 0x7ff
+#define IPIPEIF_ADDRL_SHIFT 5
+#define IPIPEIF_ADDRL_MASK 0xffff
+#define IPIPEIF_ADDRU_SHIFT 21
+#define IPIPEIF_ADDRMSB_SHIFT 31
+#define IPIPEIF_ADDRMSB_LEFT_SHIFT 10
+
+/* CFG1 Masks and shifts */
+#define ONESHOT_SHIFT 0
+#define DECIM_SHIFT 1
+#define INPSRC_SHIFT 2
+#define CLKDIV_SHIFT 4
+#define AVGFILT_SHIFT 7
+#define PACK8IN_SHIFT 8
+#define IALAW_SHIFT 9
+#define CLKSEL_SHIFT 10
+#define DATASFT_SHIFT 11
+#define INPSRC1_SHIFT 14
+
+/* DPC2 */
+#define IPIPEIF_DPC2_EN_SHIFT 12
+#define IPIPEIF_DPC2_THR_MASK 0xfff
+/* Applicable for IPIPE 5.1 */
+#define IPIPEIF_DF_GAIN_EN_SHIFT 10
+#define IPIPEIF_DF_GAIN_MASK 0x3ff
+#define IPIPEIF_DF_GAIN_THR_MASK 0xfff
+/* DPCM */
+#define IPIPEIF_DPCM_BITS_SHIFT 2
+#define IPIPEIF_DPCM_PRED_SHIFT 1
+/* CFG2 */
+#define IPIPEIF_CFG2_HDPOL_SHIFT 1
+#define IPIPEIF_CFG2_VDPOL_SHIFT 2
+#define IPIPEIF_CFG2_YUV8_SHIFT 6
+#define IPIPEIF_CFG2_YUV16_SHIFT 3
+#define IPIPEIF_CFG2_YUV8P_SHIFT 7
+
+/* INIRSZ */
+#define IPIPEIF_INIRSZ_ALNSYNC_SHIFT 13
+#define IPIPEIF_INIRSZ_MASK 0x1fff
+
+/* CLKDIV */
+#define IPIPEIF_CLKDIV_M_SHIFT 8
+
+void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev);
+void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif);
+int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev);
+int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev);
+void vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev);
+int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
+ struct platform_device *pdev);
+int vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev);
+void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif);
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
new file mode 100644
index 000000000000..e2a69b59554a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
+#define _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
+
+/* clockdiv for IPIPE 5.1 */
+struct ipipeif_5_1_clkdiv {
+ unsigned char m;
+ unsigned char n;
+};
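+
+/*
+ * With IPIPEIF_SDRAM_CLK selected the interface clock works out to
+ * sdram_clock * m / n (the driver programs CLKDIV with m - 1 and n - 1);
+ * the driver defaults of m = 1, n = 6, for example, divide the SDRAM
+ * clock by six.
+ */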
+
+enum ipipeif_decimation {
+ IPIPEIF_DECIMATION_OFF,
+ IPIPEIF_DECIMATION_ON
+};
+
+/* DPC (defect pixel correction) at the interface for IPIPE 5.1 */
+struct ipipeif_dpc {
+ /* 0 - disable, 1 - enable */
+ unsigned char en;
+ /* threshold */
+ unsigned short thr;
+};
+
+enum ipipeif_clock {
+ IPIPEIF_PIXCEL_CLK,
+ IPIPEIF_SDRAM_CLK
+};
+
+enum ipipeif_avg_filter {
+ IPIPEIF_AVG_OFF,
+ IPIPEIF_AVG_ON
+};
+
+struct ipipeif_5_1 {
+ struct ipipeif_5_1_clkdiv clk_div;
+ /* Defect pixel correction */
+ struct ipipeif_dpc dpc;
+ /* clipped to this value */
+ unsigned short clip;
+ /* Align HSync and VSync to rsz_start */
+ unsigned char align_sync;
+ /* resizer start position */
+ unsigned int rsz_start;
+ /* DF gain enable */
+ unsigned char df_gain_en;
+ /* DF gain value */
+ unsigned short df_gain;
+ /* DF gain threshold value */
+ unsigned short df_gain_thr;
+};
+
+struct ipipeif_params {
+ enum ipipeif_clock clock_select;
+ unsigned int ppln;
+ unsigned int lpfr;
+ unsigned char rsz;
+ enum ipipeif_decimation decimation;
+ enum ipipeif_avg_filter avg_filter;
+ /* IPIPE 5.1 */
+ struct ipipeif_5_1 if_5_1;
+};
+
+/*
+ * Private IOCTL
+ * VIDIOC_VPFE_IPIPEIF_S_CONFIG: Set IPIPEIF configuration
+ * VIDIOC_VPFE_IPIPEIF_G_CONFIG: Get IPIPEIF configuration
+ */
+#define VIDIOC_VPFE_IPIPEIF_S_CONFIG \
+ _IOWR('I', BASE_VIDIOC_PRIVATE + 1, struct ipipeif_params)
+#define VIDIOC_VPFE_IPIPEIF_G_CONFIG \
+ _IOWR('I', BASE_VIDIOC_PRIVATE + 2, struct ipipeif_params)
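+
+/*
+ * For illustration only, a user-space sketch of issuing these private
+ * ioctls on the ipipeif sub-device node (node name and values are
+ * hypothetical, error handling omitted):
+ *
+ * struct ipipeif_params cfg;
+ * int fd = open("/dev/v4l-subdev2", O_RDWR);
+ *
+ * ioctl(fd, VIDIOC_VPFE_IPIPEIF_G_CONFIG, &cfg);
+ * cfg.if_5_1.dpc.en = 1;
+ * cfg.if_5_1.dpc.thr = 512;
+ * ioctl(fd, VIDIOC_VPFE_IPIPEIF_S_CONFIG, &cfg);
+ */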
+
+#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
new file mode 100644
index 000000000000..ebeea72e176a
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -0,0 +1,2104 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include "dm365_isif.h"
+#include "vpfe_mc_capture.h"
+
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 4096
+
+static const unsigned int isif_fmts[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+ V4L2_MBUS_FMT_YUYV10_1X20,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+};
+
+#define ISIF_COLPTN_R_Ye 0x0
+#define ISIF_COLPTN_Gr_Cy 0x1
+#define ISIF_COLPTN_Gb_G 0x2
+#define ISIF_COLPTN_B_Mg 0x3
+
+#define ISIF_CCOLP_CP01_0 0
+#define ISIF_CCOLP_CP03_2 2
+#define ISIF_CCOLP_CP05_4 4
+#define ISIF_CCOLP_CP07_6 6
+#define ISIF_CCOLP_CP11_0 8
+#define ISIF_CCOLP_CP13_2 10
+#define ISIF_CCOLP_CP15_4 12
+#define ISIF_CCOLP_CP17_6 14
+
+static const u32 isif_sgrbg_pattern =
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP01_0 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP03_2 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP05_4 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP07_6 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP11_0 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP13_2 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP15_4 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP17_6;
+
+static const u32 isif_srggb_pattern =
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP01_0 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP03_2 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP05_4 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP07_6 |
+ ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP11_0 |
+ ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP13_2 |
+ ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
+ ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
+
+static inline u32 isif_read(void __iomem *base_addr, u32 offset)
+{
+ return readl(base_addr + offset);
+}
+
+static inline void isif_write(void __iomem *base_addr, u32 val, u32 offset)
+{
+ writel(val, base_addr + offset);
+}
+
+static inline u32 isif_merge(void __iomem *base_addr, u32 mask, u32 val,
+ u32 offset)
+{
+ u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
+
+ isif_write(base_addr, new_val, offset);
+
+ return new_val;
+}
+
+static void isif_enable_output_to_sdram(struct vpfe_isif_device *isif, int en)
+{
+ isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_WEN_MASK,
+ en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
+}
+
+static inline void
+isif_regw_lin_tbl(struct vpfe_isif_device *isif, u32 val, u32 offset, int i)
+{
+ if (!i)
+ writel(val, isif->isif_cfg.linear_tbl0_addr + offset);
+ else
+ writel(val, isif->isif_cfg.linear_tbl1_addr + offset);
+}
+
+static void isif_disable_all_modules(struct vpfe_isif_device *isif)
+{
+ /* disable BC */
+ isif_write(isif->isif_cfg.base_addr, 0, CLAMPCFG);
+ /* disable vdfc */
+ isif_write(isif->isif_cfg.base_addr, 0, DFCCTL);
+ /* disable CSC */
+ isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
+ /* disable linearization */
+ isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
+}
+
+static void isif_enable(struct vpfe_isif_device *isif, int en)
+{
+ if (!en)
+ /* Before disabling the ISIF, disable all ISIF modules */
+ isif_disable_all_modules(isif);
+
+ /*
+ * wait for next VD. Assume lowest scan rate is 12 Hz. So
+ * 100 msec delay is good enough
+ */
+ msleep(100);
+ isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_VDHDEN_MASK,
+ en, SYNCEN);
+}
+
+/*
+ * ISIF helper functions
+ */
+
+#define DM365_ISIF_MDFS_OFFSET 15
+#define DM365_ISIF_MDFS_MASK 0x1
+
+/* get field id in isif hardware */
+enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
+ u32 field_status;
+
+ field_status = isif_read(isif->isif_cfg.base_addr, MODESET);
+ field_status = (field_status >> DM365_ISIF_MDFS_OFFSET) &
+ DM365_ISIF_MDFS_MASK;
+ return field_status;
+}
+
+static int
+isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (pixfmt == V4L2_PIX_FMT_SBGGR16)
+ isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
+ else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
+ (pixfmt == V4L2_PIX_FMT_SGRBG10ALAW8))
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+ else
+ return -EINVAL;
+
+ isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
+ isif->isif_cfg.bayer.v4l2_pix_fmt = pixfmt;
+ } else {
+ if (pixfmt == V4L2_PIX_FMT_YUYV)
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_YCBYCR;
+ else if (pixfmt == V4L2_PIX_FMT_UYVY)
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
+ else
+ return -EINVAL;
+
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+ isif->isif_cfg.ycbcr.v4l2_pix_fmt = pixfmt;
+ }
+
+ return 0;
+}
+
+static int
+isif_set_frame_format(struct vpfe_isif_device *isif,
+ enum isif_frmfmt frm_fmt)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ isif->isif_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
+
+ return 0;
+}
+
+static int isif_set_image_window(struct vpfe_isif_device *isif)
+{
+ struct v4l2_rect *win = &isif->crop;
+
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ isif->isif_cfg.bayer.win.top = win->top;
+ isif->isif_cfg.bayer.win.left = win->left;
+ isif->isif_cfg.bayer.win.width = win->width;
+ isif->isif_cfg.bayer.win.height = win->height;
+ return 0;
+ }
+ isif->isif_cfg.ycbcr.win.top = win->top;
+ isif->isif_cfg.ycbcr.win.left = win->left;
+ isif->isif_cfg.ycbcr.win.width = win->width;
+ isif->isif_cfg.ycbcr.win.height = win->height;
+
+ return 0;
+}
+
+static int
+isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
+{
+ if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ isif->isif_cfg.bayer.buf_type = buf_type;
+ else
+ isif->isif_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+/* configure format in isif hardware */
+static int
+isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
+{
+ struct vpfe_isif_device *vpfe_isif = &vpfe_dev->vpfe_isif;
+ enum isif_frmfmt frm_fmt = ISIF_FRMFMT_INTERLACED;
+ struct v4l2_pix_format format;
+ int ret = 0;
+
+ v4l2_fill_pix_format(&format, &vpfe_dev->vpfe_isif.formats[pad]);
+ mbus_to_pix(&vpfe_dev->vpfe_isif.formats[pad], &format);
+
+ if (isif_set_pixel_format(vpfe_isif, format.pixelformat) < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Failed to set pixel format in isif\n");
+ return -EINVAL;
+ }
+
+ /* a call to s_crop will override these values */
+ vpfe_isif->crop.left = 0;
+ vpfe_isif->crop.top = 0;
+ vpfe_isif->crop.width = format.width;
+ vpfe_isif->crop.height = format.height;
+
+ /* configure the image window */
+ isif_set_image_window(vpfe_isif);
+
+ switch (vpfe_dev->vpfe_isif.formats[pad].field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_INTERLEAVED);
+ break;
+
+ case V4L2_FIELD_NONE:
+ frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+
+ case V4L2_FIELD_SEQ_TB:
+ ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_SEPARATED);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* set the frame format */
+ if (!ret)
+ ret = isif_set_frame_format(vpfe_isif, frm_fmt);
+
+ return ret;
+}
+
+/*
+ * isif_try_format() - Try video format on a pad
+ * @isif: VPFE isif device
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ */
+static void
+isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ unsigned int width = fmt->format.width;
+ unsigned int height = fmt->format.height;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isif_fmts); i++) {
+ if (fmt->format.code == isif_fmts[i])
+ break;
+ }
+
+ /* If not found, use YUYV8_2x8 as default */
+ if (i >= ARRAY_SIZE(isif_fmts))
+ fmt->format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+ /* Clamp the size. */
+ fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
+ fmt->format.height = clamp_t(u32, height, 32, MAX_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
+ if (fmt->pad == ISIF_PAD_SOURCE)
+ fmt->format.width &= ~15;
+}
+
+/*
+ * vpfe_isif_buffer_isr() - isif module non-progressive buffer scheduling isr
+ * @isif: Pointer to isif subdevice.
+ */
+void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ struct vpfe_video_device *video = &isif->video_out;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video->started)
+ return;
+
+ field = video->fmt.fmt.pix.field;
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (video->cur_frm != video->next_frm)
+ vpfe_video_process_buffer_complete(video);
+ return;
+ }
+
+ /* interlaced or TB capture: check which field we
+ * are in from the hardware
+ */
+ fid = vpfe_isif_get_fid(vpfe_dev);
+
+ /* switch the software maintained field id */
+ video->field_id ^= 1;
+ if (fid == video->field_id) {
+ /* we are in-sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the current
+ * frame and move on
+ */
+ if (video->cur_frm != video->next_frm)
+ vpfe_video_process_buffer_complete(video);
+ /*
+ * based on whether the two fields are stored
+ * interleaved or separately in memory,
+ * reconfigure the ISIF memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_video_schedule_bottom_field(video);
+ return;
+ }
+ /*
+ * one field is just being captured: configure
+ * the next frame by taking a buffer from the
+ * DMA queue; if no buffer is available, hold
+ * on to the current one
+ */
+ spin_lock(&video->dma_queue_lock);
+ if (!list_empty(&video->dma_queue) &&
+ video->cur_frm == video->next_frm)
+ vpfe_video_schedule_next_buffer(video);
+ spin_unlock(&video->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ video->field_id = fid;
+ }
+}
+
+/*
+ * vpfe_isif_vidint1_isr() - ISIF module progressive buffer scheduling isr
+ * @isif: Pointer to isif subdevice.
+ */
+void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif)
+{
+ struct vpfe_video_device *video = &isif->video_out;
+
+ if (!video->started)
+ return;
+
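+ /*
+ * progressive capture: schedule the next buffer from the DMA
+ * queue when one is available and no new buffer has been
+ * scheduled yet (cur_frm still equals next_frm)
+ */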
+ spin_lock(&video->dma_queue_lock);
+ if (video->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
+ !list_empty(&video->dma_queue) && video->cur_frm == video->next_frm)
+ vpfe_video_schedule_next_buffer(video);
+
+ spin_unlock(&video->dma_queue_lock);
+}
+
+/*
+ * VPFE video operations
+ */
+
+static int isif_video_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
+{
+ struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
+
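+ /*
+ * program the SDRAM start address: CADU takes address bits
+ * 31..21 and CADL bits 20..5, so the buffer address must be
+ * 32-byte aligned
+ */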
+ isif_write(isif->isif_cfg.base_addr, (addr >> 21) &
+ ISIF_CADU_BITS, CADU);
+ isif_write(isif->isif_cfg.base_addr, (addr >> 5) &
+ ISIF_CADL_BITS, CADL);
+
+ return 0;
+}
+
+static const struct vpfe_video_operations isif_video_ops = {
+ .queue = isif_video_queue,
+};
+
+/*
+ * V4L2 subdev operations
+ */
+
+/* Parameter operations */
+static int isif_get_params(struct v4l2_subdev *sd, void *params)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+
+ /* only raw module parameters can be retrieved through the IOCTL */
+ if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ return -EINVAL;
+ memcpy(params, &isif->isif_cfg.bayer.config_params,
+ sizeof(isif->isif_cfg.bayer.config_params));
+ return 0;
+}
+
+static int isif_validate_df_csc_params(struct vpfe_isif_df_csc *df_csc)
+{
+ struct vpfe_isif_color_space_conv *csc;
+ int err = -EINVAL;
+ int csc_df_en;
+ int i;
+
+ if (!df_csc->df_or_csc) {
+ /* csc configuration */
+ csc = &df_csc->csc;
+ if (csc->en) {
+ csc_df_en = 1;
+ for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++)
+ if (csc->coeff[i].integer >
+ ISIF_CSC_COEF_INTEG_MASK ||
+ csc->coeff[i].decimal >
+ ISIF_CSC_COEF_DECIMAL_MASK) {
+ pr_err("Invalid CSC coefficients\n");
+ return err;
+ }
+ }
+ }
+ if (df_csc->start_pix > ISIF_DF_CSC_SPH_MASK) {
+ pr_err("Invalid df_csc start pix value\n");
+ return err;
+ }
+
+ if (df_csc->num_pixels > ISIF_DF_NUMPIX) {
+ pr_err("Invalid df_csc num pixels value\n");
+ return err;
+ }
+
+ if (df_csc->start_line > ISIF_DF_CSC_LNH_MASK) {
+ pr_err("Invalid df_csc start_line value\n");
+ return err;
+ }
+
+ if (df_csc->num_lines > ISIF_DF_NUMLINES) {
+ pr_err("Invalid df_csc num_lines value\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define DM365_ISIF_MAX_VDFLSFT 4
+#define DM365_ISIF_MAX_VDFSLV 4095
+#define DM365_ISIF_MAX_DFCMEM0 0x1fff
+#define DM365_ISIF_MAX_DFCMEM1 0x1fff
+
+static int isif_validate_dfc_params(struct vpfe_isif_dfc *dfc)
+{
+ int err = -EINVAL;
+ int i;
+
+ if (!dfc->en)
+ return 0;
+
+ if (dfc->corr_whole_line > 1) {
+ pr_err("Invalid corr_whole_line value\n");
+ return err;
+ }
+
+ if (dfc->def_level_shift > DM365_ISIF_MAX_VDFLSFT) {
+ pr_err("Invalid def_level_shift value\n");
+ return err;
+ }
+
+ if (dfc->def_sat_level > DM365_ISIF_MAX_VDFSLV) {
+ pr_err("Invalid def_sat_level value\n");
+ return err;
+ }
+
+ if (!dfc->num_vdefects ||
+ dfc->num_vdefects > VPFE_ISIF_VDFC_TABLE_SIZE) {
+ pr_err("Invalid num_vdefects value\n");
+ return err;
+ }
+
+ for (i = 0; i < VPFE_ISIF_VDFC_TABLE_SIZE; i++) {
+ if (dfc->table[i].pos_vert > DM365_ISIF_MAX_DFCMEM0) {
+ pr_err("Invalid pos_vert value\n");
+ return err;
+ }
+ if (dfc->table[i].pos_horz > DM365_ISIF_MAX_DFCMEM1) {
+ pr_err("Invalid pos_horz value\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+#define DM365_ISIF_MAX_CLVRV 0xfff
+#define DM365_ISIF_MAX_CLDC 0x1fff
+#define DM365_ISIF_MAX_CLHSH 0x1fff
+#define DM365_ISIF_MAX_CLHSV 0x1fff
+#define DM365_ISIF_MAX_CLVSH 0x1fff
+#define DM365_ISIF_MAX_CLVSV 0x1fff
+#define DM365_ISIF_MAX_HEIGHT_BLACK_REGION 0x1fff
+
+static int isif_validate_bclamp_params(struct vpfe_isif_black_clamp *bclamp)
+{
+ int err = -EINVAL;
+
+ if (bclamp->dc_offset > DM365_ISIF_MAX_CLDC) {
+ pr_err("Invalid bclamp dc_offset value\n");
+ return err;
+ }
+ if (!bclamp->en)
+ return 0;
+ if (bclamp->horz.clamp_pix_limit > 1) {
+ pr_err("Invalid bclamp horz clamp_pix_limit value\n");
+ return err;
+ }
+ if (bclamp->horz.win_count_calc < 1 ||
+ bclamp->horz.win_count_calc > 32) {
+ pr_err("Invalid bclamp horz win_count_calc value\n");
+ return err;
+ }
+ if (bclamp->horz.win_start_h_calc > DM365_ISIF_MAX_CLHSH) {
+ pr_err("Invalid bclamp win_start_v_calc value\n");
+ return err;
+ }
+
+ if (bclamp->horz.win_start_v_calc > DM365_ISIF_MAX_CLHSV) {
+ pr_err("Invalid bclamp win_start_v_calc value\n");
+ return err;
+ }
+ if (bclamp->vert.reset_clamp_val > DM365_ISIF_MAX_CLVRV) {
+ pr_err("Invalid bclamp reset_clamp_val value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_v_sz_calc > DM365_ISIF_MAX_HEIGHT_BLACK_REGION) {
+ pr_err("Invalid bclamp ob_v_sz_calc value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_start_h > DM365_ISIF_MAX_CLVSH) {
+ pr_err("Invalid bclamp ob_start_h value\n");
+ return err;
+ }
+ if (bclamp->vert.ob_start_v > DM365_ISIF_MAX_CLVSV) {
+ pr_err("Invalid bclamp ob_start_h value\n");
+ return err;
+ }
+ return 0;
+}
+
+static int
+isif_validate_raw_params(struct vpfe_isif_raw_config *params)
+{
+ int ret;
+
+ ret = isif_validate_df_csc_params(&params->df_csc);
+ if (ret)
+ return ret;
+ ret = isif_validate_dfc_params(&params->dfc);
+ if (ret)
+ return ret;
+ ret = isif_validate_bclamp_params(&params->bclamp);
+ return ret;
+}
+
+static int isif_set_params(struct v4l2_subdev *sd, void *params)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct vpfe_isif_raw_config isif_raw_params;
+ int ret = -EINVAL;
+
+ /* only raw module parameters can be set through the IOCTL */
+ if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ return ret;
+
+ memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
+ if (!isif_validate_raw_params(&isif_raw_params)) {
+ memcpy(&isif->isif_cfg.bayer.config_params, &isif_raw_params,
+ sizeof(isif_raw_params));
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * isif_ioctl() - isif module private ioctls
+ * @sd: VPFE isif V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
+ ret = isif_set_params(sd, arg);
+ break;
+
+ case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
+ ret = isif_get_params(sd, arg);
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static void isif_config_gain_offset(struct vpfe_isif_device *isif)
+{
+ struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
+ &isif->isif_cfg.bayer.config_params.gain_offset;
+ void __iomem *base = isif->isif_cfg.base_addr;
+ u32 val;
+
+ val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
+ ((gain_off_ptr->gain_ipipe_en & 1) << GAIN_IPIPE_EN_SHIFT) |
+ ((gain_off_ptr->gain_h3a_en & 1) << GAIN_H3A_EN_SHIFT) |
+ ((gain_off_ptr->offset_sdram_en & 1) << OFST_SDRAM_EN_SHIFT) |
+ ((gain_off_ptr->offset_ipipe_en & 1) << OFST_IPIPE_EN_SHIFT) |
+ ((gain_off_ptr->offset_h3a_en & 1) << OFST_H3A_EN_SHIFT);
+ isif_merge(base, GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
+
+ isif_write(base, isif->isif_cfg.isif_gain_params.cr_gain, CRGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cgr_gain, CGRGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cgb_gain, CGBGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.cb_gain, CBGAIN);
+ isif_write(base, isif->isif_cfg.isif_gain_params.offset & OFFSET_MASK,
+ COFSTA);
+}
+
+static void isif_config_bclamp(struct vpfe_isif_device *isif,
+ struct vpfe_isif_black_clamp *bc)
+{
+ u32 val;
+
+ /*
+ * The DC offset is always added to the image data, irrespective of
+ * the black clamp enable status
+ */
+ val = bc->dc_offset & ISIF_BC_DCOFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLDCOFST);
+
+ if (!bc->en)
+ return;
+
+ val = (bc->bc_mode_color & ISIF_BC_MODE_COLOR_MASK) <<
+ ISIF_BC_MODE_COLOR_SHIFT;
+
+ /* Enable BC and horizontal clamp calculation parameters */
+ val = val | 1 | ((bc->horz.mode & ISIF_HORZ_BC_MODE_MASK) <<
+ ISIF_HORZ_BC_MODE_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CLAMPCFG);
+
+ if (bc->horz.mode != VPFE_ISIF_HORZ_BC_DISABLE) {
+ /*
+ * Window count for calculation
+ * Base window selection
+ * pixel limit
+ * Horizontal size of window
+ * vertical size of the window
+ * Horizontal start position of the window
+ * Vertical start position of the window
+ */
+ val = (bc->horz.win_count_calc & ISIF_HORZ_BC_WIN_COUNT_MASK) |
+ ((bc->horz.base_win_sel_calc & 1) <<
+ ISIF_HORZ_BC_WIN_SEL_SHIFT) |
+ ((bc->horz.clamp_pix_limit & 1) <<
+ ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
+ ((bc->horz.win_h_sz_calc &
+ ISIF_HORZ_BC_WIN_H_SIZE_MASK) <<
+ ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
+ ((bc->horz.win_v_sz_calc &
+ ISIF_HORZ_BC_WIN_V_SIZE_MASK) <<
+ ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN0);
+
+ val = bc->horz.win_start_h_calc & ISIF_HORZ_BC_WIN_START_H_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN1);
+
+ val = bc->horz.win_start_v_calc & ISIF_HORZ_BC_WIN_START_V_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLHWIN2);
+ }
+
+ /* vertical clamp calculation parameters */
+ /* OB H Valid */
+ val = bc->vert.ob_h_sz_calc & ISIF_VERT_BC_OB_H_SZ_MASK;
+
+ /* Reset clamp value sel for previous line */
+ val |= (bc->vert.reset_val_sel & ISIF_VERT_BC_RST_VAL_SEL_MASK) <<
+ ISIF_VERT_BC_RST_VAL_SEL_SHIFT;
+
+ /* Line average coefficient */
+ val |= bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN0);
+
+ /* Configured reset value */
+ if (bc->vert.reset_val_sel == VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL) {
+ val = bc->vert.reset_clamp_val & ISIF_VERT_BC_RST_VAL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVRV);
+ }
+
+ /* Optical Black horizontal start position */
+ val = bc->vert.ob_start_h & ISIF_VERT_BC_OB_START_HORZ_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN1);
+
+ /* Optical Black vertical start position */
+ val = bc->vert.ob_start_v & ISIF_VERT_BC_OB_START_VERT_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN2);
+
+ val = bc->vert.ob_v_sz_calc & ISIF_VERT_BC_OB_VERT_SZ_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLVWIN3);
+
+ /* Vertical start position for BC subtraction */
+ val = bc->vert_start_sub & ISIF_BC_VERT_START_SUB_V_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, CLSV);
+}
+
+/* Configure the capture window size in the ISIF registers */
+static void
+isif_setwin(struct vpfe_isif_device *isif, struct v4l2_rect *image_win,
+ enum isif_frmfmt frm_fmt, int ppc, int mode)
+{
+ int horz_nr_pixels;
+ int vert_nr_lines;
+ int horz_start;
+ int vert_start;
+ int mid_img;
+
+ /*
+ * ppc - per pixel count: number of samples output to SDRAM
+ * per pixel. For example, for YCbCr it is one Y and one C,
+ * so 2; for raw capture it is 1.
+ */
+ horz_start = image_win->left << (ppc - 1);
+ horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
+
+ /* Writing the horizontal info into the registers */
+ isif_write(isif->isif_cfg.base_addr,
+ horz_start & START_PX_HOR_MASK, SPH);
+ isif_write(isif->isif_cfg.base_addr,
+ horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
+ vert_start = image_win->top;
+
+ if (frm_fmt == ISIF_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ } else {
+ /* To account for VD since line 0 doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /* configure VDINT0 and VDINT1 */
+ mid_img = vert_start + (image_win->height / 2);
+ isif_write(isif->isif_cfg.base_addr, mid_img, VDINT1);
+ }
+
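+ /*
+ * when mode is 0 the VDINT0 interrupt is programmed at line 0,
+ * otherwise at the last line of the frame
+ */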
+ if (!mode)
+ isif_write(isif->isif_cfg.base_addr, 0, VDINT0);
+ else
+ isif_write(isif->isif_cfg.base_addr, vert_nr_lines, VDINT0);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_start & START_VER_ONE_MASK, SLV0);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_start & START_VER_TWO_MASK, SLV1);
+ isif_write(isif->isif_cfg.base_addr,
+ vert_nr_lines & NUM_LINES_VER, LNV);
+}
+
+#define DM365_ISIF_DFCMWR_MEMORY_WRITE 1
+#define DM365_ISIF_DFCMRD_MEMORY_READ 0x2
+
+static void
+isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
+{
+#define DFC_WRITE_WAIT_COUNT 1000
+ u32 count = DFC_WRITE_WAIT_COUNT;
+ u32 val;
+ int i;
+
+ if (!vdfc->en)
+ return;
+
+ /* Correction mode */
+ val = (vdfc->corr_mode & ISIF_VDFC_CORR_MOD_MASK) <<
+ ISIF_VDFC_CORR_MOD_SHIFT;
+
+ /* Correct whole line or partial */
+ if (vdfc->corr_whole_line)
+ val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+
+ /* level shift value */
+ val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
+ ISIF_VDFC_LEVEL_SHFT_SHIFT;
+
+ isif_write(isif->isif_cfg.base_addr, val, DFCCTL);
+
+ /* Defect saturation level */
+ val = vdfc->def_sat_level & ISIF_VDFC_SAT_LEVEL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, VDFSATLV);
+
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_vert &
+ ISIF_VDFC_POS_MASK, DFCMEM0);
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_horz &
+ ISIF_VDFC_POS_MASK, DFCMEM1);
+ if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_at_pos, DFCMEM2);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_up_pixels, DFCMEM3);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[0].level_low_pixels, DFCMEM4);
+ }
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ /* set DFCMARST and set DFCMWR */
+ val |= 1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT;
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
+
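+ /*
+ * wait for the hardware to clear the DFCMWR bit, which
+ * indicates the table entry has been written
+ */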
+ while (count && (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x01))
+ count--;
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+
+ for (i = 1; i < vdfc->num_vdefects; i++) {
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_vert &
+ ISIF_VDFC_POS_MASK, DFCMEM0);
+
+ isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_horz &
+ ISIF_VDFC_POS_MASK, DFCMEM1);
+
+ if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
+ vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_at_pos, DFCMEM2);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_up_pixels, DFCMEM3);
+ isif_write(isif->isif_cfg.base_addr,
+ vdfc->table[i].level_low_pixels, DFCMEM4);
+ }
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ /* clear DFCMARST and set DFCMWR */
+ val &= ~(1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
+
+ count = DFC_WRITE_WAIT_COUNT;
+ while (count && (isif_read(isif->isif_cfg.base_addr,
+ DFCMEMCTL) & 0x01))
+ count--;
+
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+ }
+ if (vdfc->num_vdefects < VPFE_ISIF_VDFC_TABLE_SIZE) {
+ /* Extra cycle needed */
+ isif_write(isif->isif_cfg.base_addr, 0, DFCMEM0);
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_MAX_DFCMEM1, DFCMEM1);
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_DFCMWR_MEMORY_WRITE, DFCMEMCTL);
+ }
+ /* enable VDFC */
+ isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
+ (1 << ISIF_VDFC_EN_SHIFT), DFCCTL);
+
+ isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
+ (0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
+
+ isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
+ for (i = 0 ; i < vdfc->num_vdefects; i++) {
+ count = DFC_WRITE_WAIT_COUNT;
+ while (count &&
+ (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
+ count--;
+ val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
+ if (!count) {
+ pr_debug("defect table write timeout !!\n");
+ return;
+ }
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_DFCMRD_MEMORY_READ, DFCMEMCTL);
+ }
+}
+
+static void
+isif_config_csc(struct vpfe_isif_device *isif, struct vpfe_isif_df_csc *df_csc)
+{
+ u32 val1;
+ u32 val2;
+ u32 i;
+
+ if (!df_csc->csc.en) {
+ isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
+ return;
+ }
+ /* initialize all bits to 0 */
+ val1 = 0;
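+ /*
+ * coefficients are packed two per CSCM register: the even-indexed
+ * coefficient occupies the LSB half and the following odd-indexed
+ * one the MSB half
+ */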
+ for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++) {
+ if ((i % 2) == 0) {
+ /* CSCM - LSB */
+ val1 = ((df_csc->csc.coeff[i].integer &
+ ISIF_CSC_COEF_INTEG_MASK) <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ ((df_csc->csc.coeff[i].decimal &
+ ISIF_CSC_COEF_DECIMAL_MASK));
+ } else {
+
+ /* CSCM - MSB */
+ val2 = ((df_csc->csc.coeff[i].integer &
+ ISIF_CSC_COEF_INTEG_MASK) <<
+ ISIF_CSC_COEF_INTEG_SHIFT) |
+ ((df_csc->csc.coeff[i].decimal &
+ ISIF_CSC_COEF_DECIMAL_MASK));
+ val2 <<= ISIF_CSCM_MSB_SHIFT;
+ val2 |= val1;
+ isif_write(isif->isif_cfg.base_addr, val2,
+ (CSCM0 + ((i-1) << 1)));
+ }
+ }
+ /* program the active area */
+ isif_write(isif->isif_cfg.base_addr, df_csc->start_pix &
+ ISIF_DF_CSC_SPH_MASK, FMTSPH);
+ /*
+ * one extra pixel is required for CSC. Strictly, the number of
+ * pixels - 1 should be configured in this register, so we would
+ * need to subtract 1 before writing to FMTLNH, but we skip that
+ * since CSC requires the extra pixel
+ */
+ isif_write(isif->isif_cfg.base_addr, df_csc->num_pixels &
+ ISIF_DF_CSC_SPH_MASK, FMTLNH);
+ isif_write(isif->isif_cfg.base_addr, df_csc->start_line &
+ ISIF_DF_CSC_SPH_MASK, FMTSLV);
+ /*
+ * one extra line is required for CSC. See the reasoning documented
+ * for num_pixels
+ */
+ isif_write(isif->isif_cfg.base_addr, df_csc->num_lines &
+ ISIF_DF_CSC_SPH_MASK, FMTLNV);
+ /* Enable CSC */
+ isif_write(isif->isif_cfg.base_addr, 1, CSCCTL);
+}
+
+static void
+isif_config_linearization(struct vpfe_isif_device *isif,
+ struct vpfe_isif_linearize *linearize)
+{
+ u32 val;
+ u32 i;
+
+ if (!linearize->en) {
+ isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
+ return;
+ }
+ /* shift value for correction */
+ val = (linearize->corr_shft & ISIF_LIN_CORRSFT_MASK) <<
+ ISIF_LIN_CORRSFT_SHIFT;
+ /* enable */
+ val |= 1;
+ isif_write(isif->isif_cfg.base_addr, val, LINCFG0);
+ /* Scale factor */
+ val = (linearize->scale_fact.integer & 1) <<
+ ISIF_LIN_SCALE_FACT_INTEG_SHIFT;
+ val |= linearize->scale_fact.decimal & ISIF_LIN_SCALE_FACT_DECIMAL_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, LINCFG1);
+
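+ /*
+ * linearization table entries alternate between the two table
+ * memories: even indices go to table 0, odd indices to table 1,
+ * each at a 4-byte stride
+ */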
+ for (i = 0; i < VPFE_ISIF_LINEAR_TAB_SIZE; i++) {
+ val = linearize->table[i] & ISIF_LIN_ENTRY_MASK;
+ if (i%2)
+ isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 1);
+ else
+ isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 0);
+ }
+}
+
+static void
+isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
+{
+ u32 val;
+
+ /* Horizontal pattern */
+ val = cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT;
+ val |= cul->hcpat_odd;
+ isif_write(isif->isif_cfg.base_addr, val, CULH);
+ /* vertical pattern */
+ isif_write(isif->isif_cfg.base_addr, cul->vcpat, CULV);
+ /* LPF */
+ isif_merge(isif->isif_cfg.base_addr, ISIF_LPF_MASK << ISIF_LPF_SHIFT,
+ cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
+}
+
+static int isif_get_pix_fmt(u32 mbus_code)
+{
+ switch (mbus_code) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return ISIF_PIXFMT_RAW;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ return ISIF_PIXFMT_YCBCR_8BIT;
+
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ return ISIF_PIXFMT_YCBCR_16BIT;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+#define ISIF_INTERLACE_INVERSE_MODE 0x4b6d
+#define ISIF_INTERLACE_NON_INVERSE_MODE 0x0b6d
+#define ISIF_PROGRESSIVE_INVERSE_MODE 0x4000
+#define ISIF_PROGRESSIVE_NON_INVERSE_MODE 0x0000
+
+static int isif_config_raw(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct isif_params_raw *params = &isif->isif_cfg.bayer;
+ struct vpfe_isif_raw_config *module_params =
+ &isif->isif_cfg.bayer.config_params;
+ struct v4l2_mbus_framefmt *format;
+ int pix_fmt;
+ u32 val;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+
+ /* If the user has set BT656IF earlier, it should be reset
+ * when configuring for raw input.
+ */
+ isif_write(isif->isif_cfg.base_addr, 0, REC656IF);
+ /* Configure CCDCFG register
+ * Set CCD not to swap input since input is RAW data
+ * Set FID detection function to Latch at V-Sync
+ * Set WENLOG - isif valid area
+ * Set TRGSEL
+ * Set EXTRG
+ * Packed to 8 or 16 bits
+ */
+ val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
+ ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
+ ISIF_CCDCFG_EXTRG_DISABLE | (isif->isif_cfg.data_pack &
+ ISIF_DATA_PACK_MASK);
+ isif_write(isif->isif_cfg.base_addr, val, CCDCFG);
+
+ pix_fmt = isif_get_pix_fmt(format->code);
+ if (pix_fmt < 0) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /*
+ * Configure the vertical sync polarity(MODESET.VDPOL)
+ * Configure the horizontal sync polarity (MODESET.HDPOL)
+ * Configure frame id polarity (MODESET.FLDPOL)
+ * Configure data polarity
+ * Configure External WEN Selection
+ * Configure frame format(progressive or interlace)
+ * Configure pixel format (Input mode)
+ * Configure the data shift
+ */
+ val = ISIF_VDHDOUT_INPUT | ((params->vd_pol & ISIF_VD_POL_MASK) <<
+ ISIF_VD_POL_SHIFT) | ((params->hd_pol & ISIF_HD_POL_MASK) <<
+ ISIF_HD_POL_SHIFT) | ((params->fid_pol & ISIF_FID_POL_MASK) <<
+ ISIF_FID_POL_SHIFT) | ((ISIF_DATAPOL_NORMAL &
+ ISIF_DATAPOL_MASK) << ISIF_DATAPOL_SHIFT) | ((ISIF_EXWEN_DISABLE &
+ ISIF_EXWEN_MASK) << ISIF_EXWEN_SHIFT) | ((params->frm_fmt &
+ ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
+ ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
+
+ /* currently only V4L2_MBUS_FMT_SGRBG12_1X12 is
+ * supported. Adjust the data shift appropriately
+ * as different media bus formats are added
+ */
+ if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ val |= ((VPFE_ISIF_NO_SHIFT &
+ ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, MODESET);
+ /*
+ * Configure GAMMAWD register
+ * CFA pattern setting
+ */
+ val = (params->cfa_pat & ISIF_GAMMAWD_CFA_MASK) <<
+ ISIF_GAMMAWD_CFA_SHIFT;
+ /* Gamma msb */
+ if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10ALAW8)
+ val = val | ISIF_ALAW_ENABLE;
+
+ val = val | ((params->data_msb & ISIF_ALAW_GAMA_WD_MASK) <<
+ ISIF_ALAW_GAMA_WD_SHIFT);
+
+ isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
+ /* Configure DPCM compression settings */
+ if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
+ val = 1 << ISIF_DPCM_EN_SHIFT;
+ val |= (params->dpcm_predictor &
+ ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
+ }
+ isif_write(isif->isif_cfg.base_addr, val, MISC);
+ /* Configure Gain & Offset */
+ isif_config_gain_offset(isif);
+ /* Configure Color pattern */
+ if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ val = isif_sgrbg_pattern;
+ else
+ /* default set to rggb */
+ val = isif_srggb_pattern;
+
+ isif_write(isif->isif_cfg.base_addr, val, CCOLP);
+
+ /* Configure HSIZE register */
+ val = (params->horz_flip_en & ISIF_HSIZE_FLIP_MASK) <<
+ ISIF_HSIZE_FLIP_SHIFT;
+
+ /* calculate the line offset in 32-byte units based on the pack value */
+ if (isif->isif_cfg.data_pack == ISIF_PACK_8BIT)
+ val |= ((params->win.width + 31) >> 5) & ISIF_LINEOFST_MASK;
+ else if (isif->isif_cfg.data_pack == ISIF_PACK_12BIT)
+ val |= ((((params->win.width + (params->win.width >> 2)) +
+ 31) >> 5) & ISIF_LINEOFST_MASK);
+ else
+ val |= (((params->win.width * 2) + 31) >> 5) &
+ ISIF_LINEOFST_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, HSIZE);
+ /* Configure SDOFST register */
+ if (params->frm_fmt == ISIF_FRMFMT_INTERLACED) {
+ if (params->image_invert_en)
+ /* For interlace inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_INTERLACE_INVERSE_MODE, SDOFST);
+ else
+ /* For interlace non inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_INTERLACE_NON_INVERSE_MODE, SDOFST);
+ } else if (params->frm_fmt == ISIF_FRMFMT_PROGRESSIVE) {
+ if (params->image_invert_en)
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_PROGRESSIVE_INVERSE_MODE, SDOFST);
+ else
+ /* For progressive non-inverse mode */
+ isif_write(isif->isif_cfg.base_addr,
+ ISIF_PROGRESSIVE_NON_INVERSE_MODE, SDOFST);
+ }
+ /* Configure video window */
+ isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
+ /* Configure Black Clamp */
+ isif_config_bclamp(isif, &module_params->bclamp);
+ /* Configure Vertical Defect Pixel Correction */
+ isif_config_dfc(isif, &module_params->dfc);
+ if (!module_params->df_csc.df_or_csc)
+ /* Configure Color Space Conversion */
+ isif_config_csc(isif, &module_params->df_csc);
+
+ isif_config_linearization(isif, &module_params->linearize);
+ /* Configure Culling */
+ isif_config_culling(isif, &module_params->culling);
+ /* Configure Horizontal and vertical offsets(DFC,LSC,Gain) */
+ val = module_params->horz_offset & ISIF_DATA_H_OFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, DATAHOFST);
+
+ val = module_params->vert_offset & ISIF_DATA_V_OFFSET_MASK;
+ isif_write(isif->isif_cfg.base_addr, val, DATAVOFST);
+
+ return 0;
+}
+
+#define DM365_ISIF_HSIZE_MASK 0xffffffe0
+#define DM365_ISIF_SDOFST_2_LINES 0x00000249
+
+/* This function will configure ISIF for YCbCr parameters. */
+static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct isif_ycbcr_config *params = &isif->isif_cfg.ycbcr;
+ struct v4l2_mbus_framefmt *format;
+ int pix_fmt;
+ u32 modeset;
+ u32 ccdcfg;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+ /*
+ * first reset the ISIF
+ * all registers have default values after reset
+ * This is important since we assume default values to be set in
+ * a lot of registers that we didn't touch
+ */
+ /* start with all bits zero */
+ ccdcfg = modeset = 0;
+ pix_fmt = isif_get_pix_fmt(format->code);
+ if (pix_fmt < 0) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* configure pixel format or input mode */
+ modeset = modeset | ((pix_fmt & ISIF_INPUT_MASK) <<
+ ISIF_INPUT_SHIFT) | ((params->frm_fmt & ISIF_FRM_FMT_MASK) <<
+ ISIF_FRM_FMT_SHIFT) | (((params->fid_pol &
+ ISIF_FID_POL_MASK) << ISIF_FID_POL_SHIFT)) |
+ (((params->hd_pol & ISIF_HD_POL_MASK) << ISIF_HD_POL_SHIFT)) |
+ (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
+ /* pack the data to 8-bit in CCDCFG */
+ switch (format->code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ modeset |= ((VPFE_PINPOL_NEGATIVE & ISIF_VD_POL_MASK) <<
+ ISIF_VD_POL_SHIFT);
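+ /* set up BT.656, embedded sync */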
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ /* setup BT.656, embedded sync */
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ /* enable 10 bit mode in ccdcfg */
+ ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR |
+ ISIF_BW656_ENABLE;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ ccdcfg |= ISIF_PACK_8BIT;
+ ccdcfg |= ISIF_YCINSWP_YCBCR;
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
+ pr_debug("Invalid pix_fmt(input mode)\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ /* should never come here */
+ pr_debug("Invalid interface type\n");
+ return -EINVAL;
+ }
+ isif_write(isif->isif_cfg.base_addr, modeset, MODESET);
+ /* Set up pix order */
+ ccdcfg |= (params->pix_order & ISIF_PIX_ORDER_MASK) <<
+ ISIF_PIX_ORDER_SHIFT;
+ isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
+ /* configure video window */
+ if (format->code == V4L2_MBUS_FMT_YUYV10_1X20 ||
+ format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
+ else
+ isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
+
+ /*
+ * configure the horizontal line offset
+ * this is done by rounding up width to a multiple of 16 pixels
+ * and multiplying by two to account for Y:Cb:Cr 4:2:2 data
+ */
+ isif_write(isif->isif_cfg.base_addr,
+ ((((params->win.width * 2) + 31) &
+ DM365_ISIF_HSIZE_MASK) >> 5), HSIZE);
+
+ /* configure the memory line offset */
+ if (params->frm_fmt == ISIF_FRMFMT_INTERLACED &&
+ params->buf_type == ISIF_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ isif_write(isif->isif_cfg.base_addr,
+ DM365_ISIF_SDOFST_2_LINES, SDOFST);
+ return 0;
+}
+
+static int isif_configure(struct v4l2_subdev *sd, int mode)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = &isif->formats[ISIF_PAD_SINK];
+
+ switch (format->code) {
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ return isif_config_raw(sd, mode);
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_YUYV10_2X10:
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ return isif_config_ycbcr(sd, mode);
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+/*
+ * isif_set_stream() - Enable/Disable streaming on the ISIF module
+ * @sd: VPFE ISIF V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int isif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ int ret;
+
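+ /*
+ * mode 0 is used when capturing to memory, mode 1 otherwise
+ * (e.g. when routing the data to the IPIPEIF); isif_setwin()
+ * programs VDINT0 differently for the two modes
+ */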
+ if (enable) {
+ ret = isif_configure(sd,
+ (isif->output == ISIF_OUTPUT_MEMORY) ? 0 : 1);
+ if (ret)
+ return ret;
+ if (isif->output == ISIF_OUTPUT_MEMORY)
+ isif_enable_output_to_sdram(isif, 1);
+ isif_enable(isif, 1);
+ } else {
+ isif_enable(isif, 0);
+ isif_enable_output_to_sdram(isif, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * __isif_get_format() - helper function for getting isif format
+ * @isif: pointer to isif private structure.
+ * @pad: pad number.
+ * @fh: V4L2 subdev file handle.
+ * @which: wanted subdev format.
+ */
+static struct v4l2_mbus_framefmt *
+__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+
+ return &isif->formats[pad];
+}
+
+/*
+ * isif_set_format() - set format on pad
+ * @sd: VPFE ISIF device
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return 0 on success or -EINVAL if format or pad is invalid
+ */
+static int
+isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __isif_get_format(isif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ isif_try_format(isif, fh, fmt);
+ memcpy(format, &fmt->format, sizeof(*format));
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (fmt->pad == ISIF_PAD_SOURCE)
+ return isif_config_format(vpfe_dev, fmt->pad);
+
+ return 0;
+}
+
+/*
+ * isif_get_format() - Retrieve the video format on a pad
+ * @sd: VPFE ISIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int
+isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __isif_get_format(vpfe_isif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ memcpy(&fmt->format, format, sizeof(fmt->format));
+
+ return 0;
+}
+
+/*
+ * isif_enum_frame_size() - enum frame sizes on pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @fse: pointer to v4l2_subdev_frame_size_enum structure
+ */
+static int
+isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
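+ /*
+ * run a minimal (1x1) and then a maximal request through
+ * isif_try_format(); the clamped results yield the minimum and
+ * maximum supported frame sizes for this media bus code
+ */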
+ format.pad = fse->pad;
+ format.format.code = fse->code;
+ format.format.width = 1;
+ format.format.height = 1;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
+ isif_try_format(isif, fh, &format);
+ fse->min_width = format.format.width;
+ fse->min_height = format.format.height;
+
+ if (format.format.code != fse->code)
+ return -EINVAL;
+
+ format.pad = fse->pad;
+ format.format.code = fse->code;
+ format.format.width = -1;
+ format.format.height = -1;
+ format.which = V4L2_SUBDEV_FORMAT_TRY;
+ isif_try_format(isif, fh, &format);
+ fse->max_width = format.format.width;
+ fse->max_height = format.format.height;
+
+ return 0;
+}
+
+/*
+ * isif_enum_mbus_code() - enum mbus codes for pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int
+isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case ISIF_PAD_SINK:
+ case ISIF_PAD_SOURCE:
+ if (code->index >= ARRAY_SIZE(isif_fmts))
+ return -EINVAL;
+ code->code = isif_fmts[code->index];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * isif_pad_set_crop() - set crop rectangle on pad
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: pointer to v4l2_subdev_crop structure
+ *
+ * Return 0 on success, -EINVAL if pad is invalid
+ */
+static int
+isif_pad_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ /* check whether it is a valid pad */
+ if (crop->pad != ISIF_PAD_SINK)
+ return -EINVAL;
+
+ format = __isif_get_format(vpfe_isif, fh, crop->pad, crop->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ /* check whether the crop rect is within limits */
+ if (crop->rect.top < 0 || crop->rect.left < 0 ||
+ (crop->rect.left + crop->rect.width >
+ vpfe_isif->formats[ISIF_PAD_SINK].width) ||
+ (crop->rect.top + crop->rect.height >
+ vpfe_isif->formats[ISIF_PAD_SINK].height)) {
+ crop->rect.left = 0;
+ crop->rect.top = 0;
+ crop->rect.width = format->width;
+ crop->rect.height = format->height;
+ }
+ /* adjust the width to a 16 pixel boundary */
+ crop->rect.width = ((crop->rect.width + 15) & ~0xf);
+ vpfe_isif->crop = crop->rect;
+ if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isif_set_image_window(vpfe_isif);
+ } else {
+ struct v4l2_rect *rect;
+
+ rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ memcpy(rect, &vpfe_isif->crop, sizeof(*rect));
+ }
+ return 0;
+}
+
+/*
+ * isif_pad_get_crop() - get crop rectangle on pad
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @crop: pointer to v4l2_subdev_crop structure
+ *
+ * Return 0 on success, -EINVAL if pad is invalid
+ */
+static int
+isif_pad_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
+
+ /* check whether it is a valid pad */
+ if (crop->pad != ISIF_PAD_SINK)
+ return -EINVAL;
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_rect *rect;
+ rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
+ memcpy(&crop->rect, rect, sizeof(*rect));
+ } else {
+ crop->rect = vpfe_isif->crop;
+ }
+
+ return 0;
+}
+
+/*
+ * isif_init_formats() - Initialize formats on all pads
+ * @sd: VPFE isif V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int
+isif_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev_crop crop;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = ISIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = MAX_WIDTH;
+ format.format.height = MAX_HEIGHT;
+ isif_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = ISIF_PAD_SOURCE;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.width = MAX_WIDTH;
+ format.format.height = MAX_HEIGHT;
+ isif_set_format(sd, fh, &format);
+
+ memset(&crop, 0, sizeof(crop));
+ crop.pad = ISIF_PAD_SINK;
+ crop.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ crop.rect.width = MAX_WIDTH;
+ crop.rect.height = MAX_HEIGHT;
+ isif_pad_set_crop(sd, fh, &crop);
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops isif_v4l2_core_ops = {
+ .ioctl = isif_ioctl,
+};
+
+/* subdev file operations */
+static const struct v4l2_subdev_internal_ops isif_v4l2_internal_ops = {
+ .open = isif_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops isif_v4l2_video_ops = {
+ .s_stream = isif_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops isif_v4l2_pad_ops = {
+ .enum_mbus_code = isif_enum_mbus_code,
+ .enum_frame_size = isif_enum_frame_size,
+ .get_fmt = isif_get_format,
+ .set_fmt = isif_set_format,
+ .set_crop = isif_pad_set_crop,
+ .get_crop = isif_pad_get_crop,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops isif_v4l2_ops = {
+ .core = &isif_v4l2_core_ops,
+ .video = &isif_v4l2_video_ops,
+ .pad = &isif_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * isif_link_setup() - Setup isif connections
+ * @entity: isif media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int
+isif_link_setup(struct media_entity *entity, const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
+
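+ /*
+ * the local pad index is OR'ed with the remote entity type so a
+ * single switch can tell apart subdev and video device node links
+ * on each pad
+ */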
+ switch (local->index | media_entity_type(remote->entity)) {
+ case ISIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* read from decoder/sensor */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ isif->input = ISIF_INPUT_NONE;
+ break;
+ }
+ if (isif->input != ISIF_INPUT_NONE)
+ return -EBUSY;
+ isif->input = ISIF_INPUT_PARALLEL;
+ break;
+
+ case ISIF_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ /* write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ isif->output = ISIF_OUTPUT_MEMORY;
+ else
+ isif->output = ISIF_OUTPUT_NONE;
+ break;
+
+ case ISIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ isif->output = ISIF_OUTPUT_IPIPEIF;
+ else
+ isif->output = ISIF_OUTPUT_NONE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations isif_media_ops = {
+ .link_setup = isif_link_setup,
+};
+
+/*
+ * vpfe_isif_unregister_entities() - isif unregister entity
+ * @isif: pointer to isif subdevice structure.
+ */
+void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif)
+{
+ vpfe_video_unregister(&isif->video_out);
+ /* cleanup entity */
+ media_entity_cleanup(&isif->subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&isif->subdev);
+}
+
+static void isif_restore_defaults(struct vpfe_isif_device *isif)
+{
+ enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
+ int i;
+
+ memset(&isif->isif_cfg.bayer.config_params, 0,
+ sizeof(struct vpfe_isif_raw_config));
+
+ isif->isif_cfg.bayer.config_params.linearize.corr_shft =
+ VPFE_ISIF_NO_SHIFT;
+ isif->isif_cfg.bayer.config_params.linearize.scale_fact.integer = 1;
+ isif->isif_cfg.bayer.config_params.culling.hcpat_odd =
+ ISIF_CULLING_HCAPT_ODD;
+ isif->isif_cfg.bayer.config_params.culling.hcpat_even =
+ ISIF_CULLING_HCAPT_EVEN;
+ isif->isif_cfg.bayer.config_params.culling.vcpat = ISIF_CULLING_VCAPT;
+ /* Enable clock to ISIF, IPIPEIF and BL */
+ vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
+ vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+ vpss_enable_clock(VPSS_BL_CLOCK, 1);
+
+ /* set all registers to default value */
+ for (i = 0; i <= 0x1f8; i += 4)
+ isif_write(isif->isif_cfg.base_addr, 0, i);
+ /* no culling support */
+ isif_write(isif->isif_cfg.base_addr, 0xffff, CULH);
+ isif_write(isif->isif_cfg.base_addr, 0xff, CULV);
+
+ /* Set default offset and gain */
+ isif_config_gain_offset(isif);
+ vpss_select_ccdc_source(source);
+}
+
+/*
+ * vpfe_isif_register_entities() - isif register entity
+ * @isif: pointer to isif subdevice structure.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
+ unsigned int flags;
+ int ret;
+
+ /* Register the subdev */
+ ret = v4l2_device_register_subdev(vdev, &isif->subdev);
+ if (ret < 0)
+ return ret;
+
+ isif_restore_defaults(isif);
+ ret = vpfe_video_register(&isif->video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register isif video out device\n");
+ goto out_video_register;
+ }
+ isif->video_out.vpfe_dev = vpfe_dev;
+ flags = 0;
+ /* connect isif to video node */
+ ret = media_entity_create_link(&isif->subdev.entity, 1,
+ &isif->video_out.video_dev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+ return 0;
+out_create_link:
+ vpfe_video_unregister(&isif->video_out);
+out_video_register:
+ v4l2_device_unregister_subdev(&isif->subdev);
+ return ret;
+}
+
+/* -------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+
+static int vpfe_isif_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpfe_isif_device *isif =
+ container_of(ctrl->handler, struct vpfe_isif_device, ctrls);
+ struct isif_oper_config *config = &isif->isif_cfg;
+
+ switch (ctrl->id) {
+ case VPFE_CID_DPCM_PREDICTOR:
+ config->bayer.dpcm_predictor = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CRGAIN:
+ config->isif_gain_params.cr_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CGRGAIN:
+ config->isif_gain_params.cgr_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CGBGAIN:
+ config->isif_gain_params.cgb_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_CBGAIN:
+ config->isif_gain_params.cb_gain = ctrl->val;
+ break;
+
+ case VPFE_ISIF_CID_GAIN_OFFSET:
+ config->isif_gain_params.offset = ctrl->val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vpfe_isif_ctrl_ops = {
+ .s_ctrl = vpfe_isif_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_dpcm_pred = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_CID_DPCM_PREDICTOR,
+ .name = "DPCM Predictor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_crgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CRGAIN,
+ .name = "CRGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cgrgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CGRGAIN,
+ .name = "CGRGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cgbgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CGBGAIN,
+ .name = "CGBGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_cbgain = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_CBGAIN,
+ .name = "CBGAIN",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config vpfe_isif_gain_offset = {
+ .ops = &vpfe_isif_ctrl_ops,
+ .id = VPFE_ISIF_CID_GAIN_OFFSET,
+ .name = "Gain Offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 12) - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static void isif_remove(struct vpfe_isif_device *isif,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int i = 0;
+
+ iounmap(isif->isif_cfg.base_addr);
+ iounmap(isif->isif_cfg.linear_tbl0_addr);
+ iounmap(isif->isif_cfg.linear_tbl1_addr);
+
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res)
+ release_mem_region(res->start,
+ res->end - res->start + 1);
+ i++;
+ }
+}
+
+static void isif_config_defaults(struct vpfe_isif_device *isif)
+{
+ isif->isif_cfg.ycbcr.v4l2_pix_fmt = V4L2_PIX_FMT_UYVY;
+ isif->isif_cfg.ycbcr.pix_fmt = ISIF_PIXFMT_YCBCR_8BIT;
+ isif->isif_cfg.ycbcr.frm_fmt = ISIF_FRMFMT_INTERLACED;
+ isif->isif_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
+ isif->isif_cfg.ycbcr.buf_type = ISIF_BUFTYPE_FLD_INTERLEAVED;
+
+ isif->isif_cfg.bayer.v4l2_pix_fmt = V4L2_PIX_FMT_SGRBG10ALAW8;
+ isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
+ isif->isif_cfg.bayer.frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
+ isif->isif_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
+ isif->isif_cfg.bayer.cfa_pat = ISIF_CFA_PAT_MOSAIC;
+ isif->isif_cfg.bayer.data_msb = ISIF_BIT_MSB_11;
+ isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
+}
+
+/*
+ * vpfe_isif_init() - Initialize V4L2 subdev and media entity
+ * @isif: VPFE isif module
+ * @pdev: Pointer to platform device structure.
+ * Return 0 on success and a negative error code on failure.
+ */
+int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &isif->subdev;
+ struct media_pad *pads = &isif->pads[0];
+ struct media_entity *me = &sd->entity;
+ static resource_size_t res_len;
+ struct resource *res;
+ void __iomem *addr;
+ int status;
+ int i = 0;
+
+ /* Get the ISIF base address, linearization table0 and table1 addr. */
+ while (i < 3) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ status = -ENOENT;
+ goto fail_nobase_res;
+ }
+ res_len = res->end - res->start + 1;
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res) {
+ status = -EBUSY;
+ goto fail_nobase_res;
+ }
+ addr = ioremap_nocache(res->start, res_len);
+ if (!addr) {
+ status = -EBUSY;
+ goto fail_base_iomap;
+ }
+ switch (i) {
+ case 0:
+ /* ISIF base address */
+ isif->isif_cfg.base_addr = addr;
+ break;
+ case 1:
+ /* ISIF linear tbl0 address */
+ isif->isif_cfg.linear_tbl0_addr = addr;
+ break;
+ default:
+ /* ISIF linear tbl1 address */
+ isif->isif_cfg.linear_tbl1_addr = addr;
+ break;
+ }
+ i++;
+ }
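+ /* set up the DM365 pinmux for the camera WEN/VD/HD and Y data input lines */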
+ davinci_cfg_reg(DM365_VIN_CAM_WEN);
+ davinci_cfg_reg(DM365_VIN_CAM_VD);
+ davinci_cfg_reg(DM365_VIN_CAM_HD);
+ davinci_cfg_reg(DM365_VIN_YIN4_7_EN);
+ davinci_cfg_reg(DM365_VIN_YIN0_3_EN);
+
+ /* queue ops */
+ isif->video_out.ops = &isif_video_ops;
+ v4l2_subdev_init(sd, &isif_v4l2_ops);
+ sd->internal_ops = &isif_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI ISIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, isif);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ pads[ISIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[ISIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ isif->input = ISIF_INPUT_NONE;
+ isif->output = ISIF_OUTPUT_NONE;
+ me->ops = &isif_media_ops;
+ status = media_entity_init(me, ISIF_PADS_NUM, pads, 0);
+ if (status)
+ goto isif_fail;
+ isif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ status = vpfe_video_init(&isif->video_out, "ISIF");
+ if (status) {
+ pr_err("Failed to init isif-out video device\n");
+ goto isif_fail;
+ }
+ v4l2_ctrl_handler_init(&isif->ctrls, 6);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_crgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgrgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgbgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cbgain, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_gain_offset, NULL);
+ v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_dpcm_pred, NULL);
+
+ v4l2_ctrl_handler_setup(&isif->ctrls);
+ sd->ctrl_handler = &isif->ctrls;
+ isif_config_defaults(isif);
+ return 0;
+fail_base_iomap:
+ release_mem_region(res->start, res_len);
+ i--;
+fail_nobase_res:
+ if (isif->isif_cfg.base_addr)
+ iounmap(isif->isif_cfg.base_addr);
+ if (isif->isif_cfg.linear_tbl0_addr)
+ iounmap(isif->isif_cfg.linear_tbl0_addr);
+
+ while (i >= 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ release_mem_region(res->start, res_len);
+ i--;
+ }
+ return status;
+isif_fail:
+ v4l2_ctrl_handler_free(&isif->ctrls);
+ isif_remove(isif, pdev);
+ return status;
+}
+
+/*
+ * vpfe_isif_cleanup - isif module cleanup
+ * @isif: pointer to isif subdevice
+ * @dev: pointer to platform device structure
+ */
+void
+vpfe_isif_cleanup(struct vpfe_isif_device *isif, struct platform_device *pdev)
+{
+ isif_remove(isif, pdev);
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
new file mode 100644
index 000000000000..473fd2cfe350
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_ISIF_H
+#define _DAVINCI_VPFE_DM365_ISIF_H
+
+#include <linux/platform_device.h>
+
+#include <mach/mux.h>
+
+#include <media/davinci/vpfe_types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "davinci_vpfe_user.h"
+#include "dm365_isif_regs.h"
+#include "vpfe_video.h"
+
+#define ISIF_CULLING_HCAPT_ODD 0xff
+#define ISIF_CULLING_HCAPT_EVEN 0xff
+#define ISIF_CULLING_VCAPT 0xff
+
+#define ISIF_CADU_BITS 0x07ff
+#define ISIF_CADL_BITS 0x0ffff
+
+enum isif_pixfmt {
+ ISIF_PIXFMT_RAW = 0,
+ ISIF_PIXFMT_YCBCR_16BIT = 1,
+ ISIF_PIXFMT_YCBCR_8BIT = 2,
+};
+
+enum isif_frmfmt {
+ ISIF_FRMFMT_PROGRESSIVE = 0,
+ ISIF_FRMFMT_INTERLACED = 1,
+};
+
+/* PIXEL ORDER IN MEMORY from LSB to MSB */
+/* only applicable for 8-bit input mode */
+enum isif_pixorder {
+ ISIF_PIXORDER_YCBYCR = 0,
+ ISIF_PIXORDER_CBYCRY = 1,
+};
+
+enum isif_buftype {
+ ISIF_BUFTYPE_FLD_INTERLEAVED = 0,
+ ISIF_BUFTYPE_FLD_SEPARATED = 1,
+};
+
+struct isif_ycbcr_config {
+ /* v4l2 pixel format */
+ unsigned long v4l2_pix_fmt;
+ /* isif pixel format */
+ enum isif_pixfmt pix_fmt;
+ /* isif frame format */
+ enum isif_frmfmt frm_fmt;
+ /* isif crop window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* isif pix order. Only used for ycbcr capture */
+ enum isif_pixorder pix_order;
+ /* isif buffer type. Only used for ycbcr capture */
+ enum isif_buftype buf_type;
+};
+
+enum isif_cfa_pattern {
+ ISIF_CFA_PAT_MOSAIC = 0,
+ ISIF_CFA_PAT_STRIPE = 1,
+};
+
+enum isif_data_msb {
+ /* MSB b15 */
+ ISIF_BIT_MSB_15 = 0,
+ /* MSB b14 */
+ ISIF_BIT_MSB_14 = 1,
+ /* MSB b13 */
+ ISIF_BIT_MSB_13 = 2,
+ /* MSB b12 */
+ ISIF_BIT_MSB_12 = 3,
+ /* MSB b11 */
+ ISIF_BIT_MSB_11 = 4,
+ /* MSB b10 */
+ ISIF_BIT_MSB_10 = 5,
+ /* MSB b9 */
+ ISIF_BIT_MSB_9 = 6,
+ /* MSB b8 */
+ ISIF_BIT_MSB_8 = 7,
+ /* MSB b7 */
+ ISIF_BIT_MSB_7 = 8,
+};
+
+struct isif_params_raw {
+ /* v4l2 pixel format */
+ unsigned long v4l2_pix_fmt;
+ /* isif pixel format */
+ enum isif_pixfmt pix_fmt;
+ /* isif frame format */
+ enum isif_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* buffer type. Applicable for interlaced mode */
+ enum isif_buftype buf_type;
+ /* cfa pattern */
+ enum isif_cfa_pattern cfa_pat;
+ /* Data MSB position */
+ enum isif_data_msb data_msb;
+ /* Enable horizontal flip */
+ unsigned char horz_flip_en;
+ /* Enable image invert vertically */
+ unsigned char image_invert_en;
+ unsigned char dpcm_predictor;
+ struct vpfe_isif_raw_config config_params;
+};
+
+enum isif_data_pack {
+ ISIF_PACK_16BIT = 0,
+ ISIF_PACK_12BIT = 1,
+ ISIF_PACK_8BIT = 2,
+};
+
+struct isif_gain_values {
+ unsigned int cr_gain;
+ unsigned int cgr_gain;
+ unsigned int cgb_gain;
+ unsigned int cb_gain;
+ unsigned int offset;
+};
+
+struct isif_oper_config {
+ struct isif_ycbcr_config ycbcr;
+ struct isif_params_raw bayer;
+ enum isif_data_pack data_pack;
+ struct isif_gain_values isif_gain_params;
+ void __iomem *base_addr;
+ void __iomem *linear_tbl0_addr;
+ void __iomem *linear_tbl1_addr;
+};
+
+#define ISIF_PAD_SINK 0
+#define ISIF_PAD_SOURCE 1
+
+#define ISIF_PADS_NUM 2
+
+enum isif_input_entity {
+ ISIF_INPUT_NONE = 0,
+ ISIF_INPUT_PARALLEL = 1,
+};
+
+#define ISIF_OUTPUT_NONE (0)
+#define ISIF_OUTPUT_MEMORY (1 << 0)
+#define ISIF_OUTPUT_IPIPEIF (1 << 1)
+
+struct vpfe_isif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[ISIF_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[ISIF_PADS_NUM];
+ enum isif_input_entity input;
+ unsigned int output;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_rect crop;
+ struct isif_oper_config isif_cfg;
+ struct vpfe_video_device video_out;
+};
+
+enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev);
+void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif);
+int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
+ struct v4l2_device *dev);
+int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev);
+void vpfe_isif_cleanup(struct vpfe_isif_device *vpfe_isif,
+ struct platform_device *pdev);
+void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif);
+void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif);
+
+#endif /* _DAVINCI_VPFE_DM365_ISIF_H */
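ISIF_OUTPUT_MEMORY and ISIF_OUTPUT_IPIPEIF are single-bit flags, which suggests the output field of struct vpfe_isif_device is a bitmask that may carry both routes at once. A minimal sketch of testing it (the helper name is an illustration, not something this patch defines):

static inline bool isif_writes_to_memory(const struct vpfe_isif_device *isif)
{
	/* output is built from the ISIF_OUTPUT_* flags above */
	return isif->output & ISIF_OUTPUT_MEMORY;
}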
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
new file mode 100644
index 000000000000..8aceabb43f8e
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_ISIF_REGS_H
+#define _DAVINCI_VPFE_DM365_ISIF_REGS_H
+
+/* ISIF registers relative offsets */
+#define SYNCEN 0x00
+#define MODESET 0x04
+#define HDW 0x08
+#define VDW 0x0c
+#define PPLN 0x10
+#define LPFR 0x14
+#define SPH 0x18
+#define LNH 0x1c
+#define SLV0 0x20
+#define SLV1 0x24
+#define LNV 0x28
+#define CULH 0x2c
+#define CULV 0x30
+#define HSIZE 0x34
+#define SDOFST 0x38
+#define CADU 0x3c
+#define CADL 0x40
+#define LINCFG0 0x44
+#define LINCFG1 0x48
+#define CCOLP 0x4c
+#define CRGAIN 0x50
+#define CGRGAIN 0x54
+#define CGBGAIN 0x58
+#define CBGAIN 0x5c
+#define COFSTA 0x60
+#define FLSHCFG0 0x64
+#define FLSHCFG1 0x68
+#define FLSHCFG2 0x6c
+#define VDINT0 0x70
+#define VDINT1 0x74
+#define VDINT2 0x78
+#define MISC 0x7c
+#define CGAMMAWD 0x80
+#define REC656IF 0x84
+#define CCDCFG 0x88
+/*****************************************************
+* Defect Correction registers
+*****************************************************/
+#define DFCCTL 0x8c
+#define VDFSATLV 0x90
+#define DFCMEMCTL 0x94
+#define DFCMEM0 0x98
+#define DFCMEM1 0x9c
+#define DFCMEM2 0xa0
+#define DFCMEM3 0xa4
+#define DFCMEM4 0xa8
+/****************************************************
+* Black Clamp registers
+****************************************************/
+#define CLAMPCFG 0xac
+#define CLDCOFST 0xb0
+#define CLSV 0xb4
+#define CLHWIN0 0xb8
+#define CLHWIN1 0xbc
+#define CLHWIN2 0xc0
+#define CLVRV 0xc4
+#define CLVWIN0 0xc8
+#define CLVWIN1 0xcc
+#define CLVWIN2 0xd0
+#define CLVWIN3 0xd4
+/****************************************************
+* Lens Shading Correction
+****************************************************/
+#define DATAHOFST 0xd8
+#define DATAVOFST 0xdc
+#define LSCHVAL 0xe0
+#define LSCVVAL 0xe4
+#define TWODLSCCFG 0xe8
+#define TWODLSCOFST 0xec
+#define TWODLSCINI 0xf0
+#define TWODLSCGRBU 0xf4
+#define TWODLSCGRBL 0xf8
+#define TWODLSCGROF 0xfc
+#define TWODLSCORBU 0x100
+#define TWODLSCORBL 0x104
+#define TWODLSCOROF 0x108
+#define TWODLSCIRQEN 0x10c
+#define TWODLSCIRQST 0x110
+/****************************************************
+* Data formatter
+****************************************************/
+#define FMTCFG 0x114
+#define FMTPLEN 0x118
+#define FMTSPH 0x11c
+#define FMTLNH 0x120
+#define FMTSLV 0x124
+#define FMTLNV 0x128
+#define FMTRLEN 0x12c
+#define FMTHCNT 0x130
+#define FMTAPTR_BASE 0x134
+/* The macro below generates the addresses FMTAPTR0 - FMTAPTR15 */
+#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
+#define FMTPGMVF0 0x174
+#define FMTPGMVF1 0x178
+#define FMTPGMAPU0 0x17c
+#define FMTPGMAPU1 0x180
+#define FMTPGMAPS0 0x184
+#define FMTPGMAPS1 0x188
+#define FMTPGMAPS2 0x18c
+#define FMTPGMAPS3 0x190
+#define FMTPGMAPS4 0x194
+#define FMTPGMAPS5 0x198
+#define FMTPGMAPS6 0x19c
+#define FMTPGMAPS7 0x1a0
+/************************************************
+* Color Space Converter
+************************************************/
+#define CSCCTL 0x1a4
+#define CSCM0 0x1a8
+#define CSCM1 0x1ac
+#define CSCM2 0x1b0
+#define CSCM3 0x1b4
+#define CSCM4 0x1b8
+#define CSCM5 0x1bc
+#define CSCM6 0x1c0
+#define CSCM7 0x1c4
+#define OBWIN0 0x1c8
+#define OBWIN1 0x1cc
+#define OBWIN2 0x1d0
+#define OBWIN3 0x1d4
+#define OBVAL0 0x1d8
+#define OBVAL1 0x1dc
+#define OBVAL2 0x1e0
+#define OBVAL3 0x1e4
+#define OBVAL4 0x1e8
+#define OBVAL5 0x1ec
+#define OBVAL6 0x1f0
+#define OBVAL7 0x1f4
+#define CLKCTL 0x1f8
+
+/* Masks & Shifts below */
+#define START_PX_HOR_MASK 0x7fff
+#define NUM_PX_HOR_MASK 0x7fff
+#define START_VER_ONE_MASK 0x7fff
+#define START_VER_TWO_MASK 0x7fff
+#define NUM_LINES_VER 0x7fff
+
+/* gain - offset masks */
+#define OFFSET_MASK 0xfff
+#define GAIN_SDRAM_EN_SHIFT 12
+#define GAIN_IPIPE_EN_SHIFT 13
+#define GAIN_H3A_EN_SHIFT 14
+#define OFST_SDRAM_EN_SHIFT 8
+#define OFST_IPIPE_EN_SHIFT 9
+#define OFST_H3A_EN_SHIFT 10
+#define GAIN_OFFSET_EN_MASK 0x7700
+
+/* Culling */
+#define CULL_PAT_EVEN_LINE_SHIFT 8
+
+/* CCDCFG register */
+#define ISIF_YCINSWP_RAW (0x00 << 4)
+#define ISIF_YCINSWP_YCBCR (0x01 << 4)
+#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
+#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
+#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
+#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
+#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
+#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
+#define ISIF_DATA_PACK_MASK 0x03
+#define ISIF_PIX_ORDER_SHIFT 11
+#define ISIF_PIX_ORDER_MASK 0x01
+#define ISIF_BW656_ENABLE (0x01 << 5)
+
+/* MODESET registers */
+#define ISIF_VDHDOUT_INPUT (0x00 << 0)
+#define ISIF_INPUT_MASK 0x03
+#define ISIF_INPUT_SHIFT 12
+#define ISIF_FID_POL_MASK 0x01
+#define ISIF_FID_POL_SHIFT 4
+#define ISIF_HD_POL_MASK 0x01
+#define ISIF_HD_POL_SHIFT 3
+#define ISIF_VD_POL_MASK 0x01
+#define ISIF_VD_POL_SHIFT 2
+#define ISIF_DATAPOL_NORMAL 0x00
+#define ISIF_DATAPOL_MASK 0x01
+#define ISIF_DATAPOL_SHIFT 6
+#define ISIF_EXWEN_DISABLE 0x00
+#define ISIF_EXWEN_MASK 0x01
+#define ISIF_EXWEN_SHIFT 5
+#define ISIF_FRM_FMT_MASK 0x01
+#define ISIF_FRM_FMT_SHIFT 7
+#define ISIF_DATASFT_MASK 0x07
+#define ISIF_DATASFT_SHIFT 8
+#define ISIF_LPF_SHIFT 14
+#define ISIF_LPF_MASK 0x1
+
+/* GAMMAWD registers */
+#define ISIF_ALAW_GAMA_WD_MASK 0xf
+#define ISIF_ALAW_GAMA_WD_SHIFT 1
+#define ISIF_ALAW_ENABLE 0x01
+#define ISIF_GAMMAWD_CFA_MASK 0x01
+#define ISIF_GAMMAWD_CFA_SHIFT 5
+
+/* HSIZE registers */
+#define ISIF_HSIZE_FLIP_MASK 0x01
+#define ISIF_HSIZE_FLIP_SHIFT 12
+#define ISIF_LINEOFST_MASK 0xfff
+
+/* MISC registers */
+#define ISIF_DPCM_EN_SHIFT 12
+#define ISIF_DPCM_PREDICTOR_SHIFT 13
+#define ISIF_DPCM_PREDICTOR_MASK 1
+
+/* Black clamp related */
+#define ISIF_BC_DCOFFSET_MASK 0x1fff
+#define ISIF_BC_MODE_COLOR_MASK 1
+#define ISIF_BC_MODE_COLOR_SHIFT 4
+#define ISIF_HORZ_BC_MODE_MASK 3
+#define ISIF_HORZ_BC_MODE_SHIFT 1
+#define ISIF_HORZ_BC_WIN_COUNT_MASK 0x1f
+#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
+#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
+#define ISIF_HORZ_BC_WIN_H_SIZE_MASK 3
+#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
+#define ISIF_HORZ_BC_WIN_V_SIZE_MASK 3
+#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
+#define ISIF_HORZ_BC_WIN_START_H_MASK 0x1fff
+#define ISIF_HORZ_BC_WIN_START_V_MASK 0x1fff
+#define ISIF_VERT_BC_OB_H_SZ_MASK 7
+#define ISIF_VERT_BC_RST_VAL_SEL_MASK 3
+#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
+#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
+#define ISIF_VERT_BC_OB_START_HORZ_MASK 0x1fff
+#define ISIF_VERT_BC_OB_START_VERT_MASK 0x1fff
+#define ISIF_VERT_BC_OB_VERT_SZ_MASK 0x1fff
+#define ISIF_VERT_BC_RST_VAL_MASK 0xfff
+#define ISIF_BC_VERT_START_SUB_V_MASK 0x1fff
+
+/* VDFC registers */
+#define ISIF_VDFC_EN_SHIFT 4
+#define ISIF_VDFC_CORR_MOD_MASK 3
+#define ISIF_VDFC_CORR_MOD_SHIFT 5
+#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
+#define ISIF_VDFC_LEVEL_SHFT_MASK 7
+#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
+#define ISIF_VDFC_SAT_LEVEL_MASK 0xfff
+#define ISIF_VDFC_POS_MASK 0x1fff
+#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
+
+/* CSC registers */
+#define ISIF_CSC_COEF_INTEG_MASK 7
+#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
+#define ISIF_CSC_COEF_INTEG_SHIFT 5
+#define ISIF_CSCM_MSB_SHIFT 8
+#define ISIF_DF_CSC_SPH_MASK 0x1fff
+#define ISIF_DF_CSC_LNH_MASK 0x1fff
+#define ISIF_DF_CSC_SLV_MASK 0x1fff
+#define ISIF_DF_CSC_LNV_MASK 0x1fff
+#define ISIF_DF_NUMLINES 0x7fff
+#define ISIF_DF_NUMPIX 0x1fff
+
+/* Offsets for LSC/DFC/Gain */
+#define ISIF_DATA_H_OFFSET_MASK 0x1fff
+#define ISIF_DATA_V_OFFSET_MASK 0x1fff
+
+/* Linearization */
+#define ISIF_LIN_CORRSFT_MASK 7
+#define ISIF_LIN_CORRSFT_SHIFT 4
+#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
+#define ISIF_LIN_SCALE_FACT_DECIMAL_MASK 0x3ff
+#define ISIF_LIN_ENTRY_MASK 0x3ff
+
+/* masks and shifts*/
+#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
+#define ISIF_SYNCEN_WEN_MASK (1 << 1)
+#define ISIF_SYNCEN_WEN_SHIFT 1
+
+#endif /* _DAVINCI_VPFE_DM365_ISIF_REGS_H */
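The *_MASK/*_SHIFT pairs above are intended for the usual read-modify-write access to the memory-mapped ISIF registers. A minimal sketch using plain readl()/writel() rather than whatever accessors the driver provides elsewhere (the helper name is illustrative):

#include <linux/io.h>

static void isif_set_vd_pol(void __iomem *base, u32 pol)
{
	u32 val = readl(base + MODESET);

	/* clear the VD polarity field, then program the new value */
	val &= ~(ISIF_VD_POL_MASK << ISIF_VD_POL_SHIFT);
	val |= (pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT;
	writel(val, base + MODESET);
}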
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
new file mode 100644
index 000000000000..9cb0262b9b33
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -0,0 +1,1999 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * The resizer allows upscaling or downscaling an image to a desired
+ * resolution. There are two resizer modules, both operating on the
+ * same input image but capable of different output resolutions.
+ */
+
+#include "dm365_ipipe_hw.h"
+#include "dm365_resizer.h"
+
+#define MIN_IN_WIDTH 32
+#define MIN_IN_HEIGHT 32
+#define MAX_IN_WIDTH 4095
+#define MAX_IN_HEIGHT 4095
+#define MIN_OUT_WIDTH 16
+#define MIN_OUT_HEIGHT 2
+
+static const unsigned int resizer_input_formats[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+};
+
+static const unsigned int resizer_output_formats[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_UV8_1X8,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16,
+ V4L2_MBUS_FMT_SGRBG12_1X12,
+};
+
+/* resizer_calculate_line_length() - This function calculates the line length of
+ * various image planes at the input and
+ * output.
+ */
+static void
+resizer_calculate_line_length(enum v4l2_mbus_pixelcode pix, int width,
+ int height, int *line_len, int *line_len_c)
+{
+ *line_len = 0;
+ *line_len_c = 0;
+
+ if (pix == V4L2_MBUS_FMT_UYVY8_2X8 ||
+ pix == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ *line_len = width << 1;
+ } else if (pix == V4L2_MBUS_FMT_Y8_1X8 ||
+ pix == V4L2_MBUS_FMT_UV8_1X8) {
+ *line_len = width;
+ *line_len_c = width;
+ } else {
+ /* YUV 420 */
+ /* round width to upper 32 byte boundary */
+ *line_len = width;
+ *line_len_c = width;
+ }
+ /* adjust the line len to be a multiple of 32 */
+ *line_len += 31;
+ *line_len &= ~0x1f;
+ *line_len_c += 31;
+ *line_len_c &= ~0x1f;
+}
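The closing "+= 31" / "&= ~0x1f" pair rounds each line length up to the next multiple of 32 bytes: a 642-pixel UYVY8_2X8 line, for example, is 642 * 2 = 1284 bytes and gets padded to 1312. The kernel's ALIGN() helper expresses the same arithmetic (the wrapper below is only an illustration):

#include <linux/kernel.h>

static inline int rsz_pad_line_to_32(int line_len_bytes)
{
	/* (x + 31) & ~0x1f is equivalent to ALIGN(x, 32); 1284 -> 1312 */
	return ALIGN(line_len_bytes, 32);
}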
+
+static inline int
+resizer_validate_output_image_format(struct device *dev,
+ struct v4l2_mbus_framefmt *format,
+ int *in_line_len, int *in_line_len_c)
+{
+ if (format->code != V4L2_MBUS_FMT_UYVY8_2X8 &&
+ format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_UV8_1X8 &&
+ format->code != V4L2_MBUS_FMT_YDYUYDYV8_1X16 &&
+ format->code != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ dev_err(dev, "Invalid Mbus format, %d\n", format->code);
+ return -EINVAL;
+ }
+ if (!format->width || !format->height) {
+ dev_err(dev, "invalid width or height\n");
+ return -EINVAL;
+ }
+ resizer_calculate_line_length(format->code, format->width,
+ format->height, in_line_len, in_line_len_c);
+ return 0;
+}
+
+static void
+resizer_configure_passthru(struct vpfe_resizer_device *resizer, int bypass)
+{
+ struct resizer_params *param = &resizer->config;
+
+ param->rsz_rsc_param[RSZ_A].cen = DISABLE;
+ param->rsz_rsc_param[RSZ_A].yen = DISABLE;
+ param->rsz_rsc_param[RSZ_A].v_phs_y = 0;
+ param->rsz_rsc_param[RSZ_A].v_phs_c = 0;
+ param->rsz_rsc_param[RSZ_A].v_dif = 256;
+ param->rsz_rsc_param[RSZ_A].v_lpf_int_y = 0;
+ param->rsz_rsc_param[RSZ_A].v_lpf_int_c = 0;
+ param->rsz_rsc_param[RSZ_A].h_phs = 0;
+ param->rsz_rsc_param[RSZ_A].h_dif = 256;
+ param->rsz_rsc_param[RSZ_A].h_lpf_int_y = 0;
+ param->rsz_rsc_param[RSZ_A].h_lpf_int_c = 0;
+ param->rsz_rsc_param[RSZ_A].dscale_en = DISABLE;
+ param->rsz2rgb[RSZ_A].rgb_en = DISABLE;
+ param->rsz_en[RSZ_A] = ENABLE;
+ param->rsz_en[RSZ_B] = DISABLE;
+ if (bypass) {
+ param->rsz_rsc_param[RSZ_A].i_vps = 0;
+ param->rsz_rsc_param[RSZ_A].i_hps = 0;
+ /* Raw Bypass */
+ param->rsz_common.passthrough = BYPASS_ON;
+ }
+}
+
+static void
+configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
+ void *output_spec, unsigned char partial,
+ unsigned flag)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *outformat;
+ struct vpfe_rsz_output_spec *output;
+
+ if (index == RSZ_A &&
+ resizer->resizer_a.output == RESIZER_OUTPUT_NONE) {
+ param->rsz_en[index] = DISABLE;
+ return;
+ }
+ if (index == RSZ_B &&
+ resizer->resizer_b.output == RESIZER_OUTPUT_NONE) {
+ param->rsz_en[index] = DISABLE;
+ return;
+ }
+ output = (struct vpfe_rsz_output_spec *)output_spec;
+ param->rsz_en[index] = ENABLE;
+ if (partial) {
+ param->rsz_rsc_param[index].h_flip = output->h_flip;
+ param->rsz_rsc_param[index].v_flip = output->v_flip;
+ param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
+ param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
+ param->rsz_rsc_param[index].v_lpf_int_y =
+ output->v_lpf_int_y;
+ param->rsz_rsc_param[index].v_lpf_int_c =
+ output->v_lpf_int_c;
+ param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
+ param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
+ param->rsz_rsc_param[index].h_lpf_int_y =
+ output->h_lpf_int_y;
+ param->rsz_rsc_param[index].h_lpf_int_c =
+ output->h_lpf_int_c;
+ param->rsz_rsc_param[index].dscale_en =
+ output->en_down_scale;
+ param->rsz_rsc_param[index].h_dscale_ave_sz =
+ output->h_dscale_ave_sz;
+ param->rsz_rsc_param[index].v_dscale_ave_sz =
+ output->v_dscale_ave_sz;
+ param->ext_mem_param[index].user_y_ofst =
+ (output->user_y_ofst + 31) & ~0x1f;
+ param->ext_mem_param[index].user_c_ofst =
+ (output->user_c_ofst + 31) & ~0x1f;
+ return;
+ }
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+ param->rsz_rsc_param[index].o_vsz = outformat->height - 1;
+ param->rsz_rsc_param[index].o_hsz = outformat->width - 1;
+ param->ext_mem_param[index].rsz_sdr_ptr_s_y = output->vst_y;
+ param->ext_mem_param[index].rsz_sdr_ptr_e_y = outformat->height;
+ param->ext_mem_param[index].rsz_sdr_ptr_s_c = output->vst_c;
+ param->ext_mem_param[index].rsz_sdr_ptr_e_c = outformat->height;
+
+ if (!flag)
+ return;
+ /* update common parameters */
+ param->rsz_rsc_param[index].h_flip = output->h_flip;
+ param->rsz_rsc_param[index].v_flip = output->v_flip;
+ param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
+ param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
+ param->rsz_rsc_param[index].v_lpf_int_y = output->v_lpf_int_y;
+ param->rsz_rsc_param[index].v_lpf_int_c = output->v_lpf_int_c;
+ param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
+ param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
+ param->rsz_rsc_param[index].h_lpf_int_y = output->h_lpf_int_y;
+ param->rsz_rsc_param[index].h_lpf_int_c = output->h_lpf_int_c;
+ param->rsz_rsc_param[index].dscale_en = output->en_down_scale;
+ param->rsz_rsc_param[index].h_dscale_ave_sz = output->h_dscale_ave_sz;
+ param->rsz_rsc_param[index].v_dscale_ave_sz = output->v_dscale_ave_sz;
+ param->ext_mem_param[index].user_y_ofst =
+ (output->user_y_ofst + 31) & ~0x1f;
+ param->ext_mem_param[index].user_c_ofst =
+ (output->user_c_ofst + 31) & ~0x1f;
+}
+
+/*
+ * resizer_calculate_resize_ratios() - Calculates resize ratio for resizer
+ * A or B. This is called after setting
+ * the input size or output size.
+ * @resizer: Pointer to VPFE resizer subdevice.
+ * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
+ */
+void
+resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ if (outformat->field != V4L2_FIELD_INTERLACED)
+ param->rsz_rsc_param[index].v_dif =
+ ((informat->height) * 256) / (outformat->height);
+ else
+ param->rsz_rsc_param[index].v_dif =
+ ((informat->height >> 1) * 256) / (outformat->height);
+ param->rsz_rsc_param[index].h_dif =
+ ((informat->width) * 256) / (outformat->width);
+}
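h_dif and v_dif are 8.8 fixed-point ratios, so 256 means 1:1 scaling, and when the output is interlaced the input height is halved before the division. Scaling a 1920x1080 progressive input to 640x480, for instance, gives h_dif = 1920 * 256 / 640 = 768 and v_dif = 1080 * 256 / 480 = 576. A minimal helper expressing the same formula (illustrative only):

static inline unsigned int rsz_ratio_8_8(unsigned int in_size,
					 unsigned int out_size)
{
	/* 8.8 fixed point: 256 == 1.0, larger values mean downscaling */
	return (in_size * 256) / out_size;
}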
+
+static void
+resizer_enable_422_420_conversion(struct resizer_params *param,
+ int index, bool en)
+{
+ param->rsz_rsc_param[index].cen = en;
+ param->rsz_rsc_param[index].yen = en;
+}
+
+/* resizer_calculate_sdram_offsets() - This function calculates the offsets from
+ * start of buffer for the C plane when
+ * output format is YUV420SP. It also
+ * calculates the offsets from the start of
+ * the buffer when the image is flipped
+ * vertically or horizontally for ycbcr/y/c
+ * planes.
+ * @resizer: Pointer to resizer subdevice.
+ * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
+ */
+static int
+resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
+{
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *outformat;
+ int bytesperpixel = 2;
+ int image_height;
+ int image_width;
+ int yuv_420 = 0;
+ int offset = 0;
+
+ if (index == RSZ_A)
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ else
+ outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ image_height = outformat->height + 1;
+ image_width = outformat->width + 1;
+ param->ext_mem_param[index].c_offset = 0;
+ param->ext_mem_param[index].flip_ofst_y = 0;
+ param->ext_mem_param[index].flip_ofst_c = 0;
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) {
+ /* YUV 420 */
+ yuv_420 = 1;
+ bytesperpixel = 1;
+ }
+
+ if (param->rsz_rsc_param[index].h_flip)
+ /* width * bytesperpixel - 1 */
+ offset = (image_width * bytesperpixel) - 1;
+ if (param->rsz_rsc_param[index].v_flip)
+ offset += (image_height - 1) *
+ param->ext_mem_param[index].rsz_sdr_oft_y;
+ param->ext_mem_param[index].flip_ofst_y = offset;
+ if (!yuv_420)
+ return 0;
+ offset = 0;
+ /* half height for c-plane */
+ if (param->rsz_rsc_param[index].h_flip)
+ /* width * bytesperpixel - 1 */
+ offset = image_width - 1;
+ if (param->rsz_rsc_param[index].v_flip)
+ offset += (((image_height >> 1) - 1) *
+ param->ext_mem_param[index].rsz_sdr_oft_c);
+ param->ext_mem_param[index].flip_ofst_c = offset;
+ param->ext_mem_param[index].c_offset =
+ param->ext_mem_param[index].rsz_sdr_oft_y * image_height;
+ return 0;
+}
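In the YUV420SP case above (signalled by V4L2_MBUS_FMT_YDYUYDYV8_1X16), the chroma plane follows the luma plane in the same buffer, so c_offset is just the luma line offset multiplied by the number of luma lines. A minimal sketch of that part only (names are illustrative):

static inline unsigned long nv12_chroma_offset(unsigned long luma_line_bytes,
					       unsigned long luma_lines)
{
	/* e.g. a 640-byte line and 480 lines put CbCr at byte 307200 */
	return luma_line_bytes * luma_lines;
}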
+
+int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
+{
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_output_spec output_specs;
+ struct v4l2_mbus_framefmt *outformat;
+ int line_len_c;
+ int line_len;
+ int ret;
+
+ outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+
+ output_specs.vst_y = param->user_config.vst;
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ output_specs.vst_c = param->user_config.vst;
+
+ configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
+ resizer_calculate_line_length(outformat->code,
+ param->rsz_rsc_param[0].o_hsz + 1,
+ param->rsz_rsc_param[0].o_vsz + 1,
+ &line_len, &line_len_c);
+ param->ext_mem_param[0].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[0].rsz_sdr_oft_c = line_len_c;
+ resizer_calculate_resize_ratios(resizer, RSZ_A);
+ if (param->rsz_en[RSZ_B])
+ resizer_calculate_resize_ratios(resizer, RSZ_B);
+
+ if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
+
+ ret = resizer_calculate_sdram_offsets(resizer, RSZ_A);
+ if (!ret && param->rsz_en[RSZ_B])
+ ret = resizer_calculate_sdram_offsets(resizer, RSZ_B);
+
+ if (ret)
+ pr_err("Error in calculating sdram offsets\n");
+ return ret;
+}
+
+static int
+resizer_calculate_down_scale_f_div_param(struct device *dev,
+ int input_width, int output_width,
+ struct resizer_scale_param *param)
+{
+ /* rsz = R, input_width = H, output width = h in the equation */
+ unsigned int two_power;
+ unsigned int upper_h1;
+ unsigned int upper_h2;
+ unsigned int val1;
+ unsigned int val;
+ unsigned int rsz;
+ unsigned int h1;
+ unsigned int h2;
+ unsigned int o;
+ unsigned int n;
+
+ upper_h1 = input_width >> 1;
+ n = param->h_dscale_ave_sz;
+ /* 2 ^ (scale+1) */
+ two_power = 1 << (n + 1);
+ upper_h1 = (upper_h1 >> (n + 1)) << (n + 1);
+ upper_h2 = input_width - upper_h1;
+ if (upper_h2 % two_power) {
+ dev_err(dev, "frame halves to be a multiple of 2 power n+1\n");
+ return -EINVAL;
+ }
+ two_power = 1 << n;
+ rsz = (input_width << 8) / output_width;
+ val = rsz * two_power;
+ val = ((upper_h1 << 8) / val) + 1;
+ if (!(val % 2)) {
+ h1 = val;
+ } else {
+ val = upper_h1 << 8;
+ val >>= n + 1;
+ val -= rsz >> 1;
+ val /= rsz << 1;
+ val <<= 1;
+ val += 2;
+ h1 = val;
+ }
+ o = 10 + (two_power << 2);
+ if (((input_width << 7) / rsz) % 2)
+ o += (((CEIL(rsz, 1024)) << 1) << n);
+ h2 = output_width - h1;
+ /* phi */
+ val = (h1 * rsz) - (((upper_h1 - (o - 10)) / two_power) << 8);
+ /* skip */
+ val1 = ((val - 1024) >> 9) << 1;
+ param->f_div.num_passes = MAX_PASSES;
+ param->f_div.pass[0].o_hsz = h1 - 1;
+ param->f_div.pass[0].i_hps = 0;
+ param->f_div.pass[0].h_phs = 0;
+ param->f_div.pass[0].src_hps = 0;
+ param->f_div.pass[0].src_hsz = upper_h1 + o;
+ param->f_div.pass[1].o_hsz = h2 - 1;
+ param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
+ param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].src_hps = upper_h1 - o;
+ param->f_div.pass[1].src_hsz = upper_h2 + o;
+
+ return 0;
+}
+
+static int
+resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_config_params *user_config;
+ struct v4l2_mbus_framefmt *informat;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+ user_config = &resizer->config.user_config;
+ param->rsz_common.vps = param->user_config.vst;
+ param->rsz_common.hps = param->user_config.hst;
+
+ if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
+ param->rsz_common.hsz = (((informat->width - 1) *
+ IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev));
+ else
+ param->rsz_common.hsz = informat->width - 1;
+
+ if (informat->field == V4L2_FIELD_INTERLACED)
+ param->rsz_common.vsz = (informat->height - 1) >> 1;
+ else
+ param->rsz_common.vsz = informat->height - 1;
+
+ param->rsz_common.raw_flip = 0;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF)
+ param->rsz_common.source = IPIPEIF_DATA;
+ else
+ param->rsz_common.source = IPIPE_DATA;
+
+ switch (informat->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_422;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_420;
+ /* Select y */
+ param->rsz_common.y_c = 0;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_UV8_1X8:
+ param->rsz_common.src_img_fmt = RSZ_IMG_420;
+ /* Select y */
+ param->rsz_common.y_c = 1;
+ param->rsz_common.raw_flip = 0;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ param->rsz_common.raw_flip = 1;
+ break;
+
+ default:
+ param->rsz_common.src_img_fmt = RSZ_IMG_422;
+ param->rsz_common.source = IPIPE_DATA;
+ }
+
+ param->rsz_common.yuv_y_min = user_config->yuv_y_min;
+ param->rsz_common.yuv_y_max = user_config->yuv_y_max;
+ param->rsz_common.yuv_c_min = user_config->yuv_c_min;
+ param->rsz_common.yuv_c_max = user_config->yuv_c_max;
+ param->rsz_common.out_chr_pos = user_config->out_chr_pos;
+ param->rsz_common.rsz_seq_crv = user_config->chroma_sample_even;
+
+ return 0;
+}
+
+static int
+resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
+{
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct resizer_params *param = &resizer->config;
+ struct vpfe_rsz_config_params *cont_config;
+ int line_len_c;
+ int line_len;
+ int ret;
+
+ if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
+ dev_err(dev, "enable resizer - Resizer-A\n");
+ return -EINVAL;
+ }
+
+ cont_config = &resizer->config.user_config;
+ param->rsz_en[RSZ_A] = ENABLE;
+ configure_resizer_out_params(resizer, RSZ_A,
+ &cont_config->output1, 1, 0);
+ param->rsz_en[RSZ_B] = DISABLE;
+ param->oper_mode = RESIZER_MODE_CONTINIOUS;
+
+ if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ struct v4l2_mbus_framefmt *outformat2;
+
+ param->rsz_en[RSZ_B] = ENABLE;
+ outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+ ret = resizer_validate_output_image_format(dev, outformat2,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_B,
+ &cont_config->output2, 0, 1);
+ if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, DISABLE);
+ }
+ resizer_configure_common_in_params(resizer);
+ ret = resizer_configure_output_win(resizer);
+ if (ret)
+ return ret;
+
+ param->rsz_common.passthrough = cont_config->bypass;
+ if (cont_config->bypass)
+ resizer_configure_passthru(resizer, 1);
+
+ return 0;
+}
+
+static inline int
+resizer_validate_input_image_format(struct device *dev,
+ enum v4l2_mbus_pixelcode pix,
+ int width, int height, int *line_len)
+{
+ int val;
+
+ if (pix != V4L2_MBUS_FMT_UYVY8_2X8 &&
+ pix != V4L2_MBUS_FMT_Y8_1X8 &&
+ pix != V4L2_MBUS_FMT_UV8_1X8 &&
+ pix != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ dev_err(dev,
+ "resizer validate output: pix format not supported, %d\n", pix);
+ return -EINVAL;
+ }
+
+ if (!width || !height) {
+ dev_err(dev,
+ "resizer validate input: invalid width or height\n");
+ return -EINVAL;
+ }
+
+ if (pix == V4L2_MBUS_FMT_UV8_1X8)
+ resizer_calculate_line_length(pix, width,
+ height, &val, line_len);
+ else
+ resizer_calculate_line_length(pix, width,
+ height, line_len, &val);
+
+ return 0;
+}
+
+static int
+resizer_validate_decimation(struct device *dev, enum ipipeif_decimation dec_en,
+ unsigned char rsz, unsigned char frame_div_mode_en,
+ int width)
+{
+ if (dec_en && frame_div_mode_en) {
+ dev_err(dev,
+ "dec_en & frame_div_mode_en can not enabled simultaneously\n");
+ return -EINVAL;
+ }
+
+ if (frame_div_mode_en) {
+ dev_err(dev, "frame_div_mode mode not supported\n");
+ return -EINVAL;
+ }
+
+ if (!dec_en)
+ return 0;
+
+ if (width <= VPFE_IPIPE_MAX_INPUT_WIDTH) {
+ dev_err(dev,
+ "image width to be more than %d for decimation\n",
+ VPFE_IPIPE_MAX_INPUT_WIDTH);
+ return -EINVAL;
+ }
+
+ if (rsz < IPIPEIF_RSZ_MIN || rsz > IPIPEIF_RSZ_MAX) {
+ dev_err(dev, "rsz range is %d to %d\n",
+ IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* resizer_calculate_normal_f_div_param() - Algorithm to calculate the frame
+ * division parameters for the resizer
+ * in normal mode.
+ */
+static int
+resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
+ int output_width, struct resizer_scale_param *param)
+{
+ /* rsz = R, input_width = H, output width = h in the equation */
+ unsigned int val1;
+ unsigned int rsz;
+ unsigned int val;
+ unsigned int h1;
+ unsigned int h2;
+ unsigned int o;
+
+ if (output_width > input_width) {
+ dev_err(dev, "frame div mode is used for scale down only\n");
+ return -EINVAL;
+ }
+
+ rsz = (input_width << 8) / output_width;
+ val = rsz << 1;
+ val = ((input_width << 8) / val) + 1;
+ o = 14;
+ if (!(val % 2)) {
+ h1 = val;
+ } else {
+ val = (input_width << 7);
+ val -= rsz >> 1;
+ val /= rsz << 1;
+ val <<= 1;
+ val += 2;
+ o += ((CEIL(rsz, 1024)) << 1);
+ h1 = val;
+ }
+ h2 = output_width - h1;
+ /* phi */
+ val = (h1 * rsz) - (((input_width >> 1) - o) << 8);
+ /* skip */
+ val1 = ((val - 1024) >> 9) << 1;
+ param->f_div.num_passes = MAX_PASSES;
+ param->f_div.pass[0].o_hsz = h1 - 1;
+ param->f_div.pass[0].i_hps = 0;
+ param->f_div.pass[0].h_phs = 0;
+ param->f_div.pass[0].src_hps = 0;
+ param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
+ param->f_div.pass[1].o_hsz = h2 - 1;
+ param->f_div.pass[1].i_hps = val1;
+ param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].src_hps = (input_width >> 2) - o;
+ param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
+
+ return 0;
+}
+
+static int
+resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_rsz_config_params *config = &resizer->config.user_config;
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct v4l2_mbus_framefmt *outformat1, *outformat2;
+ struct resizer_params *param = &resizer->config;
+ struct v4l2_mbus_framefmt *informat;
+ int decimation;
+ int line_len_c;
+ int line_len;
+ int rsz;
+ int ret;
+
+ informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
+ outformat1 = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
+ outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
+
+ decimation = vpfe_ipipeif_decimation_enabled(vpfe_dev);
+ rsz = vpfe_ipipeif_get_rsz(vpfe_dev);
+ if (decimation && param->user_config.frame_div_mode_en) {
+ dev_err(dev,
+ "dec_en & frame_div_mode_en cannot enabled simultaneously\n");
+ return -EINVAL;
+ }
+
+ ret = resizer_validate_decimation(dev, decimation, rsz,
+ param->user_config.frame_div_mode_en, informat->width);
+ if (ret)
+ return -EINVAL;
+
+ ret = resizer_validate_input_image_format(dev, informat->code,
+ informat->width, informat->height, &line_len);
+ if (ret)
+ return -EINVAL;
+
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ param->rsz_en[RSZ_A] = ENABLE;
+ ret = resizer_validate_output_image_format(dev, outformat1,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_A].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_A].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_A,
+ &param->user_config.output1, 0, 1);
+
+ if (outformat1->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ param->rsz_common.raw_flip = 1;
+ else
+ param->rsz_common.raw_flip = 0;
+
+ if (outformat1->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_A, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_A, DISABLE);
+ }
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ param->rsz_en[RSZ_B] = ENABLE;
+ ret = resizer_validate_output_image_format(dev, outformat2,
+ &line_len, &line_len_c);
+ if (ret)
+ return ret;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
+ param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
+ configure_resizer_out_params(resizer, RSZ_B,
+ &param->user_config.output2, 0, 1);
+ if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, ENABLE);
+ else
+ resizer_enable_422_420_conversion(param,
+ RSZ_B, DISABLE);
+ }
+
+ resizer_configure_common_in_params(resizer);
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ resizer_calculate_resize_ratios(resizer, RSZ_A);
+ resizer_calculate_sdram_offsets(resizer, RSZ_A);
+ /* Overriding resize ratio calculation */
+ if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ param->rsz_rsc_param[RSZ_A].v_dif =
+ (((informat->height + 1) * 2) * 256) /
+ (param->rsz_rsc_param[RSZ_A].o_vsz + 1);
+ }
+ }
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ resizer_calculate_resize_ratios(resizer, RSZ_B);
+ resizer_calculate_sdram_offsets(resizer, RSZ_B);
+ /* Overriding resize ratio calculation */
+ if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ param->rsz_rsc_param[RSZ_B].v_dif =
+ (((informat->height + 1) * 2) * 256) /
+ (param->rsz_rsc_param[RSZ_B].o_vsz + 1);
+ }
+ }
+ if (param->user_config.frame_div_mode_en &&
+ param->rsz_en[RSZ_A]) {
+ if (!param->rsz_rsc_param[RSZ_A].dscale_en)
+ ret = resizer_calculate_normal_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_A].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_A]);
+ else
+ ret = resizer_calculate_down_scale_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_A].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_A]);
+ if (ret)
+ return -EINVAL;
+ }
+ if (param->user_config.frame_div_mode_en &&
+ param->rsz_en[RSZ_B]) {
+ if (!param->rsz_rsc_param[RSZ_B].dscale_en)
+ ret = resizer_calculate_normal_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_B].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_B]);
+ else
+ ret = resizer_calculate_down_scale_f_div_param(dev,
+ informat->width,
+ param->rsz_rsc_param[RSZ_B].o_vsz + 1,
+ &param->rsz_rsc_param[RSZ_B]);
+ if (ret)
+ return -EINVAL;
+ }
+ param->rsz_common.passthrough = config->bypass;
+ if (config->bypass)
+ resizer_configure_passthru(resizer, 1);
+ return 0;
+}
+
+static void
+resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
+{
+#define WIDTH_I 640
+#define HEIGHT_I 480
+#define WIDTH_O 640
+#define HEIGHT_O 480
+ const struct resizer_params rsz_default_config = {
+ .oper_mode = RESIZER_MODE_ONE_SHOT,
+ .rsz_common = {
+ .vsz = HEIGHT_I - 1,
+ .hsz = WIDTH_I - 1,
+ .src_img_fmt = RSZ_IMG_422,
+ .raw_flip = 1, /* flip preserve Raw format */
+ .source = IPIPE_DATA,
+ .passthrough = BYPASS_OFF,
+ .yuv_y_max = 255,
+ .yuv_c_max = 255,
+ .rsz_seq_crv = DISABLE,
+ .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
+ },
+ .rsz_rsc_param = {
+ {
+ .h_flip = DISABLE,
+ .v_flip = DISABLE,
+ .cen = DISABLE,
+ .yen = DISABLE,
+ .o_vsz = HEIGHT_O - 1,
+ .o_hsz = WIDTH_O - 1,
+ .v_dif = 256,
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dif = 256,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ {
+ .h_flip = DISABLE,
+ .v_flip = DISABLE,
+ .cen = DISABLE,
+ .yen = DISABLE,
+ .o_vsz = HEIGHT_O - 1,
+ .o_hsz = WIDTH_O - 1,
+ .v_dif = 256,
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dif = 256,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ },
+ .rsz2rgb = {
+ {
+ .rgb_en = DISABLE
+ },
+ {
+ .rgb_en = DISABLE
+ }
+ },
+ .ext_mem_param = {
+ {
+ .rsz_sdr_oft_y = WIDTH_O << 1,
+ .rsz_sdr_ptr_e_y = HEIGHT_O,
+ .rsz_sdr_oft_c = WIDTH_O,
+ .rsz_sdr_ptr_e_c = HEIGHT_O >> 1,
+ },
+ {
+ .rsz_sdr_oft_y = WIDTH_O << 1,
+ .rsz_sdr_ptr_e_y = HEIGHT_O,
+ .rsz_sdr_oft_c = WIDTH_O,
+ .rsz_sdr_ptr_e_c = HEIGHT_O,
+ },
+ },
+ .rsz_en[0] = ENABLE,
+ .rsz_en[1] = DISABLE,
+ .user_config = {
+ .output1 = {
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ .output2 = {
+ .v_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_y = VPFE_RSZ_INTP_CUBIC,
+ .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .h_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ .v_dscale_ave_sz =
+ VPFE_IPIPE_DWN_SCALE_1_OVER_2,
+ },
+ .yuv_y_max = 255,
+ .yuv_c_max = 255,
+ .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
+ },
+ };
+ memset(&resizer->config, 0, sizeof(struct resizer_params));
+ memcpy(&resizer->config, &rsz_default_config,
+ sizeof(struct resizer_params));
+}
+
+/*
+ * resizer_set_configuration() - set resizer config
+ * @resizer: vpfe resizer device pointer.
+ * @chan_config: resizer channel configuration.
+ */
+static int
+resizer_set_configuration(struct vpfe_resizer_device *resizer,
+ struct vpfe_rsz_config *chan_config)
+{
+ if (!chan_config->config)
+ resizer_set_defualt_configuration(resizer);
+ else
+ if (copy_from_user(&resizer->config.user_config,
+ chan_config->config, sizeof(struct vpfe_rsz_config_params)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * resizer_get_configuration() - get resizer config
+ * @resizer: vpfe resizer device pointer.
+ * @channel: image processor logical channel.
+ * @chan_config: resizer channel configuration.
+ */
+static int
+resizer_get_configuration(struct vpfe_resizer_device *resizer,
+ struct vpfe_rsz_config *chan_config)
+{
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+
+ if (!chan_config->config) {
+ dev_err(dev, "Resizer channel invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void *)chan_config->config,
+ (void *)&resizer->config.user_config,
+ sizeof(struct vpfe_rsz_config_params))) {
+ dev_err(dev, "resizer_get_configuration: Error in copy to user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
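resizer_set_configuration() and resizer_get_configuration() back the driver-private VIDIOC_VPFE_RSZ_S_CONFIG / VIDIOC_VPFE_RSZ_G_CONFIG ioctls dispatched from resizer_ioctl() below, copying a struct vpfe_rsz_config_params to or from the user pointer carried in struct vpfe_rsz_config. A minimal user-space sketch of reading the current configuration (the subdev node path is an assumption, not something defined by this patch):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "davinci_vpfe_user.h"

int rsz_read_config(const char *subdev)	/* e.g. "/dev/v4l-subdevN" */
{
	struct vpfe_rsz_config_params params;
	struct vpfe_rsz_config cfg = { .config = &params };
	int ret, fd = open(subdev, O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, VIDIOC_VPFE_RSZ_G_CONFIG, &cfg);
	close(fd);
	return ret;
}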
+
+/*
+ * VPFE video operations
+ */
+
+/*
+ * resizer_a_video_out_queue() - RESIZER-A video out queue
+ * @vpfe_dev: vpfe device pointer.
+ * @addr: buffer address.
+ */
+static int resizer_a_video_out_queue(struct vpfe_device *vpfe_dev,
+ unsigned long addr)
+{
+ struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
+
+ return resizer_set_outaddr(resizer->base_addr,
+ &resizer->config, RSZ_A, addr);
+}
+
+/*
+ * resizer_b_video_out_queue() - RESIZER-B video out queue
+ * @vpfe_dev: vpfe device pointer.
+ * @addr: buffer address.
+ */
+static int resizer_b_video_out_queue(struct vpfe_device *vpfe_dev,
+ unsigned long addr)
+{
+ struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
+
+ return resizer_set_outaddr(resizer->base_addr,
+ &resizer->config, RSZ_B, addr);
+}
+
+static const struct vpfe_video_operations resizer_a_video_ops = {
+ .queue = resizer_a_video_out_queue,
+};
+
+static const struct vpfe_video_operations resizer_b_video_ops = {
+ .queue = resizer_b_video_out_queue,
+};
+
+static void resizer_enable(struct vpfe_resizer_device *resizer, int en)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ unsigned char val;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
+ return;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF &&
+ ipipeif_sink == IPIPEIF_INPUT_MEMORY) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_SRC_EN);
+ } while (val);
+
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_A);
+ } while (val);
+ }
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
+ do {
+ val = regr_rsz(resizer->base_addr, RSZ_B);
+ } while (val);
+ }
+ }
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
+ rsz_enable(resizer->base_addr, RSZ_A, en);
+
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
+ rsz_enable(resizer->base_addr, RSZ_B, en);
+}
+
+
+/*
+ * resizer_ss_isr() - resizer module single-shot buffer scheduling isr
+ * @resizer: vpfe resizer device pointer.
+ */
+static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_pipeline *pipe = &video_out->pipe;
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ u32 val;
+
+ if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
+ return;
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ val = vpss_dma_complete_interrupt();
+ if (val != 0 && val != 2)
+ return;
+ }
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_out);
+ video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_out);
+ spin_unlock(&video_out->dma_queue_lock);
+ }
+
+ /* If resizer B is enabled */
+ if (pipe->output_num > 1 && resizer->resizer_b.output ==
+ RESIZER_OUPUT_MEMORY) {
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_process_buffer_complete(video_out2);
+ video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
+ vpfe_video_schedule_next_buffer(video_out2);
+ spin_unlock(&video_out2->dma_queue_lock);
+ }
+
+ /* start HW if buffers are queued */
+ if (vpfe_video_is_pipe_ready(pipe) &&
+ resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ resizer_enable(resizer, 1);
+ vpfe_ipipe_enable(vpfe_dev, 1);
+ vpfe_ipipeif_enable(vpfe_dev);
+ }
+}
+
+/*
+ * vpfe_resizer_buffer_isr() - resizer module buffer scheduling isr
+ * @resizer: vpfe resizer device pointer.
+ */
+void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_pipeline *pipe = &resizer->resizer_a.video_out.pipe;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video_out->started)
+ return;
+
+ if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
+ return;
+
+ field = video_out->fmt.fmt.pix.field;
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (video_out->cur_frm != video_out->next_frm) {
+ vpfe_video_process_buffer_complete(video_out);
+ if (pipe->output_num > 1)
+ vpfe_video_process_buffer_complete(video_out2);
+ }
+
+ video_out->skip_frame_count--;
+ if (!video_out->skip_frame_count) {
+ video_out->skip_frame_count =
+ video_out->skip_frame_count_init;
+ rsz_src_enable(resizer->base_addr, 1);
+ } else {
+ rsz_src_enable(resizer->base_addr, 0);
+ }
+ return;
+ }
+
+ /* handle interlaced frame capture */
+ fid = vpfe_isif_get_fid(vpfe_dev);
+
+ /* switch the software maintained field id */
+ video_out->field_id ^= 1;
+ if (fid == video_out->field_id) {
+ /*
+ * we are in sync here, continue.
+ * One frame is just being captured. If the
+ * next frame is available, release the current
+ * frame and move on
+ */
+ if (fid == 0 && video_out->cur_frm != video_out->next_frm) {
+ vpfe_video_process_buffer_complete(video_out);
+ if (pipe->output_num > 1)
+ vpfe_video_process_buffer_complete(video_out2);
+ }
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ video_out->field_id = fid;
+ }
+}
+
+/*
+ * vpfe_resizer_dma_isr() - resizer module dma isr
+ * @resizer: vpfe resizer device pointer.
+ */
+void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
+ struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ struct vpfe_pipeline *pipe = &video_out->pipe;
+ int schedule_capture = 0;
+ enum v4l2_field field;
+ int fid;
+
+ if (!video_out->started)
+ return;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_SINGLESHOT) {
+ resizer_ss_isr(resizer);
+ return;
+ }
+
+ field = video_out->fmt.fmt.pix.field;
+ if (field == V4L2_FIELD_NONE) {
+ if (!list_empty(&video_out->dma_queue) &&
+ video_out->cur_frm == video_out->next_frm)
+ schedule_capture = 1;
+ } else {
+ fid = vpfe_isif_get_fid(vpfe_dev);
+ if (fid == video_out->field_id) {
+ /* we are in sync here, continue */
+ if (fid == 1 && !list_empty(&video_out->dma_queue) &&
+ video_out->cur_frm == video_out->next_frm)
+ schedule_capture = 1;
+ }
+ }
+
+ if (!schedule_capture)
+ return;
+
+ spin_lock(&video_out->dma_queue_lock);
+ vpfe_video_schedule_next_buffer(video_out);
+ spin_unlock(&video_out->dma_queue_lock);
+ if (pipe->output_num > 1) {
+ spin_lock(&video_out2->dma_queue_lock);
+ vpfe_video_schedule_next_buffer(video_out2);
+ spin_unlock(&video_out2->dma_queue_lock);
+ }
+}
+
+/*
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_ioctl() - Handle resizer module private ioctl's
+ * @sd: pointer to v4l2 subdev structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ */
+static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
+ struct vpfe_rsz_config *user_config;
+ int ret = -ENOIOCTLCMD;
+
+ if (&resizer->crop_resizer.subdev != sd)
+ return ret;
+
+ switch (cmd) {
+ case VIDIOC_VPFE_RSZ_S_CONFIG:
+ user_config = (struct vpfe_rsz_config *)arg;
+ ret = resizer_set_configuration(resizer, user_config);
+ break;
+
+ case VIDIOC_VPFE_RSZ_G_CONFIG:
+ user_config = (struct vpfe_rsz_config *)arg;
+ if (!user_config->config) {
+ dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
+ return -EINVAL;
+ }
+ ret = resizer_get_configuration(resizer, user_config);
+ break;
+ }
+ return ret;
+}
+
+static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
+ u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
+ struct resizer_params *param = &resizer->config;
+ int ret = 0;
+
+ if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
+ resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
+ ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
+ ret = resizer_configure_in_single_shot_mode(resizer);
+ else
+ ret = resizer_configure_in_continious_mode(resizer);
+ if (ret)
+ return ret;
+ ret = config_rsz_hw(resizer, param);
+ }
+ return ret;
+}
+
+/*
+ * resizer_set_stream() - Enable/Disable streaming on resizer subdev
+ * @sd: pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+
+ if (&resizer->crop_resizer.subdev != sd)
+ return 0;
+
+ if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
+ return 0;
+
+ switch (enable) {
+ case 1:
+ if (resizer_do_hw_setup(resizer) < 0)
+ return -EINVAL;
+ resizer_enable(resizer, enable);
+ break;
+
+ case 0:
+ resizer_enable(resizer, enable);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * __resizer_get_format() - helper function for getting resizer format
+ * @sd: pointer to subdev.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad number.
+ * @which: wanted subdev format.
+ * Return the wanted mbus frame format.
+ */
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ if (&resizer->crop_resizer.subdev == sd)
+ return &resizer->crop_resizer.formats[pad];
+ if (&resizer->resizer_a.subdev == sd)
+ return &resizer->resizer_a.formats[pad];
+ if (&resizer->resizer_b.subdev == sd)
+ return &resizer->resizer_b.formats[pad];
+ return NULL;
+}
+
+/*
+ * resizer_try_format() - Handle try format by pad subdev method
+ * @sd: pointer to subdev.
+ * @fh: V4L2 subdev file handle.
+ * @pad: pad num.
+ * @fmt: pointer to v4l2 format structure.
+ * @which: wanted subdev format.
+ */
+static void
+resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ unsigned int max_out_height;
+ unsigned int max_out_width;
+ unsigned int i;
+
+ if ((&resizer->resizer_a.subdev == sd && pad == RESIZER_PAD_SINK) ||
+ (&resizer->resizer_b.subdev == sd && pad == RESIZER_PAD_SINK) ||
+ (&resizer->crop_resizer.subdev == sd &&
+ (pad == RESIZER_CROP_PAD_SOURCE ||
+ pad == RESIZER_CROP_PAD_SOURCE2 || pad == RESIZER_CROP_PAD_SINK))) {
+ for (i = 0; i < ARRAY_SIZE(resizer_input_formats); i++) {
+ if (fmt->code == resizer_input_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_input_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
+ MAX_IN_WIDTH);
+ fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
+ MAX_IN_HEIGHT);
+ } else if (&resizer->resizer_a.subdev == sd &&
+ pad == RESIZER_PAD_SOURCE) {
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+
+ for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
+ if (fmt->code == resizer_output_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_output_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
+ max_out_width);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
+ max_out_height);
+ } else if (&resizer->resizer_b.subdev == sd &&
+ pad == RESIZER_PAD_SOURCE) {
+ max_out_width = IPIPE_MAX_OUTPUT_WIDTH_B;
+ max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_B;
+
+ for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
+ if (fmt->code == resizer_output_formats[i])
+ break;
+ }
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_output_formats))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
+ max_out_width);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
+ max_out_height);
+ }
+}
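On the source pads the requested size is clamped to the per-resizer maximum and the width is then forced down to a multiple of 16 with fmt->width &= ~15 (for example, 1287 & ~15 == 1280). A one-line illustration of that rounding:

static inline u32 rsz_round_width_to_16(u32 width)
{
	return width & ~15;	/* 1287 -> 1280 */
}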
+
+/*
+ * resizer_set_format() - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @fh: V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ resizer_try_format(sd, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ if (&resizer->crop_resizer.subdev == sd) {
+ if (fmt->pad == RESIZER_CROP_PAD_SINK) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE &&
+ resizer->crop_resizer.output == RESIZER_A) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ resizer->crop_resizer.formats[RESIZER_CROP_PAD_SOURCE2] =
+ fmt->format;
+ } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE2 &&
+ resizer->crop_resizer.output2 == RESIZER_B) {
+ resizer->crop_resizer.formats[fmt->pad] = fmt->format;
+ resizer->crop_resizer.formats[RESIZER_CROP_PAD_SOURCE] =
+ fmt->format;
+ } else {
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_a.subdev == sd) {
+ if (fmt->pad == RESIZER_PAD_SINK)
+ resizer->resizer_a.formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == RESIZER_PAD_SOURCE)
+ resizer->resizer_a.formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+ } else if (&resizer->resizer_b.subdev == sd) {
+ if (fmt->pad == RESIZER_PAD_SINK)
+ resizer->resizer_b.formats[fmt->pad] = fmt->format;
+ else if (fmt->pad == RESIZER_PAD_SOURCE)
+ resizer->resizer_b.formats[fmt->pad] = fmt->format;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_get_format() - Retrieve the video format on a pad
+ * @sd: pointer to v4l2 subdev structure.
+ * @fh: V4L2 subdev file handle.
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(sd, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * resizer_enum_frame_size() - enum frame sizes on pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle.
+ * @code: pointer to v4l2_subdev_frame_size_enum structure.
+ */
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ resizer_try_format(sd, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ resizer_try_format(sd, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * resizer_enum_mbus_code() - enum mbus codes for pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad == RESIZER_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(resizer_input_formats))
+ return -EINVAL;
+
+ code->code = resizer_input_formats[code->index];
+ } else if (code->pad == RESIZER_PAD_SOURCE) {
+ if (code->index >= ARRAY_SIZE(resizer_output_formats))
+ return -EINVAL;
+
+ code->code = resizer_output_formats[code->index];
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_init_formats() - Initialize formats on all pads
+ * @sd: Pointer to subdevice.
+ * @fh: V4L2 subdev file handle.
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ __u32 which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_format format;
+
+ if (&resizer->crop_resizer.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_CROP_PAD_SOURCE2;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+ } else if (&resizer->resizer_a.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
+ resizer_set_format(sd, fh, &format);
+ } else if (&resizer->resizer_b.subdev == sd) {
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.width = MAX_IN_WIDTH;
+ format.format.height = MAX_IN_HEIGHT;
+ resizer_set_format(sd, fh, &format);
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SOURCE;
+ format.which = which;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
+ format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
+ resizer_set_format(sd, fh, &format);
+ }
+
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops resizer_v4l2_core_ops = {
+ .ioctl = resizer_ioctl,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+ .open = resizer_init_formats,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+ .s_stream = resizer_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+ .enum_mbus_code = resizer_enum_mbus_code,
+ .enum_frame_size = resizer_enum_frame_size,
+ .get_fmt = resizer_get_format,
+ .set_fmt = resizer_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+ .core = &resizer_v4l2_core_ops,
+ .video = &resizer_v4l2_video_ops,
+ .pad = &resizer_v4l2_pad_ops,
+};
+
+/*
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup() - Setup resizer connections
+ * @entity: Pointer to media entity structure
+ * @local: Pointer to local pad array
+ * @remote: Pointer to remote pad array
+ * @flags: Link flags
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
+ u16 ipipe_source = vpfe_dev->vpfe_ipipe.output;
+
+ if (&resizer->crop_resizer.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_CROP_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_NONE;
+ break;
+ }
+
+ if (resizer->crop_resizer.input !=
+ RESIZER_CROP_INPUT_NONE)
+ return -EBUSY;
+ if (ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_IPIPEIF;
+ else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
+ resizer->crop_resizer.input =
+ RESIZER_CROP_INPUT_IPIPE;
+ else
+ return -EINVAL;
+ break;
+
+ case RESIZER_CROP_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.output =
+ RESIZER_CROP_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->crop_resizer.output !=
+ RESIZER_CROP_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->crop_resizer.output = RESIZER_A;
+ break;
+
+ case RESIZER_CROP_PAD_SOURCE2 | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->crop_resizer.output2 =
+ RESIZER_CROP_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->crop_resizer.output2 !=
+ RESIZER_CROP_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->crop_resizer.output2 = RESIZER_B;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_a.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_a.input = RESIZER_INPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_a.input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_a.input = RESIZER_INPUT_CROP_RESIZER;
+ break;
+
+ case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_a.output = RESIZER_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else if (&resizer->resizer_b.subdev == sd) {
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_b.input = RESIZER_INPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_b.input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_b.input = RESIZER_INPUT_CROP_RESIZER;
+ break;
+
+ case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->resizer_b.output = RESIZER_OUTPUT_NONE;
+ break;
+ }
+ if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
+ return -EBUSY;
+ resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations resizer_media_ops = {
+ .link_setup = resizer_link_setup,
+};
+
+/*
+ * vpfe_resizer_unregister_entities() - Unregister all resizer entities
+ * @vpfe_rsz - pointer to resizer subdevice structure.
+ */
+void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
+{
+ /* unregister video devices */
+ vpfe_video_unregister(&vpfe_rsz->resizer_a.video_out);
+ vpfe_video_unregister(&vpfe_rsz->resizer_b.video_out);
+
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
+ /* unregister subdev */
+ v4l2_device_unregister_subdev(&vpfe_rsz->crop_resizer.subdev);
+ v4l2_device_unregister_subdev(&vpfe_rsz->resizer_a.subdev);
+ v4l2_device_unregister_subdev(&vpfe_rsz->resizer_b.subdev);
+}
+
+/*
+ * vpfe_resizer_register_entities() - Register all resizer entities
+ * @resizer - pointer to resizer device.
+ * @vdev: pointer to v4l2 device structure.
+ */
+int vpfe_resizer_register_entities(struct vpfe_resizer_device *resizer,
+ struct v4l2_device *vdev)
+{
+ struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
+ unsigned int flags = 0;
+ int ret;
+
+ /* Register the crop resizer subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->crop_resizer.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register crop resizer as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register Resizer-A subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->resizer_a.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register resizer-a as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register Resizer-B subdev */
+ ret = v4l2_device_register_subdev(vdev, &resizer->resizer_b.subdev);
+ if (ret < 0) {
+ pr_err("Failed to register resizer-b as v4l2-subdev\n");
+ return ret;
+ }
+ /* Register video-out device for resizer-a */
+ ret = vpfe_video_register(&resizer->resizer_a.video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register RSZ-A video-out device\n");
+ goto out_video_out2_register;
+ }
+ resizer->resizer_a.video_out.vpfe_dev = vpfe_dev;
+
+ /* Register video-out device for resizer-b */
+ ret = vpfe_video_register(&resizer->resizer_b.video_out, vdev);
+ if (ret) {
+ pr_err("Failed to register RSZ-B video-out device\n");
+ goto out_video_out2_register;
+ }
+ resizer->resizer_b.video_out.vpfe_dev = vpfe_dev;
+
+ /* create link between Resizer Crop ---> Resizer A */
+ ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 1,
+ &resizer->resizer_a.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer Crop ---> Resizer B */
+ ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 2,
+ &resizer->resizer_b.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer A ----> video out */
+ ret = media_entity_create_link(&resizer->resizer_a.subdev.entity, 1,
+ &resizer->resizer_a.video_out.video_dev.entity, 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ /* create link between Resizer B ----> video out */
+ ret = media_entity_create_link(&resizer->resizer_b.subdev.entity, 1,
+ &resizer->resizer_b.video_out.video_dev.entity, 0, flags);
+ if (ret < 0)
+ goto out_create_link;
+
+ return 0;
+
+out_create_link:
+ vpfe_video_unregister(&resizer->resizer_b.video_out);
+out_video_out2_register:
+ vpfe_video_unregister(&resizer->resizer_a.video_out);
+ media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_a.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_b.subdev.entity);
+ v4l2_device_unregister_subdev(&resizer->crop_resizer.subdev);
+ v4l2_device_unregister_subdev(&resizer->resizer_a.subdev);
+ v4l2_device_unregister_subdev(&resizer->resizer_b.subdev);
+ return ret;
+}
+
+/*
+ * vpfe_resizer_init() - resizer device initialization.
+ * @vpfe_rsz - pointer to resizer device
+ * @pdev: platform device pointer.
+ */
+int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = &vpfe_rsz->crop_resizer.subdev;
+ struct media_pad *pads = &vpfe_rsz->crop_resizer.pads[0];
+ struct media_entity *me = &sd->entity;
+ static resource_size_t res_len;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (!res)
+ return -ENOENT;
+
+ res_len = resource_size(res);
+ res = request_mem_region(res->start, res_len, res->name);
+ if (!res)
+ return -EBUSY;
+
+ vpfe_rsz->base_addr = ioremap_nocache(res->start, res_len);
+ if (!vpfe_rsz->base_addr)
+ return -EBUSY;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER CROP", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_CROP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_CROP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[RESIZER_CROP_PAD_SOURCE2].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->crop_resizer.input = RESIZER_CROP_INPUT_NONE;
+ vpfe_rsz->crop_resizer.output = RESIZER_CROP_OUTPUT_NONE;
+ vpfe_rsz->crop_resizer.output2 = RESIZER_CROP_OUTPUT_NONE;
+ vpfe_rsz->crop_resizer.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_CROP_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ sd = &vpfe_rsz->resizer_a.subdev;
+ pads = &vpfe_rsz->resizer_a.pads[0];
+ me = &sd->entity;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER A", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->resizer_a.input = RESIZER_INPUT_NONE;
+ vpfe_rsz->resizer_a.output = RESIZER_OUTPUT_NONE;
+ vpfe_rsz->resizer_a.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ sd = &vpfe_rsz->resizer_b.subdev;
+ pads = &vpfe_rsz->resizer_b.pads[0];
+ me = &sd->entity;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "DAVINCI RESIZER B", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
+ v4l2_set_subdevdata(sd, vpfe_rsz);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ vpfe_rsz->resizer_b.input = RESIZER_INPUT_NONE;
+ vpfe_rsz->resizer_b.output = RESIZER_OUTPUT_NONE;
+ vpfe_rsz->resizer_b.rsz_device = vpfe_rsz;
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret)
+ return ret;
+
+ vpfe_rsz->resizer_a.video_out.ops = &resizer_a_video_ops;
+ vpfe_rsz->resizer_a.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = vpfe_video_init(&vpfe_rsz->resizer_a.video_out, "RSZ-A");
+ if (ret) {
+ pr_err("Failed to init RSZ video-out device\n");
+ return ret;
+ }
+ vpfe_rsz->resizer_b.video_out.ops = &resizer_b_video_ops;
+ vpfe_rsz->resizer_b.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = vpfe_video_init(&vpfe_rsz->resizer_b.video_out, "RSZ-B");
+ if (ret) {
+ pr_err("Failed to init RSZ video-out2 device\n");
+ return ret;
+ }
+ memset(&vpfe_rsz->config, 0, sizeof(struct resizer_params));
+
+ return 0;
+}
+
+void
+vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(vpfe_rsz->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
new file mode 100644
index 000000000000..59a79422b914
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_DM365_RESIZER_H
+#define _DAVINCI_VPFE_DM365_RESIZER_H
+
+enum resizer_oper_mode {
+ RESIZER_MODE_CONTINIOUS = 0,
+ RESIZER_MODE_ONE_SHOT = 1,
+};
+
+struct f_div_pass {
+ unsigned int o_hsz;
+ unsigned int i_hps;
+ unsigned int h_phs;
+ unsigned int src_hps;
+ unsigned int src_hsz;
+};
+
+#define MAX_PASSES 2
+
+struct f_div_param {
+ unsigned char en;
+ unsigned int num_passes;
+ struct f_div_pass pass[MAX_PASSES];
+};
+
+/* Resizer Rescale Parameters*/
+struct resizer_scale_param {
+ bool h_flip;
+ bool v_flip;
+ bool cen;
+ bool yen;
+ unsigned short i_vps;
+ unsigned short i_hps;
+ unsigned short o_vsz;
+ unsigned short o_hsz;
+ unsigned short v_phs_y;
+ unsigned short v_phs_c;
+ unsigned short v_dif;
+ /* resize method - Luminance */
+ enum vpfe_rsz_intp_t v_typ_y;
+ /* resize method - Chrominance */
+ enum vpfe_rsz_intp_t v_typ_c;
+ /* vertical lpf intensity - Luminance */
+ unsigned char v_lpf_int_y;
+ /* vertical lpf intensity - Chrominance */
+ unsigned char v_lpf_int_c;
+ unsigned short h_phs;
+ unsigned short h_dif;
+ /* resize method - Luminance */
+ enum vpfe_rsz_intp_t h_typ_y;
+ /* resize method - Chrominance */
+ enum vpfe_rsz_intp_t h_typ_c;
+ /* horizontal lpf intensity - Luminance */
+ unsigned char h_lpf_int_y;
+ /* horizontal lpf intensity - Chrominance */
+ unsigned char h_lpf_int_c;
+ bool dscale_en;
+ enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
+ enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
+ /* store the calculated frame division parameter */
+ struct f_div_param f_div;
+};
+
+enum resizer_rgb_t {
+ OUTPUT_32BIT,
+ OUTPUT_16BIT
+};
+
+enum resizer_rgb_msk_t {
+ NOMASK = 0,
+ MASKLAST2 = 1,
+};
+
+/* Resizer RGB Conversion Parameters */
+struct resizer_rgb {
+ bool rgb_en;
+ enum resizer_rgb_t rgb_typ;
+ enum resizer_rgb_msk_t rgb_msk0;
+ enum resizer_rgb_msk_t rgb_msk1;
+ unsigned int rgb_alpha_val;
+};
+
+/* Resizer External Memory Parameters */
+struct rsz_ext_mem_param {
+ unsigned int rsz_sdr_oft_y;
+ unsigned int rsz_sdr_ptr_s_y;
+ unsigned int rsz_sdr_ptr_e_y;
+ unsigned int rsz_sdr_oft_c;
+ unsigned int rsz_sdr_ptr_s_c;
+ unsigned int rsz_sdr_ptr_e_c;
+ /* offset to be added to buffer start when flipping for y/ycbcr */
+ unsigned int flip_ofst_y;
+ /* offset to be added to buffer start when flipping for c */
+ unsigned int flip_ofst_c;
+ /* c offset for YUV 420SP */
+ unsigned int c_offset;
+ /* User Defined Y offset for YUV 420SP or YUV420ILE data */
+ unsigned int user_y_ofst;
+ /* User Defined C offset for YUV 420SP data */
+ unsigned int user_c_ofst;
+};
+
+enum rsz_data_source {
+ IPIPE_DATA,
+ IPIPEIF_DATA
+};
+
+enum rsz_src_img_fmt {
+ RSZ_IMG_422,
+ RSZ_IMG_420
+};
+
+enum rsz_dpaths_bypass_t {
+ BYPASS_OFF = 0,
+ BYPASS_ON = 1,
+};
+
+struct rsz_common_params {
+ unsigned int vps;
+ unsigned int vsz;
+ unsigned int hps;
+ unsigned int hsz;
+ /* 420 or 422 */
+ enum rsz_src_img_fmt src_img_fmt;
+ /* Y or C when src_fmt is 420, 0 - y, 1 - c */
+ unsigned char y_c;
+ /* flip raw or ycbcr */
+ unsigned char raw_flip;
+ /* IPIPE or IPIPEIF data */
+ enum rsz_data_source source;
+ enum rsz_dpaths_bypass_t passthrough;
+ unsigned char yuv_y_min;
+ unsigned char yuv_y_max;
+ unsigned char yuv_c_min;
+ unsigned char yuv_c_max;
+ bool rsz_seq_crv;
+ enum vpfe_chr_pos out_chr_pos;
+};
+
+struct resizer_params {
+ enum resizer_oper_mode oper_mode;
+ struct rsz_common_params rsz_common;
+ struct resizer_scale_param rsz_rsc_param[2];
+ struct resizer_rgb rsz2rgb[2];
+ struct rsz_ext_mem_param ext_mem_param[2];
+ bool rsz_en[2];
+ struct vpfe_rsz_config_params user_config;
+};
+
+#define ENABLE 1
+#define DISABLE (!ENABLE)
+
+#define RESIZER_CROP_PAD_SINK 0
+#define RESIZER_CROP_PAD_SOURCE 1
+#define RESIZER_CROP_PAD_SOURCE2 2
+
+#define RESIZER_CROP_PADS_NUM 3
+
+enum resizer_crop_input_entity {
+ RESIZER_CROP_INPUT_NONE = 0,
+ RESIZER_CROP_INPUT_IPIPEIF = 1,
+ RESIZER_CROP_INPUT_IPIPE = 2,
+};
+
+enum resizer_crop_output_entity {
+ RESIZER_CROP_OUTPUT_NONE,
+ RESIZER_A,
+ RESIZER_B,
+};
+
+struct dm365_crop_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_CROP_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_CROP_PADS_NUM];
+ enum resizer_crop_input_entity input;
+ enum resizer_crop_output_entity output;
+ enum resizer_crop_output_entity output2;
+ struct vpfe_resizer_device *rsz_device;
+};
+
+#define RESIZER_PAD_SINK 0
+#define RESIZER_PAD_SOURCE 1
+
+#define RESIZER_PADS_NUM 2
+
+enum resizer_input_entity {
+ RESIZER_INPUT_NONE = 0,
+ RESIZER_INPUT_CROP_RESIZER = 1,
+};
+
+enum resizer_output_entity {
+ RESIZER_OUTPUT_NONE = 0,
+ RESIZER_OUPUT_MEMORY = 1,
+};
+
+struct dm365_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
+ enum resizer_input_entity input;
+ enum resizer_output_entity output;
+ struct vpfe_video_device video_out;
+ struct vpfe_resizer_device *rsz_device;
+};
+
+struct vpfe_resizer_device {
+ struct dm365_crop_resizer_device crop_resizer;
+ struct dm365_resizer_device resizer_a;
+ struct dm365_resizer_device resizer_b;
+ struct resizer_params config;
+ void __iomem *base_addr;
+};
+
+int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev);
+int vpfe_resizer_register_entities(struct vpfe_resizer_device *vpfe_rsz,
+ struct v4l2_device *v4l2_dev);
+void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz);
+void vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
+ struct platform_device *pdev);
+void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer);
+void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer);
+
+#endif /* _DAVINCI_VPFE_DM365_RESIZER_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe.h b/drivers/staging/media/davinci_vpfe/vpfe.h
new file mode 100644
index 000000000000..0587bc52a840
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _VPFE_H
+#define _VPFE_H
+
+#ifdef __KERNEL__
+#include <linux/v4l2-subdev.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <media/davinci/vpfe_types.h>
+
+#define CAPTURE_DRV_NAME "vpfe-capture"
+
+struct vpfe_route {
+ __u32 input;
+ __u32 output;
+};
+
+enum vpfe_subdev_id {
+ VPFE_SUBDEV_TVP5146 = 1,
+ VPFE_SUBDEV_MT9T031 = 2,
+ VPFE_SUBDEV_TVP7002 = 3,
+ VPFE_SUBDEV_MT9P031 = 4,
+};
+
+struct vpfe_ext_subdev_info {
+ /* v4l2 subdev */
+ struct v4l2_subdev *subdev;
+ /* Sub device module name */
+ char module_name[32];
+ /* Sub device group id */
+ int grp_id;
+ /* Number of inputs supported */
+ int num_inputs;
+ /* inputs available at the sub device */
+ struct v4l2_input *inputs;
+ /* Sub dev routing information for each input */
+ struct vpfe_route *routes;
+ /* ccdc bus/interface configuration */
+ struct vpfe_hw_if_param ccdc_if_params;
+ /* i2c subdevice board info */
+ struct i2c_board_info board_info;
+ /* Is this a camera sub device ? */
+ unsigned is_camera:1;
+ /* check if sub dev supports routing */
+ unsigned can_route:1;
+ /* registered ? */
+ unsigned registered:1;
+};
+
+struct vpfe_config {
+ /* Number of sub devices connected to vpfe */
+ int num_subdevs;
+ /* information about each subdev */
+ struct vpfe_ext_subdev_info *sub_devs;
+ /* evm card info */
+ char *card_name;
+ /* setup function for the input path */
+ int (*setup_input)(enum vpfe_subdev_id id);
+ /* number of clocks */
+ int num_clocks;
+ /* clocks used for vpfe capture */
+ char *clocks[];
+};
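+
+/*
+ * Illustrative only: a board file typically provides this structure as
+ * platform data so the capture driver can pick it up at probe time. The
+ * names below (sub-device array, card name, input setup hook, clock name)
+ * are hypothetical placeholders, not part of this driver:
+ *
+ *	static struct vpfe_config dm365_evm_vpfe_cfg = {
+ *		.num_subdevs	= ARRAY_SIZE(dm365_evm_sub_devs),
+ *		.sub_devs	= dm365_evm_sub_devs,
+ *		.card_name	= "DM365 EVM",
+ *		.setup_input	= dm365_evm_setup_input,
+ *		.num_clocks	= 1,
+ *		.clocks		= {"vpss_master"},
+ *	};
+ */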
+#endif
+#endif
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
new file mode 100644
index 000000000000..7b351717bcbb
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ *
+ *
+ * Driver name : VPFE Capture driver
+ * The VPFE Capture driver allows applications to capture and stream video
+ * frames on DaVinci SoCs (DM6446, DM355, etc.) from a YUV source such as
+ * the TVP5146, or Raw Bayer RGB image data from an image sensor
+ * such as Micron's MT9T001, MT9T031, etc.
+ *
+ * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ * consists of a Video Processing Front End (VPFE) for capturing
+ * video/raw image data and Video Processing Back End (VPBE) for displaying
+ * YUV data through an in-built analog encoder or Digital LCD port. This
+ * driver is for capture through the VPFE. A typical EVM using these SoCs has
+ * the following high-level configuration.
+ *
+ * decoder(TVP5146/ YUV/
+ * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ * data input | |
+ * V |
+ * SDRAM |
+ * V
+ * Image Processor
+ * |
+ * V
+ * SDRAM
+ * The data flow happens from a decoder connected to the VPFE over a
+ * YUV embedded (BT.656/BT.1120) or separate sync or raw bayer rgb interface
+ * and to the input of VPFE through an optional MUX (if more inputs are
+ * to be interfaced on the EVM). The input data is first passed through
+ * the CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ * does little or no processing on YUV data and pre-processes Raw
+ * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
+ * Color Space Conversion (CSC), data gain/offset, etc. After this, data
+ * can be written to SDRAM or can be connected to the image processing
+ * block such as IPIPE (on DM355/DM365 only).
+ *
+ * Features supported
+ * - MMAP IO
+ * - USERPTR IO
+ * - Capture using TVP5146 over BT.656
+ * - Support for interfacing decoders using sub device model
+ * - Work with DM365 or DM355 or DM6446 CCDC to do Raw Bayer
+ * RGB/YUV data capture to SDRAM.
+ * - Chaining of Image Processor
+ * - SINGLE-SHOT mode
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "vpfe.h"
+#include "vpfe_mc_capture.h"
+
+static bool debug;
+static bool interface;
+
+module_param(interface, bool, S_IRUGO);
+module_param(debug, bool, 0644);
+
+/**
+ * VPFE capture can be used for capturing video, e.g. from a TVP5146 or
+ * TVP7002, or for capturing raw bayer data from camera sensors such as the
+ * mt9p031. At this point mt9p031 and tvp5146 cannot co-exist due to an i2c
+ * address collision, so set the variable below from bootargs to select either
+ * video capture or camera capture.
+ * interface = 0 - video capture (from TVP514x or such),
+ * interface = 1 - Camera capture (from mt9p031 or such)
+ * Re-visit this when we fix the co-existence issue
+ */
+MODULE_PARM_DESC(interface, "interface 0-1 (default:0)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
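+
+/*
+ * Usage sketch (illustrative; the exact module/bootarg name depends on how
+ * the driver is built): load the driver with "interface=1" to select camera
+ * capture from an mt9p031-class sensor, or leave interface at its default
+ * of 0 for video capture from a TVP514x-class decoder. "debug=1" enables
+ * the verbose v4l2 debug messages used throughout this driver.
+ */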
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* map mbus_fmt to pixelformat */
+void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix)
+{
+ switch (mbus->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_YUYV10_1X20:
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG12_1X12:
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
+ pix->bytesperline = pix->width * 2;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ pix->pixelformat = V4L2_PIX_FMT_NV12;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_Y8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_GREY;
+ pix->bytesperline = pix->width;
+ break;
+
+ case V4L2_MBUS_FMT_UV8_1X8:
+ pix->pixelformat = V4L2_PIX_FMT_UV8;
+ pix->bytesperline = pix->width;
+ break;
+
+ default:
+ pr_err("Invalid mbus code set\n");
+ }
+ /* pitch should be 32 bytes aligned */
+ pix->bytesperline = ALIGN(pix->bytesperline, 32);
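+ /*
+ * NV12 carries a full-size Y plane plus a half-size interleaved CbCr
+ * plane, i.e. 1.5 bytes per pixel; the other formats handled here are
+ * single-plane packed formats.
+ */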
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+ pix->sizeimage = pix->bytesperline * pix->height +
+ ((pix->bytesperline * pix->height) >> 1);
+ else
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
+
+/* ISR for VINT0*/
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_isr\n");
+ vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
+ vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
+ return IRQ_HANDLED;
+}
+
+/* vpfe_vdint1_isr() - isr handler for VINT1 interrupt */
+static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_vdint1_isr\n");
+ vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
+ return IRQ_HANDLED;
+}
+
+/* vpfe_imp_dma_isr() - ISR for ipipe dma completion */
+static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
+{
+ struct vpfe_device *vpfe_dev = dev_id;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_imp_dma_isr\n");
+ vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
+ vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
+ return IRQ_HANDLED;
+}
+
+/*
+ * vpfe_disable_clock() - Disable clocks for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Disables the clocks defined in the vpfe configuration. The function
+ * assumes that at least one clock is defined, which is
+ * true as of now.
+ */
+static void vpfe_disable_clock(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
+ int i;
+
+ for (i = 0; i < vpfe_cfg->num_clocks; i++) {
+ clk_disable_unprepare(vpfe_dev->clks[i]);
+ clk_put(vpfe_dev->clks[i]);
+ }
+ kzfree(vpfe_dev->clks);
+ v4l2_info(vpfe_dev->pdev->driver, "vpfe capture clocks disabled\n");
+}
+
+/*
+ * vpfe_enable_clock() - Enable clocks for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Enables the clocks defined in the vpfe configuration. The function
+ * assumes that at least one clock is defined, which is
+ * true as of now.
+ */
+static int vpfe_enable_clock(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
+ int ret = -EFAULT;
+ int i;
+
+ if (!vpfe_cfg->num_clocks)
+ return 0;
+
+ vpfe_dev->clks = kzalloc(vpfe_cfg->num_clocks *
+ sizeof(struct clk *), GFP_KERNEL);
+ if (vpfe_dev->clks == NULL) {
+ v4l2_err(vpfe_dev->pdev->driver, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vpfe_cfg->num_clocks; i++) {
+ if (vpfe_cfg->clocks[i] == NULL) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "clock %s is not defined in vpfe config\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ vpfe_dev->clks[i] =
+ clk_get(vpfe_dev->pdev, vpfe_cfg->clocks[i]);
+ if (vpfe_dev->clks[i] == NULL) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "Failed to get clock %s\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ if (clk_prepare_enable(vpfe_dev->clks[i])) {
+ v4l2_err(vpfe_dev->pdev->driver,
+ "vpfe clock %s not enabled\n",
+ vpfe_cfg->clocks[i]);
+ goto out;
+ }
+
+ v4l2_info(vpfe_dev->pdev->driver, "vpss clock %s enabled",
+ vpfe_cfg->clocks[i]);
+ }
+
+ return 0;
+out:
+ for (i = 0; i < vpfe_cfg->num_clocks; i++)
+ if (vpfe_dev->clks[i]) {
+ clk_disable_unprepare(vpfe_dev->clks[i]);
+ clk_put(vpfe_dev->clks[i]);
+ }
+
+ v4l2_err(vpfe_dev->pdev->driver, "Failed to enable clocks\n");
+ kzfree(vpfe_dev->clks);
+
+ return ret;
+}
+
+/*
+ * vpfe_detach_irq() - Detach IRQs for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Detach all IRQs defined in vpfe configuration.
+ */
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+ free_irq(vpfe_dev->imp_dma_irq, vpfe_dev);
+}
+
+/*
+ * vpfe_attach_irq() - Attach IRQs for vpfe capture driver
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * Attach all IRQs defined in vpfe configuration.
+ */
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+ int ret = 0;
+
+ ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+ "vpfe_capture0", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting VINT0 interrupt\n");
+ return ret;
+ }
+
+ ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, IRQF_DISABLED,
+ "vpfe_capture1", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting VINT1 interrupt\n");
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ return ret;
+ }
+
+ ret = request_irq(vpfe_dev->imp_dma_irq, vpfe_imp_dma_isr,
+ IRQF_DISABLED, "Imp_Sdram_Irq", vpfe_dev);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "Error: requesting IMP IRQ interrupt\n");
+ free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+ free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * register_i2c_devices() - register all i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * register all i2c v4l2 subdevs
+ */
+static int register_i2c_devices(struct vpfe_device *vpfe_dev)
+{
+ struct vpfe_ext_subdev_info *sdinfo;
+ struct vpfe_config *vpfe_cfg;
+ struct i2c_adapter *i2c_adap;
+ unsigned int num_subdevs;
+ int ret;
+ int i;
+ int k;
+
+ vpfe_cfg = vpfe_dev->cfg;
+ i2c_adap = i2c_get_adapter(1);
+ num_subdevs = vpfe_cfg->num_subdevs;
+ vpfe_dev->sd =
+ kzalloc(sizeof(struct v4l2_subdev *)*num_subdevs, GFP_KERNEL);
+ if (vpfe_dev->sd == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "unable to allocate memory for subdevice\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0, k = 0; i < num_subdevs; i++) {
+ sdinfo = &vpfe_cfg->sub_devs[i];
+ /*
+ * register subdevices based on interface setting. Currently
+ * tvp5146 and mt9p031 cannot co-exists due to i2c address
+ * conflicts. So only one of them is registered. Re-visit this
+ * once we have support for i2c switch handling in i2c driver
+ * framework
+ */
+ if (interface == sdinfo->is_camera) {
+ /* setup input path */
+ if (vpfe_cfg->setup_input &&
+ vpfe_cfg->setup_input(sdinfo->grp_id) < 0) {
+ ret = -EFAULT;
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "could not setup input for %s\n",
+ sdinfo->module_name);
+ goto probe_sd_out;
+ }
+ /* Load up the subdevice */
+ vpfe_dev->sd[k] =
+ v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
+ i2c_adap, &sdinfo->board_info,
+ NULL);
+ if (vpfe_dev->sd[k]) {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ sdinfo->module_name);
+
+ vpfe_dev->sd[k]->grp_id = sdinfo->grp_id;
+ k++;
+
+ sdinfo->registered = 1;
+ }
+ } else {
+ v4l2_info(&vpfe_dev->v4l2_dev,
+ "v4l2 sub device %s is not registered\n",
+ sdinfo->module_name);
+ }
+ }
+ vpfe_dev->num_ext_subdevs = k;
+
+ return 0;
+
+probe_sd_out:
+ kzfree(vpfe_dev->sd);
+
+ return ret;
+}
+
+/*
+ * vpfe_register_entities() - register all v4l2 subdevs and media entities
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * register all v4l2 subdevs, media entities, and creates links
+ * between entities
+ */
+static int vpfe_register_entities(struct vpfe_device *vpfe_dev)
+{
+ unsigned int flags = 0;
+ int ret;
+ int i;
+
+ /* register i2c devices first */
+ ret = register_i2c_devices(vpfe_dev);
+ if (ret)
+ return ret;
+
+ /* register rest of the sub-devs */
+ ret = vpfe_isif_register_entities(&vpfe_dev->vpfe_isif,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = vpfe_ipipeif_register_entities(&vpfe_dev->vpfe_ipipeif,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_isif_register;
+
+ ret = vpfe_ipipe_register_entities(&vpfe_dev->vpfe_ipipe,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_ipipeif_register;
+
+ ret = vpfe_resizer_register_entities(&vpfe_dev->vpfe_resizer,
+ &vpfe_dev->v4l2_dev);
+ if (ret)
+ goto out_ipipe_register;
+
+ /* create links now, starting with external(i2c) entities */
+ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
+ /* if the entity has no pads (e.g. an amplifier),
+ a link can't be established */
+ if (vpfe_dev->sd[i]->entity.num_pads) {
+ ret = media_entity_create_link(&vpfe_dev->sd[i]->entity,
+ 0, &vpfe_dev->vpfe_isif.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+ }
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_isif.subdev.entity, 1,
+ &vpfe_dev->vpfe_ipipeif.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
+ &vpfe_dev->vpfe_ipipe.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipe.subdev.entity,
+ 1, &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
+ &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
+ 0, flags);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ ret = v4l2_device_register_subdev_nodes(&vpfe_dev->v4l2_dev);
+ if (ret < 0)
+ goto out_resizer_register;
+
+ return 0;
+
+out_resizer_register:
+ vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
+out_ipipe_register:
+ vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
+out_ipipeif_register:
+ vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
+out_isif_register:
+ vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
+
+ return ret;
+}
+
+/*
+ * vpfe_unregister_entities() - unregister all v4l2 subdevs and media entities
+ * @vpfe_dev - ptr to vpfe capture device
+ *
+ * unregister all v4l2 subdevs and media entities
+ */
+static void vpfe_unregister_entities(struct vpfe_device *vpfe_dev)
+{
+ vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
+ vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
+ vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
+ vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
+}
+
+/*
+ * vpfe_cleanup_modules() - cleanup all non-i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ * @pdev - pointer to platform device
+ *
+ * cleanup all v4l2 subdevs
+ */
+static void vpfe_cleanup_modules(struct vpfe_device *vpfe_dev,
+ struct platform_device *pdev)
+{
+ vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
+ vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
+ vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
+ vpfe_resizer_cleanup(&vpfe_dev->vpfe_resizer, pdev);
+}
+
+/*
+ * vpfe_initialize_modules() - initialize all non-i2c v4l2 subdevs
+ * @vpfe_dev - ptr to vpfe capture device
+ * @pdev - pointer to platform device
+ *
+ * initialize all v4l2 subdevs and media entities
+ */
+static int vpfe_initialize_modules(struct vpfe_device *vpfe_dev,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = vpfe_isif_init(&vpfe_dev->vpfe_isif, pdev);
+ if (ret)
+ return ret;
+
+ ret = vpfe_ipipeif_init(&vpfe_dev->vpfe_ipipeif, pdev);
+ if (ret)
+ goto out_isif_init;
+
+ ret = vpfe_ipipe_init(&vpfe_dev->vpfe_ipipe, pdev);
+ if (ret)
+ goto out_ipipeif_init;
+
+ ret = vpfe_resizer_init(&vpfe_dev->vpfe_resizer, pdev);
+ if (ret)
+ goto out_ipipe_init;
+
+ return 0;
+
+out_ipipe_init:
+ vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
+out_ipipeif_init:
+ vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
+out_isif_init:
+ vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
+
+ return ret;
+}
+
+/*
+ * vpfe_probe() : vpfe probe function
+ * @pdev: platform device pointer
+ *
+ * This function creates device entries by registering itself with the V4L2
+ * framework and initializes the fields of each device object.
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev;
+ struct resource *res1;
+ int ret = -ENOMEM;
+
+ vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
+ if (!vpfe_dev) {
+ v4l2_err(pdev->dev.driver,
+ "Failed to allocate memory for vpfe_dev\n");
+ return ret;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ vpfe_dev->cfg = pdev->dev.platform_data;
+ if (vpfe_dev->cfg->card_name == NULL ||
+ vpfe_dev->cfg->sub_devs == NULL) {
+ v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+
+ /* Get VINT0 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT0\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->ccdc_irq0 = res1->start;
+
+ /* Get VINT1 irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for VINT1\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->ccdc_irq1 = res1->start;
+
+ /* Get DMA irq resource */
+ res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+ if (!res1) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to get interrupt for DMA\n");
+ ret = -ENOENT;
+ goto probe_free_dev_mem;
+ }
+ vpfe_dev->imp_dma_irq = res1->start;
+
+ vpfe_dev->pdev = &pdev->dev;
+
+ /* enable vpss clocks */
+ ret = vpfe_enable_clock(vpfe_dev);
+ if (ret)
+ goto probe_free_dev_mem;
+
+ ret = vpfe_initialize_modules(vpfe_dev, pdev);
+ if (ret)
+ goto probe_disable_clock;
+
+ vpfe_dev->media_dev.dev = vpfe_dev->pdev;
+ strcpy((char *)&vpfe_dev->media_dev.model, "davinci-media");
+
+ ret = media_device_register(&vpfe_dev->media_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver,
+ "Unable to register media device.\n");
+ goto probe_out_entities_cleanup;
+ }
+
+ vpfe_dev->v4l2_dev.mdev = &vpfe_dev->media_dev;
+ ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(pdev->dev.driver, "Unable to register v4l2 device.\n");
+ goto probe_out_media_unregister;
+ }
+
+ v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe_dev);
+ /* register subdevs/entities */
+ ret = vpfe_register_entities(vpfe_dev);
+ if (ret)
+ goto probe_out_v4l2_unregister;
+
+ ret = vpfe_attach_irq(vpfe_dev);
+ if (ret)
+ goto probe_out_entities_unregister;
+
+ return 0;
+
+probe_out_entities_unregister:
+ vpfe_unregister_entities(vpfe_dev);
+ kzfree(vpfe_dev->sd);
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+probe_out_media_unregister:
+ media_device_unregister(&vpfe_dev->media_dev);
+probe_out_entities_cleanup:
+ vpfe_cleanup_modules(vpfe_dev, pdev);
+probe_disable_clock:
+ vpfe_disable_clock(vpfe_dev);
+probe_free_dev_mem:
+ kzfree(vpfe_dev);
+
+ return ret;
+}
+
+/*
+ * vpfe_remove : This function unregisters the device from the V4L2 driver
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
+
+ v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+
+ kzfree(vpfe_dev->sd);
+ vpfe_detach_irq(vpfe_dev);
+ vpfe_unregister_entities(vpfe_dev);
+ vpfe_cleanup_modules(vpfe_dev, pdev);
+ v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+ media_device_unregister(&vpfe_dev->media_dev);
+ vpfe_disable_clock(vpfe_dev);
+ kzfree(vpfe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpfe_driver = {
+ .driver = {
+ .name = CAPTURE_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+};
+
+/**
+ * vpfe_init : This function registers the platform device driver
+ */
+static __init int vpfe_init(void)
+{
+ /* Register driver to the kernel */
+ return platform_driver_register(&vpfe_driver);
+}
+
+/**
+ * vpfe_cleanup : This function unregisters the platform device driver
+ */
+static void vpfe_cleanup(void)
+{
+ platform_driver_unregister(&vpfe_driver);
+}
+
+module_init(vpfe_init);
+module_exit(vpfe_cleanup);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
new file mode 100644
index 000000000000..68f6fe43a5b5
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_MC_CAPTURE_H
+#define _DAVINCI_VPFE_MC_CAPTURE_H
+
+#include "dm365_ipipe.h"
+#include "dm365_ipipeif.h"
+#include "dm365_isif.h"
+#include "dm365_resizer.h"
+#include "vpfe_video.h"
+
+#define VPFE_MAJOR_RELEASE 0
+#define VPFE_MINOR_RELEASE 0
+#define VPFE_BUILD 1
+#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \
+ (VPFE_MINOR_RELEASE << 8) | \
+ VPFE_BUILD)
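+/* e.g. release 0.0.1 encodes as (0 << 16) | (0 << 8) | 1 = 0x000001 */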
+
+/* IPIPE hardware limits */
+#define IPIPE_MAX_OUTPUT_WIDTH_A 2176
+#define IPIPE_MAX_OUTPUT_WIDTH_B 640
+
+/* Based on max resolution supported. QXGA */
+#define IPIPE_MAX_OUTPUT_HEIGHT_A 1536
+/* Based on max resolution supported. VGA */
+#define IPIPE_MAX_OUTPUT_HEIGHT_B 480
+
+#define to_vpfe_device(ptr_module) \
+ container_of(ptr_module, struct vpfe_device, vpfe_##ptr_module)
+#define to_device(ptr_module) \
+ (to_vpfe_device(ptr_module)->dev)
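+
+/*
+ * Example: with a local "struct vpfe_resizer_device *resizer",
+ * to_vpfe_device(resizer) expands to
+ * container_of(resizer, struct vpfe_device, vpfe_resizer), so the argument
+ * name must match the suffix of the corresponding vpfe_device member.
+ */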
+
+struct vpfe_device {
+ /* external registered sub devices */
+ struct v4l2_subdev **sd;
+ /* number of registered external subdevs */
+ unsigned int num_ext_subdevs;
+ /* vpfe cfg */
+ struct vpfe_config *cfg;
+ /* clock ptrs for vpfe capture */
+ struct clk **clks;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ /* parent device */
+ struct device *pdev;
+ /* IRQ number for DMA transfer completion at the image processor */
+ unsigned int imp_dma_irq;
+ /* CCDC IRQs used when CCDC/ISIF output to SDRAM */
+ unsigned int ccdc_irq0;
+ unsigned int ccdc_irq1;
+ /* maximum video memory that is available */
+ unsigned int video_limit;
+ /* media device */
+ struct media_device media_dev;
+ /* ccdc subdevice */
+ struct vpfe_isif_device vpfe_isif;
+ /* ipipeif subdevice */
+ struct vpfe_ipipeif_device vpfe_ipipeif;
+ /* ipipe subdevice */
+ struct vpfe_ipipe_device vpfe_ipipe;
+ /* resizer subdevice */
+ struct vpfe_resizer_device vpfe_resizer;
+};
+
+/* File handle structure */
+struct vpfe_fh {
+ struct v4l2_fh vfh;
+ struct vpfe_video_device *video;
+ /* Indicates whether this file handle is doing IO */
+ u8 io_allowed;
+ /* Used to keep track priority of this instance */
+ enum v4l2_priority prio;
+};
+
+void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix);
+
+#endif /* _DAVINCI_VPFE_MC_CAPTURE_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
new file mode 100644
index 000000000000..99ccbebea598
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "vpfe.h"
+#include "vpfe_mc_capture.h"
+
+/* minimum number of buffers needed in cont-mode */
+#define MIN_NUM_BUFFERS 3
+
+static int debug;
+
+/* get v4l2 subdev pointer to external subdev which is active */
+static struct media_entity *vpfe_get_input_entity
+ (struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct media_pad *remote;
+
+ remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ if (remote == NULL) {
+ pr_err("Invalid media connection to isif/ccdc\n");
+ return NULL;
+ }
+ return remote->entity;
+}
+
+/* updates external subdev(sensor/decoder) which is active */
+static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_config *vpfe_cfg;
+ struct v4l2_subdev *subdev;
+ struct media_pad *remote;
+ int i;
+
+ remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ if (remote == NULL) {
+ pr_err("Invalid media connection to isif/ccdc\n");
+ return -EINVAL;
+ }
+
+ subdev = media_entity_to_v4l2_subdev(remote->entity);
+ vpfe_cfg = vpfe_dev->pdev->platform_data;
+ for (i = 0; i < vpfe_cfg->num_subdevs; i++) {
+ if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) {
+ video->current_ext_subdev = &vpfe_cfg->sub_devs[i];
+ break;
+ }
+ }
+
+ /* if user not linked decoder/sensor to isif/ccdc */
+ if (i == vpfe_cfg->num_subdevs) {
+ pr_err("Invalid media chain connection to isif/ccdc\n");
+ return -EINVAL;
+ }
+ /* find the v4l2 subdev pointer */
+ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) {
+ if (!strcmp(video->current_ext_subdev->module_name,
+ vpfe_dev->sd[i]->name))
+ video->current_ext_subdev->subdev = vpfe_dev->sd[i];
+ }
+ return 0;
+}
+
+/* get the subdev which is connected to the output video node */
+static struct v4l2_subdev *
+vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
+{
+ struct media_pad *remote = media_entity_remote_source(&video->pad);
+
+ if (remote == NULL || remote->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+ if (pad)
+ *pad = remote->index;
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* get the format set at output pad of the adjacent subdev */
+static int
+__vpfe_video_get_format(struct vpfe_video_device *video,
+ struct v4l2_format *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct media_pad *remote;
+ u32 pad;
+ int ret;
+
+ subdev = vpfe_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ remote = media_entity_remote_source(&video->pad);
+ fmt.pad = remote->index;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret == -ENOIOCTLCMD)
+ return -EINVAL;
+
+ format->type = video->type;
+ /* convert mbus_format to v4l2_format */
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(&fmt.format, &format->fmt.pix);
+
+ return 0;
+}
+
+/* make a note of pipeline details */
+static void vpfe_prepare_pipeline(struct vpfe_video_device *video)
+{
+ struct media_entity *entity = &video->video_dev.entity;
+ struct media_device *mdev = entity->parent;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_video_device *far_end = NULL;
+ struct media_entity_graph graph;
+
+ pipe->input_num = 0;
+ pipe->output_num = 0;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->inputs[pipe->input_num++] = video;
+ else
+ pipe->outputs[pipe->output_num++] = video;
+
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (entity == &video->video_dev.entity)
+ continue;
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ continue;
+ far_end = to_vpfe_video(media_entity_to_video_device(entity));
+ if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->inputs[pipe->input_num++] = far_end;
+ else
+ pipe->outputs[pipe->output_num++] = far_end;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+}
+
+/* update pipe state selected by user */
+static int vpfe_update_pipe_state(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
+
+ vpfe_prepare_pipeline(video);
+
+ /*
+ * Find out if there is any input video; if yes, it is single shot.
+ */
+ if (pipe->input_num == 0) {
+ pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
+ pr_err("Invalid external subdev\n");
+ return ret;
+ }
+ } else {
+ pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
+ }
+ video->initialized = 1;
+ video->skip_frame_count = 1;
+ video->skip_frame_count_init = 1;
+ return 0;
+}
+
+/* checks whether the pipeline is ready for enabling */
+int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->input_num; i++)
+ if (!pipe->inputs[i]->started ||
+ pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
+ return 0;
+ for (i = 0; i < pipe->output_num; i++)
+ if (!pipe->outputs[i]->started ||
+ pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
+ return 0;
+ return 1;
+}
+
+/**
+ * Validate a pipeline by checking both ends of all links for format
+ * discrepancies.
+ *
+ * Return 0 if all formats match, or -EPIPE if at least one link is found with
+ * different formats on its two ends.
+ */
+static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
+{
+ struct v4l2_subdev_format fmt_source;
+ struct v4l2_subdev_format fmt_sink;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ int ret;
+
+ /*
+ * It should not matter whether we start from outputs[0] or outputs[1], as
+ * the general idea is to traverse backwards and the output video node
+ * always carries the format of the connected pad.
+ */
+ subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
+ if (subdev == NULL)
+ return -EPIPE;
+
+ while (1) {
+ /* Retrieve the sink format */
+ pad = &subdev->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL,
+ &fmt_sink);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Retrieve the source format */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ pad->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_source.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ /* Check if the two ends match */
+ if (fmt_source.format.code != fmt_sink.format.code ||
+ fmt_source.format.width != fmt_sink.format.width ||
+ fmt_source.format.height != fmt_sink.format.height)
+ return -EPIPE;
+ }
+ return 0;
+}
+
+/*
+ * vpfe_pipeline_enable() - Enable streaming on a pipeline
+ * @vpfe_dev: vpfe device
+ * @pipe: vpfe pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+ struct media_device *mdev;
+ int ret = 0;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ entity = vpfe_get_input_entity(pipe->outputs[0]);
+ else
+ entity = &pipe->inputs[0]->video_dev.entity;
+
+ mdev = entity->parent;
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ continue;
+ subdev = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ break;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+
+/*
+ * vpfe_pipeline_disable() - Disable streaming on a pipeline
+ * @vpfe_dev: vpfe device
+ * @pipe: VPFE pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain.
+ *
+ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
+ * can't be stopped.
+ */
+static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+ struct media_device *mdev;
+ int ret = 0;
+
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ entity = vpfe_get_input_entity(pipe->outputs[0]);
+ else
+ entity = &pipe->inputs[0]->video_dev.entity;
+
+ mdev = entity->parent;
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ continue;
+ subdev = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ break;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+
+ return (ret == 0) ? ret : -ETIMEDOUT;
+}
+
+/*
+ * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline
+ * @vpfe_dev: VPFE device
+ * @pipe: VPFE pipeline
+ * @state: Stream state (stopped or active)
+ *
+ * Set the pipeline to the given stream state.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe,
+ enum vpfe_pipeline_stream_state state)
+{
+ if (state == VPFE_PIPELINE_STREAM_STOPPED)
+ return vpfe_pipeline_disable(pipe);
+
+ return vpfe_pipeline_enable(pipe);
+}
+
+static int all_videos_stopped(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int i;
+
+ for (i = 0; i < pipe->input_num; i++)
+ if (pipe->inputs[i]->started)
+ return 0;
+ for (i = 0; i < pipe->output_num; i++)
+ if (pipe->outputs[i]->started)
+ return 0;
+ return 1;
+}
+
+/*
+ * vpfe_open() - open video device
+ * @file: file pointer
+ *
+ * initialize media pipeline state, allocate memory for file handle
+ *
+ * Return 0 if successful, -ENOMEM or -ENODEV otherwise.
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_fh *handle;
+
+ /* Allocate memory for the file handle object */
+ handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
+
+ if (handle == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(&handle->vfh, &video->video_dev);
+ v4l2_fh_add(&handle->vfh);
+
+ mutex_lock(&video->lock);
+ /* If the decoder is not initialized, initialize it */
+ if (!video->initialized && vpfe_update_pipe_state(video)) {
+ mutex_unlock(&video->lock);
+ return -ENODEV;
+ }
+ /* Increment device users counter */
+ video->usrs++;
+ /* Set io_allowed member to false */
+ handle->io_allowed = 0;
+ v4l2_prio_open(&video->prio, &handle->prio);
+ handle->video = video;
+ file->private_data = &handle->vfh;
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+/* get the next buffer available from dma queue */
+static unsigned long
+vpfe_video_get_next_buffer(struct vpfe_video_device *video)
+{
+ video->cur_frm = video->next_frm =
+ list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+
+ list_del(&video->next_frm->list);
+ video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+}
+
+/* schedule the next buffer which is available on dma queue */
+void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ if (list_empty(&video->dma_queue))
+ return;
+
+ video->next_frm = list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+
+ if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state)
+ video->cur_frm = video->next_frm;
+
+ list_del(&video->next_frm->list);
+ video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+ video->ops->queue(vpfe_dev, addr);
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+}
+
+/* schedule the buffer for capturing bottom field */
+void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
+{
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ addr += video->field_off;
+ video->ops->queue(vpfe_dev, addr);
+}
+
+/* make buffer available for dequeue */
+void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+
+ do_gettimeofday(&video->cur_frm->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE);
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
+ video->cur_frm = video->next_frm;
+}
+
+/* vpfe_stop_capture() - stop streaming */
+static void vpfe_stop_capture(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+
+ video->started = 0;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return;
+ if (all_videos_stopped(video))
+ vpfe_pipeline_set_stream(pipe,
+ VPFE_PIPELINE_STREAM_STOPPED);
+}
+
+/*
+ * vpfe_release() - release video device
+ * @file: file pointer
+ *
+ * deletes buffer queue, frees the buffers and the vpfe file handle
+ *
+ * Return 0
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+ /* Get the device lock */
+ mutex_lock(&video->lock);
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ if (video->started) {
+ vpfe_stop_capture(video);
+ /* mark pipe state as stopped in vpfe_release(),
+ as the app might call streamon() after streamoff(),
+ in which case the driver has to start streaming again.
+ */
+ video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
+ vb2_streamoff(&video->buffer_queue,
+ video->buffer_queue.type);
+ }
+ video->io_usrs = 0;
+ /* Free buffers allocated */
+ vb2_queue_release(&video->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ }
+ /* Decrement device users counter */
+ video->usrs--;
+ /* Close the priority */
+ v4l2_prio_close(&video->prio, fh->prio);
+ /* If this is the last file handle */
+ if (!video->usrs)
+ video->initialized = 0;
+ mutex_unlock(&video->lock);
+ file->private_data = NULL;
+ /* Free memory allocated to file handle object */
+ v4l2_fh_del(vfh);
+ kzfree(fh);
+ return 0;
+}
+
+/*
+ * vpfe_mmap() - map kernel space buffers into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+ return vb2_mmap(&video->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll() - handler for the select/poll system calls
+ */
+static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+ if (video->started)
+ return vb2_poll(&video->buffer_queue, file, wait);
+ return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpfe_mmap,
+ .poll = vpfe_poll
+};
+
+/*
+ * vpfe_querycap() - query capabilities of video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @cap: pointer to v4l2_capability structure
+ *
+ * fills v4l2 capabilities structure
+ *
+ * Return 0
+ */
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = cap->capabilities;
+ cap->version = VPFE_CAPTURE_VERSION_CODE;
+ strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+
+ return 0;
+}
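/*
 * Illustrative userspace sketch (not taken from the driver sources): how an
 * application might query the capabilities reported by vpfe_querycap() on a
 * video node registered by this driver. The /dev/video0 path is an
 * assumption; error handling is trimmed to the essentials.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int query_caps(const char *devnode)
{
	struct v4l2_capability cap;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		close(fd);
		return -1;
	}
	/* driver/card/bus_info are filled exactly as in vpfe_querycap() */
	printf("driver=%s card=%s bus=%s caps=0x%08x\n",
	       cap.driver, cap.card, cap.bus_info, cap.device_caps);
	close(fd);
	return (cap.device_caps & V4L2_CAP_STREAMING) ? 0 : -1;
}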
+
+/*
+ * vpfe_g_fmt() - get the format which is active on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * fills v4l2 format structure with active format
+ *
+ * Return 0
+ */
+static int vpfe_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
+ /* Fill in the information about format */
+ *fmt = video->fmt;
+ return 0;
+}
+
+/*
+ * vpfe_enum_fmt() - enum formats supported on media chain
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_fmtdesc structure
+ *
+ * fills the v4l2_fmtdesc structure with the output format set on the adjacent
+ * subdev; only one format is enumerated as the subdevs are already configured
+ *
+ * Return 0 if successful, error code otherwise
+ */
+static int vpfe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt mbus;
+ struct v4l2_subdev *subdev;
+ struct v4l2_format format;
+ struct media_pad *remote;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
+
+ /* since the subdev pad format is already set,
+ only one pixel format is available */
+ if (fmt->index > 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
+ return -EINVAL;
+ }
+ /* get the remote pad */
+ remote = media_entity_remote_source(&video->pad);
+ if (remote == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote pad for video node\n");
+ return -EINVAL;
+ }
+ /* get the remote subdev */
+ subdev = vpfe_video_remote_subdev(video, NULL);
+ if (subdev == NULL) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote subdev for video node\n");
+ return -EINVAL;
+ }
+ sd_fmt.pad = remote->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ /* get output format of remote subdev */
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev,
+ "invalid remote subdev for video node\n");
+ return ret;
+ }
+ /* convert to pix format */
+ mbus.code = sd_fmt.format.code;
+ mbus_to_pix(&mbus, &format.fmt.pix);
+ /* copy the result */
+ fmt->pixelformat = format.fmt.pix.pixelformat;
+
+ return 0;
+}
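/*
 * Illustrative userspace sketch (assumption: an open file descriptor for the
 * capture node): enumerating the single pixel format that vpfe_enum_fmt()
 * reports for the currently configured subdev chain.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void enum_formats(int fd)
{
	struct v4l2_fmtdesc desc;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&desc, 0, sizeof(desc));
		desc.index = i;
		desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* this driver returns -EINVAL for any index > 0 */
		if (ioctl(fd, VIDIOC_ENUM_FMT, &desc) < 0)
			break;
		printf("format %u: %c%c%c%c\n", i,
		       desc.pixelformat & 0xff,
		       (desc.pixelformat >> 8) & 0xff,
		       (desc.pixelformat >> 16) & 0xff,
		       (desc.pixelformat >> 24) & 0xff);
	}
}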
+
+/*
+ * vpfe_s_fmt() - set the format on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * validate and set the format on video device
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_format format;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
+ /* If streaming is started, return error */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ /* get adjacent subdev's output pad format */
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
+ *fmt = format;
+ video->fmt = *fmt;
+ return 0;
+}
+
+/*
+ * vpfe_try_fmt() - try the format on video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @fmt: pointer to v4l2_format structure
+ *
+ * validate the format, update with correct format
+ * based on output format set on adjacent subdev
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_format format;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
+ /* get adjacent subdev's output pad format */
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
+
+ *fmt = format;
+ return 0;
+}
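/*
 * Illustrative userspace sketch: since vpfe_s_fmt()/vpfe_try_fmt() simply
 * propagate the format of the adjacent subdev's output pad, an application
 * typically gets the current format, tries it, and sets it back unchanged.
 * Assumes an already-open capture node file descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int negotiate_format(int fd)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0)
		return -1;
	/* TRY_FMT/S_FMT return the subdev-derived format regardless */
	if (ioctl(fd, VIDIOC_TRY_FMT, &fmt) < 0)
		return -1;
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}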
+
+/*
+ * vpfe_enum_input() - enum inputs supported on media chain
+ * @file: file pointer
+ * @priv: void pointer
+ * @inp: pointer to v4l2_input structure
+ *
+ * fills the v4l2_input structure with the input available on the media chain;
+ * only one input is enumerated as the media chain is set up by this time
+ *
+ * Return 0 if successful, -EINVAL if the media chain is invalid
+ */
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+ /* enumerate from the subdev the user has chosen through mc */
+ if (inp->index < sdinfo->num_inputs) {
+ memcpy(inp, &sdinfo->inputs[inp->index],
+ sizeof(struct v4l2_input));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_g_input() - get index of the input which is active
+ * @file: file pointer
+ * @priv: void pointer
+ * @index: pointer to unsigned int
+ *
+ * set index with input index which is active
+ */
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+ *index = video->current_input;
+ return 0;
+}
+
+/*
+ * vpfe_s_input() - set input which is pointed by input index
+ * @file: file pointer
+ * @priv: void pointer
+ * @index: pointer to unsigned int
+ *
+ * set input on external subdev
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ struct vpfe_route *route;
+ struct v4l2_input *inps;
+ u32 output;
+ u32 input;
+ int ret;
+ int i;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+ /*
+ * If streaming is started return device busy
+ * error
+ */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ sdinfo = video->current_ext_subdev;
+ if (!sdinfo->registered) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ if (vpfe_dev->cfg->setup_input &&
+ vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
+ ret = -EFAULT;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "couldn't setup input for %s\n",
+ sdinfo->module_name);
+ goto unlock_out;
+ }
+ route = &sdinfo->routes[index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ sdinfo->grp_id, video,
+ s_routing, input, output, 0);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "s_input:error in setting input in decoder\n");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ }
+ /* set standards set by subdev in video device */
+ for (i = 0; i < sdinfo->num_inputs; i++) {
+ inps = &sdinfo->inputs[i];
+ video->video_dev.tvnorms |= inps->std;
+ }
+ video->current_input = index;
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
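/*
 * Illustrative userspace sketch: enumerate the inputs exposed through the
 * selected external subdev and switch to the first one, matching the
 * vpfe_enum_input()/vpfe_s_input() handlers above. Assumes an already-open
 * capture node file descriptor.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int select_first_input(int fd)
{
	struct v4l2_input inp;
	int index = 0;

	memset(&inp, 0, sizeof(inp));
	inp.index = 0;
	if (ioctl(fd, VIDIOC_ENUMINPUT, &inp) < 0)
		return -1;
	printf("input 0: %s\n", inp.name);
	/* fails with -EBUSY if streaming has already started */
	return ioctl(fd, VIDIOC_S_INPUT, &index);
}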
+
+/*
+ * vpfe_querystd() - query std which is being input on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @std_id: pointer to v4l2_std_id structure
+ *
+ * call the external subdev through v4l2_device_call_until_err to
+ * get the standard that is currently active.
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
+
+ ret = mutex_lock_interruptible(&video->lock);
+ sdinfo = video->current_ext_subdev;
+ if (ret)
+ return ret;
+ /* Call querystd function of decoder device */
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+/*
+ * vpfe_s_std() - set std on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @std_id: pointer to v4l2_std_id structure
+ *
+ * set the standard pointed to by std_id on the external subdev, calling it
+ * through v4l2_device_call_until_err
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
+
+ /* Call decoder driver function to set the standard */
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+ sdinfo = video->current_ext_subdev;
+ /* If streaming is started, return device busy error */
+ if (video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+ core, s_std, *std_id);
+ if (ret < 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
+ video->stdid = V4L2_STD_UNKNOWN;
+ goto unlock_out;
+ }
+ video->stdid = *std_id;
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
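/*
 * Illustrative userspace sketch: detect the incoming standard via the
 * external decoder (vpfe_querystd) and program it (vpfe_s_std). Assumes an
 * already-open capture node file descriptor.
 */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_detected_std(int fd)
{
	v4l2_std_id std = V4L2_STD_UNKNOWN;

	if (ioctl(fd, VIDIOC_QUERYSTD, &std) < 0)
		return -1;
	if (std == V4L2_STD_UNKNOWN)
		return -1;
	return ioctl(fd, VIDIOC_S_STD, &std);
}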
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
+ *tvnorm = video->stdid;
+ return 0;
+}
+
+/*
+ * vpfe_enum_dv_timings() - enumerate dv_timings supported by
+ * the external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_enum_dv_timings structure
+ *
+ * enumerate the dv_timings supported by the external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, enum_dv_timings, timings);
+}
+
+/*
+ * vpfe_query_dv_timings() - query the dv_timings which is being input
+ * to external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * get dv_timings which is being input on external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
+}
+
+/*
+ * vpfe_s_dv_timings() - set dv_timings on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * set the dv_timings pointed to by timings on the external subdev through
+ * v4l2_device_call_until_err; this also configures the amplifier
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");
+
+ video->stdid = V4L2_STD_UNKNOWN;
+ return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+ video->current_ext_subdev->grp_id,
+ video, s_dv_timings, timings);
+}
+
+/*
+ * vpfe_g_dv_timings() - get dv_timings which is set on external subdev
+ * @file: file pointer
+ * @priv: void pointer
+ * @timings: pointer to v4l2_dv_timings structure
+ *
+ * get the dv_timings which is set on the external subdev through
+ * v4l2_subdev_call
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int
+vpfe_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
+ return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
+}
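/*
 * Illustrative userspace sketch: for digital sources the standard-based calls
 * above are replaced by the DV timings ioctls handled by
 * vpfe_query_dv_timings()/vpfe_s_dv_timings(). Assumes an open fd.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_detected_dv_timings(int fd)
{
	struct v4l2_dv_timings timings;

	memset(&timings, 0, sizeof(timings));
	if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0)
		return -1;
	return ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings);
}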
+
+/*
+ * Videobuf operations
+ */
+/*
+ * vpfe_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when REQBUFS is issued, to adjust
+ * the buffer count and buffer size
+ */
+static int
+vpfe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ unsigned long size;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n");
+ size = video->fmt.fmt.pix.sizeimage;
+
+ if (vpfe_dev->video_limit) {
+ while (size * *nbuffers > vpfe_dev->video_limit)
+ (*nbuffers)--;
+ }
+ if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS) {
+ if (*nbuffers < MIN_NUM_BUFFERS)
+ *nbuffers = MIN_NUM_BUFFERS;
+ }
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = video->alloc_ctx;
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+ "nbuffers=%d, size=%lu\n", *nbuffers, size);
+ return 0;
+}
+
+/*
+ * vpfe_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the buffer-prepare callback invoked when vb2_qbuf() is
+ * called. The buffer is prepared and the user-space virtual address
+ * is converted into a physical (DMA) address
+ */
+static int vpfe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
+
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED)
+ return 0;
+
+ /* Initialize buffer */
+ vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!IS_ALIGNED(addr, 32))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void vpfe_buffer_queue(struct vb2_buffer *vb)
+{
+ /* Get the file handle object and device object */
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+ unsigned long flags;
+ unsigned long empty;
+ unsigned long addr;
+
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+ empty = list_empty(&video->dma_queue);
+ /* add the buffer to the DMA queue */
+ list_add_tail(&buf->list, &video->dma_queue);
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+ /* this case happens in case of single shot */
+ if (empty && video->started && pipe->state ==
+ VPFE_PIPELINE_STREAM_SINGLESHOT &&
+ video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
+ spin_lock(&video->dma_queue_lock);
+ addr = vpfe_video_get_next_buffer(video);
+ video->ops->queue(vpfe_dev, addr);
+
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+ spin_unlock(&video->dma_queue_lock);
+
+ /* enable h/w each time in single shot */
+ if (vpfe_video_is_pipe_ready(pipe))
+ vpfe_pipeline_set_stream(pipe,
+ VPFE_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+/* vpfe_start_capture() - start streaming on all the subdevs */
+static int vpfe_start_capture(struct vpfe_video_device *video)
+{
+ struct vpfe_pipeline *pipe = &video->pipe;
+ int ret = 0;
+
+ video->started = 1;
+ if (vpfe_video_is_pipe_ready(pipe))
+ ret = vpfe_pipeline_set_stream(pipe, pipe->state);
+
+ return ret;
+}
+
+static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ unsigned long addr;
+ int ret;
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ goto streamoff;
+
+ /* Get the next frame from the buffer queue */
+ video->cur_frm = video->next_frm =
+ list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&video->cur_frm->list);
+ /* Mark state of the current frame to active */
+ video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ video->field_id = 0;
+ addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+ video->ops->queue(vpfe_dev, addr);
+ video->state = VPFE_VIDEO_BUFFER_QUEUED;
+
+ ret = vpfe_start_capture(video);
+ if (ret)
+ goto unlock_out;
+
+ mutex_unlock(&video->lock);
+
+ return ret;
+unlock_out:
+ mutex_unlock(&video->lock);
+streamoff:
+ ret = vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
+ return 0;
+}
+
+static int vpfe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpfe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpfe_video_device *video = fh->video;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+ /* release all active buffers */
+ while (!list_empty(&video->dma_queue)) {
+ video->next_frm = list_entry(video->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&video->next_frm->list);
+ vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ return 0;
+}
+
+static void vpfe_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_video_device *video = fh->video;
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_cap_buffer *buf = container_of(vb,
+ struct vpfe_cap_buffer, vb);
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpfe_buffer_queue_setup,
+ .buf_init = vpfe_buffer_init,
+ .buf_prepare = vpfe_buffer_prepare,
+ .start_streaming = vpfe_start_streaming,
+ .stop_streaming = vpfe_stop_streaming,
+ .buf_cleanup = vpfe_buf_cleanup,
+ .buf_queue = vpfe_buffer_queue,
+};
+
+/*
+ * vpfe_reqbufs() - REQBUFS is supported only once per opening
+ * of the device.
+ */
+static int vpfe_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+ struct vb2_queue *q;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+
+ if (video->io_usrs != 0) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+ video->memory = req_buf->memory;
+
+ /* Initialize videobuf2 queue as per the buffer type */
+ video->alloc_ctx = vb2_dma_contig_init_ctx(vpfe_dev->pdev);
+ if (IS_ERR(video->alloc_ctx)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Failed to get the context\n");
+ return PTR_ERR(video->alloc_ctx);
+ }
+
+ q = &video->buffer_queue;
+ q->type = req_buf->type;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ return ret;
+ }
+
+ fh->io_allowed = 1;
+ video->io_usrs = 1;
+ INIT_LIST_HEAD(&video->dma_queue);
+ ret = vb2_reqbufs(&video->buffer_queue, req_buf);
+
+unlock_out:
+ mutex_unlock(&video->lock);
+ return ret;
+}
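/*
 * Illustrative userspace sketch: request and mmap() MMAP buffers from the vb2
 * queue that vpfe_reqbufs() initializes. A buffer count of 4 is an arbitrary
 * choice; error handling is trimmed.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

#define NUM_BUFS 4

static void *bufs[NUM_BUFS];

static int setup_mmap_buffers(int fd)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	unsigned int i;

	memset(&req, 0, sizeof(req));
	req.count = NUM_BUFS;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
			return -1;
		bufs[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, buf.m.offset);
		if (bufs[i] == MAP_FAILED)
			return -1;
	}
	return (int)req.count;
}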
+
+/*
+ * vpfe_querybuf() - query buffers for exchange
+ */
+static int vpfe_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (video->memory != V4L2_MEMORY_MMAP) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
+ return -EINVAL;
+ }
+
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&video->buffer_queue, buf);
+}
+
+/*
+ * vpfe_qbuf() - queue buffers for capture or processing
+ */
+static int vpfe_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ /*
+ * If this file handle is not allowed to do IO,
+ * return error
+ */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ return vb2_qbuf(&video->buffer_queue, p);
+}
+
+/*
+ * vpfe_dqbuf() - dequeue a buffer which is done with processing
+ */
+static int vpfe_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ return vb2_dqbuf(&video->buffer_queue,
+ buf, (file->f_flags & O_NONBLOCK));
+}
+
+/*
+ * vpfe_streamon() - start streaming on the video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @buf_type: enum v4l2_buf_type
+ *
+ * queue buffer onto hardware for capture/processing and
+ * start all the subdevs which are in media chain
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_pipeline *pipe = &video->pipe;
+ struct vpfe_fh *fh = file->private_data;
+ struct vpfe_ext_subdev_info *sdinfo;
+ int ret = -EINVAL;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
+ V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return ret;
+ }
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+ sdinfo = video->current_ext_subdev;
+ /* If buffer queue is empty, return error */
+ if (list_empty(&video->buffer_queue.queued_list)) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EIO;
+ }
+ /* Validate the pipeline */
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
+ ret = vpfe_video_validate_pipeline(pipe);
+ if (ret < 0)
+ return ret;
+ }
+ /* Call vb2_streamon to start streaming */
+ return vb2_streamon(&video->buffer_queue, buf_type);
+}
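/*
 * Illustrative userspace sketch: the queue/stream-on/dequeue cycle that
 * drives the vpfe_qbuf()/vpfe_streamon()/vpfe_dqbuf() handlers above.
 * Assumes buffers were set up as in the previous sketch; captures a single
 * frame and stops streaming.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int capture_one_frame(int fd, unsigned int nbufs)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_buffer buf;
	unsigned int i;

	/* buffers must be queued before STREAMON, or the driver
	 * rejects the call with "buffer queue is empty" */
	for (i = 0; i < nbufs; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return -1;
	}
	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return -1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)	/* blocks for a frame */
		return -1;
	/* frame data is in the mmap()ed buffer at index buf.index */
	return ioctl(fd, VIDIOC_STREAMOFF, &type);
}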
+
+/*
+ * vpfe_streamoff() - stop streaming on the video device
+ * @file: file pointer
+ * @priv: void pointer
+ * @buf_type: enum v4l2_buf_type
+ *
+ * stop all the subdevs which are in media chain
+ *
+ * Return 0 on success, error code otherwise
+ */
+static int vpfe_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpfe_video_device *video = video_drvdata(file);
+ struct vpfe_device *vpfe_dev = video->vpfe_dev;
+ struct vpfe_fh *fh = file->private_data;
+ int ret = 0;
+
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
+
+ if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ /* If IO is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!video->started) {
+ v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
+
+ vpfe_stop_capture(video);
+ ret = vb2_streamoff(&video->buffer_queue, buf_type);
+ mutex_unlock(&video->lock);
+
+ return ret;
+}
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
+ .vidioc_g_fmt_vid_out = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_out = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_out = vpfe_try_fmt,
+ .vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+ .vidioc_enum_dv_timings = vpfe_enum_dv_timings,
+ .vidioc_query_dv_timings = vpfe_query_dv_timings,
+ .vidioc_s_dv_timings = vpfe_s_dv_timings,
+ .vidioc_g_dv_timings = vpfe_g_dv_timings,
+ .vidioc_reqbufs = vpfe_reqbufs,
+ .vidioc_querybuf = vpfe_querybuf,
+ .vidioc_qbuf = vpfe_qbuf,
+ .vidioc_dqbuf = vpfe_dqbuf,
+ .vidioc_streamon = vpfe_streamon,
+ .vidioc_streamoff = vpfe_streamoff,
+};
+
+/* VPFE video init function */
+int vpfe_video_init(struct vpfe_video_device *video, const char *name)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ /* Initialize field of video device */
+ video->video_dev.release = video_device_release;
+ video->video_dev.fops = &vpfe_fops;
+ video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
+ video->video_dev.minor = -1;
+ video->video_dev.tvnorms = 0;
+ snprintf(video->video_dev.name, sizeof(video->video_dev.name),
+ "DAVINCI VIDEO %s %s", name, direction);
+
+ /* Initialize prio member of device object */
+ v4l2_prio_init(&video->prio);
+ spin_lock_init(&video->irqlock);
+ spin_lock_init(&video->dma_queue_lock);
+ mutex_init(&video->lock);
+ ret = media_entity_init(&video->video_dev.entity,
+ 1, &video->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ video_set_drvdata(&video->video_dev, video);
+
+ return 0;
+}
+
+/* vpfe video device register function */
+int vpfe_video_register(struct vpfe_video_device *video,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->video_dev.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ pr_err("%s: could not register video device (%d)\n",
+ __func__, ret);
+ return ret;
+}
+
+/* vpfe video device unregister function */
+void vpfe_video_unregister(struct vpfe_video_device *video)
+{
+ if (video_is_registered(&video->video_dev)) {
+ media_entity_cleanup(&video->video_dev.entity);
+ video_unregister_device(&video->video_dev);
+ }
+}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
new file mode 100644
index 000000000000..bf8af01d4a1b
--- /dev/null
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contributors:
+ * Manjunath Hadli <manjunath.hadli@ti.com>
+ * Prabhakar Lad <prabhakar.lad@ti.com>
+ */
+
+#ifndef _DAVINCI_VPFE_VIDEO_H
+#define _DAVINCI_VPFE_VIDEO_H
+
+#include <media/videobuf2-dma-contig.h>
+
+struct vpfe_device;
+
+/*
+ * struct vpfe_video_operations - VPFE video operations
+ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ * if there was no buffer previously queued.
+ */
+struct vpfe_video_operations {
+ int(*queue) (struct vpfe_device *vpfe_dev, unsigned long addr);
+};
+
+enum vpfe_pipeline_stream_state {
+ VPFE_PIPELINE_STREAM_STOPPED = 0,
+ VPFE_PIPELINE_STREAM_CONTINUOUS = 1,
+ VPFE_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum vpfe_video_state {
+ /* indicates that buffer is not queued */
+ VPFE_VIDEO_BUFFER_NOT_QUEUED = 0,
+ /* indicates that buffer is queued */
+ VPFE_VIDEO_BUFFER_QUEUED = 1,
+};
+
+struct vpfe_pipeline {
+ /* media pipeline */
+ struct media_pipeline *pipe;
+ /* state of the pipeline, continuous,
+ * single-shot or stopped
+ */
+ enum vpfe_pipeline_stream_state state;
+ /* number of active input video entities */
+ unsigned int input_num;
+ /* number of active output video entities */
+ unsigned int output_num;
+ /* input video nodes in case of single-shot mode */
+ struct vpfe_video_device *inputs[10];
+ /* capturing video nodes */
+ struct vpfe_video_device *outputs[10];
+};
+
+#define to_vpfe_pipeline(__e) \
+ container_of((__e)->pipe, struct vpfe_pipeline, pipe)
+
+#define to_vpfe_video(vdev) \
+ container_of(vdev, struct vpfe_video_device, video_dev)
+
+struct vpfe_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+struct vpfe_video_device {
+ /* vpfe device */
+ struct vpfe_device *vpfe_dev;
+ /* video dev */
+ struct video_device video_dev;
+ /* media pad of video entity */
+ struct media_pad pad;
+ /* video operations supported by video device */
+ const struct vpfe_video_operations *ops;
+ /* type of the video buffers used by user */
+ enum v4l2_buf_type type;
+ /* Indicates id of the field which is being captured */
+ u32 field_id;
+ /* pipeline for which video device is part of */
+ struct vpfe_pipeline pipe;
+ /* Indicates whether streaming started */
+ u8 started;
+ /* Indicates state of the stream */
+ unsigned int state;
+ /* current input at the sub device */
+ int current_input;
+ /*
+ * This field keeps track of type of buffer exchange mechanism
+ * user has selected
+ */
+ enum v4l2_memory memory;
+ /* Used to keep track of state of the priority */
+ struct v4l2_prio_state prio;
+ /* number of open instances of the channel */
+ u32 usrs;
+ /* flag to indicate whether decoder is initialized */
+ u8 initialized;
+ /* skip frame count */
+ u8 skip_frame_count;
+ /* skip frame count init value */
+ u8 skip_frame_count_init;
+ /* time per frame for skipping */
+ struct v4l2_fract timeperframe;
+ /* ptr to currently selected sub device */
+ struct vpfe_ext_subdev_info *current_ext_subdev;
+ /* Pointer pointing to current vpfe_cap_buffer */
+ struct vpfe_cap_buffer *cur_frm;
+ /* Pointer pointing to next vpfe_cap_buffer */
+ struct vpfe_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ spinlock_t irqlock;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /* number of users performing IO */
+ u32 io_usrs;
+ /* Currently selected or default standard */
+ v4l2_std_id stdid;
+ /*
+ * offset where second field starts from the starting of the
+ * buffer for field separated YCbCr formats
+ */
+ u32 field_off;
+};
+
+int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe);
+void vpfe_video_unregister(struct vpfe_video_device *video);
+int vpfe_video_register(struct vpfe_video_device *video,
+ struct v4l2_device *vdev);
+int vpfe_video_init(struct vpfe_video_device *video, const char *name);
+void vpfe_video_process_buffer_complete(struct vpfe_video_device *video);
+void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video);
+void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video);
+
+#endif /* _DAVINCI_VPFE_VIDEO_H */
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 479c643da2f6..e33b7f55d84e 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -785,7 +785,7 @@ dt3155_init_board(struct pci_dev *pdev)
}
write_i2c_reg(pd->regs, CONFIG, pd->config); /* ACQ_MODE_EVEN */
- /* select chanel 1 for input and set sync level */
+ /* select channel 1 for input and set sync level */
write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
diff --git a/drivers/staging/media/go7007/go7007-driver.c b/drivers/staging/media/go7007/go7007-driver.c
index ece2dd146487..66950916df23 100644
--- a/drivers/staging/media/go7007/go7007-driver.c
+++ b/drivers/staging/media/go7007/go7007-driver.c
@@ -108,14 +108,13 @@ static int go7007_load_encoder(struct go7007 *go)
return -1;
}
fw_len = fw_entry->size - 16;
- bounce = kmalloc(fw_len, GFP_KERNEL);
+ bounce = kmemdup(fw_entry->data + 16, fw_len, GFP_KERNEL);
if (bounce == NULL) {
v4l2_err(go, "unable to allocate %d bytes for "
"firmware transfer\n", fw_len);
release_firmware(fw_entry);
return -1;
}
- memcpy(bounce, fw_entry->data + 16, fw_len);
release_firmware(fw_entry);
if (go7007_interface_reset(go) < 0 ||
go7007_send_firmware(go, bounce, fw_len) < 0 ||
@@ -173,6 +172,11 @@ static int go7007_init_encoder(struct go7007 *go)
go7007_write_addr(go, 0x3c82, 0x0001);
go7007_write_addr(go, 0x3c80, 0x00fe);
}
+ if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) {
+ /* set GPIO5 to be an output, currently low */
+ go7007_write_addr(go, 0x3c82, 0x0000);
+ go7007_write_addr(go, 0x3c80, 0x00df);
+ }
return 0;
}
@@ -201,7 +205,8 @@ static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
if (v4l2_i2c_new_subdev(v4l2_dev, adapter, type, addr, NULL))
return 0;
- printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type);
+ dev_info(&adapter->dev,
+ "go7007: probing for module i2c:%s failed\n", type);
return -1;
}
@@ -217,7 +222,7 @@ int go7007_register_encoder(struct go7007 *go)
{
int i, ret;
- printk(KERN_INFO "go7007: registering new %s\n", go->name);
+ dev_info(go->dev, "go7007: registering new %s\n", go->name);
mutex_lock(&go->hw_lock);
ret = go7007_init_encoder(go);
@@ -571,7 +576,7 @@ struct go7007 *go7007_alloc(struct go7007_board_info *board, struct device *dev)
struct go7007 *go;
int i;
- go = kmalloc(sizeof(struct go7007), GFP_KERNEL);
+ go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
if (go == NULL)
return NULL;
go->dev = dev;
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index f99c05b454b0..a5ede1c109d0 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -381,11 +381,8 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
int size = 0, i, off = 0, chunk;
buf = kzalloc(4096, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 4096 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
for (i = 1; i < 32; ++i) {
mjpeg_frame_header(go, buf + size, i);
@@ -651,11 +648,9 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
int i, off = 0, chunk;
buf = kzalloc(5120, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 5120 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
+
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
if (go->interlace_coding)
framelen[0] += mpeg1_frame_header(go, buf + framelen[0] / 8,
@@ -838,11 +833,9 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
int i, off = 0, chunk;
buf = kzalloc(5120, GFP_KERNEL);
- if (buf == NULL) {
- dev_err(go->dev,
- "unable to allocate 5120 bytes for firmware construction\n");
+ if (buf == NULL)
return -1;
- }
+
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
i = 368;
framelen[1] = mpeg4_frame_header(go, buf + i, 0, BFRAME_PRE);
@@ -1582,12 +1575,9 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
return -1;
}
code = kzalloc(codespace * 2, GFP_KERNEL);
- if (code == NULL) {
- dev_err(go->dev,
- "unable to allocate %d bytes for firmware construction\n",
- codespace * 2);
+ if (code == NULL)
goto fw_failed;
- }
+
src = (__le16 *)fw_entry->data;
srclen = fw_entry->size / 2;
while (srclen >= 2) {
diff --git a/drivers/staging/media/go7007/go7007-i2c.c b/drivers/staging/media/go7007/go7007-i2c.c
index 6bc82aaeef11..39456a36b2c6 100644
--- a/drivers/staging/media/go7007/go7007-i2c.c
+++ b/drivers/staging/media/go7007/go7007-i2c.c
@@ -60,10 +60,10 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
#ifdef GO7007_I2C_DEBUG
if (read)
- printk(KERN_DEBUG "go7007-i2c: reading 0x%02x on 0x%02x\n",
+ dev_dbg(go->dev, "go7007-i2c: reading 0x%02x on 0x%02x\n",
command, addr);
else
- printk(KERN_DEBUG
+ dev_dbg(go->dev,
"go7007-i2c: writing 0x%02x to 0x%02x on 0x%02x\n",
*data, command, addr);
#endif
@@ -85,7 +85,7 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
msleep(100);
}
if (i == 10) {
- printk(KERN_ERR "go7007-i2c: I2C adapter is hung\n");
+ dev_err(go->dev, "go7007-i2c: I2C adapter is hung\n");
goto i2c_done;
}
@@ -119,7 +119,7 @@ static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read,
msleep(100);
}
if (i == 10) {
- printk(KERN_ERR "go7007-i2c: I2C adapter is hung\n");
+ dev_err(go->dev, "go7007-i2c: I2C adapter is hung\n");
goto i2c_done;
}
@@ -216,7 +216,7 @@ int go7007_i2c_init(struct go7007 *go)
go->i2c_adapter.dev.parent = go->dev;
i2c_set_adapdata(&go->i2c_adapter, go);
if (i2c_add_adapter(&go->i2c_adapter) < 0) {
- printk(KERN_ERR
+ dev_err(go->dev,
"go7007-i2c: error: i2c_add_adapter failed\n");
return -1;
}
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 5443e25086e9..9dbf5ecd05a2 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1110,9 +1110,6 @@ static int go7007_usb_probe(struct usb_interface *intf,
} else {
u16 channel;
- /* set GPIO5 to be an output, currently low */
- go7007_write_addr(go, 0x3c82, 0x0000);
- go7007_write_addr(go, 0x3c80, 0x00df);
/* read channel number from GPIO[1:0] */
go7007_read_addr(go, 0x3c81, &channel);
channel &= 0x3;
@@ -1245,7 +1242,6 @@ static void go7007_usb_disconnect(struct usb_interface *intf)
struct urb *vurb, *aurb;
int i;
- go->status = STATUS_SHUTDOWN;
usb_kill_urb(usb->intr_urb);
/* Free USB-related structs */
@@ -1269,6 +1265,7 @@ static void go7007_usb_disconnect(struct usb_interface *intf)
kfree(go->hpi_context);
go7007_remove(go);
+ go->status = STATUS_SHUTDOWN;
}
static struct usb_driver go7007_usb_driver = {
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index a78133b67de2..cb9fe33050c7 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -98,7 +98,7 @@ static int go7007_open(struct file *file)
if (go->status != STATUS_ONLINE)
return -EBUSY;
- gofh = kmalloc(sizeof(struct go7007_file), GFP_KERNEL);
+ gofh = kzalloc(sizeof(struct go7007_file), GFP_KERNEL);
if (gofh == NULL)
return -ENOMEM;
++go->ref_count;
@@ -953,6 +953,7 @@ static int vidioc_streamon(struct file *file, void *priv,
}
mutex_unlock(&go->hw_lock);
mutex_unlock(&gofh->lock);
+ call_all(&go->v4l2_dev, video, s_stream, 1);
return retval;
}
@@ -968,6 +969,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
mutex_lock(&gofh->lock);
go7007_streamoff(go);
mutex_unlock(&gofh->lock);
+ call_all(&go->v4l2_dev, video, s_stream, 0);
return 0;
}
@@ -1811,8 +1813,8 @@ int go7007_v4l2_init(struct go7007 *go)
}
video_set_drvdata(go->video_dev, go);
++go->ref_count;
- printk(KERN_INFO "%s: registered device %s [v4l2]\n",
- go->video_dev->name, video_device_node_name(go->video_dev));
+ dev_info(go->dev, "registered device %s [v4l2]\n",
+ video_device_node_name(go->video_dev));
return 0;
}
@@ -1832,5 +1834,6 @@ void go7007_v4l2_remove(struct go7007 *go)
mutex_unlock(&go->hw_lock);
if (go->video_dev)
video_unregister_device(go->video_dev);
- v4l2_device_unregister(&go->v4l2_dev);
+ if (go->status != STATUS_SHUTDOWN)
+ v4l2_device_unregister(&go->v4l2_dev);
}
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index b3974100c6cd..37400bfa6ccb 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -103,8 +103,7 @@ static u16 vid_regs_fp[] = {
};
/* PAL specific values */
-static u16 vid_regs_fp_pal[] =
-{
+static u16 vid_regs_fp_pal[] = {
0x120, 0x017,
0x121, 0xd22,
0x122, 0x122,
@@ -174,7 +173,7 @@ static int write_reg(struct i2c_client *client, u8 reg, u8 value)
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -213,7 +212,7 @@ static int write_reg_fp(struct i2c_client *client, u16 addr, u16 val)
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -231,13 +230,13 @@ static int write_reg_fp(struct i2c_client *client, u16 addr, u16 val)
val_read = (buf[2] << 8) + buf[3];
kfree(buf);
if (val_read != val) {
- printk(KERN_INFO "invalid fp write %x %x\n",
- val_read, val);
+ dev_info(&client->dev, "invalid fp write %x %x\n",
+ val_read, val);
return -EFAULT;
}
if (subaddr != addr) {
- printk(KERN_INFO "invalid fp write addr %x %x\n",
- subaddr, addr);
+ dev_info(&client->dev, "invalid fp write addr %x %x\n",
+ subaddr, addr);
return -EFAULT;
}
} else {
@@ -275,7 +274,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
memset(buf, 0xcd, 6);
usb = go->hpi_context;
if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
- printk(KERN_INFO "i2c lock failed\n");
+ dev_info(&client->dev, "i2c lock failed\n");
kfree(buf);
return -EINTR;
}
@@ -299,7 +298,7 @@ static int write_regs(struct i2c_client *client, u8 *regs)
for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) {
if (write_reg(client, regs[i], regs[i+1]) < 0) {
- printk(KERN_INFO "s2250: failed\n");
+ dev_info(&client->dev, "failed\n");
return -1;
}
}
@@ -312,7 +311,7 @@ static int write_regs_fp(struct i2c_client *client, u16 *regs)
for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) {
if (write_reg_fp(client, regs[i], regs[i+1]) < 0) {
- printk(KERN_INFO "s2250: failed fp\n");
+ dev_info(&client->dev, "failed fp\n");
return -1;
}
}
@@ -535,7 +534,7 @@ static int s2250_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Brightness: %d\n", state->brightness);
v4l2_info(sd, "Contrast: %d\n", state->contrast);
v4l2_info(sd, "Saturation: %d\n", state->saturation);
- v4l2_info(sd, "Hue: %d\n", state->hue); return 0;
+ v4l2_info(sd, "Hue: %d\n", state->hue);
v4l2_info(sd, "Audio input: %s\n", state->audio_input == 0 ? "Line In" :
state->audio_input == 1 ? "Mic" :
state->audio_input == 2 ? "Mic Boost" :
@@ -606,23 +605,20 @@ static int s2250_probe(struct i2c_client *client,
/* initialize the audio */
if (write_regs(audio, aud_regs) < 0) {
- printk(KERN_ERR
- "s2250: error initializing audio\n");
+ dev_err(&client->dev, "error initializing audio\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
}
if (write_regs(client, vid_regs) < 0) {
- printk(KERN_ERR
- "s2250: error initializing decoder\n");
+ dev_err(&client->dev, "error initializing decoder\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
}
if (write_regs_fp(client, vid_regs_fp) < 0) {
- printk(KERN_ERR
- "s2250: error initializing decoder\n");
+ dev_err(&client->dev, "error initializing decoder\n");
i2c_unregister_device(audio);
kfree(state);
return 0;
diff --git a/drivers/staging/media/go7007/s2250-loader.c b/drivers/staging/media/go7007/s2250-loader.c
index f1bd159ac195..72e5175fe7e3 100644
--- a/drivers/staging/media/go7007/s2250-loader.c
+++ b/drivers/staging/media/go7007/s2250-loader.c
@@ -55,16 +55,16 @@ static int s2250loader_probe(struct usb_interface *interface,
usbdev = usb_get_dev(interface_to_usbdev(interface));
if (!usbdev) {
- printk(KERN_ERR "Enter s2250loader_probe failed\n");
+ dev_err(&interface->dev, "Enter s2250loader_probe failed\n");
return -1;
}
- printk(KERN_INFO "Enter s2250loader_probe 2.6 kernel\n");
- printk(KERN_INFO "vendor id 0x%x, device id 0x%x devnum:%d\n",
- usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
- usbdev->devnum);
+ dev_info(&interface->dev, "Enter s2250loader_probe 2.6 kernel\n");
+ dev_info(&interface->dev, "vendor id 0x%x, device id 0x%x devnum:%d\n",
+ usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
+ usbdev->devnum);
if (usbdev->descriptor.bNumConfigurations != 1) {
- printk(KERN_ERR "can't handle multiple config\n");
+ dev_err(&interface->dev, "can't handle multiple config\n");
return -1;
}
mutex_lock(&s2250_dev_table_mutex);
@@ -75,31 +75,31 @@ static int s2250loader_probe(struct usb_interface *interface,
}
if (minor < 0 || minor >= MAX_DEVICES) {
- printk(KERN_ERR "Invalid minor: %d\n", minor);
+ dev_err(&interface->dev, "Invalid minor: %d\n", minor);
goto failed;
}
/* Allocate dev data structure */
s = kmalloc(sizeof(device_extension_t), GFP_KERNEL);
- if (s == NULL) {
- printk(KERN_ERR "Out of memory\n");
+ if (s == NULL)
goto failed;
- }
+
s2250_dev_table[minor] = s;
- printk(KERN_INFO "s2250loader_probe: Device %d on Bus %d Minor %d\n",
- usbdev->devnum, usbdev->bus->busnum, minor);
+ dev_info(&interface->dev,
+ "s2250loader_probe: Device %d on Bus %d Minor %d\n",
+ usbdev->devnum, usbdev->bus->busnum, minor);
memset(s, 0, sizeof(device_extension_t));
s->usbdev = usbdev;
- printk(KERN_INFO "loading 2250 loader\n");
+ dev_info(&interface->dev, "loading 2250 loader\n");
kref_init(&(s->kref));
mutex_unlock(&s2250_dev_table_mutex);
if (request_firmware(&fw, S2250_LOADER_FIRMWARE, &usbdev->dev)) {
- printk(KERN_ERR
+ dev_err(&interface->dev,
"s2250: unable to load firmware from file \"%s\"\n",
S2250_LOADER_FIRMWARE);
goto failed2;
@@ -107,12 +107,12 @@ static int s2250loader_probe(struct usb_interface *interface,
ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
release_firmware(fw);
if (0 != ret) {
- printk(KERN_ERR "loader download failed\n");
+ dev_err(&interface->dev, "loader download failed\n");
goto failed2;
}
if (request_firmware(&fw, S2250_FIRMWARE, &usbdev->dev)) {
- printk(KERN_ERR
+ dev_err(&interface->dev,
"s2250: unable to load firmware from file \"%s\"\n",
S2250_FIRMWARE);
goto failed2;
@@ -120,7 +120,7 @@ static int s2250loader_probe(struct usb_interface *interface,
ret = usb_cypress_load_firmware(usbdev, fw, CYPRESS_FX2);
release_firmware(fw);
if (0 != ret) {
- printk(KERN_ERR "firmware_s2250 download failed\n");
+ dev_err(&interface->dev, "firmware_s2250 download failed\n");
goto failed2;
}
@@ -133,14 +133,14 @@ failed2:
if (s)
kref_put(&(s->kref), s2250loader_delete);
- printk(KERN_ERR "probe failed\n");
+ dev_err(&interface->dev, "probe failed\n");
return -1;
}
static void s2250loader_disconnect(struct usb_interface *interface)
{
pdevice_extension_t s;
- printk(KERN_INFO "s2250: disconnect\n");
+ dev_info(&interface->dev, "s2250: disconnect\n");
s = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
kref_put(&(s->kref), s2250loader_delete);
diff --git a/drivers/staging/media/go7007/wis-saa7113.c b/drivers/staging/media/go7007/wis-saa7113.c
index 8810c1e6e1ed..891cde713a47 100644
--- a/drivers/staging/media/go7007/wis-saa7113.c
+++ b/drivers/staging/media/go7007/wis-saa7113.c
@@ -141,7 +141,7 @@ static int wis_saa7113_command(struct i2c_client *client,
} else if (dec->norm & V4L2_STD_PAL) {
write_reg(client, 0x0e, 0x01);
write_reg(client, 0x10, 0x48);
- } else if (dec->norm * V4L2_STD_SECAM) {
+ } else if (dec->norm & V4L2_STD_SECAM) {
write_reg(client, 0x0e, 0x50);
write_reg(client, 0x10, 0x48);
}
diff --git a/drivers/staging/media/go7007/wis-sony-tuner.c b/drivers/staging/media/go7007/wis-sony-tuner.c
index 1291ab79d2af..5d7ff8c81d6d 100644
--- a/drivers/staging/media/go7007/wis-sony-tuner.c
+++ b/drivers/staging/media/go7007/wis-sony-tuner.c
@@ -95,8 +95,8 @@ static int set_freq(struct i2c_client *client, int freq)
band_name = "UHF";
band_select = tun->UHF;
}
- printk(KERN_DEBUG "wis-sony-tuner: tuning to frequency %d.%04d (%s)\n",
- freq / 16, (freq % 16) * 625, band_name);
+ dev_dbg(&client->dev, "tuning to frequency %d.%04d (%s)\n",
+ freq / 16, (freq % 16) * 625, band_name);
n = freq + tun->IFPCoff;
buffer[0] = n >> 8;
@@ -288,16 +288,16 @@ static int mpx_setup(struct i2c_client *client)
u8 buf1[3], buf2[2];
struct i2c_msg msgs[2];
- printk(KERN_DEBUG "wis-sony-tuner: MPX registers: %04x %04x "
- "%04x %04x %04x %04x %04x %04x\n",
- mpx_audio_modes[t->mpxmode].modus,
- source,
- mpx_audio_modes[t->mpxmode].acb,
- mpx_audio_modes[t->mpxmode].fm_prescale,
- mpx_audio_modes[t->mpxmode].nicam_prescale,
- mpx_audio_modes[t->mpxmode].scart_prescale,
- mpx_audio_modes[t->mpxmode].system,
- mpx_audio_modes[t->mpxmode].volume);
+ dev_dbg(&client->dev,
+ "MPX registers: %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ mpx_audio_modes[t->mpxmode].modus,
+ source,
+ mpx_audio_modes[t->mpxmode].acb,
+ mpx_audio_modes[t->mpxmode].fm_prescale,
+ mpx_audio_modes[t->mpxmode].nicam_prescale,
+ mpx_audio_modes[t->mpxmode].scart_prescale,
+ mpx_audio_modes[t->mpxmode].system,
+ mpx_audio_modes[t->mpxmode].volume);
buf1[0] = 0x11;
buf1[1] = 0x00;
buf1[2] = 0x7e;
@@ -310,14 +310,14 @@ static int mpx_setup(struct i2c_client *client)
msgs[1].len = 2;
msgs[1].buf = buf2;
i2c_transfer(client->adapter, msgs, 2);
- printk(KERN_DEBUG "wis-sony-tuner: MPX system: %02x%02x\n",
- buf2[0], buf2[1]);
+ dev_dbg(&client->dev, "MPX system: %02x%02x\n",
+ buf2[0], buf2[1]);
buf1[0] = 0x11;
buf1[1] = 0x02;
buf1[2] = 0x00;
i2c_transfer(client->adapter, msgs, 2);
- printk(KERN_DEBUG "wis-sony-tuner: MPX status: %02x%02x\n",
- buf2[0], buf2[1]);
+ dev_dbg(&client->dev, "MPX status: %02x%02x\n",
+ buf2[0], buf2[1]);
}
#endif
return 0;
@@ -375,8 +375,7 @@ static int set_if(struct i2c_client *client)
t->mpxmode = force_mpx_mode;
else
t->mpxmode = default_mpx_mode;
- printk(KERN_DEBUG "wis-sony-tuner: setting MPX to mode %d\n",
- t->mpxmode);
+ dev_dbg(&client->dev, "setting MPX to mode %d\n", t->mpxmode);
mpx_setup(client);
return 0;
@@ -401,8 +400,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (t->type >= 0) {
if (t->type != *type)
- printk(KERN_ERR "wis-sony-tuner: type already "
- "set to %d, ignoring request for %d\n",
+ dev_err(&client->dev,
+ "type already set to %d, ignoring request for %d\n",
t->type, *type);
break;
}
@@ -414,28 +413,28 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
case 'B':
case 'g':
case 'G':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-B/G bands\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-B/G bands\n");
force_band = V4L2_STD_PAL_BG;
break;
case 'i':
case 'I':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-I band\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-I band\n");
force_band = V4L2_STD_PAL_I;
break;
case 'd':
case 'D':
case 'k':
case 'K':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to PAL-D/K bands\n");
+ dev_info(&client->dev,
+ "forcing tuner to PAL-D/K bands\n");
force_band = V4L2_STD_PAL_I;
break;
case 'l':
case 'L':
- printk(KERN_INFO "wis-sony-tuner: forcing "
- "tuner to SECAM-L band\n");
+ dev_info(&client->dev,
+ "forcing tuner to SECAM-L band\n");
force_band = V4L2_STD_SECAM_L;
break;
default:
@@ -455,14 +454,15 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
t->std = V4L2_STD_NTSC_M;
break;
default:
- printk(KERN_ERR "wis-sony-tuner: tuner type %d is not "
- "supported by this module\n", *type);
+ dev_err(&client->dev,
+ "tuner type %d is not supported by this module\n",
+ *type);
break;
}
if (type >= 0)
- printk(KERN_INFO
- "wis-sony-tuner: type set to %d (%s)\n",
- t->type, sony_tuners[t->type - 200].name);
+ dev_info(&client->dev,
+ "type set to %d (%s)\n",
+ t->type, sony_tuners[t->type - 200].name);
break;
}
#endif
@@ -544,9 +544,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (force_band && (*std & force_band) != *std &&
*std != V4L2_STD_PAL &&
*std != V4L2_STD_SECAM) {
- printk(KERN_DEBUG "wis-sony-tuner: ignoring "
- "requested TV standard in "
- "favor of force_band value\n");
+ dev_dbg(&client->dev,
+ "ignoring requested TV standard in favor of force_band value\n");
t->std = force_band;
} else if (*std & V4L2_STD_PAL_BG) { /* default */
t->std = V4L2_STD_PAL_BG;
@@ -557,8 +556,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
} else if (*std & V4L2_STD_SECAM_L) {
t->std = V4L2_STD_SECAM_L;
} else {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
break;
}
@@ -567,15 +566,15 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
break;
case TUNER_SONY_BTF_PK467Z:
if (!(*std & V4L2_STD_NTSC_M_JP)) {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
}
break;
case TUNER_SONY_BTF_PB463Z:
if (!(*std & V4L2_STD_NTSC_M)) {
- printk(KERN_ERR "wis-sony-tuner: TV standard "
- "not supported\n");
+ dev_err(&client->dev,
+ "TV standard not supported\n");
*std = 0; /* hack to indicate EINVAL */
}
break;
@@ -673,8 +672,7 @@ static int wis_sony_tuner_probe(struct i2c_client *client,
t->audmode = V4L2_TUNER_MODE_STEREO;
i2c_set_clientdata(client, t);
- printk(KERN_DEBUG
- "wis-sony-tuner: initializing tuner at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing tuner at address %d on %s\n",
client->addr, adapter->name);
return 0;
diff --git a/drivers/staging/media/go7007/wis-tw2804.c b/drivers/staging/media/go7007/wis-tw2804.c
index d6410ee01be8..290fd8c7bfef 100644
--- a/drivers/staging/media/go7007/wis-tw2804.c
+++ b/drivers/staging/media/go7007/wis-tw2804.c
@@ -128,30 +128,32 @@ static int wis_tw2804_command(struct i2c_client *client,
int *input = arg;
if (*input < 0 || *input > 3) {
- printk(KERN_ERR "wis-tw2804: channel %d is not "
- "between 0 and 3!\n", *input);
+ dev_err(&client->dev,
+ "channel %d is not between 0 and 3!\n", *input);
return 0;
}
dec->channel = *input;
- printk(KERN_DEBUG "wis-tw2804: initializing TW2804 "
- "channel %d\n", dec->channel);
+ dev_dbg(&client->dev, "initializing TW2804 channel %d\n",
+ dec->channel);
if (dec->channel == 0 &&
write_regs(client, global_registers, 0) < 0) {
- printk(KERN_ERR "wis-tw2804: error initializing "
- "TW2804 global registers\n");
+ dev_err(&client->dev,
+ "error initializing TW2804 global registers\n");
return 0;
}
if (write_regs(client, channel_registers, dec->channel) < 0) {
- printk(KERN_ERR "wis-tw2804: error initializing "
- "TW2804 channel %d\n", dec->channel);
+ dev_err(&client->dev,
+ "error initializing TW2804 channel %d\n",
+ dec->channel);
return 0;
}
return 0;
}
if (dec->channel < 0) {
- printk(KERN_DEBUG "wis-tw2804: ignoring command %08x until "
- "channel number is set\n", cmd);
+ dev_dbg(&client->dev,
+ "ignoring command %08x until channel number is set\n",
+ cmd);
return 0;
}
@@ -311,7 +313,7 @@ static int wis_tw2804_probe(struct i2c_client *client,
dec->hue = 128;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG "wis-tw2804: creating TW2804 at address %d on %s\n",
+ dev_dbg(&client->dev, "creating TW2804 at address %d on %s\n",
client->addr, adapter->name);
return 0;
diff --git a/drivers/staging/media/go7007/wis-tw9903.c b/drivers/staging/media/go7007/wis-tw9903.c
index 94071def3bb4..684ca37f0382 100644
--- a/drivers/staging/media/go7007/wis-tw9903.c
+++ b/drivers/staging/media/go7007/wis-tw9903.c
@@ -31,8 +31,7 @@ struct wis_tw9903 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x02, 0x44, /* input 1, composite */
0x03, 0x92, /* correct digital format */
0x04, 0x00,
@@ -128,8 +127,8 @@ static int wis_tw9903_command(struct i2c_client *client,
0x06, 0xc0, /* reset device */
0, 0,
};
- printk(KERN_DEBUG "vscale is %04x, hscale is %04x\n",
- vscale, hscale);
+ dev_dbg(&client->dev, "vscale is %04x, hscale is %04x\n",
+ vscale, hscale);
/*write_regs(client, regs);*/
break;
}
@@ -288,12 +287,11 @@ static int wis_tw9903_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
- "wis-tw9903: initializing TW9903 at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing TW9903 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR "wis-tw9903: error initializing TW9903\n");
+ dev_err(&client->dev, "error initializing TW9903\n");
kfree(dec);
return -ENODEV;
}
diff --git a/drivers/staging/media/go7007/wis-uda1342.c b/drivers/staging/media/go7007/wis-uda1342.c
index 05ac798f35f7..582ea120a531 100644
--- a/drivers/staging/media/go7007/wis-uda1342.c
+++ b/drivers/staging/media/go7007/wis-uda1342.c
@@ -47,8 +47,8 @@ static int wis_uda1342_command(struct i2c_client *client,
write_reg(client, 0x00, 0x1241); /* select input 1 */
break;
default:
- printk(KERN_ERR "wis-uda1342: input %d not supported\n",
- *inp);
+ dev_err(&client->dev, "input %d not supported\n",
+ *inp);
break;
}
break;
@@ -67,8 +67,7 @@ static int wis_uda1342_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- printk(KERN_DEBUG
- "wis-uda1342: initializing UDA1342 at address %d on %s\n",
+ dev_dbg(&client->dev, "initializing UDA1342 at address %d on %s\n",
client->addr, adapter->name);
write_reg(client, 0x00, 0x8000); /* reset registers */
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 951007a3fc96..fa31ee7dd6a9 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/threads.h>
@@ -72,20 +74,19 @@ static struct pci_dev *do_pci_probe(void)
my_dev = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_264VT, NULL);
if (my_dev) {
- printk(KERN_ERR DRIVER_NAME ": Using device: %s\n",
- pci_name(my_dev));
+ pr_err("Using device: %s\n", pci_name(my_dev));
pci_addr_phys = 0;
if (my_dev->resource[0].flags & IORESOURCE_MEM) {
pci_addr_phys = my_dev->resource[0].start;
- printk(KERN_INFO DRIVER_NAME ": memory at 0x%08X\n",
+ pr_info("memory at 0x%08X\n",
(unsigned int)pci_addr_phys);
}
if (pci_addr_phys == 0) {
- printk(KERN_ERR DRIVER_NAME ": no memory resource ?\n");
+ pr_err("no memory resource ?\n");
return NULL;
}
} else {
- printk(KERN_ERR DRIVER_NAME ": pci_probe failed\n");
+ pr_err("pci_probe failed\n");
return NULL;
}
return my_dev;
@@ -140,7 +141,7 @@ int init_module(void)
atir_minor = lirc_register_driver(&atir_driver);
if (atir_minor < 0) {
- printk(KERN_ERR DRIVER_NAME ": failed to register driver!\n");
+ pr_err("failed to register driver!\n");
return atir_minor;
}
dprintk("driver is registered on minor %d\n", atir_minor);
@@ -159,7 +160,7 @@ static int atir_init_start(void)
{
pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
if (pci_addr_lin == 0) {
- printk(KERN_INFO DRIVER_NAME ": pci mem must be mapped\n");
+ pr_info("pci mem must be mapped\n");
return 0;
}
return 1;
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 939a801c23e4..2faa391006db 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -223,8 +223,8 @@ static int unregister_from_lirc(struct igorplug *ir)
int devnum;
if (!ir) {
- printk(KERN_ERR "%s: called with NULL device struct!\n",
- __func__);
+ pr_err("%s: called with NULL device struct!\n",
+ __func__);
return -EINVAL;
}
@@ -232,8 +232,8 @@ static int unregister_from_lirc(struct igorplug *ir)
d = ir->d;
if (!d) {
- printk(KERN_ERR "%s: called with NULL lirc driver struct!\n",
- __func__);
+ dev_err(&ir->usbdev->dev,
+ "%s: called with NULL lirc driver struct!\n", __func__);
return -EINVAL;
}
@@ -347,8 +347,8 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
if (ir->buf_in[2] == 0)
send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
else {
- printk(KERN_WARNING DRIVER_NAME
- "[%d]: Device buffer overrun.\n", ir->devnum);
+ dev_warn(&ir->usbdev->dev,
+ "[%d]: Device buffer overrun.\n", ir->devnum);
/* HHHNNNNNNNNNNNOOOOOOOO H = header
<---[2]---> N = newer
<---------ret--------> O = older */
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 2944fde89f44..0a2c45dd4475 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -205,12 +207,12 @@ static void deregister_from_lirc(struct imon_context *context)
retval = lirc_unregister_driver(minor);
if (retval)
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: unable to deregister from lirc(%d)",
- __func__, retval);
+ dev_err(&context->usbdev->dev,
+ ": %s: unable to deregister from lirc(%d)",
+ __func__, retval);
else
- printk(KERN_INFO MOD_NAME ": Deregistered iMON driver "
- "(minor:%d)\n", minor);
+ dev_info(&context->usbdev->dev,
+ "Deregistered iMON driver (minor:%d)\n", minor);
}
@@ -231,8 +233,7 @@ static int display_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: could not find interface for minor %d\n",
+ pr_err("%s: could not find interface for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit;
@@ -282,8 +283,7 @@ static int display_close(struct inode *inode, struct file *file)
context = file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -391,8 +391,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
context = file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -521,8 +520,7 @@ static void ir_close(void *data)
context = (struct imon_context *)data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- "%s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return;
}
@@ -746,7 +744,6 @@ static int imon_probe(struct usb_interface *interface,
context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
if (!context) {
- dev_err(dev, "%s: kzalloc failed for context\n", __func__);
alloc_status = 1;
goto alloc_status_switch;
}
@@ -828,13 +825,11 @@ static int imon_probe(struct usb_interface *interface,
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!driver) {
- dev_err(dev, "%s: kzalloc failed for lirc_driver\n", __func__);
alloc_status = 2;
goto alloc_status_switch;
}
rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!rbuf) {
- dev_err(dev, "%s: kmalloc failed for lirc_buffer\n", __func__);
alloc_status = 3;
goto alloc_status_switch;
}
@@ -1009,8 +1004,8 @@ static void imon_disconnect(struct usb_interface *interface)
mutex_unlock(&driver_lock);
- printk(KERN_INFO "%s: iMON device (intf%d) disconnected\n",
- __func__, ifnum);
+ dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
+ __func__, ifnum);
}
static int imon_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index ec14bc81851b..41d110f8bc02 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/*** Includes ***/
#include <linux/module.h>
@@ -115,8 +117,7 @@ static void out(int offset, int value)
parport_write_control(pport, value);
break;
case LIRC_LP_STATUS:
- printk(KERN_INFO "%s: attempt to write to status register\n",
- LIRC_DRIVER_NAME);
+ pr_info("attempt to write to status register\n");
break;
}
}
@@ -166,27 +167,23 @@ static unsigned int init_lirc_timer(void)
if (default_timer == 0) {
/* autodetect timer */
newtimer = (1000000*count)/timeelapsed;
- printk(KERN_INFO "%s: %u Hz timer detected\n",
- LIRC_DRIVER_NAME, newtimer);
+ pr_info("%u Hz timer detected\n", newtimer);
return newtimer;
} else {
newtimer = (1000000*count)/timeelapsed;
if (abs(newtimer - default_timer) > default_timer/10) {
/* bad timer */
- printk(KERN_NOTICE "%s: bad timer: %u Hz\n",
- LIRC_DRIVER_NAME, newtimer);
- printk(KERN_NOTICE "%s: using default timer: "
- "%u Hz\n",
- LIRC_DRIVER_NAME, default_timer);
+ pr_notice("bad timer: %u Hz\n", newtimer);
+ pr_notice("using default timer: %u Hz\n",
+ default_timer);
return default_timer;
} else {
- printk(KERN_INFO "%s: %u Hz timer detected\n",
- LIRC_DRIVER_NAME, newtimer);
+ pr_info("%u Hz timer detected\n", newtimer);
return newtimer; /* use detected value */
}
}
} else {
- printk(KERN_NOTICE "%s: no timer detected\n", LIRC_DRIVER_NAME);
+ pr_notice("no timer detected\n");
return 0;
}
}
@@ -194,13 +191,10 @@ static unsigned int init_lirc_timer(void)
static int lirc_claim(void)
{
if (parport_claim(ppdevice) != 0) {
- printk(KERN_WARNING "%s: could not claim port\n",
- LIRC_DRIVER_NAME);
- printk(KERN_WARNING "%s: waiting for port becoming available"
- "\n", LIRC_DRIVER_NAME);
+ pr_warn("could not claim port\n");
+ pr_warn("waiting for port becoming available\n");
if (parport_claim_or_block(ppdevice) < 0) {
- printk(KERN_NOTICE "%s: could not claim port, giving"
- " up\n", LIRC_DRIVER_NAME);
+ pr_notice("could not claim port, giving up\n");
return 0;
}
}
@@ -219,7 +213,7 @@ static void rbuf_write(int signal)
if (nwptr == rptr) {
/* no new signals will be accepted */
lost_irqs++;
- printk(KERN_NOTICE "%s: buffer overrun\n", LIRC_DRIVER_NAME);
+ pr_notice("buffer overrun\n");
return;
}
rbuf[wptr] = signal;
@@ -290,7 +284,7 @@ static void irq_handler(void *blah)
if (signal > timeout
|| (check_pselecd && (in(1) & LP_PSELECD))) {
signal = 0;
- printk(KERN_NOTICE "%s: timeout\n", LIRC_DRIVER_NAME);
+ pr_notice("timeout\n");
break;
}
} while (lirc_get_signal());
@@ -644,8 +638,7 @@ static int __init lirc_parallel_init(void)
result = platform_driver_register(&lirc_parallel_driver);
if (result) {
- printk(KERN_NOTICE "platform_driver_register"
- " returned %d\n", result);
+ pr_notice("platform_driver_register returned %d\n", result);
return result;
}
@@ -661,8 +654,7 @@ static int __init lirc_parallel_init(void)
pport = parport_find_base(io);
if (pport == NULL) {
- printk(KERN_NOTICE "%s: no port at %x found\n",
- LIRC_DRIVER_NAME, io);
+ pr_notice("no port at %x found\n", io);
result = -ENXIO;
goto exit_device_put;
}
@@ -670,8 +662,7 @@ static int __init lirc_parallel_init(void)
pf, kf, irq_handler, 0, NULL);
parport_put_port(pport);
if (ppdevice == NULL) {
- printk(KERN_NOTICE "%s: parport_register_device() failed\n",
- LIRC_DRIVER_NAME);
+ pr_notice("parport_register_device() failed\n");
result = -ENXIO;
goto exit_device_put;
}
@@ -706,14 +697,12 @@ static int __init lirc_parallel_init(void)
driver.dev = &lirc_parallel_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_NOTICE "%s: register_chrdev() failed\n",
- LIRC_DRIVER_NAME);
+ pr_notice("register_chrdev() failed\n");
parport_unregister_device(ppdevice);
result = -EIO;
goto exit_device_put;
}
- printk(KERN_INFO "%s: installed using port 0x%04x irq %d\n",
- LIRC_DRIVER_NAME, io, irq);
+ pr_info("installed using port 0x%04x irq %d\n", io, irq);
return 0;
exit_device_put:
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index f4e4d9003f38..68acca74ddb1 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -34,6 +34,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -171,7 +173,7 @@ static void delete_context(struct sasem_context *context)
kfree(context);
if (debug)
- printk(KERN_INFO "%s: context deleted\n", __func__);
+ pr_info("%s: context deleted\n", __func__);
}
static void deregister_from_lirc(struct sasem_context *context)
@@ -181,11 +183,10 @@ static void deregister_from_lirc(struct sasem_context *context)
retval = lirc_unregister_driver(minor);
if (retval)
- printk(KERN_ERR "%s: unable to deregister from lirc (%d)\n",
- __func__, retval);
+ pr_err("%s: unable to deregister from lirc (%d)\n",
+ __func__, retval);
else
- printk(KERN_INFO "Deregistered Sasem driver (minor:%d)\n",
- minor);
+ pr_info("Deregistered Sasem driver (minor:%d)\n", minor);
}
@@ -206,8 +207,7 @@ static int vfd_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&sasem_driver, subminor);
if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: could not find interface for minor %d\n",
+ pr_err("%s: could not find interface for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit;
@@ -252,8 +252,7 @@ static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -266,7 +265,7 @@ static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
context->vfd_contrast = (unsigned int)arg;
break;
default:
- printk(KERN_INFO "Unknown IOCTL command\n");
+ pr_info("Unknown IOCTL command\n");
mutex_unlock(&context->ctx_lock);
return -ENOIOCTLCMD; /* not supported */
}
@@ -287,8 +286,7 @@ static int vfd_close(struct inode *inode, struct file *file)
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
@@ -299,7 +297,7 @@ static int vfd_close(struct inode *inode, struct file *file)
retval = -EIO;
} else {
context->vfd_isopen = 0;
- printk(KERN_INFO "VFD port closed\n");
+ dev_info(&context->dev->dev, "VFD port closed\n");
if (!context->dev_present && !context->ir_isopen) {
/* Device disconnected before close and IR port is
@@ -373,16 +371,14 @@ static ssize_t vfd_write(struct file *file, const char *buf,
context = (struct sasem_context *) file->private_data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return -ENODEV;
}
mutex_lock(&context->ctx_lock);
if (!context->dev_present) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no Sasem device present\n", __func__);
+ pr_err("%s: no Sasem device present\n", __func__);
retval = -ENODEV;
goto exit;
}
@@ -519,7 +515,7 @@ static int ir_open(void *data)
__func__, retval);
else {
context->ir_isopen = 1;
- printk(KERN_INFO "IR port opened\n");
+ dev_info(&context->dev->dev, "IR port opened\n");
}
exit:
@@ -538,8 +534,7 @@ static void ir_close(void *data)
context = (struct sasem_context *)data;
if (!context) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: no context for device\n", __func__);
+ pr_err("%s: no context for device\n", __func__);
return;
}
@@ -547,7 +542,7 @@ static void ir_close(void *data)
usb_kill_urb(context->rx_urb);
context->ir_isopen = 0;
- printk(KERN_INFO "IR port closed\n");
+ pr_info("IR port closed\n");
if (!context->dev_present) {
@@ -584,8 +579,9 @@ static void incoming_packet(struct sasem_context *context,
int i;
if (len != 8) {
- printk(KERN_WARNING "%s: invalid incoming packet size (%d)\n",
- __func__, len);
+ dev_warn(&context->dev->dev,
+ "%s: invalid incoming packet size (%d)\n",
+ __func__, len);
return;
}
@@ -663,7 +659,7 @@ static void usb_rx_callback(struct urb *urb)
break;
default:
- printk(KERN_WARNING "%s: status (%d): ignored",
+ dev_warn(&urb->dev->dev, "%s: status (%d): ignored",
__func__, urb->status);
break;
}
@@ -763,22 +759,16 @@ static int sasem_probe(struct usb_interface *interface,
context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL);
if (!context) {
- dev_err(&interface->dev,
- "%s: kzalloc failed for context\n", __func__);
alloc_status = 1;
goto alloc_status_switch;
}
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!driver) {
- dev_err(&interface->dev,
- "%s: kzalloc failed for lirc_driver\n", __func__);
alloc_status = 2;
goto alloc_status_switch;
}
rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!rbuf) {
- dev_err(&interface->dev,
- "%s: kmalloc failed for lirc_buffer\n", __func__);
alloc_status = 3;
goto alloc_status_switch;
}
@@ -830,8 +820,9 @@ static int sasem_probe(struct usb_interface *interface,
retval = lirc_minor;
goto unlock;
} else
- printk(KERN_INFO "%s: Registered Sasem driver (minor:%d)\n",
- __func__, lirc_minor);
+ dev_info(&interface->dev,
+ "%s: Registered Sasem driver (minor:%d)\n",
+ __func__, lirc_minor);
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -852,15 +843,18 @@ static int sasem_probe(struct usb_interface *interface,
if (vfd_ep_found) {
if (debug)
- printk(KERN_INFO "Registering VFD with sysfs\n");
+ dev_info(&interface->dev,
+ "Registering VFD with sysfs\n");
if (usb_register_dev(interface, &sasem_class))
/* Not a fatal error, so ignore */
- printk(KERN_INFO "%s: could not get a minor number "
- "for VFD\n", __func__);
+ dev_info(&interface->dev,
+ "%s: could not get a minor number for VFD\n",
+ __func__);
}
- printk(KERN_INFO "%s: Sasem device on usb<%d:%d> initialized\n",
- __func__, dev->bus->busnum, dev->devnum);
+ dev_info(&interface->dev,
+ "%s: Sasem device on usb<%d:%d> initialized\n",
+ __func__, dev->bus->busnum, dev->devnum);
unlock:
mutex_unlock(&context->ctx_lock);
@@ -891,7 +885,7 @@ exit:
}
/**
- * Callback function for USB core API: disonnect
+ * Callback function for USB core API: disconnect
*/
static void sasem_disconnect(struct usb_interface *interface)
{
@@ -903,7 +897,8 @@ static void sasem_disconnect(struct usb_interface *interface)
context = usb_get_intfdata(interface);
mutex_lock(&context->ctx_lock);
- printk(KERN_INFO "%s: Sasem device disconnected\n", __func__);
+ dev_info(&interface->dev, "%s: Sasem device disconnected\n",
+ __func__);
usb_set_intfdata(interface, NULL);
context->dev_present = 0;
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index b5d0088f3102..af08e677b60f 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -48,6 +48,8 @@
* Steve Davies <steve@daviesfam.org> July 2001
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
@@ -667,8 +669,7 @@ static irqreturn_t irq_handler(int i, void *blah)
counter++;
status = sinp(UART_MSR);
if (counter > RS_ISR_PASS_LIMIT) {
- printk(KERN_WARNING LIRC_DRIVER_NAME ": AIEEEE: "
- "We're caught!\n");
+ pr_warn("AIEEEE: We're caught!\n");
break;
}
if ((status & hardware[type].signal_pin_change)
@@ -703,11 +704,10 @@ static irqreturn_t irq_handler(int i, void *blah)
dcd = (status & hardware[type].signal_pin) ? 1 : 0;
if (dcd == last_dcd) {
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": ignoring spike: %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
continue;
}
@@ -715,25 +715,20 @@ static irqreturn_t irq_handler(int i, void *blah)
if (tv.tv_sec < lasttv.tv_sec ||
(tv.tv_sec == lasttv.tv_sec &&
tv.tv_usec < lasttv.tv_usec)) {
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": AIEEEE: your clock just jumped "
- "backwards\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("AIEEEE: your clock just jumped backwards\n");
+ pr_warn("%d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
data = PULSE_MASK;
} else if (deltv > 15) {
data = PULSE_MASK; /* really long time */
if (!(dcd^sense)) {
/* sanity check */
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": AIEEEE: "
- "%d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- tv.tv_usec, lasttv.tv_usec);
+ pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n",
+ dcd, sense,
+ tv.tv_sec, lasttv.tv_sec,
+ tv.tv_usec, lasttv.tv_usec);
/*
* detecting pulse while this
* MUST be a space!
@@ -776,8 +771,7 @@ static int hardware_init_port(void)
soutp(UART_IER, scratch);
if (scratch2 != 0 || scratch3 != 0x0f) {
/* we fail, there's nothing here */
- printk(KERN_ERR LIRC_DRIVER_NAME ": port existence test "
- "failed, cannot continue\n");
+ pr_err("port existence test failed, cannot continue\n");
return -ENODEV;
}
@@ -850,11 +844,9 @@ static int lirc_serial_probe(struct platform_device *dev)
LIRC_DRIVER_NAME, (void *)&hardware);
if (result < 0) {
if (result == -EBUSY)
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n",
- irq);
+ dev_err(&dev->dev, "IRQ %d busy\n", irq);
else if (result == -EINVAL)
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
+ dev_err(&dev->dev, "Bad irq number or handler\n");
return result;
}
@@ -869,14 +861,11 @@ static int lirc_serial_probe(struct platform_device *dev)
LIRC_DRIVER_NAME) == NULL))
|| ((iommap == 0)
&& (request_region(io, 8, LIRC_DRIVER_NAME) == NULL))) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": port %04x already in use\n", io);
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": use 'setserial /dev/ttySX uart none'\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": or compile the serial port driver as module and\n");
- printk(KERN_WARNING LIRC_DRIVER_NAME
- ": make sure this module is loaded first\n");
+ dev_err(&dev->dev, "port %04x already in use\n", io);
+ dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
+ dev_warn(&dev->dev,
+ "or compile the serial port driver as module and\n");
+ dev_warn(&dev->dev, "make sure this module is loaded first\n");
result = -EBUSY;
goto exit_free_irq;
}
@@ -907,11 +896,11 @@ static int lirc_serial_probe(struct platform_device *dev)
msleep(40);
}
sense = (nlow >= nhigh ? 1 : 0);
- printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active "
- "%s receiver\n", sense ? "low" : "high");
+ dev_info(&dev->dev, "auto-detected active %s receiver\n",
+ sense ? "low" : "high");
} else
- printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
- "%s receiver\n", sense ? "low" : "high");
+ dev_info(&dev->dev, "Manually using active %s receiver\n",
+ sense ? "low" : "high");
dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
@@ -1251,8 +1240,7 @@ static int __init lirc_serial_init_module(void)
driver.dev = &lirc_serial_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": register_chrdev failed!\n");
+ pr_err("register_chrdev failed!\n");
lirc_serial_exit();
return driver.minor;
}
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index a45799874a21..63a554c36f75 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -33,6 +33,8 @@
* parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
@@ -495,7 +497,7 @@ static int init_chrdev(void)
driver.dev = &lirc_sir_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+ pr_err("init_chrdev() failed.\n");
return -EIO;
}
return 0;
@@ -604,7 +606,7 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
}
if (status & UTSR0_TFS)
- printk(KERN_ERR "transmit fifo not full, shouldn't happen\n");
+ pr_err("transmit fifo not full, shouldn't happen\n");
/* We must clear certain bits. */
status &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
@@ -787,7 +789,7 @@ static int init_hardware(void)
#ifdef LIRC_ON_SA1100
#ifdef CONFIG_SA1100_BITSY
if (machine_is_bitsy()) {
- printk(KERN_INFO "Power on IR module\n");
+ pr_info("Power on IR module\n");
set_bitsy_egpio(EGPIO_BITSY_IR_ON);
}
#endif
@@ -885,8 +887,7 @@ static int init_hardware(void)
udelay(1500);
/* read previous control byte */
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": 0x%02x\n", sinp(UART_RX));
+ pr_info("0x%02x\n", sinp(UART_RX));
/* Set DLAB 1. */
soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
@@ -964,8 +965,7 @@ static int init_port(void)
/* get I/O port access and IRQ line */
#ifndef LIRC_ON_SA1100
if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) {
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": i/o port 0x%.4x already in use.\n", io);
+ pr_err("i/o port 0x%.4x already in use.\n", io);
return -EBUSY;
}
#endif
@@ -975,15 +975,11 @@ static int init_port(void)
# ifndef LIRC_ON_SA1100
release_region(io, 8);
# endif
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": IRQ %d already in use.\n",
- irq);
+ pr_err("IRQ %d already in use.\n", irq);
return retval;
}
#ifndef LIRC_ON_SA1100
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": I/O port 0x%.4x, IRQ %d.\n",
- io, irq);
+ pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
#endif
init_timer(&timerlist);
@@ -1213,8 +1209,7 @@ static int init_lirc_sir(void)
if (retval < 0)
return retval;
init_hardware();
- printk(KERN_INFO LIRC_DRIVER_NAME
- ": Installed.\n");
+ pr_info("Installed.\n");
return 0;
}
@@ -1243,23 +1238,20 @@ static int __init lirc_sir_init(void)
retval = platform_driver_register(&lirc_sir_driver);
if (retval) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
- "failed!\n");
+ pr_err("Platform driver register failed!\n");
return -ENODEV;
}
lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
if (!lirc_sir_dev) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
- "failed!\n");
+ pr_err("Platform device alloc failed!\n");
retval = -ENOMEM;
goto pdev_alloc_fail;
}
retval = platform_device_add(lirc_sir_dev);
if (retval) {
- printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
- "failed!\n");
+ pr_err("Platform device add failed!\n");
retval = -ENODEV;
goto pdev_add_fail;
}
@@ -1292,7 +1284,7 @@ static void __exit lirc_sir_exit(void)
drop_port();
platform_device_unregister(lirc_sir_dev);
platform_driver_unregister(&lirc_sir_driver);
- printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ pr_info("Uninstalled.\n");
}
module_init(lirc_sir_init);
diff --git a/drivers/staging/media/solo6x10/p2m.c b/drivers/staging/media/solo6x10/p2m.c
index 56210f0fc5ec..58ab61b1f1d9 100644
--- a/drivers/staging/media/solo6x10/p2m.c
+++ b/drivers/staging/media/solo6x10/p2m.c
@@ -231,15 +231,15 @@ static void run_p2m_test(struct solo_dev *solo_dev)
u32 size = SOLO_JPEG_EXT_ADDR(solo_dev) + SOLO_JPEG_EXT_SIZE(solo_dev);
int i, d;
- printk(KERN_WARNING "%s: Testing %u bytes of external ram\n",
- SOLO6X10_NAME, size);
+ dev_warn(&solo_dev->pdev->dev, "Testing %u bytes of external ram\n",
+ size);
for (i = 0; i < size; i += TEST_CHUNK_SIZE)
for (d = 0; d < 4; d++)
errs += p2m_test(solo_dev, d, i, TEST_CHUNK_SIZE);
- printk(KERN_WARNING "%s: Found %llu errors during p2m test\n",
- SOLO6X10_NAME, errs);
+ dev_warn(&solo_dev->pdev->dev, "Found %llu errors during p2m test\n",
+ errs);
return;
}
diff --git a/drivers/staging/media/solo6x10/v4l2-enc.c b/drivers/staging/media/solo6x10/v4l2-enc.c
index f8f0da952288..4977e869d5b7 100644
--- a/drivers/staging/media/solo6x10/v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/v4l2-enc.c
@@ -1619,6 +1619,8 @@ static int solo_s_ext_ctrls(struct file *file, void *priv,
solo_enc->osd_text[OSD_TEXT_MAX] = '\0';
if (!err)
err = solo_osd_print(solo_enc);
+ else
+ err = -EFAULT;
}
break;
default:
@@ -1654,6 +1656,8 @@ static int solo_g_ext_ctrls(struct file *file, void *priv,
err = copy_to_user(ctrl->string,
solo_enc->osd_text,
OSD_TEXT_MAX);
+ if (err)
+ err = -EFAULT;
}
break;
default:
diff --git a/drivers/staging/media/solo6x10/v4l2.c b/drivers/staging/media/solo6x10/v4l2.c
index 571c3a348d30..ca774cc57539 100644
--- a/drivers/staging/media/solo6x10/v4l2.c
+++ b/drivers/staging/media/solo6x10/v4l2.c
@@ -415,10 +415,7 @@ static int solo_start_thread(struct solo_filehandle *fh)
{
fh->kthread = kthread_run(solo_thread, fh, SOLO6X10_NAME "_disp");
- if (IS_ERR(fh->kthread))
- return PTR_ERR(fh->kthread);
-
- return 0;
+ return PTR_RET(fh->kthread);
}
static void solo_stop_thread(struct solo_filehandle *fh)
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
deleted file mode 100644
index abeeb00aaa12..000000000000
--- a/drivers/staging/omapdrm/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-TODO
-. add video decode/encode support (via syslink3 + codec-engine)
- . NOTE: with dmabuf this probably could be split into different driver
- so perhaps this TODO doesn't belong here
-. where should we do eviction (detatch_pages())? We aren't necessarily
- accessing the pages via a GART, so maybe we need some other threshold
- to put a cap on the # of pages that can be pin'd. (It is mostly only
- of interest in case you have a swap partition/file.. which a lot of
- these devices do not.. but it doesn't hurt for the driver to do the
- right thing anyways.)
- . Use mm_shrinker to trigger unpinning pages. Need to figure out how
- to handle next issue first (I think?)
- . Note TTM already has some mm_shrinker stuff.. maybe an argument to
- move to TTM? Or maybe something that could be factored out in common?
-. GEM/shmem backed pages can have existing mappings (kernel linear map,
- etc..), which isn't really ideal.
-. Revisit GEM sync object infrastructure.. TTM has some framework for this
- already. Possibly this could be refactored out and made more common?
- There should be some way to do this with less wheel-reinvention.
-. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
- access is made from any component in the system. Which means on suspend
- CRTC's should be disabled, and on resume the LUT should be reprogrammed
- before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
- buffer mapped in DMM/TILER before LUT is reloaded.
-
-Userspace:
-. git://github.com/robclark/xf86-video-omap.git
-
-Currently tested on
-. OMAP3530 beagleboard
-. OMAP4430 pandaboard
-. OMAP4460 pandaboard
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 75aa5bfcb8dd..539fa5785afe 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -411,7 +411,7 @@ struct socket *sockfd_to_socket(unsigned int sockfd)
return NULL;
}
- inode = file->f_dentry->d_inode;
+ inode = file_inode(file);
if (!inode || !S_ISSOCK(inode->i_mode)) {
fput(file);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 57474cff51f0..d074b1ecb41a 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -318,7 +318,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
size_t okcount;
@@ -364,7 +364,7 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
static ssize_t vme_user_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
size_t okcount;
@@ -410,7 +410,7 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
loff_t absolute = -1;
- unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ unsigned int minor = MINOR(file_inode(file)->i_rdev);
size_t image_size;
if (minor == CONTROL_MINOR)
@@ -583,7 +583,7 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int ret;
mutex_lock(&vme_user_mutex);
- ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ ret = vme_user_ioctl(file_inode(file), file, cmd, arg);
mutex_unlock(&vme_user_mutex);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 339f97f7085b..7ea246a07731 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -144,23 +144,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
spin_lock_init(&tiqn->login_stats.lock);
spin_lock_init(&tiqn->logout_stats.lock);
- if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for tiqn_idr failed\n");
- kfree(tiqn);
- return ERR_PTR(-ENOMEM);
- }
tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+ idr_preload(GFP_KERNEL);
spin_lock(&tiqn_lock);
- ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+
+ ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
if (ret < 0) {
- pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
spin_unlock(&tiqn_lock);
+ idr_preload_end();
kfree(tiqn);
return ERR_PTR(ret);
}
+ tiqn->tiqn_index = ret;
list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
spin_unlock(&tiqn_lock);
+ idr_preload_end();
pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
@@ -264,16 +265,50 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
return 0;
}
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
struct __kernel_sockaddr_storage *sockaddr,
+ struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
- struct iscsi_np *np;
- int ip_match = 0;
+ bool ip_match = false;
u16 port;
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = true;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+ ip_match = true;
+
+ port = ntohs(sock_in->sin_port);
+ }
+
+ if ((ip_match == true) && (np->np_port == port) &&
+ (np->np_network_transport == network_transport))
+ return true;
+
+ return false;
+}
+
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ bool match;
+
spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
spin_lock(&np->np_thread_lock);
@@ -282,29 +317,8 @@ static struct iscsi_np *iscsit_get_np(
continue;
}
- if (sockaddr->ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)sockaddr;
- sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
-
- if (!memcmp(&sock_in6->sin6_addr.in6_u,
- &sock_in6_e->sin6_addr.in6_u,
- sizeof(struct in6_addr)))
- ip_match = 1;
-
- port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)sockaddr;
- sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
-
- if (sock_in->sin_addr.s_addr ==
- sock_in_e->sin_addr.s_addr)
- ip_match = 1;
-
- port = ntohs(sock_in->sin_port);
- }
-
- if ((ip_match == 1) && (np->np_port == port) &&
- (np->np_network_transport == network_transport)) {
+ match = iscsit_check_np_match(sockaddr, np, network_transport);
+ if (match == true) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
@@ -3570,6 +3584,10 @@ check_rsp_state:
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ return 1;
+
continue;
} else if (ret == 2) {
/* Still must send status,
@@ -3659,7 +3677,7 @@ check_rsp_state:
}
if (atomic_read(&conn->check_immediate_queue))
- break;
+ return 1;
}
return 0;
@@ -3703,12 +3721,15 @@ restart:
signal_pending(current))
goto transport_err;
+get_immediate:
ret = handle_immediate_queue(conn);
if (ret < 0)
goto transport_err;
ret = handle_response_queue(conn);
- if (ret == -EAGAIN)
+ if (ret == 1)
+ goto get_immediate;
+ else if (ret == -EAGAIN)
goto restart;
else if (ret < 0)
goto transport_err;
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index f1e4f3155bac..b1a1e6350707 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -8,6 +8,8 @@ extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+ struct iscsi_np *, int);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index fdb632f0ab85..2535d4d46c0e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -247,19 +247,16 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
- if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
- pr_err("idr_pre_get() for sess_idr failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
- }
+ idr_preload(GFP_KERNEL);
spin_lock_bh(&sess_idr_lock);
- ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
+ ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ sess->session_index = ret;
spin_unlock_bh(&sess_idr_lock);
+ idr_preload_end();
if (ret < 0) {
- pr_err("idr_get_new() for sess_idr failed\n");
+ pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index d89164287d00..ca2be406f141 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1095,11 +1095,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
SET_PSTATE_REPLY_OPTIONAL(param);
}
} else if (IS_TYPE_NUMBER(param)) {
- char *tmpptr, buf[10];
+ char *tmpptr, buf[11];
u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
- memset(buf, 0, 10);
+ memset(buf, 0, sizeof(buf));
if (!strcmp(param->name, MAXCONNECTIONS) ||
!strcmp(param->name, MAXBURSTLENGTH) ||
@@ -1503,8 +1503,8 @@ static int iscsi_enforce_integrity_rules(
FirstBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
if (FirstBurstLength > MaxBurstLength) {
- char tmpbuf[10];
- memset(tmpbuf, 0, 10);
+ char tmpbuf[11];
+ memset(tmpbuf, 0, sizeof(tmpbuf));
sprintf(tmpbuf, "%u", MaxBurstLength);
if (iscsi_update_param_value(param, tmpbuf))
return -1;
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 421d6947dc64..464b4206a51e 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -410,14 +410,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[8];
+ int ret;
spin_lock(&lstat->lock);
- snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
- "ipv6" : "ipv4");
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+ ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+ else
+ ret = snprintf(page, PAGE_SIZE, "ipv4\n");
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
@@ -427,16 +429,19 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[32];
+ int ret;
spin_lock(&lstat->lock);
- if (lstat->last_intr_fail_ip_family == AF_INET6)
- snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
- else
- snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+ if (lstat->last_intr_fail_ip_family == AF_INET6) {
+ ret = snprintf(page, PAGE_SIZE, "[%s]\n",
+ lstat->last_intr_fail_ip_addr);
+ } else {
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ lstat->last_intr_fail_ip_addr);
+ }
spin_unlock(&lstat->lock);
- return snprintf(page, PAGE_SIZE, "%s\n", buf);
+ return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index de9ea32b6104..ee8f8c66248d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -422,6 +422,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
return NULL;
}
+static bool iscsit_tpg_check_network_portal(
+ struct iscsi_tiqn *tiqn,
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct iscsi_np *np;
+ bool match = false;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ np = tpg_np->tpg_np;
+
+ match = iscsit_check_np_match(sockaddr, np,
+ network_transport);
+ if (match == true)
+ break;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return match;
+}
+
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
struct __kernel_sockaddr_storage *sockaddr,
@@ -432,6 +461,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np;
+ if (!tpg_np_parent) {
+ if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+ network_transport) == true) {
+ pr_err("Network Portal: %s already exists on a"
+ " different TPG on %s\n", ip_str,
+ tpg->tpg_tiqn->tiqn);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
if (!tpg_np) {
pr_err("Unable to allocate memory for"
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 6917a9e938e7..d3536f57444f 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2598,7 +2598,7 @@ static int __init sbp_init(void)
return 0;
};
-static void sbp_exit(void)
+static void __exit sbp_exit(void)
{
sbp_deregister_configfs();
};
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 7d4ec02e29a9..ff1c5ee352cb 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
@@ -715,36 +716,18 @@ static int core_alua_write_tpg_metadata(
unsigned char *md_buf,
u32 md_buf_len)
{
- mm_segment_t old_fs;
- struct file *file;
- struct iovec iov[1];
- int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
-
- memset(iov, 0, sizeof(struct iovec));
+ struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ int ret;
- file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
- pr_err("filp_open(%s) for ALUA metadata failed\n",
- path);
+ if (IS_ERR(file)) {
+ pr_err("filp_open(%s) for ALUA metadata failed\n", path);
return -ENODEV;
}
-
- iov[0].iov_base = &md_buf[0];
- iov[0].iov_len = md_buf_len;
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
-
- if (ret < 0) {
+ ret = kernel_write(file, md_buf, md_buf_len, 0);
+ if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
-
- return 0;
+ fput(file);
+ return (ret < 0) ? -EIO : 0;
}
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 4efb61b8d001..43b7ac6c5b1c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -609,6 +609,9 @@ static struct target_core_dev_attrib_attribute \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_show_attr_##_name);
+DEF_DEV_ATTRIB(emulate_model_alias);
+SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
@@ -681,6 +684,7 @@ SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_model_alias.attr,
&target_core_dev_attrib_emulate_dpo.attr,
&target_core_dev_attrib_emulate_fua_write.attr,
&target_core_dev_attrib_emulate_fua_read.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f2aa7543d20a..2e4d655471bc 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -713,6 +713,44 @@ int se_dev_set_max_write_same_len(
return 0;
}
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+ const char *configname;
+
+ configname = config_item_name(&dev->dev_group.cg_item);
+ if (strlen(configname) >= 16) {
+ pr_warn("dev[%p]: Backstore name '%s' is too long for "
+ "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+ configname);
+ }
+ snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
+{
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change model alias"
+ " while export_count is %d\n",
+ dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ if (flag != 0 && flag != 1) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ if (flag) {
+ dev_set_t10_wwn_model_alias(dev);
+ } else {
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ }
+ dev->dev_attrib.emulate_model_alias = flag;
+
+ return 0;
+}
+
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
@@ -772,6 +810,12 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
+ if (dev->transport->get_write_cache) {
+ pr_warn("emulate_write_cache cannot be changed when underlying"
+ " HW reports WriteCacheEnabled, ignoring request\n");
+ return 0;
+ }
+
dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, dev->dev_attrib.emulate_write_cache);
@@ -1182,24 +1226,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
+ struct se_node_acl *nacl,
u32 mapped_lun,
- char *initiatorname,
int *ret)
{
struct se_lun_acl *lacl;
- struct se_node_acl *nacl;
- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!nacl) {
- *ret = -EINVAL;
- return NULL;
- }
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
@@ -1210,7 +1248,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+ nacl->initiatorname);
return lacl;
}
@@ -1390,6 +1429,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
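A side note on dev_set_t10_wwn_model_alias() above: snprintf() with a size of 16 copies at most 15 characters of the configfs backstore name plus the terminating NUL into the INQUIRY model field, which is why names of 16 characters or more trigger the truncation warning. A tiny userspace sketch (the backstore name below is invented) shows the effect:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char model[16];
		const char *configname = "my_fileio_backstore";	/* hypothetical, 19 chars */

		/* Mirrors snprintf(&t10_wwn.model[0], 16, "%s", configname). */
		snprintf(model, sizeof(model), "%s", configname);
		printf("'%s' -> '%s' (%zu chars kept)\n", configname, model, strlen(model));
		return 0;
	}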
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index c57bbbc7a7d1..04c775cb3e65 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
ret = -EINVAL;
goto out;
}
+ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n", mapped_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+ ret = -EINVAL;
+ goto out;
+ }
- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
- config_item_name(acl_ci), &ret);
+ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+ mapped_lun, &ret);
if (!lacl) {
ret = -EINVAL;
goto out;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b9c88497e8f0..17a6acbc3ab0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -190,6 +190,11 @@ static int fd_configure_device(struct se_device *dev)
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
+ /*
+ * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+ * based upon struct iovec limit for vfs_writev()
+ */
+ dev->dev_attrib.max_write_same_len = 0x1000;
pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
@@ -265,7 +270,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
- if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (S_ISBLK(file_inode(fd)->i_mode)) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
@@ -328,6 +333,114 @@ fd_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
+static unsigned char *
+fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
+ unsigned int len)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ unsigned int block_size = se_dev->dev_attrib.block_size;
+ unsigned int i = 0, end;
+ unsigned char *buf, *p, *kmap_buf;
+
+ buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate fd_execute_write_same buf\n");
+ return NULL;
+ }
+
+ kmap_buf = kmap(sg_page(sg)) + sg->offset;
+ if (!kmap_buf) {
+ pr_err("kmap() failed in fd_setup_write_same\n");
+ kfree(buf);
+ return NULL;
+ }
+ /*
+ * Fill local *buf to contain multiple WRITE_SAME blocks up to
+ * min(len, PAGE_SIZE)
+ */
+ p = buf;
+ end = min_t(unsigned int, len, PAGE_SIZE);
+
+ while (i < end) {
+ memcpy(p, kmap_buf, block_size);
+
+ i += block_size;
+ p += block_size;
+ }
+ kunmap(sg_page(sg));
+
+ return buf;
+}
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *f = fd_dev->fd_file;
+ struct scatterlist *sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+ unsigned int len, len_tmp, iov_num;
+ int i, rc;
+ unsigned char *buf;
+
+ if (!nolb) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ len = len_tmp = nolb * se_dev->dev_attrib.block_size;
+ iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ buf = fd_setup_write_same_buf(cmd, sg, len);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ iov = vzalloc(sizeof(struct iovec) * iov_num);
+ if (!iov) {
+ pr_err("Unable to allocate fd_execute_write_same iovecs\n");
+ kfree(buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * Map the single fabric received scatterlist block now populated
+ * in *buf into each iovec for I/O submission.
+ */
+ for (i = 0; i < iov_num; i++) {
+ iov[i].iov_base = buf;
+ iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
+ len_tmp -= iov[i].iov_len;
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = vfs_writev(f, &iov[0], iov_num, &pos);
+ set_fs(old_fs);
+
+ vfree(iov);
+ kfree(buf);
+
+ if (rc < 0 || rc != len) {
+ pr_err("vfs_writev() returned %d for write same\n", rc);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
@@ -486,6 +599,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
+ .execute_write_same = fd_execute_write_same,
};
static sense_reason_t
@@ -517,7 +631,7 @@ static int __init fileio_module_init(void)
return transport_subsystem_register(&fileio_template);
}
-static void fileio_module_exit(void)
+static void __exit fileio_module_exit(void)
{
transport_subsystem_release(&fileio_template);
}
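To put numbers on the sizing logic in fd_execute_write_same() above: the single received block is replicated into a bounce buffer of at most PAGE_SIZE bytes, and the nolb * block_size payload is split into DIV_ROUND_UP(len, PAGE_SIZE) iovecs that all point back into that buffer. Assuming a 4 KiB PAGE_SIZE and a 512-byte block size (typical values, not mandated by the patch), a small standalone calculation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int block_size = 512;			/* assumed */
		unsigned int page_size  = 4096;			/* assumed PAGE_SIZE */
		unsigned int nolb       = 2048;			/* example WRITE_SAME length in LBAs */
		unsigned int len        = nolb * block_size;	/* total payload: 1 MiB */
		unsigned int iov_num    = (len + page_size - 1) / page_size;

		/* 256 iovecs, each covering up to one page of the shared buffer. */
		printf("len=%u bytes, iov_num=%u\n", len, iov_num);
		return 0;
	}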
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b526d23dcd4f..8bcc514ec8b6 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,6 +154,7 @@ static int iblock_configure_device(struct se_device *dev)
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
+
return 0;
out_free_bioset:
@@ -390,10 +391,19 @@ iblock_execute_unmap(struct se_cmd *cmd)
sense_reason_t ret = 0;
int dl, bd_dl, err;
+ /* We never set ANC_SUP */
+ if (cmd->t_task_cdb[1])
+ return TCM_INVALID_CDB_FIELD;
+
+ if (cmd->data_length == 0) {
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
buf = transport_kmap_data_sg(cmd);
@@ -463,7 +473,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
int rc;
rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
- spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+ sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
if (rc < 0) {
pr_warn("blkdev_issue_discard() failed: %d\n", rc);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -481,7 +491,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
struct bio *bio;
struct bio_list list;
sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = spc_get_write_same_sectors(cmd);
+ sector_t sectors = sbc_get_write_same_sectors(cmd);
sg = &cmd->t_data_sg[0];
@@ -654,20 +664,24 @@ iblock_execute_rw(struct se_cmd *cmd)
u32 sg_num = sgl_nents;
sector_t block_lba;
unsigned bio_cnt;
- int rw;
+ int rw = 0;
int i;
if (data_direction == DMA_TO_DEVICE) {
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force data to disk if we pretend to not have a volatile
- * write cache, or the initiator set the Force Unit Access bit.
+ * Force writethrough using WRITE_FUA if a volatile write cache
+ * is not enabled, or if initiator set the Force Unit Access bit.
*/
- if (dev->dev_attrib.emulate_write_cache == 0 ||
- (dev->dev_attrib.emulate_fua_write > 0 &&
- (cmd->se_cmd_flags & SCF_FUA)))
- rw = WRITE_FUA;
- else
+ if (q->flush_flags & REQ_FUA) {
+ if (cmd->se_cmd_flags & SCF_FUA)
+ rw = WRITE_FUA;
+ else if (!(q->flush_flags & REQ_FLUSH))
+ rw = WRITE_FUA;
+ } else {
rw = WRITE;
+ }
} else {
rw = READ;
}
@@ -774,6 +788,15 @@ iblock_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
+bool iblock_get_write_cache(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
+
+ return q->flush_flags & REQ_FLUSH;
+}
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -790,6 +813,7 @@ static struct se_subsystem_api iblock_template = {
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_write_cache = iblock_get_write_cache,
};
static int __init iblock_module_init(void)
@@ -797,7 +821,7 @@ static int __init iblock_module_init(void)
return transport_subsystem_register(&iblock_template);
}
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 93e9c1f580b0..853bab60e362 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -25,6 +25,7 @@ int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
+int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -45,7 +46,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
- u32, char *, int *);
+ struct se_node_acl *, u32, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8e0290b38e43..3240f2cc81ef 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
@@ -1957,13 +1958,10 @@ static int __core_scsi3_write_aptpl_to_file(
{
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
- struct iovec iov[1];
- mm_segment_t old_fs;
int flags = O_RDWR | O_CREAT | O_TRUNC;
char path[512];
int ret;
- memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
if (strlen(&wwn->unit_serial[0]) >= 512) {
@@ -1974,31 +1972,22 @@ static int __core_scsi3_write_aptpl_to_file(
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
- if (IS_ERR(file) || !file || !file->f_dentry) {
+ if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+ return PTR_ERR(file);
}
- iov[0].iov_base = &buf[0];
if (!pr_aptpl_buf_len)
- iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
- else
- iov[0].iov_len = pr_aptpl_buf_len;
+ pr_aptpl_buf_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
- set_fs(old_fs);
+ ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
- if (ret < 0) {
+ if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
- filp_close(file, NULL);
- return -EIO;
- }
- filp_close(file, NULL);
+ fput(file);
- return 0;
+ return ret ? -EIO : 0;
}
static int
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2bcfd79cf595..82e78d72fdb6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -840,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(int sg_num)
+static inline struct bio *pscsi_get_bio(int nr_vecs)
{
struct bio *bio;
/*
	 * Use bio_kmalloc() following the comment for bio -> struct request
* in block/blk-core.c:blk_make_request()
*/
- bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
@@ -940,7 +940,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio = NULL;
}
- page++;
len -= bytes;
data_len -= bytes;
off = 0;
@@ -952,7 +951,6 @@ fail:
while (*hbio) {
bio = *hbio;
*hbio = (*hbio)->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1092,7 +1090,6 @@ fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1178,7 +1175,7 @@ static int __init pscsi_module_init(void)
return transport_subsystem_register(&pscsi_template);
}
-static void pscsi_module_exit(void)
+static void __exit pscsi_module_exit(void)
{
transport_subsystem_release(&pscsi_template);
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 0457de362e68..e0b3c379aa14 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -256,10 +256,12 @@ static void rd_free_device(struct se_device *dev)
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
- u32 i;
struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_table_count) {
sg_table = &rd_dev->sg_table_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
@@ -314,7 +316,19 @@ rd_execute_rw(struct se_cmd *cmd)
void *rd_addr;
sg_miter_next(&m);
+ if (!(u32)m.length) {
+ pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+ dev->rd_dev_id, m.addr, m.length);
+ sg_miter_stop(&m);
+ return TCM_INCORRECT_AMOUNT_OF_DATA;
+ }
len = min((u32)m.length, src_len);
+ if (len > rd_size) {
+ pr_debug("RD[%u]: size underrun page %d offset %d "
+ "size %d\n", dev->rd_dev_id,
+ rd_page, rd_offset, rd_size);
+ len = rd_size;
+ }
m.consumed = len;
rd_addr = sg_virt(rd_sg) + rd_offset;
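The rd_get_sg_table() change above replaces the linear scan with direct indexing: each table covers a fixed number of pages (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)), so the owning table is simply page / sg_per_table, and the page_start/page_end check only confirms the hit. A quick illustration with assumed sizes (the real constants depend on the kernel configuration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int rd_max_allocation_size = 65536;	/* assumed RD_MAX_ALLOCATION_SIZE */
		unsigned int sg_entry_size = 32;		/* assumed sizeof(struct scatterlist) */
		unsigned int sg_per_table = rd_max_allocation_size / sg_entry_size;	/* 2048 */
		unsigned int page = 5000;

		/* Table index is found in O(1) instead of walking the table array. */
		printf("page %u lives in sg_table[%u]\n", page, page / sg_per_table);
		return 0;
	}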
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a664c664a31a..290230de2c53 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,7 +105,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
return 0;
}
-sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
@@ -126,7 +126,7 @@ sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
cmd->t_task_lba + 1;
}
-EXPORT_SYMBOL(spc_get_write_same_sectors);
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
@@ -233,7 +233,7 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
- unsigned int sectors = spc_get_write_same_sectors(cmd);
+ unsigned int sectors = sbc_get_write_same_sectors(cmd);
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -486,7 +486,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
- return TCM_INVALID_CDB_FIELD;
+ return TCM_ADDRESS_OUT_OF_RANGE;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 2d88f087d961..4cb667d720a7 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -66,8 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static sense_reason_t
-spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -104,6 +104,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
return 0;
}
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
/* unit serial number */
static sense_reason_t
@@ -160,7 +161,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static sense_reason_t
+sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -404,17 +405,33 @@ check_scsi_name:
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
return 0;
}
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+static bool
+spc_check_dev_wce(struct se_device *dev)
+{
+ bool wce = false;
+
+ if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
+ struct se_device *dev = cmd->se_dev;
+
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
+ if (spc_check_dev_wce(dev))
buf[6] = 0x01;
return 0;
}
@@ -764,7 +781,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
if (pc == 1)
goto out;
- if (dev->dev_attrib.emulate_write_cache > 0)
+ if (spc_check_dev_wce(dev))
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -876,7 +893,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
spc_modesense_write_protect(&buf[length], type);
- if ((dev->dev_attrib.emulate_write_cache > 0) &&
+ if ((spc_check_dev_wce(dev)) &&
(dev->dev_attrib.emulate_fua_write > 0))
spc_modesense_dpofua(&buf[length], type);
@@ -983,6 +1000,14 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
int ret = 0;
int i;
+ if (!cmd->data_length) {
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+ }
+
+ if (cmd->data_length < off + 2)
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
buf = transport_kmap_data_sg(cmd);
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1007,6 +1032,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
goto out;
check_contents:
+ if (cmd->data_length < off + length) {
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
if (memcmp(buf + off, tbuf, length))
ret = TCM_INVALID_PARAMETER_LIST;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index c6e0293ffdb0..d0b4dd95b91e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -331,18 +331,6 @@ static void core_tmr_drain_state_list(
fe_count = atomic_read(&cmd->t_fe_count);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
- " cdb: %p, t_fe_count: %d dev: %p\n", cmd,
- fe_count, dev);
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- continue;
- }
- pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
- " t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5192ac0337f7..9169d6a5d7e4 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!strcmp(acl->initiatorname, initiatorname) &&
- !acl->dynamic_node_acl) {
- spin_unlock_irq(&tpg->acl_node_lock);
- return acl;
- }
- }
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
- return NULL;
+ return acl;
}
/* core_tpg_add_node_to_devs():
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bd587b70661a..2030b608136d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -907,15 +907,18 @@ int transport_dump_vpd_ident(
switch (vpd->device_identifier_code_set) {
case 0x01: /* Binary */
- sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD Binary Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x02: /* ASCII */
- sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD ASCII Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
case 0x03: /* UTF-8 */
- sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+ snprintf(buf, sizeof(buf),
+ "T10 VPD UTF-8 Device Identifier: %s\n",
&vpd->device_identifier[0]);
break;
default:
@@ -1514,6 +1517,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
case TCM_INVALID_PARAMETER_LIST:
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
@@ -2674,6 +2678,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
/* INVALID FIELD IN PARAMETER LIST */
buffer[SPC_ASC_KEY_OFFSET] = 0x26;
break;
+ case TCM_PARAMETER_LIST_LENGTH_ERROR:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* PARAMETER LIST LENGTH ERROR */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
+ break;
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[0] = 0x70;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6659dd36e806..113f33598b9f 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
struct ft_tport *tport;
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
goto out;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
{
struct ft_sess *sess;
struct hlist_head *head;
- struct hlist_node *pos;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash)
+ hlist_for_each_entry_rcu(sess, head, hash)
if (sess->port_id == port_id)
return sess;
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
head = &tport->hash[ft_sess_hash(port_id)];
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
ft_sess_unhash(sess);
return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_sess_delete_all(struct ft_tport *tport)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct ft_sess *sess;
for (head = tport->hash;
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
- hlist_for_each_entry_rcu(sess, pos, head, hash) {
+ hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c2c77d1ac499..a764f165b589 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -29,14 +29,14 @@ choice
config THERMAL_DEFAULT_GOV_STEP_WISE
bool "step_wise"
- select STEP_WISE
+ select THERMAL_GOV_STEP_WISE
help
Use the step_wise governor as default. This throttles the
devices one step at a time.
config THERMAL_DEFAULT_GOV_FAIR_SHARE
bool "fair_share"
- select FAIR_SHARE
+ select THERMAL_GOV_FAIR_SHARE
help
Use the fair_share governor as default. This throttles the
devices based on their 'contribution' to a zone. The
@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE
config THERMAL_DEFAULT_GOV_USER_SPACE
bool "user_space"
- select USER_SPACE
+ select THERMAL_GOV_USER_SPACE
help
Select this if you want to let the user space manage the
	  platform thermals.
endchoice
-config FAIR_SHARE
+config THERMAL_GOV_FAIR_SHARE
bool "Fair-share thermal governor"
help
Enable this to manage platform thermals using fair-share governor.
-config STEP_WISE
+config THERMAL_GOV_STEP_WISE
bool "Step_wise thermal governor"
help
	  Enable this to manage platform thermals using a simple linear governor.
-config USER_SPACE
+config THERMAL_GOV_USER_SPACE
bool "User_space thermal governor"
help
Enable this to let the user space manage the platform thermals.
@@ -78,6 +78,14 @@ config CPU_THERMAL
and not the ACPI interface.
If you want this support, you should say Y here.
+config THERMAL_EMULATION
+ bool "Thermal emulation mode support"
+ help
+	  Enable this option to add an emul_temp sysfs node in the thermal zone
+	  directory to support temperature emulation. With this emulation sysfs
+	  node, the user can manually input a temperature and test the different
+	  trip threshold behaviour for simulation purposes.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
@@ -93,6 +101,14 @@ config RCAR_THERMAL
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework
+config KIRKWOOD_THERMAL
+ tristate "Temperature sensor on Marvell Kirkwood SoCs"
+ depends on ARCH_KIRKWOOD
+ depends on OF
+ help
+	  Support for the Kirkwood thermal sensor driver in the Linux thermal
+	  framework. Only Kirkwood 88F6282 and 88F6283 have this sensor.
+
config EXYNOS_THERMAL
tristate "Temperature sensor on Samsung EXYNOS"
depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
@@ -101,6 +117,23 @@ config EXYNOS_THERMAL
If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
+config EXYNOS_THERMAL_EMUL
+ bool "EXYNOS TMU emulation mode support"
+ depends on EXYNOS_THERMAL
+ help
+	  Exynos 4412 and 4414 and 5 series have an emulation mode on the TMU.
+	  Enabling this option creates a sysfs node in the exynos thermal platform
+	  device directory to support emulation mode. With the emulation mode
+	  sysfs node, you can manually input a temperature to the TMU for
+	  simulation purposes.
+
+config DOVE_THERMAL
+ tristate "Temperature sensor on Marvell Dove SoCs"
+ depends on ARCH_DOVE
+ depends on OF
+ help
+ Support for the Dove thermal sensor driver in the Linux thermal
+ framework.
+
config DB8500_THERMAL
bool "DB8500 thermal management"
depends on ARCH_U8500
@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING
bound cpufreq cooling device turns active to set CPU frequency low to
cool down the CPU.
+config INTEL_POWERCLAMP
+ tristate "Intel PowerClamp idle injection driver"
+ depends on THERMAL
+ depends on X86
+ depends on CPU_SUP_INTEL
+ help
+	  Enable this to enable the Intel PowerClamp idle injection driver. This
+	  enforces idle time, which results in more package C-state residency. The
+	  user interface is exposed via the generic thermal framework.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d8da683245fc..d3a2b38c31e8 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_THERMAL) += thermal_sys.o
# governors
-obj-$(CONFIG_FAIR_SHARE) += fair_share.o
-obj-$(CONFIG_STEP_WISE) += step_wise.o
-obj-$(CONFIG_USER_SPACE) += user_space.o
+obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
+obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
+obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
+obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
+obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
+obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 836828e29a87..8dc44cbb3e09 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -73,21 +73,14 @@ static struct cpufreq_cooling_device *notify_device;
*/
static int get_idr(struct idr *idr, int *id)
{
- int err;
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
mutex_lock(&cooling_cpufreq_lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&cooling_cpufreq_lock);
-
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -118,8 +111,8 @@ static int is_cpufreq_valid(int cpu)
/**
* get_cpu_frequency - get the absolute value of frequency from level.
* @cpu: cpu for which frequency is fetched.
- * @level: level of frequency of the CPU
- * e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
+ * @level: level of frequency, equals cooling state of cpu cooling device
+ * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
*/
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
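The get_idr() rewrite above follows the then-new idr_alloc() convention: a single call both allocates and returns the id (or a negative errno), so the idr_pre_get()/-EAGAIN retry loop and the MAX_IDR_MASK masking disappear. A minimal sketch of the pattern, independent of the cooling-device specifics:

	static int alloc_id(struct idr *idr, struct mutex *lock, int *id)
	{
		int ret;

		mutex_lock(lock);
		ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);	/* any free id >= 0 */
		mutex_unlock(lock);
		if (ret < 0)
			return ret;	/* -ENOMEM or -ENOSPC */

		*id = ret;
		return 0;
	}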
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 4cf8e72af90a..21419851fc02 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -21,6 +21,7 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
{ .compatible = "stericsson,db8500-cpufreq-cooling" },
{},
};
-#else
-#define db8500_cpufreq_cooling_match NULL
#endif
static struct platform_driver db8500_cpufreq_cooling_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-cpufreq-cooling",
- .of_match_table = db8500_cpufreq_cooling_match,
+ .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
},
.probe = db8500_cpufreq_cooling_probe,
.suspend = db8500_cpufreq_cooling_suspend,
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index ec71ade3e317..61ce60a35921 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = {
{ .compatible = "stericsson,db8500-thermal" },
{},
};
-#else
-#define db8500_thermal_match NULL
#endif
static struct platform_driver db8500_thermal_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-thermal",
- .of_match_table = db8500_thermal_match,
+ .of_match_table = of_match_ptr(db8500_thermal_match),
},
.probe = db8500_thermal_probe,
.suspend = db8500_thermal_suspend,
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
new file mode 100644
index 000000000000..7b0bfa0e7a9c
--- /dev/null
+++ b/drivers/thermal/dove_thermal.c
@@ -0,0 +1,209 @@
+/*
+ * Dove thermal sensor driver
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define DOVE_THERMAL_TEMP_OFFSET 1
+#define DOVE_THERMAL_TEMP_MASK 0x1FF
+
+/* Dove Thermal Manager Control and Status Register */
+#define PMU_TM_DISABLE_OFFS 0
+#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
+
+/* Dove Thermal Diode Control 0 Register */
+#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
+#define PMU_TDC0_SEL_VCAL_OFFS 5
+#define PMU_TDC0_SEL_VCAL_MASK (0x3 << PMU_TDC0_SEL_VCAL_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS 11
+#define PMU_TDC0_REF_CAL_CNT_MASK (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_AVG_NUM_OFFS 25
+#define PMU_TDC0_AVG_NUM_MASK (0x7 << PMU_TDC0_AVG_NUM_OFFS)
+
+/* Dove Thermal Diode Control 1 Register */
+#define PMU_TEMP_DIOD_CTRL1_REG 0x04
+#define PMU_TDC1_TEMP_VALID_MASK (0x1 << 10)
+
+/* Dove Thermal Sensor Dev Structure */
+struct dove_thermal_priv {
+ void __iomem *sensor;
+ void __iomem *control;
+};
+
+static int dove_init_sensor(const struct dove_thermal_priv *priv)
+{
+ u32 reg;
+ u32 i;
+
+ /* Configure the Diode Control Register #0 */
+ reg = readl_relaxed(priv->control);
+
+ /* Use average of 2 */
+ reg &= ~PMU_TDC0_AVG_NUM_MASK;
+ reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+
+ /* Set the high level reference for calibration */
+ reg &= ~PMU_TDC0_SEL_VCAL_MASK;
+ reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
+ writel(reg, priv->control);
+
+ /* Reset the sensor */
+ reg = readl_relaxed(priv->control);
+ writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+ writel(reg, priv->control);
+
+ /* Enable the sensor */
+ reg = readl_relaxed(priv->sensor);
+ reg &= ~PMU_TM_DISABLE_MASK;
+ writel(reg, priv->sensor);
+
+ /* Poll the sensor for the first reading */
+ for (i = 0; i < 1000000; i++) {
+ reg = readl_relaxed(priv->sensor);
+ if (reg & DOVE_THERMAL_TEMP_MASK)
+ break;
+ }
+
+ if (i == 1000000)
+ return -EIO;
+
+ return 0;
+}
+
+static int dove_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct dove_thermal_priv *priv = thermal->devdata;
+
+ /* Valid check */
+ reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
+ if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /*
+ * Calculate temperature. See Section 8.10.1 of 88AP510,
+ * Documentation/arm/Marvell/README
+ */
+ reg = readl_relaxed(priv->sensor);
+ reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = dove_get_temp,
+};
+
+static const struct of_device_id dove_thermal_id_table[] = {
+ { .compatible = "marvell,dove-thermal" },
+ {}
+};
+
+static int dove_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct dove_thermal_priv *priv;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+ priv->control = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->control) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = dove_init_sensor(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize sensor\n");
+ return ret;
+ }
+
+ thermal = thermal_zone_device_register("dove_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int dove_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *dove_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(dove_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, dove_thermal_id_table);
+
+static struct platform_driver dove_thermal_driver = {
+ .probe = dove_thermal_probe,
+ .remove = dove_thermal_exit,
+ .driver = {
+ .name = "dove_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dove_thermal_id_table),
+ },
+};
+
+module_platform_driver(dove_thermal_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Dove thermal driver");
+MODULE_LICENSE("GPL");
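For a feel of the conversion in dove_get_temp() above: the driver takes the 9-bit masked sensor reading reg and reports (2281638 - 7298 * reg) / 10 in millidegrees Celsius. As a purely illustrative data point (the register value is invented), reg = 240 gives (2281638 - 1751520) / 10 = 53011 with integer division, i.e. roughly 53 degrees Celsius.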
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index bada1308318b..e04ebd8671ac 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -82,7 +82,7 @@
#define EXYNOS_TRIMINFO_RELOAD 0x1
#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16)
+#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
#define EXYNOS_MUX_ADDR_VALUE 6
#define EXYNOS_MUX_ADDR_SHIFT 20
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
@@ -94,11 +94,20 @@
#define SENSOR_NAME_LEN 16
#define MAX_TRIP_COUNT 8
#define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 4
#define ACTIVE_INTERVAL 500
#define IDLE_INTERVAL 10000
#define MCELSIUS 1000
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+
/* CPU Zone information */
#define PANIC_ZONE 4
#define WARN_ZONE 3
@@ -125,6 +134,7 @@ struct exynos_tmu_data {
struct thermal_trip_point_conf {
int trip_val[MAX_TRIP_COUNT];
int trip_count;
+ u8 trigger_falling;
};
struct thermal_cooling_conf {
@@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal,
mutex_lock(&th_zone->therm_dev->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling)
th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
else
th_zone->therm_dev->polling_delay = 0;
@@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal,
case MONITOR_ZONE:
case WARN_ZONE:
if (thermal_zone_bind_cooling_device(thermal, i, cdev,
- level, level)) {
+ level, 0)) {
pr_err("error binding cdev inst %d\n", i);
ret = -EINVAL;
}
@@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
static int exynos_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
{
- if (thermal->temperature >= trip)
- *trend = THERMAL_TREND_RAISING;
+ int ret;
+ unsigned long trip_temp;
+
+ ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+ if (ret < 0)
+ return ret;
+
+ if (thermal->temperature >= trip_temp)
+ *trend = THERMAL_TREND_RAISE_FULL;
else
- *trend = THERMAL_TREND_DROPPING;
+ *trend = THERMAL_TREND_DROP_FULL;
return 0;
}
@@ -413,7 +431,8 @@ static void exynos_report_trigger(void)
break;
}
- if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
+ if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling) {
if (i > 0)
th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
else
@@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
- IDLE_INTERVAL);
+ sensor_conf->trip_data.trigger_falling ?
+ 0 : IDLE_INTERVAL);
if (IS_ERR(th_zone->therm_dev)) {
pr_err("Failed to register thermal zone device\n");
@@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- unsigned int status, trim_info, rising_threshold;
- int ret = 0, threshold_code;
+ unsigned int status, trim_info;
+ unsigned int rising_threshold = 0, falling_threshold = 0;
+ int ret = 0, threshold_code, i, trigger_levs = 0;
mutex_lock(&data->lock);
clk_enable(data->clk);
@@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
(data->temp_error2 != 0))
data->temp_error1 = pdata->efuse_value;
+ /* Count trigger levels to be enabled */
+ for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
+ if (pdata->trigger_levels[i])
+ trigger_levs++;
+
if (data->soc == SOC_ARCH_EXYNOS4210) {
/* Write temperature code for threshold */
threshold_code = temp_to_code(data, pdata->threshold);
@@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
}
writeb(threshold_code,
data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
-
- writeb(pdata->trigger_levels[0],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
- writeb(pdata->trigger_levels[1],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
- writeb(pdata->trigger_levels[2],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
- writeb(pdata->trigger_levels[3],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);
+ for (i = 0; i < trigger_levs; i++)
+ writeb(pdata->trigger_levels[i],
+ data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
} else if (data->soc == SOC_ARCH_EXYNOS) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold = threshold_code;
- threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold |= (threshold_code << 8);
- threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
+ /* Write temperature code for rising and falling threshold */
+ for (i = 0; i < trigger_levs; i++) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i]);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ rising_threshold |= threshold_code << 8 * i;
+ if (pdata->threshold_falling) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i] -
+ pdata->threshold_falling);
+ if (threshold_code > 0)
+ falling_threshold |=
+ threshold_code << 8 * i;
+ }
}
- rising_threshold |= (threshold_code << 16);
writel(rising_threshold,
data->base + EXYNOS_THD_TEMP_RISE);
- writel(0, data->base + EXYNOS_THD_TEMP_FALL);
+ writel(falling_threshold,
+ data->base + EXYNOS_THD_TEMP_FALL);
- writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
}
out:
@@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
pdata->trigger_level2_en << 8 |
pdata->trigger_level1_en << 4 |
pdata->trigger_level0_en;
+ if (pdata->threshold_falling)
+ interrupt_en |= interrupt_en << 16;
} else {
con |= EXYNOS_TMU_CORE_OFF;
interrupt_en = 0; /* Disable all interrupts */
@@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work)
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
+ exynos_report_trigger();
mutex_lock(&data->lock);
clk_enable(data->clk);
-
-
if (data->soc == SOC_ARCH_EXYNOS)
- writel(EXYNOS_TMU_CLEAR_RISE_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT |
+ EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
else
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
-
clk_disable(data->clk);
mutex_unlock(&data->lock);
- exynos_report_trigger();
+
enable_irq(data->irq);
}
@@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
+ .threshold_falling = 10,
.trigger_levels[0] = 85,
.trigger_levels[1] = 103,
.trigger_levels[2] = 110,
@@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#else
-#define exynos_tmu_match NULL
#endif
static struct platform_device_id exynos_tmu_driver_ids[] = {
@@ -832,6 +852,94 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
return (struct exynos_tmu_platform_data *)
platform_get_device_id(pdev)->driver_data;
}
+
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+static ssize_t exynos_tmu_emulation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ u8 temp_code;
+ int temp = 0;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ if (reg & EXYNOS_EMUL_ENABLE) {
+ reg >>= EXYNOS_EMUL_DATA_SHIFT;
+ temp_code = reg & EXYNOS_EMUL_DATA_MASK;
+ temp = code_to_temp(data, temp_code);
+ }
+out:
+ return sprintf(buf, "%d\n", temp * MCELSIUS);
+}
+
+static ssize_t exynos_tmu_emulation_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ int temp;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ if (!sscanf(buf, "%d\n", &temp) || temp < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+
+ if (temp) {
+ /* Both CELSIUS and MCELSIUS type are available for input */
+ if (temp > MCELSIUS)
+ temp /= MCELSIUS;
+
+ reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+ (temp_to_code(data, (temp / MCELSIUS))
+ << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+ } else {
+ reg &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(reg, data->base + EXYNOS_EMUL_CON);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+out:
+ return count;
+}
+
+static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
+ exynos_tmu_emulation_store);
+static int create_emulation_sysfs(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_emulation);
+}
+static void remove_emulation_sysfs(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_emulation);
+}
+#else
+static inline int create_emulation_sysfs(struct device *dev) { return 0; }
+static inline void remove_emulation_sysfs(struct device *dev) {}
+#endif
+
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
@@ -914,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
exynos_sensor_conf.trip_data.trip_val[i] =
pdata->threshold + pdata->trigger_levels[i];
+ exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
+
exynos_sensor_conf.cooling_data.freq_clip_count =
pdata->freq_tab_count;
for (i = 0; i < pdata->freq_tab_count; i++) {
@@ -928,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register thermal interface\n");
goto err_clk;
}
+
+ ret = create_emulation_sysfs(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
+
return 0;
err_clk:
platform_set_drvdata(pdev, NULL);
@@ -939,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ remove_emulation_sysfs(&pdev->dev);
+
exynos_tmu_control(pdev, false);
exynos_unregister_thermal();
@@ -980,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = {
.name = "exynos-tmu",
.owner = THIS_MODULE,
.pm = EXYNOS_TMU_PM,
- .of_match_table = exynos_tmu_match,
+ .of_match_table = of_match_ptr(exynos_tmu_match),
},
.probe = exynos_tmu_probe,
.remove = exynos_tmu_remove,
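To make the register packing in exynos_tmu_emulation_store() above concrete: for a non-zero temperature the driver writes (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | (temp_to_code(temp) << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE to EXYNOS_EMUL_CON. A short sketch with an assumed temperature code of 0x50 (the real code comes from per-chip trim data, so the value is illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int emul_time = 0x57F0;	/* EXYNOS_EMUL_TIME */
		unsigned int temp_code = 0x50;		/* assumed output of temp_to_code() */
		unsigned int reg = (emul_time << 16) | (temp_code << 8) | 0x1;

		/* Prints 0x57f05001: timing field, emulated temperature code, enable bit. */
		printf("EXYNOS_EMUL_CON = 0x%08x\n", reg);
		return 0;
	}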
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
new file mode 100644
index 000000000000..b40b37cd25e0
--- /dev/null
+++ b/drivers/thermal/intel_powerclamp.c
@@ -0,0 +1,795 @@
+/*
+ * intel_powerclamp.c - package c-state idle injection
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ * TODO:
+ * 1. better handle wakeup from external interrupts, currently a fixed
+ * compensation is added to clamping duration when excessive amount
+ * of wakeups are observed during idle time. the reason is that in
+ * case of external interrupts without need for ack, clamping down
+ * cpu in non-irq context does not reduce irq. for majority of the
+ * cases, clamping down cpu does help reduce irq as well, we should
+ * be able to differentiate the two cases and give a quantitative
+ * solution for the irqs that we can control. perhaps based on
+ * get_cpu_iowait_time_us()
+ *
+ * 2. synchronization with other hw blocks
+ *
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched/rt.h>
+
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/mwait.h>
+#include <asm/cpu_device_id.h>
+#include <asm/idle.h>
+#include <asm/hardirq.h>
+
+#define MAX_TARGET_RATIO (50U)
+/* For each undisturbed clamping period (no extra wake ups during idle time),
+ * we increment the confidence counter for the given target ratio.
+ * CONFIDENCE_OK defines the level where runtime calibration results are
+ * valid.
+ */
+#define CONFIDENCE_OK (3)
+/* Default idle injection duration; the driver adjusts sleep time to meet the target
+ * idle ratio. Similar to frequency modulation.
+ */
+#define DEFAULT_DURATION_JIFFIES (6)
+
+static unsigned int target_mwait;
+static struct dentry *debug_dir;
+
+/* user selected target */
+static unsigned int set_target_ratio;
+static unsigned int current_ratio;
+static bool should_skip;
+static bool reduce_irq;
+static atomic_t idle_wakeup_counter;
+static unsigned int control_cpu; /* The cpu assigned to collect stats and update
+				  * control parameters. Defaults to the BSP,
+				  * but the BSP can be offlined.
+ */
+static bool clamping;
+
+
+static struct task_struct * __percpu *powerclamp_thread;
+static struct thermal_cooling_device *cooling_dev;
+static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
+ * clamping thread
+ */
+
+static unsigned int duration;
+static unsigned int pkg_cstate_ratio_cur;
+static unsigned int window_size;
+
+static int duration_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_duration;
+
+ ret = kstrtoul(arg, 10, &new_duration);
+ if (ret)
+ goto exit;
+ if (new_duration > 25 || new_duration < 6) {
+ pr_err("Out of recommended range %lu, between 6-25ms\n",
+ new_duration);
+ ret = -EINVAL;
+ }
+
+ duration = clamp(new_duration, 6ul, 25ul);
+ smp_mb();
+
+exit:
+
+ return ret;
+}
+
+static struct kernel_param_ops duration_ops = {
+ .set = duration_set,
+ .get = param_get_int,
+};
+
+
+module_param_cb(duration, &duration_ops, &duration, 0644);
+MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
+
+struct powerclamp_calibration_data {
+	unsigned long confidence; /* used for calibration: a counter that gets
+				   * incremented each time a clamping period is
+				   * completed without extra wakeups; once the
+				   * counter reaches a given level, the
+				   * compensation is deemed usable.
+ */
+ unsigned long steady_comp; /* steady state compensation used when
+ * no extra wakeups occurred.
+ */
+ unsigned long dynamic_comp; /* compensate excessive wakeup from idle
+ * mostly from external interrupts.
+ */
+};
+
+static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
+
+static int window_size_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_window_size;
+
+ ret = kstrtoul(arg, 10, &new_window_size);
+ if (ret)
+ goto exit_win;
+ if (new_window_size > 10 || new_window_size < 2) {
+ pr_err("Out of recommended window size %lu, between 2-10\n",
+ new_window_size);
+ ret = -EINVAL;
+ }
+
+ window_size = clamp(new_window_size, 2ul, 10ul);
+ smp_mb();
+
+exit_win:
+
+ return ret;
+}
+
+static struct kernel_param_ops window_size_ops = {
+ .set = window_size_set,
+ .get = param_get_int,
+};
+
+module_param_cb(window_size, &window_size_ops, &window_size, 0644);
+MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
+	"\tpowerclamp controls idle ratio within this window. larger\n"
+	"\twindow size results in slower response time but smoother\n"
+	"\tclamping results. defaults to 2.");
+
+static void find_target_mwait(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return;
+
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+
+}
+
+static u64 pkg_state_counter(void)
+{
+ u64 val;
+ u64 count = 0;
+
+ static bool skip_c2;
+ static bool skip_c3;
+ static bool skip_c6;
+ static bool skip_c7;
+
+ if (!skip_c2) {
+ if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c2 = true;
+ }
+
+ if (!skip_c3) {
+ if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c3 = true;
+ }
+
+ if (!skip_c6) {
+ if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c6 = true;
+ }
+
+ if (!skip_c7) {
+ if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c7 = true;
+ }
+
+ return count;
+}
+
+static void noop_timer(unsigned long foo)
+{
+ /* empty... just the fact that we get the interrupt wakes us up */
+}
+
+static unsigned int get_compensation(int ratio)
+{
+ unsigned int comp = 0;
+
+ /* we only use compensation if all adjacent ones are good */
+ if (ratio == 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio + 1].steady_comp +
+ cal_data[ratio + 2].steady_comp) / 3;
+ } else if (ratio == MAX_TARGET_RATIO - 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio - 2].steady_comp) / 3;
+ } else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio + 1].steady_comp) / 3;
+ }
+
+ /* REVISIT: simple penalty of double idle injection */
+ if (reduce_irq)
+ comp = ratio;
+ /* do not exceed limit */
+ if (comp + ratio >= MAX_TARGET_RATIO)
+ comp = MAX_TARGET_RATIO - ratio - 1;
+
+ return comp;
+}
+
+static void adjust_compensation(int target_ratio, unsigned int win)
+{
+ int delta;
+ struct powerclamp_calibration_data *d = &cal_data[target_ratio];
+
+	/*
+	 * Skip the adjustment if the confidence level has already been
+	 * reached, or if there were too many wakeups during the last idle
+	 * injection period; in that case the data cannot be trusted for
+	 * compensation.
+	 */
+ if (d->confidence >= CONFIDENCE_OK ||
+ atomic_read(&idle_wakeup_counter) >
+ win * num_online_cpus())
+ return;
+
+ delta = set_target_ratio - current_ratio;
+ /* filter out bad data */
+ if (delta >= 0 && delta <= (1+target_ratio/10)) {
+ if (d->steady_comp)
+ d->steady_comp =
+ roundup(delta+d->steady_comp, 2)/2;
+ else
+ d->steady_comp = delta;
+ d->confidence++;
+ }
+}
+
+static bool powerclamp_adjust_controls(unsigned int target_ratio,
+ unsigned int guard, unsigned int win)
+{
+ static u64 msr_last, tsc_last;
+ u64 msr_now, tsc_now;
+ u64 val64;
+
+ /* check result for the last window */
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ current_ratio = 1;
+ else if (tsc_now-tsc_last) {
+ val64 = 100*(msr_now-msr_last);
+ do_div(val64, (tsc_now-tsc_last));
+ current_ratio = val64;
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ tsc_last = tsc_now;
+
+ adjust_compensation(target_ratio, win);
+	/*
+	 * too many external interrupts, set a flag so
+	 * that we can take measures later.
+	 */
+ reduce_irq = atomic_read(&idle_wakeup_counter) >=
+ 2 * win * num_online_cpus();
+
+ atomic_set(&idle_wakeup_counter, 0);
+ /* if we are above target+guard, skip */
+ return set_target_ratio + guard <= current_ratio;
+}
+
+static int clamp_thread(void *arg)
+{
+ int cpunr = (unsigned long)arg;
+ DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
+ static const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
+ unsigned int count = 0;
+ unsigned int target_ratio;
+
+ set_bit(cpunr, cpu_clamping_mask);
+ set_freezable();
+ init_timer_on_stack(&wakeup_timer);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ while (true == clamping && !kthread_should_stop() &&
+ cpu_online(cpunr)) {
+ int sleeptime;
+ unsigned long target_jiffies;
+ unsigned int guard;
+ unsigned int compensation = 0;
+ int interval; /* jiffies to sleep for each attempt */
+ unsigned int duration_jiffies = msecs_to_jiffies(duration);
+ unsigned int window_size_now;
+
+ try_to_freeze();
+		/*
+		 * make sure the user-selected ratio does not take effect
+		 * until the next round; adjust target_ratio when the user has
+		 * changed the target so that we can converge quickly.
+		 */
+ target_ratio = set_target_ratio;
+ guard = 1 + target_ratio/20;
+ window_size_now = window_size;
+ count++;
+
+ /*
+ * systems may have different ability to enter package level
+ * c-states, thus we need to compensate the injected idle ratio
+ * to achieve the actual target reported by the HW.
+ */
+ compensation = get_compensation(target_ratio);
+ interval = duration_jiffies*100/(target_ratio+compensation);
+
+ /* align idle time */
+ target_jiffies = roundup(jiffies, interval);
+ sleeptime = target_jiffies - jiffies;
+ if (sleeptime <= 0)
+ sleeptime = 1;
+ schedule_timeout_interruptible(sleeptime);
+ /*
+ * only elected controlling cpu can collect stats and update
+ * control parameters.
+ */
+ if (cpunr == control_cpu && !(count%window_size_now)) {
+ should_skip =
+ powerclamp_adjust_controls(target_ratio,
+ guard, window_size_now);
+ smp_mb();
+ }
+
+ if (should_skip)
+ continue;
+
+ target_jiffies = jiffies + duration_jiffies;
+ mod_timer(&wakeup_timer, target_jiffies);
+ if (unlikely(local_softirq_pending()))
+ continue;
+		/*
+		 * stop the tick scheduler during idle time; interrupts are
+		 * still allowed, thus jiffies are updated properly.
+		 */
+ preempt_disable();
+ tick_nohz_idle_enter();
+ /* mwait until target jiffies is reached */
+ while (time_before(jiffies, target_jiffies)) {
+ unsigned long ecx = 1;
+ unsigned long eax = target_mwait;
+
+ /*
+ * REVISIT: may call enter_idle() to notify drivers who
+ * can save power during cpu idle. same for exit_idle()
+ */
+ local_touch_nmi();
+ stop_critical_timings();
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ cpu_relax(); /* allow HT sibling to run */
+ __mwait(eax, ecx);
+ start_critical_timings();
+ atomic_inc(&idle_wakeup_counter);
+ }
+ tick_nohz_idle_exit();
+ preempt_enable_no_resched();
+ }
+ del_timer_sync(&wakeup_timer);
+ clear_bit(cpunr, cpu_clamping_mask);
+
+ return 0;
+}
+
+/*
+ * 1 HZ polling while clamping is active, useful for userspace
+ * to monitor actual idle ratio.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
+static void poll_pkg_cstate(struct work_struct *dummy)
+{
+ static u64 msr_last;
+ static u64 tsc_last;
+ static unsigned long jiffies_last;
+
+ u64 msr_now;
+ unsigned long jiffies_now;
+ u64 tsc_now;
+ u64 val64;
+
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+ jiffies_now = jiffies;
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ pkg_cstate_ratio_cur = 1;
+ else {
+ if (tsc_now - tsc_last) {
+ val64 = 100 * (msr_now - msr_last);
+ do_div(val64, (tsc_now - tsc_last));
+ pkg_cstate_ratio_cur = val64;
+ }
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ jiffies_last = jiffies_now;
+ tsc_last = tsc_now;
+
+ if (true == clamping)
+ schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+}
+
+static int start_power_clamp(void)
+{
+ unsigned long cpu;
+ struct task_struct *thread;
+
+ /* check if pkg cstate counter is completely 0, abort in this case */
+ if (!pkg_state_counter()) {
+ pr_err("pkg cstate counter not functional, abort\n");
+ return -EINVAL;
+ }
+
+ set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
+ /* prevent cpu hotplug */
+ get_online_cpus();
+
+ /* prefer BSP */
+ control_cpu = 0;
+ if (!cpu_online(control_cpu))
+ control_cpu = smp_processor_id();
+
+ clamping = true;
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
+
+ /* start one thread per online cpu */
+ for_each_online_cpu(cpu) {
+ struct task_struct **p =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%ld", cpu);
+ /* bind to cpu here */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *p = thread;
+ }
+
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static void end_power_clamp(void)
+{
+ int i;
+ struct task_struct *thread;
+
+ clamping = false;
+	/*
+	 * make clamping visible to other cpus and give per-cpu clamping
+	 * threads some time to exit, or they get killed later.
+	 */
+ smp_mb();
+ msleep(20);
+ if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping thread for cpu %d alive, kill\n", i);
+ thread = *per_cpu_ptr(powerclamp_thread, i);
+ kthread_stop(thread);
+ }
+ }
+}
+
+static int powerclamp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ struct task_struct *thread;
+ struct task_struct **percpu_thread =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ if (false == clamping)
+ goto exit_ok;
+
+ switch (action) {
+ case CPU_ONLINE:
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%lu", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *percpu_thread = thread;
+ }
+ /* prefer BSP as controlling CPU */
+ if (cpu == 0) {
+ control_cpu = 0;
+ smp_mb();
+ }
+ break;
+ case CPU_DEAD:
+ if (test_bit(cpu, cpu_clamping_mask)) {
+ pr_err("cpu %lu dead but powerclamping thread is not\n",
+ cpu);
+ kthread_stop(*percpu_thread);
+ }
+ if (cpu == control_cpu) {
+ control_cpu = smp_processor_id();
+ smp_mb();
+ }
+ }
+
+exit_ok:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block powerclamp_cpu_notifier = {
+ .notifier_call = powerclamp_cpu_callback,
+};
+
+static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MAX_TARGET_RATIO;
+
+ return 0;
+}
+
+static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ if (true == clamping)
+ *state = pkg_cstate_ratio_cur;
+ else
+ /* to save power, do not poll idle ratio while not clamping */
+ *state = -1; /* indicates invalid state */
+
+ return 0;
+}
+
+static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_target_ratio)
+{
+ int ret = 0;
+
+ new_target_ratio = clamp(new_target_ratio, 0UL,
+ (unsigned long) (MAX_TARGET_RATIO-1));
+ if (set_target_ratio == 0 && new_target_ratio > 0) {
+ pr_info("Start idle injection to reduce power\n");
+ set_target_ratio = new_target_ratio;
+ ret = start_power_clamp();
+ goto exit_set;
+ } else if (set_target_ratio > 0 && new_target_ratio == 0) {
+ pr_info("Stop forced idle injection\n");
+ set_target_ratio = 0;
+ end_power_clamp();
+ } else /* adjust currently running */ {
+ set_target_ratio = new_target_ratio;
+ /* make new set_target_ratio visible to other cpus */
+ smp_mb();
+ }
+
+exit_set:
+ return ret;
+}
+
+/* bind to generic thermal layer as cooling device */
+static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+ .get_max_state = powerclamp_get_max_state,
+ .get_cur_state = powerclamp_get_cur_state,
+ .set_cur_state = powerclamp_set_cur_state,
+};
+
+/* runs on Nehalem and later */
+static const struct x86_cpu_id intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x1a},
+ { X86_VENDOR_INTEL, 6, 0x1c},
+ { X86_VENDOR_INTEL, 6, 0x1e},
+ { X86_VENDOR_INTEL, 6, 0x1f},
+ { X86_VENDOR_INTEL, 6, 0x25},
+ { X86_VENDOR_INTEL, 6, 0x26},
+ { X86_VENDOR_INTEL, 6, 0x2a},
+ { X86_VENDOR_INTEL, 6, 0x2c},
+ { X86_VENDOR_INTEL, 6, 0x2d},
+ { X86_VENDOR_INTEL, 6, 0x2e},
+ { X86_VENDOR_INTEL, 6, 0x2f},
+ { X86_VENDOR_INTEL, 6, 0x3a},
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
+static int powerclamp_probe(void)
+{
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
+ pr_err("Intel powerclamp does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+ !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
+ !boot_cpu_has(X86_FEATURE_MWAIT) ||
+ !boot_cpu_has(X86_FEATURE_ARAT))
+ return -ENODEV;
+
+ /* find the deepest mwait value */
+ find_target_mwait();
+
+ return 0;
+}
+
+static int powerclamp_debug_show(struct seq_file *m, void *unused)
+{
+ int i = 0;
+
+ seq_printf(m, "controlling cpu: %d\n", control_cpu);
+ seq_printf(m, "pct confidence steady dynamic (compensation)\n");
+ for (i = 0; i < MAX_TARGET_RATIO; i++) {
+ seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
+ i,
+ cal_data[i].confidence,
+ cal_data[i].steady_comp,
+ cal_data[i].dynamic_comp);
+ }
+
+ return 0;
+}
+
+static int powerclamp_debug_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, powerclamp_debug_show, inode->i_private);
+}
+
+static const struct file_operations powerclamp_debug_fops = {
+ .open = powerclamp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static inline void powerclamp_create_debug_files(void)
+{
+ debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
+ if (!debug_dir)
+ return;
+
+ if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
+ cal_data, &powerclamp_debug_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(debug_dir);
+}
+
+static int powerclamp_init(void)
+{
+ int retval;
+ int bitmap_size;
+
+ bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
+ cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cpu_clamping_mask)
+ return -ENOMEM;
+
+ /* probe cpu features and ids here */
+	retval = powerclamp_probe();
+	if (retval) {
+		kfree(cpu_clamping_mask);
+		return retval;
+	}
+ /* set default limit, maybe adjusted during runtime based on feedback */
+ window_size = 2;
+ register_hotcpu_notifier(&powerclamp_cpu_notifier);
+ powerclamp_thread = alloc_percpu(struct task_struct *);
+ cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ &powerclamp_cooling_ops);
+ if (IS_ERR(cooling_dev))
+ return -ENODEV;
+
+ if (!duration)
+ duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+ powerclamp_create_debug_files();
+
+ return 0;
+}
+module_init(powerclamp_init);
+
+static void powerclamp_exit(void)
+{
+ unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+ end_power_clamp();
+ free_percpu(powerclamp_thread);
+ thermal_cooling_device_unregister(cooling_dev);
+ kfree(cpu_clamping_mask);
+
+ cancel_delayed_work_sync(&poll_pkg_cstate_work);
+ debugfs_remove_recursive(debug_dir);
+}
+module_exit(powerclamp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
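
As a side note, the interval arithmetic in clamp_thread() above can be checked in isolation: injecting idle for duration_jiffies out of every interval jiffies yields roughly the target idle ratio. The following is a minimal standalone sketch, not part of the patch; the helper name and sample numbers are made up for illustration.

#include <stdio.h>

/*
 * Minimal sketch of the interval arithmetic used in clamp_thread():
 * idle for "duration" jiffies out of every "interval" jiffies gives
 * roughly target_ratio percent idle time.
 */
static unsigned int clamp_interval(unsigned int duration_jiffies,
				   unsigned int target_ratio,
				   unsigned int compensation)
{
	/* interval * ratio / 100 == duration  =>  interval = duration * 100 / ratio */
	return duration_jiffies * 100 / (target_ratio + compensation);
}

int main(void)
{
	/* sample numbers: 6-jiffy injections, 25% target, 2% compensation */
	printf("interval = %u jiffies\n", clamp_interval(6, 25, 2));
	return 0;
}
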
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
new file mode 100644
index 000000000000..65cb4f09e8f6
--- /dev/null
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -0,0 +1,134 @@
+/*
+ * Kirkwood thermal sensor driver
+ *
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define KIRKWOOD_THERMAL_VALID_OFFSET 9
+#define KIRKWOOD_THERMAL_VALID_MASK 0x1
+#define KIRKWOOD_THERMAL_TEMP_OFFSET 10
+#define KIRKWOOD_THERMAL_TEMP_MASK 0x1FF
+
+/* Kirkwood Thermal Sensor Dev Structure */
+struct kirkwood_thermal_priv {
+ void __iomem *sensor;
+};
+
+static int kirkwood_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct kirkwood_thermal_priv *priv = thermal->devdata;
+
+ reg = readl_relaxed(priv->sensor);
+
+ /* Valid check */
+	if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+	      KIRKWOOD_THERMAL_VALID_MASK)) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+	/*
+	 * Calculate temperature. See Section 8.10.1 of the 88AP510
+	 * datasheet, which documents the same sensor, and
+	 * Documentation/arm/Marvell/README.
+	 */
+ reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
+ KIRKWOOD_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = kirkwood_get_temp,
+};
+
+static const struct of_device_id kirkwood_thermal_id_table[] = {
+ { .compatible = "marvell,kirkwood-thermal" },
+ {}
+};
+
+static int kirkwood_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct kirkwood_thermal_priv *priv;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int kirkwood_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *kirkwood_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(kirkwood_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);
+
+static struct platform_driver kirkwood_thermal_driver = {
+ .probe = kirkwood_thermal_probe,
+ .remove = kirkwood_thermal_exit,
+ .driver = {
+ .name = "kirkwood_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(kirkwood_thermal_id_table),
+ },
+};
+
+module_platform_driver(kirkwood_thermal_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
+MODULE_DESCRIPTION("kirkwood thermal driver");
+MODULE_LICENSE("GPL");
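
As a side note, the register decoding in kirkwood_get_temp() above boils down to a fixed linear formula. A minimal standalone sketch, not part of the patch, using the same constants as the driver and a made-up sample register value:

#include <stdio.h>

#define VALID_OFFSET	9
#define VALID_MASK	0x1
#define TEMP_OFFSET	10
#define TEMP_MASK	0x1FF

/* same formula as kirkwood_get_temp(): (2281638 - 7298 * raw) / 10 mC */
static long reg_to_mcelsius(unsigned long reg)
{
	unsigned long raw = (reg >> TEMP_OFFSET) & TEMP_MASK;

	return (long)((2281638UL - 7298 * raw) / 10);
}

int main(void)
{
	/* made-up register value: valid bit set, raw temperature field 200 */
	unsigned long reg = (1UL << VALID_OFFSET) | (200UL << TEMP_OFFSET);

	if ((reg >> VALID_OFFSET) & VALID_MASK)
		printf("%ld mC\n", reg_to_mcelsius(reg));
	return 0;
}
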
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 90db951725da..28f091994013 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -19,225 +19,473 @@
*/
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/thermal.h>
-#define THSCR 0x2c
-#define THSSR 0x30
+#define IDLE_INTERVAL 5000
+
+#define COMMON_STR 0x00
+#define COMMON_ENR 0x04
+#define COMMON_INTMSK 0x0c
+
+#define REG_POSNEG 0x20
+#define REG_FILONOFF 0x28
+#define REG_THSCR 0x2c
+#define REG_THSSR 0x30
+#define REG_INTCTRL 0x34
/* THSCR */
-#define CPTAP 0xf
+#define CPCTL (1 << 12)
/* THSSR */
#define CTEMP 0x3f
-
-struct rcar_thermal_priv {
+struct rcar_thermal_common {
void __iomem *base;
struct device *dev;
+ struct list_head head;
spinlock_t lock;
- u32 comp;
};
+struct rcar_thermal_priv {
+ void __iomem *base;
+ struct rcar_thermal_common *common;
+ struct thermal_zone_device *zone;
+ struct delayed_work work;
+ struct mutex lock;
+ struct list_head list;
+ int id;
+ int ctemp;
+};
+
+#define rcar_thermal_for_each_priv(pos, common) \
+ list_for_each_entry(pos, &common->head, list)
+
#define MCELSIUS(temp) ((temp) * 1000)
-#define rcar_zone_to_priv(zone) (zone->devdata)
+#define rcar_zone_to_priv(zone) ((zone)->devdata)
+#define rcar_priv_to_dev(priv) ((priv)->common->dev)
+#define rcar_has_irq_support(priv) ((priv)->common->base)
+#define rcar_id_to_shift(priv) ((priv)->id * 8)
+
+#ifdef DEBUG
+# define rcar_force_update_temp(priv) 1
+#else
+# define rcar_force_update_temp(priv) 0
+#endif
/*
* basic functions
*/
-static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
+#define rcar_thermal_common_read(c, r) \
+ _rcar_thermal_common_read(c, COMMON_ ##r)
+static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
+ u32 reg)
{
- unsigned long flags;
- u32 ret;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(common->base + reg);
+}
- ret = ioread32(priv->base + reg);
+#define rcar_thermal_common_write(c, r, d) \
+ _rcar_thermal_common_write(c, COMMON_ ##r, d)
+static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
+ u32 reg, u32 data)
+{
+ iowrite32(data, common->base + reg);
+}
- spin_unlock_irqrestore(&priv->lock, flags);
+#define rcar_thermal_common_bset(c, r, m, d) \
+ _rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
+static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
+ u32 reg, u32 mask, u32 data)
+{
+ u32 val;
- return ret;
+ val = ioread32(common->base + reg);
+ val &= ~mask;
+ val |= (data & mask);
+ iowrite32(val, common->base + reg);
}
-#if 0 /* no user at this point */
-static void rcar_thermal_write(struct rcar_thermal_priv *priv,
- u32 reg, u32 data)
+#define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
+static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(priv->base + reg);
+}
+#define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
+static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
+ u32 reg, u32 data)
+{
iowrite32(data, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
-#endif
-static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
- u32 mask, u32 data)
+#define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
+static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
+ u32 mask, u32 data)
{
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->lock, flags);
-
val = ioread32(priv->base + reg);
val &= ~mask;
val |= (data & mask);
iowrite32(val, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/*
* zone device functions
*/
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
- int val, min, max, tmp;
-
- tmp = -200; /* default */
- while (1) {
- if (priv->comp < 1 || priv->comp > 12) {
- dev_err(priv->dev,
- "THSSR invalid data (%d)\n", priv->comp);
- priv->comp = 4; /* for next thermal */
- return -EINVAL;
- }
+ struct device *dev = rcar_priv_to_dev(priv);
+ int i;
+ int ctemp, old, new;
- /*
- * THS comparator offset and the reference temperature
- *
- * Comparator | reference | Temperature field
- * offset | temperature | measurement
- * | (degrees C) | (degrees C)
- * -------------+---------------+-------------------
- * 1 | -45 | -45 to -30
- * 2 | -30 | -30 to -15
- * 3 | -15 | -15 to 0
- * 4 | 0 | 0 to +15
- * 5 | +15 | +15 to +30
- * 6 | +30 | +30 to +45
- * 7 | +45 | +45 to +60
- * 8 | +60 | +60 to +75
- * 9 | +75 | +75 to +90
- * 10 | +90 | +90 to +105
- * 11 | +105 | +105 to +120
- * 12 | +120 | +120 to +135
- */
+ mutex_lock(&priv->lock);
- /* calculate thermal limitation */
- min = (priv->comp * 15) - 60;
- max = min + 15;
+ /*
+	 * The TSC decides the value of CPTAP automatically,
+	 * and this is the condition that makes the interrupt valid.
+ */
+ rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);
+ ctemp = 0;
+ old = ~0;
+ for (i = 0; i < 128; i++) {
/*
* we need to wait 300us after changing comparator offset
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
udelay(300);
- /* calculate current temperature */
- val = rcar_thermal_read(priv, THSSR) & CTEMP;
- val = (val * 5) - 65;
+ new = rcar_thermal_read(priv, THSSR) & CTEMP;
+ if (new == old) {
+ ctemp = new;
+ break;
+ }
+ old = new;
+ }
- dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
- priv->comp, min, max, val);
+ if (!ctemp) {
+ dev_err(dev, "thermal sensor was broken\n");
+ return -EINVAL;
+ }
- /*
- * If val is same as min/max, then,
- * it should try again on next comparator.
- * But the val might be correct temperature.
- * Keep it on "tmp" and compare with next val.
- */
- if (tmp == val)
- break;
+ /*
+ * enable IRQ
+ */
+ if (rcar_has_irq_support(priv)) {
+ rcar_thermal_write(priv, FILONOFF, 0);
- if (val <= min) {
- tmp = min;
- priv->comp--; /* try again */
- } else if (val >= max) {
- tmp = max;
- priv->comp++; /* try again */
- } else {
- tmp = val;
- break;
- }
+ /* enable Rising/Falling edge interrupt */
+ rcar_thermal_write(priv, POSNEG, 0x1);
+ rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
+ ((ctemp - 1) << 0)));
+ }
+
+ dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
+
+ priv->ctemp = ctemp;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
+ unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+ if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
+ rcar_thermal_update_temp(priv);
+
+ mutex_lock(&priv->lock);
+ *temp = MCELSIUS((priv->ctemp * 5) - 65);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type *type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *type = THERMAL_TRIP_CRITICAL;
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *temp = MCELSIUS(90);
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_notify(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ switch (type) {
+ case THERMAL_TRIP_CRITICAL:
+ /* FIXME */
+		dev_warn(dev, "Thermal zone reached critical temperature\n");
+ break;
+ default:
+ break;
}
- *temp = MCELSIUS(tmp);
return 0;
}
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
- .get_temp = rcar_thermal_get_temp,
+ .get_temp = rcar_thermal_get_temp,
+ .get_trip_type = rcar_thermal_get_trip_type,
+ .get_trip_temp = rcar_thermal_get_trip_temp,
+ .notify = rcar_thermal_notify,
};
/*
- * platform functions
+ * interrupt
*/
-static int rcar_thermal_probe(struct platform_device *pdev)
+#define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1)
+#define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0)
+static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
+{
+ struct rcar_thermal_common *common = priv->common;
+ unsigned long flags;
+ u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+}
+
+static void rcar_thermal_work(struct work_struct *work)
{
- struct thermal_zone_device *zone;
struct rcar_thermal_priv *priv;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Could not get platform resource\n");
- return -ENODEV;
+ priv = container_of(work, struct rcar_thermal_priv, work.work);
+
+ rcar_thermal_update_temp(priv);
+ rcar_thermal_irq_enable(priv);
+ thermal_zone_device_update(priv->zone);
+}
+
+static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
+{
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ status = (status >> rcar_id_to_shift(priv)) & 0x3;
+
+ if (status & 0x3) {
+ dev_dbg(dev, "thermal%d %s%s\n",
+ priv->id,
+ (status & 0x2) ? "Rising " : "",
+ (status & 0x1) ? "Falling" : "");
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "Could not allocate priv\n");
- return -ENOMEM;
+ return status;
+}
+
+static irqreturn_t rcar_thermal_irq(int irq, void *data)
+{
+ struct rcar_thermal_common *common = data;
+ struct rcar_thermal_priv *priv;
+ unsigned long flags;
+ u32 status, mask;
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ mask = rcar_thermal_common_read(common, INTMSK);
+ status = rcar_thermal_common_read(common, STR);
+ rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+
+ status = status & ~mask;
+
+ /*
+ * check the status
+ */
+ rcar_thermal_for_each_priv(priv, common) {
+ if (rcar_thermal_had_changed(priv, status)) {
+ rcar_thermal_irq_disable(priv);
+ schedule_delayed_work(&priv->work,
+ msecs_to_jiffies(300));
+ }
}
- priv->comp = 4; /* basic setup */
- priv->dev = &pdev->dev;
- spin_lock_init(&priv->lock);
- priv->base = devm_ioremap_nocache(&pdev->dev,
- res->start, resource_size(res));
- if (!priv->base) {
- dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * platform functions
+ */
+static int rcar_thermal_probe(struct platform_device *pdev)
+{
+ struct rcar_thermal_common *common;
+ struct rcar_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct resource *res, *irq;
+ int mres = 0;
+ int i;
+ int idle = IDLE_INTERVAL;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common) {
+ dev_err(dev, "Could not allocate common\n");
return -ENOMEM;
}
- zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
- &rcar_thermal_zone_ops, NULL, 0, 0);
- if (IS_ERR(zone)) {
- dev_err(&pdev->dev, "thermal zone device is NULL\n");
- return PTR_ERR(zone);
+ INIT_LIST_HEAD(&common->head);
+ spin_lock_init(&common->lock);
+ common->dev = dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq) {
+ int ret;
+
+ /*
+		 * The platform has IRQ support, so the driver
+		 * uses the common registers.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res) {
+ dev_err(dev, "Could not get platform resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+ dev_name(dev), common);
+ if (ret) {
+			dev_err(dev, "irq request failed\n");
+ return ret;
+ }
+
+ /*
+		 * rcar_has_irq_support() becomes true once common->base is set
+ */
+ common->base = devm_request_and_ioremap(dev, res);
+ if (!common->base) {
+ dev_err(dev, "Unable to ioremap thermal register\n");
+ return -ENOMEM;
+ }
+
+		/* enable temperature comparison */
+ rcar_thermal_common_write(common, ENR, 0x00030303);
+
+		idle = 0;	/* polling delay is not needed */
}
- platform_set_drvdata(pdev, zone);
+ for (i = 0;; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res)
+ break;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "Could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ priv->base = devm_request_and_ioremap(dev, res);
+ if (!priv->base) {
+ dev_err(dev, "Unable to ioremap priv register\n");
+ return -ENOMEM;
+ }
- dev_info(&pdev->dev, "proved\n");
+ priv->common = common;
+ priv->id = i;
+ mutex_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->list);
+ INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
+ rcar_thermal_update_temp(priv);
+
+ priv->zone = thermal_zone_device_register("rcar_thermal",
+ 1, 0, priv,
+ &rcar_thermal_zone_ops, NULL, 0,
+ idle);
+ if (IS_ERR(priv->zone)) {
+ dev_err(dev, "can't register thermal zone\n");
+ goto error_unregister;
+ }
+
+ list_move_tail(&priv->list, &common->head);
+
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_enable(priv);
+ }
+
+ platform_set_drvdata(pdev, common);
+
+	dev_info(dev, "%d sensor probed\n", i);
return 0;
+
+error_unregister:
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
+
+ return -ENODEV;
}
static int rcar_thermal_remove(struct platform_device *pdev)
{
- struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+ struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+ struct rcar_thermal_priv *priv;
+
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
- thermal_zone_device_unregister(zone);
platform_set_drvdata(pdev, NULL);
return 0;
}
+static const struct of_device_id rcar_thermal_dt_ids[] = {
+ { .compatible = "renesas,rcar-thermal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
+
static struct platform_driver rcar_thermal_driver = {
.driver = {
.name = "rcar_thermal",
+ .of_match_table = rcar_thermal_dt_ids,
},
.probe = rcar_thermal_probe,
.remove = rcar_thermal_remove,
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6b2d8b21aaee..3c5ee5607977 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
}
- stdev->clk = clk_get(&pdev->dev, NULL);
+ stdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stdev->clk)) {
dev_err(&pdev->dev, "Can't get clock\n");
return PTR_ERR(stdev->clk);
@@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
ret = clk_enable(stdev->clk);
if (ret) {
dev_err(&pdev->dev, "Can't enable clock\n");
- goto put_clk;
+ return ret;
}
stdev->flags = val;
@@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
disable_clk:
clk_disable(stdev->clk);
-put_clk:
- clk_put(stdev->clk);
return ret;
}
@@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev)
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
clk_disable(stdev->clk);
- clk_put(stdev->clk);
return 0;
}
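
The spear change above works because devm_clk_get() ties the clock reference to the device lifetime, so error paths no longer need clk_put(). A minimal kernel-context sketch of that pattern, not part of the patch; names other than the clk/devm API are made up:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clock_setup(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);	/* released automatically on driver detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* still needs clk_disable() on teardown */
	if (ret)
		return ret;		/* but no clk_put() in the error path */

	*out = clk;
	return 0;
}
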
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 0cd5e9fbab1c..407cde3211c1 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -35,21 +35,54 @@
* state for this trip point
* b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
* state for this trip point
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
+ * for this trip point
+ * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
+ * for this trip point
+ * If the temperature is lower than a trip point,
+ * a. if the trend is THERMAL_TREND_RAISING, do nothing
+ * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+ * state for this trip point, if the cooling state already
+ * equals lower limit, deactivate the thermal instance
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
+ * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
+ * if the cooling state already equals lower limit,
+ *          deactivate the thermal instance
*/
static unsigned long get_target_state(struct thermal_instance *instance,
- enum thermal_trend trend)
+ enum thermal_trend trend, bool throttle)
{
struct thermal_cooling_device *cdev = instance->cdev;
unsigned long cur_state;
cdev->ops->get_cur_state(cdev, &cur_state);
- if (trend == THERMAL_TREND_RAISING) {
- cur_state = cur_state < instance->upper ?
- (cur_state + 1) : instance->upper;
- } else if (trend == THERMAL_TREND_DROPPING) {
- cur_state = cur_state > instance->lower ?
- (cur_state - 1) : instance->lower;
+ switch (trend) {
+ case THERMAL_TREND_RAISING:
+ if (throttle)
+ cur_state = cur_state < instance->upper ?
+ (cur_state + 1) : instance->upper;
+ break;
+ case THERMAL_TREND_RAISE_FULL:
+ if (throttle)
+ cur_state = instance->upper;
+ break;
+ case THERMAL_TREND_DROPPING:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state -= 1;
+ break;
+ case THERMAL_TREND_DROP_FULL:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state = instance->lower;
+ break;
+ default:
+ break;
}
return cur_state;
@@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz,
tz->passive += value;
}
-static void update_instance_for_throttle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type,
- enum thermal_trend trend)
-{
- struct thermal_instance *instance;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
-
- instance->target = get_target_state(instance, trend);
-
- /* Activate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, 1);
-
- instance->cdev->updated = false; /* cdev needs update */
- }
-}
-
-static void update_instance_for_dethrottle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
-{
- struct thermal_instance *instance;
- struct thermal_cooling_device *cdev;
- unsigned long cur_state;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip ||
- instance->target == THERMAL_NO_TARGET)
- continue;
-
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &cur_state);
-
- instance->target = cur_state > instance->lower ?
- (cur_state - 1) : THERMAL_NO_TARGET;
-
- /* Deactivate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, -1);
-
- cdev->updated = false; /* cdev needs update */
- }
-}
-
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
long trip_temp;
enum thermal_trip_type trip_type;
enum thermal_trend trend;
+ struct thermal_instance *instance;
+ bool throttle = false;
+ int old_target;
if (trip == THERMAL_TRIPS_NONE) {
trip_temp = tz->forced_passive;
@@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trend = get_tz_trend(tz, trip);
+ if (tz->temperature >= trip_temp)
+ throttle = true;
+
mutex_lock(&tz->lock);
- if (tz->temperature >= trip_temp)
- update_instance_for_throttle(tz, trip, trip_type, trend);
- else
- update_instance_for_dethrottle(tz, trip, trip_type);
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ old_target = instance->target;
+ instance->target = get_target_state(instance, trend, throttle);
+
+ /* Activate a passive thermal instance */
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, 1);
+ /* Deactivate a passive thermal instance */
+ else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, -1);
+
+
+ instance->cdev->updated = false; /* cdev needs update */
+ }
mutex_unlock(&tz->lock);
}
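
The reworked step_wise logic above reduces to a small transition function over the trend and the throttle flag. A minimal standalone sketch, not part of the patch, with local stand-ins for the kernel enum and THERMAL_NO_TARGET:

#include <stdio.h>

#define NO_TARGET (-1L)	/* local stand-in for THERMAL_NO_TARGET */

enum trend { RAISING, RAISE_FULL, DROPPING, DROP_FULL };

static long next_state(long cur, long lower, long upper,
		       enum trend t, int throttle)
{
	switch (t) {
	case RAISING:
		if (throttle)
			cur = cur < upper ? cur + 1 : upper;
		break;
	case RAISE_FULL:
		if (throttle)
			cur = upper;
		break;
	case DROPPING:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;
		} else
			cur -= 1;
		break;
	case DROP_FULL:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;
		} else
			cur = lower;
		break;
	}
	return cur;
}

int main(void)
{
	printf("%ld\n", next_state(2, 0, 5, RAISING, 1));	/* 3 */
	printf("%ld\n", next_state(0, 0, 5, DROPPING, 0));	/* -1: deactivate */
	return 0;
}
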
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 8c8ce806180f..5b7863a03f98 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,7 +32,6 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/thermal.h>
-#include <linux/spinlock.h>
#include <linux/reboot.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -132,23 +131,16 @@ EXPORT_SYMBOL_GPL(thermal_unregister_governor);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
- int err;
-
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
if (lock)
mutex_lock(lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
if (lock)
mutex_unlock(lock);
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -355,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->notify(tz, trip, trip_type);
if (trip_type == THERMAL_TRIP_CRITICAL) {
- pr_emerg("Critical temperature reached(%d C),shutting down\n",
- tz->temperature / 1000);
+ dev_emerg(&tz->device,
+			  "critical temperature reached (%d C), shutting down\n",
+ tz->temperature / 1000);
orderly_poweroff(true);
}
}
@@ -378,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
monitor_thermal_zone(tz);
}
+static int thermal_zone_get_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ int ret = 0;
+#ifdef CONFIG_THERMAL_EMULATION
+ int count;
+ unsigned long crit_temp = -1UL;
+ enum thermal_trip_type type;
+#endif
+
+ mutex_lock(&tz->lock);
+
+ ret = tz->ops->get_temp(tz, temp);
+#ifdef CONFIG_THERMAL_EMULATION
+ if (!tz->emul_temperature)
+ goto skip_emul;
+
+ for (count = 0; count < tz->trips; count++) {
+ ret = tz->ops->get_trip_type(tz, count, &type);
+ if (!ret && type == THERMAL_TRIP_CRITICAL) {
+ ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
+ break;
+ }
+ }
+
+ if (ret)
+ goto skip_emul;
+
+ if (*temp < crit_temp)
+ *temp = tz->emul_temperature;
+skip_emul:
+#endif
+ mutex_unlock(&tz->lock);
+ return ret;
+}
+
static void update_temperature(struct thermal_zone_device *tz)
{
long temp;
int ret;
- mutex_lock(&tz->lock);
-
- ret = tz->ops->get_temp(tz, &temp);
+ ret = thermal_zone_get_temp(tz, &temp);
if (ret) {
- pr_warn("failed to read out thermal zone %d\n", tz->id);
- goto exit;
+ dev_warn(&tz->device, "failed to read out thermal zone %d\n",
+ tz->id);
+ return;
}
+ mutex_lock(&tz->lock);
tz->last_temperature = tz->temperature;
tz->temperature = temp;
-
-exit:
mutex_unlock(&tz->lock);
}
@@ -437,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
long temperature;
int ret;
- if (!tz->ops->get_temp)
- return -EPERM;
-
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -700,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%s\n", tz->governor->name);
}
+#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+emul_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int ret = 0;
+ unsigned long temperature;
+
+ if (kstrtoul(buf, 10, &temperature))
+ return -EINVAL;
+
+ if (!tz->ops->set_emul_temp) {
+ mutex_lock(&tz->lock);
+ tz->emul_temperature = temperature;
+ mutex_unlock(&tz->lock);
+ } else {
+ ret = tz->ops->set_emul_temp(tz, temperature);
+ }
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+#endif/*CONFIG_THERMAL_EMULATION*/
+
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -842,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
temp_input);
struct thermal_zone_device *tz = temp->tz;
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -1529,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (!ops || !ops->get_temp)
return ERR_PTR(-EINVAL);
+ if (trips > 0 && !ops->get_trip_type)
+ return ERR_PTR(-EINVAL);
+
tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1592,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
}
+#ifdef CONFIG_THERMAL_EMULATION
+ result = device_create_file(&tz->device, &dev_attr_emul_temp);
+ if (result)
+ goto unregister;
+#endif
/* Create policy attribute */
result = device_create_file(&tz->device, &dev_attr_policy);
if (result)
@@ -1711,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
-int thermal_generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event)
{
struct sk_buff *skb;
struct nlattr *attr;
@@ -1721,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
int result;
static unsigned int thermal_event_seqnum;
+ if (!tz)
+ return -EINVAL;
+
/* allocate memory */
size = nla_total_size(sizeof(struct thermal_genl_event)) +
nla_total_size(0);
@@ -1755,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
memset(thermal_event, 0, sizeof(struct thermal_genl_event));
- thermal_event->orig = orig;
+ thermal_event->orig = tz->id;
thermal_event->event = event;
/* send multicast genetlink message */
@@ -1767,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
if (result)
- pr_info("failed to send netlink event:%d\n", result);
+		dev_err(&tz->device, "Failed to send netlink event: %d\n", result);
return result;
}
@@ -1807,6 +1868,7 @@ static int __init thermal_init(void)
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
+ return result;
}
result = genetlink_init();
return result;
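
The get_idr() conversion above (and the uio_get_minor() one further down) follows the common idr_alloc() pattern: one call replaces the old idr_pre_get()/idr_get_new() retry loop. A minimal kernel-context sketch, not part of the patch, with made-up names apart from the idr API:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

static int example_get_id(void *ptr, int *id)
{
	int ret;

	mutex_lock(&example_lock);
	/* allocate the lowest free id; 0 for "end" means no upper limit */
	ret = idr_alloc(&example_idr, ptr, 0, 0, GFP_KERNEL);
	mutex_unlock(&example_lock);

	if (ret < 0)
		return ret;	/* -ENOMEM, or -ENOSPC when an end limit is hit */
	*id = ret;
	return 0;
}
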
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 19843ec3f80a..682210d778bd 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -230,7 +230,7 @@ static int xen_hvm_console_init(void)
if (r < 0 || v == 0)
goto err;
mfn = v;
- info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
+ info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a0162cbf0557..cf9210db9fa9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -729,19 +729,19 @@ config SERIAL_SH_SCI_DMA
config SERIAL_PNX8XXX
bool "Enable PNX8XXX SoCs' UART Support"
- depends on SOC_PNX8550 || SOC_PNX833X
+ depends on SOC_PNX833X
select SERIAL_CORE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial ports, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial ports, say Y. Otherwise, say N.
config SERIAL_PNX8XXX_CONSOLE
bool "Enable PNX8XX0 serial console"
depends on SERIAL_PNX8XXX
select SERIAL_CORE_CONSOLE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial console, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial console, say Y. Otherwise, say N.
config SERIAL_HS_LPC32XX
tristate "LPC32XX high speed serial port support"
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 814655ee2d61..3687f0cad642 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -870,21 +870,20 @@ static struct input_handler sysrq_handler = {
static bool sysrq_handler_registered;
+unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
+
static inline void sysrq_register_handler(void)
{
- extern unsigned short platform_sysrq_reset_seq[] __weak;
unsigned short key;
int error;
int i;
- if (platform_sysrq_reset_seq) {
- for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
- key = platform_sysrq_reset_seq[i];
- if (key == KEY_RESERVED || key > KEY_MAX)
- break;
+ for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+ key = platform_sysrq_reset_seq[i];
+ if (key == KEY_RESERVED || key > KEY_MAX)
+ break;
- sysrq_reset_seq[sysrq_reset_seq_len++] = key;
- }
+ sysrq_reset_seq[sysrq_reset_seq_len++] = key;
}
error = input_register_handler(&sysrq_handler);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index fa7268a93c06..e4ca345873c3 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -101,7 +101,7 @@ vcs_poll_data_get(struct file *file)
poll = kzalloc(sizeof(*poll), GFP_KERNEL);
if (!poll)
return NULL;
- poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
+ poll->cons_num = iminor(file_inode(file)) & 127;
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
if (register_vt_notifier(&poll->notifier) != 0) {
@@ -182,7 +182,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
int size;
console_lock();
- size = vcs_size(file->f_path.dentry->d_inode);
+ size = vcs_size(file_inode(file));
console_unlock();
if (size < 0)
return size;
@@ -208,7 +208,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int currcons = iminor(inode);
struct vc_data *vc;
struct vcs_poll_data *poll;
@@ -386,7 +386,7 @@ unlock_out:
static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned int currcons = iminor(inode);
struct vc_data *vc;
long pos;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1a2728034599..fbd447b390f7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -539,7 +539,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
- scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x) * 2);
+ scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc))
@@ -638,7 +638,7 @@ static inline void save_screen(struct vc_data *vc)
* Redrawing of screen
*/
-static void clear_buffer_attributes(struct vc_data *vc)
+void clear_buffer_attributes(struct vc_data *vc)
{
unsigned short *p = (unsigned short *)vc->vc_origin;
int count = vc->vc_screenbuf_size / 2;
@@ -2987,7 +2987,7 @@ int __init vty_init(const struct file_operations *console_fops)
static struct class *vtconsole_class;
-static int bind_con_driver(const struct consw *csw, int first, int last,
+static int do_bind_con_driver(const struct consw *csw, int first, int last,
int deflt)
{
struct module *owner = csw->owner;
@@ -2998,7 +2998,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3083,11 +3083,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
retval = 0;
err:
- console_unlock();
module_put(owner);
return retval;
};
+
+static int bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+{
+ int ret;
+
+ console_lock();
+ ret = do_bind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return ret;
+}
+
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static int con_is_graphics(const struct consw *csw, int first, int last)
{
@@ -3124,6 +3135,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
*/
int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
+ int retval;
+
+ console_lock();
+ retval = do_unbind_con_driver(csw, first, last, deflt);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unbind_con_driver);
+
+/* unlocked version of unbind_con_driver() */
+int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
struct con_driver *con_driver = NULL, *con_back = NULL;
@@ -3132,7 +3155,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
+ WARN_CONSOLE_UNLOCKED();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3145,10 +3168,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
retval = -ENODEV;
@@ -3164,15 +3185,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
}
- if (retval) {
- console_unlock();
+ if (retval)
goto err;
- }
- if (!con_is_bound(csw)) {
- console_unlock();
+ if (!con_is_bound(csw))
goto err;
- }
first = max(first, con_driver->first);
last = min(last, con_driver->last);
@@ -3199,15 +3216,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
- console_unlock();
/* ignore return value, binding should not fail */
- bind_con_driver(defcsw, first, last, deflt);
+ do_bind_con_driver(defcsw, first, last, deflt);
err:
module_put(owner);
return retval;
}
-EXPORT_SYMBOL(unbind_con_driver);
+EXPORT_SYMBOL_GPL(do_unbind_con_driver);
static int vt_bind(struct con_driver *con)
{
@@ -3492,28 +3508,18 @@ int con_debug_leave(void)
}
EXPORT_SYMBOL_GPL(con_debug_leave);
-/**
- * register_con_driver - register console driver to console layer
- * @csw: console driver
- * @first: the first console to take over, minimum value is 0
- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
- *
- * DESCRIPTION: This function registers a console driver which can later
- * bind to a range of consoles specified by @first and @last. It will
- * also initialize the console driver by calling con_startup().
- */
-int register_con_driver(const struct consw *csw, int first, int last)
+static int do_register_con_driver(const struct consw *csw, int first, int last)
{
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
int i, retval = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (!try_module_get(owner))
return -ENODEV;
- console_lock();
-
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
@@ -3566,10 +3572,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
}
err:
- console_unlock();
module_put(owner);
return retval;
}
+
+/**
+ * register_con_driver - register console driver to console layer
+ * @csw: console driver
+ * @first: the first console to take over, minimum value is 0
+ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+ *
+ * DESCRIPTION: This function registers a console driver which can later
+ * bind to a range of consoles specified by @first and @last. It will
+ * also initialize the console driver by calling con_startup().
+ */
+int register_con_driver(const struct consw *csw, int first, int last)
+{
+ int retval;
+
+ console_lock();
+ retval = do_register_con_driver(csw, first, last);
+ console_unlock();
+ return retval;
+}
EXPORT_SYMBOL(register_con_driver);
/**
@@ -3585,9 +3610,18 @@ EXPORT_SYMBOL(register_con_driver);
*/
int unregister_con_driver(const struct consw *csw)
{
- int i, retval = -ENODEV;
+ int retval;
console_lock();
+ retval = do_unregister_con_driver(csw);
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unregister_con_driver);
+
+int do_unregister_con_driver(const struct consw *csw)
+{
+ int i, retval = -ENODEV;
/* cannot unregister a bound driver */
if (con_is_bound(csw))
@@ -3613,27 +3647,53 @@ int unregister_con_driver(const struct consw *csw)
}
}
err:
- console_unlock();
return retval;
}
-EXPORT_SYMBOL(unregister_con_driver);
+EXPORT_SYMBOL_GPL(do_unregister_con_driver);
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
- * take_over_console is basically a register followed by unbind
+ * take_over_console is basically a register followed by unbind
+ */
+int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
+{
+ int err;
+
+ err = do_register_con_driver(csw, first, last);
+ /*
+ * If we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+ * but not unregistered it.
+ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+ do_bind_con_driver(csw, first, last, deflt);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(do_take_over_console);
+
+/*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+ * take_over_console is basically a register followed by unbind
*/
int take_over_console(const struct consw *csw, int first, int last, int deflt)
{
int err;
err = register_con_driver(csw, first, last);
- /* if we get an busy error we still want to bind the console driver
+ /*
+ * If we get a busy error we still want to bind the console driver
* and return success, as we may have unbound the console driver
- * but not unregistered it.
- */
+ * but not unregistered it.
+ */
if (err == -EBUSY)
err = 0;
if (!err)
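The new do_*() variants above expect the caller to already hold the console lock (hence the WARN_CONSOLE_UNLOCKED() checks), mirroring what the locked wrappers such as register_con_driver() now do internally. A minimal usage sketch, assuming a hypothetical my_consw driver (not part of this patch):

	/* Sketch only: take over all consoles while holding the console lock,
	 * exactly as the locked take_over_console() wrapper does. */
	console_lock();
	err = do_take_over_console(&my_consw, 0, MAX_NR_CONSOLES - 1, 1);
	console_unlock();
	if (err)
		pr_err("console takeover failed: %d\n", err);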
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5110f367f1f1..c8b926291e28 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -369,26 +369,15 @@ static void uio_dev_del_attributes(struct uio_device *idev)
static int uio_get_minor(struct uio_device *idev)
{
int retval = -ENOMEM;
- int id;
mutex_lock(&minor_lock);
- if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
- goto exit;
-
- retval = idr_get_new(&uio_idr, idev, &id);
- if (retval < 0) {
- if (retval == -EAGAIN)
- retval = -ENOMEM;
- goto exit;
- }
- if (id < UIO_MAX_DEVICES) {
- idev->minor = id;
- } else {
+ retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
+ if (retval >= 0) {
+ idev->minor = retval;
+ } else if (retval == -ENOSPC) {
dev_err(idev->dev, "too many uio devices\n");
retval = -EINVAL;
- idr_remove(&uio_idr, id);
}
-exit:
mutex_unlock(&minor_lock);
return retval;
}
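The idr_alloc() conversion above relies on idr_alloc() returning the newly allocated id (>= 0) on success or a negative errno, with -ENOSPC meaning the requested [start, end) range is exhausted. A minimal sketch of the same pattern, assuming a hypothetical my_idr, my_lock and MY_MAX ceiling (illustrative names only):

	/* Sketch only: allocate an id under a lock and map range exhaustion
	 * to -EINVAL, as uio_get_minor() does above. */
	mutex_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 0, MY_MAX, GFP_KERNEL);
	mutex_unlock(&my_lock);
	if (id < 0)
		return id == -ENOSPC ? -EINVAL : id;
	dev->minor = id;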
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index e33224e23770..2a3bbdf7eb94 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -665,7 +665,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case 0:
@@ -681,7 +681,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4a863fdbdccd..8823e98989fe 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -161,7 +161,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
switch (orig) {
case 0:
@@ -177,7 +177,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return ret;
}
@@ -1971,7 +1971,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p)
{
struct dev_state *ps = file->private_data;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct usb_device *dev = ps->dev;
int ret = -ENOTTY;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index bc19496bcec0..b66130c97269 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -93,7 +93,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EFAULT;
- mutex_lock(&file->f_dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(file)->i_mutex);
list_for_each_entry_safe(req, tmp_req, queue, queue) {
len = snprintf(tmpbuf, sizeof(tmpbuf),
"%8p %08x %c%c%c %5d %c%c%c\n",
@@ -120,7 +120,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
nbytes -= len;
buf += len;
}
- mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return actual;
}
@@ -168,13 +168,13 @@ out:
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
int ret;
mutex_lock(&inode->i_mutex);
ret = simple_read_from_buffer(buf, nbytes, ppos,
file->private_data,
- file->f_dentry->d_inode->i_size);
+ file_inode(file)->i_size);
mutex_unlock(&inode->i_mutex);
return ret;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index fc5c16ca5e0a..97666e8b1b95 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -978,7 +978,7 @@ static int do_synchronize_cache(struct fsg_common *common)
static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 35bcc83d1e04..bf7a56b6d48a 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -688,7 +688,7 @@ static int
printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
{
struct printer_dev *dev = fd->private_data;
- struct inode *inode = fd->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(fd);
unsigned long flags;
int tx_list_empty;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index e4192b887de9..d9297eebbf73 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+ rndis_params *p = PDE(file_inode(file))->data;
u32 speed = 0;
int i, fl_speed = 0;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 4ecbf8496f48..dbce3a9074e6 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -440,7 +440,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 99899e808c6a..0555ee42d7cb 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -107,7 +107,7 @@ static int omap_ehci_init(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int rc;
- struct ehci_hcd_omap_platform_data *pdata;
+ struct usbhs_omap_platform_data *pdata;
pdata = hcd->self.controller->platform_data;
@@ -151,7 +151,7 @@ static int omap_ehci_init(struct usb_hcd *hcd)
}
static void disable_put_regulator(
- struct ehci_hcd_omap_platform_data *pdata)
+ struct usbhs_omap_platform_data *pdata)
{
int i;
@@ -176,7 +176,7 @@ static void disable_put_regulator(
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 5980758563eb..c41b01e2b693 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -6,3 +6,13 @@ config VFIO_PCI
use of PCI drivers using the VFIO framework.
If you don't know what to do here, say N.
+
+config VFIO_PCI_VGA
+ bool "VFIO PCI support for VGA devices"
+ depends on VFIO_PCI && X86 && VGA_ARB
+ help
+ Support for VGA extension to VFIO PCI. This exposes an additional
+ region on VGA devices for accessing legacy VGA addresses used by
+ BIOS and generic video drivers.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b28e66c4376a..8189cb6a86af 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -84,6 +84,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
} else
vdev->msix_bar = 0xFF;
+#ifdef CONFIG_VFIO_PCI_VGA
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vdev->has_vga = true;
+#endif
+
return 0;
}
@@ -285,6 +290,16 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ break;
default:
return -EINVAL;
}
@@ -366,52 +381,50 @@ static long vfio_pci_ioctl(void *device_data,
return -ENOTTY;
}
-static ssize_t vfio_pci_read(void *device_data, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
if (index >= VFIO_PCI_NUM_REGIONS)
return -EINVAL;
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, buf, count, ppos, false);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ if (iswrite)
+ return -EINVAL;
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_VGA_REGION_INDEX:
+ return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ }
return -EINVAL;
}
-static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
{
- unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
+ if (!count)
+ return 0;
- if (index >= VFIO_PCI_NUM_REGIONS)
- return -EINVAL;
+ return vfio_pci_rw(device_data, buf, count, ppos, false);
+}
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return -EINVAL;
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM) {
- return vfio_pci_mem_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- }
+static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!count)
+ return 0;
- return -EINVAL;
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 8b8f7d11e102..964ff22bf281 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -587,12 +587,46 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ if (offset == PCI_PM_CTRL) {
+ pci_power_t state;
+
+ switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
+ case 0:
+ state = PCI_D0;
+ break;
+ case 1:
+ state = PCI_D1;
+ break;
+ case 2:
+ state = PCI_D2;
+ break;
+ case 3:
+ state = PCI_D3hot;
+ break;
+ }
+
+ pci_set_power_state(vdev->pdev, state);
+ }
+
+ return count;
+}
+
/* Permissions for the Power Management capability */
static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
return -ENOMEM;
+ perm->writefn = vfio_pm_config_write;
+
/*
* We always virtualize the next field so we can remove
* capabilities from the chain if we want to.
@@ -600,10 +634,11 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Power management is defined *per function*,
- * so we let the user write this
+ * Power management is defined *per function*, so we can let
+ * the user change power state, but we trap and initiate the
+ * change ourselves, so the state bits are read-only.
*/
- p_setd(perm, PCI_PM_CTRL, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
return 0;
}
@@ -985,12 +1020,12 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
if (ret)
return pcibios_err_to_errno(ret);
+ vdev->extended_caps = true;
+
if ((word & PCI_EXP_FLAGS_VERS) == 1)
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
- else {
- vdev->extended_caps = true;
+ else
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
- }
case PCI_CAP_ID_HT:
ret = pci_read_config_byte(pdev, pos + 3, &byte);
if (ret)
@@ -1501,9 +1536,8 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
return ret;
}
-ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
size_t done = 0;
int ret = 0;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 611827cba8cd..d7e55d03f49e 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -53,6 +53,7 @@ struct vfio_pci_device {
bool reset_works;
bool extended_caps;
bool bardirty;
+ bool has_vga;
struct pci_saved_state *pci_saved_state;
atomic_t refcnt;
};
@@ -70,15 +71,15 @@ extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
uint32_t flags, unsigned index,
unsigned start, unsigned count, void *data);
-extern ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index f72323ef618f..210db24d2204 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,253 +17,222 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
-/* I/O Port BAR access */
-ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+/*
+ * Read or write from an __iomem region (MMIO or I/O port) with an excluded
+ * range which is inaccessible. The excluded range drops writes and fills
+ * reads with -1. This is intended for handling MSI-X vector tables and
+ * leftover space for ROM BARs.
+ */
+static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite)
{
- struct pci_dev *pdev = vdev->pdev;
- loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
- size_t done = 0;
-
- if (!pci_resource_start(pdev, bar))
- return -EINVAL;
-
- if (pos + count > pci_resource_len(pdev, bar))
- return -EINVAL;
-
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
- if (ret)
- return ret;
-
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
-
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
- }
-
- io = vdev->barmap[bar];
+ ssize_t done = 0;
while (count) {
- int filled;
+ size_t fillable, filled;
+
+ if (off < x_start)
+ fillable = min(count, (size_t)(x_start - off));
+ else if (off >= x_end)
+ fillable = count;
+ else
+ fillable = 0;
- if (count >= 3 && !(pos % 4)) {
+ if (fillable >= 4 && !(off % 4)) {
__le32 val;
if (iswrite) {
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- iowrite32(le32_to_cpu(val), io + pos);
+ iowrite32(le32_to_cpu(val), io + off);
} else {
- val = cpu_to_le32(ioread32(io + pos));
+ val = cpu_to_le32(ioread32(io + off));
if (copy_to_user(buf, &val, 4))
return -EFAULT;
}
filled = 4;
-
- } else if ((pos % 2) == 0 && count >= 2) {
+ } else if (fillable >= 2 && !(off % 2)) {
__le16 val;
if (iswrite) {
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- iowrite16(le16_to_cpu(val), io + pos);
+ iowrite16(le16_to_cpu(val), io + off);
} else {
- val = cpu_to_le16(ioread16(io + pos));
+ val = cpu_to_le16(ioread16(io + off));
if (copy_to_user(buf, &val, 2))
return -EFAULT;
}
filled = 2;
- } else {
+ } else if (fillable) {
u8 val;
if (iswrite) {
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- iowrite8(val, io + pos);
+ iowrite8(val, io + off);
} else {
- val = ioread8(io + pos);
+ val = ioread8(io + off);
if (copy_to_user(buf, &val, 1))
return -EFAULT;
}
filled = 1;
+ } else {
+ /* Fill reads with -1, drop writes */
+ filled = min(count, (size_t)(x_end - off));
+ if (!iswrite) {
+ u8 val = 0xFF;
+ size_t i;
+
+ for (i = 0; i < filled; i++)
+ if (copy_to_user(buf + i, &val, 1))
+ return -EFAULT;
+ }
}
count -= filled;
done += filled;
+ off += filled;
buf += filled;
- pos += filled;
}
- *ppos += done;
-
return done;
}
-/*
- * MMIO BAR access
- * We handle two excluded ranges here as well, if the user tries to read
- * the ROM beyond what PCI tells us is available or the MSI-X table region,
- * we return 0xFF and writes are dropped.
- */
-ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
+ size_t x_start = 0, x_end = 0;
resource_size_t end;
- size_t done = 0;
- size_t x_start = 0, x_end = 0; /* excluded range */
+ void __iomem *io;
+ ssize_t done;
if (!pci_resource_start(pdev, bar))
return -EINVAL;
end = pci_resource_len(pdev, bar);
- if (pos > end)
+ if (pos >= end)
return -EINVAL;
- if (pos == end)
- return 0;
-
- if (pos + count > end)
- count = end - pos;
+ count = min(count, (size_t)(end - pos));
if (bar == PCI_ROM_RESOURCE) {
+ /*
+ * The ROM can fill less space than the BAR, so we start the
+ * excluded range at the end of the actual ROM. This makes
+ * filling large ROM BARs much faster.
+ */
io = pci_map_rom(pdev, &x_start);
+ if (!io)
+ return -ENOMEM;
x_end = end;
- } else {
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar,
- "vfio");
- if (ret)
- return ret;
+ } else if (!vdev->barmap[bar]) {
+ int ret;
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+ ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (ret)
+ return ret;
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
+ io = pci_iomap(pdev, bar, 0);
+ if (!io) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -ENOMEM;
}
+ vdev->barmap[bar] = io;
+ } else
io = vdev->barmap[bar];
- if (bar == vdev->msix_bar) {
- x_start = vdev->msix_offset;
- x_end = vdev->msix_offset + vdev->msix_size;
- }
+ if (bar == vdev->msix_bar) {
+ x_start = vdev->msix_offset;
+ x_end = vdev->msix_offset + vdev->msix_size;
}
- if (!io)
- return -EINVAL;
-
- while (count) {
- size_t fillable, filled;
-
- if (pos < x_start)
- fillable = x_start - pos;
- else if (pos >= x_end)
- fillable = end - pos;
- else
- fillable = 0;
-
- if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
- __le32 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 4))
- goto out;
-
- iowrite32(le32_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le32(ioread32(io + pos));
+ done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
- if (copy_to_user(buf, &val, 4))
- goto out;
- }
+ if (done >= 0)
+ *ppos += done;
- filled = 4;
- } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
- __le16 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 2))
- goto out;
-
- iowrite16(le16_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le16(ioread16(io + pos));
-
- if (copy_to_user(buf, &val, 2))
- goto out;
- }
-
- filled = 2;
- } else if (fillable) {
- u8 val;
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
- if (iswrite) {
- if (copy_from_user(&val, buf, 1))
- goto out;
+ return done;
+}
- iowrite8(val, io + pos);
- } else {
- val = ioread8(io + pos);
+ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ void __iomem *iomem = NULL;
+ unsigned int rsrc;
+ bool is_ioport;
+ ssize_t done;
+
+ if (!vdev->has_vga)
+ return -EINVAL;
- if (copy_to_user(buf, &val, 1))
- goto out;
- }
+ switch (pos) {
+ case 0xa0000 ... 0xbffff:
+ count = min(count, (size_t)(0xc0000 - pos));
+ iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ off = pos - 0xa0000;
+ rsrc = VGA_RSRC_LEGACY_MEM;
+ is_ioport = false;
+ break;
+ case 0x3b0 ... 0x3bb:
+ count = min(count, (size_t)(0x3bc - pos));
+ iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
+ off = pos - 0x3b0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ case 0x3c0 ... 0x3df:
+ count = min(count, (size_t)(0x3e0 - pos));
+ iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
+ off = pos - 0x3c0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ default:
+ return -EINVAL;
+ }
- filled = 1;
- } else {
- /* Drop writes, fill reads with FF */
- filled = min((size_t)(x_end - pos), count);
- if (!iswrite) {
- char val = 0xFF;
- size_t i;
+ if (!iomem)
+ return -ENOMEM;
- for (i = 0; i < filled; i++) {
- if (put_user(val, buf + i))
- goto out;
- }
- }
+ ret = vga_get_interruptible(vdev->pdev, rsrc);
+ if (ret) {
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+ return ret;
+ }
- }
+ done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
- count -= filled;
- done += filled;
- buf += filled;
- pos += filled;
- }
+ vga_put(vdev->pdev, rsrc);
- *ppos += done;
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
-out:
- if (bar == PCI_ROM_RESOURCE)
- pci_unmap_rom(pdev, io);
+ if (done >= 0)
+ *ppos += done;
- return count ? -EFAULT : done;
+ return done;
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 12c264d3b058..fcc12f3e60a3 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -139,23 +139,8 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
*/
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- int ret, minor;
-
-again:
- if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
/* index 0 is used by /dev/vfio/vfio */
- ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
- if (ret == -EAGAIN)
- goto again;
- if (ret || minor > MINORMASK) {
- if (minor > MINORMASK)
- idr_remove(&vfio.group_idr, minor);
- return -ENOSPC;
- }
-
- return minor;
+ return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
}
static void vfio_free_group_minor(int minor)
@@ -442,7 +427,7 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
* a device. It's not always practical to leave a device within a group
* driverless as it could get re-bound to something unsafe.
*/
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };
static bool vfio_whitelisted_driver(struct device_driver *drv)
{
@@ -642,33 +627,16 @@ int vfio_add_group_dev(struct device *dev,
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
-/* Test whether a struct device is present in our tracking */
-static bool vfio_dev_present(struct device *dev)
+/* Given a referenced group, check if it contains the device */
+static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
- struct iommu_group *iommu_group;
- struct vfio_group *group;
struct vfio_device *device;
- iommu_group = iommu_group_get(dev);
- if (!iommu_group)
- return false;
-
- group = vfio_group_get_from_iommu(iommu_group);
- if (!group) {
- iommu_group_put(iommu_group);
- return false;
- }
-
device = vfio_group_get_device(group, dev);
- if (!device) {
- vfio_group_put(group);
- iommu_group_put(iommu_group);
+ if (!device)
return false;
- }
vfio_device_put(device);
- vfio_group_put(group);
- iommu_group_put(iommu_group);
return true;
}
@@ -682,10 +650,18 @@ void *vfio_del_group_dev(struct device *dev)
struct iommu_group *iommu_group = group->iommu_group;
void *device_data = device->device_data;
+ /*
+ * The group exists so long as we have a device reference. Get
+ * a group reference and use it to scan for the device going away.
+ */
+ vfio_group_get(group);
+
vfio_device_put(device);
/* TODO send a signal to encourage this to be released */
- wait_event(vfio.release_q, !vfio_dev_present(dev));
+ wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+
+ vfio_group_put(group);
iommu_group_put(iommu_group);
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 22321cf84fbe..9951297b2427 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -47,6 +47,8 @@
#include <linux/vhost.h>
#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
+#include <linux/llist.h>
+#include <linux/bitmap.h>
#include "vhost.c"
#include "vhost.h"
@@ -58,14 +60,20 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
+#define VHOST_SCSI_MAX_TARGET 256
+#define VHOST_SCSI_MAX_VQ 128
+
struct vhost_scsi {
- struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
+ /* Protected by vhost_scsi->dev.mutex */
+ struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+ char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+ bool vs_endpoint;
+
struct vhost_dev dev;
- struct vhost_virtqueue vqs[3];
+ struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
struct vhost_work vs_completion_work; /* cmd completion work item */
- struct list_head vs_completion_list; /* cmd completion queue */
- spinlock_t vs_completion_lock; /* protects s_completion_list */
+ struct llist_head vs_completion_list; /* cmd completion queue */
};
/* Local pointer to allocated TCM configfs fabric module */
@@ -77,6 +85,12 @@ static struct workqueue_struct *tcm_vhost_workqueue;
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);
+static int iov_num_pages(struct iovec *iov)
+{
+ return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
+ ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
return 1;
@@ -301,9 +315,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;
- spin_lock_bh(&vs->vs_completion_lock);
- list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
- spin_unlock_bh(&vs->vs_completion_lock);
+ llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
@@ -347,27 +359,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
kfree(tv_cmd);
}
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
- struct vhost_scsi *vs)
-{
- struct tcm_vhost_cmd *tv_cmd = NULL;
-
- spin_lock_bh(&vs->vs_completion_lock);
- if (list_empty(&vs->vs_completion_list)) {
- spin_unlock_bh(&vs->vs_completion_lock);
- return NULL;
- }
-
- list_for_each_entry(tv_cmd, &vs->vs_completion_list,
- tvc_completion_list) {
- list_del(&tv_cmd->tvc_completion_list);
- break;
- }
- spin_unlock_bh(&vs->vs_completion_lock);
- return tv_cmd;
-}
-
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
@@ -377,12 +368,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
+ DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
+ struct virtio_scsi_cmd_resp v_rsp;
struct tcm_vhost_cmd *tv_cmd;
-
- while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
- struct virtio_scsi_cmd_resp v_rsp;
- struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
- int ret;
+ struct llist_node *llnode;
+ struct se_cmd *se_cmd;
+ int ret, vq;
+
+ bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ llnode = llist_del_all(&vs->vs_completion_list);
+ while (llnode) {
+ tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+ tvc_completion_list);
+ llnode = llist_next(llnode);
+ se_cmd = &tv_cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
@@ -395,15 +394,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
v_rsp.sense_len);
ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
- if (likely(ret == 0))
- vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
- else
+ if (likely(ret == 0)) {
+ vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
+ vq = tv_cmd->tvc_vq - vs->vqs;
+ __set_bit(vq, signal);
+ } else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_free_cmd(tv_cmd);
}
- vhost_signal(&vs->dev, &vs->vqs[2]);
+ vq = -1;
+ while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
+ < VHOST_SCSI_MAX_VQ)
+ vhost_signal(&vs->dev, &vs->vqs[vq]);
}
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
@@ -426,7 +430,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
pr_err("Unable to allocate struct tcm_vhost_cmd\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
tv_cmd->tvc_tag = v_req->tag;
tv_cmd->tvc_task_attr = v_req->task_attr;
tv_cmd->tvc_exp_data_len = exp_data_len;
@@ -442,40 +445,47 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
* Returns the number of scatterlist entries used or -errno on error.
*/
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
- unsigned int sgl_count, void __user *ptr, size_t len, int write)
+ unsigned int sgl_count, struct iovec *iov, int write)
{
+ unsigned int npages = 0, pages_nr, offset, nbytes;
struct scatterlist *sg = sgl;
- unsigned int npages = 0;
- int ret;
+ void __user *ptr = iov->iov_base;
+ size_t len = iov->iov_len;
+ struct page **pages;
+ int ret, i;
- while (len > 0) {
- struct page *page;
- unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
- unsigned int nbytes = min_t(unsigned int,
- PAGE_SIZE - offset, len);
+ pages_nr = iov_num_pages(iov);
+ if (pages_nr > sgl_count)
+ return -ENOBUFS;
- if (npages == sgl_count) {
- ret = -ENOBUFS;
- goto err;
- }
+ pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
- BUG_ON(ret == 0); /* we should either get our page or fail */
- if (ret < 0)
- goto err;
+ ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+ /* No pages were pinned */
+ if (ret < 0)
+ goto out;
+ /* Less pages pinned than wanted */
+ if (ret != pages_nr) {
+ for (i = 0; i < ret; i++)
+ put_page(pages[i]);
+ ret = -EFAULT;
+ goto out;
+ }
- sg_set_page(sg, page, nbytes, offset);
+ while (len > 0) {
+ offset = (uintptr_t)ptr & ~PAGE_MASK;
+ nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+ sg_set_page(sg, pages[npages], nbytes, offset);
ptr += nbytes;
len -= nbytes;
sg++;
npages++;
}
- return npages;
-err:
- /* Put pages that we hold */
- for (sg = sgl; sg != &sgl[npages]; sg++)
- put_page(sg_page(sg));
+out:
+ kfree(pages);
return ret;
}
@@ -491,11 +501,9 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
* Find out how long sglist needs to be
*/
sgl_count = 0;
- for (i = 0; i < niov; i++) {
- sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
- PAGE_SIZE - 1) >> PAGE_SHIFT) -
- ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
- }
+ for (i = 0; i < niov; i++)
+ sgl_count += iov_num_pages(&iov[i]);
+
/* TODO overflow checking */
sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
@@ -510,8 +518,7 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
for (i = 0; i < niov; i++) {
- ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
- iov[i].iov_len, write);
+ ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
if (ret < 0) {
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
@@ -563,19 +570,19 @@ static void tcm_vhost_submission_work(struct work_struct *work)
}
}
-static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq)
{
- struct vhost_virtqueue *vq = &vs->vqs[2];
struct virtio_scsi_cmd_req v_req;
struct tcm_vhost_tpg *tv_tpg;
struct tcm_vhost_cmd *tv_cmd;
u32 exp_data_len, data_first, data_num, data_direction;
unsigned out, in, i;
int head, ret;
+ u8 target;
/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
- tv_tpg = vs->vs_tpg;
- if (unlikely(!tv_tpg))
+ if (unlikely(!vs->vs_endpoint))
return;
mutex_lock(&vq->mutex);
@@ -643,6 +650,28 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
break;
}
+ /* Extract the tpgt */
+ target = v_req.lun[1];
+ tv_tpg = vs->vs_tpg[target];
+
+ /* Target does not exist, fail the request */
+ if (unlikely(!tv_tpg)) {
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev,
+ vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+ continue;
+ }
+
exp_data_len = 0;
for (i = 0; i < data_num; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
@@ -658,6 +687,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
": %d\n", tv_cmd, exp_data_len, data_direction);
tv_cmd->tvc_vhost = vs;
+ tv_cmd->tvc_vq = vq;
if (unlikely(vq->iov[out].iov_len !=
sizeof(struct virtio_scsi_cmd_resp))) {
@@ -738,7 +768,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
- vhost_scsi_handle_vq(vs);
+ vhost_scsi_handle_vq(vs, vq);
}
/*
@@ -751,7 +781,8 @@ static int vhost_scsi_set_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- int index;
+ bool match = false;
+ int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -762,7 +793,6 @@ static int vhost_scsi_set_endpoint(
return -EFAULT;
}
}
- mutex_unlock(&vs->dev.mutex);
mutex_lock(&tcm_vhost_mutex);
list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
@@ -777,30 +807,33 @@ static int vhost_scsi_set_endpoint(
}
tv_tport = tv_tpg->tport;
- if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
- (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
- tv_tpg->tv_tpg_vhost_count++;
- mutex_unlock(&tv_tpg->tv_tpg_mutex);
- mutex_unlock(&tcm_vhost_mutex);
-
- mutex_lock(&vs->dev.mutex);
- if (vs->vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- mutex_lock(&tv_tpg->tv_tpg_mutex);
- tv_tpg->tv_tpg_vhost_count--;
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ mutex_unlock(&tcm_vhost_mutex);
+ mutex_unlock(&vs->dev.mutex);
return -EEXIST;
}
-
- vs->vs_tpg = tv_tpg;
+ tv_tpg->tv_tpg_vhost_count++;
+ vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
smp_mb__after_atomic_inc();
- mutex_unlock(&vs->dev.mutex);
- return 0;
+ match = true;
}
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
mutex_unlock(&tcm_vhost_mutex);
- return -EINVAL;
+
+ if (match) {
+ memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
+ sizeof(vs->vs_vhost_wwpn));
+ vs->vs_endpoint = true;
+ ret = 0;
+ } else {
+ ret = -EEXIST;
+ }
+
+ mutex_unlock(&vs->dev.mutex);
+ return ret;
}
static int vhost_scsi_clear_endpoint(
@@ -809,7 +842,8 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- int index, ret;
+ int index, ret, i;
+ u8 target;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -819,27 +853,32 @@ static int vhost_scsi_clear_endpoint(
goto err;
}
}
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ target = i;
- if (!vs->vs_tpg) {
- ret = -ENODEV;
- goto err;
- }
- tv_tpg = vs->vs_tpg;
- tv_tport = tv_tpg->tport;
-
- if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
- (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
- pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
- " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
- tv_tport->tport_name, tv_tpg->tport_tpgt,
- t->vhost_wwpn, t->vhost_tpgt);
- ret = -EINVAL;
- goto err;
+ tv_tpg = vs->vs_tpg[target];
+ if (!tv_tpg)
+ continue;
+
+ tv_tport = tv_tpg->tport;
+ if (!tv_tport) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+ " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+ tv_tport->tport_name, tv_tpg->tport_tpgt,
+ t->vhost_wwpn, t->vhost_tpgt);
+ ret = -EINVAL;
+ goto err;
+ }
+ tv_tpg->tv_tpg_vhost_count--;
+ vs->vs_tpg[target] = NULL;
+ vs->vs_endpoint = false;
}
- tv_tpg->tv_tpg_vhost_count--;
- vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
-
return 0;
err:
@@ -850,20 +889,19 @@ err:
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
- int r;
+ int r, i;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
- INIT_LIST_HEAD(&s->vs_completion_list);
- spin_lock_init(&s->vs_completion_lock);
s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
- s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
- r = vhost_dev_init(&s->dev, s->vqs, 3);
+ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
+ s->vqs[i].handle_kick = vhost_scsi_handle_kick;
+ r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
if (r < 0) {
kfree(s);
return r;
@@ -876,16 +914,12 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
struct vhost_scsi *s = f->private_data;
+ struct vhost_scsi_target t;
- if (s->vs_tpg && s->vs_tpg->tport) {
- struct vhost_scsi_target backend;
-
- memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
- sizeof(backend.vhost_wwpn));
- backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
- vhost_scsi_clear_endpoint(s, &backend);
- }
-
+ mutex_lock(&s->dev.mutex);
+ memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
+ mutex_unlock(&s->dev.mutex);
+ vhost_scsi_clear_endpoint(s, &t);
vhost_dev_stop(&s->dev);
vhost_dev_cleanup(&s->dev, false);
kfree(s);
@@ -899,9 +933,10 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
- vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
+ int i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ vhost_scsi_flush_vq(vs, i);
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 7e87c63ecbcd..1d2ae7a60e11 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -23,6 +23,8 @@ struct tcm_vhost_cmd {
struct virtio_scsi_cmd_resp __user *tvc_resp;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
+ /* Pointer to vhost_virtqueue for the cmd */
+ struct vhost_virtqueue *tvc_vq;
/* Pointer to vhost nexus memory */
struct tcm_vhost_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
@@ -34,7 +36,7 @@ struct tcm_vhost_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/* Completed commands list, serviced from vhost worker thread */
- struct list_head tvc_completion_list;
+ struct llist_node tvc_completion_list;
};
struct tcm_vhost_nexus {
@@ -93,9 +95,11 @@ struct tcm_vhost_tport {
*
* ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
* RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
+ * All the targets under vhost_wwpn can be seen and used by the guest.
*/
-#define VHOST_SCSI_ABI_VERSION 0
+#define VHOST_SCSI_ABI_VERSION 1
struct vhost_scsi_target {
int abi_version;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 80cbd21b483f..4c1546f71d56 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,8 +21,6 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
-source "drivers/gpu/stub/Kconfig"
-
config VGASTATE
tristate
default n
@@ -33,6 +31,30 @@ config VIDEO_OUTPUT_CONTROL
This framework adds support for low-level control of the video
output switch.
+config DISPLAY_TIMING
+ bool
+
+config VIDEOMODE
+ bool
+
+config OF_DISPLAY_TIMING
+ bool "Enable device tree display timing support"
+ depends on OF
+ select DISPLAY_TIMING
+ help
+ Helper to parse display timings from the devicetree.
+
+config OF_VIDEOMODE
+ bool "Enable device tree videomode support"
+ depends on OF
+ select VIDEOMODE
+ select OF_DISPLAY_TIMING
+ help
+ Helper to get videomodes from the devicetree.
+
+config HDMI
+ bool
+
menuconfig FB
tristate "Support for frame buffer devices"
---help---
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0577f834fdcd..9df387334cb7 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_VGASTATE) += vgastate.o
+obj-$(CONFIG_HDMI) += hdmi.o
obj-y += fb_notify.o
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
@@ -170,3 +171,7 @@ obj-$(CONFIG_FB_VIRTUAL) += vfb.o
#video output switch sysfs driver
obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
+obj-$(CONFIG_DISPLAY_TIMING) += display_timing.o
+obj-$(CONFIG_OF_DISPLAY_TIMING) += of_display_timing.o
+obj-$(CONFIG_VIDEOMODE) += videomode.o
+obj-$(CONFIG_OF_VIDEOMODE) += of_videomode.o
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index be27b551473f..db10d0120d2b 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -384,6 +384,12 @@ config BACKLIGHT_LP855X
This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
backlight driver.
+config BACKLIGHT_LP8788
+ tristate "Backlight driver for TI LP8788 MFD"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788
+ help
+ This supports the TI LP8788 backlight driver.
+
config BACKLIGHT_OT200
tristate "Backlight driver for ot200 visualisation device"
depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4606c218e8e4..96c4d620c5ce 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_BACKLIGHT_LM3630) += lm3630_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
+obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index d29e49443f29..c02aa2c2575a 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -317,10 +317,7 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
pd = lcd->lcd_pd;
bd = lcd->bd;
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EINVAL;
- } else {
+ if (pd->power_on) {
pd->power_on(lcd->ld, 1);
msleep(pd->power_on_delay);
}
@@ -370,7 +367,8 @@ static int ams369fg06_power_off(struct ams369fg06 *lcd)
msleep(pd->power_off_delay);
- pd->power_on(lcd->ld, 0);
+ if (pd->power_on)
+ pd->power_on(lcd->ld, 0);
return 0;
}
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
new file mode 100644
index 000000000000..4bb8b4f140cf
--- /dev/null
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -0,0 +1,333 @@
+/*
+ * TI LP8788 MFD - backlight driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/* Register address */
+#define LP8788_BL_CONFIG 0x96
+#define LP8788_BL_EN BIT(0)
+#define LP8788_BL_PWM_INPUT_EN BIT(5)
+#define LP8788_BL_FULLSCALE_SHIFT 2
+#define LP8788_BL_DIM_MODE_SHIFT 1
+#define LP8788_BL_PWM_POLARITY_SHIFT 6
+
+#define LP8788_BL_BRIGHTNESS 0x97
+
+#define LP8788_BL_RAMP 0x98
+#define LP8788_BL_RAMP_RISE_SHIFT 4
+
+#define MAX_BRIGHTNESS 127
+#define DEFAULT_BL_NAME "lcd-backlight"
+
+struct lp8788_bl_config {
+ enum lp8788_bl_ctrl_mode bl_mode;
+ enum lp8788_bl_dim_mode dim_mode;
+ enum lp8788_bl_full_scale_current full_scale;
+ enum lp8788_bl_ramp_step rise_time;
+ enum lp8788_bl_ramp_step fall_time;
+ enum pwm_polarity pwm_pol;
+};
+
+struct lp8788_bl {
+ struct lp8788 *lp;
+ struct backlight_device *bl_dev;
+ struct lp8788_backlight_platform_data *pdata;
+ enum lp8788_bl_ctrl_mode mode;
+ struct pwm_device *pwm;
+};
+
+struct lp8788_bl_config default_bl_config = {
+ .bl_mode = LP8788_BL_REGISTER_ONLY,
+ .dim_mode = LP8788_DIM_EXPONENTIAL,
+ .full_scale = LP8788_FULLSCALE_1900uA,
+ .rise_time = LP8788_RAMP_8192us,
+ .fall_time = LP8788_RAMP_8192us,
+ .pwm_pol = PWM_POLARITY_NORMAL,
+};
+
+static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
+{
+ return (mode == LP8788_BL_COMB_PWM_BASED);
+}
+
+static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
+{
+ return (mode == LP8788_BL_REGISTER_ONLY ||
+ mode == LP8788_BL_COMB_REGISTER_BASED);
+}
+
+static int lp8788_backlight_configure(struct lp8788_bl *bl)
+{
+ struct lp8788_backlight_platform_data *pdata = bl->pdata;
+ struct lp8788_bl_config *cfg = &default_bl_config;
+ int ret;
+ u8 val;
+
+ /*
+ * Update chip configuration if platform data exists,
+ * otherwise use the default settings.
+ */
+ if (pdata) {
+ cfg->bl_mode = pdata->bl_mode;
+ cfg->dim_mode = pdata->dim_mode;
+ cfg->full_scale = pdata->full_scale;
+ cfg->rise_time = pdata->rise_time;
+ cfg->fall_time = pdata->fall_time;
+ cfg->pwm_pol = pdata->pwm_pol;
+ }
+
+ /* Brightness ramp up/down */
+ val = (cfg->rise_time << LP8788_BL_RAMP_RISE_SHIFT) | cfg->fall_time;
+ ret = lp8788_write_byte(bl->lp, LP8788_BL_RAMP, val);
+ if (ret)
+ return ret;
+
+ /* Fullscale current setting */
+ val = (cfg->full_scale << LP8788_BL_FULLSCALE_SHIFT) |
+ (cfg->dim_mode << LP8788_BL_DIM_MODE_SHIFT);
+
+ /* Brightness control mode */
+ switch (cfg->bl_mode) {
+ case LP8788_BL_REGISTER_ONLY:
+ val |= LP8788_BL_EN;
+ break;
+ case LP8788_BL_COMB_PWM_BASED:
+ case LP8788_BL_COMB_REGISTER_BASED:
+ val |= LP8788_BL_EN | LP8788_BL_PWM_INPUT_EN |
+ (cfg->pwm_pol << LP8788_BL_PWM_POLARITY_SHIFT);
+ break;
+ default:
+ dev_err(bl->lp->dev, "invalid mode: %d\n", cfg->bl_mode);
+ return -EINVAL;
+ }
+
+ bl->mode = cfg->bl_mode;
+
+ return lp8788_write_byte(bl->lp, LP8788_BL_CONFIG, val);
+}
+
+static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br)
+{
+ unsigned int period;
+ unsigned int duty;
+ struct device *dev;
+ struct pwm_device *pwm;
+
+ if (!bl->pdata)
+ return;
+
+ period = bl->pdata->period_ns;
+ duty = br * period / max_br;
+ dev = bl->lp->dev;
+
+ /* request PWM device with the consumer name */
+ if (!bl->pwm) {
+ pwm = devm_pwm_get(dev, LP8788_DEV_BACKLIGHT);
+ if (IS_ERR(pwm)) {
+ dev_err(dev, "can not get PWM device\n");
+ return;
+ }
+
+ bl->pwm = pwm;
+ }
+
+ pwm_config(bl->pwm, duty, period);
+ if (duty)
+ pwm_enable(bl->pwm);
+ else
+ pwm_disable(bl->pwm);
+}
+
+static int lp8788_bl_update_status(struct backlight_device *bl_dev)
+{
+ struct lp8788_bl *bl = bl_get_data(bl_dev);
+ enum lp8788_bl_ctrl_mode mode = bl->mode;
+
+ if (bl_dev->props.state & BL_CORE_SUSPENDED)
+ bl_dev->props.brightness = 0;
+
+ if (is_brightness_ctrl_by_pwm(mode)) {
+ int brt = bl_dev->props.brightness;
+ int max = bl_dev->props.max_brightness;
+
+ lp8788_pwm_ctrl(bl, brt, max);
+ } else if (is_brightness_ctrl_by_register(mode)) {
+ u8 brt = bl_dev->props.brightness;
+
+ lp8788_write_byte(bl->lp, LP8788_BL_BRIGHTNESS, brt);
+ }
+
+ return 0;
+}
+
+static int lp8788_bl_get_brightness(struct backlight_device *bl_dev)
+{
+ return bl_dev->props.brightness;
+}
+
+static const struct backlight_ops lp8788_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = lp8788_bl_update_status,
+ .get_brightness = lp8788_bl_get_brightness,
+};
+
+static int lp8788_backlight_register(struct lp8788_bl *bl)
+{
+ struct backlight_device *bl_dev;
+ struct backlight_properties props;
+ struct lp8788_backlight_platform_data *pdata = bl->pdata;
+ int init_brt;
+ char *name;
+
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = MAX_BRIGHTNESS;
+
+ /* Initial brightness */
+ if (pdata)
+ init_brt = min_t(int, pdata->initial_brightness,
+ props.max_brightness);
+ else
+ init_brt = 0;
+
+ props.brightness = init_brt;
+
+ /* Backlight device name */
+ if (!pdata || !pdata->name)
+ name = DEFAULT_BL_NAME;
+ else
+ name = pdata->name;
+
+ bl_dev = backlight_device_register(name, bl->lp->dev, bl,
+ &lp8788_bl_ops, &props);
+ if (IS_ERR(bl_dev))
+ return PTR_ERR(bl_dev);
+
+ bl->bl_dev = bl_dev;
+
+ return 0;
+}
+
+static void lp8788_backlight_unregister(struct lp8788_bl *bl)
+{
+ struct backlight_device *bl_dev = bl->bl_dev;
+
+ if (bl_dev)
+ backlight_device_unregister(bl_dev);
+}
+
+static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lp8788_bl *bl = dev_get_drvdata(dev);
+ enum lp8788_bl_ctrl_mode mode = bl->mode;
+ char *strmode;
+
+ if (is_brightness_ctrl_by_pwm(mode))
+ strmode = "PWM based";
+ else if (is_brightness_ctrl_by_register(mode))
+ strmode = "Register based";
+ else
+ strmode = "Invalid mode";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
+}
+
+static DEVICE_ATTR(bl_ctl_mode, S_IRUGO, lp8788_get_bl_ctl_mode, NULL);
+
+static struct attribute *lp8788_attributes[] = {
+ &dev_attr_bl_ctl_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group lp8788_attr_group = {
+ .attrs = lp8788_attributes,
+};
+
+static int lp8788_backlight_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ struct lp8788_bl *bl;
+ int ret;
+
+ bl = devm_kzalloc(lp->dev, sizeof(struct lp8788_bl), GFP_KERNEL);
+ if (!bl)
+ return -ENOMEM;
+
+ bl->lp = lp;
+ if (lp->pdata)
+ bl->pdata = lp->pdata->bl_pdata;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = lp8788_backlight_configure(bl);
+ if (ret) {
+ dev_err(lp->dev, "backlight config err: %d\n", ret);
+ goto err_dev;
+ }
+
+ ret = lp8788_backlight_register(bl);
+ if (ret) {
+ dev_err(lp->dev, "register backlight err: %d\n", ret);
+ goto err_dev;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &lp8788_attr_group);
+ if (ret) {
+ dev_err(lp->dev, "register sysfs err: %d\n", ret);
+ goto err_sysfs;
+ }
+
+ backlight_update_status(bl->bl_dev);
+
+ return 0;
+
+err_sysfs:
+ lp8788_backlight_unregister(bl);
+err_dev:
+ return ret;
+}
+
+static int lp8788_backlight_remove(struct platform_device *pdev)
+{
+ struct lp8788_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl_dev = bl->bl_dev;
+
+ bl_dev->props.brightness = 0;
+ backlight_update_status(bl_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
+ lp8788_backlight_unregister(bl);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_bl_driver = {
+ .probe = lp8788_backlight_probe,
+ .remove = lp8788_backlight_remove,
+ .driver = {
+ .name = LP8788_DEV_BACKLIGHT,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(lp8788_bl_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 Backlight Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-backlight");
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 2c9bce050aa9..5ca11b066b7e 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,6 +101,29 @@ static const struct backlight_ops max8925_backlight_ops = {
.get_brightness = max8925_backlight_get_brightness,
};
+#ifdef CONFIG_OF
+static int max8925_backlight_dt_init(struct platform_device *pdev,
+ struct max8925_backlight_pdata *pdata)
+{
+ struct device_node *nproot = pdev->dev.parent->of_node, *np;
+ int dual_string;
+
+ if (!nproot)
+ return -ENODEV;
+ np = of_find_node_by_name(nproot, "backlight");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find backlight node\n");
+ return -ENODEV;
+ }
+
+ of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string);
+ pdata->dual_string = dual_string;
+ return 0;
+}
+#else
+#define max8925_backlight_dt_init(x, y) (-1)
+#endif
+
static int max8925_backlight_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -147,6 +170,13 @@ static int max8925_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
value = 0;
+ if (pdev->dev.parent->of_node && !pdata) {
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_backlight_pdata),
+ GFP_KERNEL);
+ max8925_backlight_dt_init(pdev, pdata);
+ }
+
if (pdata) {
if (pdata->lxw_scl)
value |= (1 << 7);
@@ -158,7 +188,6 @@ static int max8925_backlight_probe(struct platform_device *pdev)
ret = max8925_set_bits(chip->i2c, data->reg_mode_cntl, 0xfe, value);
if (ret < 0)
goto out_brt;
-
backlight_update_status(bl);
return 0;
out_brt:
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index f2f4c43d6e22..fa00304a63d8 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -41,10 +41,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
int brightness = bl->props.brightness;
int max = bl->props.max_brightness;
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
brightness = 0;
if (pb->notify)
@@ -135,12 +134,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
if (ret < 0)
return ret;
- if (value >= data->max_brightness) {
- dev_warn(dev, "invalid default brightness level: %u, using %u\n",
- value, data->max_brightness - 1);
- value = data->max_brightness - 1;
- }
-
data->dft_brightness = value;
data->max_brightness--;
}
@@ -249,6 +242,13 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
+ if (data->dft_brightness > data->max_brightness) {
+ dev_warn(&pdev->dev,
+ "invalid default brightness level: %u, using %u\n",
+ data->dft_brightness, data->max_brightness);
+ data->dft_brightness = data->max_brightness;
+ }
+
bl->props.brightness = data->dft_brightness;
backlight_update_status(bl);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index f8a61e210d2e..3cd675927826 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
return retval;
}
+static int do_fbcon_takeover(int show_logo)
+{
+ int err, i;
+
+ if (!num_registered_fb)
+ return -ENODEV;
+
+ if (!show_logo)
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = info_idx;
+
+ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (err) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = -1;
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
+ }
+
+ return err;
+}
+
static int fbcon_takeover(int show_logo)
{
int err, i;
@@ -815,6 +842,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
*
* Maps a virtual console @unit to a frame buffer device
* @newidx.
+ *
+ * This should be called with the console lock held.
*/
static int set_con2fb_map(int unit, int newidx, int user)
{
@@ -832,7 +861,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
info_idx = newidx;
- return fbcon_takeover(0);
+ return do_fbcon_takeover(0);
}
if (oldidx != -1)
@@ -840,7 +869,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
found = search_fb_in_map(newidx);
- console_lock();
con2fb_map[unit] = newidx;
if (!err && !found)
err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -867,7 +895,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- console_unlock();
return err;
}
@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
}
/* Setup default font */
- if (!p->fontdata) {
+ if (!p->fontdata && !vc->vc_font.data) {
if (!fontname[0] || !(font = find_font(fontname)))
font = get_default_font(info->var.xres,
info->var.yres,
@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
vc->vc_font.height = font->height;
vc->vc_font.data = (void *)(p->fontdata = font->data);
vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
+ } else {
+ p->fontdata = vc->vc_font.data;
}
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
-static void fbcon_free_font(struct display *p)
+static void fbcon_free_font(struct display *p, bool freefont)
{
- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
p->fontdata = NULL;
p->userfont = 0;
@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
struct fb_info *info;
struct fbcon_ops *ops;
int idx;
+ bool free_font = true;
- fbcon_free_font(p);
idx = con2fb_map[vc->vc_num];
if (idx == -1)
@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
+ if (info->flags & FBINFO_MISC_FIRMWARE)
+ free_font = false;
ops = info->fbcon_par;
if (!ops)
@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
ops->flags &= ~FBCON_FLAGS_INIT;
finished:
+ fbcon_free_font(p, free_font);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -2985,7 +3018,7 @@ static int fbcon_unbind(void)
{
int ret;
- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
if (!ret)
@@ -3000,6 +3033,7 @@ static inline int fbcon_unbind(void)
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+/* called with console_lock held */
static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
@@ -3026,6 +3060,7 @@ static int fbcon_fb_unbind(int idx)
return ret;
}
+/* called with console_lock held */
static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
@@ -3058,11 +3093,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
primary_device = -1;
if (!num_registered_fb)
- unregister_con_driver(&fb_con);
+ do_unregister_con_driver(&fb_con);
return 0;
}
+/* called with console_lock held */
static void fbcon_remap_all(int idx)
{
int i;
@@ -3107,6 +3143,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
}
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+/* called with console_lock held */
static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
@@ -3123,7 +3160,7 @@ static int fbcon_fb_registered(struct fb_info *info)
}
if (info_idx != -1)
- ret = fbcon_takeover(1);
+ ret = do_fbcon_takeover(1);
} else {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx)
@@ -3259,6 +3296,7 @@ static int fbcon_event_notify(struct notifier_block *self,
ret = fbcon_fb_unregistered(info);
break;
case FB_EVENT_SET_CONSOLE_MAP:
+ /* called with console lock held */
con2fb = event->data;
ret = set_con2fb_map(con2fb->console - 1,
con2fb->framebuffer, 1);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index d449a74d4a31..5855d17d19ac 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
unsigned short video_port_status = vga_video_port_reg + 6;
int font_select = 0x00, beg, i;
char *charmap;
-
+ bool clear_attribs = false;
if (vga_video_type != VIDEO_TYPE_EGAM) {
charmap = (char *) VGA_MAP_MEM(colourmap, 0);
beg = 0x0e;
@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
/* if 512 char mode is already enabled don't re-enable it. */
if ((set) && (ch512 != vga_512_chars)) {
- /* attribute controller */
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &vga_con)
- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
- }
vga_512_chars = ch512;
/* 256-char: enable intensity bit
512-char: disable intensity bit */
@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
it means, but it works, and it appears necessary */
inb_p(video_port_status);
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
+ clear_attribs = true;
}
raw_spin_unlock_irq(&vga_lock);
+
+ if (clear_attribs) {
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ struct vc_data *c = vc_cons[i].d;
+ if (c && c->vc_sw == &vga_con) {
+ /* force hi font mask to 0, so we always clear
+ the bit on either transition */
+ c->vc_hi_font_mask = 0x00;
+ clear_buffer_attributes(c);
+ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+ }
+ }
+ }
return 0;
}
diff --git a/drivers/video/display_timing.c b/drivers/video/display_timing.c
new file mode 100644
index 000000000000..5e1822cef571
--- /dev/null
+++ b/drivers/video/display_timing.c
@@ -0,0 +1,24 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+
+void display_timings_release(struct display_timings *disp)
+{
+ if (disp->timings) {
+ unsigned int i;
+
+ for (i = 0; i < disp->num_timings; i++)
+ kfree(disp->timings[i]);
+ kfree(disp->timings);
+ }
+ kfree(disp);
+}
+EXPORT_SYMBOL_GPL(display_timings_release);
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 88cad6b8b479..900aa4ecd617 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -69,7 +69,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct fb_info *info = file->private_data;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
return err;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 3ff0105a496a..7c254084b6a0 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -727,7 +727,7 @@ static const struct file_operations fb_proc_fops = {
*/
static struct fb_info *file_fb_info(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1177,8 +1177,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
event.data = &con2fb;
if (!lock_fb_info(info))
return -ENODEV;
+ console_lock();
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+ console_unlock();
unlock_fb_info(info);
break;
case FBIOBLANK:
@@ -1650,7 +1652,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
event.info = fb_info;
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ console_unlock();
unlock_fb_info(fb_info);
return 0;
}
@@ -1666,8 +1670,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (!lock_fb_info(fb_info))
return -ENODEV;
+ console_lock();
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+ console_unlock();
unlock_fb_info(fb_info);
if (ret)
@@ -1682,7 +1688,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
+ console_lock();
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+ console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
@@ -1853,11 +1861,8 @@ int fb_new_modelist(struct fb_info *info)
err = 1;
if (!list_empty(&info->modelist)) {
- if (!lock_fb_info(info))
- return -ENODEV;
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
- unlock_fb_info(info);
}
return err;
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index cef65574db6c..94ad0f71383c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -31,6 +31,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <video/edid.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
#ifdef CONFIG_PPC_OF
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -1373,6 +1375,98 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
kfree(timings);
return err;
}
+
+#if IS_ENABLED(CONFIG_VIDEOMODE)
+int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode)
+{
+ unsigned int htotal, vtotal;
+
+ fbmode->xres = vm->hactive;
+ fbmode->left_margin = vm->hback_porch;
+ fbmode->right_margin = vm->hfront_porch;
+ fbmode->hsync_len = vm->hsync_len;
+
+ fbmode->yres = vm->vactive;
+ fbmode->upper_margin = vm->vback_porch;
+ fbmode->lower_margin = vm->vfront_porch;
+ fbmode->vsync_len = vm->vsync_len;
+
+ /* prevent division by zero in KHZ2PICOS macro */
+ fbmode->pixclock = vm->pixelclock ?
+ KHZ2PICOS(vm->pixelclock / 1000) : 0;
+
+ fbmode->sync = 0;
+ fbmode->vmode = 0;
+ if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+	if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+		fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ fbmode->vmode |= FB_VMODE_INTERLACED;
+ if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ fbmode->vmode |= FB_VMODE_DOUBLE;
+ fbmode->flag = 0;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hback_porch +
+ vm->hsync_len;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
+ vm->vsync_len;
+ /* prevent division by zero */
+ if (htotal && vtotal) {
+ fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ /* a mode must have htotal and vtotal != 0 or it is invalid */
+ } else {
+ fbmode->refresh = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fb_videomode_from_videomode);
+#endif
+
+#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+static inline void dump_fb_videomode(const struct fb_videomode *m)
+{
+ pr_debug("fb_videomode = %ux%u@%uHz (%ukHz) %u %u %u %u %u %u %u %u %u\n",
+ m->xres, m->yres, m->refresh, m->pixclock, m->left_margin,
+ m->right_margin, m->upper_margin, m->lower_margin,
+ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
+}
+
+/**
+ * of_get_fb_videomode - get a fb_videomode from devicetree
+ * @np: device_node with the timing specification
+ * @fb: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * DESCRIPTION:
+ * This function is expensive and should only be used if only one mode is to be
+ * read from DT. To get multiple modes, start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb,
+ int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ fb_videomode_from_videomode(&vm, fb);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ dump_fb_videomode(fb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_fb_videomode);
+#endif
+
#else
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
{
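Below is a minimal usage sketch (not part of the patch) for the fb_videomode_from_videomode() helper added above. The timing numbers are illustrative CEA-861 1080p60 values and 'info' stands for an already-registered struct fb_info; both are assumptions. The refresh rate follows from the totals: 148500000 / ((1920+88+148+44) * (1080+4+36+5)) = 148500000 / (2200 * 1125) = 60 Hz.

	struct videomode vm = {
		/* illustrative 1920x1080@60 timings, not taken from the patch */
		.pixelclock   = 148500000,
		.hactive      = 1920,
		.hfront_porch = 88,
		.hback_porch  = 148,
		.hsync_len    = 44,
		.vactive      = 1080,
		.vfront_porch = 4,
		.vback_porch  = 36,
		.vsync_len    = 5,
	};
	struct fb_videomode fbmode;

	if (!fb_videomode_from_videomode(&vm, &fbmode)) {
		/* fbmode.refresh is 60; fbmode.pixclock equals KHZ2PICOS(148500) */
		fb_videomode_to_var(&info->var, &fbmode);	/* 'info' is assumed */
	}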
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index a55e3669d135..ef476b02fbe5 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,6 +177,8 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
console_lock();
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
@@ -188,6 +190,7 @@ static ssize_t store_modes(struct device *device,
fb_destroy_modelist(&old_list);
console_unlock();
+ unlock_fb_info(fb_info);
return 0;
}
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
new file mode 100644
index 000000000000..ab23c9b79143
--- /dev/null
+++ b/drivers/video/hdmi.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hdmi.h>
+#include <linux/string.h>
+
+static void hdmi_infoframe_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ ptr[3] = 256 - csum;
+}
+
+/**
+ * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AVI;
+ frame->version = 2;
+ frame->length = 13;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+
+/**
+ * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
+
+ if (frame->active_info_valid)
+ ptr[0] |= BIT(4);
+
+ if (frame->horizontal_bar_valid)
+ ptr[0] |= BIT(3);
+
+ if (frame->vertical_bar_valid)
+ ptr[0] |= BIT(2);
+
+ ptr[1] = ((frame->colorimetry & 0x3) << 6) |
+ ((frame->picture_aspect & 0x3) << 4) |
+ (frame->active_aspect & 0xf);
+
+ ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
+ ((frame->quantization_range & 0x3) << 2) |
+ (frame->nups & 0x3);
+
+ if (frame->itc)
+ ptr[2] |= BIT(7);
+
+ ptr[3] = frame->video_code & 0x7f;
+
+ ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
+ ((frame->content_type & 0x3) << 4) |
+ (frame->pixel_repeat & 0xf);
+
+ ptr[5] = frame->top_bar & 0xff;
+ ptr[6] = (frame->top_bar >> 8) & 0xff;
+ ptr[7] = frame->bottom_bar & 0xff;
+ ptr[8] = (frame->bottom_bar >> 8) & 0xff;
+ ptr[9] = frame->left_bar & 0xff;
+ ptr[10] = (frame->left_bar >> 8) & 0xff;
+ ptr[11] = frame->right_bar & 0xff;
+ ptr[12] = (frame->right_bar >> 8) & 0xff;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
+
+/**
+ * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @vendor: vendor string
+ * @product: product string
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_SPD;
+ frame->version = 1;
+ frame->length = 25;
+
+ strncpy(frame->vendor, vendor, sizeof(frame->vendor));
+ strncpy(frame->product, product, sizeof(frame->product));
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+
+/**
+ * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ memcpy(ptr, frame->vendor, sizeof(frame->vendor));
+ memcpy(ptr + 8, frame->product, sizeof(frame->product));
+
+ ptr[24] = frame->sdi;
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
+
+/**
+ * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
+ frame->version = 1;
+ frame->length = 10;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+
+/**
+ * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * @frame: HDMI audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ unsigned char channels;
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ ptr[2] = frame->coding_type_ext & 0x1f;
+ ptr[3] = frame->channel_allocation;
+ ptr[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ ptr[4] |= BIT(7);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
+
+/**
+ * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary
+ * buffer
+ * @frame: HDMI vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, length);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
+
+ hdmi_infoframe_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
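A minimal sketch of how a display driver might drive the AVI helpers added above; the colorspace enumerator and VIC value are illustrative, and write_infoframe_regs() is a hypothetical hardware callback, not part of this patch.

	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + 13];
	ssize_t len;

	hdmi_avi_infoframe_init(&frame);
	frame.colorspace = HDMI_COLORSPACE_RGB;	/* assumed enumerator from <linux/hdmi.h> */
	frame.video_code = 16;			/* VIC 16 = 1080p60, illustrative */

	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len > 0)
		write_infoframe_regs(buf, len);	/* hypothetical register write */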
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index f2566c19e71c..113c7876c855 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -261,7 +261,7 @@ int get_img(struct mdp_img *img, struct fb_info *info,
if (f.file == NULL)
return -1;
- if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ if (MAJOR(file_inode(f.file)->i_rdev) == FB_MAJOR) {
*start = info->fix.smem_start;
*len = info->fix.smem_len;
} else
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
new file mode 100644
index 000000000000..13ecd9897010
--- /dev/null
+++ b/drivers/video/of_display_timing.c
@@ -0,0 +1,239 @@
+/*
+ * OF helpers for parsing display timings
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * based on of_videomode.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+/**
+ * parse_timing_property - parse timing_entry from device_node
+ * @np: device_node with the property
+ * @name: name of the property
+ * @result: will be set to the return value
+ *
+ * DESCRIPTION:
+ * Every display_timing can be specified with either just the typical value or
+ * a range consisting of min/typ/max. This function helps to handle both cases.
+ **/
+static int parse_timing_property(struct device_node *np, const char *name,
+ struct timing_entry *result)
+{
+ struct property *prop;
+ int length, cells, ret;
+
+ prop = of_find_property(np, name, &length);
+ if (!prop) {
+ pr_err("%s: could not find property %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ cells = length / sizeof(u32);
+ if (cells == 1) {
+ ret = of_property_read_u32(np, name, &result->typ);
+ result->min = result->typ;
+ result->max = result->typ;
+ } else if (cells == 3) {
+ ret = of_property_read_u32_array(np, name, &result->min, cells);
+ } else {
+ pr_err("%s: illegal timing specification in %s\n",
+ of_node_full_name(np), name);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * of_get_display_timing - parse display_timing entry from device_node
+ * @np: device_node with the properties
+ **/
+static struct display_timing *of_get_display_timing(struct device_node *np)
+{
+ struct display_timing *dt;
+ u32 val = 0;
+ int ret = 0;
+
+ dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ if (!dt) {
+ pr_err("%s: could not allocate display_timing struct\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ ret |= parse_timing_property(np, "hback-porch", &dt->hback_porch);
+ ret |= parse_timing_property(np, "hfront-porch", &dt->hfront_porch);
+ ret |= parse_timing_property(np, "hactive", &dt->hactive);
+ ret |= parse_timing_property(np, "hsync-len", &dt->hsync_len);
+ ret |= parse_timing_property(np, "vback-porch", &dt->vback_porch);
+ ret |= parse_timing_property(np, "vfront-porch", &dt->vfront_porch);
+ ret |= parse_timing_property(np, "vactive", &dt->vactive);
+ ret |= parse_timing_property(np, "vsync-len", &dt->vsync_len);
+ ret |= parse_timing_property(np, "clock-frequency", &dt->pixelclock);
+
+ dt->dmt_flags = 0;
+ dt->data_flags = 0;
+ if (!of_property_read_u32(np, "vsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_VSYNC_HIGH :
+ VESA_DMT_VSYNC_LOW;
+ if (!of_property_read_u32(np, "hsync-active", &val))
+ dt->dmt_flags |= val ? VESA_DMT_HSYNC_HIGH :
+ VESA_DMT_HSYNC_LOW;
+ if (!of_property_read_u32(np, "de-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_DE_HIGH :
+ DISPLAY_FLAGS_DE_LOW;
+ if (!of_property_read_u32(np, "pixelclk-active", &val))
+ dt->data_flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+
+ if (of_property_read_bool(np, "interlaced"))
+ dt->data_flags |= DISPLAY_FLAGS_INTERLACED;
+ if (of_property_read_bool(np, "doublescan"))
+ dt->data_flags |= DISPLAY_FLAGS_DOUBLESCAN;
+
+ if (ret) {
+ pr_err("%s: error reading timing properties\n",
+ of_node_full_name(np));
+ kfree(dt);
+ return NULL;
+ }
+
+ return dt;
+}
+
+/**
+ * of_get_display_timings - parse all display_timing entries from a device_node
+ * @np: device_node with the subnodes
+ **/
+struct display_timings *of_get_display_timings(struct device_node *np)
+{
+ struct device_node *timings_np;
+ struct device_node *entry;
+ struct device_node *native_mode;
+ struct display_timings *disp;
+
+ if (!np) {
+ pr_err("%s: no devicenode given\n", of_node_full_name(np));
+ return NULL;
+ }
+
+ timings_np = of_find_node_by_name(np, "display-timings");
+ if (!timings_np) {
+ pr_err("%s: could not find display-timings node\n",
+ of_node_full_name(np));
+ return NULL;
+ }
+
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp) {
+ pr_err("%s: could not allocate struct disp'\n",
+ of_node_full_name(np));
+ goto dispfail;
+ }
+
+ entry = of_parse_phandle(timings_np, "native-mode", 0);
+ /* assume first child as native mode if none provided */
+ if (!entry)
+		entry = of_get_next_child(timings_np, NULL);
+ /* if there is no child, it is useless to go on */
+ if (!entry) {
+ pr_err("%s: no timing specifications given\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ pr_debug("%s: using %s as default timing\n",
+ of_node_full_name(np), entry->name);
+
+ native_mode = entry;
+
+ disp->num_timings = of_get_child_count(timings_np);
+ if (disp->num_timings == 0) {
+ /* should never happen, as entry was already found above */
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->timings = kzalloc(sizeof(struct display_timing *) *
+ disp->num_timings, GFP_KERNEL);
+ if (!disp->timings) {
+ pr_err("%s: could not allocate timings array\n",
+ of_node_full_name(np));
+ goto entryfail;
+ }
+
+ disp->num_timings = 0;
+ disp->native_mode = 0;
+
+ for_each_child_of_node(timings_np, entry) {
+ struct display_timing *dt;
+
+ dt = of_get_display_timing(entry);
+ if (!dt) {
+ /*
+ * to not encourage wrong devicetrees, fail in case of
+ * an error
+ */
+ pr_err("%s: error in timing %d\n",
+ of_node_full_name(np), disp->num_timings + 1);
+ goto timingfail;
+ }
+
+ if (native_mode == entry)
+ disp->native_mode = disp->num_timings;
+
+ disp->timings[disp->num_timings] = dt;
+ disp->num_timings++;
+ }
+ of_node_put(timings_np);
+ /*
+ * native_mode points to the device_node returned by of_parse_phandle
+ * therefore call of_node_put on it
+ */
+ of_node_put(native_mode);
+
+ pr_debug("%s: got %d timings. Using timing #%d as default\n",
+ of_node_full_name(np), disp->num_timings,
+ disp->native_mode + 1);
+
+ return disp;
+
+timingfail:
+	if (native_mode)
+		of_node_put(native_mode);
+	display_timings_release(disp);
+	goto dispfail;
+entryfail:
+	kfree(disp);
+dispfail:
+ of_node_put(timings_np);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(of_get_display_timings);
+
+/**
+ * of_display_timings_exist - check if a display-timings node is provided
+ * @np: device_node with the timing
+ **/
+int of_display_timings_exist(struct device_node *np)
+{
+ struct device_node *timings_np;
+
+ if (!np)
+ return -EINVAL;
+
+ timings_np = of_parse_phandle(np, "display-timings", 0);
+ if (!timings_np)
+ return -EINVAL;
+
+ of_node_put(timings_np);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(of_display_timings_exist);
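A rough usage sketch for the parser above, assuming 'np' is a panel device node carrying a display-timings subnode; setup_crtc_from_videomode() is a hypothetical driver helper and error handling is trimmed.

	struct display_timings *disp;
	struct videomode vm;

	disp = of_get_display_timings(np);
	if (!disp)
		return -EINVAL;

	/* convert the native timing (or any other index) into a videomode */
	if (!videomode_from_timing(disp, &vm, disp->native_mode))
		setup_crtc_from_videomode(&vm);	/* hypothetical */

	display_timings_release(disp);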
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
new file mode 100644
index 000000000000..5b8066cd397f
--- /dev/null
+++ b/drivers/video/of_videomode.c
@@ -0,0 +1,54 @@
+/*
+ * generic videomode helper
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/**
+ * of_get_videomode - get the videomode #<index> from devicetree
+ * @np: device node with the display_timings
+ * @vm: set to the return value
+ * @index: index into the list of display_timings
+ *	   (Set this to OF_USE_NATIVE_MODE to use whatever mode is
+ *	   specified as native mode in the DT.)
+ *
+ * DESCRIPTION:
+ * Get a list of all display timings and put the one
+ * specified by index into *vm. This function should only be used if
+ * only one videomode is to be retrieved. A driver that needs to work
+ * with multiple/all videomodes should work with
+ * of_get_display_timings instead.
+ **/
+int of_get_videomode(struct device_node *np, struct videomode *vm,
+ int index)
+{
+ struct display_timings *disp;
+ int ret;
+
+ disp = of_get_display_timings(np);
+ if (!disp) {
+ pr_err("%s: no timings specified\n", of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (index == OF_USE_NATIVE_MODE)
+ index = disp->native_mode;
+
+	ret = videomode_from_timing(disp, vm, index);
+
+	display_timings_release(disp);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_get_videomode);
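Drivers that only ever need one mode can use the one-shot helper above instead of holding the whole timing list; a minimal sketch, assuming 'np' is the panel's device node:

	struct videomode vm;
	int ret;

	ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);
	if (ret)
		return ret;

	/* vm.hactive, vm.vactive, vm.pixelclock etc. now describe the native mode */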
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 80233dae358a..22450908306c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -1467,10 +1467,10 @@ void viafb_set_vclock(u32 clk, int set_iga)
via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres)
{
- struct display_timing timing;
+ struct via_display_timing timing;
u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2;
timing.hor_addr = cxres;
@@ -1491,7 +1491,7 @@ struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga)
{
- struct display_timing crt_reg = var_to_timing(var,
+ struct via_display_timing crt_reg = var_to_timing(var,
cxres ? cxres : var->xres, cyres ? cyres : var->yres);
if (iga == IGA1)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index a8205754c736..3be073c58b03 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -637,7 +637,7 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
+struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres);
void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
u16 cxres, u16 cyres, int iga);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 980ee1b1dcf3..5d21ff436ec8 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -549,7 +549,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
u32 clock;
- struct display_timing timing;
+ struct via_display_timing timing;
struct fb_var_screeninfo panel_var;
const struct fb_videomode *mode_crt_table, *panel_crt_table;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 3158dfc90bed..65c65c611e0a 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -319,7 +319,7 @@ struct crt_mode_table {
int refresh_rate;
int h_sync_polarity;
int v_sync_polarity;
- struct display_timing crtc;
+ struct via_display_timing crtc;
};
struct io_reg {
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
index 0e431aee17bb..0b414b09b9b4 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/via/via_modesetting.c
@@ -30,9 +30,9 @@
#include "debug.h"
-void via_set_primary_timing(const struct display_timing *timing)
+void via_set_primary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total / 8 - 5;
raw.hor_addr = timing->hor_addr / 8 - 1;
@@ -88,9 +88,9 @@ void via_set_primary_timing(const struct display_timing *timing)
via_write_reg_mask(VIACR, 0x17, 0x80, 0x80);
}
-void via_set_secondary_timing(const struct display_timing *timing)
+void via_set_secondary_timing(const struct via_display_timing *timing)
{
- struct display_timing raw;
+ struct via_display_timing raw;
raw.hor_total = timing->hor_total - 1;
raw.hor_addr = timing->hor_addr - 1;
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
index 06e09fe351ae..f6a6503da3b3 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/via/via_modesetting.h
@@ -33,7 +33,7 @@
#define VIA_PITCH_MAX 0x3FF8
-struct display_timing {
+struct via_display_timing {
u16 hor_total;
u16 hor_addr;
u16 hor_blank_start;
@@ -49,8 +49,8 @@ struct display_timing {
};
-void via_set_primary_timing(const struct display_timing *timing);
-void via_set_secondary_timing(const struct display_timing *timing);
+void via_set_primary_timing(const struct via_display_timing *timing);
+void via_set_secondary_timing(const struct via_display_timing *timing);
void via_set_primary_address(u32 addr);
void via_set_secondary_address(u32 addr);
void via_set_primary_pitch(u32 pitch);
diff --git a/drivers/video/videomode.c b/drivers/video/videomode.c
new file mode 100644
index 000000000000..21c47a202afa
--- /dev/null
+++ b/drivers/video/videomode.c
@@ -0,0 +1,39 @@
+/*
+ * generic display timing functions
+ *
+ * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
+int videomode_from_timing(const struct display_timings *disp,
+ struct videomode *vm, unsigned int index)
+{
+ struct display_timing *dt;
+
+ dt = display_timings_get(disp, index);
+ if (!dt)
+ return -EINVAL;
+
+ vm->pixelclock = display_timing_get_value(&dt->pixelclock, TE_TYP);
+ vm->hactive = display_timing_get_value(&dt->hactive, TE_TYP);
+ vm->hfront_porch = display_timing_get_value(&dt->hfront_porch, TE_TYP);
+ vm->hback_porch = display_timing_get_value(&dt->hback_porch, TE_TYP);
+ vm->hsync_len = display_timing_get_value(&dt->hsync_len, TE_TYP);
+
+ vm->vactive = display_timing_get_value(&dt->vactive, TE_TYP);
+ vm->vfront_porch = display_timing_get_value(&dt->vfront_porch, TE_TYP);
+ vm->vback_porch = display_timing_get_value(&dt->vback_porch, TE_TYP);
+ vm->vsync_len = display_timing_get_value(&dt->vsync_len, TE_TYP);
+
+ vm->dmt_flags = dt->dmt_flags;
+ vm->data_flags = dt->data_flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videomode_from_timing);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 797e1c79a104..8dab163c5ef0 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -560,18 +560,7 @@ static struct virtio_driver virtio_balloon_driver = {
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_balloon_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_balloon_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 31f966f4d27f..1ba0d6831015 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -75,7 +75,7 @@
*
* 0x050 W QueueNotify Queue notifier
* 0x060 R InterruptStatus Interrupt status register
- * 0x060 W InterruptACK Interrupt acknowledge register
+ * 0x064 W InterruptACK Interrupt acknowledge register
* 0x070 RW Status Device status register
*
* 0x100+ RW Device-specific configuration space
@@ -423,7 +423,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
return vm_dev->pdev->name;
}
-static struct virtio_config_ops virtio_mmio_config_ops = {
+static const struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
.set = vm_set,
.get_status = vm_get_status,
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 0c142892c105..a7ce73029f59 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -91,9 +91,9 @@ struct virtio_pci_vq_info
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static struct pci_device_id virtio_pci_id_table[] = {
- { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { 0 },
+static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+ { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
@@ -652,7 +652,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-static struct virtio_config_ops virtio_pci_config_ops = {
+static const struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 372c8c0d54a0..950d354d50e2 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -157,9 +157,16 @@ static int mxc_w1_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id mxc_w1_dt_ids[] = {
+ { .compatible = "fsl,imx21-owire" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
+
static struct platform_driver mxc_w1_driver = {
.driver = {
- .name = "mxc_w1",
+ .name = "mxc_w1",
+ .of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
.remove = mxc_w1_remove,
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 67526690acbc..762561fbabbf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -17,11 +17,16 @@ config W1_SLAVE_SMEM
simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
config W1_SLAVE_DS2408
- tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
- help
- Say Y here if you want to use a 1-wire
+ tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
+ help
+ Say Y here if you want to use a 1-wire
+ DS2408 8-Channel Addressable Switch device support
- DS2408 8-Channel Addressable Switch device support
+config W1_SLAVE_DS2413
+ tristate "Dual Channel Addressable Switch 0x3a family support (DS2413)"
+ help
+ Say Y here if you want to use a 1-wire
+ DS2413 Dual Channel Addressable Switch device support
config W1_SLAVE_DS2423
tristate "Counter 1-wire device (DS2423)"
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 05188f6aab5a..06529f3157ab 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,7 +4,8 @@
obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
-obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
new file mode 100644
index 000000000000..829786252c6b
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -0,0 +1,177 @@
+/*
+ * w1_ds2413.c - w1 family 3a (DS2413) driver
+ * based on w1_ds2408.c by Jean-Francois Dagenais <dagenaisj@sonatest.com>
+ *
+ * Copyright (c) 2013 Mariusz Bialonczyk <manio@skyboo.net>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
+
+#define W1_F3A_RETRIES 3
+#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
+#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
+#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
+
+static ssize_t w1_f3a_read_state(
+ struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ dev_dbg(&sl->dev,
+ "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+ bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex locked");
+
+ if (w1_reset_select_slave(sl)) {
+ mutex_unlock(&sl->master->bus_mutex);
+ return -EIO;
+ }
+
+ w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
+ *buf = w1_read_8(sl->master);
+
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked");
+
+ /* check for correct complement */
+ if ((*buf & 0x0F) != ((~*buf >> 4) & 0x0F))
+ return -EIO;
+ else
+ return 1;
+}
+
+static ssize_t w1_f3a_write_output(
+ struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ u8 w1_buf[3];
+ unsigned int retries = W1_F3A_RETRIES;
+
+ if (count != 1 || off != 0)
+ return -EFAULT;
+
+ dev_dbg(&sl->dev, "locking mutex for write_output");
+ mutex_lock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex locked");
+
+ if (w1_reset_select_slave(sl))
+ goto error;
+
+ /* according to the DS2413 datasheet the most significant 6 bits
+ should be set to "1"s, so do it now */
+ *buf = *buf | 0xFC;
+
+ while (retries--) {
+ w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
+ w1_buf[1] = *buf;
+ w1_buf[2] = ~(*buf);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked, retries:%d", retries);
+ return 1;
+ }
+ if (w1_reset_resume_command(sl->master))
+ goto error;
+ }
+
+error:
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
+ return -EIO;
+}
+
+#define NB_SYSFS_BIN_FILES 2
+static struct bin_attribute w1_f3a_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+ {
+ .attr = {
+ .name = "state",
+ .mode = S_IRUGO,
+ },
+ .size = 1,
+ .read = w1_f3a_read_state,
+ },
+ {
+ .attr = {
+ .name = "output",
+ .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+ },
+ .size = 1,
+ .write = w1_f3a_write_output,
+ }
+};
+
+static int w1_f3a_add_slave(struct w1_slave *sl)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
+ err = sysfs_create_bin_file(
+ &sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+ if (err)
+ while (--i >= 0)
+ sysfs_remove_bin_file(&sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+ return err;
+}
+
+static void w1_f3a_remove_slave(struct w1_slave *sl)
+{
+ int i;
+ for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
+ sysfs_remove_bin_file(&sl->dev.kobj,
+ &(w1_f3a_sysfs_bin_files[i]));
+}
+
+static struct w1_family_ops w1_f3a_fops = {
+ .add_slave = w1_f3a_add_slave,
+ .remove_slave = w1_f3a_remove_slave,
+};
+
+static struct w1_family w1_family_3a = {
+ .fid = W1_FAMILY_DS2413,
+ .fops = &w1_f3a_fops,
+};
+
+static int __init w1_f3a_init(void)
+{
+ return w1_register_family(&w1_family_3a);
+}
+
+static void __exit w1_f3a_exit(void)
+{
+ w1_unregister_family(&w1_family_3a);
+}
+
+module_init(w1_f3a_init);
+module_exit(w1_f3a_exit);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index a1f0ce151d53..625dd08f775f 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -39,6 +39,7 @@
#define W1_EEPROM_DS2431 0x2D
#define W1_FAMILY_DS2760 0x30
#define W1_FAMILY_DS2780 0x32
+#define W1_FAMILY_DS2413 0x3A
#define W1_THERM_DS1825 0x3B
#define W1_FAMILY_DS2781 0x3D
#define W1_THERM_DS28EA00 0x42
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7f809fd4a57f..26e1fdbddf69 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -364,6 +364,18 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config UX500_WATCHDOG
+ tristate "ST-Ericsson Ux500 watchdog"
+ depends on MFD_DB8500_PRCMU
+ select WATCHDOG_CORE
+ default y
+ help
+	  Say Y here to include watchdog timer support for the watchdog
+	  provided by the PRCMU on ST-Ericsson Ux500 series platforms.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ux500_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 97bbdb3a4648..bec86ee6e9e3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 11d55ce5ca81..70387582843f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -411,7 +411,7 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
.identity = DRIVER_NAME,
};
void __user *argp = (void __user *)arg;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int index = iminor(inode) - WD0_MINOR;
struct cpwd *p = cpwd_device;
int setopt = 0;
@@ -499,7 +499,7 @@ static long cpwd_compat_ioctl(struct file *file, unsigned int cmd,
static ssize_t cpwd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cpwd *p = cpwd_device;
int index = iminor(inode);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
new file mode 100644
index 000000000000..a614d84121c3
--- /dev/null
+++ b/drivers/watchdog/ux500_wdt.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011-2013
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/ux500_wdt.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define WATCHDOG_TIMEOUT 600 /* 10 minutes */
+
+#define WATCHDOG_MIN 0
+#define WATCHDOG_MAX28 268435 /* 28 bit resolution in ms == 268435.455 s */
+#define WATCHDOG_MAX32 4294967 /* 32 bit resolution in ms == 4294967.295 s */
+
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int ux500_wdt_start(struct watchdog_device *wdd)
+{
+ return prcmu_enable_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_stop(struct watchdog_device *wdd)
+{
+ return prcmu_disable_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_keepalive(struct watchdog_device *wdd)
+{
+ return prcmu_kick_a9wdog(PRCMU_WDOG_ALL);
+}
+
+static int ux500_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ ux500_wdt_stop(wdd);
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(wdd);
+
+ return 0;
+}
+
+static const struct watchdog_info ux500_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Ux500 WDT",
+ .firmware_version = 1,
+};
+
+static const struct watchdog_ops ux500_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ux500_wdt_start,
+ .stop = ux500_wdt_stop,
+ .ping = ux500_wdt_keepalive,
+ .set_timeout = ux500_wdt_set_timeout,
+};
+
+static struct watchdog_device ux500_wdt = {
+ .info = &ux500_wdt_info,
+ .ops = &ux500_wdt_ops,
+ .min_timeout = WATCHDOG_MIN,
+ .max_timeout = WATCHDOG_MAX32,
+};
+
+static int ux500_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ux500_wdt_data *pdata = pdev->dev.platform_data;
+
+ if (pdata) {
+ if (pdata->timeout > 0)
+ timeout = pdata->timeout;
+ if (pdata->has_28_bits_resolution)
+ ux500_wdt.max_timeout = WATCHDOG_MAX28;
+ }
+
+ watchdog_set_nowayout(&ux500_wdt, nowayout);
+
+ /* disable auto off on sleep */
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, false);
+
+ /* set HW initial value */
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+
+ ret = watchdog_register_device(&ux500_wdt);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+}
+
+static int ux500_wdt_remove(struct platform_device *dev)
+{
+ watchdog_unregister_device(&ux500_wdt);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ux500_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ if (watchdog_active(&ux500_wdt)) {
+ ux500_wdt_stop(&ux500_wdt);
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, true);
+
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(&ux500_wdt);
+ }
+ return 0;
+}
+
+static int ux500_wdt_resume(struct platform_device *pdev)
+{
+ if (watchdog_active(&ux500_wdt)) {
+ ux500_wdt_stop(&ux500_wdt);
+ prcmu_config_a9wdog(PRCMU_WDOG_CPU1, false);
+
+ prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
+ ux500_wdt_start(&ux500_wdt);
+ }
+ return 0;
+}
+#else
+#define ux500_wdt_suspend NULL
+#define ux500_wdt_resume NULL
+#endif
+
+static struct platform_driver ux500_wdt_driver = {
+ .probe = ux500_wdt_probe,
+ .remove = ux500_wdt_remove,
+ .suspend = ux500_wdt_suspend,
+ .resume = ux500_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ux500_wdt",
+ },
+};
+
+module_platform_driver(ux500_wdt_driver);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("Ux500 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:ux500_wdt");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index cabfa97f4674..5a32232cf7c1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -180,6 +180,40 @@ config XEN_PRIVCMD
depends on XEN
default m
+config XEN_STUB
+ bool "Xen stub drivers"
+ depends on XEN && X86_64
+ default n
+ help
+	  Allow the kernel to install stub drivers, to reserve space for Xen
+	  drivers (i.e. memory hotplug and cpu hotplug) and to block native
+	  drivers from loading, so that the real Xen drivers can be modular.
+
+ To enable Xen features like cpu and memory hotplug, select Y here.
+
+config XEN_ACPI_HOTPLUG_MEMORY
+ tristate "Xen ACPI memory hotplug"
+ depends on XEN_DOM0 && XEN_STUB && ACPI
+ default n
+ help
+ This is Xen ACPI memory hotplug.
+
+	  Currently Xen only supports ACPI memory hot-add. If you want
+	  to hot-add memory at runtime (the hot-added memory cannot be
+	  removed until the machine is stopped), select Y/M here; otherwise select N.
+
+config XEN_ACPI_HOTPLUG_CPU
+ tristate "Xen ACPI cpu hotplug"
+ depends on XEN_DOM0 && XEN_STUB && ACPI
+ select ACPI_CONTAINER
+ default n
+ help
+	  Xen ACPI cpu enumeration and hotplug support.
+
+	  For hotplug, Xen currently only supports ACPI cpu hot-add.
+	  If you want to hot-add cpus at runtime (a hot-added cpu cannot
+	  be removed until the machine is stopped), select Y/M here.
+
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fb213cf81a7b..eabd0ee1c2bc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
+obj-$(CONFIG_XEN_STUB) += xen-stub.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
+obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 22f77c5f6012..d17aa41a9041 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -120,7 +120,22 @@ static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);
-static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in a evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
@@ -294,9 +309,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static inline unsigned long active_evtchns(unsigned int cpu,
- struct shared_info *sh,
- unsigned int idx)
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+ struct shared_info *sh,
+ unsigned int idx)
{
return sh->evtchn_pending[idx] &
per_cpu(cpu_evtchn_mask, cpu)[idx] &
@@ -312,8 +327,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
- set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
+ clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
+ set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
info_for_irq(irq)->cpu = cpu;
}
@@ -339,19 +354,19 @@ static void init_evtchn_cpu_bindings(void)
static inline void clear_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_clear_bit(port, &s->evtchn_pending[0]);
+ sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}
static inline void set_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, &s->evtchn_pending[0]);
+ sync_set_bit(port, BM(&s->evtchn_pending[0]));
}
static inline int test_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- return sync_test_bit(port, &s->evtchn_pending[0]);
+ return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}
@@ -375,7 +390,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, &s->evtchn_mask[0]);
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
}
static void unmask_evtchn(int port)
@@ -389,7 +404,7 @@ static void unmask_evtchn(int port)
if (unlikely((cpu != cpu_from_evtchn(port))))
do_hypercall = 1;
else
- evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+ evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
if (unlikely(evtchn_pending && xen_hvm_domain()))
do_hypercall = 1;
@@ -403,7 +418,7 @@ static void unmask_evtchn(int port)
} else {
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
- sync_clear_bit(port, &s->evtchn_mask[0]);
+ sync_clear_bit(port, BM(&s->evtchn_mask[0]));
/*
* The following is basically the equivalent of
@@ -411,8 +426,8 @@ static void unmask_evtchn(int port)
* the interrupt edge' if the channel is masked.
*/
if (evtchn_pending &&
- !sync_test_and_set_bit(port / BITS_PER_LONG,
- &vcpu_info->evtchn_pending_sel))
+ !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+ BM(&vcpu_info->evtchn_pending_sel)))
vcpu_info->evtchn_upcall_pending = 1;
}
@@ -1189,7 +1204,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
- unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+ xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
@@ -1205,7 +1220,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
pending = (get_irq_regs() && i == cpu)
? xen_irqs_disabled(get_irq_regs())
: v->evtchn_upcall_mask;
- printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+ printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
pending, v->evtchn_upcall_pending,
(int)(sizeof(v->evtchn_pending_sel)*2),
v->evtchn_pending_sel);
@@ -1214,49 +1229,52 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
printk("\npending:\n ");
for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
- printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)sizeof(sh->evtchn_pending[0])*2,
sh->evtchn_pending[i],
i % 8 == 0 ? "\n " : " ");
printk("\nglobal mask:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*lx%s",
+ printk("%0*"PRI_xen_ulong"%s",
(int)(sizeof(sh->evtchn_mask[0])*2),
sh->evtchn_mask[i],
i % 8 == 0 ? "\n " : " ");
printk("\nglobally unmasked:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
i % 8 == 0 ? "\n " : " ");
printk("\nlocal cpu%d mask:\n ", cpu);
- for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
- printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+ for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
cpu_evtchn[i],
i % 8 == 0 ? "\n " : " ");
printk("\nlocally unmasked:\n ");
for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
- unsigned long pending = sh->evtchn_pending[i]
+ xen_ulong_t pending = sh->evtchn_pending[i]
& ~sh->evtchn_mask[i]
& cpu_evtchn[i];
- printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
pending, i % 8 == 0 ? "\n " : " ");
}
printk("\npending list:\n");
for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (sync_test_bit(i, sh->evtchn_pending)) {
- int word_idx = i / BITS_PER_LONG;
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
printk(" %d: event %d -> irq %d%s%s%s\n",
cpu_from_evtchn(i), i,
evtchn_to_irq[i],
- sync_test_bit(word_idx, &v->evtchn_pending_sel)
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
? "" : " l2-clear",
- !sync_test_bit(i, sh->evtchn_mask)
+ !sync_test_bit(i, BM(sh->evtchn_mask))
? "" : " globally-masked",
- sync_test_bit(i, cpu_evtchn)
+ sync_test_bit(i, BM(cpu_evtchn))
? "" : " locally-masked");
}
}
@@ -1273,7 +1291,7 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx);
/*
* Mask out the i least significant bits of w
*/
-#define MASK_LSBS(w, i) (w & ((~0UL) << i))
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
/*
* Search the CPUs pending events bitmasks. For each one found, map
@@ -1295,18 +1313,19 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
do {
- unsigned long pending_words;
+ xen_ulong_t pending_words;
vcpu_info->evtchn_upcall_pending = 0;
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
-#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
- /* Clear master flag /before/ clearing selector flag. */
- wmb();
-#endif
- pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ /*
+ * Master flag must be cleared /before/ clearing
+ * selector flag. xchg_xen_ulong must contain an
+ * appropriate barrier.
+ */
+ pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
start_word_idx = __this_cpu_read(current_word_idx);
start_bit_idx = __this_cpu_read(current_bit_idx);
@@ -1314,8 +1333,8 @@ static void __xen_evtchn_do_upcall(void)
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
- unsigned long pending_bits;
- unsigned long words;
+ xen_ulong_t pending_bits;
+ xen_ulong_t words;
words = MASK_LSBS(pending_words, word_idx);
@@ -1327,7 +1346,7 @@ static void __xen_evtchn_do_upcall(void)
bit_idx = 0;
continue;
}
- word_idx = __ffs(words);
+ word_idx = EVTCHN_FIRST_BIT(words);
pending_bits = active_evtchns(cpu, s, word_idx);
bit_idx = 0; /* usually scan entire word from start */
@@ -1342,7 +1361,7 @@ static void __xen_evtchn_do_upcall(void)
}
do {
- unsigned long bits;
+ xen_ulong_t bits;
int port, irq;
struct irq_desc *desc;
@@ -1352,10 +1371,10 @@ static void __xen_evtchn_do_upcall(void)
if (bits == 0)
break;
- bit_idx = __ffs(bits);
+ bit_idx = EVTCHN_FIRST_BIT(bits);
/* Process port. */
- port = (word_idx * BITS_PER_LONG) + bit_idx;
+ port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
irq = evtchn_to_irq[port];
if (irq != -1) {
@@ -1364,12 +1383,12 @@ static void __xen_evtchn_do_upcall(void)
generic_handle_irq_desc(irq, desc);
}
- bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+ bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
/* Next caller starts at last processed + 1 */
__this_cpu_write(current_word_idx,
bit_idx ? word_idx :
- (word_idx+1) % BITS_PER_LONG);
+ (word_idx+1) % BITS_PER_EVTCHN_WORD);
__this_cpu_write(current_bit_idx, bit_idx);
} while (bit_idx != 0);
@@ -1377,7 +1396,7 @@ static void __xen_evtchn_do_upcall(void)
if ((word_idx != start_word_idx) || (i != 0))
pending_words &= ~(1UL << word_idx);
- word_idx = (word_idx + 1) % BITS_PER_LONG;
+ word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
}
BUG_ON(!irqs_disabled());
@@ -1487,8 +1506,8 @@ int resend_irq_on_evtchn(unsigned int irq)
if (!VALID_EVTCHN(evtchn))
return 1;
- masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
- sync_set_bit(evtchn, s->evtchn_pending);
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+ sync_set_bit(evtchn, BM(s->evtchn_pending));
if (!masked)
unmask_evtchn(evtchn);
@@ -1536,8 +1555,8 @@ static int retrigger_dynirq(struct irq_data *data)
if (VALID_EVTCHN(evtchn)) {
int masked;
- masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
- sync_set_bit(evtchn, sh->evtchn_pending);
+ masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
+ sync_set_bit(evtchn, BM(sh->evtchn_pending));
if (!masked)
unmask_evtchn(evtchn);
ret = 1;
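
A minimal userspace sketch of the BM()/EVTCHN_FIRST_BIT pattern introduced above, assuming a little-endian layout; xen_ulong_t, BM() and find_first_bit() are re-implemented locally as stand-ins for the kernel definitions. The point is that a bit scan over a word wider than unsigned long must go through a bitmap helper that is told the real word width (BITS_PER_EVTCHN_WORD), rather than __ffs() on a single long:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_ulong_t;			/* may be wider than unsigned long */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t) * 8)
#define BM(x) ((const unsigned long *)(x))	/* view the word as a bitmap */

/* Simplified find_first_bit(): scan nbits bits of an unsigned-long bitmap
 * and return the index of the first set bit, or nbits if none is set. */
static unsigned int find_first_bit(const unsigned long *bm, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (bm[i / (8 * sizeof(unsigned long))] &
		    (1UL << (i % (8 * sizeof(unsigned long)))))
			return i;
	return nbits;
}

#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

int main(void)
{
	xen_ulong_t pending = (xen_ulong_t)1 << 40;	/* bit above bit 31 */

	/* Prints 40 even where unsigned long is only 32 bits wide. */
	printf("first pending bit: %u\n", EVTCHN_FIRST_BIT(pending));
	return 0;
}
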
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index b1f60a0c0bea..45c8efaa6b3e 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
u->name, (void *)(unsigned long)port);
if (rc >= 0)
rc = evtchn_make_refcounted(port);
+ else {
+ /* bind failed; close the port now */
+ struct evtchn_close close;
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+ set_port_user(port, NULL);
+ }
return rc;
}
@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
int irq = irq_from_evtchn(port);
+ BUG_ON(irq < 0);
+
unbind_from_irqhandler(irq, (void *)(unsigned long)port);
set_port_user(port, NULL);
@@ -534,10 +544,10 @@ static int __init evtchn_init(void)
spin_lock_init(&port_user_lock);
- /* Create '/dev/misc/evtchn'. */
+ /* Create '/dev/xen/evtchn'. */
err = misc_register(&evtchn_miscdev);
if (err != 0) {
- printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ printk(KERN_ERR "Could not register /dev/xen/evtchn\n");
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 157c0ccda3ef..04c1b2d9b775 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1147,7 +1147,7 @@ static int gnttab_setup(void)
return gnttab_map(0, nr_grant_frames - 1);
if (gnttab_shared.addr == NULL) {
- gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+ gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
PAGE_SIZE * max_nr_gframes);
if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 5a27a4599a4a..6536d5ab1697 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* Sync with the Xen hypervisor after a cpu has been hot-added */
+void xen_pcpu_hotplug_sync(void)
+{
+ schedule_work(&xen_pcpu_work);
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
+
+/*
+ * For a cpu presented by the hypervisor, return its logical cpu id;
+ * for a cpu not presented by the hypervisor, return -ENODEV.
+ */
+int xen_pcpu_id(uint32_t acpi_id)
+{
+ int cpu_id = 0, max_id = 0;
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_get_cpuinfo;
+ while (cpu_id <= max_id) {
+ op.u.pcpu_info.xen_cpuid = cpu_id;
+ if (HYPERVISOR_dom0_op(&op)) {
+ cpu_id++;
+ continue;
+ }
+
+ if (acpi_id == op.u.pcpu_info.acpi_id)
+ return cpu_id;
+ if (op.u.pcpu_info.max_present > max_id)
+ max_id = op.u.pcpu_info.max_present;
+ cpu_id++;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(xen_pcpu_id);
+
static int __init xen_pcpu_init(void)
{
int irq, ret;
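
The xen_pcpu_id() loop above learns its upper bound as it goes: every successful XENPF_get_cpuinfo query may raise max_id via max_present, while a failed query just skips that cpu_id. A small userspace sketch of the same loop shape, with a hypothetical query_cpu() mock standing in for the hypercall:

#include <stdint.h>
#include <stdio.h>

struct cpuinfo { uint32_t acpi_id; int max_present; };

/* Hypothetical stand-in for HYPERVISOR_dom0_op(XENPF_get_cpuinfo):
 * returns nonzero when the hypervisor has no such physical cpu. */
static int query_cpu(int cpu_id, struct cpuinfo *ci)
{
	static const struct cpuinfo table[] = {
		{ 0, 3 }, { 1, 3 }, { 4, 3 }, { 6, 3 },
	};

	if (cpu_id < 0 || cpu_id > 3)
		return -1;
	*ci = table[cpu_id];
	return 0;
}

/* Same structure as xen_pcpu_id(): logical cpu id, or -1 if not found. */
static int pcpu_id(uint32_t acpi_id)
{
	int cpu_id = 0, max_id = 0;
	struct cpuinfo ci;

	while (cpu_id <= max_id) {
		if (query_cpu(cpu_id, &ci)) {
			cpu_id++;
			continue;
		}
		if (ci.acpi_id == acpi_id)
			return cpu_id;
		if (ci.max_present > max_id)
			max_id = ci.max_present;
		cpu_id++;
	}
	return -1;
}

int main(void)
{
	printf("acpi_id 4 -> cpu %d\n", pcpu_id(4));	/* prints 2 */
	printf("acpi_id 9 -> cpu %d\n", pcpu_id(9));	/* prints -1 */
	return 0;
}
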
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 144564e5eb29..3ee836d42581 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void)
if (old_ops.init != NULL)
s = " (WARNING: frontswap_ops overridden)";
printk(KERN_INFO "frontswap enabled, RAM provided by "
- "Xen Transcendent Memory\n");
+ "Xen Transcendent Memory%s\n", s);
}
#endif
#ifdef CONFIG_CLEANCACHE
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
new file mode 100644
index 000000000000..757827966e34
--- /dev/null
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+
+#define PREFIX "ACPI:xen_cpu_hotplug:"
+
+#define INSTALL_NOTIFY_HANDLER 0
+#define UNINSTALL_NOTIFY_HANDLER 1
+
+static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
+
+/* --------------------------------------------------------------------------
+ Driver Interface
+-------------------------------------------------------------------------- */
+
+static int xen_acpi_processor_enable(struct acpi_device *device)
+{
+ acpi_status status = 0;
+ unsigned long long value;
+ union acpi_object object = { 0 };
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+ struct acpi_processor *pr;
+
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ pr_err(PREFIX "Cannot find driver data\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err(PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ pr->acpi_id = object.processor.proc_id;
+ } else {
+ /* Declared with "Device" statement; match _UID */
+ status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
+ pr_err(PREFIX "Evaluating processor _UID\n");
+ return -ENODEV;
+ }
+
+ pr->acpi_id = value;
+ }
+
+ pr->id = xen_pcpu_id(pr->acpi_id);
+
+ if ((int)pr->id < 0)
+ /* This cpu is not present in the hypervisor; try to hot-add it */
+ if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
+ pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __cpuinit xen_acpi_processor_add(struct acpi_device *device)
+{
+ int ret;
+ struct acpi_processor *pr;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+ if (!pr)
+ return -ENOMEM;
+
+ pr->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
+ device->driver_data = pr;
+
+ ret = xen_acpi_processor_enable(device);
+ if (ret)
+ pr_err(PREFIX "Error when enabling Xen processor\n");
+
+ return ret;
+}
+
+static int xen_acpi_processor_remove(struct acpi_device *device)
+{
+ struct acpi_processor *pr;
+
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
+ return -EINVAL;
+
+ kfree(pr);
+ return 0;
+}
+
+/*--------------------------------------------------------------
+ ACPI processor hotplug support
+--------------------------------------------------------------*/
+
+static int is_processor_present(acpi_handle handle)
+{
+ acpi_status status;
+ unsigned long long sta = 0;
+
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+
+ if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
+ return 1;
+
+ /*
+ * _STA is mandatory for a processor that supports hot plug
+ */
+ if (status == AE_NOT_FOUND)
+ pr_info(PREFIX "Processor does not support hot plug\n");
+ else
+ pr_info(PREFIX "Processor Device is not present");
+ return 0;
+}
+
+static int xen_apic_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct acpi_madt_local_apic *lapic;
+ int apic_id;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+ return -EINVAL;
+
+ if (!buffer.length || !buffer.pointer)
+ return -EINVAL;
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER ||
+ obj->buffer.length < sizeof(*lapic)) {
+ kfree(buffer.pointer);
+ return -EINVAL;
+ }
+
+ lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
+
+ if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
+ !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
+ kfree(buffer.pointer);
+ return -EINVAL;
+ }
+
+ apic_id = (uint32_t)lapic->id;
+ kfree(buffer.pointer);
+ buffer.length = ACPI_ALLOCATE_BUFFER;
+ buffer.pointer = NULL;
+
+ return apic_id;
+}
+
+static int xen_hotadd_cpu(struct acpi_processor *pr)
+{
+ int cpu_id, apic_id, pxm;
+ struct xen_platform_op op;
+
+ apic_id = xen_apic_id(pr->handle);
+ if (apic_id < 0) {
+ pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
+ pxm = xen_acpi_get_pxm(pr->handle);
+ if (pxm < 0) {
+ pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
+ pr->acpi_id);
+ return pxm;
+ }
+
+ op.cmd = XENPF_cpu_hotadd;
+ op.u.cpu_add.apic_id = apic_id;
+ op.u.cpu_add.acpi_id = pr->acpi_id;
+ op.u.cpu_add.pxm = pxm;
+
+ cpu_id = HYPERVISOR_dom0_op(&op);
+ if (cpu_id < 0)
+ pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
+ pr->acpi_id);
+
+ return cpu_id;
+}
+
+static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
+{
+ if (!is_processor_present(pr->handle))
+ return AE_ERROR;
+
+ pr->id = xen_hotadd_cpu(pr);
+ if ((int)pr->id < 0)
+ return AE_ERROR;
+
+ /*
+ * Sync with the Xen hypervisor so that the new /sys/.../xen_cpuX
+ * interface is provided after the cpu has been hot-added.
+ */
+ xen_pcpu_hotplug_sync();
+
+ return AE_OK;
+}
+
+static
+int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
+{
+ acpi_handle phandle;
+ struct acpi_device *pdev;
+
+ if (acpi_get_parent(handle, &phandle))
+ return -ENODEV;
+
+ if (acpi_bus_get_device(phandle, &pdev))
+ return -ENODEV;
+
+ if (acpi_bus_scan(handle))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_processor_device_remove(struct acpi_device *device)
+{
+ pr_debug(PREFIX "Xen does not support CPU hotremove\n");
+
+ return -ENOSYS;
+}
+
+static void acpi_processor_hotplug_notify(acpi_handle handle,
+ u32 event, void *data)
+{
+ struct acpi_processor *pr;
+ struct acpi_device *device = NULL;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ int result;
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Processor driver received %s event\n",
+ (event == ACPI_NOTIFY_BUS_CHECK) ?
+ "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
+
+ if (!is_processor_present(handle))
+ break;
+
+ if (!acpi_bus_get_device(handle, &device))
+ break;
+
+ result = acpi_processor_device_add(handle, &device);
+ if (result) {
+ pr_err(PREFIX "Unable to add the device\n");
+ break;
+ }
+
+ ost_code = ACPI_OST_SC_SUCCESS;
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "received ACPI_NOTIFY_EJECT_REQUEST\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+ pr_err(PREFIX "Device don't exist, dropping EJECT\n");
+ break;
+ }
+ pr = acpi_driver_data(device);
+ if (!pr) {
+ pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
+ break;
+ }
+
+ /*
+ * TBD: implement acpi_processor_device_remove if Xen supports
+ * CPU hotremove in the future.
+ */
+ acpi_processor_device_remove(device);
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+
+ /* non-hotplug event; possibly handled by another handler */
+ return;
+ }
+
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ return;
+}
+
+static acpi_status is_processor_device(acpi_handle handle)
+{
+ struct acpi_device_info *info;
+ char *hid;
+ acpi_status status;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (info->type == ACPI_TYPE_PROCESSOR) {
+ kfree(info);
+ return AE_OK; /* found a processor object */
+ }
+
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ hid = info->hardware_id.string;
+ if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ kfree(info);
+ return AE_OK; /* found a processor device object */
+}
+
+static acpi_status
+processor_walk_namespace_cb(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ acpi_status status;
+ int *action = context;
+
+ status = is_processor_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* not a processor; continue to walk */
+
+ switch (*action) {
+ case INSTALL_NOTIFY_HANDLER:
+ acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify,
+ NULL);
+ break;
+ case UNINSTALL_NOTIFY_HANDLER:
+ acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_processor_hotplug_notify);
+ break;
+ default:
+ break;
+ }
+
+ /* found a processor; skip walking underneath */
+ return AE_CTRL_DEPTH;
+}
+
+static
+void acpi_processor_install_hotplug_notify(void)
+{
+ int action = INSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_ANY,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, NULL, &action, NULL);
+}
+
+static
+void acpi_processor_uninstall_hotplug_notify(void)
+{
+ int action = UNINSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_ANY,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ processor_walk_namespace_cb, NULL, &action, NULL);
+}
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, 0},
+ {ACPI_PROCESSOR_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+static struct acpi_driver xen_acpi_processor_driver = {
+ .name = "processor",
+ .class = ACPI_PROCESSOR_CLASS,
+ .ids = processor_device_ids,
+ .ops = {
+ .add = xen_acpi_processor_add,
+ .remove = xen_acpi_processor_remove,
+ },
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+ int result = 0;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* unregister the stub, which is only used to reserve driver space */
+ xen_stub_processor_exit();
+
+ result = acpi_bus_register_driver(&xen_acpi_processor_driver);
+ if (result < 0) {
+ xen_stub_processor_init();
+ return result;
+ }
+
+ acpi_processor_install_hotplug_notify();
+ return 0;
+}
+
+static void __exit xen_acpi_processor_exit(void)
+{
+ if (!xen_initial_domain())
+ return;
+
+ acpi_processor_uninstall_hotplug_notify();
+
+ acpi_bus_unregister_driver(&xen_acpi_processor_driver);
+
+ /*
+ * Have the stub reserve the space again to prevent any chance
+ * of the native driver loading.
+ */
+ xen_stub_processor_init();
+ return;
+}
+
+module_init(xen_acpi_processor_init);
+module_exit(xen_acpi_processor_exit);
+ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
+MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
+MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
new file mode 100644
index 000000000000..853b12dba5bb
--- /dev/null
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+
+#define PREFIX "ACPI:xen_memory_hotplug:"
+
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr; /* Memory Range start physical addr */
+ u64 length; /* Memory Range length */
+ unsigned short caching; /* memory cache attribute */
+ unsigned short write_protect; /* memory read/write attribute */
+ /* copied from the buffer returned by _CRS */
+ unsigned int enabled:1;
+};
+
+struct acpi_memory_device {
+ struct acpi_device *device;
+ struct list_head res_list;
+};
+
+static bool acpi_hotmem_initialized __read_mostly;
+
+static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
+{
+ int rc;
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_mem_hotadd;
+ op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
+ op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
+ op.u.mem_add.pxm = pxm;
+
+ rc = HYPERVISOR_dom0_op(&op);
+ if (rc)
+ pr_err(PREFIX "Xen Hotplug Memory Add failed on "
+ "0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
+ (unsigned long)info->start_addr,
+ (unsigned long)(info->start_addr + info->length),
+ pxm, rc);
+
+ return rc;
+}
+
+static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
+{
+ int pxm, result;
+ int num_enabled = 0;
+ struct acpi_memory_info *info;
+
+ if (!mem_device)
+ return -EINVAL;
+
+ pxm = xen_acpi_get_pxm(mem_device->device->handle);
+ if (pxm < 0)
+ return pxm;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (info->enabled) { /* just sanity check...*/
+ num_enabled++;
+ continue;
+ }
+
+ if (!info->length)
+ continue;
+
+ result = xen_hotadd_memory(pxm, info);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+
+ if (!num_enabled)
+ return -ENODEV;
+
+ return 0;
+}
+
+static acpi_status
+acpi_memory_get_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_memory_device *mem_device = context;
+ struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *new;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(resource, &address64);
+ if (ACPI_FAILURE(status) ||
+ (address64.resource_type != ACPI_MEMORY_RANGE))
+ return AE_OK;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if ((info->caching == address64.info.mem.caching) &&
+ (info->write_protect == address64.info.mem.write_protect) &&
+ (info->start_addr + info->length == address64.minimum)) {
+ info->length += address64.address_length;
+ return AE_OK;
+ }
+ }
+
+ new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
+ if (!new)
+ return AE_ERROR;
+
+ INIT_LIST_HEAD(&new->list);
+ new->caching = address64.info.mem.caching;
+ new->write_protect = address64.info.mem.write_protect;
+ new->start_addr = address64.minimum;
+ new->length = address64.address_length;
+ list_add_tail(&new->list, &mem_device->res_list);
+
+ return AE_OK;
+}
+
+static int
+acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
+{
+ acpi_status status;
+ struct acpi_memory_info *info, *n;
+
+ if (!list_empty(&mem_device->res_list))
+ return 0;
+
+ status = acpi_walk_resources(mem_device->device->handle,
+ METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);
+
+ if (ACPI_FAILURE(status)) {
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+acpi_memory_get_device(acpi_handle handle,
+ struct acpi_memory_device **mem_device)
+{
+ acpi_status status;
+ acpi_handle phandle;
+ struct acpi_device *device = NULL;
+ struct acpi_device *pdevice = NULL;
+ int result;
+
+ if (!acpi_bus_get_device(handle, &device) && device)
+ goto end;
+
+ status = acpi_get_parent(handle, &phandle);
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "Cannot find acpi parent\n");
+ return -EINVAL;
+ }
+
+ /* Get the parent device */
+ result = acpi_bus_get_device(phandle, &pdevice);
+ if (result) {
+ pr_warn(PREFIX "Cannot get acpi bus device\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now add the notified device. This creates the acpi_device
+ * and invokes its .add function.
+ */
+ result = acpi_bus_scan(handle);
+ if (result) {
+ pr_warn(PREFIX "Cannot add acpi bus\n");
+ return -EINVAL;
+ }
+
+end:
+ *mem_device = acpi_driver_data(device);
+ if (!(*mem_device)) {
+ pr_err(PREFIX "Driver data not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
+{
+ unsigned long long current_status;
+
+ /* Get device present/absent information from the _STA */
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+ "_STA", NULL, &current_status)))
+ return -ENODEV;
+ /*
+ * Check for device status. Device should be
+ * present/enabled/functioning.
+ */
+ if (!((current_status & ACPI_STA_DEVICE_PRESENT)
+ && (current_status & ACPI_STA_DEVICE_ENABLED)
+ && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
+{
+ pr_debug(PREFIX "Xen does not support memory hotremove\n");
+
+ return -ENOSYS;
+}
+
+static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_memory_device *mem_device;
+ struct acpi_device *device;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived BUS CHECK notification for device\n"));
+ /* Fall Through */
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ if (event == ACPI_NOTIFY_DEVICE_CHECK)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived DEVICE CHECK notification for device\n"));
+
+ if (acpi_memory_get_device(handle, &mem_device)) {
+ pr_err(PREFIX "Cannot find driver data\n");
+ break;
+ }
+
+ ost_code = ACPI_OST_SC_SUCCESS;
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "\nReceived EJECT REQUEST notification for device\n"));
+
+ if (acpi_bus_get_device(handle, &device)) {
+ pr_err(PREFIX "Device doesn't exist\n");
+ break;
+ }
+ mem_device = acpi_driver_data(device);
+ if (!mem_device) {
+ pr_err(PREFIX "Driver Data is NULL\n");
+ break;
+ }
+
+ /*
+ * TBD: implement acpi_memory_disable_device and invoke
+ * acpi_bus_remove if Xen supports hotremove in the future
+ */
+ acpi_memory_disable_device(mem_device);
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+ /* non-hotplug event; possibly handled by another handler */
+ return;
+ }
+
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ return;
+}
+
+static int xen_acpi_memory_device_add(struct acpi_device *device)
+{
+ int result;
+ struct acpi_memory_device *mem_device = NULL;
+
+
+ if (!device)
+ return -EINVAL;
+
+ mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
+ if (!mem_device)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mem_device->res_list);
+ mem_device->device = device;
+ sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
+ sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
+ device->driver_data = mem_device;
+
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
+ kfree(mem_device);
+ return result;
+ }
+
+ /*
+ * For memory devices present at boot, the early boot code has already
+ * recognized the memory area via EFI/E820. If the DSDT shows these
+ * memory devices at boot, hotplug is not necessary for them.
+ * For memory devices hot-added at runtime, a hypercall to the Xen
+ * hypervisor is needed to add the memory.
+ */
+ if (!acpi_hotmem_initialized)
+ return 0;
+
+ if (!acpi_memory_check_device(mem_device))
+ result = xen_acpi_memory_enable_device(mem_device);
+
+ return result;
+}
+
+static int xen_acpi_memory_device_remove(struct acpi_device *device)
+{
+ struct acpi_memory_device *mem_device = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ mem_device = acpi_driver_data(device);
+ kfree(mem_device);
+
+ return 0;
+}
+
+/*
+ * Helper function to check for memory device
+ */
+static acpi_status is_memory_device(acpi_handle handle)
+{
+ char *hardware_id;
+ acpi_status status;
+ struct acpi_device_info *info;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (!(info->valid & ACPI_VALID_HID)) {
+ kfree(info);
+ return AE_ERROR;
+ }
+
+ hardware_id = info->hardware_id.string;
+ if ((hardware_id == NULL) ||
+ (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
+ status = AE_ERROR;
+
+ kfree(info);
+ return status;
+}
+
+static acpi_status
+acpi_memory_register_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify, NULL);
+ /* continue */
+ return AE_OK;
+}
+
+static acpi_status
+acpi_memory_deregister_notify_handler(acpi_handle handle,
+ u32 level, void *ctxt, void **retv)
+{
+ acpi_status status;
+
+ status = is_memory_device(handle);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* continue */
+
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ acpi_memory_device_notify);
+
+ return AE_OK; /* continue */
+}
+
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, memory_device_ids);
+
+static struct acpi_driver xen_acpi_memory_device_driver = {
+ .name = "acpi_memhotplug",
+ .class = ACPI_MEMORY_DEVICE_CLASS,
+ .ids = memory_device_ids,
+ .ops = {
+ .add = xen_acpi_memory_device_add,
+ .remove = xen_acpi_memory_device_remove,
+ },
+};
+
+static int __init xen_acpi_memory_device_init(void)
+{
+ int result;
+ acpi_status status;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* unregister the stub, which is only used to reserve driver space */
+ xen_stub_memory_device_exit();
+
+ result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
+ if (result < 0) {
+ xen_stub_memory_device_init();
+ return -ENODEV;
+ }
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_register_notify_handler,
+ NULL, NULL, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "walk_namespace failed\n");
+ acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
+ xen_stub_memory_device_init();
+ return -ENODEV;
+ }
+
+ acpi_hotmem_initialized = true;
+ return 0;
+}
+
+static void __exit xen_acpi_memory_device_exit(void)
+{
+ acpi_status status;
+
+ if (!xen_initial_domain())
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_memory_deregister_notify_handler,
+ NULL, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ pr_warn(PREFIX "walk_namespace failed\n");
+
+ acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
+
+ /*
+ * Have the stub reserve the space again to prevent any chance
+ * of the native driver loading.
+ */
+ xen_stub_memory_device_init();
+ return;
+}
+
+module_init(xen_acpi_memory_device_init);
+module_exit(xen_acpi_memory_device_exit);
+ACPI_MODULE_NAME("xen-acpi-memhotplug");
+MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
+MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
new file mode 100644
index 000000000000..d85e411cbf89
--- /dev/null
+++ b/drivers/xen/xen-stub.c
@@ -0,0 +1,101 @@
+/*
+ * xen-stub.c - stub drivers to reserve space for Xen
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Liu Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * Copyright (C) 2012 Oracle Inc
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <xen/acpi.h>
+
+#ifdef CONFIG_ACPI
+
+/*--------------------------------------------
+ stub driver for Xen memory hotplug
+--------------------------------------------*/
+
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver xen_stub_memory_device_driver = {
+ /* same name as the native memory driver, to block the native driver from loading */
+ .name = "acpi_memhotplug",
+ .class = ACPI_MEMORY_DEVICE_CLASS,
+ .ids = memory_device_ids,
+};
+
+int xen_stub_memory_device_init(void)
+{
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* just reserve space for Xen and block the native driver from loading */
+ return acpi_bus_register_driver(&xen_stub_memory_device_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
+subsys_initcall(xen_stub_memory_device_init);
+
+void xen_stub_memory_device_exit(void)
+{
+ acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
+
+
+/*--------------------------------------------
+ stub driver for Xen cpu hotplug
+--------------------------------------------*/
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, 0},
+ {ACPI_PROCESSOR_DEVICE_HID, 0},
+ {"", 0},
+};
+
+static struct acpi_driver xen_stub_processor_driver = {
+ /* same name as the native processor driver, to block the native driver from loading */
+ .name = "processor",
+ .class = ACPI_PROCESSOR_CLASS,
+ .ids = processor_device_ids,
+};
+
+int xen_stub_processor_init(void)
+{
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* just reserve space for Xen and block the native driver from loading */
+ return acpi_bus_register_driver(&xen_stub_processor_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_processor_init);
+subsys_initcall(xen_stub_processor_init);
+
+void xen_stub_processor_exit(void)
+{
+ acpi_bus_unregister_driver(&xen_stub_processor_driver);
+}
+EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
+
+#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 038b71dbf03c..3325884c693f 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -769,7 +769,7 @@ static int __init xenbus_init(void)
goto out_error;
xen_store_mfn = (unsigned long)v;
xen_store_interface =
- ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+ xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
break;
default:
pr_warn("Xenstore state unknown\n");
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 459b9ac45cf5..ec0abb6df3c3 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -24,47 +24,6 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
-static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
-{
- struct inode *ret = new_inode(sb);
-
- if (ret) {
- ret->i_mode = mode;
- ret->i_uid = GLOBAL_ROOT_UID;
- ret->i_gid = GLOBAL_ROOT_GID;
- ret->i_blocks = 0;
- ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
- }
- return ret;
-}
-
-static struct dentry *xenfs_create_file(struct super_block *sb,
- struct dentry *parent,
- const char *name,
- const struct file_operations *fops,
- void *data,
- int mode)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- return NULL;
-
- inode = xenfs_make_inode(sb, S_IFREG | mode);
- if (!inode) {
- dput(dentry);
- return NULL;
- }
-
- inode->i_fop = fops;
- inode->i_private = data;
-
- d_add(dentry, inode);
- return dentry;
-}
-
static ssize_t capabilities_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
@@ -84,26 +43,23 @@ static const struct file_operations capabilities_file_ops = {
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
- [1] = {},
- { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+ [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
- int rc;
- rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
- if (rc < 0)
- return rc;
-
- if (xen_initial_domain()) {
- xenfs_create_file(sb, sb->s_root, "xsd_kva",
- &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
- xenfs_create_file(sb, sb->s_root, "xsd_port",
- &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
- }
+ static struct tree_descr xenfs_init_files[] = {
+ [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+ { "capabilities", &capabilities_file_ops, S_IRUGO },
+ { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
+ { "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR},
+ { "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR},
+ {""},
+ };
- return rc;
+ return simple_fill_super(sb, XENFS_SUPER_MAGIC,
+ xen_initial_domain() ? xenfs_init_files : xenfs_files);
}
static struct dentry *xenfs_mount(struct file_system_type *fs_type,
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 988880dcee75..73b33837e12c 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -22,7 +22,7 @@ static loff_t
proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
{
loff_t new = -1;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
mutex_lock(&inode->i_mutex);
switch (whence) {
@@ -47,7 +47,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct inode *ino = file->f_path.dentry->d_inode;
+ struct inode *ino = file_inode(file);
struct proc_dir_entry *dp = PDE(ino);
struct zorro_dev *z = dp->data;
struct ConfigDev cd;
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 15b679166201..7af425f53bee 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -23,6 +23,7 @@
#include "acl.h"
#include "v9fs.h"
#include "v9fs_vfs.h"
+#include "fid.h"
static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
{
@@ -113,16 +114,12 @@ struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type)
}
-static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
+static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl)
{
int retval;
char *name;
size_t size;
void *buffer;
- struct inode *inode = dentry->d_inode;
-
- set_cached_acl(inode, type, acl);
-
if (!acl)
return 0;
@@ -144,17 +141,16 @@ static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
default:
BUG();
}
- retval = v9fs_xattr_set(dentry, name, buffer, size, 0);
+ retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0);
err_free_out:
kfree(buffer);
return retval;
}
-int v9fs_acl_chmod(struct dentry *dentry)
+int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid)
{
int retval = 0;
struct posix_acl *acl;
- struct inode *inode = dentry->d_inode;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
@@ -163,25 +159,30 @@ int v9fs_acl_chmod(struct dentry *dentry)
retval = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (retval)
return retval;
- retval = v9fs_set_acl(dentry, ACL_TYPE_ACCESS, acl);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+ retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
}
return retval;
}
-int v9fs_set_create_acl(struct dentry *dentry,
- struct posix_acl **dpacl, struct posix_acl **pacl)
+int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid,
+ struct posix_acl *dacl, struct posix_acl *acl)
{
- if (dentry) {
- v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, *dpacl);
- v9fs_set_acl(dentry, ACL_TYPE_ACCESS, *pacl);
- }
- posix_acl_release(*dpacl);
- posix_acl_release(*pacl);
- *dpacl = *pacl = NULL;
+ set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+ v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl);
+ v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
return 0;
}
+void v9fs_put_acl(struct posix_acl *dacl,
+ struct posix_acl *acl)
+{
+ posix_acl_release(dacl);
+ posix_acl_release(acl);
+}
+
int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl)
{
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index 559556411965..e4f7e882272b 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -17,27 +17,33 @@
#ifdef CONFIG_9P_FS_POSIX_ACL
extern int v9fs_get_acl(struct inode *, struct p9_fid *);
extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type);
-extern int v9fs_acl_chmod(struct dentry *);
-extern int v9fs_set_create_acl(struct dentry *,
- struct posix_acl **, struct posix_acl **);
+extern int v9fs_acl_chmod(struct inode *, struct p9_fid *);
+extern int v9fs_set_create_acl(struct inode *, struct p9_fid *,
+ struct posix_acl *, struct posix_acl *);
extern int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl);
+extern void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl);
#else
#define v9fs_iop_get_acl NULL
static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
{
return 0;
}
-static inline int v9fs_acl_chmod(struct dentry *dentry)
+static inline int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid)
{
return 0;
}
-static inline int v9fs_set_create_acl(struct dentry *dentry,
- struct posix_acl **dpacl,
- struct posix_acl **pacl)
+static inline int v9fs_set_create_acl(struct inode *inode,
+ struct p9_fid *fid,
+ struct posix_acl *dacl,
+ struct posix_acl *acl)
{
return 0;
}
+static inline void v9fs_put_acl(struct posix_acl *dacl,
+ struct posix_acl *acl)
+{
+}
static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl,
struct posix_acl **pacl)
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index da8eefbe830d..afd4724b2d92 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -74,19 +74,20 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
*
*/
-static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
+static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
{
struct v9fs_dentry *dent;
struct p9_fid *fid, *ret;
p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
- dentry->d_name.name, dentry, uid, any);
+ dentry->d_name.name, dentry, from_kuid(&init_user_ns, uid),
+ any);
dent = (struct v9fs_dentry *) dentry->d_fsdata;
ret = NULL;
if (dent) {
spin_lock(&dent->lock);
list_for_each_entry(fid, &dent->fidlist, dlist) {
- if (any || fid->uid == uid) {
+ if (any || uid_eq(fid->uid, uid)) {
ret = fid;
break;
}
@@ -126,7 +127,7 @@ err_out:
}
static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
- uid_t uid, int any)
+ kuid_t uid, int any)
{
struct dentry *ds;
char **wnames, *uname;
@@ -233,7 +234,7 @@ err_out:
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
{
- uid_t uid;
+ kuid_t uid;
int any, access;
struct v9fs_session_info *v9ses;
@@ -253,7 +254,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
break;
default:
- uid = ~0;
+ uid = INVALID_UID;
any = 0;
break;
}
@@ -272,7 +273,7 @@ struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
return ret;
}
-static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
+static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid)
{
struct p9_fid *fid, *ret;
@@ -289,7 +290,7 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
int err;
struct p9_fid *fid;
- fid = v9fs_fid_clone_with_uid(dentry, 0);
+ fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID);
if (IS_ERR(fid))
goto error_out;
/*
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index d934f04e7736..58e6cbce4156 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -161,7 +161,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
ret = r;
continue;
}
- v9ses->dfltuid = option;
+ v9ses->dfltuid = make_kuid(current_user_ns(), option);
+ if (!uid_valid(v9ses->dfltuid)) {
+ p9_debug(P9_DEBUG_ERROR,
+ "uid field, but not a uid?\n");
+ ret = -EINVAL;
+ continue;
+ }
break;
case Opt_dfltgid:
r = match_int(&args[0], &option);
@@ -171,7 +177,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
ret = r;
continue;
}
- v9ses->dfltgid = option;
+ v9ses->dfltgid = make_kgid(current_user_ns(), option);
+ if (!gid_valid(v9ses->dfltgid)) {
+ p9_debug(P9_DEBUG_ERROR,
+ "gid field, but not a gid?\n");
+ ret = -EINVAL;
+ continue;
+ }
break;
case Opt_afid:
r = match_int(&args[0], &option);
@@ -248,8 +260,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
else if (strcmp(s, "client") == 0) {
v9ses->flags |= V9FS_ACCESS_CLIENT;
} else {
+ uid_t uid;
v9ses->flags |= V9FS_ACCESS_SINGLE;
- v9ses->uid = simple_strtoul(s, &e, 10);
+ uid = simple_strtoul(s, &e, 10);
if (*e != '\0') {
ret = -EINVAL;
pr_info("Unknown access argument %s\n",
@@ -257,6 +270,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
kfree(s);
goto free_and_return;
}
+ v9ses->uid = make_kuid(current_user_ns(), uid);
+ if (!uid_valid(v9ses->uid)) {
+ ret = -EINVAL;
+ pr_info("Uknown uid %s\n", s);
+ kfree(s);
+ goto free_and_return;
+ }
}
kfree(s);
@@ -319,7 +339,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
- v9ses->uid = ~0;
+ v9ses->uid = INVALID_UID;
v9ses->dfltuid = V9FS_DEFUID;
v9ses->dfltgid = V9FS_DEFGID;
@@ -364,7 +384,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags &= ~V9FS_ACCESS_MASK;
v9ses->flags |= V9FS_ACCESS_ANY;
- v9ses->uid = ~0;
+ v9ses->uid = INVALID_UID;
}
if (!v9fs_proto_dotl(v9ses) ||
!((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
@@ -375,7 +395,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags &= ~V9FS_ACL_MASK;
}
- fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, ~0,
+ fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, INVALID_UID,
v9ses->aname);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
@@ -387,7 +407,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_SINGLE)
fid->uid = v9ses->uid;
else
- fid->uid = ~0;
+ fid->uid = INVALID_UID;
#ifdef CONFIG_9P_FSCACHE
/* register the session for caching */
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 34c59f14a1c9..a8e127c89627 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -109,9 +109,9 @@ struct v9fs_session_info {
char *uname; /* user name to mount as */
char *aname; /* name of remote hierarchy being mounted */
unsigned int maxdata; /* max data for client interface */
- unsigned int dfltuid; /* default uid/muid for legacy support */
- unsigned int dfltgid; /* default gid for legacy support */
- u32 uid; /* if ACCESS_SINGLE, the uid that has access */
+ kuid_t dfltuid; /* default uid/muid for legacy support */
+ kgid_t dfltgid; /* default gid for legacy support */
+ kuid_t uid; /* if ACCESS_SINGLE, the uid that has access */
struct p9_client *clnt; /* 9p client */
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
@@ -165,8 +165,8 @@ extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
#define V9FS_PORT 564
#define V9FS_DEFUSER "nobody"
#define V9FS_DEFANAME ""
-#define V9FS_DEFUID (-2)
-#define V9FS_DEFGID (-2)
+#define V9FS_DEFUID KUIDT_INIT(-2)
+#define V9FS_DEFGID KGIDT_INIT(-2)
static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
{
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 64600b5d0522..9ad68628522c 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -137,6 +137,7 @@ out_valid:
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
+ .d_weak_revalidate = v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index c921ac92ea4c..d384a8b77ee8 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -129,7 +129,7 @@ out_error:
static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
int res = 0;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
@@ -298,7 +298,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int ret = -ENOLCK;
p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
@@ -334,7 +334,7 @@ out_err:
static int v9fs_file_flock_dotl(struct file *filp, int cmd,
struct file_lock *fl)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int ret = -ENOLCK;
p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
@@ -525,7 +525,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (!count)
goto out;
- retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+ retval = v9fs_file_write_internal(file_inode(filp),
filp->private_data,
data, count, &origin, 1);
/* update offset on successful write */
@@ -600,7 +600,7 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct v9fs_inode *v9inode;
struct page *page = vmf->page;
struct file *filp = vma->vm_file;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 57d017ac68e4..b5340c829de1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -225,9 +225,9 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
wstat->uid = NULL;
wstat->gid = NULL;
wstat->muid = NULL;
- wstat->n_uid = ~0;
- wstat->n_gid = ~0;
- wstat->n_muid = ~0;
+ wstat->n_uid = INVALID_UID;
+ wstat->n_gid = INVALID_GID;
+ wstat->n_muid = INVALID_UID;
wstat->extension = NULL;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 8d24ad66dfb8..61e4fa70a6fa 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -57,7 +57,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
* group of the new file system object.
*/
-static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
{
BUG_ON(dir_inode == NULL);
@@ -245,7 +245,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
int *opened)
{
int err = 0;
- gid_t gid;
+ kgid_t gid;
umode_t mode;
char *name = NULL;
struct p9_qid qid;
@@ -330,14 +330,14 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
+
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
d_instantiate(dentry, inode);
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, &dacl, &pacl);
-
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (v9ses->cache && !v9inode->writeback_fid &&
@@ -369,6 +369,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
#endif
*opened |= FILE_CREATED;
out:
+ v9fs_put_acl(dacl, pacl);
dput(res);
return err;
@@ -378,7 +379,6 @@ error:
err_clunk_old_fid:
if (ofid)
p9_client_clunk(ofid);
- v9fs_set_create_acl(NULL, &dacl, &pacl);
goto out;
}
@@ -396,7 +396,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
- gid_t gid;
+ kgid_t gid;
char *name;
umode_t mode;
struct inode *inode;
@@ -435,17 +435,17 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
if (err < 0)
goto error;
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -456,6 +456,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
fid = NULL;
} else {
@@ -469,16 +470,15 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
err = PTR_ERR(inode);
goto error;
}
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
}
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, &dacl, &pacl);
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
if (fid)
p9_client_clunk(fid);
- v9fs_set_create_acl(NULL, &dacl, &pacl);
+ v9fs_put_acl(dacl, pacl);
return err;
}
@@ -572,10 +572,11 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
+ struct inode *inode = dentry->d_inode;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = inode_change_ok(dentry->d_inode, iattr);
+ retval = inode_change_ok(inode, iattr);
if (retval)
return retval;
@@ -596,23 +597,23 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
return PTR_ERR(fid);
/* Write all dirty data */
- if (S_ISREG(dentry->d_inode->i_mode))
- filemap_write_and_wait(dentry->d_inode->i_mapping);
+ if (S_ISREG(inode->i_mode))
+ filemap_write_and_wait(inode->i_mapping);
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode))
- truncate_setsize(dentry->d_inode, iattr->ia_size);
+ iattr->ia_size != i_size_read(inode))
+ truncate_setsize(inode, iattr->ia_size);
- v9fs_invalidate_inode_attr(dentry->d_inode);
- setattr_copy(dentry->d_inode, iattr);
- mark_inode_dirty(dentry->d_inode);
+ v9fs_invalidate_inode_attr(inode);
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE) {
/* We also want to update ACL when we update mode bits */
- retval = v9fs_acl_chmod(dentry);
+ retval = v9fs_acl_chmod(inode, fid);
if (retval < 0)
return retval;
}
@@ -697,7 +698,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
const char *symname)
{
int err;
- gid_t gid;
+ kgid_t gid;
char *name;
struct p9_qid qid;
struct inode *inode;
@@ -837,7 +838,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev)
{
int err;
- gid_t gid;
+ kgid_t gid;
char *name;
umode_t mode;
struct v9fs_session_info *v9ses;
@@ -880,17 +881,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
goto error;
v9fs_invalidate_inode_attr(dir);
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -898,6 +899,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
err);
goto error;
}
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
@@ -913,14 +915,13 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
err = PTR_ERR(inode);
goto error;
}
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
}
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, &dacl, &pacl);
error:
if (fid)
p9_client_clunk(fid);
- v9fs_set_create_acl(NULL, &dacl, &pacl);
+ v9fs_put_acl(dacl, pacl);
return err;
}
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 137d50396898..91dad63e5a2d 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -363,5 +363,5 @@ struct file_system_type v9fs_fs_type = {
.mount = v9fs_mount,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
};
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 29653b70a9c3..c45e016b190f 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -111,19 +111,26 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
int v9fs_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t value_len, int flags)
{
+ struct p9_fid *fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+ return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
+}
+
+int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+ const void *value, size_t value_len, int flags)
+{
u64 offset = 0;
int retval, msize, write_count;
- struct p9_fid *fid = NULL;
p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
name, value_len, flags);
- fid = v9fs_fid_clone(dentry);
- if (IS_ERR(fid)) {
- retval = PTR_ERR(fid);
- fid = NULL;
- goto error;
- }
+ /* Clone it */
+ fid = p9_client_walk(fid, 0, NULL, 1);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
/*
* On success fid points to xattr
*/
@@ -131,7 +138,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
if (retval < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
retval);
- goto error;
+ p9_client_clunk(fid);
+ return retval;
}
msize = fid->clnt->msize;
while (value_len) {
@@ -144,17 +152,12 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
if (write_count < 0) {
/* error in xattr write */
retval = write_count;
- goto error;
+ break;
}
offset += write_count;
value_len -= write_count;
}
- /* Total read xattr bytes */
- retval = offset;
-error:
- if (fid)
- retval = p9_client_clunk(fid);
- return retval;
+ return p9_client_clunk(fid);
}
ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
index eaa837c53bd5..eec348a3df71 100644
--- a/fs/9p/xattr.h
+++ b/fs/9p/xattr.h
@@ -27,6 +27,8 @@ extern ssize_t v9fs_fid_xattr_get(struct p9_fid *, const char *,
void *, size_t);
extern ssize_t v9fs_xattr_get(struct dentry *, const char *,
void *, size_t);
+extern int v9fs_fid_xattr_set(struct p9_fid *, const char *,
+ const void *, size_t, int);
extern int v9fs_xattr_set(struct dentry *, const char *,
const void *, size_t, int);
extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t);
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index b3be2e7c5643..9cf874ce8336 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -19,7 +19,7 @@ static DEFINE_RWLOCK(adfs_dir_lock);
static int
adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
struct object_info obj;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index eb82ee53ee0b..d9a43674cb94 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -125,9 +125,8 @@ static void
affs_fix_dcache(struct inode *inode, u32 entry_ino)
{
struct dentry *dentry;
- struct hlist_node *p;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (entry_ino == (u32)(long)dentry->d_fsdata) {
dentry->d_fsdata = (void *)inode->i_ino;
break;
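
This hunk, together with the fs/aio.c change further down, follows the hlist_for_each_entry()/hlist_for_each_entry_rcu() interface change that drops the separate struct hlist_node cursor argument. A minimal sketch of the new three-argument form, with hypothetical types:

        #include <linux/list.h>

        struct item {
                int id;
                struct hlist_node node;
        };

        /* Sketch: cursor, head, member -- no extra struct hlist_node *pos. */
        static struct item *find_item(struct hlist_head *head, int id)
        {
                struct item *it;

                hlist_for_each_entry(it, head, node)
                        if (it->id == id)
                                return it;
                return NULL;
        }
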
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index 8ca8f3a55599..fd11a6d608ee 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -42,7 +42,7 @@ const struct inode_operations affs_dir_inode_operations = {
static int
affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct buffer_head *dir_bh;
struct buffer_head *fh_bh;
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index c548aa346f0d..3c462ff6db63 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -119,8 +119,8 @@ struct afs_file_status {
u64 size; /* file size */
afs_dataversion_t data_version; /* current data version */
u32 author; /* author ID */
- u32 owner; /* owner ID */
- u32 group; /* group ID */
+ kuid_t owner; /* owner ID */
+ kgid_t group; /* group ID */
afs_access_t caller_access; /* access rights for authenticated caller */
afs_access_t anon_access; /* access rights for unauthenticated caller */
umode_t mode; /* UNIX mode */
@@ -133,13 +133,6 @@ struct afs_file_status {
/*
* AFS file status change request
*/
-struct afs_store_status {
- u32 mask; /* which bits of the struct are set */
- u32 mtime_client; /* last time client changed data */
- u32 owner; /* owner ID */
- u32 group; /* group ID */
- umode_t mode; /* UNIX mode */
-};
#define AFS_SET_MTIME 0x01 /* set the mtime */
#define AFS_SET_OWNER 0x02 /* set the owner ID */
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index db477906ba4f..7a465ed04444 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -393,12 +393,12 @@ static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
int ret;
_enter("{%Ld,{%lu}}",
- file->f_pos, file->f_path.dentry->d_inode->i_ino);
+ file->f_pos, file_inode(file)->i_ino);
ASSERT(file->private_data != NULL);
fpos = file->f_pos;
- ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos,
+ ret = afs_dir_iterate(file_inode(file), &fpos,
cookie, filldir, file->private_data);
file->f_pos = fpos;
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 757d664575dd..2497bf306c70 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -514,7 +514,7 @@ error:
*/
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -537,7 +537,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
*/
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x}",
vnode->fid.vid, vnode->fid.vnode, cmd,
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index b960ff05ea0b..c2e930ec2888 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -42,6 +42,8 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
umode_t mode;
u64 data_version, size;
u32 changed = 0; /* becomes non-zero if ctime-type changes seen */
+ kuid_t owner;
+ kgid_t group;
#define EXTRACT(DST) \
do { \
@@ -56,7 +58,9 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
size = ntohl(*bp++);
data_version = ntohl(*bp++);
EXTRACT(status->author);
- EXTRACT(status->owner);
+ owner = make_kuid(&init_user_ns, ntohl(*bp++));
+ changed |= !uid_eq(owner, status->owner);
+ status->owner = owner;
EXTRACT(status->caller_access); /* call ticket dependent */
EXTRACT(status->anon_access);
EXTRACT(status->mode);
@@ -65,7 +69,9 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
bp++; /* seg size */
status->mtime_client = ntohl(*bp++);
status->mtime_server = ntohl(*bp++);
- EXTRACT(status->group);
+ group = make_kgid(&init_user_ns, ntohl(*bp++));
+ changed |= !gid_eq(group, status->group);
+ status->group = group;
bp++; /* sync counter */
data_version |= (u64) ntohl(*bp++) << 32;
EXTRACT(status->lock_count);
@@ -181,12 +187,12 @@ static void xdr_encode_AFS_StoreStatus(__be32 **_bp, struct iattr *attr)
if (attr->ia_valid & ATTR_UID) {
mask |= AFS_SET_OWNER;
- owner = attr->ia_uid;
+ owner = from_kuid(&init_user_ns, attr->ia_uid);
}
if (attr->ia_valid & ATTR_GID) {
mask |= AFS_SET_GROUP;
- group = attr->ia_gid;
+ group = from_kgid(&init_user_ns, attr->ia_gid);
}
if (attr->ia_valid & ATTR_MODE) {
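
The xdr decode/encode changes above translate numeric IDs on the wire into kernel-internal kuid_t/kgid_t values and back, always through the init_user_ns mapping. A minimal sketch of the helpers being relied on (hypothetical wrappers, not from the patch):

        #include <linux/uidgid.h>

        /* Sketch: wire uid -> kuid_t on decode, kuid_t -> wire uid on encode. */
        static bool wire_uid_is_root(u32 wire_uid)
        {
                kuid_t kuid = make_kuid(&init_user_ns, wire_uid);

                return uid_eq(kuid, GLOBAL_ROOT_UID);
        }

        static u32 uid_to_wire(kuid_t kuid)
        {
                return from_kuid(&init_user_ns, kuid);
        }

uid_eq()/gid_eq() are the comparison helpers for these opaque types, as used by the changed-bit tracking in the decode path above.
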
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 95cffd38239f..789bc253b5f6 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -69,7 +69,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
set_nlink(inode, vnode->status.nlink);
inode->i_uid = vnode->status.owner;
- inode->i_gid = 0;
+ inode->i_gid = GLOBAL_ROOT_GID;
inode->i_size = vnode->status.size;
inode->i_ctime.tv_sec = vnode->status.mtime_server;
inode->i_ctime.tv_nsec = 0;
@@ -175,8 +175,8 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
inode->i_op = &afs_autocell_inode_operations;
set_nlink(inode, 2);
- inode->i_uid = 0;
- inode->i_gid = 0;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
inode->i_ctime.tv_sec = get_seconds();
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 43165009428d..7c31ec399575 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -24,6 +24,8 @@
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
#include "internal.h"
#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
@@ -363,6 +365,10 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
memset(&params, 0, sizeof(params));
+ ret = -EINVAL;
+ if (current->nsproxy->net_ns != &init_net)
+ goto error;
+
/* parse the options and device name */
if (options) {
ret = afs_parse_options(&params, options, &dev_name);
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 9aa52d93c73c..7e03eadb40c0 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -120,7 +120,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct afs_writeback *candidate, *wb;
- struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
struct page *page;
struct key *key = file->private_data;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
@@ -245,7 +245,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
loff_t i_size, maybe_i_size;
_enter("{%x:%u},{%lx}",
@@ -627,8 +627,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct dentry *dentry = iocb->ki_filp->f_path.dentry;
- struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
ssize_t result;
size_t count = iov_length(iov, nr_segs);
diff --git a/fs/aio.c b/fs/aio.c
index 064bfbe37566..3f941f2a3059 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
struct mm_struct *mm = current->mm;
struct kioctx *ctx, *ret = NULL;
- struct hlist_node *n;
rcu_read_lock();
- hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+ hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
/*
* RCU protects us against accessing freed memory but
* we have to be careful not to get a reference when the
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 28d39fb84ae3..47a65df8c871 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -131,7 +131,6 @@ struct file *anon_inode_getfile(const char *name,
struct qstr this;
struct path path;
struct file *file;
- int error;
if (IS_ERR(anon_inode_inode))
return ERR_PTR(-ENODEV);
@@ -143,7 +142,7 @@ struct file *anon_inode_getfile(const char *name,
* Link the inode to a directory entry by creating a unique name
* using the inode sequence number.
*/
- error = -ENOMEM;
+ file = ERR_PTR(-ENOMEM);
this.name = name;
this.len = strlen(name);
this.hash = 0;
@@ -160,15 +159,12 @@ struct file *anon_inode_getfile(const char *name,
d_instantiate(path.dentry, anon_inode_inode);
- error = -ENFILE;
file = alloc_file(&path, OPEN_FMODE(flags), fops);
- if (!file)
+ if (IS_ERR(file))
goto err_dput;
file->f_mapping = anon_inode_inode->i_mapping;
- file->f_pos = 0;
file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
- file->f_version = 0;
file->private_data = priv;
return file;
@@ -177,7 +173,7 @@ err_dput:
path_put(&path);
err_module:
module_put(fops->owner);
- return ERR_PTR(error);
+ return file;
}
EXPORT_SYMBOL_GPL(anon_inode_getfile);
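
The anon_inodes change above adjusts to alloc_file() reporting failure with an ERR_PTR()-encoded pointer instead of NULL, so the function now returns whatever encoded error it was handed. A minimal sketch of that convention from the caller's side (hypothetical function, not from the patch):

        #include <linux/err.h>
        #include <linux/fs.h>

        /* Sketch: consuming an API that returns ERR_PTR() on failure. */
        static int use_file(struct file *file)
        {
                if (IS_ERR(file))
                        return PTR_ERR(file);   /* propagate the encoded errno */

                /* ... use the file ... */
                return 0;
        }
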
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index b785e7707959..3f1128b37e46 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -273,7 +273,7 @@ static inline int autofs_prepare_pipe(struct file *pipe)
{
if (!pipe->f_op || !pipe->f_op->write)
return -EINVAL;
- if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
+ if (!S_ISFIFO(file_inode(pipe)->i_mode))
return -EINVAL;
/* We want a packet pipe */
pipe->f_flags |= O_DIRECT;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 9f68a37bb2b2..743c7c2c949d 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -159,7 +159,7 @@ static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f)
struct inode *inode;
if (f) {
- inode = f->f_path.dentry->d_inode;
+ inode = file_inode(f);
sbi = autofs4_sbi(inode->i_sb);
}
return sbi;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index c93447604da8..9bd16255dd9c 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -383,8 +383,10 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
goto done;
}
} else {
- if (!simple_empty(dentry))
+ if (!simple_empty(dentry)) {
+ spin_unlock(&sbi->fs_lock);
goto done;
+ }
}
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
@@ -587,7 +589,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
/* This allows root to remove symlinks */
if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (atomic_dec_and_test(&ino->count)) {
p_ino = autofs4_dentry_ino(dentry->d_parent);
@@ -874,7 +876,7 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
static long autofs4_root_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
}
@@ -882,7 +884,7 @@ static long autofs4_root_ioctl(struct file *filp,
static long autofs4_root_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int ret;
if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 03bc1d347d8e..3db70dae40d3 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -42,10 +42,8 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
while (wq) {
nwq = wq->next;
wq->status = -ENOENT; /* Magic is gone - report failure */
- if (wq->name.name) {
- kfree(wq->name.name);
- wq->name.name = NULL;
- }
+ kfree(wq->name.name);
+ wq->name.name = NULL;
wq->wait_ctr--;
wake_up_interruptible(&wq->queue);
wq = nwq;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 2b3bda8d5e68..c8f4e25eb9e2 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -213,7 +213,7 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
static int
befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
befs_off_t value;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 2785ef91191a..3f422f6bb5ca 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -28,7 +28,7 @@ static struct buffer_head *bfs_find_entry(struct inode *dir,
static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
{
- struct inode *dir = f->f_path.dentry->d_inode;
+ struct inode *dir = file_inode(f);
struct buffer_head *bh;
struct bfs_dirent *de;
struct bfs_sb_info *info = BFS_SB(dir->i_sb);
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 6043567b95c2..bbc8f8827eac 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -214,7 +214,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
N_TRSIZE(ex) || N_DRSIZE(ex) ||
- i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ i_size_read(file_inode(bprm->file)) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
return -ENOEXEC;
}
@@ -367,7 +367,7 @@ static int load_aout_library(struct file *file)
int retval;
struct exec ex;
- inode = file->f_path.dentry->d_inode;
+ inode = file_inode(file);
retval = -ENOEXEC;
error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index ff9dbc630efa..a5702d74d2bd 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1141,7 +1141,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
/* By default, dump shared memory if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED) {
- if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
+ if (file_inode(vma->vm_file)->i_nlink == 0 ?
FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
goto whole;
return 0;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index cb240dd3b402..9c13e023e2b7 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -909,7 +909,7 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
dynamic_error:
printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
- what, file->f_path.dentry->d_inode->i_ino);
+ what, file_inode(file)->i_ino);
return -ELIBBAD;
}
@@ -1219,7 +1219,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
/* By default, dump shared memory if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED) {
- if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) {
+ if (file_inode(vma->vm_file)->i_nlink == 0) {
dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
vma->vm_flags, dump_ok ? "yes" : "no");
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index b56371981d16..2036d21baaef 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -438,7 +438,7 @@ static int load_flat_file(struct linux_binprm * bprm,
int ret;
hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
- inode = bprm->file->f_path.dentry->d_inode;
+ inode = file_inode(bprm->file);
text_len = ntohl(hdr->data_start);
data_len = ntohl(hdr->data_end) - ntohl(hdr->data_start);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 0c8869fdd14e..fecbbf3f8ff2 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -531,7 +531,7 @@ static void kill_node(Node *e)
static ssize_t
bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
{
- Node *e = file->f_path.dentry->d_inode->i_private;
+ Node *e = file_inode(file)->i_private;
ssize_t res;
char *page;
@@ -550,7 +550,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct dentry *root;
- Node *e = file->f_path.dentry->d_inode->i_private;
+ Node *e = file_inode(file)->i_private;
int res = parse_command(buffer, count);
switch (res) {
diff --git a/fs/bio.c b/fs/bio.c
index b96fc6ce4855..bb5768f59b32 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,6 +1428,8 @@ void bio_endio(struct bio *bio, int error)
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
+ trace_block_bio_complete(bio, error);
+
if (bio->bi_end_io)
bio->bi_end_io(bio, error);
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 78333a37f49d..aea605c98ba6 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -318,7 +318,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
/*
* private llseek:
- * for a block special file file->f_path.dentry->d_inode->i_size is zero
+ * for a block special file file_inode(file)->i_size is zero
* so we compute the size by hand (just as in block_read/write above)
*/
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
@@ -1033,7 +1033,9 @@ void bd_set_size(struct block_device *bdev, loff_t size)
{
unsigned bsize = bdev_logical_block_size(bdev);
- bdev->bd_inode->i_size = size;
+ mutex_lock(&bdev->bd_inode->i_mutex);
+ i_size_write(bdev->bd_inode, size);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
while (bsize < PAGE_CACHE_SIZE) {
if (size & bsize)
break;
@@ -1118,7 +1120,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
}
}
- if (!ret && !bdev->bd_openers) {
+ if (!ret) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
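
bd_set_size() now updates the block device inode size with i_size_write() while holding i_mutex instead of assigning i_size directly; i_size_write() callers must provide their own serialization (on 32-bit SMP configurations it is backed by a seqcount). A minimal sketch of the pattern, assuming the i_mutex locking used in this kernel series:

        #include <linux/fs.h>

        /* Sketch: publish a new inode size safely against concurrent i_size_read(). */
        static void set_inode_size(struct inode *inode, loff_t size)
        {
                mutex_lock(&inode->i_mutex);
                i_size_write(inode, size);
                mutex_unlock(&inode->i_mutex);
        }
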
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index ccd25ba7a9ac..9a8622a5b867 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -5,6 +5,9 @@ config BTRFS_FS
select ZLIB_DEFLATE
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select RAID6_PQ
+ select XOR_BLOCKS
+
help
Btrfs is a new filesystem with extents, writable snapshotting,
support for multiple devices and many more features.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 7df3e0f0ee51..3932224f99e9 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 04edf69be875..bd605c87adfd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -352,11 +352,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
err = __resolve_indirect_ref(fs_info, search_commit_root,
time_seq, ref, parents,
extent_item_pos);
- if (err) {
- if (ret == 0)
- ret = err;
+ if (err)
continue;
- }
/* we put the first parent into the ref at hand */
ULIST_ITER_INIT(&uiter);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index d61feca79455..310a7f6d09b1 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -19,7 +19,7 @@
#ifndef __BTRFS_BACKREF__
#define __BTRFS_BACKREF__
-#include "ioctl.h"
+#include <linux/btrfs.h>
#include "ulist.h"
#include "extent_io.h"
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 2a8c242bc4f5..d9b97d4960e6 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -40,6 +40,8 @@
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
#define BTRFS_INODE_COPY_EVERYTHING 8
+#define BTRFS_INODE_IN_DELALLOC_LIST 9
+#define BTRFS_INODE_READDIO_NEED_LOCK 10
/* in memory btrfs inode */
struct btrfs_inode {
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
return 0;
}
+/*
+ * Disable the DIO read nolock optimization, so new DIO readers will be
+ * forced to grab i_mutex. It is used to avoid an endless truncate due to
+ * a nonlocked DIO read.
+ */
+static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+{
+ set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+ smp_mb();
+}
+
+static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags);
+}
+
#endif
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 11d47bfb62b4..18af6f48781a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
(bh->b_data + (dev_bytenr & 4095));
if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
- strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
- sizeof(super_tmp->magic)) ||
+ super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
btrfs_super_nodesize(super_tmp) != state->metablock_size ||
btrfs_super_leafsize(super_tmp) != state->metablock_size ||
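
The superblock check above stops using strncmp() against the magic string and instead compares the on-disk little-endian field with cpu_to_le64(BTRFS_MAGIC); the ctree.h hunk below defines BTRFS_MAGIC as 0x4D5F53665248425FULL, the integer form of the ASCII bytes "_BHRfS_M". A small standalone illustration (not kernel code) of why the two representations match:

        #include <stdint.h>
        #include <string.h>
        #include <stdio.h>

        /* Sketch: the constant, stored least-significant byte first, spells "_BHRfS_M". */
        int main(void)
        {
                uint64_t magic = 0x4D5F53665248425FULL;
                unsigned char le[8];
                int i;

                for (i = 0; i < 8; i++)
                        le[i] = (magic >> (8 * i)) & 0xff;

                printf("%s\n", memcmp(le, "_BHRfS_M", 8) == 0 ? "match" : "mismatch");
                return 0;
        }
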
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 94ab2f80e7e3..15b94089abc4 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
- ret = io_tree->ops->merge_bio_hook(page, 0,
+ ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
PAGE_CACHE_SIZE,
bio, 0);
else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_CACHE_SHIFT;
if (comp_bio->bi_size)
- ret = tree->ops->merge_bio_hook(page, 0,
+ ret = tree->ops->merge_bio_hook(READ, page, 0,
PAGE_CACHE_SIZE,
comp_bio, 0);
else
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index eea5da7a2b9a..ecd25a1b4e51 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
+ /* Fallthrough */
case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case MOD_LOG_KEY_REMOVE:
btrfs_set_node_key(eb, &tm->key, tm->slot);
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
- BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
return eb_rewin;
}
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
*/
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, int cache_only, u64 *last_ret,
+ int start_slot, u64 *last_ret,
struct btrfs_key *progress)
{
struct extent_buffer *cur;
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
parent_level = btrfs_header_level(parent);
- if (cache_only && parent_level != 1)
- return 0;
WARN_ON(trans->transaction != root->fs_info->running_transaction);
WARN_ON(trans->transid != root->fs_info->generation);
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
else
uptodate = 0;
if (!cur || !uptodate) {
- if (cache_only) {
- free_extent_buffer(cur);
- continue;
- }
if (!cur) {
cur = read_tree_block(root, blocknr,
blocksize, gen);
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
/*
* A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are either in cache or have a minimum
- * transaction id. This is used by the btree defrag code, and tree logging
+ * for nodes or leaves that have a minimum transaction id.
+ * This is used by the btree defrag code, and tree logging
*
* This does not cow, but it does stuff the starting key it finds back
* into min_key, so you can call btrfs_search_slot with cow=1 on the
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
*/
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
- struct btrfs_path *path, int cache_only,
+ struct btrfs_path *path,
u64 min_trans)
{
struct extent_buffer *cur;
@@ -4887,15 +4882,12 @@ again:
if (sret && slot > 0)
slot--;
/*
- * check this node pointer against the cache_only and
- * min_trans parameters. If it isn't in cache or is too
- * old, skip to the next one.
+ * check this node pointer against the min_trans parameters.
+ * If it is too old, skip to the next one.
*/
while (slot < nritems) {
u64 blockptr;
u64 gen;
- struct extent_buffer *tmp;
- struct btrfs_disk_key disk_key;
blockptr = btrfs_node_blockptr(cur, slot);
gen = btrfs_node_ptr_generation(cur, slot);
@@ -4903,27 +4895,7 @@ again:
slot++;
continue;
}
- if (!cache_only)
- break;
-
- if (max_key) {
- btrfs_node_key(cur, &disk_key, slot);
- if (comp_keys(&disk_key, max_key) >= 0) {
- ret = 1;
- goto out;
- }
- }
-
- tmp = btrfs_find_tree_block(root, blockptr,
- btrfs_level_size(root, level - 1));
-
- if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
- free_extent_buffer(tmp);
- break;
- }
- if (tmp)
- free_extent_buffer(tmp);
- slot++;
+ break;
}
find_next_key:
/*
@@ -4934,7 +4906,7 @@ find_next_key:
path->slots[level] = slot;
btrfs_set_path_blocking(path);
sret = btrfs_find_next_key(root, path, min_key, level,
- cache_only, min_trans);
+ min_trans);
if (sret == 0) {
btrfs_release_path(path);
goto again;
@@ -5399,8 +5371,7 @@ out:
/*
* this is similar to btrfs_next_leaf, but does not try to preserve
* and fixup the path. It looks for and returns the next key in the
- * tree based on the current path and the cache_only and min_trans
- * parameters.
+ * tree based on the current path and the min_trans parameters.
*
* 0 is returned if another key is found, < 0 if there are any errors
* and 1 is returned if there are no higher keys in the tree
@@ -5409,8 +5380,7 @@ out:
* calling this function.
*/
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key, int level,
- int cache_only, u64 min_trans)
+ struct btrfs_key *key, int level, u64 min_trans)
{
int slot;
struct extent_buffer *c;
@@ -5461,22 +5431,8 @@ next:
if (level == 0)
btrfs_item_key_to_cpu(c, key, slot);
else {
- u64 blockptr = btrfs_node_blockptr(c, slot);
u64 gen = btrfs_node_ptr_generation(c, slot);
- if (cache_only) {
- struct extent_buffer *cur;
- cur = btrfs_find_tree_block(root, blockptr,
- btrfs_level_size(root, level - 1));
- if (!cur ||
- btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
- slot++;
- if (cur)
- free_extent_buffer(cur);
- goto next;
- }
- free_extent_buffer(cur);
- }
if (gen < min_trans) {
slot++;
goto next;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 547b7b05727f..0d82922179db 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -31,10 +31,10 @@
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
+#include <linux/btrfs.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
-#include "ioctl.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
@@ -46,7 +46,7 @@ extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
-#define BTRFS_MAGIC "_BHRfS_M"
+#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
#define BTRFS_MAX_MIRRORS 3
@@ -191,6 +191,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
+#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)
+
/*
* The key defines the order in the tree, and so it also defines (optimal)
* block layout.
@@ -336,7 +338,10 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
/*
* File system states
*/
+#define BTRFS_FS_STATE_ERROR 0
+#define BTRFS_FS_STATE_REMOUNTING 1
+/* Super block flags */
/* Errors detected */
#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2)
@@ -502,6 +507,7 @@ struct btrfs_super_block {
#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
@@ -511,6 +517,7 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
/*
@@ -952,8 +959,20 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5 (1 << 7)
+#define BTRFS_BLOCK_GROUP_RAID6 (1 << 8)
#define BTRFS_BLOCK_GROUP_RESERVED BTRFS_AVAIL_ALLOC_BIT_SINGLE
-#define BTRFS_NR_RAID_TYPES 5
+
+enum btrfs_raid_types {
+ BTRFS_RAID_RAID10,
+ BTRFS_RAID_RAID1,
+ BTRFS_RAID_DUP,
+ BTRFS_RAID_RAID0,
+ BTRFS_RAID_SINGLE,
+ BTRFS_RAID_RAID5,
+ BTRFS_RAID_RAID6,
+ BTRFS_NR_RAID_TYPES
+};
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
@@ -961,6 +980,8 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID5 | \
+ BTRFS_BLOCK_GROUP_RAID6 | \
BTRFS_BLOCK_GROUP_DUP | \
BTRFS_BLOCK_GROUP_RAID10)
/*
@@ -1185,6 +1206,10 @@ struct btrfs_block_group_cache {
u64 flags;
u64 sectorsize;
u64 cache_generation;
+
+ /* for raid56, this is a full stripe, without parity */
+ unsigned long full_stripe_len;
+
unsigned int ro:1;
unsigned int dirty:1;
unsigned int iref:1;
@@ -1225,6 +1250,28 @@ struct seq_list {
u64 seq;
};
+enum btrfs_orphan_cleanup_state {
+ ORPHAN_CLEANUP_STARTED = 1,
+ ORPHAN_CLEANUP_DONE = 2,
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+ struct list_head hash_list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+ struct list_head stripe_cache;
+ spinlock_t cache_lock;
+ int cache_size;
+ struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
/* fs_info */
struct reloc_control;
struct btrfs_device;
@@ -1250,6 +1297,7 @@ struct btrfs_fs_info {
/* block group cache stuff */
spinlock_t block_group_cache_lock;
+ u64 first_logical_byte;
struct rb_root block_group_cache_tree;
/* keep track of unallocated space */
@@ -1288,7 +1336,23 @@ struct btrfs_fs_info {
u64 last_trans_log_full_commit;
unsigned long mount_opt;
unsigned long compress_type:4;
+ /*
+ * It is a suggestive number, the read side is safe even if it gets a
+ * wrong number because we will write out the data into a regular
+ * extent. The write side (mount/remount) is under ->s_umount lock,
+ * so it is also safe.
+ */
u64 max_inline;
+ /*
+ * Protected by ->chunk_mutex and sb->s_umount.
+ *
+ * The reason that we use two locks to protect it is that only
+ * remount and mount operations can change it, and these two operations
+ * are under sb->s_umount, but the read side (chunk allocation) cannot
+ * acquire sb->s_umount or a deadlock would happen. So we use two
+ * locks to protect it. On the write side, we must acquire both locks,
+ * and on the read side, we just need to acquire one of them.
+ */
u64 alloc_start;
struct btrfs_transaction *running_transaction;
wait_queue_head_t transaction_throttle;
@@ -1307,6 +1371,13 @@ struct btrfs_fs_info {
struct mutex cleaner_mutex;
struct mutex chunk_mutex;
struct mutex volume_mutex;
+
+ /* this is used during read/modify/write to make sure
+ * no two ios are trying to mod the same stripe at the same
+ * time
+ */
+ struct btrfs_stripe_hash_table *stripe_hash_table;
+
/*
* this protects the ordered operations list only while we are
* processing all of the entries on it. This way we make
@@ -1365,6 +1436,7 @@ struct btrfs_fs_info {
*/
struct list_head ordered_extents;
+ spinlock_t delalloc_lock;
/*
* all of the inodes that have delalloc bytes. It is possible for
* this list to be empty even when there is still dirty data=ordered
@@ -1373,13 +1445,6 @@ struct btrfs_fs_info {
struct list_head delalloc_inodes;
/*
- * special rename and truncate targets that must be on disk before
- * we're allowed to commit. This is basically the ext3 style
- * data=ordered list.
- */
- struct list_head ordered_operations;
-
- /*
* there is a pool of worker threads for checksumming during writes
* and a pool for checksumming after reads. This is because readers
* can run with FS locks held, and the writers may be waiting for
@@ -1395,6 +1460,8 @@ struct btrfs_fs_info {
struct btrfs_workers flush_workers;
struct btrfs_workers endio_workers;
struct btrfs_workers endio_meta_workers;
+ struct btrfs_workers endio_raid56_workers;
+ struct btrfs_workers rmw_workers;
struct btrfs_workers endio_meta_write_workers;
struct btrfs_workers endio_write_workers;
struct btrfs_workers endio_freespace_worker;
@@ -1423,10 +1490,12 @@ struct btrfs_fs_info {
u64 total_pinned;
- /* protected by the delalloc lock, used to keep from writing
- * metadata until there is a nice batch
- */
- u64 dirty_metadata_bytes;
+ /* used to keep from writing metadata until there is a nice batch */
+ struct percpu_counter dirty_metadata_bytes;
+ struct percpu_counter delalloc_bytes;
+ s32 dirty_metadata_batch;
+ s32 delalloc_batch;
+
struct list_head dirty_cowonly_roots;
struct btrfs_fs_devices *fs_devices;
@@ -1442,9 +1511,6 @@ struct btrfs_fs_info {
struct reloc_control *reloc_ctl;
- spinlock_t delalloc_lock;
- u64 delalloc_bytes;
-
/* data_alloc_cluster is only used in ssd mode */
struct btrfs_free_cluster data_alloc_cluster;
@@ -1456,6 +1522,8 @@ struct btrfs_fs_info {
struct rb_root defrag_inodes;
atomic_t defrag_running;
+ /* Used to protect avail_{data, metadata, system}_alloc_bits */
+ seqlock_t profiles_lock;
/*
* these three are in extended format (availability of single
* chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1520,7 +1588,7 @@ struct btrfs_fs_info {
u64 qgroup_seq;
/* filesystem state */
- u64 fs_state;
+ unsigned long fs_state;
struct btrfs_delayed_root *delayed_root;
@@ -1623,6 +1691,9 @@ struct btrfs_root {
struct list_head root_list;
+ spinlock_t log_extents_lock[2];
+ struct list_head logged_list[2];
+
spinlock_t orphan_lock;
atomic_t orphan_inodes;
struct btrfs_block_rsv *orphan_block_rsv;
@@ -1832,6 +1903,7 @@ struct btrfs_ioctl_defrag_range_args {
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
BTRFS_MOUNT_##opt)
/*
@@ -2936,8 +3008,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
u64 bytenr, u64 num_bytes);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -3035,8 +3106,13 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
void btrfs_orphan_release_metadata(struct inode *inode);
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_pending_snapshot *pending);
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems,
+ u64 *qgroup_reserved);
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
@@ -3092,10 +3168,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *key, int lowest_level,
- int cache_only, u64 min_trans);
+ u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
- struct btrfs_path *path, int cache_only,
+ struct btrfs_path *path,
u64 min_trans);
enum btrfs_compare_tree_result {
BTRFS_COMPARE_TREE_NEW,
@@ -3148,7 +3224,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, int cache_only, u64 *last_ret,
+ int start_slot, u64 *last_ret,
struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
@@ -3459,9 +3535,9 @@ int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
- size_t size, struct bio *bio, unsigned long bio_flags);
-
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+ size_t size, struct bio *bio,
+ unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3543,7 +3619,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int cache_only);
+ struct btrfs_root *root);
/* sysfs.c */
int btrfs_init_sysfs(void);
@@ -3620,11 +3696,14 @@ __printf(5, 6)
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
+/*
+ * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
+ * will panic(). Otherwise we BUG() here.
+ */
#define btrfs_panic(fs_info, errno, fmt, args...) \
do { \
- struct btrfs_fs_info *_i = (fs_info); \
- __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
- BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
+ __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
+ BUG(); \
} while (0)
/* acl.c */
@@ -3745,4 +3824,11 @@ static inline int is_fstree(u64 rootid)
return 1;
return 0;
}
+
+static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
+{
+ return signal_pending(current);
+}
+
+
#endif
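
Among the ctree.h changes, the fs-wide dirty_metadata_bytes/delalloc_bytes counters move from plain u64 fields under delalloc_lock to struct percpu_counter with an explicit batch (see BTRFS_DIRTY_METADATA_THRESH and the *_batch fields), trading an exact instantaneous total for cheap concurrent updates. A minimal sketch of that counter pattern with hypothetical names, assuming the 3.x-era API in which percpu_counter_init() takes no gfp argument and the batched add is __percpu_counter_add():

        #include <linux/percpu_counter.h>

        static struct percpu_counter dirty_bytes;
        static s32 dirty_batch = 4096;          /* hypothetical batch size */

        static int counters_init(void)
        {
                return percpu_counter_init(&dirty_bytes, 0);
        }

        static void account_dirty(s64 delta)
        {
                /* per-cpu deltas fold into the global count every 'dirty_batch' units */
                __percpu_counter_add(&dirty_bytes, delta, dirty_batch);
        }

        static bool over_threshold(s64 thresh)
        {
                return percpu_counter_compare(&dirty_bytes, thresh) > 0;
        }
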
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 34836036f01b..0b278b117cbe 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_delayed_item *delayed_item)
{
struct extent_buffer *leaf;
- struct btrfs_item *item;
char *ptr;
int ret;
@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
- item = btrfs_item_nr(leaf, path->slots[0]);
ptr = btrfs_item_ptr(leaf, path->slots[0], char);
write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
@@ -1065,32 +1063,25 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
}
}
-static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_delayed_node *node)
+static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
{
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
int ret;
- mutex_lock(&node->mutex);
- if (!node->inode_dirty) {
- mutex_unlock(&node->mutex);
- return 0;
- }
-
key.objectid = node->inode_id;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
+
ret = btrfs_lookup_inode(trans, root, path, &key, 1);
if (ret > 0) {
btrfs_release_path(path);
- mutex_unlock(&node->mutex);
return -ENOENT;
} else if (ret < 0) {
- mutex_unlock(&node->mutex);
return ret;
}
@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
btrfs_delayed_inode_release_metadata(root, node);
btrfs_release_delayed_inode(node);
- mutex_unlock(&node->mutex);
return 0;
}
+static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ int ret;
+
+ mutex_lock(&node->mutex);
+ if (!node->inode_dirty) {
+ mutex_unlock(&node->mutex);
+ return 0;
+ }
+
+ ret = __btrfs_update_delayed_inode(trans, root, path, node);
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static inline int
+__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ int ret;
+
+ ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+ if (ret)
+ return ret;
+
+ ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+ if (ret)
+ return ret;
+
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ return ret;
+}
+
/*
* Called when committing the transaction.
* Returns 0 on success.
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
{
- struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node && (!count || (count && nr--))) {
- curr_root = curr_node->root;
- ret = btrfs_insert_delayed_items(trans, path, curr_root,
- curr_node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path,
- curr_root, curr_node);
- if (!ret)
- ret = btrfs_update_delayed_inode(trans, curr_root,
- path, curr_node);
+ ret = __btrfs_commit_inode_delayed_items(trans, path,
+ curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
curr_node = NULL;
@@ -1183,51 +1202,93 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
return __btrfs_run_delayed_items(trans, root, nr);
}
-static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_node *node)
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct inode *inode)
{
+ struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret;
+ if (!delayed_node)
+ return 0;
+
+ mutex_lock(&delayed_node->mutex);
+ if (!delayed_node->count) {
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
+ }
+ mutex_unlock(&delayed_node->mutex);
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
- trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
+ trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
- ret = btrfs_insert_delayed_items(trans, path, node->root, node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, node->root, node);
- if (!ret)
- ret = btrfs_update_delayed_inode(trans, node->root, path, node);
- btrfs_free_path(path);
+ ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+ btrfs_free_path(path);
trans->block_rsv = block_rsv;
+
return ret;
}
-int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
- struct inode *inode)
+int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
+ struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_path *path;
+ struct btrfs_block_rsv *block_rsv;
int ret;
if (!delayed_node)
return 0;
mutex_lock(&delayed_node->mutex);
- if (!delayed_node->count) {
+ if (!delayed_node->inode_dirty) {
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node);
return 0;
}
mutex_unlock(&delayed_node->mutex);
- ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+ trans = btrfs_join_transaction(delayed_node->root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto trans_out;
+ }
+ path->leave_spinning = 1;
+
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->inode_dirty)
+ ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
+ path, delayed_node);
+ else
+ ret = 0;
+ mutex_unlock(&delayed_node->mutex);
+
+ btrfs_free_path(path);
+ trans->block_rsv = block_rsv;
+trans_out:
+ btrfs_end_transaction(trans, delayed_node->root);
+ btrfs_btree_balance_dirty(delayed_node->root);
+out:
btrfs_release_delayed_node(delayed_node);
+
return ret;
}
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
struct btrfs_root *root;
struct btrfs_block_rsv *block_rsv;
int need_requeue = 0;
- int ret;
async_node = container_of(work, struct btrfs_async_delayed_node, work);
@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
block_rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->delayed_block_rsv;
- ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, root,
- delayed_node);
-
- if (!ret)
- btrfs_update_delayed_inode(trans, root, path, delayed_node);
-
+ __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
/*
* Maybe new delayed items have been inserted, so we need requeue
* the work. Besides that, we must dequeue the empty delayed nodes
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 4f808e1baeed..78b6ad0fc669 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
/* Used for evicting the inode. */
void btrfs_remove_delayed_node(struct inode *inode);
void btrfs_kill_delayed_inode_items(struct inode *inode);
+int btrfs_commit_inode_delayed_inode(struct inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ae9411773397..b7a0641ead77 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
#include "delayed-ref.h"
#include "transaction.h"
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* delayed back reference update tracking. For subvolume trees
* we queue up extent allocations and backref maintenance for
@@ -422,6 +426,14 @@ again:
return 1;
}
+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, cluster)
+ list_del_init(pos);
+}
+
/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
ref->extent_op->flags_to_set;
existing_ref->extent_op->update_flags = 1;
}
- kfree(ref->extent_op);
+ btrfs_free_delayed_extent_op(ref->extent_op);
}
}
/*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(head_ref);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
} else {
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(full_ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(full_ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs;
BUG_ON(extent_op && extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
return -ENOMEM;
}
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs;
BUG_ON(extent_op && !extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
return -ENOMEM;
}
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
return -ENOMEM;
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
return btrfs_delayed_node_to_head(ref);
return NULL;
}
+
+void btrfs_delayed_ref_exit(void)
+{
+ if (btrfs_delayed_ref_head_cachep)
+ kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+ if (btrfs_delayed_tree_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+ if (btrfs_delayed_data_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+ if (btrfs_delayed_extent_op_cachep)
+ kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+ btrfs_delayed_ref_head_cachep = kmem_cache_create(
+ "btrfs_delayed_ref_head",
+ sizeof(struct btrfs_delayed_ref_head), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_ref_head_cachep)
+ goto fail;
+
+ btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_tree_ref",
+ sizeof(struct btrfs_delayed_tree_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_tree_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_data_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_data_ref",
+ sizeof(struct btrfs_delayed_data_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_data_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_extent_op_cachep = kmem_cache_create(
+ "btrfs_delayed_extent_op",
+ sizeof(struct btrfs_delayed_extent_op), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_extent_op_cachep)
+ goto fail;
+
+ return 0;
+fail:
+ btrfs_delayed_ref_exit();
+ return -ENOMEM;
+}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c9d703693df0..f75fcaf79aeb 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -132,6 +132,15 @@ struct btrfs_delayed_ref_root {
unsigned long num_heads_ready;
/*
+ * bumped when someone is making progress on the delayed
+ * refs, so that other procs know they are just adding to
+ * contention instead of helping
+ */
+ atomic_t procs_running_refs;
+ atomic_t ref_seq;
+ wait_queue_head_t wait;
+
+ /*
* set when the tree is flushing before a transaction commit,
* used by the throttling code to decide if new updates need
* to be run right away
@@ -141,12 +150,47 @@ struct btrfs_delayed_ref_root {
u64 run_delayed_start;
};
+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+ return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+ if (op)
+ kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
WARN_ON(atomic_read(&ref->refs) == 0);
if (atomic_dec_and_test(&ref->refs)) {
WARN_ON(ref->in_tree);
- kfree(ref);
+ switch (ref->type) {
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ break;
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ case BTRFS_SHARED_DATA_REF_KEY:
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+ break;
+ case 0:
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+ break;
+ default:
+ BUG();
+ }
}
}
@@ -176,8 +220,14 @@ struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+ mutex_unlock(&head->mutex);
+}
+
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 66dbc8dbddf7..7ba7b3900cb8 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
- btrfs_start_delalloc_inodes(root, 0);
+ ret = btrfs_start_delalloc_inodes(root, 0);
+ if (ret) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return ret;
+ }
btrfs_wait_ordered_extents(root, 0);
trans = btrfs_start_transaction(root, 0);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a8f652dc940b..02369a3c162e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -46,6 +46,7 @@
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
+#include "raid56.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -56,7 +57,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+ struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
@@ -420,7 +422,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
struct extent_io_tree *tree;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 found_start;
struct extent_buffer *eb;
@@ -639,8 +641,15 @@ err:
btree_readahead_hook(root, eb, eb->start, ret);
}
- if (ret)
+ if (ret) {
+ /*
+ * our io error hook is going to dec the io pages
+ * again, we have to make sure it has something
+ * to decrement
+ */
+ atomic_inc(&eb->io_pages);
clear_extent_buffer_uptodate(eb);
+ }
free_extent_buffer(eb);
out:
return ret;
@@ -654,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
eb = (struct extent_buffer *)page->private;
set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
eb->read_mirror = failed_mirror;
+ atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
btree_readahead_hook(root, eb, eb->start, -EIO);
return -EIO; /* we fixed nothing */
@@ -670,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
end_io_wq->work.flags = 0;
if (bio->bi_rw & REQ_WRITE) {
- if (end_io_wq->metadata == 1)
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
btrfs_queue_worker(&fs_info->endio_meta_write_workers,
&end_io_wq->work);
- else if (end_io_wq->metadata == 2)
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
btrfs_queue_worker(&fs_info->endio_freespace_worker,
&end_io_wq->work);
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+ btrfs_queue_worker(&fs_info->endio_raid56_workers,
+ &end_io_wq->work);
else
btrfs_queue_worker(&fs_info->endio_write_workers,
&end_io_wq->work);
} else {
- if (end_io_wq->metadata)
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+ btrfs_queue_worker(&fs_info->endio_raid56_workers,
+ &end_io_wq->work);
+ else if (end_io_wq->metadata)
btrfs_queue_worker(&fs_info->endio_meta_workers,
&end_io_wq->work);
else
@@ -695,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
* 0 - if data
 * 1 - if normal metadata
* 2 - if writing to the free space cache area
+ * 3 - raid parity work
*/
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata)
@@ -946,18 +963,20 @@ static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
+ struct btrfs_fs_info *fs_info;
+ int ret;
+
tree = &BTRFS_I(mapping->host)->io_tree;
if (wbc->sync_mode == WB_SYNC_NONE) {
- struct btrfs_root *root = BTRFS_I(mapping->host)->root;
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
if (wbc->for_kupdate)
return 0;
+ fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- num_dirty = root->fs_info->dirty_metadata_bytes;
- if (num_dirty < thresh)
+ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH);
+ if (ret < 0)
return 0;
}
return btree_write_cache_pages(mapping, wbc);
@@ -1125,24 +1144,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
if (btrfs_header_generation(buf) ==
- root->fs_info->running_transaction->transid) {
+ fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
- spin_lock(&root->fs_info->delalloc_lock);
- if (root->fs_info->dirty_metadata_bytes >= buf->len)
- root->fs_info->dirty_metadata_bytes -= buf->len;
- else {
- spin_unlock(&root->fs_info->delalloc_lock);
- btrfs_panic(root->fs_info, -EOVERFLOW,
- "Can't clear %lu bytes from "
- " dirty_mdatadata_bytes (%llu)",
- buf->len,
- root->fs_info->dirty_metadata_bytes);
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+ -buf->len,
+ fs_info->dirty_metadata_batch);
/* ugh, clear_extent_buffer_dirty needs to lock the page */
btrfs_set_lock_blocking(buf);
clear_extent_buffer_dirty(buf);
@@ -1178,9 +1189,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->root_list);
+ INIT_LIST_HEAD(&root->logged_list[0]);
+ INIT_LIST_HEAD(&root->logged_list[1]);
spin_lock_init(&root->orphan_lock);
spin_lock_init(&root->inode_lock);
spin_lock_init(&root->accounting_lock);
+ spin_lock_init(&root->log_extents_lock[0]);
+ spin_lock_init(&root->log_extents_lock[1]);
mutex_init(&root->objectid_mutex);
mutex_init(&root->log_mutex);
init_waitqueue_head(&root->log_writer_wait);
@@ -2004,10 +2019,24 @@ int open_ctree(struct super_block *sb,
goto fail_srcu;
}
+ ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
+ if (ret) {
+ err = ret;
+ goto fail_bdi;
+ }
+ fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+ (1 + ilog2(nr_cpu_ids));
+
+ ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
+ if (ret) {
+ err = ret;
+ goto fail_dirty_metadata_bytes;
+ }
+
fs_info->btree_inode = new_inode(sb);
if (!fs_info->btree_inode) {
err = -ENOMEM;
- goto fail_bdi;
+ goto fail_delalloc_bytes;
}
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -2017,7 +2046,6 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
- INIT_LIST_HEAD(&fs_info->ordered_operations);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->trans_lock);
@@ -2028,6 +2056,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->tree_mod_seq_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->reloc_mutex);
+ seqlock_init(&fs_info->profiles_lock);
init_completion(&fs_info->kobj_unregister);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2126,6 +2155,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
+ fs_info->first_logical_byte = (u64)-1;
extent_io_tree_init(&fs_info->freed_extents[0],
fs_info->btree_inode->i_mapping);
@@ -2165,6 +2195,12 @@ int open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
+ ret = btrfs_alloc_stripe_hash_table(fs_info);
+ if (ret) {
+ err = ret;
+ goto fail_alloc;
+ }
+
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -2187,7 +2223,8 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
/* check FS state, whether FS is broken. */
- fs_info->fs_state |= btrfs_super_flags(disk_super);
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
+ set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
if (ret) {
@@ -2261,6 +2298,8 @@ int open_ctree(struct super_block *sb,
leafsize = btrfs_super_leafsize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = btrfs_super_stripesize(disk_super);
+ fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
+ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
/*
* mixed block groups end up with duplicate but slightly offset
@@ -2332,6 +2371,12 @@ int open_ctree(struct super_block *sb,
btrfs_init_workers(&fs_info->endio_meta_write_workers,
"endio-meta-write", fs_info->thread_pool_size,
&fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->endio_raid56_workers,
+ "endio-raid56", fs_info->thread_pool_size,
+ &fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->rmw_workers,
+ "rmw", fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
fs_info->thread_pool_size,
&fs_info->generic_worker);
@@ -2350,6 +2395,8 @@ int open_ctree(struct super_block *sb,
*/
fs_info->endio_workers.idle_thresh = 4;
fs_info->endio_meta_workers.idle_thresh = 4;
+ fs_info->endio_raid56_workers.idle_thresh = 4;
+ fs_info->rmw_workers.idle_thresh = 2;
fs_info->endio_write_workers.idle_thresh = 2;
fs_info->endio_meta_write_workers.idle_thresh = 2;
@@ -2366,6 +2413,8 @@ int open_ctree(struct super_block *sb,
ret |= btrfs_start_workers(&fs_info->fixup_workers);
ret |= btrfs_start_workers(&fs_info->endio_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->rmw_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
@@ -2390,8 +2439,7 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
- if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
- sizeof(disk_super->magic))) {
+ if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
goto fail_sb_buffer;
}
@@ -2694,13 +2742,13 @@ fail_cleaner:
* kthreads
*/
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_block_groups:
btrfs_free_block_groups(fs_info);
fail_tree_roots:
free_root_pointers(fs_info, 1);
+ invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
btrfs_stop_workers(&fs_info->generic_worker);
@@ -2710,6 +2758,8 @@ fail_sb_buffer:
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_meta_workers);
+ btrfs_stop_workers(&fs_info->endio_raid56_workers);
+ btrfs_stop_workers(&fs_info->rmw_workers);
btrfs_stop_workers(&fs_info->endio_meta_write_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -2721,13 +2771,17 @@ fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
+fail_delalloc_bytes:
+ percpu_counter_destroy(&fs_info->delalloc_bytes);
+fail_dirty_metadata_bytes:
+ percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
bdi_destroy(&fs_info->bdi);
fail_srcu:
cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
+ btrfs_free_stripe_hash_table(fs_info);
btrfs_close_devices(fs_info->fs_devices);
return err;
@@ -2795,8 +2849,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
super = (struct btrfs_super_block *)bh->b_data;
if (btrfs_super_bytenr(super) != bytenr ||
- strncmp((char *)(&super->magic), BTRFS_MAGIC,
- sizeof(super->magic))) {
+ super->magic != cpu_to_le64(BTRFS_MAGIC)) {
brelse(bh);
continue;
}
@@ -3076,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
== 0)))
num_tolerated_disk_barrier_failures = 0;
- else if (num_tolerated_disk_barrier_failures > 1
- &&
- (flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10)))
- num_tolerated_disk_barrier_failures = 1;
+ else if (num_tolerated_disk_barrier_failures > 1) {
+ if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ num_tolerated_disk_barrier_failures = 1;
+ } else if (flags &
+ BTRFS_BLOCK_GROUP_RAID6) {
+ num_tolerated_disk_barrier_failures = 2;
+ }
+ }
}
}
up_read(&sinfo->groups_sem);
@@ -3195,6 +3253,11 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ btrfs_free_log(NULL, root);
+ btrfs_free_log_root_tree(NULL, fs_info);
+ }
+
__btrfs_remove_free_space_cache(root->free_ino_pinned);
__btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
@@ -3339,7 +3402,7 @@ int close_ctree(struct btrfs_root *root)
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
}
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
btrfs_error_commit_super(root);
btrfs_put_block_group_cache(fs_info);
@@ -3352,9 +3415,9 @@ int close_ctree(struct btrfs_root *root)
btrfs_free_qgroup_config(root->fs_info);
- if (fs_info->delalloc_bytes) {
- printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
- (unsigned long long)fs_info->delalloc_bytes);
+ if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+ printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+ percpu_counter_sum(&fs_info->delalloc_bytes));
}
free_extent_buffer(fs_info->extent_root->node);
@@ -3384,6 +3447,8 @@ int close_ctree(struct btrfs_root *root)
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_meta_workers);
+ btrfs_stop_workers(&fs_info->endio_raid56_workers);
+ btrfs_stop_workers(&fs_info->rmw_workers);
btrfs_stop_workers(&fs_info->endio_meta_write_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -3401,9 +3466,13 @@ int close_ctree(struct btrfs_root *root)
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+ percpu_counter_destroy(&fs_info->delalloc_bytes);
bdi_destroy(&fs_info->bdi);
cleanup_srcu_struct(&fs_info->subvol_srcu);
+ btrfs_free_stripe_hash_table(fs_info);
+
return 0;
}
@@ -3443,11 +3512,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
(unsigned long long)transid,
(unsigned long long)root->fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
- if (!was_dirty) {
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->dirty_metadata_bytes += buf->len;
- spin_unlock(&root->fs_info->delalloc_lock);
- }
+ if (!was_dirty)
+ __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+ buf->len,
+ root->fs_info->dirty_metadata_batch);
}
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
@@ -3457,8 +3525,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
* looks as though older kernels can get into trouble with
* this code, they end up stuck in balance_dirty_pages forever
*/
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
+ int ret;
if (current->flags & PF_MEMALLOC)
return;
@@ -3466,9 +3533,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
if (flush_delayed)
btrfs_balance_delayed_items(root);
- num_dirty = root->fs_info->dirty_metadata_bytes;
-
- if (num_dirty > thresh) {
+ ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH);
+ if (ret > 0) {
balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping);
}
@@ -3518,7 +3585,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
btrfs_cleanup_transaction(root);
}
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+ struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
@@ -3528,7 +3596,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
- list_splice_init(&root->fs_info->ordered_operations, &splice);
+ list_splice_init(&t->ordered_operations, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
@@ -3544,35 +3612,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
- struct list_head splice;
struct btrfs_ordered_extent *ordered;
- struct inode *inode;
-
- INIT_LIST_HEAD(&splice);
spin_lock(&root->fs_info->ordered_extent_lock);
-
- list_splice_init(&root->fs_info->ordered_extents, &splice);
- while (!list_empty(&splice)) {
- ordered = list_entry(splice.next, struct btrfs_ordered_extent,
- root_extent_list);
-
- list_del_init(&ordered->root_extent_list);
- atomic_inc(&ordered->refs);
-
- /* the inode may be getting freed (in sys_unlink path). */
- inode = igrab(ordered->inode);
-
- spin_unlock(&root->fs_info->ordered_extent_lock);
- if (inode)
- iput(inode);
-
- atomic_set(&ordered->refs, 1);
- btrfs_put_ordered_extent(ordered);
-
- spin_lock(&root->fs_info->ordered_extent_lock);
- }
-
+ /*
+ * This will just short circuit the ordered completion stuff which will
+ * make sure the ordered extent gets properly cleaned up.
+ */
+ list_for_each_entry(ordered, &root->fs_info->ordered_extents,
+ root_extent_list)
+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
spin_unlock(&root->fs_info->ordered_extent_lock);
}
@@ -3594,11 +3643,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
}
while ((node = rb_first(&delayed_refs->root)) != NULL) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ struct btrfs_delayed_ref_head *head = NULL;
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
atomic_set(&ref->refs, 1);
if (btrfs_delayed_ref_is_head(ref)) {
- struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
if (!mutex_trylock(&head->mutex)) {
@@ -3614,16 +3663,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
continue;
}
- kfree(head->extent_op);
+ btrfs_free_delayed_extent_op(head->extent_op);
delayed_refs->num_heads--;
if (list_empty(&head->cluster))
delayed_refs->num_heads_ready--;
list_del_init(&head->cluster);
}
+
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
-
+ if (head)
+ mutex_unlock(&head->mutex);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
@@ -3671,6 +3722,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
delalloc_inodes);
list_del_init(&btrfs_inode->delalloc_inodes);
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &btrfs_inode->runtime_flags);
btrfs_invalidate_inodes(btrfs_inode->root);
}
@@ -3823,10 +3876,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
while (!list_empty(&list)) {
t = list_entry(list.next, struct btrfs_transaction, list);
- if (!t)
- break;
- btrfs_destroy_ordered_operations(root);
+ btrfs_destroy_ordered_operations(t, root);
btrfs_destroy_ordered_extents(root);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 305c33efb0e3..034d7dc552b2 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -25,6 +25,13 @@
#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12
+enum {
+ BTRFS_WQ_ENDIO_DATA = 0,
+ BTRFS_WQ_ENDIO_METADATA = 1,
+ BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+ BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
static inline u64 btrfs_sb_offset(int mirror)
{
u64 start = 16 * 1024;
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 614f34a899c2..81ee29eeb7ca 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -22,10 +22,10 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
*max_len = BTRFS_FID_SIZE_CONNECTABLE;
- return 255;
+ return FILEID_INVALID;
} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
- return 255;
+ return FILEID_INVALID;
}
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1e59ed575cc9..3e074dab2d57 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -31,6 +31,7 @@
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
+#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
@@ -72,8 +73,7 @@ enum {
RESERVE_ALLOC_NO_ACCOUNT = 2,
};
-static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -103,6 +103,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 num_bytes, int reserve);
+static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -162,6 +164,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
rb_link_node(&block_group->cache_node, parent, p);
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
+
+ if (info->first_logical_byte > block_group->key.objectid)
+ info->first_logical_byte = block_group->key.objectid;
+
spin_unlock(&info->block_group_cache_lock);
return 0;
@@ -203,8 +209,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
break;
}
}
- if (ret)
+ if (ret) {
btrfs_get_block_group(ret);
+ if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+ info->first_logical_byte = ret->key.objectid;
+ }
spin_unlock(&info->block_group_cache_lock);
return ret;
@@ -468,8 +477,6 @@ out:
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
- struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
int load_cache_only)
{
DEFINE_WAIT(wait);
@@ -527,12 +534,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->cached = BTRFS_CACHE_FAST;
spin_unlock(&cache->lock);
- /*
- * We can't do the read from on-disk cache during a commit since we need
- * to have the normal tree locking. Also if we are currently trying to
- * allocate blocks for the tree root we can't do the fast caching since
- * we likely hold important locks.
- */
if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
ret = load_free_space_cache(fs_info, cache);
@@ -1852,6 +1853,8 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
*actual_bytes = discarded_bytes;
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret;
}
@@ -2143,7 +2146,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
node->num_bytes);
}
}
- mutex_unlock(&head->mutex);
return ret;
}
@@ -2258,7 +2260,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
* process of being added. Don't run this ref yet.
*/
list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
+ btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
@@ -2285,7 +2287,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref = &locked_ref->node;
if (extent_op && must_insert_reserved) {
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
extent_op = NULL;
}
@@ -2294,28 +2296,25 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_delayed_extent_op(trans, root,
ref, extent_op);
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
-
- printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+ printk(KERN_DEBUG
+ "btrfs: run_delayed_extent_op "
+ "returned %d\n", ret);
spin_lock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(locked_ref);
return ret;
}
goto next;
}
-
- list_del_init(&locked_ref->cluster);
- locked_ref = NULL;
}
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- if (locked_ref) {
+ if (!btrfs_delayed_ref_is_head(ref)) {
/*
* when we play the delayed ref, also correct the
* ref_mod on head
@@ -2337,20 +2336,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
- btrfs_put_delayed_ref(ref);
- kfree(extent_op);
- count++;
-
+ btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- if (locked_ref) {
- list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
- }
- printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+ btrfs_delayed_ref_unlock(locked_ref);
+ btrfs_put_delayed_ref(ref);
+ printk(KERN_DEBUG
+ "btrfs: run_one_delayed_ref returned %d\n", ret);
spin_lock(&delayed_refs->lock);
return ret;
}
+ /*
+ * If this node is a head, that means all the refs in this head
+ * have been dealt with, and we will pick the next head to deal
+ * with, so we must unlock the head and drop it from the cluster
+ * list before we release it.
+ */
+ if (btrfs_delayed_ref_is_head(ref)) {
+ list_del_init(&locked_ref->cluster);
+ btrfs_delayed_ref_unlock(locked_ref);
+ locked_ref = NULL;
+ }
+ btrfs_put_delayed_ref(ref);
+ count++;
next:
cond_resched();
spin_lock(&delayed_refs->lock);
@@ -2435,6 +2443,16 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
return ret;
}
+static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
+ int count)
+{
+ int val = atomic_read(&delayed_refs->ref_seq);
+
+ if (val < seq || val >= seq + count)
+ return 1;
+ return 0;
+}
+
/*
* this starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
@@ -2469,6 +2487,44 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
+ if (count == 0) {
+ count = delayed_refs->num_entries * 2;
+ run_most = 1;
+ }
+
+ if (!run_all && !run_most) {
+ int old;
+ int seq = atomic_read(&delayed_refs->ref_seq);
+
+progress:
+ old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+ if (old) {
+ DEFINE_WAIT(__wait);
+ if (delayed_refs->num_entries < 16348)
+ return 0;
+
+ prepare_to_wait(&delayed_refs->wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+
+ old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+ if (old) {
+ schedule();
+ finish_wait(&delayed_refs->wait, &__wait);
+
+ if (!refs_newer(delayed_refs, seq, 256))
+ goto progress;
+ else
+ return 0;
+ } else {
+ finish_wait(&delayed_refs->wait, &__wait);
+ goto again;
+ }
+ }
+
+ } else {
+ atomic_inc(&delayed_refs->procs_running_refs);
+ }
+
again:
loops = 0;
spin_lock(&delayed_refs->lock);
@@ -2477,10 +2533,6 @@ again:
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- if (count == 0) {
- count = delayed_refs->num_entries * 2;
- run_most = 1;
- }
while (1) {
if (!(run_all || run_most) &&
delayed_refs->num_heads_ready < 64)
@@ -2500,11 +2552,15 @@ again:
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
+ btrfs_release_ref_cluster(&cluster);
spin_unlock(&delayed_refs->lock);
btrfs_abort_transaction(trans, root, ret);
+ atomic_dec(&delayed_refs->procs_running_refs);
return ret;
}
+ atomic_add(ret, &delayed_refs->ref_seq);
+
count -= min_t(unsigned long, ret, count);
if (count == 0)
@@ -2573,6 +2629,11 @@ again:
goto again;
}
out:
+ atomic_dec(&delayed_refs->procs_running_refs);
+ smp_mb();
+ if (waitqueue_active(&delayed_refs->wait))
+ wake_up(&delayed_refs->wait);
+
spin_unlock(&delayed_refs->lock);
assert_qgroups_uptodate(trans);
return 0;
@@ -2586,7 +2647,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_delayed_extent_op *extent_op;
int ret;
- extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+ extent_op = btrfs_alloc_delayed_extent_op();
if (!extent_op)
return -ENOMEM;
@@ -2598,7 +2659,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
num_bytes, extent_op);
if (ret)
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
return ret;
}
@@ -3223,12 +3284,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
+ write_seqlock(&fs_info->profiles_lock);
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_METADATA)
fs_info->avail_metadata_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
fs_info->avail_system_alloc_bits |= extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
}
/*
@@ -3276,6 +3339,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
u64 num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;
u64 target;
+ u64 tmp;
/*
* see if restripe for this chunk_type is in progress, if so
@@ -3292,40 +3356,48 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
}
spin_unlock(&root->fs_info->balance_lock);
+ /* First, mask out the RAID levels which aren't possible */
if (num_devices == 1)
- flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+ flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5);
+ if (num_devices < 3)
+ flags &= ~BTRFS_BLOCK_GROUP_RAID6;
if (num_devices < 4)
flags &= ~BTRFS_BLOCK_GROUP_RAID10;
- if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
- (flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))) {
- flags &= ~BTRFS_BLOCK_GROUP_DUP;
- }
-
- if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
- (flags & BTRFS_BLOCK_GROUP_RAID10)) {
- flags &= ~BTRFS_BLOCK_GROUP_RAID1;
- }
+ tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
+ flags &= ~tmp;
- if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
- ((flags & BTRFS_BLOCK_GROUP_RAID1) |
- (flags & BTRFS_BLOCK_GROUP_RAID10) |
- (flags & BTRFS_BLOCK_GROUP_DUP))) {
- flags &= ~BTRFS_BLOCK_GROUP_RAID0;
- }
+ if (tmp & BTRFS_BLOCK_GROUP_RAID6)
+ tmp = BTRFS_BLOCK_GROUP_RAID6;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
+ tmp = BTRFS_BLOCK_GROUP_RAID5;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
+ tmp = BTRFS_BLOCK_GROUP_RAID10;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
+ tmp = BTRFS_BLOCK_GROUP_RAID1;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
+ tmp = BTRFS_BLOCK_GROUP_RAID0;
- return extended_to_chunk(flags);
+ return extended_to_chunk(flags | tmp);
}
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
- if (flags & BTRFS_BLOCK_GROUP_DATA)
- flags |= root->fs_info->avail_data_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- flags |= root->fs_info->avail_system_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_METADATA)
- flags |= root->fs_info->avail_metadata_alloc_bits;
+ unsigned seq;
+
+ do {
+ seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ flags |= root->fs_info->avail_data_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ flags |= root->fs_info->avail_system_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ flags |= root->fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&root->fs_info->profiles_lock, seq));
return btrfs_reduce_alloc_profile(root, flags);
}
@@ -3333,6 +3405,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
u64 flags;
+ u64 ret;
if (data)
flags = BTRFS_BLOCK_GROUP_DATA;
@@ -3341,7 +3414,8 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
else
flags = BTRFS_BLOCK_GROUP_METADATA;
- return get_alloc_profile(root, flags);
+ ret = get_alloc_profile(root, flags);
+ return ret;
}
/*
@@ -3357,7 +3431,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
int ret = 0, committed = 0, alloc_chunk = 1;
/* make sure bytes are sectorsize aligned */
- bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ bytes = ALIGN(bytes, root->sectorsize);
if (root == root->fs_info->tree_root ||
BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
@@ -3452,7 +3526,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
struct btrfs_space_info *data_sinfo;
/* make sure bytes are sectorsize aligned */
- bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ bytes = ALIGN(bytes, root->sectorsize);
data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
@@ -3516,8 +3590,10 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
u64 num_dev;
- if (type & BTRFS_BLOCK_GROUP_RAID10 ||
- type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & (BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6))
num_dev = root->fs_info->fs_devices->rw_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_dev = 2;
@@ -3564,6 +3640,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
int wait_for_alloc = 0;
int ret = 0;
+ /* Don't re-enter if we're already allocating a chunk */
+ if (trans->allocating_chunk)
+ return -ENOSPC;
+
space_info = __find_space_info(extent_root->fs_info, flags);
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
@@ -3606,6 +3686,8 @@ again:
goto again;
}
+ trans->allocating_chunk = true;
+
/*
* If we have mixed data/metadata chunks we want to make sure we keep
* allocating mixed chunks instead of individual chunks.
@@ -3632,19 +3714,20 @@ again:
check_system_chunk(trans, extent_root, flags);
ret = btrfs_alloc_chunk(trans, extent_root, flags);
- if (ret < 0 && ret != -ENOSPC)
- goto out;
+ trans->allocating_chunk = false;
spin_lock(&space_info->lock);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out;
if (ret)
space_info->full = 1;
else
ret = 1;
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+out:
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
-out:
mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
@@ -3653,13 +3736,31 @@ static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
u64 profile = btrfs_get_alloc_profile(root, 0);
+ u64 rsv_size = 0;
u64 avail;
u64 used;
+ u64 to_add;
used = space_info->bytes_used + space_info->bytes_reserved +
- space_info->bytes_pinned + space_info->bytes_readonly +
- space_info->bytes_may_use;
+ space_info->bytes_pinned + space_info->bytes_readonly;
+
+ spin_lock(&global_rsv->lock);
+ rsv_size = global_rsv->size;
+ spin_unlock(&global_rsv->lock);
+
+ /*
+ * We only want to allow overcommitting if we have lots of actual space
+ * free, but if we don't have enough space to handle the global reserve
+ * space then we could end up with a real ENOSPC problem when trying
+ * to allocate a chunk or some other important allocation.
+ */
+ rsv_size <<= 1;
+ if (used + rsv_size >= space_info->total_bytes)
+ return 0;
+
+ used += space_info->bytes_may_use;
spin_lock(&root->fs_info->free_chunk_lock);
avail = root->fs_info->free_chunk_space;
@@ -3667,40 +3768,58 @@ static int can_overcommit(struct btrfs_root *root,
/*
* If we have dup, raid1 or raid10 then only half of the free
- * space is actually useable.
+ * space is actually useable. For raid56, the space info used
+ * doesn't include the parity drive, so we don't have to
+ * change the math
*/
if (profile & (BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10))
avail >>= 1;
+ to_add = space_info->total_bytes;
+
/*
* If we aren't flushing all things, let us overcommit up to
* 1/2th of the space. If we can flush, don't let us overcommit
* too much, let it overcommit up to 1/8 of the space.
*/
if (flush == BTRFS_RESERVE_FLUSH_ALL)
- avail >>= 3;
+ to_add >>= 3;
else
- avail >>= 1;
+ to_add >>= 1;
+
+ /*
+ * Limit the overcommit to the amount of free space we could possibly
+ * allocate for chunks.
+ */
+ to_add = min(avail, to_add);
- if (used + bytes < space_info->total_bytes + avail)
+ if (used + bytes < space_info->total_bytes + to_add)
return 1;
return 0;
}
-static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
- unsigned long nr_pages,
- enum wb_reason reason)
+void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+ unsigned long nr_pages)
{
- if (!writeback_in_progress(sb->s_bdi) &&
- down_read_trylock(&sb->s_umount)) {
- writeback_inodes_sb_nr(sb, nr_pages, reason);
- up_read(&sb->s_umount);
- return 1;
- }
+ struct super_block *sb = root->fs_info->sb;
+ int started;
- return 0;
+ /* If we cannot start writeback, just sync all the delalloc files. */
+ started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
+ WB_REASON_FS_FREE_SPACE);
+ if (!started) {
+ /*
+ * We needn't worry about the filesystem going from r/w to r/o even
+ * though we don't acquire the ->s_umount mutex, because the
+ * filesystem should guarantee that the delalloc inode list is
+ * empty after the filesystem becomes read-only (all dirty pages
+ * are written to disk).
+ */
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0);
+ }
}
/*
@@ -3724,7 +3843,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
space_info = block_rsv->space_info;
smp_mb();
- delalloc_bytes = root->fs_info->delalloc_bytes;
+ delalloc_bytes = percpu_counter_sum_positive(
+ &root->fs_info->delalloc_bytes);
if (delalloc_bytes == 0) {
if (trans)
return;
@@ -3735,10 +3855,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
- writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
- nr_pages,
- WB_REASON_FS_FREE_SPACE);
-
+ btrfs_writeback_inodes_sb_nr(root, nr_pages);
/*
* We need to wait for the async pages to actually start before
* we do anything.
@@ -3766,7 +3883,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
break;
}
smp_mb();
- delalloc_bytes = root->fs_info->delalloc_bytes;
+ delalloc_bytes = percpu_counter_sum_positive(
+ &root->fs_info->delalloc_bytes);
}
}
@@ -4030,6 +4148,15 @@ again:
goto again;
out:
+ if (ret == -ENOSPC &&
+ unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
+ struct btrfs_block_rsv *global_rsv =
+ &root->fs_info->global_block_rsv;
+
+ if (block_rsv != global_rsv &&
+ !block_rsv_use_bytes(global_rsv, orig_bytes))
+ ret = 0;
+ }
if (flushing) {
spin_lock(&space_info->lock);
space_info->flush = 0;
@@ -4416,19 +4543,60 @@ void btrfs_orphan_release_metadata(struct inode *inode)
btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_pending_snapshot *pending)
+/*
+ * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
+ * root: the root of the parent directory
+ * rsv: block reservation
+ * items: the number of items that we need to reserve
+ * qgroup_reserved: used to return the reserved size in qgroup
+ *
+ * This function is used to reserve the space for snapshot/subvolume
+ * creation and deletion. Those operations are different from the
+ * common file/directory operations: they change two fs/file trees
+ * and the root tree, and the number of items that the qgroup reserves
+ * differs from the free space reservation. So we cannot use the
+ * space reservation mechanism in start_transaction().
+ */
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int items,
+ u64 *qgroup_reserved)
{
- struct btrfs_root *root = pending->root;
- struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
- struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
- /*
- * two for root back/forward refs, two for directory entries,
- * one for root of the snapshot and one for parent inode.
- */
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
- dst_rsv->space_info = src_rsv->space_info;
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
+ u64 num_bytes;
+ int ret;
+
+ if (root->fs_info->quota_enabled) {
+ /* One for parent inode, two for dir entries */
+ num_bytes = 3 * root->leafsize;
+ ret = btrfs_qgroup_reserve(root, num_bytes);
+ if (ret)
+ return ret;
+ } else {
+ num_bytes = 0;
+ }
+
+ *qgroup_reserved = num_bytes;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, items);
+ rsv->space_info = __find_space_info(root->fs_info,
+ BTRFS_BLOCK_GROUP_METADATA);
+ ret = btrfs_block_rsv_add(root, rsv, num_bytes,
+ BTRFS_RESERVE_FLUSH_ALL);
+ if (ret) {
+ if (*qgroup_reserved)
+ btrfs_qgroup_free(root, *qgroup_reserved);
+ }
+
+ return ret;
+}
+
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ u64 qgroup_reserved)
+{
+ btrfs_block_rsv_release(root, rsv, (u64)-1);
+ if (qgroup_reserved)
+ btrfs_qgroup_free(root, qgroup_reserved);
}
/**
@@ -4536,6 +4704,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
bool delalloc_lock = true;
+ u64 to_free = 0;
+ unsigned dropped;
/* If we are a free space inode we need to not flush since we will be in
* the middle of a transaction commit. We also don't need the delalloc
@@ -4579,54 +4749,19 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
- if (root->fs_info->quota_enabled)
+ if (root->fs_info->quota_enabled) {
ret = btrfs_qgroup_reserve(root, num_bytes +
nr_extents * root->leafsize);
+ if (ret)
+ goto out_fail;
+ }
- /*
- * ret != 0 here means the qgroup reservation failed, we go straight to
- * the shared error handling then.
- */
- if (ret == 0)
- ret = reserve_metadata_bytes(root, block_rsv,
- to_reserve, flush);
-
- if (ret) {
- u64 to_free = 0;
- unsigned dropped;
-
- spin_lock(&BTRFS_I(inode)->lock);
- dropped = drop_outstanding_extent(inode);
- /*
- * If the inodes csum_bytes is the same as the original
- * csum_bytes then we know we haven't raced with any free()ers
- * so we can just reduce our inodes csum bytes and carry on.
- * Otherwise we have to do the normal free thing to account for
- * the case that the free side didn't free up its reserve
- * because of this outstanding reservation.
- */
- if (BTRFS_I(inode)->csum_bytes == csum_bytes)
- calc_csum_metadata_size(inode, num_bytes, 0);
- else
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- if (dropped)
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
- if (to_free) {
- btrfs_block_rsv_release(root, block_rsv, to_free);
- trace_btrfs_space_reservation(root->fs_info,
- "delalloc",
- btrfs_ino(inode),
- to_free, 0);
- }
- if (root->fs_info->quota_enabled) {
+ ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+ if (unlikely(ret)) {
+ if (root->fs_info->quota_enabled)
btrfs_qgroup_free(root, num_bytes +
nr_extents * root->leafsize);
- }
- if (delalloc_lock)
- mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
- return ret;
+ goto out_fail;
}
spin_lock(&BTRFS_I(inode)->lock);
@@ -4647,6 +4782,34 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
+
+out_fail:
+ spin_lock(&BTRFS_I(inode)->lock);
+ dropped = drop_outstanding_extent(inode);
+ /*
+ * If the inode's csum_bytes is the same as the original
+ * csum_bytes then we know we haven't raced with any free()ers
+ * so we can just reduce our inodes csum bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
+ */
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
+ if (to_free) {
+ btrfs_block_rsv_release(root, block_rsv, to_free);
+ trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ btrfs_ino(inode), to_free, 0);
+ }
+ if (delalloc_lock)
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+ return ret;
}
/**
@@ -4668,7 +4831,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ if (num_bytes)
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped > 0)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -4735,8 +4899,7 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
btrfs_free_reserved_data_space(inode, num_bytes);
}
-static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc)
{
struct btrfs_block_group_cache *cache = NULL;
@@ -4773,7 +4936,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && cache->cached == BTRFS_CACHE_NO)
- cache_block_group(cache, trans, NULL, 1);
+ cache_block_group(cache, 1);
byte_in_group = bytenr - cache->key.objectid;
WARN_ON(byte_in_group > cache->key.offset);
@@ -4823,6 +4986,13 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
struct btrfs_block_group_cache *cache;
u64 bytenr;
+ spin_lock(&root->fs_info->block_group_cache_lock);
+ bytenr = root->fs_info->first_logical_byte;
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+
+ if (bytenr < (u64)-1)
+ return bytenr;
+
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
if (!cache)
return 0;
@@ -4873,8 +5043,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
/*
* this function must be called within transaction
*/
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group_cache *cache;
@@ -4888,7 +5057,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
* to one because the slow code to read in the free extents does check
* the pinned extents.
*/
- cache_block_group(cache, trans, root, 1);
+ cache_block_group(cache, 1);
pin_down_extent(root, cache, bytenr, num_bytes, 0);
@@ -5285,7 +5454,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
}
- ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+ ret = update_block_group(root, bytenr, num_bytes, 0);
if (ret) {
btrfs_abort_transaction(trans, extent_root, ret);
goto out;
@@ -5330,7 +5499,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->extent_op) {
if (!head->must_insert_reserved)
goto out;
- kfree(head->extent_op);
+ btrfs_free_delayed_extent_op(head->extent_op);
head->extent_op = NULL;
}
@@ -5453,10 +5622,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return ret;
}
-static u64 stripe_align(struct btrfs_root *root, u64 val)
+static u64 stripe_align(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache,
+ u64 val, u64 num_bytes)
{
- u64 mask = ((u64)root->stripesize - 1);
- u64 ret = (val + mask) & ~mask;
+ u64 ret = ALIGN(val, root->stripesize);
return ret;
}
@@ -5476,7 +5646,6 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
u64 num_bytes)
{
struct btrfs_caching_control *caching_ctl;
- DEFINE_WAIT(wait);
caching_ctl = get_caching_control(cache);
if (!caching_ctl)
@@ -5493,7 +5662,6 @@ static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
struct btrfs_caching_control *caching_ctl;
- DEFINE_WAIT(wait);
caching_ctl = get_caching_control(cache);
if (!caching_ctl)
@@ -5507,20 +5675,20 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
int __get_raid_index(u64 flags)
{
- int index;
-
if (flags & BTRFS_BLOCK_GROUP_RAID10)
- index = 0;
+ return BTRFS_RAID_RAID10;
else if (flags & BTRFS_BLOCK_GROUP_RAID1)
- index = 1;
+ return BTRFS_RAID_RAID1;
else if (flags & BTRFS_BLOCK_GROUP_DUP)
- index = 2;
+ return BTRFS_RAID_DUP;
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
- index = 3;
- else
- index = 4;
+ return BTRFS_RAID_RAID0;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+ return BTRFS_RAID_RAID5;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+ return BTRFS_RAID_RAID6;
- return index;
+ return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
static int get_block_group_index(struct btrfs_block_group_cache *cache)
@@ -5663,6 +5831,8 @@ search:
if (!block_group_bits(block_group, data)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_RAID10;
/*
@@ -5678,8 +5848,7 @@ have_block_group:
cached = block_group_cache_done(block_group);
if (unlikely(!cached)) {
found_uncached_bg = true;
- ret = cache_block_group(block_group, trans,
- orig_root, 0);
+ ret = cache_block_group(block_group, 0);
BUG_ON(ret < 0);
ret = 0;
}
@@ -5692,6 +5861,7 @@ have_block_group:
* lets look there
*/
if (last_ptr) {
+ unsigned long aligned_cluster;
/*
* the refill lock keeps out other
* people trying to start a new cluster
@@ -5758,11 +5928,15 @@ refill_cluster:
goto unclustered_alloc;
}
+ aligned_cluster = max_t(unsigned long,
+ empty_cluster + empty_size,
+ block_group->full_stripe_len);
+
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
search_start, num_bytes,
- empty_cluster + empty_size);
+ aligned_cluster);
if (ret == 0) {
/*
* now pull our allocation out of this
@@ -5833,7 +6007,8 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = stripe_align(root, offset);
+ search_start = stripe_align(root, used_block_group,
+ offset, num_bytes);
/* move on to the next group */
if (search_start + num_bytes >
@@ -5984,7 +6159,7 @@ again:
if (ret == -ENOSPC) {
if (!final_tried) {
num_bytes = num_bytes >> 1;
- num_bytes = num_bytes & ~(root->sectorsize - 1);
+ num_bytes = round_down(num_bytes, root->sectorsize);
num_bytes = max(num_bytes, min_alloc_size);
if (num_bytes == min_alloc_size)
final_tried = true;
@@ -6108,7 +6283,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(root, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
@@ -6172,7 +6347,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(root, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
@@ -6215,7 +6390,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
u64 num_bytes = ins->offset;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
- cache_block_group(block_group, trans, NULL, 0);
+ cache_block_group(block_group, 0);
caching_ctl = get_caching_control(block_group);
if (!caching_ctl) {
@@ -6329,12 +6504,14 @@ use_block_rsv(struct btrfs_trans_handle *trans,
if (!ret)
return block_rsv;
if (ret && !block_rsv->failfast) {
- static DEFINE_RATELIMIT_STATE(_rs,
- DEFAULT_RATELIMIT_INTERVAL,
- /*DEFAULT_RATELIMIT_BURST*/ 2);
- if (__ratelimit(&_rs))
- WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
- ret);
+ if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ /*DEFAULT_RATELIMIT_BURST*/ 1);
+ if (__ratelimit(&_rs))
+ WARN(1, KERN_DEBUG
+ "btrfs: block rsv returned %d\n", ret);
+ }
ret = reserve_metadata_bytes(root, block_rsv, blocksize,
BTRFS_RESERVE_NO_FLUSH);
if (!ret) {
@@ -6400,7 +6577,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_delayed_extent_op *extent_op;
- extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+ extent_op = btrfs_alloc_delayed_extent_op();
BUG_ON(!extent_op); /* -ENOMEM */
if (key)
memcpy(&extent_op->key, key, sizeof(extent_op->key));
@@ -7203,6 +7380,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
root->fs_info->fs_devices->missing_devices;
stripped = BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
if (num_devices == 1) {
@@ -7481,16 +7659,16 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
index = get_block_group_index(block_group);
}
- if (index == 0) {
+ if (index == BTRFS_RAID_RAID10) {
dev_min = 4;
/* Divide by 2 */
min_free >>= 1;
- } else if (index == 1) {
+ } else if (index == BTRFS_RAID_RAID1) {
dev_min = 2;
- } else if (index == 2) {
+ } else if (index == BTRFS_RAID_DUP) {
/* Multiply by 2 */
min_free <<= 1;
- } else if (index == 3) {
+ } else if (index == BTRFS_RAID_RAID0) {
dev_min = fs_devices->rw_devices;
do_div(min_free, dev_min);
}
@@ -7651,11 +7829,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
space_info = list_entry(info->space_info.next,
struct btrfs_space_info,
list);
- if (space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0) {
- WARN_ON(1);
- dump_space_info(space_info, 0, 0);
+ if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
+ if (space_info->bytes_pinned > 0 ||
+ space_info->bytes_reserved > 0 ||
+ space_info->bytes_may_use > 0) {
+ WARN_ON(1);
+ dump_space_info(space_info, 0, 0);
+ }
}
list_del(&space_info->list);
kfree(space_info);
@@ -7754,7 +7934,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_release_path(path);
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
-
+ cache->full_stripe_len = btrfs_full_stripe_len(root,
+ &root->fs_info->mapping_tree,
+ found_key.objectid);
btrfs_init_free_space_ctl(cache);
/*
@@ -7808,6 +7990,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
if (!(get_alloc_profile(root, space_info->flags) &
(BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_DUP)))
continue;
/*
@@ -7883,6 +8067,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache->sectorsize = root->sectorsize;
cache->fs_info = root->fs_info;
+ cache->full_stripe_len = btrfs_full_stripe_len(root,
+ &root->fs_info->mapping_tree,
+ chunk_offset);
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
@@ -7932,12 +8119,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
+ write_seqlock(&fs_info->profiles_lock);
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits &= ~extra_flags;
if (flags & BTRFS_BLOCK_GROUP_METADATA)
fs_info->avail_metadata_alloc_bits &= ~extra_flags;
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
fs_info->avail_system_alloc_bits &= ~extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
@@ -8036,6 +8225,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+
+ if (root->fs_info->first_logical_byte == block_group->key.objectid)
+ root->fs_info->first_logical_byte = (u64)-1;
spin_unlock(&root->fs_info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
@@ -8158,7 +8350,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
if (end - start >= range->minlen) {
if (!block_group_cache_done(cache)) {
- ret = cache_block_group(cache, NULL, root, 0);
+ ret = cache_block_group(cache, 0);
if (!ret)
wait_block_group_cache_done(cache);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1b319df29eee..f173c5af6461 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
@@ -1834,7 +1833,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
*/
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
SetPageUptodate(page);
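Several hunks in this file replace the open-coded `(u64)page->index << PAGE_CACHE_SHIFT` with page_offset(page); both compute the byte offset of a page-cache page in 64 bits. A small sketch of the arithmetic, assuming 4k pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4k pages assumed for the example */

/* page_offset(page) is essentially index << PAGE_SHIFT done in 64 bits. */
static uint64_t page_offset_from_index(unsigned long index)
{
        return (uint64_t)index << PAGE_SHIFT;
}

int main(void)
{
        unsigned long index = 3;
        uint64_t start = page_offset_from_index(index);
        uint64_t end = start + (1UL << PAGE_SHIFT) - 1;   /* PAGE_CACHE_SIZE - 1 */

        printf("page %lu covers bytes %llu..%llu\n", index,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}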
@@ -1846,7 +1845,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
*/
static void check_page_locked(struct extent_io_tree *tree, struct page *page)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
unlock_page(page);
@@ -1895,13 +1894,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
if (ret)
err = ret;
- if (did_repair) {
- ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
- rec->start + rec->len - 1,
- EXTENT_DAMAGED, GFP_NOFS);
- if (ret && !err)
- err = ret;
- }
+ ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+ rec->start + rec->len - 1,
+ EXTENT_DAMAGED, GFP_NOFS);
+ if (ret && !err)
+ err = ret;
kfree(rec);
return err;
@@ -1932,10 +1929,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
u64 map_length = 0;
u64 sector;
struct btrfs_bio *bbio = NULL;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int ret;
BUG_ON(!mirror_num);
+ /* we can't repair anything in raid56 yet */
+ if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+ return 0;
+
bio = bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
@@ -1960,7 +1962,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
return -EIO;
}
bio->bi_bdev = dev->bdev;
- bio_add_page(bio, page, length, start-page_offset(page));
+ bio_add_page(bio, page, length, start - page_offset(page));
btrfsic_submit_bio(WRITE_SYNC, bio);
wait_for_completion(&compl);
@@ -2052,6 +2054,7 @@ static int clean_io_failure(u64 start, struct page *page)
failrec->failed_mirror);
did_repair = !ret;
}
+ ret = 0;
}
out:
@@ -2293,8 +2296,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
struct page *page = bvec->bv_page;
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
end = start + bvec->bv_len - 1;
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2355,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
(long int)bio->bi_bdev);
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
end = start + bvec->bv_len - 1;
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2472,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
struct extent_io_tree *tree = bio->bi_private;
u64 start;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
@@ -2489,13 +2490,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
return ret;
}
-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
unsigned long offset, size_t size, struct bio *bio,
unsigned long bio_flags)
{
int ret = 0;
if (tree->ops && tree->ops->merge_bio_hook)
- ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+ ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
bio_flags);
BUG_ON(ret < 0);
return ret;
@@ -2530,7 +2531,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
sector;
if (prev_bio_flags != bio_flags || !contig ||
- merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+ merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
bio_add_page(bio, page, page_size, offset) < page_size) {
ret = submit_one_bio(rw, bio, mirror_num,
prev_bio_flags);
@@ -2595,7 +2596,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
unsigned long *bio_flags)
{
struct inode *inode = page->mapping->host;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 page_end = start + PAGE_CACHE_SIZE - 1;
u64 end;
u64 cur = start;
@@ -2648,6 +2649,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
}
}
while (cur <= end) {
+ unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+
if (cur >= last_byte) {
char *userpage;
struct extent_state *cached = NULL;
@@ -2682,7 +2685,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
iosize = min(extent_map_end(em) - cur, end - cur + 1);
cur_end = min(extent_map_end(em) - 1, end);
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+ iosize = ALIGN(iosize, blocksize);
if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
disk_io_size = em->block_len;
sector = em->block_start >> 9;
@@ -2735,26 +2738,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
continue;
}
- ret = 0;
- if (tree->ops && tree->ops->readpage_io_hook) {
- ret = tree->ops->readpage_io_hook(page, cur,
- cur + iosize - 1);
- }
- if (!ret) {
- unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
- pnr -= page->index;
- ret = submit_extent_page(READ, tree, page,
+ pnr -= page->index;
+ ret = submit_extent_page(READ, tree, page,
sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag);
- if (!ret) {
- nr++;
- *bio_flags = this_bio_flag;
- }
- }
- if (ret) {
+ if (!ret) {
+ nr++;
+ *bio_flags = this_bio_flag;
+ } else {
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
}
@@ -2806,7 +2800,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct inode *inode = page->mapping->host;
struct extent_page_data *epd = data;
struct extent_io_tree *tree = epd->tree;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 delalloc_start;
u64 page_end = start + PAGE_CACHE_SIZE - 1;
u64 end;
@@ -2982,7 +2976,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
BUG_ON(extent_map_end(em) <= cur);
BUG_ON(end < cur);
iosize = min(extent_map_end(em) - cur, end - cur + 1);
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+ iosize = ALIGN(iosize, blocksize);
sector = (em->block_start + extent_offset) >> 9;
bdev = em->bdev;
block_start = em->block_start;
@@ -3124,12 +3118,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
- spin_lock(&fs_info->delalloc_lock);
- if (fs_info->dirty_metadata_bytes >= eb->len)
- fs_info->dirty_metadata_bytes -= eb->len;
- else
- WARN_ON(1);
- spin_unlock(&fs_info->delalloc_lock);
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+ -eb->len,
+ fs_info->dirty_metadata_batch);
ret = 1;
} else {
spin_unlock(&eb->refs_lock);
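The dirty_metadata_bytes accounting moves from a spinlock-protected global to a per-cpu counter updated with a batch size, so the hot path rarely touches shared state. A toy single-threaded model of that batching idea (not the kernel percpu_counter API):

#include <stdio.h>

struct batched_counter {
        long long total;        /* shared, expensive to update */
        long long local;        /* per-cpu slot, cheap to update */
        long long batch;        /* fold threshold */
};

static void counter_add(struct batched_counter *c, long long amount)
{
        c->local += amount;
        if (c->local >= c->batch || c->local <= -c->batch) {
                c->total += c->local;   /* the only "global" access */
                c->local = 0;
        }
}

int main(void)
{
        struct batched_counter c = { .batch = 4096 };
        int i;

        for (i = 0; i < 100; i++)
                counter_add(&c, 64);    /* 6400 bytes added in total */

        printf("total=%lld local=%lld\n", c.total, c.local);
        return 0;
}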
@@ -3446,15 +3437,9 @@ retry:
* swizzled back from swapper_space to tmpfs file
* mapping
*/
- if (tree->ops &&
- tree->ops->write_cache_pages_lock_hook) {
- tree->ops->write_cache_pages_lock_hook(page,
- data, flush_fn);
- } else {
- if (!trylock_page(page)) {
- flush_fn(data);
- lock_page(page);
- }
+ if (!trylock_page(page)) {
+ flush_fn(data);
+ lock_page(page);
}
if (unlikely(page->mapping != mapping)) {
@@ -3674,11 +3659,11 @@ int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset)
{
struct extent_state *cached_state = NULL;
- u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
- start += (offset + blocksize - 1) & ~(blocksize - 1);
+ start += ALIGN(offset, blocksize);
if (start > end)
return 0;
@@ -3700,7 +3685,7 @@ int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
int ret = 1;
@@ -3739,7 +3724,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
gfp_t mask)
{
struct extent_map *em;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if ((mask & __GFP_WAIT) &&
@@ -3797,7 +3782,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
len = last - offset;
if (len == 0)
break;
- len = (len + sectorsize - 1) & ~(sectorsize - 1);
+ len = ALIGN(len, sectorsize);
em = get_extent(inode, NULL, 0, offset, len, 0);
if (IS_ERR_OR_NULL(em))
return em;
@@ -3995,8 +3980,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
list_del(&eb->leak_list);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
- if (eb->pages && eb->pages != eb->inline_pages)
- kfree(eb->pages);
kmem_cache_free(extent_buffer_cache, eb);
}
@@ -4037,19 +4020,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
atomic_set(&eb->refs, 1);
atomic_set(&eb->io_pages, 0);
- if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
- struct page **pages;
- int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- pages = kzalloc(num_pages, mask);
- if (!pages) {
- __free_extent_buffer(eb);
- return NULL;
- }
- eb->pages = pages;
- } else {
- eb->pages = eb->inline_pages;
- }
+ /*
+ * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+ */
+ BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+ > MAX_INLINE_EXTENT_BUFFER_SIZE);
+ BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
return eb;
}
@@ -4180,6 +4156,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
+ int refs;
/* the ref bit is tricky. We have to make sure it is set
* if we have the buffer dirty. Otherwise the
* code to free a buffer can end up dropping a dirty
@@ -4200,6 +4177,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* So bump the ref count first, then set the bit. If someone
* beat us to it, drop the ref we added.
*/
+ refs = atomic_read(&eb->refs);
+ if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+ return;
+
spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_inc(&eb->refs);
@@ -4401,9 +4382,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
void free_extent_buffer(struct extent_buffer *eb)
{
+ int refs;
+ int old;
if (!eb)
return;
+ while (1) {
+ refs = atomic_read(&eb->refs);
+ if (refs <= 3)
+ break;
+ old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+ if (old == refs)
+ return;
+ }
+
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
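The new fast path in free_extent_buffer() drops a reference without taking refs_lock as long as the count stays above 3, the point where the tree-ref and dummy-buffer bookkeeping has to run under the lock. A hedged userspace sketch of the same "decrement unless at the floor" cmpxchg loop, using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Drop one reference without a lock as long as the count stays above
 * 'floor'; return false if the caller must fall back to the slow
 * (locked) path.
 */
static bool put_ref_fast(atomic_int *refs, int floor)
{
        int cur = atomic_load(refs);

        while (cur > floor) {
                if (atomic_compare_exchange_weak(refs, &cur, cur - 1))
                        return true;    /* we did the decrement */
                /* cur was reloaded by the failed CAS; retry */
        }
        return false;
}

int main(void)
{
        atomic_int refs = 5;

        while (put_ref_fast(&refs, 3))
                ;
        printf("fell back to slow path at refs=%d\n", atomic_load(&refs));
        return 0;
}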
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 2eacfabd3263..6068a1985560 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -72,10 +72,9 @@ struct extent_io_ops {
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
extent_submit_bio_hook_t *submit_bio_hook;
- int (*merge_bio_hook)(struct page *page, unsigned long offset,
+ int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror);
@@ -90,8 +89,6 @@ struct extent_io_ops {
struct extent_state *other);
void (*split_extent_hook)(struct inode *inode,
struct extent_state *orig, u64 split);
- int (*write_cache_pages_lock_hook)(struct page *page, void *data,
- void (*flush_fn)(void *));
};
struct extent_io_tree {
@@ -161,8 +158,7 @@ struct extent_buffer {
*/
wait_queue_head_t read_lock_wq;
wait_queue_head_t lock_wq;
- struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
- struct page **pages;
+ struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
};
static inline void extent_set_compress_type(unsigned long *bio_flags,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index fdb7a8db3b57..2834ca5768ea 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1,6 +1,5 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 94aa53b38721..ec160202be3e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -684,6 +684,24 @@ out:
return ret;
}
+static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
+ struct btrfs_sector_sum *sector_sum,
+ u64 total_bytes, u64 sectorsize)
+{
+ u64 tmp = sectorsize;
+ u64 next_sector = sector_sum->bytenr;
+ struct btrfs_sector_sum *next = sector_sum + 1;
+
+ while ((tmp + total_bytes) < sums->len) {
+ if (next_sector + sectorsize != next->bytenr)
+ break;
+ tmp += sectorsize;
+ next_sector = next->bytenr;
+ next++;
+ }
+ return tmp;
+}
+
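btrfs_sector_sum_left() counts how many checksum bytes starting at sector_sum describe physically contiguous sectors, stopping at the first gap in bytenr or when sums->len is reached. A worked example with 4k sectors (the simplified struct below is a stand-in for struct btrfs_sector_sum):

#include <stdint.h>
#include <stdio.h>

struct sector_sum { uint64_t bytenr; };  /* one checksum per sector */

/*
 * Count contiguous bytes starting at sums[idx], never exceeding
 * total_len - already_done, mirroring btrfs_sector_sum_left().
 */
static uint64_t contiguous_bytes(const struct sector_sum *sums, int idx,
                                 uint64_t already_done, uint64_t total_len,
                                 uint64_t sectorsize)
{
        uint64_t tmp = sectorsize;
        uint64_t next_sector = sums[idx].bytenr;
        const struct sector_sum *next = &sums[idx + 1];

        while (tmp + already_done < total_len) {
                if (next_sector + sectorsize != next->bytenr)
                        break;                  /* physical gap: stop */
                tmp += sectorsize;
                next_sector = next->bytenr;
                next++;
        }
        return tmp;
}

int main(void)
{
        /* Three contiguous 4k sectors, then a gap. */
        struct sector_sum sums[] = {
                { 1048576 }, { 1052672 }, { 1056768 }, { 2097152 },
        };
        uint64_t left = contiguous_bytes(sums, 0, 0, 16384, 4096);

        printf("contiguous run: %llu bytes\n", (unsigned long long)left);
        return 0;
}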
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
goto insert;
}
- if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+ if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
csum_size) {
- u32 diff = (csum_offset + 1) * csum_size;
+ int extend_nr;
+ u64 tmp;
+ u32 diff;
+ u32 free_space;
- /*
- * is the item big enough already? we dropped our lock
- * before and need to recheck
- */
- if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
- goto csum;
+ if (btrfs_leaf_free_space(root, leaf) <
+ sizeof(struct btrfs_item) + csum_size * 2)
+ goto insert;
+
+ free_space = btrfs_leaf_free_space(root, leaf) -
+ sizeof(struct btrfs_item) - csum_size;
+ tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+ root->sectorsize);
+ tmp >>= root->fs_info->sb->s_blocksize_bits;
+ WARN_ON(tmp < 1);
+
+ extend_nr = max_t(int, 1, (int)tmp);
+ diff = (csum_offset + extend_nr) * csum_size;
+ diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
- if (diff != csum_size)
- goto insert;
+ diff = min(free_space, diff);
+ diff /= csum_size;
+ diff *= csum_size;
btrfs_extend_item(trans, root, path, diff);
goto csum;
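The amount by which the existing csum item is extended is now derived from the contiguous run computed above and clamped three ways: by MAX_CSUM_ITEMS, by the leaf free space left after the item header, and by rounding down to whole checksum entries. A small arithmetic sketch of that clamping with illustrative numbers (csum_size of 4 assumed):

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
        uint32_t csum_size   = 4;       /* crc32c checksum */
        uint32_t csum_offset = 100;     /* entries already in the item */
        uint32_t item_size   = 400;     /* current item size in bytes */
        uint32_t free_space  = 150;     /* leaf free space minus item header */
        uint32_t max_items   = 1000;    /* stand-in for MAX_CSUM_ITEMS */
        uint32_t extend_nr   = 60;      /* contiguous sectors still to cover */

        uint32_t diff = (csum_offset + extend_nr) * csum_size;   /* 640 */
        diff = min_u32(diff, max_items * csum_size);   /* per-item cap */
        diff -= item_size;                             /* 240 bytes of growth */
        diff = min_u32(free_space, diff);              /* only 150 fit in the leaf */
        diff = (diff / csum_size) * csum_size;         /* whole checksums: 148 */

        printf("extend item by %u bytes (%u checksums)\n", diff, diff / csum_size);
        return 0;
}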
@@ -812,19 +842,14 @@ insert:
btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
- u64 tmp = total_bytes + root->sectorsize;
- u64 next_sector = sector_sum->bytenr;
- struct btrfs_sector_sum *next = sector_sum + 1;
+ u64 tmp;
- while (tmp < sums->len) {
- if (next_sector + root->sectorsize != next->bytenr)
- break;
- tmp += root->sectorsize;
- next_sector = next->bytenr;
- next++;
- }
- tmp = min(tmp, next_offset - file_key.offset);
+ tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+ root->sectorsize);
tmp >>= root->fs_info->sb->s_blocksize_bits;
+ tmp = min(tmp, (next_offset - file_key.offset) >>
+ root->fs_info->sb->s_blocksize_bits);
+
tmp = max((u64)1, tmp);
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
ins_size = csum_size * tmp;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index aeb84469d2c4..af1d0605a5c1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -30,11 +30,11 @@
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
@@ -374,6 +374,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
atomic_inc(&fs_info->defrag_running);
while(1) {
+ /* Pause the auto defragger. */
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+ &fs_info->fs_state))
+ break;
+
if (!__need_auto_defrag(fs_info->tree_root))
break;
@@ -505,8 +510,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64)root->sectorsize - 1);
- num_bytes = (write_bytes + pos - start_pos +
- root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1225,7 +1229,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct extent_state *cached_state = NULL;
int i;
unsigned long index = pos >> PAGE_CACHE_SHIFT;
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
int err = 0;
int faili = 0;
@@ -1312,7 +1316,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
struct iov_iter *i,
loff_t pos)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
unsigned long first_index;
@@ -1500,7 +1504,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
loff_t *ppos = &iocb->ki_pos;
u64 start_pos;
@@ -1544,7 +1548,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
mutex_unlock(&inode->i_mutex);
err = -EROFS;
goto out;
@@ -1627,7 +1631,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
*/
if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
&BTRFS_I(inode)->runtime_flags)) {
- btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ /*
+ * We need to block on a committing transaction to keep us from
+ * throwing an ordered operation onto the list and causing
+ * something like sync to deadlock trying to flush out this
+ * inode.
+ */
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+ btrfs_end_transaction(trans, root);
if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(inode->i_mapping);
}
@@ -1654,16 +1671,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
struct btrfs_trans_handle *trans;
+ bool full_sync = 0;
trace_btrfs_sync_file(file, datasync);
/*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
- * multi-task, and make the performance up.
+ * multi-task, and make the performance up. See
+ * btrfs_wait_ordered_range for an explanation of the ASYNC check.
*/
atomic_inc(&BTRFS_I(inode)->sync_writers);
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
if (ret)
return ret;
@@ -1675,7 +1697,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* range being left.
*/
atomic_inc(&root->log_batch);
- btrfs_wait_ordered_range(inode, start, end - start + 1);
+ full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ if (full_sync)
+ btrfs_wait_ordered_range(inode, start, end - start + 1);
atomic_inc(&root->log_batch);
/*
@@ -1742,13 +1767,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret != BTRFS_NO_LOG_SYNC) {
if (ret > 0) {
+ /*
+ * If we didn't already wait for ordered extents we need
+ * to do that now.
+ */
+ if (!full_sync)
+ btrfs_wait_ordered_range(inode, start,
+ end - start + 1);
ret = btrfs_commit_transaction(trans, root);
} else {
ret = btrfs_sync_log(trans, root);
- if (ret == 0)
+ if (ret == 0) {
ret = btrfs_end_transaction(trans, root);
- else
+ } else {
+ if (!full_sync)
+ btrfs_wait_ordered_range(inode, start,
+ end -
+ start + 1);
ret = btrfs_commit_transaction(trans, root);
+ }
}
} else {
ret = btrfs_end_transaction(trans, root);
@@ -2102,7 +2139,7 @@ out:
static long btrfs_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct extent_state *cached_state = NULL;
u64 cur_offset;
u64 last_byte;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0be7a8742a43..1f84fc09c1a8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ max_bitmaps = max(max_bitmaps, 1);
+
BUG_ON(ctl->total_bitmaps > max_bitmaps);
/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
}
static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ unsigned long align)
{
struct btrfs_free_space *entry;
struct rb_node *node;
+ u64 ctl_off;
+ u64 tmp;
+ u64 align_off;
int ret;
if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
if (entry->bytes < *bytes)
continue;
+ /* make sure the space returned is big enough
+ * to match our requested alignment
+ */
+ if (*bytes >= align) {
+ ctl_off = entry->offset - ctl->start;
+ tmp = ctl_off + align - 1;
+ do_div(tmp, align);
+ tmp = tmp * align + ctl->start;
+ align_off = tmp - entry->offset;
+ } else {
+ align_off = 0;
+ tmp = entry->offset;
+ }
+
+ if (entry->bytes < *bytes + align_off)
+ continue;
+
if (entry->bitmap) {
- ret = search_bitmap(ctl, entry, offset, bytes);
- if (!ret)
+ ret = search_bitmap(ctl, entry, &tmp, bytes);
+ if (!ret) {
+ *offset = tmp;
return entry;
+ }
continue;
}
- *offset = entry->offset;
- *bytes = entry->bytes;
+ *offset = tmp;
+ *bytes = entry->bytes - align_off;
return entry;
}
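For an aligned request, find_free_space() rounds the candidate entry's offset up to the next multiple of the alignment measured from ctl->start, and the bytes skipped by that shift (align_off) must still fit inside the entry. A worked example of the round-up with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ctl_start    = 1048576;        /* start of the block group */
        uint64_t entry_offset = 1056768;        /* free-space entry offset */
        uint64_t align        = 65536;          /* full stripe length */

        /* Mirror of the find_free_space() computation. */
        uint64_t ctl_off = entry_offset - ctl_start;            /* 8192 */
        uint64_t tmp = (ctl_off + align - 1) / align;           /* round up */
        tmp = tmp * align + ctl_start;                          /* aligned offset */
        uint64_t align_off = tmp - entry_offset;                /* skipped lead-in */

        printf("aligned offset %llu, align_off %llu\n",
               (unsigned long long)tmp, (unsigned long long)align_off);
        return 0;
}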
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
}
/*
- * some block groups are so tiny they can't be enveloped by a bitmap, so
- * don't even bother to create a bitmap for this
+ * The original block groups from mkfs can be really small, like 8
+ * megabytes, so don't bother with a bitmap for those entries. However
+ * some block groups can be smaller than what a bitmap would cover but
+ * are still large enough that they could overflow the 32k memory limit,
+ * so still allow those block groups to have a bitmap
+ * entry.
*/
- if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
return false;
return true;
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *entry = NULL;
u64 bytes_search = bytes + empty_size;
u64 ret = 0;
+ u64 align_gap = 0;
+ u64 align_gap_len = 0;
spin_lock(&ctl->tree_lock);
- entry = find_free_space(ctl, &offset, &bytes_search);
+ entry = find_free_space(ctl, &offset, &bytes_search,
+ block_group->full_stripe_len);
if (!entry)
goto out;
@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
if (!entry->bytes)
free_bitmap(ctl, entry);
} else {
+
unlink_free_space(ctl, entry);
- entry->offset += bytes;
- entry->bytes -= bytes;
+ align_gap_len = offset - entry->offset;
+ align_gap = entry->offset;
+
+ entry->offset = offset + bytes;
+ WARN_ON(entry->bytes < bytes + align_gap_len);
+
+ entry->bytes -= bytes + align_gap_len;
if (!entry->bytes)
kmem_cache_free(btrfs_free_space_cachep, entry);
else
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
out:
spin_unlock(&ctl->tree_lock);
+ if (align_gap_len)
+ __btrfs_add_free_space(ctl, align_gap, align_gap_len);
return ret;
}
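When the aligned allocation leaves a gap of align_gap_len bytes in front of the returned offset, btrfs_find_space_for_alloc() gives that gap back through __btrfs_add_free_space() once tree_lock is dropped. Continuing the numbers from the alignment example, this is how the original entry splits (illustrative values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Numbers carried over from the alignment example above. */
        uint64_t entry_offset = 1056768, entry_bytes = 262144;
        uint64_t offset = 1114112;              /* aligned allocation start */
        uint64_t bytes = 131072;                /* requested size */

        uint64_t align_gap     = entry_offset;          /* gap re-added later */
        uint64_t align_gap_len = offset - entry_offset; /* 57344 bytes */

        /* What remains of the entry after the allocation is carved out. */
        uint64_t new_offset = offset + bytes;
        uint64_t new_bytes  = entry_bytes - bytes - align_gap_len;

        printf("gap [%llu,+%llu), remainder [%llu,+%llu)\n",
               (unsigned long long)align_gap, (unsigned long long)align_gap_len,
               (unsigned long long)new_offset, (unsigned long long)new_bytes);
        return 0;
}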
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index cc93b23ca352..c226daefd65d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -39,12 +39,13 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
+#include <linux/btrfs.h>
+#include <linux/blkdev.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
@@ -54,6 +55,7 @@
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
+#include "backref.h"
struct btrfs_iget_args {
u64 ino;
@@ -231,8 +233,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
- u64 aligned_end = (end + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ u64 aligned_end = ALIGN(end, root->sectorsize);
u64 data_len = inline_len;
int ret;
@@ -265,6 +266,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
return 1;
}
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
@@ -389,7 +391,7 @@ again:
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
- num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+ num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
@@ -488,15 +490,13 @@ cont:
* up to a block size boundary so the allocator does sane
* things
*/
- total_compressed = (total_compressed + blocksize - 1) &
- ~(blocksize - 1);
+ total_compressed = ALIGN(total_compressed, blocksize);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
- total_in = (total_in + PAGE_CACHE_SIZE - 1) &
- ~(PAGE_CACHE_SIZE - 1);
+ total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
@@ -608,7 +608,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
if (list_empty(&async_cow->extents))
return 0;
-
+again:
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
struct async_extent, list);
@@ -648,6 +648,8 @@ retry:
async_extent->ram_size - 1,
btrfs_get_extent,
WB_SYNC_ALL);
+ else if (ret)
+ unlock_page(async_cow->locked_page);
kfree(async_extent);
cond_resched();
continue;
@@ -672,6 +674,7 @@ retry:
if (ret) {
int i;
+
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]);
@@ -679,12 +682,10 @@ retry:
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
- unlock_extent(io_tree, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1);
+
if (ret == -ENOSPC)
goto retry;
- goto out_free; /* JDM: Requeue? */
+ goto out_free;
}
/*
@@ -696,10 +697,13 @@ retry:
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
- BUG_ON(!em); /* -ENOMEM */
+ if (!em)
+ goto out_free_reserve;
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
@@ -726,6 +730,9 @@ retry:
async_extent->ram_size - 1, 0);
}
+ if (ret)
+ goto out_free_reserve;
+
ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start,
ins.objectid,
@@ -733,7 +740,8 @@ retry:
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out_free_reserve;
/*
* clear dirty, set writeback and unlock the pages.
@@ -754,18 +762,30 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
-
- BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
+ if (ret)
+ goto out;
cond_resched();
}
ret = 0;
out:
return ret;
+out_free_reserve:
+ btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
+ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+ async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ NULL, EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_UNLOCK |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
kfree(async_extent);
- goto out;
+ goto again;
}
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -834,7 +854,7 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
BUG_ON(btrfs_is_free_space_inode(inode));
- num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+ num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
@@ -892,6 +912,8 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
em->orig_start = em->start;
ram_size = ins.offset;
em->len = ins.offset;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
@@ -1338,6 +1360,8 @@ out_check:
em->block_start = disk_bytenr;
em->orig_block_len = disk_num_bytes;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_FILLING, &em->flags);
em->generation = -1;
@@ -1508,14 +1532,22 @@ static void btrfs_set_bit_hook(struct inode *inode,
spin_unlock(&BTRFS_I(inode)->lock);
}
- spin_lock(&root->fs_info->delalloc_lock);
+ __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
+ root->fs_info->delalloc_batch);
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
- root->fs_info->delalloc_bytes += len;
- if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
- list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
- &root->fs_info->delalloc_inodes);
+ if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags)) {
+ spin_lock(&root->fs_info->delalloc_lock);
+ if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+ list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
+ &root->fs_info->delalloc_inodes);
+ set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+ spin_unlock(&root->fs_info->delalloc_lock);
}
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -1550,15 +1582,22 @@ static void btrfs_clear_bit_hook(struct inode *inode,
&& do_list)
btrfs_free_reserved_data_space(inode, len);
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->delalloc_bytes -= len;
+ __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
+ root->fs_info->delalloc_batch);
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes -= len;
-
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
- !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
- list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+ test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags)) {
+ spin_lock(&root->fs_info->delalloc_lock);
+ if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+ list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+ spin_unlock(&root->fs_info->delalloc_lock);
}
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -1566,7 +1605,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
*/
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
@@ -1581,7 +1620,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
length = bio->bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, READ, logical,
+ ret = btrfs_map_block(root->fs_info, rw, logical,
&map_length, NULL, 0);
/* Will always return 0 with map_multi == NULL */
BUG_ON(ret < 0);
@@ -1892,6 +1931,640 @@ out:
return ret;
}
+/* snapshot-aware defrag */
+struct sa_defrag_extent_backref {
+ struct rb_node node;
+ struct old_sa_defrag_extent *old;
+ u64 root_id;
+ u64 inum;
+ u64 file_pos;
+ u64 extent_offset;
+ u64 num_bytes;
+ u64 generation;
+};
+
+struct old_sa_defrag_extent {
+ struct list_head list;
+ struct new_sa_defrag_extent *new;
+
+ u64 extent_offset;
+ u64 bytenr;
+ u64 offset;
+ u64 len;
+ int count;
+};
+
+struct new_sa_defrag_extent {
+ struct rb_root root;
+ struct list_head head;
+ struct btrfs_path *path;
+ struct inode *inode;
+ u64 file_pos;
+ u64 len;
+ u64 bytenr;
+ u64 disk_len;
+ u8 compress_type;
+};
+
+static int backref_comp(struct sa_defrag_extent_backref *b1,
+ struct sa_defrag_extent_backref *b2)
+{
+ if (b1->root_id < b2->root_id)
+ return -1;
+ else if (b1->root_id > b2->root_id)
+ return 1;
+
+ if (b1->inum < b2->inum)
+ return -1;
+ else if (b1->inum > b2->inum)
+ return 1;
+
+ if (b1->file_pos < b2->file_pos)
+ return -1;
+ else if (b1->file_pos > b2->file_pos)
+ return 1;
+
+ /*
+ * [------------------------------] ===> (a range of space)
+ * |<--->| |<---->| =============> (fs/file tree A)
+ * |<---------------------------->| ===> (fs/file tree B)
+ *
+ * A range of space can refer to two file extents in one tree while
+ * referring to only one file extent in another tree.
+ *
+ * So we may process a disk offset more than once (two extents in A)
+ * and land on the same extent (one extent in B), then insert two
+ * identical backrefs (both referring to the extent in B).
+ */
+ return 0;
+}
+
+static void backref_insert(struct rb_root *root,
+ struct sa_defrag_extent_backref *backref)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sa_defrag_extent_backref *entry;
+ int ret;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
+
+ ret = backref_comp(backref, entry);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&backref->node, parent, p);
+ rb_insert_color(&backref->node, root);
+}
+
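Backrefs for the snapshot-aware defrag relink are kept in an rb-tree ordered lexicographically by (root_id, inum, file_pos); as the diagram in backref_comp() above notes, equal keys can occur and are simply inserted to the right-hand side. A userspace sketch of the same three-field comparator:

#include <stdint.h>
#include <stdio.h>

struct backref { uint64_t root_id, inum, file_pos; };

/* Lexicographic compare on (root_id, inum, file_pos), as in backref_comp(). */
static int backref_cmp(const struct backref *a, const struct backref *b)
{
        if (a->root_id != b->root_id)
                return a->root_id < b->root_id ? -1 : 1;
        if (a->inum != b->inum)
                return a->inum < b->inum ? -1 : 1;
        if (a->file_pos != b->file_pos)
                return a->file_pos < b->file_pos ? -1 : 1;
        return 0;       /* duplicates are allowed and go to the right */
}

int main(void)
{
        struct backref a = { 5, 257, 0 }, b = { 5, 257, 4096 };

        printf("cmp(a, b) = %d\n", backref_cmp(&a, &b));
        return 0;
}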
+/*
+ * Note the backref might have changed, and in this case we just return 0.
+ */
+static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
+ void *ctx)
+{
+ struct btrfs_file_extent_item *extent;
+ struct btrfs_fs_info *fs_info;
+ struct old_sa_defrag_extent *old = ctx;
+ struct new_sa_defrag_extent *new = old->new;
+ struct btrfs_path *path = new->path;
+ struct btrfs_key key;
+ struct btrfs_root *root;
+ struct sa_defrag_extent_backref *backref;
+ struct extent_buffer *leaf;
+ struct inode *inode = new->inode;
+ int slot;
+ int ret;
+ u64 extent_offset;
+ u64 num_bytes;
+
+ if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
+ inum == btrfs_ino(inode))
+ return 0;
+
+ key.objectid = root_id;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(root)) {
+ if (PTR_ERR(root) == -ENOENT)
+ return 0;
+ WARN_ON(1);
+ pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
+ inum, offset, root_id);
+ return PTR_ERR(root);
+ }
+
+ key.objectid = inum;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ if (offset > (u64)-1 << 32)
+ key.offset = 0;
+ else
+ key.offset = offset;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ while (1) {
+ cond_resched();
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+ continue;
+ }
+
+ path->slots[0]++;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ if (key.objectid > inum)
+ goto out;
+
+ if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ extent = btrfs_item_ptr(leaf, slot,
+ struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
+ continue;
+
+ extent_offset = btrfs_file_extent_offset(leaf, extent);
+ if (key.offset - extent_offset != offset)
+ continue;
+
+ num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+ if (extent_offset >= old->extent_offset + old->offset +
+ old->len || extent_offset + num_bytes <=
+ old->extent_offset + old->offset)
+ continue;
+
+ break;
+ }
+
+ backref = kmalloc(sizeof(*backref), GFP_NOFS);
+ if (!backref) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ backref->root_id = root_id;
+ backref->inum = inum;
+ backref->file_pos = offset + extent_offset;
+ backref->num_bytes = num_bytes;
+ backref->extent_offset = extent_offset;
+ backref->generation = btrfs_file_extent_generation(leaf, extent);
+ backref->old = old;
+ backref_insert(&new->root, backref);
+ old->count++;
+out:
+ btrfs_release_path(path);
+ WARN_ON(ret);
+ return ret;
+}
+
+static noinline bool record_extent_backrefs(struct btrfs_path *path,
+ struct new_sa_defrag_extent *new)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
+ struct old_sa_defrag_extent *old, *tmp;
+ int ret;
+
+ new->path = path;
+
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+ path, record_one_backref,
+ old);
+ BUG_ON(ret < 0 && ret != -ENOENT);
+
+ /* no backref to be processed for this extent */
+ if (!old->count) {
+ list_del(&old->list);
+ kfree(old);
+ }
+ }
+
+ if (list_empty(&new->head))
+ return false;
+
+ return true;
+}
+
+static int relink_is_mergable(struct extent_buffer *leaf,
+ struct btrfs_file_extent_item *fi,
+ u64 disk_bytenr)
+{
+ if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
+ return 0;
+
+ if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
+ return 0;
+
+ if (btrfs_file_extent_compression(leaf, fi) ||
+ btrfs_file_extent_encryption(leaf, fi) ||
+ btrfs_file_extent_other_encoding(leaf, fi))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Note the backref might have changed, and in this case we just return 0.
+ */
+static noinline int relink_extent_backref(struct btrfs_path *path,
+ struct sa_defrag_extent_backref *prev,
+ struct sa_defrag_extent_backref *backref)
+{
+ struct btrfs_file_extent_item *extent;
+ struct btrfs_file_extent_item *item;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct old_sa_defrag_extent *old = backref->old;
+ struct new_sa_defrag_extent *new = old->new;
+ struct inode *src_inode = new->inode;
+ struct inode *inode;
+ struct extent_state *cached = NULL;
+ int ret = 0;
+ u64 start;
+ u64 len;
+ u64 lock_start;
+ u64 lock_end;
+ bool merge = false;
+ int index;
+
+ if (prev && prev->root_id == backref->root_id &&
+ prev->inum == backref->inum &&
+ prev->file_pos + prev->num_bytes == backref->file_pos)
+ merge = true;
+
+ /* step 1: get root */
+ key.objectid = backref->root_id;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ fs_info = BTRFS_I(src_inode)->root->fs_info;
+ index = srcu_read_lock(&fs_info->subvol_srcu);
+
+ root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(root)) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ if (PTR_ERR(root) == -ENOENT)
+ return 0;
+ return PTR_ERR(root);
+ }
+ if (btrfs_root_refs(&root->root_item) == 0) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ /* translate ENOENT to 0 */
+ return 0;
+ }
+
+ /* step 2: get inode */
+ key.objectid = backref->inum;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ if (IS_ERR(inode)) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ return 0;
+ }
+
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+
+ /* step 3: relink backref */
+ lock_start = backref->file_pos;
+ lock_end = backref->file_pos + backref->num_bytes - 1;
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+ 0, &cached);
+
+ ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ goto out_unlock;
+ }
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_unlock;
+ }
+
+ key.objectid = backref->inum;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = backref->file_pos;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ goto out_free_path;
+ } else if (ret > 0) {
+ ret = 0;
+ goto out_free_path;
+ }
+
+ extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_generation(path->nodes[0], extent) !=
+ backref->generation)
+ goto out_free_path;
+
+ btrfs_release_path(path);
+
+ start = backref->file_pos;
+ if (backref->extent_offset < old->extent_offset + old->offset)
+ start += old->extent_offset + old->offset -
+ backref->extent_offset;
+
+ len = min(backref->extent_offset + backref->num_bytes,
+ old->extent_offset + old->offset + old->len);
+ len -= max(backref->extent_offset, old->extent_offset + old->offset);
+
+ ret = btrfs_drop_extents(trans, root, inode, start,
+ start + len, 1);
+ if (ret)
+ goto out_free_path;
+again:
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ if (merge) {
+ struct btrfs_file_extent_item *fi;
+ u64 extent_len;
+ struct btrfs_key found_key;
+
+ ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
+ if (ret < 0)
+ goto out_free_path;
+
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_len = btrfs_file_extent_num_bytes(leaf, fi);
+
+ if (relink_is_mergable(leaf, fi, new->bytenr) &&
+ extent_len + found_key.offset == start) {
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_len + len);
+ btrfs_mark_buffer_dirty(leaf);
+ inode_add_bytes(inode, len);
+
+ ret = 1;
+ goto out_free_path;
+ } else {
+ merge = false;
+ btrfs_release_path(path);
+ goto again;
+ }
+ }
+
+ ret = btrfs_insert_empty_item(trans, root, path, &key,
+ sizeof(*extent));
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+ }
+
+ leaf = path->nodes[0];
+ item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
+ btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
+ btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
+ btrfs_set_file_extent_num_bytes(leaf, item, len);
+ btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
+ btrfs_set_file_extent_generation(leaf, item, trans->transid);
+ btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_compression(leaf, item, new->compress_type);
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+ inode_add_bytes(inode, len);
+
+ ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ new->disk_len, 0,
+ backref->root_id, backref->inum,
+ new->file_pos, 0); /* start - extent_offset */
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+ }
+
+ ret = 1;
+out_free_path:
+ btrfs_release_path(path);
+ btrfs_end_transaction(trans, root);
+out_unlock:
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+ &cached, GFP_NOFS);
+ iput(inode);
+ return ret;
+}
+
+static void relink_file_extents(struct new_sa_defrag_extent *new)
+{
+ struct btrfs_path *path;
+ struct old_sa_defrag_extent *old, *tmp;
+ struct sa_defrag_extent_backref *backref;
+ struct sa_defrag_extent_backref *prev = NULL;
+ struct inode *inode;
+ struct btrfs_root *root;
+ struct rb_node *node;
+ int ret;
+
+ inode = new->inode;
+ root = BTRFS_I(inode)->root;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return;
+
+ if (!record_extent_backrefs(path, new)) {
+ btrfs_free_path(path);
+ goto out;
+ }
+ btrfs_release_path(path);
+
+ while (1) {
+ node = rb_first(&new->root);
+ if (!node)
+ break;
+ rb_erase(node, &new->root);
+
+ backref = rb_entry(node, struct sa_defrag_extent_backref, node);
+
+ ret = relink_extent_backref(path, prev, backref);
+ WARN_ON(ret < 0);
+
+ kfree(prev);
+
+ if (ret == 1)
+ prev = backref;
+ else
+ prev = NULL;
+ cond_resched();
+ }
+ kfree(prev);
+
+ btrfs_free_path(path);
+
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ list_del(&old->list);
+ kfree(old);
+ }
+out:
+ atomic_dec(&root->fs_info->defrag_running);
+ wake_up(&root->fs_info->transaction_wait);
+
+ kfree(new);
+}
+
+static struct new_sa_defrag_extent *
+record_old_file_extents(struct inode *inode,
+ struct btrfs_ordered_extent *ordered)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct old_sa_defrag_extent *old, *tmp;
+ struct new_sa_defrag_extent *new;
+ int ret;
+
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new)
+ return NULL;
+
+ new->inode = inode;
+ new->file_pos = ordered->file_offset;
+ new->len = ordered->len;
+ new->bytenr = ordered->start;
+ new->disk_len = ordered->disk_len;
+ new->compress_type = ordered->compress_type;
+ new->root = RB_ROOT;
+ INIT_LIST_HEAD(&new->head);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ goto out_kfree;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = new->file_pos;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out_free_path;
+ if (ret > 0 && path->slots[0] > 0)
+ path->slots[0]--;
+
+ /* find out all the old extents for the file range */
+ while (1) {
+ struct btrfs_file_extent_item *extent;
+ struct extent_buffer *l;
+ int slot;
+ u64 num_bytes;
+ u64 offset;
+ u64 end;
+ u64 disk_bytenr;
+ u64 extent_offset;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out_free_list;
+ else if (ret > 0)
+ break;
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(l, &key, slot);
+
+ if (key.objectid != btrfs_ino(inode))
+ break;
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+ if (key.offset >= new->file_pos + new->len)
+ break;
+
+ extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
+
+ num_bytes = btrfs_file_extent_num_bytes(l, extent);
+ if (key.offset + num_bytes < new->file_pos)
+ goto next;
+
+ disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
+ if (!disk_bytenr)
+ goto next;
+
+ extent_offset = btrfs_file_extent_offset(l, extent);
+
+ old = kmalloc(sizeof(*old), GFP_NOFS);
+ if (!old)
+ goto out_free_list;
+
+ offset = max(new->file_pos, key.offset);
+ end = min(new->file_pos + new->len, key.offset + num_bytes);
+
+ old->bytenr = disk_bytenr;
+ old->extent_offset = extent_offset;
+ old->offset = offset - key.offset;
+ old->len = end - offset;
+ old->new = new;
+ old->count = 0;
+ list_add_tail(&old->list, &new->head);
+next:
+ path->slots[0]++;
+ cond_resched();
+ }
+
+ btrfs_free_path(path);
+ atomic_inc(&root->fs_info->defrag_running);
+
+ return new;
+
+out_free_list:
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ list_del(&old->list);
+ kfree(old);
+ }
+out_free_path:
+ btrfs_free_path(path);
+out_kfree:
+ kfree(new);
+ return NULL;
+}
+
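record_old_file_extents() clips every pre-existing file extent against the newly written range [file_pos, file_pos + len): only the overlapping slice, from max(file_pos, key.offset) to min(file_pos + len, key.offset + num_bytes), is remembered in old->offset and old->len. A small numeric illustration (values chosen for the example):

#include <stdint.h>
#include <stdio.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        /* Newly defragged range and one old file extent (illustrative). */
        uint64_t file_pos = 65536,  len = 131072;       /* new extent */
        uint64_t key_off  = 32768,  num_bytes = 65536;  /* old extent item */

        uint64_t offset = max_u64(file_pos, key_off);                   /* 65536 */
        uint64_t end    = min_u64(file_pos + len, key_off + num_bytes); /* 98304 */

        /* Fields stored in struct old_sa_defrag_extent. */
        printf("old->offset = %llu, old->len = %llu\n",
               (unsigned long long)(offset - key_off),
               (unsigned long long)(end - offset));
        return 0;
}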
/*
* helper function for btrfs_finish_ordered_io, this
* just reads in some of the csum leaves to prime them into ram
@@ -1909,6 +2582,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
+ struct new_sa_defrag_extent *new = NULL;
int compress_type = 0;
int ret;
bool nolock;
@@ -1943,6 +2617,20 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
+ ret = test_range_bit(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset + ordered_extent->len - 1,
+ EXTENT_DEFRAG, 1, cached_state);
+ if (ret) {
+ u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+ if (last_snapshot >= BTRFS_I(inode)->generation)
+ /* the inode is shared */
+ new = record_old_file_extents(inode, ordered_extent);
+
+ clear_extent_bit(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset + ordered_extent->len - 1,
+ EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
+ }
+
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
@@ -2001,17 +2689,33 @@ out:
if (trans)
btrfs_end_transaction(trans, root);
- if (ret)
+ if (ret) {
clear_extent_uptodate(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, NULL, GFP_NOFS);
+ /*
+ * If the ordered extent had an IOERR or something else went
+ * wrong we need to return the space for this ordered extent
+ * back to the allocator.
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
+ !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
+ btrfs_free_reserved_extent(root, ordered_extent->start,
+ ordered_extent->disk_len);
+ }
+
+
/*
* This needs to be done to make sure anybody waiting knows we are done
* updating everything for this ordered extent.
*/
btrfs_remove_ordered_extent(inode, ordered_extent);
+ /* for snapshot-aware defrag */
+ if (new)
+ relink_file_extents(new);
+
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
@@ -2062,7 +2766,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror)
{
- size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
+ size_t offset = start - page_offset(page);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
char *kaddr;
@@ -2167,11 +2871,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
}
}
-enum btrfs_orphan_cleanup_state {
- ORPHAN_CLEANUP_STARTED = 1,
- ORPHAN_CLEANUP_DONE = 2,
-};
-
/*
* This is called in transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
@@ -2469,6 +3168,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
*/
set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
+ atomic_inc(&root->orphan_inodes);
/* if we have links, this was a truncate, lets do that */
if (inode->i_nlink) {
@@ -2491,6 +3191,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
goto out;
ret = btrfs_truncate(inode);
+ if (ret)
+ btrfs_orphan_del(NULL, inode);
} else {
nr_unlink++;
}
@@ -2709,34 +3411,41 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
- btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
- btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
- btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_inode_mode(leaf, item, inode->i_mode);
- btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
+
+ btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
+ btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+ btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
+ &token);
+ btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
+ btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_nsec, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_nsec, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_nsec, &token);
- btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
- btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
- btrfs_set_inode_sequence(leaf, item, inode->i_version);
- btrfs_set_inode_transid(leaf, item, trans->transid);
- btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
- btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
- btrfs_set_inode_block_group(leaf, item, 0);
+ btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
+ &token);
+ btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
+ &token);
+ btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
+ btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
+ btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
+ btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
/*
@@ -3304,7 +4013,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
- u64 mask = root->sectorsize - 1;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
@@ -3328,7 +4036,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* extent just the way it is.
*/
if (root->ref_cows || root == root->fs_info->tree_root)
- btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
+ btrfs_drop_extent_cache(inode, ALIGN(new_size,
+ root->sectorsize), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
@@ -3407,10 +4116,9 @@ search_again:
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
- extent_num_bytes = new_size -
- found_key.offset + root->sectorsize - 1;
- extent_num_bytes = extent_num_bytes &
- ~((u64)root->sectorsize - 1);
+ extent_num_bytes = ALIGN(new_size -
+ found_key.offset,
+ root->sectorsize);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
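A quick illustrative sketch (not part of the patch): the ALIGN() conversions in the hunks above and below are purely cosmetic. For a power-of-two sector size the kernel macro expands to the same round-up mask arithmetic that the open-coded versions used, so behaviour is unchanged:

/* roughly what include/linux/kernel.h provides (the real macro uses typeof) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((u64)(a) - 1))

/* e.g. sectorsize = 4096: ALIGN(3000, 4096) == (3000 + 4095) & ~4095 == 4096,
 * the same result as "(3000 + sectorsize - 1) & ~((u64)sectorsize - 1)"
 */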
@@ -3646,9 +4354,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- u64 mask = root->sectorsize - 1;
- u64 hole_start = (oldsize + mask) & ~mask;
- u64 block_end = (size + mask) & ~mask;
+ u64 hole_start = ALIGN(oldsize, root->sectorsize);
+ u64 block_end = ALIGN(size, root->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
@@ -3681,7 +4388,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
}
last_byte = min(extent_map_end(em), block_end);
- last_byte = (last_byte + mask) & ~mask;
+ last_byte = ALIGN(last_byte, root->sectorsize);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
hole_size = last_byte - cur_offset;
@@ -3832,6 +4539,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
+
+ /* Disable nonlocked read DIO to avoid the endless truncate */
+ btrfs_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ btrfs_inode_resume_unlocked_dio(inode);
+
ret = btrfs_truncate(inode);
if (ret && inode->i_nlink)
btrfs_orphan_del(NULL, inode);
@@ -3904,6 +4617,12 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
+ ret = btrfs_commit_inode_delayed_inode(inode);
+ if (ret) {
+ btrfs_orphan_del(NULL, inode);
+ goto no_delete;
+ }
+
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
@@ -3941,7 +4660,7 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- trans = btrfs_start_transaction_lflush(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
@@ -3955,9 +4674,6 @@ void btrfs_evict_inode(struct inode *inode)
break;
trans->block_rsv = &root->fs_info->trans_block_rsv;
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
-
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root);
@@ -4391,7 +5107,7 @@ unsigned char btrfs_filetype_table[] = {
static int btrfs_real_readdir(struct file *filp, void *dirent,
filldir_t filldir)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
@@ -4854,7 +5570,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM;
}
insert_inode_hash(inode);
@@ -5006,12 +5723,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- err = btrfs_update_inode(trans, root, inode);
- if (err) {
- drop_inode = 1;
- goto out_unlock;
- }
-
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -5396,8 +6107,7 @@ again:
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, item);
- extent_end = (extent_start + size + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ extent_end = ALIGN(extent_start + size, root->sectorsize);
}
if (start >= extent_end) {
@@ -5469,8 +6179,7 @@ again:
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
- em->len = (copy_size + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ em->len = ALIGN(copy_size, root->sectorsize);
em->orig_block_len = em->len;
em->orig_start = em->start;
if (compress_type) {
@@ -5949,6 +6658,8 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
em->start = start;
em->orig_start = orig_start;
+ em->mod_start = start;
+ em->mod_len = len;
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
@@ -5990,16 +6701,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
int unlock_bits = EXTENT_LOCKED;
- int ret;
+ int ret = 0;
- if (create) {
- ret = btrfs_delalloc_reserve_space(inode, len);
- if (ret)
- return ret;
+ if (create)
unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
- } else {
+ else
len = min_t(u64, len, root->sectorsize);
- }
lockstart = start;
lockend = start + len - 1;
@@ -6011,14 +6718,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
return -ENOTBLK;
- if (create) {
- ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_DELALLOC, NULL,
- &cached_state, GFP_NOFS);
- if (ret)
- goto unlock_err;
- }
-
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@@ -6050,7 +6749,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
- ret = 0;
goto unlock_err;
}
@@ -6148,6 +6846,15 @@ unlock:
*/
if (start + len > i_size_read(inode))
i_size_write(inode, start + len);
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
+ ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockstart + len - 1, EXTENT_DELALLOC, NULL,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret);
}
/*
@@ -6156,24 +6863,9 @@ unlock:
* aren't using if there is any left over space.
*/
if (lockstart < lockend) {
- if (create && len < lockend - lockstart) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockstart + len - 1,
- unlock_bits | EXTENT_DEFRAG, 1, 0,
- &cached_state, GFP_NOFS);
- /*
- * Beside unlock, we also need to cleanup reserved space
- * for the left range by attaching EXTENT_DO_ACCOUNTING.
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree,
- lockstart + len, lockend,
- unlock_bits | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
- } else {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, unlock_bits, 1, 0,
- &cached_state, GFP_NOFS);
- }
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
} else {
free_extent_state(cached_state);
}
@@ -6183,9 +6875,6 @@ unlock:
return 0;
unlock_err:
- if (create)
- unlock_bits |= EXTENT_DO_ACCOUNTING;
-
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
unlock_bits, 1, 0, &cached_state, GFP_NOFS);
return ret;
@@ -6426,19 +7115,24 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int async_submit = 0;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
+ ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(orig_bio);
return -EIO;
}
-
if (map_length >= orig_bio->bi_size) {
bio = orig_bio;
goto submit;
}
- async_submit = 1;
+ /* async crcs make it difficult to collect full stripe writes. */
+ if (btrfs_get_alloc_profile(root, 1) &
+ (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+ async_submit = 0;
+ else
+ async_submit = 1;
+
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
@@ -6480,7 +7174,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio->bi_end_io = btrfs_end_dio_bio;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(root->fs_info, READ,
+ ret = btrfs_map_block(root->fs_info, rw,
start_sector << 9,
&map_length, NULL, 0);
if (ret) {
@@ -6623,15 +7317,60 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ size_t count = 0;
+ int flags = 0;
+ bool wakeup = true;
+ bool relock = false;
+ ssize_t ret;
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
offset, nr_segs))
return 0;
- return __blockdev_direct_IO(rw, iocb, inode,
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
- iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
- btrfs_submit_direct, 0);
+ atomic_inc(&inode->i_dio_count);
+ smp_mb__after_atomic_inc();
+
+ if (rw & WRITE) {
+ count = iov_length(iov, nr_segs);
+ /*
+ * If the write DIO is beyond the EOF, we need to update
+ * the isize, but it is protected by i_mutex, so we cannot
+ * unlock the i_mutex in this case.
+ */
+ if (offset + count <= inode->i_size) {
+ mutex_unlock(&inode->i_mutex);
+ relock = true;
+ }
+ ret = btrfs_delalloc_reserve_space(inode, count);
+ if (ret)
+ goto out;
+ } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags))) {
+ inode_dio_done(inode);
+ flags = DIO_LOCKING | DIO_SKIP_HOLES;
+ wakeup = false;
+ }
+
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+ iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+ btrfs_submit_direct, flags);
+ if (rw & WRITE) {
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ btrfs_delalloc_release_space(inode, count);
+ else if (ret >= 0 && (size_t)ret < count)
+ btrfs_delalloc_release_space(inode,
+ count - (size_t)ret);
+ else
+ btrfs_delalloc_release_metadata(inode, 0);
+ }
+out:
+ if (wakeup)
+ inode_dio_done(inode);
+ if (relock)
+ mutex_lock(&inode->i_mutex);
+
+ return ret;
}
#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
@@ -6735,8 +7474,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
return;
}
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
- ordered = btrfs_lookup_ordered_extent(inode,
- page_offset(page));
+ ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
if (ordered) {
/*
* IO on this page will never be started, so we need
@@ -6791,7 +7529,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = fdentry(vma->vm_file)->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
@@ -7216,8 +7954,9 @@ int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ /* the snap/subvol tree is being deleted */
if (btrfs_root_refs(&root->root_item) == 0 &&
- !btrfs_is_free_space_inode(inode))
+ root != root->fs_info->tree_root)
return 1;
else
return generic_drop_inode(inode);
@@ -7299,40 +8038,22 @@ fail:
static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
+ u64 delalloc_bytes;
struct inode *inode = dentry->d_inode;
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
+ spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
- ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
+ ALIGN(delalloc_bytes, blocksize)) >> 9;
return 0;
}
-/*
- * If a file is moved, it will inherit the cow and compression flags of the new
- * directory.
- */
-static void fixup_inode_flags(struct inode *dir, struct inode *inode)
-{
- struct btrfs_inode *b_dir = BTRFS_I(dir);
- struct btrfs_inode *b_inode = BTRFS_I(inode);
-
- if (b_dir->flags & BTRFS_INODE_NODATACOW)
- b_inode->flags |= BTRFS_INODE_NODATACOW;
- else
- b_inode->flags &= ~BTRFS_INODE_NODATACOW;
-
- if (b_dir->flags & BTRFS_INODE_COMPRESS) {
- b_inode->flags |= BTRFS_INODE_COMPRESS;
- b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
- } else {
- b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
- BTRFS_INODE_NOCOMPRESS);
- }
-}
-
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -7498,8 +8219,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- fixup_inode_flags(new_dir, old_inode);
-
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
@@ -7583,7 +8302,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);
-again:
+
spin_lock(&root->fs_info->delalloc_lock);
list_splice_init(&root->fs_info->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
@@ -7593,8 +8312,11 @@ again:
list_del_init(&binode->delalloc_inodes);
inode = igrab(&binode->vfs_inode);
- if (!inode)
+ if (!inode) {
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &binode->runtime_flags);
continue;
+ }
list_add_tail(&binode->delalloc_inodes,
&root->fs_info->delalloc_inodes);
@@ -7619,13 +8341,6 @@ again:
btrfs_wait_and_free_delalloc_work(work);
}
- spin_lock(&root->fs_info->delalloc_lock);
- if (!list_empty(&root->fs_info->delalloc_inodes)) {
- spin_unlock(&root->fs_info->delalloc_lock);
- goto again;
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
@@ -7801,8 +8516,9 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
}
}
- ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
- 0, *alloc_hint, &ins, 1);
+ ret = btrfs_reserve_extent(trans, root,
+ min(num_bytes, 256ULL * 1024 * 1024),
+ min_size, 0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 338f2597bf7f..c83086fdda05 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -42,12 +42,12 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
+#include <linux/btrfs.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
@@ -152,7 +152,7 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
- struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
+ struct btrfs_inode *ip = BTRFS_I(file_inode(file));
unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
if (copy_to_user(arg, &flags, sizeof(flags)))
@@ -177,7 +177,7 @@ static int check_flags(unsigned int flags)
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_inode *ip = BTRFS_I(inode);
struct btrfs_root *root = ip->root;
struct btrfs_trans_handle *trans;
@@ -310,7 +310,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
return put_user(inode->i_generation, arg);
}
@@ -363,46 +363,52 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return 0;
}
-static noinline int create_subvol(struct btrfs_root *root,
+static noinline int create_subvol(struct inode *dir,
struct dentry *dentry,
char *name, int namelen,
u64 *async_transid,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;
struct btrfs_root_item root_item;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *new_root;
- struct dentry *parent = dentry->d_parent;
- struct inode *dir;
+ struct btrfs_block_rsv block_rsv;
struct timespec cur_time = CURRENT_TIME;
int ret;
int err;
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
+ u64 qgroup_reserved;
uuid_le new_uuid;
ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret)
return ret;
- dir = parent->d_inode;
-
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
- * 1 - inode item
- * 2 - refs
- * 1 - root item
- * 2 - dir items
+ * The same as for snapshot creation; see the comment
+ * in create_snapshot().
*/
- trans = btrfs_start_transaction(root, 6);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+ 7, &qgroup_reserved);
+ if (ret)
+ return ret;
- ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
- inherit ? *inherit : NULL);
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+
+ ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
if (ret)
goto fail;
@@ -516,6 +522,8 @@ static noinline int create_subvol(struct btrfs_root *root,
BUG_ON(ret);
fail:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
if (async_transid) {
*async_transid = trans->transid;
err = btrfs_commit_transaction_async(trans, root, 1);
@@ -527,13 +535,15 @@ fail:
if (!ret)
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
-
+out:
+ btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
return ret;
}
-static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
- char *name, int namelen, u64 *async_transid,
- bool readonly, struct btrfs_qgroup_inherit **inherit)
+static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ struct dentry *dentry, char *name, int namelen,
+ u64 *async_transid, bool readonly,
+ struct btrfs_qgroup_inherit *inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
@@ -549,23 +559,31 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * 1 - parent dir inode
+ * 2 - dir entries
+ * 1 - root item
+ * 2 - root ref/backref
+ * 1 - root of snapshot
+ */
+ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+ &pending_snapshot->block_rsv, 7,
+ &pending_snapshot->qgroup_reserved);
+ if (ret)
+ goto out;
+
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
- if (inherit) {
- pending_snapshot->inherit = *inherit;
- *inherit = NULL; /* take responsibility to free it */
- }
+ pending_snapshot->dir = dir;
+ pending_snapshot->inherit = inherit;
- trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
+ trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fail;
}
- ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
- BUG_ON(ret);
-
spin_lock(&root->fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
@@ -602,6 +620,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
d_instantiate(dentry, inode);
ret = 0;
fail:
+ btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+ &pending_snapshot->block_rsv,
+ pending_snapshot->qgroup_reserved);
+out:
kfree(pending_snapshot);
return ret;
}
@@ -695,7 +717,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
u64 *async_transid, bool readonly,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
@@ -732,11 +754,11 @@ static noinline int btrfs_mksubvol(struct path *parent,
goto out_up_read;
if (snap_src) {
- error = create_snapshot(snap_src, dentry, name, namelen,
+ error = create_snapshot(snap_src, dir, dentry, name, namelen,
async_transid, readonly, inherit);
} else {
- error = create_subvol(BTRFS_I(dir)->root, dentry,
- name, namelen, async_transid, inherit);
+ error = create_subvol(dir, dentry, name, namelen,
+ async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
@@ -818,7 +840,7 @@ static int find_new_extents(struct btrfs_root *root,
while(1) {
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, newer_than);
+ path, newer_than);
if (ret != 0)
goto none;
if (min_key.objectid != ino)
@@ -1206,6 +1228,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;
+ if (btrfs_defrag_cancelled(root->fs_info)) {
+ printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
+ ret = -EAGAIN;
+ break;
+ }
+
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
&defrag_end, range->flags &
@@ -1320,7 +1348,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
u64 new_size;
u64 old_size;
u64 devid = 1;
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
@@ -1329,9 +1357,6 @@ static noinline int btrfs_ioctl_resize(struct file *file,
int ret = 0;
int mod = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1363,6 +1388,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
*devstr = '\0';
devstr = vol_args->name;
devid = simple_strtoull(devstr, &end, 10);
+ if (!devid) {
+ ret = -EINVAL;
+ goto out_free;
+ }
printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
@@ -1371,7 +1400,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (!device) {
printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
- ret = -EINVAL;
+ ret = -ENODEV;
goto out_free;
}
@@ -1379,7 +1408,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
printk(KERN_INFO "btrfs: resizer unable to apply on "
"readonly device %llu\n",
(unsigned long long)devid);
- ret = -EINVAL;
+ ret = -EPERM;
goto out_free;
}
@@ -1401,7 +1430,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
}
if (device->is_tgtdev_for_dev_replace) {
- ret = -EINVAL;
+ ret = -EPERM;
goto out_free;
}
@@ -1457,7 +1486,7 @@ out:
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
char *name, unsigned long fd, int subvol,
u64 *transid, bool readonly,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
int namelen;
int ret = 0;
@@ -1489,8 +1518,8 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
goto out_drop_write;
}
- src_inode = src.file->f_path.dentry->d_inode;
- if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
+ src_inode = file_inode(src.file);
+ if (src_inode->i_sb != file_inode(file)->i_sb) {
printk(KERN_INFO "btrfs: Snapshot src from "
"another FS\n");
ret = -EINVAL;
@@ -1566,7 +1595,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
- readonly, &inherit);
+ readonly, inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
@@ -1582,7 +1611,7 @@ out:
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
void __user *arg)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
u64 flags = 0;
@@ -1604,7 +1633,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
@@ -1863,7 +1892,7 @@ static noinline int search_ioctl(struct inode *inode,
path->keep_locks = 1;
while(1) {
- ret = btrfs_search_forward(root, &key, &max_key, path, 0,
+ ret = btrfs_search_forward(root, &key, &max_key, path,
sk->min_transid);
if (ret != 0) {
if (ret > 0)
@@ -1898,7 +1927,7 @@ static noinline int btrfs_ioctl_tree_search(struct file *file,
if (IS_ERR(args))
return PTR_ERR(args);
- inode = fdentry(file)->d_inode;
+ inode = file_inode(file);
ret = search_ioctl(inode, args);
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
ret = -EFAULT;
@@ -2008,7 +2037,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
if (IS_ERR(args))
return PTR_ERR(args);
- inode = fdentry(file)->d_inode;
+ inode = file_inode(file);
if (args->treeid == 0)
args->treeid = BTRFS_I(inode)->root->root_key.objectid;
@@ -2035,6 +2064,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
struct btrfs_root *dest = NULL;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv block_rsv;
+ u64 qgroup_reserved;
int namelen;
int ret;
int err = 0;
@@ -2124,12 +2155,23 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (err)
goto out_up_write;
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * One for dir inode, two for dir entries, two for root
+ * ref/backref.
+ */
+ err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+ 5, &qgroup_reserved);
+ if (err)
+ goto out_up_write;
+
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto out_up_write;
+ goto out_release;
}
- trans->block_rsv = &root->fs_info->global_block_rsv;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
ret = btrfs_unlink_subvol(trans, root, dir,
dest->root_key.objectid,
@@ -2159,10 +2201,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
}
}
out_end_trans:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
ret = btrfs_end_transaction(trans, root);
if (ret && !err)
err = ret;
inode->i_flags |= S_DEAD;
+out_release:
+ btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
out_up_write:
up_write(&root->fs_info->subvol_sem);
out_unlock:
@@ -2171,6 +2217,12 @@ out_unlock:
shrink_dcache_sb(root->fs_info->sb);
btrfs_invalidate_inodes(dest);
d_delete(dentry);
+
+ /* the last ref */
+ if (dest->cache_inode) {
+ iput(dest->cache_inode);
+ dest->cache_inode = NULL;
+ }
}
out_dput:
dput(dentry);
@@ -2184,7 +2236,7 @@ out:
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_defrag_range_args *range;
int ret;
@@ -2211,10 +2263,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EPERM;
goto out;
}
- ret = btrfs_defrag_root(root, 0);
+ ret = btrfs_defrag_root(root);
if (ret)
goto out;
- ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
+ ret = btrfs_defrag_root(root->fs_info->extent_root);
break;
case S_IFREG:
if (!(file->f_mode & FMODE_WRITE)) {
@@ -2244,7 +2296,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
/* the rest are all set to zero by kzalloc */
range->len = (u64)-1;
}
- ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
+ ret = btrfs_defrag_file(file_inode(file), file,
range, 0, 0);
if (ret > 0)
ret = 0;
@@ -2292,7 +2344,7 @@ out:
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@@ -2415,7 +2467,7 @@ out:
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 off, u64 olen, u64 destoff)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct fd src_file;
struct inode *src;
@@ -2461,7 +2513,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (src_file.file->f_path.mnt != file->f_path.mnt)
goto out_fput;
- src = src_file.file->f_dentry->d_inode;
+ src = file_inode(src_file.file);
ret = -EINVAL;
if (src == inode)
@@ -2823,7 +2875,7 @@ static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
*/
static long btrfs_ioctl_trans_start(struct file *file)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
@@ -2863,7 +2915,7 @@ out:
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root *new_root;
struct btrfs_dir_item *di;
@@ -3087,7 +3139,7 @@ out:
*/
long btrfs_ioctl_trans_end(struct file *file)
{
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
@@ -3111,7 +3163,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
u64 transid;
int ret;
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
return PTR_ERR(trans);
@@ -3149,7 +3201,7 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_scrub_args *sa;
int ret;
@@ -3289,7 +3341,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
struct inode_fs_paths *ipath = NULL;
struct btrfs_path *path;
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_DAC_READ_SEARCH))
return -EPERM;
path = btrfs_alloc_path();
@@ -3440,7 +3492,7 @@ void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
struct btrfs_balance_control *bctl;
@@ -3630,7 +3682,7 @@ out:
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_quota_ctl_args *sa;
struct btrfs_trans_handle *trans = NULL;
int ret;
@@ -3689,7 +3741,7 @@ drop_write:
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3736,7 +3788,7 @@ drop_write:
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3787,7 +3839,7 @@ drop_write:
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3837,7 +3889,7 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
void __user *arg)
{
struct btrfs_ioctl_received_subvol_args *sa = NULL;
- struct inode *inode = fdentry(file)->d_inode;
+ struct inode *inode = file_inode(file);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root_item *root_item = &root->root_item;
struct btrfs_trans_handle *trans;
@@ -3914,10 +3966,69 @@ out:
return ret;
}
+static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ const char *label = root->fs_info->super_copy->label;
+ size_t len = strnlen(label, BTRFS_LABEL_SIZE);
+ int ret;
+
+ if (len == BTRFS_LABEL_SIZE) {
+ pr_warn("btrfs: label is too long, returning the first %zu bytes\n",
+ --len);
+ }
+
+ mutex_lock(&root->fs_info->volume_mutex);
+ ret = copy_to_user(arg, label, len);
+ mutex_unlock(&root->fs_info->volume_mutex);
+
+ return ret ? -EFAULT : 0;
+}
+
+static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+ struct btrfs_super_block *super_block = root->fs_info->super_copy;
+ struct btrfs_trans_handle *trans;
+ char label[BTRFS_LABEL_SIZE];
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(label, arg, sizeof(label)))
+ return -EFAULT;
+
+ if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
+ pr_err("btrfs: unable to set label with more than %d bytes\n",
+ BTRFS_LABEL_SIZE - 1);
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ mutex_lock(&root->fs_info->volume_mutex);
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_unlock;
+ }
+
+ strcpy(super_block->label, label);
+ ret = btrfs_end_transaction(trans, root);
+
+out_unlock:
+ mutex_unlock(&root->fs_info->volume_mutex);
+ mnt_drop_write_file(file);
+ return ret;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
void __user *argp = (void __user *)arg;
switch (cmd) {
@@ -4014,6 +4125,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_qgroup_limit(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
+ case BTRFS_IOC_GET_FSLABEL:
+ return btrfs_ioctl_get_fslabel(file, argp);
+ case BTRFS_IOC_SET_FSLABEL:
+ return btrfs_ioctl_set_fslabel(file, argp);
}
return -ENOTTY;
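
A rough userspace sketch of how the two new label ioctls could be exercised (illustration only, not part of this patch; it assumes BTRFS_IOC_GET_FSLABEL, BTRFS_IOC_SET_FSLABEL and BTRFS_LABEL_SIZE are exported through linux/btrfs.h, and that setting the label needs CAP_SYS_ADMIN as the kernel side above enforces):

/*
 * fslabel.c - minimal sketch, build with: gcc -o fslabel fslabel.c
 * usage: ./fslabel <path-on-btrfs> [new-label]
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>	/* assumed: BTRFS_IOC_*_FSLABEL, BTRFS_LABEL_SIZE */

int main(int argc, char **argv)
{
	char label[BTRFS_LABEL_SIZE] = { 0 };
	int fd;

	if (argc < 2)
		return 1;

	fd = open(argv[1], O_RDONLY);	/* any file or dir on the mount */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
		printf("label: %.*s\n", (int)sizeof(label), label);
	else
		perror("BTRFS_IOC_GET_FSLABEL");

	if (argc > 2) {			/* optional: set a new label */
		memset(label, 0, sizeof(label));
		strncpy(label, argv[2], sizeof(label) - 1);
		if (ioctl(fd, BTRFS_IOC_SET_FSLABEL, label) != 0)
			perror("BTRFS_IOC_SET_FSLABEL");
	}

	close(fd);
	return 0;
}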
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 2a1762c66041..e95df435d897 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -113,11 +113,10 @@ again:
read_unlock(&eb->lock);
return;
}
- read_unlock(&eb->lock);
- wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
- read_lock(&eb->lock);
if (atomic_read(&eb->blocking_writers)) {
read_unlock(&eb->lock);
+ wait_event(eb->write_lock_wq,
+ atomic_read(&eb->blocking_writers) == 0);
goto again;
}
atomic_inc(&eb->read_locks);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e5ed56729607..dc08d77b717e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -196,6 +196,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
entry->file_offset = file_offset;
entry->start = start;
entry->len = len;
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
+ !(type == BTRFS_ORDERED_NOCOW))
+ entry->csum_bytes_left = disk_len;
entry->disk_len = disk_len;
entry->bytes_left = len;
entry->inode = igrab(inode);
@@ -213,6 +216,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
INIT_LIST_HEAD(&entry->root_extent_list);
INIT_LIST_HEAD(&entry->work_list);
init_completion(&entry->completion);
+ INIT_LIST_HEAD(&entry->log_list);
trace_btrfs_ordered_extent_add(inode, entry);
@@ -270,6 +274,10 @@ void btrfs_add_ordered_sum(struct inode *inode,
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
list_add_tail(&sum->list, &entry->list);
+ WARN_ON(entry->csum_bytes_left < sum->len);
+ entry->csum_bytes_left -= sum->len;
+ if (entry->csum_bytes_left == 0)
+ wake_up(&entry->wait);
spin_unlock_irq(&tree->lock);
}
@@ -405,6 +413,66 @@ out:
return ret == 0;
}
+/* Needs to be called under either a log transaction or the log_mutex */
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
+{
+ struct btrfs_ordered_inode_tree *tree;
+ struct btrfs_ordered_extent *ordered;
+ struct rb_node *n;
+ int index = log->log_transid % 2;
+
+ tree = &BTRFS_I(inode)->ordered_tree;
+ spin_lock_irq(&tree->lock);
+ for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+ spin_lock(&log->log_extents_lock[index]);
+ if (list_empty(&ordered->log_list)) {
+ list_add_tail(&ordered->log_list, &log->logged_list[index]);
+ atomic_inc(&ordered->refs);
+ }
+ spin_unlock(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&tree->lock);
+}
+
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+{
+ struct btrfs_ordered_extent *ordered;
+ int index = transid % 2;
+
+ spin_lock_irq(&log->log_extents_lock[index]);
+ while (!list_empty(&log->logged_list[index])) {
+ ordered = list_first_entry(&log->logged_list[index],
+ struct btrfs_ordered_extent,
+ log_list);
+ list_del_init(&ordered->log_list);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+ &ordered->flags));
+ btrfs_put_ordered_extent(ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
+{
+ struct btrfs_ordered_extent *ordered;
+ int index = transid % 2;
+
+ spin_lock_irq(&log->log_extents_lock[index]);
+ while (!list_empty(&log->logged_list[index])) {
+ ordered = list_first_entry(&log->logged_list[index],
+ struct btrfs_ordered_extent,
+ log_list);
+ list_del_init(&ordered->log_list);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ btrfs_put_ordered_extent(ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
/*
* used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped
@@ -544,10 +612,12 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
* extra check to make sure the ordered operation list really is empty
* before we return
*/
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int wait)
{
struct btrfs_inode *btrfs_inode;
struct inode *inode;
+ struct btrfs_transaction *cur_trans = trans->transaction;
struct list_head splice;
struct list_head works;
struct btrfs_delalloc_work *work, *next;
@@ -558,14 +628,10 @@ int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
-again:
- list_splice_init(&root->fs_info->ordered_operations, &splice);
-
+ list_splice_init(&cur_trans->ordered_operations, &splice);
while (!list_empty(&splice)) {
-
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
-
inode = &btrfs_inode->vfs_inode;
list_del_init(&btrfs_inode->ordered_operations);
@@ -574,24 +640,22 @@ again:
* the inode may be getting freed (in sys_unlink path).
*/
inode = igrab(inode);
-
- if (!wait && inode) {
- list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &root->fs_info->ordered_operations);
- }
-
if (!inode)
continue;
+
+ if (!wait)
+ list_add_tail(&BTRFS_I(inode)->ordered_operations,
+ &cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
work = btrfs_alloc_delalloc_work(inode, wait, 1);
if (!work) {
+ spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations))
list_add_tail(&btrfs_inode->ordered_operations,
&splice);
- spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_tail(&splice,
- &root->fs_info->ordered_operations);
+ &cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
ret = -ENOMEM;
goto out;
@@ -603,9 +667,6 @@ again:
cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
}
- if (wait && !list_empty(&root->fs_info->ordered_operations))
- goto again;
-
spin_unlock(&root->fs_info->ordered_extent_lock);
out:
list_for_each_entry_safe(work, next, &works, list) {
@@ -974,6 +1035,7 @@ out:
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
+ struct btrfs_transaction *cur_trans = trans->transaction;
u64 last_mod;
last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
@@ -988,7 +1050,7 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &root->fs_info->ordered_operations);
+ &cur_trans->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index f29d4bf5fbe7..8eadfe406cdd 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -79,6 +79,8 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
* has done its due diligence in updating
* the isize. */
+#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this
+ ordered extent */
struct btrfs_ordered_extent {
/* logical offset in the file */
@@ -96,6 +98,9 @@ struct btrfs_ordered_extent {
/* number of bytes that still need writing */
u64 bytes_left;
+ /* number of bytes that still need csumming */
+ u64 csum_bytes_left;
+
/*
* the end of the ordered extent which is behind it but
* didn't update disk_i_size. Please see the comment of
@@ -118,6 +123,9 @@ struct btrfs_ordered_extent {
/* list of checksums for insertion when the extent io is done */
struct list_head list;
+ /* If we need to wait on this to be done */
+ struct list_head log_list;
+
/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
wait_queue_head_t wait;
@@ -189,11 +197,15 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void ordered_data_exit(void);
#endif
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 50d95fd190a5..920957ecb27e 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -294,6 +294,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
btrfs_dev_extent_chunk_offset(l, dev_extent),
(unsigned long long)
btrfs_dev_extent_length(l, dev_extent));
+ break;
case BTRFS_DEV_STATS_KEY:
printk(KERN_INFO "\t\tdevice stats\n");
break;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5c856234323..aee4b1cc3d98 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -23,13 +23,13 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/btrfs.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
-#include "ioctl.h"
#include "backref.h"
/* TODO XXX FIXME
@@ -620,7 +620,9 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
key.offset = qgroupid;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -661,7 +663,9 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
key.offset = qgroup->qgroupid;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -702,7 +706,9 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
key.offset = 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -732,33 +738,38 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
{
struct btrfs_path *path;
struct btrfs_key key;
+ struct extent_buffer *leaf = NULL;
int ret;
-
- if (!root)
- return -EINVAL;
+ int nr = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- while (1) {
- key.objectid = 0;
- key.offset = 0;
- key.type = 0;
+ path->leave_spinning = 1;
- path->leave_spinning = 1;
+ key.objectid = 0;
+ key.offset = 0;
+ key.type = 0;
+
+ while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0) {
- if (path->slots[0] == 0)
- break;
- path->slots[0]--;
- } else if (ret < 0) {
+ if (ret < 0)
+ goto out;
+ leaf = path->nodes[0];
+ nr = btrfs_header_nritems(leaf);
+ if (!nr)
break;
- }
-
- ret = btrfs_del_item(trans, root, path);
+ /*
+ * delete the leaves one by one
+ * since the whole tree is going
+ * to be deleted.
+ */
+ path->slots[0] = 0;
+ ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
goto out;
+
btrfs_release_path(path);
}
ret = 0;
@@ -847,6 +858,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
int ret = 0;
spin_lock(&fs_info->qgroup_lock);
+ if (!fs_info->quota_root) {
+ spin_unlock(&fs_info->qgroup_lock);
+ return 0;
+ }
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
quota_root = fs_info->quota_root;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
new file mode 100644
index 000000000000..07222053c7d8
--- /dev/null
+++ b/fs/btrfs/raid56.c
@@ -0,0 +1,2099 @@
+/*
+ * Copyright (C) 2012 Fusion-io All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/iocontext.h>
+#include <linux/capability.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <linux/hash.h>
+#include <linux/list_sort.h>
+#include <linux/raid/xor.h>
+#include <asm/div64.h>
+#include "compat.h"
+#include "ctree.h"
+#include "extent_map.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "raid56.h"
+#include "async-thread.h"
+#include "check-integrity.h"
+#include "rcu-string.h"
+
+/* set when additional merges to this rbio are not allowed */
+#define RBIO_RMW_LOCKED_BIT 1
+
+/*
+ * set when this rbio is sitting in the hash, but it is just a cache
+ * of past RMW
+ */
+#define RBIO_CACHE_BIT 2
+
+/*
+ * set when it is safe to trust the stripe_pages for caching
+ */
+#define RBIO_CACHE_READY_BIT 3
+
+
+#define RBIO_CACHE_SIZE 1024
+
+struct btrfs_raid_bio {
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_bio *bbio;
+
+ /*
+ * logical block numbers for the start of each stripe
+ * The last one or two are p/q. These are sorted,
+ * so raid_map[0] is the start of our full stripe
+ */
+ u64 *raid_map;
+
+ /* while we're doing rmw on a stripe
+ * we put it into a hash table so we can
+ * lock the stripe and merge more rbios
+ * into it.
+ */
+ struct list_head hash_list;
+
+ /*
+ * LRU list for the stripe cache
+ */
+ struct list_head stripe_cache;
+
+ /*
+ * for scheduling work in the helper threads
+ */
+ struct btrfs_work work;
+
+ /*
+ * bio list and bio_list_lock are used
+ * to add more bios into the stripe
+ * in hopes of avoiding the full rmw
+ */
+ struct bio_list bio_list;
+ spinlock_t bio_list_lock;
+
+ /* also protected by the bio_list_lock, the
+ * plug list is used by the plugging code
+ * to collect partial bios while plugged. The
+ * stripe locking code also uses it to hand off
+ * the stripe lock to the next pending IO
+ */
+ struct list_head plug_list;
+
+ /*
+ * flags that tell us if it is safe to
+ * merge with this bio
+ */
+ unsigned long flags;
+
+ /* size of each individual stripe on disk */
+ int stripe_len;
+
+ /* number of data stripes (no p/q) */
+ int nr_data;
+
+ /*
+ * set if we're doing a parity rebuild
+ * for a read from higher up, which is handled
+ * differently from a parity rebuild as part of
+ * rmw
+ */
+ int read_rebuild;
+
+ /* first bad stripe */
+ int faila;
+
+ /* second bad stripe (for raid6 use) */
+ int failb;
+
+ /*
+ * number of pages needed to represent the full
+ * stripe
+ */
+ int nr_pages;
+
+ /*
+ * size of all the bios in the bio_list. This
+ * helps us decide if the rbio maps to a full
+ * stripe or not
+ */
+ int bio_list_bytes;
+
+ atomic_t refs;
+
+ /*
+ * these are two arrays of pointers. We allocate the
+ * rbio big enough to hold them both and set up their
+ * locations when the rbio is allocated
+ */
+
+ /* pointers to pages that we allocated for
+ * reading/writing stripes directly from the disk (including P/Q)
+ */
+ struct page **stripe_pages;
+
+ /*
+ * pointers to the pages in the bio_list. Stored
+ * here for faster lookup
+ */
+ struct page **bio_pages;
+};
+
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
+static void rmw_work(struct btrfs_work *work);
+static void read_rebuild_work(struct btrfs_work *work);
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
+static void async_read_rebuild(struct btrfs_raid_bio *rbio);
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
+static void __free_raid_bio(struct btrfs_raid_bio *rbio);
+static void index_rbio_pages(struct btrfs_raid_bio *rbio);
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+
+/*
+ * the stripe hash table is used for locking, and to collect
+ * bios in hopes of making a full stripe
+ */
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+{
+ struct btrfs_stripe_hash_table *table;
+ struct btrfs_stripe_hash_table *x;
+ struct btrfs_stripe_hash *cur;
+ struct btrfs_stripe_hash *h;
+ int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
+ int i;
+ int table_size;
+
+ if (info->stripe_hash_table)
+ return 0;
+
+ /*
+ * The table is large: it starts at order 4 and can go as high as
+ * order 7 if lock debugging is turned on.
+ *
+ * Try harder to allocate and fall back to vmalloc to lower the
+ * chance of a failing mount.
+ */
+ table_size = sizeof(*table) + sizeof(*h) * num_entries;
+ table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!table) {
+ table = vzalloc(table_size);
+ if (!table)
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&table->cache_lock);
+ INIT_LIST_HEAD(&table->stripe_cache);
+
+ h = table->table;
+
+ for (i = 0; i < num_entries; i++) {
+ cur = h + i;
+ INIT_LIST_HEAD(&cur->hash_list);
+ spin_lock_init(&cur->lock);
+ init_waitqueue_head(&cur->wait);
+ }
+
+ x = cmpxchg(&info->stripe_hash_table, NULL, table);
+ if (x) {
+ if (is_vmalloc_addr(x))
+ vfree(x);
+ else
+ kfree(x);
+ }
+ return 0;
+}
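+
+/*
+ * Note on the cmpxchg above: if two mounts race to build the table,
+ * only the first swap into info->stripe_hash_table wins; the loser's
+ * freshly built copy is freed again (vfree or kfree depending on how
+ * it was allocated), so callers never see a half-initialized table.
+ */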
+
+/*
+ * caching an rbio means copying anything from the
+ * bio_pages array into the stripe_pages array. We
+ * use the page uptodate bit in the stripe cache array
+ * to indicate if it has valid data
+ *
+ * once the caching is done, we set the cache ready
+ * bit.
+ */
+static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ char *s;
+ char *d;
+ int ret;
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ return;
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (!rbio->bio_pages[i])
+ continue;
+
+ s = kmap(rbio->bio_pages[i]);
+ d = kmap(rbio->stripe_pages[i]);
+
+ memcpy(d, s, PAGE_CACHE_SIZE);
+
+ kunmap(rbio->bio_pages[i]);
+ kunmap(rbio->stripe_pages[i]);
+ SetPageUptodate(rbio->stripe_pages[i]);
+ }
+ set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+}
+
+/*
+ * we hash on the first logical address of the stripe
+ */
+static int rbio_bucket(struct btrfs_raid_bio *rbio)
+{
+ u64 num = rbio->raid_map[0];
+
+ /*
+ * we shift down quite a bit. We're using byte
+ * addressing, and most of the lower bits are zeros.
+ * This tends to upset hash_64, and it consistently
+ * returns just one or two different values.
+ *
+ * Shifting off the lower bits fixes things.
+ */
+ return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
+}
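+
+/*
+ * A quick illustration of the bucket math above: with the common 64K
+ * stripe_len, full stripe starts are 64K aligned, so the low 16 bits
+ * of raid_map[0] carry no information.  Shifting them off before
+ * hash_64() means the hash sees the bits that actually differ between
+ * stripes.
+ */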
+
+/*
+ * stealing an rbio means taking all the uptodate pages from the stripe
+ * array in the source rbio and putting them into the destination rbio
+ */
+static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
+{
+ int i;
+ struct page *s;
+ struct page *d;
+
+ if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
+ return;
+
+ for (i = 0; i < dest->nr_pages; i++) {
+ s = src->stripe_pages[i];
+ if (!s || !PageUptodate(s)) {
+ continue;
+ }
+
+ d = dest->stripe_pages[i];
+ if (d)
+ __free_page(d);
+
+ dest->stripe_pages[i] = s;
+ src->stripe_pages[i] = NULL;
+ }
+}
+
+/*
+ * merging means we take the bio_list from the victim and
+ * splice it into the destination. The victim should
+ * be discarded afterwards.
+ *
+ * must be called with dest->bio_list_lock held
+ */
+static void merge_rbio(struct btrfs_raid_bio *dest,
+ struct btrfs_raid_bio *victim)
+{
+ bio_list_merge(&dest->bio_list, &victim->bio_list);
+ dest->bio_list_bytes += victim->bio_list_bytes;
+ bio_list_init(&victim->bio_list);
+}
+
+/*
+ * used to prune items that are in the cache. The caller
+ * must hold the hash table lock.
+ */
+static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+ int bucket = rbio_bucket(rbio);
+ struct btrfs_stripe_hash_table *table;
+ struct btrfs_stripe_hash *h;
+ int freeit = 0;
+
+ /*
+ * check the bit again under the hash table lock.
+ */
+ if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+ h = table->table + bucket;
+
+ /* hold the lock for the bucket because we may be
+ * removing it from the hash table
+ */
+ spin_lock(&h->lock);
+
+ /*
+ * hold the lock for the bio list because we need
+ * to make sure the bio list is empty
+ */
+ spin_lock(&rbio->bio_list_lock);
+
+ if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+ list_del_init(&rbio->stripe_cache);
+ table->cache_size -= 1;
+ freeit = 1;
+
+ /* if the bio list isn't empty, this rbio is
+ * still involved in an IO. We take it out
+ * of the cache list, and drop the ref that
+ * was held for the list.
+ *
+ * If the bio_list was empty, we also remove
+ * the rbio from the hash_table, and drop
+ * the corresponding ref
+ */
+ if (bio_list_empty(&rbio->bio_list)) {
+ if (!list_empty(&rbio->hash_list)) {
+ list_del_init(&rbio->hash_list);
+ atomic_dec(&rbio->refs);
+ BUG_ON(!list_empty(&rbio->plug_list));
+ }
+ }
+ }
+
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock(&h->lock);
+
+ if (freeit)
+ __free_raid_bio(rbio);
+}
+
+/*
+ * prune a given rbio from the cache
+ */
+static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+
+ if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ __remove_rbio_from_cache(rbio);
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove everything in the cache
+ */
+void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+ struct btrfs_raid_bio *rbio;
+
+ table = info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ while (!list_empty(&table->stripe_cache)) {
+ rbio = list_entry(table->stripe_cache.next,
+ struct btrfs_raid_bio,
+ stripe_cache);
+ __remove_rbio_from_cache(rbio);
+ }
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove all cached entries and free the hash table
+ * used by unmount
+ */
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
+{
+ if (!info->stripe_hash_table)
+ return;
+ btrfs_clear_rbio_cache(info);
+ if (is_vmalloc_addr(info->stripe_hash_table))
+ vfree(info->stripe_hash_table);
+ else
+ kfree(info->stripe_hash_table);
+ info->stripe_hash_table = NULL;
+}
+
+/*
+ * insert an rbio into the stripe cache. It
+ * must have already been prepared by calling
+ * cache_rbio_pages
+ *
+ * If this rbio was already cached, it gets
+ * moved to the front of the lru.
+ *
+ * If the size of the rbio cache is too big, we
+ * prune an item.
+ */
+static void cache_rbio(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+
+ if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ spin_lock(&rbio->bio_list_lock);
+
+ /* bump our ref if we were not in the list before */
+ if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
+ atomic_inc(&rbio->refs);
+
+ if (!list_empty(&rbio->stripe_cache)) {
+ list_move(&rbio->stripe_cache, &table->stripe_cache);
+ } else {
+ list_add(&rbio->stripe_cache, &table->stripe_cache);
+ table->cache_size += 1;
+ }
+
+ spin_unlock(&rbio->bio_list_lock);
+
+ if (table->cache_size > RBIO_CACHE_SIZE) {
+ struct btrfs_raid_bio *found;
+
+ found = list_entry(table->stripe_cache.prev,
+ struct btrfs_raid_bio,
+ stripe_cache);
+
+ if (found != rbio)
+ __remove_rbio_from_cache(found);
+ }
+
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+ return;
+}
+
+/*
+ * helper function to run the xor_blocks api. It is only
+ * able to do MAX_XOR_BLOCKS at a time, so we need to
+ * loop through.
+ */
+static void run_xor(void **pages, int src_cnt, ssize_t len)
+{
+ int src_off = 0;
+ int xor_src_cnt = 0;
+ void *dest = pages[src_cnt];
+
+ while (src_cnt > 0) {
+ xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+ xor_blocks(xor_src_cnt, len, dest, pages + src_off);
+
+ src_cnt -= xor_src_cnt;
+ src_off += xor_src_cnt;
+ }
+}
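+
+/*
+ * Layout expected by run_xor(): pages[0] .. pages[src_cnt - 1] are the
+ * xor sources and pages[src_cnt] is the destination.  finish_rmw below
+ * seeds the parity page with a copy of the first data page and then
+ * calls run_xor(pointers + 1, nr_data - 1, ...) to fold in the rest.
+ */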
+
+/*
+ * returns true if the bio list inside this rbio
+ * covers an entire stripe (no rmw required).
+ * Must be called with the bio list lock held, or
+ * at a time when you know it is impossible to add
+ * new bios into the list
+ */
+static int __rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+ unsigned long size = rbio->bio_list_bytes;
+ int ret = 1;
+
+ if (size != rbio->nr_data * rbio->stripe_len)
+ ret = 0;
+
+ BUG_ON(size > rbio->nr_data * rbio->stripe_len);
+ return ret;
+}
+
+static int rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
+ ret = __rbio_is_full(rbio);
+ spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+ return ret;
+}
+
+/*
+ * returns 1 if it is safe to merge two rbios together.
+ * The merging is safe if the two rbios correspond to
+ * the same stripe and if they are both going in the same
+ * direction (read vs write), and if neither one is
+ * locked for final IO
+ *
+ * The caller is responsible for locking such that
+ * rmw_locked is safe to test
+ */
+static int rbio_can_merge(struct btrfs_raid_bio *last,
+ struct btrfs_raid_bio *cur)
+{
+ if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
+ test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
+ return 0;
+
+ /*
+ * we can't merge with cached rbios, since the
+ * idea is that when we merge the destination
+ * rbio is going to run our IO for us. We can
+ * steal from cached rbios though, other functions
+ * handle that.
+ */
+ if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
+ test_bit(RBIO_CACHE_BIT, &cur->flags))
+ return 0;
+
+ if (last->raid_map[0] !=
+ cur->raid_map[0])
+ return 0;
+
+ /* reads can't merge with writes */
+ if (last->read_rebuild !=
+ cur->read_rebuild) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * helper to index into the pstripe
+ */
+static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+ index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+ return rbio->stripe_pages[index];
+}
+
+/*
+ * helper to index into the qstripe, returns NULL
+ * if there is no qstripe
+ */
+static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+ if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
+ return NULL;
+
+ index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
+ PAGE_CACHE_SHIFT;
+ return rbio->stripe_pages[index];
+}
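+
+/*
+ * Both helpers above rely on stripe_pages being laid out as nr_data
+ * data stripes followed by P and then (for raid6) Q, each stripe_len
+ * bytes long.  For example, with two 64K data stripes and 4K pages,
+ * pages 0-31 are data, the P stripe starts at page 32 and the Q
+ * stripe (if any) at page 48.
+ */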
+
+/*
+ * The first stripe in the table for a logical address
+ * has the lock. rbios are added in one of three ways:
+ *
+ * 1) Nobody has the stripe locked yet. The rbio is given
+ * the lock and 0 is returned. The caller must start the IO
+ * themselves.
+ *
+ * 2) Someone has the stripe locked, but we're able to merge
+ * with the lock owner. The rbio is freed and the IO will
+ * start automatically along with the existing rbio. 1 is returned.
+ *
+ * 3) Someone has the stripe locked, but we're not able to merge.
+ * The rbio is added to the lock owner's plug list, or merged into
+ * an rbio already on the plug list. When the lock owner unlocks,
+ * the next rbio on the list is run and the IO is started automatically.
+ * 1 is returned
+ *
+ * If we return 0, the caller still owns the rbio and must continue with
+ * IO submission. If we return 1, the caller must assume the rbio has
+ * already been freed.
+ */
+static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
+{
+ int bucket = rbio_bucket(rbio);
+ struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
+ struct btrfs_raid_bio *cur;
+ struct btrfs_raid_bio *pending;
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+ struct btrfs_raid_bio *freeit = NULL;
+ struct btrfs_raid_bio *cache_drop = NULL;
+ int ret = 0;
+ int walk = 0;
+
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(cur, &h->hash_list, hash_list) {
+ walk++;
+ if (cur->raid_map[0] == rbio->raid_map[0]) {
+ spin_lock(&cur->bio_list_lock);
+
+ /* can we steal this cached rbio's pages? */
+ if (bio_list_empty(&cur->bio_list) &&
+ list_empty(&cur->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+ !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+ list_del_init(&cur->hash_list);
+ atomic_dec(&cur->refs);
+
+ steal_rbio(cur, rbio);
+ cache_drop = cur;
+ spin_unlock(&cur->bio_list_lock);
+
+ goto lockit;
+ }
+
+ /* can we merge into the lock owner? */
+ if (rbio_can_merge(cur, rbio)) {
+ merge_rbio(cur, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+
+
+ /*
+ * we couldn't merge with the running
+ * rbio, see if we can merge with the
+ * pending ones. We don't have to
+ * check for rmw_locked because there
+ * is no way they are inside finish_rmw
+ * right now
+ */
+ list_for_each_entry(pending, &cur->plug_list,
+ plug_list) {
+ if (rbio_can_merge(pending, rbio)) {
+ merge_rbio(pending, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+ }
+
+ /* no merging, put us on the tail of the plug list,
+ * our rbio will be started when the currently
+ * running rbio unlocks
+ */
+ list_add_tail(&rbio->plug_list, &cur->plug_list);
+ spin_unlock(&cur->bio_list_lock);
+ ret = 1;
+ goto out;
+ }
+ }
+lockit:
+ atomic_inc(&rbio->refs);
+ list_add(&rbio->hash_list, &h->hash_list);
+out:
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (cache_drop)
+ remove_rbio_from_cache(cache_drop);
+ if (freeit)
+ __free_raid_bio(freeit);
+ return ret;
+}
+
+/*
+ * called when rmw or parity rebuild is completed. If the plug list has more
+ * rbios waiting for this stripe, the next one on the list will be started
+ */
+static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bucket;
+ struct btrfs_stripe_hash *h;
+ unsigned long flags;
+ int keep_cache = 0;
+
+ bucket = rbio_bucket(rbio);
+ h = rbio->fs_info->stripe_hash_table->table + bucket;
+
+ if (list_empty(&rbio->plug_list))
+ cache_rbio(rbio);
+
+ spin_lock_irqsave(&h->lock, flags);
+ spin_lock(&rbio->bio_list_lock);
+
+ if (!list_empty(&rbio->hash_list)) {
+ /*
+ * if we're still cached and there is no other IO
+ * to perform, just leave this rbio here for others
+ * to steal from later
+ */
+ if (list_empty(&rbio->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+ keep_cache = 1;
+ clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ BUG_ON(!bio_list_empty(&rbio->bio_list));
+ goto done;
+ }
+
+ list_del_init(&rbio->hash_list);
+ atomic_dec(&rbio->refs);
+
+ /*
+ * we use the plug list to hold all the rbios
+ * waiting for the chance to lock this stripe.
+ * hand the lock over to one of them.
+ */
+ if (!list_empty(&rbio->plug_list)) {
+ struct btrfs_raid_bio *next;
+ struct list_head *head = rbio->plug_list.next;
+
+ next = list_entry(head, struct btrfs_raid_bio,
+ plug_list);
+
+ list_del_init(&rbio->plug_list);
+
+ list_add(&next->hash_list, &h->hash_list);
+ atomic_inc(&next->refs);
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ if (next->read_rebuild)
+ async_read_rebuild(next);
+ else {
+ steal_rbio(rbio, next);
+ async_rmw_stripe(next);
+ }
+
+ goto done_nolock;
+ } else if (waitqueue_active(&h->wait)) {
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+ wake_up(&h->wait);
+ goto done_nolock;
+ }
+ }
+done:
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+done_nolock:
+ if (!keep_cache)
+ remove_rbio_from_cache(rbio);
+}
+
+static void __free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+ int i;
+
+ WARN_ON(atomic_read(&rbio->refs) < 0);
+ if (!atomic_dec_and_test(&rbio->refs))
+ return;
+
+ WARN_ON(!list_empty(&rbio->stripe_cache));
+ WARN_ON(!list_empty(&rbio->hash_list));
+ WARN_ON(!bio_list_empty(&rbio->bio_list));
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i]) {
+ __free_page(rbio->stripe_pages[i]);
+ rbio->stripe_pages[i] = NULL;
+ }
+ }
+ kfree(rbio->raid_map);
+ kfree(rbio->bbio);
+ kfree(rbio);
+}
+
+static void free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+ unlock_stripe(rbio);
+ __free_raid_bio(rbio);
+}
+
+/*
+ * this frees the rbio and runs through all the bios in the
+ * bio_list and calls end_io on them
+ */
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
+{
+ struct bio *cur = bio_list_get(&rbio->bio_list);
+ struct bio *next;
+ free_raid_bio(rbio);
+
+ while (cur) {
+ next = cur->bi_next;
+ cur->bi_next = NULL;
+ if (uptodate)
+ set_bit(BIO_UPTODATE, &cur->bi_flags);
+ bio_endio(cur, err);
+ cur = next;
+ }
+}
+
+/*
+ * end io function used by finish_rmw. When we finally
+ * get here, we've written a full stripe
+ */
+static void raid_write_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ err = 0;
+
+ /* OK, we have written all the stripes we need to. */
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ err = -EIO;
+
+ rbio_orig_end_io(rbio, err, 0);
+ return;
+}
+
+/*
+ * the read/modify/write code wants to use the original bio for
+ * any pages it included, and then use the rbio for everything
+ * else. This function decides if a given index (stripe number)
+ * and page number in that stripe fall inside the original bio
+ * or the rbio.
+ *
+ * if you set bio_list_only, you'll get a NULL back for any ranges
+ * that are outside the bio_list
+ *
+ * This doesn't take any refs on anything, you get a bare page pointer
+ * and the caller must bump refs as required.
+ *
+ * You must call index_rbio_pages once before you can trust
+ * the answers from this function.
+ */
+static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
+ int index, int pagenr, int bio_list_only)
+{
+ int chunk_page;
+ struct page *p = NULL;
+
+ chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
+
+ spin_lock_irq(&rbio->bio_list_lock);
+ p = rbio->bio_pages[chunk_page];
+ spin_unlock_irq(&rbio->bio_list_lock);
+
+ if (p || bio_list_only)
+ return p;
+
+ return rbio->stripe_pages[chunk_page];
+}
+
+/*
+ * number of pages we need for the entire stripe across all the
+ * drives
+ */
+static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
+{
+ unsigned long nr = stripe_len * nr_stripes;
+ return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
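+
+/*
+ * e.g. for a 64K stripe_len and 3 stripes (2 data + 1 parity) on 4K
+ * pages this works out to (196608 + 4095) >> 12 = 48 pages.
+ */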
+
+/*
+ * allocation and initial setup for the btrfs_raid_bio. Note that
+ * this does not allocate any pages for rbio->stripe_pages.
+ */
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len)
+{
+ struct btrfs_raid_bio *rbio;
+ int nr_data = 0;
+ int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+ void *p;
+
+ rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
+ GFP_NOFS);
+ if (!rbio) {
+ kfree(raid_map);
+ kfree(bbio);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bio_list_init(&rbio->bio_list);
+ INIT_LIST_HEAD(&rbio->plug_list);
+ spin_lock_init(&rbio->bio_list_lock);
+ INIT_LIST_HEAD(&rbio->stripe_cache);
+ INIT_LIST_HEAD(&rbio->hash_list);
+ rbio->bbio = bbio;
+ rbio->raid_map = raid_map;
+ rbio->fs_info = root->fs_info;
+ rbio->stripe_len = stripe_len;
+ rbio->nr_pages = num_pages;
+ rbio->faila = -1;
+ rbio->failb = -1;
+ atomic_set(&rbio->refs, 1);
+
+ /*
+ * the stripe_pages and bio_pages array point to the extra
+ * memory we allocated past the end of the rbio
+ */
+ p = rbio + 1;
+ rbio->stripe_pages = p;
+ rbio->bio_pages = p + sizeof(struct page *) * num_pages;
+
+ if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
+ nr_data = bbio->num_stripes - 2;
+ else
+ nr_data = bbio->num_stripes - 1;
+
+ rbio->nr_data = nr_data;
+ return rbio;
+}
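+
+/*
+ * alloc_rbio() makes a single allocation: the struct itself, followed
+ * by num_pages stripe_pages pointers and then num_pages bio_pages
+ * pointers.  Freeing the rbio therefore frees both arrays as well;
+ * only the pages those pointers reference are allocated separately.
+ */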
+
+/* allocate pages for all the stripes in the bio, including parity */
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i])
+ continue;
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[i] = page;
+ ClearPageUptodate(page);
+ }
+ return 0;
+}
+
+/* allocate pages for just the p/q stripes */
+static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ struct page *page;
+
+ i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+
+ for (; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i])
+ continue;
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[i] = page;
+ }
+ return 0;
+}
+
+/*
+ * add a single page from a specific stripe into our list of bios for IO.
+ * This will try to merge into existing bios if possible, and returns
+ * zero if all went well.
+ */
+int rbio_add_io_page(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list,
+ struct page *page,
+ int stripe_nr,
+ unsigned long page_index,
+ unsigned long bio_max_len)
+{
+ struct bio *last = bio_list->tail;
+ u64 last_end = 0;
+ int ret;
+ struct bio *bio;
+ struct btrfs_bio_stripe *stripe;
+ u64 disk_start;
+
+ stripe = &rbio->bbio->stripes[stripe_nr];
+ disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+
+ /* if the device is missing, just fail this stripe */
+ if (!stripe->dev->bdev)
+ return fail_rbio_index(rbio, stripe_nr);
+
+ /* see if we can add this page onto our existing bio */
+ if (last) {
+ last_end = (u64)last->bi_sector << 9;
+ last_end += last->bi_size;
+
+ /*
+ * we can't merge these if they are from different
+ * devices or if they are not contiguous
+ */
+ if (last_end == disk_start && stripe->dev->bdev &&
+ test_bit(BIO_UPTODATE, &last->bi_flags) &&
+ last->bi_bdev == stripe->dev->bdev) {
+ ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
+ if (ret == PAGE_CACHE_SIZE)
+ return 0;
+ }
+ }
+
+ /* put a new bio on the list */
+ bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_size = 0;
+ bio->bi_bdev = stripe->dev->bdev;
+ bio->bi_sector = disk_start >> 9;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+ bio_list_add(bio_list, bio);
+ return 0;
+}
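+
+/*
+ * Addressing note for rbio_add_io_page(): disk_start is a byte offset
+ * (stripe->physical plus page_index pages), while bi_sector is in
+ * 512-byte sectors, hence the << 9 / >> 9 conversions when comparing
+ * against the tail bio and when setting up the new one.
+ */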
+
+/*
+ * while we're doing the read/modify/write cycle, we could
+ * have errors in reading pages off the disk. This checks
+ * for errors and if we're not able to read the page it'll
+ * trigger parity reconstruction. The rmw will be finished
+ * after we've reconstructed the failed stripes
+ */
+static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
+{
+ if (rbio->faila >= 0 || rbio->failb >= 0) {
+ BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
+ __raid56_parity_recover(rbio);
+ } else {
+ finish_rmw(rbio);
+ }
+}
+
+/*
+ * these are just the pages from the rbio array, not from anything
+ * the FS sent down to us
+ */
+static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
+{
+ int index;
+ index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
+ index += page;
+ return rbio->stripe_pages[index];
+}
+
+/*
+ * helper function to walk our bio list and populate the bio_pages array with
+ * the result. This seems expensive, but it is faster than constantly
+ * searching through the bio list as we set up the IO in finish_rmw or stripe
+ * reconstruction.
+ *
+ * This must be called before you trust the answers from page_in_rbio
+ */
+static void index_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ struct bio *bio;
+ u64 start;
+ unsigned long stripe_offset;
+ unsigned long page_index;
+ struct page *p;
+ int i;
+
+ spin_lock_irq(&rbio->bio_list_lock);
+ bio_list_for_each(bio, &rbio->bio_list) {
+ start = (u64)bio->bi_sector << 9;
+ stripe_offset = start - rbio->raid_map[0];
+ page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ p = bio->bi_io_vec[i].bv_page;
+ rbio->bio_pages[page_index + i] = p;
+ }
+ }
+ spin_unlock_irq(&rbio->bio_list_lock);
+}
+
+/*
+ * this is called in one of two situations. We either
+ * have a full stripe from the higher layers, or we've read all
+ * the missing bits off disk.
+ *
+ * This will calculate the parity and then send down any
+ * changed blocks.
+ */
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_bio *bbio = rbio->bbio;
+ void *pointers[bbio->num_stripes];
+ int stripe_len = rbio->stripe_len;
+ int nr_data = rbio->nr_data;
+ int stripe;
+ int pagenr;
+ int p_stripe = -1;
+ int q_stripe = -1;
+ struct bio_list bio_list;
+ struct bio *bio;
+ int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
+ int ret;
+
+ bio_list_init(&bio_list);
+
+ if (bbio->num_stripes - rbio->nr_data == 1) {
+ p_stripe = bbio->num_stripes - 1;
+ } else if (bbio->num_stripes - rbio->nr_data == 2) {
+ p_stripe = bbio->num_stripes - 2;
+ q_stripe = bbio->num_stripes - 1;
+ } else {
+ BUG();
+ }
+
+ /* at this point we either have a full stripe,
+ * or we've read the full stripe from the drive.
+ * recalculate the parity and write the new results.
+ *
+ * We're not allowed to add any new bios to the
+ * bio list here, anyone else that wants to
+ * change this stripe needs to do their own rmw.
+ */
+ spin_lock_irq(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock_irq(&rbio->bio_list_lock);
+
+ atomic_set(&rbio->bbio->error, 0);
+
+ /*
+ * now that we've set rmw_locked, run through the
+ * bio list one last time and map the page pointers
+ *
+ * We don't cache full rbios because we're assuming
+ * the higher layers are unlikely to use this area of
+ * the disk again soon. If they do use it again,
+ * hopefully they will send another full bio.
+ */
+ index_rbio_pages(rbio);
+ if (!rbio_is_full(rbio))
+ cache_rbio_pages(rbio);
+ else
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+ struct page *p;
+ /* first collect one page from each data stripe */
+ for (stripe = 0; stripe < nr_data; stripe++) {
+ p = page_in_rbio(rbio, stripe, pagenr, 0);
+ pointers[stripe] = kmap(p);
+ }
+
+ /* then add the parity stripe */
+ p = rbio_pstripe_page(rbio, pagenr);
+ SetPageUptodate(p);
+ pointers[stripe++] = kmap(p);
+
+ if (q_stripe != -1) {
+
+ /*
+ * raid6, add the qstripe and call the
+ * library function to fill in our p/q
+ */
+ p = rbio_qstripe_page(rbio, pagenr);
+ SetPageUptodate(p);
+ pointers[stripe++] = kmap(p);
+
+ raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+ pointers);
+ } else {
+ /* raid5 */
+ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ }
+
+
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+ kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ }
+
+ /*
+ * time to start writing. Make bios for everything from the
+ * higher layers (the bio_list in our rbio) and our p/q. Ignore
+ * everything else.
+ */
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+ struct page *page;
+ if (stripe < rbio->nr_data) {
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (!page)
+ continue;
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+
+ ret = rbio_add_io_page(rbio, &bio_list,
+ page, stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
+ BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
+
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_write_end_io;
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(WRITE, bio);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
+
+/*
+ * helper to find the stripe number for a given bio. Used to figure out which
+ * stripe has failed. This expects the bio to correspond to a physical disk,
+ * so it looks up based on physical sector numbers.
+ */
+static int find_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ u64 physical = bio->bi_sector;
+ u64 stripe_start;
+ int i;
+ struct btrfs_bio_stripe *stripe;
+
+ physical <<= 9;
+
+ for (i = 0; i < rbio->bbio->num_stripes; i++) {
+ stripe = &rbio->bbio->stripes[i];
+ stripe_start = stripe->physical;
+ if (physical >= stripe_start &&
+ physical < stripe_start + rbio->stripe_len) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * helper to find the stripe number for a given
+ * bio (before mapping). Used to figure out which stripe has
+ * failed. This looks up based on logical block numbers.
+ */
+static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ u64 logical = bio->bi_sector;
+ u64 stripe_start;
+ int i;
+
+ logical <<= 9;
+
+ for (i = 0; i < rbio->nr_data; i++) {
+ stripe_start = rbio->raid_map[i];
+ if (logical >= stripe_start &&
+ logical < stripe_start + rbio->stripe_len) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * returns -EIO if we had too many failures
+ */
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
+
+ /* we already know this stripe is bad, move on */
+ if (rbio->faila == failed || rbio->failb == failed)
+ goto out;
+
+ if (rbio->faila == -1) {
+ /* first failure on this rbio */
+ rbio->faila = failed;
+ atomic_inc(&rbio->bbio->error);
+ } else if (rbio->failb == -1) {
+ /* second failure on this rbio */
+ rbio->failb = failed;
+ atomic_inc(&rbio->bbio->error);
+ } else {
+ ret = -EIO;
+ }
+out:
+ spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
+ return ret;
+}
+
+/*
+ * helper to fail a stripe based on a physical disk
+ * bio.
+ */
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ int failed = find_bio_stripe(rbio, bio);
+
+ if (failed < 0)
+ return -EIO;
+
+ return fail_rbio_index(rbio, failed);
+}
+
+/*
+ * this sets each page in the bio uptodate. It should only be used on private
+ * rbio pages, nothing that comes in from the higher layers
+ */
+static void set_bio_pages_uptodate(struct bio *bio)
+{
+ int i;
+ struct page *p;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ p = bio->bi_io_vec[i].bv_page;
+ SetPageUptodate(p);
+ }
+}
+
+/*
+ * end io for the read phase of the rmw cycle. All the bios here are physical
+ * stripe bios we've read from the disk so we can recalculate the parity of the
+ * stripe.
+ *
+ * This will usually kick off finish_rmw once all the bios are read in, but it
+ * may trigger parity reconstruction if we had any errors along the way
+ */
+static void raid_rmw_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ err = 0;
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ goto cleanup;
+
+ /*
+ * this will normally call finish_rmw to start our write
+ * but if there are any failed stripes we'll reconstruct
+ * from parity first
+ */
+ validate_rbio_for_rmw(rbio);
+ return;
+
+cleanup:
+
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
+
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+ rbio->work.flags = 0;
+ rbio->work.func = rmw_work;
+
+ btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+static void async_read_rebuild(struct btrfs_raid_bio *rbio)
+{
+ rbio->work.flags = 0;
+ rbio->work.func = read_rebuild_work;
+
+ btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+/*
+ * the stripe must be locked by the caller. It will
+ * unlock after all the writes are done
+ */
+static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct btrfs_bio *bbio = rbio->bbio;
+ struct bio_list bio_list;
+ int ret;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ bio_list_init(&bio_list);
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ index_rbio_pages(rbio);
+
+ atomic_set(&rbio->bbio->error, 0);
+ /*
+ * build a list of bios to read all the missing parts of this
+ * stripe
+ */
+ for (stripe = 0; stripe < rbio->nr_data; stripe++) {
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ struct page *page;
+ /*
+ * we want to find all the pages missing from
+ * the rbio and read them from the disk. If
+ * page_in_rbio finds a page in the bio list
+ * we don't need to read it off the stripe.
+ */
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (page)
+ continue;
+
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ /*
+ * the bio cache may have handed us an uptodate
+ * page. If so, be happy and use it
+ */
+ if (PageUptodate(page))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * this can happen if others have merged with
+ * us; it means there is nothing left to read.
+ * But if there are missing devices it may not be
+ * safe to do the full stripe write yet.
+ */
+ goto finish;
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&bbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_rmw_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+ /* the actual write will happen once the reads are done */
+ return 0;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
+
+finish:
+ validate_rbio_for_rmw(rbio);
+ return 0;
+}
+
+/*
+ * if the upper layers pass in a full stripe, we thank them by only allocating
+ * enough pages to hold the parity, and sending it all down quickly.
+ */
+static int full_stripe_write(struct btrfs_raid_bio *rbio)
+{
+ int ret;
+
+ ret = alloc_rbio_parity_pages(rbio);
+ if (ret)
+ return ret;
+
+ ret = lock_stripe_add(rbio);
+ if (ret == 0)
+ finish_rmw(rbio);
+ return 0;
+}
+
+/*
+ * partial stripe writes get handed over to async helpers.
+ * We're really hoping to merge a few more writes into this
+ * rbio before calculating new parity
+ */
+static int partial_stripe_write(struct btrfs_raid_bio *rbio)
+{
+ int ret;
+
+ ret = lock_stripe_add(rbio);
+ if (ret == 0)
+ async_rmw_stripe(rbio);
+ return 0;
+}
+
+/*
+ * sometimes while we were reading from the drive to
+ * recalculate parity, enough new bios come in to create
+ * a full stripe. So we do a check here to see if we can
+ * go directly to finish_rmw
+ */
+static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
+{
+ /* head off into rmw land if we don't have a full stripe */
+ if (!rbio_is_full(rbio))
+ return partial_stripe_write(rbio);
+ return full_stripe_write(rbio);
+}
+
+/*
+ * We use plugging callbacks to collect full stripes.
+ * Any time we get a partial stripe write while plugged
+ * we collect it into a list. When the unplug comes down,
+ * we sort the list by logical block number and merge
+ * everything we can into the same rbios
+ */
+struct btrfs_plug_cb {
+ struct blk_plug_cb cb;
+ struct btrfs_fs_info *info;
+ struct list_head rbio_list;
+ struct btrfs_work work;
+};
+
+/*
+ * rbios on the plug list are sorted for easier merging.
+ */
+static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
+ plug_list);
+ struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
+ plug_list);
+ u64 a_sector = ra->bio_list.head->bi_sector;
+ u64 b_sector = rb->bio_list.head->bi_sector;
+
+ if (a_sector < b_sector)
+ return -1;
+ if (a_sector > b_sector)
+ return 1;
+ return 0;
+}
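+
+/*
+ * plug_cmp() is fed to list_sort() in run_plug() below, so the rbios
+ * collected while plugged come back ordered by the starting sector of
+ * their first bio, which makes adjacent partial writes easy to merge.
+ */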
+
+static void run_plug(struct btrfs_plug_cb *plug)
+{
+ struct btrfs_raid_bio *cur;
+ struct btrfs_raid_bio *last = NULL;
+
+ /*
+ * sort our plug list then try to merge
+ * everything we can in hopes of creating full
+ * stripes.
+ */
+ list_sort(NULL, &plug->rbio_list, plug_cmp);
+ while (!list_empty(&plug->rbio_list)) {
+ cur = list_entry(plug->rbio_list.next,
+ struct btrfs_raid_bio, plug_list);
+ list_del_init(&cur->plug_list);
+
+ if (rbio_is_full(cur)) {
+ /* we have a full stripe, send it down */
+ full_stripe_write(cur);
+ continue;
+ }
+ if (last) {
+ if (rbio_can_merge(last, cur)) {
+ merge_rbio(last, cur);
+ __free_raid_bio(cur);
+ continue;
+
+ }
+ __raid56_parity_write(last);
+ }
+ last = cur;
+ }
+ if (last) {
+ __raid56_parity_write(last);
+ }
+ kfree(plug);
+}
+
+/*
+ * if the unplug comes from schedule, we have to push the
+ * work off to a helper thread
+ */
+static void unplug_work(struct btrfs_work *work)
+{
+ struct btrfs_plug_cb *plug;
+ plug = container_of(work, struct btrfs_plug_cb, work);
+ run_plug(plug);
+}
+
+static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct btrfs_plug_cb *plug;
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+
+ if (from_schedule) {
+ plug->work.flags = 0;
+ plug->work.func = unplug_work;
+ btrfs_queue_worker(&plug->info->rmw_workers,
+ &plug->work);
+ return;
+ }
+ run_plug(plug);
+}
+
+/*
+ * our main entry point for writes from the rest of the FS.
+ */
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len)
+{
+ struct btrfs_raid_bio *rbio;
+ struct btrfs_plug_cb *plug = NULL;
+ struct blk_plug_cb *cb;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio)) {
+ kfree(raid_map);
+ kfree(bbio);
+ return PTR_ERR(rbio);
+ }
+ bio_list_add(&rbio->bio_list, bio);
+ rbio->bio_list_bytes = bio->bi_size;
+
+ /*
+ * don't plug on full rbios, just get them out the door
+ * as quickly as we can
+ */
+ if (rbio_is_full(rbio))
+ return full_stripe_write(rbio);
+
+ cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
+ sizeof(*plug));
+ if (cb) {
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+ if (!plug->info) {
+ plug->info = root->fs_info;
+ INIT_LIST_HEAD(&plug->rbio_list);
+ }
+ list_add_tail(&rbio->plug_list, &plug->rbio_list);
+ } else {
+ return __raid56_parity_write(rbio);
+ }
+ return 0;
+}
+
+/*
+ * all parity reconstruction happens here. We've read in everything
+ * we can find from the drives and this does the heavy lifting of
+ * sorting the good from the bad.
+ */
+static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
+{
+ int pagenr, stripe;
+ void **pointers;
+ int faila = -1, failb = -1;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int err;
+ int i;
+
+ pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
+ GFP_NOFS);
+ if (!pointers) {
+ err = -ENOMEM;
+ goto cleanup_io;
+ }
+
+ faila = rbio->faila;
+ failb = rbio->failb;
+
+ if (rbio->read_rebuild) {
+ spin_lock_irq(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock_irq(&rbio->bio_list_lock);
+ }
+
+ index_rbio_pages(rbio);
+
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ /* setup our array of pointers with pages
+ * from each stripe
+ */
+ for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ /*
+ * if we're rebuilding a read, we have to use
+ * pages from the bio list
+ */
+ if (rbio->read_rebuild &&
+ (stripe == faila || stripe == failb)) {
+ page = page_in_rbio(rbio, stripe, pagenr, 0);
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+ pointers[stripe] = kmap(page);
+ }
+
+ /* all raid6 handling here */
+ if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
+ RAID6_Q_STRIPE) {
+
+ /*
+ * single failure, rebuild from parity raid5
+ * style
+ */
+ if (failb < 0) {
+ if (faila == rbio->nr_data) {
+ /*
+ * Just the P stripe has failed, without
+ * a bad data or Q stripe.
+ * TODO, we should redo the xor here.
+ */
+ err = -EIO;
+ goto cleanup;
+ }
+ /*
+ * a single failure in raid6 is rebuilt
+ * in the pstripe code below
+ */
+ goto pstripe;
+ }
+
+ /* make sure our ps and qs are in order */
+ if (faila > failb) {
+ int tmp = failb;
+ failb = faila;
+ faila = tmp;
+ }
+
+ /* if the q stripe has failed, do a pstripe reconstruction
+ * from the xors.
+ * If both the q stripe and the P stripe have failed, we're
+ * here due to a crc mismatch and we can't give them the
+ * data they want
+ */
+ if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
+ if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
+ err = -EIO;
+ goto cleanup;
+ }
+ /*
+ * otherwise we have one bad data stripe and
+ * a good P stripe. raid5!
+ */
+ goto pstripe;
+ }
+
+ if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
+ raid6_datap_recov(rbio->bbio->num_stripes,
+ PAGE_SIZE, faila, pointers);
+ } else {
+ raid6_2data_recov(rbio->bbio->num_stripes,
+ PAGE_SIZE, faila, failb,
+ pointers);
+ }
+ } else {
+ void *p;
+
+ /* rebuild from P stripe here (raid5 or raid6) */
+ BUG_ON(failb != -1);
+pstripe:
+ /* Copy parity block into failed block to start with */
+ memcpy(pointers[faila],
+ pointers[rbio->nr_data],
+ PAGE_CACHE_SIZE);
+
+ /* rearrange the pointer array */
+ p = pointers[faila];
+ for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
+ pointers[stripe] = pointers[stripe + 1];
+ pointers[rbio->nr_data - 1] = p;
+
+ /* xor in the rest */
+ run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+ }
+ /* if we're doing this rebuild as part of an rmw, go through
+ * and set all of our private rbio pages in the
+ * failed stripes as uptodate. This way finish_rmw will
+ * know they can be trusted. If this was a read reconstruction,
+ * other endio functions will fiddle the uptodate bits
+ */
+ if (!rbio->read_rebuild) {
+ for (i = 0; i < nr_pages; i++) {
+ if (faila != -1) {
+ page = rbio_stripe_page(rbio, faila, i);
+ SetPageUptodate(page);
+ }
+ if (failb != -1) {
+ page = rbio_stripe_page(rbio, failb, i);
+ SetPageUptodate(page);
+ }
+ }
+ }
+ for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ /*
+ * if we're rebuilding a read, we have to use
+ * pages from the bio list
+ */
+ if (rbio->read_rebuild &&
+ (stripe == faila || stripe == failb)) {
+ page = page_in_rbio(rbio, stripe, pagenr, 0);
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+ kunmap(page);
+ }
+ }
+
+ err = 0;
+cleanup:
+ kfree(pointers);
+
+cleanup_io:
+
+ if (rbio->read_rebuild) {
+ if (err == 0)
+ cache_rbio_pages(rbio);
+ else
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ rbio_orig_end_io(rbio, err, err == 0);
+ } else if (err == 0) {
+ rbio->faila = -1;
+ rbio->failb = -1;
+ finish_rmw(rbio);
+ } else {
+ rbio_orig_end_io(rbio, err, 0);
+ }
+}
+
+/*
+ * This is called only for stripes we've read from disk to
+ * reconstruct the parity.
+ */
+static void raid_recover_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ /*
+ * we only read stripe pages off the disk, set them
+ * up to date if there were no errors
+ */
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ rbio_orig_end_io(rbio, -EIO, 0);
+ else
+ __raid_recover_end_io(rbio);
+}
+
+/*
+ * reads everything we need off the disk to reconstruct
+ * the parity. endio handlers trigger final reconstruction
+ * when the IO is done.
+ *
+ * This is used both for reads from the higher layers and for
+ * parity construction required to finish an rmw cycle.
+ */
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct btrfs_bio *bbio = rbio->bbio;
+ struct bio_list bio_list;
+ int ret;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ bio_list_init(&bio_list);
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ atomic_set(&rbio->bbio->error, 0);
+
+ /*
+ * read everything that hasn't failed. Thanks to the
+ * stripe cache, it is possible that some or all of these
+ * pages are going to be uptodate.
+ */
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ if (rbio->faila == stripe ||
+ rbio->failb == stripe)
+ continue;
+
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ struct page *p;
+
+ /*
+ * the rmw code may have already read this
+ * page in
+ */
+ p = rbio_stripe_page(rbio, stripe, pagenr);
+ if (PageUptodate(p))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list,
+ rbio_stripe_page(rbio, stripe, pagenr),
+ stripe, pagenr, rbio->stripe_len);
+ if (ret < 0)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * we might have no bios to read just because the pages
+ * were up to date, or we might have no bios to read because
+ * the devices were gone.
+ */
+ if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
+ __raid_recover_end_io(rbio);
+ goto out;
+ } else {
+ goto cleanup;
+ }
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&bbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_recover_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+out:
+ return 0;
+
+cleanup:
+ if (rbio->read_rebuild)
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
+}
+
+/*
+ * the main entry point for reads from the higher layers. This
+ * is really only called when the normal read path had a failure,
+ * so we assume the bio they send down corresponds to a failed part
+ * of the drive.
+ */
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, int mirror_num)
+{
+ struct btrfs_raid_bio *rbio;
+ int ret;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio)) {
+ return PTR_ERR(rbio);
+ }
+
+ rbio->read_rebuild = 1;
+ bio_list_add(&rbio->bio_list, bio);
+ rbio->bio_list_bytes = bio->bi_size;
+
+ rbio->faila = find_logical_bio_stripe(rbio, bio);
+ if (rbio->faila == -1) {
+ BUG();
+ kfree(rbio);
+ return -EIO;
+ }
+
+ /*
+ * reconstruct from the q stripe if they are
+ * asking for mirror 3
+ */
+ if (mirror_num == 3)
+ rbio->failb = bbio->num_stripes - 2;
+
+ ret = lock_stripe_add(rbio);
+
+ /*
+ * __raid56_parity_recover will end the bio with
+ * any errors it hits. We don't want to return
+ * its error value up the stack because our caller
+ * will end up calling bio_endio with any nonzero
+ * return
+ */
+ if (ret == 0)
+ __raid56_parity_recover(rbio);
+ /*
+ * our rbio has been added to the list of
+ * rbios that will be handled after the
+ * current lock owner is done
+ */
+ return 0;
+
+}
+
+static void rmw_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ raid56_rmw_stripe(rbio);
+}
+
+static void read_rebuild_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ __raid56_parity_recover(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
new file mode 100644
index 000000000000..ea5d73bfdfbe
--- /dev/null
+++ b/fs/btrfs/raid56.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Fusion-io All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_RAID56__
+#define __BTRFS_RAID56__
+static inline int nr_parity_stripes(struct map_lookup *map)
+{
+ if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ return 1;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ return 2;
+ else
+ return 0;
+}
+
+static inline int nr_data_stripes(struct map_lookup *map)
+{
+ return map->num_stripes - nr_parity_stripes(map);
+}
+#define RAID5_P_STRIPE ((u64)-2)
+#define RAID6_Q_STRIPE ((u64)-1)
+
+#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
+ ((x) == RAID6_Q_STRIPE))
+
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, int mirror_num);
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len);
+
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
+#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 17c306bf177a..50695dc5e2ab 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3017,7 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
}
}
- page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+ page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 67783e03d121..53c3501fa4ca 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -28,6 +28,7 @@
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
+#include "raid56.h"
/*
* This is only the first step towards a full-features scrub. It reads all
@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct btrfs_device *extent_dev;
int extent_mirror_num;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ if (num >= nr_data_stripes(map)) {
+ return 0;
+ }
+ }
+
nstripes = length;
offset = 0;
do_div(nstripes, map->stripe_len);
@@ -2708,7 +2716,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
int ret;
struct btrfs_root *root = sctx->dev_root;
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return -EIO;
gen = root->fs_info->last_trans_committed;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 321b7fb4e441..f7a8b861058b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -85,6 +85,7 @@ struct send_ctx {
u32 send_max_size;
u64 total_send_size;
u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+ u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
struct vfsmount *mnt;
@@ -3709,6 +3710,39 @@ out:
return ret;
}
+/*
+ * Send an update extent command to user space.
+ */
+static int send_update_extent(struct send_ctx *sctx,
+ u64 offset, u32 len)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
static int send_write_or_clone(struct send_ctx *sctx,
struct btrfs_path *path,
struct btrfs_key *key,
@@ -3744,7 +3778,11 @@ static int send_write_or_clone(struct send_ctx *sctx,
goto out;
}
- if (!clone_root) {
+ if (clone_root) {
+ ret = send_clone(sctx, offset, len, clone_root);
+ } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
+ ret = send_update_extent(sctx, offset, len);
+ } else {
while (pos < len) {
l = len - pos;
if (l > BTRFS_SEND_READ_SIZE)
@@ -3757,10 +3795,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
pos += ret;
}
ret = 0;
- } else {
- ret = send_clone(sctx, offset, len, clone_root);
}
-
out:
return ret;
}
@@ -4536,7 +4571,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
struct btrfs_fs_info *fs_info;
struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key;
- struct file *filp = NULL;
struct send_ctx *sctx = NULL;
u32 i;
u64 *clone_sources_tmp = NULL;
@@ -4544,7 +4578,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root;
+ send_root = BTRFS_I(file_inode(mnt_file))->root;
fs_info = send_root->fs_info;
arg = memdup_user(arg_, sizeof(*arg));
@@ -4561,6 +4595,11 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
goto out;
}
+ if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
+ ret = -EINVAL;
+ goto out;
+ }
+
sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
if (!sctx) {
ret = -ENOMEM;
@@ -4572,6 +4611,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
INIT_LIST_HEAD(&sctx->name_cache_list);
+ sctx->flags = arg->flags;
+
sctx->send_filp = fget(arg->send_fd);
if (IS_ERR(sctx->send_filp)) {
ret = PTR_ERR(sctx->send_filp);
@@ -4673,8 +4714,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
goto out;
out:
- if (filp)
- fput(filp);
kfree(arg);
vfree(clone_sources_tmp);
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 1bf4f32fd4ef..8bb18f7ccaa6 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -86,6 +86,7 @@ enum btrfs_send_cmd {
BTRFS_SEND_C_UTIMES,
BTRFS_SEND_C_END,
+ BTRFS_SEND_C_UPDATE_EXTENT,
__BTRFS_SEND_C_MAX,
};
#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d8982e9601d3..68a29a1ea068 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,13 +41,13 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/ratelimit.h>
+#include <linux/btrfs.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "xattr.h"
#include "volumes.h"
@@ -63,8 +63,7 @@
static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;
-static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
- char nbuf[16])
+static const char *btrfs_decode_error(int errno, char nbuf[16])
{
char *errstr = NULL;
@@ -98,7 +97,7 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
* today we only save the error info into ram. Long term we'll
* also send it down to the disk
*/
- fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
+ set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}
static void save_error_info(struct btrfs_fs_info *fs_info)
@@ -114,7 +113,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
if (sb->s_flags & MS_RDONLY)
return;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
sb->s_flags |= MS_RDONLY;
printk(KERN_INFO "btrfs is forced readonly\n");
/*
@@ -142,8 +141,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
struct super_block *sb = fs_info->sb;
char nbuf[16];
const char *errstr;
- va_list args;
- va_start(args, fmt);
/*
* Special case: if the error is EROFS, and we're already
@@ -152,15 +149,18 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
return;
- errstr = btrfs_decode_error(fs_info, errno, nbuf);
+ errstr = btrfs_decode_error(errno, nbuf);
if (fmt) {
- struct va_format vaf = {
- .fmt = fmt,
- .va = &args,
- };
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
sb->s_id, function, line, errstr, &vaf);
+ va_end(args);
} else {
printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
sb->s_id, function, line, errstr);
@@ -171,7 +171,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
save_error_info(fs_info);
btrfs_handle_error(fs_info);
}
- va_end(args);
}
static const char * const logtypes[] = {
@@ -261,7 +260,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
char nbuf[16];
const char *errstr;
- errstr = btrfs_decode_error(root->fs_info, errno, nbuf);
+ errstr = btrfs_decode_error(errno, nbuf);
btrfs_printk(root->fs_info,
"%s:%d: Aborting unused transaction(%s).\n",
function, line, errstr);
@@ -289,8 +288,8 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
va_start(args, fmt);
vaf.va = &args;
- errstr = btrfs_decode_error(fs_info, errno, nbuf);
- if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
+ errstr = btrfs_decode_error(errno, nbuf);
+ if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
s_id, function, line, &vaf, errstr);
@@ -438,6 +437,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_compress_force:
case Opt_compress_force_type:
compress_force = true;
+ /* Fallthrough */
case Opt_compress:
case Opt_compress_type:
if (token == Opt_compress ||
@@ -519,7 +519,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_alloc_start:
num = match_strdup(&args[0]);
if (num) {
+ mutex_lock(&info->chunk_mutex);
info->alloc_start = memparse(num, NULL);
+ mutex_unlock(&info->chunk_mutex);
kfree(num);
printk(KERN_INFO
"btrfs: allocations start at %llu\n",
@@ -876,7 +878,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_wait_ordered_extents(root, 0);
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
if (PTR_ERR(trans) == -ENOENT)
@@ -1200,6 +1202,38 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
new_pool_size);
}
+static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
+ unsigned long old_opts, int flags)
+{
+ set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+
+ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+ (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+ (flags & MS_RDONLY))) {
+ /* wait for any defraggers to finish */
+ wait_event(fs_info->transaction_wait,
+ (atomic_read(&fs_info->defrag_running) == 0));
+ if (flags & MS_RDONLY)
+ sync_filesystem(fs_info->sb);
+ }
+}
+
+static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
+ unsigned long old_opts)
+{
+ /*
+ * We need to clean up all defraggable inodes if the autodefrag option
+ * has been turned off or the fs has been remounted read-only.
+ */
+ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+ (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+ (fs_info->sb->s_flags & MS_RDONLY))) {
+ btrfs_cleanup_defrag_inodes(fs_info);
+ }
+
+ clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1213,6 +1247,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
unsigned int old_metadata_ratio = fs_info->metadata_ratio;
int ret;
+ btrfs_remount_prepare(fs_info, old_opts, *flags);
+
ret = btrfs_parse_options(root, data);
if (ret) {
ret = -EINVAL;
@@ -1223,7 +1259,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
fs_info->thread_pool_size, old_thread_pool_size);
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
- return 0;
+ goto out;
if (*flags & MS_RDONLY) {
/*
@@ -1278,7 +1314,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
sb->s_flags &= ~MS_RDONLY;
}
-
+out:
+ btrfs_remount_cleanup(fs_info, old_opts);
return 0;
restore:
@@ -1289,10 +1326,13 @@ restore:
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;
fs_info->max_inline = old_max_inline;
+ mutex_lock(&fs_info->chunk_mutex);
fs_info->alloc_start = old_alloc_start;
+ mutex_unlock(&fs_info->chunk_mutex);
btrfs_resize_thread_pool(fs_info,
old_thread_pool_size, fs_info->thread_pool_size);
fs_info->metadata_ratio = old_metadata_ratio;
+ btrfs_remount_cleanup(fs_info, old_opts);
return ret;
}
@@ -1559,7 +1599,7 @@ static int btrfs_freeze(struct super_block *sb)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_sb(sb)->tree_root;
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
if (PTR_ERR(trans) == -ENOENT)
@@ -1684,10 +1724,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_delayed_inode;
- err = btrfs_interface_init();
+ err = btrfs_delayed_ref_init();
if (err)
goto free_auto_defrag;
+ err = btrfs_interface_init();
+ if (err)
+ goto free_delayed_ref;
+
err = register_filesystem(&btrfs_fs_type);
if (err)
goto unregister_ioctl;
@@ -1699,6 +1743,8 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
+free_delayed_ref:
+ btrfs_delayed_ref_exit();
free_auto_defrag:
btrfs_auto_defrag_exit();
free_delayed_inode:
@@ -1720,6 +1766,7 @@ free_compress:
static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
+ btrfs_delayed_ref_exit();
btrfs_auto_defrag_exit();
btrfs_delayed_inode_exit();
ordered_data_exit();
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index daac9ae6d731..5b326cd60a4a 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
-#include <linux/module.h>
#include <linux/kobject.h>
#include "ctree.h"
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4c0067c4f76d..e52da6fb1165 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -40,7 +40,6 @@ void put_transaction(struct btrfs_transaction *transaction)
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
- memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
}
@@ -51,6 +50,14 @@ static noinline void switch_commit_root(struct btrfs_root *root)
root->commit_root = btrfs_root_node(root);
}
+static inline int can_join_transaction(struct btrfs_transaction *trans,
+ int type)
+{
+ return !(trans->in_commit &&
+ type != TRANS_JOIN &&
+ type != TRANS_JOIN_NOLOCK);
+}
+
/*
* either allocate a new transaction or hop into the existing one
*/
@@ -62,7 +69,7 @@ static noinline int join_transaction(struct btrfs_root *root, int type)
spin_lock(&fs_info->trans_lock);
loop:
/* The file system has been taken offline. No new transactions. */
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
spin_unlock(&fs_info->trans_lock);
return -EROFS;
}
@@ -86,6 +93,10 @@ loop:
spin_unlock(&fs_info->trans_lock);
return cur_trans->aborted;
}
+ if (!can_join_transaction(cur_trans, type)) {
+ spin_unlock(&fs_info->trans_lock);
+ return -EBUSY;
+ }
atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
@@ -113,7 +124,7 @@ loop:
*/
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
goto loop;
- } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
@@ -155,8 +166,12 @@ loop:
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
+ atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
+ atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
+ init_waitqueue_head(&cur_trans->delayed_refs.wait);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+ INIT_LIST_HEAD(&cur_trans->ordered_operations);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
fs_info->btree_inode->i_mapping);
@@ -301,7 +316,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
int ret;
u64 qgroup_reserved = 0;
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return ERR_PTR(-EROFS);
if (current->journal_info) {
@@ -359,8 +374,11 @@ again:
do {
ret = join_transaction(root, type);
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
wait_current_trans(root);
+ if (unlikely(type == TRANS_ATTACH))
+ ret = -ENOENT;
+ }
} while (ret == -EBUSY);
if (ret < 0) {
@@ -382,9 +400,10 @@ again:
h->block_rsv = NULL;
h->orig_rsv = NULL;
h->aborted = 0;
- h->qgroup_reserved = qgroup_reserved;
+ h->qgroup_reserved = 0;
h->delayed_ref_elem.seq = 0;
h->type = type;
+ h->allocating_chunk = false;
INIT_LIST_HEAD(&h->qgroup_ref_list);
INIT_LIST_HEAD(&h->new_bgs);
@@ -400,6 +419,7 @@ again:
h->block_rsv = &root->fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes;
}
+ h->qgroup_reserved = qgroup_reserved;
got_it:
btrfs_record_root_in_trans(h, root);
@@ -451,11 +471,43 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
return start_transaction(root, 0, TRANS_USERSPACE, 0);
}
+/*
+ * btrfs_attach_transaction() - catch the running transaction
+ *
+ * It is used when we want to commit the current transaction, but
+ * don't want to start a new one.
+ *
+ * Note: if this function returns -ENOENT, it just means there is no
+ * running transaction. But it is possible that an inactive transaction
+ * is still in memory, not fully on disk. If you need to be sure there is
+ * no inactive transaction in the fs when -ENOENT is returned, invoke
+ * btrfs_attach_transaction_barrier() instead.
+ */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_ATTACH, 0);
}
+/*
+ * btrfs_attach_transaction_barrier() - catch the running transaction
+ *
+ * It is similar to the function above; the difference is that this one
+ * will wait for all the inactive transactions until they fully
+ * complete.
+ */
+struct btrfs_trans_handle *
+btrfs_attach_transaction_barrier(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+
+ trans = start_transaction(root, 0, TRANS_ATTACH, 0);
+ if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+ btrfs_wait_for_commit(root, 0);
+
+ return trans;
+}
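+/*
+ * Usage sketch (illustrative only), mirroring how btrfs_sync_fs() and
+ * btrfs_freeze() use the barrier variant in this patch:
+ *
+ *	trans = btrfs_attach_transaction_barrier(root);
+ *	if (IS_ERR(trans))
+ *		return PTR_ERR(trans) == -ENOENT ? 0 : PTR_ERR(trans);
+ *	return btrfs_commit_transaction(trans, root);
+ */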
+
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
@@ -587,7 +639,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
- while (count < 2) {
+ while (count < 1) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (cur &&
@@ -599,6 +651,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
count++;
}
+
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
@@ -644,12 +697,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_run_delayed_iputs(root);
if (trans->aborted ||
- root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
err = -EIO;
- }
assert_qgroups_uptodate(trans);
- memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
return err;
}
@@ -696,7 +747,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
+ struct blk_plug plug;
+ blk_start_plug(&plug);
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
@@ -710,6 +763,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
}
if (err)
werr = err;
+ blk_finish_plug(&plug);
return werr;
}
@@ -960,10 +1014,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
}
/*
- * defrag a given btree. If cacheonly == 1, this won't read from the disk,
- * otherwise every leaf in the btree is read and defragged.
+ * defrag a given btree.
+ * Every leaf in the btree is read and defragged.
*/
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
+int btrfs_defrag_root(struct btrfs_root *root)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_trans_handle *trans;
@@ -977,7 +1031,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_defrag_leaves(trans, root, cacheonly);
+ ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(info->tree_root);
@@ -985,6 +1039,12 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
break;
+
+ if (btrfs_defrag_cancelled(root->fs_info)) {
+ printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
+ ret = -EAGAIN;
+ break;
+ }
}
root->defrag_running = 0;
return ret;
@@ -1007,7 +1067,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct inode *parent_inode;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
- struct dentry *parent;
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
@@ -1022,7 +1081,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
ret = pending->error = -ENOMEM;
- goto path_alloc_fail;
+ return ret;
}
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
@@ -1062,10 +1121,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
+ trans->bytes_reserved = trans->block_rsv->reserved;
dentry = pending->dentry;
- parent = dget_parent(dentry);
- parent_inode = parent->d_inode;
+ parent_inode = pending->dir;
parent_root = BTRFS_I(parent_inode)->root;
record_root_in_trans(trans, parent_root);
@@ -1213,14 +1272,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret)
btrfs_abort_transaction(trans, root, ret);
fail:
- dput(parent);
trans->block_rsv = rsv;
+ trans->bytes_reserved = 0;
no_free_objectid:
kfree(new_root_item);
root_item_alloc_fail:
btrfs_free_path(path);
-path_alloc_fail:
- btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
return ret;
}
@@ -1306,13 +1363,13 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_async_commit {
struct btrfs_trans_handle *newtrans;
struct btrfs_root *root;
- struct delayed_work work;
+ struct work_struct work;
};
static void do_async_commit(struct work_struct *work)
{
struct btrfs_async_commit *ac =
- container_of(work, struct btrfs_async_commit, work.work);
+ container_of(work, struct btrfs_async_commit, work);
/*
* We've got freeze protection passed with the transaction.
@@ -1340,7 +1397,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
if (!ac)
return -ENOMEM;
- INIT_DELAYED_WORK(&ac->work, do_async_commit);
+ INIT_WORK(&ac->work, do_async_commit);
ac->root = root;
ac->newtrans = btrfs_join_transaction(root);
if (IS_ERR(ac->newtrans)) {
@@ -1364,7 +1421,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1, _THIS_IP_);
- schedule_delayed_work(&ac->work, 0);
+ schedule_work(&ac->work);
/* wait for transaction to start and unblock */
if (wait_for_unblock)
@@ -1384,6 +1441,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int err)
{
struct btrfs_transaction *cur_trans = trans->transaction;
+ DEFINE_WAIT(wait);
WARN_ON(trans->use_count > 1);
@@ -1392,8 +1450,13 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->trans_lock);
list_del_init(&cur_trans->list);
if (cur_trans == root->fs_info->running_transaction) {
+ root->fs_info->trans_no_join = 1;
+ spin_unlock(&root->fs_info->trans_lock);
+ wait_event(cur_trans->writer_wait,
+ atomic_read(&cur_trans->num_writers) == 1);
+
+ spin_lock(&root->fs_info->trans_lock);
root->fs_info->running_transaction = NULL;
- root->fs_info->trans_no_join = 0;
}
spin_unlock(&root->fs_info->trans_lock);
@@ -1427,7 +1490,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
}
if (flush_on_commit || snap_pending) {
- btrfs_start_delalloc_inodes(root, 1);
+ ret = btrfs_start_delalloc_inodes(root, 1);
+ if (ret)
+ return ret;
btrfs_wait_ordered_extents(root, 1);
}
@@ -1449,9 +1514,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
* it here and know for sure that nothing new will be added
* to the list
*/
- btrfs_run_ordered_operations(root, 1);
+ ret = btrfs_run_ordered_operations(trans, root, 1);
- return 0;
+ return ret;
}
/*
@@ -1472,27 +1537,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
int should_grow = 0;
unsigned long now = get_seconds();
- ret = btrfs_run_ordered_operations(root, 0);
+ ret = btrfs_run_ordered_operations(trans, root, 0);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
- goto cleanup_transaction;
+ btrfs_end_transaction(trans, root);
+ return ret;
}
/* Stop the commit early if ->aborted is set */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
- goto cleanup_transaction;
+ btrfs_end_transaction(trans, root);
+ return ret;
}
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
ret = btrfs_run_delayed_refs(trans, root, 0);
- if (ret)
- goto cleanup_transaction;
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
cur_trans = trans->transaction;
@@ -1506,8 +1579,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_create_pending_block_groups(trans, root);
ret = btrfs_run_delayed_refs(trans, root, 0);
- if (ret)
- goto cleanup_transaction;
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
@@ -1771,6 +1846,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cleanup_transaction:
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
// WARN_ON(1);
if (current->journal_info == trans)
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 0e8aa1e6c287..3c8e0d25c8e4 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@ struct btrfs_transaction {
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
+ struct list_head ordered_operations;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
};
@@ -68,6 +69,7 @@ struct btrfs_trans_handle {
struct btrfs_block_rsv *orig_rsv;
short aborted;
short adding_csums;
+ bool allocating_chunk;
enum btrfs_trans_type type;
/*
* this root is only needed to validate that the root passed to
@@ -82,11 +84,13 @@ struct btrfs_trans_handle {
struct btrfs_pending_snapshot {
struct dentry *dentry;
+ struct inode *dir;
struct btrfs_root *root;
struct btrfs_root *snap;
struct btrfs_qgroup_inherit *inherit;
/* block reservation for the operation */
struct btrfs_block_rsv block_rsv;
+ u64 qgroup_reserved;
/* extra metadata reseration for relocation */
int error;
bool readonly;
@@ -110,13 +114,15 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
+ struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
+int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3b580ee8ab1d..94e05c1f118a 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -23,13 +23,14 @@
#include "transaction.h"
#include "locking.h"
-/* defrag all the leaves in a given btree. If cache_only == 1, don't read
- * things from disk, otherwise read all the leaves and try to get key order to
+/*
+ * Defrag all the leaves in a given btree.
+ * Read all the leaves and try to get key order to
* better reflect disk order
*/
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int cache_only)
+ struct btrfs_root *root)
{
struct btrfs_path *path = NULL;
struct btrfs_key key;
@@ -41,9 +42,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
u64 last_ret = 0;
u64 min_trans = 0;
- if (cache_only)
- goto out;
-
if (root->fs_info->extent_root == root) {
/*
* there's recursion here right now in the tree locking,
@@ -86,11 +84,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
}
path->keep_locks = 1;
- if (cache_only)
- min_trans = root->defrag_trans_start;
- ret = btrfs_search_forward(root, &key, NULL, path,
- cache_only, min_trans);
+ ret = btrfs_search_forward(root, &key, NULL, path, min_trans);
if (ret < 0)
goto out;
if (ret > 0) {
@@ -109,11 +104,11 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
goto out;
}
path->slots[1] = btrfs_header_nritems(path->nodes[1]);
- next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
+ next_key_ret = btrfs_find_next_key(root, path, &key, 1,
min_trans);
ret = btrfs_realloc_node(trans, root,
path->nodes[1], 0,
- cache_only, &last_ret,
+ &last_ret,
&root->defrag_progress);
if (ret) {
WARN_ON(ret == -EAGAIN);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9027bb1e7466..c7ef569eb22a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -278,8 +278,7 @@ static int process_one_buffer(struct btrfs_root *log,
struct walk_control *wc, u64 gen)
{
if (wc->pin)
- btrfs_pin_extent_for_log_replay(wc->trans,
- log->fs_info->extent_root,
+ btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
eb->start, eb->len);
if (btrfs_buffer_uptodate(eb, gen, 0)) {
@@ -485,7 +484,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *key)
{
int found_type;
- u64 mask = root->sectorsize - 1;
u64 extent_end;
u64 start = key->offset;
u64 saved_nbytes;
@@ -502,7 +500,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
extent_end = start + btrfs_file_extent_num_bytes(eb, item);
else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, item);
- extent_end = (start + size + mask) & ~mask;
+ extent_end = ALIGN(start + size, root->sectorsize);
} else {
ret = 0;
goto out;
@@ -2281,6 +2279,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
unsigned long log_transid = 0;
mutex_lock(&root->log_mutex);
+ log_transid = root->log_transid;
index1 = root->log_transid % 2;
if (atomic_read(&root->log_commit[index1])) {
wait_log_commit(trans, root, root->log_transid);
@@ -2308,11 +2307,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
ret = -EAGAIN;
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
- log_transid = root->log_transid;
if (log_transid % 2 == 0)
mark = EXTENT_DIRTY;
else
@@ -2324,6 +2323,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
@@ -2363,6 +2363,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
root->fs_info->last_trans_log_full_commit = trans->transid;
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out;
@@ -2373,6 +2374,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = 0;
goto out;
@@ -2392,6 +2394,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
@@ -2402,10 +2405,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_logged_extents(log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
log_root_tree->node->start);
@@ -2461,8 +2466,10 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
.process_func = process_one_buffer
};
- ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
+ if (trans) {
+ ret = walk_log_tree(trans, log, &wc);
+ BUG_ON(ret);
+ }
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -2475,6 +2482,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
+ /*
+ * We may have short-circuited the log tree with the full commit logic
+ * and left ordered extents on our list, so clear these out to keep us
+ * from leaking inodes and memory.
+ */
+ btrfs_free_logged_extents(log, 0);
+ btrfs_free_logged_extents(log, 1);
+
free_extent_buffer(log->node);
kfree(log);
}
@@ -2724,7 +2739,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
path->keep_locks = 1;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
/*
* we didn't find anything from this transaction, see if there
@@ -3271,16 +3286,21 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *log = root->log_root;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
+ struct btrfs_ordered_extent *ordered;
struct list_head ordered_sums;
struct btrfs_map_token token;
struct btrfs_key key;
- u64 csum_offset = em->mod_start - em->start;
- u64 csum_len = em->mod_len;
+ u64 mod_start = em->mod_start;
+ u64 mod_len = em->mod_len;
+ u64 csum_offset;
+ u64 csum_len;
u64 extent_offset = em->start - em->orig_start;
u64 block_len;
int ret;
+ int index = log->log_transid % 2;
bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+insert:
INIT_LIST_HEAD(&ordered_sums);
btrfs_init_map_token(&token);
key.objectid = btrfs_ino(inode);
@@ -3296,6 +3316,23 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+
+ /*
+ * If we are overwriting an inline extent with a real one then we need
+ * to just delete the inline extent as it may not be large enough to
+ * have the entire file_extent_item.
+ */
+ if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
+ BTRFS_FILE_EXTENT_INLINE) {
+ ret = btrfs_del_item(trans, log, path);
+ btrfs_release_path(path);
+ if (ret) {
+ path->really_keep_locks = 0;
+ return ret;
+ }
+ goto insert;
+ }
+
btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
&token);
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3362,6 +3399,92 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
csum_len = block_len;
}
+ /*
+ * First check and see if our csums are on our outstanding ordered
+ * extents.
+ */
+again:
+ spin_lock_irq(&log->log_extents_lock[index]);
+ list_for_each_entry(ordered, &log->logged_list[index], log_list) {
+ struct btrfs_ordered_sum *sum;
+
+ if (!mod_len)
+ break;
+
+ if (ordered->inode != inode)
+ continue;
+
+ if (ordered->file_offset + ordered->len <= mod_start ||
+ mod_start + mod_len <= ordered->file_offset)
+ continue;
+
+ /*
+ * We are going to copy all the csums on this ordered extent, so
+ * go ahead and adjust mod_start and mod_len in case this
+ * ordered extent has already been logged.
+ */
+ if (ordered->file_offset > mod_start) {
+ if (ordered->file_offset + ordered->len >=
+ mod_start + mod_len)
+ mod_len = ordered->file_offset - mod_start;
+ /*
+ * If we have this case
+ *
+ * |--------- logged extent ---------|
+ * |----- ordered extent ----|
+ *
+ * Just don't mess with mod_start and mod_len, we'll
+ * just end up logging more csums than we need and it
+ * will be ok.
+ */
+ } else {
+ if (ordered->file_offset + ordered->len <
+ mod_start + mod_len) {
+ mod_len = (mod_start + mod_len) -
+ (ordered->file_offset + ordered->len);
+ mod_start = ordered->file_offset +
+ ordered->len;
+ } else {
+ mod_len = 0;
+ }
+ }
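+		/*
+		 * Worked example (illustrative): logging [0, 64K) with an
+		 * ordered extent covering [16K, 64K). The first branch above
+		 * trims mod_len to 16K; the csums for [16K, 64K) are copied
+		 * from the ordered extent below, and the remaining [0, 16K)
+		 * is looked up in the csum tree after the loop.
+		 */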
+
+ /*
+ * This check keeps us from looping on the above case of an ordered
+ * extent that falls inside of the logged extent.
+ */
+ if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
+ &ordered->flags))
+ continue;
+ atomic_inc(&ordered->refs);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ /*
+ * we've dropped the lock, we must either break or
+ * start over after this.
+ */
+
+ wait_event(ordered->wait, ordered->csum_bytes_left == 0);
+
+ list_for_each_entry(sum, &ordered->list, list) {
+ ret = btrfs_csum_file_blocks(trans, log, sum);
+ if (ret) {
+ btrfs_put_ordered_extent(ordered);
+ goto unlocked;
+ }
+ }
+ btrfs_put_ordered_extent(ordered);
+ goto again;
+
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+unlocked:
+
+ if (!mod_len || ret)
+ return ret;
+
+ csum_offset = mod_start - em->start;
+ csum_len = mod_len;
+
/* block start is already adjusted for the file extent offset. */
ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
em->block_start + csum_offset,
@@ -3393,6 +3516,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
u64 test_gen;
int ret = 0;
+ int num = 0;
INIT_LIST_HEAD(&extents);
@@ -3401,16 +3525,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
list_del_init(&em->list);
+
+ /*
+ * Just an arbitrary number: this can get really CPU intensive
+ * once we have a lot of extents, and at that point we just
+ * want to commit the transaction since it will be faster.
+ */
+ if (++num > 32768) {
+ list_del_init(&tree->modified_extents);
+ ret = -EFBIG;
+ goto process;
+ }
+
if (em->generation <= test_gen)
continue;
/* Need a ref to keep it from getting evicted from cache */
atomic_inc(&em->refs);
set_bit(EXTENT_FLAG_LOGGING, &em->flags);
list_add_tail(&em->list, &extents);
+ num++;
}
list_sort(NULL, &extents, extent_cmp);
+process:
while (!list_empty(&extents)) {
em = list_entry(extents.next, struct extent_map, list);
@@ -3513,6 +3652,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
mutex_lock(&BTRFS_I(inode)->log_mutex);
+ btrfs_get_logged_extents(log, inode);
+
/*
* a brute force approach to making sure we get the most uptodate
* copies of everything.
@@ -3558,7 +3699,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
while (1) {
ins_nr = 0;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
if (ret != 0)
break;
again:
@@ -3656,6 +3797,8 @@ log_extents:
BTRFS_I(inode)->logged_trans = trans->transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
+ if (err)
+ btrfs_free_logged_extents(log, log->log_transid);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
btrfs_free_path(path);
@@ -3822,7 +3965,6 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
- WARN_ON(ret != -ENOSPC);
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 1;
}
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 99be4c138db6..ddc61cad0080 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -5,7 +5,7 @@
*/
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include "ulist.h"
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5cbb7f4b1672..35bb2d4ed29f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -25,6 +25,8 @@
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
@@ -32,6 +34,7 @@
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
+#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
@@ -647,6 +650,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
new_device->can_discard = 0;
+ spin_lock_init(&new_device->io_lock);
list_replace_rcu(&device->dev_list, &new_device->dev_list);
call_rcu(&device->rcu, free_device);
@@ -792,26 +796,75 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
return ret;
}
+/*
+ * Look for a btrfs signature on a device. This may be called outside of the
+ * mount path and we are not allowed to call set_blocksize during the scan.
+ * The superblock is read via the pagecache.
+ */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
struct block_device *bdev;
- struct buffer_head *bh;
- int ret;
+ struct page *page;
+ void *p;
+ int ret = -EINVAL;
u64 devid;
u64 transid;
u64 total_devices;
+ u64 bytenr;
+ pgoff_t index;
+ /*
+ * we would like to check all the supers, but that would make
+ * a btrfs mount succeed after a mkfs from a different FS.
+ * So, we need to add a special mount option to scan for
+ * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+ */
+ bytenr = btrfs_sb_offset(0);
flags |= FMODE_EXCL;
mutex_lock(&uuid_mutex);
- ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
- if (ret)
+
+ bdev = blkdev_get_by_path(path, flags, holder);
+
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
goto error;
- disk_super = (struct btrfs_super_block *)bh->b_data;
+ }
+
+ /* make sure our super fits in the device */
+ if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+ goto error_bdev_put;
+
+ /* make sure our super fits in the page */
+ if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+ goto error_bdev_put;
+
+ /* make sure our super doesn't straddle pages on disk */
+ index = bytenr >> PAGE_CACHE_SHIFT;
+ if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+ goto error_bdev_put;
+
+ /* pull in the page with our super */
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ index, GFP_NOFS);
+
+ if (IS_ERR_OR_NULL(page))
+ goto error_bdev_put;
+
+ p = kmap(page);
+
+ /* align our pointer to the offset of the super block */
+ disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+
+ if (btrfs_super_bytenr(disk_super) != bytenr ||
+ disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
+ goto error_unmap;
+
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
total_devices = btrfs_super_num_devices(disk_super);
+
if (disk_super->label[0]) {
if (disk_super->label[BTRFS_LABEL_SIZE - 1])
disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
@@ -819,12 +872,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
} else {
printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
}
+
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
+
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
if (!ret && fs_devices_ret)
(*fs_devices_ret)->total_devices = total_devices;
- brelse(bh);
+
+error_unmap:
+ kunmap(page);
+ page_cache_release(page);
+
+error_bdev_put:
blkdev_put(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
@@ -1372,14 +1432,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
u64 devid;
u64 num_devices;
u8 *dev_uuid;
+ unsigned seq;
int ret = 0;
bool clear_super = false;
mutex_lock(&uuid_mutex);
- all_avail = root->fs_info->avail_data_alloc_bits |
- root->fs_info->avail_system_alloc_bits |
- root->fs_info->avail_metadata_alloc_bits;
+ do {
+ seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+ all_avail = root->fs_info->avail_data_alloc_bits |
+ root->fs_info->avail_system_alloc_bits |
+ root->fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&root->fs_info->profiles_lock, seq));
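+	/*
+	 * The read_seqbegin()/read_seqretry() pair retries the read if a
+	 * writer raced with us on profiles_lock, so all_avail ends up as a
+	 * consistent snapshot of the three avail_*_alloc_bits fields without
+	 * taking a blocking lock.
+	 */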
num_devices = root->fs_info->fs_devices->num_devices;
btrfs_dev_replace_lock(&root->fs_info->dev_replace);
@@ -1403,6 +1468,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
goto out;
}
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
+ root->fs_info->fs_devices->rw_devices <= 2) {
+ printk(KERN_ERR "btrfs: unable to go below two "
+ "devices on raid5\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
+ root->fs_info->fs_devices->rw_devices <= 3) {
+ printk(KERN_ERR "btrfs: unable to go below three "
+ "devices on raid6\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
struct btrfs_device *tmp;
@@ -2616,7 +2696,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
chunk_used = btrfs_block_group_used(&cache->item);
if (bargs->usage == 0)
- user_thresh = 0;
+ user_thresh = 1;
else if (bargs->usage > 100)
user_thresh = cache->key.offset;
else
@@ -2664,11 +2744,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
return 0;
if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
- factor = num_stripes / factor;
+ BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
+ factor = num_stripes / 2;
+ } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
+ factor = num_stripes - 1;
+ } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
+ factor = num_stripes - 2;
+ } else {
+ factor = num_stripes;
+ }
for (i = 0; i < num_stripes; i++) {
stripe = btrfs_stripe_nr(chunk, i);
@@ -2985,6 +3069,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
int mixed = 0;
int ret;
u64 num_devices;
+ unsigned seq;
if (btrfs_fs_closing(fs_info) ||
atomic_read(&fs_info->balance_pause_req) ||
@@ -3027,7 +3112,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
else
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10);
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6);
if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(!alloc_profile_is_valid(bctl->data.target, 1) ||
@@ -3067,23 +3154,29 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
/* allow to reduce meta or sys integrity only if force set */
allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10;
- if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (fs_info->avail_system_alloc_bits & allowed) &&
- !(bctl->sys.target & allowed)) ||
- ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (fs_info->avail_metadata_alloc_bits & allowed) &&
- !(bctl->meta.target & allowed))) {
- if (bctl->flags & BTRFS_BALANCE_FORCE) {
- printk(KERN_INFO "btrfs: force reducing metadata "
- "integrity\n");
- } else {
- printk(KERN_ERR "btrfs: balance will reduce metadata "
- "integrity, use force if you want this\n");
- ret = -EINVAL;
- goto out;
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6;
+ do {
+ seq = read_seqbegin(&fs_info->profiles_lock);
+
+ if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (fs_info->avail_system_alloc_bits & allowed) &&
+ !(bctl->sys.target & allowed)) ||
+ ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (fs_info->avail_metadata_alloc_bits & allowed) &&
+ !(bctl->meta.target & allowed))) {
+ if (bctl->flags & BTRFS_BALANCE_FORCE) {
+ printk(KERN_INFO "btrfs: force reducing metadata "
+ "integrity\n");
+ } else {
+ printk(KERN_ERR "btrfs: balance will reduce metadata "
+ "integrity, use force if you want this\n");
+ ret = -EINVAL;
+ goto out;
+ }
}
- }
+ } while (read_seqretry(&fs_info->profiles_lock, seq));
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
int num_tolerated_disk_barrier_failures;
@@ -3127,21 +3220,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
mutex_lock(&fs_info->balance_mutex);
atomic_dec(&fs_info->balance_running);
- if (bargs) {
- memset(bargs, 0, sizeof(*bargs));
- update_ioctl_balance_args(fs_info, 0, bargs);
- }
-
- if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
- balance_need_close(fs_info)) {
- __cancel_balance(fs_info);
- }
-
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
}
+ if (bargs) {
+ memset(bargs, 0, sizeof(*bargs));
+ update_ioctl_balance_args(fs_info, 0, bargs);
+ }
+
wake_up(&fs_info->balance_wait_q);
return ret;
@@ -3504,13 +3592,86 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
}
struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
- { 2, 1, 0, 4, 2, 2 /* raid10 */ },
- { 1, 1, 2, 2, 2, 2 /* raid1 */ },
- { 1, 2, 1, 1, 1, 2 /* dup */ },
- { 1, 1, 0, 2, 1, 1 /* raid0 */ },
- { 1, 1, 1, 1, 1, 1 /* single */ },
+ [BTRFS_RAID_RAID10] = {
+ .sub_stripes = 2,
+ .dev_stripes = 1,
+ .devs_max = 0, /* 0 == as many as possible */
+ .devs_min = 4,
+ .devs_increment = 2,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID1] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 2,
+ .devs_min = 2,
+ .devs_increment = 2,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_DUP] = {
+ .sub_stripes = 1,
+ .dev_stripes = 2,
+ .devs_max = 1,
+ .devs_min = 1,
+ .devs_increment = 1,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID0] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 2,
+ .devs_increment = 1,
+ .ncopies = 1,
+ },
+ [BTRFS_RAID_SINGLE] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 1,
+ .devs_min = 1,
+ .devs_increment = 1,
+ .ncopies = 1,
+ },
+ [BTRFS_RAID_RAID5] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 2,
+ .devs_increment = 1,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID6] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 3,
+ .devs_increment = 1,
+ .ncopies = 3,
+ },
};
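+/*
+ * Reading an entry (illustrative): RAID10 stripes across mirrored pairs, so
+ * it needs at least devs_min = 4 devices, grows devs_increment = 2 devices
+ * at a time, keeps ncopies = 2 copies of every block, and sub_stripes = 2
+ * records the mirroring inside each stripe; devs_max = 0 means "use as many
+ * devices as possible".
+ */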
+static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
+{
+ /* TODO allow them to set a preferred stripe size */
+ return 64 * 1024;
+}
+
+static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+ u64 features;
+
+ if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+ return;
+
+ features = btrfs_super_incompat_flags(info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
+ return;
+
+ features |= BTRFS_FEATURE_INCOMPAT_RAID56;
+ btrfs_set_super_incompat_flags(info->super_copy, features);
+ printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+}
+
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup **map_ret,
@@ -3526,6 +3687,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_device_info *devices_info = NULL;
u64 total_avail;
int num_stripes; /* total number of stripes to allocate */
+ int data_stripes; /* number of stripes that count for
+ block group size */
int sub_stripes; /* sub_stripes info for map */
int dev_stripes; /* stripes per dev */
int devs_max; /* max devs to use */
@@ -3537,6 +3700,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 max_chunk_size;
u64 stripe_size;
u64 num_bytes;
+ u64 raid_stripe_len = BTRFS_STRIPE_LEN;
int ndevs;
int i;
int j;
@@ -3631,12 +3795,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
continue;
+ if (ndevs == fs_devices->rw_devices) {
+ WARN(1, "%s: found more than %llu devices\n",
+ __func__, fs_devices->rw_devices);
+ break;
+ }
devices_info[ndevs].dev_offset = dev_offset;
devices_info[ndevs].max_avail = max_avail;
devices_info[ndevs].total_avail = total_avail;
devices_info[ndevs].dev = device;
++ndevs;
- WARN_ON(ndevs > fs_devices->rw_devices);
}
/*
@@ -3662,16 +3830,48 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
num_stripes = ndevs * dev_stripes;
- if (stripe_size * ndevs > max_chunk_size * ncopies) {
- stripe_size = max_chunk_size * ncopies;
- do_div(stripe_size, ndevs);
+ /*
+ * this will have to be fixed for RAID1 and RAID10 over
+ * more drives
+ */
+ data_stripes = num_stripes / ncopies;
+
+ if (type & BTRFS_BLOCK_GROUP_RAID5) {
+ raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
+ btrfs_super_stripesize(info->super_copy));
+ data_stripes = num_stripes - 1;
+ }
+ if (type & BTRFS_BLOCK_GROUP_RAID6) {
+ raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
+ btrfs_super_stripesize(info->super_copy));
+ data_stripes = num_stripes - 2;
+ }
+
+ /*
+ * Use the number of data stripes to figure out how big this chunk
+ * is really going to be in terms of logical address space,
+ * and compare that answer with the max chunk size
+ */
+ if (stripe_size * data_stripes > max_chunk_size) {
+ u64 mask = (1ULL << 24) - 1;
+ stripe_size = max_chunk_size;
+ do_div(stripe_size, data_stripes);
+
+ /* bump the answer up to a 16MB boundary */
+ stripe_size = (stripe_size + mask) & ~mask;
+
+ /* but don't go higher than the limits we found
+ * while searching for free extents
+ */
+ if (stripe_size > devices_info[ndevs-1].max_avail)
+ stripe_size = devices_info[ndevs-1].max_avail;
}
do_div(stripe_size, dev_stripes);
/* align to BTRFS_STRIPE_LEN */
- do_div(stripe_size, BTRFS_STRIPE_LEN);
- stripe_size *= BTRFS_STRIPE_LEN;
+ do_div(stripe_size, raid_stripe_len);
+ stripe_size *= raid_stripe_len;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
@@ -3689,14 +3889,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
}
}
map->sector_size = extent_root->sectorsize;
- map->stripe_len = BTRFS_STRIPE_LEN;
- map->io_align = BTRFS_STRIPE_LEN;
- map->io_width = BTRFS_STRIPE_LEN;
+ map->stripe_len = raid_stripe_len;
+ map->io_align = raid_stripe_len;
+ map->io_width = raid_stripe_len;
map->type = type;
map->sub_stripes = sub_stripes;
*map_ret = map;
- num_bytes = stripe_size * (num_stripes / ncopies);
+ num_bytes = stripe_size * data_stripes;
*stripe_size_out = stripe_size;
*num_bytes_out = num_bytes;
@@ -3718,15 +3918,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
- free_extent_map(em);
- if (ret)
- goto error;
-
- ret = btrfs_make_block_group(trans, extent_root, 0, type,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID,
- start, num_bytes);
- if (ret)
+ if (ret) {
+ free_extent_map(em);
goto error;
+ }
for (i = 0; i < map->num_stripes; ++i) {
struct btrfs_device *device;
@@ -3739,15 +3934,44 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, dev_offset, stripe_size);
- if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
- goto error;
- }
+ if (ret)
+ goto error_dev_extent;
+ }
+
+ ret = btrfs_make_block_group(trans, extent_root, 0, type,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+ start, num_bytes);
+ if (ret) {
+ i = map->num_stripes - 1;
+ goto error_dev_extent;
}
+ free_extent_map(em);
+ check_raid56_incompat_flag(extent_root->fs_info, type);
+
kfree(devices_info);
return 0;
+error_dev_extent:
+ for (; i >= 0; i--) {
+ struct btrfs_device *device;
+ int err;
+
+ device = map->stripes[i].dev;
+ err = btrfs_free_dev_extent(trans, device, start);
+ if (err) {
+ btrfs_abort_transaction(trans, extent_root, err);
+ break;
+ }
+ }
+ write_lock(&em_tree->lock);
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+
+ /* One for our allocation */
+ free_extent_map(em);
+ /* One for the tree reference */
+ free_extent_map(em);
error:
kfree(map);
kfree(devices_info);
@@ -3887,10 +4111,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
- fs_info->avail_metadata_alloc_bits;
- alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+ alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, alloc_profile);
if (ret)
@@ -3898,10 +4119,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
sys_chunk_offset = chunk_offset + chunk_size;
- alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
- fs_info->avail_system_alloc_bits;
- alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+ alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
@@ -4014,6 +4232,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
ret = map->sub_stripes;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ ret = 2;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ ret = 3;
else
ret = 1;
free_extent_map(em);
@@ -4026,6 +4248,52 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return ret;
}
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+ struct btrfs_mapping_tree *map_tree,
+ u64 logical)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct extent_map_tree *em_tree = &map_tree->map_tree;
+ unsigned long len = root->sectorsize;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, len);
+ read_unlock(&em_tree->lock);
+ BUG_ON(!em);
+
+ BUG_ON(em->start > logical || em->start + em->len < logical);
+ map = (struct map_lookup *)em->bdev;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ len = map->stripe_len * nr_data_stripes(map);
+ }
+ free_extent_map(em);
+ return len;
+}
+
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+ u64 logical, u64 len, int mirror_num)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct extent_map_tree *em_tree = &map_tree->map_tree;
+ int ret = 0;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, len);
+ read_unlock(&em_tree->lock);
+ BUG_ON(!em);
+
+ BUG_ON(em->start > logical || em->start + em->len < logical);
+ map = (struct map_lookup *)em->bdev;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6))
+ ret = 1;
+ free_extent_map(em);
+ return ret;
+}
+
static int find_live_mirror(struct btrfs_fs_info *fs_info,
struct map_lookup *map, int first, int num,
int optimal, int dev_replace_is_ongoing)
@@ -4063,10 +4331,39 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
return optimal;
}
+static inline int parity_smaller(u64 a, u64 b)
+{
+ return a > b;
+}
+
+/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
+static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+{
+ struct btrfs_bio_stripe s;
+ int i;
+ u64 l;
+ int again = 1;
+
+ while (again) {
+ again = 0;
+ for (i = 0; i < bbio->num_stripes - 1; i++) {
+ if (parity_smaller(raid_map[i], raid_map[i+1])) {
+ s = bbio->stripes[i];
+ l = raid_map[i];
+ bbio->stripes[i] = bbio->stripes[i+1];
+ raid_map[i] = raid_map[i+1];
+ bbio->stripes[i+1] = s;
+ raid_map[i+1] = l;
+ again = 1;
+ }
+ }
+ }
+}
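+/*
+ * Note: the P/Q placeholders (RAID5_P_STRIPE/RAID6_Q_STRIPE) are assumed to
+ * compare greater than any real logical address, so with parity_smaller()
+ * comparing raw values the sort above leaves the parity and syndrome stripes
+ * at the end of the stripe array.
+ */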
+
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
- int mirror_num)
+ int mirror_num, u64 **raid_map_ret)
{
struct extent_map *em;
struct map_lookup *map;
@@ -4078,6 +4375,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 stripe_nr;
u64 stripe_nr_orig;
u64 stripe_nr_end;
+ u64 stripe_len;
+ u64 *raid_map = NULL;
int stripe_index;
int i;
int ret = 0;
@@ -4089,6 +4388,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
int num_alloc_stripes;
int patch_the_first_stripe_for_dev_replace = 0;
u64 physical_to_patch_in_first_stripe = 0;
+ u64 raid56_full_stripe_start = (u64)-1;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
@@ -4105,29 +4405,63 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;
+ if (mirror_num > map->num_stripes)
+ mirror_num = 0;
+
+ stripe_len = map->stripe_len;
stripe_nr = offset;
/*
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
- do_div(stripe_nr, map->stripe_len);
+ do_div(stripe_nr, stripe_len);
- stripe_offset = stripe_nr * map->stripe_len;
+ stripe_offset = stripe_nr * stripe_len;
BUG_ON(offset < stripe_offset);
/* stripe_offset is the offset of this block in its stripe*/
stripe_offset = offset - stripe_offset;
- if (rw & REQ_DISCARD)
+ /* if we're here for raid56, we need to know the stripe aligned start */
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+ unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
+ raid56_full_stripe_start = offset;
+
+ /* allow a write of a full stripe, but make sure we don't
+ * allow straddling of stripes
+ */
+ do_div(raid56_full_stripe_start, full_stripe_len);
+ raid56_full_stripe_start *= full_stripe_len;
+ }
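+		/*
+		 * Example (illustrative): stripe_len = 64K on a three-device
+		 * RAID5 gives full_stripe_len = 128K, so an offset of 200K
+		 * rounds down to raid56_full_stripe_start = 128K.
+		 */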
+
+ if (rw & REQ_DISCARD) {
+ /* we don't discard raid56 yet */
+ if (map->type &
+ (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
*length = min_t(u64, em->len - offset, *length);
- else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
- /* we limit the length of each bio to what fits in a stripe */
- *length = min_t(u64, em->len - offset,
- map->stripe_len - stripe_offset);
+ } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ u64 max_len;
+ /* For writes to RAID[56], allow a full stripeset across all disks.
+ For other RAID types and for RAID[56] reads, just allow a single
+ stripe (on a single disk). */
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+ (rw & REQ_WRITE)) {
+ max_len = stripe_len * nr_data_stripes(map) -
+ (offset - raid56_full_stripe_start);
+ } else {
+ /* we limit the length of each bio to what fits in a stripe */
+ max_len = stripe_len - stripe_offset;
+ }
+ *length = min_t(u64, em->len - offset, max_len);
} else {
*length = em->len - offset;
}
+ /* This is for when we're called from btrfs_merge_bio_hook() and all
+ it cares about is the length */
if (!bbio_ret)
goto out;
@@ -4160,7 +4494,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 physical_of_found = 0;
ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
- logical, &tmp_length, &tmp_bbio, 0);
+ logical, &tmp_length, &tmp_bbio, 0, NULL);
if (ret) {
WARN_ON(tmp_bbio != NULL);
goto out;
@@ -4221,11 +4555,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_stripes = 1;
stripe_index = 0;
stripe_nr_orig = stripe_nr;
- stripe_nr_end = (offset + *length + map->stripe_len - 1) &
- (~(map->stripe_len - 1));
+ stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
do_div(stripe_nr_end, map->stripe_len);
stripe_end_offset = stripe_nr_end * map->stripe_len -
(offset + *length);
+
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
@@ -4276,6 +4610,65 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
dev_replace_is_ongoing);
mirror_num = stripe_index - old_stripe_index + 1;
}
+
+ } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ u64 tmp;
+
+ if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
+ && raid_map_ret) {
+ int i, rot;
+
+ /* push stripe_nr back to the start of the full stripe */
+ stripe_nr = raid56_full_stripe_start;
+ do_div(stripe_nr, stripe_len);
+
+ stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+
+ /* RAID[56] write or recovery. Return all stripes */
+ num_stripes = map->num_stripes;
+ max_errors = nr_parity_stripes(map);
+
+ raid_map = kmalloc(sizeof(u64) * num_stripes,
+ GFP_NOFS);
+ if (!raid_map) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Work out the disk rotation on this stripe-set */
+ tmp = stripe_nr;
+ rot = do_div(tmp, num_stripes);
+
+ /* Fill in the logical address of each stripe */
+ tmp = stripe_nr * nr_data_stripes(map);
+ for (i = 0; i < nr_data_stripes(map); i++)
+ raid_map[(i+rot) % num_stripes] =
+ em->start + (tmp + i) * map->stripe_len;
+
+ raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+ if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ raid_map[(i+rot+1) % num_stripes] =
+ RAID6_Q_STRIPE;
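+			/*
+			 * Example (illustrative): a three-device RAID5 has
+			 * two data stripes, so for the second full stripe
+			 * (stripe_nr = 1) rot is 1, the data addresses land
+			 * in raid_map[1] and raid_map[2], and the P
+			 * placeholder ends up in raid_map[0] -- parity
+			 * rotates one device per full stripe.
+			 */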
+
+ *length = map->stripe_len;
+ stripe_index = 0;
+ stripe_offset = 0;
+ } else {
+ /*
+ * Mirror #0 or #1 means the original data block.
+ * Mirror #2 is RAID5 parity block.
+ * Mirror #3 is RAID6 Q block.
+ */
+ stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+ if (mirror_num > 1)
+ stripe_index = nr_data_stripes(map) +
+ mirror_num - 2;
+
+ /* We distribute the parity blocks across stripes */
+ tmp = stripe_nr + stripe_index;
+ stripe_index = do_div(tmp, map->num_stripes);
+ }
} else {
/*
* after this do_div call, stripe_nr is the number of stripes
@@ -4384,8 +4777,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_DUP)) {
max_errors = 1;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+ max_errors = 2;
}
}
@@ -4486,6 +4882,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
bbio->mirror_num = map->num_stripes + 1;
}
+ if (raid_map) {
+ sort_parity_stripes(bbio, raid_map);
+ *raid_map_ret = raid_map;
+ }
out:
if (dev_replace_is_ongoing)
btrfs_dev_replace_unlock(dev_replace);
@@ -4498,7 +4898,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
struct btrfs_bio **bbio_ret, int mirror_num)
{
return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
- mirror_num);
+ mirror_num, NULL);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -4512,6 +4912,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 bytenr;
u64 length;
u64 stripe_nr;
+ u64 rmap_len;
int i, j, nr = 0;
read_lock(&em_tree->lock);
@@ -4522,10 +4923,17 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
map = (struct map_lookup *)em->bdev;
length = em->len;
+ rmap_len = map->stripe_len;
+
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
do_div(length, map->num_stripes / map->sub_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
do_div(length, map->num_stripes);
+ else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ do_div(length, nr_data_stripes(map));
+ rmap_len = map->stripe_len * nr_data_stripes(map);
+ }
buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
BUG_ON(!buf); /* -ENOMEM */
@@ -4545,8 +4953,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
do_div(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
- }
- bytenr = chunk_start + stripe_nr * map->stripe_len;
+ } /* else if RAID[56], multiply by nr_data_stripes().
+ * Alternatively, just use rmap_len below instead of
+ * map->stripe_len */
+
+ bytenr = chunk_start + stripe_nr * rmap_len;
WARN_ON(nr >= map->num_stripes);
for (j = 0; j < nr; j++) {
if (buf[j] == bytenr)
@@ -4560,7 +4971,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
*logical = buf;
*naddrs = nr;
- *stripe_len = map->stripe_len;
+ *stripe_len = rmap_len;
free_extent_map(em);
return 0;
@@ -4634,7 +5045,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
bio->bi_bdev = (struct block_device *)
(unsigned long)bbio->mirror_num;
/* only send an error to the higher layers if it is
- * beyond the tolerance of the multi-bio
+ * beyond the tolerance of the btrfs bio
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
@@ -4668,13 +5079,18 @@ struct async_sched {
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static noinline void schedule_bio(struct btrfs_root *root,
+noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
+ if (device->missing || !device->bdev) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
/* don't bother with additional async steps for reads, right now */
if (!(rw & REQ_WRITE)) {
bio_get(bio);
@@ -4772,7 +5188,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
bio->bi_bdev = dev->bdev;
if (async)
- schedule_bio(root, dev, rw, bio);
+ btrfs_schedule_bio(root, dev, rw, bio);
else
btrfsic_submit_bio(rw, bio);
}
@@ -4831,6 +5247,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
+ u64 *raid_map = NULL;
int ret;
int dev_nr = 0;
int total_devs = 1;
@@ -4839,12 +5256,30 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
length = bio->bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
- mirror_num);
- if (ret)
+ ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
+ mirror_num, &raid_map);
+ if (ret) /* -ENOMEM */
return ret;
total_devs = bbio->num_stripes;
+ bbio->orig_bio = first_bio;
+ bbio->private = first_bio->bi_private;
+ bbio->end_io = first_bio->bi_end_io;
+ atomic_set(&bbio->stripes_pending, bbio->num_stripes);
+
+ if (raid_map) {
+ /* In this case, map_length has been set to the length of
+ a single stripe, not the whole write */
+ if (rw & WRITE) {
+ return raid56_parity_write(root, bio, bbio,
+ raid_map, map_length);
+ } else {
+ return raid56_parity_recover(root, bio, bbio,
+ raid_map, map_length,
+ mirror_num);
+ }
+ }
+
if (map_length < length) {
printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
"len %llu\n", (unsigned long long)logical,
@@ -4853,11 +5288,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
BUG();
}
- bbio->orig_bio = first_bio;
- bbio->private = first_bio->bi_private;
- bbio->end_io = first_bio->bi_end_io;
- atomic_set(&bbio->stripes_pending, bbio->num_stripes);
-
while (dev_nr < total_devs) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index d3c3939ac751..062d8604d35b 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -21,8 +21,8 @@
#include <linux/bio.h>
#include <linux/sort.h>
+#include <linux/btrfs.h>
#include "async-thread.h"
-#include "ioctl.h"
#define BTRFS_STRIPE_LEN (64 * 1024)
@@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
int btrfs_scratch_superblock(struct btrfs_device *device);
-
+void btrfs_schedule_bio(struct btrfs_root *root,
+ struct btrfs_device *device,
+ int rw, struct bio *bio);
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+ u64 logical, u64 len, int mirror_num);
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+ struct btrfs_mapping_tree *map_tree,
+ u64 logical);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{
diff --git a/fs/buffer.c b/fs/buffer.c
index 62169c192c21..b4dcb34c9635 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,6 +41,7 @@
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
+#include <trace/events/block.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
@@ -53,6 +54,13 @@ void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
}
EXPORT_SYMBOL(init_buffer);
+inline void touch_buffer(struct buffer_head *bh)
+{
+ trace_block_touch_buffer(bh);
+ mark_page_accessed(bh->b_page);
+}
+EXPORT_SYMBOL(touch_buffer);
+
static int sleep_on_buffer(void *word)
{
io_schedule();
@@ -1113,6 +1121,8 @@ void mark_buffer_dirty(struct buffer_head *bh)
{
WARN_ON_ONCE(!buffer_uptodate(bh));
+ trace_block_dirty_buffer(bh);
+
/*
* Very *carefully* optimize the it-is-already-dirty case.
*
@@ -2332,7 +2342,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
unsigned long end;
loff_t size;
int ret;
@@ -2371,7 +2381,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
int ret;
- struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+ struct super_block *sb = file_inode(vma->vm_file)->i_sb;
sb_start_pagefault(sb);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 064d1a68d2c1..a60ea977af6f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -195,7 +195,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
*/
static int readpage_nounlock(struct file *filp, struct page *page)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_inode_to_client(inode)->client->osdc;
@@ -236,16 +236,10 @@ static int ceph_readpage(struct file *filp, struct page *page)
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
struct inode *inode = req->r_inode;
- struct ceph_osd_reply_head *replyhead;
- int rc, bytes;
+ int rc = req->r_result;
+ int bytes = le32_to_cpu(msg->hdr.data_len);
int i;
- /* parse reply */
- replyhead = msg->front.iov_base;
- WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
- rc = le32_to_cpu(replyhead->result);
- bytes = le32_to_cpu(msg->hdr.data_len);
-
dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
/* unlock all pages, zeroing any data we didn't read */
@@ -315,7 +309,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0,
ci->i_truncate_seq, ci->i_truncate_size,
- NULL, false, 1, 0);
+ NULL, false, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -370,7 +364,7 @@ out:
static int ceph_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned nr_pages)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
int rc = 0;
int max = 0;
@@ -492,8 +486,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
&ci->i_layout, snapc,
page_off, len,
ci->i_truncate_seq, ci->i_truncate_size,
- &inode->i_mtime,
- &page, 1, 0, 0, true);
+ &inode->i_mtime, &page, 1);
if (err < 0) {
dout("writepage setting page/mapping error %d %p\n", err, page);
SetPageError(page);
@@ -554,27 +547,18 @@ static void writepages_finish(struct ceph_osd_request *req,
struct ceph_msg *msg)
{
struct inode *inode = req->r_inode;
- struct ceph_osd_reply_head *replyhead;
- struct ceph_osd_op *op;
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned wrote;
struct page *page;
int i;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
- __s32 rc = -EIO;
- u64 bytes = 0;
+ int rc = req->r_result;
+ u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
long writeback_stat;
unsigned issued = ceph_caps_issued(ci);
- /* parse reply */
- replyhead = msg->front.iov_base;
- WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
- op = (void *)(replyhead + 1);
- rc = le32_to_cpu(replyhead->result);
- bytes = le64_to_cpu(op->extent.length);
-
if (rc >= 0) {
/*
* Assume we wrote the pages we originally sent. The
@@ -741,8 +725,6 @@ retry:
struct page *page;
int want;
u64 offset, len;
- struct ceph_osd_request_head *reqhead;
- struct ceph_osd_op *op;
long writeback_stat;
next = 0;
@@ -838,7 +820,7 @@ get_more_pages:
snapc, do_sync,
ci->i_truncate_seq,
ci->i_truncate_size,
- &inode->i_mtime, true, 1, 0);
+ &inode->i_mtime, true, 0);
if (IS_ERR(req)) {
rc = PTR_ERR(req);
@@ -906,10 +888,8 @@ get_more_pages:
/* revise final length, page count */
req->r_num_pages = locked_pages;
- reqhead = req->r_request->front.iov_base;
- op = (void *)(reqhead + 1);
- op->extent.length = cpu_to_le64(len);
- op->payload_len = cpu_to_le32(len);
+ req->r_request_ops[0].extent.length = cpu_to_le64(len);
+ req->r_request_ops[0].payload_len = cpu_to_le32(len);
req->r_request->hdr.data_len = cpu_to_le32(len);
rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
@@ -977,7 +957,7 @@ static int ceph_update_writeable_page(struct file *file,
loff_t pos, unsigned len,
struct page *page)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
loff_t page_off = pos & PAGE_CACHE_MASK;
@@ -1086,7 +1066,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = file->private_data;
struct page *page;
@@ -1144,7 +1124,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1228,7 +1208,7 @@ const struct address_space_operations ceph_aops = {
*/
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct page *page = vmf->page;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
loff_t off = page_offset(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a1d9bb30c1bf..78e2f575247d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -611,8 +611,16 @@ retry:
if (flags & CEPH_CAP_FLAG_AUTH)
ci->i_auth_cap = cap;
- else if (ci->i_auth_cap == cap)
+ else if (ci->i_auth_cap == cap) {
ci->i_auth_cap = NULL;
+ spin_lock(&mdsc->cap_dirty_lock);
+ if (!list_empty(&ci->i_dirty_item)) {
+ dout(" moving %p to cap_dirty_migrating\n", inode);
+ list_move(&ci->i_dirty_item,
+ &mdsc->cap_dirty_migrating);
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+ }
dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
@@ -930,7 +938,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
u64 size, u64 max_size,
struct timespec *mtime, struct timespec *atime,
u64 time_warp_seq,
- uid_t uid, gid_t gid, umode_t mode,
+ kuid_t uid, kgid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
u64 follows)
@@ -974,8 +982,8 @@ static int send_cap_msg(struct ceph_mds_session *session,
ceph_encode_timespec(&fc->atime, atime);
fc->time_warp_seq = cpu_to_le32(time_warp_seq);
- fc->uid = cpu_to_le32(uid);
- fc->gid = cpu_to_le32(gid);
+ fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
+ fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
fc->mode = cpu_to_le32(mode);
fc->xattr_version = cpu_to_le64(xattr_version);
@@ -1081,8 +1089,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
struct timespec mtime, atime;
int wake = 0;
umode_t mode;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
struct ceph_mds_session *session;
u64 xattr_version = 0;
struct ceph_buffer *xattr_blob = NULL;
@@ -1460,7 +1468,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
- int file_wanted, used;
+ int file_wanted, used, cap_used;
int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
int issued, implemented, want, retain, revoking, flushing = 0;
int mds = -1; /* keep track of how far we've gone through i_caps list
@@ -1563,9 +1571,14 @@ retry_locked:
/* NOTE: no side-effects allowed, until we take s_mutex */
+ cap_used = used;
+ if (ci->i_auth_cap && cap != ci->i_auth_cap)
+ cap_used &= ~ci->i_auth_cap->issued;
+
revoking = cap->implemented & ~cap->issued;
- dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+ dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
cap->mds, cap, ceph_cap_string(cap->issued),
+ ceph_cap_string(cap_used),
ceph_cap_string(cap->implemented),
ceph_cap_string(revoking));
@@ -1593,7 +1606,7 @@ retry_locked:
}
/* completed revocation? going down and there are no caps? */
- if (revoking && (revoking & used) == 0) {
+ if (revoking && (revoking & cap_used) == 0) {
dout("completed revocation of %s\n",
ceph_cap_string(cap->implemented & ~cap->issued));
goto ack;
@@ -1670,8 +1683,8 @@ ack:
sent++;
/* __send_cap drops i_ceph_lock */
- delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
- retain, flushing, NULL);
+ delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
+ want, retain, flushing, NULL);
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
@@ -2359,10 +2372,11 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
inode->i_mode = le32_to_cpu(grant->mode);
- inode->i_uid = le32_to_cpu(grant->uid);
- inode->i_gid = le32_to_cpu(grant->gid);
+ inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
+ inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- inode->i_uid, inode->i_gid);
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
}
if ((issued & CEPH_CAP_LINK_EXCL) == 0)
@@ -2416,7 +2430,9 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
dout("mds wanted %s -> %s\n",
ceph_cap_string(le32_to_cpu(grant->wanted)),
ceph_cap_string(wanted));
- grant->wanted = cpu_to_le32(wanted);
+ /* imported cap may not have correct mds_wanted */
+ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
+ check_caps = 1;
}
cap->seq = seq;
@@ -2820,6 +2836,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
(unsigned)seq);
+ if (op == CEPH_CAP_OP_IMPORT)
+ ceph_add_cap_releases(mdsc, session);
+
/* lookup ino */
inode = ceph_find_inode(sb, vino);
ci = ceph_inode(inode);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 8c1aabe93b67..6d797f46d772 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -238,7 +238,7 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name,
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct ceph_file_info *fi = filp->private_data;
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1138,7 +1138,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
loff_t *ppos)
{
struct ceph_file_info *cf = file->private_data;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
int left;
const int bufsize = 1024;
@@ -1188,7 +1188,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct list_head *head = &ci->i_unsafe_dirops;
struct ceph_mds_request *req;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index ca3ab3f9ca70..16796be53ca5 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -81,7 +81,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
if (parent_inode) {
/* nfsd wants connectable */
*max_len = connected_handle_length;
- type = 255;
+ type = FILEID_INVALID;
} else {
dout("encode_fh %p\n", dentry);
fh->ino = ceph_ino(inode);
@@ -90,7 +90,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
}
} else {
*max_len = handle_length;
- type = 255;
+ type = FILEID_INVALID;
}
if (dentry)
dput(dentry);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index e51558fca3a3..bf338d9b67e3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -243,6 +243,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = ceph_mdsc_do_request(mdsc,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
+ if (err)
+ goto out_err;
+
err = ceph_handle_snapdir(req, dentry, err);
if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
@@ -263,6 +266,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = finish_no_open(file, dn);
} else {
dout("atomic_open finish_open on dn %p\n", dn);
+ if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+ *opened |= FILE_CREATED;
+ }
err = finish_open(file, dentry, ceph_open, opened);
}
@@ -393,7 +399,7 @@ more:
static ssize_t ceph_sync_read(struct file *file, char __user *data,
unsigned len, loff_t *poff, int *checkeof)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct page **pages;
u64 off = *poff;
int num_pages, ret;
@@ -466,7 +472,7 @@ static void sync_write_commit(struct ceph_osd_request *req,
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
size_t left, loff_t *offset)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req;
@@ -483,7 +489,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
int ret;
struct timespec mtime = CURRENT_TIME;
- if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
+ if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
dout("sync_write on file %p %lld~%u %s\n", file, *offset,
@@ -535,7 +541,7 @@ more:
ci->i_snap_realm->cached_context,
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
- &mtime, false, 2, page_align);
+ &mtime, false, page_align);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -637,7 +643,7 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
struct ceph_file_info *fi = filp->private_data;
loff_t *ppos = &iocb->ki_pos;
size_t len = iov->iov_len;
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
void __user *base = iov->iov_base;
ssize_t ret;
@@ -707,7 +713,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct file *file = iocb->ki_filp;
struct ceph_file_info *fi = file->private_data;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 2971eaa65cdc..851814d951cd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -612,10 +612,11 @@ static int fill_inode(struct inode *inode,
if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
inode->i_mode = le32_to_cpu(info->mode);
- inode->i_uid = le32_to_cpu(info->uid);
- inode->i_gid = le32_to_cpu(info->gid);
+ inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
+ inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
- inode->i_uid, inode->i_gid);
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kgid(&init_user_ns, inode->i_gid));
}
if ((issued & CEPH_CAP_LINK_EXCL) == 0)
@@ -1130,8 +1131,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
req->r_request_started);
dout(" final dn %p\n", dn);
i++;
- } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
- req->r_op == CEPH_MDS_OP_MKSNAP) {
+ } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+ req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
struct dentry *dn = req->r_dentry;
/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1195,6 +1196,39 @@ done:
/*
* Prepopulate our cache with readdir results, leases, etc.
*/
+static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ int i, err = 0;
+
+ for (i = 0; i < rinfo->dir_nr; i++) {
+ struct ceph_vino vino;
+ struct inode *in;
+ int rc;
+
+ vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
+ vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+
+ in = ceph_get_inode(req->r_dentry->d_sb, vino);
+ if (IS_ERR(in)) {
+ err = PTR_ERR(in);
+ dout("new_inode badness got %d\n", err);
+ continue;
+ }
+ rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ req->r_request_started, -1,
+ &req->r_caps_reservation);
+ if (rc < 0) {
+ pr_err("fill_inode badness on %p got %d\n", in, rc);
+ err = rc;
+ continue;
+ }
+ }
+
+ return err;
+}
+
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
@@ -1209,6 +1243,9 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
u64 frag = le32_to_cpu(rhead->args.readdir.frag);
struct ceph_dentry_info *di;
+ if (req->r_aborted)
+ return readdir_prepopulate_inodes_only(req, session);
+
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
snapdir = ceph_get_snapdir(parent->d_inode);
parent = d_find_alias(snapdir);
@@ -1565,26 +1602,30 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
if (ia_valid & ATTR_UID) {
dout("setattr %p uid %d -> %d\n", inode,
- inode->i_uid, attr->ia_uid);
+ from_kuid(&init_user_ns, inode->i_uid),
+ from_kuid(&init_user_ns, attr->ia_uid));
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_uid = attr->ia_uid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- attr->ia_uid != inode->i_uid) {
- req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
+ !uid_eq(attr->ia_uid, inode->i_uid)) {
+ req->r_args.setattr.uid = cpu_to_le32(
+ from_kuid(&init_user_ns, attr->ia_uid));
mask |= CEPH_SETATTR_UID;
release |= CEPH_CAP_AUTH_SHARED;
}
}
if (ia_valid & ATTR_GID) {
dout("setattr %p gid %d -> %d\n", inode,
- inode->i_gid, attr->ia_gid);
+ from_kgid(&init_user_ns, inode->i_gid),
+ from_kgid(&init_user_ns, attr->ia_gid));
if (issued & CEPH_CAP_AUTH_EXCL) {
inode->i_gid = attr->ia_gid;
dirtied |= CEPH_CAP_AUTH_EXCL;
} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
- attr->ia_gid != inode->i_gid) {
- req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
+ !gid_eq(attr->ia_gid, inode->i_gid)) {
+ req->r_args.setattr.gid = cpu_to_le32(
+ from_kgid(&init_user_ns, attr->ia_gid));
mask |= CEPH_SETATTR_GID;
release |= CEPH_CAP_AUTH_SHARED;
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 36549a46e311..4a989345b37b 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -16,11 +16,11 @@
*/
static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
{
- struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+ struct ceph_inode_info *ci = ceph_inode(file_inode(file));
struct ceph_ioctl_layout l;
int err;
- err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+ err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
if (!err) {
l.stripe_unit = ceph_file_layout_su(ci->i_layout);
l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
@@ -63,12 +63,12 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct inode *parent_inode;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
- struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+ struct ceph_inode_info *ci = ceph_inode(file_inode(file));
struct ceph_ioctl_layout nl;
int err;
@@ -76,7 +76,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
return -EFAULT;
/* validate changed params against current layout */
- err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+ err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
if (err)
return err;
@@ -136,7 +136,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
*/
static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err;
@@ -179,13 +179,12 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
{
struct ceph_ioctl_dataloc dl;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
u64 len = 1, olen;
u64 tmp;
- struct ceph_object_layout ol;
struct ceph_pg pgid;
int r;
@@ -194,7 +193,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
return -EFAULT;
down_read(&osdc->map_sem);
- r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
+ r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
&dl.object_no, &dl.object_offset,
&olen);
if (r < 0)
@@ -209,10 +208,9 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
ceph_ino(inode), dl.object_no);
- ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout,
+ ceph_calc_object_layout(&pgid, dl.object_name, &ci->i_layout,
osdc->osdmap);
- pgid = ol.ol_pgid;
dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
if (dl.osd >= 0) {
struct ceph_entity_addr *a =
@@ -234,7 +232,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
static long ceph_ioctl_lazyio(struct file *file)
{
struct ceph_file_info *fi = file->private_data;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 80576d05d687..202dd3d68be0 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -13,7 +13,7 @@
static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
int cmd, u8 wait, struct file_lock *fl)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 9165eb8309eb..442880d099c9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -233,6 +233,30 @@ bad:
}
/*
+ * parse create results
+ */
+static int parse_reply_info_create(void **p, void *end,
+ struct ceph_mds_reply_info_parsed *info,
+ int features)
+{
+ if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
+ if (*p == end) {
+ info->has_create_ino = false;
+ } else {
+ info->has_create_ino = true;
+ info->ino = ceph_decode_64(p);
+ }
+ }
+
+ if (unlikely(*p != end))
+ goto bad;
+ return 0;
+
+bad:
+ return -EIO;
+}
+
+/*
* parse extra results
*/
static int parse_reply_info_extra(void **p, void *end,
@@ -241,8 +265,12 @@ static int parse_reply_info_extra(void **p, void *end,
{
if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
- else
+ else if (info->head->op == CEPH_MDS_OP_READDIR)
return parse_reply_info_dir(p, end, info, features);
+ else if (info->head->op == CEPH_MDS_OP_CREATE)
+ return parse_reply_info_create(p, end, info, features);
+ else
+ return -EIO;
}
/*
@@ -1658,8 +1686,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
head->op = cpu_to_le32(req->r_op);
- head->caller_uid = cpu_to_le32(req->r_uid);
- head->caller_gid = cpu_to_le32(req->r_gid);
+ head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
+ head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
head->args = req->r_args;
ceph_encode_filepath(&p, end, ino1, path1);
@@ -2170,7 +2198,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&req->r_fill_mutex);
err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
if (err == 0) {
- if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
+ if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
+ req->r_op == CEPH_MDS_OP_LSSNAP) &&
rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index dd26846dd71d..c2a19fbbe517 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -74,6 +74,12 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_info_in *dir_in;
u8 dir_complete, dir_end;
};
+
+ /* for create results */
+ struct {
+ bool has_create_ino;
+ u64 ino;
+ };
};
/* encoded blob describing snapshot contexts for certain
@@ -184,8 +190,8 @@ struct ceph_mds_request {
union ceph_mds_request_args r_args;
int r_fmode; /* file mode, if expecting cap */
- uid_t r_uid;
- gid_t r_gid;
+ kuid_t r_uid;
+ kgid_t r_gid;
/* for choosing which mds to send this request to */
int r_direct_mode;
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 73b7d44e8a35..0d3c9240c61b 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -59,6 +59,10 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
return ERR_PTR(-ENOMEM);
ceph_decode_16_safe(p, end, version, bad);
+ if (version > 3) {
+ pr_warning("got mdsmap version %d > 3, failing\n", version);
+ goto bad;
+ }
ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
m->m_epoch = ceph_decode_32(p);
@@ -144,13 +148,13 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
/* pg_pools */
ceph_decode_32_safe(p, end, n, bad);
m->m_num_data_pg_pools = n;
- m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
+ m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
if (!m->m_data_pg_pools)
goto badmem;
- ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
+ ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
for (i = 0; i < n; i++)
- m->m_data_pg_pools[i] = ceph_decode_32(p);
- m->m_cas_pg_pool = ceph_decode_32(p);
+ m->m_data_pg_pools[i] = ceph_decode_64(p);
+ m->m_cas_pg_pool = ceph_decode_64(p);
/* ok, we don't care about the rest. */
dout("mdsmap_decode success epoch %u\n", m->m_epoch);
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index cd5097d7c804..89fa4a940a0f 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -15,6 +15,7 @@ const char *ceph_mds_state_name(int s)
case CEPH_MDS_STATE_BOOT: return "up:boot";
case CEPH_MDS_STATE_STANDBY: return "up:standby";
case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
+ case CEPH_MDS_STATE_REPLAYONCE: return "up:oneshot-replay";
case CEPH_MDS_STATE_CREATING: return "up:creating";
case CEPH_MDS_STATE_STARTING: return "up:starting";
/* up and in */
@@ -50,10 +51,13 @@ const char *ceph_mds_op_name(int op)
case CEPH_MDS_OP_LOOKUP: return "lookup";
case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
+ case CEPH_MDS_OP_LOOKUPINO: return "lookupino";
case CEPH_MDS_OP_GETATTR: return "getattr";
case CEPH_MDS_OP_SETXATTR: return "setxattr";
case CEPH_MDS_OP_SETATTR: return "setattr";
case CEPH_MDS_OP_RMXATTR: return "rmxattr";
+ case CEPH_MDS_OP_SETLAYOUT: return "setlayout";
+ case CEPH_MDS_OP_SETDIRLAYOUT: return "setdirlayout";
case CEPH_MDS_OP_READDIR: return "readdir";
case CEPH_MDS_OP_MKNOD: return "mknod";
case CEPH_MDS_OP_LINK: return "link";
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e86aa9948124..9fe17c6c2876 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -71,8 +71,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
/*
* express utilization in terms of large blocks to avoid
* overflow on 32-bit machines.
+ *
+ * NOTE: for the time being, we make bsize == frsize to humor
+ * not-yet-ancient versions of glibc that are broken.
+ * Someday, we will probably want to report a real block
+ * size... whatever that may mean for a network file system!
*/
buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
+ buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
@@ -80,7 +86,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
- buf->f_frsize = PAGE_CACHE_SIZE;
/* leave fsid little-endian, regardless of host endianness */
fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 66ebe720e40d..c7b309723dcc 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -21,7 +21,7 @@
/* large granularity for statfs utilization stats to facilitate
* large volume sizes on 32-bit machines. */
-#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
+#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
@@ -138,8 +138,8 @@ struct ceph_cap_snap {
struct ceph_snap_context *context;
umode_t mode;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
struct ceph_buffer *xattr_blob;
u64 xattr_version;
@@ -798,13 +798,7 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
/* file.c */
extern const struct file_operations ceph_file_fops;
extern const struct address_space_operations ceph_aops;
-extern int ceph_copy_to_page_vector(struct page **pages,
- const char *data,
- loff_t off, size_t len);
-extern int ceph_copy_from_page_vector(struct page **pages,
- char *data,
- loff_t off, size_t len);
-extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 2c2ae5be9902..9b6b2b6dd164 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -29,9 +29,94 @@ struct ceph_vxattr {
size_t name_size; /* strlen(name) + 1 (for '\0') */
size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
size_t size);
- bool readonly;
+ bool readonly, hidden;
+ bool (*exists_cb)(struct ceph_inode_info *ci);
};
+/* layouts */
+
+static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
+{
+ size_t s;
+ char *p = (char *)&ci->i_layout;
+
+ for (s = 0; s < sizeof(ci->i_layout); s++, p++)
+ if (*p)
+ return true;
+ return false;
+}
+
+static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ int ret;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ const char *pool_name;
+
+ dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
+ down_read(&osdc->map_sem);
+ pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
+ if (pool_name)
+ ret = snprintf(val, size,
+ "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%s",
+ (unsigned long long)ceph_file_layout_su(ci->i_layout),
+ (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
+ (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
+ pool_name);
+ else
+ ret = snprintf(val, size,
+ "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
+ (unsigned long long)ceph_file_layout_su(ci->i_layout),
+ (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
+ (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
+ (unsigned long long)pool);
+
+ up_read(&osdc->map_sem);
+ return ret;
+}
+
+static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ return snprintf(val, size, "%lld",
+ (unsigned long long)ceph_file_layout_su(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ return snprintf(val, size, "%lld",
+ (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ return snprintf(val, size, "%lld",
+ (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ int ret;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ const char *pool_name;
+
+ down_read(&osdc->map_sem);
+ pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
+ if (pool_name)
+ ret = snprintf(val, size, "%s", pool_name);
+ else
+ ret = snprintf(val, size, "%lld", (unsigned long long)pool);
+ up_read(&osdc->map_sem);
+ return ret;
+}
+
/* directories */
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
@@ -83,17 +168,43 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
(long)ci->i_rctime.tv_nsec);
}
-#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
-#define XATTR_NAME_CEPH(_type, _name) \
- { \
- .name = CEPH_XATTR_NAME(_type, _name), \
- .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
- .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
- .readonly = true, \
- }
+#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
+#define CEPH_XATTR_NAME2(_type, _name, _name2) \
+ XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
+
+#define XATTR_NAME_CEPH(_type, _name) \
+ { \
+ .name = CEPH_XATTR_NAME(_type, _name), \
+ .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
+ .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
+ .readonly = true, \
+ .hidden = false, \
+ .exists_cb = NULL, \
+ }
+#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
+ { \
+ .name = CEPH_XATTR_NAME2(_type, _name, _field), \
+ .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
+ .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
+ .readonly = false, \
+ .hidden = true, \
+ .exists_cb = ceph_vxattrcb_layout_exists, \
+ }
static struct ceph_vxattr ceph_dir_vxattrs[] = {
+ {
+ .name = "ceph.dir.layout",
+ .name_size = sizeof("ceph.dir.layout"),
+ .getxattr_cb = ceph_vxattrcb_layout,
+ .readonly = false,
+ .hidden = false,
+ .exists_cb = ceph_vxattrcb_layout_exists,
+ },
+ XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
+ XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
+ XATTR_LAYOUT_FIELD(dir, layout, object_size),
+ XATTR_LAYOUT_FIELD(dir, layout, pool),
XATTR_NAME_CEPH(dir, entries),
XATTR_NAME_CEPH(dir, files),
XATTR_NAME_CEPH(dir, subdirs),
@@ -102,35 +213,26 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_NAME_CEPH(dir, rsubdirs),
XATTR_NAME_CEPH(dir, rbytes),
XATTR_NAME_CEPH(dir, rctime),
- { 0 } /* Required table terminator */
+ { .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
/* files */
-static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
- size_t size)
-{
- int ret;
-
- ret = snprintf(val, size,
- "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
- (unsigned long long)ceph_file_layout_su(ci->i_layout),
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
- return ret;
-}
-
static struct ceph_vxattr ceph_file_vxattrs[] = {
- XATTR_NAME_CEPH(file, layout),
- /* The following extended attribute name is deprecated */
{
- .name = XATTR_CEPH_PREFIX "layout",
- .name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
- .getxattr_cb = ceph_vxattrcb_file_layout,
- .readonly = true,
+ .name = "ceph.file.layout",
+ .name_size = sizeof("ceph.file.layout"),
+ .getxattr_cb = ceph_vxattrcb_layout,
+ .readonly = false,
+ .hidden = false,
+ .exists_cb = ceph_vxattrcb_layout_exists,
},
- { 0 } /* Required table terminator */
+ XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
+ XATTR_LAYOUT_FIELD(file, layout, stripe_count),
+ XATTR_LAYOUT_FIELD(file, layout, object_size),
+ XATTR_LAYOUT_FIELD(file, layout, pool),
+ { .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size; /* total size of all names */
@@ -164,7 +266,8 @@ static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
size_t size = 0;
for (vxattr = vxattrs; vxattr->name; vxattr++)
- size += vxattr->name_size;
+ if (!vxattr->hidden)
+ size += vxattr->name_size;
return size;
}
@@ -572,13 +675,17 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
if (!ceph_is_valid_xattr(name))
return -ENODATA;
- /* let's see if a virtual xattr was requested */
- vxattr = ceph_match_vxattr(inode, name);
-
spin_lock(&ci->i_ceph_lock);
dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
+ /* let's see if a virtual xattr was requested */
+ vxattr = ceph_match_vxattr(inode, name);
+ if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
+ err = vxattr->getxattr_cb(ci, value, size);
+ goto out;
+ }
+
if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto get_xattr;
@@ -592,11 +699,6 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
spin_lock(&ci->i_ceph_lock);
- if (vxattr && vxattr->readonly) {
- err = vxattr->getxattr_cb(ci, value, size);
- goto out;
- }
-
err = __build_xattrs(inode);
if (err < 0)
goto out;
@@ -604,11 +706,8 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
get_xattr:
err = -ENODATA; /* == ENOATTR */
xattr = __get_xattr(ci, name);
- if (!xattr) {
- if (vxattr)
- err = vxattr->getxattr_cb(ci, value, size);
+ if (!xattr)
goto out;
- }
err = -ERANGE;
if (size && size < xattr->val_len)
@@ -664,23 +763,30 @@ list_xattr:
vir_namelen = ceph_vxattrs_name_size(vxattrs);
/* adding 1 byte per each variable due to the null termination */
- namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
+ namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
err = -ERANGE;
- if (size && namelen > size)
+ if (size && vir_namelen + namelen > size)
goto out;
- err = namelen;
+ err = namelen + vir_namelen;
if (size == 0)
goto out;
names = __copy_xattr_names(ci, names);
/* virtual xattr names, too */
- if (vxattrs)
+ err = namelen;
+ if (vxattrs) {
for (i = 0; vxattrs[i].name; i++) {
- len = sprintf(names, "%s", vxattrs[i].name);
- names += len + 1;
+ if (!vxattrs[i].hidden &&
+ !(vxattrs[i].exists_cb &&
+ !vxattrs[i].exists_cb(ci))) {
+ len = sprintf(names, "%s", vxattrs[i].name);
+ names += len + 1;
+ err += len + 1;
+ }
}
+ }
out:
spin_unlock(&ci->i_ceph_lock);
@@ -782,6 +888,10 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (vxattr && vxattr->readonly)
return -EOPNOTSUPP;
+ /* pass any unhandled ceph.* xattrs through to the MDS */
+ if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
+ goto do_sync_unlocked;
+
/* preallocate memory for xattr name, value, index node */
err = -ENOMEM;
newname = kmemdup(name, name_len + 1, GFP_NOFS);
@@ -838,6 +948,7 @@ retry:
do_sync:
spin_unlock(&ci->i_ceph_lock);
+do_sync_unlocked:
err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
kfree(newname);
@@ -892,6 +1003,10 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
if (vxattr && vxattr->readonly)
return -EOPNOTSUPP;
+ /* pass any unhandled ceph.* xattrs through to the MDS */
+ if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
+ goto do_sync_unlocked;
+
err = -ENOMEM;
spin_lock(&ci->i_ceph_lock);
retry:
@@ -931,6 +1046,7 @@ retry:
return err;
do_sync:
spin_unlock(&ci->i_ceph_lock);
+do_sync_unlocked:
err = ceph_send_removexattr(dentry, name);
out:
return err;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index c865bfdfe819..37e4a72a7d1c 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -55,10 +55,10 @@ struct cifs_sb_info {
unsigned int wsize;
unsigned long actimeo; /* attribute cache timeout (jiffies) */
atomic_t active;
- uid_t mnt_uid;
- gid_t mnt_gid;
- uid_t mnt_backupuid;
- gid_t mnt_backupgid;
+ kuid_t mnt_uid;
+ kgid_t mnt_gid;
+ kuid_t mnt_backupuid;
+ kgid_t mnt_backupgid;
umode_t mnt_file_mode;
umode_t mnt_dir_mode;
unsigned int mnt_cifs_flags;
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 086f381d6489..10e774761299 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -149,10 +149,12 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
goto out;
dp = description + strlen(description);
- sprintf(dp, ";uid=0x%x", sesInfo->linux_uid);
+ sprintf(dp, ";uid=0x%x",
+ from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
dp = description + strlen(description);
- sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
+ sprintf(dp, ";creduid=0x%x",
+ from_kuid_munged(&init_user_ns, sesInfo->cred_uid));
if (sesInfo->user_name) {
dp = description + strlen(description);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 5cbd00e74067..f1e3f25fe004 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -266,8 +266,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
struct key *sidkey;
char *sidstr;
const struct cred *saved_cred;
- uid_t fuid = cifs_sb->mnt_uid;
- gid_t fgid = cifs_sb->mnt_gid;
+ kuid_t fuid = cifs_sb->mnt_uid;
+ kgid_t fgid = cifs_sb->mnt_gid;
/*
* If we have too many subauthorities, then something is really wrong.
@@ -297,6 +297,7 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
* probably a safe assumption but might be better to check based on
* sidtype.
*/
+ BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
if (sidkey->datalen != sizeof(uid_t)) {
rc = -EIO;
cFYI(1, "%s: Downcall contained malformed key "
@@ -305,10 +306,21 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
goto out_key_put;
}
- if (sidtype == SIDOWNER)
- memcpy(&fuid, &sidkey->payload.value, sizeof(uid_t));
- else
- memcpy(&fgid, &sidkey->payload.value, sizeof(gid_t));
+ if (sidtype == SIDOWNER) {
+ kuid_t uid;
+ uid_t id;
+ memcpy(&id, &sidkey->payload.value, sizeof(uid_t));
+ uid = make_kuid(&init_user_ns, id);
+ if (uid_valid(uid))
+ fuid = uid;
+ } else {
+ kgid_t gid;
+ gid_t id;
+ memcpy(&id, &sidkey->payload.value, sizeof(gid_t));
+ gid = make_kgid(&init_user_ns, id);
+ if (gid_valid(gid))
+ fgid = gid;
+ }
out_key_put:
key_put(sidkey);
@@ -346,7 +358,8 @@ init_cifs_idmap(void)
if (!cred)
return -ENOMEM;
- keyring = keyring_alloc(".cifs_idmap", 0, 0, cred,
+ keyring = keyring_alloc(".cifs_idmap",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_NOT_IN_QUOTA, NULL);
@@ -774,7 +787,7 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
- __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
+ __u32 secdesclen, __u64 nmode, kuid_t uid, kgid_t gid, int *aclflag)
{
int rc = 0;
__u32 dacloffset;
@@ -806,17 +819,19 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
*aclflag = CIFS_ACL_DACL;
} else {
memcpy(pnntsd, pntsd, secdesclen);
- if (uid != NO_CHANGE_32) { /* chown */
+ if (uid_valid(uid)) { /* chown */
+ uid_t id;
owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
le32_to_cpu(pnntsd->osidoffset));
nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!nowner_sid_ptr)
return -ENOMEM;
- rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
+ id = from_kuid(&init_user_ns, uid);
+ rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr);
if (rc) {
cFYI(1, "%s: Mapping error %d for owner id %d",
- __func__, rc, uid);
+ __func__, rc, id);
kfree(nowner_sid_ptr);
return rc;
}
@@ -824,17 +839,19 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
kfree(nowner_sid_ptr);
*aclflag = CIFS_ACL_OWNER;
}
- if (gid != NO_CHANGE_32) { /* chgrp */
+ if (gid_valid(gid)) { /* chgrp */
+ gid_t id;
group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
le32_to_cpu(pnntsd->gsidoffset));
ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr)
return -ENOMEM;
- rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
+ id = from_kgid(&init_user_ns, gid);
+ rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr);
if (rc) {
cFYI(1, "%s: Mapping error %d for group id %d",
- __func__, rc, gid);
+ __func__, rc, id);
kfree(ngroup_sid_ptr);
return rc;
}
@@ -1002,7 +1019,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
/* Convert mode bits to an ACL so we can update the ACL on the server */
int
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
- uid_t uid, gid_t gid)
+ kuid_t uid, kgid_t gid)
{
int rc = 0;
int aclflag = CIFS_ACL_DACL; /* default flag to set */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index de7f9168a118..1a052c0eee8e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -375,13 +375,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
(int)(srcaddr->sa_family));
}
- seq_printf(s, ",uid=%u", cifs_sb->mnt_uid);
+ seq_printf(s, ",uid=%u",
+ from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
seq_printf(s, ",forceuid");
else
seq_printf(s, ",noforceuid");
- seq_printf(s, ",gid=%u", cifs_sb->mnt_gid);
+ seq_printf(s, ",gid=%u",
+ from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
seq_printf(s, ",forcegid");
else
@@ -436,9 +438,13 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
seq_printf(s, ",noperm");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
- seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
+ seq_printf(s, ",backupuid=%u",
+ from_kuid_munged(&init_user_ns,
+ cifs_sb->mnt_backupuid));
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
- seq_printf(s, ",backupgid=%u", cifs_sb->mnt_backupgid);
+ seq_printf(s, ",backupgid=%u",
+ from_kgid_munged(&init_user_ns,
+ cifs_sb->mnt_backupgid));
seq_printf(s, ",rsize=%u", cifs_sb->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->wsize);
@@ -558,6 +564,11 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
dentry = ERR_PTR(-ENOENT);
break;
}
+ if (!S_ISDIR(dir->i_mode)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOTDIR);
+ break;
+ }
/* skip separators */
while (*s == sep)
@@ -677,7 +688,7 @@ out_nls:
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
ssize_t written;
int rc;
@@ -701,7 +712,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
*/
if (whence != SEEK_SET && whence != SEEK_CUR) {
int rc;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
/*
* We need to be sure that all dirty pages are written and the
@@ -733,7 +744,7 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
/* note that this is called by vfs setlease with lock_flocks held
to protect *lease from going away */
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
if (!(S_ISREG(inode->i_mode)))
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index e6899cea1c35..4f07f6fbe494 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -400,11 +400,11 @@ struct smb_vol {
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
- uid_t cred_uid;
- uid_t linux_uid;
- gid_t linux_gid;
- uid_t backupuid;
- gid_t backupgid;
+ kuid_t cred_uid;
+ kuid_t linux_uid;
+ kgid_t linux_gid;
+ kuid_t backupuid;
+ kgid_t backupgid;
umode_t file_mode;
umode_t dir_mode;
unsigned secFlg;
@@ -703,8 +703,8 @@ struct cifs_ses {
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
__u64 Suid; /* remote smb uid */
- uid_t linux_uid; /* overriding owner of files on the mount */
- uid_t cred_uid; /* owner of credentials */
+ kuid_t linux_uid; /* overriding owner of files on the mount */
+ kuid_t cred_uid; /* owner of credentials */
unsigned int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
@@ -838,7 +838,7 @@ struct cifs_tcon {
*/
struct tcon_link {
struct rb_node tl_rbnode;
- uid_t tl_uid;
+ kuid_t tl_uid;
unsigned long tl_flags;
#define TCON_LINK_MASTER 0
#define TCON_LINK_PENDING 1
@@ -931,7 +931,7 @@ struct cifsFileInfo {
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
struct cifs_fid_locks *llist; /* brlocks held by this fid */
- unsigned int uid; /* allows finding which FileInfo structure */
+ kuid_t uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
struct cifs_fid fid; /* file id from remote */
/* BB add lock scope info here if needed */ ;
@@ -1245,8 +1245,8 @@ struct cifs_fattr {
u64 cf_eof;
u64 cf_bytes;
u64 cf_createtime;
- uid_t cf_uid;
- gid_t cf_gid;
+ kuid_t cf_uid;
+ kgid_t cf_gid;
umode_t cf_mode;
dev_t cf_rdev;
unsigned int cf_nlink;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b9d59a948a2c..e996ff6b26d1 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -277,7 +277,6 @@
#define CIFS_NO_HANDLE 0xFFFF
#define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL
-#define NO_CHANGE_32 0xFFFFFFFFUL
/* IPC$ in ASCII */
#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1988c1baa224..f450f0683ddd 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -46,7 +46,8 @@ extern void _free_xid(unsigned int);
({ \
unsigned int __xid = _get_xid(); \
cFYI(1, "CIFS VFS: in %s as Xid: %u with uid: %d", \
- __func__, __xid, current_fsuid()); \
+ __func__, __xid, \
+ from_kuid(&init_user_ns, current_fsuid())); \
__xid; \
})
@@ -161,7 +162,7 @@ extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
const char *path, const __u16 *pfid);
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
- uid_t, gid_t);
+ kuid_t, kgid_t);
extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
const char *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
@@ -304,8 +305,8 @@ struct cifs_unix_set_info_args {
__u64 atime;
__u64 mtime;
__u64 mode;
- __u64 uid;
- __u64 gid;
+ kuid_t uid;
+ kgid_t gid;
dev_t device;
};
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76d0d2998850..7353bc5d73d7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1909,8 +1909,11 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
} while (rc == -EAGAIN);
for (i = 0; i < wdata->nr_pages; i++) {
- if (rc != 0)
+ if (rc != 0) {
SetPageError(wdata->pages[i]);
+ end_page_writeback(wdata->pages[i]);
+ page_cache_release(wdata->pages[i]);
+ }
unlock_page(wdata->pages[i]);
}
@@ -5819,8 +5822,14 @@ static void
cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
const struct cifs_unix_set_info_args *args)
{
+ u64 uid = NO_CHANGE_64, gid = NO_CHANGE_64;
u64 mode = args->mode;
+ if (uid_valid(args->uid))
+ uid = from_kuid(&init_user_ns, args->uid);
+ if (gid_valid(args->gid))
+ gid = from_kgid(&init_user_ns, args->gid);
+
/*
* Samba server ignores set of file size to zero due to bugs in some
* older clients, but we should be precise - we use SetFileSize to
@@ -5833,8 +5842,8 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
data_offset->LastStatusChange = cpu_to_le64(args->ctime);
data_offset->LastAccessTime = cpu_to_le64(args->atime);
data_offset->LastModificationTime = cpu_to_le64(args->mtime);
- data_offset->Uid = cpu_to_le64(args->uid);
- data_offset->Gid = cpu_to_le64(args->gid);
+ data_offset->Uid = cpu_to_le64(uid);
+ data_offset->Gid = cpu_to_le64(gid);
/* better to leave device as zero when it is */
data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
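Note: cifs_fill_unix_set_info() above treats an invalid kuid_t/kgid_t as "leave unchanged" and only then converts to the on-wire 64-bit id. A minimal sketch of that pattern; encode_optional_uid() is a hypothetical helper, NO_CHANGE_64 is the cifs constant visible earlier in this diff:

    static __le64 encode_optional_uid(kuid_t uid)
    {
            u64 wire = NO_CHANGE_64;        /* server interprets this as "no change" */

            if (uid_valid(uid))
                    wire = from_kuid(&init_user_ns, uid);
            return cpu_to_le64(wire);
    }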
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 12b3da39733b..54125e04fd0c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -987,6 +987,41 @@ static int get_option_ul(substring_t args[], unsigned long *option)
return rc;
}
+static int get_option_uid(substring_t args[], kuid_t *result)
+{
+ unsigned long value;
+ kuid_t uid;
+ int rc;
+
+ rc = get_option_ul(args, &value);
+ if (rc)
+ return rc;
+
+ uid = make_kuid(current_user_ns(), value);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ *result = uid;
+ return 0;
+}
+
+static int get_option_gid(substring_t args[], kgid_t *result)
+{
+ unsigned long value;
+ kgid_t gid;
+ int rc;
+
+ rc = get_option_ul(args, &value);
+ if (rc)
+ return rc;
+
+ gid = make_kgid(current_user_ns(), value);
+ if (!gid_valid(gid))
+ return -EINVAL;
+
+ *result = gid;
+ return 0;
+}
static int cifs_parse_security_flavors(char *value,
struct smb_vol *vol)
@@ -996,7 +1031,7 @@ static int cifs_parse_security_flavors(char *value,
switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5:
- vol->secFlg |= CIFSSEC_MAY_KRB5;
+ vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN;
break;
case Opt_sec_krb5i:
vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
@@ -1424,47 +1459,42 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* Numeric Values */
case Opt_backupuid:
- if (get_option_ul(args, &option)) {
+ if (get_option_uid(args, &vol->backupuid)) {
cERROR(1, "%s: Invalid backupuid value",
__func__);
goto cifs_parse_mount_err;
}
- vol->backupuid = option;
vol->backupuid_specified = true;
break;
case Opt_backupgid:
- if (get_option_ul(args, &option)) {
+ if (get_option_gid(args, &vol->backupgid)) {
cERROR(1, "%s: Invalid backupgid value",
__func__);
goto cifs_parse_mount_err;
}
- vol->backupgid = option;
vol->backupgid_specified = true;
break;
case Opt_uid:
- if (get_option_ul(args, &option)) {
+ if (get_option_uid(args, &vol->linux_uid)) {
cERROR(1, "%s: Invalid uid value",
__func__);
goto cifs_parse_mount_err;
}
- vol->linux_uid = option;
uid_specified = true;
break;
case Opt_cruid:
- if (get_option_ul(args, &option)) {
+ if (get_option_uid(args, &vol->cred_uid)) {
cERROR(1, "%s: Invalid cruid value",
__func__);
goto cifs_parse_mount_err;
}
- vol->cred_uid = option;
break;
case Opt_gid:
- if (get_option_ul(args, &option)) {
+ if (get_option_gid(args, &vol->linux_gid)) {
cERROR(1, "%s: Invalid gid value",
__func__);
goto cifs_parse_mount_err;
}
- vol->linux_gid = option;
gid_specified = true;
break;
case Opt_file_mode:
@@ -2241,7 +2271,7 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
{
switch (ses->server->secType) {
case Kerberos:
- if (vol->cred_uid != ses->cred_uid)
+ if (!uid_eq(vol->cred_uid, ses->cred_uid))
return 0;
break;
default:
@@ -2713,7 +2743,7 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
if (new->rsize && new->rsize < old->rsize)
return 0;
- if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
+ if (!uid_eq(old->mnt_uid, new->mnt_uid) || !gid_eq(old->mnt_gid, new->mnt_gid))
return 0;
if (old->mnt_file_mode != new->mnt_file_mode ||
@@ -3919,7 +3949,7 @@ cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
}
static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
@@ -3989,7 +4019,7 @@ cifs_sb_tcon_pending_wait(void *unused)
/* find and return a tlink with given uid */
static struct tcon_link *
-tlink_rb_search(struct rb_root *root, uid_t uid)
+tlink_rb_search(struct rb_root *root, kuid_t uid)
{
struct rb_node *node = root->rb_node;
struct tcon_link *tlink;
@@ -3997,9 +4027,9 @@ tlink_rb_search(struct rb_root *root, uid_t uid)
while (node) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
- if (tlink->tl_uid > uid)
+ if (uid_gt(tlink->tl_uid, uid))
node = node->rb_left;
- else if (tlink->tl_uid < uid)
+ else if (uid_lt(tlink->tl_uid, uid))
node = node->rb_right;
else
return tlink;
@@ -4018,7 +4048,7 @@ tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
parent = *new;
- if (tlink->tl_uid > new_tlink->tl_uid)
+ if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
@@ -4048,7 +4078,7 @@ struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
int ret;
- uid_t fsuid = current_fsuid();
+ kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
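Note: because kuid_t is an opaque type, the connect.c hunks above replace ==, < and > with uid_eq(), uid_lt() and uid_gt(). A minimal sketch of an rbtree-style comparison in that idiom; compare_tlink_uid() is hypothetical, the helpers are the standard <linux/uidgid.h> API:

    static int compare_tlink_uid(kuid_t a, kuid_t b)
    {
            if (uid_gt(a, b))
                    return 1;
            if (uid_lt(a, b))
                    return -1;
            return 0;               /* uid_eq(a, b) */
    }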
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 8719bbe0dcc3..1cd016217448 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -342,14 +342,14 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
*created |= FILE_CREATED;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- args.uid = (__u64) current_fsuid();
+ args.uid = current_fsuid();
if (inode->i_mode & S_ISGID)
- args.gid = (__u64) inode->i_gid;
+ args.gid = inode->i_gid;
else
- args.gid = (__u64) current_fsgid();
+ args.gid = current_fsgid();
} else {
- args.uid = NO_CHANGE_64;
- args.gid = NO_CHANGE_64;
+ args.uid = INVALID_UID; /* no change */
+ args.gid = INVALID_GID; /* no change */
}
CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid->netfid,
current->tgid);
@@ -588,11 +588,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
.device = device_number,
};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- args.uid = (__u64) current_fsuid();
- args.gid = (__u64) current_fsgid();
+ args.uid = current_fsuid();
+ args.gid = current_fsgid();
} else {
- args.uid = NO_CHANGE_64;
- args.gid = NO_CHANGE_64;
+ args.uid = INVALID_UID; /* no change */
+ args.gid = INVALID_GID; /* no change */
}
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
cifs_sb->local_nls,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8ea6ca50a665..8c0d85577314 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -43,6 +43,7 @@
#include "cifs_fs_sb.h"
#include "fscache.h"
+
static inline int cifs_convert_flags(unsigned int flags)
{
if ((flags & O_ACCMODE) == O_RDONLY)
@@ -72,10 +73,15 @@ static u32 cifs_posix_convert_flags(unsigned int flags)
else if ((flags & O_ACCMODE) == O_RDWR)
posix_flags = SMB_O_RDWR;
- if (flags & O_CREAT)
+ if (flags & O_CREAT) {
posix_flags |= SMB_O_CREAT;
- if (flags & O_EXCL)
- posix_flags |= SMB_O_EXCL;
+ if (flags & O_EXCL)
+ posix_flags |= SMB_O_EXCL;
+ } else if (flags & O_EXCL)
+ cFYI(1, "Application %s pid %d has incorrectly set O_EXCL flag"
+ "but not O_CREAT on file open. Ignoring O_EXCL",
+ current->comm, current->tgid);
+
if (flags & O_TRUNC)
posix_flags |= SMB_O_TRUNC;
/* be safe and imply O_SYNC for O_DSYNC */
@@ -515,8 +521,8 @@ int cifs_open(struct inode *inode, struct file *file)
*/
struct cifs_unix_set_info_args args = {
.mode = inode->i_mode,
- .uid = NO_CHANGE_64,
- .gid = NO_CHANGE_64,
+ .uid = INVALID_UID, /* no change */
+ .gid = INVALID_GID, /* no change */
.ctime = NO_CHANGE_64,
.atime = NO_CHANGE_64,
.mtime = NO_CHANGE_64,
@@ -947,7 +953,7 @@ static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
int rc = 0;
- struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
unsigned char saved_type = flock->fl_type;
if ((flock->fl_flags & FL_POSIX) == 0)
@@ -974,7 +980,7 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
- struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+ struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
int rc = 1;
if ((flock->fl_flags & FL_POSIX) == 0)
@@ -1548,7 +1554,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
netfid = cfile->fid.netfid;
- cinode = CIFS_I(file->f_path.dentry->d_inode);
+ cinode = CIFS_I(file_inode(file));
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -1693,7 +1699,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (fsuid_only && open_file->uid != current_fsuid())
+ if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
if (!open_file->invalidHandle) {
@@ -1746,7 +1752,7 @@ refind_writable:
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (!any_available && open_file->pid != current->tgid)
continue;
- if (fsuid_only && open_file->uid != current_fsuid())
+ if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
@@ -2171,7 +2177,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2246,7 +2252,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
*/
int cifs_flush(struct file *file, fl_owner_t id)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int rc = 0;
if (file->f_mode & FMODE_WRITE)
@@ -2480,7 +2486,7 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
ssize_t written;
struct inode *inode;
- inode = iocb->ki_filp->f_path.dentry->d_inode;
+ inode = file_inode(iocb->ki_filp);
/*
* BB - optimize the way when signing is disabled. We can drop this
@@ -2543,7 +2549,7 @@ ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
@@ -2915,7 +2921,7 @@ ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
@@ -3063,7 +3069,7 @@ static struct vm_operations_struct cifs_file_vm_ops = {
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
xid = get_xid();
@@ -3356,7 +3362,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
int rc;
/* Is the page cached? */
- rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
+ rc = cifs_readpage_from_fscache(file_inode(file), page);
if (rc == 0)
goto read_complete;
@@ -3371,8 +3377,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
else
cFYI(1, "Bytes read %d", rc);
- file->f_path.dentry->d_inode->i_atime =
- current_fs_time(file->f_path.dentry->d_inode->i_sb);
+ file_inode(file)->i_atime =
+ current_fs_time(file_inode(file)->i_sb);
if (PAGE_CACHE_SIZE > rc)
memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
@@ -3381,7 +3387,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
SetPageUptodate(page);
/* send this page to the cache */
- cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
+ cifs_readpage_to_fscache(file_inode(file), page);
rc = 0;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ed6208ff85a7..83f2606c76d0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -244,15 +244,25 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
break;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
- fattr->cf_uid = cifs_sb->mnt_uid;
- else
- fattr->cf_uid = le64_to_cpu(info->Uid);
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
- fattr->cf_gid = cifs_sb->mnt_gid;
- else
- fattr->cf_gid = le64_to_cpu(info->Gid);
+ fattr->cf_uid = cifs_sb->mnt_uid;
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) {
+ u64 id = le64_to_cpu(info->Uid);
+ if (id < ((uid_t)-1)) {
+ kuid_t uid = make_kuid(&init_user_ns, id);
+ if (uid_valid(uid))
+ fattr->cf_uid = uid;
+ }
+ }
+
+ fattr->cf_gid = cifs_sb->mnt_gid;
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) {
+ u64 id = le64_to_cpu(info->Gid);
+ if (id < ((gid_t)-1)) {
+ kgid_t gid = make_kgid(&init_user_ns, id);
+ if (gid_valid(gid))
+ fattr->cf_gid = gid;
+ }
+ }
fattr->cf_nlink = le64_to_cpu(info->Nlinks);
}
@@ -289,7 +299,7 @@ cifs_get_file_info_unix(struct file *filp)
unsigned int xid;
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -558,7 +568,7 @@ cifs_get_file_info(struct file *filp)
unsigned int xid;
FILE_ALL_INFO find_data;
struct cifs_fattr fattr;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -806,10 +816,9 @@ static bool
inode_has_hashed_dentries(struct inode *inode)
{
struct dentry *dentry;
- struct hlist_node *p;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
spin_unlock(&inode->i_lock);
return true;
@@ -1245,14 +1254,14 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
.device = 0,
};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- args.uid = (__u64)current_fsuid();
+ args.uid = current_fsuid();
if (parent->i_mode & S_ISGID)
- args.gid = (__u64)parent->i_gid;
+ args.gid = parent->i_gid;
else
- args.gid = (__u64)current_fsgid();
+ args.gid = current_fsgid();
} else {
- args.uid = NO_CHANGE_64;
- args.gid = NO_CHANGE_64;
+ args.uid = INVALID_UID; /* no change */
+ args.gid = INVALID_GID; /* no change */
}
CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
cifs_sb->local_nls,
@@ -1678,7 +1687,7 @@ cifs_invalidate_mapping(struct inode *inode)
int cifs_revalidate_file_attr(struct file *filp)
{
int rc = 0;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
if (!cifs_inode_needs_reval(inode))
@@ -1735,7 +1744,7 @@ out:
int cifs_revalidate_file(struct file *filp)
{
int rc;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
rc = cifs_revalidate_file_attr(filp);
if (rc)
@@ -2013,12 +2022,12 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_UID)
args->uid = attrs->ia_uid;
else
- args->uid = NO_CHANGE_64;
+ args->uid = INVALID_UID; /* no change */
if (attrs->ia_valid & ATTR_GID)
args->gid = attrs->ia_gid;
else
- args->gid = NO_CHANGE_64;
+ args->gid = INVALID_GID; /* no change */
if (attrs->ia_valid & ATTR_ATIME)
args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
@@ -2086,8 +2095,8 @@ static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{
unsigned int xid;
- uid_t uid = NO_CHANGE_32;
- gid_t gid = NO_CHANGE_32;
+ kuid_t uid = INVALID_UID;
+ kgid_t gid = INVALID_GID;
struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
@@ -2146,7 +2155,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
#ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
+ if (uid_valid(uid) || gid_valid(gid)) {
rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
uid, gid);
if (rc) {
@@ -2170,7 +2179,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
#ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = id_mode_to_cifs_acl(inode, full_path, mode,
- NO_CHANGE_32, NO_CHANGE_32);
+ INVALID_UID, INVALID_GID);
if (rc) {
cFYI(1, "%s: Setting ACL failed with error: %d",
__func__, rc);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index fd5009d56f9f..6c9f1214cf0b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -30,7 +30,7 @@
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
- struct inode *inode = filep->f_dentry->d_inode;
+ struct inode *inode = file_inode(filep);
int rc = -ENOTTY; /* strange error - but the precedent */
unsigned int xid;
struct cifs_sb_info *cifs_sb;
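Note: the file_inode() conversions that run through this diff replace the open-coded file->f_path.dentry->d_inode (and the older f_dentry form) with the new accessor from <linux/fs.h>. A minimal sketch; example_is_regular() is hypothetical:

    #include <linux/fs.h>

    static bool example_is_regular(struct file *filp)
    {
            return S_ISREG(file_inode(filp)->i_mode);
    }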
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 3a00c0d0cead..1b15bf839f37 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,7 +569,7 @@ bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
- if (cifs_sb->mnt_backupuid == current_fsuid())
+ if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
return true;
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index cdd6ff48246b..df40cc5fd13a 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -82,12 +82,10 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
cFYI(1, "%s: for %s", __func__, name->name);
- if (parent->d_op && parent->d_op->d_hash)
- parent->d_op->d_hash(parent, parent->d_inode, name);
- else
- name->hash = full_name_hash(name->name, name->len);
+ dentry = d_hash_and_lookup(parent, name);
+ if (unlikely(IS_ERR(dentry)))
+ return;
- dentry = d_lookup(parent, name);
if (dentry) {
int err;
@@ -505,7 +503,7 @@ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
whether we can use the cached search results from the previous search */
static int is_dir_changed(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
if (cifsInfo->time == 0)
@@ -778,7 +776,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
switch ((int) file->f_pos) {
case 0:
if (filldir(direntry, ".", 1, file->f_pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) {
+ file_inode(file)->i_ino, DT_DIR) < 0) {
cERROR(1, "Filldir for current dir failed");
rc = -ENOMEM;
break;
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 958ae0e0ff8c..1da168c61d35 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -33,7 +33,7 @@ void coda_cache_enter(struct inode *inode, int mask)
spin_lock(&cii->c_lock);
cii->c_cached_epoch = atomic_read(&permission_epoch);
- if (cii->c_uid != current_fsuid()) {
+ if (!uid_eq(cii->c_uid, current_fsuid())) {
cii->c_uid = current_fsuid();
cii->c_cached_perm = mask;
} else
@@ -65,7 +65,7 @@ int coda_cache_check(struct inode *inode, int mask)
spin_lock(&cii->c_lock);
hit = (mask & cii->c_cached_perm) == mask &&
- cii->c_uid == current_fsuid() &&
+ uid_eq(cii->c_uid, current_fsuid()) &&
cii->c_cached_epoch == atomic_read(&permission_epoch);
spin_unlock(&cii->c_lock);
diff --git a/fs/coda/coda_fs_i.h b/fs/coda/coda_fs_i.h
index b24fdfd8a3f0..c64075213218 100644
--- a/fs/coda/coda_fs_i.h
+++ b/fs/coda/coda_fs_i.h
@@ -25,7 +25,7 @@ struct coda_inode_info {
u_short c_flags; /* flags (see below) */
unsigned int c_mapcount; /* nr of times this inode is mapped */
unsigned int c_cached_epoch; /* epoch for cached permissions */
- vuid_t c_uid; /* fsuid for cached permissions */
+ kuid_t c_uid; /* fsuid for cached permissions */
unsigned int c_cached_perm; /* cached access permissions */
spinlock_t c_lock;
struct inode vfs_inode;
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 854ace712685..2849f41e72a2 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -100,9 +100,9 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_mode != (u_short) -1)
inode->i_mode = attr->va_mode | inode_type;
if (attr->va_uid != -1)
- inode->i_uid = (uid_t) attr->va_uid;
+ inode->i_uid = make_kuid(&init_user_ns, (uid_t) attr->va_uid);
if (attr->va_gid != -1)
- inode->i_gid = (gid_t) attr->va_gid;
+ inode->i_gid = make_kgid(&init_user_ns, (gid_t) attr->va_gid);
if (attr->va_nlink != -1)
set_nlink(inode, attr->va_nlink);
if (attr->va_size != -1)
@@ -171,10 +171,10 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr)
vattr->va_mode = iattr->ia_mode;
}
if ( valid & ATTR_UID ) {
- vattr->va_uid = (vuid_t) iattr->ia_uid;
+ vattr->va_uid = (vuid_t) from_kuid(&init_user_ns, iattr->ia_uid);
}
if ( valid & ATTR_GID ) {
- vattr->va_gid = (vgid_t) iattr->ia_gid;
+ vattr->va_gid = (vgid_t) from_kgid(&init_user_ns, iattr->ia_gid);
}
if ( valid & ATTR_SIZE ) {
vattr->va_size = iattr->ia_size;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 49fe52d25600..b7d3a05c062c 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -397,7 +397,7 @@ static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
* We can't use vfs_readdir because we have to keep the file
* position in sync between the coda_file and the host_file.
* and as such we need grab the inode mutex. */
- struct inode *host_inode = host_file->f_path.dentry->d_inode;
+ struct inode *host_inode = file_inode(host_file);
mutex_lock(&host_inode->i_mutex);
host_file->f_pos = coda_file->f_pos;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 8edd404e6419..fa4c100bdc7d 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -66,7 +66,7 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
static ssize_t
coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct inode *host_inode, *coda_inode = coda_file->f_path.dentry->d_inode;
+ struct inode *host_inode, *coda_inode = file_inode(coda_file);
struct coda_file_info *cfi;
struct file *host_file;
ssize_t ret;
@@ -78,7 +78,7 @@ coda_file_write(struct file *coda_file, const char __user *buf, size_t count, lo
if (!host_file->f_op || !host_file->f_op->write)
return -EINVAL;
- host_inode = host_file->f_path.dentry->d_inode;
+ host_inode = file_inode(host_file);
mutex_lock(&coda_inode->i_mutex);
ret = host_file->f_op->write(host_file, buf, count, ppos);
@@ -106,8 +106,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
if (!host_file->f_op || !host_file->f_op->mmap)
return -ENODEV;
- coda_inode = coda_file->f_path.dentry->d_inode;
- host_inode = host_file->f_path.dentry->d_inode;
+ coda_inode = file_inode(coda_file);
+ host_inode = file_inode(host_file);
cii = ITOC(coda_inode);
spin_lock(&cii->c_lock);
@@ -178,7 +178,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
coda_flags, coda_file->f_cred->fsuid);
- host_inode = cfi->cfi_container->f_path.dentry->d_inode;
+ host_inode = file_inode(cfi->cfi_container);
cii = ITOC(coda_inode);
/* did we mmap this file? */
@@ -202,7 +202,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
struct file *host_file;
- struct inode *coda_inode = coda_file->f_path.dentry->d_inode;
+ struct inode *coda_inode = file_inode(coda_file);
struct coda_file_info *cfi;
int err;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index be2aa4909487..dada9d0abede 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -20,6 +20,7 @@
#include <linux/file.h>
#include <linux/vfs.h>
#include <linux/slab.h>
+#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
@@ -48,7 +49,7 @@ static struct inode *coda_alloc_inode(struct super_block *sb)
return NULL;
memset(&ei->c_fid, 0, sizeof(struct CodaFid));
ei->c_flags = 0;
- ei->c_uid = 0;
+ ei->c_uid = GLOBAL_ROOT_UID;
ei->c_cached_perm = 0;
spin_lock_init(&ei->c_lock);
return &ei->vfs_inode;
@@ -129,7 +130,7 @@ static int get_device_index(struct coda_mount_data *data)
f = fdget(data->fd);
if (!f.file)
goto Ebadf;
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
fdput(f);
goto Ebadf;
@@ -157,6 +158,9 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
int error;
int idx;
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
idx = get_device_index((struct coda_mount_data *) data);
/* Ignore errors in data, for backward compatibility */
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index ee0981f1375b..3f5de96bbb58 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -52,7 +52,7 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
struct path path;
int error;
struct PioctlData data;
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct inode *target_inode = NULL;
struct coda_inode_info *cnp;
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 761d5b31b18d..ebc2bae6c289 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -37,6 +37,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/pid_namespace.h>
#include <asm/io.h>
#include <asm/poll.h>
#include <asm/uaccess.h>
@@ -266,6 +267,12 @@ static int coda_psdev_open(struct inode * inode, struct file * file)
struct venus_comm *vcp;
int idx, err;
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ if (current_user_ns() != &init_user_ns)
+ return -EINVAL;
+
idx = iminor(inode);
if (idx < 0 || idx >= MAX_CODADEVS)
return -ENODEV;
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 0c68fd31fbf2..3a731976dc5e 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -50,9 +50,9 @@ static void *alloc_upcall(int opcode, int size)
return ERR_PTR(-ENOMEM);
inp->ih.opcode = opcode;
- inp->ih.pid = current->pid;
- inp->ih.pgid = task_pgrp_nr(current);
- inp->ih.uid = current_fsuid();
+ inp->ih.pid = task_pid_nr_ns(current, &init_pid_ns);
+ inp->ih.pgid = task_pgrp_nr_ns(current, &init_pid_ns);
+ inp->ih.uid = from_kuid(&init_user_ns, current_fsuid());
return (void*)inp;
}
@@ -157,7 +157,7 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid,
}
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- vuid_t uid)
+ kuid_t uid)
{
union inputArgs *inp;
union outputArgs *outp;
@@ -166,7 +166,7 @@ int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
insize = SIZE(release);
UPARG(CODA_CLOSE);
- inp->ih.uid = uid;
+ inp->ih.uid = from_kuid(&init_user_ns, uid);
inp->coda_close.VFid = *fid;
inp->coda_close.flags = flags;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index e2f57a007029..3ced75f765ca 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1582,7 +1582,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
case FIBMAP:
case FIGETBSZ:
case FIONREAD:
- if (S_ISREG(f.file->f_path.dentry->d_inode->i_mode))
+ if (S_ISREG(file_inode(f.file)->i_mode))
break;
/*FALL THROUGH*/
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index e9dcfa3c208c..7aabc6ad4e9b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1626,7 +1626,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
if (offset >= 0)
break;
default:
- mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(file)->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
diff --git a/fs/coredump.c b/fs/coredump.c
index 177493272a61..c6479658d487 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -411,7 +411,7 @@ static void wait_for_dump_helpers(struct file *file)
{
struct pipe_inode_info *pipe;
- pipe = file->f_path.dentry->d_inode->i_pipe;
+ pipe = file_inode(file)->i_pipe;
pipe_lock(pipe);
pipe->readers++;
@@ -501,7 +501,7 @@ void do_coredump(siginfo_t *siginfo)
* so we dump it as root in mode 2, and only into a controlled
* environment (pipe handler or fully qualified path).
*/
- if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
+ if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
/* Setuid core dump mode */
flag = O_EXCL; /* Stop rewrite attacks */
cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
@@ -600,7 +600,7 @@ void do_coredump(siginfo_t *siginfo)
if (IS_ERR(cprm.file))
goto fail_unlock;
- inode = cprm.file->f_path.dentry->d_inode;
+ inode = file_inode(cprm.file);
if (inode->i_nlink > 1)
goto close_fail;
if (d_unhashed(cprm.file->f_path.dentry))
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index c6c3f91ecf06..3ceb9ec976e1 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -351,7 +351,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
*/
static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
char *buf;
unsigned int offset;
diff --git a/fs/dcache.c b/fs/dcache.c
index 19153a0a810c..fbfae008ba44 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
struct dentry *alias, *discon_alias;
- struct hlist_node *p;
again:
discon_alias = NULL;
- hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
void d_prune_aliases(struct inode *inode)
{
struct dentry *dentry;
- struct hlist_node *p;
restart:
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_count) {
__dget_dlock(dentry);
@@ -1358,6 +1356,7 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
DCACHE_OP_COMPARE |
DCACHE_OP_REVALIDATE |
+ DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE ));
dentry->d_op = op;
if (!op)
@@ -1368,6 +1367,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_COMPARE;
if (op->d_revalidate)
dentry->d_flags |= DCACHE_OP_REVALIDATE;
+ if (op->d_weak_revalidate)
+ dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
if (op->d_delete)
dentry->d_flags |= DCACHE_OP_DELETE;
if (op->d_prune)
@@ -1440,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
int len = entry->d_name.len;
const char *name = entry->d_name.name;
unsigned int hash = entry->d_name.hash;
- struct hlist_node *p;
if (!inode) {
__d_instantiate(entry, NULL);
return NULL;
}
- hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
/*
* Don't need alias->d_lock here, because aliases with
* d_parent == entry->d_parent are not subject to name or
@@ -1672,7 +1672,6 @@ EXPORT_SYMBOL(d_splice_alias);
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
struct qstr *name)
{
- int error;
struct dentry *found;
struct dentry *new;
@@ -1681,10 +1680,12 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
* if not go ahead and create it now.
*/
found = d_hash_and_lookup(dentry->d_parent, name);
+ if (unlikely(IS_ERR(found)))
+ goto err_out;
if (!found) {
new = d_alloc(dentry->d_parent, name);
if (!new) {
- error = -ENOMEM;
+ found = ERR_PTR(-ENOMEM);
goto err_out;
}
@@ -1725,7 +1726,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
err_out:
iput(inode);
- return ERR_PTR(error);
+ return found;
}
EXPORT_SYMBOL(d_add_ci);
@@ -1889,7 +1890,7 @@ seqretry:
* dentry is returned. The caller must use dput to free the entry when it has
* finished using it. %NULL is returned if the dentry does not exist.
*/
-struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
+struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
struct dentry *dentry;
unsigned seq;
@@ -1919,7 +1920,7 @@ EXPORT_SYMBOL(d_lookup);
*
* __d_lookup callers must be commented.
*/
-struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
+struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
unsigned int len = name->len;
unsigned int hash = name->hash;
@@ -1997,12 +1998,10 @@ next:
* @dir: Directory to search in
* @name: qstr of name we wish to find
*
- * On hash failure or on lookup failure NULL is returned.
+ * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
*/
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
- struct dentry *dentry = NULL;
-
/*
* Check for a fs-specific hash function. Note that we must
* calculate the standard hash first, as the d_op->d_hash()
@@ -2010,13 +2009,13 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
*/
name->hash = full_name_hash(name->name, name->len);
if (dir->d_flags & DCACHE_OP_HASH) {
- if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
- goto out;
+ int err = dir->d_op->d_hash(dir, dir->d_inode, name);
+ if (unlikely(err < 0))
+ return ERR_PTR(err);
}
- dentry = d_lookup(dir, name);
-out:
- return dentry;
+ return d_lookup(dir, name);
}
+EXPORT_SYMBOL(d_hash_and_lookup);
/**
* d_validate - verify dentry provided from insecure source (deprecated)
@@ -2394,7 +2393,7 @@ out_err:
*/
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
- struct dentry *dparent, *aparent;
+ struct dentry *dparent;
dentry_lock_for_move(anon, dentry);
@@ -2402,24 +2401,15 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
write_seqcount_begin(&anon->d_seq);
dparent = dentry->d_parent;
- aparent = anon->d_parent;
switch_names(dentry, anon);
swap(dentry->d_name.hash, anon->d_name.hash);
- dentry->d_parent = (aparent == anon) ? dentry : aparent;
- list_del(&dentry->d_u.d_child);
- if (!IS_ROOT(dentry))
- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
- else
- INIT_LIST_HEAD(&dentry->d_u.d_child);
-
- anon->d_parent = (dparent == dentry) ? anon : dparent;
+ dentry->d_parent = dentry;
+ list_del_init(&dentry->d_u.d_child);
+ anon->d_parent = dparent;
list_del(&anon->d_u.d_child);
- if (!IS_ROOT(anon))
- list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
- else
- INIT_LIST_HEAD(&anon->d_u.d_child);
+ list_add(&anon->d_u.d_child, &dparent->d_subdirs);
write_seqcount_end(&dentry->d_seq);
write_seqcount_end(&anon->d_seq);
@@ -2722,37 +2712,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
}
EXPORT_SYMBOL(d_path);
-/**
- * d_path_with_unreachable - return the path of a dentry
- * @path: path to report
- * @buf: buffer to return value in
- * @buflen: buffer length
- *
- * The difference from d_path() is that this prepends "(unreachable)"
- * to paths which are unreachable from the current process' root.
- */
-char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
-{
- char *res = buf + buflen;
- struct path root;
- int error;
-
- if (path->dentry->d_op && path->dentry->d_op->d_dname)
- return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
-
- get_fs_root(current->fs, &root);
- write_seqlock(&rename_lock);
- error = path_with_deleted(path, &root, &res, &buflen);
- if (error > 0)
- error = prepend_unreachable(&res, &buflen);
- write_sequnlock(&rename_lock);
- path_put(&root);
- if (error)
- res = ERR_PTR(error);
-
- return res;
-}
-
/*
* Helper function for dentry_operations.d_dname() members
*/
@@ -3035,7 +2994,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
ino_t ino = 0;
dentry = d_hash_and_lookup(dir, name);
- if (dentry) {
+ if (!IS_ERR_OR_NULL(dentry)) {
if (dentry->d_inode)
ino = dentry->d_inode->i_ino;
dput(dentry);
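Note: with the dcache.c change above, d_hash_and_lookup() reports a failing ->d_hash() as ERR_PTR(-error) instead of folding it into NULL, so callers such as cifs_prime_dcache(), d_add_ci() and find_inode_number() must tell the two cases apart. A minimal sketch of a caller under that contract; lookup_cached() is hypothetical:

    #include <linux/dcache.h>
    #include <linux/err.h>

    static struct dentry *lookup_cached(struct dentry *dir, struct qstr *name)
    {
            struct dentry *dentry = d_hash_and_lookup(dir, name);

            if (IS_ERR(dentry))
                    return NULL;    /* ->d_hash() rejected the name */
            return dentry;          /* may still be NULL: not in the dcache */
    }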
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 472e6befc54d..073d30b9d1ac 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -243,6 +243,13 @@ static int mknod_ptmx(struct super_block *sb)
struct dentry *root = sb->s_root;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+ root_uid = make_kuid(current_user_ns(), 0);
+ root_gid = make_kgid(current_user_ns(), 0);
+ if (!uid_valid(root_uid) || !gid_valid(root_gid))
+ return -EINVAL;
mutex_lock(&root->d_inode->i_mutex);
@@ -273,6 +280,8 @@ static int mknod_ptmx(struct super_block *sb)
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
+ inode->i_uid = root_uid;
+ inode->i_gid = root_gid;
d_add(dentry, inode);
@@ -438,6 +447,12 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
if (error)
return ERR_PTR(error);
+ /* Require newinstance for all user namespace mounts to ensure
+ * the mount options are not changed.
+ */
+ if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
+ return ERR_PTR(-EINVAL);
+
if (opts.newinstance)
s = sget(fs_type, NULL, set_anon_super, flags, NULL);
else
@@ -491,6 +506,9 @@ static struct file_system_type devpts_fs_type = {
.name = "devpts",
.mount = devpts_mount,
.kill_sb = devpts_kill_sb,
+#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
+ .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
+#endif
};
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index cf5b44b10c67..f853263cf74f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -261,9 +261,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
dio->end_io(dio->iocb, offset, transferred,
dio->private, ret, is_async);
} else {
+ inode_dio_done(dio->inode);
if (is_async)
aio_complete(dio->iocb, ret, 0);
- inode_dio_done(dio->inode);
}
return ret;
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index a0387dd8b1f0..7d58d5b112b5 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -158,7 +158,7 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
unsigned int x;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
x = simple_strtoul(buf, NULL, 0);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index f7501651762d..1b1146670c4b 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1183,7 +1183,7 @@ static void detach_lkb(struct dlm_lkb *lkb)
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- int rv, id;
+ int rv;
lkb = dlm_allocate_lkb(ls);
if (!lkb)
@@ -1199,19 +1199,13 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
mutex_init(&lkb->lkb_cb_mutex);
INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
- retry:
- rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
- if (!rv)
- return -ENOMEM;
-
+ idr_preload(GFP_NOFS);
spin_lock(&ls->ls_lkbidr_spin);
- rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
- if (!rv)
- lkb->lkb_id = id;
+ rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
+ if (rv >= 0)
+ lkb->lkb_id = rv;
spin_unlock(&ls->ls_lkbidr_spin);
-
- if (rv == -EAGAIN)
- goto retry;
+ idr_preload_end();
if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv);
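Note: create_lkb() above is converted from the idr_pre_get()/idr_get_new_above() retry loop to the new idr_preload()/idr_alloc() API. A minimal sketch of that pattern under a spinlock; assign_id(), my_idr and my_lock are placeholders:

    #include <linux/idr.h>
    #include <linux/gfp.h>
    #include <linux/spinlock.h>

    static int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj)
    {
            int id;

            idr_preload(GFP_NOFS);                          /* may sleep; preallocates */
            spin_lock(my_lock);
            id = idr_alloc(my_idr, obj, 1, 0, GFP_NOWAIT);  /* ids start at 1 */
            spin_unlock(my_lock);
            idr_preload_end();

            return id;      /* >= 0 is the new id, negative is -errno */
    }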
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 2e99fb0c9737..3ca79d3253b9 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -796,7 +796,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
*/
idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
- idr_remove_all(&ls->ls_lkbidr);
idr_destroy(&ls->ls_lkbidr);
/*
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index dd87a31bcc21..4f5ad246582f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
static struct connection *__find_con(int nodeid)
{
int r;
- struct hlist_node *h;
struct connection *con;
r = nodeid_hash(nodeid);
- hlist_for_each_entry(con, h, &connection_hash[r], list) {
+ hlist_for_each_entry(con, &connection_hash[r], list) {
if (con->nodeid == nodeid)
return con;
}
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
static void foreach_conn(void (*conn_func)(struct connection *c))
{
int i;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
struct connection *con;
for (i = 0; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+ hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
conn_func(con);
- }
}
}
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
static struct connection *assoc2con(int assoc_id)
{
int i;
- struct hlist_node *h;
struct connection *con;
mutex_lock(&connections_lock);
for (i = 0 ; i < CONN_HASH_SIZE; i++) {
- hlist_for_each_entry(con, h, &connection_hash[i], list) {
+ hlist_for_each_entry(con, &connection_hash[i], list) {
if (con->sctp_assoc == assoc_id) {
mutex_unlock(&connections_lock);
return con;
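Note: hlist_for_each_entry() and hlist_for_each_entry_safe() drop their struct hlist_node * cursor argument in this release, which is what the mechanical edits in __find_con(), foreach_conn() and the dcache/ecryptfs hunks amount to. A minimal sketch in the new form; struct connection, connection_hash[] and nodeid_hash() are the dlm names from the diff, find_con_example() is hypothetical:

    static struct connection *find_con_example(int nodeid)
    {
            struct connection *con;

            hlist_for_each_entry(con, &connection_hash[nodeid_hash(nodeid)], list)
                    if (con->nodeid == nodeid)
                            return con;
            return NULL;
    }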
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index aedea28a86a1..a6bc63f6e31b 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -305,27 +305,26 @@ static int recover_idr_empty(struct dlm_ls *ls)
static int recover_idr_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- int rv, id;
-
- rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
- if (!rv)
- return -ENOMEM;
+ int rv;
+ idr_preload(GFP_NOFS);
spin_lock(&ls->ls_recover_idr_lock);
if (r->res_id) {
- spin_unlock(&ls->ls_recover_idr_lock);
- return -1;
- }
- rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
- if (rv) {
- spin_unlock(&ls->ls_recover_idr_lock);
- return rv;
+ rv = -1;
+ goto out_unlock;
}
- r->res_id = id;
+ rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+ if (rv < 0)
+ goto out_unlock;
+
+ r->res_id = rv;
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
+ rv = 0;
+out_unlock:
spin_unlock(&ls->ls_recover_idr_lock);
- return 0;
+ idr_preload_end();
+ return rv;
}
static void recover_idr_del(struct dlm_rsb *r)
@@ -351,24 +350,21 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
return r;
}
-static int recover_idr_clear_rsb(int id, void *p, void *data)
+static void recover_idr_clear(struct dlm_ls *ls)
{
- struct dlm_ls *ls = data;
- struct dlm_rsb *r = p;
+ struct dlm_rsb *r;
+ int id;
- r->res_id = 0;
- r->res_recover_locks_count = 0;
- ls->ls_recover_list_count--;
+ spin_lock(&ls->ls_recover_idr_lock);
- dlm_put_rsb(r);
- return 0;
-}
+ idr_for_each_entry(&ls->ls_recover_idr, r, id) {
+ idr_remove(&ls->ls_recover_idr, id);
+ r->res_id = 0;
+ r->res_recover_locks_count = 0;
+ ls->ls_recover_list_count--;
-static void recover_idr_clear(struct dlm_ls *ls)
-{
- spin_lock(&ls->ls_recover_idr_lock);
- idr_for_each(&ls->ls_recover_idr, recover_idr_clear_rsb, ls);
- idr_remove_all(&ls->ls_recover_idr);
+ dlm_put_rsb(r);
+ }
if (ls->ls_recover_list_count != 0) {
log_error(ls, "warning: recover_list_count %d",
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index cfb4b9fed520..7e2c6f5d7985 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -509,6 +509,12 @@ ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt;
}
+static inline struct path *
+ecryptfs_dentry_to_lower_path(struct dentry *dentry)
+{
+ return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
+}
+
static inline void
ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
{
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index d45ba4568128..53acc9d0c138 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -118,7 +118,7 @@ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
lower_file = ecryptfs_file_to_lower(file);
lower_file->f_pos = file->f_pos;
- inode = file->f_path.dentry->d_inode;
+ inode = file_inode(file);
memset(&buf, 0, sizeof(buf));
buf.dirent = dirent;
buf.dentry = file->f_path.dentry;
@@ -133,7 +133,7 @@ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
goto out;
if (rc >= 0)
fsstack_copy_attr_atime(inode,
- lower_file->f_path.dentry->d_inode);
+ file_inode(lower_file));
out:
return rc;
}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index cc7709e7c508..e0f07fb6d56b 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1027,8 +1027,7 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat lower_stat;
int rc;
- rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
- ecryptfs_dentry_to_lower(dentry), &lower_stat);
+ rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat);
if (!rc) {
fsstack_copy_attr_all(dentry->d_inode,
ecryptfs_inode_to_lower(dentry->d_inode));
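Note: vfs_getattr() now takes a struct path instead of a (vfsmount, dentry) pair, which is why ecryptfs grows the ecryptfs_dentry_to_lower_path() helper earlier in this diff. A minimal sketch of a stacked-filesystem caller; lower_getattr() is hypothetical:

    static int lower_getattr(struct dentry *dentry, struct kstat *stat)
    {
            return vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), stat);
    }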
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 5fa2471796c2..8d7a577ae497 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
*/
int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
{
- struct hlist_node *elem;
int rc;
- hlist_for_each_entry(*daemon, elem,
+ hlist_for_each_entry(*daemon,
&ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
euid_chain) {
if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
}
if (ecryptfs_daemon_hash) {
- struct hlist_node *elem;
struct ecryptfs_daemon *daemon;
int i;
@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
int rc;
- hlist_for_each_entry(daemon, elem,
+ hlist_for_each_entry(daemon,
&ecryptfs_daemon_hash[i],
euid_chain) {
rc = ecryptfs_exorcise_daemon(daemon);
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index b2a34a192f4f..6a160539cd23 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -40,16 +40,12 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size)
{
struct file *lower_file;
- mm_segment_t fs_save;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
- fs_save = get_fs();
- set_fs(get_ds());
- rc = vfs_write(lower_file, data, size, &offset);
- set_fs(fs_save);
+ rc = kernel_write(lower_file, data, size, offset);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
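Note: ecryptfs_write_lower() above drops the get_fs()/set_fs(get_ds()) dance around vfs_write() in favour of kernel_write(), which takes the offset by value. A minimal sketch; write_lower() is hypothetical:

    static ssize_t write_lower(struct file *lower_file, char *data,
                               size_t size, loff_t offset)
    {
            return kernel_write(lower_file, data, size, offset);
    }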
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
index 7ee6f7e3a608..055a9e9ca747 100644
--- a/fs/efs/dir.c
+++ b/fs/efs/dir.c
@@ -20,7 +20,7 @@ const struct inode_operations efs_dir_inode_operations = {
};
static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct buffer_head *bh;
struct efs_dir *dirblock;
diff --git a/fs/exec.c b/fs/exec.c
index 20df02c1cc70..a96a4885bbbf 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -123,7 +123,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
goto out;
error = -EINVAL;
- if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ if (!S_ISREG(file_inode(file)->i_mode))
goto exit;
error = -EACCES;
@@ -355,7 +355,7 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
* flags, permissions, and offset, so we use temporary values. We'll update
* them later in setup_arg_pages().
*/
-int bprm_mm_init(struct linux_binprm *bprm)
+static int bprm_mm_init(struct linux_binprm *bprm)
{
int err;
struct mm_struct *mm = NULL;
@@ -764,7 +764,7 @@ struct file *open_exec(const char *name)
goto out;
err = -EACCES;
- if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ if (!S_ISREG(file_inode(file)->i_mode))
goto exit;
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
@@ -1098,7 +1098,7 @@ EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
- if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
+ if (inode_permission(file_inode(file), MAY_READ) < 0)
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);
@@ -1111,7 +1111,7 @@ void setup_new_exec(struct linux_binprm * bprm)
current->sas_ss_sp = current->sas_ss_size = 0;
if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
- set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
+ set_dumpable(current->mm, SUID_DUMP_USER);
else
set_dumpable(current->mm, suid_dumpable);
@@ -1270,7 +1270,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
int prepare_binprm(struct linux_binprm *bprm)
{
umode_t mode;
- struct inode * inode = bprm->file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(bprm->file);
int retval;
mode = inode->i_mode;
@@ -1639,17 +1639,17 @@ EXPORT_SYMBOL(set_binfmt);
void set_dumpable(struct mm_struct *mm, int value)
{
switch (value) {
- case SUID_DUMPABLE_DISABLED:
+ case SUID_DUMP_DISABLE:
clear_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
- case SUID_DUMPABLE_ENABLED:
+ case SUID_DUMP_USER:
set_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
- case SUID_DUMPABLE_SAFE:
+ case SUID_DUMP_ROOT:
set_bit(MMF_DUMP_SECURELY, &mm->flags);
smp_wmb();
set_bit(MMF_DUMPABLE, &mm->flags);
@@ -1662,7 +1662,7 @@ int __get_dumpable(unsigned long mm_flags)
int ret;
ret = mm_flags & MMF_DUMPABLE_MASK;
- return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
+ return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
}
int get_dumpable(struct mm_struct *mm)
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index c61e62ac231c..46375896cfc0 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -242,7 +242,7 @@ static int
exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
loff_t pos = filp->f_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
unsigned long npages = dir_pages(inode);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 5df4bb4aab14..262fc9940982 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
{
struct dentry *dentry, *toput = NULL;
struct inode *inode;
- struct hlist_node *p;
if (acceptable(context, result))
return result;
inode = result->d_inode;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
dget(dentry);
spin_unlock(&inode->i_lock);
if (toput)
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 2616d0ea5c5c..9f9992b37924 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -159,15 +159,6 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
return bh;
}
-static void release_blocks(struct super_block *sb, int count)
-{
- if (count) {
- struct ext2_sb_info *sbi = EXT2_SB(sb);
-
- percpu_counter_add(&sbi->s_freeblocks_counter, count);
- }
-}
-
static void group_adjust_blocks(struct super_block *sb, int group_no,
struct ext2_group_desc *desc, struct buffer_head *bh, int count)
{
@@ -568,8 +559,11 @@ do_more:
}
error_return:
brelse(bitmap_bh);
- release_blocks(sb, freed);
- dquot_free_block_nodirty(inode, freed);
+ if (freed) {
+ percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+ dquot_free_block_nodirty(inode, freed);
+ mark_inode_dirty(inode);
+ }
}
/**
@@ -1239,10 +1233,6 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
*errp = -ENOSPC;
sb = inode->i_sb;
- if (!sb) {
- printk("ext2_new_blocks: nonexistent device");
- return 0;
- }
/*
* Check quota for allocation of this block.
@@ -1416,9 +1406,11 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- dquot_free_block_nodirty(inode, *count-num);
- mark_inode_dirty(inode);
- *count = num;
+ if (num < *count) {
+ dquot_free_block_nodirty(inode, *count-num);
+ mark_inode_dirty(inode);
+ *count = num;
+ }
return ret_block;
io_error:
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0f4f5c929257..4237722bfd27 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -290,7 +290,7 @@ static int
ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
loff_t pos = filp->f_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 6363ac66fafa..c3881e56662e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -495,6 +495,10 @@ static int ext2_alloc_branch(struct inode *inode,
* parent to disk.
*/
bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
+ goto failed;
+ }
branch[n].bh = bh;
lock_buffer(bh);
memset(bh->b_data, 0, blocksize);
@@ -523,6 +527,14 @@ static int ext2_alloc_branch(struct inode *inode,
}
*blks = num;
return err;
+
+failed:
+ for (i = 1; i < n; i++)
+ bforget(branch[i].bh);
+ for (i = 0; i < indirect_blks; i++)
+ ext2_free_blocks(inode, new_blocks[i], 1);
+ ext2_free_blocks(inode, new_blocks[i], num);
+ return err;
}
/**
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 2de655f5d625..5d46c09863f0 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -19,7 +19,7 @@
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ext2_inode_info *ei = EXT2_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index fa04d023177e..7f68c8114026 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1500,7 +1500,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
bh = sb_bread(sb, tmp_bh.b_blocknr);
else
bh = sb_getblk(sb, tmp_bh.b_blocknr);
- if (!bh) {
+ if (unlikely(!bh)) {
err = -EIO;
goto out;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index b6754dbbce3c..2d7557db3ae8 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -662,10 +662,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
ea_idebug(inode, "creating block %d", block);
new_bh = sb_getblk(sb, block);
- if (!new_bh) {
+ if (unlikely(!new_bh)) {
ext2_free_blocks(inode, block, 1);
mark_inode_dirty(inode);
- error = -EIO;
+ error = -ENOMEM;
goto cleanup;
}
lock_buffer(new_bh);
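This is one instance of a pattern repeated throughout the series: a NULL return from sb_getblk() is treated as an allocation failure (-ENOMEM) rather than an I/O error. A sketch of the check, with get_block() standing in for sb_getblk():

#include <errno.h>
#include <stdlib.h>

struct buffer { char *data; };

static struct buffer *get_block(size_t size)
{
	(void)size;
	return calloc(1, sizeof(struct buffer)); /* may return NULL */
}

static int use_block(size_t size)
{
	struct buffer *bh = get_block(size);

	if (!bh)
		return -ENOMEM;	/* was -EIO before this patch */
	/* ... fill and write bh ... */
	free(bh);
	return 0;
}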
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index dd91264ba94f..87eccbbca255 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -99,7 +99,7 @@ static int ext3_readdir(struct file * filp,
int i, stored;
struct ext3_dir_entry_2 *de;
int err;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
@@ -114,7 +114,7 @@ static int ext3_readdir(struct file * filp,
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- EXT3_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
+ EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL;
}
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
@@ -457,7 +457,7 @@ static int call_filldir(struct file * filp, void * dirent,
{
struct dir_private_info *info = filp->private_data;
loff_t curr_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block * sb;
int error;
@@ -487,7 +487,7 @@ static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir)
{
struct dir_private_info *info = filp->private_data;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct fname *fname;
int ret;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index b176d4253544..d512c4bc4ad7 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -676,6 +676,10 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
* parent to disk.
*/
bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
+ goto failed;
+ }
branch[n].bh = bh;
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
@@ -717,7 +721,7 @@ failed:
BUFFER_TRACE(branch[i].bh, "call journal_forget");
ext3_journal_forget(handle, branch[i].bh);
}
- for (i = 0; i <indirect_blks; i++)
+ for (i = 0; i < indirect_blks; i++)
ext3_free_blocks(handle, inode, new_blocks[i], 1);
ext3_free_blocks(handle, inode, new_blocks[i], num);
@@ -1078,8 +1082,8 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
if (!err && buffer_mapped(&dummy)) {
struct buffer_head *bh;
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
- if (!bh) {
- *errp = -EIO;
+ if (unlikely(!bh)) {
+ *errp = -ENOMEM;
goto err;
}
if (buffer_new(&dummy)) {
@@ -2729,12 +2733,12 @@ static int __ext3_get_inode_loc(struct inode *inode,
return -EIO;
bh = sb_getblk(inode->i_sb, block);
- if (!bh) {
+ if (unlikely(!bh)) {
ext3_error (inode->i_sb, "ext3_get_inode_loc",
"unable to read inode block - "
"inode=%lu, block="E3FSBLK,
inode->i_ino, block);
- return -EIO;
+ return -ENOMEM;
}
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
@@ -2783,7 +2787,7 @@ static int __ext3_get_inode_loc(struct inode *inode,
bitmap_bh = sb_getblk(inode->i_sb,
le32_to_cpu(desc->bg_inode_bitmap));
- if (!bitmap_bh)
+ if (unlikely(!bitmap_bh))
goto make_io;
/*
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 677a5c27dc69..4d96e9a64532 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -14,7 +14,7 @@
long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ext3_inode_info *ei = EXT3_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 890b8947c546..692de13e3596 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -36,7 +36,6 @@
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext3_append(handle_t *handle,
struct inode *inode,
@@ -624,7 +623,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
start_minor_hash));
- dir = dir_file->f_path.dentry->d_inode;
+ dir = file_inode(dir_file);
if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
@@ -638,7 +637,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
}
hinfo.hash = start_hash;
hinfo.minor_hash = 0;
- frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
+ frame = dx_probe(NULL, file_inode(dir_file), &hinfo, frames, &err);
if (!frame)
return err;
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 0f814f3450de..27105655502c 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -116,8 +116,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
int err;
bh = sb_getblk(sb, blk);
- if (!bh)
- return ERR_PTR(-EIO);
+ if (unlikely(!bh))
+ return ERR_PTR(-ENOMEM);
if ((err = ext3_journal_get_write_access(handle, bh))) {
brelse(bh);
bh = ERR_PTR(err);
@@ -234,8 +234,8 @@ static int setup_new_group_blocks(struct super_block *sb,
goto exit_bh;
gdb = sb_getblk(sb, block);
- if (!gdb) {
- err = -EIO;
+ if (unlikely(!gdb)) {
+ err = -ENOMEM;
goto exit_bh;
}
if ((err = ext3_journal_get_write_access(handle, gdb))) {
@@ -722,8 +722,8 @@ static void update_backups(struct super_block *sb,
break;
bh = sb_getblk(sb, group * bpg + blk_off);
- if (!bh) {
- err = -EIO;
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
break;
}
ext3_debug("update metadata backup %#04lx\n",
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4ba2683c1d44..5546ca225ffe 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -916,21 +916,24 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"Not enough memory for storing quotafile name");
return 0;
}
- if (sbi->s_qf_names[qtype] &&
- strcmp(sbi->s_qf_names[qtype], qname)) {
- ext3_msg(sb, KERN_ERR,
- "%s quota file already specified", QTYPE2NAME(qtype));
+ if (sbi->s_qf_names[qtype]) {
+ int same = !strcmp(sbi->s_qf_names[qtype], qname);
+
kfree(qname);
- return 0;
+ if (!same) {
+ ext3_msg(sb, KERN_ERR,
+ "%s quota file already specified",
+ QTYPE2NAME(qtype));
+ }
+ return same;
}
- sbi->s_qf_names[qtype] = qname;
- if (strchr(sbi->s_qf_names[qtype], '/')) {
+ if (strchr(qname, '/')) {
ext3_msg(sb, KERN_ERR,
"quotafile must be on filesystem root");
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
+ kfree(qname);
return 0;
}
+ sbi->s_qf_names[qtype] = qname;
set_opt(sbi->s_mount_opt, QUOTA);
return 1;
}
@@ -945,11 +948,10 @@ static int clear_qf_name(struct super_block *sb, int qtype) {
" when quota turned on");
return 0;
}
- /*
- * The space will be released later when all options are confirmed
- * to be correct
- */
- sbi->s_qf_names[qtype] = NULL;
+ if (sbi->s_qf_names[qtype]) {
+ kfree(sbi->s_qf_names[qtype]);
+ sbi->s_qf_names[qtype] = NULL;
+ }
return 1;
}
#endif
@@ -2606,7 +2608,18 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
#ifdef CONFIG_QUOTA
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++)
- old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+ if (sbi->s_qf_names[i]) {
+ old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+ GFP_KERNEL);
+ if (!old_opts.s_qf_names[i]) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ kfree(old_opts.s_qf_names[j]);
+ return -ENOMEM;
+ }
+ } else
+ old_opts.s_qf_names[i] = NULL;
#endif
/*
@@ -2699,9 +2712,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
- if (old_opts.s_qf_names[i] &&
- old_opts.s_qf_names[i] != sbi->s_qf_names[i])
- kfree(old_opts.s_qf_names[i]);
+ kfree(old_opts.s_qf_names[i]);
#endif
if (enable_quota)
dquot_resume(sb, -1);
@@ -2715,9 +2726,7 @@ restore_opts:
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
- if (sbi->s_qf_names[i] &&
- old_opts.s_qf_names[i] != sbi->s_qf_names[i])
- kfree(sbi->s_qf_names[i]);
+ kfree(sbi->s_qf_names[i]);
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
#endif
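The remount hunks above snapshot the quota file names with kstrdup() so that both the saved and the live arrays own their strings and can be freed unconditionally on either path. A userspace sketch of that snapshot/restore idea, with strdup()/free() standing in for kstrdup()/kfree():

#include <stdlib.h>
#include <string.h>

#define MAXQUOTAS 2

static int snapshot_names(char *cur[MAXQUOTAS], char *old[MAXQUOTAS])
{
	int i, j;

	for (i = 0; i < MAXQUOTAS; i++) {
		old[i] = NULL;
		if (cur[i]) {
			old[i] = strdup(cur[i]);
			if (!old[i]) {
				for (j = 0; j < i; j++)
					free(old[j]);
				return -1;	/* -ENOMEM in the kernel */
			}
		}
	}
	return 0;
}

static void drop_snapshot(char *old[MAXQUOTAS])
{
	int i;

	/* free(NULL) is a no-op, so no per-entry checks are needed */
	for (i = 0; i < MAXQUOTAS; i++)
		free(old[i]);
}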
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index d22ebb7a4f55..b1fc96383e08 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -813,10 +813,10 @@ inserted:
ea_idebug(inode, "creating block %d", block);
new_bh = sb_getblk(sb, block);
- if (!new_bh) {
+ if (unlikely(!new_bh)) {
getblk_failed:
ext3_free_blocks(handle, inode, block, 1);
- error = -EIO;
+ error = -ENOMEM;
goto cleanup;
}
lock_buffer(new_bh);
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index e6e0d988439b..39a54a0e9fe4 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -324,8 +324,8 @@ ext4_acl_chmod(struct inode *inode)
if (error)
return error;
retry:
- handle = ext4_journal_start(inode,
- EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+ handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+ ext4_jbd2_credits_xattr(inode));
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
ext4_std_error(inode->i_sb, error);
@@ -422,7 +422,8 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
acl = NULL;
retry:
- handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+ handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+ ext4_jbd2_credits_xattr(inode));
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto release_and_out;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index cf1821784a16..92e68b33fffd 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -358,7 +358,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
}
/**
- * ext4_read_block_bitmap()
+ * ext4_read_block_bitmap_nowait()
* @sb: super block
* @block_group: given block group
*
@@ -457,6 +457,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
struct buffer_head *bh;
bh = ext4_read_block_bitmap_nowait(sb, block_group);
+ if (!bh)
+ return NULL;
if (ext4_wait_block_bitmap(sb, block_group, bh)) {
put_bh(bh);
return NULL;
@@ -482,11 +484,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
free_clusters = percpu_counter_read_positive(fcc);
dirty_clusters = percpu_counter_read_positive(dcc);
- root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
+
+ /*
+ * r_blocks_count should always be multiple of the cluster ratio so
+ * we are safe to do a plane bit shift only.
+ */
+ root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
EXT4_FREECLUSTERS_WATERMARK) {
- free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
+ free_clusters = percpu_counter_sum_positive(fcc);
dirty_clusters = percpu_counter_sum_positive(dcc);
}
/* Check whether we have space after accounting for current
@@ -628,7 +635,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
brelse(bitmap_bh);
printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
", computed = %llu, %llu\n",
- EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+ EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
desc_count, bitmap_count);
return bitmap_count;
#else
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 80a28b297279..d8cd1f0f4661 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -110,7 +110,7 @@ static int ext4_readdir(struct file *filp,
int i, stored;
struct ext4_dir_entry_2 *de;
int err;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
@@ -133,7 +133,7 @@ static int ext4_readdir(struct file *filp,
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- ext4_clear_inode_flag(filp->f_path.dentry->d_inode,
+ ext4_clear_inode_flag(file_inode(filp),
EXT4_INODE_INDEX);
}
stored = 0;
@@ -185,6 +185,7 @@ static int ext4_readdir(struct file *filp,
"at offset %llu",
(unsigned long long)filp->f_pos);
filp->f_pos += sb->s_blocksize - offset;
+ brelse(bh);
continue;
}
set_buffer_verified(bh);
@@ -333,7 +334,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
*
* For non-htree, ext4_llseek already chooses the proper max offset.
*/
-loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
+static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
@@ -494,7 +495,7 @@ static int call_filldir(struct file *filp, void *dirent,
{
struct dir_private_info *info = filp->private_data;
loff_t curr_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb;
int error;
@@ -526,7 +527,7 @@ static int ext4_dx_readdir(struct file *filp,
void *dirent, filldir_t filldir)
{
struct dir_private_info *info = filp->private_data;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct fname *fname;
int ret;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8462eb3c33aa..4a01ba315262 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -194,8 +194,7 @@ struct mpage_da_data {
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
#define EXT4_IO_END_ERROR 0x0002
-#define EXT4_IO_END_QUEUED 0x0004
-#define EXT4_IO_END_DIRECT 0x0008
+#define EXT4_IO_END_DIRECT 0x0004
struct ext4_io_page {
struct page *p_page;
@@ -215,10 +214,8 @@ typedef struct ext4_io_end {
struct list_head list; /* per-file finished IO list */
struct inode *inode; /* file being written to */
unsigned int flag; /* unwritten or not */
- struct page *page; /* for writepage() path */
loff_t offset; /* offset in the file */
ssize_t size; /* size of the extent */
- struct work_struct work; /* data work queue */
struct kiocb *iocb; /* iocb struct for AIO */
int result; /* error value for AIO */
int num_io_pages; /* for writepages() */
@@ -582,6 +579,8 @@ enum {
#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
/* Do not take i_data_sem locking in ext4_map_blocks */
#define EXT4_GET_BLOCKS_NO_LOCK 0x0100
+ /* Do not put hole in extent cache */
+#define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200
/*
* Flags used by ext4_free_blocks
@@ -810,17 +809,6 @@ do { \
#endif /* defined(__KERNEL__) || defined(__linux__) */
-/*
- * storage for cached extent
- * If ec_len == 0, then the cache is invalid.
- * If ec_start == 0, then the cache represents a gap (null mapping)
- */
-struct ext4_ext_cache {
- ext4_fsblk_t ec_start;
- ext4_lblk_t ec_block;
- __u32 ec_len; /* must be 32bit to return holes */
-};
-
#include "extents_status.h"
/*
@@ -887,7 +875,6 @@ struct ext4_inode_info {
struct inode vfs_inode;
struct jbd2_inode *jinode;
- struct ext4_ext_cache i_cached_extent;
/*
* File creation time. Its function is same as that of
* struct timespec i_{a,c,m}time in the generic inode.
@@ -901,6 +888,8 @@ struct ext4_inode_info {
/* extents status tree */
struct ext4_es_tree i_es_tree;
rwlock_t i_es_lock;
+ struct list_head i_es_lru;
+ unsigned int i_es_lru_nr; /* protected by i_es_lock */
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -930,6 +919,7 @@ struct ext4_inode_info {
spinlock_t i_completed_io_lock;
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
atomic_t i_unwritten; /* Nr. of inflight conversions pending */
+ struct work_struct i_unwritten_work; /* deferred extent conversion */
spinlock_t i_block_reservation_lock;
@@ -985,7 +975,6 @@ struct ext4_inode_info {
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
-#define EXT4_MOUNT_MBLK_IO_SUBMIT 0x4000000 /* multi-block io submits */
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
@@ -1316,6 +1305,12 @@ struct ext4_sb_info {
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_csum_seed;
+
+ /* Reclaim extents from extent status tree */
+ struct shrinker s_es_shrinker;
+ struct list_head s_es_lru;
+ struct percpu_counter s_extent_cache_cnt;
+ spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2007,9 +2002,20 @@ extern int ext4fs_dirhash(const char *name, int len, struct
dx_hash_info *hinfo);
/* ialloc.c */
-extern struct inode *ext4_new_inode(handle_t *, struct inode *, umode_t,
- const struct qstr *qstr, __u32 goal,
- uid_t *owner);
+extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
+ const struct qstr *qstr, __u32 goal,
+ uid_t *owner, int handle_type,
+ unsigned int line_no, int nblocks);
+
+#define ext4_new_inode(handle, dir, mode, qstr, goal, owner) \
+ __ext4_new_inode((handle), (dir), (mode), (qstr), (goal), (owner), \
+ 0, 0, 0)
+#define ext4_new_inode_start_handle(dir, mode, qstr, goal, owner, \
+ type, nblocks) \
+ __ext4_new_inode(NULL, (dir), (mode), (qstr), (goal), (owner), \
+ (type), __LINE__, (nblocks))
+
+
extern void ext4_free_inode(handle_t *, struct inode *);
extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
extern unsigned long ext4_count_free_inodes(struct super_block *);
@@ -2103,6 +2109,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
extern void ext4_ind_truncate(struct inode *inode);
+extern int ext4_ind_punch_hole(struct file *file, loff_t offset, loff_t length);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2151,6 +2158,8 @@ extern void *ext4_kvzalloc(size_t size, gfp_t flags);
extern void ext4_kvfree(void *ptr);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
+extern const char *ext4_decode_error(struct super_block *sb, int errno,
+ char nbuf[16]);
extern __printf(4, 5)
void __ext4_error(struct super_block *, const char *, unsigned int,
const char *, ...);
@@ -2227,6 +2236,8 @@ extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
struct ext4_group_desc *gdp);
extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
struct ext4_group_desc *gdp);
+extern int ext4_register_li_request(struct super_block *sb,
+ ext4_group_t first_not_zeroed);
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
@@ -2454,6 +2465,75 @@ extern const struct file_operations ext4_file_operations;
extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
extern void ext4_unwritten_wait(struct inode *inode);
+/* inline.c */
+extern int ext4_has_inline_data(struct inode *inode);
+extern int ext4_get_inline_size(struct inode *inode);
+extern int ext4_get_max_inline_size(struct inode *inode);
+extern int ext4_find_inline_data_nolock(struct inode *inode);
+extern void ext4_write_inline_data(struct inode *inode,
+ struct ext4_iloc *iloc,
+ void *buffer, loff_t pos,
+ unsigned int len);
+extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len);
+extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len);
+extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+
+extern int ext4_readpage_inline(struct inode *inode, struct page *page);
+extern int ext4_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep);
+extern int ext4_write_inline_data_end(struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned copied,
+ struct page *page);
+extern struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+ unsigned len,
+ struct page *page);
+extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep,
+ void **fsdata);
+extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+ unsigned len, unsigned copied,
+ struct page *page);
+extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode);
+extern int ext4_try_create_inline_dir(handle_t *handle,
+ struct inode *parent,
+ struct inode *inode);
+extern int ext4_read_inline_dir(struct file *filp,
+ void *dirent, filldir_t filldir,
+ int *has_inline_data);
+extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+ int *has_inline_data);
+extern int ext4_delete_inline_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh,
+ int *has_inline_data);
+extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
+extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+ struct ext4_dir_entry_2 **parent_de,
+ int *retval);
+extern int ext4_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ int *has_inline);
+extern int ext4_try_to_evict_inline_data(handle_t *handle,
+ struct inode *inode,
+ int needed);
+extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
+
+extern int ext4_convert_inline_data(struct inode *inode);
+
/* namei.c */
extern const struct inode_operations ext4_dir_inode_operations;
extern const struct inode_operations ext4_special_inode_operations;
@@ -2520,6 +2600,9 @@ extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
struct ext4_ext_path *);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
+extern int ext4_find_delalloc_range(struct inode *inode,
+ ext4_lblk_t lblk_start,
+ ext4_lblk_t lblk_end);
extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
@@ -2537,6 +2620,7 @@ extern void ext4_exit_pageio(void);
extern void ext4_ioend_wait(struct inode *);
extern void ext4_free_io_end(ext4_io_end_t *io);
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
+extern void ext4_end_io_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
struct page *page,
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 487fda12bc00..8643ff5bbeb7 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -193,12 +193,6 @@ static inline unsigned short ext_depth(struct inode *inode)
return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
}
-static inline void
-ext4_ext_invalidate_cache(struct inode *inode)
-{
- EXT4_I(inode)->i_cached_extent.ec_len = 0;
-}
-
static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
{
/* We can not have an uninitialized extent of zero length! */
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b4323ba846b5..7058975e3a55 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -6,6 +6,108 @@
#include <trace/events/ext4.h>
+/* Just increment the non-pointer handle value */
+static handle_t *ext4_get_nojournal(void)
+{
+ handle_t *handle = current->journal_info;
+ unsigned long ref_cnt = (unsigned long)handle;
+
+ BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
+
+ ref_cnt++;
+ handle = (handle_t *)ref_cnt;
+
+ current->journal_info = handle;
+ return handle;
+}
+
+
+/* Decrement the non-pointer handle value */
+static void ext4_put_nojournal(handle_t *handle)
+{
+ unsigned long ref_cnt = (unsigned long)handle;
+
+ BUG_ON(ref_cnt == 0);
+
+ ref_cnt--;
+ handle = (handle_t *)ref_cnt;
+
+ current->journal_info = handle;
+}
+
+/*
+ * Wrappers for jbd2_journal_start/end.
+ */
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+ int type, int nblocks)
+{
+ journal_t *journal;
+
+ trace_ext4_journal_start(sb, nblocks, _RET_IP_);
+ if (sb->s_flags & MS_RDONLY)
+ return ERR_PTR(-EROFS);
+
+ WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
+ journal = EXT4_SB(sb)->s_journal;
+ if (!journal)
+ return ext4_get_nojournal();
+ /*
+ * Special case here: if the journal has aborted behind our
+ * backs (eg. EIO in the commit thread), then we still need to
+ * take the FS itself readonly cleanly.
+ */
+ if (is_journal_aborted(journal)) {
+ ext4_abort(sb, "Detected aborted journal");
+ return ERR_PTR(-EROFS);
+ }
+ return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line);
+}
+
+int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+{
+ struct super_block *sb;
+ int err;
+ int rc;
+
+ if (!ext4_handle_valid(handle)) {
+ ext4_put_nojournal(handle);
+ return 0;
+ }
+ sb = handle->h_transaction->t_journal->j_private;
+ err = handle->h_err;
+ rc = jbd2_journal_stop(handle);
+
+ if (!err)
+ err = rc;
+ if (err)
+ __ext4_std_error(sb, where, line, err);
+ return err;
+}
+
+void ext4_journal_abort_handle(const char *caller, unsigned int line,
+ const char *err_fn, struct buffer_head *bh,
+ handle_t *handle, int err)
+{
+ char nbuf[16];
+ const char *errstr = ext4_decode_error(NULL, err, nbuf);
+
+ BUG_ON(!ext4_handle_valid(handle));
+
+ if (bh)
+ BUFFER_TRACE(bh, "abort");
+
+ if (!handle->h_err)
+ handle->h_err = err;
+
+ if (is_handle_aborted(handle))
+ return;
+
+ printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
+ caller, line, errstr, err_fn);
+
+ jbd2_journal_abort_handle(handle);
+}
+
int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
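ext4_get_nojournal()/ext4_put_nojournal() above store a small reference count directly in the task's journal_info pointer when the filesystem has no journal. A standalone sketch of that counter-in-a-pointer trick; thread_slot models current->journal_info and all names are illustrative.

#include <assert.h>

typedef void *fake_handle_t;

static fake_handle_t thread_slot;	/* models current->journal_info */

#define MAX_REF_COUNT 4096UL

static fake_handle_t get_nojournal(void)
{
	unsigned long ref = (unsigned long)thread_slot;

	assert(ref < MAX_REF_COUNT);
	thread_slot = (fake_handle_t)(ref + 1);
	return thread_slot;
}

static void put_nojournal(void)
{
	unsigned long ref = (unsigned long)thread_slot;

	assert(ref > 0);
	thread_slot = (fake_handle_t)(ref - 1);
}

int main(void)
{
	get_nojournal();	/* slot becomes (void *)1 */
	get_nojournal();	/* slot becomes (void *)2 */
	put_nojournal();
	put_nojournal();	/* back to NULL */
	return thread_slot != 0;
}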
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 7177f9b21cb2..4c216b1bf20c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -59,12 +59,6 @@
#define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \
EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
- * generous. We can grow the delete transaction later if necessary. */
-
-#define EXT4_DELETE_TRANS_BLOCKS(sb) (2 * EXT4_DATA_TRANS_BLOCKS(sb) + 64)
-
/* Define an arbitrary limit for the amount of data we will anticipate
* writing to any given transaction. For unbounded transactions such as
* write(2) and truncate(2) we can write more than this, but we always
@@ -110,6 +104,36 @@
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
+static inline int ext4_jbd2_credits_xattr(struct inode *inode)
+{
+ int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
+
+ /*
+ * In case of inline data, we may push out the data to a block,
+ * so we need to reserve credits for this eventuality
+ */
+ if (ext4_has_inline_data(inode))
+ credits += ext4_writepage_trans_blocks(inode) + 1;
+ return credits;
+}
+
+
+/*
+ * Ext4 handle operation types -- for logging purposes
+ */
+#define EXT4_HT_MISC 0
+#define EXT4_HT_INODE 1
+#define EXT4_HT_WRITE_PAGE 2
+#define EXT4_HT_MAP_BLOCKS 3
+#define EXT4_HT_DIR 4
+#define EXT4_HT_TRUNCATE 5
+#define EXT4_HT_QUOTA 6
+#define EXT4_HT_RESIZE 7
+#define EXT4_HT_MIGRATE 8
+#define EXT4_HT_MOVE_EXTENTS 9
+#define EXT4_HT_XATTR 10
+#define EXT4_HT_MAX 11
+
/**
* struct ext4_journal_cb_entry - Base structure for callback information.
*
@@ -234,7 +258,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
#define ext4_handle_dirty_super(handle, sb) \
__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
-handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+ int type, int nblocks);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -268,9 +293,17 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
return 1;
}
-static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
+#define ext4_journal_start_sb(sb, type, nblocks) \
+ __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks))
+
+#define ext4_journal_start(inode, type, nblocks) \
+ __ext4_journal_start((inode), __LINE__, (type), (nblocks))
+
+static inline handle_t *__ext4_journal_start(struct inode *inode,
+ unsigned int line, int type,
+ int nblocks)
{
- return ext4_journal_start_sb(inode->i_sb, nblocks);
+ return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks);
}
#define ext4_journal_stop(handle) \
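With this change ext4_journal_start() becomes a macro that records the call site and an EXT4_HT_* operation type and forwards both to a single implementation. A sketch of that wrapper pattern under assumed names; start_op()/__start_op() are not the ext4 functions.

#include <stdio.h>

enum op_type { OP_MISC, OP_TRUNCATE, OP_XATTR };

static int __start_op(unsigned int line, int type, int credits)
{
	/* the real code passes type/line on to jbd2 for tracing */
	printf("start: line=%u type=%d credits=%d\n", line, type, credits);
	return 0;
}

#define start_op(type, credits) __start_op(__LINE__, (type), (credits))

int main(void)
{
	return start_op(OP_TRUNCATE, 8);
}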
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 5ae1674ec12f..28dd8eeea6a9 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -112,7 +112,7 @@ static int ext4_split_extent_at(handle_t *handle,
int flags);
static int ext4_find_delayed_extent(struct inode *inode,
- struct ext4_ext_cache *newex);
+ struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
@@ -714,7 +714,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
- ext4_ext_invalidate_cache(inode);
return 0;
}
@@ -725,6 +724,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_extent_header *eh;
struct buffer_head *bh;
short int depth, i, ppos = 0, alloc = 0;
+ int ret;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
@@ -752,12 +752,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
path[ppos].p_ext = NULL;
bh = sb_getblk(inode->i_sb, path[ppos].p_block);
- if (unlikely(!bh))
+ if (unlikely(!bh)) {
+ ret = -ENOMEM;
goto err;
+ }
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, block,
path[ppos].p_block);
- if (bh_submit_read(bh) < 0) {
+ ret = bh_submit_read(bh);
+ if (ret < 0) {
put_bh(bh);
goto err;
}
@@ -768,13 +771,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
put_bh(bh);
EXT4_ERROR_INODE(inode,
"ppos %d > depth %d", ppos, depth);
+ ret = -EIO;
goto err;
}
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
i--;
- if (ext4_ext_check_block(inode, eh, i, bh))
+ ret = ext4_ext_check_block(inode, eh, i, bh);
+ if (ret < 0)
goto err;
}
@@ -796,7 +801,7 @@ err:
ext4_ext_drop_refs(path);
if (alloc)
kfree(path);
- return ERR_PTR(-EIO);
+ return ERR_PTR(ret);
}
/*
@@ -950,8 +955,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
goto cleanup;
}
bh = sb_getblk(inode->i_sb, newblock);
- if (!bh) {
- err = -EIO;
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
@@ -1023,8 +1028,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
oldblock = newblock;
newblock = ablocks[--a];
bh = sb_getblk(inode->i_sb, newblock);
- if (!bh) {
- err = -EIO;
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
@@ -1136,11 +1141,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
return err;
bh = sb_getblk(inode->i_sb, newblock);
- if (!bh) {
- err = -EIO;
- ext4_std_error(inode->i_sb, err);
- return err;
- }
+ if (unlikely(!bh))
+ return -ENOMEM;
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
@@ -1960,7 +1962,6 @@ cleanup:
ext4_ext_drop_refs(npath);
kfree(npath);
}
- ext4_ext_invalidate_cache(inode);
return err;
}
@@ -1969,8 +1970,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
struct ext4_ext_path *path = NULL;
- struct ext4_ext_cache newex;
struct ext4_extent *ex;
+ struct extent_status es;
ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
int exists, depth = 0, err = 0;
@@ -2044,37 +2045,47 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
BUG_ON(end <= start);
if (!exists) {
- newex.ec_block = start;
- newex.ec_len = end - start;
- newex.ec_start = 0;
+ es.es_lblk = start;
+ es.es_len = end - start;
+ es.es_pblk = 0;
} else {
- newex.ec_block = le32_to_cpu(ex->ee_block);
- newex.ec_len = ext4_ext_get_actual_len(ex);
- newex.ec_start = ext4_ext_pblock(ex);
+ es.es_lblk = le32_to_cpu(ex->ee_block);
+ es.es_len = ext4_ext_get_actual_len(ex);
+ es.es_pblk = ext4_ext_pblock(ex);
if (ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
/*
- * Find delayed extent and update newex accordingly. We call
- * it even in !exists case to find out whether newex is the
+ * Find delayed extent and update es accordingly. We call
+ * it even in !exists case to find out whether es is the
* last existing extent or not.
*/
- next_del = ext4_find_delayed_extent(inode, &newex);
+ next_del = ext4_find_delayed_extent(inode, &es);
if (!exists && next_del) {
exists = 1;
flags |= FIEMAP_EXTENT_DELALLOC;
}
up_read(&EXT4_I(inode)->i_data_sem);
- if (unlikely(newex.ec_len == 0)) {
- EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
+ if (unlikely(es.es_len == 0)) {
+ EXT4_ERROR_INODE(inode, "es.es_len == 0");
err = -EIO;
break;
}
- /* This is possible iff next == next_del == EXT_MAX_BLOCKS */
- if (next == next_del) {
+ /*
+ * This is possible iff next == next_del == EXT_MAX_BLOCKS.
+ * We need to check next == EXT_MAX_BLOCKS because an extent can
+ * carry both unwritten and delayed status: when a delayed-allocated
+ * range is later allocated by fallocate, the status tree tracks
+ * both in a single extent.
+ *
+ * So we could return an unwritten and delayed extent, and its
+ * block is equal to 'next'.
+ */
+ if (next == next_del && next == EXT_MAX_BLOCKS) {
flags |= FIEMAP_EXTENT_LAST;
if (unlikely(next_del != EXT_MAX_BLOCKS ||
next != EXT_MAX_BLOCKS)) {
@@ -2089,9 +2100,9 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
if (exists) {
err = fiemap_fill_next_extent(fieinfo,
- (__u64)newex.ec_block << blksize_bits,
- (__u64)newex.ec_start << blksize_bits,
- (__u64)newex.ec_len << blksize_bits,
+ (__u64)es.es_lblk << blksize_bits,
+ (__u64)es.es_pblk << blksize_bits,
+ (__u64)es.es_len << blksize_bits,
flags);
if (err < 0)
break;
@@ -2101,7 +2112,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
}
}
- block = newex.ec_block + newex.ec_len;
+ block = es.es_lblk + es.es_len;
}
if (path) {
@@ -2112,21 +2123,6 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
return err;
}
-static void
-ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
- __u32 len, ext4_fsblk_t start)
-{
- struct ext4_ext_cache *cex;
- BUG_ON(len == 0);
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- trace_ext4_ext_put_in_cache(inode, block, len, start);
- cex = &EXT4_I(inode)->i_cached_extent;
- cex->ec_block = block;
- cex->ec_len = len;
- cex->ec_start = start;
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-}
-
/*
* ext4_ext_put_gap_in_cache:
* calculate boundaries of the gap that the requested block fits into
@@ -2143,9 +2139,10 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ex = path[depth].p_ext;
if (ex == NULL) {
- /* there is no extent yet, so gap is [0;-] */
- lblock = 0;
- len = EXT_MAX_BLOCKS;
+ /*
+ * there is no extent yet, so gap is [0;-] and we
+ * don't cache it
+ */
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
@@ -2154,6 +2151,9 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
block,
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
+ if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+ ext4_es_insert_extent(inode, lblock, len, ~0,
+ EXTENT_STATUS_HOLE);
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
ext4_lblk_t next;
@@ -2167,58 +2167,15 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
block);
BUG_ON(next == lblock);
len = next - lblock;
+ if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+ ext4_es_insert_extent(inode, lblock, len, ~0,
+ EXTENT_STATUS_HOLE);
} else {
lblock = len = 0;
BUG();
}
ext_debug(" -> %u:%lu\n", lblock, len);
- ext4_ext_put_in_cache(inode, lblock, len, 0);
-}
-
-/*
- * ext4_ext_in_cache()
- * Checks to see if the given block is in the cache.
- * If it is, the cached extent is stored in the given
- * cache extent pointer.
- *
- * @inode: The files inode
- * @block: The block to look for in the cache
- * @ex: Pointer where the cached extent will be stored
- * if it contains block
- *
- * Return 0 if cache is invalid; 1 if the cache is valid
- */
-static int
-ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
- struct ext4_extent *ex)
-{
- struct ext4_ext_cache *cex;
- int ret = 0;
-
- /*
- * We borrow i_block_reservation_lock to protect i_cached_extent
- */
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- cex = &EXT4_I(inode)->i_cached_extent;
-
- /* has cache valid data? */
- if (cex->ec_len == 0)
- goto errout;
-
- if (in_range(block, cex->ec_block, cex->ec_len)) {
- ex->ee_block = cpu_to_le32(cex->ec_block);
- ext4_ext_store_pblock(ex, cex->ec_start);
- ex->ee_len = cpu_to_le16(cex->ec_len);
- ext_debug("%u cached by %u:%u:%llu\n",
- block,
- cex->ec_block, cex->ec_len, cex->ec_start);
- ret = 1;
- }
-errout:
- trace_ext4_ext_in_cache(inode, block, ret);
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
- return ret;
}
/*
@@ -2653,13 +2610,11 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
- handle = ext4_journal_start(inode, depth + 1);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
again:
- ext4_ext_invalidate_cache(inode);
-
trace_ext4_ext_remove_space(inode, start, depth);
/*
@@ -3519,19 +3474,19 @@ out:
*
* Return 1 if there is a delalloc block in the range, otherwise 0.
*/
-static int ext4_find_delalloc_range(struct inode *inode,
- ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end)
+int ext4_find_delalloc_range(struct inode *inode,
+ ext4_lblk_t lblk_start,
+ ext4_lblk_t lblk_end)
{
struct extent_status es;
- es.start = lblk_start;
- ext4_es_find_extent(inode, &es);
- if (es.len == 0)
+ ext4_es_find_delayed_extent(inode, lblk_start, &es);
+ if (es.es_len == 0)
return 0; /* there is no delay extent in this tree */
- else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+ else if (es.es_lblk <= lblk_start &&
+ lblk_start < es.es_lblk + es.es_len)
return 1;
- else if (lblk_start <= es.start && es.start <= lblk_end)
+ else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
return 1;
else
return 0;
@@ -3656,6 +3611,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
if (ext4_should_dioread_nolock(inode))
map->m_flags |= EXT4_MAP_UNINIT;
goto out;
@@ -3677,8 +3633,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
* repeat fallocate creation request
* we already have an unwritten extent
*/
- if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
+ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
goto map_out;
+ }
/* buffered READ or buffered write_begin() lookup */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3898,35 +3856,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_lblk, map->m_len, inode->i_ino);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
- /* check in cache */
- if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
- if (!newex.ee_start_lo && !newex.ee_start_hi) {
- if ((sbi->s_cluster_ratio > 1) &&
- ext4_find_delalloc_cluster(inode, map->m_lblk))
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-
- if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
- /*
- * block isn't allocated yet and
- * user doesn't want to allocate it
- */
- goto out2;
- }
- /* we should allocate requested block */
- } else {
- /* block is already allocated */
- if (sbi->s_cluster_ratio > 1)
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
- newblock = map->m_lblk
- - le32_to_cpu(newex.ee_block)
- + ext4_ext_pblock(&newex);
- /* number of remaining blocks in the extent */
- allocated = ext4_ext_get_actual_len(&newex) -
- (map->m_lblk - le32_to_cpu(newex.ee_block));
- goto out;
- }
- }
-
/* find extent for this block */
path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
if (IS_ERR(path)) {
@@ -3973,15 +3902,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
ee_block, ee_len, newblock);
- /*
- * Do not put uninitialized extent
- * in the cache
- */
- if (!ext4_ext_is_uninitialized(ex)) {
- ext4_ext_put_in_cache(inode, ee_block,
- ee_len, ee_start);
+ if (!ext4_ext_is_uninitialized(ex))
goto out;
- }
+
allocated = ext4_ext_handle_uninitialized_extents(
handle, inode, map, path, flags,
allocated, newblock);
@@ -4002,7 +3925,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* put just found gap into cache to speed up
* subsequent requests
*/
- ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+ if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
+ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
@@ -4108,6 +4032,7 @@ got_allocated_blocks:
/* Mark uninitialized */
if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
ext4_ext_mark_uninitialized(&newex);
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
/*
* io_end structure was created for every IO write to an
* uninitialized extent. To avoid unnecessary conversion,
@@ -4241,10 +4166,9 @@ got_allocated_blocks:
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an uninitialized extent.
*/
- if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
- ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
- } else
+ else
ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > map->m_len)
@@ -4284,7 +4208,7 @@ void ext4_ext_truncate(struct inode *inode)
* probably first extent we're gonna free will be last in block
*/
err = ext4_writepage_trans_blocks(inode);
- handle = ext4_journal_start(inode, err);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err);
if (IS_ERR(handle))
return;
@@ -4303,7 +4227,6 @@ void ext4_ext_truncate(struct inode *inode)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_ext_invalidate_cache(inode);
ext4_discard_preallocations(inode);
@@ -4386,7 +4309,7 @@ static void ext4_falloc_update_inode(struct inode *inode,
*/
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
handle_t *handle;
loff_t new_size;
unsigned int max_blocks;
@@ -4397,13 +4320,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
struct ext4_map_blocks map;
unsigned int credits, blkbits = inode->i_blkbits;
- /*
- * currently supporting (pre)allocate mode for extent-based
- * files _only_
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return -EOPNOTSUPP;
-
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@@ -4415,6 +4331,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
return ret;
+ /*
+ * currently supporting (pre)allocate mode for extent-based
+ * files _only_
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ return -EOPNOTSUPP;
+
trace_ext4_fallocate_enter(inode, offset, len, mode);
map.m_lblk = offset >> blkbits;
/*
@@ -4451,7 +4374,8 @@ retry:
while (ret >= 0 && ret < max_blocks) {
map.m_lblk = map.m_lblk + ret;
map.m_len = max_blocks = max_blocks - ret;
- handle = ext4_journal_start(inode, credits);
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+ credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
@@ -4459,11 +4383,11 @@ retry:
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret <= 0) {
#ifdef EXT4FS_DEBUG
- WARN_ON(ret <= 0);
- printk(KERN_ERR "%s: ext4_ext_map_blocks "
- "returned error inode#%lu, block=%u, "
- "max_blocks=%u", __func__,
- inode->i_ino, map.m_lblk, max_blocks);
+ ext4_warning(inode->i_sb,
+ "inode #%lu: block %u: len %u: "
+ "ext4_ext_map_blocks returned %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret);
#endif
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
@@ -4529,21 +4453,19 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
- handle = ext4_journal_start(inode, credits);
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
- if (ret <= 0) {
- WARN_ON(ret <= 0);
- ext4_msg(inode->i_sb, KERN_ERR,
- "%s:%d: inode #%lu: block %u: len %u: "
- "ext4_ext_map_blocks returned %d",
- __func__, __LINE__, inode->i_ino, map.m_lblk,
- map.m_len, ret);
- }
+ if (ret <= 0)
+ ext4_warning(inode->i_sb,
+ "inode #%lu: block %u: len %u: "
+ "ext4_ext_map_blocks returned %d",
+ inode->i_ino, map.m_lblk,
+ map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2 )
@@ -4553,42 +4475,48 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
}
/*
- * If newex is not existing extent (newex->ec_start equals zero) find
- * delayed extent at start of newex and update newex accordingly and
+ * If newes is not an existing extent (newes->es_pblk equals zero), find
+ * a delayed extent at the start of newes, update newes accordingly and
* return start of the next delayed extent.
*
- * If newex is existing extent (newex->ec_start is not equal zero)
+ * If newes is an existing extent (newes->es_pblk is not equal to zero)
* return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
- * extent found. Leave newex unmodified.
+ * extent found. Leave newes unmodified.
*/
static int ext4_find_delayed_extent(struct inode *inode,
- struct ext4_ext_cache *newex)
+ struct extent_status *newes)
{
struct extent_status es;
- ext4_lblk_t next_del;
+ ext4_lblk_t block, next_del;
- es.start = newex->ec_block;
- next_del = ext4_es_find_extent(inode, &es);
+ ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
- if (newex->ec_start == 0) {
+ if (newes->es_pblk == 0) {
/*
- * No extent in extent-tree contains block @newex->ec_start,
+ * No extent in extent-tree contains block @newes->es_pblk,
* then the block may stay in 1)a hole or 2)delayed-extent.
*/
- if (es.len == 0)
+ if (es.es_len == 0)
/* A hole found. */
return 0;
- if (es.start > newex->ec_block) {
+ if (es.es_lblk > newes->es_lblk) {
/* A hole found. */
- newex->ec_len = min(es.start - newex->ec_block,
- newex->ec_len);
+ newes->es_len = min(es.es_lblk - newes->es_lblk,
+ newes->es_len);
return 0;
}
- newex->ec_len = es.start + es.len - newex->ec_block;
+ newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
}
+ block = newes->es_lblk + newes->es_len;
+ ext4_es_find_delayed_extent(inode, block, &es);
+ if (es.es_len == 0)
+ next_del = EXT_MAX_BLOCKS;
+ else
+ next_del = es.es_lblk;
+
return next_del;
}
/* fiemap flags we can handle specified here */
@@ -4643,7 +4571,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
*/
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
@@ -4709,7 +4637,7 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
- handle = ext4_journal_start(inode, credits);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto out_dio;
@@ -4786,14 +4714,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
goto out;
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_ext_invalidate_cache(inode);
ext4_discard_preallocations(inode);
err = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
- ext4_ext_invalidate_cache(inode);
ext4_discard_preallocations(inode);
if (IS_SYNC(inode))
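In the gap-caching hunks above, a hole discovered during a lookup is inserted into the per-inode extent status tree only if no delayed block falls inside it. A small standalone model of that guard, using an array in place of the rbtree; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum status { ES_HOLE, ES_DELAYED, ES_WRITTEN };

struct es { unsigned lblk, len; enum status st; };

static struct es tree[16];
static int nr;

static bool range_has_delayed(unsigned lblk, unsigned end)
{
	for (int i = 0; i < nr; i++)
		if (tree[i].st == ES_DELAYED &&
		    tree[i].lblk <= end &&
		    lblk < tree[i].lblk + tree[i].len)
			return true;
	return false;
}

static void cache_gap(unsigned lblk, unsigned len)
{
	/* mirrors: if (!ext4_find_delalloc_range(...)) insert HOLE */
	if (!range_has_delayed(lblk, lblk + len - 1))
		tree[nr++] = (struct es){ lblk, len, ES_HOLE };
}

int main(void)
{
	tree[nr++] = (struct es){ 100, 10, ES_DELAYED };
	cache_gap(50, 20);	/* cached: no overlap with delayed */
	cache_gap(95, 20);	/* skipped: overlaps the delayed extent */
	printf("%d extents cached\n", nr);	/* prints 2 */
	return 0;
}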
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 564d981a2fcc..95796a1b7522 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -23,40 +23,53 @@
* (e.g. Reservation space warning), and provide extent-level locking.
* Delay extent tree is the first step to achieve this goal. It is
* originally built by Yongqiang Yang. At that time it was called the delay
- * extent tree, whose goal is only track delay extent in memory to
+ * extent tree, whose goal is only to track delayed extents in memory to
* simplify the implementation of fiemap and bigalloc, and introduce
* lseek SEEK_DATA/SEEK_HOLE support. That is why it is still called
- * delay extent tree at the following comment. But for better
- * understand what it does, it has been rename to extent status tree.
+ * delay extent tree at the first commit. For a better understanding
+ * of what it does, it has since been renamed to extent status tree.
*
- * Currently the first step has been done. All delay extents are
- * tracked in the tree. It maintains the delay extent when a delay
- * allocation is issued, and the delay extent is written out or
+ * Step1:
+ * Currently the first step has been done. All delayed extents are
+ * tracked in the tree. It maintains the delayed extent when a delayed
+ * allocation is issued, and the delayed extent is written out or
* invalidated. Therefore the implementation of fiemap and bigalloc
* are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
*
* The following comment describes the implementation of extent
* status tree and future works.
+ *
+ * Step2:
+ * In this step all extent status is tracked by the extent status tree.
+ * Thus, we can first try to look up a block mapping in this tree before
+ * searching the extent tree. Hence, the single-extent cache can be
+ * removed because the extent status tree can do a better job. Extents
+ * in the status tree are loaded on demand. Therefore, the extent status
+ * tree may not contain all of the extents in a file. Meanwhile we
+ * define a shrinker to reclaim memory from the extent status tree
+ * because a fragmented extent tree will make the status tree cost too
+ * much memory. Written/unwritten/hole extents in the tree will be
+ * reclaimed by this shrinker when we are under high memory pressure.
+ * Delayed extents will not be reclaimed because fiemap, bigalloc, and
+ * seek_data/hole need them.
*/
/*
- * extents status tree implementation for ext4.
+ * Extent status tree implementation for ext4.
*
*
* ==========================================================================
- * Extents status encompass delayed extents and extent locks
+ * Extent status tree tracks all extent status.
*
- * 1. Why delayed extent implementation ?
+ * 1. Why do we need to implement an extent status tree?
*
- * Without delayed extent, ext4 identifies a delayed extent by looking
+ * Without an extent status tree, ext4 identifies a delayed extent by looking
* up page cache, this has several deficiencies - complicated, buggy,
* and inefficient code.
*
- * FIEMAP, SEEK_HOLE/DATA, bigalloc, punch hole and writeout all need
- * to know if a block or a range of blocks are belonged to a delayed
- * extent.
+ * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
+ * block or a range of blocks belongs to a delayed extent.
*
- * Let us have a look at how they do without delayed extents implementation.
+ * Let us have a look at how they do this without the extent status tree.
* -- FIEMAP
* FIEMAP looks up page cache to identify delayed allocations from holes.
*
@@ -68,47 +81,48 @@
* already under delayed allocation or not to determine whether
* quota reserving is needed for the cluster.
*
- * -- punch hole
- * punch hole looks up page cache to identify a delayed extent.
- *
* -- writeout
* Writeout looks up whole page cache to see if a buffer is
* mapped. If there are not very many delayed buffers, then it is
* time consuming.
*
- * With delayed extents implementation, FIEMAP, SEEK_HOLE/DATA,
+ * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
* bigalloc and writeout can figure out if a block or a range of
* blocks is under delayed allocation(belonged to a delayed extent) or
- * not by searching the delayed extent tree.
+ * not by searching the extent status tree.
*
*
* ==========================================================================
- * 2. ext4 delayed extents impelmentation
+ * 2. Ext4 extent status tree implementation
+ *
+ * -- extent
+ * An extent is a range of blocks which are contiguous logically and
+ * physically. Unlike an extent in the extent tree, this extent in ext4
+ * is an in-memory struct; there is no corresponding on-disk data. There
+ * is no limit on the length of an extent, so an extent can contain as
+ * many blocks as are contiguous logically and physically.
*
- * -- delayed extent
- * A delayed extent is a range of blocks which are contiguous
- * logically and under delayed allocation. Unlike extent in
- * ext4, delayed extent in ext4 is a in-memory struct, there is
- * no corresponding on-disk data. There is no limit on length of
- * delayed extent, so a delayed extent can contain as many blocks
- * as they are contiguous logically.
+ * -- extent status tree
+ * Every inode has an extent status tree and all allocated blocks
+ * are added to the tree with their status. The extents in the
+ * tree are ordered by logical block number.
*
- * -- delayed extent tree
- * Every inode has a delayed extent tree and all under delayed
- * allocation blocks are added to the tree as delayed extents.
- * Delayed extents in the tree are ordered by logical block no.
+ * -- operations on an extent status tree
+ * There are three important operations on an extent status tree:
+ * finding the next extent, adding an extent (a range of blocks) and
+ * removing an extent.
*
- * -- operations on a delayed extent tree
- * There are three operations on a delayed extent tree: find next
- * delayed extent, adding a space(a range of blocks) and removing
- * a space.
+ * -- race on an extent status tree
+ * Extent status tree is protected by inode->i_es_lock.
*
- * -- race on a delayed extent tree
- * Delayed extent tree is protected inode->i_es_lock.
+ * -- memory consumption
+ * A fragmented extent tree will make the extent status tree cost too
+ * much memory. Hence, we will reclaim written/unwritten/hole extents
+ * from the tree under heavy memory pressure.
*
*
* ==========================================================================
- * 3. performance analysis
+ * 3. Performance analysis
+ *
* -- overhead
* 1. There is a cached extent for write access, so if writes are
* not very random, adding space operations take O(1) time.
@@ -120,18 +134,25 @@
*
* ==========================================================================
* 4. TODO list
- * -- Track all extent status
*
- * -- Improve get block process
+ * -- Refactor delayed space reservation
*
* -- Extent-level locking
*/
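Editorial aside, not part of the patch: the comment above describes extents kept ordered by logical block number and a lookup that returns the first delayed extent covering or following a given block, skipping any non-delayed extents along the way. A minimal standalone user-space sketch of that behaviour, using a plain sorted array instead of the kernel's rbtree and invented names throughout, looks like this:

#include <stdio.h>

struct es { unsigned lblk, len; int delayed; };

/* Return the index of the first delayed extent whose end is >= lblk, -1 if none. */
static int find_delayed(const struct es *v, int n, unsigned lblk)
{
	int i;

	for (i = 0; i < n; i++) {
		/* the kernel walks an rbtree; a linear scan is enough here */
		if (v[i].lblk + v[i].len - 1 < lblk)
			continue;
		if (v[i].delayed)
			return i;
	}
	return -1;
}

int main(void)
{
	struct es tree[] = {
		{ 0,  4, 0 },	/* written [0..3]   */
		{ 4,  2, 1 },	/* delayed [4..5]   */
		{ 10, 8, 1 },	/* delayed [10..17] */
	};
	int i = find_delayed(tree, 3, 6);

	if (i >= 0)
		printf("next delayed extent: [%u/%u)\n",
		       tree[i].lblk, tree[i].len);
	return 0;
}

For block 6 this skips the written extent and the delayed extent that ends before 6 and reports [10/8). The kernel version additionally checks the cached extent in tree->cache_es before falling back to the tree walk.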
static struct kmem_cache *ext4_es_cachep;
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t end);
+static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
+ int nr_to_scan);
+
int __init ext4_init_es(void)
{
- ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+ ext4_es_cachep = kmem_cache_create("ext4_extent_status",
+ sizeof(struct extent_status),
+ 0, (SLAB_RECLAIM_ACCOUNT), NULL);
if (ext4_es_cachep == NULL)
return -ENOMEM;
return 0;
@@ -161,7 +182,9 @@ static void ext4_es_print_tree(struct inode *inode)
while (node) {
struct extent_status *es;
es = rb_entry(node, struct extent_status, rb_node);
- printk(KERN_DEBUG " [%u/%u)", es->start, es->len);
+ printk(KERN_DEBUG " [%u/%u) %llu %llx",
+ es->es_lblk, es->es_len,
+ ext4_es_pblock(es), ext4_es_status(es));
node = rb_next(node);
}
printk(KERN_DEBUG "\n");
@@ -170,10 +193,10 @@ static void ext4_es_print_tree(struct inode *inode)
#define ext4_es_print_tree(inode)
#endif
-static inline ext4_lblk_t extent_status_end(struct extent_status *es)
+static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
- BUG_ON(es->start + es->len < es->start);
- return es->start + es->len - 1;
+ BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
+ return es->es_lblk + es->es_len - 1;
}
/*
@@ -181,25 +204,25 @@ static inline ext4_lblk_t extent_status_end(struct extent_status *es)
* it can't be found, try to find next extent.
*/
static struct extent_status *__es_tree_search(struct rb_root *root,
- ext4_lblk_t offset)
+ ext4_lblk_t lblk)
{
struct rb_node *node = root->rb_node;
struct extent_status *es = NULL;
while (node) {
es = rb_entry(node, struct extent_status, rb_node);
- if (offset < es->start)
+ if (lblk < es->es_lblk)
node = node->rb_left;
- else if (offset > extent_status_end(es))
+ else if (lblk > ext4_es_end(es))
node = node->rb_right;
else
return es;
}
- if (es && offset < es->start)
+ if (es && lblk < es->es_lblk)
return es;
- if (es && offset > extent_status_end(es)) {
+ if (es && lblk > ext4_es_end(es)) {
node = rb_next(&es->rb_node);
return node ? rb_entry(node, struct extent_status, rb_node) :
NULL;
@@ -209,79 +232,124 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
}
/*
- * ext4_es_find_extent: find the 1st delayed extent covering @es->start
- * if it exists, otherwise, the next extent after @es->start.
+ * ext4_es_find_delayed_extent: find the 1st delayed extent covering @lblk
+ * if it exists; otherwise, the next delayed extent after @lblk.
*
* @inode: the inode which owns delayed extents
+ * @lblk: the offset where we start to search
* @es: delayed extent that we found
- *
- * Returns the first block of the next extent after es, otherwise
- * EXT_MAX_BLOCKS if no delay extent is found.
- * Delayed extent is returned via @es.
*/
-ext4_lblk_t ext4_es_find_extent(struct inode *inode, struct extent_status *es)
+void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es)
{
struct ext4_es_tree *tree = NULL;
struct extent_status *es1 = NULL;
struct rb_node *node;
- ext4_lblk_t ret = EXT_MAX_BLOCKS;
- trace_ext4_es_find_extent_enter(inode, es->start);
+ BUG_ON(es == NULL);
+ trace_ext4_es_find_delayed_extent_enter(inode, lblk);
read_lock(&EXT4_I(inode)->i_es_lock);
tree = &EXT4_I(inode)->i_es_tree;
- /* find delay extent in cache firstly */
+ /* find extent in cache first */
+ es->es_lblk = es->es_len = es->es_pblk = 0;
if (tree->cache_es) {
es1 = tree->cache_es;
- if (in_range(es->start, es1->start, es1->len)) {
- es_debug("%u cached by [%u/%u)\n",
- es->start, es1->start, es1->len);
+ if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u) %llu %llx\n",
+ lblk, es1->es_lblk, es1->es_len,
+ ext4_es_pblock(es1), ext4_es_status(es1));
goto out;
}
}
- es->len = 0;
- es1 = __es_tree_search(&tree->root, es->start);
+ es1 = __es_tree_search(&tree->root, lblk);
out:
- if (es1) {
- tree->cache_es = es1;
- es->start = es1->start;
- es->len = es1->len;
- node = rb_next(&es1->rb_node);
- if (node) {
+ if (es1 && !ext4_es_is_delayed(es1)) {
+ while ((node = rb_next(&es1->rb_node)) != NULL) {
es1 = rb_entry(node, struct extent_status, rb_node);
- ret = es1->start;
+ if (ext4_es_is_delayed(es1))
+ break;
}
}
+ if (es1 && ext4_es_is_delayed(es1)) {
+ tree->cache_es = es1;
+ es->es_lblk = es1->es_lblk;
+ es->es_len = es1->es_len;
+ es->es_pblk = es1->es_pblk;
+ }
+
read_unlock(&EXT4_I(inode)->i_es_lock);
- trace_ext4_es_find_extent_exit(inode, es, ret);
- return ret;
+ ext4_es_lru_add(inode);
+ trace_ext4_es_find_delayed_extent_exit(inode, es);
}
static struct extent_status *
-ext4_es_alloc_extent(ext4_lblk_t start, ext4_lblk_t len)
+ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+ ext4_fsblk_t pblk)
{
struct extent_status *es;
es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
if (es == NULL)
return NULL;
- es->start = start;
- es->len = len;
+ es->es_lblk = lblk;
+ es->es_len = len;
+ es->es_pblk = pblk;
+
+ /*
+ * We don't count delayed extents because we never try to reclaim them
+ */
+ if (!ext4_es_is_delayed(es)) {
+ EXT4_I(inode)->i_es_lru_nr++;
+ percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+ }
+
return es;
}
-static void ext4_es_free_extent(struct extent_status *es)
+static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
+ /* Decrease the lru counter when this es is not delayed */
+ if (!ext4_es_is_delayed(es)) {
+ BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
+ EXT4_I(inode)->i_es_lru_nr--;
+ percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+ }
+
kmem_cache_free(ext4_es_cachep, es);
}
+/*
+ * Check whether or not two extents can be merged
+ * Conditions:
+ * - logical block numbers are contiguous
+ * - physical block numbers are contiguous
+ * - status is equal
+ */
+static int ext4_es_can_be_merged(struct extent_status *es1,
+ struct extent_status *es2)
+{
+ if (es1->es_lblk + es1->es_len != es2->es_lblk)
+ return 0;
+
+ if (ext4_es_status(es1) != ext4_es_status(es2))
+ return 0;
+
+ if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
+ (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
+ return 0;
+
+ return 1;
+}
+
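For illustration only (not kernel code), here is a self-contained model of the three merge conditions checked by ext4_es_can_be_merged() above; the struct layout and flag names below are simplified assumptions, not the kernel types:

#include <stdio.h>

#define WRITTEN   0x1
#define UNWRITTEN 0x2
#define DELAYED   0x4

struct es { unsigned long long lblk, len, pblk; unsigned status; };

static int can_merge(const struct es *a, const struct es *b)
{
	if (a->lblk + a->len != b->lblk)		/* logically contiguous */
		return 0;
	if (a->status != b->status)			/* same status */
		return 0;
	if ((a->status & (WRITTEN | UNWRITTEN)) &&
	    a->pblk + a->len != b->pblk)		/* physically contiguous */
		return 0;
	return 1;
}

int main(void)
{
	struct es a = { 0, 8, 100, WRITTEN };
	struct es b = { 8, 4, 108, WRITTEN };
	struct es c = { 8, 4, 200, WRITTEN };

	printf("a+b: %d, a+c: %d\n", can_merge(&a, &b), can_merge(&a, &c));
	return 0;
}

With these sample extents, a and b merge (contiguous in both the logical and physical space, same status) while a and c do not (physical gap).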
static struct extent_status *
-ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
+ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct extent_status *es1;
struct rb_node *node;
@@ -290,10 +358,10 @@ ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
return es;
es1 = rb_entry(node, struct extent_status, rb_node);
- if (es->start == extent_status_end(es1) + 1) {
- es1->len += es->len;
+ if (ext4_es_can_be_merged(es1, es)) {
+ es1->es_len += es->es_len;
rb_erase(&es->rb_node, &tree->root);
- ext4_es_free_extent(es);
+ ext4_es_free_extent(inode, es);
es = es1;
}
@@ -301,8 +369,9 @@ ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
}
static struct extent_status *
-ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
+ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct extent_status *es1;
struct rb_node *node;
@@ -311,69 +380,57 @@ ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
return es;
es1 = rb_entry(node, struct extent_status, rb_node);
- if (es1->start == extent_status_end(es) + 1) {
- es->len += es1->len;
+ if (ext4_es_can_be_merged(es, es1)) {
+ es->es_len += es1->es_len;
rb_erase(node, &tree->root);
- ext4_es_free_extent(es1);
+ ext4_es_free_extent(inode, es1);
}
return es;
}
-static int __es_insert_extent(struct ext4_es_tree *tree, ext4_lblk_t offset,
- ext4_lblk_t len)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node **p = &tree->root.rb_node;
struct rb_node *parent = NULL;
struct extent_status *es;
- ext4_lblk_t end = offset + len - 1;
-
- BUG_ON(end < offset);
- es = tree->cache_es;
- if (es && offset == (extent_status_end(es) + 1)) {
- es_debug("cached by [%u/%u)\n", es->start, es->len);
- es->len += len;
- es = ext4_es_try_to_merge_right(tree, es);
- goto out;
- } else if (es && es->start == end + 1) {
- es_debug("cached by [%u/%u)\n", es->start, es->len);
- es->start = offset;
- es->len += len;
- es = ext4_es_try_to_merge_left(tree, es);
- goto out;
- } else if (es && es->start <= offset &&
- end <= extent_status_end(es)) {
- es_debug("cached by [%u/%u)\n", es->start, es->len);
- goto out;
- }
while (*p) {
parent = *p;
es = rb_entry(parent, struct extent_status, rb_node);
- if (offset < es->start) {
- if (es->start == end + 1) {
- es->start = offset;
- es->len += len;
- es = ext4_es_try_to_merge_left(tree, es);
+ if (newes->es_lblk < es->es_lblk) {
+ if (ext4_es_can_be_merged(newes, es)) {
+ /*
+ * Here we can modify es_lblk directly
+ * because the extents do not overlap.
+ */
+ es->es_lblk = newes->es_lblk;
+ es->es_len += newes->es_len;
+ if (ext4_es_is_written(es) ||
+ ext4_es_is_unwritten(es))
+ ext4_es_store_pblock(es,
+ newes->es_pblk);
+ es = ext4_es_try_to_merge_left(inode, es);
goto out;
}
p = &(*p)->rb_left;
- } else if (offset > extent_status_end(es)) {
- if (offset == extent_status_end(es) + 1) {
- es->len += len;
- es = ext4_es_try_to_merge_right(tree, es);
+ } else if (newes->es_lblk > ext4_es_end(es)) {
+ if (ext4_es_can_be_merged(es, newes)) {
+ es->es_len += newes->es_len;
+ es = ext4_es_try_to_merge_right(inode, es);
goto out;
}
p = &(*p)->rb_right;
} else {
- if (extent_status_end(es) <= end)
- es->len = offset - es->start + len;
- goto out;
+ BUG_ON(1);
+ return -EINVAL;
}
}
- es = ext4_es_alloc_extent(offset, len);
+ es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
+ newes->es_pblk);
if (!es)
return -ENOMEM;
rb_link_node(&es->rb_node, parent, p);
@@ -385,85 +442,166 @@ out:
}
/*
- * ext4_es_insert_extent() adds a space to a delayed extent tree.
- * Caller holds inode->i_es_lock.
+ * ext4_es_insert_extent() adds a space to an extent status tree.
*
* ext4_es_insert_extent is called by ext4_da_write_begin and
* ext4_es_remove_extent.
*
* Return 0 on success, error code on failure.
*/
-int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t offset,
- ext4_lblk_t len)
+int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned long long status)
{
- struct ext4_es_tree *tree;
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
int err = 0;
- trace_ext4_es_insert_extent(inode, offset, len);
- es_debug("add [%u/%u) to extent status tree of inode %lu\n",
- offset, len, inode->i_ino);
+ es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
+ lblk, len, pblk, status, inode->i_ino);
+
+ if (!len)
+ return 0;
+
+ BUG_ON(end < lblk);
+
+ newes.es_lblk = lblk;
+ newes.es_len = len;
+ ext4_es_store_pblock(&newes, pblk);
+ ext4_es_store_status(&newes, status);
+ trace_ext4_es_insert_extent(inode, &newes);
write_lock(&EXT4_I(inode)->i_es_lock);
- tree = &EXT4_I(inode)->i_es_tree;
- err = __es_insert_extent(tree, offset, len);
+ err = __es_remove_extent(inode, lblk, end);
+ if (err != 0)
+ goto error;
+ err = __es_insert_extent(inode, &newes);
+
+error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_lru_add(inode);
ext4_es_print_tree(inode);
return err;
}
/*
- * ext4_es_remove_extent() removes a space from a delayed extent tree.
- * Caller holds inode->i_es_lock.
+ * ext4_es_lookup_extent() looks up an extent in the extent status tree.
*
- * Return 0 on success, error code on failure.
+ * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
+ *
+ * Return: 1 if found, 0 if not
*/
-int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
- ext4_lblk_t len)
+int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es)
{
- struct rb_node *node;
struct ext4_es_tree *tree;
+ struct extent_status *es1 = NULL;
+ struct rb_node *node;
+ int found = 0;
+
+ trace_ext4_es_lookup_extent_enter(inode, lblk);
+ es_debug("lookup extent in block %u\n", lblk);
+
+ tree = &EXT4_I(inode)->i_es_tree;
+ read_lock(&EXT4_I(inode)->i_es_lock);
+
+ /* find extent in cache first */
+ es->es_lblk = es->es_len = es->es_pblk = 0;
+ if (tree->cache_es) {
+ es1 = tree->cache_es;
+ if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ lblk, es1->es_lblk, es1->es_len);
+ found = 1;
+ goto out;
+ }
+ }
+
+ node = tree->root.rb_node;
+ while (node) {
+ es1 = rb_entry(node, struct extent_status, rb_node);
+ if (lblk < es1->es_lblk)
+ node = node->rb_left;
+ else if (lblk > ext4_es_end(es1))
+ node = node->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+out:
+ if (found) {
+ BUG_ON(!es1);
+ es->es_lblk = es1->es_lblk;
+ es->es_len = es1->es_len;
+ es->es_pblk = es1->es_pblk;
+ }
+
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_es_lru_add(inode);
+ trace_ext4_es_lookup_extent_exit(inode, es, found);
+ return found;
+}
+
+static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t end)
+{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ struct rb_node *node;
struct extent_status *es;
struct extent_status orig_es;
- ext4_lblk_t len1, len2, end;
+ ext4_lblk_t len1, len2;
+ ext4_fsblk_t block;
int err = 0;
- trace_ext4_es_remove_extent(inode, offset, len);
- es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
- offset, len, inode->i_ino);
-
- end = offset + len - 1;
- BUG_ON(end < offset);
- write_lock(&EXT4_I(inode)->i_es_lock);
- tree = &EXT4_I(inode)->i_es_tree;
- es = __es_tree_search(&tree->root, offset);
+ es = __es_tree_search(&tree->root, lblk);
if (!es)
goto out;
- if (es->start > end)
+ if (es->es_lblk > end)
goto out;
/* Simply invalidate cache_es. */
tree->cache_es = NULL;
- orig_es.start = es->start;
- orig_es.len = es->len;
- len1 = offset > es->start ? offset - es->start : 0;
- len2 = extent_status_end(es) > end ?
- extent_status_end(es) - end : 0;
+ orig_es.es_lblk = es->es_lblk;
+ orig_es.es_len = es->es_len;
+ orig_es.es_pblk = es->es_pblk;
+
+ len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
+ len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
if (len1 > 0)
- es->len = len1;
+ es->es_len = len1;
if (len2 > 0) {
if (len1 > 0) {
- err = __es_insert_extent(tree, end + 1, len2);
+ struct extent_status newes;
+
+ newes.es_lblk = end + 1;
+ newes.es_len = len2;
+ if (ext4_es_is_written(&orig_es) ||
+ ext4_es_is_unwritten(&orig_es)) {
+ block = ext4_es_pblock(&orig_es) +
+ orig_es.es_len - len2;
+ ext4_es_store_pblock(&newes, block);
+ }
+ ext4_es_store_status(&newes, ext4_es_status(&orig_es));
+ err = __es_insert_extent(inode, &newes);
if (err) {
- es->start = orig_es.start;
- es->len = orig_es.len;
+ es->es_lblk = orig_es.es_lblk;
+ es->es_len = orig_es.es_len;
goto out;
}
} else {
- es->start = end + 1;
- es->len = len2;
+ es->es_lblk = end + 1;
+ es->es_len = len2;
+ if (ext4_es_is_written(es) ||
+ ext4_es_is_unwritten(es)) {
+ block = orig_es.es_pblk + orig_es.es_len - len2;
+ ext4_es_store_pblock(es, block);
+ }
}
goto out;
}
@@ -476,10 +614,10 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
es = NULL;
}
- while (es && extent_status_end(es) <= end) {
+ while (es && ext4_es_end(es) <= end) {
node = rb_next(&es->rb_node);
rb_erase(&es->rb_node, &tree->root);
- ext4_es_free_extent(es);
+ ext4_es_free_extent(inode, es);
if (!node) {
es = NULL;
break;
@@ -487,14 +625,166 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
es = rb_entry(node, struct extent_status, rb_node);
}
- if (es && es->start < end + 1) {
- len1 = extent_status_end(es) - end;
- es->start = end + 1;
- es->len = len1;
+ if (es && es->es_lblk < end + 1) {
+ ext4_lblk_t orig_len = es->es_len;
+
+ len1 = ext4_es_end(es) - end;
+ es->es_lblk = end + 1;
+ es->es_len = len1;
+ if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
+ block = es->es_pblk + orig_len - len1;
+ ext4_es_store_pblock(es, block);
+ }
}
out:
+ return err;
+}
+
+/*
+ * ext4_es_remove_extent() removes a space from an extent status tree.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ ext4_lblk_t end;
+ int err = 0;
+
+ trace_ext4_es_remove_extent(inode, lblk, len);
+ es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+ lblk, len, inode->i_ino);
+
+ if (!len)
+ return err;
+
+ end = lblk + len - 1;
+ BUG_ON(end < lblk);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+ err = __es_remove_extent(inode, lblk, end);
write_unlock(&EXT4_I(inode)->i_es_lock);
ext4_es_print_tree(inode);
return err;
}
+
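Editorial sketch, not part of the patch: when the removed range falls inside an existing extent, __es_remove_extent() above keeps len1 blocks on the left, keeps len2 blocks on the right starting at end + 1, and advances the right part's physical start by the same amount. A standalone model of that arithmetic, with hypothetical simplified types:

#include <stdio.h>

struct es { unsigned lblk, len; unsigned long long pblk; };

/* Split extent e around the removed logical range [lblk, end]. */
static void split(const struct es *e, unsigned lblk, unsigned end,
		  struct es *left, struct es *right)
{
	unsigned es_end = e->lblk + e->len - 1;
	unsigned len1 = lblk > e->lblk ? lblk - e->lblk : 0;
	unsigned len2 = es_end > end ? es_end - end : 0;

	left->lblk = e->lblk;
	left->len = len1;
	left->pblk = e->pblk;

	right->lblk = end + 1;
	right->len = len2;
	right->pblk = e->pblk + (e->len - len2);
}

int main(void)
{
	struct es e = { 10, 20, 1000 };	/* logical [10..29], physical 1000.. */
	struct es l, r;

	split(&e, 15, 19, &l, &r);	/* remove logical blocks [15..19] */
	printf("left  [%u/%u) @%llu\n", l.lblk, l.len, l.pblk);
	printf("right [%u/%u) @%llu\n", r.lblk, r.len, r.pblk);
	return 0;
}

For the example inputs the left piece is [10/5) at physical 1000 and the right piece is [20/10) at physical 1010, matching how the kernel computes the new physical start as es_pblk + es_len - len2.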
+static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct ext4_sb_info *sbi = container_of(shrink,
+ struct ext4_sb_info, s_es_shrinker);
+ struct ext4_inode_info *ei;
+ struct list_head *cur, *tmp, scanned;
+ int nr_to_scan = sc->nr_to_scan;
+ int ret, nr_shrunk = 0;
+
+ ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+ trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
+
+ if (!nr_to_scan)
+ return ret;
+
+ INIT_LIST_HEAD(&scanned);
+
+ spin_lock(&sbi->s_es_lru_lock);
+ list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+ list_move_tail(cur, &scanned);
+
+ ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
+
+ read_lock(&ei->i_es_lock);
+ if (ei->i_es_lru_nr == 0) {
+ read_unlock(&ei->i_es_lock);
+ continue;
+ }
+ read_unlock(&ei->i_es_lock);
+
+ write_lock(&ei->i_es_lock);
+ ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+ write_unlock(&ei->i_es_lock);
+
+ nr_shrunk += ret;
+ nr_to_scan -= ret;
+ if (nr_to_scan == 0)
+ break;
+ }
+ list_splice_tail(&scanned, &sbi->s_es_lru);
+ spin_unlock(&sbi->s_es_lru_lock);
+
+ ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+ trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
+ return ret;
+}
+
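A standalone model (illustrative names only, no kernel lists or locking) of the scan-budget loop used by ext4_es_shrink() above: walk the LRU of inodes, reclaim from each one, and stop as soon as nr_to_scan is exhausted:

#include <stdio.h>

struct inode_model { int reclaimable; };	/* cached non-delayed extents */

static int shrink(struct inode_model *lru, int n, int nr_to_scan)
{
	int i, nr_shrunk = 0;

	for (i = 0; i < n && nr_to_scan > 0; i++) {
		int take = lru[i].reclaimable < nr_to_scan ?
			   lru[i].reclaimable : nr_to_scan;

		lru[i].reclaimable -= take;
		nr_to_scan -= take;
		nr_shrunk += take;
	}
	return nr_shrunk;
}

int main(void)
{
	struct inode_model lru[] = { { 3 }, { 0 }, { 10 }, { 5 } };

	printf("reclaimed %d extents\n", shrink(lru, 4, 8));
	return 0;
}

With a budget of 8 it reclaims 3 entries from the first inode and 5 from the third, then stops; the kernel additionally rotates the scanned inodes to the tail of the LRU.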
+void ext4_es_register_shrinker(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi;
+
+ sbi = EXT4_SB(sb);
+ INIT_LIST_HEAD(&sbi->s_es_lru);
+ spin_lock_init(&sbi->s_es_lru_lock);
+ sbi->s_es_shrinker.shrink = ext4_es_shrink;
+ sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&sbi->s_es_shrinker);
+}
+
+void ext4_es_unregister_shrinker(struct super_block *sb)
+{
+ unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
+}
+
+void ext4_es_lru_add(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ spin_lock(&sbi->s_es_lru_lock);
+ if (list_empty(&ei->i_es_lru))
+ list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
+ else
+ list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
+ spin_unlock(&sbi->s_es_lru_lock);
+}
+
+void ext4_es_lru_del(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ spin_lock(&sbi->s_es_lru_lock);
+ if (!list_empty(&ei->i_es_lru))
+ list_del_init(&ei->i_es_lru);
+ spin_unlock(&sbi->s_es_lru_lock);
+}
+
+static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
+ int nr_to_scan)
+{
+ struct inode *inode = &ei->vfs_inode;
+ struct ext4_es_tree *tree = &ei->i_es_tree;
+ struct rb_node *node;
+ struct extent_status *es;
+ int nr_shrunk = 0;
+
+ if (ei->i_es_lru_nr == 0)
+ return 0;
+
+ node = rb_first(&tree->root);
+ while (node != NULL) {
+ es = rb_entry(node, struct extent_status, rb_node);
+ node = rb_next(&es->rb_node);
+ /*
+ * We can't reclaim delayed extents from the status tree because
+ * fiemap, bigalloc, and seek_data/hole need to use them.
+ */
+ if (!ext4_es_is_delayed(es)) {
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(inode, es);
+ nr_shrunk++;
+ if (--nr_to_scan == 0)
+ break;
+ }
+ }
+ tree->cache_es = NULL;
+ return nr_shrunk;
+}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 077f82db092a..f190dfe969da 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -20,10 +20,24 @@
#define es_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
+/*
+ * These flags live in the high bits of extent_status.es_pblk
+ */
+#define EXTENT_STATUS_WRITTEN (1ULL << 63)
+#define EXTENT_STATUS_UNWRITTEN (1ULL << 62)
+#define EXTENT_STATUS_DELAYED (1ULL << 61)
+#define EXTENT_STATUS_HOLE (1ULL << 60)
+
+#define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \
+ EXTENT_STATUS_UNWRITTEN | \
+ EXTENT_STATUS_DELAYED | \
+ EXTENT_STATUS_HOLE)
+
struct extent_status {
struct rb_node rb_node;
- ext4_lblk_t start; /* first block extent covers */
- ext4_lblk_t len; /* length of extent in block */
+ ext4_lblk_t es_lblk; /* first logical block extent covers */
+ ext4_lblk_t es_len; /* length of extent in blocks */
+ ext4_fsblk_t es_pblk; /* first physical block */
};
struct ext4_es_tree {
@@ -35,11 +49,69 @@ extern int __init ext4_init_es(void);
extern void ext4_exit_es(void);
extern void ext4_es_init_tree(struct ext4_es_tree *tree);
-extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t start,
+extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned long long status);
+extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t len);
-extern ext4_lblk_t ext4_es_find_extent(struct inode *inode,
- struct extent_status *es);
+extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es);
+extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es);
+
+static inline int ext4_es_is_written(struct extent_status *es)
+{
+ return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0;
+}
+
+static inline int ext4_es_is_unwritten(struct extent_status *es)
+{
+ return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0;
+}
+
+static inline int ext4_es_is_delayed(struct extent_status *es)
+{
+ return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0;
+}
+
+static inline int ext4_es_is_hole(struct extent_status *es)
+{
+ return (es->es_pblk & EXTENT_STATUS_HOLE) != 0;
+}
+
+static inline ext4_fsblk_t ext4_es_status(struct extent_status *es)
+{
+ return (es->es_pblk & EXTENT_STATUS_FLAGS);
+}
+
+static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
+{
+ return (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+}
+
+static inline void ext4_es_store_pblock(struct extent_status *es,
+ ext4_fsblk_t pb)
+{
+ ext4_fsblk_t block;
+
+ block = (pb & ~EXTENT_STATUS_FLAGS) |
+ (es->es_pblk & EXTENT_STATUS_FLAGS);
+ es->es_pblk = block;
+}
+
+static inline void ext4_es_store_status(struct extent_status *es,
+ unsigned long long status)
+{
+ ext4_fsblk_t block;
+
+ block = (status & EXTENT_STATUS_FLAGS) |
+ (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+ es->es_pblk = block;
+}
+
+extern void ext4_es_register_shrinker(struct super_block *sb);
+extern void ext4_es_unregister_shrinker(struct super_block *sb);
+extern void ext4_es_lru_add(struct inode *inode);
+extern void ext4_es_lru_del(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
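To make the encoding above concrete, here is a small user-space demonstration of how the physical block number and the four status bits share the single 64-bit es_pblk field; it mirrors the store/extract helpers added in this header, but it is a sketch and not kernel code:

#include <stdio.h>
#include <stdint.h>

#define ES_WRITTEN	(1ULL << 63)
#define ES_UNWRITTEN	(1ULL << 62)
#define ES_DELAYED	(1ULL << 61)
#define ES_HOLE		(1ULL << 60)
#define ES_FLAGS	(ES_WRITTEN | ES_UNWRITTEN | ES_DELAYED | ES_HOLE)

/* Pack a physical block number and a status value into one field. */
static uint64_t store(uint64_t pblk, uint64_t status)
{
	return (pblk & ~ES_FLAGS) | (status & ES_FLAGS);
}

int main(void)
{
	uint64_t es_pblk = store(123456, ES_UNWRITTEN);

	printf("pblk   = %llu\n",
	       (unsigned long long)(es_pblk & ~ES_FLAGS));
	printf("status = %#llx\n",
	       (unsigned long long)(es_pblk & ES_FLAGS));
	return 0;
}

Running it prints pblk = 123456 and status = 0x4000000000000000, showing that the two pieces of information round-trip independently in the one field.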
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 405565a62277..64848b595b24 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -167,7 +167,7 @@ static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
/*
@@ -240,7 +240,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
handle_t *handle;
int err;
- handle = ext4_journal_start_sb(sb, 1);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
@@ -464,10 +464,8 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
* If there is a delayed extent at this offset,
* it will be treated as data.
*/
- es.start = last;
- (void)ext4_es_find_extent(inode, &es);
- if (last >= es.start &&
- last < es.start + es.len) {
+ ext4_es_find_delayed_extent(inode, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
if (last != start)
dataoff = last << blkbits;
break;
@@ -549,11 +547,9 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
* If there is a delayed extent at this offset,
* we will skip this extent.
*/
- es.start = last;
- (void)ext4_es_find_extent(inode, &es);
- if (last >= es.start &&
- last < es.start + es.len) {
- last = es.start + es.len;
+ ext4_es_find_delayed_extent(inode, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ last = es.es_lblk + es.es_len;
holeoff = last << blkbits;
continue;
}
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index fa8e4911d354..3d586f02883e 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -155,11 +155,11 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
/* Check to see if the seed is all zero's */
if (hinfo->seed) {
for (i = 0; i < 4; i++) {
- if (hinfo->seed[i])
+ if (hinfo->seed[i]) {
+ memcpy(buf, hinfo->seed, sizeof(buf));
break;
+ }
}
- if (i < 4)
- memcpy(buf, hinfo->seed, sizeof(buf));
}
switch (hinfo->hash_version) {
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3f32c8012447..32fd2b9075dd 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -634,8 +634,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
- const struct qstr *qstr, __u32 goal, uid_t *owner)
+struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
+ umode_t mode, const struct qstr *qstr,
+ __u32 goal, uid_t *owner, int handle_type,
+ unsigned int line_no, int nblocks)
{
struct super_block *sb;
struct buffer_head *inode_bitmap_bh = NULL;
@@ -725,6 +727,15 @@ repeat_in_this_group:
"inode=%lu", ino + 1);
continue;
}
+ if (!handle) {
+ BUG_ON(nblocks <= 0);
+ handle = __ext4_journal_start_sb(dir->i_sb, line_no,
+ handle_type, nblocks);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto fail;
+ }
+ }
BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
if (err)
@@ -1017,17 +1028,17 @@ iget_failed:
inode = NULL;
bad_orphan:
ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
- printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+ printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
bit, (unsigned long long)bitmap_bh->b_blocknr,
ext4_test_bit(bit, bitmap_bh->b_data));
- printk(KERN_NOTICE "inode=%p\n", inode);
+ printk(KERN_WARNING "inode=%p\n", inode);
if (inode) {
- printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+ printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
is_bad_inode(inode));
- printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+ printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
NEXT_ORPHAN(inode));
- printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
- printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
+ printk(KERN_WARNING "max_ino=%lu\n", max_ino);
+ printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
/* Avoid freeing blocks if we got a bad deleted inode */
if (inode->i_nlink == 0)
inode->i_blocks = 0;
@@ -1137,7 +1148,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
goto out;
- handle = ext4_journal_start_sb(sb, 1);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 20862f96e8ae..c541ab8b64dd 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
+ int ret = -EIO;
*err = 0;
/* i_data is not going away, no lock needed */
@@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
goto no_block;
while (--depth) {
bh = sb_getblk(sb, le32_to_cpu(p->key));
- if (unlikely(!bh))
+ if (unlikely(!bh)) {
+ ret = -ENOMEM;
goto failure;
+ }
if (!bh_uptodate_or_lock(bh)) {
if (bh_submit_read(bh) < 0) {
@@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
return NULL;
failure:
- *err = -EIO;
+ *err = ret;
no_block:
return p;
}
@@ -355,9 +358,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
* for the first direct block
*/
new_blocks[index] = current_block;
- printk(KERN_INFO "%s returned more blocks than "
+ WARN(1, KERN_INFO "%s returned more blocks than "
"requested\n", __func__);
- WARN_ON(1);
break;
}
}
@@ -471,7 +473,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
*/
bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
if (unlikely(!bh)) {
- err = -EIO;
+ err = -ENOMEM;
goto failed;
}
@@ -789,7 +791,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
if (final_size > inode->i_size) {
/* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
@@ -849,7 +851,7 @@ locked:
int err;
/* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
/* This is really bad luck. We've written the data
* but cannot extend i_size. Bail out and pretend
@@ -948,7 +950,8 @@ static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
- result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
+ result = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
+ ext4_blocks_for_truncate(inode));
if (!IS_ERR(result))
return result;
@@ -1515,3 +1518,243 @@ out_stop:
trace_ext4_truncate_exit(inode);
}
+static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *parent_bh, __le32 *i_data,
+ int level, ext4_lblk_t first,
+ ext4_lblk_t count, int max)
+{
+ struct buffer_head *bh = NULL;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ int ret = 0;
+ int i, inc;
+ ext4_lblk_t offset;
+ __le32 blk;
+
+ inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
+ for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
+ if (offset >= count + first)
+ break;
+ if (*i_data == 0 || (offset + inc) <= first)
+ continue;
+ blk = *i_data;
+ if (level > 0) {
+ ext4_lblk_t first2;
+ bh = sb_bread(inode->i_sb, blk);
+ if (!bh) {
+ EXT4_ERROR_INODE_BLOCK(inode, blk,
+ "Read failure");
+ return -EIO;
+ }
+ first2 = (first > offset) ? first - offset : 0;
+ ret = free_hole_blocks(handle, inode, bh,
+ (__le32 *)bh->b_data, level - 1,
+ first2, count - offset,
+ inode->i_sb->s_blocksize >> 2);
+ if (ret) {
+ brelse(bh);
+ goto err;
+ }
+ }
+ if (level == 0 ||
+ (bh && all_zeroes((__le32 *)bh->b_data,
+ (__le32 *)bh->b_data + addr_per_block))) {
+ ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
+ *i_data = 0;
+ }
+ brelse(bh);
+ bh = NULL;
+ }
+
+err:
+ return ret;
+}
+
+static int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
+ ext4_lblk_t first, ext4_lblk_t stop)
+{
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ int level, ret = 0;
+ int num = EXT4_NDIR_BLOCKS;
+ ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
+ __le32 *i_data = EXT4_I(inode)->i_data;
+
+ count = stop - first;
+ for (level = 0; level < 4; level++, max *= addr_per_block) {
+ if (first < max) {
+ ret = free_hole_blocks(handle, inode, NULL, i_data,
+ level, first, count, num);
+ if (ret)
+ goto err;
+ if (count > max - first)
+ count -= max - first;
+ else
+ break;
+ first = 0;
+ } else {
+ first -= max;
+ }
+ i_data += num;
+ if (level == 0) {
+ num = 1;
+ max = 1;
+ }
+ }
+
+err:
+ return ret;
+}
+
+int ext4_ind_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ ext4_lblk_t first_block, stop_block;
+ struct address_space *mapping = inode->i_mapping;
+ handle_t *handle = NULL;
+ loff_t first_page, last_page, page_len;
+ loff_t first_page_offset, last_page_offset;
+ int err = 0;
+
+ /*
+ * Write out all dirty pages to avoid race conditions,
+ * then release them.
+ */
+ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ err = filemap_write_and_wait_range(mapping,
+ offset, offset + length - 1);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&inode->i_mutex);
+ /* It's not possible to punch a hole in an append-only file */
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
+ err = -EPERM;
+ goto out_mutex;
+ }
+ if (IS_SWAPFILE(inode)) {
+ err = -ETXTBSY;
+ goto out_mutex;
+ }
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+ goto out_mutex;
+
+ /*
+ * If the hole extends beyond i_size, set the hole
+ * to end after the page that contains i_size
+ */
+ if (offset + length > inode->i_size) {
+ length = inode->i_size +
+ PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+ offset;
+ }
+
+ first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+
+ first_page_offset = first_page << PAGE_CACHE_SHIFT;
+ last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+ /* Now release the pages */
+ if (last_page_offset > first_page_offset) {
+ truncate_pagecache_range(inode, first_page_offset,
+ last_page_offset - 1);
+ }
+
+ /* Wait for all existing DIO work; newcomers will block on i_mutex */
+ inode_dio_wait(inode);
+
+ handle = start_transaction(inode);
+ if (IS_ERR(handle))
+ goto out_mutex;
+
+ /*
+ * Now we need to zero out the non-page-aligned data in the
+ * pages at the start and tail of the hole, and unmap the buffer
+ * heads for the block-aligned regions of the page that were
+ * completely zeroed.
+ */
+ if (first_page > last_page) {
+ /*
+ * If the file space being truncated is contained within a page,
+ * just zero out and unmap the middle of that page
+ */
+ err = ext4_discard_partial_page_buffers(handle,
+ mapping, offset, length, 0);
+ if (err)
+ goto out;
+ } else {
+ /*
+ * Zero out and unmap the partial page that contains
+ * the start of the hole
+ */
+ page_len = first_page_offset - offset;
+ if (page_len > 0) {
+ err = ext4_discard_partial_page_buffers(handle, mapping,
+ offset, page_len, 0);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * Zero out and unmap the partial page that contains
+ * the end of the hole
+ */
+ page_len = offset + length - last_page_offset;
+ if (page_len > 0) {
+ err = ext4_discard_partial_page_buffers(handle, mapping,
+ last_page_offset, page_len, 0);
+ if (err)
+ goto out;
+ }
+ }
+
+ /*
+ * If i_size is contained in the last page, we need to
+ * unmap and zero the partial page after i_size
+ */
+ if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
+ inode->i_size % PAGE_CACHE_SIZE != 0) {
+ page_len = PAGE_CACHE_SIZE -
+ (inode->i_size & (PAGE_CACHE_SIZE - 1));
+ if (page_len > 0) {
+ err = ext4_discard_partial_page_buffers(handle,
+ mapping, inode->i_size, page_len, 0);
+ if (err)
+ goto out;
+ }
+ }
+
+ first_block = (offset + sb->s_blocksize - 1) >>
+ EXT4_BLOCK_SIZE_BITS(sb);
+ stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+ if (first_block >= stop_block)
+ goto out;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+
+ err = ext4_es_remove_extent(inode, first_block,
+ stop_block - first_block);
+ err = ext4_free_hole_blocks(handle, inode, first_block, stop_block);
+
+ ext4_discard_preallocations(inode);
+
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+ up_write(&EXT4_I(inode)->i_data_sem);
+
+out:
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+out_mutex:
+ mutex_unlock(&inode->i_mutex);
+
+ return err;
+}
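Editorial sketch (not part of the patch): the alignment arithmetic in ext4_ind_punch_hole() above decides which whole pages can simply be dropped from the page cache and which whole blocks can be freed; the partial pages and blocks at either edge must be zeroed instead. A standalone example, assuming 4 KiB pages and 1 KiB blocks purely for illustration:

#include <stdio.h>

#define PG_SHIFT	12
#define PG_SIZE		(1ULL << PG_SHIFT)
#define BLK_SHIFT	10

int main(void)
{
	unsigned long long offset = 3000, length = 20000;

	/* first whole page inside the hole, first page past the hole */
	unsigned long long first_page = (offset + PG_SIZE - 1) >> PG_SHIFT;
	unsigned long long last_page = (offset + length) >> PG_SHIFT;

	/* first whole block inside the hole, first block past the hole */
	unsigned long long first_block =
		(offset + (1ULL << BLK_SHIFT) - 1) >> BLK_SHIFT;
	unsigned long long stop_block = (offset + length) >> BLK_SHIFT;

	printf("whole pages : [%llu, %llu)\n", first_page, last_page);
	printf("whole blocks: [%llu, %llu)\n", first_block, stop_block);
	return 0;
}

For the example offsets it reports whole pages [1, 5) and whole blocks [3, 22); everything outside those ranges but inside the hole is handled by the partial-page zeroing paths above.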
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 387c47c6cda9..c0fd1a123f7d 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -545,7 +545,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
return ret;
retry:
- handle = ext4_journal_start(inode, needed_blocks);
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
@@ -657,7 +657,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
* The possible write could happen in the inode,
* so try to reserve the space in inode first.
*/
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
@@ -853,7 +853,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
if (ret)
return ret;
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
@@ -1188,7 +1188,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
data_bh = sb_getblk(inode->i_sb, map.m_pblk);
if (!data_bh) {
- error = -EIO;
+ error = -ENOMEM;
goto out_restore;
}
@@ -1298,7 +1298,7 @@ int ext4_read_inline_dir(struct file *filp,
int i, stored;
struct ext4_dir_entry_2 *de;
struct super_block *sb;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int ret, inline_size = 0;
struct ext4_iloc iloc;
void *dir_buf = NULL;
@@ -1770,7 +1770,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
needed_blocks = ext4_writepage_trans_blocks(inode);
- handle = ext4_journal_start(inode, needed_blocks);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks);
if (IS_ERR(handle))
return;
@@ -1862,7 +1862,7 @@ int ext4_convert_inline_data(struct inode *inode)
if (error)
return error;
- handle = ext4_journal_start(inode, needed_blocks);
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto out_free;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cd818d8bb221..9ea0cde3fa9e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -132,10 +132,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
-static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create);
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
@@ -238,7 +234,8 @@ void ext4_evict_inode(struct inode *inode)
* protection against it
*/
sb_start_intwrite(inode->i_sb);
- handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
+ ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
@@ -346,7 +343,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
spin_lock(&ei->i_block_reservation_lock);
trace_ext4_da_update_reserve_space(inode, used, quota_claim);
if (unlikely(used > ei->i_reserved_data_blocks)) {
- ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
+ ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
"with only %d reserved data blocks",
__func__, inode->i_ino, used,
ei->i_reserved_data_blocks);
@@ -355,10 +352,12 @@ void ext4_da_update_reserve_space(struct inode *inode,
}
if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
- ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
- "with only %d reserved metadata blocks\n", __func__,
- inode->i_ino, ei->i_allocated_meta_blocks,
- ei->i_reserved_meta_blocks);
+ ext4_warning(inode->i_sb, "ino %lu, allocated %d "
+ "with only %d reserved metadata blocks "
+ "(releasing %d blocks with reserved %d data blocks)",
+ inode->i_ino, ei->i_allocated_meta_blocks,
+ ei->i_reserved_meta_blocks, used,
+ ei->i_reserved_data_blocks);
WARN_ON(1);
ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
}
@@ -508,12 +507,33 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
+ struct extent_status es;
int retval;
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
+
+ /* Look up the extent status tree first */
+ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+ if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
+ map->m_pblk = ext4_es_pblock(&es) +
+ map->m_lblk - es.es_lblk;
+ map->m_flags |= ext4_es_is_written(&es) ?
+ EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
+ retval = es.es_len - (map->m_lblk - es.es_lblk);
+ if (retval > map->m_len)
+ retval = map->m_len;
+ map->m_len = retval;
+ } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
+ retval = 0;
+ } else {
+ BUG_ON(1);
+ }
+ goto found;
+ }
+
/*
* Try to see if we can get the block without requesting a new
* file system block.
@@ -527,20 +547,27 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
+ if (retval > 0) {
+ int ret;
+ unsigned long long status;
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ ext4_find_delalloc_range(inode, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
+ status |= EXTENT_STATUS_DELAYED;
+ ret = ext4_es_insert_extent(inode, map->m_lblk,
+ map->m_len, map->m_pblk, status);
+ if (ret < 0)
+ retval = ret;
+ }
if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
up_read((&EXT4_I(inode)->i_data_sem));
+found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret;
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- /* delayed alloc may be allocated by fallocate and
- * coverted to initialized by directIO.
- * we need to handle delayed extent here.
- */
- down_write((&EXT4_I(inode)->i_data_sem));
- goto delayed_mapped;
- }
- ret = check_block_validity(inode, map);
+ int ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
@@ -560,16 +587,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
return retval;
/*
- * When we call get_blocks without the create flag, the
- * BH_Unwritten flag could have gotten set if the blocks
- * requested were part of a uninitialized extent. We need to
- * clear this flag now that we are committed to convert all or
- * part of the uninitialized extent to be an initialized
- * extent. This is because we need to avoid the combination
- * of BH_Unwritten and BH_Mapped flags being simultaneously
- * set on the buffer_head.
+ * Here we clear m_flags because after allocating a new extent,
+ * it will be set again.
*/
- map->m_flags &= ~EXT4_MAP_UNWRITTEN;
+ map->m_flags &= ~EXT4_MAP_FLAGS;
/*
* New blocks allocate and/or writing to uninitialized extent
@@ -615,18 +636,23 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
- if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret;
-delayed_mapped:
- /* delayed allocation blocks has been allocated */
- ret = ext4_es_remove_extent(inode, map->m_lblk,
- map->m_len);
- if (ret < 0)
- retval = ret;
- }
+ if (retval > 0) {
+ int ret;
+ unsigned long long status;
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ ext4_find_delalloc_range(inode, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
+ status |= EXTENT_STATUS_DELAYED;
+ ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status);
+ if (ret < 0)
+ retval = ret;
}
up_write((&EXT4_I(inode)->i_data_sem));
@@ -660,7 +686,8 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
if (map.m_len > DIO_MAX_BLOCKS)
map.m_len = DIO_MAX_BLOCKS;
dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
- handle = ext4_journal_start(inode, dio_credits);
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+ dio_credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
return ret;
@@ -707,14 +734,16 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
/* ensure we send some value back into *errp */
*errp = 0;
+ if (create && err == 0)
+ err = -ENOSPC; /* should never happen */
if (err < 0)
*errp = err;
if (err <= 0)
return NULL;
bh = sb_getblk(inode->i_sb, map.m_pblk);
- if (!bh) {
- *errp = -EIO;
+ if (unlikely(!bh)) {
+ *errp = -ENOMEM;
return NULL;
}
if (map.m_flags & EXT4_MAP_NEW) {
@@ -808,11 +837,10 @@ int ext4_walk_page_buffers(handle_t *handle,
* and the commit_write(). So doing the jbd2_journal_start at the start of
* prepare_write() is the right place.
*
- * Also, this function can nest inside ext4_writepage() ->
- * block_write_full_page(). In that case, we *know* that ext4_writepage()
- * has generated enough buffer credits to do the whole page. So we won't
- * block on the journal in that case, which is good, because the caller may
- * be PF_MEMALLOC.
+ * Also, this function can nest inside ext4_writepage(). In that case, we
+ * *know* that ext4_writepage() has generated enough buffer credits to do the
+ * whole page. So we won't block on the journal in that case, which is good,
+ * because the caller may be PF_MEMALLOC.
*
* By accident, ext4 can be reentered when a transaction is open via
* quota file writes. If we were to commit the transaction while thus
@@ -878,32 +906,40 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
flags, pagep);
if (ret < 0)
- goto out;
- if (ret == 1) {
- ret = 0;
- goto out;
- }
+ return ret;
+ if (ret == 1)
+ return 0;
}
-retry:
- handle = ext4_journal_start(inode, needed_blocks);
+ /*
+ * grab_cache_page_write_begin() can take a long time if the
+ * system is thrashing due to memory pressure, or if the page
+ * is being written back. So grab it first before we start
+ * the transaction handle. This also allows us to allocate
+ * the page (if needed) without using GFP_NOFS.
+ */
+retry_grab:
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+ unlock_page(page);
+
+retry_journal:
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
+ page_cache_release(page);
+ return PTR_ERR(handle);
}
- /* We cannot recurse into the filesystem as the transaction is already
- * started */
- flags |= AOP_FLAG_NOFS;
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
+ lock_page(page);
+ if (page->mapping != mapping) {
+ /* The page got truncated from under us */
+ unlock_page(page);
+ page_cache_release(page);
ext4_journal_stop(handle);
- ret = -ENOMEM;
- goto out;
+ goto retry_grab;
}
-
- *pagep = page;
+ wait_on_page_writeback(page);
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
@@ -918,7 +954,6 @@ retry:
if (ret) {
unlock_page(page);
- page_cache_release(page);
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
@@ -942,11 +977,14 @@ retry:
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
- }
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
-out:
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry_journal;
+ page_cache_release(page);
+ return ret;
+ }
+ *pagep = page;
return ret;
}
@@ -1256,7 +1294,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
* function is called from invalidate page, it's
* harmless to return without any action.
*/
- ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
+ ext4_warning(inode->i_sb, "ext4_da_release_space: "
"ino %lu, to_free %d with only %d reserved "
"data blocks", inode->i_ino, to_free,
ei->i_reserved_data_blocks);
@@ -1357,7 +1395,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
loff_t size = i_size_read(inode);
unsigned int len, block_start;
struct buffer_head *bh, *page_bufs = NULL;
- int journal_data = ext4_should_journal_data(inode);
sector_t pblock = 0, cur_logical = 0;
struct ext4_io_submit io_submit;
@@ -1378,7 +1415,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
- int commit_write = 0, skip_page = 0;
+ int skip_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
@@ -1400,27 +1437,9 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- /*
- * If the page does not have buffers (for
- * whatever reason), try to create them using
- * __block_write_begin. If this fails,
- * skip the page and move on.
- */
- if (!page_has_buffers(page)) {
- if (__block_write_begin(page, 0, len,
- noalloc_get_block_write)) {
- skip_page:
- unlock_page(page);
- continue;
- }
- commit_write = 1;
- }
-
bh = page_bufs = page_buffers(page);
block_start = 0;
do {
- if (!bh)
- goto skip_page;
if (map && (cur_logical >= map->m_lblk) &&
(cur_logical <= (map->m_lblk +
(map->m_len - 1)))) {
@@ -1448,33 +1467,14 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
pblock++;
} while (bh != page_bufs);
- if (skip_page)
- goto skip_page;
-
- if (commit_write)
- /* mark the buffer_heads as dirty & uptodate */
- block_commit_write(page, 0, len);
+ if (skip_page) {
+ unlock_page(page);
+ continue;
+ }
clear_page_dirty_for_io(page);
- /*
- * Delalloc doesn't support data journalling,
- * but eventually maybe we'll lift this
- * restriction.
- */
- if (unlikely(journal_data && PageChecked(page)))
- err = __ext4_journalled_writepage(page, len);
- else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
- err = ext4_bio_write_page(&io_submit, page,
- len, mpd->wbc);
- else if (buffer_uninit(page_bufs)) {
- ext4_set_bh_endio(page_bufs, inode);
- err = block_write_full_page_endio(page,
- noalloc_get_block_write,
- mpd->wbc, ext4_end_io_buffer_write);
- } else
- err = block_write_full_page(page,
- noalloc_get_block_write, mpd->wbc);
-
+ err = ext4_bio_write_page(&io_submit, page, len,
+ mpd->wbc);
if (!err)
mpd->pages_written++;
/*
@@ -1640,7 +1640,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
(unsigned long long) next,
mpd->b_size >> mpd->inode->i_blkbits, err);
ext4_msg(sb, KERN_CRIT,
- "This should not happen!! Data will be lost\n");
+ "This should not happen!! Data will be lost");
if (err == -ENOSPC)
ext4_print_free_blocks(mpd->inode);
}
@@ -1690,16 +1690,16 @@ submit_io:
*
* @mpd->lbh - extent of blocks
* @logical - logical number of the block in the file
- * @bh - bh of the block (used to access block's state)
+ * @b_state - b_state of the buffer head added
*
* the function is used to collect contiguous blocks in the same state
*/
-static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
- sector_t logical, size_t b_size,
+static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
unsigned long b_state)
{
sector_t next;
- int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
+ int blkbits = mpd->inode->i_blkbits;
+ int nrblocks = mpd->b_size >> blkbits;
/*
* XXX Don't go larger than mballoc is willing to allocate
@@ -1707,11 +1707,11 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
* mpage_da_submit_io() into this function and then call
* ext4_map_blocks() multiple times in a loop
*/
- if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
+ if (nrblocks >= (8*1024*1024 >> blkbits))
goto flush_it;
- /* check if thereserved journal credits might overflow */
- if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
+ /* check if the reserved journal credits might overflow */
+ if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
if (nrblocks >= EXT4_MAX_TRANS_DATA) {
/*
* With non-extent format we are limited by the journal
@@ -1720,16 +1720,6 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
* nrblocks. So limit nrblocks.
*/
goto flush_it;
- } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
- EXT4_MAX_TRANS_DATA) {
- /*
- * Adding the new buffer_head would make it cross the
- * allowed limit for which we have journal credit
- * reserved. So limit the new bh->b_size
- */
- b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
- mpd->inode->i_blkbits;
- /* we will do mpage_da_submit_io in the next loop */
}
}
/*
@@ -1737,7 +1727,7 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
*/
if (mpd->b_size == 0) {
mpd->b_blocknr = logical;
- mpd->b_size = b_size;
+ mpd->b_size = 1 << blkbits;
mpd->b_state = b_state & BH_FLAGS;
return;
}
@@ -1747,7 +1737,7 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
* Can we merge the block to our big extent?
*/
if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
- mpd->b_size += b_size;
+ mpd->b_size += 1 << blkbits;
return;
}
@@ -1775,6 +1765,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
struct ext4_map_blocks *map,
struct buffer_head *bh)
{
+ struct extent_status es;
int retval;
sector_t invalid_block = ~((sector_t) 0xffff);
@@ -1785,6 +1776,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
"logical block %lu\n", inode->i_ino, map->m_len,
(unsigned long) map->m_lblk);
+
+ /* Look up the extent status tree first */
+ if (ext4_es_lookup_extent(inode, iblock, &es)) {
+
+ if (ext4_es_is_hole(&es)) {
+ retval = 0;
+ down_read((&EXT4_I(inode)->i_data_sem));
+ goto add_delayed;
+ }
+
+ /*
+ * A delayed extent could be allocated by fallocate,
+ * so we need to check it.
+ */
+ if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
+ map_bh(bh, inode->i_sb, invalid_block);
+ set_buffer_new(bh);
+ set_buffer_delay(bh);
+ return 0;
+ }
+
+ map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
+ retval = es.es_len - (iblock - es.es_lblk);
+ if (retval > map->m_len)
+ retval = map->m_len;
+ map->m_len = retval;
+ if (ext4_es_is_written(&es))
+ map->m_flags |= EXT4_MAP_MAPPED;
+ else if (ext4_es_is_unwritten(&es))
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ else
+ BUG_ON(1);
+
+ return retval;
+ }
+
/*
* Try to see if we can get the block without requesting a new
* file system block.
@@ -1803,11 +1830,15 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
map->m_flags |= EXT4_MAP_FROM_CLUSTER;
retval = 0;
} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- retval = ext4_ext_map_blocks(NULL, inode, map, 0);
+ retval = ext4_ext_map_blocks(NULL, inode, map,
+ EXT4_GET_BLOCKS_NO_PUT_HOLE);
else
- retval = ext4_ind_map_blocks(NULL, inode, map, 0);
+ retval = ext4_ind_map_blocks(NULL, inode, map,
+ EXT4_GET_BLOCKS_NO_PUT_HOLE);
+add_delayed:
if (retval == 0) {
+ int ret;
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
@@ -1815,15 +1846,20 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* If the block was allocated from a previously allocated cluster,
* then we don't need to reserve it again. */
if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
- retval = ext4_da_reserve_space(inode, iblock);
- if (retval)
+ ret = ext4_da_reserve_space(inode, iblock);
+ if (ret) {
/* not enough space to reserve */
+ retval = ret;
goto out_unlock;
+ }
}
- retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
- if (retval)
+ ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ ~0, EXTENT_STATUS_DELAYED);
+ if (ret) {
+ retval = ret;
goto out_unlock;
+ }
/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
* and it should not appear on the bh->b_state.
@@ -1833,6 +1869,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
+ } else if (retval > 0) {
+ int ret;
+ unsigned long long status;
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status);
+ if (ret != 0)
+ retval = ret;
}
out_unlock:
@@ -1890,27 +1936,6 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
return 0;
}
-/*
- * This function is used as a standard get_block_t calback function
- * when there is no desire to allocate any blocks. It is used as a
- * callback function for block_write_begin() and block_write_full_page().
- * These functions should only try to map a single block at a time.
- *
- * Since this function doesn't do block allocations even if the caller
- * requests it by passing in create=1, it is critically important that
- * any caller checks to make sure that any buffer heads are returned
- * by this function are either all already mapped or marked for
- * delayed allocation before calling block_write_full_page(). Otherwise,
- * b_blocknr could be left uninitialized, and the page write functions will
- * be taken by surprise.
- */
-static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
- return _ext4_get_block(inode, iblock, bh_result, 0);
-}
-
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
get_bh(bh);
@@ -1955,7 +1980,8 @@ static int __ext4_journalled_writepage(struct page *page,
* references to buffers so we are safe */
unlock_page(page);
- handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
@@ -2035,11 +2061,12 @@ out:
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
- int ret = 0, commit_write = 0;
+ int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
+ struct ext4_io_submit io_submit;
trace_ext4_writepage(page);
size = i_size_read(inode);
@@ -2048,39 +2075,29 @@ static int ext4_writepage(struct page *page,
else
len = PAGE_CACHE_SIZE;
+ page_bufs = page_buffers(page);
/*
- * If the page does not have buffers (for whatever reason),
- * try to create them using __block_write_begin. If this
- * fails, redirty the page and move on.
+ * We cannot do block allocation or other extent handling in this
+ * function. If there are buffers needing that, we have to redirty
+ * the page. But we may reach here when we do a journal commit via
+ * journal_submit_inode_data_buffers() and in that case we must write
+ * allocated buffers to achieve data=ordered mode guarantees.
*/
- if (!page_has_buffers(page)) {
- if (__block_write_begin(page, 0, len,
- noalloc_get_block_write)) {
- redirty_page:
- redirty_page_for_writepage(wbc, page);
+ if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+ ext4_bh_delay_or_unwritten)) {
+ redirty_page_for_writepage(wbc, page);
+ if (current->flags & PF_MEMALLOC) {
+ /*
+ * For memory cleaning there's no point in writing only
+ * some buffers. So just bail out. Warn if we came here
+ * from direct reclaim.
+ */
+ WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
+ == PF_MEMALLOC);
unlock_page(page);
return 0;
}
- commit_write = 1;
- }
- page_bufs = page_buffers(page);
- if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_delay_or_unwritten)) {
- /*
- * We don't want to do block allocation, so redirty
- * the page and return. We may reach here when we do
- * a journal commit via journal_submit_inode_data_buffers.
- * We can also reach here via shrink_page_list but it
- * should never be for direct reclaim so warn if that
- * happens
- */
- WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
- PF_MEMALLOC);
- goto redirty_page;
}
- if (commit_write)
- /* now mark the buffer_heads as dirty and uptodate */
- block_commit_write(page, 0, len);
if (PageChecked(page) && ext4_should_journal_data(inode))
/*
@@ -2089,14 +2106,9 @@ static int ext4_writepage(struct page *page,
*/
return __ext4_journalled_writepage(page, len);
- if (buffer_uninit(page_bufs)) {
- ext4_set_bh_endio(page_bufs, inode);
- ret = block_write_full_page_endio(page, noalloc_get_block_write,
- wbc, ext4_end_io_buffer_write);
- } else
- ret = block_write_full_page(page, noalloc_get_block_write,
- wbc);
-
+ memset(&io_submit, 0, sizeof(io_submit));
+ ret = ext4_bio_write_page(&io_submit, page, len, wbc);
+ ext4_io_submit(&io_submit);
return ret;
}
@@ -2228,51 +2240,38 @@ static int write_cache_pages_da(handle_t *handle,
logical = (sector_t) page->index <<
(PAGE_CACHE_SHIFT - inode->i_blkbits);
- if (!page_has_buffers(page)) {
- mpage_add_bh_to_extent(mpd, logical,
- PAGE_CACHE_SIZE,
- (1 << BH_Dirty) | (1 << BH_Uptodate));
- if (mpd->io_done)
- goto ret_extent_tail;
- } else {
+ /* Add all dirty buffers to mpd */
+ head = page_buffers(page);
+ bh = head;
+ do {
+ BUG_ON(buffer_locked(bh));
/*
- * Page with regular buffer heads,
- * just add all dirty ones
+ * We need to try to allocate unmapped blocks
+ * in the same page. Otherwise we won't make
+ * progress with the page in ext4_writepage
*/
- head = page_buffers(page);
- bh = head;
- do {
- BUG_ON(buffer_locked(bh));
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ bh->b_state);
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else if (buffer_dirty(bh) &&
+ buffer_mapped(bh)) {
/*
- * We need to try to allocate
- * unmapped blocks in the same page.
- * Otherwise we won't make progress
- * with the page in ext4_writepage
+ * mapped dirty buffer. We need to
+ * update the b_state because we look
+ * at b_state in mpage_da_map_blocks.
+ * We don't update b_size because if we
+ * find an unmapped buffer_head later
+ * we need to use the b_state flag of
+ * that buffer_head.
*/
- if (ext4_bh_delay_or_unwritten(NULL, bh)) {
- mpage_add_bh_to_extent(mpd, logical,
- bh->b_size,
- bh->b_state);
- if (mpd->io_done)
- goto ret_extent_tail;
- } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
- /*
- * mapped dirty buffer. We need
- * to update the b_state
- * because we look at b_state
- * in mpage_da_map_blocks. We
- * don't update b_size because
- * if we find an unmapped
- * buffer_head later we need to
- * use the b_state flag of that
- * buffer_head.
- */
- if (mpd->b_size == 0)
- mpd->b_state = bh->b_state & BH_FLAGS;
- }
- logical++;
- } while ((bh = bh->b_this_page) != head);
- }
+ if (mpd->b_size == 0)
+ mpd->b_state =
+ bh->b_state & BH_FLAGS;
+ }
+ logical++;
+ } while ((bh = bh->b_this_page) != head);
if (nr_to_write > 0) {
nr_to_write--;
@@ -2413,7 +2412,8 @@ retry:
needed_blocks = ext4_da_writepages_trans_blocks(inode);
/* start a new transaction*/
- handle = ext4_journal_start(inode, needed_blocks);
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
@@ -2512,12 +2512,8 @@ static int ext4_nonda_switch(struct super_block *sb)
/*
* Start pushing delalloc when 1/2 of free blocks are dirty.
*/
- if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
- !writeback_in_progress(sb->s_bdi) &&
- down_read_trylock(&sb->s_umount)) {
- writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
- up_read(&sb->s_umount);
- }
+ if (dirty_blocks && (free_blocks < 2 * dirty_blocks))
+ try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
if (2 * free_blocks < 3 * dirty_blocks ||
free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
@@ -2555,42 +2551,52 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
pos, len, flags,
pagep, fsdata);
if (ret < 0)
- goto out;
- if (ret == 1) {
- ret = 0;
- goto out;
- }
+ return ret;
+ if (ret == 1)
+ return 0;
}
-retry:
+ /*
+ * grab_cache_page_write_begin() can take a long time if the
+ * system is thrashing due to memory pressure, or if the page
+ * is being written back. So grab it first before we start
+ * the transaction handle. This also allows us to allocate
+ * the page (if needed) without using GFP_NOFS.
+ */
+retry_grab:
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+ unlock_page(page);
+
/*
* With delayed allocation, we don't log the i_disksize update
* if there is delayed block allocation. But we still need
* to journal the i_disksize update if a write to the end
* of the file hits an already mapped buffer.
*/
- handle = ext4_journal_start(inode, 1);
+retry_journal:
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
+ page_cache_release(page);
+ return PTR_ERR(handle);
}
- /* We cannot recurse into the filesystem as the transaction is already
- * started */
- flags |= AOP_FLAG_NOFS;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
+ lock_page(page);
+ if (page->mapping != mapping) {
+ /* The page got truncated from under us */
+ unlock_page(page);
+ page_cache_release(page);
ext4_journal_stop(handle);
- ret = -ENOMEM;
- goto out;
+ goto retry_grab;
}
- *pagep = page;
+ /* In case writeback began while the page was unlocked */
+ wait_on_page_writeback(page);
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
- page_cache_release(page);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
@@ -2598,11 +2604,16 @@ retry:
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
+
+ if (ret == -ENOSPC &&
+ ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry_journal;
+
+ page_cache_release(page);
+ return ret;
}
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
-out:
+ *pagep = page;
return ret;
}
@@ -2858,36 +2869,10 @@ ext4_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
-static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
-{
- struct buffer_head *head, *bh;
- unsigned int curr_off = 0;
-
- if (!page_has_buffers(page))
- return;
- head = bh = page_buffers(page);
- do {
- if (offset <= curr_off && test_clear_buffer_uninit(bh)
- && bh->b_private) {
- ext4_free_io_end(bh->b_private);
- bh->b_private = NULL;
- bh->b_end_io = NULL;
- }
- curr_off = curr_off + bh->b_size;
- bh = bh->b_this_page;
- } while (bh != head);
-}
-
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
trace_ext4_invalidatepage(page, offset);
- /*
- * free any io_end structure allocated for buffers to be discarded
- */
- if (ext4_should_dioread_nolock(page->mapping->host))
- ext4_invalidatepage_free_endio(page, offset);
-
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
@@ -2959,7 +2944,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private, int ret,
bool is_async)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
ext4_io_end_t *io_end = iocb->private;
/* if not async direct IO or dio with 0 bytes write, just return */
@@ -2977,9 +2962,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
out:
+ inode_dio_done(inode);
if (is_async)
aio_complete(iocb, ret, 0);
- inode_dio_done(inode);
return;
}
@@ -2993,65 +2978,6 @@ out:
ext4_add_complete_io(io_end);
}
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
-{
- ext4_io_end_t *io_end = bh->b_private;
- struct inode *inode;
-
- if (!test_clear_buffer_uninit(bh) || !io_end)
- goto out;
-
- if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
- ext4_msg(io_end->inode->i_sb, KERN_INFO,
- "sb umounted, discard end_io request for inode %lu",
- io_end->inode->i_ino);
- ext4_free_io_end(io_end);
- goto out;
- }
-
- /*
- * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
- * but being more careful is always safe for the future change.
- */
- inode = io_end->inode;
- ext4_set_io_unwritten_flag(inode, io_end);
- ext4_add_complete_io(io_end);
-out:
- bh->b_private = NULL;
- bh->b_end_io = NULL;
- clear_buffer_uninit(bh);
- end_buffer_async_write(bh, uptodate);
-}
-
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
-{
- ext4_io_end_t *io_end;
- struct page *page = bh->b_page;
- loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
- size_t size = bh->b_size;
-
-retry:
- io_end = ext4_init_io_end(inode, GFP_ATOMIC);
- if (!io_end) {
- pr_warn_ratelimited("%s: allocation fail\n", __func__);
- schedule();
- goto retry;
- }
- io_end->offset = offset;
- io_end->size = size;
- /*
- * We need to hold a reference to the page to make sure it
- * doesn't get evicted before ext4_end_io_work() has a chance
- * to convert the extent from written to unwritten.
- */
- io_end->page = page;
- get_page(io_end->page);
-
- bh->b_private = io_end;
- bh->b_end_io = ext4_end_io_buffer_write;
- return 0;
-}
-
/*
* For ext4 extent files, ext4 will do direct-io write to holes,
* preallocated extents, and those write extend the file, no need to
@@ -3553,20 +3479,20 @@ int ext4_can_truncate(struct inode *inode)
int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- /* TODO: Add support for non extent hole punching */
- return -EOPNOTSUPP;
- }
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return ext4_ind_punch_hole(file, offset, length);
if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
/* TODO: Add support for bigalloc file systems */
return -EOPNOTSUPP;
}
+ trace_ext4_punch_hole(inode, offset, length);
+
return ext4_ext_punch_hole(file, offset, length);
}
@@ -3660,11 +3586,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
bh = sb_getblk(sb, block);
- if (!bh) {
- EXT4_ERROR_INODE_BLOCK(inode, block,
- "unable to read itable block");
- return -EIO;
- }
+ if (unlikely(!bh))
+ return -ENOMEM;
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
@@ -3696,7 +3619,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
/* Is the inode bitmap in cache? */
bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
- if (!bitmap_bh)
+ if (unlikely(!bitmap_bh))
goto make_io;
/*
@@ -4404,8 +4327,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
- handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
- EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
+ handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
+ (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
+ EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
@@ -4440,7 +4364,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
(attr->ia_size < inode->i_size)) {
handle_t *handle;
- handle = ext4_journal_start(inode, 3);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
@@ -4460,7 +4384,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size);
if (error) {
/* Do as much error cleanup as possible */
- handle = ext4_journal_start(inode, 3);
+ handle = ext4_journal_start(inode,
+ EXT4_HT_INODE, 3);
if (IS_ERR(handle)) {
ext4_orphan_del(NULL, inode);
goto err_out;
@@ -4801,7 +4726,7 @@ void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
- handle = ext4_journal_start(inode, 2);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle))
goto out;
@@ -4902,7 +4827,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
/* Finally we can mark the inode as dirty. */
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -4926,7 +4851,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long len;
int ret;
struct file *file = vma->vm_file;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
get_block_t *get_block;
@@ -4980,7 +4905,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
else
get_block = ext4_get_block;
retry_alloc:
- handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = VM_FAULT_SIGBUS;
goto out;
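
The inode.c hunks above repeatedly make the same mechanical change: ext4_journal_start() now takes an EXT4_HT_* handle-type tag in addition to the credit count. Below is a minimal caller sketch of that pattern; the helper function name is hypothetical and not part of the patch.

#include "ext4_jbd2.h"

/* Illustrative sketch only -- this helper is not part of the patch. */
static int example_touch_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	/* Call sites now pass an EXT4_HT_* handle type as well as the credits. */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}

Judging by the call sites above, only the added argument changes; the credit counts themselves stay as they were.
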
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 5747f52f7c72..721f4d33e148 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -22,7 +22,7 @@
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int flags;
@@ -104,7 +104,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
} else if (oldflags & EXT4_EOFBLOCKS_FL)
ext4_truncate(inode);
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto flags_out;
@@ -173,7 +173,7 @@ flags_out:
}
mutex_lock(&inode->i_mutex);
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto unlock_out;
@@ -313,6 +313,9 @@ mext_out:
if (err == 0)
err = err2;
mnt_drop_write_file(filp);
+ if (!err && ext4_has_group_desc_csum(sb) &&
+ test_opt(sb, INIT_INODE_TABLE))
+ err = ext4_register_li_request(sb, input.group);
group_add_out:
ext4_resize_end(sb);
return err;
@@ -358,6 +361,7 @@ group_add_out:
ext4_fsblk_t n_blocks_count;
struct super_block *sb = inode->i_sb;
int err = 0, err2 = 0;
+ ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
@@ -388,6 +392,11 @@ group_add_out:
if (err == 0)
err = err2;
mnt_drop_write_file(filp);
+ if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+ ext4_has_group_desc_csum(sb) &&
+ test_opt(sb, INIT_INODE_TABLE))
+ err = ext4_register_li_request(sb, o_group);
+
resizefs_out:
ext4_resize_end(sb);
return err;
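
The ioctl.c changes, like several hunks elsewhere in the series, replace open-coded filp->f_dentry->d_inode (or f_path.dentry->d_inode) dereferences with the file_inode() helper. A minimal sketch of the idiom; the handler name is hypothetical:

/* Illustrative sketch only -- this handler is not part of the patch. */
static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);	/* was filp->f_dentry->d_inode */

	if (!S_ISREG(inode->i_mode))
		return -ENOTTY;
	return 0;
}
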
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1bf6fe785c4f..7bb713a46fe4 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -23,11 +23,18 @@
#include "ext4_jbd2.h"
#include "mballoc.h"
-#include <linux/debugfs.h>
#include <linux/log2.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>
+#ifdef CONFIG_EXT4_DEBUG
+ushort ext4_mballoc_debug __read_mostly;
+
+module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
+MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
+#endif
+
/*
* MUSTDO:
* - test ext4_ext_search_left() and ext4_ext_search_right()
@@ -1884,15 +1891,19 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
case 0:
BUG_ON(ac->ac_2order == 0);
- if (grp->bb_largest_free_order < ac->ac_2order)
- return 0;
-
/* Avoid using the first bg of a flexgroup for data files */
if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
(flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
((group % flex_size) == 0))
return 0;
+ if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
+ (free / fragments) >= ac->ac_g_ex.fe_len)
+ return 1;
+
+ if (grp->bb_largest_free_order < ac->ac_2order)
+ return 0;
+
return 1;
case 1:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
@@ -2007,7 +2018,7 @@ repeat:
}
ac->ac_groups_scanned++;
- if (cr == 0)
+ if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 && sbi->s_stripe &&
!(ac->ac_g_ex.fe_len % sbi->s_stripe))
@@ -2656,40 +2667,6 @@ static void ext4_free_data_callback(struct super_block *sb,
mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}
-#ifdef CONFIG_EXT4_DEBUG
-u8 mb_enable_debug __read_mostly;
-
-static struct dentry *debugfs_dir;
-static struct dentry *debugfs_debug;
-
-static void __init ext4_create_debugfs_entry(void)
-{
- debugfs_dir = debugfs_create_dir("ext4", NULL);
- if (debugfs_dir)
- debugfs_debug = debugfs_create_u8("mballoc-debug",
- S_IRUGO | S_IWUSR,
- debugfs_dir,
- &mb_enable_debug);
-}
-
-static void ext4_remove_debugfs_entry(void)
-{
- debugfs_remove(debugfs_debug);
- debugfs_remove(debugfs_dir);
-}
-
-#else
-
-static void __init ext4_create_debugfs_entry(void)
-{
-}
-
-static void ext4_remove_debugfs_entry(void)
-{
-}
-
-#endif
-
int __init ext4_init_mballoc(void)
{
ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
@@ -2711,7 +2688,6 @@ int __init ext4_init_mballoc(void)
kmem_cache_destroy(ext4_ac_cachep);
return -ENOMEM;
}
- ext4_create_debugfs_entry();
return 0;
}
@@ -2726,7 +2702,6 @@ void ext4_exit_mballoc(void)
kmem_cache_destroy(ext4_ac_cachep);
kmem_cache_destroy(ext4_free_data_cachep);
ext4_groupinfo_destroy_slabs();
- ext4_remove_debugfs_entry();
}
@@ -3444,7 +3419,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
win = offs;
ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
- EXT4_B2C(sbi, win);
+ EXT4_NUM_B2C(sbi, win);
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
}
@@ -3872,7 +3847,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
struct super_block *sb = ac->ac_sb;
ext4_group_t ngroups, i;
- if (!mb_enable_debug ||
+ if (!ext4_mballoc_debug ||
(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
@@ -4005,8 +3980,8 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
len = ar->len;
/* just a dirty hack to filter too big requests */
- if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
- len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
+ if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
+ len = EXT4_CLUSTERS_PER_GROUP(sb);
/* start searching from the goal */
goal = ar->goal;
@@ -4136,7 +4111,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
/* The max size of hash table is PREALLOC_TB_SIZE */
order = PREALLOC_TB_SIZE - 1;
/* Add the prealloc space to lg */
- rcu_read_lock();
+ spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
pa_inode_list) {
spin_lock(&tmp_pa->pa_lock);
@@ -4160,12 +4135,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
if (!added)
list_add_tail_rcu(&pa->pa_inode_list,
&lg->lg_prealloc_list[order]);
- rcu_read_unlock();
+ spin_unlock(&lg->lg_prealloc_lock);
/* Now trim the list to be not more than 8 elements */
if (lg_prealloc_count > 8) {
ext4_mb_discard_lg_preallocations(sb, lg,
- order, lg_prealloc_count);
+ order, lg_prealloc_count);
return;
}
return ;
@@ -4590,7 +4565,7 @@ do_more:
EXT4_BLOCKS_PER_GROUP(sb);
count -= overflow;
}
- count_clusters = EXT4_B2C(sbi, count);
+ count_clusters = EXT4_NUM_B2C(sbi, count);
bitmap_bh = ext4_read_block_bitmap(sb, block_group);
if (!bitmap_bh) {
err = -EIO;
@@ -4832,11 +4807,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_B2C(sbi, blocks_freed));
+ EXT4_NUM_B2C(sbi, blocks_freed));
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic_add(EXT4_B2C(sbi, blocks_freed),
+ atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
&sbi->s_flex_groups[flex_group].free_clusters);
}
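
mballoc.c drops its private debugfs knob and exposes the debug level as a module parameter instead (ext4_mballoc_debug / module_param_named() above). A generic sketch of that pattern for a hypothetical module; only the names are invented, the mechanism mirrors the hunk:

#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical module; only the mechanism mirrors the hunk above. */
static ushort example_debug __read_mostly;
module_param_named(debug, example_debug, ushort, 0644);
MODULE_PARM_DESC(debug, "Debugging level (0 = off)");

static void example_dbg(unsigned int level, const char *msg)
{
	/* Same gate as mb_debug(): print only at or below the configured level. */
	if (level <= example_debug)
		printk(KERN_DEBUG "example: %s\n", msg);
}

The mb_debug() macro in the mballoc.h hunk that follows gates on the renamed variable in exactly this way.
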
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 3ccd889ba953..08481ee84cd5 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -37,11 +37,11 @@
/*
*/
#ifdef CONFIG_EXT4_DEBUG
-extern u8 mb_enable_debug;
+extern ushort ext4_mballoc_debug;
#define mb_debug(n, fmt, a...) \
do { \
- if ((n) <= mb_enable_debug) { \
+ if ((n) <= ext4_mballoc_debug) { \
printk(KERN_DEBUG "(%s, %d): %s: ", \
__FILE__, __LINE__, __func__); \
printk(fmt, ## a); \
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index db8226d595fa..480acf4a085f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -456,11 +456,14 @@ int ext4_ext_migrate(struct inode *inode)
*/
return retval;
- handle = ext4_journal_start(inode,
- EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
- + 1);
+ /*
+ * Worst case we can touch the allocation bitmaps, a bgd
+ * block, and a block to link in the orphan list. We do need
+ * to worry about credits for modifying the quota inode.
+ */
+ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
+ 4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
+
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
return retval;
@@ -507,7 +510,7 @@ int ext4_ext_migrate(struct inode *inode)
ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
up_read((&EXT4_I(inode)->i_data_sem));
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle)) {
/*
* It is impossible to update on-disk structures without
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index fe7c63f4717e..f9b551561d2c 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -80,6 +80,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
* is not blocked in the elevator. */
if (!*bh)
*bh = sb_getblk(sb, mmp_block);
+ if (!*bh)
+ return -ENOMEM;
if (*bh) {
get_bh(*bh);
lock_buffer(*bh);
@@ -91,7 +93,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
*bh = NULL;
}
}
- if (!*bh) {
+ if (unlikely(!*bh)) {
ext4_warning(sb, "Error while reading MMP block %llu",
mmp_block);
return -EIO;
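
The mmp.c hunk, like the resize.c and inode.c hunks in this series, tightens the sb_getblk() error handling: a NULL return is an allocation failure, so it is now reported as -ENOMEM instead of -EIO. A minimal sketch of the resulting pattern; the wrapper name is hypothetical:

/* Illustrative wrapper only -- not part of the patch. */
static struct buffer_head *example_get_meta_block(struct super_block *sb,
						  sector_t blk)
{
	struct buffer_head *bh;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);	/* no I/O was attempted; this is OOM */
	return bh;
}
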
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index d9cc5ee42f53..4e81d47aa8cb 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -681,6 +681,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
depth = ext_depth(donor_inode);
dext = donor_path[depth].p_ext;
+ if (unlikely(!dext))
+ goto missing_donor_extent;
tmp_dext = *dext;
*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
@@ -691,7 +693,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
/* Loop for the donor extents */
while (1) {
/* The extent for donor must be found. */
- if (!dext) {
+ if (unlikely(!dext)) {
+ missing_donor_extent:
EXT4_ERROR_INODE(donor_inode,
"The extent for donor must be found");
*err = -EIO;
@@ -761,9 +764,6 @@ out:
kfree(donor_path);
}
- ext4_ext_invalidate_cache(orig_inode);
- ext4_ext_invalidate_cache(donor_inode);
-
return replaced_count;
}
@@ -900,7 +900,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
pgoff_t orig_page_offset, int data_offset_in_page,
int block_len_in_page, int uninit, int *err)
{
- struct inode *orig_inode = o_filp->f_dentry->d_inode;
+ struct inode *orig_inode = file_inode(o_filp);
struct page *pagep[2] = {NULL, NULL};
handle_t *handle;
ext4_lblk_t orig_blk_offset;
@@ -920,7 +920,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
again:
*err = 0;
jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
- handle = ext4_journal_start(orig_inode, jblocks);
+ handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
if (IS_ERR(handle)) {
*err = PTR_ERR(handle);
return 0;
@@ -1279,8 +1279,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
__u64 orig_start, __u64 donor_start, __u64 len,
__u64 *moved_len)
{
- struct inode *orig_inode = o_filp->f_dentry->d_inode;
- struct inode *donor_inode = d_filp->f_dentry->d_inode;
+ struct inode *orig_inode = file_inode(o_filp);
+ struct inode *donor_inode = file_inode(d_filp);
struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
ext4_lblk_t block_start = orig_start;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f9ed946a448e..3825d6aa8336 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -47,38 +47,111 @@
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
- ext4_lblk_t *block, int *err)
+ ext4_lblk_t *block)
{
struct buffer_head *bh;
+ int err = 0;
if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
((inode->i_size >> 10) >=
- EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) {
- *err = -ENOSPC;
- return NULL;
- }
+ EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+ return ERR_PTR(-ENOSPC);
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
- bh = ext4_bread(handle, inode, *block, 1, err);
- if (bh) {
- inode->i_size += inode->i_sb->s_blocksize;
- EXT4_I(inode)->i_disksize = inode->i_size;
- *err = ext4_journal_get_write_access(handle, bh);
- if (*err) {
+ bh = ext4_bread(handle, inode, *block, 1, &err);
+ if (!bh)
+ return ERR_PTR(err);
+ inode->i_size += inode->i_sb->s_blocksize;
+ EXT4_I(inode)->i_disksize = inode->i_size;
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err) {
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+ return ERR_PTR(err);
+ }
+ return bh;
+}
+
+static int ext4_dx_csum_verify(struct inode *inode,
+ struct ext4_dir_entry *dirent);
+
+typedef enum {
+ EITHER, INDEX, DIRENT
+} dirblock_type_t;
+
+#define ext4_read_dirblock(inode, block, type) \
+ __ext4_read_dirblock((inode), (block), (type), __LINE__)
+
+static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ ext4_lblk_t block,
+ dirblock_type_t type,
+ unsigned int line)
+{
+ struct buffer_head *bh;
+ struct ext4_dir_entry *dirent;
+ int err = 0, is_dx_block = 0;
+
+ bh = ext4_bread(NULL, inode, block, 0, &err);
+ if (!bh) {
+ if (err == 0) {
+ ext4_error_inode(inode, __func__, line, block,
+ "Directory hole found");
+ return ERR_PTR(-EIO);
+ }
+ __ext4_warning(inode->i_sb, __func__, line,
+ "error reading directory block "
+ "(ino %lu, block %lu)", inode->i_ino,
+ (unsigned long) block);
+ return ERR_PTR(err);
+ }
+ dirent = (struct ext4_dir_entry *) bh->b_data;
+ /* Determine whether or not we have an index block */
+ if (is_dx(inode)) {
+ if (block == 0)
+ is_dx_block = 1;
+ else if (ext4_rec_len_from_disk(dirent->rec_len,
+ inode->i_sb->s_blocksize) ==
+ inode->i_sb->s_blocksize)
+ is_dx_block = 1;
+ }
+ if (!is_dx_block && type == INDEX) {
+ ext4_error_inode(inode, __func__, line, block,
+ "directory leaf block found instead of index block");
+ return ERR_PTR(-EIO);
+ }
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
+ buffer_verified(bh))
+ return bh;
+
+ /*
+ * An empty leaf block can get mistaken for an index block; for
+ * this reason, we can only check the index checksum when the
+ * caller is sure it should be an index block.
+ */
+ if (is_dx_block && type == INDEX) {
+ if (ext4_dx_csum_verify(inode, dirent))
+ set_buffer_verified(bh);
+ else {
+ ext4_error_inode(inode, __func__, line, block,
+ "Directory index failed checksum");
brelse(bh);
- bh = NULL;
+ return ERR_PTR(-EIO);
}
}
- if (!bh && !(*err)) {
- *err = -EIO;
- ext4_error(inode->i_sb,
- "Directory hole detected on inode %lu\n",
- inode->i_ino);
+ if (!is_dx_block) {
+ if (ext4_dirent_csum_verify(inode, dirent))
+ set_buffer_verified(bh);
+ else {
+ ext4_error_inode(inode, __func__, line, block,
+ "Directory block failed checksum");
+ brelse(bh);
+ return ERR_PTR(-EIO);
+ }
}
return bh;
}
@@ -604,9 +677,9 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
u32 hash;
frame->bh = NULL;
- if (!(bh = ext4_bread(NULL, dir, 0, 0, err))) {
- if (*err == 0)
- *err = ERR_BAD_DX_DIR;
+ bh = ext4_read_dirblock(dir, 0, INDEX);
+ if (IS_ERR(bh)) {
+ *err = PTR_ERR(bh);
goto fail;
}
root = (struct dx_root *) bh->b_data;
@@ -643,15 +716,6 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
goto fail;
}
- if (!buffer_verified(bh) &&
- !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
- ext4_warning(dir->i_sb, "Root failed checksum");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto fail;
- }
- set_buffer_verified(bh);
-
entries = (struct dx_entry *) (((char *)&root->info) +
root->info.info_length);
@@ -709,22 +773,12 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
frame->entries = entries;
frame->at = at;
if (!indirect--) return frame;
- if (!(bh = ext4_bread(NULL, dir, dx_get_block(at), 0, err))) {
- if (!(*err))
- *err = ERR_BAD_DX_DIR;
- goto fail2;
- }
- at = entries = ((struct dx_node *) bh->b_data)->entries;
-
- if (!buffer_verified(bh) &&
- !ext4_dx_csum_verify(dir,
- (struct ext4_dir_entry *)bh->b_data)) {
- ext4_warning(dir->i_sb, "Node failed checksum");
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
+ bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+ if (IS_ERR(bh)) {
+ *err = PTR_ERR(bh);
goto fail2;
}
- set_buffer_verified(bh);
+ entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext4_warning(dir->i_sb,
@@ -783,7 +837,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
{
struct dx_frame *p;
struct buffer_head *bh;
- int err, num_frames = 0;
+ int num_frames = 0;
__u32 bhash;
p = frame;
@@ -822,25 +876,9 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
* block so no check is necessary
*/
while (num_frames--) {
- if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
- 0, &err))) {
- if (!err) {
- ext4_error(dir->i_sb,
- "Directory hole detected on inode %lu\n",
- dir->i_ino);
- return -EIO;
- }
- return err; /* Failure */
- }
-
- if (!buffer_verified(bh) &&
- !ext4_dx_csum_verify(dir,
- (struct ext4_dir_entry *)bh->b_data)) {
- ext4_warning(dir->i_sb, "Node failed checksum");
- return -EIO;
- }
- set_buffer_verified(bh);
-
+ bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
p++;
brelse(p->bh);
p->bh = bh;
@@ -866,20 +904,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
- if (!(bh = ext4_bread(NULL, dir, block, 0, &err))) {
- if (!err) {
- err = -EIO;
- ext4_error(dir->i_sb,
- "Directory hole detected on inode %lu\n",
- dir->i_ino);
- }
- return err;
- }
-
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
- return -EIO;
- set_buffer_verified(bh);
+ bh = ext4_read_dirblock(dir, block, DIRENT);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
top = (struct ext4_dir_entry_2 *) ((char *) de +
@@ -937,7 +964,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
- dir = dir_file->f_path.dentry->d_inode;
+ dir = file_inode(dir_file);
if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
@@ -1333,26 +1360,11 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
return NULL;
do {
block = dx_get_block(frame->at);
- if (!(bh = ext4_bread(NULL, dir, block, 0, err))) {
- if (!(*err)) {
- *err = -EIO;
- ext4_error(dir->i_sb,
- "Directory hole detected on inode %lu\n",
- dir->i_ino);
- }
- goto errout;
- }
-
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(dir,
- (struct ext4_dir_entry *)bh->b_data)) {
- EXT4_ERROR_INODE(dir, "checksumming directory "
- "block %lu", (unsigned long)block);
- brelse(bh);
- *err = -EIO;
+ bh = ext4_read_dirblock(dir, block, DIRENT);
+ if (IS_ERR(bh)) {
+ *err = PTR_ERR(bh);
goto errout;
}
- set_buffer_verified(bh);
retval = search_dirblock(bh, dir, d_name,
block << EXT4_BLOCK_SIZE_BITS(sb),
res_dir);
@@ -1536,11 +1548,12 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
- bh2 = ext4_append (handle, dir, &newblock, &err);
- if (!(bh2)) {
+ bh2 = ext4_append(handle, dir, &newblock);
+ if (IS_ERR(bh2)) {
brelse(*bh);
*bh = NULL;
- goto errout;
+ *error = PTR_ERR(bh2);
+ return NULL;
}
BUFFER_TRACE(*bh, "get_write_access");
@@ -1621,7 +1634,6 @@ journal_error:
brelse(bh2);
*bh = NULL;
ext4_std_error(dir->i_sb, err);
-errout:
*error = err;
return NULL;
}
@@ -1699,7 +1711,6 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned int blocksize = dir->i_sb->s_blocksize;
- unsigned short reclen;
int csum_size = 0;
int err;
@@ -1707,7 +1718,6 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
- reclen = EXT4_DIR_REC_LEN(namelen);
if (!de) {
err = ext4_find_dest_de(dir, inode,
bh, bh->b_data, blocksize - csum_size,
@@ -1798,10 +1808,10 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
len = ((char *) root) + (blocksize - csum_size) - (char *) de;
/* Allocate new block for the 0th block's dirents */
- bh2 = ext4_append(handle, dir, &block, &retval);
- if (!(bh2)) {
+ bh2 = ext4_append(handle, dir, &block);
+ if (IS_ERR(bh2)) {
brelse(bh);
- return retval;
+ return PTR_ERR(bh2);
}
ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
data1 = bh2->b_data;
@@ -1918,20 +1928,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
- if (!(bh = ext4_bread(handle, dir, block, 0, &retval))) {
- if (!retval) {
- retval = -EIO;
- ext4_error(inode->i_sb,
- "Directory hole detected on inode %lu\n",
- inode->i_ino);
- }
- return retval;
- }
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(dir,
- (struct ext4_dir_entry *)bh->b_data))
- return -EIO;
- set_buffer_verified(bh);
+ bh = ext4_read_dirblock(dir, block, DIRENT);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (retval != -ENOSPC) {
brelse(bh);
@@ -1943,9 +1943,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return make_indexed_dir(handle, dentry, inode, bh);
brelse(bh);
}
- bh = ext4_append(handle, dir, &block, &retval);
- if (!bh)
- return retval;
+ bh = ext4_append(handle, dir, &block);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
de->inode = 0;
de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
@@ -1982,22 +1982,13 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
return err;
entries = frame->entries;
at = frame->at;
-
- if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err))) {
- if (!err) {
- err = -EIO;
- ext4_error(dir->i_sb,
- "Directory hole detected on inode %lu\n",
- dir->i_ino);
- }
+ bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ bh = NULL;
goto cleanup;
}
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
- goto journal_error;
- set_buffer_verified(bh);
-
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
@@ -2025,9 +2016,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
err = -ENOSPC;
goto cleanup;
}
- bh2 = ext4_append (handle, dir, &newblock, &err);
- if (!(bh2))
+ bh2 = ext4_append(handle, dir, &newblock);
+ if (IS_ERR(bh2)) {
+ err = PTR_ERR(bh2);
goto cleanup;
+ }
node2 = (struct dx_node *)(bh2->b_data);
entries2 = node2->entries;
memset(&node2->fake, 0, sizeof(struct fake_dirent));
@@ -2106,8 +2099,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
journal_error:
ext4_std_error(dir->i_sb, err);
cleanup:
- if (bh)
- brelse(bh);
+ brelse(bh);
dx_release(frames);
return err;
}
@@ -2254,29 +2246,28 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
{
handle_t *handle;
struct inode *inode;
- int err, retries = 0;
+ int err, credits, retries = 0;
dquot_initialize(dir);
+ credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+ EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
- handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
- inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
+ inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
+ NULL, EXT4_HT_DIR, credits);
+ handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
err = ext4_add_nondir(handle, dentry, inode);
+ if (!err && IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
}
- ext4_journal_stop(handle);
+ if (handle)
+ ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2287,31 +2278,30 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
{
handle_t *handle;
struct inode *inode;
- int err, retries = 0;
+ int err, credits, retries = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
+ credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+ EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
- handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
- inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
+ inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
+ NULL, EXT4_HT_DIR, credits);
+ handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &ext4_special_inode_operations;
err = ext4_add_nondir(handle, dentry, inode);
+ if (!err && IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
}
- ext4_journal_stop(handle);
+ if (handle)
+ ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2351,6 +2341,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
struct buffer_head *dir_block = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
+ ext4_lblk_t block = 0;
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
int err;
@@ -2367,16 +2358,10 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
goto out;
}
- inode->i_size = EXT4_I(inode)->i_disksize = blocksize;
- if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
- if (!err) {
- err = -EIO;
- ext4_error(inode->i_sb,
- "Directory hole detected on inode %lu\n",
- inode->i_ino);
- }
- goto out;
- }
+ inode->i_size = 0;
+ dir_block = ext4_append(handle, inode, &block);
+ if (IS_ERR(dir_block))
+ return PTR_ERR(dir_block);
BUFFER_TRACE(dir_block, "get_write_access");
err = ext4_journal_get_write_access(handle, dir_block);
if (err)
@@ -2403,25 +2388,21 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
- int err, retries = 0;
+ int err, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
dquot_initialize(dir);
+ credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+ EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
- handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
- inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
- &dentry->d_name, 0, NULL);
+ inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
+ &dentry->d_name,
+ 0, NULL, EXT4_HT_DIR, credits);
+ handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
@@ -2449,8 +2430,12 @@ out_clear_inode:
goto out_clear_inode;
unlock_new_inode(inode);
d_instantiate(dentry, inode);
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
out_stop:
- ext4_journal_stop(handle);
+ if (handle)
+ ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2476,25 +2461,14 @@ static int empty_dir(struct inode *inode)
}
sb = inode->i_sb;
- if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
- !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
- if (err)
- EXT4_ERROR_INODE(inode,
- "error %d reading directory lblock 0", err);
- else
- ext4_warning(inode->i_sb,
- "bad directory (dir #%lu) - no data block",
- inode->i_ino);
+ if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
+ EXT4_ERROR_INODE(inode, "invalid size");
return 1;
}
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(inode,
- (struct ext4_dir_entry *)bh->b_data)) {
- EXT4_ERROR_INODE(inode, "checksum error reading directory "
- "lblock 0");
- return -EIO;
- }
- set_buffer_verified(bh);
+ bh = ext4_read_dirblock(inode, 0, EITHER);
+ if (IS_ERR(bh))
+ return 1;
+
de = (struct ext4_dir_entry_2 *) bh->b_data;
de1 = ext4_next_entry(de, sb->s_blocksize);
if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -2517,28 +2491,9 @@ static int empty_dir(struct inode *inode)
err = 0;
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
- bh = ext4_bread(NULL, inode, lblock, 0, &err);
- if (!bh) {
- if (err)
- EXT4_ERROR_INODE(inode,
- "error %d reading directory "
- "lblock %u", err, lblock);
- else
- ext4_warning(inode->i_sb,
- "bad directory (dir #%lu) - no data block",
- inode->i_ino);
-
- offset += sb->s_blocksize;
- continue;
- }
- if (!buffer_verified(bh) &&
- !ext4_dirent_csum_verify(inode,
- (struct ext4_dir_entry *)bh->b_data)) {
- EXT4_ERROR_INODE(inode, "checksum error "
- "reading directory lblock 0");
- return -EIO;
- }
- set_buffer_verified(bh);
+ bh = ext4_read_dirblock(inode, lblock, EITHER);
+ if (IS_ERR(bh))
+ return 1;
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
if (ext4_check_dir_entry(inode, NULL, de, bh,
@@ -2717,25 +2672,18 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
- handle_t *handle;
+ handle_t *handle = NULL;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
- handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (!bh)
goto end_rmdir;
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
inode = dentry->d_inode;
retval = -EIO;
@@ -2746,6 +2694,17 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
if (!empty_dir(inode))
goto end_rmdir;
+ handle = ext4_journal_start(dir, EXT4_HT_DIR,
+ EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_rmdir;
+ }
+
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
@@ -2767,8 +2726,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
ext4_mark_inode_dirty(handle, dir);
end_rmdir:
- ext4_journal_stop(handle);
brelse(bh);
+ if (handle)
+ ext4_journal_stop(handle);
return retval;
}
@@ -2778,7 +2738,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de;
- handle_t *handle;
+ handle_t *handle = NULL;
trace_ext4_unlink_enter(dir, dentry);
/* Initialize quotas before so that eventual writes go
@@ -2786,13 +2746,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
- handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (!bh)
@@ -2804,6 +2757,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_unlink;
+ handle = ext4_journal_start(dir, EXT4_HT_DIR,
+ EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_unlink;
+ }
+
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
if (!inode->i_nlink) {
ext4_warning(inode->i_sb,
"Deleting nonexistent file (%lu), %d",
@@ -2824,8 +2788,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
retval = 0;
end_unlink:
- ext4_journal_stop(handle);
brelse(bh);
+ if (handle)
+ ext4_journal_stop(handle);
trace_ext4_unlink_exit(dentry, retval);
return retval;
}
@@ -2865,15 +2830,10 @@ static int ext4_symlink(struct inode *dir,
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
}
retry:
- handle = ext4_journal_start(dir, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
- inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
- &dentry->d_name, 0, NULL);
+ inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
+ &dentry->d_name, 0, NULL,
+ EXT4_HT_DIR, credits);
+ handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
@@ -2903,7 +2863,7 @@ retry:
* Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
* + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
*/
- handle = ext4_journal_start(dir,
+ handle = ext4_journal_start(dir, EXT4_HT_DIR,
EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
if (IS_ERR(handle)) {
@@ -2926,8 +2886,12 @@ retry:
}
EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_add_nondir(handle, dentry, inode);
+ if (!err && IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
out_stop:
- ext4_journal_stop(handle);
+ if (handle)
+ ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2950,8 +2914,9 @@ static int ext4_link(struct dentry *old_dentry,
dquot_initialize(dir);
retry:
- handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS);
+ handle = ext4_journal_start(dir, EXT4_HT_DIR,
+ (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2991,13 +2956,9 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
struct buffer_head *bh;
if (!ext4_has_inline_data(inode)) {
- if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) {
- if (!*retval) {
- *retval = -EIO;
- ext4_error(inode->i_sb,
- "Directory hole detected on inode %lu\n",
- inode->i_ino);
- }
+ bh = ext4_read_dirblock(inode, 0, EITHER);
+ if (IS_ERR(bh)) {
+ *retval = PTR_ERR(bh);
return NULL;
}
*parent_de = ext4_next_entry(
@@ -3034,9 +2995,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
* in separate transaction */
if (new_dentry->d_inode)
dquot_initialize(new_dentry->d_inode);
- handle = ext4_journal_start(old_dir, 2 *
- EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
+ handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
+ (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -3076,11 +3037,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
&inlined);
if (!dir_bh)
goto end_rename;
- if (!inlined && !buffer_verified(dir_bh) &&
- !ext4_dirent_csum_verify(old_inode,
- (struct ext4_dir_entry *)dir_bh->b_data))
- goto end_rename;
- set_buffer_verified(dir_bh);
if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
goto end_rename;
retval = -EMLINK;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0016fbca2a40..809b31003ecc 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -73,8 +74,6 @@ void ext4_free_io_end(ext4_io_end_t *io)
BUG_ON(!list_empty(&io->list));
BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
- if (io->page)
- put_page(io->page);
for (i = 0; i < io->num_io_pages; i++)
put_io_page(io->pages[i]);
io->num_io_pages = 0;
@@ -103,14 +102,13 @@ static int ext4_end_io(ext4_io_end_t *io)
"(inode %lu, offset %llu, size %zd, error %d)",
inode->i_ino, offset, size, ret);
}
- if (io->iocb)
- aio_complete(io->iocb, io->result, 0);
-
- if (io->flag & EXT4_IO_END_DIRECT)
- inode_dio_done(inode);
/* Wake up anyone waiting on unwritten extent conversion */
if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
wake_up_all(ext4_ioend_wq(inode));
+ if (io->flag & EXT4_IO_END_DIRECT)
+ inode_dio_done(inode);
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
return ret;
}
@@ -119,7 +117,6 @@ static void dump_completed_IO(struct inode *inode)
#ifdef EXT4FS_DEBUG
struct list_head *cur, *before, *after;
ext4_io_end_t *io, *io0, *io1;
- unsigned long flags;
if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
ext4_debug("inode %lu completed_io list is empty\n",
@@ -152,26 +149,20 @@ void ext4_add_complete_io(ext4_io_end_t *io_end)
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (list_empty(&ei->i_completed_io_list)) {
- io_end->flag |= EXT4_IO_END_QUEUED;
- queue_work(wq, &io_end->work);
- }
+ if (list_empty(&ei->i_completed_io_list))
+ queue_work(wq, &ei->i_unwritten_work);
list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
-static int ext4_do_flush_completed_IO(struct inode *inode,
- ext4_io_end_t *work_io)
+static int ext4_do_flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
- struct list_head unwritten, complete, to_free;
+ struct list_head unwritten;
unsigned long flags;
struct ext4_inode_info *ei = EXT4_I(inode);
int err, ret = 0;
- INIT_LIST_HEAD(&complete);
- INIT_LIST_HEAD(&to_free);
-
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
dump_completed_IO(inode);
list_replace_init(&ei->i_completed_io_list, &unwritten);
@@ -185,32 +176,7 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
err = ext4_end_io(io);
if (unlikely(!ret && err))
ret = err;
-
- list_add_tail(&io->list, &complete);
- }
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- while (!list_empty(&complete)) {
- io = list_entry(complete.next, ext4_io_end_t, list);
io->flag &= ~EXT4_IO_END_UNWRITTEN;
- /* end_io context can not be destroyed now because it still
- * used by queued worker. Worker thread will destroy it later */
- if (io->flag & EXT4_IO_END_QUEUED)
- list_del_init(&io->list);
- else
- list_move(&io->list, &to_free);
- }
- /* If we are called from worker context, it is time to clear queued
- * flag, and destroy it's end_io if it was converted already */
- if (work_io) {
- work_io->flag &= ~EXT4_IO_END_QUEUED;
- if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
- list_add_tail(&work_io->list, &to_free);
- }
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-
- while (!list_empty(&to_free)) {
- io = list_entry(to_free.next, ext4_io_end_t, list);
- list_del_init(&io->list);
ext4_free_io_end(io);
}
return ret;
@@ -219,10 +185,11 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
/*
* work on completed aio dio IO, to convert unwritten extents to extents
*/
-static void ext4_end_io_work(struct work_struct *work)
+void ext4_end_io_work(struct work_struct *work)
{
- ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
- ext4_do_flush_completed_IO(io->inode, io);
+ struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
+ i_unwritten_work);
+ ext4_do_flush_completed_IO(&ei->vfs_inode);
}
int ext4_flush_unwritten_io(struct inode *inode)
@@ -230,7 +197,7 @@ int ext4_flush_unwritten_io(struct inode *inode)
int ret;
WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
!(inode->i_state & I_FREEING));
- ret = ext4_do_flush_completed_IO(inode, NULL);
+ ret = ext4_do_flush_completed_IO(inode);
ext4_unwritten_wait(inode);
return ret;
}
@@ -241,7 +208,6 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
if (io) {
atomic_inc(&EXT4_I(inode)->i_ioend_count);
io->inode = inode;
- INIT_WORK(&io->work, ext4_end_io_work);
INIT_LIST_HEAD(&io->list);
}
return io;
@@ -382,14 +348,6 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
}
- if (!buffer_mapped(bh) || buffer_delay(bh)) {
- if (!buffer_mapped(bh))
- clear_buffer_dirty(bh);
- if (io->io_bio)
- ext4_io_submit(io);
- return 0;
- }
-
if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
ext4_io_submit(io);
@@ -436,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
if (!io_page) {
- set_page_dirty(page);
+ redirty_page_for_writepage(wbc, page);
unlock_page(page);
return -ENOMEM;
}
@@ -468,7 +426,15 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
set_buffer_uptodate(bh);
continue;
}
- clear_buffer_dirty(bh);
+ if (!buffer_dirty(bh) || buffer_delay(bh) ||
+ !buffer_mapped(bh) || buffer_unwritten(bh)) {
+ /* A hole? We can safely clear the dirty bit */
+ if (!buffer_mapped(bh))
+ clear_buffer_dirty(bh);
+ if (io->io_bio)
+ ext4_io_submit(io);
+ continue;
+ }
ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
if (ret) {
/*
@@ -476,9 +442,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
* we can do but mark the page as dirty, and
* better luck next time.
*/
- set_page_dirty(page);
+ redirty_page_for_writepage(wbc, page);
break;
}
+ clear_buffer_dirty(bh);
}
unlock_page(page);
/*
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index d99387b89edd..b2c8ee56eb98 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -333,8 +333,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
int err;
bh = sb_getblk(sb, blk);
- if (!bh)
- return ERR_PTR(-EIO);
+ if (unlikely(!bh))
+ return ERR_PTR(-ENOMEM);
if ((err = ext4_journal_get_write_access(handle, bh))) {
brelse(bh);
bh = ERR_PTR(err);
@@ -410,8 +410,8 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
return err;
bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
- if (!bh)
- return -EIO;
+ if (unlikely(!bh))
+ return -ENOMEM;
err = ext4_journal_get_write_access(handle, bh);
if (err)
@@ -466,7 +466,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
/* This transaction may be extended/restarted along the way */
- handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -500,8 +500,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
goto out;
gdb = sb_getblk(sb, block);
- if (!gdb) {
- err = -EIO;
+ if (unlikely(!gdb)) {
+ err = -ENOMEM;
goto out;
}
@@ -1031,7 +1031,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
handle_t *handle;
int err = 0, err2;
- handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
if (IS_ERR(handle)) {
group = 1;
err = PTR_ERR(handle);
@@ -1064,8 +1064,8 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
ext4_bg_has_super(sb, group));
bh = sb_getblk(sb, backup_block);
- if (!bh) {
- err = -EIO;
+ if (unlikely(!bh)) {
+ err = -ENOMEM;
break;
}
ext4_debug("update metadata backup %llu(+%llu)\n",
@@ -1168,7 +1168,7 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
struct buffer_head *bh = sb_getblk(sb, block);
- if (!bh)
+ if (unlikely(!bh))
return NULL;
if (!bh_uptodate_or_lock(bh)) {
if (bh_submit_read(bh) < 0) {
@@ -1247,7 +1247,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_inode_table_set(sb, gdp, group_data->inode_table);
ext4_free_group_clusters_set(sb, gdp,
- EXT4_B2C(sbi, group_data->free_blocks_count));
+ EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
if (ext4_has_group_desc_csum(sb))
ext4_itable_unused_set(sb, gdp,
@@ -1349,7 +1349,7 @@ static void ext4_update_super(struct super_block *sb,
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_B2C(sbi, free_blocks));
+ EXT4_NUM_B2C(sbi, free_blocks));
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
@@ -1360,7 +1360,7 @@ static void ext4_update_super(struct super_block *sb,
sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, group_data[0].group);
- atomic_add(EXT4_B2C(sbi, free_blocks),
+ atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
&sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
&sbi->s_flex_groups[flex_group].free_inodes);
@@ -1412,7 +1412,7 @@ static int ext4_flex_group_add(struct super_block *sb,
* modify each of the reserved GDT dindirect blocks.
*/
credit = flex_gd->count * 4 + reserved_gdb;
- handle = ext4_journal_start_sb(sb, credit);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto exit;
@@ -1506,10 +1506,12 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
group_data[i].blocks_count = blocks_per_group;
overhead = ext4_group_overhead_blocks(sb, group + i);
group_data[i].free_blocks_count = blocks_per_group - overhead;
- if (ext4_has_group_desc_csum(sb))
+ if (ext4_has_group_desc_csum(sb)) {
flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
EXT4_BG_INODE_UNINIT;
- else
+ if (!test_opt(sb, INIT_INODE_TABLE))
+ flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
+ } else
flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
}
@@ -1594,7 +1596,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
err = ext4_alloc_flex_bg_array(sb, input->group + 1);
if (err)
- return err;
+ goto out;
err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
if (err)
@@ -1622,7 +1624,7 @@ static int ext4_group_extend_no_check(struct super_block *sb,
/* We will update the superblock, one block bitmap, and
* one group descriptor via ext4_group_add_blocks().
*/
- handle = ext4_journal_start_sb(sb, 3);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
ext4_warning(sb, "error %d on journal start", err);
@@ -1786,7 +1788,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
credits += 3; /* block bitmap, bg descriptor, resize inode */
}
- handle = ext4_journal_start_sb(sb, credits);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3d4fb81bacd5..5e6c87836193 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -69,8 +69,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
static void ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
-static const char *ext4_decode_error(struct super_block *sb, int errno,
- char nbuf[16]);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
@@ -296,107 +294,6 @@ void ext4_itable_unused_set(struct super_block *sb,
}
-/* Just increment the non-pointer handle value */
-static handle_t *ext4_get_nojournal(void)
-{
- handle_t *handle = current->journal_info;
- unsigned long ref_cnt = (unsigned long)handle;
-
- BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
-
- ref_cnt++;
- handle = (handle_t *)ref_cnt;
-
- current->journal_info = handle;
- return handle;
-}
-
-
-/* Decrement the non-pointer handle value */
-static void ext4_put_nojournal(handle_t *handle)
-{
- unsigned long ref_cnt = (unsigned long)handle;
-
- BUG_ON(ref_cnt == 0);
-
- ref_cnt--;
- handle = (handle_t *)ref_cnt;
-
- current->journal_info = handle;
-}
-
-/*
- * Wrappers for jbd2_journal_start/end.
- */
-handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
-{
- journal_t *journal;
-
- trace_ext4_journal_start(sb, nblocks, _RET_IP_);
- if (sb->s_flags & MS_RDONLY)
- return ERR_PTR(-EROFS);
-
- WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
- journal = EXT4_SB(sb)->s_journal;
- if (!journal)
- return ext4_get_nojournal();
- /*
- * Special case here: if the journal has aborted behind our
- * backs (eg. EIO in the commit thread), then we still need to
- * take the FS itself readonly cleanly.
- */
- if (is_journal_aborted(journal)) {
- ext4_abort(sb, "Detected aborted journal");
- return ERR_PTR(-EROFS);
- }
- return jbd2_journal_start(journal, nblocks);
-}
-
-int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
-{
- struct super_block *sb;
- int err;
- int rc;
-
- if (!ext4_handle_valid(handle)) {
- ext4_put_nojournal(handle);
- return 0;
- }
- sb = handle->h_transaction->t_journal->j_private;
- err = handle->h_err;
- rc = jbd2_journal_stop(handle);
-
- if (!err)
- err = rc;
- if (err)
- __ext4_std_error(sb, where, line, err);
- return err;
-}
-
-void ext4_journal_abort_handle(const char *caller, unsigned int line,
- const char *err_fn, struct buffer_head *bh,
- handle_t *handle, int err)
-{
- char nbuf[16];
- const char *errstr = ext4_decode_error(NULL, err, nbuf);
-
- BUG_ON(!ext4_handle_valid(handle));
-
- if (bh)
- BUFFER_TRACE(bh, "abort");
-
- if (!handle->h_err)
- handle->h_err = err;
-
- if (is_handle_aborted(handle))
- return;
-
- printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
- caller, line, errstr, err_fn);
-
- jbd2_journal_abort_handle(handle);
-}
-
static void __save_error_info(struct super_block *sb, const char *func,
unsigned int line)
{
@@ -553,7 +450,7 @@ void ext4_error_file(struct file *file, const char *function,
va_list args;
struct va_format vaf;
struct ext4_super_block *es;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
char pathname[80], *path;
es = EXT4_SB(inode->i_sb)->s_es;
@@ -582,8 +479,8 @@ void ext4_error_file(struct file *file, const char *function,
ext4_handle_error(inode->i_sb);
}
-static const char *ext4_decode_error(struct super_block *sb, int errno,
- char nbuf[16])
+const char *ext4_decode_error(struct super_block *sb, int errno,
+ char nbuf[16])
{
char *errstr = NULL;
@@ -858,6 +755,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_abort(sb, "Couldn't clean up the journal");
}
+ ext4_es_unregister_shrinker(sb);
del_timer(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
@@ -885,6 +783,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_extent_cache_cnt);
brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
@@ -939,11 +838,12 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
return NULL;
ei->vfs_inode.i_version = 1;
- memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
ext4_es_init_tree(&ei->i_es_tree);
rwlock_init(&ei->i_es_lock);
+ INIT_LIST_HEAD(&ei->i_es_lru);
+ ei->i_es_lru_nr = 0;
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
@@ -960,6 +860,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_datasync_tid = 0;
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
+ INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work);
return &ei->vfs_inode;
}
@@ -1031,6 +932,7 @@ void ext4_clear_inode(struct inode *inode)
dquot_drop(inode);
ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+ ext4_es_lru_del(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1280,8 +1182,8 @@ static const match_table_t tokens = {
{Opt_stripe, "stripe=%u"},
{Opt_delalloc, "delalloc"},
{Opt_nodelalloc, "nodelalloc"},
- {Opt_mblk_io_submit, "mblk_io_submit"},
- {Opt_nomblk_io_submit, "nomblk_io_submit"},
+ {Opt_removed, "mblk_io_submit"},
+ {Opt_removed, "nomblk_io_submit"},
{Opt_block_validity, "block_validity"},
{Opt_noblock_validity, "noblock_validity"},
{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
@@ -1337,6 +1239,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *qname;
+ int ret = -1;
if (sb_any_quota_loaded(sb) &&
!sbi->s_qf_names[qtype]) {
@@ -1345,29 +1248,37 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"quota options when quota turned on");
return -1;
}
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+ "when QUOTA feature is enabled");
+ return -1;
+ }
qname = match_strdup(args);
if (!qname) {
ext4_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
return -1;
}
- if (sbi->s_qf_names[qtype] &&
- strcmp(sbi->s_qf_names[qtype], qname)) {
- ext4_msg(sb, KERN_ERR,
- "%s quota file already specified", QTYPE2NAME(qtype));
- kfree(qname);
- return -1;
+ if (sbi->s_qf_names[qtype]) {
+ if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+ ret = 1;
+ else
+ ext4_msg(sb, KERN_ERR,
+ "%s quota file already specified",
+ QTYPE2NAME(qtype));
+ goto errout;
}
- sbi->s_qf_names[qtype] = qname;
- if (strchr(sbi->s_qf_names[qtype], '/')) {
+ if (strchr(qname, '/')) {
ext4_msg(sb, KERN_ERR,
"quotafile must be on filesystem root");
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
- return -1;
+ goto errout;
}
+ sbi->s_qf_names[qtype] = qname;
set_opt(sb, QUOTA);
return 1;
+errout:
+ kfree(qname);
+ return ret;
}
static int clear_qf_name(struct super_block *sb, int qtype)
@@ -1381,10 +1292,7 @@ static int clear_qf_name(struct super_block *sb, int qtype)
" when quota turned on");
return -1;
}
- /*
- * The space will be released later when all options are confirmed
- * to be correct
- */
+ kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 1;
}
@@ -1404,6 +1312,9 @@ static int clear_qf_name(struct super_block *sb, int qtype)
#define MOPT_QFMT MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ 0x0080
+#define MOPT_NO_EXT2 0x0100
+#define MOPT_NO_EXT3 0x0200
+#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
static const struct mount_opts {
int token;
@@ -1414,25 +1325,31 @@ static const struct mount_opts {
{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
- {Opt_mblk_io_submit, EXT4_MOUNT_MBLK_IO_SUBMIT, MOPT_SET},
- {Opt_nomblk_io_submit, EXT4_MOUNT_MBLK_IO_SUBMIT, MOPT_CLEAR},
{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
- {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_SET},
- {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_CLEAR},
+ {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
+ MOPT_EXT4_ONLY | MOPT_SET},
+ {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
+ MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
- {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_SET | MOPT_EXPLICIT},
- {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_CLEAR | MOPT_EXPLICIT},
- {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_SET},
+ {Opt_delalloc, EXT4_MOUNT_DELALLOC,
+ MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
+ {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
+ MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+ {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
+ MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
- EXT4_MOUNT_JOURNAL_CHECKSUM), MOPT_SET},
- {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_SET},
+ EXT4_MOUNT_JOURNAL_CHECKSUM),
+ MOPT_EXT4_ONLY | MOPT_SET},
+ {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
- {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_SET},
- {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_CLEAR},
+ {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
+ MOPT_NO_EXT2 | MOPT_SET},
+ {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
+ MOPT_NO_EXT2 | MOPT_CLEAR},
{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
@@ -1444,9 +1361,14 @@ static const struct mount_opts {
{Opt_inode_readahead_blks, 0, MOPT_GTE0},
{Opt_init_itable, 0, MOPT_GTE0},
{Opt_stripe, 0, MOPT_GTE0},
- {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_DATAJ},
- {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_DATAJ},
- {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_DATAJ},
+ {Opt_resuid, 0, MOPT_GTE0},
+ {Opt_resgid, 0, MOPT_GTE0},
+ {Opt_journal_dev, 0, MOPT_GTE0},
+ {Opt_journal_ioprio, 0, MOPT_GTE0},
+ {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
+ {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
+ {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
+ MOPT_NO_EXT2 | MOPT_DATAJ},
{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
@@ -1496,8 +1418,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
else if (token == Opt_offgrpjquota)
return clear_qf_name(sb, GRPQUOTA);
#endif
- if (args->from && match_int(args, &arg))
- return -1;
switch (token) {
case Opt_noacl:
case Opt_nouser_xattr:
@@ -1506,138 +1426,156 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
case Opt_sb:
return 1; /* handled by get_sb_block() */
case Opt_removed:
- ext4_msg(sb, KERN_WARNING,
- "Ignoring removed %s option", opt);
+ ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
+ return 1;
+ case Opt_abort:
+ sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
+ return 1;
+ case Opt_i_version:
+ sb->s_flags |= MS_I_VERSION;
return 1;
- case Opt_resuid:
+ }
+
+ for (m = ext4_mount_opts; m->token != Opt_err; m++)
+ if (token == m->token)
+ break;
+
+ if (m->token == Opt_err) {
+ ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
+ "or missing value", opt);
+ return -1;
+ }
+
+ if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Mount option \"%s\" incompatible with ext2", opt);
+ return -1;
+ }
+ if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Mount option \"%s\" incompatible with ext3", opt);
+ return -1;
+ }
+
+ if (args->from && match_int(args, &arg))
+ return -1;
+ if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
+ return -1;
+ if (m->flags & MOPT_EXPLICIT)
+ set_opt2(sb, EXPLICIT_DELALLOC);
+ if (m->flags & MOPT_CLEAR_ERR)
+ clear_opt(sb, ERRORS_MASK);
+ if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
+ ext4_msg(sb, KERN_ERR, "Cannot change quota "
+ "options when quota turned on");
+ return -1;
+ }
+
+ if (m->flags & MOPT_NOSUPPORT) {
+ ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
+ } else if (token == Opt_commit) {
+ if (arg == 0)
+ arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ sbi->s_commit_interval = HZ * arg;
+ } else if (token == Opt_max_batch_time) {
+ if (arg == 0)
+ arg = EXT4_DEF_MAX_BATCH_TIME;
+ sbi->s_max_batch_time = arg;
+ } else if (token == Opt_min_batch_time) {
+ sbi->s_min_batch_time = arg;
+ } else if (token == Opt_inode_readahead_blks) {
+ if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
+ ext4_msg(sb, KERN_ERR,
+ "EXT4-fs: inode_readahead_blks must be "
+ "0 or a power of 2 smaller than 2^31");
+ return -1;
+ }
+ sbi->s_inode_readahead_blks = arg;
+ } else if (token == Opt_init_itable) {
+ set_opt(sb, INIT_INODE_TABLE);
+ if (!args->from)
+ arg = EXT4_DEF_LI_WAIT_MULT;
+ sbi->s_li_wait_mult = arg;
+ } else if (token == Opt_max_dir_size_kb) {
+ sbi->s_max_dir_size_kb = arg;
+ } else if (token == Opt_stripe) {
+ sbi->s_stripe = arg;
+ } else if (token == Opt_resuid) {
uid = make_kuid(current_user_ns(), arg);
if (!uid_valid(uid)) {
ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
return -1;
}
sbi->s_resuid = uid;
- return 1;
- case Opt_resgid:
+ } else if (token == Opt_resgid) {
gid = make_kgid(current_user_ns(), arg);
if (!gid_valid(gid)) {
ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
return -1;
}
sbi->s_resgid = gid;
- return 1;
- case Opt_abort:
- sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
- return 1;
- case Opt_i_version:
- sb->s_flags |= MS_I_VERSION;
- return 1;
- case Opt_journal_dev:
+ } else if (token == Opt_journal_dev) {
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return -1;
}
*journal_devnum = arg;
- return 1;
- case Opt_journal_ioprio:
- if (arg < 0 || arg > 7)
- return -1;
- *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
- return 1;
- }
-
- for (m = ext4_mount_opts; m->token != Opt_err; m++) {
- if (token != m->token)
- continue;
- if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
- return -1;
- if (m->flags & MOPT_EXPLICIT)
- set_opt2(sb, EXPLICIT_DELALLOC);
- if (m->flags & MOPT_CLEAR_ERR)
- clear_opt(sb, ERRORS_MASK);
- if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
- ext4_msg(sb, KERN_ERR, "Cannot change quota "
- "options when quota turned on");
+ } else if (token == Opt_journal_ioprio) {
+ if (arg > 7) {
+ ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
+ " (must be 0-7)");
return -1;
}
-
- if (m->flags & MOPT_NOSUPPORT) {
- ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
- } else if (token == Opt_commit) {
- if (arg == 0)
- arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
- sbi->s_commit_interval = HZ * arg;
- } else if (token == Opt_max_batch_time) {
- if (arg == 0)
- arg = EXT4_DEF_MAX_BATCH_TIME;
- sbi->s_max_batch_time = arg;
- } else if (token == Opt_min_batch_time) {
- sbi->s_min_batch_time = arg;
- } else if (token == Opt_inode_readahead_blks) {
- if (arg > (1 << 30))
- return -1;
- if (arg && !is_power_of_2(arg)) {
+ *journal_ioprio =
+ IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+ } else if (m->flags & MOPT_DATAJ) {
+ if (is_remount) {
+ if (!sbi->s_journal)
+ ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
+ else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
ext4_msg(sb, KERN_ERR,
- "EXT4-fs: inode_readahead_blks"
- " must be a power of 2");
- return -1;
- }
- sbi->s_inode_readahead_blks = arg;
- } else if (token == Opt_init_itable) {
- set_opt(sb, INIT_INODE_TABLE);
- if (!args->from)
- arg = EXT4_DEF_LI_WAIT_MULT;
- sbi->s_li_wait_mult = arg;
- } else if (token == Opt_max_dir_size_kb) {
- sbi->s_max_dir_size_kb = arg;
- } else if (token == Opt_stripe) {
- sbi->s_stripe = arg;
- } else if (m->flags & MOPT_DATAJ) {
- if (is_remount) {
- if (!sbi->s_journal)
- ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
- else if (test_opt(sb, DATA_FLAGS) !=
- m->mount_opt) {
- ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
- return -1;
- }
- } else {
- clear_opt(sb, DATA_FLAGS);
- sbi->s_mount_opt |= m->mount_opt;
- }
-#ifdef CONFIG_QUOTA
- } else if (m->flags & MOPT_QFMT) {
- if (sb_any_quota_loaded(sb) &&
- sbi->s_jquota_fmt != m->mount_opt) {
- ext4_msg(sb, KERN_ERR, "Cannot "
- "change journaled quota options "
- "when quota turned on");
return -1;
}
- sbi->s_jquota_fmt = m->mount_opt;
-#endif
} else {
- if (!args->from)
- arg = 1;
- if (m->flags & MOPT_CLEAR)
- arg = !arg;
- else if (unlikely(!(m->flags & MOPT_SET))) {
- ext4_msg(sb, KERN_WARNING,
- "buggy handling of option %s", opt);
- WARN_ON(1);
- return -1;
- }
- if (arg != 0)
- sbi->s_mount_opt |= m->mount_opt;
- else
- sbi->s_mount_opt &= ~m->mount_opt;
+ clear_opt(sb, DATA_FLAGS);
+ sbi->s_mount_opt |= m->mount_opt;
}
- return 1;
+#ifdef CONFIG_QUOTA
+ } else if (m->flags & MOPT_QFMT) {
+ if (sb_any_quota_loaded(sb) &&
+ sbi->s_jquota_fmt != m->mount_opt) {
+ ext4_msg(sb, KERN_ERR, "Cannot change journaled "
+ "quota options when quota turned on");
+ return -1;
+ }
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ ext4_msg(sb, KERN_ERR,
+ "Cannot set journaled quota options "
+ "when QUOTA feature is enabled");
+ return -1;
+ }
+ sbi->s_jquota_fmt = m->mount_opt;
+#endif
+ } else {
+ if (!args->from)
+ arg = 1;
+ if (m->flags & MOPT_CLEAR)
+ arg = !arg;
+ else if (unlikely(!(m->flags & MOPT_SET))) {
+ ext4_msg(sb, KERN_WARNING,
+ "buggy handling of option %s", opt);
+ WARN_ON(1);
+ return -1;
+ }
+ if (arg != 0)
+ sbi->s_mount_opt |= m->mount_opt;
+ else
+ sbi->s_mount_opt &= ~m->mount_opt;
}
- ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
- "or missing value", opt);
- return -1;
+ return 1;
}
static int parse_options(char *options, struct super_block *sb,
@@ -1667,6 +1605,12 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
#ifdef CONFIG_QUOTA
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+ ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+ "feature is enabled");
+ return 0;
+ }
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@@ -2776,7 +2720,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
break;
}
- if (group == ngroups)
+ if (group >= ngroups)
ret = 1;
if (!ret) {
@@ -3016,33 +2960,34 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
return elr;
}
-static int ext4_register_li_request(struct super_block *sb,
- ext4_group_t first_not_zeroed)
+int ext4_register_li_request(struct super_block *sb,
+ ext4_group_t first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_li_request *elr;
+ struct ext4_li_request *elr = NULL;
ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
int ret = 0;
+ mutex_lock(&ext4_li_mtx);
if (sbi->s_li_request != NULL) {
/*
* Reset timeout so it can be computed again, because
* s_li_wait_mult might have changed.
*/
sbi->s_li_request->lr_timeout = 0;
- return 0;
+ goto out;
}
if (first_not_zeroed == ngroups ||
(sb->s_flags & MS_RDONLY) ||
!test_opt(sb, INIT_INODE_TABLE))
- return 0;
+ goto out;
elr = ext4_li_request_new(sb, first_not_zeroed);
- if (!elr)
- return -ENOMEM;
-
- mutex_lock(&ext4_li_mtx);
+ if (!elr) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (NULL == ext4_li_info) {
ret = ext4_li_info_new();
@@ -3235,7 +3180,7 @@ int ext4_calculate_overhead(struct super_block *sb)
}
/* Add the journal blocks as well */
if (sbi->s_journal)
- overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+ overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
sbi->s_overhead = overhead;
smp_wmb();
@@ -3379,7 +3324,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
- set_opt(sb, MBLK_IO_SUBMIT);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3763,6 +3707,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
}
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
+ }
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
@@ -3772,6 +3719,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_max_writeback_mb_bump = 128;
sbi->s_extent_max_zeroout_kb = 32;
+ /* Register extent status tree shrinker */
+ ext4_es_register_shrinker(sb);
+
/*
* set up enough so that it can read an inode
*/
@@ -3783,13 +3733,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
- sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
-
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
- /* Use qctl operations for hidden quota files. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
sb->s_qcop = &ext4_qctl_sysfile_operations;
- }
+ else
+ sb->s_qcop = &ext4_qctl_operations;
#endif
memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
@@ -3985,6 +3933,16 @@ no_journal:
if (err)
goto failed_mount7;
+#ifdef CONFIG_QUOTA
+ /* Enable quota usage during mount. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ !(sb->s_flags & MS_RDONLY)) {
+ err = ext4_enable_quotas(sb);
+ if (err)
+ goto failed_mount8;
+ }
+#endif /* CONFIG_QUOTA */
+
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
@@ -4002,16 +3960,6 @@ no_journal:
} else
descr = "out journal";
-#ifdef CONFIG_QUOTA
- /* Enable quota usage during mount. */
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
- !(sb->s_flags & MS_RDONLY)) {
- err = ext4_enable_quotas(sb);
- if (err)
- goto failed_mount7;
- }
-#endif /* CONFIG_QUOTA */
-
if (test_opt(sb, DISCARD)) {
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q))
@@ -4035,6 +3983,10 @@ cantfind_ext4:
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
goto failed_mount;
+#ifdef CONFIG_QUOTA
+failed_mount8:
+ kobject_del(&sbi->s_kobj);
+#endif
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
@@ -4061,6 +4013,7 @@ failed_mount3:
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_extent_cache_cnt);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
@@ -4476,16 +4429,12 @@ static void ext4_clear_journal_err(struct super_block *sb,
int ext4_force_commit(struct super_block *sb)
{
journal_t *journal;
- int ret = 0;
if (sb->s_flags & MS_RDONLY)
return 0;
journal = EXT4_SB(sb)->s_journal;
- if (journal)
- ret = ext4_journal_force_commit(journal);
-
- return ret;
+ return ext4_journal_force_commit(journal);
}
static int ext4_sync_fs(struct super_block *sb, int wait)
@@ -4588,7 +4537,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
int err = 0;
#ifdef CONFIG_QUOTA
- int i;
+ int i, j;
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -4604,7 +4553,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#ifdef CONFIG_QUOTA
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++)
- old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+ if (sbi->s_qf_names[i]) {
+ old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+ GFP_KERNEL);
+ if (!old_opts.s_qf_names[i]) {
+ for (j = 0; j < i; j++)
+ kfree(old_opts.s_qf_names[j]);
+ kfree(orig_data);
+ return -ENOMEM;
+ }
+ } else
+ old_opts.s_qf_names[i] = NULL;
#endif
if (sbi->s_journal && sbi->s_journal->j_task->io_context)
journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
@@ -4737,9 +4696,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
- if (old_opts.s_qf_names[i] &&
- old_opts.s_qf_names[i] != sbi->s_qf_names[i])
- kfree(old_opts.s_qf_names[i]);
+ kfree(old_opts.s_qf_names[i]);
if (enable_quota) {
if (sb_any_quota_suspended(sb))
dquot_resume(sb, -1);
@@ -4768,9 +4725,7 @@ restore_opts:
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
- if (sbi->s_qf_names[i] &&
- old_opts.s_qf_names[i] != sbi->s_qf_names[i])
- kfree(sbi->s_qf_names[i]);
+ kfree(sbi->s_qf_names[i]);
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
#endif
@@ -4835,7 +4790,7 @@ static int ext4_write_dquot(struct dquot *dquot)
struct inode *inode;
inode = dquot_to_inode(dquot);
- handle = ext4_journal_start(inode,
+ handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -4851,7 +4806,7 @@ static int ext4_acquire_dquot(struct dquot *dquot)
int ret, err;
handle_t *handle;
- handle = ext4_journal_start(dquot_to_inode(dquot),
+ handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -4867,7 +4822,7 @@ static int ext4_release_dquot(struct dquot *dquot)
int ret, err;
handle_t *handle;
- handle = ext4_journal_start(dquot_to_inode(dquot),
+ handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle)) {
/* Release dquot anyway to avoid endless cycle in dqput() */
@@ -4883,9 +4838,12 @@ static int ext4_release_dquot(struct dquot *dquot)
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
+ struct super_block *sb = dquot->dq_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
/* Are we journaling quotas? */
- if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
- EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
+ sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
dquot_mark_dquot_dirty(dquot);
return ext4_write_dquot(dquot);
} else {
@@ -4899,7 +4857,7 @@ static int ext4_write_info(struct super_block *sb, int type)
handle_t *handle;
/* Data block + inode block */
- handle = ext4_journal_start(sb->s_root->d_inode, 2);
+ handle = ext4_journal_start(sb->s_root->d_inode, EXT4_HT_QUOTA, 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
@@ -5005,9 +4963,9 @@ static int ext4_enable_quotas(struct super_block *sb)
DQUOT_USAGE_ENABLED);
if (err) {
ext4_warning(sb,
- "Failed to enable quota (type=%d) "
- "tracking. Please run e2fsck to fix.",
- type);
+ "Failed to enable quota tracking "
+ "(type=%d, err=%d). Please run "
+ "e2fsck to fix.", type, err);
return err;
}
}
@@ -5045,7 +5003,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
/* Update modification times of quota files when userspace can
* start looking at them */
- handle = ext4_journal_start(inode, 1);
+ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
if (IS_ERR(handle))
goto out;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3a91ebc2b66f..3a120b277240 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -549,7 +549,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_xattr_block(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- dquot_free_block(inode, 1);
+ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
}
@@ -832,7 +832,8 @@ inserted:
else {
/* The old block is released after updating
the inode. */
- error = dquot_alloc_block(inode, 1);
+ error = dquot_alloc_block(inode,
+ EXT4_C2B(EXT4_SB(sb), 1));
if (error)
goto cleanup;
error = ext4_journal_get_write_access(handle,
@@ -886,17 +887,18 @@ inserted:
(unsigned long long)block);
new_bh = sb_getblk(sb, block);
- if (!new_bh) {
+ if (unlikely(!new_bh)) {
+ error = -ENOMEM;
getblk_failed:
ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA);
- error = -EIO;
goto cleanup;
}
lock_buffer(new_bh);
error = ext4_journal_get_create_access(handle, new_bh);
if (error) {
unlock_buffer(new_bh);
+ error = -EIO;
goto getblk_failed;
}
memcpy(new_bh->b_data, s->base, new_bh->b_size);
@@ -928,7 +930,7 @@ cleanup:
return error;
cleanup_dquot:
- dquot_free_block(inode, 1);
+ dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
goto cleanup;
bad_block:
@@ -1164,17 +1166,10 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
{
handle_t *handle;
int error, retries = 0;
- int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
+ int credits = ext4_jbd2_credits_xattr(inode);
retry:
- /*
- * In case of inline data, we may push out the data to a block,
- * So reserve the journal space first.
- */
- if (ext4_has_inline_data(inode))
- credits += ext4_writepage_trans_blocks(inode) + 1;
-
- handle = ext4_journal_start(inode, credits);
+ handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
} else {
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 69eda787a96a..aa25deb5c6cd 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -125,74 +125,6 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
-extern int ext4_has_inline_data(struct inode *inode);
-extern int ext4_get_inline_size(struct inode *inode);
-extern int ext4_get_max_inline_size(struct inode *inode);
-extern int ext4_find_inline_data_nolock(struct inode *inode);
-extern void ext4_write_inline_data(struct inode *inode,
- struct ext4_iloc *iloc,
- void *buffer, loff_t pos,
- unsigned int len);
-extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
- unsigned int len);
-extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
- unsigned int len);
-extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
-
-extern int ext4_readpage_inline(struct inode *inode, struct page *page);
-extern int ext4_try_to_write_inline_data(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep);
-extern int ext4_write_inline_data_end(struct inode *inode,
- loff_t pos, unsigned len,
- unsigned copied,
- struct page *page);
-extern struct buffer_head *
-ext4_journalled_write_inline_data(struct inode *inode,
- unsigned len,
- struct page *page);
-extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
- struct inode *inode,
- loff_t pos, unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
- unsigned len, unsigned copied,
- struct page *page);
-extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
- struct inode *inode);
-extern int ext4_try_create_inline_dir(handle_t *handle,
- struct inode *parent,
- struct inode *inode);
-extern int ext4_read_inline_dir(struct file *filp,
- void *dirent, filldir_t filldir,
- int *has_inline_data);
-extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
- const struct qstr *d_name,
- struct ext4_dir_entry_2 **res_dir,
- int *has_inline_data);
-extern int ext4_delete_inline_entry(handle_t *handle,
- struct inode *dir,
- struct ext4_dir_entry_2 *de_del,
- struct buffer_head *bh,
- int *has_inline_data);
-extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
-extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
- struct ext4_dir_entry_2 **parent_de,
- int *retval);
-extern int ext4_inline_data_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- int *has_inline);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed);
-extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
-
-extern int ext4_convert_inline_data(struct inode *inode);
-
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
struct inode *dir, const struct qstr *qstr);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index ff3c8439af87..2b6fc131e2ce 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -72,22 +72,22 @@ static int f2fs_write_meta_page(struct page *page,
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- int err;
- wait_on_page_writeback(page);
-
- err = write_meta_page(sbi, page, wbc);
- if (err) {
+ /* Should not write any meta pages, if any IO error occurred */
+ if (wbc->for_reclaim ||
+ is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
+ dec_page_count(sbi, F2FS_DIRTY_META);
wbc->pages_skipped++;
set_page_dirty(page);
+ return AOP_WRITEPAGE_ACTIVATE;
}
- dec_page_count(sbi, F2FS_DIRTY_META);
+ wait_on_page_writeback(page);
- /* In this case, we should not unlock this page */
- if (err != AOP_WRITEPAGE_ACTIVATE)
- unlock_page(page);
- return err;
+ write_meta_page(sbi, page);
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ unlock_page(page);
+ return 0;
}
static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -138,7 +138,10 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
BUG_ON(page->mapping != mapping);
BUG_ON(!PageDirty(page));
clear_page_dirty_for_io(page);
- f2fs_write_meta_page(page, &wbc);
+ if (f2fs_write_meta_page(page, &wbc)) {
+ unlock_page(page);
+ break;
+ }
if (nwritten++ >= nr_to_write)
break;
}
@@ -161,7 +164,6 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(sbi, F2FS_DIRTY_META);
- F2FS_SET_SB_DIRT(sbi);
return 1;
}
return 0;
@@ -216,19 +218,11 @@ retry:
new->ino = ino;
/* add new_oentry into list which is sorted by inode number */
- if (orphan) {
- struct orphan_inode_entry *prev;
-
- /* get previous entry */
- prev = list_entry(orphan->list.prev, typeof(*prev), list);
- if (&prev->list != head)
- /* insert new orphan inode entry */
- list_add(&new->list, &prev->list);
- else
- list_add(&new->list, head);
- } else {
+ if (orphan)
+ list_add(&new->list, this->prev);
+ else
list_add_tail(&new->list, head);
- }
+
sbi->n_orphans++;
out:
mutex_unlock(&sbi->orphan_inode_mutex);
@@ -545,7 +539,7 @@ retry:
/*
* Freeze all the FS-operations for checkpoint.
*/
-void block_operations(struct f2fs_sb_info *sbi)
+static void block_operations(struct f2fs_sb_info *sbi)
{
int t;
struct writeback_control wbc = {
@@ -717,27 +711,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
sbi->alloc_valid_block_count = 0;
/* Here, we only have one bio having CP pack */
- if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
- sbi->sb->s_flags |= MS_RDONLY;
- else
- sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
+ sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
- clear_prefree_segments(sbi);
- F2FS_RESET_SB_DIRT(sbi);
+ if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
+ clear_prefree_segments(sbi);
+ F2FS_RESET_SB_DIRT(sbi);
+ }
}
/*
* We guarantee that this checkpoint procedure should not fail.
*/
-void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
+void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
- if (!blocked) {
- mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
- }
+ mutex_lock(&sbi->cp_mutex);
+ block_operations(sbi);
f2fs_submit_bio(sbi, DATA, true);
f2fs_submit_bio(sbi, NODE, true);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index c8c37307b326..025b9e2f935d 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -183,10 +183,12 @@ static int stat_show(struct seq_file *s, void *v)
mutex_lock(&f2fs_stat_mutex);
list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
+ char devname[BDEVNAME_SIZE];
update_general_status(si->sbi);
- seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
+ seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
+ bdevname(si->sbi->sb->s_bdev, devname), i++);
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 989980e16d0b..a1f38443ecee 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -265,7 +265,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
mutex_unlock_op(sbi, DENTRY_OPS);
}
-void init_dent_inode(struct dentry *dentry, struct page *ipage)
+void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_node *rn;
@@ -274,20 +274,19 @@ void init_dent_inode(struct dentry *dentry, struct page *ipage)
wait_on_page_writeback(ipage);
- /* copy dentry info. to this inode page */
+ /* copy name info. to this inode page */
rn = (struct f2fs_node *)page_address(ipage);
- rn->i.i_namelen = cpu_to_le32(dentry->d_name.len);
- memcpy(rn->i.i_name, dentry->d_name.name, dentry->d_name.len);
+ rn->i.i_namelen = cpu_to_le32(name->len);
+ memcpy(rn->i.i_name, name->name, name->len);
set_page_dirty(ipage);
}
-static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
+static int init_inode_metadata(struct inode *inode,
+ struct inode *dir, const struct qstr *name)
{
- struct inode *dir = dentry->d_parent->d_inode;
-
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
int err;
- err = new_inode_page(inode, dentry);
+ err = new_inode_page(inode, name);
if (err)
return err;
@@ -310,7 +309,7 @@ static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
if (IS_ERR(ipage))
return PTR_ERR(ipage);
set_cold_node(inode, ipage);
- init_dent_inode(dentry, ipage);
+ init_dent_inode(name, ipage);
f2fs_put_page(ipage, 1);
}
if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
@@ -371,7 +370,7 @@ next:
goto next;
}
-int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode)
{
unsigned int bit_pos;
unsigned int level;
@@ -380,17 +379,15 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
f2fs_hash_t dentry_hash;
struct f2fs_dir_entry *de;
unsigned int nbucket, nblock;
- struct inode *dir = dentry->d_parent->d_inode;
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- const char *name = dentry->d_name.name;
- size_t namelen = dentry->d_name.len;
+ size_t namelen = name->len;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
int slots = GET_DENTRY_SLOTS(namelen);
int err = 0;
int i;
- dentry_hash = f2fs_dentry_hash(name, dentry->d_name.len);
+ dentry_hash = f2fs_dentry_hash(name->name, name->len);
level = 0;
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -433,7 +430,7 @@ start:
++level;
goto start;
add_dentry:
- err = init_inode_metadata(inode, dentry);
+ err = init_inode_metadata(inode, dir, name);
if (err)
goto fail;
@@ -442,7 +439,7 @@ add_dentry:
de = &dentry_blk->dentry[bit_pos];
de->hash_code = dentry_hash;
de->name_len = cpu_to_le16(namelen);
- memcpy(dentry_blk->filename[bit_pos], name, namelen);
+ memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode);
for (i = 0; i < slots; i++)
@@ -603,7 +600,7 @@ bool f2fs_empty_dir(struct inode *dir)
static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
unsigned long pos = file->f_pos;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
unsigned long npages = dir_blocks(inode);
unsigned char *types = NULL;
unsigned int bit_pos = 0, start_bit_pos = 0;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c8e2d751ef9c..cc2213afdcc7 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -104,6 +104,20 @@ static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
}
/*
+ * ioctl commands
+ */
+#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS
+#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#endif
+
+/*
* For INODE and NODE manager
*/
#define XATTR_NODE_OFFSET (-1) /*
@@ -141,7 +155,7 @@ struct f2fs_inode_info {
/* Use below internally in f2fs*/
unsigned long flags; /* use to pass per-file flags */
- unsigned long long data_version;/* lastes version of data for fsync */
+ unsigned long long data_version;/* latest version of data for fsync */
atomic_t dirty_dents; /* # of dirty dentry pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
@@ -573,6 +587,14 @@ static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
return atomic_read(&sbi->nr_pages[count_type]);
}
+static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
+{
+ unsigned int pages_per_sec = sbi->segs_per_sec *
+ (1 << sbi->log_blocks_per_seg);
+ return ((get_pages(sbi, block_type) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+}
+
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
block_t ret;
@@ -842,12 +864,12 @@ void f2fs_truncate(struct inode *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
+long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
/*
* inode.c
*/
void f2fs_set_inode_flags(struct inode *);
-struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
@@ -867,12 +889,18 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
-void init_dent_inode(struct dentry *, struct page *);
-int f2fs_add_link(struct dentry *, struct inode *);
+void init_dent_inode(const struct qstr *, struct page *);
+int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);
+static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+{
+ return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
+ inode);
+}
+
/*
* super.c
*/
@@ -896,7 +924,7 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
-int new_inode_page(struct inode *, struct dentry *);
+int new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
@@ -929,8 +957,7 @@ void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
-int write_meta_page(struct f2fs_sb_info *, struct page *,
- struct writeback_control *);
+void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
@@ -963,8 +990,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void block_operations(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, bool, bool);
+void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3191b52aafb0..b7a053d4c6d3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -15,6 +15,7 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/types.h>
+#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
@@ -157,11 +158,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
+ else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
need_cp = true;
- if (!space_for_roll_forward(sbi))
+ else if (!space_for_roll_forward(sbi))
need_cp = true;
- if (need_to_sync_dir(sbi, inode))
+ else if (need_to_sync_dir(sbi, inode))
need_cp = true;
if (need_cp) {
@@ -298,8 +299,6 @@ void f2fs_truncate(struct inode *inode)
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
}
-
- f2fs_balance_fs(F2FS_SB(inode->i_sb));
}
static int f2fs_getattr(struct vfsmount *mnt,
@@ -356,6 +355,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
f2fs_truncate(inode);
+ f2fs_balance_fs(F2FS_SB(inode->i_sb));
}
__setattr_copy(inode, attr);
@@ -387,12 +387,17 @@ const struct inode_operations f2fs_file_inode_operations = {
static void fill_zero(struct inode *inode, pgoff_t index,
loff_t start, loff_t len)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *page;
if (!len)
return;
+ f2fs_balance_fs(sbi);
+
+ mutex_lock_op(sbi, DATA_NEW);
page = get_new_data_page(inode, index, false);
+ mutex_unlock_op(sbi, DATA_NEW);
if (!IS_ERR(page)) {
wait_on_page_writeback(page);
@@ -630,6 +635,23 @@ out:
}
}
+#ifdef CONFIG_COMPAT
+long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case F2FS_IOC32_GETFLAGS:
+ cmd = F2FS_IOC_GETFLAGS;
+ break;
+ case F2FS_IOC32_SETFLAGS:
+ cmd = F2FS_IOC_SETFLAGS;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
const struct file_operations f2fs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -641,6 +663,9 @@ const struct file_operations f2fs_file_operations = {
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
.unlocked_ioctl = f2fs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = f2fs_compat_ioctl,
+#endif
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index c386910dacc5..94b8a0c48453 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -44,10 +44,10 @@ static int gc_thread_func(void *data)
if (kthread_should_stop())
break;
- f2fs_balance_fs(sbi);
-
- if (!test_opt(sbi, BG_GC))
+ if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
+ wait_ms = GC_THREAD_MAX_SLEEP_TIME;
continue;
+ }
/*
* [GC triggering condition]
@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
sbi->bg_gc++;
- if (f2fs_gc(sbi) == GC_NONE)
+ /* if return value is not zero, no victim was selected */
+ if (f2fs_gc(sbi))
wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -90,7 +91,10 @@ static int gc_thread_func(void *data)
int start_gc_thread(struct f2fs_sb_info *sbi)
{
struct f2fs_gc_kthread *gc_th;
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ if (!test_opt(sbi, BG_GC))
+ return 0;
gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th)
return -ENOMEM;
@@ -98,9 +102,10 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
- GC_THREAD_NAME);
+ "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
kfree(gc_th);
+ sbi->gc_thread = NULL;
return -ENOMEM;
}
return 0;
@@ -141,6 +146,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
struct victim_sel_policy *p)
{
+ /* SSR allocates in a segment unit */
+ if (p->alloc_mode == SSR)
+ return 1 << sbi->log_blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
else if (p->gc_mode == GC_CB)
@@ -356,7 +364,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
sentry = get_seg_entry(sbi, segno);
ret = f2fs_test_bit(offset, sentry->cur_valid_map);
mutex_unlock(&sit_i->sentry_lock);
- return ret ? GC_OK : GC_NEXT;
+ return ret;
}
/*
@@ -364,7 +372,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* On validity, copy that node with cold status, otherwise (invalid node)
* ignore that.
*/
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
bool initial = true;
@@ -376,21 +384,12 @@ next_step:
for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
- int err;
- /*
- * It makes sure that free segments are able to write
- * all the dirty node pages before CP after this CP.
- * So let's check the space of dirty node pages.
- */
- if (should_do_checkpoint(sbi)) {
- mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
- return GC_BLOCKED;
- }
+ /* stop BG_GC if there are not enough free sections. */
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ return;
- err = check_valid_map(sbi, segno, off);
- if (err == GC_NEXT)
+ if (check_valid_map(sbi, segno, off) == 0)
continue;
if (initial) {
@@ -420,7 +419,6 @@ next_step:
};
sync_node_pages(sbi, 0, &wbc);
}
- return GC_DONE;
}
/*
@@ -463,13 +461,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
- return GC_NEXT;
+ return 0;
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
f2fs_put_page(node_page, 1);
- return GC_NEXT;
+ return 0;
}
*nofs = ofs_of_node(node_page);
@@ -477,8 +475,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
f2fs_put_page(node_page, 1);
if (source_blkaddr != blkaddr)
- return GC_NEXT;
- return GC_OK;
+ return 0;
+ return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
@@ -519,13 +517,13 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct list_head *ilist, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
struct f2fs_summary *entry;
block_t start_addr;
- int err, off;
+ int off;
int phase = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -539,20 +537,11 @@ next_step:
unsigned int ofs_in_node, nofs;
block_t start_bidx;
- /*
- * It makes sure that free segments are able to write
- * all the dirty node pages before CP after this CP.
- * So let's check the space of dirty node pages.
- */
- if (should_do_checkpoint(sbi)) {
- mutex_lock(&sbi->cp_mutex);
- block_operations(sbi);
- err = GC_BLOCKED;
- goto stop;
- }
+ /* stop BG_GC if there are not enough free sections. */
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ return;
- err = check_valid_map(sbi, segno, off);
- if (err == GC_NEXT)
+ if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
@@ -561,8 +550,7 @@ next_step:
}
/* Get an inode by ino with checking validity */
- err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
- if (err == GC_NEXT)
+ if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
continue;
if (phase == 1) {
@@ -574,7 +562,7 @@ next_step:
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 2) {
- inode = f2fs_iget_nowait(sb, dni.ino);
+ inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode))
continue;
@@ -602,11 +590,9 @@ next_iput:
}
if (++phase < 4)
goto next_step;
- err = GC_DONE;
-stop:
+
if (gc_type == FG_GC)
f2fs_submit_bio(sbi, DATA, true);
- return err;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -620,17 +606,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
struct list_head *ilist, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
- int ret = GC_DONE;
/* read segment summary of victim */
sum_page = get_sum_page(sbi, segno);
if (IS_ERR(sum_page))
- return GC_ERROR;
+ return;
/*
* CP needs to lock sum_page. In this time, we don't need
@@ -642,17 +627,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
switch (GET_SUM_TYPE((&sum->footer))) {
case SUM_TYPE_NODE:
- ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+ gc_node_segment(sbi, sum->entries, segno, gc_type);
break;
case SUM_TYPE_DATA:
- ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+ gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
break;
}
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
stat_inc_call_count(sbi->stat_info);
f2fs_put_page(sum_page, 0);
- return ret;
}
int f2fs_gc(struct f2fs_sb_info *sbi)
@@ -660,40 +644,38 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
struct list_head ilist;
unsigned int segno, i;
int gc_type = BG_GC;
- int gc_status = GC_NONE;
+ int nfree = 0;
+ int ret = -1;
INIT_LIST_HEAD(&ilist);
gc_more:
if (!(sbi->sb->s_flags & MS_ACTIVE))
goto stop;
- if (has_not_enough_free_secs(sbi))
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
gc_type = FG_GC;
if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
goto stop;
+ ret = 0;
- for (i = 0; i < sbi->segs_per_sec; i++) {
- /*
- * do_garbage_collect will give us three gc_status:
- * GC_ERROR, GC_DONE, and GC_BLOCKED.
- * If GC is finished uncleanly, we have to return
- * the victim to dirty segment list.
- */
- gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
- if (gc_status != GC_DONE)
- break;
- }
- if (has_not_enough_free_secs(sbi)) {
- write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
- if (has_not_enough_free_secs(sbi))
- goto gc_more;
- }
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ nfree++;
+
+ if (has_not_enough_free_secs(sbi, nfree))
+ goto gc_more;
+
+ if (gc_type == FG_GC)
+ write_checkpoint(sbi, false);
stop:
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&ilist);
- return gc_status;
+ return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
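Condensed, the reworked f2fs_gc() above now loops roughly as follows (a sketch, not the literal code; locking, the MS_ACTIVE check and error handling omitted):

	int nfree = 0, gc_type = BG_GC;

	do {
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
			gc_type = FG_GC;		/* escalate to foreground GC */
		if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
			break;				/* no victim left */
		for (i = 0; i < sbi->segs_per_sec; i++)
			do_garbage_collect(sbi, segno + i, &ilist, gc_type);
		if (gc_type == FG_GC &&
		    get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			nfree++;			/* credit the section just emptied */
	} while (has_not_enough_free_secs(sbi, nfree));

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);		/* reclaim the prefree segments */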
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b026d9354ccd..30b2db003acd 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define GC_THREAD_NAME "f2fs_gc_task"
#define GC_THREAD_MIN_WB_PAGES 1 /*
* a threshold to determine
* whether IO subsystem is idle
@@ -23,15 +22,6 @@
/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH 20
-enum {
- GC_NONE = 0,
- GC_ERROR,
- GC_OK,
- GC_NEXT,
- GC_BLOCKED,
- GC_DONE,
-};
-
struct f2fs_gc_kthread {
struct task_struct *f2fs_gc_task;
wait_queue_head_t gc_wait_queue_head;
@@ -104,14 +94,3 @@ static inline int is_idle(struct f2fs_sb_info *sbi)
struct request_list *rl = &q->root_rl;
return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
-
-static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
-{
- unsigned int pages_per_sec = sbi->segs_per_sec *
- (1 << sbi->log_blocks_per_seg);
- int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
- int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
- return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
-}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 794241777322..ddae412d30c8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -16,11 +16,6 @@
#include "f2fs.h"
#include "node.h"
-struct f2fs_iget_args {
- u64 ino;
- int on_free;
-};
-
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
@@ -40,34 +35,6 @@ void f2fs_set_inode_flags(struct inode *inode)
inode->i_flags |= S_DIRSYNC;
}
-static int f2fs_iget_test(struct inode *inode, void *data)
-{
- struct f2fs_iget_args *args = data;
-
- if (inode->i_ino != args->ino)
- return 0;
- if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
- args->on_free = 1;
- return 0;
- }
- return 1;
-}
-
-struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
-{
- struct f2fs_iget_args args = {
- .ino = ino,
- .on_free = 0
- };
- struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);
-
- if (inode)
- return inode;
- if (!args.on_free)
- return f2fs_iget(sb, ino);
- return ERR_PTR(-ENOENT);
-}
-
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -100,6 +67,10 @@ static int do_read_inode(struct inode *inode)
inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
inode->i_generation = le32_to_cpu(ri->i_generation);
+ if (ri->i_addr[0])
+ inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+ else
+ inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -203,6 +174,20 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
+
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ if (old_valid_dev(inode->i_rdev)) {
+ ri->i_addr[0] =
+ cpu_to_le32(old_encode_dev(inode->i_rdev));
+ ri->i_addr[1] = 0;
+ } else {
+ ri->i_addr[0] = 0;
+ ri->i_addr[1] =
+ cpu_to_le32(new_encode_dev(inode->i_rdev));
+ ri->i_addr[2] = 0;
+ }
+ }
+
set_cold_node(inode, node_page);
set_page_dirty(node_page);
}
@@ -260,6 +245,7 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
+ sb_start_intwrite(inode->i_sb);
set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
i_size_write(inode, 0);
@@ -267,6 +253,7 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_truncate(inode);
remove_inode_page(inode);
+ sb_end_intwrite(inode->i_sb);
no_delete:
clear_inode(inode);
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9bda63c9c166..e275218904ed 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -104,7 +104,7 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
f2fs_put_page(page, 1);
continue;
}
- page_cache_release(page);
+ f2fs_put_page(page, 0);
}
}
@@ -660,7 +660,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
int err = 0, cont = 1;
int level, offset[4], noffset[4];
- unsigned int nofs;
+ unsigned int nofs = 0;
struct f2fs_node *rn;
struct dnode_of_data dn;
struct page *page;
@@ -780,7 +780,7 @@ int remove_inode_page(struct inode *inode)
return 0;
}
-int new_inode_page(struct inode *inode, struct dentry *dentry)
+int new_inode_page(struct inode *inode, const struct qstr *name)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *page;
@@ -790,7 +790,7 @@ int new_inode_page(struct inode *inode, struct dentry *dentry)
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
mutex_lock_op(sbi, NODE_NEW);
page = new_node_page(&dn, 0);
- init_dent_inode(dentry, page);
+ init_dent_inode(name, page);
mutex_unlock_op(sbi, NODE_NEW);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -874,15 +874,11 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
return;
if (read_node_page(apage, READA))
- goto unlock_out;
+ unlock_page(apage);
- page_cache_release(apage);
- return;
-
-unlock_out:
- unlock_page(apage);
release_out:
- page_cache_release(apage);
+ f2fs_put_page(apage, 0);
+ return;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1139,7 +1135,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
/* First check balancing cached NAT entries */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
- write_checkpoint(sbi, false, false);
+ write_checkpoint(sbi, false);
return 0;
}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index f42e4060b399..b235215ac138 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -42,7 +42,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
{
struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
struct f2fs_inode *raw_inode = &(raw_node->i);
- struct dentry dent, parent;
+ struct qstr name;
struct f2fs_dir_entry *de;
struct page *page;
struct inode *dir;
@@ -57,17 +57,15 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
goto out;
}
- parent.d_inode = dir;
- dent.d_parent = &parent;
- dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
- dent.d_name.name = raw_inode->i_name;
+ name.len = le32_to_cpu(raw_inode->i_namelen);
+ name.name = raw_inode->i_name;
- de = f2fs_find_entry(dir, &dent.d_name, &page);
+ de = f2fs_find_entry(dir, &name, &page);
if (de) {
kunmap(page);
f2fs_put_page(page, 0);
} else {
- err = f2fs_add_link(&dent, inode);
+ err = __f2fs_add_link(dir, &name, inode);
}
iput(dir);
out:
@@ -226,7 +224,7 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
f2fs_put_page(node_page, 1);
/* Deallocate previous index in the node page */
- inode = f2fs_iget_nowait(sbi->sb, ino);
+ inode = f2fs_iget(sbi->sb, ino);
if (IS_ERR(inode))
return;
@@ -373,5 +371,5 @@ void recover_fsync_data(struct f2fs_sb_info *sbi)
out:
destroy_fsync_dnodes(sbi, &inode_list);
kmem_cache_destroy(fsync_entry_slab);
- write_checkpoint(sbi, false, false);
+ write_checkpoint(sbi, false);
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4b0099066582..777f17e496e6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -29,7 +29,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi)) {
+ if (has_not_enough_free_secs(sbi, 0)) {
mutex_lock(&sbi->gc_mutex);
f2fs_gc(sbi);
}
@@ -308,7 +308,7 @@ static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
* If there is not enough reserved sections,
* we should not reuse prefree segments.
*/
- if (has_not_enough_free_secs(sbi))
+ if (has_not_enough_free_secs(sbi, 0))
return NULL_SEGNO;
/*
@@ -536,6 +536,23 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
}
}
+static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+
+ if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+ return v_ops->get_victim(sbi,
+ &(curseg)->next_segno, BG_GC, type, SSR);
+
+ /* For data segments, let's do SSR more intensively */
+ for (; type >= CURSEG_HOT_DATA; type--)
+ if (v_ops->get_victim(sbi, &(curseg)->next_segno,
+ BG_GC, type, SSR))
+ return 1;
+ return 0;
+}
+
/*
* flush out current segment and replace it with new segment
* This function should be returned with success, otherwise BUG
@@ -600,6 +617,7 @@ static void f2fs_end_io_write(struct bio *bio, int err)
if (page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
+ p->sbi->sb->s_flags |= MS_RDONLY;
}
end_page_writeback(page);
dec_page_count(p->sbi, F2FS_WRITEBACK);
@@ -815,15 +833,10 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
mutex_unlock(&curseg->curseg_mutex);
}
-int write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
- struct writeback_control *wbc)
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
- if (wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE;
-
set_page_writeback(page);
submit_write_page(sbi, page, page->index, META);
- return 0;
}
void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 66a288a52fd3..552dadbb2327 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -450,29 +450,16 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
return (free_sections(sbi) < overprovision_sections(sbi));
}
-static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- return DIRTY_I(sbi)->v_ops->get_victim(sbi,
- &(curseg)->next_segno, BG_GC, type, SSR);
-}
-
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
-{
- unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
- sbi->segs_per_sec;
- int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
- int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+ int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
if (sbi->por_doing)
return false;
- if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi)))
- return true;
- return false;
+ return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
+ reserved_sections(sbi)));
}
static inline int utilization(struct f2fs_sb_info *sbi)
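As a rough illustration of the new has_not_enough_free_secs() threshold: with one section's worth of dirty node pages, one of dirty dentry pages and reserved_sections() = 6, the filesystem wants free_sections + freed > 1 + 2*1 + 6 = 9, so foreground GC can pass the sections it has just emptied as freed and stop once the combined count clears the bar. A self-contained toy calculation (made-up numbers, plain C):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int node_secs = 1, dent_secs = 1, reserved = 6;
	int need = node_secs + 2 * dent_secs + reserved;	/* 9 */
	int free_secs = 8, freed = 2;	/* FG_GC just emptied two sections */
	bool short_of_space = (free_secs + freed) <= need;

	printf("short of space: %s\n", short_of_space ? "yes" : "no");	/* "no" */
	return 0;
}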
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 37fad04c8669..8c117649a035 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -112,7 +112,7 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
- write_checkpoint(sbi, false, true);
+ write_checkpoint(sbi, true);
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -136,13 +136,29 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
return 0;
if (sync)
- write_checkpoint(sbi, false, false);
+ write_checkpoint(sbi, false);
else
f2fs_balance_fs(sbi);
return 0;
}
+static int f2fs_freeze(struct super_block *sb)
+{
+ int err;
+
+ if (sb->s_flags & MS_RDONLY)
+ return 0;
+
+ err = f2fs_sync_fs(sb, 1);
+ return err;
+}
+
+static int f2fs_unfreeze(struct super_block *sb)
+{
+ return 0;
+}
+
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -198,7 +214,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noacl");
#endif
if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
- seq_puts(seq, ",disable_ext_indentify");
+ seq_puts(seq, ",disable_ext_identify");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
@@ -213,6 +229,8 @@ static struct super_operations f2fs_sops = {
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
.sync_fs = f2fs_sync_fs,
+ .freeze_fs = f2fs_freeze,
+ .unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
};
@@ -366,14 +384,23 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
+ /* Currently, support only 4KB page cache size */
+ if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid page_cache_size (%lu), supports only 4KB\n",
+ PAGE_CACHE_SIZE);
+ return 1;
+ }
+
/* Currently, support only 4KB block size */
blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
- if (blocksize != PAGE_CACHE_SIZE) {
+ if (blocksize != F2FS_BLKSIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid blocksize (%u), supports only 4KB\n",
blocksize);
return 1;
}
+
if (le32_to_cpu(raw_super->log_sectorsize) !=
F2FS_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
@@ -387,10 +414,11 @@ static int sanity_check_raw_super(struct super_block *sb,
return 0;
}
-static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
- struct f2fs_checkpoint *ckpt)
+static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -401,6 +429,11 @@ static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
if (fsmeta >= total)
return 1;
+
+ if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ return 1;
+ }
return 0;
}
@@ -429,6 +462,32 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
}
+static int validate_superblock(struct super_block *sb,
+ struct f2fs_super_block **raw_super,
+ struct buffer_head **raw_super_buf, sector_t block)
+{
+ const char *super = (block == 0 ? "first" : "second");
+
+ /* read f2fs raw super block */
+ *raw_super_buf = sb_bread(sb, block);
+ if (!*raw_super_buf) {
+ f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
+ super);
+ return 1;
+ }
+
+ *raw_super = (struct f2fs_super_block *)
+ ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
+
+ /* sanity checking of raw super */
+ if (!sanity_check_raw_super(sb, *raw_super))
+ return 0;
+
+ f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+ "in %s superblock", super);
+ return 1;
+}
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
@@ -449,16 +508,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- /* read f2fs raw super block */
- raw_super_buf = sb_bread(sb, 0);
- if (!raw_super_buf) {
- err = -EIO;
- f2fs_msg(sb, KERN_ERR, "unable to read superblock");
- goto free_sbi;
+ if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
+ brelse(raw_super_buf);
+ if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
+ goto free_sb_buf;
}
- raw_super = (struct f2fs_super_block *)
- ((char *)raw_super_buf->b_data + F2FS_SUPER_OFFSET);
-
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
@@ -474,12 +528,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (parse_options(sb, sbi, (char *)data))
goto free_sb_buf;
- /* sanity checking of raw super */
- if (sanity_check_raw_super(sb, raw_super)) {
- f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
- goto free_sb_buf;
- }
-
sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -525,7 +573,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
/* sanity checking of checkpoint */
err = -EINVAL;
- if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
+ if (sanity_check_ckpt(sbi)) {
f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
goto free_cp;
}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 58bf744dbf39..165012ef363a 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -698,7 +698,7 @@ out:
static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
}
@@ -779,7 +779,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg;
int short_only, both;
@@ -819,7 +819,7 @@ FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct compat_dirent __user *d1 = compat_ptr(arg);
int short_only, both;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 12701a567752..e9cc3f0d58e2 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -95,6 +95,8 @@ struct msdos_sb_info {
spinlock_t dir_hash_lock;
struct hlist_head dir_hashtable[FAT_HASH_SIZE];
+
+ unsigned int dirty; /* fs state before mount */
};
#define FAT_CACHE_VALID 0 /* special case for valid cache */
diff --git a/fs/fat/file.c b/fs/fat/file.c
index a62e0ecbe2db..3978f8ca1823 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -32,7 +32,7 @@ static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int is_dir = S_ISDIR(inode->i_mode);
u32 attr, oldattr;
@@ -116,7 +116,7 @@ out:
long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
u32 __user *user_attr = (u32 __user *)arg;
switch (cmd) {
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index f8f491677a4a..acf6e479b443 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
- struct hlist_node *_p;
struct msdos_inode_info *i;
struct inode *inode = NULL;
spin_lock(&sbi->inode_hash_lock);
- hlist_for_each_entry(i, _p, head, i_fat_hash) {
+ hlist_for_each_entry(i, head, i_fat_hash) {
BUG_ON(i->vfs_inode.i_sb != sb);
if (i->i_pos != i_pos)
continue;
@@ -488,10 +487,59 @@ static void fat_evict_inode(struct inode *inode)
fat_detach(inode);
}
+static void fat_set_state(struct super_block *sb,
+ unsigned int set, unsigned int force)
+{
+ struct buffer_head *bh;
+ struct fat_boot_sector *b;
+ struct msdos_sb_info *sbi = sb->s_fs_info;
+
+ /* do not change anything if mounted read only */
+ if ((sb->s_flags & MS_RDONLY) && !force)
+ return;
+
+ /* do not change state if fs was dirty */
+ if (sbi->dirty) {
+ /* warn only on set (mount). */
+ if (set)
+ fat_msg(sb, KERN_WARNING, "Volume was not properly "
+ "unmounted. Some data may be corrupt. "
+ "Please run fsck.");
+ return;
+ }
+
+ bh = sb_bread(sb, 0);
+ if (bh == NULL) {
+ fat_msg(sb, KERN_ERR, "unable to read boot sector "
+ "to mark fs as dirty");
+ return;
+ }
+
+ b = (struct fat_boot_sector *) bh->b_data;
+
+ if (sbi->fat_bits == 32) {
+ if (set)
+ b->fat32.state |= FAT_STATE_DIRTY;
+ else
+ b->fat32.state &= ~FAT_STATE_DIRTY;
+ } else /* fat 16 and 12 */ {
+ if (set)
+ b->fat16.state |= FAT_STATE_DIRTY;
+ else
+ b->fat16.state &= ~FAT_STATE_DIRTY;
+ }
+
+ mark_buffer_dirty(bh);
+ sync_dirty_buffer(bh);
+ brelse(bh);
+}
+
static void fat_put_super(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ fat_set_state(sb, 0, 0);
+
iput(sbi->fsinfo_inode);
iput(sbi->fat_inode);
@@ -566,8 +614,18 @@ static void __exit fat_destroy_inodecache(void)
static int fat_remount(struct super_block *sb, int *flags, char *data)
{
+ int new_rdonly;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
*flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+
+ /* make sure we update state on remount. */
+ new_rdonly = *flags & MS_RDONLY;
+ if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
+ if (new_rdonly)
+ fat_set_state(sb, 0, 0);
+ else
+ fat_set_state(sb, 1, 1);
+ }
return 0;
}
@@ -1298,17 +1356,17 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sbi->prev_free = FAT_START_ENT;
sb->s_maxbytes = 0xffffffff;
- if (!sbi->fat_length && b->fat32_length) {
+ if (!sbi->fat_length && b->fat32.length) {
struct fat_boot_fsinfo *fsinfo;
struct buffer_head *fsinfo_bh;
/* Must be FAT32 */
sbi->fat_bits = 32;
- sbi->fat_length = le32_to_cpu(b->fat32_length);
- sbi->root_cluster = le32_to_cpu(b->root_cluster);
+ sbi->fat_length = le32_to_cpu(b->fat32.length);
+ sbi->root_cluster = le32_to_cpu(b->fat32.root_cluster);
/* MC - if info_sector is 0, don't multiply by 0 */
- sbi->fsinfo_sector = le16_to_cpu(b->info_sector);
+ sbi->fsinfo_sector = le16_to_cpu(b->fat32.info_sector);
if (sbi->fsinfo_sector == 0)
sbi->fsinfo_sector = 1;
@@ -1362,6 +1420,12 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
if (sbi->fat_bits != 32)
sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
+ /* some OSes set FAT_STATE_DIRTY and clean it on unmount. */
+ if (sbi->fat_bits == 32)
+ sbi->dirty = b->fat32.state & FAT_STATE_DIRTY;
+ else /* fat 16 or 12 */
+ sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
+
/* check that FAT table does not overflow */
fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
@@ -1456,6 +1520,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
"the device does not support discard");
}
+ fat_set_state(sb, 1, 0);
return 0;
out_invalid:
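Taken together, the fat_set_state() calls added in this file give the dirty flag a simple lifecycle; a sketch of the transitions (not literal code):

	/* mount: remember the on-disk flag; if it was already set, only warn,
	 * otherwise mark the volume dirty */
	sbi->dirty = b->fat32.state & FAT_STATE_DIRTY;	/* b->fat16.state on FAT12/16 */
	fat_set_state(sb, 1, 0);

	/* clean unmount or remount read-only: clear the flag again */
	fat_set_state(sb, 0, 0);

	/* remount read-write: set it even though MS_RDONLY is still visible
	 * on the superblock, hence the force argument */
	fat_set_state(sb, 1, 1);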
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index ef4b5faba87b..499c10438ca2 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct hlist_head *head;
- struct hlist_node *_p;
struct msdos_inode_info *i;
struct inode *inode = NULL;
head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
spin_lock(&sbi->dir_hash_lock);
- hlist_for_each_entry(i, _p, head, i_dir_hash) {
+ hlist_for_each_entry(i, head, i_dir_hash) {
BUG_ON(i->vfs_inode.i_sb != sb);
if (i->i_logstart != i_logstart)
continue;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 71a600a19f06..6599222536eb 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -30,7 +30,7 @@
static int setfl(int fd, struct file * filp, unsigned long arg)
{
- struct inode * inode = filp->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(filp);
int error = 0;
/*
diff --git a/fs/file_table.c b/fs/file_table.c
index de9e9653d611..aa07d3684a2e 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -94,8 +94,8 @@ int proc_nr_files(ctl_table *table, int write,
#endif
/* Find an unused file structure and return a pointer to it.
- * Returns NULL, if there are no more free file structures or
- * we run out of memory.
+ * Returns an error pointer if some error happened, e.g. we are over the
+ * file structures limit, ran out of memory or the operation is not permitted.
*
* Be very careful using this. You are responsible for
* getting write access to any mount that you might assign
@@ -107,7 +107,8 @@ struct file *get_empty_filp(void)
{
const struct cred *cred = current_cred();
static long old_max;
- struct file * f;
+ struct file *f;
+ int error;
/*
* Privileged users can go above max_files
@@ -122,13 +123,16 @@ struct file *get_empty_filp(void)
}
f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
- if (f == NULL)
- goto fail;
+ if (unlikely(!f))
+ return ERR_PTR(-ENOMEM);
percpu_counter_inc(&nr_files);
f->f_cred = get_cred(cred);
- if (security_file_alloc(f))
- goto fail_sec;
+ error = security_file_alloc(f);
+ if (unlikely(error)) {
+ file_free(f);
+ return ERR_PTR(error);
+ }
INIT_LIST_HEAD(&f->f_u.fu_list);
atomic_long_set(&f->f_count, 1);
@@ -144,12 +148,7 @@ over:
pr_info("VFS: file-max limit %lu reached\n", get_max_files());
old_max = get_nr_files();
}
- goto fail;
-
-fail_sec:
- file_free(f);
-fail:
- return NULL;
+ return ERR_PTR(-ENFILE);
}
/**
@@ -173,8 +172,8 @@ struct file *alloc_file(struct path *path, fmode_t mode,
struct file *file;
file = get_empty_filp();
- if (!file)
- return NULL;
+ if (IS_ERR(file))
+ return file;
file->f_path = *path;
file->f_mapping = path->dentry->d_inode->i_mapping;
@@ -447,7 +446,7 @@ void mark_files_ro(struct super_block *sb)
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
- if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
+ if (!S_ISREG(file_inode(f)->i_mode))
continue;
if (!file_count(f))
continue;
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index bd447e88f208..664b07a53870 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -237,7 +237,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
static int
vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
{
- struct inode *ip = fp->f_path.dentry->d_inode;
+ struct inode *ip = file_inode(fp);
struct super_block *sbp = ip->i_sb;
u_long bsize = sbp->s_blocksize;
u_long page, npages, block, pblocks, nblocks, offset;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 310972b72a66..21f46fb3a101 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -318,8 +318,14 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
- if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
- return inode->i_sb->s_op->write_inode(inode, wbc);
+ int ret;
+
+ if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
+ trace_writeback_write_inode_start(inode, wbc);
+ ret = inode->i_sb->s_op->write_inode(inode, wbc);
+ trace_writeback_write_inode(inode, wbc);
+ return ret;
+ }
return 0;
}
@@ -450,6 +456,8 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
WARN_ON(!(inode->i_state & I_SYNC));
+ trace_writeback_single_inode_start(inode, wbc, nr_to_write);
+
ret = do_writepages(mapping, wbc);
/*
@@ -1150,8 +1158,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* dirty the inode itself
*/
if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+ trace_writeback_dirty_inode_start(inode, flags);
+
if (sb->s_op->dirty_inode)
sb->s_op->dirty_inode(inode, flags);
+
+ trace_writeback_dirty_inode(inode, flags);
}
/*
@@ -1332,47 +1344,43 @@ void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
EXPORT_SYMBOL(writeback_inodes_sb);
/**
- * writeback_inodes_sb_if_idle - start writeback if none underway
+ * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
* @sb: the superblock
- * @reason: reason why some writeback work was initiated
+ * @nr: the number of pages to write
+ * @reason: the reason for writeback
*
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
*/
-int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
+int try_to_writeback_inodes_sb_nr(struct super_block *sb,
+ unsigned long nr,
+ enum wb_reason reason)
{
- if (!writeback_in_progress(sb->s_bdi)) {
- down_read(&sb->s_umount);
- writeback_inodes_sb(sb, reason);
- up_read(&sb->s_umount);
+ if (writeback_in_progress(sb->s_bdi))
return 1;
- } else
+
+ if (!down_read_trylock(&sb->s_umount))
return 0;
+
+ writeback_inodes_sb_nr(sb, nr, reason);
+ up_read(&sb->s_umount);
+ return 1;
}
-EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
/**
- * writeback_inodes_sb_nr_if_idle - start writeback if none underway
+ * try_to_writeback_inodes_sb - try to start writeback if none underway
* @sb: the superblock
- * @nr: the number of pages to write
* @reason: reason why some writeback work was initiated
*
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Implemented via try_to_writeback_inodes_sb_nr().
* Returns 1 if writeback was started, 0 if not.
*/
-int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
- unsigned long nr,
- enum wb_reason reason)
+int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
- if (!writeback_in_progress(sb->s_bdi)) {
- down_read(&sb->s_umount);
- writeback_inodes_sb_nr(sb, nr, reason);
- up_read(&sb->s_umount);
- return 1;
- } else
- return 0;
+ return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
-EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb);
/**
* sync_inodes_sb - sync sb inode pages
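A hedged usage sketch for the renamed helper above (the surrounding filesystem code and WB_REASON_FS_FREE_SPACE, one of the existing wb_reason values, are assumed):

	/* Opportunistically flush this sb's dirty inodes. Unlike the old
	 * *_if_idle variants, the helper only trylocks s_umount, so it is
	 * safe to call while holding locks that an unmount might want. */
	if (!try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE)) {
		/* returned 0: s_umount was contended and nothing was queued;
		 * typical callers simply retry on a later allocation */
	}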
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 8dcb114758e3..e2cba1f60c21 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
struct fscache_cookie *cookie)
{
struct fscache_object *object;
- struct hlist_node *_n;
int ret;
_enter("%p,%p{%s}", cache, cookie, cookie->def->name);
spin_lock(&cookie->lock);
- hlist_for_each_entry(object, _n, &cookie->backing_objects,
+ hlist_for_each_entry(object, &cookie->backing_objects,
cookie_link) {
if (object->cache == cache)
goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
{
struct fscache_object *p;
struct fscache_cache *cache = object->cache;
- struct hlist_node *_n;
int ret;
_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
/* there may be multiple initial creations of this object, but we only
* want one */
ret = -EEXIST;
- hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
+ hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
if (p->cache == object->cache) {
if (p->state >= FSCACHE_OBJECT_DYING)
ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
/* pin the parent object */
spin_lock_nested(&cookie->parent->lock, 1);
- hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
+ hlist_for_each_entry(p, &cookie->parent->backing_objects,
cookie_link) {
if (p->cache == object->cache) {
if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
struct fscache_object *object;
- struct hlist_node *_p;
fscache_stat(&fscache_n_updates);
@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
spin_lock(&cookie->lock);
/* update the index entry on disk in each cache backing this cookie */
- hlist_for_each_entry(object, _p,
+ hlist_for_each_entry(object,
&cookie->backing_objects, cookie_link) {
fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
}
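The hlist_for_each_entry() conversions here and in fs/fat all follow the same shape: the macro now takes only the typed cursor, the bucket head and the member name, so the separate struct hlist_node iterator goes away. A minimal sketch, assuming the usual kernel list types:

	struct msdos_inode_info *i;

	/* old form: hlist_for_each_entry(i, _p, head, i_fat_hash), with an
	 * extra "struct hlist_node *_p" cursor that is no longer needed */
	hlist_for_each_entry(i, head, i_fat_hash) {
		if (i->i_pos == i_pos)
			break;	/* found the cached inode */
	}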
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 75a20c092dd4..b7978b9f75ef 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -23,7 +23,7 @@ static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
{
struct fuse_conn *fc;
mutex_lock(&fuse_mutex);
- fc = file->f_path.dentry->d_inode->i_private;
+ fc = file_inode(file)->i_private;
if (fc)
fc = fuse_conn_get(fc);
mutex_unlock(&fuse_mutex);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 85065221a58a..ff15522481d4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1325,7 +1325,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
int plus, err;
size_t nbytes;
struct page *page;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
u64 attr_version = 0;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 01353ed75750..df00993ed108 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -679,7 +679,7 @@ static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
if (*max_len < len) {
*max_len = len;
- return 255;
+ return FILEID_INVALID;
}
nodeid = get_fuse_inode(inode)->nodeid;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index f850020ad906..f69ac0af5496 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -237,7 +237,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
return -EINVAL;
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
return value ? -EACCES : 0;
- if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index df686d13a7d2..5e83657f046e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1099,7 +1099,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
if (error)
return error;
- error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
return error;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 7179478e5a28..c3e82bd23179 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1849,7 +1849,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (!ht)
return -ENOMEM;
- error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
goto out;
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 4767774a5f3e..9973df4ff565 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -37,10 +37,10 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
*len = GFS2_LARGE_FH_SIZE;
- return 255;
+ return FILEID_INVALID;
} else if (*len < GFS2_SMALL_FH_SIZE) {
*len = GFS2_SMALL_FH_SIZE;
- return 255;
+ return FILEID_INVALID;
}
fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 2687f50d98cb..019f45e45097 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -157,7 +157,7 @@ static const u32 gfs2_to_fsflags[32] = {
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
@@ -217,7 +217,7 @@ void gfs2_set_inode_flags(struct inode *inode)
*/
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *bh;
@@ -293,7 +293,7 @@ out_drop_write:
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
u32 fsflags, gfsflags;
if (get_user(fsflags, ptr))
@@ -336,7 +336,7 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
- struct inode *inode = filep->f_dentry->d_inode;
+ struct inode *inode = file_inode(filep);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
@@ -386,7 +386,7 @@ static int gfs2_allocate_page_backing(struct page *page)
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned long last_index;
@@ -673,8 +673,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct file *file = iocb->ki_filp;
size_t writesize = iov_length(iov, nr_segs);
- struct dentry *dentry = file->f_dentry;
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(file_inode(file));
int ret;
ret = gfs2_rs_alloc(ip);
@@ -772,7 +771,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -938,7 +937,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct gfs2_file *fp = file->private_data;
struct gfs2_holder *fl_gh = &fp->f_fl_gh;
- struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
+ struct gfs2_inode *ip = GFS2_I(file_inode(file));
struct gfs2_glock *gl;
unsigned int state;
int flags;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78d4184ffc7d..444b6503ebc4 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -322,8 +322,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
break;
};
- ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
- ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
+ i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
+ i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index e2601ba38ef5..156e42ec84ea 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -392,7 +392,6 @@ struct gfs2_revoke_replay {
};
enum {
- QDF_USER = 0,
QDF_CHANGE = 1,
QDF_LOCKED = 2,
QDF_REFRESH = 3,
@@ -404,7 +403,7 @@ struct gfs2_quota_data {
atomic_t qd_count;
- u32 qd_id;
+ struct kqid qd_id;
unsigned long qd_flags; /* QDF_... */
s64 qd_change;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index db048a8ab6a8..cc00bd1d1f87 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -368,10 +368,11 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
struct inode *inode)
{
if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
- (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
+ (dip->i_inode.i_mode & S_ISUID) &&
+ !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {
if (S_ISDIR(inode->i_mode))
inode->i_mode |= S_ISUID;
- else if (dip->i_inode.i_uid != current_fsuid())
+ else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))
inode->i_mode &= ~07111;
inode->i_uid = dip->i_inode.i_uid;
} else
@@ -455,8 +456,8 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
di->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
di->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
di->di_mode = cpu_to_be32(ip->i_inode.i_mode);
- di->di_uid = cpu_to_be32(ip->i_inode.i_uid);
- di->di_gid = cpu_to_be32(ip->i_inode.i_gid);
+ di->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
+ di->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
di->di_nlink = 0;
di->di_size = cpu_to_be64(ip->i_inode.i_size);
di->di_blocks = cpu_to_be64(1);
@@ -548,7 +549,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
return error;
- error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_lock(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
goto fail;
@@ -978,8 +979,8 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
return -EPERM;
if ((dip->i_inode.i_mode & S_ISVTX) &&
- dip->i_inode.i_uid != current_fsuid() &&
- ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
+ !uid_eq(dip->i_inode.i_uid, current_fsuid()) &&
+ !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER))
return -EPERM;
if (IS_APPEND(&dip->i_inode))
@@ -1580,7 +1581,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- u32 ouid, ogid, nuid, ngid;
+ kuid_t ouid, nuid;
+ kgid_t ogid, ngid;
int error;
ouid = inode->i_uid;
@@ -1588,16 +1590,17 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
nuid = attr->ia_uid;
ngid = attr->ia_gid;
- if (!(attr->ia_valid & ATTR_UID) || ouid == nuid)
- ouid = nuid = NO_QUOTA_CHANGE;
- if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
- ogid = ngid = NO_QUOTA_CHANGE;
+ if (!(attr->ia_valid & ATTR_UID) || uid_eq(ouid, nuid))
+ ouid = nuid = NO_UID_QUOTA_CHANGE;
+ if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
+ ogid = ngid = NO_GID_QUOTA_CHANGE;
error = gfs2_quota_lock(ip, nuid, ngid);
if (error)
return error;
- if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
+ if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
+ !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
error = gfs2_quota_check(ip, nuid, ngid);
if (error)
goto out_gunlock_q;
@@ -1611,7 +1614,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (error)
goto out_end_trans;
- if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
+ if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
+ !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
u64 blocks = gfs2_get_inode_blocks(&ip->i_inode);
gfs2_quota_change(ip, -blocks, ouid, ogid);
gfs2_quota_change(ip, blocks, nuid, ngid);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 06122d09c0d1..c7c840e916f8 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -65,13 +65,10 @@
#include "inode.h"
#include "util.h"
-#define QUOTA_USER 1
-#define QUOTA_GROUP 0
-
struct gfs2_quota_change_host {
u64 qc_change;
u32 qc_flags; /* GFS2_QCF_... */
- u32 qc_id;
+ struct kqid qc_id;
};
static LIST_HEAD(qd_lru_list);
@@ -120,17 +117,24 @@ out:
return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
+static u64 qd2index(struct gfs2_quota_data *qd)
+{
+ struct kqid qid = qd->qd_id;
+ return (2 * (u64)from_kqid(&init_user_ns, qid)) +
+ ((qid.type == USRQUOTA) ? 0 : 1);
+}
+
static u64 qd2offset(struct gfs2_quota_data *qd)
{
u64 offset;
- offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
+ offset = qd2index(qd);
offset *= sizeof(struct gfs2_quota);
return offset;
}
-static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
+static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd;
@@ -141,13 +145,11 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
return -ENOMEM;
atomic_set(&qd->qd_count, 1);
- qd->qd_id = id;
- if (user)
- set_bit(QDF_USER, &qd->qd_flags);
+ qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_reclaim);
- error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
+ error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
if (error)
goto fail;
@@ -161,7 +163,7 @@ fail:
return error;
}
-static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
+static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -173,8 +175,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
found = 0;
spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (qd->qd_id == id &&
- !test_bit(QDF_USER, &qd->qd_flags) == !user) {
+ if (qid_eq(qd->qd_id, qid)) {
if (!atomic_read(&qd->qd_count) &&
!list_empty(&qd->qd_reclaim)) {
/* Remove it from reclaim list */
@@ -208,7 +209,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
return 0;
}
- error = qd_alloc(sdp, user, id, &new_qd);
+ error = qd_alloc(sdp, qid, &new_qd);
if (error)
return error;
}
@@ -458,12 +459,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
qd_put(qd);
}
-static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
+static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
int error;
- error = qd_get(sdp, user, id, qdp);
+ error = qd_get(sdp, qid, qdp);
if (error)
return error;
@@ -491,7 +492,7 @@ static void qdsb_put(struct gfs2_quota_data *qd)
qd_put(qd);
}
-int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
+int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data **qd;
@@ -512,28 +513,30 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
- error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
+ error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
if (error)
goto out;
ip->i_res->rs_qa_qd_num++;
qd++;
- error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
+ error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
if (error)
goto out;
ip->i_res->rs_qa_qd_num++;
qd++;
- if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
- error = qdsb_get(sdp, QUOTA_USER, uid, qd);
+ if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
+ !uid_eq(uid, ip->i_inode.i_uid)) {
+ error = qdsb_get(sdp, make_kqid_uid(uid), qd);
if (error)
goto out;
ip->i_res->rs_qa_qd_num++;
qd++;
}
- if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
- error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
+ if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
+ !gid_eq(gid, ip->i_inode.i_gid)) {
+ error = qdsb_get(sdp, make_kqid_gid(gid), qd);
if (error)
goto out;
ip->i_res->rs_qa_qd_num++;
@@ -567,18 +570,10 @@ static int sort_qd(const void *a, const void *b)
const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
- if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
- !test_bit(QDF_USER, &qd_b->qd_flags)) {
- if (test_bit(QDF_USER, &qd_a->qd_flags))
- return -1;
- else
- return 1;
- }
- if (qd_a->qd_id < qd_b->qd_id)
+ if (qid_lt(qd_a->qd_id, qd_b->qd_id))
return -1;
- if (qd_a->qd_id > qd_b->qd_id)
+ if (qid_lt(qd_b->qd_id, qd_a->qd_id))
return 1;
-
return 0;
}
@@ -595,9 +590,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
qc->qc_change = 0;
qc->qc_flags = 0;
- if (test_bit(QDF_USER, &qd->qd_flags))
+ if (qd->qd_id.type == USRQUOTA)
qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
- qc->qc_id = cpu_to_be32(qd->qd_id);
+ qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
}
x = be64_to_cpu(qc->qc_change) + change;
@@ -925,7 +920,7 @@ fail:
return error;
}
-int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
+int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
@@ -1040,13 +1035,13 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
sdp->sd_fsname, type,
- (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
- qd->qd_id);
+ (qd->qd_id.type == USRQUOTA) ? "user" : "group",
+ from_kqid(&init_user_ns, qd->qd_id));
return 0;
}
-int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
+int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
@@ -1063,8 +1058,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
qd = ip->i_res->rs_qa_qd[x];
- if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
- (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
+ if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
+ qid_eq(qd->qd_id, make_kqid_gid(gid))))
continue;
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
@@ -1074,10 +1069,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
- quota_send_warning(make_kqid(&init_user_ns,
- test_bit(QDF_USER, &qd->qd_flags) ?
- USRQUOTA : GRPQUOTA,
- qd->qd_id),
+ quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
error = -EDQUOT;
@@ -1087,10 +1079,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
time_after_eq(jiffies, qd->qd_last_warn +
gfs2_tune_get(sdp,
gt_quota_warn_period) * HZ)) {
- quota_send_warning(make_kqid(&init_user_ns,
- test_bit(QDF_USER, &qd->qd_flags) ?
- USRQUOTA : GRPQUOTA,
- qd->qd_id),
+ quota_send_warning(qd->qd_id,
sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
error = print_message(qd, "warning");
qd->qd_last_warn = jiffies;
@@ -1101,7 +1090,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
}
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
- u32 uid, u32 gid)
+ kuid_t uid, kgid_t gid)
{
struct gfs2_quota_data *qd;
unsigned int x;
@@ -1114,8 +1103,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
qd = ip->i_res->rs_qa_qd[x];
- if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
- (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
+ if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
+ qid_eq(qd->qd_id, make_kqid_gid(gid))) {
do_qc(qd, change);
}
}
@@ -1170,13 +1159,13 @@ static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
return gfs2_quota_sync(sb, type);
}
-int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
+int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
struct gfs2_quota_data *qd;
struct gfs2_holder q_gh;
int error;
- error = qd_get(sdp, user, id, &qd);
+ error = qd_get(sdp, qid, &qd);
if (error)
return error;
@@ -1194,7 +1183,9 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
qc->qc_change = be64_to_cpu(str->qc_change);
qc->qc_flags = be32_to_cpu(str->qc_flags);
- qc->qc_id = be32_to_cpu(str->qc_id);
+ qc->qc_id = make_kqid(&init_user_ns,
+ (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
+ be32_to_cpu(str->qc_id));
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
@@ -1257,8 +1248,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
if (!qc.qc_change)
continue;
- error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
- qc.qc_id, &qd);
+ error = qd_alloc(sdp, qc.qc_id, &qd);
if (error) {
brelse(bh);
goto fail;
@@ -1485,21 +1475,17 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
struct gfs2_quota_data *qd;
struct gfs2_holder q_gh;
int error;
- int type;
memset(fdq, 0, sizeof(struct fs_disk_quota));
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */
- if (qid.type == USRQUOTA)
- type = QUOTA_USER;
- else if (qid.type == GRPQUOTA)
- type = QUOTA_GROUP;
- else
+ if ((qid.type != USRQUOTA) &&
+ (qid.type != GRPQUOTA))
return -EINVAL;
- error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
+ error = qd_get(sdp, qid, &qd);
if (error)
return error;
error = do_glock(qd, FORCE, &q_gh);
@@ -1508,8 +1494,8 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
fdq->d_version = FS_DQUOT_VERSION;
- fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
- fdq->d_id = from_kqid(&init_user_ns, qid);
+ fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
+ fdq->d_id = from_kqid_munged(current_user_ns(), qid);
fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
@@ -1535,32 +1521,18 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
int alloc_required;
loff_t offset;
int error;
- int type;
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */
- switch(qid.type) {
- case USRQUOTA:
- type = QUOTA_USER;
- if (fdq->d_flags != FS_USER_QUOTA)
- return -EINVAL;
- break;
- case GRPQUOTA:
- type = QUOTA_GROUP;
- if (fdq->d_flags != FS_GROUP_QUOTA)
- return -EINVAL;
- break;
- default:
+ if ((qid.type != USRQUOTA) &&
+ (qid.type != GRPQUOTA))
return -EINVAL;
- }
if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
return -EINVAL;
- if (fdq->d_id != from_kqid(&init_user_ns, qid))
- return -EINVAL;
- error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
+ error = qd_get(sdp, qid, &qd);
if (error)
return error;
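
The gfs2/quota.c hunks above convert quota identifiers from bare (QDF_USER flag, u32 id) pairs to the kernel's struct kqid: values are built with make_kqid_uid()/make_kqid_gid() and compared with qid_eq(), so the user/group type travels with the id and the old flag tests disappear. As a rough illustration of why a typed id plus an equality helper replaces those flag checks, here is a minimal user-space sketch; the struct and helper names are invented for illustration and are not the kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    enum qtype { QT_USER, QT_GROUP };

    struct qid {                            /* stand-in for struct kqid */
            enum qtype type;
            unsigned int id;
    };

    static struct qid make_uid_qid(unsigned int uid)
    {
            struct qid q = { QT_USER, uid };
            return q;
    }

    static struct qid make_gid_qid(unsigned int gid)
    {
            struct qid q = { QT_GROUP, gid };
            return q;
    }

    static bool qid_equal(struct qid a, struct qid b)
    {
            return a.type == b.type && a.id == b.id;
    }

    int main(void)
    {
            struct qid stored = make_gid_qid(100);  /* id held in a quota record */

            /* One comparison covers both the numeric id and the user/group
             * distinction, so no separate QDF_USER-style flag is needed. */
            printf("%d\n", qid_equal(stored, make_uid_qid(100)));  /* 0 */
            printf("%d\n", qid_equal(stored, make_gid_qid(100)));  /* 1 */
            return 0;
    }
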
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index f25d98b87904..4f5e6e44ed83 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -14,20 +14,21 @@ struct gfs2_inode;
struct gfs2_sbd;
struct shrink_control;
-#define NO_QUOTA_CHANGE ((u32)-1)
+#define NO_UID_QUOTA_CHANGE INVALID_UID
+#define NO_GID_QUOTA_CHANGE INVALID_GID
-extern int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid);
+extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_unhold(struct gfs2_inode *ip);
-extern int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid);
+extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_unlock(struct gfs2_inode *ip);
-extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
+extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
- u32 uid, u32 gid);
+ kuid_t uid, kgid_t gid);
extern int gfs2_quota_sync(struct super_block *sb, int type);
-extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
+extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
extern int gfs2_quota_init(struct gfs2_sbd *sdp);
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
@@ -41,7 +42,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
int ret;
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
- ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
return ret;
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
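
The quota.h hunk above also splits the old NO_QUOTA_CHANGE sentinel (a bare (u32)-1) into NO_UID_QUOTA_CHANGE and NO_GID_QUOTA_CHANGE, defined as INVALID_UID and INVALID_GID, so a caller can still say "leave this half of the (uid, gid) pair alone" once the arguments are kuid_t/kgid_t. A small user-space analogue of the "invalid value means skip" pattern, with illustrative names only:

    #include <stdio.h>

    typedef struct { unsigned int val; } xuid_t;    /* stand-in for kuid_t */

    #define XUID_INVALID ((xuid_t){ (unsigned int)-1 })

    static int xuid_valid(xuid_t u)
    {
            return u.val != (unsigned int)-1;
    }

    static void account_change(xuid_t uid, long change)
    {
            if (!xuid_valid(uid)) {         /* caller asked for "no uid change" */
                    puts("uid unchanged, skipping");
                    return;
            }
            printf("charge %ld blocks to uid %u\n", change, uid.val);
    }

    int main(void)
    {
            account_change((xuid_t){ 1000 }, 8);
            account_change(XUID_INVALID, 8);
            return 0;
    }
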
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 52c2aeaf45ce..d1f51fd73f86 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1257,7 +1257,7 @@ fail:
int gfs2_fitrim(struct file *filp, void __user *argp)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
struct buffer_head *bh;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a3b40eeaa6e2..cab77b8ba84f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -673,8 +673,8 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
- str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
- str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
+ str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
+ str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -1376,7 +1376,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
if (error)
return error;
- error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
return error;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 4fb9ad80d260..aa5c48044966 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -103,7 +103,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
int n = simple_strtol(buf, NULL, 0);
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
switch (n) {
case 0:
@@ -133,7 +133,7 @@ static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
@@ -148,7 +148,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
@@ -161,7 +161,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
@@ -173,30 +173,40 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
+ struct kqid qid;
int error;
u32 id;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
id = simple_strtoul(buf, NULL, 0);
- error = gfs2_quota_refresh(sdp, 1, id);
+ qid = make_kqid(current_user_ns(), USRQUOTA, id);
+ if (!qid_valid(qid))
+ return -EINVAL;
+
+ error = gfs2_quota_refresh(sdp, qid);
return error ? error : len;
}
static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
+ struct kqid qid;
int error;
u32 id;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
id = simple_strtoul(buf, NULL, 0);
- error = gfs2_quota_refresh(sdp, 0, id);
+ qid = make_kqid(current_user_ns(), GRPQUOTA, id);
+ if (!qid_valid(qid))
+ return -EINVAL;
+
+ error = gfs2_quota_refresh(sdp, qid);
return error ? error : len;
}
@@ -211,7 +221,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
int rv;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
mode);
@@ -522,7 +532,7 @@ static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
unsigned int x, y;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
return -EINVAL;
@@ -541,7 +551,7 @@ static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
unsigned int x;
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
x = simple_strtoul(buf, NULL, 0);
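
In the sysfs store hooks above, the user and group quota refresh files now parse the id, wrap it with make_kqid(current_user_ns(), USRQUOTA/GRPQUOTA, id), and return -EINVAL when qid_valid() fails, i.e. when the id does not map into the caller's user namespace; the capability failures also switch from -EACCES to the more conventional -EPERM. A rough user-space sketch of the parse-wrap-validate flow (the qid type and helpers below are illustrative, not the kernel's):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum { USRQ, GRPQ };

    struct qid { int type; unsigned int id; int valid; };

    /* Stand-in for make_kqid(): the real helper translates the id through the
     * caller's user namespace and yields an invalid qid when that fails. */
    static struct qid make_qid(int type, unsigned int id)
    {
            struct qid q = { type, id, id != (unsigned int)-1 };
            return q;
    }

    static int refresh_store(const char *buf, int type)
    {
            unsigned int id = (unsigned int)strtoul(buf, NULL, 0);
            struct qid q = make_qid(type, id);

            if (!q.valid)
                    return -EINVAL;         /* id does not map: reject early */
            printf("refresh %s quota %u\n", type == USRQ ? "user" : "group", q.id);
            return 0;
    }

    int main(void)
    {
            refresh_store("1000", USRQ);
            printf("%d\n", refresh_store("4294967295", GRPQ));
            return 0;
    }
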
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index cbb46c2baa69..ecd37f30ab91 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -331,7 +331,7 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
if (error)
return error;
- error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
goto out_alloc;
@@ -1461,7 +1461,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
if (error)
return error;
- error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+ error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (error)
return error;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 422dde2ec0a1..5f7f1abd5f6d 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -51,7 +51,7 @@ done:
*/
static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int len, err;
char strbuf[HFS_MAX_NAMELEN];
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index d47f11658c17..3031dfdd2358 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -128,7 +128,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+ struct inode *inode = file_inode(file)->i_mapping->host;
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
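
Several hunks in this series (rgrp.c, hfs/dir.c, hfs/inode.c, and more below) replace open-coded filp->f_path.dentry->d_inode or f_dentry->d_inode chains with the file_inode() accessor, which keeps call sites short and independent of how struct file reaches its inode. A trivial user-space analogue of the accessor, with mock structures that only echo the field names:

    #include <stdio.h>

    struct inode  { unsigned long ino; };
    struct dentry { struct inode *d_inode; };
    struct path   { struct dentry *dentry; };
    struct file   { struct path f_path; struct inode *f_inode; };

    /* Analogue of the kernel's file_inode(): one accessor instead of an
     * open-coded pointer chain repeated at every call site. */
    static struct inode *file_inode(const struct file *f)
    {
            return f->f_inode;
    }

    int main(void)
    {
            struct inode i = { 42 };
            struct dentry d = { &i };
            struct file f = { { &d }, &i };

            printf("ino=%lu\n", file_inode(&f)->ino);
            return 0;
    }
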
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 3cc0df730156..09d278bb7b91 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -5,5 +5,5 @@
obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
- bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o
-
+ bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
+ attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
new file mode 100644
index 000000000000..8d691f124714
--- /dev/null
+++ b/fs/hfsplus/attributes.c
@@ -0,0 +1,399 @@
+/*
+ * linux/fs/hfsplus/attributes.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handling of records in attributes tree
+ */
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+static struct kmem_cache *hfsplus_attr_tree_cachep;
+
+int hfsplus_create_attr_tree_cache(void)
+{
+ if (hfsplus_attr_tree_cachep)
+ return -EEXIST;
+
+ hfsplus_attr_tree_cachep =
+ kmem_cache_create("hfsplus_attr_cache",
+ sizeof(hfsplus_attr_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hfsplus_attr_tree_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hfsplus_destroy_attr_tree_cache(void)
+{
+ kmem_cache_destroy(hfsplus_attr_tree_cachep);
+}
+
+int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
+ const hfsplus_btree_key *k2)
+{
+ __be32 k1_cnid, k2_cnid;
+
+ k1_cnid = k1->attr.cnid;
+ k2_cnid = k2->attr.cnid;
+ if (k1_cnid != k2_cnid)
+ return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1;
+
+ return hfsplus_strcmp(
+ (const struct hfsplus_unistr *)&k1->attr.key_name,
+ (const struct hfsplus_unistr *)&k2->attr.key_name);
+}
+
+int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
+ u32 cnid, const char *name)
+{
+ int len;
+
+ memset(key, 0, sizeof(struct hfsplus_attr_key));
+ key->attr.cnid = cpu_to_be32(cnid);
+ if (name) {
+ len = strlen(name);
+ if (len > HFSPLUS_ATTR_MAX_STRLEN) {
+ printk(KERN_ERR "hfs: invalid xattr name length\n");
+ return -EINVAL;
+ }
+ hfsplus_asc2uni(sb,
+ (struct hfsplus_unistr *)&key->attr.key_name,
+ HFSPLUS_ATTR_MAX_STRLEN, name, len);
+ len = be16_to_cpu(key->attr.key_name.length);
+ } else {
+ key->attr.key_name.length = 0;
+ len = 0;
+ }
+
+ /* The length of the key, as stored in key_len field, does not include
+ * the size of the key_len field itself.
+ * So, offsetof(hfsplus_attr_key, key_name) is a trick because
+ * it takes into consideration key_len field (__be16) of
+ * hfsplus_attr_key structure instead of length field (__be16) of
+ * hfsplus_attr_unistr structure.
+ */
+ key->key_len =
+ cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
+ 2 * len);
+
+ return 0;
+}
+
+void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
+ u32 cnid,
+ struct hfsplus_attr_unistr *name)
+{
+ int ustrlen;
+
+ memset(key, 0, sizeof(struct hfsplus_attr_key));
+ ustrlen = be16_to_cpu(name->length);
+ key->attr.cnid = cpu_to_be32(cnid);
+ key->attr.key_name.length = cpu_to_be16(ustrlen);
+ ustrlen *= 2;
+ memcpy(key->attr.key_name.unicode, name->unicode, ustrlen);
+
+ /* The length of the key, as stored in key_len field, does not include
+ * the size of the key_len field itself.
+ * So, offsetof(hfsplus_attr_key, key_name) is a trick because
+ * it takes into consideration key_len field (__be16) of
+ * hfsplus_attr_key structure instead of length field (__be16) of
+ * hfsplus_attr_unistr structure.
+ */
+ key->key_len =
+ cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
+ ustrlen);
+}
+
+hfsplus_attr_entry *hfsplus_alloc_attr_entry(void)
+{
+ return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL);
+}
+
+void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry)
+{
+ if (entry)
+ kmem_cache_free(hfsplus_attr_tree_cachep, entry);
+}
+
+#define HFSPLUS_INVALID_ATTR_RECORD -1
+
+static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type,
+ u32 cnid, const void *value, size_t size)
+{
+ if (record_type == HFSPLUS_ATTR_FORK_DATA) {
+ /*
+ * Mac OS X supports only inline data attributes.
+ * Do nothing
+ */
+ memset(entry, 0, sizeof(*entry));
+ return sizeof(struct hfsplus_attr_fork_data);
+ } else if (record_type == HFSPLUS_ATTR_EXTENTS) {
+ /*
+ * Mac OS X supports only inline data attributes.
+ * Do nothing.
+ */
+ memset(entry, 0, sizeof(*entry));
+ return sizeof(struct hfsplus_attr_extents);
+ } else if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
+ u16 len;
+
+ memset(entry, 0, sizeof(struct hfsplus_attr_inline_data));
+ entry->inline_data.record_type = cpu_to_be32(record_type);
+ if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE)
+ len = size;
+ else
+ return HFSPLUS_INVALID_ATTR_RECORD;
+ entry->inline_data.length = cpu_to_be16(len);
+ memcpy(entry->inline_data.raw_bytes, value, len);
+ /*
+ * Align len on two-byte boundary.
+ * A pad byte is added when len is odd.
+ */
+ len = round_up(len, 2);
+ return offsetof(struct hfsplus_attr_inline_data, raw_bytes) +
+ len;
+ } else /* invalid input */
+ memset(entry, 0, sizeof(*entry));
+
+ return HFSPLUS_INVALID_ATTR_RECORD;
+}
+
+int hfsplus_find_attr(struct super_block *sb, u32 cnid,
+ const char *name, struct hfs_find_data *fd)
+{
+ int err = 0;
+
+ dprint(DBG_ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);
+
+ if (!HFSPLUS_SB(sb)->attr_tree) {
+ printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+ return -EINVAL;
+ }
+
+ if (name) {
+ err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name);
+ if (err)
+ goto failed_find_attr;
+ err = hfs_brec_find(fd, hfs_find_rec_by_key);
+ if (err)
+ goto failed_find_attr;
+ } else {
+ err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL);
+ if (err)
+ goto failed_find_attr;
+ err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid);
+ if (err)
+ goto failed_find_attr;
+ }
+
+failed_find_attr:
+ return err;
+}
+
+int hfsplus_attr_exists(struct inode *inode, const char *name)
+{
+ int err = 0;
+ struct super_block *sb = inode->i_sb;
+ struct hfs_find_data fd;
+
+ if (!HFSPLUS_SB(sb)->attr_tree)
+ return 0;
+
+ err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+ if (err)
+ return 0;
+
+ err = hfsplus_find_attr(sb, inode->i_ino, name, &fd);
+ if (err)
+ goto attr_not_found;
+
+ hfs_find_exit(&fd);
+ return 1;
+
+attr_not_found:
+ hfs_find_exit(&fd);
+ return 0;
+}
+
+int hfsplus_create_attr(struct inode *inode,
+ const char *name,
+ const void *value, size_t size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct hfs_find_data fd;
+ hfsplus_attr_entry *entry_ptr;
+ int entry_size;
+ int err;
+
+ dprint(DBG_ATTR_MOD, "create_attr: %s,%ld\n",
+ name ? name : NULL, inode->i_ino);
+
+ if (!HFSPLUS_SB(sb)->attr_tree) {
+ printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+ return -EINVAL;
+ }
+
+ entry_ptr = hfsplus_alloc_attr_entry();
+ if (!entry_ptr)
+ return -ENOMEM;
+
+ err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+ if (err)
+ goto failed_init_create_attr;
+
+ if (name) {
+ err = hfsplus_attr_build_key(sb, fd.search_key,
+ inode->i_ino, name);
+ if (err)
+ goto failed_create_attr;
+ } else {
+ err = -EINVAL;
+ goto failed_create_attr;
+ }
+
+ /* Mac OS X supports only inline data attributes. */
+ entry_size = hfsplus_attr_build_record(entry_ptr,
+ HFSPLUS_ATTR_INLINE_DATA,
+ inode->i_ino,
+ value, size);
+ if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) {
+ err = -EINVAL;
+ goto failed_create_attr;
+ }
+
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
+ if (err != -ENOENT) {
+ if (!err)
+ err = -EEXIST;
+ goto failed_create_attr;
+ }
+
+ err = hfs_brec_insert(&fd, entry_ptr, entry_size);
+ if (err)
+ goto failed_create_attr;
+
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
+
+failed_create_attr:
+ hfs_find_exit(&fd);
+
+failed_init_create_attr:
+ hfsplus_destroy_attr_entry(entry_ptr);
+ return err;
+}
+
+static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
+ struct hfs_find_data *fd)
+{
+ int err = 0;
+ __be32 found_cnid, record_type;
+
+ hfs_bnode_read(fd->bnode, &found_cnid,
+ fd->keyoffset +
+ offsetof(struct hfsplus_attr_key, cnid),
+ sizeof(__be32));
+ if (cnid != be32_to_cpu(found_cnid))
+ return -ENOENT;
+
+ hfs_bnode_read(fd->bnode, &record_type,
+ fd->entryoffset, sizeof(record_type));
+
+ switch (be32_to_cpu(record_type)) {
+ case HFSPLUS_ATTR_INLINE_DATA:
+ /* All is OK. Do nothing. */
+ break;
+ case HFSPLUS_ATTR_FORK_DATA:
+ case HFSPLUS_ATTR_EXTENTS:
+ printk(KERN_ERR "hfs: only inline data xattrs are supported\n");
+ return -EOPNOTSUPP;
+ default:
+ printk(KERN_ERR "hfs: invalid extended attribute record\n");
+ return -ENOENT;
+ }
+
+ err = hfs_brec_remove(fd);
+ if (err)
+ return err;
+
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
+ return err;
+}
+
+int hfsplus_delete_attr(struct inode *inode, const char *name)
+{
+ int err = 0;
+ struct super_block *sb = inode->i_sb;
+ struct hfs_find_data fd;
+
+ dprint(DBG_ATTR_MOD, "delete_attr: %s,%ld\n",
+ name ? name : NULL, inode->i_ino);
+
+ if (!HFSPLUS_SB(sb)->attr_tree) {
+ printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+ return -EINVAL;
+ }
+
+ err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+ if (err)
+ return err;
+
+ if (name) {
+ err = hfsplus_attr_build_key(sb, fd.search_key,
+ inode->i_ino, name);
+ if (err)
+ goto out;
+ } else {
+ printk(KERN_ERR "hfs: invalid extended attribute name\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
+ if (err)
+ goto out;
+
+ err = __hfsplus_delete_attr(inode, inode->i_ino, &fd);
+ if (err)
+ goto out;
+
+out:
+ hfs_find_exit(&fd);
+ return err;
+}
+
+int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
+{
+ int err = 0;
+ struct hfs_find_data fd;
+
+ dprint(DBG_ATTR_MOD, "delete_all_attrs: %d\n", cnid);
+
+ if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
+ printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+ return -EINVAL;
+ }
+
+ err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd);
+ if (err)
+ return err;
+
+ for (;;) {
+ err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
+ if (err) {
+ if (err != -ENOENT)
+ printk(KERN_ERR "hfs: xattr search failed.\n");
+ goto end_delete_all;
+ }
+
+ err = __hfsplus_delete_attr(dir, cnid, &fd);
+ if (err)
+ goto end_delete_all;
+ }
+
+end_delete_all:
+ hfs_find_exit(&fd);
+ return err;
+}
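
hfsplus_attr_build_key() above stores key_len as offsetof(struct hfsplus_attr_key, key_name) plus two bytes per UTF-16 name unit: the on-disk key length excludes the key_len field itself, and the offsetof value happens to count key_len in place of the name's own 16-bit length field, which has the same size. A user-space sketch of that arithmetic; the structures below only mimic the on-disk layout for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ATTR_MAX_STRLEN 127

    struct attr_unistr {
            uint16_t length;
            uint16_t unicode[ATTR_MAX_STRLEN];
    };

    struct attr_key {
            uint16_t key_len;       /* not counted in the stored key length */
            uint16_t pad;
            uint32_t cnid;
            uint32_t start_block;
            struct attr_unistr key_name;
    };

    int main(void)
    {
            const char *name = "user.comment";
            size_t len = strlen(name);      /* one UTF-16 unit per ASCII char */
            struct attr_key key = { 0 };

            key.key_name.length = (uint16_t)len;
            /* offsetof(attr_key, key_name) is 12: key_len + pad + cnid +
             * start_block, numerically equal to pad + cnid + start_block +
             * the name's length field, i.e. everything but key_len itself. */
            key.key_len = (uint16_t)(offsetof(struct attr_key, key_name) + 2 * len);

            printf("key_len = %u\n", (unsigned)key.key_len);
            return 0;
    }
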
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 5d799c13205f..d73c98d1ee99 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -24,7 +24,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->key = ptr + tree->max_key_len + 2;
dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
- mutex_lock(&tree->tree_lock);
+ switch (tree->cnid) {
+ case HFSPLUS_CAT_CNID:
+ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+ break;
+ case HFSPLUS_EXT_CNID:
+ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+ break;
+ case HFSPLUS_ATTR_CNID:
+ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+ break;
+ default:
+ BUG();
+ }
return 0;
}
@@ -38,15 +50,73 @@ void hfs_find_exit(struct hfs_find_data *fd)
fd->tree = NULL;
}
-/* Find the record in bnode that best matches key (not greater than...)*/
-int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
+int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
+ struct hfs_find_data *fd,
+ int *begin,
+ int *end,
+ int *cur_rec)
+{
+ __be32 cur_cnid, search_cnid;
+
+ if (bnode->tree->cnid == HFSPLUS_EXT_CNID) {
+ cur_cnid = fd->key->ext.cnid;
+ search_cnid = fd->search_key->ext.cnid;
+ } else if (bnode->tree->cnid == HFSPLUS_CAT_CNID) {
+ cur_cnid = fd->key->cat.parent;
+ search_cnid = fd->search_key->cat.parent;
+ } else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) {
+ cur_cnid = fd->key->attr.cnid;
+ search_cnid = fd->search_key->attr.cnid;
+ } else
+ BUG();
+
+ if (cur_cnid == search_cnid) {
+ (*end) = (*cur_rec);
+ if ((*begin) == (*end))
+ return 1;
+ } else {
+ if (be32_to_cpu(cur_cnid) < be32_to_cpu(search_cnid))
+ (*begin) = (*cur_rec) + 1;
+ else
+ (*end) = (*cur_rec) - 1;
+ }
+
+ return 0;
+}
+
+int hfs_find_rec_by_key(struct hfs_bnode *bnode,
+ struct hfs_find_data *fd,
+ int *begin,
+ int *end,
+ int *cur_rec)
{
int cmpval;
+
+ cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
+ if (!cmpval) {
+ (*end) = (*cur_rec);
+ return 1;
+ }
+ if (cmpval < 0)
+ (*begin) = (*cur_rec) + 1;
+ else
+ *(end) = (*cur_rec) - 1;
+
+ return 0;
+}
+
+/* Find the record in bnode that best matches key (not greater than...)*/
+int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
+ search_strategy_t rec_found)
+{
u16 off, len, keylen;
int rec;
int b, e;
int res;
+ if (!rec_found)
+ BUG();
+
b = 0;
e = bnode->num_recs - 1;
res = -ENOENT;
@@ -59,17 +129,12 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
- cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
- if (!cmpval) {
- e = rec;
+ if (rec_found(bnode, fd, &b, &e, &rec)) {
res = 0;
goto done;
}
- if (cmpval < 0)
- b = rec + 1;
- else
- e = rec - 1;
} while (b <= e);
+
if (rec != e && e >= 0) {
len = hfs_brec_lenoff(bnode, e, &off);
keylen = hfs_brec_keylen(bnode, e);
@@ -79,19 +144,21 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
}
hfs_bnode_read(bnode, fd->key, off, keylen);
}
+
done:
fd->record = e;
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
+
fail:
return res;
}
/* Traverse a B*Tree from the root to a leaf finding best fit to key */
/* Return allocated copy of node found, set recnum to best record */
-int hfs_brec_find(struct hfs_find_data *fd)
+int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
{
struct hfs_btree *tree;
struct hfs_bnode *bnode;
@@ -122,7 +189,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
goto invalid;
bnode->parent = parent;
- res = __hfs_brec_find(bnode, fd);
+ res = __hfs_brec_find(bnode, fd, do_key_compare);
if (!height)
break;
if (fd->record < 0)
@@ -149,7 +216,7 @@ int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
{
int res;
- res = hfs_brec_find(fd);
+ res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (res)
return res;
if (fd->entrylength > rec_len)
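
The bfind.c rework turns the fixed key comparison inside __hfs_brec_find() into a search_strategy_t callback: hfs_find_rec_by_key keeps the original exact-match behaviour, while hfs_find_1st_rec_by_cnid shrinks the range until it sits on the first record whose CNID matches, which is what hfsplus_delete_all_attrs() needs when walking every attribute of one file. A compact user-space model of the same callback-driven binary search, using a plain array instead of a b-tree node and illustrative names only:

    #include <stdio.h>

    /* Returns nonzero when the search should stop at *cur; otherwise it
     * narrows *begin/*end around *cur, as the kernel strategies do. */
    typedef int (*strategy_t)(const int *recs, int target,
                              int *begin, int *end, int *cur);

    static int find_exact(const int *recs, int target,
                          int *begin, int *end, int *cur)
    {
            if (recs[*cur] == target) {
                    *end = *cur;
                    return 1;
            }
            if (recs[*cur] < target)
                    *begin = *cur + 1;
            else
                    *end = *cur - 1;
            return 0;
    }

    static int find_first_equal(const int *recs, int target,
                                int *begin, int *end, int *cur)
    {
            if (recs[*cur] == target) {
                    *end = *cur;            /* keep probing to the left */
                    return *begin == *end;
            }
            if (recs[*cur] < target)
                    *begin = *cur + 1;
            else
                    *end = *cur - 1;
            return 0;
    }

    static int brec_find(const int *recs, int n, int target, strategy_t found)
    {
            int b = 0, e = n - 1, rec;

            while (b <= e) {
                    rec = (b + e) / 2;
                    if (found(recs, target, &b, &e, &rec))
                            return rec;     /* strategy accepted this record */
            }
            return e;                       /* best fit: last record <= target */
    }

    int main(void)
    {
            int recs[] = { 2, 5, 5, 5, 9, 12 };

            printf("%d\n", brec_find(recs, 6, 9, find_exact));        /* 4 */
            printf("%d\n", brec_find(recs, 6, 5, find_first_equal));  /* 1 */
            return 0;
    }
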
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 1c42cc5b899f..f31ac6f404f1 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -62,7 +62,8 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
tree = node->tree;
if (node->type == HFS_NODE_LEAF ||
- tree->attributes & HFS_TREE_VARIDXKEYS)
+ tree->attributes & HFS_TREE_VARIDXKEYS ||
+ node->tree->cnid == HFSPLUS_ATTR_CNID)
key_len = hfs_bnode_read_u16(node, off) + 2;
else
key_len = tree->max_key_len + 2;
@@ -314,7 +315,8 @@ void hfs_bnode_dump(struct hfs_bnode *node)
if (i && node->type == HFS_NODE_INDEX) {
int tmp;
- if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
+ if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
+ node->tree->cnid == HFSPLUS_ATTR_CNID)
tmp = hfs_bnode_read_u16(node, key_off) + 2;
else
tmp = node->tree->max_key_len + 2;
@@ -646,6 +648,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
hfs_bnode_unhash(node);
spin_unlock(&tree->hash_lock);
+ hfs_bnode_clear(node, 0,
+ PAGE_CACHE_SIZE * tree->pages_per_bnode);
hfs_bmap_free(node);
hfs_bnode_free(node);
return;
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 2a734cfccc92..298d4e45604b 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -36,7 +36,8 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
return 0;
if ((node->type == HFS_NODE_INDEX) &&
- !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
+ !(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&
+ (node->tree->cnid != HFSPLUS_ATTR_CNID)) {
retval = node->tree->max_key_len + 2;
} else {
recoff = hfs_bnode_read_u16(node,
@@ -151,12 +152,13 @@ skip:
/* get index key */
hfs_bnode_read_key(new_node, fd->search_key, 14);
- __hfs_brec_find(fd->bnode, fd);
+ __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
hfs_bnode_put(new_node);
new_node = NULL;
- if (tree->attributes & HFS_TREE_VARIDXKEYS)
+ if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
+ (tree->cnid == HFSPLUS_ATTR_CNID))
key_len = be16_to_cpu(fd->search_key->key_len) + 2;
else {
fd->search_key->key_len =
@@ -201,7 +203,7 @@ again:
hfs_bnode_put(node);
node = fd->bnode = parent;
- __hfs_brec_find(node, fd);
+ __hfs_brec_find(node, fd, hfs_find_rec_by_key);
goto again;
}
hfs_bnode_write_u16(node,
@@ -367,12 +369,13 @@ again:
parent = hfs_bnode_find(tree, node->parent);
if (IS_ERR(parent))
return PTR_ERR(parent);
- __hfs_brec_find(parent, fd);
+ __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
hfs_bnode_dump(parent);
rec = fd->record;
/* size difference between old and new key */
- if (tree->attributes & HFS_TREE_VARIDXKEYS)
+ if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
+ (tree->cnid == HFSPLUS_ATTR_CNID))
newkeylen = hfs_bnode_read_u16(node, 14) + 2;
else
fd->keylength = newkeylen = tree->max_key_len + 2;
@@ -427,7 +430,7 @@ skip:
hfs_bnode_read_key(new_node, fd->search_key, 14);
cnid = cpu_to_be32(new_node->this);
- __hfs_brec_find(fd->bnode, fd);
+ __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
hfs_brec_insert(fd, &cnid, sizeof(cnid));
hfs_bnode_put(fd->bnode);
hfs_bnode_put(new_node);
@@ -495,13 +498,15 @@ static int hfs_btree_inc_height(struct hfs_btree *tree)
/* insert old root idx into new root */
node->parent = tree->root;
if (node->type == HFS_NODE_LEAF ||
- tree->attributes & HFS_TREE_VARIDXKEYS)
+ tree->attributes & HFS_TREE_VARIDXKEYS ||
+ tree->cnid == HFSPLUS_ATTR_CNID)
key_size = hfs_bnode_read_u16(node, 14) + 2;
else
key_size = tree->max_key_len + 2;
hfs_bnode_copy(new_node, 14, node, 14, key_size);
- if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+ if (!(tree->attributes & HFS_TREE_VARIDXKEYS) &&
+ (tree->cnid != HFSPLUS_ATTR_CNID)) {
key_size = tree->max_key_len + 2;
hfs_bnode_write_u16(new_node, 14, tree->max_key_len);
}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 685d07d0ed18..efb689c21a95 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -98,6 +98,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
}
break;
+ case HFSPLUS_ATTR_CNID:
+ if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
+ printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n",
+ tree->max_key_len);
+ goto fail_page;
+ }
+ tree->keycmp = hfsplus_attr_bin_cmp_key;
+ break;
default:
printk(KERN_ERR "hfs: unknown B*Tree requested\n");
goto fail_page;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 798d9c4c5e71..840d71edd193 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -45,7 +45,8 @@ void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
key->cat.parent = cpu_to_be32(parent);
if (str) {
- hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len);
+ hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
+ str->name, str->len);
len = be16_to_cpu(key->cat.name.length);
} else {
key->cat.name.length = 0;
@@ -167,7 +168,8 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
entry->type = cpu_to_be16(type);
entry->thread.reserved = 0;
entry->thread.parentID = cpu_to_be32(parentid);
- hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len);
+ hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
+ str->name, str->len);
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
}
@@ -198,7 +200,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
hfsplus_cat_build_key_uni(fd->search_key,
be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
- return hfs_brec_find(fd);
+ return hfs_brec_find(fd, hfs_find_rec_by_key);
}
int hfsplus_create_cat(u32 cnid, struct inode *dir,
@@ -221,7 +223,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
S_ISDIR(inode->i_mode) ?
HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
dir->i_ino, str);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
@@ -233,7 +235,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
/* panic? */
if (!err)
@@ -253,7 +255,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
err1:
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
- if (!hfs_brec_find(&fd))
+ if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
hfs_brec_remove(&fd);
err2:
hfs_find_exit(&fd);
@@ -279,7 +281,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
int len;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -296,7 +298,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
} else
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -326,7 +328,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
goto out;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -337,6 +339,12 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
dir->i_size--;
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
+
+ if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
+ if (HFSPLUS_SB(sb)->attr_tree)
+ hfsplus_delete_all_attrs(dir, cnid);
+ }
+
out:
hfs_find_exit(&fd);
@@ -363,7 +371,7 @@ int hfsplus_rename_cat(u32 cnid,
/* find the old dir entry and read the data */
hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
- err = hfs_brec_find(&src_fd);
+ err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
@@ -376,7 +384,7 @@ int hfsplus_rename_cat(u32 cnid,
/* create new dir entry with the data from the old entry */
hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
- err = hfs_brec_find(&dst_fd);
+ err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
@@ -391,7 +399,7 @@ int hfsplus_rename_cat(u32 cnid,
/* finally remove the old entry */
hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
- err = hfs_brec_find(&src_fd);
+ err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
err = hfs_brec_remove(&src_fd);
@@ -402,7 +410,7 @@ int hfsplus_rename_cat(u32 cnid,
/* remove old thread entry */
hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
- err = hfs_brec_find(&src_fd);
+ err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
@@ -414,7 +422,7 @@ int hfsplus_rename_cat(u32 cnid,
hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
dst_dir->i_ino, dst_name);
- err = hfs_brec_find(&dst_fd);
+ err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
err = -EEXIST;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 6b9f921ef2fa..031c24e50521 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -15,6 +15,7 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
+#include "xattr.h"
static inline void hfsplus_instantiate(struct dentry *dentry,
struct inode *inode, u32 cnid)
@@ -122,7 +123,7 @@ fail:
static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
int len, err;
char strbuf[HFSPLUS_MAX_STRLEN + 1];
@@ -138,7 +139,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (err)
return err;
hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
- err = hfs_brec_find(&fd);
+ err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -421,6 +422,15 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
if (res)
goto out_err;
+ res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+ if (res == -EOPNOTSUPP)
+ res = 0; /* Operation is not supported. */
+ else if (res) {
+ /* Try to delete anyway without error analysis. */
+ hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
+ goto out_err;
+ }
+
hfsplus_instantiate(dentry, inode, inode->i_ino);
mark_inode_dirty(inode);
goto out;
@@ -450,15 +460,26 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, mode, rdev);
res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
- if (res) {
- clear_nlink(inode);
- hfsplus_delete_inode(inode);
- iput(inode);
- goto out;
+ if (res)
+ goto failed_mknod;
+
+ res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+ if (res == -EOPNOTSUPP)
+ res = 0; /* Operation is not supported. */
+ else if (res) {
+ /* Try to delete anyway without error analysis. */
+ hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
+ goto failed_mknod;
}
hfsplus_instantiate(dentry, inode, inode->i_ino);
mark_inode_dirty(inode);
+ goto out;
+
+failed_mknod:
+ clear_nlink(inode);
+ hfsplus_delete_inode(inode);
+ iput(inode);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
@@ -499,15 +520,19 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
}
const struct inode_operations hfsplus_dir_inode_operations = {
- .lookup = hfsplus_lookup,
- .create = hfsplus_create,
- .link = hfsplus_link,
- .unlink = hfsplus_unlink,
- .mkdir = hfsplus_mkdir,
- .rmdir = hfsplus_rmdir,
- .symlink = hfsplus_symlink,
- .mknod = hfsplus_mknod,
- .rename = hfsplus_rename,
+ .lookup = hfsplus_lookup,
+ .create = hfsplus_create,
+ .link = hfsplus_link,
+ .unlink = hfsplus_unlink,
+ .mkdir = hfsplus_mkdir,
+ .rmdir = hfsplus_rmdir,
+ .symlink = hfsplus_symlink,
+ .mknod = hfsplus_mknod,
+ .rename = hfsplus_rename,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = hfsplus_listxattr,
+ .removexattr = hfsplus_removexattr,
};
const struct file_operations hfsplus_dir_operations = {
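
With sb->s_xattr pointing at hfsplus_xattr_handlers (see the super.c and xattr.c hunks below), the directory and file inode_operations can rely on the VFS's generic_setxattr()/generic_getxattr(), which pick a handler by the name prefix ("user.", "trusted.", "security.", "osx.") and pass it the unprefixed name. A rough user-space model of that prefix dispatch, with illustrative handler names:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct xattr_handler {
            const char *prefix;
            int (*set)(const char *name, const char *value);
    };

    static int user_set(const char *name, const char *value)
    {
            printf("user handler: %s=%s\n", name, value);
            return 0;
    }

    static int trusted_set(const char *name, const char *value)
    {
            printf("trusted handler: %s=%s\n", name, value);
            return 0;
    }

    static const struct xattr_handler handlers[] = {
            { "user.",    user_set },
            { "trusted.", trusted_set },
            { NULL, NULL }
    };

    /* Analogue of generic_setxattr(): find the handler whose prefix matches
     * and hand it the name with the prefix stripped. */
    static int generic_set(const char *name, const char *value)
    {
            const struct xattr_handler *h;

            for (h = handlers; h->prefix; h++) {
                    size_t plen = strlen(h->prefix);

                    if (!strncmp(name, h->prefix, plen))
                            return h->set(name + plen, value);
            }
            return -EOPNOTSUPP;             /* unknown namespace */
    }

    int main(void)
    {
            generic_set("user.comment", "hello");
            printf("%d\n", generic_set("weird.name", "x"));
            return 0;
    }
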
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index eba76eab6d62..a94f0f779d5e 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -95,7 +95,7 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
HFSPLUS_IS_RSRC(inode) ?
HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
- res = hfs_brec_find(fd);
+ res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return;
@@ -154,7 +154,7 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
hfsplus_ext_build_key(fd->search_key, cnid, block, type);
fd->key->ext.cnid = 0;
- res = hfs_brec_find(fd);
+ res = hfs_brec_find(fd, hfs_find_rec_by_key);
if (res && res != -ENOENT)
return res;
if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index a6da86b1b4c1..05b11f36024c 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -23,6 +23,7 @@
#define DBG_SUPER 0x00000010
#define DBG_EXTENT 0x00000020
#define DBG_BITMAP 0x00000040
+#define DBG_ATTR_MOD 0x00000080
#if 0
#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
@@ -46,6 +47,13 @@ typedef int (*btree_keycmp)(const hfsplus_btree_key *,
#define NODE_HASH_SIZE 256
+/* B-tree mutex nested subclasses */
+enum hfsplus_btree_mutex_classes {
+ CATALOG_BTREE_MUTEX,
+ EXTENTS_BTREE_MUTEX,
+ ATTR_BTREE_MUTEX,
+};
+
/* An HFS+ BTree held in memory */
struct hfs_btree {
struct super_block *sb;
@@ -223,6 +231,7 @@ struct hfsplus_inode_info {
#define HFSPLUS_I_CAT_DIRTY 1 /* has changes in the catalog tree */
#define HFSPLUS_I_EXT_DIRTY 2 /* has changes in the extent tree */
#define HFSPLUS_I_ALLOC_DIRTY 3 /* has changes in the allocation file */
+#define HFSPLUS_I_ATTR_DIRTY 4 /* has changes in the attributes tree */
#define HFSPLUS_IS_RSRC(inode) \
test_bit(HFSPLUS_I_RSRC, &HFSPLUS_I(inode)->flags)
@@ -302,7 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
#define hfs_brec_remove hfsplus_brec_remove
#define hfs_find_init hfsplus_find_init
#define hfs_find_exit hfsplus_find_exit
-#define __hfs_brec_find __hplusfs_brec_find
+#define __hfs_brec_find __hfsplus_brec_find
#define hfs_brec_find hfsplus_brec_find
#define hfs_brec_read hfsplus_brec_read
#define hfs_brec_goto hfsplus_brec_goto
@@ -324,10 +333,33 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
*/
#define HFSPLUS_IOC_BLESS _IO('h', 0x80)
+typedef int (*search_strategy_t)(struct hfs_bnode *,
+ struct hfs_find_data *,
+ int *, int *, int *);
+
/*
* Functions in any *.c used in other files
*/
+/* attributes.c */
+int hfsplus_create_attr_tree_cache(void);
+void hfsplus_destroy_attr_tree_cache(void);
+hfsplus_attr_entry *hfsplus_alloc_attr_entry(void);
+void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry_p);
+int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *,
+ const hfsplus_btree_key *);
+int hfsplus_attr_build_key(struct super_block *, hfsplus_btree_key *,
+ u32, const char *);
+void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
+ u32 cnid,
+ struct hfsplus_attr_unistr *name);
+int hfsplus_find_attr(struct super_block *, u32,
+ const char *, struct hfs_find_data *);
+int hfsplus_attr_exists(struct inode *inode, const char *name);
+int hfsplus_create_attr(struct inode *, const char *, const void *, size_t);
+int hfsplus_delete_attr(struct inode *, const char *);
+int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid);
+
/* bitmap.c */
int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *);
int hfsplus_block_free(struct super_block *, u32, u32);
@@ -369,8 +401,15 @@ int hfs_brec_remove(struct hfs_find_data *);
/* bfind.c */
int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
void hfs_find_exit(struct hfs_find_data *);
-int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
-int hfs_brec_find(struct hfs_find_data *);
+int hfs_find_1st_rec_by_cnid(struct hfs_bnode *,
+ struct hfs_find_data *,
+ int *, int *, int *);
+int hfs_find_rec_by_key(struct hfs_bnode *,
+ struct hfs_find_data *,
+ int *, int *, int *);
+int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *,
+ search_strategy_t);
+int hfs_brec_find(struct hfs_find_data *, search_strategy_t);
int hfs_brec_read(struct hfs_find_data *, void *, int);
int hfs_brec_goto(struct hfs_find_data *, int);
@@ -417,11 +456,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
/* ioctl.c */
long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
- void *value, size_t size);
-ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* options.c */
int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
@@ -446,7 +480,7 @@ int hfsplus_strcmp(const struct hfsplus_unistr *,
int hfsplus_uni2asc(struct super_block *,
const struct hfsplus_unistr *, char *, int *);
int hfsplus_asc2uni(struct super_block *,
- struct hfsplus_unistr *, const char *, int);
+ struct hfsplus_unistr *, int, const char *, int);
int hfsplus_hash_dentry(const struct dentry *dentry,
const struct inode *inode, struct qstr *str);
int hfsplus_compare_dentry(const struct dentry *parent,
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 921967e5abb1..452ede01b036 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -52,13 +52,23 @@
typedef __be32 hfsplus_cnid;
typedef __be16 hfsplus_unichr;
+#define HFSPLUS_MAX_STRLEN 255
+#define HFSPLUS_ATTR_MAX_STRLEN 127
+
/* A "string" as used in filenames, etc. */
struct hfsplus_unistr {
__be16 length;
- hfsplus_unichr unicode[255];
+ hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
} __packed;
-#define HFSPLUS_MAX_STRLEN 255
+/*
+ * A "string" is used in attributes file
+ * for name of extended attribute
+ */
+struct hfsplus_attr_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
+} __packed;
/* POSIX permissions */
struct hfsplus_perm {
@@ -291,6 +301,8 @@ struct hfsplus_cat_file {
/* File attribute bits */
#define HFSPLUS_FILE_LOCKED 0x0001
#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
+#define HFSPLUS_XATTR_EXISTS 0x0004
+#define HFSPLUS_ACL_EXISTS 0x0008
/* HFS+ catalog thread (part of a cat_entry) */
struct hfsplus_cat_thread {
@@ -327,11 +339,63 @@ struct hfsplus_ext_key {
#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
+#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
+#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
+
+#define HFSPLUS_ATTR_INLINE_DATA 0x10
+#define HFSPLUS_ATTR_FORK_DATA 0x20
+#define HFSPLUS_ATTR_EXTENTS 0x30
+
+/* HFS+ attributes tree key */
+struct hfsplus_attr_key {
+ __be16 key_len;
+ __be16 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+ struct hfsplus_attr_unistr key_name;
+} __packed;
+
+#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
+
+/* HFS+ fork data attribute */
+struct hfsplus_attr_fork_data {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_fork_raw the_fork;
+} __packed;
+
+/* HFS+ extension attribute */
+struct hfsplus_attr_extents {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_extent extents;
+} __packed;
+
+#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
+
+/* HFS+ attribute inline data */
+struct hfsplus_attr_inline_data {
+ __be32 record_type;
+ __be32 reserved1;
+ u8 reserved2[6];
+ __be16 length;
+ u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
+} __packed;
+
+/* A data record in the attributes tree */
+typedef union {
+ __be32 record_type;
+ struct hfsplus_attr_fork_data fork_data;
+ struct hfsplus_attr_extents extents;
+ struct hfsplus_attr_inline_data inline_data;
+} __packed hfsplus_attr_entry;
+
/* HFS+ generic BTree key */
typedef union {
__be16 key_len;
struct hfsplus_cat_key cat;
struct hfsplus_ext_key ext;
+ struct hfsplus_attr_key attr;
} __packed hfsplus_btree_key;
#endif
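
The new on-disk records above pair with hfsplus_attr_build_record() in attributes.c: only HFSPLUS_ATTR_INLINE_DATA records are actually filled in (Mac OS X stores xattrs inline), the value is capped at HFSPLUS_MAX_INLINE_DATA_SIZE, and the record size handed to the b-tree is the raw_bytes offset plus the value length rounded up to an even number of bytes. A user-space sketch of that size calculation; the structure below only mirrors the layout for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_INLINE 3802

    struct inline_data {
            uint32_t record_type;
            uint32_t reserved1;
            uint8_t  reserved2[6];
            uint16_t length;
            uint8_t  raw_bytes[MAX_INLINE];
    };

    static int build_inline_record(struct inline_data *rec,
                                   const void *value, size_t size)
    {
            if (size > MAX_INLINE)
                    return -1;              /* value too large for an inline record */
            memset(rec, 0, sizeof(*rec));
            rec->record_type = 0x10;        /* HFSPLUS_ATTR_INLINE_DATA */
            rec->length = (uint16_t)size;
            memcpy(rec->raw_bytes, value, size);
            /* The size written to the b-tree is padded to an even length. */
            return (int)(offsetof(struct inline_data, raw_bytes) + ((size + 1) & ~1u));
    }

    int main(void)
    {
            struct inline_data rec;
            int sz = build_inline_record(&rec, "abc", 3);   /* odd len -> padded */

            printf("record size = %d\n", sz);
            return 0;
    }
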
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 799b336b59f9..160ccc9cdb4b 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -17,6 +17,7 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
+#include "xattr.h"
static int hfsplus_readpage(struct file *file, struct page *page)
{
@@ -124,7 +125,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+ struct inode *inode = file_inode(file)->i_mapping->host;
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
@@ -348,6 +349,18 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
error = error2;
}
+ if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
+ if (sbi->attr_tree) {
+ error2 =
+ filemap_write_and_wait(
+ sbi->attr_tree->inode->i_mapping);
+ if (!error)
+ error = error2;
+ } else {
+ printk(KERN_ERR "hfs: sync non-existent attributes tree\n");
+ }
+ }
+
if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
if (!error)
@@ -365,9 +378,10 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
static const struct inode_operations hfsplus_file_inode_operations = {
.lookup = hfsplus_file_lookup,
.setattr = hfsplus_setattr,
- .setxattr = hfsplus_setxattr,
- .getxattr = hfsplus_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = hfsplus_listxattr,
+ .removexattr = hfsplus_removexattr,
};
static const struct file_operations hfsplus_file_operations = {
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 09addc8615fa..d3ff5cc317d7 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -16,7 +16,6 @@
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/sched.h>
-#include <linux/xattr.h>
#include <asm/uaccess.h>
#include "hfsplus_fs.h"
@@ -59,7 +58,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
unsigned int flags = 0;
@@ -75,7 +74,7 @@ static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
unsigned int flags;
int err = 0;
@@ -151,110 +150,3 @@ long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -ENOTTY;
}
}
-
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- struct inode *inode = dentry->d_inode;
- struct hfs_find_data fd;
- hfsplus_cat_entry entry;
- struct hfsplus_cat_file *file;
- int res;
-
- if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
- return -EOPNOTSUPP;
-
- res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
- if (res)
- return res;
- res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
- if (res)
- goto out;
- hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
- sizeof(struct hfsplus_cat_file));
- file = &entry.file;
-
- if (!strcmp(name, "hfs.type")) {
- if (size == 4)
- memcpy(&file->user_info.fdType, value, 4);
- else
- res = -ERANGE;
- } else if (!strcmp(name, "hfs.creator")) {
- if (size == 4)
- memcpy(&file->user_info.fdCreator, value, 4);
- else
- res = -ERANGE;
- } else
- res = -EOPNOTSUPP;
- if (!res) {
- hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
- sizeof(struct hfsplus_cat_file));
- hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
- }
-out:
- hfs_find_exit(&fd);
- return res;
-}
-
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
- void *value, size_t size)
-{
- struct inode *inode = dentry->d_inode;
- struct hfs_find_data fd;
- hfsplus_cat_entry entry;
- struct hfsplus_cat_file *file;
- ssize_t res = 0;
-
- if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
- return -EOPNOTSUPP;
-
- if (size) {
- res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
- if (res)
- return res;
- res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
- if (res)
- goto out;
- hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
- sizeof(struct hfsplus_cat_file));
- }
- file = &entry.file;
-
- if (!strcmp(name, "hfs.type")) {
- if (size >= 4) {
- memcpy(value, &file->user_info.fdType, 4);
- res = 4;
- } else
- res = size ? -ERANGE : 4;
- } else if (!strcmp(name, "hfs.creator")) {
- if (size >= 4) {
- memcpy(value, &file->user_info.fdCreator, 4);
- res = 4;
- } else
- res = size ? -ERANGE : 4;
- } else
- res = -EOPNOTSUPP;
-out:
- if (size)
- hfs_find_exit(&fd);
- return res;
-}
-
-#define HFSPLUS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
-
-ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
- struct inode *inode = dentry->d_inode;
-
- if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
- return -EOPNOTSUPP;
-
- if (!buffer || !size)
- return HFSPLUS_ATTRLIST_SIZE;
- if (size < HFSPLUS_ATTRLIST_SIZE)
- return -ERANGE;
- strcpy(buffer, "hfs.type");
- strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
-
- return HFSPLUS_ATTRLIST_SIZE;
-}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 796198d26553..974c26f96fae 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -20,6 +20,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);
#include "hfsplus_fs.h"
+#include "xattr.h"
static int hfsplus_system_read_inode(struct inode *inode)
{
@@ -118,6 +119,7 @@ static int hfsplus_system_write_inode(struct inode *inode)
case HFSPLUS_ATTR_CNID:
fork = &vhdr->attr_file;
tree = sbi->attr_tree;
+ break;
default:
return -EIO;
}
@@ -191,6 +193,12 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
if (!error)
error = error2;
+ if (sbi->attr_tree) {
+ error2 =
+ filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
+ if (!error)
+ error = error2;
+ }
error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
if (!error)
error = error2;
@@ -281,6 +289,7 @@ static void hfsplus_put_super(struct super_block *sb)
hfsplus_sync_fs(sb, 1);
}
+ hfs_btree_close(sbi->attr_tree);
hfs_btree_close(sbi->cat_tree);
hfs_btree_close(sbi->ext_tree);
iput(sbi->alloc_file);
@@ -477,12 +486,20 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_ERR "hfs: failed to load catalog file\n");
goto out_close_ext_tree;
}
+ if (vhdr->attr_file.total_blocks != 0) {
+ sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
+ if (!sbi->attr_tree) {
+ printk(KERN_ERR "hfs: failed to load attributes file\n");
+ goto out_close_cat_tree;
+ }
+ }
+ sb->s_xattr = hfsplus_xattr_handlers;
inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
if (IS_ERR(inode)) {
printk(KERN_ERR "hfs: failed to load allocation file\n");
err = PTR_ERR(inode);
- goto out_close_cat_tree;
+ goto out_close_attr_tree;
}
sbi->alloc_file = inode;
@@ -542,10 +559,27 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
}
err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
&str, sbi->hidden_dir);
- mutex_unlock(&sbi->vh_mutex);
- if (err)
+ if (err) {
+ mutex_unlock(&sbi->vh_mutex);
+ goto out_put_hidden_dir;
+ }
+
+ err = hfsplus_init_inode_security(sbi->hidden_dir,
+ root, &str);
+ if (err == -EOPNOTSUPP)
+ err = 0; /* Operation is not supported. */
+ else if (err) {
+ /*
+ * Try to delete anyway without
+ * error analysis.
+ */
+ hfsplus_delete_cat(sbi->hidden_dir->i_ino,
+ root, &str);
+ mutex_unlock(&sbi->vh_mutex);
goto out_put_hidden_dir;
+ }
+ mutex_unlock(&sbi->vh_mutex);
hfsplus_mark_inode_dirty(sbi->hidden_dir,
HFSPLUS_I_CAT_DIRTY);
}
@@ -562,6 +596,8 @@ out_put_root:
sb->s_root = NULL;
out_put_alloc_file:
iput(sbi->alloc_file);
+out_close_attr_tree:
+ hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
@@ -635,9 +671,20 @@ static int __init init_hfsplus_fs(void)
hfsplus_init_once);
if (!hfsplus_inode_cachep)
return -ENOMEM;
+ err = hfsplus_create_attr_tree_cache();
+ if (err)
+ goto destroy_inode_cache;
err = register_filesystem(&hfsplus_fs_type);
if (err)
- kmem_cache_destroy(hfsplus_inode_cachep);
+ goto destroy_attr_tree_cache;
+ return 0;
+
+destroy_attr_tree_cache:
+ hfsplus_destroy_attr_tree_cache();
+
+destroy_inode_cache:
+ kmem_cache_destroy(hfsplus_inode_cachep);
+
return err;
}
@@ -650,6 +697,7 @@ static void __exit exit_hfsplus_fs(void)
* destroy cache.
*/
rcu_barrier();
+ hfsplus_destroy_attr_tree_cache();
kmem_cache_destroy(hfsplus_inode_cachep);
}
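
init_hfsplus_fs() above now has two things to unwind: if creating the attribute-entry cache fails the inode cache is destroyed, and if register_filesystem() fails both caches are destroyed, using the usual goto-label pattern so cleanup runs in reverse order of setup. A user-space sketch of the same unwinding shape (the cache and register functions below are stand-ins that only simulate failure):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *inode_cache, *attr_cache;

    static void *create_cache(const char *name)
    {
            (void)name;
            return malloc(1);
    }

    static void destroy_cache(void *c)
    {
            free(c);
    }

    static int register_fs(void)
    {
            return -EBUSY;                  /* pretend registration fails */
    }

    static int init_like_hfsplus(void)
    {
            int err;

            inode_cache = create_cache("inode");
            if (!inode_cache)
                    return -ENOMEM;

            attr_cache = create_cache("attr");
            if (!attr_cache) {
                    err = -ENOMEM;
                    goto destroy_inode_cache;
            }

            err = register_fs();
            if (err)
                    goto destroy_attr_cache;
            return 0;

    destroy_attr_cache:                     /* unwind in reverse order */
            destroy_cache(attr_cache);
    destroy_inode_cache:
            destroy_cache(inode_cache);
            return err;
    }

    int main(void)
    {
            printf("init -> %d\n", init_like_hfsplus());
            return 0;
    }
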
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index a32998f29f0b..2c2e47dcfdd8 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -295,7 +295,8 @@ static inline u16 *decompose_unichar(wchar_t uc, int *size)
return hfsplus_decompose_table + (off / 4);
}
-int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
+int hfsplus_asc2uni(struct super_block *sb,
+ struct hfsplus_unistr *ustr, int max_unistr_len,
const char *astr, int len)
{
int size, dsize, decompose;
@@ -303,7 +304,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
wchar_t c;
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
- while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
+ while (outlen < max_unistr_len && len > 0) {
size = asc2unichar(sb, astr, len, &c);
if (decompose)
@@ -311,7 +312,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
else
dstr = NULL;
if (dstr) {
- if (outlen + dsize > HFSPLUS_MAX_STRLEN)
+ if (outlen + dsize > max_unistr_len)
break;
do {
ustr->unicode[outlen++] = cpu_to_be16(*dstr++);
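
hfsplus_asc2uni() now takes the destination limit as a parameter, so one converter serves both catalog names (HFSPLUS_MAX_STRLEN, 255 UTF-16 units) and attribute names (HFSPLUS_ATTR_MAX_STRLEN, 127 units) instead of hard-coding 255. A simplified user-space sketch of a bounded ASCII-to-UTF-16 copy showing the role of the new parameter (no decomposition handling, illustrative names):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bounded ASCII -> UTF-16 copy; returns the number of units written. */
    static int asc2uni_bounded(uint16_t *out, int max_units,
                               const char *astr, int len)
    {
            int outlen = 0;

            while (outlen < max_units && len > 0) {
                    out[outlen++] = (uint16_t)(unsigned char)*astr++;
                    len--;
            }
            return outlen;
    }

    int main(void)
    {
            uint16_t attr_name[127];        /* attribute-name limit */
            uint16_t cat_name[255];         /* catalog-name limit */
            char s[201];

            memset(s, 'a', 200);
            s[200] = '\0';

            printf("attr units: %d\n", asc2uni_bounded(attr_name, 127, s, 200)); /* 127 */
            printf("cat units:  %d\n", asc2uni_bounded(cat_name, 255, s, 200));  /* 200 */
            return 0;
    }
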
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
new file mode 100644
index 000000000000..e8a4b0815c61
--- /dev/null
+++ b/fs/hfsplus/xattr.c
@@ -0,0 +1,709 @@
+/*
+ * linux/fs/hfsplus/xattr.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Logic of processing extended attributes
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+const struct xattr_handler *hfsplus_xattr_handlers[] = {
+ &hfsplus_xattr_osx_handler,
+ &hfsplus_xattr_user_handler,
+ &hfsplus_xattr_trusted_handler,
+ &hfsplus_xattr_security_handler,
+ NULL
+};
+
+static int strcmp_xattr_finder_info(const char *name)
+{
+ if (name) {
+ return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
+ sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
+ }
+ return -1;
+}
+
+static int strcmp_xattr_acl(const char *name)
+{
+ if (name) {
+ return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
+ sizeof(HFSPLUS_XATTR_ACL_NAME));
+ }
+ return -1;
+}
+
+static inline int is_known_namespace(const char *name)
+{
+ if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ return false;
+
+ return true;
+}
+
+static int can_set_xattr(struct inode *inode, const char *name,
+ const void *value, size_t value_len)
+{
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return -EOPNOTSUPP; /* TODO: implement ACL support */
+
+ if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) {
+ /*
+ * This makes sure that we aren't trying to set an
+ * attribute in a different namespace by prefixing it
+ * with "osx."
+ */
+ if (is_known_namespace(name + XATTR_MAC_OSX_PREFIX_LEN))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
+ /*
+ * Don't allow setting an attribute in an unknown namespace.
+ */
+ if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
+ strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int __hfsplus_setxattr(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ int err = 0;
+ struct hfs_find_data cat_fd;
+ hfsplus_cat_entry entry;
+ u16 cat_entry_flags, cat_entry_type;
+ u16 folder_finderinfo_len = sizeof(struct DInfo) +
+ sizeof(struct DXInfo);
+ u16 file_finderinfo_len = sizeof(struct FInfo) +
+ sizeof(struct FXInfo);
+
+ if ((!S_ISREG(inode->i_mode) &&
+ !S_ISDIR(inode->i_mode)) ||
+ HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+ err = can_set_xattr(inode, name, value, size);
+ if (err)
+ return err;
+
+ if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+ XATTR_MAC_OSX_PREFIX_LEN) == 0)
+ name += XATTR_MAC_OSX_PREFIX_LEN;
+
+ if (value == NULL) {
+ value = "";
+ size = 0;
+ }
+
+ err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
+ if (err) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ return err;
+ }
+
+ err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
+ if (err) {
+ printk(KERN_ERR "hfs: catalog searching failed\n");
+ goto end_setxattr;
+ }
+
+ if (!strcmp_xattr_finder_info(name)) {
+ if (flags & XATTR_CREATE) {
+ printk(KERN_ERR "hfs: xattr already exists\n");
+ err = -EOPNOTSUPP;
+ goto end_setxattr;
+ }
+ hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
+ sizeof(hfsplus_cat_entry));
+ if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
+ if (size == folder_finderinfo_len) {
+ memcpy(&entry.folder.user_info, value,
+ folder_finderinfo_len);
+ hfs_bnode_write(cat_fd.bnode, &entry,
+ cat_fd.entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ hfsplus_mark_inode_dirty(inode,
+ HFSPLUS_I_CAT_DIRTY);
+ } else {
+ err = -ERANGE;
+ goto end_setxattr;
+ }
+ } else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
+ if (size == file_finderinfo_len) {
+ memcpy(&entry.file.user_info, value,
+ file_finderinfo_len);
+ hfs_bnode_write(cat_fd.bnode, &entry,
+ cat_fd.entryoffset,
+ sizeof(struct hfsplus_cat_file));
+ hfsplus_mark_inode_dirty(inode,
+ HFSPLUS_I_CAT_DIRTY);
+ } else {
+ err = -ERANGE;
+ goto end_setxattr;
+ }
+ } else {
+ err = -EOPNOTSUPP;
+ goto end_setxattr;
+ }
+ goto end_setxattr;
+ }
+
+ if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
+ err = -EOPNOTSUPP;
+ goto end_setxattr;
+ }
+
+ if (hfsplus_attr_exists(inode, name)) {
+ if (flags & XATTR_CREATE) {
+ printk(KERN_ERR "hfs: xattr already exists\n");
+ err = -EOPNOTSUPP;
+ goto end_setxattr;
+ }
+ err = hfsplus_delete_attr(inode, name);
+ if (err)
+ goto end_setxattr;
+ err = hfsplus_create_attr(inode, name, value, size);
+ if (err)
+ goto end_setxattr;
+ } else {
+ if (flags & XATTR_REPLACE) {
+ printk(KERN_ERR "hfs: cannot replace xattr\n");
+ err = -EOPNOTSUPP;
+ goto end_setxattr;
+ }
+ err = hfsplus_create_attr(inode, name, value, size);
+ if (err)
+ goto end_setxattr;
+ }
+
+ cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
+ if (cat_entry_type == HFSPLUS_FOLDER) {
+ cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
+ cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, flags));
+ cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
+ if (!strcmp_xattr_acl(name))
+ cat_entry_flags |= HFSPLUS_ACL_EXISTS;
+ hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, flags),
+ cat_entry_flags);
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+ } else if (cat_entry_type == HFSPLUS_FILE) {
+ cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
+ cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, flags));
+ cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
+ if (!strcmp_xattr_acl(name))
+ cat_entry_flags |= HFSPLUS_ACL_EXISTS;
+ hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, flags),
+ cat_entry_flags);
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+ } else {
+ printk(KERN_ERR "hfs: invalid catalog entry type\n");
+ err = -EIO;
+ goto end_setxattr;
+ }
+
+end_setxattr:
+ hfs_find_exit(&cat_fd);
+ return err;
+}
+
+static inline int is_osx_xattr(const char *xattr_name)
+{
+ return !is_known_namespace(xattr_name);
+}
+
+static int name_len(const char *xattr_name, int xattr_name_len)
+{
+ int len = xattr_name_len + 1;
+
+ if (is_osx_xattr(xattr_name))
+ len += XATTR_MAC_OSX_PREFIX_LEN;
+
+ return len;
+}
+
+static int copy_name(char *buffer, const char *xattr_name, int name_len)
+{
+ int len = name_len;
+ int offset = 0;
+
+ if (is_osx_xattr(xattr_name)) {
+ strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
+ offset += XATTR_MAC_OSX_PREFIX_LEN;
+ len += XATTR_MAC_OSX_PREFIX_LEN;
+ }
+
+ strncpy(buffer + offset, xattr_name, name_len);
+ memset(buffer + offset + name_len, 0, 1);
+ len += 1;
+
+ return len;
+}
+
+static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
+ void *value, size_t size)
+{
+ ssize_t res = 0;
+ struct inode *inode = dentry->d_inode;
+ struct hfs_find_data fd;
+ u16 entry_type;
+ u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+ u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ u16 record_len = max(folder_rec_len, file_rec_len);
+ u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
+ u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+
+ if (size >= record_len) {
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+ if (res) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ return res;
+ }
+ res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+ if (res)
+ goto end_getxattr_finder_info;
+ entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
+
+ if (entry_type == HFSPLUS_FOLDER) {
+ hfs_bnode_read(fd.bnode, folder_finder_info,
+ fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, user_info),
+ folder_rec_len);
+ memcpy(value, folder_finder_info, folder_rec_len);
+ res = folder_rec_len;
+ } else if (entry_type == HFSPLUS_FILE) {
+ hfs_bnode_read(fd.bnode, file_finder_info,
+ fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, user_info),
+ file_rec_len);
+ memcpy(value, file_finder_info, file_rec_len);
+ res = file_rec_len;
+ } else {
+ res = -EOPNOTSUPP;
+ goto end_getxattr_finder_info;
+ }
+ } else
+ res = size ? -ERANGE : record_len;
+
+end_getxattr_finder_info:
+ if (size >= record_len)
+ hfs_find_exit(&fd);
+ return res;
+}
+
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ void *value, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct hfs_find_data fd;
+ hfsplus_attr_entry *entry;
+ __be32 xattr_record_type;
+ u32 record_type;
+ u16 record_length = 0;
+ ssize_t res = 0;
+
+ if ((!S_ISREG(inode->i_mode) &&
+ !S_ISDIR(inode->i_mode)) ||
+ HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+ if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+ XATTR_MAC_OSX_PREFIX_LEN) == 0) {
+ /* skip "osx." prefix */
+ name += XATTR_MAC_OSX_PREFIX_LEN;
+ /*
+ * Don't allow retrieving properly prefixed attributes
+ * by prepending them with "osx."
+ */
+ if (is_known_namespace(name))
+ return -EOPNOTSUPP;
+ }
+
+ if (!strcmp_xattr_finder_info(name))
+ return hfsplus_getxattr_finder_info(dentry, value, size);
+
+ if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+ return -EOPNOTSUPP;
+
+ entry = hfsplus_alloc_attr_entry();
+ if (!entry) {
+ printk(KERN_ERR "hfs: can't allocate xattr entry\n");
+ return -ENOMEM;
+ }
+
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
+ if (res) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ goto failed_getxattr_init;
+ }
+
+ res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
+ if (res) {
+ if (res == -ENOENT)
+ res = -ENODATA;
+ else
+ printk(KERN_ERR "hfs: xattr searching failed\n");
+ goto out;
+ }
+
+ hfs_bnode_read(fd.bnode, &xattr_record_type,
+ fd.entryoffset, sizeof(xattr_record_type));
+ record_type = be32_to_cpu(xattr_record_type);
+ if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
+ record_length = hfs_bnode_read_u16(fd.bnode,
+ fd.entryoffset +
+ offsetof(struct hfsplus_attr_inline_data,
+ length));
+ if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
+ printk(KERN_ERR "hfs: invalid xattr record size\n");
+ res = -EIO;
+ goto out;
+ }
+ } else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
+ record_type == HFSPLUS_ATTR_EXTENTS) {
+ printk(KERN_ERR "hfs: only inline data xattrs are supported\n");
+ res = -EOPNOTSUPP;
+ goto out;
+ } else {
+ printk(KERN_ERR "hfs: invalid xattr record\n");
+ res = -EIO;
+ goto out;
+ }
+
+ if (size) {
+ hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
+ offsetof(struct hfsplus_attr_inline_data,
+ raw_bytes) + record_length);
+ }
+
+ if (size >= record_length) {
+ memcpy(value, entry->inline_data.raw_bytes, record_length);
+ res = record_length;
+ } else
+ res = size ? -ERANGE : record_length;
+
+out:
+ hfs_find_exit(&fd);
+
+failed_getxattr_init:
+ hfsplus_destroy_attr_entry(entry);
+ return res;
+}
+
+static inline int can_list(const char *xattr_name)
+{
+ if (!xattr_name)
+ return 0;
+
+ return strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
+ XATTR_TRUSTED_PREFIX_LEN) ||
+ capable(CAP_SYS_ADMIN);
+}
+
+static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
+ char *buffer, size_t size)
+{
+ ssize_t res = 0;
+ struct inode *inode = dentry->d_inode;
+ struct hfs_find_data fd;
+ u16 entry_type;
+ u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
+ u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+ unsigned long len, found_bit;
+ int xattr_name_len, symbols_count;
+
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+ if (res) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ return res;
+ }
+
+ res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+ if (res)
+ goto end_listxattr_finder_info;
+
+ entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
+ if (entry_type == HFSPLUS_FOLDER) {
+ len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+ hfs_bnode_read(fd.bnode, folder_finder_info,
+ fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, user_info),
+ len);
+ found_bit = find_first_bit((void *)folder_finder_info, len*8);
+ } else if (entry_type == HFSPLUS_FILE) {
+ len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+ hfs_bnode_read(fd.bnode, file_finder_info,
+ fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, user_info),
+ len);
+ found_bit = find_first_bit((void *)file_finder_info, len*8);
+ } else {
+ res = -EOPNOTSUPP;
+ goto end_listxattr_finder_info;
+ }
+
+ if (found_bit >= (len*8))
+ res = 0;
+ else {
+ symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
+ xattr_name_len =
+ name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
+ if (!buffer || !size) {
+ if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
+ res = xattr_name_len;
+ } else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
+ if (size < xattr_name_len)
+ res = -ERANGE;
+ else {
+ res = copy_name(buffer,
+ HFSPLUS_XATTR_FINDER_INFO_NAME,
+ symbols_count);
+ }
+ }
+ }
+
+end_listxattr_finder_info:
+ hfs_find_exit(&fd);
+
+ return res;
+}
+
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+ ssize_t err;
+ ssize_t res = 0;
+ struct inode *inode = dentry->d_inode;
+ struct hfs_find_data fd;
+ u16 key_len = 0;
+ struct hfsplus_attr_key attr_key;
+ char strbuf[HFSPLUS_ATTR_MAX_STRLEN +
+ XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+ int xattr_name_len;
+
+ if ((!S_ISREG(inode->i_mode) &&
+ !S_ISDIR(inode->i_mode)) ||
+ HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+ res = hfsplus_listxattr_finder_info(dentry, buffer, size);
+ if (res < 0)
+ return res;
+ else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+ return (res == 0) ? -EOPNOTSUPP : res;
+
+ err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
+ if (err) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ return err;
+ }
+
+ err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
+ if (err) {
+ if (err == -ENOENT) {
+ if (res == 0)
+ res = -ENODATA;
+ goto end_listxattr;
+ } else {
+ res = err;
+ goto end_listxattr;
+ }
+ }
+
+ for (;;) {
+ key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
+ if (key_len == 0 || key_len > fd.tree->max_key_len) {
+ printk(KERN_ERR "hfs: invalid xattr key length: %d\n",
+ key_len);
+ res = -EIO;
+ goto end_listxattr;
+ }
+
+ hfs_bnode_read(fd.bnode, &attr_key,
+ fd.keyoffset, key_len + sizeof(key_len));
+
+ if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
+ goto end_listxattr;
+
+ xattr_name_len = HFSPLUS_ATTR_MAX_STRLEN;
+ if (hfsplus_uni2asc(inode->i_sb,
+ (const struct hfsplus_unistr *)&fd.key->attr.key_name,
+ strbuf, &xattr_name_len)) {
+ printk(KERN_ERR "hfs: unicode conversion failed\n");
+ res = -EIO;
+ goto end_listxattr;
+ }
+
+ if (!buffer || !size) {
+ if (can_list(strbuf))
+ res += name_len(strbuf, xattr_name_len);
+ } else if (can_list(strbuf)) {
+ if (size < (res + name_len(strbuf, xattr_name_len))) {
+ res = -ERANGE;
+ goto end_listxattr;
+ } else
+ res += copy_name(buffer + res,
+ strbuf, xattr_name_len);
+ }
+
+ if (hfs_brec_goto(&fd, 1))
+ goto end_listxattr;
+ }
+
+end_listxattr:
+ hfs_find_exit(&fd);
+ return res;
+}
+
+int hfsplus_removexattr(struct dentry *dentry, const char *name)
+{
+ int err = 0;
+ struct inode *inode = dentry->d_inode;
+ struct hfs_find_data cat_fd;
+ u16 flags;
+ u16 cat_entry_type;
+ int is_xattr_acl_deleted = 0;
+ int is_all_xattrs_deleted = 0;
+
+ if ((!S_ISREG(inode->i_mode) &&
+ !S_ISDIR(inode->i_mode)) ||
+ HFSPLUS_IS_RSRC(inode))
+ return -EOPNOTSUPP;
+
+ if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+ return -EOPNOTSUPP;
+
+ err = can_set_xattr(inode, name, NULL, 0);
+ if (err)
+ return err;
+
+ if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+ XATTR_MAC_OSX_PREFIX_LEN) == 0)
+ name += XATTR_MAC_OSX_PREFIX_LEN;
+
+ if (!strcmp_xattr_finder_info(name))
+ return -EOPNOTSUPP;
+
+ err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
+ if (err) {
+ printk(KERN_ERR "hfs: can't init xattr find struct\n");
+ return err;
+ }
+
+ err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
+ if (err) {
+ printk(KERN_ERR "hfs: catalog searching failed\n");
+ goto end_removexattr;
+ }
+
+ err = hfsplus_delete_attr(inode, name);
+ if (err)
+ goto end_removexattr;
+
+ is_xattr_acl_deleted = !strcmp_xattr_acl(name);
+ is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);
+
+ if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
+ goto end_removexattr;
+
+ cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
+
+ if (cat_entry_type == HFSPLUS_FOLDER) {
+ flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, flags));
+ if (is_xattr_acl_deleted)
+ flags &= ~HFSPLUS_ACL_EXISTS;
+ if (is_all_xattrs_deleted)
+ flags &= ~HFSPLUS_XATTR_EXISTS;
+ hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_folder, flags),
+ flags);
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+ } else if (cat_entry_type == HFSPLUS_FILE) {
+ flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, flags));
+ if (is_xattr_acl_deleted)
+ flags &= ~HFSPLUS_ACL_EXISTS;
+ if (is_all_xattrs_deleted)
+ flags &= ~HFSPLUS_XATTR_EXISTS;
+ hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+ offsetof(struct hfsplus_cat_file, flags),
+ flags);
+ hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+ } else {
+ printk(KERN_ERR "hfs: invalid catalog entry type\n");
+ err = -EIO;
+ goto end_removexattr;
+ }
+
+end_removexattr:
+ hfs_find_exit(&cat_fd);
+ return err;
+}
+
+static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
+ XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+ strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+ return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
+ XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+ strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+ return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ /*
+ * This method is not used.
+ * hfsplus_listxattr() is used instead of generic_listxattr().
+ */
+ return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_osx_handler = {
+ .prefix = XATTR_MAC_OSX_PREFIX,
+ .list = hfsplus_osx_listxattr,
+ .get = hfsplus_osx_getxattr,
+ .set = hfsplus_osx_setxattr,
+};
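For orientation, here is a minimal userspace sketch of how the handler table registered above is exercised through the generic xattr syscalls. It is illustrative only: the mount point and attribute names are assumptions rather than values taken from the patch, and error handling is reduced to perror().

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/hfsplus/file.txt";	/* hypothetical mount point */
	char list[1024];
	char value[64];
	ssize_t len;

	/* "user."-prefixed names are routed through hfsplus_user_setxattr()
	 * and end up in __hfsplus_setxattr() above */
	if (setxattr(path, "user.comment", "hello", 5, 0) != 0)
		perror("setxattr");

	/* "osx."-prefixed names reach hfsplus_osx_getxattr(); the name after
	 * the prefix is hypothetical */
	len = getxattr(path, "osx.demo.attribute", value, sizeof(value));
	if (len < 0)
		perror("getxattr");

	/* enumeration goes through hfsplus_listxattr(), not generic_listxattr() */
	len = listxattr(path, list, sizeof(list));
	for (ssize_t off = 0; off < len; off += (ssize_t)strlen(list + off) + 1)
		printf("%s\n", list + off);

	return 0;
}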
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
new file mode 100644
index 000000000000..847b695b984d
--- /dev/null
+++ b/fs/hfsplus/xattr.h
@@ -0,0 +1,60 @@
+/*
+ * linux/fs/hfsplus/xattr.h
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Logic for processing extended attributes
+ */
+
+#ifndef _LINUX_HFSPLUS_XATTR_H
+#define _LINUX_HFSPLUS_XATTR_H
+
+#include <linux/xattr.h>
+
+extern const struct xattr_handler hfsplus_xattr_osx_handler;
+extern const struct xattr_handler hfsplus_xattr_user_handler;
+extern const struct xattr_handler hfsplus_xattr_trusted_handler;
+/*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/
+/*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/
+extern const struct xattr_handler hfsplus_xattr_security_handler;
+
+extern const struct xattr_handler *hfsplus_xattr_handlers[];
+
+int __hfsplus_setxattr(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags);
+
+static inline int hfsplus_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
+}
+
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ void *value, size_t size);
+
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+int hfsplus_removexattr(struct dentry *dentry, const char *name);
+
+int hfsplus_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
+
+static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir)
+{
+ /*TODO: implement*/
+ return 0;
+}
+
+static inline int hfsplus_init_inode_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr)
+{
+ int err;
+
+ err = hfsplus_init_acl(inode, dir);
+ if (!err)
+ err = hfsplus_init_security(inode, dir, qstr);
+ return err;
+}
+
+#endif
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
new file mode 100644
index 000000000000..83b842f113c5
--- /dev/null
+++ b/fs/hfsplus/xattr_security.c
@@ -0,0 +1,104 @@
+/*
+ * linux/fs/hfsplus/xattr_security.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for storing security labels as extended attributes.
+ */
+
+#include <linux/security.h>
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+ strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
+
+ return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_security_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+ strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
+
+ return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_security_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ /*
+ * This method is not used.
+ * hfsplus_listxattr() is used instead of generic_listxattr().
+ */
+ return -EOPNOTSUPP;
+}
+
+static int hfsplus_initxattrs(struct inode *inode,
+ const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t xattr_name_len;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ xattr_name_len = strlen(xattr->name);
+
+ if (xattr_name_len == 0)
+ continue;
+
+ if (xattr_name_len + XATTR_SECURITY_PREFIX_LEN >
+ HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+ strcpy(xattr_name +
+ XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ memset(xattr_name +
+ XATTR_SECURITY_PREFIX_LEN + xattr_name_len, 0, 1);
+
+ err = __hfsplus_setxattr(inode, xattr_name,
+ xattr->value, xattr->value_len, 0);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int hfsplus_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &hfsplus_initxattrs, NULL);
+}
+
+const struct xattr_handler hfsplus_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .list = hfsplus_security_listxattr,
+ .get = hfsplus_security_getxattr,
+ .set = hfsplus_security_setxattr,
+};
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
new file mode 100644
index 000000000000..426cee277542
--- /dev/null
+++ b/fs/hfsplus/xattr_trusted.c
@@ -0,0 +1,63 @@
+/*
+ * linux/fs/hfsplus/xattr_trusted.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for trusted extended attributes.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
+ strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
+
+ return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
+ strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
+
+ return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ /*
+ * This method is not used.
+ * hfsplus_listxattr() is used instead of generic_listxattr().
+ */
+ return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .list = hfsplus_trusted_listxattr,
+ .get = hfsplus_trusted_getxattr,
+ .set = hfsplus_trusted_setxattr,
+};
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
new file mode 100644
index 000000000000..e34016561ae0
--- /dev/null
+++ b/fs/hfsplus/xattr_user.c
@@ -0,0 +1,63 @@
+/*
+ * linux/fs/hfsplus/xattr_user.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for user extended attributes.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+static int hfsplus_user_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_USER_PREFIX);
+ strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
+
+ return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_user_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
+{
+ char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+ size_t len = strlen(name);
+
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+ return -EOPNOTSUPP;
+
+ strcpy(xattr_name, XATTR_USER_PREFIX);
+ strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
+
+ return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_user_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ /*
+ * This method is not used.
+ * hfsplus_listxattr() is used instead of generic_listxattr().
+ */
+ return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .list = hfsplus_user_listxattr,
+ .get = hfsplus_user_getxattr,
+ .set = hfsplus_user_setxattr,
+};
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 457addc5c91f..fbabb906066f 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -30,7 +30,7 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
return list_entry(inode, struct hostfs_inode_info, vfs_inode);
}
-#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
+#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
static int hostfs_d_delete(const struct dentry *dentry)
{
@@ -861,14 +861,6 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
}
static const struct inode_operations hostfs_iops = {
- .create = hostfs_create,
- .link = hostfs_link,
- .unlink = hostfs_unlink,
- .symlink = hostfs_symlink,
- .mkdir = hostfs_mkdir,
- .rmdir = hostfs_rmdir,
- .mknod = hostfs_mknod,
- .rename = hostfs_rename,
.permission = hostfs_permission,
.setattr = hostfs_setattr,
};
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 78e12b2e0ea2..546f6d39713a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -25,7 +25,7 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
loff_t new_off = off + (whence == 1 ? filp->f_pos : 0);
loff_t pos;
struct quad_buffer_head qbh;
- struct inode *i = filp->f_path.dentry->d_inode;
+ struct inode *i = file_inode(filp);
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct super_block *s = i->i_sb;
@@ -57,7 +57,7 @@ fail:
static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index fbfe2df5624b..9f9dbeceeee7 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -152,7 +152,7 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
retval = do_sync_write(file, buf, count, ppos);
if (retval > 0) {
hpfs_lock(file->f_path.dentry->d_sb);
- hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1;
+ hpfs_i(file_inode(file))->i_dirty = 1;
hpfs_unlock(file->f_path.dentry->d_sb);
}
return retval;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 43b315f2002b..74f55703be49 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -180,7 +180,7 @@ static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count,
ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
ssize_t n;
- read = file->f_path.dentry->d_inode->i_fop->read;
+ read = file_inode(file)->i_fop->read;
if (!is_user)
set_fs(KERNEL_DS);
@@ -288,7 +288,7 @@ static ssize_t hppfs_write(struct file *file, const char __user *buf,
struct file *proc_file = data->proc_file;
ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
- write = proc_file->f_path.dentry->d_inode->i_fop->write;
+ write = file_inode(proc_file)->i_fop->write;
return (*write)(proc_file, buf, len, ppos);
}
@@ -513,7 +513,7 @@ static loff_t hppfs_llseek(struct file *file, loff_t off, int where)
loff_t (*llseek)(struct file *, loff_t, int);
loff_t ret;
- llseek = proc_file->f_path.dentry->d_inode->i_fop->llseek;
+ llseek = file_inode(proc_file)->i_fop->llseek;
if (llseek != NULL) {
ret = (*llseek)(proc_file, off, where);
if (ret < 0)
@@ -561,7 +561,7 @@ static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
});
int err;
- readdir = proc_file->f_path.dentry->d_inode->i_fop->readdir;
+ readdir = file_inode(proc_file)->i_fop->readdir;
proc_file->f_pos = file->f_pos;
err = (*readdir)(proc_file, &dirent, hppfs_filldir);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 78bde32ea951..7f94e0cbc69c 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -97,7 +97,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
@@ -918,16 +918,25 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
+static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
+ dentry->d_name.name);
+}
+
+static struct dentry_operations anon_ops = {
+ .d_dname = hugetlb_dname
+};
+
struct file *hugetlb_file_setup(const char *name, unsigned long addr,
size_t size, vm_flags_t acctflag,
struct user_struct **user,
int creat_flags, int page_size_log)
{
- int error = -ENOMEM;
- struct file *file;
+ struct file *file = ERR_PTR(-ENOMEM);
struct inode *inode;
struct path path;
- struct dentry *root;
+ struct super_block *sb;
struct qstr quick_string;
struct hstate *hstate;
unsigned long num_pages;
@@ -955,17 +964,18 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
}
}
- root = hugetlbfs_vfsmount[hstate_idx]->mnt_root;
+ sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
quick_string.name = name;
quick_string.len = strlen(quick_string.name);
quick_string.hash = 0;
- path.dentry = d_alloc(root, &quick_string);
+ path.dentry = d_alloc_pseudo(sb, &quick_string);
if (!path.dentry)
goto out_shm_unlock;
+ d_set_d_op(path.dentry, &anon_ops);
path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
- error = -ENOSPC;
- inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
+ file = ERR_PTR(-ENOSPC);
+ inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out_dentry;
@@ -973,7 +983,7 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
size += addr & ~huge_page_mask(hstate);
num_pages = ALIGN(size, huge_page_size(hstate)) >>
huge_page_shift(hstate);
- error = -ENOMEM;
+ file = ERR_PTR(-ENOMEM);
if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
goto out_inode;
@@ -981,10 +991,9 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
inode->i_size = size;
clear_nlink(inode);
- error = -ENFILE;
file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&hugetlbfs_file_operations);
- if (!file)
+ if (IS_ERR(file))
goto out_dentry; /* inode is already attached */
return file;
@@ -998,7 +1007,7 @@ out_shm_unlock:
user_shm_unlock(size, *user);
*user = NULL;
}
- return ERR_PTR(error);
+ return file;
}
static int __init init_hugetlbfs_fs(void)
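The hugetlbfs hunk above switches hugetlb_file_setup() from keeping a separate int error that is converted at the end to carrying the error inside the returned pointer itself. Below is a minimal userspace sketch of that error-pointer convention; ERR_PTR(), IS_ERR() and PTR_ERR() are re-implemented here purely for illustration (the kernel's own definitions live in err.h), and setup() is a hypothetical stand-in, not the real function.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Userspace re-implementation, for illustration only. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Mirrors the new control flow: the error travels in the pointer itself
 * instead of a separate "int error" converted at the end. */
static void *setup(int fail)
{
	void *file = ERR_PTR(-ENOMEM);

	if (fail)
		return file;		/* caller sees IS_ERR(file) */
	return "open file";		/* stand-in for a real struct file * */
}

int main(void)
{
	void *f = setup(1);

	if (IS_ERR(f))
		printf("setup failed: %ld\n", PTR_ERR(f));
	return 0;
}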
diff --git a/fs/inode.c b/fs/inode.c
index 14084b72b259..f5f7c06c36fb 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
int (*test)(struct inode *, void *),
void *data)
{
- struct hlist_node *node;
struct inode *inode = NULL;
repeat:
- hlist_for_each_entry(inode, node, head, i_hash) {
+ hlist_for_each_entry(inode, head, i_hash) {
spin_lock(&inode->i_lock);
if (inode->i_sb != sb) {
spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
static struct inode *find_inode_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino)
{
- struct hlist_node *node;
struct inode *inode = NULL;
repeat:
- hlist_for_each_entry(inode, node, head, i_hash) {
+ hlist_for_each_entry(inode, head, i_hash) {
spin_lock(&inode->i_lock);
if (inode->i_ino != ino) {
spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
struct hlist_head *b = inode_hashtable + hash(sb, ino);
- struct hlist_node *node;
struct inode *inode;
spin_lock(&inode_hash_lock);
- hlist_for_each_entry(inode, node, b, i_hash) {
+ hlist_for_each_entry(inode, b, i_hash) {
if (inode->i_ino == ino && inode->i_sb == sb) {
spin_unlock(&inode_hash_lock);
return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
struct hlist_head *head = inode_hashtable + hash(sb, ino);
while (1) {
- struct hlist_node *node;
struct inode *old = NULL;
spin_lock(&inode_hash_lock);
- hlist_for_each_entry(old, node, head, i_hash) {
+ hlist_for_each_entry(old, head, i_hash) {
if (old->i_ino != ino)
continue;
if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
}
break;
}
- if (likely(!node)) {
+ if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
while (1) {
- struct hlist_node *node;
struct inode *old = NULL;
spin_lock(&inode_hash_lock);
- hlist_for_each_entry(old, node, head, i_hash) {
+ hlist_for_each_entry(old, head, i_hash) {
if (old->i_sb != sb)
continue;
if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
}
break;
}
- if (likely(!node)) {
+ if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
@@ -1655,7 +1650,7 @@ EXPORT_SYMBOL(file_remove_suid);
int file_update_time(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct timespec now;
int sync_it = 0;
int ret;
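The fs/inode.c changes above follow an hlist API change in which hlist_for_each_entry() drops its separate struct hlist_node cursor, so the typed entry pointer is the only loop variable and can be tested after the loop (hence "if (likely(!old))" replacing "if (likely(!node))"). Below is a self-contained sketch of that iteration style; the toy hlist types and the hlist_for_each_item() macro are simplified stand-ins for illustration, not the kernel's real list.h implementation.

#include <stddef.h>
#include <stdio.h>

/* Toy singly linked hash chain, for illustration only. */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

struct item {
	unsigned long ino;
	struct hlist_node hash;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* New-style iteration: the typed entry pointer is the only cursor, so the
 * extra "struct hlist_node *node" variable disappears and the caller can
 * inspect the entry pointer once the loop ends.  (The real kernel macro
 * uses typeof(); this one hardcodes struct item to stay short.) */
#define hlist_for_each_item(pos, head, member)				\
	for ((pos) = (head)->first ?					\
		container_of((head)->first, struct item, member) : NULL; \
	     (pos);							\
	     (pos) = (pos)->member.next ?				\
		container_of((pos)->member.next, struct item, member) : NULL)

int main(void)
{
	struct item a = { .ino = 1 }, b = { .ino = 2 };
	struct hlist_head head = { .first = &a.hash };
	struct item *old;

	a.hash.next = &b.hash;
	b.hash.next = NULL;

	hlist_for_each_item(old, &head, hash)
		printf("ino %lu\n", old->ino);

	if (!old)	/* NULL after a full walk, mirroring "if (likely(!old))" */
		printf("no match found\n");
	return 0;
}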
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 3bdad6d1f268..fd507fb460f8 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -175,7 +175,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
struct fiemap fiemap;
struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
struct fiemap_extent_info fieinfo = { 0, };
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
u64 len;
int error;
@@ -424,7 +424,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
*/
int ioctl_preallocate(struct file *filp, void __user *argp)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct space_resv sr;
if (copy_from_user(&sr, argp, sizeof(sr)))
@@ -449,7 +449,7 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
static int file_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int __user *p = (int __user *)arg;
switch (cmd) {
@@ -512,7 +512,7 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
static int ioctl_fsfreeze(struct file *filp)
{
- struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+ struct super_block *sb = file_inode(filp)->i_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -527,7 +527,7 @@ static int ioctl_fsfreeze(struct file *filp)
static int ioctl_fsthaw(struct file *filp)
{
- struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+ struct super_block *sb = file_inode(filp)->i_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -548,7 +548,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
{
int error = 0;
int __user *argp = (int __user *)arg;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
switch (cmd) {
case FIOCLEX:
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 0b3fa7974fa8..592e5115a561 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -296,7 +296,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
*/
static int zisofs_readpage(struct file *file, struct page *page)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
int i, pcount, full_page;
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index f20437c068a0..a7d5c3c3d4e6 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -253,7 +253,7 @@ static int isofs_readdir(struct file *filp,
int result;
char *tmpname;
struct iso_directory_record *tmpde;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
tmpname = (char *)__get_free_page(GFP_KERNEL);
if (tmpname == NULL)
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 2b4f2358eadb..12088d8de3fa 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -125,10 +125,10 @@ isofs_export_encode_fh(struct inode *inode,
*/
if (parent && (len < 5)) {
*max_len = 5;
- return 255;
+ return FILEID_INVALID;
} else if (len < 3) {
*max_len = 3;
- return 255;
+ return FILEID_INVALID;
}
len = 3;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 3091d42992f0..750c70148eff 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -435,7 +435,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait;
+ stats.run.rs_request_delay = 0;
stats.run.rs_locked = jiffies;
+ if (commit_transaction->t_requested)
+ stats.run.rs_request_delay =
+ jbd2_time_diff(commit_transaction->t_requested,
+ stats.run.rs_locked);
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
@@ -1116,7 +1121,10 @@ restart_loop:
*/
spin_lock(&journal->j_history_lock);
journal->j_stats.ts_tid++;
+ if (commit_transaction->t_requested)
+ journal->j_stats.ts_requested++;
journal->j_stats.run.rs_wait += stats.run.rs_wait;
+ journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
journal->j_stats.run.rs_running += stats.run.rs_running;
journal->j_stats.run.rs_locked += stats.run.rs_locked;
journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index dbf41f9452db..ed10991ab006 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -35,7 +35,6 @@
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/math64.h>
#include <linux/hash.h>
@@ -51,6 +50,14 @@
#include <asm/uaccess.h>
#include <asm/page.h>
+#ifdef CONFIG_JBD2_DEBUG
+ushort jbd2_journal_enable_debug __read_mostly;
+EXPORT_SYMBOL(jbd2_journal_enable_debug);
+
+module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
+MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
+#endif
+
EXPORT_SYMBOL(jbd2_journal_extend);
EXPORT_SYMBOL(jbd2_journal_stop);
EXPORT_SYMBOL(jbd2_journal_lock_updates);
@@ -513,6 +520,10 @@ int __jbd2_log_space_left(journal_t *journal)
*/
int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
+ /* Return if a commit of the target transaction has already been requested */
+ if (journal->j_commit_request == target)
+ return 0;
+
/*
* The only transaction we can possibly wait upon is the
* currently running transaction (if it exists). Otherwise,
@@ -529,6 +540,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
jbd_debug(1, "JBD2: requesting commit %d/%d\n",
journal->j_commit_request,
journal->j_commit_sequence);
+ journal->j_running_transaction->t_requested = jiffies;
wake_up(&journal->j_wait_commit);
return 1;
} else if (!tid_geq(journal->j_commit_request, target))
@@ -894,13 +906,18 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
if (v != SEQ_START_TOKEN)
return 0;
- seq_printf(seq, "%lu transaction, each up to %u blocks\n",
- s->stats->ts_tid,
- s->journal->j_max_transaction_buffers);
+ seq_printf(seq, "%lu transactions (%lu requested), "
+ "each up to %u blocks\n",
+ s->stats->ts_tid, s->stats->ts_requested,
+ s->journal->j_max_transaction_buffers);
if (s->stats->ts_tid == 0)
return 0;
seq_printf(seq, "average: \n %ums waiting for transaction\n",
jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
+ seq_printf(seq, " %ums request delay\n",
+ (s->stats->ts_requested == 0) ? 0 :
+ jiffies_to_msecs(s->stats->run.rs_request_delay /
+ s->stats->ts_requested));
seq_printf(seq, " %ums running transaction\n",
jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
seq_printf(seq, " %ums transaction was being locked\n",
@@ -2485,45 +2502,6 @@ restart:
spin_unlock(&journal->j_list_lock);
}
-/*
- * debugfs tunables
- */
-#ifdef CONFIG_JBD2_DEBUG
-u8 jbd2_journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(jbd2_journal_enable_debug);
-
-#define JBD2_DEBUG_NAME "jbd2-debug"
-
-static struct dentry *jbd2_debugfs_dir;
-static struct dentry *jbd2_debug;
-
-static void __init jbd2_create_debugfs_entry(void)
-{
- jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
- if (jbd2_debugfs_dir)
- jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
- S_IRUGO | S_IWUSR,
- jbd2_debugfs_dir,
- &jbd2_journal_enable_debug);
-}
-
-static void __exit jbd2_remove_debugfs_entry(void)
-{
- debugfs_remove(jbd2_debug);
- debugfs_remove(jbd2_debugfs_dir);
-}
-
-#else
-
-static void __init jbd2_create_debugfs_entry(void)
-{
-}
-
-static void __exit jbd2_remove_debugfs_entry(void)
-{
-}
-
-#endif
#ifdef CONFIG_PROC_FS
@@ -2609,7 +2587,6 @@ static int __init journal_init(void)
ret = journal_init_caches();
if (ret == 0) {
- jbd2_create_debugfs_entry();
jbd2_create_jbd_stats_proc_entry();
} else {
jbd2_journal_destroy_caches();
@@ -2624,7 +2601,6 @@ static void __exit journal_exit(void)
if (n)
printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
#endif
- jbd2_remove_debugfs_entry();
jbd2_remove_jbd_stats_proc_entry();
jbd2_journal_destroy_caches();
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index df9f29760efa..d6ee5aed56b1 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -30,6 +30,8 @@
#include <linux/bug.h>
#include <linux/module.h>
+#include <trace/events/jbd2.h>
+
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
@@ -100,6 +102,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
journal->j_running_transaction = transaction;
transaction->t_max_wait = 0;
transaction->t_start = jiffies;
+ transaction->t_requested = 0;
return transaction;
}
@@ -306,6 +309,8 @@ repeat:
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
+ handle->h_requested_credits = nblocks;
+ handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
@@ -352,7 +357,8 @@ static handle_t *new_handle(int nblocks)
* Return a pointer to a newly allocated handle, or an ERR_PTR() value
* on failure.
*/
-handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no)
{
handle_t *handle = journal_current_handle();
int err;
@@ -376,8 +382,13 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
if (err < 0) {
jbd2_free_handle(handle);
current->journal_info = NULL;
- handle = ERR_PTR(err);
+ return ERR_PTR(err);
}
+ handle->h_type = type;
+ handle->h_line_no = line_no;
+ trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
+ handle->h_transaction->t_tid, type,
+ line_no, nblocks);
return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);
@@ -385,7 +396,7 @@ EXPORT_SYMBOL(jbd2__journal_start);
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
- return jbd2__journal_start(journal, nblocks, GFP_NOFS);
+ return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
@@ -447,7 +458,14 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
goto unlock;
}
+ trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
+ handle->h_transaction->t_tid,
+ handle->h_type, handle->h_line_no,
+ handle->h_buffer_credits,
+ nblocks);
+
handle->h_buffer_credits += nblocks;
+ handle->h_requested_credits += nblocks;
atomic_add(nblocks, &transaction->t_outstanding_credits);
result = 0;
@@ -1376,6 +1394,13 @@ int jbd2_journal_stop(handle_t *handle)
}
jbd_debug(4, "Handle %p going down\n", handle);
+ trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
+ handle->h_transaction->t_tid,
+ handle->h_type, handle->h_line_no,
+ jiffies - handle->h_start_jiffies,
+ handle->h_sync, handle->h_requested_credits,
+ (handle->h_requested_credits -
+ handle->h_buffer_credits));
/*
* Implement synchronous transaction batching. If the handle
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index ad7774d32095..acd46a4160cb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -117,12 +117,12 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct jffs2_inode_info *f;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct jffs2_full_dirent *fd;
unsigned long offset, curofs;
jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
- filp->f_path.dentry->d_inode->i_ino);
+ file_inode(filp)->i_ino);
f = JFFS2_INODE_INFO(inode);
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index bc555ff417e9..93a1232894f6 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -58,7 +58,7 @@ static long jfs_map_ext2(unsigned long flags, int from)
long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
unsigned int flags;
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 9197a1b0d02d..0ddbeceafc62 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3004,7 +3004,7 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
*/
int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *ip = filp->f_path.dentry->d_inode;
+ struct inode *ip = file_inode(filp);
struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
int rc = 0;
loff_t dtpos; /* legacy OS/2 style position */
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index ca0a08001449..0796c45d0d4d 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -11,7 +11,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
@@ -178,7 +178,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
continue;
if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
continue;
- if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
+ if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)) ,fh) != 0)
continue;
/* Alright, we found a lock. Set the return status
* and wake up the caller
@@ -220,10 +220,19 @@ reclaimer(void *ptr)
{
struct nlm_host *host = (struct nlm_host *) ptr;
struct nlm_wait *block;
+ struct nlm_rqst *req;
struct file_lock *fl, *next;
u32 nsmstate;
struct net *net = host->net;
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ printk(KERN_ERR "lockd: reclaimer unable to alloc memory."
+ " Locks for %s won't be reclaimed!\n",
+ host->h_name);
+ return 0;
+ }
+
allow_signal(SIGKILL);
down_write(&host->h_rwsem);
@@ -253,7 +262,7 @@ restart:
*/
if (signalled())
continue;
- if (nlmclnt_reclaim(host, fl) != 0)
+ if (nlmclnt_reclaim(host, fl, req) != 0)
continue;
list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
if (host->h_nsmstate != nsmstate) {
@@ -279,5 +288,6 @@ restart:
/* Release host handle after use */
nlmclnt_release_host(host);
lockd_down(net);
+ kfree(req);
return 0;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 52e5120bb159..7e529c3c45c0 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -127,7 +127,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
- memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
+ memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
@@ -618,17 +618,15 @@ out_unlock:
* RECLAIM: Try to reclaim a lock
*/
int
-nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
+nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
+ struct nlm_rqst *req)
{
- struct nlm_rqst reqst, *req;
int status;
- req = &reqst;
memset(req, 0, sizeof(*req));
locks_init_lock(&req->a_args.lock.fl);
locks_init_lock(&req->a_res.lock.fl);
req->a_host = host;
- req->a_flags = 0;
/* Set up the argument struct */
nlmclnt_setlockargs(req, fl);
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0e17090c310f..969d589c848d 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -13,6 +13,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/mutex.h>
@@ -32,15 +33,15 @@
static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
-#define for_each_host(host, pos, chain, table) \
+#define for_each_host(host, chain, table) \
for ((chain) = (table); \
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
- hlist_for_each_entry((host), (pos), (chain), h_hash)
+ hlist_for_each_entry((host), (chain), h_hash)
-#define for_each_host_safe(host, pos, next, chain, table) \
+#define for_each_host_safe(host, next, chain, table) \
for ((chain) = (table); \
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
- hlist_for_each_entry_safe((host), (pos), (next), \
+ hlist_for_each_entry_safe((host), (next), \
(chain), h_hash)
static unsigned long nrhosts;
@@ -225,7 +226,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
.net = net,
};
struct hlist_head *chain;
- struct hlist_node *pos;
struct nlm_host *host;
struct nsm_handle *nsm = NULL;
struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +237,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
mutex_lock(&nlm_host_mutex);
chain = &nlm_client_hosts[nlm_hash_address(sap)];
- hlist_for_each_entry(host, pos, chain, h_hash) {
+ hlist_for_each_entry(host, chain, h_hash) {
if (host->net != net)
continue;
if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +322,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const size_t hostname_len)
{
struct hlist_head *chain;
- struct hlist_node *pos;
struct nlm_host *host = NULL;
struct nsm_handle *nsm = NULL;
struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +349,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
nlm_gc_hosts(net);
chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
- hlist_for_each_entry(host, pos, chain, h_hash) {
+ hlist_for_each_entry(host, chain, h_hash) {
if (host->net != net)
continue;
if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +514,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
{
struct nlm_host *host;
struct hlist_head *chain;
- struct hlist_node *pos;
mutex_lock(&nlm_host_mutex);
- for_each_host(host, pos, chain, cache) {
+ for_each_host(host, chain, cache) {
if (host->h_nsmhandle == nsm
&& host->h_nsmstate != info->state) {
host->h_nsmstate = info->state;
@@ -570,7 +568,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
static void nlm_complain_hosts(struct net *net)
{
struct hlist_head *chain;
- struct hlist_node *pos;
struct nlm_host *host;
if (net) {
@@ -587,7 +584,7 @@ static void nlm_complain_hosts(struct net *net)
dprintk("lockd: %lu hosts left:\n", nrhosts);
}
- for_each_host(host, pos, chain, nlm_server_hosts) {
+ for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +597,13 @@ void
nlm_shutdown_hosts_net(struct net *net)
{
struct hlist_head *chain;
- struct hlist_node *pos;
struct nlm_host *host;
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts in net %p...\n", net);
- for_each_host(host, pos, chain, nlm_server_hosts) {
+ for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
host->h_expires = jiffies - 1;
@@ -644,11 +640,11 @@ static void
nlm_gc_hosts(struct net *net)
{
struct hlist_head *chain;
- struct hlist_node *pos, *next;
+ struct hlist_node *next;
struct nlm_host *host;
dprintk("lockd: host garbage collection for net %p\n", net);
- for_each_host(host, pos, chain, nlm_server_hosts) {
+ for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
host->h_inuse = 0;
@@ -657,7 +653,7 @@ nlm_gc_hosts(struct net *net)
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources(net);
- for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+ for_each_host_safe(host, next, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
if (atomic_read(&host->h_count) || host->h_inuse
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 3c2cfc683631..1812f026960c 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 8d80c990dffd..e703318c41df 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -406,8 +406,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
__be32 ret;
dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
- file->f_file->f_path.dentry->d_inode->i_sb->s_id,
- file->f_file->f_path.dentry->d_inode->i_ino,
+ file_inode(file->f_file)->i_sb->s_id,
+ file_inode(file->f_file)->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end,
@@ -513,8 +513,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
__be32 ret;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
- file->f_file->f_path.dentry->d_inode->i_sb->s_id,
- file->f_file->f_path.dentry->d_inode->i_ino,
+ file_inode(file->f_file)->i_sb->s_id,
+ file_inode(file->f_file)->i_ino,
lock->fl.fl_type,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
@@ -606,8 +606,8 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
int error;
dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
- file->f_file->f_path.dentry->d_inode->i_sb->s_id,
- file->f_file->f_path.dentry->d_inode->i_ino,
+ file_inode(file->f_file)->i_sb->s_id,
+ file_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
@@ -635,8 +635,8 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
- file->f_file->f_path.dentry->d_inode->i_sb->s_id,
- file->f_file->f_path.dentry->d_inode->i_ino,
+ file_inode(file->f_file)->i_sb->s_id,
+ file_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 0deb5f6c9dd4..97e87415b145 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/export.h>
#include <linux/lockd/lockd.h>
@@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
{
- struct inode *inode = file->f_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file->f_file);
dprintk("lockd: %s %s/%ld\n",
msg, inode->i_sb->s_id, inode->i_ino);
@@ -84,7 +84,6 @@ __be32
nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
struct nfs_fh *f)
{
- struct hlist_node *pos;
struct nlm_file *file;
unsigned int hash;
__be32 nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
/* Lock file table */
mutex_lock(&nlm_file_mutex);
- hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
+ hlist_for_each_entry(file, &nlm_files[hash], f_list)
if (!nfs_compare_fh(&file->f_handle, f))
goto found;
@@ -248,13 +247,13 @@ static int
nlm_traverse_files(void *data, nlm_host_match_fn_t match,
int (*is_failover_file)(void *data, struct nlm_file *file))
{
- struct hlist_node *pos, *next;
+ struct hlist_node *next;
struct nlm_file *file;
int i, ret = 0;
mutex_lock(&nlm_file_mutex);
for (i = 0; i < FILE_NRHASH; i++) {
- hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+ hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
if (is_failover_file && !is_failover_file(data, file))
continue;
file->f_count++;
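/*
 * Illustrative sketch, not part of the patch above: the 3.9 hlist iterator
 * API used in these lockd hunks drops the separate 'struct hlist_node *pos'
 * cursor, so the entry pointer itself walks the list.  The names demo_entry,
 * demo_head and demo_prune below are invented for illustration only.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_entry {
	int value;
	struct hlist_node node;
};

static HLIST_HEAD(demo_head);

static void demo_prune(int unwanted)
{
	struct demo_entry *e;
	struct hlist_node *next;

	/* read-only walk: only the entry pointer is needed now */
	hlist_for_each_entry(e, &demo_head, node)
		pr_debug("demo entry %d\n", e->value);

	/* deleting walk: keep a 'next' cursor so unlinking stays safe */
	hlist_for_each_entry_safe(e, next, &demo_head, node) {
		if (e->value == unwanted) {
			hlist_del(&e->node);
			kfree(e);
		}
	}
}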
diff --git a/fs/locks.c b/fs/locks.c
index a94e331a52a2..cb424a4fed71 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -334,7 +334,7 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
start = filp->f_pos;
break;
case SEEK_END:
- start = i_size_read(filp->f_path.dentry->d_inode);
+ start = i_size_read(file_inode(filp));
break;
default:
return -EINVAL;
@@ -384,7 +384,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
start = filp->f_pos;
break;
case SEEK_END:
- start = i_size_read(filp->f_path.dentry->d_inode);
+ start = i_size_read(file_inode(filp));
break;
default:
return -EINVAL;
@@ -627,7 +627,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
struct file_lock *cfl;
lock_flocks();
- for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
+ for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
if (!IS_POSIX(cfl))
continue;
if (posix_locks_conflict(fl, cfl))
@@ -708,7 +708,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
struct file_lock **before;
- struct inode * inode = filp->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(filp);
int error = 0;
int found = 0;
@@ -1002,7 +1002,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
int posix_lock_file(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
- return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
+ return __posix_lock_file(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
@@ -1326,8 +1326,8 @@ int fcntl_getlease(struct file *filp)
int type = F_UNLCK;
lock_flocks();
- time_out_leases(filp->f_path.dentry->d_inode);
- for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
+ time_out_leases(file_inode(filp));
+ for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
if (fl->fl_file == filp) {
type = target_leasetype(fl);
@@ -1843,7 +1843,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
@@ -1961,7 +1961,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
if (copy_from_user(&flock, l, sizeof(flock)))
goto out;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
@@ -2030,7 +2030,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- if (!filp->f_path.dentry->d_inode->i_flock)
+ if (!file_inode(filp)->i_flock)
return;
lock.fl_type = F_UNLCK;
@@ -2056,7 +2056,7 @@ EXPORT_SYMBOL(locks_remove_posix);
*/
void locks_remove_flock(struct file *filp)
{
- struct inode * inode = filp->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(filp);
struct file_lock *fl;
struct file_lock **before;
@@ -2152,7 +2152,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
fl_pid = fl->fl_pid;
if (fl->fl_file != NULL)
- inode = fl->fl_file->f_path.dentry->d_inode;
+ inode = file_inode(fl->fl_file);
seq_printf(f, "%lld:%s ", id, pfx);
if (IS_POSIX(fl)) {
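/*
 * Illustrative sketch, not part of the patch above: the file_inode() helper
 * these hunks convert to.  In this tree it is a trivial wrapper around the
 * old open-coded pointer chain, so the conversions are purely mechanical;
 * the name file_inode_sketch is used here only to avoid clashing with the
 * real helper in <linux/fs.h>.
 */
#include <linux/fs.h>

static inline struct inode *file_inode_sketch(const struct file *f)
{
	/* equivalent to the old f->f_path.dentry->d_inode chain */
	return f->f_path.dentry->d_inode;
}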
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 26e4a941532f..b82751082112 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -284,7 +284,7 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
#define IMPLICIT_NODES 2
static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
- struct inode *dir = file->f_dentry->d_inode;
+ struct inode *dir = file_inode(file);
loff_t pos = file->f_pos - IMPLICIT_NODES;
struct page *page;
struct logfs_disk_dentry *dd;
@@ -320,7 +320,7 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
ino_t pino = parent_ino(file->f_dentry);
int err;
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 3886cded283c..c2219a6dd3c8 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -183,7 +183,7 @@ static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct logfs_inode *li = logfs_inode(inode);
unsigned int oldflags, flags;
int err;
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 685b2d981b87..a9ed6f36e6ea 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -85,7 +85,7 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned long pos = filp->f_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
diff --git a/fs/namei.c b/fs/namei.c
index 43a97ee1d4c8..dc984fee5532 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -600,14 +600,10 @@ static int complete_walk(struct nameidata *nd)
if (likely(!(nd->flags & LOOKUP_JUMPED)))
return 0;
- if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
+ if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
return 0;
- if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
- return 0;
-
- /* Note: we do not d_invalidate() */
- status = d_revalidate(dentry, nd->flags);
+ status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
if (status > 0)
return 0;
@@ -1342,7 +1338,7 @@ static struct dentry *__lookup_hash(struct qstr *name,
* small and for now I'd prefer to have fast path as straight as possible.
* It _is_ time-critical.
*/
-static int lookup_fast(struct nameidata *nd, struct qstr *name,
+static int lookup_fast(struct nameidata *nd,
struct path *path, struct inode **inode)
{
struct vfsmount *mnt = nd->path.mnt;
@@ -1358,7 +1354,7 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name,
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
- dentry = __d_lookup_rcu(parent, name, &seq, nd->inode);
+ dentry = __d_lookup_rcu(parent, &nd->last, &seq, nd->inode);
if (!dentry)
goto unlazy;
@@ -1400,7 +1396,7 @@ unlazy:
if (unlazy_walk(nd, dentry))
return -ECHILD;
} else {
- dentry = __d_lookup(parent, name);
+ dentry = __d_lookup(parent, &nd->last);
}
if (unlikely(!dentry))
@@ -1436,8 +1432,7 @@ need_lookup:
}
/* Fast lookup failed, do it the slow way */
-static int lookup_slow(struct nameidata *nd, struct qstr *name,
- struct path *path)
+static int lookup_slow(struct nameidata *nd, struct path *path)
{
struct dentry *dentry, *parent;
int err;
@@ -1446,7 +1441,7 @@ static int lookup_slow(struct nameidata *nd, struct qstr *name,
BUG_ON(nd->inode != parent->d_inode);
mutex_lock(&parent->d_inode->i_mutex);
- dentry = __lookup_hash(name, parent, nd->flags);
+ dentry = __lookup_hash(&nd->last, parent, nd->flags);
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -1519,7 +1514,7 @@ static inline int should_follow_link(struct inode *inode, int follow)
}
static inline int walk_component(struct nameidata *nd, struct path *path,
- struct qstr *name, int type, int follow)
+ int follow)
{
struct inode *inode;
int err;
@@ -1528,14 +1523,14 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
* to be able to know about the current root directory and
* parent relationships.
*/
- if (unlikely(type != LAST_NORM))
- return handle_dots(nd, type);
- err = lookup_fast(nd, name, path, &inode);
+ if (unlikely(nd->last_type != LAST_NORM))
+ return handle_dots(nd, nd->last_type);
+ err = lookup_fast(nd, path, &inode);
if (unlikely(err)) {
if (err < 0)
goto out_err;
- err = lookup_slow(nd, name, path);
+ err = lookup_slow(nd, path);
if (err < 0)
goto out_err;
@@ -1594,8 +1589,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
res = follow_link(&link, nd, &cookie);
if (res)
break;
- res = walk_component(nd, path, &nd->last,
- nd->last_type, LOOKUP_FOLLOW);
+ res = walk_component(nd, path, LOOKUP_FOLLOW);
put_link(nd, &link, cookie);
} while (res > 0);
@@ -1802,8 +1796,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
}
}
+ nd->last = this;
+ nd->last_type = type;
+
if (!name[len])
- goto last_component;
+ return 0;
/*
* If it wasn't NUL, we know it was '/'. Skip that
* slash, and continue until no more slashes.
@@ -1812,10 +1809,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
len++;
} while (unlikely(name[len] == '/'));
if (!name[len])
- goto last_component;
+ return 0;
+
name += len;
- err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
+ err = walk_component(nd, &next, LOOKUP_FOLLOW);
if (err < 0)
return err;
@@ -1824,16 +1822,10 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (err)
return err;
}
- if (can_lookup(nd->inode))
- continue;
- err = -ENOTDIR;
- break;
- /* here ends the main loop */
-
-last_component:
- nd->last = this;
- nd->last_type = type;
- return 0;
+ if (!can_lookup(nd->inode)) {
+ err = -ENOTDIR;
+ break;
+ }
}
terminate_walk(nd);
return err;
@@ -1932,8 +1924,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd->flags &= ~LOOKUP_PARENT;
- return walk_component(nd, path, &nd->last, nd->last_type,
- nd->flags & LOOKUP_FOLLOW);
+ return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW);
}
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
@@ -2732,7 +2723,7 @@ static int do_last(struct nameidata *nd, struct path *path,
if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
symlink_ok = true;
/* we _can_ be in RCU mode here */
- error = lookup_fast(nd, &nd->last, path, &inode);
+ error = lookup_fast(nd, path, &inode);
if (likely(!error))
goto finish_lookup;
@@ -2778,7 +2769,7 @@ retry_lookup:
goto out;
if ((*opened & FILE_CREATED) ||
- !S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ !S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
audit_inode(name, file->f_path.dentry, 0);
@@ -2941,8 +2932,8 @@ static struct file *path_openat(int dfd, struct filename *pathname,
int error;
file = get_empty_filp();
- if (!file)
- return ERR_PTR(-ENFILE);
+ if (IS_ERR(file))
+ return file;
file->f_flags = op->open_flag;
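/*
 * Illustrative sketch, not part of the patch above: the path_openat() hunk
 * reflects get_empty_filp() now encoding its failure as ERR_PTR(-ENFILE) or
 * ERR_PTR(-ENOMEM) instead of returning NULL, so callers pass the encoded
 * pointer straight through.  demo_alloc_file() and demo_open() are invented
 * stand-ins; only the error-handling pattern is the point here.
 */
#include <linux/err.h>
#include <linux/fs.h>

static struct file *demo_alloc_file(void)
{
	/* stand-in allocator: report failure the ERR_PTR way */
	return ERR_PTR(-ENFILE);
}

static struct file *demo_open(int open_flag)
{
	struct file *file = demo_alloc_file();

	if (IS_ERR(file))	/* was: if (!file) return ERR_PTR(-ENFILE); */
		return file;	/* the error is already encoded in the pointer */

	file->f_flags = open_flag;
	/* ... continue with the actual open ... */
	return file;
}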
diff --git a/fs/namespace.c b/fs/namespace.c
index 55605c552787..50ca17d3cb45 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -384,7 +384,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
*/
int __mnt_want_write_file(struct file *file)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
return __mnt_want_write(file->f_path.mnt);
@@ -1237,6 +1237,14 @@ static int do_umount(struct mount *mnt, int flags)
return retval;
}
+/*
+ * Is the caller allowed to modify his namespace?
+ */
+static inline bool may_mount(void)
+{
+ return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
+}
+
/*
* Now umount can handle mount points as well as block devices.
* This is important for filesystems which use unnamed block devices.
@@ -1255,6 +1263,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
+ if (!may_mount())
+ return -EPERM;
+
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
@@ -1268,10 +1279,6 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
if (!check_mnt(mnt))
goto dput_and_out;
- retval = -EPERM;
- if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
- goto dput_and_out;
-
retval = do_umount(mnt, flags);
dput_and_out:
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
@@ -1293,24 +1300,6 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
#endif
-static int mount_is_safe(struct path *path)
-{
- if (ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
- return 0;
- return -EPERM;
-#ifdef notyet
- if (S_ISLNK(path->dentry->d_inode->i_mode))
- return -EPERM;
- if (path->dentry->d_inode->i_mode & S_ISVTX) {
- if (current_uid() != path->dentry->d_inode->i_uid)
- return -EPERM;
- }
- if (inode_permission(path->dentry->d_inode, MAY_WRITE))
- return -EPERM;
- return 0;
-#endif
-}
-
static bool mnt_ns_loop(struct path *path)
{
/* Could bind mounting the mount namespace inode cause a
@@ -1633,9 +1622,6 @@ static int do_change_type(struct path *path, int flag)
int type;
int err = 0;
- if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
- return -EPERM;
-
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
@@ -1669,9 +1655,7 @@ static int do_loopback(struct path *path, const char *old_name,
LIST_HEAD(umount_list);
struct path old_path;
struct mount *mnt = NULL, *old;
- int err = mount_is_safe(path);
- if (err)
- return err;
+ int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
@@ -1748,9 +1732,6 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
if (!check_mnt(mnt))
return -EINVAL;
@@ -1764,6 +1745,8 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
down_write(&sb->s_umount);
if (flags & MS_BIND)
err = change_mount_flags(path->mnt, flags);
+ else if (!capable(CAP_SYS_ADMIN))
+ err = -EPERM;
else
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
@@ -1796,9 +1779,7 @@ static int do_move_mount(struct path *path, const char *old_name)
struct path old_path, parent_path;
struct mount *p;
struct mount *old;
- int err = 0;
- if (!ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
- return -EPERM;
+ int err;
if (!old_name || !*old_name)
return -EINVAL;
err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
@@ -1933,18 +1914,13 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
int mnt_flags, const char *name, void *data)
{
struct file_system_type *type;
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
struct vfsmount *mnt;
int err;
if (!fstype)
return -EINVAL;
- /* we need capabilities... */
- user_ns = real_mount(path->mnt)->mnt_ns->user_ns;
- if (!ns_capable(user_ns, CAP_SYS_ADMIN))
- return -EPERM;
-
type = get_fs_type(fstype);
if (!type)
return -ENODEV;
@@ -2258,6 +2234,9 @@ long do_mount(const char *dev_name, const char *dir_name,
if (retval)
goto dput_out;
+ if (!may_mount())
+ return -EPERM;
+
/* Default to relatime unless overridden */
if (!(flags & MS_NOATIME))
mnt_flags |= MNT_RELATIME;
@@ -2567,7 +2546,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
struct mount *new_mnt, *root_mnt;
int error;
- if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!may_mount())
return -EPERM;
error = user_path_dir(new_root, &new);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 4117e7b377bb..816326093656 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -593,14 +593,10 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
return 1; /* I'm not sure */
qname.name = __name;
- qname.hash = full_name_hash(qname.name, qname.len);
-
- if (dentry->d_op && dentry->d_op->d_hash)
- if (dentry->d_op->d_hash(dentry, dentry->d_inode, &qname) != 0)
- goto end_advance;
-
- newdent = d_lookup(dentry, &qname);
+ newdent = d_hash_and_lookup(dentry, &qname);
+ if (unlikely(IS_ERR(newdent)))
+ goto end_advance;
if (!newdent) {
newdent = d_alloc(dentry, &qname);
if (!newdent)
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 1acdad7fcec7..7dafd6899a62 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -331,12 +331,15 @@ static int ncp_show_options(struct seq_file *seq, struct dentry *root)
struct ncp_server *server = NCP_SBP(root->d_sb);
unsigned int tmp;
- if (server->m.uid != 0)
- seq_printf(seq, ",uid=%u", server->m.uid);
- if (server->m.gid != 0)
- seq_printf(seq, ",gid=%u", server->m.gid);
- if (server->m.mounted_uid != 0)
- seq_printf(seq, ",owner=%u", server->m.mounted_uid);
+ if (!uid_eq(server->m.uid, GLOBAL_ROOT_UID))
+ seq_printf(seq, ",uid=%u",
+ from_kuid_munged(&init_user_ns, server->m.uid));
+ if (!gid_eq(server->m.gid, GLOBAL_ROOT_GID))
+ seq_printf(seq, ",gid=%u",
+ from_kgid_munged(&init_user_ns, server->m.gid));
+ if (!uid_eq(server->m.mounted_uid, GLOBAL_ROOT_UID))
+ seq_printf(seq, ",owner=%u",
+ from_kuid_munged(&init_user_ns, server->m.mounted_uid));
tmp = server->m.file_mode & S_IALLUGO;
if (tmp != NCP_DEFAULT_FILE_MODE)
seq_printf(seq, ",mode=0%o", tmp);
@@ -381,13 +384,13 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
data->flags = 0;
data->int_flags = 0;
- data->mounted_uid = 0;
+ data->mounted_uid = GLOBAL_ROOT_UID;
data->wdog_pid = NULL;
data->ncp_fd = ~0;
data->time_out = NCP_DEFAULT_TIME_OUT;
data->retry_count = NCP_DEFAULT_RETRY_COUNT;
- data->uid = 0;
- data->gid = 0;
+ data->uid = GLOBAL_ROOT_UID;
+ data->gid = GLOBAL_ROOT_GID;
data->file_mode = NCP_DEFAULT_FILE_MODE;
data->dir_mode = NCP_DEFAULT_DIR_MODE;
data->info_fd = -1;
@@ -399,13 +402,19 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
goto err;
switch (optval) {
case 'u':
- data->uid = optint;
+ data->uid = make_kuid(current_user_ns(), optint);
+ if (!uid_valid(data->uid))
+ goto err;
break;
case 'g':
- data->gid = optint;
+ data->gid = make_kgid(current_user_ns(), optint);
+ if (!gid_valid(data->gid))
+ goto err;
break;
case 'o':
- data->mounted_uid = optint;
+ data->mounted_uid = make_kuid(current_user_ns(), optint);
+ if (!uid_valid(data->mounted_uid))
+ goto err;
break;
case 'm':
data->file_mode = optint;
@@ -480,13 +489,13 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
data.flags = md->flags;
data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
- data.mounted_uid = md->mounted_uid;
+ data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
data.wdog_pid = find_get_pid(md->wdog_pid);
data.ncp_fd = md->ncp_fd;
data.time_out = md->time_out;
data.retry_count = md->retry_count;
- data.uid = md->uid;
- data.gid = md->gid;
+ data.uid = make_kuid(current_user_ns(), md->uid);
+ data.gid = make_kgid(current_user_ns(), md->gid);
data.file_mode = md->file_mode;
data.dir_mode = md->dir_mode;
data.info_fd = -1;
@@ -499,13 +508,13 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
data.flags = md->flags;
- data.mounted_uid = md->mounted_uid;
+ data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid);
data.wdog_pid = find_get_pid(md->wdog_pid);
data.ncp_fd = md->ncp_fd;
data.time_out = md->time_out;
data.retry_count = md->retry_count;
- data.uid = md->uid;
- data.gid = md->gid;
+ data.uid = make_kuid(current_user_ns(), md->uid);
+ data.gid = make_kgid(current_user_ns(), md->gid);
data.file_mode = md->file_mode;
data.dir_mode = md->dir_mode;
data.info_fd = -1;
@@ -520,12 +529,16 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
goto out;
break;
}
+ error = -EINVAL;
+ if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
+ !gid_valid(data.gid))
+ goto out;
error = -EBADF;
ncp_filp = fget(data.ncp_fd);
if (!ncp_filp)
goto out;
error = -ENOTSOCK;
- sock_inode = ncp_filp->f_path.dentry->d_inode;
+ sock_inode = file_inode(ncp_filp);
if (!S_ISSOCK(sock_inode->i_mode))
goto out_fput;
sock = SOCKET_I(sock_inode);
@@ -564,7 +577,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
if (!server->info_filp)
goto out_bdi;
error = -ENOTSOCK;
- sock_inode = server->info_filp->f_path.dentry->d_inode;
+ sock_inode = file_inode(server->info_filp);
if (!S_ISSOCK(sock_inode->i_mode))
goto out_fput2;
info_sock = SOCKET_I(sock_inode);
@@ -886,12 +899,10 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
goto out;
result = -EPERM;
- if (((attr->ia_valid & ATTR_UID) &&
- (attr->ia_uid != server->m.uid)))
+ if ((attr->ia_valid & ATTR_UID) && !uid_eq(attr->ia_uid, server->m.uid))
goto out;
- if (((attr->ia_valid & ATTR_GID) &&
- (attr->ia_gid != server->m.gid)))
+ if ((attr->ia_valid & ATTR_GID) && !gid_eq(attr->ia_gid, server->m.gid))
goto out;
if (((attr->ia_valid & ATTR_MODE) &&
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 6958adfaff08..60426ccb3b65 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -45,7 +45,7 @@ ncp_get_fs_info(struct ncp_server * server, struct inode *inode,
return -EINVAL;
}
/* TODO: info.addr = server->m.serv_addr; */
- SET_UID(info.mounted_uid, server->m.mounted_uid);
+ SET_UID(info.mounted_uid, from_kuid_munged(current_user_ns(), server->m.mounted_uid));
info.connection = server->connection;
info.buffer_size = server->buffer_size;
info.volume_number = NCP_FINFO(inode)->volNumber;
@@ -69,7 +69,7 @@ ncp_get_fs_info_v2(struct ncp_server * server, struct inode *inode,
DPRINTK("info.version invalid: %d\n", info2.version);
return -EINVAL;
}
- info2.mounted_uid = server->m.mounted_uid;
+ info2.mounted_uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
info2.connection = server->connection;
info2.buffer_size = server->buffer_size;
info2.volume_number = NCP_FINFO(inode)->volNumber;
@@ -135,7 +135,7 @@ ncp_get_compat_fs_info_v2(struct ncp_server * server, struct inode *inode,
DPRINTK("info.version invalid: %d\n", info2.version);
return -EINVAL;
}
- info2.mounted_uid = server->m.mounted_uid;
+ info2.mounted_uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
info2.connection = server->connection;
info2.buffer_size = server->buffer_size;
info2.volume_number = NCP_FINFO(inode)->volNumber;
@@ -348,22 +348,25 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
{
u16 uid;
- SET_UID(uid, server->m.mounted_uid);
+ SET_UID(uid, from_kuid_munged(current_user_ns(), server->m.mounted_uid));
if (put_user(uid, (u16 __user *)argp))
return -EFAULT;
return 0;
}
case NCP_IOC_GETMOUNTUID32:
- if (put_user(server->m.mounted_uid,
- (u32 __user *)argp))
+ {
+ uid_t uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
+ if (put_user(uid, (u32 __user *)argp))
return -EFAULT;
return 0;
+ }
case NCP_IOC_GETMOUNTUID64:
- if (put_user(server->m.mounted_uid,
- (u64 __user *)argp))
+ {
+ uid_t uid = from_kuid_munged(current_user_ns(), server->m.mounted_uid);
+ if (put_user(uid, (u64 __user *)argp))
return -EFAULT;
return 0;
-
+ }
case NCP_IOC_GETROOT:
{
struct ncp_setroot_ioctl sr;
@@ -808,9 +811,9 @@ outrel:
long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ncp_server *server = NCP_SERVER(inode);
- uid_t uid = current_uid();
+ kuid_t uid = current_uid();
int need_drop_write = 0;
long ret;
@@ -819,12 +822,12 @@ long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NCP_IOC_CONN_LOGGED_IN:
case NCP_IOC_SETROOT:
if (!capable(CAP_SYS_ADMIN)) {
- ret = -EACCES;
+ ret = -EPERM;
goto out;
}
break;
}
- if (server->m.mounted_uid != uid) {
+ if (!uid_eq(server->m.mounted_uid, uid)) {
switch (cmd) {
/*
* Only mount owner can issue these ioctls. Information
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 63d14a99483d..ee24df5af1f9 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -105,7 +105,7 @@ static const struct vm_operations_struct ncp_file_mmap =
/* This is used for a general mmap of a ncp file */
int ncp_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
DPRINTK("ncp_mmap: called\n");
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index 54cc0cdb3dcb..c51b2c543539 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -23,15 +23,15 @@ struct ncp_mount_data_kernel {
unsigned long flags; /* NCP_MOUNT_* flags */
unsigned int int_flags; /* internal flags */
#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001
- uid_t mounted_uid; /* Who may umount() this filesystem? */
+ kuid_t mounted_uid; /* Who may umount() this filesystem? */
struct pid *wdog_pid; /* Who cares for our watchdog packets? */
unsigned int ncp_fd; /* The socket to the ncp port */
unsigned int time_out; /* How long should I wait after
sending a NCP request? */
unsigned int retry_count; /* And how often should I retry? */
unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
umode_t file_mode;
umode_t dir_mode;
int info_fd;
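/*
 * Illustrative sketch, not part of the patch above: the kuid_t/kgid_t
 * conversion pattern used throughout the ncpfs and nfs hunks.  Raw uid_t
 * values from userspace are mapped into kuid_t with make_kuid(), validated,
 * compared with uid_eq(), and mapped back for userspace with
 * from_kuid_munged().  demo_store_uid()/demo_show_uid()/demo_owner_matches()
 * are invented names for illustration only.
 */
#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

static int demo_store_uid(kuid_t *dst, uid_t raw)
{
	kuid_t uid = make_kuid(current_user_ns(), raw);

	if (!uid_valid(uid))	/* reject ids the current namespace can't map */
		return -EINVAL;
	*dst = uid;
	return 0;
}

static uid_t demo_show_uid(kuid_t uid)
{
	/* munged: unmappable ids become the overflow uid instead of failing */
	return from_kuid_munged(&init_user_ns, uid);
}

static bool demo_owner_matches(kuid_t owner)
{
	return uid_eq(owner, current_uid());	/* compare kuid_t values directly */
}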
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index 862a2f16db64..5f7b053720ee 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -128,10 +128,13 @@ int nfs_cache_register_net(struct net *net, struct cache_detail *cd)
struct super_block *pipefs_sb;
int ret = 0;
+ sunrpc_init_cache_detail(cd);
pipefs_sb = rpc_get_sb_net(net);
if (pipefs_sb) {
ret = nfs_cache_register_sb(pipefs_sb, cd);
rpc_put_sb_net(net);
+ if (ret)
+ sunrpc_destroy_cache_detail(cd);
}
return ret;
}
@@ -151,14 +154,5 @@ void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd)
nfs_cache_unregister_sb(pipefs_sb, cd);
rpc_put_sb_net(net);
}
-}
-
-void nfs_cache_init(struct cache_detail *cd)
-{
- sunrpc_init_cache_detail(cd);
-}
-
-void nfs_cache_destroy(struct cache_detail *cd)
-{
sunrpc_destroy_cache_detail(cd);
}
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
index 317db95e37f8..4116d2c3f52f 100644
--- a/fs/nfs/cache_lib.h
+++ b/fs/nfs/cache_lib.h
@@ -23,8 +23,6 @@ extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
-extern void nfs_cache_init(struct cache_detail *cd);
-extern void nfs_cache_destroy(struct cache_detail *cd);
extern int nfs_cache_register_net(struct net *net, struct cache_detail *cd);
extern void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd);
extern int nfs_cache_register_sb(struct super_block *sb,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9f3c66438d0e..84d8eae203a7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -197,7 +197,6 @@ error_0:
EXPORT_SYMBOL_GPL(nfs_alloc_client);
#if IS_ENABLED(CONFIG_NFS_V4)
-/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
void nfs_cleanup_cb_ident_idr(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 1b2d7eb93796..f23f455be42b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -281,7 +281,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == *desc->dir_cookie) {
- struct nfs_inode *nfsi = NFS_I(desc->file->f_path.dentry->d_inode);
+ struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
struct nfs_open_dir_context *ctx = desc->file->private_data;
new_pos = desc->current_index + i;
@@ -629,7 +629,7 @@ out:
static
int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
{
- struct inode *inode = desc->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(desc->file);
int ret;
ret = nfs_readdir_xdr_to_array(desc, page, inode);
@@ -660,7 +660,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
static
struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
{
- return read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
+ return read_cache_page(file_inode(desc->file)->i_mapping,
desc->page_index, (filler_t *)nfs_readdir_filler, desc);
}
@@ -764,7 +764,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
{
struct page *page = NULL;
int status;
- struct inode *inode = desc->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(desc->file);
struct nfs_open_dir_context *ctx = desc->file->private_data;
dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
@@ -1136,6 +1136,45 @@ out_error:
}
/*
+ * A weaker form of d_revalidate for revalidating just the dentry->d_inode
+ * when we don't really care about the dentry name. This is called when a
+ * pathwalk ends on a dentry that was not found via a normal lookup in the
+ * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
+ *
+ * In this situation, we just want to verify that the inode itself is OK
+ * since the dentry might have changed on the server.
+ */
+static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ int error;
+ struct inode *inode = dentry->d_inode;
+
+ /*
+ * I believe we can only get a negative dentry here in the case of a
+ * procfs-style symlink. Just assume it's correct for now, but we may
+ * eventually need to do something more here.
+ */
+ if (!inode) {
+ dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ return 1;
+ }
+
+ if (is_bad_inode(inode)) {
+ dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ return 0;
+ }
+
+ error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
+ __func__, inode->i_ino, error ? "invalid" : "valid");
+ return !error;
+}
+
+/*
* This is called from dput() when d_count is going to 0.
*/
static int nfs_dentry_delete(const struct dentry *dentry)
@@ -1202,6 +1241,7 @@ static void nfs_d_release(struct dentry *dentry)
const struct dentry_operations nfs_dentry_operations = {
.d_revalidate = nfs_lookup_revalidate,
+ .d_weak_revalidate = nfs_weak_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
.d_automount = nfs_d_automount,
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index ca4b11ec87a2..945527092295 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/dns_resolver.h>
#include "dns_resolve.h"
@@ -42,6 +43,7 @@ EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
#include <linux/seq_file.h>
#include <linux/inet.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -142,7 +144,7 @@ static int nfs_dns_upcall(struct cache_detail *cd,
ret = nfs_cache_upcall(cd, key->hostname);
if (ret)
- ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
+ ret = sunrpc_cache_pipe_upcall(cd, ch);
return ret;
}
@@ -351,60 +353,47 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name,
}
EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
+static struct cache_detail nfs_dns_resolve_template = {
+ .owner = THIS_MODULE,
+ .hash_size = NFS_DNS_HASHTBL_SIZE,
+ .name = "dns_resolve",
+ .cache_put = nfs_dns_ent_put,
+ .cache_upcall = nfs_dns_upcall,
+ .cache_request = nfs_dns_request,
+ .cache_parse = nfs_dns_parse,
+ .cache_show = nfs_dns_show,
+ .match = nfs_dns_match,
+ .init = nfs_dns_ent_init,
+ .update = nfs_dns_ent_update,
+ .alloc = nfs_dns_ent_alloc,
+};
+
+
int nfs_dns_resolver_cache_init(struct net *net)
{
- int err = -ENOMEM;
+ int err;
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct cache_detail *cd;
- struct cache_head **tbl;
- cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
- if (cd == NULL)
- goto err_cd;
-
- tbl = kzalloc(NFS_DNS_HASHTBL_SIZE * sizeof(struct cache_head *),
- GFP_KERNEL);
- if (tbl == NULL)
- goto err_tbl;
-
- cd->owner = THIS_MODULE,
- cd->hash_size = NFS_DNS_HASHTBL_SIZE,
- cd->hash_table = tbl,
- cd->name = "dns_resolve",
- cd->cache_put = nfs_dns_ent_put,
- cd->cache_upcall = nfs_dns_upcall,
- cd->cache_parse = nfs_dns_parse,
- cd->cache_show = nfs_dns_show,
- cd->match = nfs_dns_match,
- cd->init = nfs_dns_ent_init,
- cd->update = nfs_dns_ent_update,
- cd->alloc = nfs_dns_ent_alloc,
-
- nfs_cache_init(cd);
- err = nfs_cache_register_net(net, cd);
+ nn->nfs_dns_resolve = cache_create_net(&nfs_dns_resolve_template, net);
+ if (IS_ERR(nn->nfs_dns_resolve))
+ return PTR_ERR(nn->nfs_dns_resolve);
+
+ err = nfs_cache_register_net(net, nn->nfs_dns_resolve);
if (err)
goto err_reg;
- nn->nfs_dns_resolve = cd;
return 0;
err_reg:
- nfs_cache_destroy(cd);
- kfree(cd->hash_table);
-err_tbl:
- kfree(cd);
-err_cd:
+ cache_destroy_net(nn->nfs_dns_resolve, net);
return err;
}
void nfs_dns_resolver_cache_destroy(struct net *net)
{
struct nfs_net *nn = net_generic(net, nfs_net_id);
- struct cache_detail *cd = nn->nfs_dns_resolve;
- nfs_cache_unregister_net(net, cd);
- nfs_cache_destroy(cd);
- kfree(cd->hash_table);
- kfree(cd);
+ nfs_cache_unregister_net(net, nn->nfs_dns_resolve);
+ cache_destroy_net(nn->nfs_dns_resolve, net);
}
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 3c2b893665ba..29f4a48a0ee6 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -292,7 +292,7 @@ static int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
int ret;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
do {
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index bc3968fa81e5..dc0f98dfa717 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -97,7 +97,7 @@ static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
struct nfs4_string *owner = fattr->owner_name;
- __u32 uid;
+ kuid_t uid;
if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
return false;
@@ -111,7 +111,7 @@ static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr
static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
struct nfs4_string *group = fattr->group_name;
- __u32 gid;
+ kgid_t gid;
if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
return false;
@@ -193,7 +193,8 @@ static int nfs_idmap_init_keyring(void)
if (!cred)
return -ENOMEM;
- keyring = keyring_alloc(".id_resolver", 0, 0, cred,
+ keyring = keyring_alloc(".id_resolver",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_NOT_IN_QUOTA, NULL);
@@ -764,7 +765,7 @@ out:
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
- struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
struct key_construction *cons;
struct idmap_msg im;
@@ -836,43 +837,61 @@ idmap_release_pipe(struct inode *inode)
nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
}
-int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
+int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
{
struct idmap *idmap = server->nfs_client->cl_idmap;
+ __u32 id = -1;
+ int ret = 0;
- if (nfs_map_string_to_numeric(name, namelen, uid))
- return 0;
- return nfs_idmap_lookup_id(name, namelen, "uid", uid, idmap);
+ if (!nfs_map_string_to_numeric(name, namelen, &id))
+ ret = nfs_idmap_lookup_id(name, namelen, "uid", &id, idmap);
+ if (ret == 0) {
+ *uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(*uid))
+ ret = -ERANGE;
+ }
+ return ret;
}
-int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid)
+int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, kgid_t *gid)
{
struct idmap *idmap = server->nfs_client->cl_idmap;
+ __u32 id = -1;
+ int ret = 0;
- if (nfs_map_string_to_numeric(name, namelen, gid))
- return 0;
- return nfs_idmap_lookup_id(name, namelen, "gid", gid, idmap);
+ if (!nfs_map_string_to_numeric(name, namelen, &id))
+ ret = nfs_idmap_lookup_id(name, namelen, "gid", &id, idmap);
+ if (ret == 0) {
+ *gid = make_kgid(&init_user_ns, id);
+ if (!gid_valid(*gid))
+ ret = -ERANGE;
+ }
+ return ret;
}
-int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen)
+int nfs_map_uid_to_name(const struct nfs_server *server, kuid_t uid, char *buf, size_t buflen)
{
struct idmap *idmap = server->nfs_client->cl_idmap;
int ret = -EINVAL;
+ __u32 id;
+ id = from_kuid(&init_user_ns, uid);
if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
- ret = nfs_idmap_lookup_name(uid, "user", buf, buflen, idmap);
+ ret = nfs_idmap_lookup_name(id, "user", buf, buflen, idmap);
if (ret < 0)
- ret = nfs_map_numeric_to_string(uid, buf, buflen);
+ ret = nfs_map_numeric_to_string(id, buf, buflen);
return ret;
}
-int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, size_t buflen)
+int nfs_map_gid_to_group(const struct nfs_server *server, kgid_t gid, char *buf, size_t buflen)
{
struct idmap *idmap = server->nfs_client->cl_idmap;
int ret = -EINVAL;
+ __u32 id;
+ id = from_kgid(&init_user_ns, gid);
if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
- ret = nfs_idmap_lookup_name(gid, "group", buf, buflen, idmap);
+ ret = nfs_idmap_lookup_name(id, "group", buf, buflen, idmap);
if (ret < 0)
- ret = nfs_map_numeric_to_string(gid, buf, buflen);
+ ret = nfs_map_numeric_to_string(id, buf, buflen);
return ret;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6acc73c80d7f..1f941674b089 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -237,6 +237,8 @@ nfs_find_actor(struct inode *inode, void *opaque)
if (NFS_FILEID(inode) != fattr->fileid)
return 0;
+ if ((S_IFMT & inode->i_mode) != (S_IFMT & fattr->mode))
+ return 0;
if (nfs_compare_fh(NFS_FH(inode), fh))
return 0;
if (is_bad_inode(inode) || NFS_STALE(inode))
@@ -332,8 +334,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_version = 0;
inode->i_size = 0;
clear_nlink(inode);
- inode->i_uid = -2;
- inode->i_gid = -2;
+ inode->i_uid = make_kuid(&init_user_ns, -2);
+ inode->i_gid = make_kgid(&init_user_ns, -2);
inode->i_blocks = 0;
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
nfsi->write_io = 0;
@@ -711,7 +713,7 @@ EXPORT_SYMBOL_GPL(put_nfs_open_context);
*/
void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct nfs_inode *nfsi = NFS_I(inode);
filp->private_data = get_nfs_open_context(ctx);
@@ -744,7 +746,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
static void nfs_file_clear_open_context(struct file *filp)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct nfs_open_context *ctx = nfs_file_open_context(filp);
if (ctx) {
@@ -1006,9 +1008,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
/* Have any file permissions changed? */
if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
- if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid)
+ if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
- if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid)
+ if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
/* Has the link count changed? */
@@ -1437,7 +1439,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
- if (inode->i_uid != fattr->uid) {
+ if (!uid_eq(inode->i_uid, fattr->uid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
@@ -1448,7 +1450,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
- if (inode->i_gid != fattr->gid) {
+ if (!gid_eq(inode->i_gid, fattr->gid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 06b9df49f7f7..62db136339ea 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -290,8 +290,13 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->mode = be32_to_cpup(p++);
fattr->nlink = be32_to_cpup(p++);
- fattr->uid = be32_to_cpup(p++);
- fattr->gid = be32_to_cpup(p++);
+ fattr->uid = make_kuid(&init_user_ns, be32_to_cpup(p++));
+ if (!uid_valid(fattr->uid))
+ goto out_uid;
+ fattr->gid = make_kgid(&init_user_ns, be32_to_cpup(p++));
+ if (!gid_valid(fattr->gid))
+ goto out_gid;
+
fattr->size = be32_to_cpup(p++);
fattr->du.nfs2.blocksize = be32_to_cpup(p++);
@@ -313,6 +318,12 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
return 0;
+out_uid:
+ dprintk("NFS: returned invalid uid\n");
+ return -EINVAL;
+out_gid:
+ dprintk("NFS: returned invalid gid\n");
+ return -EINVAL;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
@@ -351,11 +362,11 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
else
*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
if (attr->ia_valid & ATTR_UID)
- *p++ = cpu_to_be32(attr->ia_uid);
+ *p++ = cpu_to_be32(from_kuid(&init_user_ns, attr->ia_uid));
else
*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
if (attr->ia_valid & ATTR_GID)
- *p++ = cpu_to_be32(attr->ia_gid);
+ *p++ = cpu_to_be32(from_kgid(&init_user_ns, attr->ia_gid));
else
*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
if (attr->ia_valid & ATTR_SIZE)
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 70efb63b1e42..43ea96ced28c 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -872,7 +872,7 @@ static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
static int
nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index bffc32406fbf..fa6d72131c19 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -592,13 +592,13 @@ static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
if (attr->ia_valid & ATTR_UID) {
*p++ = xdr_one;
- *p++ = cpu_to_be32(attr->ia_uid);
+ *p++ = cpu_to_be32(from_kuid(&init_user_ns, attr->ia_uid));
} else
*p++ = xdr_zero;
if (attr->ia_valid & ATTR_GID) {
*p++ = xdr_one;
- *p++ = cpu_to_be32(attr->ia_gid);
+ *p++ = cpu_to_be32(from_kgid(&init_user_ns, attr->ia_gid));
} else
*p++ = xdr_zero;
@@ -657,8 +657,12 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
fattr->nlink = be32_to_cpup(p++);
- fattr->uid = be32_to_cpup(p++);
- fattr->gid = be32_to_cpup(p++);
+ fattr->uid = make_kuid(&init_user_ns, be32_to_cpup(p++));
+ if (!uid_valid(fattr->uid))
+ goto out_uid;
+ fattr->gid = make_kgid(&init_user_ns, be32_to_cpup(p++));
+ if (!gid_valid(fattr->gid))
+ goto out_gid;
p = xdr_decode_size3(p, &fattr->size);
p = xdr_decode_size3(p, &fattr->du.nfs3.used);
@@ -675,6 +679,12 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->valid |= NFS_ATTR_FATTR_V3;
return 0;
+out_uid:
+ dprintk("NFS: returned invalid uid\n");
+ return -EINVAL;
+out_gid:
+ dprintk("NFS: returned invalid gid\n");
+ return -EINVAL;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 2e9779b58b7a..ac4fc9a8fdbc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -6,6 +6,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/nfs_mount.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>
@@ -29,15 +30,14 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
if (clp->rpc_ops->version != 4 || minorversion != 0)
return ret;
-retry:
- if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
spin_lock(&nn->nfs_client_lock);
- ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
+ ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ clp->cl_cb_ident = ret;
spin_unlock(&nn->nfs_client_lock);
- if (ret == -EAGAIN)
- goto retry;
- return ret;
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
#ifdef CONFIG_NFS_V4_1
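/*
 * Illustrative sketch, not part of the patch above: the idr_preload()/
 * idr_alloc() pattern that replaces the old idr_pre_get()/idr_get_new()
 * retry loop, as in nfs_get_cb_ident_idr().  demo_idr, demo_lock and
 * demo_assign_id are invented names for illustration only.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

static int demo_assign_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
	spin_lock(&demo_lock);
	id = idr_alloc(&demo_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&demo_lock);
	idr_preload_end();

	return id < 0 ? id : 0;		/* the caller would record 'id' on success */
}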
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 08ddcccb8887..13e6bb3e3fe5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -94,7 +94,7 @@ static int
nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
int ret;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
do {
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 194c48410336..49eeb044c109 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -99,7 +99,8 @@ static void filelayout_reset_write(struct nfs_write_data *data)
task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
}
@@ -119,7 +120,8 @@ static void filelayout_reset_read(struct nfs_read_data *data)
task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
}
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 8c07241fe52b..b8da95548d3d 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -36,7 +36,7 @@
* Default data server connection timeout and retrans values.
* Set by module parameters dataserver_timeo and dataserver_retrans.
*/
-#define NFS4_DEF_DS_TIMEO 60
+#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
#define NFS4_DEF_DS_RETRANS 5
/*
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index b720064bcd7f..1fe284f01f8b 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -31,6 +31,7 @@
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/sunrpc/addr.h>
#include "internal.h"
#include "nfs4session.h"
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 1e09eb78543b..0dd766079e1c 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include "internal.h"
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index eae83bf96c6d..b2671cb0f901 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -93,6 +93,8 @@ static int nfs4_map_errors(int err)
return err;
switch (err) {
case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
return -EPERM;
@@ -1158,6 +1160,7 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
data->o_arg.fmode);
iput(inode);
out:
+ nfs_release_seqid(data->o_arg.seqid);
return state;
err_put_inode:
iput(inode);
@@ -6045,6 +6048,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layout_hdr *lo;
struct nfs4_state *state = NULL;
+ unsigned long timeo, giveup;
dprintk("--> %s\n", __func__);
@@ -6056,7 +6060,10 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
goto out;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
- task->tk_status = -NFS4ERR_DELAY;
+ timeo = rpc_get_timeout(task->tk_client);
+ giveup = lgp->args.timestamp + timeo;
+ if (time_after(giveup, jiffies))
+ task->tk_status = -NFS4ERR_DELAY;
break;
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
@@ -6129,11 +6136,13 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
- struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct inode *inode = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(inode);
size_t max_pages = max_response_pages(server);
dprintk("--> %s\n", __func__);
nfs4_free_pages(lgp->args.layout.pages, max_pages);
+ pnfs_put_layout_hdr(NFS_I(inode)->layout);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -6148,7 +6157,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
struct pnfs_layout_segment *
nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
- struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct inode *inode = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(inode);
size_t max_pages = max_response_pages(server);
struct rpc_task *task;
struct rpc_message msg = {
@@ -6174,10 +6184,15 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
return ERR_PTR(-ENOMEM);
}
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->args.timestamp = jiffies;
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+
+ /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+ pnfs_get_layout_hdr(NFS_I(inode)->layout);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return ERR_CAST(task);
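/*
 * Illustrative sketch, not part of the patch above: the "retry only until
 * the RPC timeout elapses" check added to nfs4_layoutget_done() -- record
 * jiffies when the request is set up, then keep returning -NFS4ERR_DELAY
 * only while (timestamp + timeout) is still in the future.
 * demo_should_retry() is an invented name for illustration only.
 */
#include <linux/types.h>
#include <linux/jiffies.h>

static bool demo_should_retry(unsigned long timestamp, unsigned long timeo)
{
	unsigned long giveup = timestamp + timeo;

	return time_after(giveup, jiffies);	/* true while the deadline is ahead */
}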
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 84d2e9e2f313..569b166cc050 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -28,7 +28,7 @@ static struct file_system_type nfs4_remote_fs_type = {
.name = "nfs4",
.mount = nfs4_remote_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
static struct file_system_type nfs4_remote_referral_fs_type = {
@@ -36,7 +36,7 @@ static struct file_system_type nfs4_remote_referral_fs_type = {
.name = "nfs4",
.mount = nfs4_remote_referral_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
struct file_system_type nfs4_referral_fs_type = {
@@ -44,7 +44,7 @@ struct file_system_type nfs4_referral_fs_type = {
.name = "nfs4",
.mount = nfs4_referral_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
static const struct super_operations nfs4_sops = {
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 26b143920433..e3edda554ac7 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1002,7 +1002,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
if (owner_namelen < 0) {
dprintk("nfs: couldn't resolve uid %d to string\n",
- iap->ia_uid);
+ from_kuid(&init_user_ns, iap->ia_uid));
/* XXX */
strcpy(owner_name, "nobody");
owner_namelen = sizeof("nobody") - 1;
@@ -1014,7 +1014,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ);
if (owner_grouplen < 0) {
dprintk("nfs: couldn't resolve gid %d to string\n",
- iap->ia_gid);
+ from_kgid(&init_user_ns, iap->ia_gid));
strcpy(owner_group, "nobody");
owner_grouplen = sizeof("nobody") - 1;
/* goto out; */
@@ -3778,14 +3778,14 @@ out_overflow:
}
static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
- const struct nfs_server *server, uint32_t *uid,
+ const struct nfs_server *server, kuid_t *uid,
struct nfs4_string *owner_name)
{
uint32_t len;
__be32 *p;
int ret = 0;
- *uid = -2;
+ *uid = make_kuid(&init_user_ns, -2);
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
@@ -3813,7 +3813,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
__func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER;
}
- dprintk("%s: uid=%d\n", __func__, (int)*uid);
+ dprintk("%s: uid=%d\n", __func__, (int)from_kuid(&init_user_ns, *uid));
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -3821,14 +3821,14 @@ out_overflow:
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
- const struct nfs_server *server, uint32_t *gid,
+ const struct nfs_server *server, kgid_t *gid,
struct nfs4_string *group_name)
{
uint32_t len;
__be32 *p;
int ret = 0;
- *gid = -2;
+ *gid = make_kgid(&init_user_ns, -2);
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
@@ -3856,7 +3856,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
__func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
}
- dprintk("%s: gid=%d\n", __func__, (int)*gid);
+ dprintk("%s: gid=%d\n", __func__, (int)from_kgid(&init_user_ns, *gid));
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 6be70f622b62..48ac5aad6258 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1181,7 +1181,7 @@ pnfs_update_layout(struct inode *ino,
struct nfs_client *clp = server->nfs_client;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg = NULL;
- bool first = false;
+ bool first;
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
goto out;
@@ -1215,10 +1215,9 @@ pnfs_update_layout(struct inode *ino,
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
- if (list_empty(&lo->plh_segs))
- first = true;
-
+ first = list_empty(&lo->plh_layouts) ? true : false;
spin_unlock(&ino->i_lock);
+
if (first) {
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
@@ -1422,13 +1421,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct inode *inode,
struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops)
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
+ pgio.pg_dreq = dreq;
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1463,7 +1464,8 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
/*
@@ -1578,13 +1580,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct inode *inode,
struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops)
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
nfs_pageio_init_read(&pgio, inode, compl_ops);
+ pgio.pg_dreq = dreq;
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1615,7 +1619,8 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 97cb358bb882..94ba80417748 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -230,9 +230,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops);
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq);
int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops);
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
/* nfs4_deviceid_flags */
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index d35b62e83ea6..6da209bd9408 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
long hash)
{
struct nfs4_deviceid_node *d;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
if (d->ld == ld && d->nfs_client == clp &&
!memcmp(&d->deviceid, id, sizeof(*id))) {
if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
struct nfs4_deviceid_node *d;
- struct hlist_node *n;
HLIST_HEAD(tmp);
spin_lock(&nfs4_deviceid_lock);
rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
if (d->nfs_client == clp && atomic_read(&d->ref)) {
hlist_del_init_rcu(&d->node);
hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
struct nfs4_deviceid_node *d;
- struct hlist_node *n;
int i;
rcu_read_lock();
for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
- hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+ hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
if (d->nfs_client == clp)
set_bit(NFS_DEVICEID_INVALID, &d->flags);
}
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f084dac948e1..fc8de9016acf 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -662,7 +662,7 @@ nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
static int
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index befbae0cce41..17b32b722457 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/xprtsock.h>
@@ -291,7 +292,7 @@ struct file_system_type nfs_fs_type = {
.name = "nfs",
.mount = nfs_fs_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
EXPORT_SYMBOL_GPL(nfs_fs_type);
@@ -300,7 +301,7 @@ struct file_system_type nfs_xdev_fs_type = {
.name = "nfs",
.mount = nfs_xdev_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
const struct super_operations nfs_sops = {
@@ -330,7 +331,7 @@ struct file_system_type nfs4_fs_type = {
.name = "nfs4",
.mount = nfs_fs_mount,
.kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
EXPORT_SYMBOL_GPL(nfs4_fs_type);
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index d26a32f5b53b..1f1f38f0c5d5 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -335,20 +335,14 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
struct inode *old_dir = data->old_dir;
struct inode *new_dir = data->new_dir;
struct dentry *old_dentry = data->old_dentry;
- struct dentry *new_dentry = data->new_dentry;
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
rpc_restart_call_prepare(task);
return;
}
- if (task->tk_status != 0) {
+ if (task->tk_status != 0)
nfs_cancel_async_unlink(old_dentry);
- return;
- }
-
- d_drop(old_dentry);
- d_drop(new_dentry);
}
/**
@@ -549,6 +543,18 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
error = rpc_wait_for_completion_task(task);
if (error == 0)
error = task->tk_status;
+ switch (error) {
+ case 0:
+ /* The rename succeeded */
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ d_move(dentry, sdentry);
+ break;
+ case -ERESTARTSYS:
+ /* The result of the rename is unknown. Play it safe by
+ * forcing a new lookup */
+ d_drop(dentry);
+ d_drop(sdentry);
+ }
rpc_put_task(task);
out_dput:
dput(sdentry);
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index 6940439bd609..ed628f71274c 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -38,8 +38,8 @@ struct nfsacl_encode_desc {
unsigned int count;
struct posix_acl *acl;
int typeflag;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
};
struct nfsacl_simple_acl {
@@ -60,14 +60,16 @@ xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
*p++ = htonl(entry->e_tag | nfsacl_desc->typeflag);
switch(entry->e_tag) {
case ACL_USER_OBJ:
- *p++ = htonl(nfsacl_desc->uid);
+ *p++ = htonl(from_kuid(&init_user_ns, nfsacl_desc->uid));
break;
case ACL_GROUP_OBJ:
- *p++ = htonl(nfsacl_desc->gid);
+ *p++ = htonl(from_kgid(&init_user_ns, nfsacl_desc->gid));
break;
case ACL_USER:
+ *p++ = htonl(from_kuid(&init_user_ns, entry->e_uid));
+ break;
case ACL_GROUP:
- *p++ = htonl(entry->e_id);
+ *p++ = htonl(from_kgid(&init_user_ns, entry->e_gid));
break;
default: /* Solaris depends on that! */
*p++ = 0;
@@ -148,6 +150,7 @@ xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
(struct nfsacl_decode_desc *) desc;
__be32 *p = elem;
struct posix_acl_entry *entry;
+ unsigned int id;
if (!nfsacl_desc->acl) {
if (desc->array_len > NFS_ACL_MAX_ENTRIES)
@@ -160,14 +163,22 @@ xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++];
entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT;
- entry->e_id = ntohl(*p++);
+ id = ntohl(*p++);
entry->e_perm = ntohl(*p++);
switch(entry->e_tag) {
- case ACL_USER_OBJ:
case ACL_USER:
- case ACL_GROUP_OBJ:
+ entry->e_uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(entry->e_uid))
+ return -EINVAL;
+ break;
case ACL_GROUP:
+ entry->e_gid = make_kgid(&init_user_ns, id);
+ if (!gid_valid(entry->e_gid))
+ return -EINVAL;
+ break;
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
case ACL_OTHER:
if (entry->e_perm & ~S_IRWXO)
return -EINVAL;
@@ -190,9 +201,13 @@ cmp_acl_entry(const void *x, const void *y)
if (a->e_tag != b->e_tag)
return a->e_tag - b->e_tag;
- else if (a->e_id > b->e_id)
+ else if ((a->e_tag == ACL_USER) && uid_gt(a->e_uid, b->e_uid))
+ return 1;
+ else if ((a->e_tag == ACL_USER) && uid_lt(a->e_uid, b->e_uid))
+ return -1;
+ else if ((a->e_tag == ACL_GROUP) && gid_gt(a->e_gid, b->e_gid))
return 1;
- else if (a->e_id < b->e_id)
+ else if ((a->e_tag == ACL_GROUP) && gid_lt(a->e_gid, b->e_gid))
return -1;
else
return 0;
@@ -213,22 +228,18 @@ posix_acl_from_nfsacl(struct posix_acl *acl)
sort(acl->a_entries, acl->a_count, sizeof(struct posix_acl_entry),
cmp_acl_entry, NULL);
- /* Clear undefined identifier fields and find the ACL_GROUP_OBJ
- and ACL_MASK entries. */
+ /* Find the ACL_GROUP_OBJ and ACL_MASK entries. */
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch(pa->e_tag) {
case ACL_USER_OBJ:
- pa->e_id = ACL_UNDEFINED_ID;
break;
case ACL_GROUP_OBJ:
- pa->e_id = ACL_UNDEFINED_ID;
group_obj = pa;
break;
case ACL_MASK:
mask = pa;
/* fall through */
case ACL_OTHER:
- pa->e_id = ACL_UNDEFINED_ID;
break;
}
}
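Aside (illustration, not part of the diff): cmp_acl_entry() above now orders entries by tag first and compares ids only within ACL_USER (by kuid) and ACL_GROUP (by kgid) entries, since the other tags no longer carry an id. A small userspace sketch of the same ordering rule, with simplified stand-in types:

/* Sketch only -- tag values and the entry layout are made up. */
#include <stdio.h>
#include <stdlib.h>

enum { SK_USER_OBJ = 1, SK_USER = 2, SK_GROUP_OBJ = 4, SK_GROUP = 8 };

struct sketch_ace {
	int tag;
	unsigned int id;	/* uid or gid, depending on tag */
};

static int sketch_cmp(const void *x, const void *y)
{
	const struct sketch_ace *a = x, *b = y;

	if (a->tag != b->tag)
		return a->tag - b->tag;
	if ((a->tag == SK_USER || a->tag == SK_GROUP) && a->id != b->id)
		return a->id > b->id ? 1 : -1;
	return 0;
}

int main(void)
{
	struct sketch_ace acl[] = {
		{ SK_GROUP, 30 }, { SK_USER, 1000 },
		{ SK_USER, 7 },   { SK_USER_OBJ, 0 },
	};
	size_t i, n = sizeof(acl) / sizeof(acl[0]);

	qsort(acl, n, sizeof(acl[0]), sketch_cmp);
	for (i = 0; i < n; i++)
		printf("tag=%d id=%u\n", acl[i].tag, acl[i].id);
	return 0;
}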
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
index 34e5c40af5ef..8b186a4955cc 100644
--- a/fs/nfsd/acl.h
+++ b/fs/nfsd/acl.h
@@ -44,8 +44,6 @@
struct nfs4_acl *nfs4_acl_new(int);
int nfs4_acl_get_whotype(char *, u32);
int nfs4_acl_write_who(int who, char *p);
-int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
- uid_t who, u32 mask);
#define NFS4_ACL_TYPE_DEFAULT 0x01
#define NFS4_ACL_DIR 0x02
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 34a10d78b839..06cddd572264 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -47,9 +47,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
if (!gi)
goto oom;
} else if (flags & NFSEXP_ROOTSQUASH) {
- if (!new->fsuid)
+ if (uid_eq(new->fsuid, GLOBAL_ROOT_UID))
new->fsuid = exp->ex_anon_uid;
- if (!new->fsgid)
+ if (gid_eq(new->fsgid, GLOBAL_ROOT_GID))
new->fsgid = exp->ex_anon_gid;
gi = groups_alloc(rqgi->ngroups);
@@ -58,7 +58,7 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
for (i = 0; i < rqgi->ngroups; i++) {
if (gid_eq(GLOBAL_ROOT_GID, GROUP_AT(rqgi, i)))
- GROUP_AT(gi, i) = make_kgid(&init_user_ns, exp->ex_anon_gid);
+ GROUP_AT(gi, i) = exp->ex_anon_gid;
else
GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
}
@@ -66,9 +66,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
gi = get_group_info(rqgi);
}
- if (new->fsuid == (uid_t) -1)
+ if (uid_eq(new->fsuid, INVALID_UID))
new->fsuid = exp->ex_anon_uid;
- if (new->fsgid == (gid_t) -1)
+ if (gid_eq(new->fsgid, INVALID_GID))
new->fsgid = exp->ex_anon_gid;
ret = set_groups(new, gi);
@@ -76,7 +76,7 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
if (ret < 0)
goto error;
- if (new->fsuid)
+ if (!uid_eq(new->fsuid, GLOBAL_ROOT_UID))
new->cap_effective = cap_drop_nfsd_set(new->cap_effective);
else
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
diff --git a/fs/nfsd/auth.h b/fs/nfsd/auth.h
index 78b3c0e93822..53325a12ba62 100644
--- a/fs/nfsd/auth.h
+++ b/fs/nfsd/auth.h
@@ -1,6 +1,5 @@
/*
* nfsd-specific authentication stuff.
- * uid/gid mapping not yet implemented.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
@@ -8,11 +7,6 @@
#ifndef LINUX_NFSD_AUTH_H
#define LINUX_NFSD_AUTH_H
-#define nfsd_luid(rq, uid) ((u32)(uid))
-#define nfsd_lgid(rq, gid) ((u32)(gid))
-#define nfsd_ruid(rq, uid) ((u32)(uid))
-#define nfsd_rgid(rq, gid) ((u32)(gid))
-
/*
* Set the current process's fsuid/fsgid etc to those of the NFS
* client user
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 93cc9d34c459..87fd1410b737 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -12,6 +12,10 @@
/*
* Representation of a reply cache entry.
+ *
+ * Note that we use a sockaddr_in6 to hold the address instead of the more
+ * typical sockaddr_storage. This is for space reasons, since sockaddr_storage
+ * is much larger than a sockaddr_in6.
*/
struct svc_cacherep {
struct hlist_node c_hash;
@@ -20,11 +24,13 @@ struct svc_cacherep {
unsigned char c_state, /* unused, inprog, done */
c_type, /* status, buffer */
c_secure : 1; /* req came from port < 1024 */
- struct sockaddr_in c_addr;
+ struct sockaddr_in6 c_addr;
__be32 c_xid;
u32 c_prot;
u32 c_proc;
u32 c_vers;
+ unsigned int c_len;
+ __wsum c_csum;
unsigned long c_timestamp;
union {
struct kvec u_vec;
@@ -46,8 +52,7 @@ enum {
enum {
RC_DROPIT,
RC_REPLY,
- RC_DOIT,
- RC_INTR
+ RC_DOIT
};
/*
@@ -67,6 +72,12 @@ enum {
*/
#define RC_DELAY (HZ/5)
+/* Cache entries expire after this time period */
+#define RC_EXPIRE (120 * HZ)
+
+/* Checksum this amount of the request */
+#define RC_CSUMLEN (256U)
+
int nfsd_reply_cache_init(void);
void nfsd_reply_cache_shutdown(void);
int nfsd_cache_lookup(struct svc_rqst *);
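Aside (illustration, not part of the diff): the cache.h hunk above switches svc_cacherep to a sockaddr_in6 precisely because it is much smaller than sockaddr_storage. A trivial userspace check of the two sizes on a typical build host (exact values vary by platform):

/* Sketch only -- just prints the two structure sizes. */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	printf("sockaddr_in6:     %zu bytes\n", sizeof(struct sockaddr_in6));
	printf("sockaddr_storage: %zu bytes\n", sizeof(struct sockaddr_storage));
	return 0;
}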
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index a3946cf13fc8..5f38ea36e266 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -67,11 +67,6 @@ static void expkey_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
-}
-
static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
struct svc_expkey *old);
static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
@@ -245,7 +240,7 @@ static struct cache_detail svc_expkey_cache_template = {
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
.cache_put = expkey_put,
- .cache_upcall = expkey_upcall,
+ .cache_request = expkey_request,
.cache_parse = expkey_parse,
.cache_show = expkey_show,
.match = expkey_match,
@@ -315,6 +310,7 @@ static void svc_export_put(struct kref *ref)
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
+ kfree(exp->ex_uuid);
kfree(exp);
}
@@ -337,11 +333,6 @@ static void svc_export_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, svc_export_request);
-}
-
static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
@@ -544,13 +535,17 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
err = get_int(&mesg, &an_int);
if (err)
goto out3;
- exp.ex_anon_uid= an_int;
+ exp.ex_anon_uid= make_kuid(&init_user_ns, an_int);
+ if (!uid_valid(exp.ex_anon_uid))
+ goto out3;
/* anon gid */
err = get_int(&mesg, &an_int);
if (err)
goto out3;
- exp.ex_anon_gid= an_int;
+ exp.ex_anon_gid= make_kgid(&init_user_ns, an_int);
+ if (!gid_valid(exp.ex_anon_gid))
+ goto out3;
/* fsid */
err = get_int(&mesg, &an_int);
@@ -613,7 +608,7 @@ out:
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
- uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs);
+ kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs);
static void show_secinfo(struct seq_file *m, struct svc_export *exp);
static int svc_export_show(struct seq_file *m,
@@ -670,6 +665,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
+ new->ex_uuid = NULL;
new->cd = item->cd;
}
@@ -711,7 +707,7 @@ static struct cache_detail svc_export_cache_template = {
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
.cache_put = svc_export_put,
- .cache_upcall = svc_export_upcall,
+ .cache_request = svc_export_request,
.cache_parse = svc_export_parse,
.cache_show = svc_export_show,
.match = svc_export_match,
@@ -1179,15 +1175,17 @@ static void show_secinfo(struct seq_file *m, struct svc_export *exp)
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
- uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
+ kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fsloc)
{
show_expflags(m, flag, NFSEXP_ALLFLAGS);
if (flag & NFSEXP_FSID)
seq_printf(m, ",fsid=%d", fsid);
- if (anonu != (uid_t)-2 && anonu != (0x10000-2))
- seq_printf(m, ",anonuid=%u", anonu);
- if (anong != (gid_t)-2 && anong != (0x10000-2))
- seq_printf(m, ",anongid=%u", anong);
+ if (!uid_eq(anonu, make_kuid(&init_user_ns, (uid_t)-2)) &&
+ !uid_eq(anonu, make_kuid(&init_user_ns, 0x10000-2)))
+ seq_printf(m, ",anonuid=%u", from_kuid(&init_user_ns, anonu));
+ if (!gid_eq(anong, make_kgid(&init_user_ns, (gid_t)-2)) &&
+ !gid_eq(anong, make_kgid(&init_user_ns, 0x10000-2)))
+ seq_printf(m, ",anongid=%u", from_kgid(&init_user_ns, anong));
if (fsloc && fsloc->locations_count > 0) {
char *loctype = (fsloc->migrated) ? "refer" : "replicas";
int i;
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c
index e761ee95617f..d620e7f81429 100644
--- a/fs/nfsd/fault_inject.c
+++ b/fs/nfsd/fault_inject.c
@@ -9,7 +9,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <asm/uaccess.h>
#include "state.h"
@@ -101,7 +101,7 @@ static ssize_t fault_inject_read(struct file *file, char __user *buf,
loff_t pos = *ppos;
if (!pos)
- nfsd_inject_get(file->f_dentry->d_inode->i_private, &val);
+ nfsd_inject_get(file_inode(file)->i_private, &val);
size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
if (pos < 0)
@@ -133,10 +133,10 @@ static ssize_t fault_inject_write(struct file *file, const char __user *buf,
size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
if (size > 0)
- nfsd_inject_set_client(file->f_dentry->d_inode->i_private, &sa, size);
+ nfsd_inject_set_client(file_inode(file)->i_private, &sa, size);
else {
val = simple_strtoll(write_buf, NULL, 0);
- nfsd_inject_set(file->f_dentry->d_inode->i_private, val);
+ nfsd_inject_set(file_inode(file)->i_private, val);
}
return len; /* on success, claim we got the whole input */
}
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index 9d513efc01ba..bf95f6b817a4 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -54,9 +54,9 @@ static inline void nfsd_idmap_shutdown(struct net *net)
}
#endif
-__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
-__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
-int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
-int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
+__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, kuid_t *);
+__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, kgid_t *);
+int nfsd_map_uid_to_name(struct svc_rqst *, kuid_t, char *);
+int nfsd_map_gid_to_name(struct svc_rqst *, kgid_t, char *);
#endif /* LINUX_NFSD_IDMAP_H */
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 9170861c804a..95d76dc6c5da 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -45,6 +45,10 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
RETURN_STATUS(nfserr_inval);
resp->mask = argp->mask;
+ nfserr = fh_getattr(fh, &resp->stat);
+ if (nfserr)
+ goto fail;
+
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
acl = nfsd_get_posix_acl(fh, ACL_TYPE_ACCESS);
if (IS_ERR(acl)) {
@@ -115,6 +119,9 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
nfserr = nfserrno( nfsd_set_posix_acl(
fh, ACL_TYPE_DEFAULT, argp->acl_default) );
}
+ if (!nfserr) {
+ nfserr = fh_getattr(fh, &resp->stat);
+ }
/* argp->acl_{access,default} may have been allocated in
nfssvc_decode_setaclargs. */
@@ -129,10 +136,15 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
{
+ __be32 nfserr;
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
- return fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
+ nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
+ if (nfserr)
+ return nfserr;
+ nfserr = fh_getattr(&resp->fh, &resp->stat);
+ return nfserr;
}
/*
@@ -150,6 +162,9 @@ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessarg
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+ if (nfserr)
+ return nfserr;
+ nfserr = fh_getattr(&resp->fh, &resp->stat);
return nfserr;
}
@@ -243,7 +258,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
return 0;
inode = dentry->d_inode;
- p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+ p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->mask);
if (!xdr_ressize_check(rqstp, p))
return 0;
@@ -274,7 +289,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_attrstat *resp)
{
- p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+ p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
@@ -282,7 +297,7 @@ static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_accessres *resp)
{
- p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+ p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->access);
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 1fc02dfdc5c4..401289913130 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -43,7 +43,6 @@ static __be32
nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
struct nfsd3_attrstat *resp)
{
- int err;
__be32 nfserr;
dprintk("nfsd: GETATTR(3) %s\n",
@@ -55,9 +54,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
if (nfserr)
RETURN_STATUS(nfserr);
- err = vfs_getattr(resp->fh.fh_export->ex_path.mnt,
- resp->fh.fh_dentry, &resp->stat);
- nfserr = nfserrno(err);
+ nfserr = fh_getattr(&resp->fh, &resp->stat);
RETURN_STATUS(nfserr);
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 324c0baf7cda..14d9ecb96cff 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -11,6 +11,7 @@
#include "xdr3.h"
#include "auth.h"
#include "netns.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -105,12 +106,14 @@ decode_sattr3(__be32 *p, struct iattr *iap)
iap->ia_mode = ntohl(*p++);
}
if (*p++) {
- iap->ia_valid |= ATTR_UID;
- iap->ia_uid = ntohl(*p++);
+ iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++));
+ if (uid_valid(iap->ia_uid))
+ iap->ia_valid |= ATTR_UID;
}
if (*p++) {
- iap->ia_valid |= ATTR_GID;
- iap->ia_gid = ntohl(*p++);
+ iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++));
+ if (gid_valid(iap->ia_gid))
+ iap->ia_valid |= ATTR_GID;
}
if (*p++) {
u64 newsize;
@@ -167,8 +170,8 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
*p++ = htonl((u32) stat->mode);
*p++ = htonl((u32) stat->nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+ *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
+ *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
} else {
@@ -204,10 +207,10 @@ encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
if (dentry && dentry->d_inode) {
- int err;
+ __be32 err;
struct kstat stat;
- err = vfs_getattr(fhp->fh_export->ex_path.mnt, dentry, &stat);
+ err = fh_getattr(fhp, &stat);
if (!err) {
*p++ = xdr_one; /* attributes follow */
lease_get_mtime(dentry->d_inode, &stat.mtime);
@@ -254,13 +257,12 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
*/
void fill_post_wcc(struct svc_fh *fhp)
{
- int err;
+ __be32 err;
if (fhp->fh_post_saved)
printk("nfsd: inode locked twice during operation.\n");
- err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
- &fhp->fh_post_attr);
+ err = fh_getattr(fhp, &fhp->fh_post_attr);
fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
if (err) {
fhp->fh_post_saved = 0;
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 9c51aff02ae2..8a50b3c18093 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -264,7 +264,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
ace->flag = eflag;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
- ace->who = pa->e_id;
+ ace->who_uid = pa->e_uid;
ace++;
acl->naces++;
}
@@ -273,7 +273,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
- ace->who = pa->e_id;
+ ace->who_uid = pa->e_uid;
ace++;
acl->naces++;
pa++;
@@ -300,7 +300,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
- ace->who = pa->e_id;
+ ace->who_gid = pa->e_gid;
ace++;
acl->naces++;
pa++;
@@ -329,7 +329,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
- ace->who = pa->e_id;
+ ace->who_gid = pa->e_gid;
ace++;
acl->naces++;
}
@@ -345,6 +345,18 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
acl->naces++;
}
+static bool
+pace_gt(struct posix_acl_entry *pace1, struct posix_acl_entry *pace2)
+{
+ if (pace1->e_tag != pace2->e_tag)
+ return pace1->e_tag > pace2->e_tag;
+ if (pace1->e_tag == ACL_USER)
+ return uid_gt(pace1->e_uid, pace2->e_uid);
+ if (pace1->e_tag == ACL_GROUP)
+ return gid_gt(pace1->e_gid, pace2->e_gid);
+ return false;
+}
+
static void
sort_pacl_range(struct posix_acl *pacl, int start, int end) {
int sorted = 0, i;
@@ -355,8 +367,8 @@ sort_pacl_range(struct posix_acl *pacl, int start, int end) {
while (!sorted) {
sorted = 1;
for (i = start; i < end; i++) {
- if (pacl->a_entries[i].e_id
- > pacl->a_entries[i+1].e_id) {
+ if (pace_gt(&pacl->a_entries[i],
+ &pacl->a_entries[i+1])) {
sorted = 0;
tmp = pacl->a_entries[i];
pacl->a_entries[i] = pacl->a_entries[i+1];
@@ -398,7 +410,10 @@ struct posix_ace_state {
};
struct posix_user_ace_state {
- uid_t uid;
+ union {
+ kuid_t uid;
+ kgid_t gid;
+ };
struct posix_ace_state perms;
};
@@ -521,7 +536,6 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
if (error)
goto out_err;
low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags);
- pace->e_id = ACL_UNDEFINED_ID;
for (i=0; i < state->users->n; i++) {
pace++;
@@ -531,7 +545,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
goto out_err;
low_mode_from_nfs4(state->users->aces[i].perms.allow,
&pace->e_perm, flags);
- pace->e_id = state->users->aces[i].uid;
+ pace->e_uid = state->users->aces[i].uid;
add_to_mask(state, &state->users->aces[i].perms);
}
@@ -541,7 +555,6 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
if (error)
goto out_err;
low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags);
- pace->e_id = ACL_UNDEFINED_ID;
add_to_mask(state, &state->group);
for (i=0; i < state->groups->n; i++) {
@@ -552,14 +565,13 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
goto out_err;
low_mode_from_nfs4(state->groups->aces[i].perms.allow,
&pace->e_perm, flags);
- pace->e_id = state->groups->aces[i].uid;
+ pace->e_gid = state->groups->aces[i].gid;
add_to_mask(state, &state->groups->aces[i].perms);
}
pace++;
pace->e_tag = ACL_MASK;
low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
- pace->e_id = ACL_UNDEFINED_ID;
pace++;
pace->e_tag = ACL_OTHER;
@@ -567,7 +579,6 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
if (error)
goto out_err;
low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags);
- pace->e_id = ACL_UNDEFINED_ID;
return pacl;
out_err:
@@ -587,12 +598,13 @@ static inline void deny_bits(struct posix_ace_state *astate, u32 mask)
astate->deny |= mask & ~astate->allow;
}
-static int find_uid(struct posix_acl_state *state, struct posix_ace_state_array *a, uid_t uid)
+static int find_uid(struct posix_acl_state *state, kuid_t uid)
{
+ struct posix_ace_state_array *a = state->users;
int i;
for (i = 0; i < a->n; i++)
- if (a->aces[i].uid == uid)
+ if (uid_eq(a->aces[i].uid, uid))
return i;
/* Not found: */
a->n++;
@@ -603,6 +615,23 @@ static int find_uid(struct posix_acl_state *state, struct posix_ace_state_array
return i;
}
+static int find_gid(struct posix_acl_state *state, kgid_t gid)
+{
+ struct posix_ace_state_array *a = state->groups;
+ int i;
+
+ for (i = 0; i < a->n; i++)
+ if (gid_eq(a->aces[i].gid, gid))
+ return i;
+ /* Not found: */
+ a->n++;
+ a->aces[i].gid = gid;
+ a->aces[i].perms.allow = state->everyone.allow;
+ a->aces[i].perms.deny = state->everyone.deny;
+
+ return i;
+}
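Aside (illustration, not part of the diff): find_uid() and find_gid() above do a linear find-or-append over the per-state ace array, seeding a newly appended slot with the EVERYONE@ allow/deny bits. A userspace sketch of the same pattern, with made-up field names:

/* Sketch only -- fixed-size array stands in for the kernel structure. */
#include <stdio.h>

struct sketch_ace {
	unsigned int id;
	unsigned int allow;
	unsigned int deny;
};

struct sketch_array {
	int n;
	struct sketch_ace aces[16];
};

static int sketch_find_id(struct sketch_array *a, unsigned int id,
			  unsigned int everyone_allow, unsigned int everyone_deny)
{
	int i;

	for (i = 0; i < a->n; i++)
		if (a->aces[i].id == id)
			return i;
	/* Not found: append and inherit the EVERYONE@ bits. */
	a->n++;
	a->aces[i].id = id;
	a->aces[i].allow = everyone_allow;
	a->aces[i].deny = everyone_deny;
	return i;
}

int main(void)
{
	struct sketch_array users = { 0 };
	int i = sketch_find_id(&users, 1000, 0x4, 0x2);

	printf("slot %d: id=%u allow=%#x deny=%#x\n", i,
	       users.aces[i].id, users.aces[i].allow, users.aces[i].deny);
	return 0;
}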
+
static void deny_bits_array(struct posix_ace_state_array *a, u32 mask)
{
int i;
@@ -636,7 +665,7 @@ static void process_one_v4_ace(struct posix_acl_state *state,
}
break;
case ACL_USER:
- i = find_uid(state, state->users, ace->who);
+ i = find_uid(state, ace->who_uid);
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->users->aces[i].perms, mask);
} else {
@@ -658,7 +687,7 @@ static void process_one_v4_ace(struct posix_acl_state *state,
}
break;
case ACL_GROUP:
- i = find_uid(state, state->groups, ace->who);
+ i = find_gid(state, ace->who_gid);
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->groups->aces[i].perms, mask);
} else {
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index a1f10c0a6255..4832fd819f88 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -65,7 +65,7 @@ MODULE_PARM_DESC(nfs4_disable_idmapping,
struct ent {
struct cache_head h;
int type; /* User / Group */
- uid_t id;
+ u32 id;
char name[IDMAP_NAMESZ];
char authname[IDMAP_NAMESZ];
};
@@ -140,12 +140,6 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
-idtoname_upcall(struct cache_detail *cd, struct cache_head *ch)
-{
- return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request);
-}
-
-static int
idtoname_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -192,7 +186,7 @@ static struct cache_detail idtoname_cache_template = {
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
.cache_put = ent_put,
- .cache_upcall = idtoname_upcall,
+ .cache_request = idtoname_request,
.cache_parse = idtoname_parse,
.cache_show = idtoname_show,
.warn_no_listener = warn_no_idmapd,
@@ -321,12 +315,6 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
-nametoid_upcall(struct cache_detail *cd, struct cache_head *ch)
-{
- return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request);
-}
-
-static int
nametoid_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -365,7 +353,7 @@ static struct cache_detail nametoid_cache_template = {
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
.cache_put = ent_put,
- .cache_upcall = nametoid_upcall,
+ .cache_request = nametoid_request,
.cache_parse = nametoid_parse,
.cache_show = nametoid_show,
.warn_no_listener = warn_no_idmapd,
@@ -540,7 +528,7 @@ rqst_authname(struct svc_rqst *rqstp)
static __be32
idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
- uid_t *id)
+ u32 *id)
{
struct ent *item, key = {
.type = type,
@@ -564,7 +552,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
}
static int
-idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
+idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
{
struct ent *item, key = {
.id = id,
@@ -587,7 +575,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
}
static bool
-numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
+numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id)
{
int ret;
char buf[11];
@@ -603,7 +591,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
}
static __be32
-do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
+do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id)
{
if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
if (numeric_name_to_id(rqstp, type, name, namelen, id))
@@ -616,7 +604,7 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
}
static int
-do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
+do_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
{
if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
return sprintf(name, "%u", id);
@@ -625,26 +613,40 @@ do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
__be32
nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
- __u32 *id)
+ kuid_t *uid)
{
- return do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
+ __be32 status;
+ u32 id = -1;
+ status = do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, &id);
+ *uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(*uid))
+ status = nfserr_badowner;
+ return status;
}
__be32
nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
- __u32 *id)
+ kgid_t *gid)
{
- return do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
+ __be32 status;
+ u32 id = -1;
+ status = do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, &id);
+ *gid = make_kgid(&init_user_ns, id);
+ if (!gid_valid(*gid))
+ status = nfserr_badowner;
+ return status;
}
int
-nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
+nfsd_map_uid_to_name(struct svc_rqst *rqstp, kuid_t uid, char *name)
{
+ u32 id = from_kuid(&init_user_ns, uid);
return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
}
int
-nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
+nfsd_map_gid_to_name(struct svc_rqst *rqstp, kgid_t gid, char *name)
{
+ u32 id = from_kgid(&init_user_ns, gid);
return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 9d1c5dba2bbb..ae73175e6e68 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -993,14 +993,15 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!buf)
return nfserr_jukebox;
+ p = buf;
status = nfsd4_encode_fattr(&cstate->current_fh,
cstate->current_fh.fh_export,
- cstate->current_fh.fh_dentry, buf,
- &count, verify->ve_bmval,
+ cstate->current_fh.fh_dentry, &p,
+ count, verify->ve_bmval,
rqstp, 0);
/* this means that nfsd4_encode_fattr() ran out of space */
- if (status == nfserr_resource && count == 0)
+ if (status == nfserr_resource)
status = nfserr_not_same;
if (status)
goto out_kfree;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ba6fdd4a0455..899ca26dd194 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -73,8 +73,8 @@ nfs4_save_creds(const struct cred **original_creds)
if (!new)
return -ENOMEM;
- new->fsuid = 0;
- new->fsgid = 0;
+ new->fsuid = GLOBAL_ROOT_UID;
+ new->fsgid = GLOBAL_ROOT_GID;
*original_creds = override_creds(new);
put_cred(new);
return 0;
@@ -1185,6 +1185,12 @@ bin_to_hex_dup(const unsigned char *src, int srclen)
static int
nfsd4_umh_cltrack_init(struct net __attribute__((unused)) *net)
{
+ /* XXX: The usermode helper is not working in a container yet. */
+ if (net != &init_net) {
+ WARN(1, KERN_ERR "NFSD: attempt to initialize umh client "
+ "tracking in a container!\n");
+ return -EINVAL;
+ }
return nfsd4_umh_cltrack_upcall("init", NULL, NULL);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 499e957510e7..16d39c6c4fbb 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -40,7 +40,7 @@
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
@@ -261,33 +261,46 @@ static inline int get_new_stid(struct nfs4_stid *stid)
return new_stid;
}
-static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ struct kmem_cache *slab)
{
- stateid_t *s = &stid->sc_stateid;
+ struct idr *stateids = &cl->cl_stateids;
+ static int min_stateid = 0;
+ struct nfs4_stid *stid;
int new_id;
- stid->sc_type = type;
+ stid = kmem_cache_alloc(slab, GFP_KERNEL);
+ if (!stid)
+ return NULL;
+
+ if (!idr_pre_get(stateids, GFP_KERNEL))
+ goto out_free;
+ if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
+ goto out_free;
stid->sc_client = cl;
- s->si_opaque.so_clid = cl->cl_clientid;
- new_id = get_new_stid(stid);
- s->si_opaque.so_id = (u32)new_id;
+ stid->sc_type = 0;
+ stid->sc_stateid.si_opaque.so_id = new_id;
+ stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
- s->si_generation = 0;
-}
-
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
-{
- struct idr *stateids = &cl->cl_stateids;
+ stid->sc_stateid.si_generation = 0;
- if (!idr_pre_get(stateids, GFP_KERNEL))
- return NULL;
/*
- * Note: if we fail here (or any time between now and the time
- * we actually get the new idr), we won't need to undo the idr
- * preallocation, since the idr code caps the number of
- * preallocated entries.
+ * It shouldn't be a problem to reuse an opaque stateid value.
+ * I don't think it is for 4.1. But with 4.0 I worry that, for
+ * example, a stray write retransmission could be accepted by
+ * the server when it should have been rejected. Therefore,
+ * adopt a trick from the sctp code to attempt to maximize the
+ * amount of time until an id is reused, by ensuring they always
+ * "increase" (mod INT_MAX):
*/
- return kmem_cache_alloc(slab, GFP_KERNEL);
+
+ min_stateid = new_id+1;
+ if (min_stateid == INT_MAX)
+ min_stateid = 0;
+ return stid;
+out_free:
+ kfree(stid);
+ return NULL;
}
static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
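Aside (illustration, not part of the diff): per the comment above, nfs4_alloc_stid() asks the idr for an id at or above a moving hint and then advances the hint, so stateid ids keep "increasing" (mod INT_MAX) and a freed id is not handed out again right away. A sketch of just the hint arithmetic, with the idr replaced by a trivial stand-in:

/* Sketch only -- models the hint, not the idr allocator itself. */
#include <limits.h>
#include <stdio.h>

static int min_stateid;		/* next allocation hint */

static int sketch_alloc_id(void)
{
	/* stand-in for idr_get_new_above(): hand back the hint itself */
	int new_id = min_stateid;

	min_stateid = new_id + 1;
	if (min_stateid == INT_MAX)
		min_stateid = 0;
	return new_id;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("stateid id %d\n", sketch_alloc_id());
	return 0;
}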
@@ -316,7 +329,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
if (dp == NULL)
return dp;
- init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
+ dp->dl_stid.sc_type = NFS4_DELEG_STID;
/*
* delegation seqid's are never incremented. The 4.1 special
* meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -337,13 +350,21 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
return dp;
}
+static void free_stid(struct nfs4_stid *s, struct kmem_cache *slab)
+{
+ struct idr *stateids = &s->sc_client->cl_stateids;
+
+ idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+ kmem_cache_free(slab, s);
+}
+
void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
if (atomic_dec_and_test(&dp->dl_count)) {
dprintk("NFSD: freeing dp %p\n",dp);
put_nfs4_file(dp->dl_file);
- kmem_cache_free(deleg_slab, dp);
+ free_stid(&dp->dl_stid, deleg_slab);
num_delegations--;
}
}
@@ -360,9 +381,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
static void unhash_stid(struct nfs4_stid *s)
{
- struct idr *stateids = &s->sc_client->cl_stateids;
-
- idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+ s->sc_type = 0;
}
/* Called under the state lock. */
@@ -519,7 +538,7 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp)
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
- kmem_cache_free(stateid_slab, stp);
+ free_stid(&stp->st_stid, stateid_slab);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -905,7 +924,7 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan,
new = __alloc_session(slotsize, numslots);
if (!new) {
- nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
+ nfsd4_put_drc_mem(slotsize, numslots);
return NULL;
}
init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn);
@@ -1048,7 +1067,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static inline void
free_client(struct nfs4_client *clp)
{
- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+ struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
lockdep_assert_held(&nn->client_lock);
while (!list_empty(&clp->cl_sessions)) {
@@ -1060,6 +1079,7 @@ free_client(struct nfs4_client *clp)
}
free_svc_cred(&clp->cl_cred);
kfree(clp->cl_name.data);
+ idr_destroy(&clp->cl_stateids);
kfree(clp);
}
@@ -1202,7 +1222,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
if (g1->ngroups != g2->ngroups)
return false;
for (i=0; i<g1->ngroups; i++)
- if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
+ if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
return false;
return true;
}
@@ -1227,8 +1247,8 @@ static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
if ((is_gss_cred(cr1) != is_gss_cred(cr2))
- || (cr1->cr_uid != cr2->cr_uid)
- || (cr1->cr_gid != cr2->cr_gid)
+ || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
+ || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
return false;
if (cr1->cr_principal == cr2->cr_principal)
@@ -1258,7 +1278,12 @@ static void gen_confirm(struct nfs4_client *clp)
static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
- return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+ struct nfs4_stid *ret;
+
+ ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+ if (!ret || !ret->sc_type)
+ return NULL;
+ return ret;
}
static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
@@ -1844,11 +1869,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
/* cache solo and embedded create sessions under the state lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
-out:
nfs4_unlock_state();
+out:
dprintk("%s returns %d\n", __func__, ntohl(status));
return status;
out_free_conn:
+ nfs4_unlock_state();
free_conn(conn);
out_free_session:
__free_session(new);
@@ -2443,9 +2469,8 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
struct nfs4_openowner *oo = open->op_openowner;
- struct nfs4_client *clp = oo->oo_owner.so_client;
- init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
+ stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_lockowners);
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
@@ -4031,7 +4056,7 @@ alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct
stp = nfs4_alloc_stateid(clp);
if (stp == NULL)
return NULL;
- init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
+ stp->st_stid.sc_type = NFS4_LOCK_STID;
list_add(&stp->st_perfile, &fp->fi_stateids);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
stp->st_stateowner = &lo->lo_owner;
@@ -4913,16 +4938,6 @@ nfs4_state_start_net(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
- /*
- * FIXME: For now, we hang most of the pernet global stuff off of
- * init_net until nfsd is fully containerized. Eventually, we'll
- * need to pass a net pointer into this function, take a reference
- * to that instead and then do most of the rest of this on a per-net
- * basis.
- */
- if (net != &init_net)
- return -EINVAL;
-
ret = nfs4_state_create_net(net);
if (ret)
return ret;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0dc11586682f..01168865dd37 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -293,13 +293,13 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
status = nfs_ok;
if (ace->whotype != NFS4_ACL_WHO_NAMED)
- ace->who = 0;
+ ;
else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
status = nfsd_map_name_to_gid(argp->rqstp,
- buf, dummy32, &ace->who);
+ buf, dummy32, &ace->who_gid);
else
status = nfsd_map_name_to_uid(argp->rqstp,
- buf, dummy32, &ace->who);
+ buf, dummy32, &ace->who_uid);
if (status)
return status;
}
@@ -464,9 +464,16 @@ static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_
READ32(dummy);
READ_BUF(dummy * 4);
if (cbs->flavor == (u32)(-1)) {
- cbs->uid = uid;
- cbs->gid = gid;
- cbs->flavor = RPC_AUTH_UNIX;
+ kuid_t kuid = make_kuid(&init_user_ns, uid);
+ kgid_t kgid = make_kgid(&init_user_ns, gid);
+ if (uid_valid(kuid) && gid_valid(kgid)) {
+ cbs->uid = kuid;
+ cbs->gid = kgid;
+ cbs->flavor = RPC_AUTH_UNIX;
+ } else {
+ dprintk("RPC_AUTH_UNIX with invalid"
+ "uid or gid ignoring!\n");
+ }
}
break;
case RPC_AUTH_GSS:
@@ -1926,7 +1933,7 @@ static u32 nfs4_file_type(umode_t mode)
}
static __be32
-nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
+nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, kuid_t uid, kgid_t gid,
__be32 **p, int *buflen)
{
int status;
@@ -1935,10 +1942,10 @@ nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
return nfserr_resource;
if (whotype != NFS4_ACL_WHO_NAMED)
status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
- else if (group)
- status = nfsd_map_gid_to_name(rqstp, id, (u8 *)(*p + 1));
+ else if (gid_valid(gid))
+ status = nfsd_map_gid_to_name(rqstp, gid, (u8 *)(*p + 1));
else
- status = nfsd_map_uid_to_name(rqstp, id, (u8 *)(*p + 1));
+ status = nfsd_map_uid_to_name(rqstp, uid, (u8 *)(*p + 1));
if (status < 0)
return nfserrno(status);
*p = xdr_encode_opaque(*p, NULL, status);
@@ -1948,22 +1955,33 @@ nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
}
static inline __be32
-nfsd4_encode_user(struct svc_rqst *rqstp, uid_t uid, __be32 **p, int *buflen)
+nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t user, __be32 **p, int *buflen)
{
- return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, uid, 0, p, buflen);
+ return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, user, INVALID_GID,
+ p, buflen);
}
static inline __be32
-nfsd4_encode_group(struct svc_rqst *rqstp, uid_t gid, __be32 **p, int *buflen)
+nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t group, __be32 **p, int *buflen)
{
- return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, gid, 1, p, buflen);
+ return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, INVALID_UID, group,
+ p, buflen);
}
static inline __be32
-nfsd4_encode_aclname(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
+nfsd4_encode_aclname(struct svc_rqst *rqstp, struct nfs4_ace *ace,
__be32 **p, int *buflen)
{
- return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen);
+ kuid_t uid = INVALID_UID;
+ kgid_t gid = INVALID_GID;
+
+ if (ace->whotype == NFS4_ACL_WHO_NAMED) {
+ if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+ gid = ace->who_gid;
+ else
+ uid = ace->who_uid;
+ }
+ return nfsd4_encode_name(rqstp, ace->whotype, uid, gid, p, buflen);
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -1997,7 +2015,7 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
if (path.dentry != path.mnt->mnt_root)
break;
}
- err = vfs_getattr(path.mnt, path.dentry, stat);
+ err = vfs_getattr(&path, stat);
path_put(&path);
return err;
}
@@ -2006,12 +2024,11 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*
- * @countp is the buffer size in _words_; upon successful return this becomes
- * replaced with the number of words written.
+ * count is the buffer size in _words_
*/
__be32
nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
- struct dentry *dentry, __be32 *buffer, int *countp, u32 *bmval,
+ struct dentry *dentry, __be32 **buffer, int count, u32 *bmval,
struct svc_rqst *rqstp, int ignore_crossmnt)
{
u32 bmval0 = bmval[0];
@@ -2020,12 +2037,12 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
struct kstat stat;
struct svc_fh tempfh;
struct kstatfs statfs;
- int buflen = *countp << 2;
+ int buflen = count << 2;
__be32 *attrlenp;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
- __be32 *p = buffer;
+ __be32 *p = *buffer;
__be32 status;
int err;
int aclsupport = 0;
@@ -2050,7 +2067,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out;
}
- err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
+ err = vfs_getattr(&path, &stat);
if (err)
goto out_nfserr;
if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
@@ -2224,9 +2241,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
WRITE32(ace->type);
WRITE32(ace->flag);
WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
- status = nfsd4_encode_aclname(rqstp, ace->whotype,
- ace->who, ace->flag & NFS4_ACE_IDENTIFIER_GROUP,
- &p, &buflen);
+ status = nfsd4_encode_aclname(rqstp, ace, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
@@ -2431,7 +2446,7 @@ out_acl:
}
*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
- *countp = p - buffer;
+ *buffer = p;
status = nfs_ok;
out:
@@ -2443,7 +2458,6 @@ out_nfserr:
status = nfserrno(err);
goto out;
out_resource:
- *countp = 0;
status = nfserr_resource;
goto out;
out_serverfault:
@@ -2462,7 +2476,7 @@ static inline int attributes_need_mount(u32 *bmval)
static __be32
nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
- const char *name, int namlen, __be32 *p, int *buflen)
+ const char *name, int namlen, __be32 **p, int buflen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
@@ -2568,10 +2582,9 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
- nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, p, &buflen);
+ nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, &p, buflen);
switch (nfserr) {
case nfs_ok:
- p += buflen;
break;
case nfserr_resource:
nfserr = nfserr_toosmall;
@@ -2698,10 +2711,8 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
buflen = resp->end - resp->p - (COMPOUND_ERR_SLACK_SPACE >> 2);
nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry,
- resp->p, &buflen, getattr->ga_bmval,
+ &resp->p, buflen, getattr->ga_bmval,
resp->rqstp, 0);
- if (!nfserr)
- resp->p += buflen;
return nfserr;
}
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 2cbac34a55da..62c1ee128aeb 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -9,22 +9,22 @@
*/
#include <linux/slab.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/highmem.h>
+#include <net/checksum.h>
#include "nfsd.h"
#include "cache.h"
-/* Size of reply cache. Common values are:
- * 4.3BSD: 128
- * 4.4BSD: 256
- * Solaris2: 1024
- * DEC Unix: 512-4096
- */
-#define CACHESIZE 1024
+#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
+
#define HASHSIZE 64
static struct hlist_head * cache_hash;
static struct list_head lru_head;
-static int cache_disabled = 1;
+static struct kmem_cache *drc_slab;
+static unsigned int num_drc_entries;
+static unsigned int max_drc_entries;
/*
* Calculate the hash index from an XID.
@@ -37,6 +37,14 @@ static inline u32 request_hash(u32 xid)
}
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void cache_cleaner_func(struct work_struct *unused);
+static int nfsd_reply_cache_shrink(struct shrinker *shrink,
+ struct shrink_control *sc);
+
+struct shrinker nfsd_reply_cache_shrinker = {
+ .shrink = nfsd_reply_cache_shrink,
+ .seeks = 1,
+};
/*
* locking for the reply cache:
@@ -44,30 +52,86 @@ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
* Otherwise, when accessing _prev or _next, the lock must be held.
*/
static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
-int nfsd_reply_cache_init(void)
+/*
+ * Put a cap on the size of the DRC based on the amount of available
+ * low memory in the machine.
+ *
+ * 64MB: 8192
+ * 128MB: 11585
+ * 256MB: 16384
+ * 512MB: 23170
+ * 1GB: 32768
+ * 2GB: 46340
+ * 4GB: 65536
+ * 8GB: 92681
+ * 16GB: 131072
+ *
+ * ...with a hard cap of 256k entries. In the worst case, each entry will be
+ * ~1k, so the above numbers should give a rough max of the amount of memory
+ * used in k.
+ */
+static unsigned int
+nfsd_cache_size_limit(void)
+{
+ unsigned int limit;
+ unsigned long low_pages = totalram_pages - totalhigh_pages;
+
+ limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
+ return min_t(unsigned int, limit, 256*1024);
+}
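Aside (illustration, not part of the diff): the table in the comment above follows from limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10), capped at 256k entries. A worked example for 1GB of low memory with 4KB pages (262144 pages, int_sqrt = 512, 16 * 512 = 8192, shifted left by 2 gives 32768), matching the 1GB row:

/* Sketch only -- page size and memory figures are example inputs;
 * compile with -lm for sqrt(). */
#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned long low_pages = (1024UL * 1024 * 1024) / 4096; /* 1GB / 4KB */
	unsigned int page_shift = 12;				  /* 4KB pages */
	unsigned int limit;

	limit = (16 * (unsigned int)sqrt((double)low_pages)) << (page_shift - 10);
	if (limit > 256 * 1024)
		limit = 256 * 1024;
	printf("low_pages=%lu -> max DRC entries=%u\n", low_pages, limit);
	return 0;
}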
+
+static struct svc_cacherep *
+nfsd_reply_cache_alloc(void)
{
struct svc_cacherep *rp;
- int i;
- INIT_LIST_HEAD(&lru_head);
- i = CACHESIZE;
- while (i) {
- rp = kmalloc(sizeof(*rp), GFP_KERNEL);
- if (!rp)
- goto out_nomem;
- list_add(&rp->c_lru, &lru_head);
+ rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
+ if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
+ INIT_LIST_HEAD(&rp->c_lru);
INIT_HLIST_NODE(&rp->c_hash);
- i--;
}
+ return rp;
+}
+
+static void
+nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+{
+ if (rp->c_type == RC_REPLBUFF)
+ kfree(rp->c_replvec.iov_base);
+ hlist_del(&rp->c_hash);
+ list_del(&rp->c_lru);
+ --num_drc_entries;
+ kmem_cache_free(drc_slab, rp);
+}
+
+static void
+nfsd_reply_cache_free(struct svc_cacherep *rp)
+{
+ spin_lock(&cache_lock);
+ nfsd_reply_cache_free_locked(rp);
+ spin_unlock(&cache_lock);
+}
+
+int nfsd_reply_cache_init(void)
+{
+ register_shrinker(&nfsd_reply_cache_shrinker);
+ drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
+ 0, 0, NULL);
+ if (!drc_slab)
+ goto out_nomem;
- cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+ cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
if (!cache_hash)
goto out_nomem;
- cache_disabled = 0;
+ INIT_LIST_HEAD(&lru_head);
+ max_drc_entries = nfsd_cache_size_limit();
+ num_drc_entries = 0;
+
return 0;
out_nomem:
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
@@ -79,27 +143,33 @@ void nfsd_reply_cache_shutdown(void)
{
struct svc_cacherep *rp;
+ unregister_shrinker(&nfsd_reply_cache_shrinker);
+ cancel_delayed_work_sync(&cache_cleaner);
+
while (!list_empty(&lru_head)) {
rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
- if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
- kfree(rp->c_replvec.iov_base);
- list_del(&rp->c_lru);
- kfree(rp);
+ nfsd_reply_cache_free_locked(rp);
}
- cache_disabled = 1;
-
kfree (cache_hash);
cache_hash = NULL;
+
+ if (drc_slab) {
+ kmem_cache_destroy(drc_slab);
+ drc_slab = NULL;
+ }
}
/*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
*/
static void
lru_put_end(struct svc_cacherep *rp)
{
+ rp->c_timestamp = jiffies;
list_move_tail(&rp->c_lru, &lru_head);
+ schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}
/*
@@ -112,83 +182,214 @@ hash_refile(struct svc_cacherep *rp)
hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
+static inline bool
+nfsd_cache_entry_expired(struct svc_cacherep *rp)
+{
+ return rp->c_state != RC_INPROG &&
+ time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+}
+
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+ struct svc_cacherep *rp, *tmp;
+
+ list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+ if (!nfsd_cache_entry_expired(rp) &&
+ num_drc_entries <= max_drc_entries)
+ break;
+ nfsd_reply_cache_free_locked(rp);
+ }
+
+ /*
+ * Conditionally rearm the job. If we cleaned out the list, then
+ * cancel any pending run (since there won't be any work to do).
+ * Otherwise, we rearm the job or modify the existing one to run in
+ * RC_EXPIRE since we just ran the pruner.
+ */
+ if (list_empty(&lru_head))
+ cancel_delayed_work(&cache_cleaner);
+ else
+ mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+ spin_lock(&cache_lock);
+ prune_cache_entries();
+ spin_unlock(&cache_lock);
+}
+
+static int
+nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned int num;
+
+ spin_lock(&cache_lock);
+ if (sc->nr_to_scan)
+ prune_cache_entries();
+ num = num_drc_entries;
+ spin_unlock(&cache_lock);
+
+ return num;
+}
+
+/*
+ * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+ */
+static __wsum
+nfsd_cache_csum(struct svc_rqst *rqstp)
+{
+ int idx;
+ unsigned int base;
+ __wsum csum;
+ struct xdr_buf *buf = &rqstp->rq_arg;
+ const unsigned char *p = buf->head[0].iov_base;
+ size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+ RC_CSUMLEN);
+ size_t len = min(buf->head[0].iov_len, csum_len);
+
+ /* rq_arg.head first */
+ csum = csum_partial(p, len, 0);
+ csum_len -= len;
+
+ /* Continue into page array */
+ idx = buf->page_base / PAGE_SIZE;
+ base = buf->page_base & ~PAGE_MASK;
+ while (csum_len) {
+ p = page_address(buf->pages[idx]) + base;
+ len = min_t(size_t, PAGE_SIZE - base, csum_len);
+ csum = csum_partial(p, len, csum);
+ csum_len -= len;
+ base = 0;
+ ++idx;
+ }
+ return csum;
+}
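Aside (illustration, not part of the diff): nfsd_cache_csum() above folds at most RC_CSUMLEN bytes of the request into a checksum, starting with the head iovec and continuing into the page array. A userspace sketch of the same walk over two flat segments, with a trivial byte sum standing in for csum_partial():

/* Sketch only -- two flat buffers stand in for the head iovec and pages. */
#include <stdio.h>
#include <string.h>

#define SKETCH_CSUMLEN 256u

static unsigned int sketch_csum(const unsigned char *p, size_t len,
				unsigned int csum)
{
	while (len--)
		csum += *p++;
	return csum;
}

int main(void)
{
	unsigned char head[64], body[512];
	size_t csum_len, len;
	unsigned int csum;

	memset(head, 0x11, sizeof(head));
	memset(body, 0x22, sizeof(body));

	/* Checksum at most SKETCH_CSUMLEN bytes across both segments. */
	csum_len = sizeof(head) + sizeof(body);
	if (csum_len > SKETCH_CSUMLEN)
		csum_len = SKETCH_CSUMLEN;

	len = sizeof(head) < csum_len ? sizeof(head) : csum_len;
	csum = sketch_csum(head, len, 0);
	csum_len -= len;

	csum = sketch_csum(body, csum_len, csum);	/* remainder from "pages" */
	printf("csum=%u over %u bytes\n", csum, SKETCH_CSUMLEN);
	return 0;
}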
+
+/*
+ * Search the request hash for an entry that matches the given rqstp.
+ * Must be called with cache_lock held. Returns the found entry or
+ * NULL on failure.
+ */
+static struct svc_cacherep *
+nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
+{
+ struct svc_cacherep *rp;
+ struct hlist_head *rh;
+ __be32 xid = rqstp->rq_xid;
+ u32 proto = rqstp->rq_prot,
+ vers = rqstp->rq_vers,
+ proc = rqstp->rq_proc;
+
+ rh = &cache_hash[request_hash(xid)];
+ hlist_for_each_entry(rp, rh, c_hash) {
+ if (xid == rp->c_xid && proc == rp->c_proc &&
+ proto == rp->c_prot && vers == rp->c_vers &&
+ rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
+ rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
+ rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
+ return rp;
+ }
+ return NULL;
+}
+
/*
* Try to find an entry matching the current call in the cache. When none
- * is found, we grab the oldest unlocked entry off the LRU list.
- * Note that no operation within the loop may sleep.
+ * is found, we try to grab the oldest expired entry off the LRU list. If
+ * a suitable one isn't there, then drop the cache_lock and allocate a
+ * new one, then search again in case one got inserted while this thread
+ * didn't hold the lock.
*/
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
- struct hlist_node *hn;
- struct hlist_head *rh;
- struct svc_cacherep *rp;
+ struct svc_cacherep *rp, *found;
__be32 xid = rqstp->rq_xid;
u32 proto = rqstp->rq_prot,
vers = rqstp->rq_vers,
proc = rqstp->rq_proc;
+ __wsum csum;
unsigned long age;
int type = rqstp->rq_cachetype;
int rtn;
rqstp->rq_cacherep = NULL;
- if (cache_disabled || type == RC_NOCACHE) {
+ if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
return RC_DOIT;
}
+ csum = nfsd_cache_csum(rqstp);
+
spin_lock(&cache_lock);
rtn = RC_DOIT;
- rh = &cache_hash[request_hash(xid)];
- hlist_for_each_entry(rp, hn, rh, c_hash) {
- if (rp->c_state != RC_UNUSED &&
- xid == rp->c_xid && proc == rp->c_proc &&
- proto == rp->c_prot && vers == rp->c_vers &&
- time_before(jiffies, rp->c_timestamp + 120*HZ) &&
- memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
- nfsdstats.rchits++;
- goto found_entry;
+ rp = nfsd_cache_search(rqstp, csum);
+ if (rp)
+ goto found_entry;
+
+ /* Try to use the first entry on the LRU */
+ if (!list_empty(&lru_head)) {
+ rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
+ if (nfsd_cache_entry_expired(rp) ||
+ num_drc_entries >= max_drc_entries) {
+ lru_put_end(rp);
+ prune_cache_entries();
+ goto setup_entry;
}
}
- nfsdstats.rcmisses++;
- /* This loop shouldn't take more than a few iterations normally */
- {
- int safe = 0;
- list_for_each_entry(rp, &lru_head, c_lru) {
- if (rp->c_state != RC_INPROG)
- break;
- if (safe++ > CACHESIZE) {
- printk("nfsd: loop in repcache LRU list\n");
- cache_disabled = 1;
- goto out;
- }
+ /* Drop the lock and allocate a new entry */
+ spin_unlock(&cache_lock);
+ rp = nfsd_reply_cache_alloc();
+ if (!rp) {
+ dprintk("nfsd: unable to allocate DRC entry!\n");
+ return RC_DOIT;
}
+ spin_lock(&cache_lock);
+ ++num_drc_entries;
+
+ /*
+ * Must search again just in case someone inserted one
+ * after we dropped the lock above.
+ */
+ found = nfsd_cache_search(rqstp, csum);
+ if (found) {
+ nfsd_reply_cache_free_locked(rp);
+ rp = found;
+ goto found_entry;
}
- /* All entries on the LRU are in-progress. This should not happen */
- if (&rp->c_lru == &lru_head) {
- static int complaints;
-
- printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
- if (++complaints > 5) {
- printk(KERN_WARNING "nfsd: disabling repcache.\n");
- cache_disabled = 1;
- }
- goto out;
- }
+ /*
+ * We're keeping the one we just allocated. Are we now over the
+ * limit? Prune one off the tip of the LRU in trade for the one we
+ * just allocated if so.
+ */
+ if (num_drc_entries >= max_drc_entries)
+ nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
+ struct svc_cacherep, c_lru));
+setup_entry:
+ nfsdstats.rcmisses++;
rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG;
rp->c_xid = xid;
rp->c_proc = proc;
- memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
+ rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
+ rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
rp->c_prot = proto;
rp->c_vers = vers;
- rp->c_timestamp = jiffies;
+ rp->c_len = rqstp->rq_arg.len;
+ rp->c_csum = csum;
hash_refile(rp);
+ lru_put_end(rp);
/* release any buffer */
if (rp->c_type == RC_REPLBUFF) {
@@ -201,9 +402,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
return rtn;
found_entry:
+ nfsdstats.rchits++;
/* We found a matching entry which is either in progress or done. */
age = jiffies - rp->c_timestamp;
- rp->c_timestamp = jiffies;
lru_put_end(rp);
rtn = RC_DROPIT;
@@ -232,7 +433,7 @@ found_entry:
break;
default:
printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
- rp->c_state = RC_UNUSED;
+ nfsd_reply_cache_free_locked(rp);
}
goto out;
@@ -257,11 +458,11 @@ found_entry:
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
- struct svc_cacherep *rp;
+ struct svc_cacherep *rp = rqstp->rq_cacherep;
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
int len;
- if (!(rp = rqstp->rq_cacherep) || cache_disabled)
+ if (!rp)
return;
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
@@ -269,7 +470,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
- rp->c_state = RC_UNUSED;
+ nfsd_reply_cache_free(rp);
return;
}
@@ -283,21 +484,21 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
cachv = &rp->c_replvec;
cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
if (!cachv->iov_base) {
- spin_lock(&cache_lock);
- rp->c_state = RC_UNUSED;
- spin_unlock(&cache_lock);
+ nfsd_reply_cache_free(rp);
return;
}
cachv->iov_len = len << 2;
memcpy(cachv->iov_base, statp, len << 2);
break;
+ case RC_NOCACHE:
+ nfsd_reply_cache_free(rp);
+ return;
}
spin_lock(&cache_lock);
lru_put_end(rp);
rp->c_secure = rqstp->rq_secure;
rp->c_type = cachetype;
rp->c_state = RC_DONE;
- rp->c_timestamp = jiffies;
spin_unlock(&cache_lock);
return;
}
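
The nfscache.c changes above replace the old fixed-size reply cache with one that grows on demand: every entry is timestamped and moved to the tail of an LRU list on use (lru_put_end), and a delayed work item plus a shrinker prune entries that have passed RC_EXPIRE or that push the count past max_drc_entries. Below is a minimal userspace sketch of that prune policy; the list handling, names and constants are stand-ins rather than the kernel code, and the RC_INPROG and hashing details are left out.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define EXPIRE_SECS 120          /* stand-in for RC_EXPIRE (120s) */
    #define MAX_ENTRIES 4            /* stand-in for max_drc_entries */

    struct entry {
        time_t        timestamp;     /* last use, like c_timestamp */
        int           id;
        struct entry *prev, *next;
    };

    static struct entry *lru_head, *lru_tail; /* oldest at head, newest at tail */
    static unsigned int  num_entries;

    static void lru_unlink(struct entry *e)
    {
        if (e->prev) e->prev->next = e->next; else lru_head = e->next;
        if (e->next) e->next->prev = e->prev; else lru_tail = e->prev;
        e->prev = e->next = NULL;
    }

    /* Append a fresh entry at the tail and stamp it with "now".  The patch's
     * lru_put_end() also handles re-insertion of existing entries and kicks
     * the delayed cleaner; this sketch only covers insertion. */
    static void lru_put_end(struct entry *e)
    {
        e->timestamp = time(NULL);
        e->next = NULL;
        e->prev = lru_tail;
        if (lru_tail) lru_tail->next = e; else lru_head = e;
        lru_tail = e;
    }

    /* Trim from the head while entries are expired or the cache is too big,
     * mirroring the shape of prune_cache_entries(). */
    static void prune_entries(void)
    {
        time_t now = time(NULL);

        while (lru_head && (now - lru_head->timestamp > EXPIRE_SECS ||
                            num_entries > MAX_ENTRIES)) {
            struct entry *victim = lru_head;

            lru_unlink(victim);
            num_entries--;
            printf("pruned entry %d\n", victim->id);
            free(victim);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++) {
            struct entry *e = calloc(1, sizeof(*e));

            e->id = i;
            num_entries++;
            lru_put_end(e);          /* newest entries live at the tail */
            prune_entries();         /* the cleaner trims from the head */
        }
        while (lru_head) {           /* drain what's left before exiting */
            struct entry *e = lru_head;
            lru_unlink(e);
            free(e);
        }
        return 0;
    }
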
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 74934284d9a7..13a21c8fca49 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -10,7 +10,7 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/lockd/lockd.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -85,7 +85,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
{
- ino_t ino = file->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(file)->i_ino;
char *data;
ssize_t rv;
@@ -125,11 +125,11 @@ static const struct file_operations transaction_ops = {
.llseek = default_llseek,
};
-static int exports_open(struct inode *inode, struct file *file)
+static int exports_net_open(struct net *net, struct file *file)
{
int err;
struct seq_file *seq;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
err = seq_open(file, &nfs_exports_op);
if (err)
@@ -140,8 +140,26 @@ static int exports_open(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations exports_operations = {
- .open = exports_open,
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+ return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct file_operations exports_proc_operations = {
+ .open = exports_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .owner = THIS_MODULE,
+};
+
+static int exports_nfsd_open(struct inode *inode, struct file *file)
+{
+ return exports_net_open(inode->i_sb->s_fs_info, file);
+}
+
+static const struct file_operations exports_nfsd_operations = {
+ .open = exports_nfsd_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
@@ -220,6 +238,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
struct sockaddr *sap = (struct sockaddr *)&address;
size_t salen = sizeof(address);
char *fo_path;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
/* sanity check */
if (size == 0)
@@ -232,7 +251,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
if (qword_get(&buf, fo_path, size) < 0)
return -EINVAL;
- if (rpc_pton(&init_net, fo_path, size, sap, salen) == 0)
+ if (rpc_pton(net, fo_path, size, sap, salen) == 0)
return -EINVAL;
return nlmsvc_unlock_all_by_ip(sap);
@@ -317,6 +336,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
int len;
struct auth_domain *dom;
struct knfsd_fh fh;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
if (size == 0)
return -EINVAL;
@@ -352,7 +372,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
if (!dom)
return -ENOMEM;
- len = exp_rootfh(&init_net, dom, path, &fh, maxsize);
+ len = exp_rootfh(net, dom, path, &fh, maxsize);
auth_domain_put(dom);
if (len)
return len;
@@ -396,7 +416,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
int rv;
- struct net *net = &init_net;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
if (size > 0) {
int newthreads;
@@ -447,7 +467,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
int len;
int npools;
int *nthreads;
- struct net *net = &init_net;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
mutex_lock(&nfsd_mutex);
npools = nfsd_nrpools(net);
@@ -510,7 +530,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
unsigned minor;
ssize_t tlen = 0;
char *sep;
- struct net *net = &init_net;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (size>0) {
@@ -534,7 +554,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
else
num = simple_strtol(vers, &minorp, 0);
if (*minorp == '.') {
- if (num < 4)
+ if (num != 4)
return -EINVAL;
minor = simple_strtoul(minorp+1, NULL, 0);
if (minor == 0)
@@ -792,7 +812,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
static ssize_t write_ports(struct file *file, char *buf, size_t size)
{
ssize_t rv;
- struct net *net = &init_net;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
mutex_lock(&nfsd_mutex);
rv = __write_ports(file, buf, size, net);
@@ -827,7 +847,7 @@ int nfsd_max_blksize;
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
- struct net *net = &init_net;
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (size > 0) {
@@ -923,7 +943,8 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
*/
static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
{
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
}
@@ -939,7 +960,8 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
*/
static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
{
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn);
}
@@ -995,7 +1017,8 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
{
ssize_t rv;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct net *net = file->f_dentry->d_sb->s_fs_info;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
mutex_lock(&nfsd_mutex);
rv = __write_recoverydir(file, buf, size, nn);
@@ -1013,7 +1036,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
{
static struct tree_descr nfsd_files[] = {
- [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+ [NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
[NFSD_FO_UnlockIP] = {"unlock_ip",
@@ -1037,20 +1060,35 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
#endif
/* last one */ {""}
};
- return simple_fill_super(sb, 0x6e667364, nfsd_files);
+ struct net *net = data;
+ int ret;
+
+ ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
+ if (ret)
+ return ret;
+ sb->s_fs_info = get_net(net);
+ return 0;
}
static struct dentry *nfsd_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_single(fs_type, flags, data, nfsd_fill_super);
+ return mount_ns(fs_type, flags, current->nsproxy->net_ns, nfsd_fill_super);
+}
+
+static void nfsd_umount(struct super_block *sb)
+{
+ struct net *net = sb->s_fs_info;
+
+ kill_litter_super(sb);
+ put_net(net);
}
static struct file_system_type nfsd_fs_type = {
.owner = THIS_MODULE,
.name = "nfsd",
.mount = nfsd_mount,
- .kill_sb = kill_litter_super,
+ .kill_sb = nfsd_umount,
};
#ifdef CONFIG_PROC_FS
@@ -1061,7 +1099,8 @@ static int create_proc_exports_entry(void)
entry = proc_mkdir("fs/nfs", NULL);
if (!entry)
return -ENOMEM;
- entry = proc_create("exports", 0, entry, &exports_operations);
+ entry = proc_create("exports", 0, entry,
+ &exports_proc_operations);
if (!entry)
return -ENOMEM;
return 0;
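
The nfsctl.c hunks above repeatedly swap a hard-coded &init_net for the namespace stashed in the superblock's s_fs_info at mount time. A tiny sketch of that pattern follows, with invented stand-in types (net_ns, super_block, ctl_file): the context travels with the mounted instance instead of living in a module-wide global.

    #include <stdio.h>

    /* Stand-ins for struct net, struct super_block and an nfsd control file. */
    struct net_ns      { int id; };
    struct super_block { void *s_fs_info; };     /* set once at "mount" time */
    struct ctl_file    { struct super_block *sb; };

    /* Before: every handler reached for one global (&init_net in the patch). */
    static struct net_ns init_net = { 0 };

    static struct net_ns *net_from_file(struct ctl_file *f)
    {
        /* After: the namespace is looked up from the file's own mount. */
        return f->sb->s_fs_info;
    }

    int main(void)
    {
        struct net_ns ns1 = { 1 }, ns2 = { 2 };
        struct super_block sb1 = { &ns1 }, sb2 = { &ns2 };
        struct ctl_file f1 = { &sb1 }, f2 = { &sb2 };

        printf("global: %d, f1: %d, f2: %d\n",
               init_net.id, net_from_file(&f1)->id, net_from_file(&f2)->id);
        return 0;
    }
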
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index aad6d457b9e8..54c6b3d3cc79 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -26,17 +26,13 @@ static __be32
nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp)
{
if (err) return err;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
- resp->fh.fh_dentry,
- &resp->stat));
+ return fh_getattr(&resp->fh, &resp->stat);
}
static __be32
nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
{
if (err) return err;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
- resp->fh.fh_dentry,
- &resp->stat));
+ return fh_getattr(&resp->fh, &resp->stat);
}
/*
* Get a file's attributes
@@ -150,9 +146,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
&resp->count);
if (nfserr) return nfserr;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
- resp->fh.fh_dentry,
- &resp->stat));
+ return fh_getattr(&resp->fh, &resp->stat);
}
/*
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index be7af509930c..262df5ccbf59 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -652,7 +652,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
/* Check whether we have this call in the cache. */
switch (nfsd_cache_lookup(rqstp)) {
- case RC_INTR:
case RC_DROPIT:
return 0;
case RC_REPLY:
@@ -703,8 +702,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
int ret;
- struct net *net = &init_net;
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
mutex_lock(&nfsd_mutex);
if (nn->nfsd_serv == NULL) {
@@ -721,7 +719,7 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
int ret = seq_release(inode, file);
- struct net *net = &init_net;
+ struct net *net = inode->i_sb->s_fs_info;
mutex_lock(&nfsd_mutex);
/* this function really, really should have been called svc_put() */
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 979b42106979..9c769a47ac5a 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -4,6 +4,7 @@
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
+#include "vfs.h"
#include "xdr.h"
#include "auth.h"
@@ -100,12 +101,14 @@ decode_sattr(__be32 *p, struct iattr *iap)
iap->ia_mode = tmp;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
- iap->ia_valid |= ATTR_UID;
- iap->ia_uid = tmp;
+ iap->ia_uid = make_kuid(&init_user_ns, tmp);
+ if (uid_valid(iap->ia_uid))
+ iap->ia_valid |= ATTR_UID;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
- iap->ia_valid |= ATTR_GID;
- iap->ia_gid = tmp;
+ iap->ia_gid = make_kgid(&init_user_ns, tmp);
+ if (gid_valid(iap->ia_gid))
+ iap->ia_valid |= ATTR_GID;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
@@ -151,8 +154,8 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl(nfs_ftypes[type >> 12]);
*p++ = htonl((u32) stat->mode);
*p++ = htonl((u32) stat->nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+ *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
+ *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
@@ -194,11 +197,9 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
}
/* Helper function for NFSv2 ACL code */
-__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
+__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat)
{
- struct kstat stat;
- vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat);
- return encode_fattr(rqstp, p, fhp, &stat);
+ return encode_fattr(rqstp, p, fhp, stat);
}
/*
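
The decode_sattr()/encode_fattr() hunks above translate raw wire uid/gid values into internal IDs and set the ATTR_UID/ATTR_GID bits only when that translation succeeds. A self-contained sketch of the same shape follows; the idmap type and its range mapping are made up for illustration and are far simpler than the kernel's make_kuid()/from_kuid().

    #include <stdio.h>
    #include <stdint.h>

    #define ATTR_UID   0x1
    #define ATTR_GID   0x2
    #define INVALID_ID ((uint32_t)-1)

    /* Toy "namespace": wire IDs first..first+count-1 map onto base.. */
    struct idmap { uint32_t first, count, base; };

    static uint32_t map_id(const struct idmap *m, uint32_t wire)
    {
        if (wire < m->first || wire >= m->first + m->count)
            return INVALID_ID;                  /* like an invalid kuid */
        return m->base + (wire - m->first);
    }

    struct iattr { unsigned valid; uint32_t uid, gid; };

    /* Only set the ATTR_* bit when the wire value is present (!= -1) and
     * maps to a valid internal ID, as the decode_sattr() change does. */
    static void decode_owner(const struct idmap *m, uint32_t wire_uid,
                             uint32_t wire_gid, struct iattr *ia)
    {
        ia->valid = 0;
        if (wire_uid != INVALID_ID) {
            ia->uid = map_id(m, wire_uid);
            if (ia->uid != INVALID_ID)
                ia->valid |= ATTR_UID;
        }
        if (wire_gid != INVALID_ID) {
            ia->gid = map_id(m, wire_gid);
            if (ia->gid != INVALID_ID)
                ia->valid |= ATTR_GID;
        }
    }

    int main(void)
    {
        struct idmap m = { 1000, 1000, 0 };
        struct iattr ia;

        decode_owner(&m, 1001, 5, &ia);   /* uid maps, gid does not */
        printf("valid=%#x uid=%u\n", ia.valid, (unsigned)ia.uid);
        return 0;
    }
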
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index d1c229feed52..1a8c7391f7ae 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -152,8 +152,8 @@ struct nfsd4_channel_attrs {
struct nfsd4_cb_sec {
u32 flavor; /* (u32)(-1) used to mean "no valid flavor" */
- u32 uid;
- u32 gid;
+ kuid_t uid;
+ kgid_t gid;
};
struct nfsd4_create_session {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d586117fa94a..2a7eb536de0b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -401,8 +401,8 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
/* Revoke setuid/setgid on chown */
if (!S_ISDIR(inode->i_mode) &&
- (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
- ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid))) {
+ (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
+ ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
iap->ia_valid |= ATTR_KILL_PRIV;
if (iap->ia_valid & ATTR_MODE) {
/* we're setting mode too, just clear the s*id bits */
@@ -979,7 +979,7 @@ static void kill_suid(struct dentry *dentry)
*/
static int wait_for_concurrent_writes(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
static ino_t last_ino;
static dev_t last_dev;
int err = 0;
@@ -1070,7 +1070,7 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (err)
return err;
- inode = file->f_path.dentry->d_inode;
+ inode = file_inode(file);
/* Get readahead parameters */
ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino);
@@ -1205,7 +1205,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
* send along the gid on create when it tries to implement
* setgid directories via NFS:
*/
- if (current_fsuid() != 0)
+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
if (iap->ia_valid)
return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
@@ -1957,7 +1957,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
offset = *offsetp;
while (1) {
- struct inode *dir_inode = file->f_path.dentry->d_inode;
+ struct inode *dir_inode = file_inode(file);
unsigned int reclen;
cdp->err = nfserr_eof; /* will be cleared on successful read */
@@ -2150,7 +2150,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
* with NFSv3.
*/
if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
- inode->i_uid == current_fsuid())
+ uid_eq(inode->i_uid, current_fsuid()))
return 0;
/* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
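
The vfs.c and state.h changes replace == on owner IDs with uid_eq()/gid_eq() on dedicated kuid_t/kgid_t types. The sketch below shows why wrapping the integer in a one-member struct helps: raw or mixed comparisons stop compiling, so every check is forced through a named helper. xuid_t, xuid_eq() and ROOT_XUID are invented stand-ins, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Wrapping the id in a struct means `owner == caller` and `uid == 0`
     * no longer compile, so comparisons go through explicit helpers. */
    typedef struct { unsigned int val; } xuid_t;

    #define XUIDT_INIT(v) ((xuid_t){ .val = (v) })
    static const xuid_t ROOT_XUID = { 0 };

    static inline bool xuid_eq(xuid_t a, xuid_t b) { return a.val == b.val; }
    static inline bool xuid_valid(xuid_t u)        { return u.val != (unsigned)-1; }

    int main(void)
    {
        xuid_t owner = XUIDT_INIT(1000), caller = XUIDT_INIT(1000);

        if (xuid_valid(owner) &&
            (xuid_eq(owner, caller) || xuid_eq(caller, ROOT_XUID)))
            printf("access allowed\n");
        return 0;
    }
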
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 359594c393d2..5b5894159f22 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -6,6 +6,7 @@
#define LINUX_NFSD_VFS_H
#include "nfsfh.h"
+#include "nfsd.h"
/*
* Flags for nfsd_permission
@@ -125,4 +126,11 @@ static inline void fh_drop_write(struct svc_fh *fh)
}
}
+static inline __be32 fh_getattr(struct svc_fh *fh, struct kstat *stat)
+{
+ struct path p = {.mnt = fh->fh_export->ex_path.mnt,
+ .dentry = fh->fh_dentry};
+ return nfserrno(vfs_getattr(&p, stat));
+}
+
#endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h
index 53b1863dd8f6..4f0481d63804 100644
--- a/fs/nfsd/xdr.h
+++ b/fs/nfsd/xdr.h
@@ -167,7 +167,7 @@ int nfssvc_encode_entry(void *, const char *name,
int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
/* Helper functions for NFSv2 ACL code */
-__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp);
+__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
#endif /* LINUX_NFSD_H */
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 7df980eb0562..b6d5542a4ac8 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -136,6 +136,7 @@ struct nfsd3_accessres {
__be32 status;
struct svc_fh fh;
__u32 access;
+ struct kstat stat;
};
struct nfsd3_readlinkres {
@@ -225,6 +226,7 @@ struct nfsd3_getaclres {
int mask;
struct posix_acl *acl_access;
struct posix_acl *acl_default;
+ struct kstat stat;
};
/* dummy type for release */
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 0889bfb43dc9..546f8983ecf1 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -563,7 +563,7 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
- struct dentry *dentry, __be32 *buffer, int *countp,
+ struct dentry *dentry, __be32 **buffer, int countp,
u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *,
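
The nfsd4_encode_fattr() prototype change above turns the output buffer argument into a double pointer, so the encoder can advance the caller's write cursor itself rather than reporting a count. A minimal sketch of that calling convention, with a made-up encode_pair() helper:

    #include <stdio.h>
    #include <stdint.h>

    /* Passing the cursor as uint32_t ** lets the callee both write words and
     * move the caller's position past what it wrote. */
    static int encode_pair(uint32_t **pp, uint32_t a, uint32_t b, uint32_t *end)
    {
        uint32_t *p = *pp;

        if (end - p < 2)
            return -1;            /* not enough room in the reply buffer */
        *p++ = a;
        *p++ = b;
        *pp = p;                  /* caller's cursor advances */
        return 0;
    }

    int main(void)
    {
        uint32_t buf[8], *p = buf;

        encode_pair(&p, 1, 2, buf + 8);
        encode_pair(&p, 3, 4, buf + 8);
        printf("wrote %ld words: %u %u %u %u\n",
               (long)(p - buf), buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }
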
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index df1a7fb238d1..f30b017740a7 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -259,7 +259,7 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
loff_t pos = filp->f_pos;
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index bec4af6eab13..08fdb77852ac 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -67,7 +67,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct nilfs_transaction_info ti;
int ret = 0;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f3859354e41a..b44bdb291b84 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -796,7 +796,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
void __user *argp = (void __user *)arg;
switch (cmd) {
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1d0c0b84c5a3..9de78f08989e 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -517,11 +517,11 @@ static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
*lenp = NILFS_FID_SIZE_CONNECTABLE;
- return 255;
+ return FILEID_INVALID;
}
if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
*lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
- return 255;
+ return FILEID_INVALID;
}
fid->cno = root->cno;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 08b886f119ce..2bfe6dc413a0 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -174,7 +174,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
struct dnotify_struct **prev;
struct inode *inode;
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if (!S_ISDIR(inode->i_mode))
return;
@@ -296,7 +296,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
}
/* dnotify only works on directories */
- inode = filp->f_path.dentry->d_inode;
+ inode = file_inode(filp);
if (!S_ISDIR(inode->i_mode)) {
error = -ENOTDIR;
goto out_err;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9ff4a5ee6e20..5d8444268a16 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -466,7 +466,7 @@ static int fanotify_find_path(int dfd, const char __user *filename,
ret = -ENOTDIR;
if ((flags & FAN_MARK_ONLYDIR) &&
- !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) {
+ !(S_ISDIR(file_inode(f.file)->i_mode))) {
fdput(f);
goto out;
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 6baadb5a8430..4bb21d67d9b1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
struct dentry *alias;
- struct hlist_node *p;
int watched;
if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
spin_lock(&inode->i_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
- hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
struct dentry *child;
/* run all of the children of the original inode and fix their
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index f31e90fc050d..74825be65b7b 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -36,12 +36,11 @@
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
struct fsnotify_mark *mark;
- struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+ hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
new_mask |= mark->mask;
inode->i_fsnotify_mask = new_mask;
}
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
struct fsnotify_mark *mark, *lmark;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
- hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+ hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
list_add(&mark->i.free_i_list, &free_list);
hlist_del_init_rcu(&mark->i.i_list);
fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
struct inode *inode)
{
struct fsnotify_mark *mark;
- struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+ hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
if (mark->group == group) {
fsnotify_get_mark(mark);
return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
int allow_dups)
{
- struct fsnotify_mark *lmark;
- struct hlist_node *node, *last = NULL;
+ struct fsnotify_mark *lmark, *last = NULL;
int ret = 0;
mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
}
/* should mark be in the middle of the current list? */
- hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
- last = node;
+ hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
+ last = lmark;
if ((lmark->group == group) && !allow_dups) {
ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
BUG_ON(last == NULL);
/* mark should be the last entry. last is the current last entry */
- hlist_add_after_rcu(last, &mark->i.i_list);
+ hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
out:
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
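
The fsnotify hunks above drop the separate struct hlist_node *pos cursor because hlist_for_each_entry() now yields the containing object directly. The sketch below models that with a toy singly-linked list and a container_of-based iteration macro; the names and list layout are illustrative only, not the kernel's hlist implementation.

    #include <stdio.h>
    #include <stddef.h>

    struct hnode { struct hnode *next; };
    struct mark  { unsigned mask; struct hnode list; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Iterate directly over the containing type; no separate node cursor
     * variable, which is what the hlist_for_each_entry() change removes. */
    #define for_each_mark(m, head) \
        for (struct hnode *_n = (head); \
             _n && ((m) = container_of(_n, struct mark, list), 1); \
             _n = _n->next)

    int main(void)
    {
        struct mark a = { 0x1, { NULL } }, b = { 0x4, { &a.list } };
        struct hnode *head = &b.list;
        struct mark *m;
        unsigned mask = 0;

        for_each_mark(m, head)
            mask |= m->mask;      /* like recalculating i_fsnotify_mask */
        printf("mask=%#x\n", mask);
        return 0;
    }
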
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 871569c7d609..4216308b81b4 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -197,7 +197,6 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
{
/* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, group);
- idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
atomic_dec(&group->inotify_data.user->inotify_devs);
free_uid(group->inotify_data.user);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 07f7a92fe88e..e0f7c1241a6a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -364,22 +364,20 @@ static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
{
int ret;
- do {
- if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
- return -ENOMEM;
+ idr_preload(GFP_KERNEL);
+ spin_lock(idr_lock);
- spin_lock(idr_lock);
- ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
- &i_mark->wd);
+ ret = idr_alloc(idr, i_mark, *last_wd + 1, 0, GFP_NOWAIT);
+ if (ret >= 0) {
/* we added the mark to the idr, take a reference */
- if (!ret) {
- *last_wd = i_mark->wd;
- fsnotify_get_mark(&i_mark->fsn_mark);
- }
- spin_unlock(idr_lock);
- } while (ret == -EAGAIN);
+ i_mark->wd = ret;
+ *last_wd = i_mark->wd;
+ fsnotify_get_mark(&i_mark->fsn_mark);
+ }
- return ret;
+ spin_unlock(idr_lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
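
inotify_add_to_idr() above loses its idr_pre_get()/-EAGAIN retry loop: memory is preloaded outside the lock and the watch descriptor is then allocated in one non-sleeping call while the lock is held. The userspace sketch below keeps only the "lowest free ID under a lock, no retry" part; the slot array stands in for the idr and there is no real preload step.

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_IDS 64

    static void *slots[MAX_IDS];                 /* id -> object, a toy "idr" */
    static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Allocate the lowest free id >= start and bind it to obj, entirely under
     * the lock and without sleeping -- the shape idr_alloc(..., GFP_NOWAIT)
     * gives the inotify code after the conversion. */
    static int id_alloc(void *obj, int start)
    {
        int id = -1;

        pthread_mutex_lock(&idr_lock);
        for (int i = start; i < MAX_IDS && i >= 0; i++) {
            if (!slots[i]) {
                slots[i] = obj;
                id = i;
                break;
            }
        }
        pthread_mutex_unlock(&idr_lock);
        return id;                               /* -1 means "no space" */
    }

    int main(void)
    {
        static int watch_a, watch_b;
        int last_wd = 0;

        int wd1 = id_alloc(&watch_a, last_wd + 1);
        last_wd = wd1;
        int wd2 = id_alloc(&watch_b, last_wd + 1);
        printf("wd1=%d wd2=%d\n", wd1, wd2);
        return 0;
    }
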
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 4df58b8ea64a..68ca5a8704b5 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -33,12 +33,12 @@
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
struct fsnotify_mark *mark, *lmark;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
struct mount *m = real_mount(mnt);
LIST_HEAD(free_list);
spin_lock(&mnt->mnt_root->d_lock);
- hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
+ hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
list_add(&mark->m.free_m_list, &free_list);
hlist_del_init_rcu(&mark->m.m_list);
fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
{
struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
- struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&mnt->mnt_root->d_lock);
- hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
+ hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
new_mask |= mark->mask;
m->mnt_fsnotify_mask = new_mask;
}
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
{
struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
- struct hlist_node *pos;
assert_spin_locked(&mnt->mnt_root->d_lock);
- hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
+ hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
if (mark->group == group) {
fsnotify_get_mark(mark);
return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
int allow_dups)
{
struct mount *m = real_mount(mnt);
- struct fsnotify_mark *lmark;
- struct hlist_node *node, *last = NULL;
+ struct fsnotify_mark *lmark, *last = NULL;
int ret = 0;
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
}
/* should mark be in the middle of the current list? */
- hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
- last = node;
+ hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
+ last = lmark;
if ((lmark->group == group) && !allow_dups) {
ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
BUG_ON(last == NULL);
/* mark should be the last entry. last is the current last entry */
- hlist_add_after_rcu(last, &mark->m.m_list);
+ hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
out:
fsnotify_recalc_vfsmount_mask_locked(mnt);
spin_unlock(&mnt->mnt_root->d_lock);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 99e36107ff60..aa411c3f20e9 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1101,7 +1101,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
loff_t fpos, i_size;
- struct inode *bmp_vi, *vdir = filp->f_path.dentry->d_inode;
+ struct inode *bmp_vi, *vdir = file_inode(filp);
struct super_block *sb = vdir->i_sb;
ntfs_inode *ndir = NTFS_I(vdir);
ntfs_volume *vol = NTFS_SB(sb);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 260b16281fc3..8a404576fb26 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -65,7 +65,20 @@ static struct posix_acl *ocfs2_acl_from_xattr(const void *value, size_t size)
acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag);
acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
- acl->a_entries[n].e_id = le32_to_cpu(entry->e_id);
+ switch(acl->a_entries[n].e_tag) {
+ case ACL_USER:
+ acl->a_entries[n].e_uid =
+ make_kuid(&init_user_ns,
+ le32_to_cpu(entry->e_id));
+ break;
+ case ACL_GROUP:
+ acl->a_entries[n].e_gid =
+ make_kgid(&init_user_ns,
+ le32_to_cpu(entry->e_id));
+ break;
+ default:
+ break;
+ }
value += sizeof(struct posix_acl_entry);
}
@@ -91,7 +104,21 @@ static void *ocfs2_acl_to_xattr(const struct posix_acl *acl, size_t *size)
for (n = 0; n < acl->a_count; n++, entry++) {
entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag);
entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
- entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
+ switch(acl->a_entries[n].e_tag) {
+ case ACL_USER:
+ entry->e_id = cpu_to_le32(
+ from_kuid(&init_user_ns,
+ acl->a_entries[n].e_uid));
+ break;
+ case ACL_GROUP:
+ entry->e_id = cpu_to_le32(
+ from_kgid(&init_user_ns,
+ acl->a_entries[n].e_gid));
+ break;
+ default:
+ entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
+ break;
+ }
}
return ocfs2_acl;
}
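
The ocfs2 ACL conversion above switches on the entry tag so that only ACL_USER and ACL_GROUP entries carry an owner ID, with everything else getting ACL_UNDEFINED_ID on the way back out. A stripped-down sketch of that conversion (host byte order, no kuid/kgid mapping), with invented disk_entry/mem_entry types:

    #include <stdio.h>
    #include <stdint.h>

    enum { ACL_USER = 0x02, ACL_GROUP = 0x08, ACL_OTHER = 0x20 };
    #define ACL_UNDEFINED_ID ((uint32_t)-1)

    struct disk_entry { uint16_t tag, perm; uint32_t id; };
    struct mem_entry  { uint16_t tag, perm; uint32_t uid, gid; };

    /* Only ACL_USER / ACL_GROUP entries carry a meaningful id; other tags
     * keep ACL_UNDEFINED_ID, which is the shape of the switches added to
     * ocfs2_acl_from_xattr() and ocfs2_acl_to_xattr() above. */
    static void entry_from_disk(const struct disk_entry *d, struct mem_entry *m)
    {
        m->tag  = d->tag;
        m->perm = d->perm;
        m->uid  = m->gid = ACL_UNDEFINED_ID;
        switch (d->tag) {
        case ACL_USER:  m->uid = d->id; break;
        case ACL_GROUP: m->gid = d->id; break;
        default:        break;
        }
    }

    int main(void)
    {
        struct disk_entry d = { ACL_USER, 06, 1000 };
        struct mem_entry m;

        entry_from_disk(&d, &m);
        printf("tag=%#x uid=%u gid=%u\n",
               (unsigned)m.tag, (unsigned)m.uid, (unsigned)m.gid);
        return 0;
    }
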
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 9796330d8f04..20dfec72e903 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -569,7 +569,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
int ret,
bool is_async)
{
- struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(iocb->ki_filp);
int level;
wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
level = ocfs2_iocb_rw_locked_level(iocb);
ocfs2_rw_unlock(inode, level);
+ inode_dio_done(inode);
if (is_async)
aio_complete(iocb, ret, 0);
- inode_dio_done(inode);
}
/*
@@ -626,7 +626,7 @@ static ssize_t ocfs2_direct_IO(int rw,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+ struct inode *inode = file_inode(file)->i_mapping->host;
/*
* Fallback to buffered I/O if we see an inode without
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0d2bf566e39a..aa88bd8bcedc 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -304,28 +304,22 @@ static u8 o2net_num_from_nn(struct o2net_node *nn)
static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
{
- int ret = 0;
-
- do {
- if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
- ret = -EAGAIN;
- break;
- }
- spin_lock(&nn->nn_lock);
- ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
- if (ret == 0)
- list_add_tail(&nsw->ns_node_item,
- &nn->nn_status_list);
- spin_unlock(&nn->nn_lock);
- } while (ret == -EAGAIN);
+ int ret;
- if (ret == 0) {
- init_waitqueue_head(&nsw->ns_wq);
- nsw->ns_sys_status = O2NET_ERR_NONE;
- nsw->ns_status = 0;
+ spin_lock(&nn->nn_lock);
+ ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
+ if (ret >= 0) {
+ nsw->ns_id = ret;
+ list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
}
+ spin_unlock(&nn->nn_lock);
+ if (ret < 0)
+ return ret;
- return ret;
+ init_waitqueue_head(&nsw->ns_wq);
+ nsw->ns_sys_status = O2NET_ERR_NONE;
+ nsw->ns_status = 0;
+ return 0;
}
static void o2net_complete_nsw_locked(struct o2net_node *nn,
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 8db4b58b2e4b..ef999729e274 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
u64 parent_blkno,
int skip_unhashed)
{
- struct hlist_node *p;
struct dentry *dentry;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
trace_ocfs2_find_local_alias(dentry->d_name.len,
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 8fe4e2892ab9..f1e1aed8f638 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -67,7 +67,6 @@
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static unsigned char ocfs2_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -2015,12 +2014,12 @@ int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
int error = 0;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int lock_level = 0;
trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
- error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+ error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
if (lock_level && error >= 0) {
/* We release EX lock which used to update atime
* and get PR lock again to reduce contention
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 005261c333b0..33ecbe0e6734 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2020,7 +2020,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
int ignore_higher, u8 request_from, u32 flags)
{
struct dlm_work_item *item;
- item = kzalloc(sizeof(*item), GFP_NOFS);
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
if (!item)
return -ENOMEM;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 01ebfd0bdad7..eeac97bb3bfa 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
u8 dead_node, u8 new_master)
{
int i;
- struct hlist_node *hash_iter;
struct hlist_head *bucket;
struct dlm_lock_resource *res, *next;
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
* if necessary */
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
- hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
+ hlist_for_each_entry(res, bucket, hash_node) {
if (!(res->state & DLM_LOCK_RES_RECOVERING))
continue;
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
- struct hlist_node *iter;
struct dlm_lock_resource *res;
int i;
struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
*/
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
- hlist_for_each_entry(res, iter, bucket, hash_node) {
+ hlist_for_each_entry(res, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
if (dlm_is_recovery_lock(res->lockname.name,
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 16b712d260d4..4c5fc8d77dc2 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -224,7 +224,7 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
{
int event = 0;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct dlmfs_inode_private *ip = DLMFS_I(inode);
poll_wait(file, &ip->ip_lockres.l_event, wait);
@@ -245,7 +245,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
int bytes_left;
ssize_t readlen, got;
char *lvb_buf;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
inode->i_ino, count, *ppos);
@@ -293,7 +293,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
int bytes_left;
ssize_t writelen;
char *lvb_buf;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
inode->i_ino, count, *ppos);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 88577eb5d712..12ae194ac943 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2045,8 +2045,8 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_version = OCFS2_LVB_VERSION;
lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
- lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
- lvb->lvb_igid = cpu_to_be32(inode->i_gid);
+ lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
+ lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
lvb->lvb_iatime_packed =
@@ -2095,8 +2095,8 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
- inode->i_gid = be32_to_cpu(lvb->lvb_igid);
+ i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
+ i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
ocfs2_unpack_timespec(&inode->i_atime,
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 322216a5f0dd..29651167190d 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -195,11 +195,11 @@ static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
if (parent && (len < 6)) {
*max_len = 6;
- type = 255;
+ type = FILEID_INVALID;
goto bail;
} else if (len < 3) {
*max_len = 3;
- type = 255;
+ type = FILEID_INVALID;
goto bail;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 37d313ede159..6474cb44004d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1116,7 +1116,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
(unsigned long long)OCFS2_I(inode)->ip_blkno,
dentry->d_name.len, dentry->d_name.name,
attr->ia_valid, attr->ia_mode,
- attr->ia_uid, attr->ia_gid);
+ from_kuid(&init_user_ns, attr->ia_uid),
+ from_kgid(&init_user_ns, attr->ia_gid));
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
@@ -1174,14 +1175,14 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
}
- if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
+ (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
/*
* Gather pointers to quota structures so that allocation /
* freeing of quota structures happens here and not inside
* dquot_transfer() where we have problems with lock ordering
*/
- if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
+ if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
@@ -1190,7 +1191,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
goto bail_unlock;
}
}
- if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
+ if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
@@ -1949,7 +1950,7 @@ out:
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
struct ocfs2_space_resv *sr)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int ret;
@@ -1977,7 +1978,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_space_resv sr;
int change_size = 1;
@@ -2232,7 +2233,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
loff_t old_size, *ppos = &iocb->ki_pos;
u32 old_clusters;
struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
@@ -2516,7 +2517,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
unsigned int flags)
{
int ret = 0, lock_level = 0;
- struct inode *inode = in->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(in);
trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2526,7 +2527,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
/*
* See the comment in ocfs2_file_aio_read()
*/
- ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
+ ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
if (ret < 0) {
mlog_errno(ret);
goto bail;
@@ -2546,7 +2547,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
{
int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
struct file *filp = iocb->ki_filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2589,7 +2590,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
* like i_size. This allows the checks down below
* generic_file_aio_read() a chance of actually working.
*/
- ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+ ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
if (ret < 0) {
mlog_errno(ret);
goto bail;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index d89e08a81eda..f87f9bd1edff 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -269,8 +269,8 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
inode->i_mode = le16_to_cpu(fe->i_mode);
- inode->i_uid = le32_to_cpu(fe->i_uid);
- inode->i_gid = le32_to_cpu(fe->i_gid);
+ i_uid_write(inode, le32_to_cpu(fe->i_uid));
+ i_gid_write(inode, le32_to_cpu(fe->i_gid));
/* Fast symlinks will have i_size but no allocated clusters. */
if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
@@ -1259,8 +1259,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_size = cpu_to_le64(i_size_read(inode));
ocfs2_set_links_count(fe, inode->i_nlink);
- fe->i_uid = cpu_to_le32(inode->i_uid);
- fe->i_gid = cpu_to_le32(inode->i_gid);
+ fe->i_uid = cpu_to_le32(i_uid_read(inode));
+ fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
@@ -1290,8 +1290,8 @@ void ocfs2_refresh_inode(struct inode *inode,
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
set_nlink(inode, ocfs2_read_links_count(fe));
- inode->i_uid = le32_to_cpu(fe->i_uid);
- inode->i_gid = le32_to_cpu(fe->i_gid);
+ i_uid_write(inode, le32_to_cpu(fe->i_uid));
+ i_gid_write(inode, le32_to_cpu(fe->i_gid));
inode->i_mode = le16_to_cpu(fe->i_mode);
if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index f20edcbfe700..752f0b26221d 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -881,7 +881,7 @@ bail:
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned int flags;
int new_clusters;
int status;
@@ -994,7 +994,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
bool preserve;
struct reflink_arguments args;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ocfs2_info info;
void __user *argp = (void __user *)arg;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 47a87dda54ce..10d66c75cecb 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -62,7 +62,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
struct page *page)
{
int ret = VM_FAULT_NOPAGE;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
unsigned int len = PAGE_CACHE_SIZE;
@@ -131,7 +131,7 @@ out:
static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
int ret;
@@ -180,13 +180,13 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = 0, lock_level = 0;
- ret = ocfs2_inode_lock_atime(file->f_dentry->d_inode,
- file->f_vfsmnt, &lock_level);
+ ret = ocfs2_inode_lock_atime(file_inode(file),
+ file->f_path.mnt, &lock_level);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
- ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level);
+ ocfs2_inode_unlock(file_inode(file), lock_level);
out:
vma->vm_ops = &ocfs2_file_vm_ops;
return 0;
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 6083432f667e..9f8dcadd9a50 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -1055,7 +1055,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
int status;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ocfs2_move_extents range;
struct ocfs2_move_extents_context *context = NULL;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f1fd0741162b..04ee1b57c243 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -512,8 +512,8 @@ static int __ocfs2_mknod_locked(struct inode *dir,
fe->i_suballoc_loc = cpu_to_le64(suballoc_loc);
fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
- fe->i_uid = cpu_to_le32(inode->i_uid);
- fe->i_gid = cpu_to_le32(inode->i_gid);
+ fe->i_uid = cpu_to_le32(i_uid_read(inode));
+ fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 30a055049e16..998b17eda09d 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2927,7 +2927,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
u32 new_cluster, u32 new_len)
{
int ret = 0, partial;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
@@ -3020,7 +3020,7 @@ int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
u32 new_cluster, u32 new_len)
{
int ret = 0;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -4407,7 +4407,7 @@ static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
* rights to do so.
*/
if (preserve) {
- if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN))
+ if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
return -EPERM;
if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
return -EPERM;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index f169da4624fd..b7e74b580c0f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
* cluster groups will be staying in cache for the duration of
* this operation.
*/
- ac->ac_allow_chain_relink = 0;
+ ac->ac_disable_chain_relink = 1;
/* Claim the first region */
status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
* Do this *after* figuring out how many bits we're taking out
* of our target group.
*/
- if (ac->ac_allow_chain_relink &&
+ if (!ac->ac_disable_chain_relink &&
(prev_group_bh) &&
(ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
status = ocfs2_relink_block_group(handle, alloc_inode,
@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
- ac->ac_allow_chain_relink = 1;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
* searching each chain in order. Don't allow chain relinking
* because we only calculate enough journal credits for one
* relink per alloc. */
- ac->ac_allow_chain_relink = 0;
+ ac->ac_disable_chain_relink = 1;
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
if (i == victim)
continue;
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index b8afabfeede4..a36d0aa50911 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
/* these are used by the chain search */
u16 ac_chain;
- int ac_allow_chain_relink;
+ int ac_disable_chain_relink;
group_search_t *ac_group_search;
u64 ac_last_group;
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index f1fbb4b552ad..66edce7ecfd7 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -57,7 +57,7 @@
static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
{
struct inode *inode = page->mapping->host;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
int status = ocfs2_read_inode_block(inode, &bh);
struct ocfs2_dinode *fe;
const char *link;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 0ba9ea1e7961..2e3ea308c144 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
struct buffer_head *dir_bh = NULL;
ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
- if (!ret) {
+ if (ret) {
mlog_errno(ret);
goto leave;
}
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index fb5b3ff79dc6..acbaebcad3a8 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -330,7 +330,7 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
u64 fsblock, int hindex)
{
- struct inode *dir = filp->f_dentry->d_inode;
+ struct inode *dir = file_inode(filp);
struct buffer_head *bh;
struct omfs_inode *oi;
u64 self;
@@ -405,7 +405,7 @@ out:
static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *dir = filp->f_dentry->d_inode;
+ struct inode *dir = file_inode(filp);
struct buffer_head *bh;
loff_t offset, res;
unsigned int hchain, hindex;
diff --git a/fs/open.c b/fs/open.c
index 9b33c0cbfacf..e3441f58d2e1 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -30,6 +30,7 @@
#include <linux/fs_struct.h>
#include <linux/ima.h>
#include <linux/dnotify.h>
+#include <linux/compat.h>
#include "internal.h"
@@ -140,6 +141,13 @@ SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
return do_sys_truncate(path, length);
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
+{
+ return do_sys_truncate(path, length);
+}
+#endif
+
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
struct inode *inode;
@@ -195,6 +203,13 @@ SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
return ret;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
+{
+ return do_sys_ftruncate(fd, length, 1);
+}
+#endif
+
/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
@@ -228,7 +243,7 @@ SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
long ret;
if (offset < 0 || len <= 0)
@@ -426,7 +441,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
if (!f.file)
goto out;
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
error = -ENOTDIR;
if (!S_ISDIR(inode->i_mode))
@@ -689,7 +704,7 @@ static int do_dentry_open(struct file *f,
f->f_mode = FMODE_PATH;
path_get(&f->f_path);
- inode = f->f_path.dentry->d_inode;
+ inode = file_inode(f);
if (f->f_mode & FMODE_WRITE) {
error = __get_file_write_access(inode, f->f_path.mnt);
if (error)
@@ -699,7 +714,6 @@ static int do_dentry_open(struct file *f,
}
f->f_mapping = inode->i_mapping;
- f->f_pos = 0;
file_sb_list_add(f, inode->i_sb);
if (unlikely(f->f_mode & FMODE_PATH)) {
@@ -810,23 +824,22 @@ struct file *dentry_open(const struct path *path, int flags,
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
- error = -ENFILE;
f = get_empty_filp();
- if (f == NULL)
- return ERR_PTR(error);
-
- f->f_flags = flags;
- f->f_path = *path;
- error = do_dentry_open(f, NULL, cred);
- if (!error) {
- error = open_check_o_direct(f);
- if (error) {
- fput(f);
+ if (!IS_ERR(f)) {
+ f->f_flags = flags;
+ f->f_path = *path;
+ error = do_dentry_open(f, NULL, cred);
+ if (!error) {
+ /* from now on we need fput() to dispose of f */
+ error = open_check_o_direct(f);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+ } else {
+ put_filp(f);
f = ERR_PTR(error);
}
- } else {
- put_filp(f);
- f = ERR_PTR(error);
}
return f;
}
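
The two COMPAT_SYSCALL_DEFINE2 additions above only narrow the argument types (compat_off_t, compat_ulong_t) and forward to the same do_sys_truncate()/do_sys_ftruncate() helpers the native syscalls use. A minimal sketch of why that is enough; demo_compat_truncate() is illustrative, not part of the diff:

/* A compat_off_t is a signed 32-bit value, so widening it to the kernel's
 * 64-bit loff_t sign-extends, and the existing "length < 0" check in the
 * shared truncate path still rejects negative lengths from 32-bit callers. */
static long demo_compat_truncate(const char __user *path, int length)
{
	loff_t len = length;		/* sign-extending widen, as in the wrapper */

	if (len < 0)
		return -EINVAL;		/* mirrors the check in do_sys_truncate() */
	/* ... hand off to the shared truncate path ... */
	return 0;
}
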
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 2ad080faca34..ae47fa7efb9d 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -262,7 +262,7 @@ found:
static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct op_inode_info *oi = OP_I(inode);
struct device_node *dp = oi->u.node;
struct device_node *child;
diff --git a/fs/pipe.c b/fs/pipe.c
index bd3479db4b62..64a494cef0a0 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -361,7 +361,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pipe_inode_info *pipe;
int do_wakeup;
ssize_t ret;
@@ -486,7 +486,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t ppos)
{
struct file *filp = iocb->ki_filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pipe_inode_info *pipe;
ssize_t ret;
int do_wakeup;
@@ -677,7 +677,7 @@ bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pipe_inode_info *pipe;
int count, buf, nrbufs;
@@ -705,7 +705,7 @@ static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pipe_inode_info *pipe = inode->i_pipe;
int nrbufs;
@@ -758,7 +758,7 @@ pipe_release(struct inode *inode, int decr, int decw)
static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int retval;
mutex_lock(&inode->i_mutex);
@@ -772,7 +772,7 @@ pipe_read_fasync(int fd, struct file *filp, int on)
static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int retval;
mutex_lock(&inode->i_mutex);
@@ -786,7 +786,7 @@ pipe_write_fasync(int fd, struct file *filp, int on)
static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct pipe_inode_info *pipe = inode->i_pipe;
int retval;
@@ -1037,13 +1037,13 @@ int create_pipe_files(struct file **res, int flags)
err = -ENFILE;
f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
- if (!f)
+ if (IS_ERR(f))
goto err_dentry;
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
res[0] = alloc_file(&path, FMODE_READ, &read_pipefifo_fops);
- if (!res[0])
+ if (IS_ERR(res[0]))
goto err_file;
path_get(&path);
@@ -1226,7 +1226,7 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
*/
struct pipe_inode_info *get_pipe_info(struct file *file)
{
- struct inode *i = file->f_path.dentry->d_inode;
+ struct inode *i = file_inode(file);
return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
}
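
Both the dentry_open() rework in fs/open.c and the create_pipe_files() hunks above rely on file allocators returning ERR_PTR values rather than NULL. A minimal sketch of that error-pointer convention, with demo_checked_alloc() standing in for a real caller:

#include <linux/err.h>
#include <linux/fs.h>

/* Hypothetical helper: the allocator encodes failures in the returned
 * pointer (e.g. ERR_PTR(-ENFILE)), so callers test IS_ERR() instead of
 * comparing against NULL and can recover the errno with PTR_ERR(). */
static struct file *demo_checked_alloc(struct file *(*alloc)(void))
{
	struct file *f = alloc();

	if (IS_ERR(f))
		return f;		/* pass the encoded error straight up */
	/* ... initialise f here ... */
	return f;
}
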
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9b43ff77a51e..69078c7cef1f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -73,6 +73,7 @@
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
+#include <linux/printk.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
@@ -383,7 +384,7 @@ static int lstats_open(struct inode *inode, struct file *file)
static ssize_t lstats_write(struct file *file, const char __user *buf,
size_t count, loff_t *offs)
{
- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
@@ -602,7 +603,7 @@ static const struct inode_operations proc_def_inode_operations = {
static ssize_t proc_info_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
unsigned long page;
ssize_t length;
struct task_struct *task = get_proc_task(inode);
@@ -668,7 +669,7 @@ static const struct file_operations proc_single_file_operations = {
static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
{
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
struct mm_struct *mm;
if (!task)
@@ -869,7 +870,7 @@ static const struct file_operations proc_environ_operations = {
static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
int oom_adj = OOM_ADJUST_MIN;
size_t len;
@@ -916,7 +917,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
goto out;
}
- task = get_proc_task(file->f_path.dentry->d_inode);
+ task = get_proc_task(file_inode(file));
if (!task) {
err = -ESRCH;
goto out;
@@ -952,7 +953,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
* /proc/pid/oom_adj is provided for legacy purposes, ask users to use
* /proc/pid/oom_score_adj instead.
*/
- printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+ pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
current->comm, task_pid_nr(current), task_pid_nr(task),
task_pid_nr(task));
@@ -976,7 +977,7 @@ static const struct file_operations proc_oom_adj_operations = {
static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
short oom_score_adj = OOM_SCORE_ADJ_MIN;
unsigned long flags;
@@ -1019,7 +1020,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
goto out;
}
- task = get_proc_task(file->f_path.dentry->d_inode);
+ task = get_proc_task(file_inode(file));
if (!task) {
err = -ESRCH;
goto out;
@@ -1067,7 +1068,7 @@ static const struct file_operations proc_oom_score_adj_operations = {
static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
struct task_struct *task = get_proc_task(inode);
ssize_t length;
char tmpbuf[TMPBUFLEN];
@@ -1084,7 +1085,7 @@ static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
char *page, *tmp;
ssize_t length;
uid_t loginuid;
@@ -1142,7 +1143,7 @@ static const struct file_operations proc_loginuid_operations = {
static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
struct task_struct *task = get_proc_task(inode);
ssize_t length;
char tmpbuf[TMPBUFLEN];
@@ -1165,7 +1166,7 @@ static const struct file_operations proc_sessionid_operations = {
static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
size_t len;
int make_it_fail;
@@ -1197,7 +1198,7 @@ static ssize_t proc_fault_inject_write(struct file * file,
make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
- task = get_proc_task(file->f_dentry->d_inode);
+ task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
task->make_it_fail = make_it_fail;
@@ -1237,7 +1238,7 @@ static ssize_t
sched_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct task_struct *p;
p = get_proc_task(inode);
@@ -1288,7 +1289,7 @@ static ssize_t
sched_autogroup_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct task_struct *p;
char buffer[PROC_NUMBUF];
int nice;
@@ -1343,7 +1344,7 @@ static const struct file_operations proc_pid_sched_autogroup_operations = {
static ssize_t comm_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct task_struct *p;
char buffer[TASK_COMM_LEN];
@@ -1711,7 +1712,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
if (!capable(CAP_SYS_ADMIN)) {
- status = -EACCES;
+ status = -EPERM;
goto out_notask;
}
@@ -1844,7 +1845,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
struct dentry *result;
struct mm_struct *mm;
- result = ERR_PTR(-EACCES);
+ result = ERR_PTR(-EPERM);
if (!capable(CAP_SYS_ADMIN))
goto out;
@@ -1900,7 +1901,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
ino_t ino;
int ret;
- ret = -EACCES;
+ ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
@@ -2146,7 +2147,7 @@ out_no_task:
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
char *p = NULL;
ssize_t length;
struct task_struct *task = get_proc_task(inode);
@@ -2167,7 +2168,7 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
char *page;
ssize_t length;
struct task_struct *task = get_proc_task(inode);
@@ -2256,7 +2257,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
struct mm_struct *mm;
char buffer[PROC_NUMBUF];
size_t len;
@@ -2308,7 +2309,7 @@ static ssize_t proc_coredump_filter_write(struct file *file,
goto out_no_task;
ret = -ESRCH;
- task = get_proc_task(file->f_dentry->d_inode);
+ task = get_proc_task(file_inode(file));
if (!task)
goto out_no_task;
@@ -2618,6 +2619,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
name.name = buf;
name.len = snprintf(buf, sizeof(buf), "%d", pid);
+ /* no ->d_hash() rejects on procfs */
dentry = d_hash_and_lookup(mnt->mnt_root, &name);
if (dentry) {
shrink_dcache_parent(dentry);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 76ddae83daa5..4b3b3ffb52f1 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/printk.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
@@ -42,7 +43,7 @@ static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode * inode = file_inode(file);
char *page;
ssize_t retval=0;
int eof=0;
@@ -132,11 +133,8 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
}
if (start == NULL) {
- if (n > PAGE_SIZE) {
- printk(KERN_ERR
- "proc_file_read: Apparent buffer overflow!\n");
+ if (n > PAGE_SIZE) /* Apparent buffer overflow */
n = PAGE_SIZE;
- }
n -= *ppos;
if (n <= 0)
break;
@@ -144,26 +142,19 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
n = count;
start = page + *ppos;
} else if (start < page) {
- if (n > PAGE_SIZE) {
- printk(KERN_ERR
- "proc_file_read: Apparent buffer overflow!\n");
+ if (n > PAGE_SIZE) /* Apparent buffer overflow */
n = PAGE_SIZE;
- }
if (n > count) {
/*
* Don't reduce n because doing so might
* cut off part of a data block.
*/
- printk(KERN_WARNING
- "proc_file_read: Read count exceeded\n");
+ pr_warn("proc_file_read: count exceeded\n");
}
} else /* start >= page */ {
unsigned long startoff = (unsigned long)(start - page);
- if (n > (PAGE_SIZE - startoff)) {
- printk(KERN_ERR
- "proc_file_read: Apparent buffer overflow!\n");
+ if (n > (PAGE_SIZE - startoff)) /* buffer overflow? */
n = PAGE_SIZE - startoff;
- }
if (n > count)
n = count;
}
@@ -188,7 +179,7 @@ static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
spin_lock(&pde->pde_unload_lock);
@@ -209,7 +200,7 @@ static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
if (pde->write_proc) {
@@ -412,8 +403,7 @@ static const struct dentry_operations proc_dentry_operations =
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
struct dentry *dentry)
{
- struct inode *inode = NULL;
- int error = -ENOENT;
+ struct inode *inode;
spin_lock(&proc_subdir_lock);
for (de = de->subdir; de ; de = de->next) {
@@ -422,22 +412,16 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
pde_get(de);
spin_unlock(&proc_subdir_lock);
- error = -ENOMEM;
inode = proc_get_inode(dir->i_sb, de);
- goto out_unlock;
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ d_set_d_op(dentry, &proc_dentry_operations);
+ d_add(dentry, inode);
+ return NULL;
}
}
spin_unlock(&proc_subdir_lock);
-out_unlock:
-
- if (inode) {
- d_set_d_op(dentry, &proc_dentry_operations);
- d_add(dentry, inode);
- return NULL;
- }
- if (de)
- pde_put(de);
- return ERR_PTR(error);
+ return ERR_PTR(-ENOENT);
}
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
@@ -460,7 +444,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
{
unsigned int ino;
int i;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int ret = 0;
ino = inode->i_ino;
@@ -522,7 +506,7 @@ out:
int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}
@@ -576,7 +560,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
for (tmp = dir->subdir; tmp; tmp = tmp->next)
if (strcmp(tmp->name, dp->name) == 0) {
- WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
+ WARN(1, "proc_dir_entry '%s/%s' already registered\n",
dir->name, dp->name);
break;
}
@@ -837,9 +821,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
if (S_ISDIR(de->mode))
parent->nlink--;
de->nlink = 0;
- WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
- "'%s/%s', leaking at least '%s'\n", __func__,
- de->parent->name, de->name, de->subdir->name);
+ WARN(de->subdir, "%s: removing non-empty directory "
+ "'%s/%s', leaking at least '%s'\n", __func__,
+ de->parent->name, de->name, de->subdir->name);
pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
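
The printk() conversions throughout fs/proc adopt the pr_*() wrappers from <linux/printk.h>, which fold the KERN_* severity into the call and honour an optional pr_fmt() prefix. A short sketch, with the "demo: " prefix purely illustrative:

#define pr_fmt(fmt) "demo: " fmt	/* must be defined before the include to take effect */
#include <linux/printk.h>

static void demo_report(const char *name)
{
	pr_warn("'%s' already registered\n", name);	/* was printk(KERN_WARNING ...) */
	pr_err("failed to register '%s'\n", name);	/* was printk(KERN_ERR ...) */
}
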
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 439ae6886507..a86aebc9ba7c 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -13,6 +13,7 @@
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
+#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
@@ -144,7 +145,7 @@ void pde_users_dec(struct proc_dir_entry *pde)
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
loff_t rv = -EINVAL;
loff_t (*llseek)(struct file *, loff_t, int);
@@ -179,7 +180,7 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
@@ -201,7 +202,7 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
@@ -223,7 +224,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
unsigned int rv = DEFAULT_POLLMASK;
unsigned int (*poll)(struct file *, struct poll_table_struct *);
@@ -245,7 +246,7 @@ static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *p
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
long (*ioctl)(struct file *, unsigned int, unsigned long);
@@ -268,7 +269,7 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
@@ -291,7 +292,7 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ struct proc_dir_entry *pde = PDE(file_inode(file));
int rv = -EIO;
int (*mmap)(struct file *, struct vm_area_struct *);
@@ -445,12 +446,9 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
- struct inode * inode;
+ struct inode *inode = iget_locked(sb, de->low_ino);
- inode = iget_locked(sb, de->low_ino);
- if (!inode)
- return NULL;
- if (inode->i_state & I_NEW) {
+ if (inode && (inode->i_state & I_NEW)) {
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
PROC_I(inode)->pde = de;
@@ -482,10 +480,12 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
} else
pde_put(de);
return inode;
-}
+}
int proc_fill_super(struct super_block *s)
{
+ struct inode *root_inode;
+
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
@@ -494,11 +494,17 @@ int proc_fill_super(struct super_block *s)
s->s_time_gran = 1;
pde_get(&proc_root);
- s->s_root = d_make_root(proc_get_inode(s, &proc_root));
- if (s->s_root)
- return 0;
+ root_inode = proc_get_inode(s, &proc_root);
+ if (!root_inode) {
+ pr_err("proc_fill_super: get root inode failed\n");
+ return -ENOMEM;
+ }
- printk("proc_read_super: get root inode failed\n");
- pde_put(&proc_root);
- return -ENOMEM;
+ s->s_root = d_make_root(root_inode);
+ if (!s->s_root) {
+ pr_err("proc_fill_super: allocate dentry failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
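
The proc_get_inode() rewrite above is the standard iget_locked() idiom in compressed form. A sketch of the pattern, with demo_iget() as a hypothetical caller:

static struct inode *demo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (inode && (inode->i_state & I_NEW)) {
		/* a fresh, still-locked inode: fill in i_op, i_fop,
		 * timestamps and private data, then publish it */
		unlock_new_inode(inode);
	}
	return inode;			/* NULL only on allocation failure */
}
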
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 252544c05207..85ff3a4598b3 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/proc_fs.h>
+#include <linux/binfmts.h>
struct ctl_table_header;
struct mempolicy;
@@ -108,7 +109,7 @@ static inline int task_dumpable(struct task_struct *task)
if (mm)
dumpable = get_dumpable(mm);
task_unlock(task);
- if (dumpable == SUID_DUMPABLE_ENABLED)
+ if (dumpable == SUID_DUMP_USER)
return 1;
return 0;
}
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e96d4f18ca3a..eda6f017f272 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -17,6 +17,7 @@
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -619,7 +620,7 @@ static int __init proc_kcore_init(void)
proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
&proc_kcore_operations);
if (!proc_root_kcore) {
- printk(KERN_ERR "couldn't create /proc/kcore\n");
+ pr_err("couldn't create /proc/kcore\n");
return 0; /* Always returns 0. */
}
/* Store text area if it's special */
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index b1822dde55c2..ccfd99bd1c5a 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -45,7 +45,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
file = region->vm_file;
if (file) {
- struct inode *inode = region->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(region->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
}
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index de20ec480fa0..30b590f5bd35 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -8,6 +8,7 @@
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/printk.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/of.h>
@@ -110,8 +111,8 @@ void proc_device_tree_update_prop(struct proc_dir_entry *pde,
if (ent->data == oldprop)
break;
if (ent == NULL) {
- printk(KERN_WARNING "device-tree: property \"%s\" "
- " does not exist\n", oldprop->name);
+ pr_warn("device-tree: property \"%s\" does not exist\n",
+ oldprop->name);
} else {
ent->data = newprop;
ent->size = newprop->length;
@@ -153,8 +154,8 @@ static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
realloc:
fixed_name = kmalloc(fixup_len, GFP_KERNEL);
if (fixed_name == NULL) {
- printk(KERN_ERR "device-tree: Out of memory trying to fixup "
- "name \"%s\"\n", name);
+ pr_err("device-tree: Out of memory trying to fixup "
+ "name \"%s\"\n", name);
return name;
}
@@ -175,8 +176,8 @@ retry:
goto retry;
}
- printk(KERN_WARNING "device-tree: Duplicate name in %s, "
- "renamed to \"%s\"\n", np->full_name, fixed_name);
+ pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ np->full_name, fixed_name);
return fixed_name;
}
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 3131a03d7d37..b4ac6572474f 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -163,7 +163,7 @@ static int proc_tgid_net_readdir(struct file *filp, void *dirent,
struct net *net;
ret = -EINVAL;
- net = get_proc_task_net(filp->f_path.dentry->d_inode);
+ net = get_proc_task_net(file_inode(filp));
if (net != NULL) {
ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
put_net(net);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 1827d88ad58b..ac05f33a0dde 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -5,6 +5,7 @@
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
+#include <linux/printk.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/namei.h>
@@ -57,7 +58,7 @@ static void sysctl_print_dir(struct ctl_dir *dir)
{
if (dir->header.parent)
sysctl_print_dir(dir->header.parent);
- printk(KERN_CONT "%s/", dir->header.ctl_table[0].procname);
+ pr_cont("%s/", dir->header.ctl_table[0].procname);
}
static int namecmp(const char *name1, int len1, const char *name2, int len2)
@@ -134,9 +135,9 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
else if (cmp > 0)
p = &(*p)->rb_right;
else {
- printk(KERN_ERR "sysctl duplicate entry: ");
+ pr_err("sysctl duplicate entry: ");
sysctl_print_dir(head->parent);
- printk(KERN_CONT "/%s\n", entry->procname);
+ pr_cont("/%s\n", entry->procname);
return -EEXIST;
}
}
@@ -478,7 +479,7 @@ out:
static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
size_t count, loff_t *ppos, int write)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
ssize_t error;
@@ -542,7 +543,7 @@ static int proc_sys_open(struct inode *inode, struct file *filp)
static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
unsigned int ret = DEFAULT_POLLMASK;
@@ -927,9 +928,9 @@ found:
subdir->header.nreg++;
failed:
if (unlikely(IS_ERR(subdir))) {
- printk(KERN_ERR "sysctl could not get directory: ");
+ pr_err("sysctl could not get directory: ");
sysctl_print_dir(dir);
- printk(KERN_CONT "/%*.*s %ld\n",
+ pr_cont("/%*.*s %ld\n",
namelen, namelen, name, PTR_ERR(subdir));
}
drop_sysctl_table(&dir->header);
@@ -995,8 +996,8 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_ERR "sysctl table check failed: %s/%s %pV\n",
- path, table->procname, &vaf);
+ pr_err("sysctl table check failed: %s/%s %pV\n",
+ path, table->procname, &vaf);
va_end(args);
return -EINVAL;
@@ -1510,9 +1511,9 @@ static void put_links(struct ctl_table_header *header)
drop_sysctl_table(link_head);
}
else {
- printk(KERN_ERR "sysctl link missing during unregister: ");
+ pr_err("sysctl link missing during unregister: ");
sysctl_print_dir(parent);
- printk(KERN_CONT "/%s\n", name);
+ pr_cont("/%s\n", name);
}
}
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ca5ce7f9f800..3e636d864d56 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -271,7 +271,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
const char *name = NULL;
if (file) {
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -743,7 +743,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
return rv;
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
- task = get_proc_task(file->f_path.dentry->d_inode);
+ task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
mm = get_task_mm(task);
@@ -1015,7 +1015,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct task_struct *task = get_proc_task(file_inode(file));
struct mm_struct *mm;
struct pagemapread pm;
int ret = -ESRCH;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 1ccfa537f5f5..56123a6f462e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -149,7 +149,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
file = vma->vm_file;
if (file) {
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 0d5071d29985..b870f740ab5a 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
@@ -175,15 +176,15 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
if (!curr_m)
return -EINVAL;
- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
- tsz = buflen;
-
- /* Calculate left bytes in current memory segment. */
- nr_bytes = (curr_m->size - (start - curr_m->paddr));
- if (tsz > nr_bytes)
- tsz = nr_bytes;
while (buflen) {
+ tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));
+
+ /* Calculate left bytes in current memory segment. */
+ nr_bytes = (curr_m->size - (start - curr_m->paddr));
+ if (tsz > nr_bytes)
+ tsz = nr_bytes;
+
tmp = read_from_oldmem(buffer, tsz, &start, 1);
if (tmp < 0)
return tmp;
@@ -198,12 +199,6 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
struct vmcore, list);
start = curr_m->paddr;
}
- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
- tsz = buflen;
- /* Calculate left bytes in current memory segment. */
- nr_bytes = (curr_m->size - (start - curr_m->paddr));
- if (tsz > nr_bytes)
- tsz = nr_bytes;
}
return acc;
}
@@ -553,8 +548,7 @@ static int __init parse_crash_elf64_headers(void)
ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
ehdr.e_phnum == 0) {
- printk(KERN_WARNING "Warning: Core image elf header is not"
- "sane\n");
+ pr_warn("Warning: Core image elf header is not sane\n");
return -EINVAL;
}
@@ -609,8 +603,7 @@ static int __init parse_crash_elf32_headers(void)
ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
ehdr.e_phnum == 0) {
- printk(KERN_WARNING "Warning: Core image elf header is not"
- "sane\n");
+ pr_warn("Warning: Core image elf header is not sane\n");
return -EINVAL;
}
@@ -653,8 +646,7 @@ static int __init parse_crash_elf_headers(void)
if (rc < 0)
return rc;
if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
- printk(KERN_WARNING "Warning: Core image elf header"
- " not found\n");
+ pr_warn("Warning: Core image elf header not found\n");
return -EINVAL;
}
@@ -673,8 +665,7 @@ static int __init parse_crash_elf_headers(void)
/* Determine vmcore size. */
vmcore_size = get_vmcore_size_elf32(elfcorebuf);
} else {
- printk(KERN_WARNING "Warning: Core image elf header is not"
- " sane\n");
+ pr_warn("Warning: Core image elf header is not sane\n");
return -EINVAL;
}
return 0;
@@ -690,7 +681,7 @@ static int __init vmcore_init(void)
return rc;
rc = parse_crash_elf_headers();
if (rc) {
- printk(KERN_WARNING "Kdump: vmcore not initialized\n");
+ pr_warn("Kdump: vmcore not initialized\n");
return rc;
}
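
The read_vmcore() restructuring above hoists the chunk-size computation to the top of the copy loop so it is redone after every segment switch. In sketch form (a hypothetical helper, not the diff's code), the per-iteration bound is:

/* Never read past the end of the current page or the current segment. */
static size_t demo_chunk(size_t buflen, u64 start, u64 seg_paddr, u64 seg_size)
{
	size_t tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));

	return min_t(size_t, tsz, seg_size - (start - seg_paddr));
}
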
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 7b0329468a5d..28ce014b3cef 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -16,7 +16,7 @@
static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned int offset;
struct buffer_head *bh;
struct qnx4_inode_entry *de;
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index dc597353db3b..8798d065e400 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -117,7 +117,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1);
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index d5378d028589..8d5b438cc188 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -202,7 +202,7 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long pgoff, unsigned long flags)
{
unsigned long maxpages, lpages, nr, loop, ret;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct page **pages = NULL, **ptr, *page;
loff_t isize;
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index eab8c09d3801..c24f1e10b946 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -260,6 +260,7 @@ static struct file_system_type ramfs_fs_type = {
.name = "ramfs",
.mount = ramfs_mount,
.kill_sb = ramfs_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
static struct file_system_type rootfs_fs_type = {
.name = "rootfs",
diff --git a/fs/read_write.c b/fs/read_write.c
index bb34af315280..a698eff457fb 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -15,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
+#include <linux/compat.h>
#include "read_write.h"
#include <asm/uaccess.h>
@@ -163,7 +164,7 @@ EXPORT_SYMBOL(no_llseek);
loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
loff_t retval;
mutex_lock(&inode->i_mutex);
@@ -247,6 +248,13 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
return retval;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
+{
+ return sys_lseek(fd, offset, whence);
+}
+#endif
+
#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
@@ -278,7 +286,6 @@ out_putf:
}
#endif
-
/*
* rw_verify_area doesn't like huge counts. We limit
* them to something that fits in "int" so that others
@@ -290,7 +297,7 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
loff_t pos;
int retval = -EINVAL;
- inode = file->f_path.dentry->d_inode;
+ inode = file_inode(file);
if (unlikely((ssize_t) count < 0))
return retval;
pos = *ppos;
@@ -901,8 +908,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
if (!(out.file->f_mode & FMODE_WRITE))
goto fput_out;
retval = -EINVAL;
- in_inode = in.file->f_path.dentry->d_inode;
- out_inode = out.file->f_path.dentry->d_inode;
+ in_inode = file_inode(in.file);
+ out_inode = file_inode(out.file);
retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
if (retval < 0)
goto fput_out;
diff --git a/fs/readdir.c b/fs/readdir.c
index 5e69ef533b77..fee38e04fae4 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -22,7 +22,7 @@
int vfs_readdir(struct file *file, filldir_t filler, void *buf)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int res = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
goto out;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 50302d6f8895..6165bd4784f6 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -268,7 +268,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
* new current position before returning. */
)
{
- struct inode *inode = file->f_path.dentry->d_inode; // Inode of the file that we are writing to.
+ struct inode *inode = file_inode(file); // Inode of the file that we are writing to.
/* To simplify coding at this time, we store
locked pages in array for now */
struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 95d7680ead47..ea5061fd4f3e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1603,10 +1603,10 @@ int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
if (parent && (maxlen < 5)) {
*lenp = 5;
- return 255;
+ return FILEID_INVALID;
} else if (maxlen < 3) {
*lenp = 3;
- return 255;
+ return FILEID_INVALID;
}
data[0] = inode->i_ino;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 0c2185042d5f..15cb5fe6b425 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -21,7 +21,7 @@
*/
long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
unsigned int flags;
int err = 0;
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index e60e87035bb3..9cc0740adffa 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -281,7 +281,7 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
}
#if defined( REISERFS_USE_OIDMAPF )
if (sb_info->oidmap.use_file && (sb_info->oidmap.mapf != NULL)) {
- loff_t size = sb_info->oidmap.mapf->f_path.dentry->d_inode->i_size;
+ loff_t size = file_inode(sb_info->oidmap.mapf)->i_size;
total_used += size / sizeof(reiserfs_oidinterval_d_t);
}
#endif
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index fd7c5f60b46b..7e8d3a80bdab 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -147,7 +147,7 @@ static const struct address_space_operations romfs_aops = {
*/
static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *i = filp->f_dentry->d_inode;
+ struct inode *i = file_inode(filp);
struct romfs_inode ri;
unsigned long offset, maxoff;
int j, ino, nextfh;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index f2bc3dfd0b88..15c6304bab71 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -308,27 +308,27 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
mutex_lock(&m->lock);
m->version = file->f_version;
switch (whence) {
- case 1:
- offset += file->f_pos;
- case 0:
- if (offset < 0)
- break;
- retval = offset;
- if (offset != m->read_pos) {
- while ((retval=traverse(m, offset)) == -EAGAIN)
- ;
- if (retval) {
- /* with extreme prejudice... */
- file->f_pos = 0;
- m->read_pos = 0;
- m->version = 0;
- m->index = 0;
- m->count = 0;
- } else {
- m->read_pos = offset;
- retval = file->f_pos = offset;
- }
+ case SEEK_CUR:
+ offset += file->f_pos;
+ case SEEK_SET:
+ if (offset < 0)
+ break;
+ retval = offset;
+ if (offset != m->read_pos) {
+ while ((retval = traverse(m, offset)) == -EAGAIN)
+ ;
+ if (retval) {
+ /* with extreme prejudice... */
+ file->f_pos = 0;
+ m->read_pos = 0;
+ m->version = 0;
+ m->index = 0;
+ m->count = 0;
+ } else {
+ m->read_pos = offset;
+ retval = file->f_pos = offset;
}
+ }
}
file->f_version = m->version;
mutex_unlock(&m->lock);
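
The seq_lseek() hunk above is mostly reindentation plus replacing the bare 0/1 whence values with SEEK_SET/SEEK_CUR; the SEEK_CUR case still deliberately falls through after biasing the offset. A stripped-down sketch of that shape (demo_lseek() is illustrative only):

static loff_t demo_lseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		if (offset < 0)
			return -EINVAL;
		file->f_pos = offset;
		return offset;
	default:
		return -EINVAL;
	}
}
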
diff --git a/fs/splice.c b/fs/splice.c
index 6909d89d0da5..718bd0056384 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -569,7 +569,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
return res;
}
-static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+ssize_t kernel_write(struct file *file, const char *buf, size_t count,
loff_t pos)
{
mm_segment_t old_fs;
@@ -578,11 +578,12 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
- res = vfs_write(file, (const char __user *)buf, count, &pos);
+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
set_fs(old_fs);
return res;
}
+EXPORT_SYMBOL(kernel_write);
ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
@@ -1170,7 +1171,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* randomly drop data for eg socket -> socket splicing. Use the
* piped splicing for that!
*/
- i_mode = in->f_path.dentry->d_inode->i_mode;
+ i_mode = file_inode(in)->i_mode;
if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
return -EINVAL;
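
With kernel_write() un-static'ed and exported above, other kernel code can write a kernel-space buffer through the VFS without repeating the set_fs() dance itself. A hypothetical caller, error handling trimmed:

static int demo_write_blob(struct file *file, const char *buf, size_t len)
{
	ssize_t n = kernel_write(file, buf, len, 0);	/* write at offset 0 */

	return n < 0 ? (int)n : 0;
}
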
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index b381305c9a47..57dc70ebbb19 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -102,7 +102,7 @@ static int get_dir_index_using_offset(struct super_block *sb,
static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
u64 block = squashfs_i(inode)->start + msblk->directory_table;
int offset = squashfs_i(inode)->offset, length, dir_count, size,
diff --git a/fs/stat.c b/fs/stat.c
index 14f45459c83d..04ce1ac20d20 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -37,17 +37,17 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
EXPORT_SYMBOL(generic_fillattr);
-int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+int vfs_getattr(struct path *path, struct kstat *stat)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = path->dentry->d_inode;
int retval;
- retval = security_inode_getattr(mnt, dentry);
+ retval = security_inode_getattr(path->mnt, path->dentry);
if (retval)
return retval;
if (inode->i_op->getattr)
- return inode->i_op->getattr(mnt, dentry, stat);
+ return inode->i_op->getattr(path->mnt, path->dentry, stat);
generic_fillattr(inode, stat);
return 0;
@@ -61,8 +61,7 @@ int vfs_fstat(unsigned int fd, struct kstat *stat)
int error = -EBADF;
if (f.file) {
- error = vfs_getattr(f.file->f_path.mnt, f.file->f_path.dentry,
- stat);
+ error = vfs_getattr(&f.file->f_path, stat);
fdput(f);
}
return error;
@@ -89,7 +88,7 @@ retry:
if (error)
goto out;
- error = vfs_getattr(path.mnt, path.dentry, stat);
+ error = vfs_getattr(&path, stat);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
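
The vfs_getattr() change above replaces the (vfsmount, dentry) pair with the struct path callers already hold, so conversions are one-liners. Sketch of the call-site shape, using a hypothetical demo_fstat():

static int demo_fstat(struct file *file, struct kstat *stat)
{
	return vfs_getattr(&file->f_path, stat);	/* was (f_path.mnt, f_path.dentry, stat) */
}
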
diff --git a/fs/super.c b/fs/super.c
index 12f123712161..7465d4364208 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
void *data)
{
struct super_block *s = NULL;
- struct hlist_node *node;
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
if (test) {
- hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
+ hlist_for_each_entry(old, &type->fs_supers, s_instances) {
if (!test(old, data))
continue;
if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
void (*f)(struct super_block *, void *), void *arg)
{
struct super_block *sb, *p = NULL;
- struct hlist_node *node;
spin_lock(&sb_lock);
- hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+ hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
sb->s_count++;
spin_unlock(&sb_lock);
@@ -842,7 +840,7 @@ int get_anon_bdev(dev_t *p)
else if (error)
return -EAGAIN;
- if ((dev & MAX_IDR_MASK) == (1 << MINORBITS)) {
+ if (dev == (1 << MINORBITS)) {
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, dev);
if (unnamed_dev_start > dev)
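
The sget() and iterate_supers_type() hunks (and the sysfs one further down) track an hlist_for_each_entry() API change: the separate struct hlist_node cursor argument is gone and the macro derives its position from the entry itself. A small sketch with made-up demo types:

#include <linux/list.h>

struct demo_entry {
	struct hlist_node node;
	int value;
};

static int demo_sum(struct hlist_head *head)
{
	struct demo_entry *e;
	int sum = 0;

	/* old form: hlist_for_each_entry(e, pos, head, node) */
	hlist_for_each_entry(e, head, node)
		sum += e->value;
	return sum;
}
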
diff --git a/fs/sync.c b/fs/sync.c
index 14eefeb44636..2c5d6639a66a 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -332,7 +332,7 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
if (!f.file)
goto out;
- i_mode = f.file->f_path.dentry->d_inode->i_mode;
+ i_mode = file_inode(f.file)->i_mode;
ret = -ESPIPE;
if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
!S_ISLNK(i_mode))
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 614b2b544880..15c68f9489ae 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -70,7 +70,7 @@ static ssize_t
read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
{
struct bin_buffer *bb = file->private_data;
- int size = file->f_path.dentry->d_inode->i_size;
+ int size = file_inode(file)->i_size;
loff_t offs = *off;
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
@@ -140,7 +140,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
size_t bytes, loff_t *off)
{
struct bin_buffer *bb = file->private_data;
- int size = file->f_path.dentry->d_inode->i_size;
+ int size = file_inode(file)->i_size;
loff_t offs = *off;
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
@@ -461,15 +461,14 @@ const struct file_operations bin_fops = {
void unmap_bin_file(struct sysfs_dirent *attr_sd)
{
struct bin_buffer *bb;
- struct hlist_node *tmp;
if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
return;
mutex_lock(&sysfs_bin_lock);
- hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
- struct inode *inode = bb->file->f_path.dentry->d_inode;
+ hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
+ struct inode *inode = file_inode(bb->file);
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index a77c42157620..3799e8dac3eb 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -68,7 +68,7 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned long pos = filp->f_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 0e606b12a59d..32b644f03690 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -383,10 +383,10 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
}
-#ifdef COMPAT
+#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct itimerspec __user *, utmr,
- struct itimerspec __user *, otmr)
+ const struct compat_itimerspec __user *, utmr,
+ struct compat_itimerspec __user *, otmr)
{
struct itimerspec new, old;
int ret;
@@ -402,12 +402,12 @@ COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
- struct itimerspec __user *, otmr)
+ struct compat_itimerspec __user *, otmr)
{
struct itimerspec kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
- return put_compat_itimerspec(otmr, &t) ? -EFAULT: 0;
+ return put_compat_itimerspec(otmr, &kotmr) ? -EFAULT: 0;
}
#endif
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 12817ffc7345..7f60e900edff 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2459,7 +2459,7 @@ error_dump:
static inline int chance(unsigned int n, unsigned int out_of)
{
- return !!((random32() % out_of) + 1 <= n);
+ return !!((prandom_u32() % out_of) + 1 <= n);
}
@@ -2477,13 +2477,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
if (chance(1, 2)) {
d->pc_delay = 1;
/* Fail within 1 minute */
- delay = random32() % 60000;
+ delay = prandom_u32() % 60000;
d->pc_timeout = jiffies;
d->pc_timeout += msecs_to_jiffies(delay);
ubifs_warn("failing after %lums", delay);
} else {
d->pc_delay = 2;
- delay = random32() % 10000;
+ delay = prandom_u32() % 10000;
/* Fail within 10000 operations */
d->pc_cnt_max = delay;
ubifs_warn("failing after %lu calls", delay);
@@ -2563,7 +2563,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
unsigned int from, to, ffs = chance(1, 2);
unsigned char *p = (void *)buf;
- from = random32() % (len + 1);
+ from = prandom_u32() % (len + 1);
/* Corruption may only span one max. write unit */
to = min(len, ALIGN(from, c->max_write_size));
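
The random32() to prandom_u32() renames in the ubifs debug code are one-for-one; both return a pseudo-random u32 from the non-cryptographic generator, the newer name just makes that explicit. Sketch of the typical modulo use seen above (demo_one_in() is illustrative):

#include <linux/random.h>

/* Returns non-zero roughly n times out of every out_of calls. */
static int demo_one_in(unsigned int n, unsigned int out_of)
{
	return (prandom_u32() % out_of) < n;
}
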
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 8a574776a493..de08c92f2e23 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -352,7 +352,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
- struct inode *dir = file->f_path.dentry->d_inode;
+ struct inode *dir = file_inode(file);
struct ubifs_info *c = dir->i_sb->s_fs_info;
dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 4f6493c130e0..f12189d2db1d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1444,7 +1444,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct timespec now = ubifs_current_time(inode);
struct ubifs_budget_req req = { .new_page = 1 };
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 1a7e2d8bdbe9..648b143606cc 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -147,7 +147,7 @@ out_unlock:
long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int flags, err;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
switch (cmd) {
case FS_IOC_GETFLAGS:
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 9daaeef675dd..4b826abb1528 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -2007,28 +2007,28 @@ static int dbg_populate_lsave(struct ubifs_info *c)
if (!dbg_is_chk_gen(c))
return 0;
- if (random32() & 3)
+ if (prandom_u32() & 3)
return 0;
for (i = 0; i < c->lsave_cnt; i++)
c->lsave[i] = c->main_first;
list_for_each_entry(lprops, &c->empty_list, list)
- c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
list_for_each_entry(lprops, &c->freeable_list, list)
- c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
list_for_each_entry(lprops, &c->frdi_idx_list, list)
- c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_FREE - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
return 1;
}
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 769701ccb5c9..ba32da3fe08a 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -126,13 +126,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
else if (inum > o->inum)
p = p->rb_right;
else {
- if (o->dnext) {
+ if (o->del) {
spin_unlock(&c->orphan_lock);
dbg_gen("deleted twice ino %lu",
(unsigned long)inum);
return;
}
- if (o->cnext) {
+ if (o->cmt) {
+ o->del = 1;
o->dnext = c->orph_dnext;
c->orph_dnext = o;
spin_unlock(&c->orphan_lock);
@@ -172,7 +173,9 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_new, new_list) {
ubifs_assert(orphan->new);
+ ubifs_assert(!orphan->cmt);
orphan->new = 0;
+ orphan->cmt = 1;
*last = orphan;
last = &orphan->cnext;
}
@@ -299,7 +302,9 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
cnext = c->orph_cnext;
for (i = 0; i < cnt; i++) {
orphan = cnext;
+ ubifs_assert(orphan->cmt);
orph->inos[i] = cpu_to_le64(orphan->inum);
+ orphan->cmt = 0;
cnext = orphan->cnext;
orphan->cnext = NULL;
}
@@ -378,6 +383,7 @@ static int consolidate(struct ubifs_info *c)
list_for_each_entry(orphan, &c->orph_list, list) {
if (orphan->new)
continue;
+ orphan->cmt = 1;
*last = orphan;
last = &orphan->cnext;
cnt += 1;
@@ -442,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
orphan = dnext;
dnext = orphan->dnext;
ubifs_assert(!orphan->new);
+ ubifs_assert(orphan->del);
rb_erase(&orphan->rb, &c->orph_tree);
list_del(&orphan->list);
c->tot_orphans -= 1;
@@ -531,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
rb_link_node(&orphan->rb, parent, p);
rb_insert_color(&orphan->rb, &c->orph_tree);
list_add_tail(&orphan->list, &c->orph_list);
+ orphan->del = 1;
orphan->dnext = c->orph_dnext;
c->orph_dnext = orphan;
dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 523bbad69c0c..52a6559275c4 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -683,7 +683,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
c->ilebs[c->ileb_cnt++] = lnum;
dbg_cmt("LEB %d", lnum);
}
- if (dbg_is_chk_index(c) && !(random32() & 7))
+ if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
return -ENOSPC;
return 0;
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index d133c276fe05..b2babce4d70f 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -904,6 +904,8 @@ struct ubifs_budget_req {
* @dnext: next orphan to delete
* @inum: inode number
* @new: %1 => added since the last commit, otherwise %0
+ * @cmt: %1 => commit pending, otherwise %0
+ * @del: %1 => delete pending, otherwise %0
*/
struct ubifs_orphan {
struct rb_node rb;
@@ -912,7 +914,9 @@ struct ubifs_orphan {
struct ubifs_orphan *cnext;
struct ubifs_orphan *dnext;
ino_t inum;
- int new;
+ unsigned new:1;
+ unsigned cmt:1;
+ unsigned del:1;
};
/**
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index eb8bfe2b89a5..b3e93f5e17c3 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -186,7 +186,7 @@ out:
static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct inode *dir = filp->f_path.dentry->d_inode;
+ struct inode *dir = file_inode(filp);
int result;
if (filp->f_pos == 0) {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 77b5953eaac8..29569dd08168 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -139,7 +139,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
ssize_t retval;
struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err, pos;
size_t count = iocb->ki_left;
struct udf_inode_info *iinfo = UDF_I(inode);
@@ -178,7 +178,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
long old_block, new_block;
int result = -EINVAL;
@@ -204,7 +204,7 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto out;
case UDF_RELOCATE_BLOCKS:
if (!capable(CAP_SYS_ADMIN)) {
- result = -EACCES;
+ result = -EPERM;
goto out;
}
if (get_user(old_block, (long __user *)arg)) {
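The EACCES -> EPERM change follows the usual errno convention: a failed capable() test means the caller lacks privilege (EPERM), while EACCES is kept for file-permission denials. The pattern in isolation (illustrative only):

if (!capable(CAP_SYS_ADMIN))
	return -EPERM;	/* missing privilege, not a file-mode problem (-EACCES) */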
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index cbae1ed0b7c1..7a12e48ad819 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -67,6 +67,74 @@ static void udf_update_extents(struct inode *,
struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
+static void __udf_clear_extent_cache(struct inode *inode)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ if (iinfo->cached_extent.lstart != -1) {
+ brelse(iinfo->cached_extent.epos.bh);
+ iinfo->cached_extent.lstart = -1;
+ }
+}
+
+/* Invalidate extent cache */
+static void udf_clear_extent_cache(struct inode *inode)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ spin_lock(&iinfo->i_extent_cache_lock);
+ __udf_clear_extent_cache(inode);
+ spin_unlock(&iinfo->i_extent_cache_lock);
+}
+
+/* Return contents of extent cache */
+static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
+ loff_t *lbcount, struct extent_position *pos)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int ret = 0;
+
+ spin_lock(&iinfo->i_extent_cache_lock);
+ if ((iinfo->cached_extent.lstart <= bcount) &&
+ (iinfo->cached_extent.lstart != -1)) {
+ /* Cache hit */
+ *lbcount = iinfo->cached_extent.lstart;
+ memcpy(pos, &iinfo->cached_extent.epos,
+ sizeof(struct extent_position));
+ if (pos->bh)
+ get_bh(pos->bh);
+ ret = 1;
+ }
+ spin_unlock(&iinfo->i_extent_cache_lock);
+ return ret;
+}
+
+/* Add extent to extent cache */
+static void udf_update_extent_cache(struct inode *inode, loff_t estart,
+ struct extent_position *pos, int next_epos)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ spin_lock(&iinfo->i_extent_cache_lock);
+ /* Invalidate previously cached extent */
+ __udf_clear_extent_cache(inode);
+ if (pos->bh)
+ get_bh(pos->bh);
+ memcpy(&iinfo->cached_extent.epos, pos,
+ sizeof(struct extent_position));
+ iinfo->cached_extent.lstart = estart;
+ if (next_epos)
+ switch (iinfo->i_alloc_type) {
+ case ICBTAG_FLAG_AD_SHORT:
+ iinfo->cached_extent.epos.offset -=
+ sizeof(struct short_ad);
+ break;
+ case ICBTAG_FLAG_AD_LONG:
+ iinfo->cached_extent.epos.offset -=
+ sizeof(struct long_ad);
+ }
+ spin_unlock(&iinfo->i_extent_cache_lock);
+}
void udf_evict_inode(struct inode *inode)
{
@@ -90,6 +158,7 @@ void udf_evict_inode(struct inode *inode)
}
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
+ udf_clear_extent_cache(inode);
if (want_delete) {
udf_free_inode(inode);
}
@@ -105,6 +174,7 @@ static void udf_write_failed(struct address_space *mapping, loff_t to)
truncate_pagecache(inode, to, isize);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
+ udf_clear_extent_cache(inode);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
@@ -372,7 +442,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
iinfo->i_next_alloc_goal++;
}
-
+ udf_clear_extent_cache(inode);
phys = inode_getblk(inode, block, &err, &new);
if (!phys)
goto abort;
@@ -1171,6 +1241,7 @@ set_size:
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
+ udf_clear_extent_cache(inode);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
0x00, bsize - newsize -
udf_file_entry_alloc_offset(inode));
@@ -1184,6 +1255,7 @@ set_size:
if (err)
return err;
down_write(&iinfo->i_data_sem);
+ udf_clear_extent_cache(inode);
truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
@@ -2156,11 +2228,12 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
struct udf_inode_info *iinfo;
iinfo = UDF_I(inode);
- pos->offset = 0;
- pos->block = iinfo->i_location;
- pos->bh = NULL;
+ if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
+ pos->offset = 0;
+ pos->block = iinfo->i_location;
+ pos->bh = NULL;
+ }
*elen = 0;
-
do {
etype = udf_next_aext(inode, pos, eloc, elen, 1);
if (etype == -1) {
@@ -2170,7 +2243,8 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
}
lbcount += *elen;
} while (lbcount <= bcount);
-
+ /* update extent cache */
+ udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
*offset = (bcount + *elen - lbcount) >> blocksize_bits;
return etype;
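The helpers above add a one-entry, per-inode cache of the last extent lookup, guarded by i_extent_cache_lock and holding its own buffer_head reference; inode_bmap() resumes from it on a hit and re-primes it after every walk. The flip side is the invalidation rule that the scattered udf_clear_extent_cache() calls implement, sketched here for emphasis (same pattern as the truncate paths above):

/* Any path that rewrites the extent tree must drop the cached position
 * first, under i_data_sem, or a later inode_bmap() could resume from a
 * stale extent_position / buffer_head. */
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);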
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 95fee278ab9d..102c072c6bbf 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1270,10 +1270,10 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
if (parent && (len < 5)) {
*lenp = 5;
- return 255;
+ return FILEID_INVALID;
} else if (len < 3) {
*lenp = 3;
- return 255;
+ return FILEID_INVALID;
}
*lenp = 3;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e9be396a558d..bc5b30a819e8 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -134,6 +134,8 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
init_rwsem(&ei->i_data_sem);
+ ei->cached_extent.lstart = -1;
+ spin_lock_init(&ei->i_extent_cache_lock);
return &ei->vfs_inode;
}
@@ -1021,7 +1023,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
if (bitmap == NULL)
return NULL;
- bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
bitmap->s_nr_groups = nr_groups;
return bitmap;
}
@@ -1079,8 +1080,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!bitmap)
return 1;
map->s_uspace.s_bitmap = bitmap;
- bitmap->s_extLength = le32_to_cpu(
- phd->unallocSpaceBitmap.extLength);
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
@@ -1115,8 +1114,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!bitmap)
return 1;
map->s_fspace.s_bitmap = bitmap;
- bitmap->s_extLength = le32_to_cpu(
- phd->freedSpaceBitmap.extLength);
bitmap->s_extPosition = le32_to_cpu(
phd->freedSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
@@ -1866,6 +1863,8 @@ static void udf_open_lvid(struct super_block *sb)
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
mutex_unlock(&sbi->s_alloc_mutex);
+ /* Make opening of filesystem visible on the media immediately */
+ sync_dirty_buffer(bh);
}
static void udf_close_lvid(struct super_block *sb)
@@ -1906,6 +1905,8 @@ static void udf_close_lvid(struct super_block *sb)
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
mutex_unlock(&sbi->s_alloc_mutex);
+ /* Make closing of filesystem visible on the media immediately */
+ sync_dirty_buffer(bh);
}
u64 lvid_get_unique_id(struct super_block *sb)
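mark_buffer_dirty() only schedules the LVID buffer for eventual writeback; the added sync_dirty_buffer() calls push the open/close record to the media before returning, so other UDF implementations see the session state immediately. The generic idiom (bh is assumed to hold the integrity descriptor):

mark_buffer_dirty(bh);		/* queue the block for writeback */
sync_dirty_buffer(bh);		/* submit it now and wait for completion */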
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index bb8309dcd5c1..b5cd8ed2aa12 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,19 @@
#ifndef _UDF_I_H
#define _UDF_I_H
+struct extent_position {
+ struct buffer_head *bh;
+ uint32_t offset;
+ struct kernel_lb_addr block;
+};
+
+struct udf_ext_cache {
+ /* Extent position */
+ struct extent_position epos;
+ /* Start logical offset in bytes */
+ loff_t lstart;
+};
+
/*
* The i_data_sem and i_mutex serve for protection of allocation information
* of regular files and symlinks. This includes all extents belonging to
@@ -35,6 +48,9 @@ struct udf_inode_info {
__u8 *i_data;
} i_ext;
struct rw_semaphore i_data_sem;
+ struct udf_ext_cache cached_extent;
+ /* Spinlock for protecting extent cache */
+ spinlock_t i_extent_cache_lock;
struct inode vfs_inode;
};
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 5f027227f085..ed401e94aa8c 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -80,10 +80,9 @@ struct udf_virtual_data {
};
struct udf_bitmap {
- __u32 s_extLength;
__u32 s_extPosition;
- __u16 s_nr_groups;
- struct buffer_head **s_block_bitmap;
+ int s_nr_groups;
+ struct buffer_head *s_block_bitmap[0];
};
struct udf_part_map {
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index de038da6f6bd..be7dabbbcb49 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -113,11 +113,6 @@ struct ustr {
uint8_t u_len;
};
-struct extent_position {
- struct buffer_head *bh;
- uint32_t offset;
- struct kernel_lb_addr block;
-};
/* super.c */
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index dbc90994715a..3a75ca09c506 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -433,7 +433,7 @@ static int
ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
loff_t pos = filp->f_pos;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
unsigned int offset = pos & ~PAGE_CACHE_MASK;
unsigned long n = pos >> PAGE_CACHE_SHIFT;
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index a8bd26b82ecb..f852b082a084 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -78,14 +78,14 @@ xfs_swapext(
goto out_put_tmp_file;
}
- if (IS_SWAPFILE(f.file->f_path.dentry->d_inode) ||
- IS_SWAPFILE(tmp.file->f_path.dentry->d_inode)) {
+ if (IS_SWAPFILE(file_inode(f.file)) ||
+ IS_SWAPFILE(file_inode(tmp.file))) {
error = XFS_ERROR(EINVAL);
goto out_put_tmp_file;
}
- ip = XFS_I(f.file->f_path.dentry->d_inode);
- tip = XFS_I(tmp.file->f_path.dentry->d_inode);
+ ip = XFS_I(file_inode(f.file));
+ tip = XFS_I(file_inode(tmp.file));
if (ip->i_mount != tip->i_mount) {
error = XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index a83611849cee..c585bc646395 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -48,7 +48,7 @@ static int xfs_fileid_length(int fileid_type)
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
return 6;
}
- return 255; /* invalid */
+ return FILEID_INVALID;
}
STATIC int
@@ -90,7 +90,7 @@ xfs_fs_encode_fh(
len = xfs_fileid_length(fileid_type);
if (*max_len < len) {
*max_len = len;
- return 255;
+ return FILEID_INVALID;
}
*max_len = len;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 67284edb84d7..f03bf1a456fb 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -811,7 +811,7 @@ xfs_file_fallocate(
loff_t offset,
loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
long error;
loff_t new_size = 0;
xfs_flock64_t bf;
@@ -912,7 +912,7 @@ xfs_file_readdir(
void *dirent,
filldir_t filldir)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
xfs_inode_t *ip = XFS_I(inode);
int error;
size_t bufsize;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index c1c3ef88a260..d681e34c2950 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -80,7 +80,7 @@ xfs_find_handle(
f = fdget(hreq->fd);
if (!f.file)
return -EBADF;
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
} else {
error = user_lpath((const char __user *)hreq->path, &path);
if (error)
@@ -168,7 +168,7 @@ xfs_handle_to_dentry(
/*
* Only allow handle opens under a directory.
*/
- if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode))
+ if (!S_ISDIR(file_inode(parfilp)->i_mode))
return ERR_PTR(-ENOTDIR);
if (hlen != sizeof(xfs_handle_t))
@@ -1334,7 +1334,7 @@ xfs_file_ioctl(
unsigned int cmd,
unsigned long p)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 1244274a5674..63b8fc432151 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -530,7 +530,7 @@ xfs_file_compat_ioctl(
unsigned cmd,
unsigned long p)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 96fcbb85ff83..d1dba7ce75ae 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
xlog_tid_t tid)
{
xlog_recover_t *trans;
- struct hlist_node *n;
- hlist_for_each_entry(trans, n, head, r_list) {
+ hlist_for_each_entry(trans, head, r_list) {
if (trans->r_log_tid == tid)
return trans;
}
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 227ba7dc293d..e65278f560c4 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -323,6 +323,15 @@ struct acpi_eject_event {
u32 event;
};
+struct acpi_hp_work {
+ struct work_struct work;
+ acpi_handle handle;
+ u32 type;
+ void *context;
+};
+void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
+ void (*func)(struct work_struct *work));
+
extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
@@ -454,7 +463,6 @@ struct acpi_pci_root {
/* helper */
acpi_handle acpi_get_child(acpi_handle, u64);
int acpi_is_root_bridge(acpi_handle);
-acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
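alloc_acpi_hp_work() packages a hotplug notification (handle, event type, context) into a work item so the heavy lifting can run from a workqueue rather than the ACPI notify context. A hedged sketch of the consumer side; whether the callback or the core frees the container depends on the implementation, so the kfree() is an assumption here:

static void example_hotplug_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	/* ... act on hpw->handle, hpw->type and hpw->context ... */

	kfree(hpw);	/* assumption: the callback owns the allocation */
}

static void example_notify(acpi_handle handle, u32 event, void *context)
{
	alloc_acpi_hp_work(handle, event, context, example_hotplug_fn);
}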
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 8b1d7a6a9695..627749af0ba7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -90,11 +90,6 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
-/* ACPI PCI Interrupt Routing (pci_irq.c) */
-
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus);
-void acpi_pci_irq_del_prt(int segment, int bus);
-
/* ACPI PCI Device Binding (pci_bind.c) */
struct pci_bus;
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 7d2a9eaab9eb..5b3d2bd4813a 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -194,8 +194,6 @@ void acpi_os_fixed_event_count(u32 fixed_event_number);
/*
* Threads and Scheduling
*/
-extern struct workqueue_struct *kacpi_hotplug_wq;
-
acpi_thread_id acpi_os_get_thread_id(void);
acpi_status
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
new file mode 100644
index 000000000000..720446cb243e
--- /dev/null
+++ b/include/acpi/ghes.h
@@ -0,0 +1,72 @@
+#include <acpi/apei.h>
+#include <acpi/hed.h>
+
+/*
+ * One struct ghes is created for each generic hardware error source.
+ * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
+ * handler.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+#define GHES_EXITING 0x0002
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u64 buffer_paddr;
+ unsigned long flags;
+ union {
+ struct list_head list;
+ struct timer_list timer;
+ unsigned int irq;
+ };
+};
+
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes;
+};
+
+struct ghes_estatus_cache {
+ u32 estatus_len;
+ atomic_t count;
+ struct acpi_hest_generic *generic;
+ unsigned long long time_in;
+ struct rcu_head rcu;
+};
+
+enum {
+ GHES_SEV_NO = 0x0,
+ GHES_SEV_CORRECTED = 0x1,
+ GHES_SEV_RECOVERABLE = 0x2,
+ GHES_SEV_PANIC = 0x3,
+};
+
+/* From drivers/edac/ghes_edac.c */
+
+#ifdef CONFIG_EDAC_GHES
+void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+ struct cper_sec_mem_err *mem_err);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev);
+
+void ghes_edac_unregister(struct ghes *ghes);
+
+#else
+static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+ struct cper_sec_mem_err *mem_err)
+{
+}
+
+static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+ return 0;
+}
+
+static inline void ghes_edac_unregister(struct ghes *ghes)
+{
+}
+#endif
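The CONFIG_EDAC_GHES block uses the standard stub pattern: when the EDAC glue is compiled out, the inline no-ops let the GHES core call the registration and reporting hooks unconditionally, with no #ifdef at the call sites. Roughly how a caller reads (illustrative, not the actual apei/ghes.c code):

static int example_ghes_init(struct ghes *ghes, struct device *dev)
{
	int rc;

	rc = ghes_edac_register(ghes, dev);	/* returns 0 when compiled out */
	if (rc)
		return rc;
	/* ... normal GHES setup continues ... */
	return 0;
}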
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index c084767c88bc..59811df58c5b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
csum_partial_copy((src), (dst), (len), (sum))
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+#ifndef csum_fold
/*
* Fold a partial checksum
*/
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
+#endif
#ifndef csum_tcpudp_nofold
/*
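Wrapping ip_fast_csum() and csum_fold() in #ifndef follows the asm-generic convention: an architecture with an optimized version defines a same-named macro before including this header and the generic fallback drops out. A sketch of the arch-side hook; the body shown is just the portable fold, standing in for real arch-specific code:

/* arch/<foo>/include/asm/checksum.h -- illustrative only */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}
#define csum_fold csum_fold		/* tells asm-generic not to redefine it */

#include <asm-generic/checksum.h>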
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index b6485cafb7bd..a8ece9a33aef 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -76,7 +76,7 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
/*
* Convert cputime <-> timeval (msec)
*/
-static inline cputime_t timeval_to_cputime(struct timeval *val)
+static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 20ca7663975f..bde646995d10 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -47,12 +47,14 @@ struct gpio;
struct seq_file;
struct module;
struct device_node;
+struct gpio_desc;
/**
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
* @owner: helps prevent removal of modules exporting active GPIOs
+ * @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
* @free: optional hook for chip-specific deactivation, such as
@@ -75,6 +77,7 @@ struct device_node;
* negative during registration, requests dynamic ID allocation.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @desc: array of ngpio descriptors. Private.
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI
* @names: if set, must be an array of strings to use as alternative
@@ -98,6 +101,7 @@ struct gpio_chip {
const char *label;
struct device *dev;
struct module *owner;
+ struct list_head list;
int (*request)(struct gpio_chip *chip,
unsigned offset);
@@ -124,6 +128,7 @@ struct gpio_chip {
struct gpio_chip *chip);
int base;
u16 ngpio;
+ struct gpio_desc *desc;
const char *const *names;
unsigned can_sleep:1;
unsigned exported:1;
@@ -152,7 +157,6 @@ struct gpio_chip {
extern const char *gpiochip_is_requested(struct gpio_chip *chip,
unsigned offset);
extern struct gpio_chip *gpio_to_chip(unsigned gpio);
-extern int __must_check gpiochip_reserve(int start, int ngpio);
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
@@ -192,12 +196,6 @@ extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *labe
extern int gpio_request_array(const struct gpio *array, size_t num);
extern void gpio_free_array(const struct gpio *array, size_t num);
-/* bindings for managed devices that want to request gpios */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
-int devm_gpio_request_one(struct device *dev, unsigned gpio,
- unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
-
#ifdef CONFIG_GPIO_SYSFS
/*
@@ -212,6 +210,43 @@ extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @node: list node for maintaining the set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
#else /* !CONFIG_GPIOLIB */
static inline bool gpio_is_valid(int number)
@@ -270,41 +305,4 @@ static inline void gpio_unexport(unsigned gpio)
}
#endif /* CONFIG_GPIO_SYSFS */
-#ifdef CONFIG_PINCTRL
-
-/**
- * struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
- * @pctldev: pinctrl device which handles corresponding pins
- * @range: actual range of pins controlled by a gpio controller
- */
-
-struct gpio_pin_range {
- struct list_head node;
- struct pinctrl_dev *pctldev;
- struct pinctrl_gpio_range range;
-};
-
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
-
-#else
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- return 0;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-}
-
-#endif /* CONFIG_PINCTRL */
-
#endif /* _ASM_GENERIC_GPIO_H */
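gpiochip_add_pin_range() lets a GPIO driver declare which of its lines map onto which pins of a pin controller, and the !CONFIG_PINCTRL stubs keep callers unconditional. A sketch of a driver registering one range at probe time (the controller name and offsets are made up):

/* Map GPIOs 0..15 of this chip onto pins 32..47 of "pinctrl-foo". */
ret = gpiochip_add_pin_range(chip, "pinctrl-foo",
			     0,		/* gpio_offset within the chip      */
			     32,	/* pin_offset in the pin controller */
			     16);	/* npins                            */
if (ret)
	return ret;

/* ... and on teardown: gpiochip_remove_pin_ranges(chip); */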
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 8e260cf01351..aba53083297d 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -239,15 +239,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
-#define ioread16be(addr) be16_to_cpu(ioread16(addr))
+#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
#define ioread32(addr) readl(addr)
-#define ioread32be(addr) be32_to_cpu(ioread32(addr))
+#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))
#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
-#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
+#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
#define iowrite32(v, addr) writel((v), (addr))
-#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
+#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
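The old ioreadNbe()/iowriteNbe() definitions stacked a big-endian conversion on top of ioread/iowrite, which already convert to/from little-endian bus order, so big-endian kernels ended up with a double swap. Going through the __raw_* accessors keeps the bus bytes untouched and applies exactly one conversion. A worked example for a little-endian CPU and a device register holding big-endian 0x12345678 (the register name is hypothetical):

/* Bytes at the register: 0x12 0x34 0x56 0x78 (big-endian device).
 *
 *   __raw_readl(addr)          -> 0x78563412   raw load, no swap on a LE CPU
 *   __be32_to_cpu(0x78563412)  -> 0x12345678   single swap, correct
 *
 * With the old definition a big-endian CPU got a swap from readl() and
 * then a no-op from be32_to_cpu(), i.e. the wrong value. */
u32 status = ioread32be(base + EXAMPLE_STATUS_REG);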
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 9788568f7978..c184aa8ec8cd 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -7,7 +7,6 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
}
#endif
+#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
-EFAULT; \
})
+#ifndef __put_user_fn
+
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
size = __copy_to_user(ptr, x, size);
return size ? -EFAULT : size;
}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
extern int __put_user_bad(void) __attribute__((noreturn));
#define __get_user(x, ptr) \
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
-EFAULT; \
})
+#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
size = __copy_from_user(x, ptr, size);
return size ? -EFAULT : size;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
extern int __get_user_bad(void) __attribute__((noreturn));
#ifndef __copy_from_user_inatomic
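The same #ifndef trick is applied to __get_user_fn()/__put_user_fn(): the generic versions now define a macro with their own name, so an architecture can drop in fixed-size fast paths and the fallback disappears. A sketch of the arch-side override (the body is only a placeholder for real per-size code):

/* arch/<foo>/include/asm/uaccess.h -- illustrative only */
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	/* a real arch would special-case size 1/2/4/8 with inline asm here */
	return __copy_from_user(x, ptr, size) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

#include <asm-generic/uaccess.h>	/* generic fallback now compiled out */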
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fad21c927a38..2d94d7413d71 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -85,6 +85,9 @@ struct module;
struct drm_file;
struct drm_device;
+struct device_node;
+struct videomode;
+
#include <drm/drm_os_linux.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
@@ -446,7 +449,15 @@ struct drm_file {
int is_master; /* this file private is a master for a minor */
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
+
+ /**
+ * fbs - List of framebuffers associated with this file.
+ *
+ * Protected by fbs_lock. Note that the fbs list holds a reference on
+ * the fb object to prevent it from disappearing prematurely.
+ */
struct list_head fbs;
+ struct mutex fbs_lock;
wait_queue_head_t event_wait;
struct list_head event_list;
@@ -919,6 +930,14 @@ struct drm_driver {
/* import dmabuf -> GEM */
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+ struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1276,6 +1295,11 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
return ret;
}
+static inline bool drm_modeset_is_locked(struct drm_device *dev)
+{
+ return mutex_is_locked(&dev->mode_config.mutex);
+}
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -1456,6 +1480,12 @@ extern struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
struct drm_cmdline_mode *cmd);
+extern int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode);
+extern int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode,
+ int index);
+
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1540,9 +1570,13 @@ extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);
+extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
+extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 00d78b5161c0..8839b3a24660 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -38,7 +38,8 @@ struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
struct drm_object_properties;
-
+struct drm_file;
+struct drm_clip_rect;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -254,6 +255,10 @@ struct drm_framebuffer {
* userspace perspective.
*/
struct kref refcount;
+ /*
+ * Place on the dev->mode_config.fb_list, access protected by
+ * dev->mode_config.fb_lock.
+ */
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
@@ -390,6 +395,15 @@ struct drm_crtc {
struct drm_device *dev;
struct list_head head;
+ /**
+ * crtc mutex
+ *
+ * This provides a read lock for the overall crtc state (mode, dpms
+ * state, ...) and a write lock for everything which can be updated
+ * without a full modeset (fb, cursor data, ...)
+ */
+ struct mutex mutex;
+
struct drm_mode_object base;
/* framebuffer the connector is currently bound to */
@@ -771,8 +785,18 @@ struct drm_mode_config {
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
+
+
+ /**
+ * fb_lock - mutex to protect fb state
+ *
+ * Besides the global fb list, this also protects the fbs list in the
+ * file_priv.
+ */
+ struct mutex fb_lock;
int num_fb;
struct list_head fb_list;
+
int num_connector;
struct list_head connector_list;
int num_encoder;
@@ -842,6 +866,10 @@ struct drm_prop_enum_list {
char *name;
};
+extern void drm_modeset_lock_all(struct drm_device *dev);
+extern void drm_modeset_unlock_all(struct drm_device *dev);
+extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
@@ -932,10 +960,13 @@ extern void drm_framebuffer_set_object(struct drm_device *dev,
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
+extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id);
extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
@@ -985,6 +1016,7 @@ extern int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_set_config_internal(struct drm_mode_set *set);
extern int drm_mode_setcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getplane(struct drm_device *dev,
@@ -1030,9 +1062,10 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern u8 *drm_find_cea_extension(struct edid *edid);
-extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
+extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
+extern bool drm_rgb_quant_range_selectable(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
@@ -1047,7 +1080,6 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
-extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
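drm_modeset_lock_all()/drm_modeset_unlock_all() take the global mode_config mutex plus the new per-CRTC mutexes in one call, and drm_modeset_is_locked() gives assertions something cheap to check. Roughly how driver code brackets modeset-wide state access (illustrative, not from a specific driver):

struct drm_crtc *crtc;

drm_modeset_lock_all(dev);
WARN_ON(!drm_modeset_is_locked(dev));
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
	/* ... inspect or update per-crtc state safely ... */
}
drm_modeset_unlock_all(dev);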
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 0cac551c5347..5da1b4ae7d84 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -247,6 +247,8 @@ struct edid {
struct drm_encoder;
struct drm_connector;
struct drm_display_mode;
+struct hdmi_avi_infoframe;
+
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -254,4 +256,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode);
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index b0c11a7809bb..8b9cc3671858 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -159,4 +159,24 @@ static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *dri
void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
+void drm_i2c_encoder_commit(struct drm_encoder *encoder);
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+void drm_i2c_encoder_save(struct drm_encoder *encoder);
+void drm_i2c_encoder_restore(struct drm_encoder *encoder);
+
+
#endif
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 76c709837543..4a3fc244301c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -23,5 +23,10 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
+#ifdef CONFIG_DEBUG_FS
+void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
+#endif
+
#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5120b01c2eeb..c09511625a11 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -48,6 +48,18 @@ struct drm_fb_helper_surface_size {
u32 surface_depth;
};
+/**
+ * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
+ * @gamma_set: - Set the given gamma lut register on the given crtc.
+ * @gamma_get: - Read the given gamma lut register on the given crtc, used to
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: - Driver callback to allocate and initialize the fbdev info
+ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ *
+ * Driver callbacks used by the fbdev emulation helper library.
+ */
struct drm_fb_helper_funcs {
void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -65,9 +77,7 @@ struct drm_fb_helper_connector {
struct drm_fb_helper {
struct drm_framebuffer *fb;
- struct drm_framebuffer *saved_fb;
struct drm_device *dev;
- struct drm_display_mode *mode;
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
int connector_count;
@@ -82,9 +92,6 @@ struct drm_fb_helper {
bool delayed_hotplug;
};
-int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
- int preferred_bpp);
-
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *helper, int crtc_count,
int max_conn);
@@ -103,7 +110,6 @@ int drm_fb_helper_setcolreg(unsigned regno,
struct fb_info *info);
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_restore(void);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index f0f6b1af25ad..63397ced9254 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -41,4 +41,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
+#endif
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 3527fb3f75bb..88591ef8fa24 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
{
return mm->hole_stack.next;
}
+
+static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ BUG_ON(!hole_node->hole_follows);
+ return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return list_entry(hole_node->node_list.next,
+ struct drm_mm_node, node_list)->start;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return __drm_mm_hole_node_end(hole_node);
+}
+
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
&(mm)->head_node.node_list, \
node_list)
@@ -99,9 +122,26 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
entry != NULL; entry = next, \
next = entry ? list_entry(entry->node_list.next, \
struct drm_mm_node, node_list) : NULL) \
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+ for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ &entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0; \
+ entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
/*
* Basic range manager support (drm_mm.c)
*/
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic);
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
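drm_mm_for_each_hole() walks hole_stack and, through the comma expressions in the loop condition, assigns hole_start/hole_end on every iteration while staying a single statement. A minimal user built only on the declarations above:

/* Illustrative: dump every free hole of an initialized drm_mm. */
static void example_dump_holes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
		printk(KERN_DEBUG "hole [0x%lx, 0x%lx), %lu bytes\n",
		       hole_start, hole_end, hole_end - hole_start);
}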
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c5c35e629426..a386b0b654cc 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -139,6 +139,19 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 6eb76a1f11ab..b08bdade6002 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,26 +3,8 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-struct intel_gtt {
- /* Size of memory reserved for graphics by the BIOS */
- unsigned int stolen_size;
- /* Total number of gtt entries. */
- unsigned int gtt_total_entries;
- /* Part of the gtt that is mappable by the cpu, for those chips where
- * this is not the full gtt. */
- unsigned int gtt_mappable_entries;
- /* Whether i915 needs to use the dmar apis or not. */
- unsigned int needs_dmar : 1;
- /* Whether we idle the gpu before mapping/unmapping */
- unsigned int do_idle_maps : 1;
- /* Share the scratch page dma with ppgtts. */
- dma_addr_t scratch_page_dma;
- struct page *scratch_page;
- /* for ppgtt PDE access */
- u32 __iomem *gtt;
- /* needed for ioremap in drm/i915 */
- phys_addr_t gma_bus_addr;
-} *intel_gtt_get(void);
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, unsigned long *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..0fbd046e7c93 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
* to make room for a buffer already reserved. (Buffers are reserved before
* they are evicted). The following algorithm prevents such deadlocks from
* occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
* (typically execbuf), should first obtain a unique 32-bit
* validation sequence number,
* and call this function with @use_sequence == 1 and @sequence == the unique
@@ -830,9 +821,39 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence);
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+ bool interruptible,
+ uint32_t sequence);
+
/**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible, uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_nolru:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
@@ -840,9 +861,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
* @use_sequence: If @bo is already reserved, only sleep waiting for
* it to become unreserved if @sequence < (@bo)->sequence.
*
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
* Otherwise identical to ttm_bo_reserve.
*
* Returns:
@@ -855,7 +874,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
* -EDEADLK: Bo already reserved using @sequence. This error code will only
* be returned if @use_sequence is set to true.
*/
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence,
uint32_t sequence);
@@ -879,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
*/
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
- bool interruptible);
-
/*
* ttm_bo_util.c
*/
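The ttm_bo_reserve_slowpath() variants formalize the -EAGAIN backoff described in the comments above: when a multi-buffer reservation loses to a younger sequence number, the caller drops everything it holds and then blocks on the contended buffer, which cannot deadlock because no other reservations are held. A hedged sketch of the execbuf-style retry (list bookkeeping elided):

int ret;

ret = ttm_bo_reserve(bo, true /* interruptible */, false, true, seq);
if (ret == -EAGAIN) {
	/* 1. unreserve every buffer this submission already holds     */
	/* 2. wait for the contended one -- safe, nothing else is held */
	ret = ttm_bo_reserve_slowpath(bo, true, seq);
	/* 3. on success, go back and reserve the remaining buffers    */
}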
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f46cfd73a553..bcbdd7484e58 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -485,14 +485,6 @@ static inline bool acpi_driver_match_device(struct device *dev,
#endif /* !CONFIG_ACPI */
-#ifdef CONFIG_ACPI_NUMA
-void __init early_parse_srat(void);
-#else
-static inline void early_parse_srat(void)
-{
-}
-#endif
-
#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl));
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h
index 91615a389b65..b76ebd08ff8e 100644
--- a/include/linux/acpi_gpio.h
+++ b/include/linux/acpi_gpio.h
@@ -2,10 +2,12 @@
#define _LINUX_ACPI_GPIO_H_
#include <linux/errno.h>
+#include <linux/gpio.h>
#ifdef CONFIG_GPIO_ACPI
int acpi_get_gpio(char *path, int pin);
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
#else /* CONFIG_GPIO_ACPI */
@@ -14,6 +16,8 @@ static inline int acpi_get_gpio(char *path, int pin)
return -ENODEV;
}
+static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
+
#endif /* CONFIG_GPIO_ACPI */
#endif /* _LINUX_ACPI_GPIO_H_ */
diff --git a/arch/arm/include/asm/hardware/pl080.h b/include/linux/amba/pl080.h
index 4eea2107214b..3e7b62fbefbd 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -1,4 +1,4 @@
-/* arch/arm/include/asm/hardware/pl080.h
+/* include/linux/amba/pl080.h
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1d002b58b60b..8390c474f69a 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -528,6 +528,7 @@ struct bcma_sflash {
u32 size;
struct mtd_info *mtd;
+ void *priv;
};
#endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 0530b9860359..c3a09149f793 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -111,7 +111,6 @@ extern int suid_dumpable;
extern int setup_arg_pages(struct linux_binprm * bprm,
unsigned long stack_top,
int executable_stack);
-extern int bprm_mm_init(struct linux_binprm *bprm);
extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94bc83011ed..78feda9bbae2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
+#include <linux/rcupdate.h>
#include <asm/scatterlist.h>
@@ -437,6 +438,7 @@ struct request_queue {
/* Throttle data */
struct throtl_data *td;
#endif
+ struct rcu_head rcu_head;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -974,7 +976,6 @@ struct blk_plug {
unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */
struct list_head cb_list; /* md requires an unplug callback */
- unsigned int should_sort; /* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030e72f1..0ea61e07a91c 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -12,6 +12,7 @@
struct blk_trace {
int trace_state;
+ bool rq_based;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
new file mode 100644
index 000000000000..22d799147db2
--- /dev/null
+++ b/include/linux/btrfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_BTRFS_H
+#define _LINUX_BTRFS_H
+
+#include <uapi/linux/btrfs.h>
+
+#endif /* _LINUX_BTRFS_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 458f497738a4..5afc4f94d110 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -126,7 +126,6 @@ BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
-#define touch_buffer(bh) mark_page_accessed(bh->b_page)
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -142,6 +141,7 @@ BUFFER_FNS(Unwritten, unwritten)
void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
+void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index dad579b0c0e6..76554cecaab2 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -12,16 +12,46 @@
#define CEPH_FEATURE_MONNAMES (1<<5)
#define CEPH_FEATURE_RECONNECT_SEQ (1<<6)
#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
-/* bits 8-17 defined by user-space; not supported yet here */
+#define CEPH_FEATURE_OBJECTLOCATOR (1<<8)
+#define CEPH_FEATURE_PGID64 (1<<9)
+#define CEPH_FEATURE_INCSUBOSDMAP (1<<10)
+#define CEPH_FEATURE_PGPOOL3 (1<<11)
+#define CEPH_FEATURE_OSDREPLYMUX (1<<12)
+#define CEPH_FEATURE_OSDENC (1<<13)
+#define CEPH_FEATURE_OMAP (1<<14)
+#define CEPH_FEATURE_MONENC (1<<15)
+#define CEPH_FEATURE_QUERY_T (1<<16)
+#define CEPH_FEATURE_INDEP_PG_MAP (1<<17)
#define CEPH_FEATURE_CRUSH_TUNABLES (1<<18)
+#define CEPH_FEATURE_CHUNKY_SCRUB (1<<19)
+#define CEPH_FEATURE_MON_NULLROUTE (1<<20)
+#define CEPH_FEATURE_MON_GV (1<<21)
+#define CEPH_FEATURE_BACKFILL_RESERVATION (1<<22)
+#define CEPH_FEATURE_MSG_AUTH (1<<23)
+#define CEPH_FEATURE_RECOVERY_RESERVATION (1<<24)
+#define CEPH_FEATURE_CRUSH_TUNABLES2 (1<<25)
+#define CEPH_FEATURE_CREATEPOOLID (1<<26)
+#define CEPH_FEATURE_REPLY_CREATE_INODE (1<<27)
+#define CEPH_FEATURE_OSD_HBMSGS (1<<28)
+#define CEPH_FEATURE_MDSENC (1<<29)
+#define CEPH_FEATURE_OSDHASHPSPOOL (1<<30)
/*
* Features supported.
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_CRUSH_TUNABLES)
+ (CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_PGID64 | \
+ CEPH_FEATURE_PGPOOL3 | \
+ CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_CRUSH_TUNABLES | \
+ CEPH_FEATURE_CRUSH_TUNABLES2 | \
+ CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_OSDHASHPSPOOL)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR)
+ (CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_PGID64 | \
+ CEPH_FEATURE_PGPOOL3 | \
+ CEPH_FEATURE_OSDENC)
#endif
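The feature words are plain bitmasks exchanged at connect time: SUPPORTED is what this client can speak, REQUIRED is what a peer must offer for the session to proceed. Compatibility comes down to a mask comparison along these lines (a sketch; the real check lives in the messenger code):

static bool example_features_ok(u64 peer_features)
{
	u64 required = CEPH_FEATURES_REQUIRED_DEFAULT;

	/* every required bit must be present in the peer's feature set */
	return (peer_features & required) == required;
}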
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index cf6f4d998a76..2ad7b860f062 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -21,16 +21,14 @@
* internal cluster protocols separately from the public,
* client-facing protocol.
*/
-#define CEPH_OSD_PROTOCOL 8 /* cluster internal */
-#define CEPH_MDS_PROTOCOL 12 /* cluster internal */
-#define CEPH_MON_PROTOCOL 5 /* cluster internal */
#define CEPH_OSDC_PROTOCOL 24 /* server/client */
#define CEPH_MDSC_PROTOCOL 32 /* server/client */
#define CEPH_MONC_PROTOCOL 15 /* server/client */
-#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_ROOT 1
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -51,7 +49,7 @@ struct ceph_file_layout {
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
- __le32 fl_unused; /* unused; used to be preferred primary (-1) */
+ __le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
@@ -101,6 +99,8 @@ struct ceph_dir_layout {
#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
#define CEPH_MSG_AUTH 17
#define CEPH_MSG_AUTH_REPLY 18
+#define CEPH_MSG_MON_GET_VERSION 19
+#define CEPH_MSG_MON_GET_VERSION_REPLY 20
/* client <-> mds */
#define CEPH_MSG_MDS_MAP 21
@@ -221,6 +221,11 @@ struct ceph_mon_subscribe_ack {
} __attribute__ ((packed));
/*
+ * mdsmap flags
+ */
+#define CEPH_MDSMAP_DOWN (1<<0) /* cluster deliberately down */
+
+/*
* mds states
* > 0 -> in
* <= 0 -> out
@@ -233,6 +238,7 @@ struct ceph_mon_subscribe_ack {
#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
+#define CEPH_MDS_STATE_REPLAYONCE -9 /* up, replaying an active node's journal */
#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
@@ -264,6 +270,7 @@ extern const char *ceph_mds_state_name(int s);
#define CEPH_LOCK_IXATTR 2048
#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */
#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
+#define CEPH_LOCK_IPOLICY 16384 /* policy lock on dirs. MDS internal */
/* client_session ops */
enum {
@@ -338,6 +345,12 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_SETATTR_SIZE 32
#define CEPH_SETATTR_CTIME 64
+/*
+ * Ceph setxattr request flags.
+ */
+#define CEPH_XATTR_CREATE 1
+#define CEPH_XATTR_REPLACE 2
+
union ceph_mds_request_args {
struct {
__le32 mask; /* CEPH_CAP_* */
@@ -522,14 +535,17 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
+#define CEPH_CAP_SIMPLE_BITS 2
+#define CEPH_CAP_FILE_BITS 8
+
/* per-lock shift */
#define CEPH_CAP_SAUTH 2
#define CEPH_CAP_SLINK 4
#define CEPH_CAP_SXATTR 6
#define CEPH_CAP_SFILE 8
-#define CEPH_CAP_SFLOCK 20
+#define CEPH_CAP_SFLOCK 20
-#define CEPH_CAP_BITS 22
+#define CEPH_CAP_BITS 22
/* composed values */
#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 63d092822bad..360d9d08ca9e 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -52,10 +52,10 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
return end >= *p && n <= end - *p;
}
-#define ceph_decode_need(p, end, n, bad) \
- do { \
- if (!likely(ceph_has_room(p, end, n))) \
- goto bad; \
+#define ceph_decode_need(p, end, n, bad) \
+ do { \
+ if (!likely(ceph_has_room(p, end, n))) \
+ goto bad; \
} while (0)
#define ceph_decode_64_safe(p, end, v, bad) \
@@ -99,8 +99,8 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
*
* There are two possible failures:
* - converting the string would require accessing memory at or
- * beyond the "end" pointer provided (-E
- * - memory could not be allocated for the result
+ * beyond the "end" pointer provided (-ERANGE)
+ * - memory could not be allocated for the result (-ENOMEM)
*/
static inline char *ceph_extract_encoded_string(void **p, void *end,
size_t *lenp, gfp_t gfp)
@@ -217,10 +217,10 @@ static inline void ceph_encode_string(void **p, void *end,
*p += len;
}
-#define ceph_encode_need(p, end, n, bad) \
- do { \
- if (!likely(ceph_has_room(p, end, n))) \
- goto bad; \
+#define ceph_encode_need(p, end, n, bad) \
+ do { \
+ if (!likely(ceph_has_room(p, end, n))) \
+ goto bad; \
} while (0)
#define ceph_encode_64_safe(p, end, v, bad) \
@@ -231,12 +231,17 @@ static inline void ceph_encode_string(void **p, void *end,
#define ceph_encode_32_safe(p, end, v, bad) \
do { \
ceph_encode_need(p, end, sizeof(u32), bad); \
- ceph_encode_32(p, v); \
+ ceph_encode_32(p, v); \
} while (0)
#define ceph_encode_16_safe(p, end, v, bad) \
do { \
ceph_encode_need(p, end, sizeof(u16), bad); \
- ceph_encode_16(p, v); \
+ ceph_encode_16(p, v); \
+ } while (0)
+#define ceph_encode_8_safe(p, end, v, bad) \
+ do { \
+ ceph_encode_need(p, end, sizeof(u8), bad); \
+ ceph_encode_8(p, v); \
} while (0)
#define ceph_encode_copy_safe(p, end, pv, n, bad) \
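For reference, the *_safe encode helpers (including the new ceph_encode_8_safe) all follow the same bounds-check-then-write pattern: verify room with ceph_has_room() and jump to a caller-supplied error label on overflow. A rough usage sketch, with a hypothetical buffer-filling function that is not part of this patch:

#include <linux/errno.h>
#include <linux/ceph/decode.h>

/* Hypothetical helper: encode a u8 flag followed by a u32 length into a
 * caller-supplied buffer, bailing out if the buffer is too small. */
static int example_encode(void *buf, size_t buf_len, u8 flag, u32 payload_len)
{
        void *p = buf;
        void *end = buf + buf_len;

        ceph_encode_8_safe(&p, end, flag, bad);         /* checks room, then writes */
        ceph_encode_32_safe(&p, end, payload_len, bad);
        return p - buf;                                 /* bytes written */
bad:
        return -ERANGE;                                 /* not enough room */
}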
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 084d3c622b12..29818fc3fa49 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -193,6 +193,8 @@ static inline int calc_pages_for(u64 off, u64 len)
}
/* ceph_common.c */
+extern bool libceph_compatible(void *data);
+
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern struct kmem_cache *ceph_inode_cachep;
@@ -220,7 +222,7 @@ extern int ceph_open_session(struct ceph_client *client);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
-extern struct page **ceph_get_direct_page_vector(const char __user *data,
+extern struct page **ceph_get_direct_page_vector(const void __user *data,
int num_pages,
bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
@@ -228,15 +230,15 @@ extern void ceph_put_page_vector(struct page **pages, int num_pages,
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_copy_user_to_page_vector(struct page **pages,
- const char __user *data,
+ const void __user *data,
loff_t off, size_t len);
-extern int ceph_copy_to_page_vector(struct page **pages,
- const char *data,
+extern void ceph_copy_to_page_vector(struct page **pages,
+ const void *data,
loff_t off, size_t len);
-extern int ceph_copy_from_page_vector(struct page **pages,
- char *data,
+extern void ceph_copy_from_page_vector(struct page **pages,
+ void *data,
loff_t off, size_t len);
-extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
+extern int ceph_copy_page_vector_to_user(struct page **pages, void __user *data,
loff_t off, size_t len);
extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index cb15b5d867c7..87ed09f54800 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -29,8 +29,8 @@ struct ceph_mdsmap {
/* which object pools file data can be stored in */
int m_num_data_pg_pools;
- u32 *m_data_pg_pools;
- u32 m_cas_pg_pool;
+ u64 *m_data_pg_pools;
+ u64 m_cas_pg_pool;
};
static inline struct ceph_entity_addr *
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 14ba5ee738a9..60903e0f665c 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -83,9 +83,11 @@ struct ceph_msg {
struct list_head list_head;
struct kref kref;
+#ifdef CONFIG_BLOCK
struct bio *bio; /* instead of pages/pagelist */
struct bio *bio_iter; /* bio iterator */
int bio_seg; /* current bio segment */
+#endif /* CONFIG_BLOCK */
struct ceph_pagelist *trail; /* the trailing part of the data */
bool front_is_vmalloc;
bool more_to_follow;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index d9b880e977e6..1dd5d466b6f9 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -10,6 +10,7 @@
#include <linux/ceph/osdmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
/*
* Maximum object name size
@@ -22,7 +23,6 @@ struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
struct ceph_authorizer;
-struct ceph_pagelist;
/*
* completion callback for async writepages
@@ -47,6 +47,9 @@ struct ceph_osd {
struct list_head o_keepalive_item;
};
+
+#define CEPH_OSD_MAX_OP 10
+
/* an in-flight request */
struct ceph_osd_request {
u64 r_tid; /* unique for this client */
@@ -63,9 +66,23 @@ struct ceph_osd_request {
struct ceph_connection *r_con_filling_msg;
struct ceph_msg *r_request, *r_reply;
- int r_result;
int r_flags; /* any additional flags for the osd */
u32 r_sent; /* >0 if r_request is sending/sent */
+ int r_num_ops;
+
+ /* encoded message content */
+ struct ceph_osd_op *r_request_ops;
+ /* these are updated on each send */
+ __le32 *r_request_osdmap_epoch;
+ __le32 *r_request_flags;
+ __le64 *r_request_pool;
+ void *r_request_pgid;
+ __le32 *r_request_attempts;
+ struct ceph_eversion *r_request_reassert_version;
+
+ int r_result;
+ int r_reply_op_len[CEPH_OSD_MAX_OP];
+ s32 r_reply_op_result[CEPH_OSD_MAX_OP];
int r_got_reply;
int r_linger;
@@ -82,6 +99,7 @@ struct ceph_osd_request {
char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */
int r_oid_len;
+ u64 r_snapid;
unsigned long r_stamp; /* send OR check time */
struct ceph_file_layout r_file_layout;
@@ -95,7 +113,7 @@ struct ceph_osd_request {
struct bio *r_bio; /* instead of pages */
#endif
- struct ceph_pagelist *r_trail; /* trailing part of the data */
+ struct ceph_pagelist r_trail; /* trailing part of the data */
};
struct ceph_osd_event {
@@ -107,7 +125,6 @@ struct ceph_osd_event {
struct rb_node node;
struct list_head osd_node;
struct kref kref;
- struct completion completion;
};
struct ceph_osd_event_work {
@@ -157,7 +174,7 @@ struct ceph_osd_client {
struct ceph_osd_req_op {
u16 op; /* CEPH_OSD_OP_* */
- u32 flags; /* CEPH_OSD_FLAG_* */
+ u32 payload_len;
union {
struct {
u64 offset, length;
@@ -166,23 +183,24 @@ struct ceph_osd_req_op {
} extent;
struct {
const char *name;
- u32 name_len;
const char *val;
+ u32 name_len;
u32 value_len;
__u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
__u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
} xattr;
struct {
const char *class_name;
- __u8 class_len;
const char *method_name;
- __u8 method_len;
- __u8 argc;
const char *indata;
u32 indata_len;
+ __u8 class_len;
+ __u8 method_len;
+ __u8 argc;
} cls;
struct {
- u64 cookie, count;
+ u64 cookie;
+ u64 count;
} pgls;
struct {
u64 snapid;
@@ -190,12 +208,11 @@ struct ceph_osd_req_op {
struct {
u64 cookie;
u64 ver;
- __u8 flag;
u32 prot_ver;
u32 timeout;
+ __u8 flag;
} watch;
};
- u32 payload_len;
};
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
@@ -207,29 +224,19 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
-extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
- struct ceph_file_layout *layout,
- u64 snapid,
- u64 off, u64 *plen, u64 *bno,
- struct ceph_osd_request *req,
- struct ceph_osd_req_op *op);
-
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
- int flags,
struct ceph_snap_context *snapc,
- struct ceph_osd_req_op *ops,
+ unsigned int num_op,
bool use_mempool,
- gfp_t gfp_flags,
- struct page **pages,
- struct bio *bio);
+ gfp_t gfp_flags);
extern void ceph_osdc_build_request(struct ceph_osd_request *req,
- u64 off, u64 *plen,
+ u64 off, u64 len,
+ unsigned int num_op,
struct ceph_osd_req_op *src_ops,
struct ceph_snap_context *snapc,
- struct timespec *mtime,
- const char *oid,
- int oid_len);
+ u64 snap_id,
+ struct timespec *mtime);
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
@@ -239,8 +246,7 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
int do_sync, u32 truncate_seq,
u64 truncate_size,
struct timespec *mtime,
- bool use_mempool, int num_reply,
- int page_align);
+ bool use_mempool, int page_align);
extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -279,17 +285,13 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
struct timespec *mtime,
- struct page **pages, int nr_pages,
- int flags, int do_sync, bool nofail);
+ struct page **pages, int nr_pages);
/* watch/notify events */
extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
void (*event_cb)(u64, u64, u8, void *),
- int one_shot, void *data,
- struct ceph_osd_event **pevent);
+ void *data, struct ceph_osd_event **pevent);
extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
-extern int ceph_osdc_wait_event(struct ceph_osd_event *event,
- unsigned long timeout);
extern void ceph_osdc_put_event(struct ceph_osd_event *event);
#endif
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 10a417f9f76f..c819190d1642 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -18,14 +18,31 @@
* The map can be updated either via an incremental map (diff) describing
* the change between two successive epochs, or as a fully encoded map.
*/
+struct ceph_pg {
+ uint64_t pool;
+ uint32_t seed;
+};
+
+#define CEPH_POOL_FLAG_HASHPSPOOL 1
+
struct ceph_pg_pool_info {
struct rb_node node;
- int id;
- struct ceph_pg_pool v;
- int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+ s64 id;
+ u8 type;
+ u8 size;
+ u8 crush_ruleset;
+ u8 object_hash;
+ u32 pg_num, pgp_num;
+ int pg_num_mask, pgp_num_mask;
+ u64 flags;
char *name;
};
+struct ceph_object_locator {
+ uint64_t pool;
+ char *key;
+};
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -110,15 +127,16 @@ extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
- u64 off, u64 *plen,
+ u64 off, u64 len,
u64 *bno, u64 *oxoff, u64 *oxlen);
/* calculate mapping of object to a placement group */
-extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
+extern int ceph_calc_object_layout(struct ceph_pg *pg,
const char *oid,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap);
-extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
+extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
+ struct ceph_pg pgid,
int *acting);
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
struct ceph_pg pgid);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 2c04afeead1c..68c96a508ac2 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -9,14 +9,6 @@
#include <linux/ceph/msgr.h>
/*
- * osdmap encoding versions
- */
-#define CEPH_OSDMAP_INC_VERSION 5
-#define CEPH_OSDMAP_INC_VERSION_EXT 6
-#define CEPH_OSDMAP_VERSION 5
-#define CEPH_OSDMAP_VERSION_EXT 6
-
-/*
* fs id
*/
struct ceph_fsid {
@@ -64,7 +56,7 @@ struct ceph_timespec {
* placement group.
* we encode this into one __le64.
*/
-struct ceph_pg {
+struct ceph_pg_v1 {
__le16 preferred; /* preferred primary osd */
__le16 ps; /* placement seed */
__le32 pool; /* object pool */
@@ -91,21 +83,6 @@ struct ceph_pg {
#define CEPH_PG_TYPE_REP 1
#define CEPH_PG_TYPE_RAID4 2
-#define CEPH_PG_POOL_VERSION 2
-struct ceph_pg_pool {
- __u8 type; /* CEPH_PG_TYPE_* */
- __u8 size; /* number of osds in each pg */
- __u8 crush_ruleset; /* crush placement rule */
- __u8 object_hash; /* hash mapping object name to ps */
- __le32 pg_num, pgp_num; /* number of pg's */
- __le32 lpg_num, lpgp_num; /* number of localized pg's */
- __le32 last_change; /* most recent epoch changed */
- __le64 snap_seq; /* seq for per-pool snapshot */
- __le32 snap_epoch; /* epoch of last snap */
- __le32 num_snaps;
- __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */
- __le64 auid; /* who owns the pg */
-} __attribute__ ((packed));
/*
* stable_mod func is used to control number of placement groups.
@@ -128,7 +105,7 @@ static inline int ceph_stable_mod(int x, int b, int bmask)
* object layout - how a given object should be stored.
*/
struct ceph_object_layout {
- struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */
+ struct ceph_pg_v1 ol_pgid; /* raw pg, with _full_ ps precision. */
__le32 ol_stripe_unit; /* for per-object parity, if any */
} __attribute__ ((packed));
@@ -145,8 +122,12 @@ struct ceph_eversion {
*/
/* status bits */
-#define CEPH_OSD_EXISTS 1
-#define CEPH_OSD_UP 2
+#define CEPH_OSD_EXISTS (1<<0)
+#define CEPH_OSD_UP (1<<1)
+#define CEPH_OSD_AUTOOUT (1<<2) /* osd was automatically marked out */
+#define CEPH_OSD_NEW (1<<3) /* osd is new, never marked in */
+
+extern const char *ceph_osd_state_name(int s);
/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
#define CEPH_OSD_IN 0x10000
@@ -161,9 +142,25 @@ struct ceph_eversion {
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
+#define CEPH_OSDMAP_NOUP (1<<5) /* block osd boot */
+#define CEPH_OSDMAP_NODOWN (1<<6) /* block osd mark-down/failure */
+#define CEPH_OSDMAP_NOOUT (1<<7) /* block osd auto mark-out */
+#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */
+#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
+#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+
+/*
+ * The error code to return when an OSD can't handle a write
+ * because it is too large.
+ */
+#define OSD_WRITETOOBIG EMSGSIZE
/*
* osd ops
+ *
+ * WARNING: do not use these op codes directly. Use the helpers
+ * defined below instead. In certain cases, op code behavior was
+ * redefined, resulting in special-cases in the helpers.
*/
#define CEPH_OSD_OP_MODE 0xf000
#define CEPH_OSD_OP_MODE_RD 0x1000
@@ -177,6 +174,7 @@ struct ceph_eversion {
#define CEPH_OSD_OP_TYPE_ATTR 0x0300
#define CEPH_OSD_OP_TYPE_EXEC 0x0400
#define CEPH_OSD_OP_TYPE_PG 0x0500
+#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
enum {
/** data **/
@@ -217,6 +215,23 @@ enum {
CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
+ /* omap */
+ CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
+ CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
+ CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
+ CEPH_OSD_OP_OMAPGETVALSBYKEYS =
+ CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
+ CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
+ CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
+ CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
+ CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
+ CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
+
+ /** multi **/
+ CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
+ CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
+ CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
+
/** attrs **/
/* read */
CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
@@ -238,6 +253,7 @@ enum {
CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
+ CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9,
/** lock **/
CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
@@ -248,10 +264,12 @@ enum {
CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
/** exec **/
+ /* note: the RD bit here is wrong; see special-case below in helper */
CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
/** pg **/
CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
+ CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
};
static inline int ceph_osd_op_type_lock(int op)
@@ -274,6 +292,10 @@ static inline int ceph_osd_op_type_pg(int op)
{
return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
}
+static inline int ceph_osd_op_type_multi(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI;
+}
static inline int ceph_osd_op_mode_subop(int op)
{
@@ -281,11 +303,12 @@ static inline int ceph_osd_op_mode_subop(int op)
}
static inline int ceph_osd_op_mode_read(int op)
{
- return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
+ return (op & CEPH_OSD_OP_MODE_RD) &&
+ op != CEPH_OSD_OP_CALL;
}
static inline int ceph_osd_op_mode_modify(int op)
{
- return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
+ return op & CEPH_OSD_OP_MODE_WR;
}
/*
@@ -294,34 +317,38 @@ static inline int ceph_osd_op_mode_modify(int op)
*/
#define CEPH_OSD_TMAP_HDR 'h'
#define CEPH_OSD_TMAP_SET 's'
+#define CEPH_OSD_TMAP_CREATE 'c' /* create key */
#define CEPH_OSD_TMAP_RM 'r'
+#define CEPH_OSD_TMAP_RMSLOPPY 'R'
extern const char *ceph_osd_op_name(int op);
-
/*
* osd op flags
*
* An op may be READ, WRITE, or READ|WRITE.
*/
enum {
- CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */
- CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */
- CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */
- CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */
- CEPH_OSD_FLAG_READ = 16, /* op may read */
- CEPH_OSD_FLAG_WRITE = 32, /* op may write */
- CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */
- CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */
- CEPH_OSD_FLAG_BALANCE_READS = 256,
- CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
- CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
- CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
- CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */
+ CEPH_OSD_FLAG_ACK = 0x0001, /* want (or is) "ack" ack */
+ CEPH_OSD_FLAG_ONNVRAM = 0x0002, /* want (or is) "onnvram" ack */
+ CEPH_OSD_FLAG_ONDISK = 0x0004, /* want (or is) "ondisk" ack */
+ CEPH_OSD_FLAG_RETRY = 0x0008, /* resend attempt */
+ CEPH_OSD_FLAG_READ = 0x0010, /* op may read */
+ CEPH_OSD_FLAG_WRITE = 0x0020, /* op may write */
+ CEPH_OSD_FLAG_ORDERSNAP = 0x0040, /* EOLDSNAP if snapc is out of order */
+ CEPH_OSD_FLAG_PEERSTAT_OLD = 0x0080, /* DEPRECATED msg includes osd_peer_stat */
+ CEPH_OSD_FLAG_BALANCE_READS = 0x0100,
+ CEPH_OSD_FLAG_PARALLELEXEC = 0x0200, /* execute op in parallel */
+ CEPH_OSD_FLAG_PGOP = 0x0400, /* pg op, no object */
+ CEPH_OSD_FLAG_EXEC = 0x0800, /* op may exec */
+ CEPH_OSD_FLAG_EXEC_PUBLIC = 0x1000, /* DEPRECATED op may exec (public) */
+ CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000, /* read from nearby replica, if any */
+ CEPH_OSD_FLAG_RWORDERED = 0x4000, /* order wrt concurrent reads */
};
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
+ CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@@ -381,48 +408,13 @@ struct ceph_osd_op {
__le64 ver;
__u8 flag; /* 0 = unwatch, 1 = watch */
} __attribute__ ((packed)) watch;
-};
+ struct {
+ __le64 offset, length;
+ __le64 src_offset;
+ } __attribute__ ((packed)) clonerange;
+ };
__le32 payload_len;
} __attribute__ ((packed));
-/*
- * osd request message header. each request may include multiple
- * ceph_osd_op object operations.
- */
-struct ceph_osd_request_head {
- __le32 client_inc; /* client incarnation */
- struct ceph_object_layout layout; /* pgid */
- __le32 osdmap_epoch; /* client's osdmap epoch */
-
- __le32 flags;
-
- struct ceph_timespec mtime; /* for mutations only */
- struct ceph_eversion reassert_version; /* if we are replaying op */
-
- __le32 object_len; /* length of object name */
-
- __le64 snapid; /* snapid to read */
- __le64 snap_seq; /* writer's snap context */
- __le32 num_snaps;
-
- __le16 num_ops;
- struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */
-} __attribute__ ((packed));
-
-struct ceph_osd_reply_head {
- __le32 client_inc; /* client incarnation */
- __le32 flags;
- struct ceph_object_layout layout;
- __le32 osdmap_epoch;
- struct ceph_eversion reassert_version; /* for replaying uncommitted */
-
- __le32 result; /* result code */
-
- __le32 object_len; /* length of object name */
- __le32 num_ops;
- struct ceph_osd_op ops[0]; /* ops[], object */
-} __attribute__ ((packed));
-
-
#endif
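A quick sanity sketch (the op and helper names come from the definitions above; the checking function itself is hypothetical) of why callers should go through the mode helpers rather than masking op codes by hand: CEPH_OSD_OP_CALL carries the RD mode bit but is deliberately reported as not-a-read.

#include <linux/bug.h>
#include <linux/ceph/rados.h>

/* Hypothetical self-checks; not part of this patch. */
static void example_op_mode_checks(void)
{
        /* CALL is special-cased: the RD bit is set but it may modify state. */
        WARN_ON(ceph_osd_op_mode_read(CEPH_OSD_OP_CALL));
        WARN_ON(!ceph_osd_op_mode_modify(CEPH_OSD_OP_WRITE));
        /* the new MULTI type is recognised by its own helper */
        WARN_ON(!ceph_osd_op_type_multi(CEPH_OSD_OP_CLONERANGE));
}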
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 8031d6eef102..5b8721efa948 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -34,7 +34,7 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid,
const char *name, int length, int *type,
struct CodaFid *resfid);
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- vuid_t uid);
+ kuid_t uid);
int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
struct file **f);
int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index de095b0462a7..76a87fb57ac2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -359,6 +359,7 @@ asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);

asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
@@ -535,6 +536,8 @@ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51494e6b5548..33f0280fd533 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -77,10 +77,13 @@ static inline void init_completion(struct completion *x)
}
extern void wait_for_completion(struct completion *);
+extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
+extern unsigned long wait_for_completion_io_timeout(struct completion *x,
+ unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
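The new _io variants behave like the plain uninterruptible waits but account the sleep as I/O wait, which matters for block-layer callers. A rough sketch, assuming a completion signalled from some hypothetical I/O completion path:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical caller: wait up to five seconds for an in-flight block
 * operation, with the sleep charged to iowait. */
static int example_wait_for_io(struct completion *done)
{
        unsigned long left;

        left = wait_for_completion_io_timeout(done, msecs_to_jiffies(5000));
        return left ? 0 : -ETIMEDOUT;   /* 0 jiffies left means timeout */
}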
diff --git a/include/linux/console.h b/include/linux/console.h
index 3b709da1786e..29680a8cda99 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
int con_is_bound(const struct consw *csw);
int register_con_driver(const struct consw *csw, int first, int last);
int unregister_con_driver(const struct consw *csw);
+int do_unregister_con_driver(const struct consw *csw);
int take_over_console(const struct consw *sw, int first, int last, int deflt);
+int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
#ifdef CONFIG_HW_CONSOLE
int con_debug_enter(struct vc_data *vc);
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 25baa287cff7..6a1101f24cfb 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -162,6 +162,8 @@ struct crush_map {
__u32 choose_local_fallback_tries;
/* choose attempts before giving up */
__u32 choose_total_tries;
+ /* attempt chooseleaf inner descent once; on failure retry outer descent */
+ __u32 chooseleaf_descend_once;
};
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c1754b59ddd3..1a6bb81f0fe5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -145,6 +145,7 @@ enum dentry_d_lock_class
struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
@@ -192,6 +193,8 @@ struct dentry_operations {
#define DCACHE_GENOCIDE 0x0200
#define DCACHE_SHRINK_LIST 0x0400
+#define DCACHE_OP_WEAK_REVALIDATE 0x0800
+
#define DCACHE_NFSFS_RENAMED 0x1000
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
@@ -293,9 +296,9 @@ extern void d_move(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
/* appendix may either be NULL or be used for transname suffixes */
-extern struct dentry *d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name,
unsigned *seq, struct inode *inode);
@@ -333,7 +336,6 @@ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
-extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);
extern char *dentry_path(struct dentry *, char *, int);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 3bd46f766751..a975de1ff59f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
#else
static inline void debug_show_all_locks(void)
{
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
}
static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
{
}
#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index bf6afa2fc432..1e483fa7afb4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -68,8 +68,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
-typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+ unsigned status_flags, char *result, unsigned maxlen);
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
@@ -175,6 +175,14 @@ struct target_type {
#define DM_TARGET_IMMUTABLE 0x00000004
#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
+/*
+ * Some targets need to be sent the same WRITE bio several times so
+ * that they can send copies of it to different devices. This function
+ * examines any supplied bio and returns the number of copies of it the
+ * target requires.
+ */
+typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -187,26 +195,26 @@ struct dm_target {
uint32_t max_io_len;
/*
- * A number of zero-length barrier requests that will be submitted
+ * A number of zero-length barrier bios that will be submitted
* to the target for the purpose of flushing cache.
*
- * The request number can be accessed with dm_bio_get_target_request_nr.
- * It is a responsibility of the target driver to remap these requests
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ * It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_requests;
+ unsigned num_flush_bios;
/*
- * The number of discard requests that will be submitted to the target.
- * The request number can be accessed with dm_bio_get_target_request_nr.
+ * The number of discard bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_requests;
+ unsigned num_discard_bios;
/*
- * The number of WRITE SAME requests that will be submitted to the target.
- * The request number can be accessed with dm_bio_get_target_request_nr.
+ * The number of WRITE SAME bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_same_requests;
+ unsigned num_write_same_bios;
/*
* The minimum number of extra bytes allocated in each bio for the
@@ -214,6 +222,13 @@ struct dm_target {
*/
unsigned per_bio_data_size;
+ /*
+ * If defined, this function is called to find out how many
+ * duplicate bios should be sent to the target when writing
+ * data.
+ */
+ dm_num_write_bios_fn num_write_bios;
+
/* target specific data */
void *private;
@@ -233,10 +248,10 @@ struct dm_target {
bool discards_supported:1;
/*
- * Set if the target required discard request to be split
+ * Set if the target required discard bios to be split
* on max_io_len boundary.
*/
- bool split_discard_requests:1;
+ bool split_discard_bios:1;
/*
* Set if this target does not return zeroes on discarded blocks.
@@ -261,7 +276,7 @@ struct dm_target_io {
struct dm_io *io;
struct dm_target *ti;
union map_info info;
- unsigned target_request_nr;
+ unsigned target_bio_nr;
struct bio clone;
};
@@ -275,9 +290,9 @@ static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}
-static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
+static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
- return container_of(bio, struct dm_target_io, clone)->target_request_nr;
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
int dm_register_target(struct target_type *t);
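A rough sketch of how a target might use the new hook and accessor together (the target and its leg count are hypothetical; the map signature is the bio-based one used by this series):

#include <linux/device-mapper.h>

#define EXAMPLE_NR_LEGS 2

/* Assigned as ti->num_write_bios in the target's ctr: ask dm core to
 * clone every WRITE once per leg. */
static unsigned example_num_write_bios(struct dm_target *ti, struct bio *bio)
{
        return EXAMPLE_NR_LEGS;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
        unsigned leg = dm_bio_get_target_bio_nr(bio);   /* 0 .. EXAMPLE_NR_LEGS-1 */

        /* ... remap bio to the device backing leg 'leg' ... */
        (void)leg;
        return DM_MAPIO_REMAPPED;
}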
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 47d9d376e4e7..f486d636b82e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -21,11 +21,34 @@
#define DM_KCOPYD_IGNORE_ERROR 1
+struct dm_kcopyd_throttle {
+ unsigned throttle;
+ unsigned num_io_jobs;
+ unsigned io_period;
+ unsigned total_period;
+ unsigned last_jiffies;
+};
+
+/*
+ * kcopyd clients that want to support throttling must pass an initialised
+ * dm_kcopyd_throttle struct into dm_kcopyd_client_create().
+ * Two or more clients may share the same instance of this struct between
+ * them if they wish to be throttled as a group.
+ *
+ * This macro also creates a corresponding module parameter to configure
+ * the amount of throttling.
+ */
+#define DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(name, description) \
+static struct dm_kcopyd_throttle dm_kcopyd_throttle = { 100, 0, 0, 0, 0 }; \
+module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644); \
+MODULE_PARM_DESC(name, description)
+
/*
* To use kcopyd you must first create a dm_kcopyd_client object.
+ * throttle can be NULL if you don't want any throttling.
*/
struct dm_kcopyd_client;
-struct dm_kcopyd_client *dm_kcopyd_client_create(void);
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
/*
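A rough sketch of the intended pairing (the parameter name and description string are hypothetical): declare the module-parameter-backed throttle, then hand the struct it defines to dm_kcopyd_client_create(); passing NULL instead disables throttling.

#include <linux/module.h>
#include <linux/dm-kcopyd.h>

/* Defines a static 'dm_kcopyd_throttle' plus a module parameter. */
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(example_copy_throttle,
        "A percentage of time allocated for copying");

static struct dm_kcopyd_client *example_client_create(void)
{
        return dm_kcopyd_client_create(&dm_kcopyd_throttle);
}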
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3d754a394e92..9978b614a1aa 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -119,8 +119,10 @@ struct dma_buf {
struct file *file;
struct list_head attachments;
const struct dma_buf_ops *ops;
- /* mutex to serialize list manipulation and attach/detach */
+ /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
struct mutex lock;
+ unsigned vmapping_counter;
+ void *vmap_ptr;
void *priv;
};
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3201e438d16..f5939999cb65 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan,
enum dma_ctrl_cmd cmd,
unsigned long arg)
{
- return chan->device->device_control(chan, cmd, arg);
+ if (chan->device->device_control)
+ return chan->device->device_control(chan, cmd, arg);
+
+ return -ENOSYS;
}
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
(unsigned long)config);
}
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
struct dma_chan *chan, dma_addr_t buf, size_t len,
enum dma_transfer_direction dir, unsigned long flags)
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
period_len, dir, flags, NULL);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}
-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
- return min_t(int, DMA_TX_TYPE_END,
- find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
- return min_t(int, DMA_TX_TYPE_END,
- find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
-}
-
#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
}
#define for_each_dma_cap_mask(cap, mask) \
- for ((cap) = first_dma_cap(mask); \
- (cap) < DMA_TX_TYPE_END; \
- (cap) = next_dma_cap((cap), (mask)))
+ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
/**
* dma_async_issue_pending - flush pending transactions to HW
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
chan->device->device_issue_pending(chan);
}
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
/**
* dma_async_is_tx_complete - poll for transaction completion
* @chan: DMA channel
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
return status;
}
-#define dma_async_memcpy_complete(chan, cookie, last, used)\
- dma_async_is_tx_complete(chan, cookie, last, used)
-
/**
* dma_async_is_complete - test a cookie against chan state
* @cookie: transaction identifier to test status of
* @last_complete: last know completed transaction
* @last_used: last cookie value handed out
*
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
* the test logic is separated for lightweight testing of multiple cookies
*/
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
{
return NULL;
}
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
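A rough sketch of the new slave-channel request path (the channel name, FIFO address and burst size are hypothetical); note that dmaengine_device_control() now returns -ENOSYS instead of dereferencing a missing hook, so the configuration error path below is reachable:

#include <linux/dmaengine.h>

static struct dma_chan *example_setup_tx_chan(struct device *dev,
                                              dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_chan *chan;

        chan = dma_request_slave_channel(dev, "tx");    /* NULL if not found */
        if (!chan)
                return NULL;

        if (dmaengine_slave_config(chan, &cfg) < 0) {   /* -ENOSYS if unsupported */
                dma_release_channel(chan);
                return NULL;
        }
        return chan;
}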
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index e1c8c9e919ac..41766de66e33 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -15,14 +15,38 @@
#include <linux/dmaengine.h>
/**
+ * struct dw_dma_slave - Controller-specific information about a slave
+ *
+ * @dma_dev: required DMA master device. Deprecated.
+ * @bus_id: name of this device channel, not just a device name since
+ * devices may have more than one channel e.g. "foo_tx"
+ * @cfg_hi: Platform-specific initializer for the CFG_HI register
+ * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ */
+struct dw_dma_slave {
+ struct device *dma_dev;
+ const char *bus_id;
+ u32 cfg_hi;
+ u32 cfg_lo;
+ u8 src_master;
+ u8 dst_master;
+};
+
+/**
* struct dw_dma_platform_data - Controller configuration parameters
* @nr_channels: Number of channels supported by hardware (max 8)
 * @is_private: The device channels should be marked as private and not for
 * use by the general purpose DMA channel allocator.
+ * @chan_allocation_order: Allocate channels starting from 0 or 7
+ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
* @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ * @sd: slave specific data. Used for configuring channels
+ * @sd_count: count of slave data structures passed.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
@@ -36,6 +60,9 @@ struct dw_dma_platform_data {
unsigned short block_size;
unsigned char nr_masters;
unsigned char data_width[4];
+
+ struct dw_dma_slave *sd;
+ unsigned int sd_count;
};
/* bursts size */
@@ -50,23 +77,6 @@ enum dw_dma_msize {
DW_DMA_MSIZE_256,
};
-/**
- * struct dw_dma_slave - Controller-specific information about a slave
- *
- * @dma_dev: required DMA master device
- * @cfg_hi: Platform-specific initializer for the CFG_HI register
- * @cfg_lo: Platform-specific initializer for the CFG_LO register
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
- */
-struct dw_dma_slave {
- struct device *dma_dev;
- u32 cfg_hi;
- u32 cfg_lo;
- u8 src_master;
- u8 dst_master;
-};
-
/* Platform-configurable bits in CFG_HI */
#define DWC_CFGH_FCMODE (1 << 0)
#define DWC_CFGH_FIFO_MODE (1 << 1)
@@ -104,5 +114,6 @@ void dw_dma_cyclic_stop(struct dma_chan *chan);
dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+bool dw_dma_generic_filter(struct dma_chan *chan, void *param);
#endif /* DW_DMAC_H */
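A rough sketch of board code feeding the relocated dw_dma_slave entries to the controller through the new sd/sd_count fields (all names and numbers are hypothetical):

#include <linux/kernel.h>
#include <linux/dw_dmac.h>

static struct dw_dma_slave example_slaves[] = {
        { .bus_id = "example_tx", .src_master = 0, .dst_master = 1 },
        { .bus_id = "example_rx", .src_master = 1, .dst_master = 0 },
};

static struct dw_dma_platform_data example_dw_pdata = {
        .nr_channels    = 8,
        .is_private     = true,
        .block_size     = 4095,
        .nr_masters     = 2,
        .data_width     = { 3, 3 },     /* two 64-bit AHB masters */
        .sd             = example_slaves,
        .sd_count       = ARRAY_SIZE(example_slaves),
};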
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 1b8c02b36f76..4fd4999ccb5b 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -14,7 +14,6 @@
#include <linux/atomic.h>
#include <linux/device.h>
-#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -48,8 +47,17 @@ static inline void opstate_init(void)
return;
}
+/* Max length of a DIMM label*/
#define EDAC_MC_LABEL_LEN 31
-#define MC_PROC_NAME_MAX_LEN 7
+
+/* Maximum size of the location string */
+#define LOCATION_SIZE 80
+
+/* Defines the maximum number of labels that can be reported */
+#define EDAC_MAX_LABELS 8
+
+/* String used to join two or more labels */
+#define OTHER_LABEL " or "
/**
* enum dev_type - describe the type of memory DRAM chips used at the stick
@@ -101,8 +109,24 @@ enum hw_event_mc_err_type {
HW_EVENT_ERR_CORRECTED,
HW_EVENT_ERR_UNCORRECTED,
HW_EVENT_ERR_FATAL,
+ HW_EVENT_ERR_INFO,
};
+static inline char *mc_event_error_type(const unsigned int err_type)
+{
+ switch (err_type) {
+ case HW_EVENT_ERR_CORRECTED:
+ return "Corrected";
+ case HW_EVENT_ERR_UNCORRECTED:
+ return "Uncorrected";
+ case HW_EVENT_ERR_FATAL:
+ return "Fatal";
+ default:
+ case HW_EVENT_ERR_INFO:
+ return "Info";
+ }
+}
+
/**
* enum mem_type - memory types. For a more detailed reference, please see
* http://en.wikipedia.org/wiki/DRAM
@@ -376,6 +400,9 @@ enum scrub_type {
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
* @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
* @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped
+ * as a single memory area. This is used when
+ * retrieving errors from a firmware driven driver.
*
* This enum is used by the drivers to tell edac_mc_sysfs what name should
* be used when describing a memory stick location.
@@ -385,6 +412,7 @@ enum edac_mc_layer_type {
EDAC_MC_LAYER_CHANNEL,
EDAC_MC_LAYER_SLOT,
EDAC_MC_LAYER_CHIP_SELECT,
+ EDAC_MC_LAYER_ALL_MEM,
};
/**
@@ -551,6 +579,46 @@ struct errcount_attribute_data {
int layer0, layer1, layer2;
};
+/**
+ * edac_raw_error_desc - Raw error report structure
+ * @grain: minimum granularity for an error report, in bytes
+ * @error_count: number of errors of the same type
+ * @top_layer: top layer of the error (layer[0])
+ * @mid_layer: middle layer of the error (layer[1])
+ * @low_layer: low layer of the error (layer[2])
+ * @page_frame_number: page where the error happened
+ * @offset_in_page: page offset
+ * @syndrome: syndrome of the error (or 0 if unknown or if
+ * the syndrome is not applicable)
+ * @msg: error message
+ * @location: location of the error
+ * @label: label of the affected DIMM(s)
+ * @other_detail: other driver-specific detail about the error
+ * @enable_per_layer_report: if false, the error affects all layers
+ * (typically, a memory controller error)
+ */
+struct edac_raw_error_desc {
+ /*
+ * NOTE: everything before grain won't be cleaned by
+ * edac_raw_error_desc_clean()
+ */
+ char location[LOCATION_SIZE];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
+ long grain;
+
+ /* the vars below and grain will be cleaned on every new error report */
+ u16 error_count;
+ int top_layer;
+ int mid_layer;
+ int low_layer;
+ unsigned long page_frame_number;
+ unsigned long offset_in_page;
+ unsigned long syndrome;
+ const char *msg;
+ const char *other_detail;
+ bool enable_per_layer_report;
+};
+
/* MEMORY controller information structure
*/
struct mem_ctl_info {
@@ -630,7 +698,6 @@ struct mem_ctl_info {
const char *mod_ver;
const char *ctl_name;
const char *dev_name;
- char proc_name[MC_PROC_NAME_MAX_LEN + 1];
void *pvt_info;
unsigned long start_time; /* mci load start time (in jiffies) */
@@ -659,6 +726,12 @@ struct mem_ctl_info {
/* work struct for this MC */
struct delayed_work work;
+ /*
+ * Used to report an error - by being at the global struct
+ * makes the memory allocated by the EDAC core
+ */
+ struct edac_raw_error_desc error_desc;
+
/* the internal state of this controller instance */
int op_state;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 186620631750..acd0312d46fb 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -2,6 +2,7 @@
#define _LINUX_ELEVATOR_H
#include <linux/percpu.h>
+#include <linux/hashtable.h>
#ifdef CONFIG_BLOCK
@@ -96,6 +97,8 @@ struct elevator_type
struct list_head list;
};
+#define ELV_HASH_BITS 6
+
/*
* each queue has an elevator_queue associated with it
*/
@@ -105,8 +108,8 @@ struct elevator_queue
void *elevator_data;
struct kobject kobj;
struct mutex sysfs_lock;
- struct hlist_head *hash;
unsigned int registered:1;
+ DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
/*
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 8c9048e33463..40a3c0e01b2b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -10,6 +10,10 @@
Override in asm/elf.h as needed. */
# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
#endif
+#ifndef SET_PERSONALITY
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+#endif
#if ELF_CLASS == ELFCLASS32
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 3c3ef19a625a..cf5d2af61b81 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,7 +13,7 @@
#include <linux/wait.h>
/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
* new flags, since they might collide with O_* ones. We want
* to re-use O_* flags that couldn't possibly have a meaning
* from eventfd, in order to leave a free define-space for
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c7a95714b1fe..58b98606ac26 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -19,6 +19,8 @@ struct vm_area_struct;
struct fb_info;
struct device;
struct file;
+struct videomode;
+struct device_node;
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
@@ -714,6 +716,12 @@ extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
+extern int of_get_fb_videomode(struct device_node *np,
+ struct fb_videomode *fb,
+ int index);
+extern int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode);
+
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
extern void fb_var_to_videomode(struct fb_videomode *mode,
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e70df40d84f6..043a5cf8b5ba 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED
+#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
@@ -48,6 +49,8 @@ extern void thaw_kernel_threads(void);
static inline bool try_to_freeze(void)
{
+ if (!(current->flags & PF_NOFREEZE))
+ debug_check_no_locks_held();
might_sleep();
if (likely(!freezing(current)))
return false;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7d2e893ec3d1..4e686a099465 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -769,7 +769,6 @@ struct file {
} f_u;
struct path f_path;
#define f_dentry f_path.dentry
-#define f_vfsmnt f_path.mnt
const struct file_operations *f_op;
/*
@@ -1807,7 +1806,6 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
-#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
@@ -2217,6 +2215,11 @@ static inline bool execute_ok(struct inode *inode)
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
+static inline struct inode *file_inode(struct file *f)
+{
+ return f->f_path.dentry->d_inode;
+}
+
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
@@ -2239,7 +2242,7 @@ static inline int get_write_access(struct inode *inode)
}
static inline int deny_write_access(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
}
static inline void put_write_access(struct inode * inode)
@@ -2249,7 +2252,7 @@ static inline void put_write_access(struct inode * inode)
static inline void allow_write_access(struct file *file)
{
if (file)
- atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+ atomic_inc(&file_inode(file)->i_writecount);
}
#ifdef CONFIG_IMA
static inline void i_readcount_dec(struct inode *inode)
@@ -2274,6 +2277,7 @@ static inline void i_readcount_inc(struct inode *inode)
extern int do_pipe_flags(int *, int);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
@@ -2463,7 +2467,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
-extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int vfs_getattr(struct path *, struct kstat *);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
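The new file_inode() accessor is the preferred spelling for the open-coded file->f_path.dentry->d_inode chains being converted throughout this series; a trivial hypothetical use:

#include <linux/fs.h>

/* Hypothetical check: true if the open file refers to a regular file. */
static bool example_is_regular(struct file *file)
{
        return S_ISREG(file_inode(file)->i_mode);
}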
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 0fbfb4646d1b..a78680a92dba 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -244,7 +244,7 @@ static inline void fsnotify_open(struct file *file)
static inline void fsnotify_close(struct file *file)
{
struct path *path = &file->f_path;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 79b8bba19363..9f3c275e053e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -231,6 +231,12 @@ static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
}
}
+static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+ part_pack_uuid(uuid_str, to);
+ return 0;
+}
+
static inline int disk_max_parts(struct gendisk *disk)
{
if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -718,6 +724,10 @@ static inline dev_t blk_lookup_devt(const char *name, int partno)
return devt;
}
+static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index bfe665621536..f6c7ae3e223b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -94,24 +94,12 @@ static inline int gpio_request(unsigned gpio, const char *label)
return -ENOSYS;
}
-static inline int devm_gpio_request(struct device *dev, unsigned gpio,
- const char *label)
-{
- return -ENOSYS;
-}
-
static inline int gpio_request_one(unsigned gpio,
unsigned long flags, const char *label)
{
return -ENOSYS;
}
-static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
- unsigned long flags, const char *label)
-{
- return -ENOSYS;
-}
-
static inline int gpio_request_array(const struct gpio *array, size_t num)
{
return -ENOSYS;
@@ -125,14 +113,6 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void devm_gpio_free(struct device *dev, unsigned gpio)
-{
- might_sleep();
-
- /* GPIO can never have been requested */
- WARN_ON(1);
-}
-
static inline void gpio_free_array(const struct gpio *array, size_t num)
{
might_sleep();
@@ -248,4 +228,12 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip)
#endif /* ! CONFIG_GENERIC_GPIO */
+struct device;
+
+/* bindings for managed devices that want to request gpios */
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label);
+void devm_gpio_free(struct device *dev, unsigned int gpio);
+
#endif /* __LINUX_GPIO_H */
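With the stubs dropped, devm_gpio_request()/devm_gpio_request_one() are declared unconditionally; a rough probe-time sketch (GPIO number, flags and label are hypothetical), relying on devres so no explicit gpio_free() is needed on error or remove paths:

#include <linux/device.h>
#include <linux/gpio.h>

static int example_probe_gpio(struct device *dev, unsigned gpio)
{
        int ret;

        ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW,
                                    "example-reset");
        if (ret)
                dev_err(dev, "failed to claim gpio %u: %d\n", gpio, ret);
        return ret;
}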
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 227c62424f3c..a9df51f5d54c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
-#define hash_for_each(name, bkt, node, obj, member) \
- for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
- hlist_for_each_entry(obj, node, &name[bkt], member)
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
-#define hash_for_each_rcu(name, bkt, node, obj, member) \
- for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
- hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+#define hash_for_each_rcu(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
-#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
- for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
- hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible(name, obj, node, member, key) \
- hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
* in a rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_rcu(name, obj, node, member, key) \
- hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible_rcu(name, obj, member, key) \
+ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+ member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
- hlist_for_each_entry_safe(obj, node, tmp, \
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
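A minimal caller written against the reduced macro signatures might look like the sketch below; struct foo, foo_table and the key field are illustrative and not part of this patch:

#include <linux/hashtable.h>
#include <linux/kernel.h>

struct foo {
	int key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(foo_table, 4);

/* The separate struct hlist_node cursor is gone; only the object cursor remains. */
static struct foo *foo_lookup(int key)
{
	struct foo *obj;

	hash_for_each_possible(foo_table, obj, node, key)
		if (obj->key == key)
			return obj;
	return NULL;
}

static void foo_dump(void)
{
	struct foo *obj;
	int bkt;

	hash_for_each(foo_table, bkt, obj, node)
		pr_info("key=%d\n", obj->key);
}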
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
new file mode 100644
index 000000000000..3b589440ecfe
--- /dev/null
+++ b/include/linux/hdmi.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_HDMI_H_
+#define __LINUX_HDMI_H_
+
+#include <linux/types.h>
+
+enum hdmi_infoframe_type {
+ HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
+ HDMI_INFOFRAME_TYPE_AVI = 0x82,
+ HDMI_INFOFRAME_TYPE_SPD = 0x83,
+ HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+};
+
+#define HDMI_INFOFRAME_HEADER_SIZE 4
+#define HDMI_AVI_INFOFRAME_SIZE 13
+#define HDMI_SPD_INFOFRAME_SIZE 25
+#define HDMI_AUDIO_INFOFRAME_SIZE 10
+
+enum hdmi_colorspace {
+ HDMI_COLORSPACE_RGB,
+ HDMI_COLORSPACE_YUV422,
+ HDMI_COLORSPACE_YUV444,
+};
+
+enum hdmi_scan_mode {
+ HDMI_SCAN_MODE_NONE,
+ HDMI_SCAN_MODE_OVERSCAN,
+ HDMI_SCAN_MODE_UNDERSCAN,
+};
+
+enum hdmi_colorimetry {
+ HDMI_COLORIMETRY_NONE,
+ HDMI_COLORIMETRY_ITU_601,
+ HDMI_COLORIMETRY_ITU_709,
+ HDMI_COLORIMETRY_EXTENDED,
+};
+
+enum hdmi_picture_aspect {
+ HDMI_PICTURE_ASPECT_NONE,
+ HDMI_PICTURE_ASPECT_4_3,
+ HDMI_PICTURE_ASPECT_16_9,
+};
+
+enum hdmi_active_aspect {
+ HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
+ HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
+ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
+ HDMI_ACTIVE_ASPECT_PICTURE = 8,
+ HDMI_ACTIVE_ASPECT_4_3 = 9,
+ HDMI_ACTIVE_ASPECT_16_9 = 10,
+ HDMI_ACTIVE_ASPECT_14_9 = 11,
+ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
+ HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
+ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
+};
+
+enum hdmi_extended_colorimetry {
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
+ HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+};
+
+enum hdmi_quantization_range {
+ HDMI_QUANTIZATION_RANGE_DEFAULT,
+ HDMI_QUANTIZATION_RANGE_LIMITED,
+ HDMI_QUANTIZATION_RANGE_FULL,
+};
+
+/* non-uniform picture scaling */
+enum hdmi_nups {
+ HDMI_NUPS_UNKNOWN,
+ HDMI_NUPS_HORIZONTAL,
+ HDMI_NUPS_VERTICAL,
+ HDMI_NUPS_BOTH,
+};
+
+enum hdmi_ycc_quantization_range {
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
+ HDMI_YCC_QUANTIZATION_RANGE_FULL,
+};
+
+enum hdmi_content_type {
+ HDMI_CONTENT_TYPE_NONE,
+ HDMI_CONTENT_TYPE_PHOTO,
+ HDMI_CONTENT_TYPE_CINEMA,
+ HDMI_CONTENT_TYPE_GAME,
+};
+
+struct hdmi_avi_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_colorspace colorspace;
+ bool active_info_valid;
+ bool horizontal_bar_valid;
+ bool vertical_bar_valid;
+ enum hdmi_scan_mode scan_mode;
+ enum hdmi_colorimetry colorimetry;
+ enum hdmi_picture_aspect picture_aspect;
+ enum hdmi_active_aspect active_aspect;
+ bool itc;
+ enum hdmi_extended_colorimetry extended_colorimetry;
+ enum hdmi_quantization_range quantization_range;
+ enum hdmi_nups nups;
+ unsigned char video_code;
+ enum hdmi_ycc_quantization_range ycc_quantization_range;
+ enum hdmi_content_type content_type;
+ unsigned char pixel_repeat;
+ unsigned short top_bar;
+ unsigned short bottom_bar;
+ unsigned short left_bar;
+ unsigned short right_bar;
+};
+
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_spd_sdi {
+ HDMI_SPD_SDI_UNKNOWN,
+ HDMI_SPD_SDI_DSTB,
+ HDMI_SPD_SDI_DVDP,
+ HDMI_SPD_SDI_DVHS,
+ HDMI_SPD_SDI_HDDVR,
+ HDMI_SPD_SDI_DVC,
+ HDMI_SPD_SDI_DSC,
+ HDMI_SPD_SDI_VCD,
+ HDMI_SPD_SDI_GAME,
+ HDMI_SPD_SDI_PC,
+ HDMI_SPD_SDI_BD,
+ HDMI_SPD_SDI_SACD,
+ HDMI_SPD_SDI_HDDVD,
+ HDMI_SPD_SDI_PMP,
+};
+
+struct hdmi_spd_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ char vendor[8];
+ char product[16];
+ enum hdmi_spd_sdi sdi;
+};
+
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product);
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_audio_coding_type {
+ HDMI_AUDIO_CODING_TYPE_STREAM,
+ HDMI_AUDIO_CODING_TYPE_PCM,
+ HDMI_AUDIO_CODING_TYPE_AC3,
+ HDMI_AUDIO_CODING_TYPE_MPEG1,
+ HDMI_AUDIO_CODING_TYPE_MP3,
+ HDMI_AUDIO_CODING_TYPE_MPEG2,
+ HDMI_AUDIO_CODING_TYPE_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_DTS,
+ HDMI_AUDIO_CODING_TYPE_ATRAC,
+ HDMI_AUDIO_CODING_TYPE_DSD,
+ HDMI_AUDIO_CODING_TYPE_EAC3,
+ HDMI_AUDIO_CODING_TYPE_DTS_HD,
+ HDMI_AUDIO_CODING_TYPE_MLP,
+ HDMI_AUDIO_CODING_TYPE_DST,
+ HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_SIZE_STREAM,
+ HDMI_AUDIO_SAMPLE_SIZE_16,
+ HDMI_AUDIO_SAMPLE_SIZE_20,
+ HDMI_AUDIO_SAMPLE_SIZE_24,
+};
+
+enum hdmi_audio_sample_frequency {
+ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
+};
+
+enum hdmi_audio_coding_type_ext {
+ HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+};
+
+struct hdmi_audio_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char channels;
+ enum hdmi_audio_coding_type coding_type;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_sample_frequency sample_frequency;
+ enum hdmi_audio_coding_type_ext coding_type_ext;
+ unsigned char channel_allocation;
+ unsigned char level_shift_value;
+ bool downmix_inhibit;
+
+};
+
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+
+struct hdmi_vendor_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ u8 data[27];
+};
+
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+
+#endif /* __LINUX_HDMI_H_ */
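As a usage sketch (not part of the patch), a driver fills the structure and lets the pack helper emit the header and checksum into a caller-supplied buffer; the VIC value and field choices here are only illustrative:

#include <linux/hdmi.h>

static ssize_t example_pack_avi(void *buf, size_t len)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret < 0)
		return ret;

	frame.colorspace = HDMI_COLORSPACE_RGB;
	frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
	frame.video_code = 16;	/* illustrative VIC */

	/* Returns the number of bytes written, or a negative error code. */
	return hdmi_avi_infoframe_pack(&frame, buf, len);
}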
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 56fae865e272..0dca785288cf 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -121,9 +121,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
- * @e_handler: Callback for handling port events (RX Wake High/Low)
- * @pclaimed: Keeps tracks if the clients claimed its associated HSI port
- * @nb: Notifier block for port events
+ * e_handler: Callback for handling port events (RX Wake High/Low)
+ * pclaimed: Keeps track of whether the client claimed its associated HSI port
+ * nb: Notifier block for port events
*/
struct hsi_client {
struct device device;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index eedc334fb6f5..16e4e9a643fb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -281,7 +281,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
static inline struct hstate *hstate_file(struct file *f)
{
- return hstate_inode(f->f_dentry->d_inode);
+ return hstate_inode(file_inode(f));
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 1ff54b114efc..488debbef895 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -39,51 +39,55 @@
* address each module uses within a given i2c slave.
*/
+/* Module IDs for similar functionalities found in twl4030/twl6030 */
+enum twl_module_ids {
+ TWL_MODULE_USB,
+ TWL_MODULE_PIH,
+ TWL_MODULE_MAIN_CHARGE,
+ TWL_MODULE_PM_MASTER,
+ TWL_MODULE_PM_RECEIVER,
+
+ TWL_MODULE_RTC,
+ TWL_MODULE_PWM,
+ TWL_MODULE_LED,
+ TWL_MODULE_SECURED_REG,
+
+ TWL_MODULE_LAST,
+};
+
+/* Modules only available in twl4030 series */
enum twl4030_module_ids {
- TWL4030_MODULE_USB = 0, /* Slave 0 (i2c address 0x48) */
- TWL4030_MODULE_AUDIO_VOICE, /* Slave 1 (i2c address 0x49) */
+ TWL4030_MODULE_AUDIO_VOICE = TWL_MODULE_LAST,
TWL4030_MODULE_GPIO,
TWL4030_MODULE_INTBR,
- TWL4030_MODULE_PIH,
-
TWL4030_MODULE_TEST,
- TWL4030_MODULE_KEYPAD, /* Slave 2 (i2c address 0x4a) */
+ TWL4030_MODULE_KEYPAD,
+
TWL4030_MODULE_MADC,
TWL4030_MODULE_INTERRUPTS,
- TWL4030_MODULE_LED,
-
- TWL4030_MODULE_MAIN_CHARGE,
TWL4030_MODULE_PRECHARGE,
- TWL4030_MODULE_PWM0,
- TWL4030_MODULE_PWM1,
- TWL4030_MODULE_PWMA,
+ TWL4030_MODULE_BACKUP,
+ TWL4030_MODULE_INT,
- TWL4030_MODULE_PWMB,
TWL5031_MODULE_ACCESSORY,
TWL5031_MODULE_INTERRUPTS,
- TWL4030_MODULE_BACKUP, /* Slave 3 (i2c address 0x4b) */
- TWL4030_MODULE_INT,
- TWL4030_MODULE_PM_MASTER,
- TWL4030_MODULE_PM_RECEIVER,
- TWL4030_MODULE_RTC,
- TWL4030_MODULE_SECURED_REG,
TWL4030_MODULE_LAST,
};
-/* Similar functionalities implemented in TWL4030/6030 */
-#define TWL_MODULE_USB TWL4030_MODULE_USB
-#define TWL_MODULE_PIH TWL4030_MODULE_PIH
-#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
-#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
-#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
-#define TWL_MODULE_RTC TWL4030_MODULE_RTC
-#define TWL_MODULE_PWM TWL4030_MODULE_PWM0
-#define TWL_MODULE_LED TWL4030_MODULE_LED
+/* Modules only available in twl6030 series */
+enum twl6030_module_ids {
+ TWL6030_MODULE_ID0 = TWL_MODULE_LAST,
+ TWL6030_MODULE_ID1,
+ TWL6030_MODULE_ID2,
+ TWL6030_MODULE_GPADC,
+ TWL6030_MODULE_GASGAUGE,
+
+ TWL6030_MODULE_LAST,
+};
-#define TWL6030_MODULE_ID0 13
-#define TWL6030_MODULE_ID1 14
-#define TWL6030_MODULE_ID2 15
+/* Until the clients have been converted to use TWL_MODULE_LED */
+#define TWL4030_MODULE_LED TWL_MODULE_LED
#define GPIO_INTR_OFFSET 0
#define KEYPAD_INTR_OFFSET 1
@@ -171,20 +175,22 @@ TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
- * Read and write single 8-bit registers
- */
-int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
-
-/*
* Read and write several 8-bit registers at once.
- *
- * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
- * for the value, and populate your data starting at offset 1.
*/
int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+/*
+ * Read and write single 8-bit registers
+ */
+static inline int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg) {
+ return twl_i2c_write(mod_no, &val, reg, 1);
+}
+
+static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
+ return twl_i2c_read(mod_no, val, reg, 1);
+}
+
int twl_get_type(void);
int twl_get_version(void);
int twl_get_hfclk_rate(void);
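Callers keep using the same single-byte helpers, which now simply wrap the multi-byte accessors; a sketch (the register offset 0x0a is a placeholder, not a real TWL register):

#include <linux/i2c/twl.h>

static int example_set_bit0(void)
{
	u8 val;
	int ret;

	ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val, 0x0a);
	if (ret)
		return ret;

	return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val | 0x01, 0x0a);
}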
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e5eb125effe6..a6f38b5c34e4 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -17,69 +17,40 @@
#include <linux/init.h>
#include <linux/rcupdate.h>
-#if BITS_PER_LONG == 32
-# define IDR_BITS 5
-# define IDR_FULL 0xfffffffful
-/* We can only use two of the bits in the top level because there is
- only one possible bit in the top level (5 bits * 7 levels = 35
- bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 30)
-#elif BITS_PER_LONG == 64
-# define IDR_BITS 6
-# define IDR_FULL 0xfffffffffffffffful
-/* We can only use two of the bits in the top level because there is
- only one possible bit in the top level (6 bits * 6 levels = 36
- bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 62)
-#else
-# error "BITS_PER_LONG is not 32 or 64"
-#endif
-
+/*
+ * We want shallower trees and thus more bits covered at each layer. 8
+ * bits gives us a large enough first layer for most use cases and a
+ * maximum tree depth of 4. Each idr_layer is slightly larger than 2k on
+ * 64bit and 1k on 32bit.
+ */
+#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
-#define MAX_IDR_SHIFT (sizeof(int)*8 - 1)
-#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-#define MAX_IDR_MASK (MAX_IDR_BIT - 1)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
-
-/* Number of id_layer structs to leave in free list */
-#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
-
struct idr_layer {
- unsigned long bitmap; /* A zero bit means "space here" */
+ int prefix; /* the ID prefix of this idr_layer */
+ DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
struct idr_layer __rcu *ary[1<<IDR_BITS];
- int count; /* When zero, we can release it */
- int layer; /* distance from leaf */
- struct rcu_head rcu_head;
+ int count; /* When zero, we can release it */
+ int layer; /* distance from leaf */
+ struct rcu_head rcu_head;
};
struct idr {
- struct idr_layer __rcu *top;
- struct idr_layer *id_free;
- int layers; /* only valid without concurrent changes */
- int id_free_cnt;
- spinlock_t lock;
+ struct idr_layer __rcu *hint; /* the last layer allocated from */
+ struct idr_layer __rcu *top;
+ struct idr_layer *id_free;
+ int layers; /* only valid w/o concurrent changes */
+ int id_free_cnt;
+ spinlock_t lock;
};
-#define IDR_INIT(name) \
-{ \
- .top = NULL, \
- .id_free = NULL, \
- .layers = 0, \
- .id_free_cnt = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+#define IDR_INIT(name) \
+{ \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
}
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
-/* Actions to be taken after a call to _idr_sub_alloc */
-#define IDR_NEED_TO_GROW -2
-#define IDR_NOMORE_SPACE -3
-
-#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
-
/**
* DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
@@ -101,19 +72,90 @@ struct idr {
* This is what we export.
*/
-void *idr_find(struct idr *idp, int id);
+void *idr_find_slowpath(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void idr_preload(gfp_t gfp_mask);
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
-void idr_remove_all(struct idr *idp);
+void idr_free(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
+/**
+ * idr_preload_end - end preload section started with idr_preload()
+ *
+ * Each idr_preload() should be matched with an invocation of this
+ * function. See idr_preload() for details.
+ */
+static inline void idr_preload_end(void)
+{
+ preempt_enable();
+}
+
+/**
+ * idr_find - return pointer for given id
+ * @idp: idr handle
+ * @id: lookup key
+ *
+ * Return the pointer given the id it has been registered with. A %NULL
+ * return indicates that @id is not valid or you passed %NULL in
+ * idr_get_new().
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
+ */
+static inline void *idr_find(struct idr *idr, int id)
+{
+ struct idr_layer *hint = rcu_dereference_raw(idr->hint);
+
+ if (hint && (id & ~IDR_MASK) == hint->prefix)
+ return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
+
+ return idr_find_slowpath(idr, id);
+}
+
+/**
+ * idr_get_new - allocate new idr entry
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: pointer to the allocated handle
+ *
+ * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
+ */
+static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+ return idr_get_new_above(idp, ptr, 0, id);
+}
+
+/**
+ * idr_for_each_entry - iterate over an idr's elements of a given type
+ * @idp: idr handle
+ * @entry: the type * to use as cursor
+ * @id: id entry's key
+ */
+#define idr_for_each_entry(idp, entry, id) \
+ for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
+ entry != NULL; \
+ ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+
+void __idr_remove_all(struct idr *idp); /* don't use */
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * If you're trying to destroy @idp, calling idr_destroy() is enough.
+ * This is going away. Don't use.
+ */
+static inline void __deprecated idr_remove_all(struct idr *idp)
+{
+ __idr_remove_all(idp);
+}
/*
* IDA - IDR based id allocator, use when translation from id to
@@ -141,7 +183,6 @@ struct ida {
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-int ida_get_new(struct ida *ida, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
@@ -150,17 +191,18 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
-void __init idr_init_cache(void);
-
/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * ida_get_new - allocate new ID
+ * @ida: idr handle
+ * @p_id: pointer to the allocated handle
+ *
+ * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
*/
-#define idr_for_each_entry(idp, entry, id) \
- for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
- entry != NULL; \
- ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+static inline int ida_get_new(struct ida *ida, int *p_id)
+{
+ return ida_get_new_above(ida, 0, p_id);
+}
+
+void __init idr_init_cache(void);
#endif /* __IDR_H__ */
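The intended replacement for the idr_pre_get()/idr_get_new() pattern is idr_preload() plus idr_alloc(); a sketch under a hypothetical spinlock (example_idr and example_lock are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_register(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate outside the lock */
	spin_lock(&example_lock);
	/* end <= 0 means "no upper bound"; GFP_NOWAIT because a spinlock is held */
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* >= 0 on success, negative errno on failure */
}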
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 4648d8021244..cfd21e3d5506 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
static inline struct team_port *team_get_port_by_index(struct team *team,
int port_index)
{
- struct hlist_node *p;
struct team_port *port;
struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry(port, p, head, hlist)
+ hlist_for_each_entry(port, head, hlist)
if (port->index == port_index)
return port;
return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
int port_index)
{
- struct hlist_node *p;
struct team_port *port;
struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry_rcu(port, p, head, hlist)
+ hlist_for_each_entry_rcu(port, head, hlist)
if (port->index == port_index)
return port;
return NULL;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d06cc5c8f58c..218a3b686d90 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -331,7 +331,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
__be16 proto;
- unsigned char *rawp;
+ unsigned short *rawp;
/*
* Was a VLAN packet, grab the encapsulated protocol, which the layer
@@ -344,8 +344,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
return;
}
- rawp = skb->data;
- if (*(unsigned short *) rawp == 0xFFFF)
+ rawp = (unsigned short *)(vhdr + 1);
+ if (*rawp == 0xFFFF)
/*
* This is a magic hack to spot IPX packets. Older Novell
* breaks the protocol design and runs IPX over 802.3 without
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f3b99e1c1042..ba3b8a98a049 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -58,8 +58,10 @@ struct iommu_domain {
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
enum iommu_attr {
- DOMAIN_ATTR_MAX,
DOMAIN_ATTR_GEOMETRY,
+ DOMAIN_ATTR_PAGING,
+ DOMAIN_ATTR_WINDOWS,
+ DOMAIN_ATTR_MAX,
};
#ifdef CONFIG_IOMMU_API
@@ -100,6 +102,16 @@ struct iommu_ops {
enum iommu_attr attr, void *data);
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
+
+ /* Window handling functions */
+ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size);
+ void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
+	/* Set the number of windows per domain */
+ int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
+	/* Get the number of windows per domain */
+ u32 (*domain_get_windows)(struct iommu_domain *domain);
+
unsigned long pgsize_bitmap;
};
@@ -157,6 +169,10 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
+/* Window handling function prototypes */
+extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t offset, u64 size);
+extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
@@ -239,6 +255,18 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline int iommu_domain_window_enable(struct iommu_domain *domain,
+ u32 wnd_nr, phys_addr_t paddr,
+ u64 size)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_domain_window_disable(struct iommu_domain *domain,
+ u32 wnd_nr)
+{
+}
+
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
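A sketch of how a caller might combine the new attribute and window calls (the window count, index, and address range below are illustrative):

#include <linux/iommu.h>

static int example_setup_window(struct iommu_domain *domain,
				phys_addr_t paddr, u64 size)
{
	u32 windows = 1;
	int ret;

	/* Declare how many subwindows this domain uses ... */
	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &windows);
	if (ret)
		return ret;

	/* ... then map window 0 onto [paddr, paddr + size). */
	return iommu_domain_window_enable(domain, 0, paddr, size);
}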
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 1487e7906bbd..1f9f56e28851 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -35,10 +35,6 @@
#include <uapi/linux/ipmi.h>
-
-/*
- * The in-kernel interface.
- */
#include <linux/list.h>
#include <linux/proc_fs.h>
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index e30b66346942..50e5a5e6a712 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -20,7 +20,6 @@
#ifndef __KERNEL__
#include "jfs_compat.h"
#define JBD2_DEBUG
-#define jfs_debug jbd_debug
#else
#include <linux/types.h>
@@ -57,7 +56,7 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern u8 jbd2_journal_enable_debug;
+extern ushort jbd2_journal_enable_debug;
#define jbd_debug(n, f, a...) \
do { \
@@ -397,35 +396,18 @@ struct jbd2_journal_handle
int h_err;
/* Flags [no locking] */
- unsigned int h_sync:1; /* sync-on-close */
- unsigned int h_jdata:1; /* force data journaling */
- unsigned int h_aborted:1; /* fatal error on handle */
- unsigned int h_cowing:1; /* COWing block to snapshot */
-
- /* Number of buffers requested by user:
- * (before adding the COW credits factor) */
- unsigned int h_base_credits:14;
-
- /* Number of buffers the user is allowed to dirty:
- * (counts only buffers dirtied when !h_cowing) */
- unsigned int h_user_credits:14;
+ unsigned int h_sync: 1; /* sync-on-close */
+ unsigned int h_jdata: 1; /* force data journaling */
+ unsigned int h_aborted: 1; /* fatal error on handle */
+ unsigned int h_type: 8; /* for handle statistics */
+ unsigned int h_line_no: 16; /* for handle statistics */
+ unsigned long h_start_jiffies;
+ unsigned int h_requested_credits;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map h_lockdep_map;
#endif
-
-#ifdef CONFIG_JBD2_DEBUG
- /* COW debugging counters: */
- unsigned int h_cow_moved; /* blocks moved to snapshot */
- unsigned int h_cow_copied; /* blocks copied to snapshot */
- unsigned int h_cow_ok_jh; /* blocks already COWed during current
- transaction */
- unsigned int h_cow_ok_bitmap; /* blocks not set in COW bitmap */
- unsigned int h_cow_ok_mapped;/* blocks already mapped in snapshot */
- unsigned int h_cow_bitmaps; /* COW bitmaps created */
- unsigned int h_cow_excluded; /* blocks set in exclude bitmap */
-#endif
};
@@ -581,6 +563,11 @@ struct transaction_s
unsigned long t_start;
/*
+ * When commit was requested
+ */
+ unsigned long t_requested;
+
+ /*
* Checkpointing stats [j_checkpoint_sem]
*/
struct transaction_chp_stats_s t_chp_stats;
@@ -637,6 +624,7 @@ struct transaction_s
struct transaction_run_stats_s {
unsigned long rs_wait;
+ unsigned long rs_request_delay;
unsigned long rs_running;
unsigned long rs_locked;
unsigned long rs_flushing;
@@ -649,6 +637,7 @@ struct transaction_run_stats_s {
struct transaction_stats_s {
unsigned long ts_tid;
+ unsigned long ts_requested;
struct transaction_run_stats_s run;
};
@@ -1086,7 +1075,8 @@ static inline handle_t *journal_current_handle(void)
*/
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
-extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask);
+extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
extern int jbd2_journal_extend (handle_t *, int nblocks);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c566927efcbd..80d36874689b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -398,7 +398,11 @@ extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int sysctl_panic_on_stackoverflow;
extern const char *print_tainted(void);
-extern void add_taint(unsigned flag);
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE
+};
+extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;
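Every add_taint() caller now states explicitly whether lockdep can still be trusted after the taint; a sketch (the taint flags chosen here are only examples):

#include <linux/kernel.h>

static void example_mark_tainted(bool fatal_for_lockdep)
{
	if (fatal_for_lockdep)
		add_taint(TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	else
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
}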
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b7996a768eb2..cad77fe09d77 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -123,6 +123,8 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_WATCHDOG 18
#define KVM_REQ_MASTERCLOCK_UPDATE 19
#define KVM_REQ_MCLOCK_INPROGRESS 20
+#define KVM_REQ_EPR_EXIT 21
+#define KVM_REQ_EOIBITMAP 22
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -267,12 +269,11 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
- unsigned long flags;
unsigned long *dirty_bitmap;
struct kvm_arch_memory_slot arch;
unsigned long userspace_addr;
- int user_alloc;
- int id;
+ u32 flags;
+ short id;
};
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -314,8 +315,12 @@ struct kvm_irq_routing_table {};
#endif
+#ifndef KVM_PRIVATE_MEM_SLOTS
+#define KVM_PRIVATE_MEM_SLOTS 0
+#endif
+
#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
/*
@@ -327,7 +332,7 @@ struct kvm_memslots {
u64 generation;
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
- int id_to_index[KVM_MEM_SLOTS_NUM];
+ short id_to_index[KVM_MEM_SLOTS_NUM];
};
struct kvm {
@@ -425,7 +430,8 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+ u64 last_generation);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
@@ -448,10 +454,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
@@ -459,11 +465,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc);
+ bool user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
@@ -533,6 +539,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_update_eoibitmap_request(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -550,7 +557,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct
kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -686,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
deleted file mode 100644
index 3f071ec019b2..000000000000
--- a/include/linux/leds-lp5521.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * LP5521 LED chip driver.
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_LP5521_H
-#define __LINUX_LP5521_H
-
-/* See Documentation/leds/leds-lp5521.txt */
-
-struct lp5521_led_config {
- char *name;
- u8 chan_nr;
- u8 led_current; /* mA x10, 0 if led is not connected */
- u8 max_current;
-};
-
-struct lp5521_led_pattern {
- u8 *r;
- u8 *g;
- u8 *b;
- u8 size_r;
- u8 size_g;
- u8 size_b;
-};
-
-#define LP5521_CLOCK_AUTO 0
-#define LP5521_CLOCK_INT 1
-#define LP5521_CLOCK_EXT 2
-
-/* Bits in CONFIG register */
-#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
-#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
-#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
-#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
-#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
-#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
-#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
-#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
-#define LP5521_CLK_INT 1 /* Internal clock */
-#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
-
-struct lp5521_platform_data {
- struct lp5521_led_config *led_config;
- u8 num_channels;
- u8 clock_mode;
- int (*setup_resources)(void);
- void (*release_resources)(void);
- void (*enable)(bool state);
- const char *label;
- u8 update_config;
- struct lp5521_led_pattern *patterns;
- int num_patterns;
-};
-
-#endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
deleted file mode 100644
index 727877fb406d..000000000000
--- a/include/linux/leds-lp5523.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * LP5523 LED Driver
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_LP5523_H
-#define __LINUX_LP5523_H
-
-/* See Documentation/leds/leds-lp5523.txt */
-
-struct lp5523_led_config {
- const char *name;
- u8 chan_nr;
- u8 led_current; /* mA x10, 0 if led is not connected */
- u8 max_current;
-};
-
-#define LP5523_CLOCK_AUTO 0
-#define LP5523_CLOCK_INT 1
-#define LP5523_CLOCK_EXT 2
-
-struct lp5523_platform_data {
- struct lp5523_led_config *led_config;
- u8 num_channels;
- u8 clock_mode;
- int (*setup_resources)(void);
- void (*release_resources)(void);
- void (*enable)(bool state);
- const char *label;
-};
-
-#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
index 33a071167489..a65e9646e4b1 100644
--- a/include/linux/leds_pwm.h
+++ b/include/linux/leds_pwm.h
@@ -7,7 +7,7 @@
struct led_pwm {
const char *name;
const char *default_trigger;
- unsigned pwm_id;
+ unsigned pwm_id __deprecated;
u8 active_low;
unsigned max_brightness;
unsigned pwm_period_ns;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 649e5f86b5f0..91c9d109e5f1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -620,6 +620,9 @@ struct ata_device {
union acpi_object *gtf_cache;
unsigned int gtf_filter;
#endif
+#ifdef CONFIG_SATA_ZPODD
+ void *zpodd;
+#endif
struct device tdev;
/* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
u64 n_sectors; /* size of device, if ATA */
diff --git a/include/linux/list.h b/include/linux/list.h
index cc6d2aa6b415..d991cc147c98 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
+#define hlist_entry_safe(ptr, type, member) \
+ (ptr) ? hlist_entry(ptr, type, member) : NULL
+
/**
* hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
#endif
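A converted hlist caller then looks like this minimal sketch (struct bar and its fields are hypothetical):

#include <linux/list.h>

struct bar {
	int id;
	struct hlist_node hlist;
};

static struct bar *bar_find(struct hlist_head *head, int id)
{
	struct bar *b;

	/* No separate struct hlist_node cursor anymore. */
	hlist_for_each_entry(b, head, hlist)
		if (b->id == id)
			return b;
	return NULL;
}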
diff --git a/include/linux/llist.h b/include/linux/llist.h
index d0ab98f73d38..a5199f6d0e82 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -125,31 +125,6 @@ static inline void init_llist_head(struct llist_head *list)
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
- * llist_for_each_entry_safe - iterate safely against remove over some entries
- * of lock-less list of given type.
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as a temporary storage.
- * @node: the fist entry of deleted list entries.
- * @member: the name of the llist_node with the struct.
- *
- * In general, some entries of the lock-less list can be traversed
- * safely only after being removed from list, so start with an entry
- * instead of list head. This variant allows removal of entries
- * as we iterate.
- *
- * If being used on entries deleted from lock-less list directly, the
- * traverse order is from the newest to the oldest added entry. If
- * you want to traverse from the oldest to the newest, you must
- * reverse the order by yourself before traversing.
- */
-#define llist_for_each_entry_safe(pos, n, node, member) \
- for ((pos) = llist_entry((node), typeof(*(pos)), member), \
- (n) = (pos)->member.next; \
- &(pos)->member != NULL; \
- (pos) = llist_entry(n, typeof(*(pos)), member), \
- (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
-
-/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test
*
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index f5a051a79273..dcaad79f54ed 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -212,7 +212,8 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
-int nlmclnt_reclaim(struct nlm_host *, struct file_lock *);
+int nlmclnt_reclaim(struct nlm_host *, struct file_lock *,
+ struct nlm_rqst *);
void nlmclnt_next_cookie(struct nlm_cookie *);
/*
@@ -291,7 +292,7 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return file->f_file->f_path.dentry->d_inode;
+ return file_inode(file->f_file);
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index d793497ec1ca..a0848d9377e5 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -4,28 +4,28 @@
* LZO Public Kernel Interface
* A mini subset of the LZO real-time data compression library
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
-#define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *))
-#define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS
+#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
+#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
-/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len);
+ unsigned char *dst, size_t *dst_len);
/*
* Return values (< 0 = Error)
@@ -40,5 +40,6 @@ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
#define LZO_E_EOF_NOT_FOUND (-7)
#define LZO_E_INPUT_NOT_CONSUMED (-8)
#define LZO_E_NOT_YET_IMPLEMENTED (-9)
+#define LZO_E_INVALID_ARGUMENT (-10)
#endif
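Callers still supply their own work buffer, now sized by the smaller LZO1X_1_MEM_COMPRESS; a sketch (the error mapping at the end is illustrative):

#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int example_compress(const unsigned char *src, size_t src_len,
			    unsigned char *dst, size_t *dst_len)
{
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return -ENOMEM;

	/* dst should hold at least lzo1x_worst_compress(src_len) bytes. */
	ret = lzo1x_1_compress(src, src_len, dst, dst_len, wrkmem);
	vfree(wrkmem);

	return ret == LZO_E_OK ? 0 : -EINVAL;
}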
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 3e5ecb2d790e..f388203db7e8 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,7 +42,6 @@ struct memblock {
extern struct memblock memblock;
extern int memblock_debug;
-extern struct movablemem_map movablemem_map;
#define memblock_dbg(fmt, ...) \
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
@@ -61,7 +60,6 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 478672ed0c3d..e94537befabd 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -365,5 +365,5 @@ static inline int pm80x_dev_resume(struct device *dev)
extern int pm80x_init(struct i2c_client *client,
const struct i2c_device_id *id);
-extern int pm80x_deinit(struct i2c_client *client);
+extern int pm80x_deinit(void);
#endif /* __LINUX_MFD_88PM80X_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 80e3b8683a84..9ead60bc66b7 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -311,6 +311,7 @@ int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
int abx500_get_chip_id(struct device *dev);
int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
+void abx500_dump_all_banks(void);
struct abx500_ops {
int (*get_chip_id) (struct device *);
@@ -321,6 +322,7 @@ struct abx500_ops {
int (*mask_and_set_register) (struct device *, u8, u8, u8, u8);
int (*event_registers_startup_state_get) (struct device *, u8 *);
int (*startup_irq_enabled) (struct device *, unsigned int);
+ void (*dump_all_banks) (struct device *);
};
int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 10eb50973c39..ebf12e793db9 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -37,6 +37,11 @@ static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
return ab8500_sysctrl_write(reg, bits, 0);
}
+/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
+struct ab8500_sysctrl_platform_data {
+ u8 initial_req_buf_config[8];
+};
+
/* Registers */
#define AB8500_TURNONSTATUS 0x100
#define AB8500_RESETSTATUS 0x101
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fc0534483c72..9db0bda446a0 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -368,10 +368,12 @@ struct regulator_reg_init;
struct regulator_init_data;
struct ab8500_gpio_platform_data;
struct ab8500_codec_platform_data;
+struct ab8500_sysctrl_platform_data;
/**
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
+ * @pm_power_off: Should machine pm power off hook be registered or not
* @init: board-specific initialization after detection of ab8500
* @num_regulator_reg_init: number of regulator init registers
* @regulator_reg_init: regulator init registers
@@ -380,6 +382,7 @@ struct ab8500_codec_platform_data;
*/
struct ab8500_platform_data {
int irq_base;
+ bool pm_power_off;
void (*init) (struct ab8500 *);
int num_regulator_reg_init;
struct ab8500_regulator_reg_init *regulator_reg_init;
@@ -387,6 +390,7 @@ struct ab8500_platform_data {
struct regulator_init_data *regulator;
struct abx500_gpio_platform_data *gpio;
struct ab8500_codec_platform_data *codec;
+ struct ab8500_sysctrl_platform_data *sysctrl;
};
extern int ab8500_init(struct ab8500 *ab8500,
@@ -508,4 +512,12 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0));
}
+#ifdef CONFIG_AB8500_DEBUG
+void ab8500_dump_all_banks(struct device *dev);
+void ab8500_debug_register_interrupt(int line);
+#else
+static inline void ab8500_dump_all_banks(struct device *dev) {}
+static inline void ab8500_debug_register_interrupt(int line) {}
+#endif
+
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 96d64f2b8d78..455c51d22d6b 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -56,6 +56,8 @@
#define ARIZONA_DMIC_MICBIAS2 2
#define ARIZONA_DMIC_MICBIAS3 3
+#define ARIZONA_MAX_MICBIAS 3
+
#define ARIZONA_INMODE_DIFF 0
#define ARIZONA_INMODE_SE 1
#define ARIZONA_INMODE_DMIC 2
@@ -71,6 +73,13 @@
struct regulator_init_data;
+struct arizona_micbias {
+ int mV; /** Regulated voltage */
+ unsigned int ext_cap:1; /** External capacitor fitted */
+ unsigned int discharge:1; /** Actively discharge */
+ unsigned int fast_start:1; /** Enable aggressive startup ramp rate */
+};
+
struct arizona_micd_config {
unsigned int src;
unsigned int bias;
@@ -136,6 +145,9 @@ struct arizona_pdata {
/** Reference voltage for DMIC inputs */
int dmic_ref[ARIZONA_MAX_INPUT];
+ /** MICBIAS configurations */
+ struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
+
/** Mode of input structures */
int inmode[ARIZONA_MAX_INPUT];
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 188d89abd963..340355136069 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -984,18 +984,34 @@
#define ARIZONA_DSP1_STATUS_1 0x1104
#define ARIZONA_DSP1_STATUS_2 0x1105
#define ARIZONA_DSP1_STATUS_3 0x1106
+#define ARIZONA_DSP1_SCRATCH_0 0x1140
+#define ARIZONA_DSP1_SCRATCH_1 0x1141
+#define ARIZONA_DSP1_SCRATCH_2 0x1142
+#define ARIZONA_DSP1_SCRATCH_3 0x1143
#define ARIZONA_DSP2_CONTROL_1 0x1200
#define ARIZONA_DSP2_CLOCKING_1 0x1201
#define ARIZONA_DSP2_STATUS_1 0x1204
#define ARIZONA_DSP2_STATUS_2 0x1205
+#define ARIZONA_DSP2_SCRATCH_0 0x1240
+#define ARIZONA_DSP2_SCRATCH_1 0x1241
+#define ARIZONA_DSP2_SCRATCH_2 0x1242
+#define ARIZONA_DSP2_SCRATCH_3 0x1243
#define ARIZONA_DSP3_CONTROL_1 0x1300
#define ARIZONA_DSP3_CLOCKING_1 0x1301
#define ARIZONA_DSP3_STATUS_1 0x1304
#define ARIZONA_DSP3_STATUS_2 0x1305
+#define ARIZONA_DSP3_SCRATCH_0 0x1340
+#define ARIZONA_DSP3_SCRATCH_1 0x1341
+#define ARIZONA_DSP3_SCRATCH_2 0x1342
+#define ARIZONA_DSP3_SCRATCH_3 0x1343
#define ARIZONA_DSP4_CONTROL_1 0x1400
#define ARIZONA_DSP4_CLOCKING_1 0x1401
#define ARIZONA_DSP4_STATUS_1 0x1404
#define ARIZONA_DSP4_STATUS_2 0x1405
+#define ARIZONA_DSP4_SCRATCH_0 0x1440
+#define ARIZONA_DSP4_SCRATCH_1 0x1441
+#define ARIZONA_DSP4_SCRATCH_2 0x1442
+#define ARIZONA_DSP4_SCRATCH_3 0x1443
/*
* Field Definitions.
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index f8bac7cfc25f..3abcca91eecd 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -151,6 +151,18 @@ enum prcmu_clock {
};
/**
+ * enum prcmu_wdog_id - PRCMU watchdog IDs
+ * @PRCMU_WDOG_ALL: use all timers
+ * @PRCMU_WDOG_CPU1: use first CPU timer only
+ * @PRCMU_WDOG_CPU2: use second CPU timer only
+ */
+enum prcmu_wdog_id {
+ PRCMU_WDOG_ALL = 0x00,
+ PRCMU_WDOG_CPU1 = 0x01,
+ PRCMU_WDOG_CPU2 = 0x02,
+};
+
+/**
* enum ape_opp - APE OPP states definition
* @APE_OPP_INIT:
* @APE_NO_CHANGE: The APE operating point is unchanged
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 2a32b16f79cb..786bf6679a28 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#define LP8788_DEV_BUCK "lp8788-buck"
@@ -124,11 +125,6 @@ enum lp8788_bl_ramp_step {
LP8788_RAMP_65538us,
};
-enum lp8788_bl_pwm_polarity {
- LP8788_PWM_ACTIVE_HIGH,
- LP8788_PWM_ACTIVE_LOW,
-};
-
enum lp8788_isink_scale {
LP8788_ISINK_SCALE_100mA,
LP8788_ISINK_SCALE_120mA,
@@ -229,16 +225,6 @@ struct lp8788_charger_platform_data {
};
/*
- * struct lp8788_bl_pwm_data
- * @pwm_set_intensity : set duty of pwm
- * @pwm_get_intensity : get current duty of pwm
- */
-struct lp8788_bl_pwm_data {
- void (*pwm_set_intensity) (int brightness, int max_brightness);
- int (*pwm_get_intensity) (int max_brightness);
-};
-
-/*
* struct lp8788_backlight_platform_data
* @name : backlight driver name. (default: "lcd-backlight")
* @initial_brightness : initial value of backlight brightness
@@ -248,8 +234,8 @@ struct lp8788_bl_pwm_data {
* @rise_time : brightness ramp up step time
* @fall_time : brightness ramp down step time
* @pwm_pol : pwm polarity setting when bl_mode is pwm based
- * @pwm_data : platform specific pwm generation functions
- * only valid when bl_mode is pwm based
+ * @period_ns : platform specific pwm period value. unit is nanoseconds.
+ Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
*/
struct lp8788_backlight_platform_data {
char *name;
@@ -259,8 +245,8 @@ struct lp8788_backlight_platform_data {
enum lp8788_bl_full_scale_current full_scale;
enum lp8788_bl_ramp_step rise_time;
enum lp8788_bl_ramp_step fall_time;
- enum lp8788_bl_pwm_polarity pwm_pol;
- struct lp8788_bl_pwm_data pwm_data;
+ enum pwm_polarity pwm_pol;
+ unsigned int period_ns;
};
/*
diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h
index 74d8e2969630..ce8502e9e7dc 100644
--- a/include/linux/mfd/max8925.h
+++ b/include/linux/mfd/max8925.h
@@ -190,6 +190,8 @@ enum {
MAX8925_NR_IRQS,
};
+
+
struct max8925_chip {
struct device *dev;
struct i2c_client *i2c;
@@ -201,7 +203,6 @@ struct max8925_chip {
int irq_base;
int core_irq;
int tsc_irq;
-
unsigned int wakeup_flag;
};
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 29f6616e12f0..a4d13d7cd001 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -2789,4 +2789,56 @@ enum usb_irq_events {
#define PALMAS_GPADC_TRIM15 0xE
#define PALMAS_GPADC_TRIM16 0xF
+static inline int palmas_read(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int *val)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_read(palmas->regmap[slave_id], addr, val);
+}
+
+static inline int palmas_write(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int value)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_write(palmas->regmap[slave_id], addr, value);
+}
+
+static inline int palmas_bulk_write(struct palmas *palmas, unsigned int base,
+ unsigned int reg, const void *val, size_t val_count)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_bulk_write(palmas->regmap[slave_id], addr,
+ val, val_count);
+}
+
+static inline int palmas_bulk_read(struct palmas *palmas, unsigned int base,
+ unsigned int reg, void *val, size_t val_count)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_bulk_read(palmas->regmap[slave_id], addr,
+ val, val_count);
+}
+
+static inline int palmas_update_bits(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int mask, unsigned int val)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_update_bits(palmas->regmap[slave_id], addr, mask, val);
+}
+
+static inline int palmas_irq_get_virq(struct palmas *palmas, int irq)
+{
+ return regmap_irq_get_virq(palmas->irq_data, irq);
+}
+
#endif /* __LINUX_MFD_PALMAS_H */
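These helpers fold the base-to-slave and address translation into one place; a read-modify-check sketch (the base, register, and mask are passed in rather than naming real PALMAS_* constants):

#include <linux/mfd/palmas.h>

static int example_ensure_bits(struct palmas *palmas, unsigned int base,
			       unsigned int reg, unsigned int mask)
{
	unsigned int val;
	int ret;

	ret = palmas_read(palmas, base, reg, &val);
	if (ret < 0)
		return ret;

	if ((val & mask) == mask)
		return 0;	/* already set */

	return palmas_update_bits(palmas, base, reg, mask, mask);
}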
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 4b117a3f54d4..26ea7f1b7caf 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -465,7 +465,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
-/* SD_CONFIURE3 */
+/* SD_CONFIGURE3 */
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
/* Card Transfer Reset Register */
@@ -581,8 +581,11 @@
#define CARD_GPIO_DIR 0xFD57
#define CARD_GPIO 0xFD58
#define CARD_DATA_SOURCE 0xFD5B
+#define SD30_CLK_DRIVE_SEL 0xFD5A
#define CARD_SELECT 0xFD5C
#define SD30_DRIVE_SEL 0xFD5E
+#define SD30_CMD_DRIVE_SEL 0xFD5E
+#define SD30_DAT_DRIVE_SEL 0xFD5F
#define CARD_CLK_EN 0xFD69
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
@@ -655,6 +658,8 @@
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
#define PETXCFG 0xFE49
+#define LTR_CTL 0xFE4A
+#define OBFF_CFG 0xFE4C
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -735,6 +740,7 @@ struct rtsx_pcr {
unsigned int card_inserted;
unsigned int card_removed;
+ unsigned int card_exist;
struct delayed_work carddet_work;
struct delayed_work idle_work;
@@ -799,6 +805,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card);
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index d83af39815ab..99bf3e665997 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -65,12 +65,6 @@
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
/*
- * Some platforms can detect card insertion events with controller powered
- * down, using a GPIO IRQ, in which case they have to fill in cd_irq, cd_gpio,
- * and cd_flags fields of struct tmio_mmc_data.
- */
-#define TMIO_MMC_HAS_COLD_CD (1 << 3)
-/*
* Some controllers require waiting for the SD bus to become
* idle before writing to some registers.
*/
@@ -117,18 +111,6 @@ struct tmio_mmc_data {
};
/*
- * This function is deprecated and will be removed soon. Please, convert your
- * platform to use drivers/mmc/core/cd-gpio.c
- */
-#include <linux/mmc/host.h>
-static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata)
-{
- if (pdata)
- mmc_detect_change(dev_get_drvdata(pdata->dev),
- msecs_to_jiffies(100));
-}
-
-/*
* data for the NAND controller
*/
struct tmio_nand_data {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6d48fce06b4a..811f91cf5e8c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -171,6 +171,7 @@ enum {
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
+ MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1,
MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
@@ -238,7 +239,8 @@ enum {
MLX4_PERM_LOCAL_WRITE = 1 << 11,
MLX4_PERM_REMOTE_READ = 1 << 12,
MLX4_PERM_REMOTE_WRITE = 1 << 13,
- MLX4_PERM_ATOMIC = 1 << 14
+ MLX4_PERM_ATOMIC = 1 << 14,
+ MLX4_PERM_BIND_MW = 1 << 15,
};
enum {
@@ -504,6 +506,18 @@ struct mlx4_mr {
int enabled;
};
+enum mlx4_mw_type {
+ MLX4_MW_TYPE_1 = 1,
+ MLX4_MW_TYPE_2 = 2,
+};
+
+struct mlx4_mw {
+ u32 key;
+ u32 pd;
+ enum mlx4_mw_type type;
+ int enabled;
+};
+
struct mlx4_fmr {
struct mlx4_mr mr;
struct mlx4_mpt_entry *mpt;
@@ -802,8 +816,12 @@ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int npages, int page_shift, struct mlx4_mr *mr);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+ struct mlx4_mw *mw);
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
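
A hedged sketch of the expected call sequence for the new memory-window API, mirroring the existing MR alloc/enable/free flow; the function name and minimal error handling are illustrative.

static int example_create_type2_mw(struct mlx4_dev *dev, u32 pd,
				   struct mlx4_mw *mw)
{
	int err;

	err = mlx4_mw_alloc(dev, pd, MLX4_MW_TYPE_2, mw);
	if (err)
		return err;

	err = mlx4_mw_enable(dev, mw);
	if (err)
		mlx4_mw_free(dev, mw);	/* undo the allocation on failure */

	return err;
}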
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 4b4ad6ffef92..67f46ad6920a 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -265,6 +265,11 @@ struct mlx4_wqe_lso_seg {
__be32 header[0];
};
+enum mlx4_wqe_bind_seg_flags2 {
+ MLX4_WQE_BIND_ZERO_BASED = (1 << 30),
+ MLX4_WQE_BIND_TYPE_2 = (1 << 31),
+};
+
struct mlx4_wqe_bind_seg {
__be32 flags1;
__be32 flags2;
@@ -277,9 +282,9 @@ struct mlx4_wqe_bind_seg {
enum {
MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
- MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
- MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
- MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ = 1 << 29,
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30,
+ MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC = 1 << 31
};
struct mlx4_wqe_fmr_seg {
@@ -304,12 +309,10 @@ struct mlx4_wqe_fmr_ext_seg {
};
struct mlx4_wqe_local_inval_seg {
- __be32 flags;
- u32 reserved1;
+ u64 reserved1;
__be32 mem_key;
- u32 reserved2[2];
- __be32 guest_id;
- __be64 pa;
+ u32 reserved2;
+ u64 reserved3[2];
};
struct mlx4_wqe_raddr_seg {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e7c3f9a0111a..1ede55f292c2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1333,24 +1333,6 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
-#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
-struct movablemem_entry {
- unsigned long start_pfn; /* start pfn of memory segment */
- unsigned long end_pfn; /* end pfn of memory segment (exclusive) */
-};
-
-struct movablemem_map {
- bool acpi; /* true if using SRAT info */
- int nr_map;
- struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
- nodemask_t numa_nodes_hotplug; /* on which nodes we specify memory */
- nodemask_t numa_nodes_kernel; /* on which nodes kernel resides in */
-};
-
-extern void __init insert_movablemem_map(unsigned long start_pfn,
- unsigned long end_pfn);
-extern int __init movablemem_map_overlap(unsigned long start_pfn,
- unsigned long end_pfn);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 5c69315d60cc..61b2c30c903b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -53,6 +53,9 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
+ u8 max_packed_writes;
+ u8 max_packed_reads;
+ u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -83,7 +86,7 @@ struct mmc_ext_csd {
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
- u8 raw_exception_status; /* 53 */
+ u8 raw_exception_status; /* 54 */
u8 raw_partition_support; /* 160 */
u8 raw_rpmb_size_mult; /* 168 */
u8 raw_erased_mem_count; /* 181 */
@@ -187,6 +190,18 @@ struct sdio_func_tuple;
#define SDIO_MAX_FUNCS 7
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+ MMC_BLK_NEW_REQUEST,
+};
+
/* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) in MMC v4.4.
*/
@@ -295,6 +310,11 @@ static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
card->nr_parts++;
}
+static inline bool mmc_large_sector(struct mmc_card *card)
+{
+ return card->ext_csd.data_sector_size == 4096;
+}
+
/*
* The world is not perfect and supplies us with broken mmc/sdio devices.
* For at least some of these bugs we need a work-around.
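
A minimal sketch, assuming a block-layer caller, of how the new mmc_large_sector() helper might be used to pick a logical block size; the 512/4096 values follow directly from the helper's definition above.

static unsigned int example_mmc_logical_block_size(struct mmc_card *card)
{
	/* 4 KiB data sectors for large-sector eMMC, 512 bytes otherwise */
	return mmc_large_sector(card) ? 4096 : 512;
}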
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 5bf7c2274fcb..39613b9a6fc5 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -18,6 +18,9 @@ struct mmc_request;
struct mmc_command {
u32 opcode;
u32 arg;
+#define MMC_CMD23_ARG_REL_WR (1 << 31)
+#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
+#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
#define MMC_RSP_PRESENT (1 << 0)
@@ -120,6 +123,7 @@ struct mmc_data {
s32 host_cookie; /* host private data */
};
+struct mmc_host;
struct mmc_request {
struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
struct mmc_command *cmd;
@@ -128,9 +132,9 @@ struct mmc_request {
struct completion completion;
void (*done)(struct mmc_request *);/* completion function */
+ struct mmc_host *host;
};
-struct mmc_host;
struct mmc_card;
struct mmc_async_req;
@@ -147,6 +151,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
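
A hedged sketch of composing a SET_BLOCK_COUNT (CMD23) argument with the new flag definitions; placing the block count in the low 16 bits is an assumption based on the eMMC command format, and 'blocks'/'rel_wr' are illustrative parameters.

static u32 example_cmd23_arg(unsigned int blocks, bool rel_wr)
{
	u32 arg = blocks & 0xFFFF;	/* block count in the low 16 bits */

	if (rel_wr)
		arg |= MMC_CMD23_ARG_REL_WR;	/* request a reliable write */

	return arg;
}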
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 34be4f47293c..198f0fa44e9f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -209,8 +209,10 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
-/* Write Protect detection not available */
-#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
+
+/* Slot level quirks */
+/* This slot has no write protect */
+#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
struct dma_pdata;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 61a10c17aabd..d6f20cc6415e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,9 +131,11 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Check if the card is pulling dat[0:3] low */
+ int (*card_busy)(struct mmc_host *host);
+
/* The tuning command opcode value is different for SD and eMMC cards */
int (*execute_tuning)(struct mmc_host *host, u32 opcode);
- void (*enable_preset_value)(struct mmc_host *host, bool enable);
int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
@@ -170,6 +172,22 @@ struct mmc_slot {
void *handler_priv;
};
+/**
+ * mmc_context_info - synchronization details for mmc context
+ * @is_done_rcv: wake up reason was a completed request
+ * @is_new_req: wake up reason was a new request
+ * @is_waiting_last_req: mmc context is waiting for a single running request
+ * @wait: wait queue
+ * @lock: lock to protect data fields
PLACEHOLDER-NOT-USED
+ */
+struct mmc_context_info {
+ bool is_done_rcv;
+ bool is_new_req;
+ bool is_waiting_last_req;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
struct regulator;
struct mmc_supply {
@@ -258,6 +276,10 @@ struct mmc_host {
#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
+#define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */
+#define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */
+#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
+ MMC_CAP2_PACKED_WR)
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -331,6 +353,7 @@ struct mmc_host {
struct dentry *debugfs_root;
struct mmc_async_req *areq; /* active async req */
+ struct mmc_context_info context_info; /* async synchronization info */
#ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request;
@@ -341,10 +364,11 @@ struct mmc_host {
unsigned long private[0] ____cacheline_aligned;
};
-extern struct mmc_host *mmc_alloc_host(int extra, struct device *);
-extern int mmc_add_host(struct mmc_host *);
-extern void mmc_remove_host(struct mmc_host *);
-extern void mmc_free_host(struct mmc_host *);
+struct mmc_host *mmc_alloc_host(int extra, struct device *);
+int mmc_add_host(struct mmc_host *);
+void mmc_remove_host(struct mmc_host *);
+void mmc_free_host(struct mmc_host *);
+void mmc_of_parse(struct mmc_host *host);
static inline void *mmc_priv(struct mmc_host *host)
{
@@ -357,16 +381,16 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
-extern int mmc_suspend_host(struct mmc_host *);
-extern int mmc_resume_host(struct mmc_host *);
+int mmc_suspend_host(struct mmc_host *);
+int mmc_resume_host(struct mmc_host *);
-extern int mmc_power_save_host(struct mmc_host *host);
-extern int mmc_power_restore_host(struct mmc_host *host);
+int mmc_power_save_host(struct mmc_host *host);
+int mmc_power_restore_host(struct mmc_host *host);
-extern void mmc_detect_change(struct mmc_host *, unsigned long delay);
-extern void mmc_request_done(struct mmc_host *, struct mmc_request *);
+void mmc_detect_change(struct mmc_host *, unsigned long delay);
+void mmc_request_done(struct mmc_host *, struct mmc_request *);
-extern int mmc_cache_ctrl(struct mmc_host *, u8);
+int mmc_cache_ctrl(struct mmc_host *, u8);
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
@@ -434,6 +458,19 @@ static inline int mmc_boot_partition_access(struct mmc_host *host)
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50);
+}
+
+static inline int mmc_host_packed_wr(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_PACKED_WR;
+}
+
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);
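
A hedged sketch of how the core might initialise the new per-host context_info before using it to synchronise asynchronous requests; the exact call site is not shown in this patch.

static void example_init_context_info(struct mmc_host *host)
{
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	spin_lock_init(&host->context_info.lock);
	init_waitqueue_head(&host->context_info.wait);
}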
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 94d532e41c61..50bcde3677ca 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -139,7 +139,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
-#define R1_EXCEPTION_EVENT (1 << 6) /* sx, a */
+#define R1_EXCEPTION_EVENT (1 << 6) /* sr, a */
#define R1_APP_CMD (1 << 5) /* sr, c */
#define R1_STATE_IDLE 0
@@ -275,7 +275,10 @@ struct _mmc_csd {
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
-#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO */
+#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
+#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
+#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
+#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
@@ -324,6 +327,8 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
+#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
+#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -385,6 +390,9 @@ struct _mmc_csd {
#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
+
+#define EXT_CSD_PACKED_EVENT_EN BIT(3)
+
/*
* EXCEPTION_EVENT_STATUS field
*/
@@ -393,6 +401,9 @@ struct _mmc_csd {
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
#define EXT_CSD_PACKED_FAILURE BIT(3)
+#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
+#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
+
/*
* BKOPS status level
*/
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 4bbc3301fbbf..b838ffc49e4a 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -94,6 +94,7 @@ struct sdhci_host {
#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1)
/* The system physically doesn't support 1.8v, even if the host does */
#define SDHCI_QUIRK2_NO_1_8_V (1<<2)
+#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index b65679ffa880..b76bcf0621f6 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -4,7 +4,6 @@
#include <linux/types.h>
struct platform_device;
-struct tmio_mmc_data;
#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
@@ -26,7 +25,6 @@ struct sh_mobile_sdhi_info {
unsigned long tmio_caps2;
u32 tmio_ocr_mask; /* available MMC voltages */
unsigned int cd_gpio;
- struct tmio_mmc_data *pdata;
void (*set_pwr)(struct platform_device *pdev, int state);
int (*get_cd)(struct platform_device *pdev);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index fed3def62818..779cf7c4a3d1 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -33,8 +33,7 @@ struct ieee1394_device_id {
__u32 model_id;
__u32 specifier_id;
__u32 version;
- kernel_ulong_t driver_data
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data;
};
@@ -148,8 +147,7 @@ struct hid_device_id {
__u16 group;
__u32 vendor;
__u32 product;
- kernel_ulong_t driver_data
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data;
};
/* s390 CCW devices */
@@ -173,8 +171,6 @@ struct ccw_device_id {
struct ap_device_id {
__u16 match_flags; /* which fields to match against */
__u8 dev_type; /* device type */
- __u8 pad1;
- __u32 pad2;
kernel_ulong_t driver_info;
};
@@ -184,13 +180,10 @@ struct ap_device_id {
struct css_device_id {
__u8 match_flags;
__u8 type; /* subchannel type */
- __u16 pad2;
- __u32 pad3;
kernel_ulong_t driver_data;
};
-#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */
- /* to workaround crosscompile issues */
+#define ACPI_ID_LEN 9
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
@@ -231,11 +224,7 @@ struct of_device_id
char name[32];
char type[32];
char compatible[128];
-#ifdef __KERNEL__
const void *data;
-#else
- kernel_ulong_t data;
-#endif
};
/* VIO */
@@ -260,24 +249,14 @@ struct pcmcia_device_id {
/* for pseudo multi-function devices */
__u8 device_no;
- __u32 prod_id_hash[4]
- __attribute__((aligned(sizeof(__u32))));
+ __u32 prod_id_hash[4];
/* not matched against in kernelspace*/
-#ifdef __KERNEL__
const char * prod_id[4];
-#else
- kernel_ulong_t prod_id[4]
- __attribute__((aligned(sizeof(kernel_ulong_t))));
-#endif
/* not matched against */
kernel_ulong_t driver_info;
-#ifdef __KERNEL__
char * cisfile;
-#else
- kernel_ulong_t cisfile;
-#endif
};
#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
@@ -373,8 +352,7 @@ struct sdio_device_id {
__u8 class; /* Standard interface or SDIO_ANY_ID */
__u16 vendor; /* Vendor or SDIO_ANY_ID */
__u16 device; /* Device ID or SDIO_ANY_ID */
- kernel_ulong_t driver_data /* Data private to the driver */
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data; /* Data private to the driver */
};
/* SSB core, see drivers/ssb/ */
@@ -420,8 +398,7 @@ struct virtio_device_id {
*/
struct hv_vmbus_device_id {
__u8 guid[16];
- kernel_ulong_t driver_data /* Data private to the driver */
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data; /* Data private to the driver */
};
/* rpmsg */
@@ -440,8 +417,7 @@ struct rpmsg_device_id {
struct i2c_device_id {
char name[I2C_NAME_SIZE];
- kernel_ulong_t driver_data /* Data private to the driver */
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data; /* Data private to the driver */
};
/* spi */
@@ -451,8 +427,7 @@ struct i2c_device_id {
struct spi_device_id {
char name[SPI_NAME_SIZE];
- kernel_ulong_t driver_data /* Data private to the driver */
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data; /* Data private to the driver */
};
/* dmi */
@@ -484,15 +459,6 @@ struct dmi_strmatch {
char substr[79];
};
-#ifndef __KERNEL__
-struct dmi_system_id {
- kernel_ulong_t callback;
- kernel_ulong_t ident;
- struct dmi_strmatch matches[4];
- kernel_ulong_t driver_data
- __attribute__((aligned(sizeof(kernel_ulong_t))));
-};
-#else
struct dmi_system_id {
int (*callback)(const struct dmi_system_id *);
const char *ident;
@@ -506,7 +472,6 @@ struct dmi_system_id {
* error: storage size of '__mod_dmi_device_table' isn't known
*/
#define dmi_device_id dmi_system_id
-#endif
#define DMI_MATCH(a, b) { a, b }
@@ -515,8 +480,7 @@ struct dmi_system_id {
struct platform_device_id {
char name[PLATFORM_NAME_SIZE];
- kernel_ulong_t driver_data
- __attribute__((aligned(sizeof(kernel_ulong_t))));
+ kernel_ulong_t driver_data;
};
#define MDIO_MODULE_PREFIX "mdio:"
@@ -572,11 +536,7 @@ struct isapnp_device_id {
struct amba_id {
unsigned int id;
unsigned int mask;
-#ifndef __KERNEL__
- kernel_ulong_t data;
-#else
void *data;
-#endif
};
/*
diff --git a/include/linux/module.h b/include/linux/module.h
index 1375ee3f03aa..ead1b5719a12 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -396,13 +396,13 @@ bool is_module_address(unsigned long addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
-static inline int within_module_core(unsigned long addr, struct module *mod)
+static inline int within_module_core(unsigned long addr, const struct module *mod)
{
return (unsigned long)mod->module_core <= addr &&
addr < (unsigned long)mod->module_core + mod->core_size;
}
-static inline int within_module_init(unsigned long addr, struct module *mod)
+static inline int within_module_init(unsigned long addr, const struct module *mod)
{
return (unsigned long)mod->module_init <= addr &&
addr < (unsigned long)mod->module_init + mod->init_size;
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index f6eb4332ac92..4b02512e421c 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -245,6 +245,7 @@ struct map_info {
unsigned long pfow_base;
unsigned long map_priv_1;
unsigned long map_priv_2;
+ struct device_node *device_node;
void *fldrv_priv;
struct mtd_chip_driver *fldrv;
};
@@ -328,7 +329,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
- map_word r = {{0} };
+ map_word r;
if (map_bankwidth_is_1(map))
r.x[0] = *(unsigned char *)ptr;
@@ -342,6 +343,8 @@ static inline map_word map_word_load(struct map_info *map, const void *ptr)
#endif
else if (map_bankwidth_is_large(map))
memcpy(r.x, ptr, map->bankwidth);
+ else
+ BUG();
return r;
}
@@ -391,7 +394,7 @@ static inline map_word map_word_ff(struct map_info *map)
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
- map_word uninitialized_var(r);
+ map_word r;
if (map_bankwidth_is_1(map))
r.x[0] = __raw_readb(map->virt + ofs);
@@ -425,6 +428,8 @@ static inline void inline_map_write(struct map_info *map, const map_word datum,
#endif
else if (map_bankwidth_is_large(map))
memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+ else
+ BUG();
mb();
}
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e111fa419a4e..7b8fc73810ad 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -13,6 +13,7 @@
#define _LINUX_NFS4_H
#include <linux/list.h>
+#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
struct nfs4_ace {
@@ -20,7 +21,10 @@ struct nfs4_ace {
uint32_t flag;
uint32_t access_mask;
int whotype;
- uid_t who;
+ union {
+ kuid_t who_uid;
+ kgid_t who_gid;
+ };
};
struct nfs4_acl {
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index 2dcef3ab58b6..0f4b79da6584 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -36,6 +36,7 @@
#ifndef NFS_IDMAP_H
#define NFS_IDMAP_H
+#include <linux/uidgid.h>
#include <uapi/linux/nfs_idmap.h>
@@ -67,10 +68,10 @@ void nfs_fattr_init_names(struct nfs_fattr *fattr,
void nfs_fattr_free_names(struct nfs_fattr *);
void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
-int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *);
-int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *);
-int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t);
-int nfs_map_gid_to_group(const struct nfs_server *, __u32, char *, size_t);
+int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, kuid_t *);
+int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t *);
+int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t);
+int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t);
extern unsigned int nfs_idmap_cache_timeout;
#endif /* NFS_IDMAP_H */
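
A hedged sketch of what the kuid_t conversion implies for callers: an on-the-wire id would be produced with from_kuid() in whichever user namespace applies; init_user_ns is shown purely for illustration.

static uid_t example_encode_uid(kuid_t uid)
{
	/* translate the kernel-internal uid into a plain numeric id */
	return from_kuid(&init_user_ns, uid);
}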
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 29adb12c7ecf..4b993d358dad 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -48,8 +48,8 @@ struct nfs_fattr {
unsigned int valid; /* which fields are valid */
umode_t mode;
__u32 nlink;
- __u32 uid;
- __u32 gid;
+ kuid_t uid;
+ kgid_t gid;
dev_t rdev;
__u64 size;
union {
@@ -233,6 +233,7 @@ struct nfs4_layoutget_args {
struct inode *inode;
struct nfs_open_context *ctx;
nfs4_stateid stateid;
+ unsigned long timestamp;
struct nfs4_layoutdriver_data layout;
};
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 24c139288db4..7898c997dfea 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -49,8 +49,8 @@ struct svc_export {
struct auth_domain * ex_client;
int ex_flags;
struct path ex_path;
- uid_t ex_anon_uid;
- gid_t ex_anon_gid;
+ kuid_t ex_anon_uid;
+ kgid_t ex_anon_gid;
int ex_fsid;
unsigned char * ex_uuid; /* 16 byte fsid */
struct nfsd4_fs_locations ex_fslocs;
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
new file mode 100644
index 000000000000..d15073e080dd
--- /dev/null
+++ b/include/linux/of_dma.h
@@ -0,0 +1,74 @@
+/*
+ * OF helpers for DMA request / controller
+ *
+ * Based on of_gpio.h
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OF_DMA_H
+#define __LINUX_OF_DMA_H
+
+#include <linux/of.h>
+#include <linux/dmaengine.h>
+
+struct device_node;
+
+struct of_dma {
+ struct list_head of_dma_controllers;
+ struct device_node *of_node;
+ int of_dma_nbcells;
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *);
+ void *of_dma_data;
+ int use_count;
+};
+
+struct of_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
+#ifdef CONFIG_OF
+extern int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data);
+extern int of_dma_controller_free(struct device_node *np);
+extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ char *name);
+extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
+#else
+static inline int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+
+static inline int of_dma_controller_free(struct device_node *np)
+{
+ return -ENODEV;
+}
+
+static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ char *name)
+{
+ return NULL;
+}
+
+static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __LINUX_OF_DMA_H */
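
As a hedged usage sketch, a slave driver could request its channel by name through the new OF helper; the "rx" name is an illustrative dma-names entry, not something defined by this header.

static struct dma_chan *example_get_rx_chan(struct device_node *np)
{
	/* returns NULL when CONFIG_OF is disabled or no channel is found */
	return of_dma_request_slave_channel(np, "rx");
}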
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 214e0ebcb84d..3aca2b8def33 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -47,15 +47,6 @@ int opp_enable(struct device *dev, unsigned long freq);
int opp_disable(struct device *dev, unsigned long freq);
struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-
-#ifdef CONFIG_OF
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_OF */
#else
static inline unsigned long opp_get_voltage(struct opp *opp)
{
@@ -112,6 +103,15 @@ static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
}
#endif /* CONFIG_PM_OPP */
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+ return -EINVAL;
+}
+#endif
+
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
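
A hedged sketch of the probe-time sequence a cpufreq driver might use now that of_init_opp_table() is grouped under CONFIG_PM_OPP; error handling is kept minimal.

static int example_init_opp(struct device *cpu_dev,
			    struct cpufreq_frequency_table **table)
{
	int ret = of_init_opp_table(cpu_dev);	/* populate OPPs from DT */

	if (ret)
		return ret;

	return opp_init_cpufreq_table(cpu_dev, table);
}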
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 6fa4dd2a3b9e..2461033a7987 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -286,6 +286,7 @@ struct pci_dev {
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+ bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
@@ -378,6 +379,8 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
void (*release_fn)(struct pci_host_bridge *),
void *release_data);
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
+
/*
* The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
* to P2P or CardBus bridge windows) go in a table. Additional ones (for
@@ -674,6 +677,7 @@ extern struct list_head pci_root_buses; /* list of all known PCI buses */
/* Some device drivers need know if pci is initiated */
extern int no_pci_devices(void);
+void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture specific versions may override this (weak) */
@@ -1700,12 +1704,21 @@ static inline bool pci_is_pcie(struct pci_dev *dev)
}
/**
+ * pcie_caps_reg - get the PCIe Capabilities Register
+ * @dev: PCI device
+ */
+static inline u16 pcie_caps_reg(const struct pci_dev *dev)
+{
+ return dev->pcie_flags_reg;
+}
+
+/**
* pci_pcie_type - get the PCIe device/port type
* @dev: PCI device
*/
static inline int pci_pcie_type(const struct pci_dev *dev)
{
- return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
+ return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
void pci_request_acs(void);
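
A hedged sketch showing the new accessor in use: reading the PCIe capability version through pcie_caps_reg() instead of touching pcie_flags_reg directly. PCI_EXP_FLAGS_VERS is the existing mask from the uapi PCI register definitions.

static inline int example_pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}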
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 31717bd287fd..f11c1c2609d5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2802,6 +2802,7 @@
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 2381c973d897..a089a3c447fc 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
do { \
- struct hlist_node *pos___; \
if ((pid) != NULL) \
- hlist_for_each_entry_rcu((task), pos___, \
+ hlist_for_each_entry_rcu((task), \
&(pid)->tasks[type], pids[type].node) {
/*
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
index 9ff93b065686..4b781014b0a0 100644
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ b/include/linux/platform_data/dma-ste-dma40.h
@@ -147,6 +147,16 @@ struct stedma40_chan_cfg {
* @memcpy_conf_log: default configuration of logical channel memcpy
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
+ * @soft_lli_chans: A vector that marks the physical channels which will use
+ * LLI by SW, to avoid a HW bug that exists in some versions of the
+ * controller. SoftLLI introduces relink overhead that could impact
+ * performance for certain use cases.
+ * @num_of_soft_lli_chans: The number of channels that need to be configured
+ * to use SoftLLI.
+ * @use_esram_lcla: flag for mapping the lcla into esram region
+ * @num_of_phy_chans: The number of physical channels implemented in HW.
+ * 0 means the number of channels is read from the DMA HW, but this is
+ * only valid for 'multiple of 4' channel counts, such as 8.
*/
struct stedma40_platform_data {
u32 dev_len;
@@ -157,7 +167,10 @@ struct stedma40_platform_data {
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
int disabled_channels[STEDMA40_MAX_PHYS];
+ int *soft_lli_chans;
+ int num_of_soft_lli_chans;
bool use_esram_lcla;
+ int num_of_phy_chans;
};
#ifdef CONFIG_STE_DMA40
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
new file mode 100644
index 000000000000..1bd5244d1dcd
--- /dev/null
+++ b/include/linux/platform_data/elm.h
@@ -0,0 +1,54 @@
+/*
+ * BCH Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ELM_H
+#define __ELM_H
+
+enum bch_ecc {
+ BCH4_ECC = 0,
+ BCH8_ECC,
+};
+
+/* The ELM supports 8 error syndrome processes */
+#define ERROR_VECTOR_MAX 8
+
+#define BCH8_ECC_OOB_BYTES 13
+#define BCH4_ECC_OOB_BYTES 7
+/* RBL requires 14 bytes even though BCH8 uses only 13 bytes */
+#define BCH8_SIZE (BCH8_ECC_OOB_BYTES + 1)
+/* Uses 1 extra byte to handle erased pages */
+#define BCH4_SIZE (BCH4_ECC_OOB_BYTES + 1)
+
+/**
+ * struct elm_errorvec - error vector for elm
+ * @error_reported: set true for vectors for which an error is reported
+ * @error_uncorrectable: number of uncorrectable errors
+ * @error_count: number of correctable errors in the sector
+ * @error_loc: buffer for error location
+ *
+ */
+struct elm_errorvec {
+ bool error_reported;
+ bool error_uncorrectable;
+ int error_count;
+ int error_loc[ERROR_VECTOR_MAX];
+};
+
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec);
+void elm_config(struct device *dev, enum bch_ecc bch_type);
+#endif /* __ELM_H */
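
A hedged sketch of the expected ELM call order for one BCH8-protected page; 'dev' and 'ecc_calc' would come from the NAND driver and are illustrative here.

static void example_elm_decode(struct device *dev, u8 *ecc_calc)
{
	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];

	elm_config(dev, BCH8_ECC);
	elm_decode_bch_error_page(dev, ecc_calc, err_vec);
	/* err_vec[] now holds per-vector error counts and locations */
}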
diff --git a/include/linux/platform_data/exynos_thermal.h b/include/linux/platform_data/exynos_thermal.h
index a7bdb2f63b73..da7e6274b175 100644
--- a/include/linux/platform_data/exynos_thermal.h
+++ b/include/linux/platform_data/exynos_thermal.h
@@ -53,6 +53,8 @@ struct freq_clip_table {
* struct exynos_tmu_platform_data
* @threshold: basic temperature for generating interrupt
* 25 <= threshold <= 125 [unit: degree Celsius]
+ * @threshold_falling: differential value for setting the threshold
+ * of the temperature falling interrupt.
* @trigger_levels: array for each interrupt levels
* [unit: degree Celsius]
* 0: temperature for trigger_level0 interrupt
@@ -97,6 +99,7 @@ struct freq_clip_table {
*/
struct exynos_tmu_platform_data {
u8 threshold;
+ u8 threshold_falling;
u8 trigger_levels[4];
bool trigger_level0_en;
bool trigger_level1_en;
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
new file mode 100644
index 000000000000..1509570d5a3f
--- /dev/null
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -0,0 +1,87 @@
+/*
+ * LP55XX Platform Data Header
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.h, leds-lp5523.h
+ */
+
+#ifndef _LEDS_LP55XX_H
+#define _LEDS_LP55XX_H
+
+/* Clock configuration */
+#define LP55XX_CLOCK_AUTO 0
+#define LP55XX_CLOCK_INT 1
+#define LP55XX_CLOCK_EXT 2
+
+/* Bits in LP5521 CONFIG register. 'update_config' in lp55xx_platform_data */
+#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
+#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
+#define LP5521_CLK_INT 1 /* Internal clock */
+#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
+
+struct lp55xx_led_config {
+ const char *name;
+ u8 chan_nr;
+ u8 led_current; /* mA x10, 0 if led is not connected */
+ u8 max_current;
+};
+
+struct lp55xx_predef_pattern {
+ u8 *r;
+ u8 *g;
+ u8 *b;
+ u8 size_r;
+ u8 size_g;
+ u8 size_b;
+};
+
+/*
+ * struct lp55xx_platform_data
+ * @led_config : Configurable led class device
+ * @num_channels : Number of LED channels
+ * @label : Used for naming LEDs
+ * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT
+ * @setup_resources : Platform specific function before enabling the chip
+ * @release_resources : Platform specific function after disabling the chip
+ * @enable : EN pin control by platform side
+ * @patterns : Predefined pattern data for RGB channels
+ * @num_patterns : Number of patterns
+ * @update_config : Value of CONFIG register
+ */
+struct lp55xx_platform_data {
+
+ /* LED channel configuration */
+ struct lp55xx_led_config *led_config;
+ u8 num_channels;
+ const char *label;
+
+ /* Clock configuration */
+ u8 clock_mode;
+
+ /* Platform specific functions */
+ int (*setup_resources)(void);
+ void (*release_resources)(void);
+ void (*enable)(bool state);
+
+ /* Predefined pattern data */
+ struct lp55xx_predef_pattern *patterns;
+ unsigned int num_patterns;
+
+ /* _CONFIG register */
+ u8 update_config;
+};
+
+#endif /* _LEDS_LP55XX_H */
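
A hedged, board-file style example of filling in the new platform data with a single channel; the LED name and current values are illustrative only.

static struct lp55xx_led_config example_led_config[] = {
	{
		.name		= "example:red",
		.chan_nr	= 0,
		.led_current	= 50,	/* 5.0 mA */
		.max_current	= 130,	/* 13.0 mA */
	},
};

static struct lp55xx_platform_data example_lp55xx_pdata = {
	.led_config	= example_led_config,
	.num_channels	= ARRAY_SIZE(example_led_config),
	.clock_mode	= LP55XX_CLOCK_AUTO,
};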
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index aaf97481f413..b4a0521ce411 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -39,5 +39,6 @@ struct esdhc_platform_data {
unsigned int cd_gpio;
enum wp_types wp_type;
enum cd_types cd_type;
+ int max_bus_width;
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-sdhci-tegra.h b/include/linux/platform_data/mmc-sdhci-tegra.h
deleted file mode 100644
index 8f8430697686..000000000000
--- a/include/linux/platform_data/mmc-sdhci-tegra.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2009 Palm, Inc.
- * Author: Yvonne Yip <y@palm.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __PLATFORM_DATA_TEGRA_SDHCI_H
-#define __PLATFORM_DATA_TEGRA_SDHCI_H
-
-#include <linux/mmc/host.h>
-
-struct tegra_sdhci_platform_data {
- int cd_gpio;
- int wp_gpio;
- int power_gpio;
- int is_8bit;
- int pm_flags;
-};
-
-#endif
diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h
new file mode 100644
index 000000000000..39f7405cdac5
--- /dev/null
+++ b/include/linux/platform_data/sh_ipmmu.h
@@ -0,0 +1,18 @@
+/* sh_ipmmu.h
+ *
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __SH_IPMMU_H__
+#define __SH_IPMMU_H__
+
+struct shmobile_ipmmu_platform_data {
+ const char * const *dev_names;
+ unsigned int num_dev_names;
+};
+
+#endif /* __SH_IPMMU_H__ */
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index e697c85ad3bc..fa579b4c666b 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -55,6 +55,7 @@ struct ohci_hcd_omap_platform_data {
};
struct usbhs_omap_platform_data {
+ int nports;
enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
int reset_gpio_port[OMAP3_HS_USB_PORTS];
struct regulator *regulator[OMAP3_HS_USB_PORTS];
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
new file mode 100644
index 000000000000..1689ff4c3bfd
--- /dev/null
+++ b/include/linux/platform_data/ux500_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * STE Ux500 Watchdog platform data
+ */
+#ifndef __UX500_WDT_H
+#define __UX500_WDT_H
+
+/**
+ * struct ux500_wdt_data
+ */
+struct ux500_wdt_data {
+ unsigned int timeout;
+ bool has_28_bits_resolution;
+};
+
+#endif /* __UX500_WDT_H */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 6d661f32e0e4..a4df2042b79c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -146,6 +146,8 @@ struct pwm_ops {
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
* @pwms: array of PWM devices allocated by the framework
+ * @can_sleep: must be true if the .config(), .enable() or .disable()
+ * operations may sleep
*/
struct pwm_chip {
struct device *dev;
@@ -159,6 +161,7 @@ struct pwm_chip {
struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
+ bool can_sleep;
};
#if IS_ENABLED(CONFIG_PWM)
@@ -174,11 +177,16 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
const struct of_phandle_args *args);
-struct pwm_device *pwm_get(struct device *dev, const char *consumer);
+struct pwm_device *pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
void pwm_put(struct pwm_device *pwm);
-struct pwm_device *devm_pwm_get(struct device *dev, const char *consumer);
+struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id);
void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
+
+bool pwm_can_sleep(struct pwm_device *pwm);
#else
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
@@ -213,6 +221,12 @@ static inline struct pwm_device *pwm_get(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline struct pwm_device *of_pwm_get(struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void pwm_put(struct pwm_device *pwm)
{
}
@@ -223,9 +237,21 @@ static inline struct pwm_device *devm_pwm_get(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
}
+
+static inline bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return false;
+}
#endif
struct pwm_lookup {
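
A hedged consumer-side sketch of the renamed con_id lookup plus the new pwm_can_sleep() hint; "backlight" is an illustrative consumer name.

static int example_request_pwm(struct device *dev)
{
	struct pwm_device *pwm = devm_pwm_get(dev, "backlight");

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	if (pwm_can_sleep(pwm))
		dev_dbg(dev, "PWM ops may sleep; avoid atomic context\n");

	return 0;
}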
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 58fdef125252..d13371134c59 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -405,6 +405,7 @@ struct quota_module_name {
#define INIT_QUOTA_MODULE_NAMES {\
{QFMT_VFS_OLD, "quota_v1"},\
{QFMT_VFS_V0, "quota_v2"},\
+ {QFMT_VFS_V1, "quota_v2"},\
{0, NULL}}
#endif /* _QUOTA_ */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index c92dd28eaa6c..8089e35d47ac 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(hlist_next_rcu(pos)))
+#define hlist_for_each_entry_rcu(pos, head, member) \
+ for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
- for (pos = rcu_dereference_bh((head)->first); \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_rcu_bh(pos, head, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \
- for (pos = rcu_dereference((pos)->next); \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference(pos->next))
+#define hlist_for_each_entry_continue_rcu(pos, member) \
+ for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+ typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
-#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
- for (pos = rcu_dereference_bh((pos)->next); \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+ typeof(*(pos)), member))
#endif /* __KERNEL__ */
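
A hedged sketch of what the signature change means for callers: the separate hlist_node cursor disappears and only the type cursor remains. The structure and summation are illustrative.

struct example_entry {
	struct hlist_node node;
	int value;
};

static int example_sum(struct hlist_head *head)
{
	struct example_entry *e;
	int sum = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, node)
		sum += e->value;
	rcu_read_unlock();

	return sum;
}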
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4bd6c06eb28e..2d8bdaef9611 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -231,6 +231,41 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
*/
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
+/*
+ * sg page iterator
+ *
+ * Iterates over sg entries page-by-page. On each successful iteration,
+ * @piter->page points to the current page, @piter->sg to the sg holding this
+ * page and @piter->sg_pgoffset to the page's page offset within the sg. The
+ * iteration stops either when the maximum number of sg entries has been
+ * reached or when a terminating sg (sg_last(sg) == true) is reached.
+ */
+struct sg_page_iter {
+ struct page *page; /* current page */
+ struct scatterlist *sg; /* sg holding the page */
+ unsigned int sg_pgoffset; /* page offset within the sg */
+
+ /* these are internal states, keep away */
+ unsigned int __nents; /* remaining sg entries */
+ int __pg_advance; /* nr pages to advance at the
+ * next step */
+};
+
+bool __sg_page_iter_next(struct sg_page_iter *piter);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset);
+
+/**
+ * for_each_sg_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @piter: page iterator to hold current page, sg, sg_pgoffset
+ * @nents: maximum number of sg entries to iterate over
+ * @pgoffset: starting page offset
+ */
+#define for_each_sg_page(sglist, piter, nents, pgoffset) \
+ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
+ __sg_page_iter_next(piter);)
/*
* Mapping sg iterator
@@ -258,11 +293,11 @@ struct sg_mapping_iter {
void *addr; /* pointer to the mapped area */
size_t length; /* length of the mapped area */
size_t consumed; /* number of consumed bytes */
+ struct sg_page_iter piter; /* page iterator */
/* these are internal states, keep away */
- struct scatterlist *__sg; /* current entry */
- unsigned int __nents; /* nr of remaining entries */
- unsigned int __offset; /* offset within sg */
+ unsigned int __offset; /* offset within page */
+ unsigned int __remaining; /* remaining bytes on page */
unsigned int __flags;
};
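
A hedged sketch of walking an sg list page by page with the new iterator; the body only counts pages.

static unsigned int example_count_sg_pages(struct scatterlist *sgl,
					   unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(sgl, &piter, nents, 0)
		n++;

	return n;
}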
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0655570c67eb..d35d2b6ddbfb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
-extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
@@ -347,11 +346,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
-/* get/set_dumpable() values */
-#define SUID_DUMPABLE_DISABLED 0
-#define SUID_DUMPABLE_ENABLED 1
-#define SUID_DUMPABLE_SAFE 2
-
/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
new file mode 100644
index 000000000000..07d8e53bedfc
--- /dev/null
+++ b/include/linux/sunrpc/addr.h
@@ -0,0 +1,170 @@
+/*
+ * linux/include/linux/sunrpc/addr.h
+ *
+ * Various routines for copying and comparing sockaddrs and for
+ * converting them to and from presentation format.
+ */
+#ifndef _LINUX_SUNRPC_ADDR_H
+#define _LINUX_SUNRPC_ADDR_H
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/ipv6.h>
+
+size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
+size_t rpc_pton(struct net *, const char *, const size_t,
+ struct sockaddr *, const size_t);
+char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
+size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
+ struct sockaddr *, const size_t);
+
+static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+}
+
+static inline void rpc_set_port(struct sockaddr *sap,
+ const unsigned short port)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)sap)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+ break;
+ }
+}
+
+#define IPV6_SCOPE_DELIMITER '%'
+#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
+
+static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
+ const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
+
+ return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
+}
+
+static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+ dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
+ const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
+
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return false;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+ return true;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
+ struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
+
+ dsin6->sin6_family = ssin6->sin6_family;
+ dsin6->sin6_addr = ssin6->sin6_addr;
+ dsin6->sin6_scope_id = ssin6->sin6_scope_id;
+ return true;
+}
+#else /* !(IS_ENABLED(CONFIG_IPV6) */
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ return false;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ return false;
+}
+#endif /* !(IS_ENABLED(CONFIG_IPV6) */
+
+/**
+ * rpc_cmp_addr - compare the address portion of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ *
+ * Just compares the family and address portion. Ignores port, but
+ * compares the scope if it's a link-local address.
+ *
+ * Returns true if the addrs are equal, false if they aren't.
+ */
+static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (sap1->sa_family == sap2->sa_family) {
+ switch (sap1->sa_family) {
+ case AF_INET:
+ return __rpc_cmp_addr4(sap1, sap2);
+ case AF_INET6:
+ return __rpc_cmp_addr6(sap1, sap2);
+ }
+ }
+ return false;
+}
+
+/**
+ * rpc_copy_addr - copy the address portion of one sockaddr to another
+ * @dst: destination sockaddr
+ * @src: source sockaddr
+ *
+ * Just copies the address portion and family. Ignores port, scope, etc.
+ * Caller is responsible for making certain that dst is large enough to hold
+ * the address in src. Returns true if address family is supported. Returns
+ * false otherwise.
+ */
+static inline bool rpc_copy_addr(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ switch (src->sa_family) {
+ case AF_INET:
+ return __rpc_copy_addr4(dst, src);
+ case AF_INET6:
+ return __rpc_copy_addr6(dst, src);
+ }
+ return false;
+}
+
+/**
+ * rpc_get_scope_id - return scopeid for a given sockaddr
+ * @sa: sockaddr to get scopeid from
+ *
+ * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
+ * not an AF_INET6 address.
+ */
+static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+{
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+ return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+}
+
+#endif /* _LINUX_SUNRPC_ADDR_H */
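
A hedged sketch combining the relocated helpers: copy a peer address and force a well-known port; 2049 (NFS) is purely illustrative.

static bool example_clone_with_port(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	if (!rpc_copy_addr(dst, src))
		return false;

	rpc_set_port(dst, 2049);
	return true;
}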
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index f25ba922baaf..58fda1c3c783 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -17,14 +17,15 @@
#include <linux/atomic.h>
#include <linux/rcupdate.h>
+#include <linux/uidgid.h>
/* size of the nodename buffer */
#define UNX_MAXNODENAME 32
/* Work around the lack of a VFS credential */
struct auth_cred {
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
struct group_info *group_info;
const char *principal;
unsigned char machine_cred : 1;
@@ -48,7 +49,7 @@ struct rpc_cred {
unsigned long cr_flags; /* various flags */
atomic_t cr_count; /* ref count */
- uid_t cr_uid;
+ kuid_t cr_uid;
/* per-flavor data */
};
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 5dc9ee4d616e..303399b1ba59 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -83,6 +83,10 @@ struct cache_detail {
int (*cache_upcall)(struct cache_detail *,
struct cache_head *);
+ void (*cache_request)(struct cache_detail *cd,
+ struct cache_head *ch,
+ char **bpp, int *blen);
+
int (*cache_parse)(struct cache_detail *,
char *buf, int len);
@@ -157,11 +161,7 @@ sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
extern int
-sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
- void (*cache_request)(struct cache_detail *,
- struct cache_head *,
- char **,
- int *));
+sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
extern void cache_clean_deferred(void *owner);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 34206b84d8da..2cf4ffaa3cd4 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -160,162 +160,11 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
-size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
-size_t rpc_pton(struct net *, const char *, const size_t,
- struct sockaddr *, const size_t);
-char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
-size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
- struct sockaddr *, const size_t);
-
-static inline unsigned short rpc_get_port(const struct sockaddr *sap)
-{
- switch (sap->sa_family) {
- case AF_INET:
- return ntohs(((struct sockaddr_in *)sap)->sin_port);
- case AF_INET6:
- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
- }
- return 0;
-}
-
-static inline void rpc_set_port(struct sockaddr *sap,
- const unsigned short port)
-{
- switch (sap->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)sap)->sin_port = htons(port);
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
- break;
- }
-}
-
-#define IPV6_SCOPE_DELIMITER '%'
-#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
-
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
- const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
-
- return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
-}
-
-static inline bool __rpc_copy_addr4(struct sockaddr *dst,
- const struct sockaddr *src)
-{
- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
- struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
-
- dsin->sin_family = ssin->sin_family;
- dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
- return true;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
- const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
-
- if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
- return false;
- else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- return sin1->sin6_scope_id == sin2->sin6_scope_id;
-
- return true;
-}
-
-static inline bool __rpc_copy_addr6(struct sockaddr *dst,
- const struct sockaddr *src)
-{
- const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
- struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
-
- dsin6->sin6_family = ssin6->sin6_family;
- dsin6->sin6_addr = ssin6->sin6_addr;
- return true;
-}
-#else /* !(IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- return false;
-}
-
-static inline bool __rpc_copy_addr6(struct sockaddr *dst,
- const struct sockaddr *src)
-{
- return false;
-}
-#endif /* !(IS_ENABLED(CONFIG_IPV6) */
-
-/**
- * rpc_cmp_addr - compare the address portion of two sockaddrs.
- * @sap1: first sockaddr
- * @sap2: second sockaddr
- *
- * Just compares the family and address portion. Ignores port, scope, etc.
- * Returns true if the addrs are equal, false if they aren't.
- */
-static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- if (sap1->sa_family == sap2->sa_family) {
- switch (sap1->sa_family) {
- case AF_INET:
- return __rpc_cmp_addr4(sap1, sap2);
- case AF_INET6:
- return __rpc_cmp_addr6(sap1, sap2);
- }
- }
- return false;
-}
-
-/**
- * rpc_copy_addr - copy the address portion of one sockaddr to another
- * @dst: destination sockaddr
- * @src: source sockaddr
- *
- * Just copies the address portion and family. Ignores port, scope, etc.
- * Caller is responsible for making certain that dst is large enough to hold
- * the address in src. Returns true if address family is supported. Returns
- * false otherwise.
- */
-static inline bool rpc_copy_addr(struct sockaddr *dst,
- const struct sockaddr *src)
-{
- switch (src->sa_family) {
- case AF_INET:
- return __rpc_copy_addr4(dst, src);
- case AF_INET6:
- return __rpc_copy_addr6(dst, src);
- }
- return false;
-}
-
-/**
- * rpc_get_scope_id - return scopeid for a given sockaddr
- * @sa: sockaddr to get scopeid from
- *
- * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
- * not an AF_INET6 address.
- */
-static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
-{
- if (sa->sa_family != AF_INET6)
- return 0;
-
- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
-}
-
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
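The one addition to this header, rpc_get_timeout(), lets callers read the client's RPC timeout without reaching into rpc_clnt internals; the large block removed above reappears in the new <linux/sunrpc/addr.h> shown earlier in this patch. A sketch of the accessor, assuming the returned value is in jiffies:

        #include <linux/sunrpc/clnt.h>

        static unsigned long demo_timeout_secs(struct rpc_clnt *clnt)
        {
                return rpc_get_timeout(clnt) / HZ;      /* jiffies to seconds */
        }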
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 676ddf53b3ee..1f0216b9a6c9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -50,6 +50,7 @@ struct svc_pool {
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
+ int sp_task_pending; /* has pending task */
} ____cacheline_aligned_in_smp;
/*
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index dd74084a9799..ff374ab30839 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -18,8 +18,8 @@
#include <linux/cred.h>
struct svc_cred {
- uid_t cr_uid;
- gid_t cr_gid;
+ kuid_t cr_uid;
+ kgid_t cr_gid;
struct group_info *cr_group_info;
u32 cr_flavor; /* pseudoflavor */
char *cr_principal; /* for gss */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 63988990bd36..15f9204ee70b 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -56,7 +56,7 @@ struct xdr_buf {
struct kvec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
- struct page ** pages; /* Array of contiguous pages */
+ struct page ** pages; /* Array of pages */
unsigned int page_base, /* Start of page data */
page_len, /* Length of page data */
flags; /* Flags for data disposition */
@@ -152,6 +152,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index fe82022478e7..f0bd7f90a90d 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -74,6 +74,8 @@ enum thermal_trend {
THERMAL_TREND_STABLE, /* temperature is stable */
THERMAL_TREND_RAISING, /* temperature is raising */
THERMAL_TREND_DROPPING, /* temperature is dropping */
+ THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
+ THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
/* Events supported by Thermal Netlink */
@@ -121,6 +123,7 @@ struct thermal_zone_device_ops {
int (*set_trip_hyst) (struct thermal_zone_device *, int,
unsigned long);
int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
int (*notify) (struct thermal_zone_device *, int,
@@ -163,6 +166,7 @@ struct thermal_zone_device {
int polling_delay;
int temperature;
int last_temperature;
+ int emul_temperature;
int passive;
unsigned int forced_passive;
const struct thermal_zone_device_ops *ops;
@@ -244,9 +248,11 @@ int thermal_register_governor(struct thermal_governor *);
void thermal_unregister_governor(struct thermal_governor *);
#ifdef CONFIG_NET
-extern int thermal_generate_netlink_event(u32 orig, enum events event);
+extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event);
#else
-static inline int thermal_generate_netlink_event(u32 orig, enum events event)
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event)
{
return 0;
}
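Callers of the netlink helper now pass the zone itself rather than a raw origin id, and the same call works against the stub above when CONFIG_NET is off. A hedged sketch, names hypothetical:

        #include <linux/thermal.h>

        static void demo_notify_userspace(struct thermal_zone_device *tz,
                                          enum events event)
        {
                /* tz as returned by thermal_zone_device_register() */
                if (thermal_generate_netlink_event(tz, event))
                        dev_warn(&tz->device,
                                 "failed to send thermal netlink event\n");
        }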
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b9bd2e6c73cc..4ce009324933 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -21,7 +21,7 @@ struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
struct uid_gid_map projid_map;
- struct kref kref;
+ atomic_t count;
struct user_namespace *parent;
kuid_t owner;
kgid_t group;
@@ -35,18 +35,18 @@ extern struct user_namespace init_user_ns;
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
- kref_get(&ns->kref);
+ atomic_inc(&ns->count);
return ns;
}
extern int create_user_ns(struct cred *new);
extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
-extern void free_user_ns(struct kref *kref);
+extern void free_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
- if (ns)
- kref_put(&ns->kref, free_user_ns);
+ if (ns && atomic_dec_and_test(&ns->count))
+ free_user_ns(ns);
}
struct seq_operations;
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index c52215ff4245..75818744ab59 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -27,6 +27,14 @@
#define VEXPRESS_GPIO_MMC_CARDIN 0
#define VEXPRESS_GPIO_MMC_WPROT 1
#define VEXPRESS_GPIO_FLASH_WPn 2
+#define VEXPRESS_GPIO_LED0 3
+#define VEXPRESS_GPIO_LED1 4
+#define VEXPRESS_GPIO_LED2 5
+#define VEXPRESS_GPIO_LED3 6
+#define VEXPRESS_GPIO_LED4 7
+#define VEXPRESS_GPIO_LED5 8
+#define VEXPRESS_GPIO_LED6 9
+#define VEXPRESS_GPIO_LED7 10
#define VEXPRESS_RES_FUNC(_site, _func) \
{ \
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index cf8adb1f5b2c..ff6714e6d0f5 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -78,7 +78,7 @@ struct virtio_device {
int index;
struct device dev;
struct virtio_device_id id;
- struct virtio_config_ops *config;
+ const struct virtio_config_ops *config;
struct list_head vqs;
/* Note that this is a Linux set_bit-style bitmap. */
unsigned long features[1];
@@ -126,4 +126,13 @@ static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
int register_virtio_driver(struct virtio_driver *drv);
void unregister_virtio_driver(struct virtio_driver *drv);
+
+/* module_virtio_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_virtio_driver(__virtio_driver) \
+ module_driver(__virtio_driver, register_virtio_driver, \
+ unregister_virtio_driver)
#endif /* _LINUX_VIRTIO_H */
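A usage sketch for the new macro; the driver, the match table and both callbacks are entirely hypothetical:

        #include <linux/module.h>
        #include <linux/virtio.h>
        #include <linux/mod_devicetable.h>

        static const struct virtio_device_id demo_id_table[] = {
                { VIRTIO_DEV_ANY_ID, VIRTIO_DEV_ANY_ID },  /* match anything; demo only */
                { 0 },
        };

        static int demo_probe(struct virtio_device *vdev)
        {
                return 0;       /* a real driver would set up its virtqueues here */
        }

        static void demo_remove(struct virtio_device *vdev)
        {
        }

        static struct virtio_driver demo_virtio_driver = {
                .driver.name    = KBUILD_MODNAME,
                .driver.owner   = THIS_MODULE,
                .id_table       = demo_id_table,
                .probe          = demo_probe,
                .remove         = demo_remove,
        };
        module_virtio_driver(demo_virtio_driver);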
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 50ae7d0c279e..e8d65718560b 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
void scrollback(struct vc_data *vc, int lines);
void scrollfront(struct vc_data *vc, int lines);
+void clear_buffer_attributes(struct vc_data *vc);
void update_region(struct vc_data *vc, unsigned long start, int count);
void redraw_screen(struct vc_data *vc, int is_switch);
#define update_screen(x) redraw_screen(x, 0)
@@ -130,6 +131,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
int vt_waitactive(int n);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
+extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
extern int unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
int vty_init(const struct file_operations *console_fops);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b82a83aba311..9a9367c0c076 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -87,9 +87,9 @@ int inode_wait(void *);
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
-int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
- enum wb_reason reason);
+int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
enum wb_reason reason);
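For callers the change is a rename: the *_if_idle variants are gone and the try_to_* helpers are called the same way. A sketch (superblock pointer hypothetical):

        #include <linux/fs.h>
        #include <linux/writeback.h>

        static void demo_nudge_writeback(struct super_block *sb)
        {
                /* was: writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE); */
                try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
        }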
diff --git a/include/media/adv7343.h b/include/media/adv7343.h
index d6f8a4e1a1fc..944757be49bb 100644
--- a/include/media/adv7343.h
+++ b/include/media/adv7343.h
@@ -20,4 +20,56 @@
#define ADV7343_COMPONENT_ID (1)
#define ADV7343_SVIDEO_ID (2)
+/**
+ * struct adv7343_power_mode - power mode configuration.
+ * @sleep_mode: when enabled, current consumption is reduced to the
+ * microampere level. All DACs and the internal PLL circuit are disabled.
+ * Registers can still be read from and written to in sleep mode.
+ * @pll_control: PLL and oversampling control. This control allows the
+ * internal PLL 1 circuit to be powered down and the oversampling to be
+ * switched off.
+ * @dac_1: power on/off DAC 1.
+ * @dac_2: power on/off DAC 2.
+ * @dac_3: power on/off DAC 3.
+ * @dac_4: power on/off DAC 4.
+ * @dac_5: power on/off DAC 5.
+ * @dac_6: power on/off DAC 6.
+ *
+ * Power mode register (Register 0x0); for more information refer to the
+ * REGISTER MAP ACCESS section of the datasheet [1], table 17, page 30.
+ *
+ * [1] http://www.analog.com/static/imported-files/data_sheets/ADV7342_7343.pdf
+ */
+struct adv7343_power_mode {
+ bool sleep_mode;
+ bool pll_control;
+ bool dac_1;
+ bool dac_2;
+ bool dac_3;
+ bool dac_4;
+ bool dac_5;
+ bool dac_6;
+};
+
+/**
+ * struct adv7343_sd_config - SD Only Output Configuration.
+ * @sd_dac_out1: Configure SD DAC Output 1.
+ * @sd_dac_out2: Configure SD DAC Output 2.
+ */
+struct adv7343_sd_config {
+ /* SD only Output Configuration */
+ bool sd_dac_out1;
+ bool sd_dac_out2;
+};
+
+/**
+ * struct adv7343_platform_data - Platform data values and access functions.
+ * @mode_config: Configuration for power mode.
+ * @sd_config: SD Only Configuration.
+ */
+struct adv7343_platform_data {
+ struct adv7343_power_mode mode_config;
+ struct adv7343_sd_config sd_config;
+};
+
#endif /* End of #ifndef ADV7343_H */
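Board code would normally hand this platform data to the encoder through its i2c_board_info. The values below are made up and the I2C address is board specific:

        #include <linux/i2c.h>
        #include <media/adv7343.h>

        static struct adv7343_platform_data demo_adv7343_pdata = {
                .mode_config = {
                        .sleep_mode     = false,
                        .pll_control    = false,
                        .dac_1          = true,
                        .dac_2          = true,
                        .dac_3          = true,
                },
                .sd_config = {
                        .sd_dac_out1    = true,
                },
        };

        static struct i2c_board_info demo_adv7343_info = {
                I2C_BOARD_INFO("adv7343", 0x2a),        /* address is board specific */
                .platform_data  = &demo_adv7343_pdata,
        };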
diff --git a/include/media/blackfin/bfin_capture.h b/include/media/blackfin/bfin_capture.h
index 2038a8a3f8aa..56b9ce4472fc 100644
--- a/include/media/blackfin/bfin_capture.h
+++ b/include/media/blackfin/bfin_capture.h
@@ -9,6 +9,7 @@ struct ppi_info;
struct bcap_route {
u32 input;
u32 output;
+ u32 ppi_control;
};
struct bfin_capture_config {
@@ -30,8 +31,8 @@ struct bfin_capture_config {
unsigned long ppi_control;
/* ppi interrupt mask */
u32 int_mask;
- /* horizontal blanking clocks */
- int blank_clocks;
+ /* horizontal blanking pixels */
+ int blank_pixels;
};
#endif
diff --git a/include/media/blackfin/ppi.h b/include/media/blackfin/ppi.h
index 8f72f8a0b3d0..d0697f4edf87 100644
--- a/include/media/blackfin/ppi.h
+++ b/include/media/blackfin/ppi.h
@@ -21,22 +21,42 @@
#define _PPI_H_
#include <linux/interrupt.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_ppi.h>
+/* EPPI */
#ifdef EPPI_EN
#define PORT_EN EPPI_EN
+#define PORT_DIR EPPI_DIR
#define DMA32 0
#define PACK_EN PACKEN
#endif
+/* EPPI3 */
+#ifdef EPPI0_CTL2
+#define PORT_EN EPPI_CTL_EN
+#define PORT_DIR EPPI_CTL_DIR
+#define PACK_EN EPPI_CTL_PACKEN
+#define DMA32 0
+#define DLEN_8 EPPI_CTL_DLEN08
+#define DLEN_16 EPPI_CTL_DLEN16
+#endif
+
struct ppi_if;
struct ppi_params {
- int width;
- int height;
- int bpp;
- unsigned long ppi_control;
- u32 int_mask;
- int blank_clocks;
+ u32 width; /* width in pixels */
+ u32 height; /* height in lines */
+ u32 hdelay; /* delay after the HSYNC in pixels */
+ u32 vdelay; /* delay after the VSYNC in lines */
+ u32 line; /* total pixels per line */
+ u32 frame; /* total lines per frame */
+ u32 hsync; /* HSYNC length in pixels */
+ u32 vsync; /* VSYNC length in lines */
+ int bpp; /* bits per pixel */
+ int dlen; /* data length for ppi in bits */
+ u32 ppi_control; /* ppi configuration */
+ u32 int_mask; /* interrupt mask */
};
struct ppi_ops {
@@ -51,6 +71,7 @@ struct ppi_ops {
enum ppi_type {
PPI_TYPE_PPI,
PPI_TYPE_EPPI,
+ PPI_TYPE_EPPI3,
};
struct ppi_info {
@@ -65,7 +86,8 @@ struct ppi_if {
unsigned long ppi_control;
const struct ppi_ops *ops;
const struct ppi_info *info;
- bool err_int;
+ bool err_int; /* whether to request the error interrupt */
+ bool err; /* set when the PPI reports a FIFO error */
void *priv;
};
diff --git a/include/media/davinci/vpbe_osd.h b/include/media/davinci/vpbe_osd.h
index 5ab0d8d41f68..42628fcfe1bd 100644
--- a/include/media/davinci/vpbe_osd.h
+++ b/include/media/davinci/vpbe_osd.h
@@ -26,7 +26,9 @@
#include <media/davinci/vpbe_types.h>
-#define VPBE_OSD_SUBDEV_NAME "vpbe-osd"
+#define DM644X_VPBE_OSD_SUBDEV_NAME "dm644x,vpbe-osd"
+#define DM365_VPBE_OSD_SUBDEV_NAME "dm365,vpbe-osd"
+#define DM355_VPBE_OSD_SUBDEV_NAME "dm355,vpbe-osd"
/**
* enum osd_layer
@@ -387,7 +389,6 @@ struct osd_state {
};
struct osd_platform_data {
- enum vpbe_version vpbe_type;
int field_inv_wa_enable;
};
diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h
index cc78c2eb16da..476fafc2f522 100644
--- a/include/media/davinci/vpbe_venc.h
+++ b/include/media/davinci/vpbe_venc.h
@@ -20,7 +20,9 @@
#include <media/v4l2-subdev.h>
#include <media/davinci/vpbe_types.h>
-#define VPBE_VENC_SUBDEV_NAME "vpbe-venc"
+#define DM644X_VPBE_VENC_SUBDEV_NAME "dm644x,vpbe-venc"
+#define DM365_VPBE_VENC_SUBDEV_NAME "dm365,vpbe-venc"
+#define DM355_VPBE_VENC_SUBDEV_NAME "dm355,vpbe-venc"
/* venc events */
#define VENC_END_OF_FRAME BIT(0)
@@ -28,7 +30,6 @@
#define VENC_SECOND_FIELD BIT(2)
struct venc_platform_data {
- enum vpbe_version venc_type;
int (*setup_pinmux)(enum v4l2_mbus_pixelcode if_type,
int field);
int (*setup_clock)(enum vpbe_enc_timings_type type,
diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h
index b586495bcd53..153473daaa32 100644
--- a/include/media/davinci/vpss.h
+++ b/include/media/davinci/vpss.h
@@ -105,4 +105,20 @@ enum vpss_wbl_sel {
};
/* clear wbl overflow flag for DM6446 */
int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel);
+
+/* set sync polarity */
+void vpss_set_sync_pol(struct vpss_sync_pol sync);
+/* set the PG_FRAME_SIZE register */
+void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size);
+/*
+ * vpss_dma_complete_interrupt - check and clear the VPSS DMA-complete interrupt
+ *
+ * Return values:
+ * 0 - interrupt occurred and was cleared
+ * 1 - interrupt did not occur
+ * 2 - interrupt status not available
+ */
+int vpss_dma_complete_interrupt(void);
+
#endif
diff --git a/include/media/ov7670.h b/include/media/ov7670.h
index b133bc123031..1913d5123072 100644
--- a/include/media/ov7670.h
+++ b/include/media/ov7670.h
@@ -15,6 +15,8 @@ struct ov7670_config {
int min_height; /* Filter out smaller sizes */
int clock_speed; /* External clock speed (MHz) */
bool use_smbus; /* Use smbus I/O instead of I2C */
+ bool pll_bypass; /* Choose whether to bypass the PLL */
+ bool pclk_hb_disable; /* Disable toggling pixclk during horizontal blanking */
};
#endif
diff --git a/include/media/ov9650.h b/include/media/ov9650.h
new file mode 100644
index 000000000000..d630cf9e028d
--- /dev/null
+++ b/include/media/ov9650.h
@@ -0,0 +1,27 @@
+/*
+ * OV9650/OV9652 camera sensors driver
+ *
+ * Copyright (C) 2013 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OV9650_H_
+#define OV9650_H_
+
+/**
+ * struct ov9650_platform_data - ov9650 driver platform data
+ * @mclk_frequency: the sensor's master clock frequency in Hz
+ * @gpio_pwdn: number of a GPIO connected to OV965X PWDN pin
+ * @gpio_reset: number of a GPIO connected to OV965X RESET pin
+ *
+ * If either @gpio_pwdn or @gpio_reset is unused, it should be set to a
+ * negative value. @mclk_frequency must always be specified.
+ */
+struct ov9650_platform_data {
+ unsigned long mclk_frequency;
+ int gpio_pwdn;
+ int gpio_reset;
+};
+#endif /* OV9650_H_ */
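A board-file sketch with made-up numbers, following the rule above that unused GPIO lines get a negative value:

        #include <media/ov9650.h>

        static struct ov9650_platform_data demo_ov965x_pdata = {
                .mclk_frequency = 24000000UL,   /* 24 MHz master clock */
                .gpio_pwdn      = -1,           /* PWDN not wired on this board */
                .gpio_reset     = 115,          /* hypothetical GPIO number */
        };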
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 74f55a3f14eb..f74ee6f89711 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -182,6 +182,7 @@ void rc_map_init(void);
#define RC_MAP_TEVII_NEC "rc-tevii-nec"
#define RC_MAP_TIVO "rc-tivo"
#define RC_MAP_TOTAL_MEDIA_IN_HAND "rc-total-media-in-hand"
+#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02"
#define RC_MAP_TREKSTOR "rc-trekstor"
#define RC_MAP_TT_1500 "rc-tt-1500"
#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
diff --git a/include/media/s5c73m3.h b/include/media/s5c73m3.h
new file mode 100644
index 000000000000..ccb9e5448762
--- /dev/null
+++ b/include/media/s5c73m3.h
@@ -0,0 +1,55 @@
+/*
+ * Samsung LSI S5C73M3 8M pixel camera driver
+ *
+ * Copyright (C) 2012, Samsung Electronics, Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#ifndef MEDIA_S5C73M3__
+#define MEDIA_S5C73M3__
+
+#include <linux/videodev2.h>
+#include <media/v4l2-mediabus.h>
+
+/**
+ * struct s5c73m3_gpio - data structure describing a GPIO
+ * @gpio: GPIO number
+ * @level: indicates active state of the @gpio
+ */
+struct s5c73m3_gpio {
+ int gpio;
+ int level;
+};
+
+/**
+ * struct s5c73m3_platform_data - s5c73m3 driver platform data
+ * @mclk_frequency: sensor's master clock frequency in Hz
+ * @gpio_reset: GPIO driving RESET pin
+ * @gpio_stby: GPIO driving STBY pin
+ * @bus_type: the bus the sensor is connected over (parallel or MIPI-CSI2)
+ * @nlanes: maximum number of MIPI-CSI lanes used
+ * @horiz_flip: default horizontal image flip value, non-zero to enable
+ * @vert_flip: default vertical image flip value, non-zero to enable
+ */
+struct s5c73m3_platform_data {
+ unsigned long mclk_frequency;
+
+ struct s5c73m3_gpio gpio_reset;
+ struct s5c73m3_gpio gpio_stby;
+
+ enum v4l2_mbus_type bus_type;
+ u8 nlanes;
+ u8 horiz_flip;
+ u8 vert_flip;
+};
+
+#endif /* MEDIA_S5C73M3__ */
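A corresponding board-file sketch, all values hypothetical:

        #include <media/s5c73m3.h>

        static struct s5c73m3_platform_data demo_s5c73m3_pdata = {
                .mclk_frequency = 24000000UL,
                .gpio_reset     = { .gpio = 88, .level = 0 },   /* active low */
                .gpio_stby      = { .gpio = 89, .level = 0 },
                .bus_type       = V4L2_MBUS_CSI2,
                .nlanes         = 4,
        };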
diff --git a/include/media/s5p_fimc.h b/include/media/s5p_fimc.h
index eaea62a382f8..28f3590aa031 100644
--- a/include/media/s5p_fimc.h
+++ b/include/media/s5p_fimc.h
@@ -1,8 +1,8 @@
/*
- * Samsung S5P SoC camera interface driver header
+ * Samsung S5P/Exynos4 SoC series camera interface driver header
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
- * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -14,45 +14,58 @@
#include <media/media-entity.h>
-enum cam_bus_type {
- FIMC_ITU_601 = 1,
- FIMC_ITU_656,
- FIMC_MIPI_CSI2,
- FIMC_LCD_WB, /* FIFO link from LCD mixer */
+/*
+ * Enumeration of the FIMC data bus types.
+ */
+enum fimc_bus_type {
+ /* Camera parallel bus */
+ FIMC_BUS_TYPE_ITU_601 = 1,
+ /* Camera parallel bus with embedded synchronization */
+ FIMC_BUS_TYPE_ITU_656,
+ /* Camera MIPI-CSI2 serial bus */
+ FIMC_BUS_TYPE_MIPI_CSI2,
+ /* FIFO link from LCD controller (WriteBack A) */
+ FIMC_BUS_TYPE_LCD_WRITEBACK_A,
+ /* FIFO link from LCD controller (WriteBack B) */
+ FIMC_BUS_TYPE_LCD_WRITEBACK_B,
+ /* FIFO link from FIMC-IS */
+ FIMC_BUS_TYPE_ISP_WRITEBACK = FIMC_BUS_TYPE_LCD_WRITEBACK_B,
};
struct i2c_board_info;
/**
- * struct s5p_fimc_isp_info - image sensor information required for host
- * interace configuration.
+ * struct fimc_source_info - video source description required for the host
+ * interface configuration
*
* @board_info: pointer to I2C subdevice's board info
* @clk_frequency: frequency of the clock the host interface provides to sensor
- * @bus_type: determines bus type, MIPI, ITU-R BT.601 etc.
+ * @fimc_bus_type: FIMC camera input type
+ * @sensor_bus_type: image sensor bus type, MIPI, ITU-R BT.601 etc.
+ * @flags: the parallel sensor bus flags defining signals polarity (V4L2_MBUS_*)
* @i2c_bus_num: i2c control bus id the sensor is attached to
* @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU)
* @clk_id: index of the SoC peripheral clock for sensors
- * @flags: the parallel bus flags defining signals polarity (V4L2_MBUS_*)
*/
-struct s5p_fimc_isp_info {
+struct fimc_source_info {
struct i2c_board_info *board_info;
unsigned long clk_frequency;
- enum cam_bus_type bus_type;
+ enum fimc_bus_type fimc_bus_type;
+ enum fimc_bus_type sensor_bus_type;
+ u16 flags;
u16 i2c_bus_num;
u16 mux_id;
- u16 flags;
u8 clk_id;
};
/**
* struct s5p_platform_fimc - camera host interface platform data
*
- * @isp_info: properties of camera sensor required for host interface setup
- * @num_clients: the number of attached image sensors
+ * @source_info: properties of an image source for the host interface setup
+ * @num_clients: the number of attached image sources
*/
struct s5p_platform_fimc {
- struct s5p_fimc_isp_info *isp_info;
+ struct fimc_source_info *source_info;
int num_clients;
};
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 6442edc2a151..2cc70cf318bf 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -23,11 +23,11 @@
#include <media/v4l2-device.h>
struct file;
-struct soc_camera_link;
+struct soc_camera_desc;
struct soc_camera_device {
struct list_head list; /* list of all registered devices */
- struct soc_camera_link *link;
+ struct soc_camera_desc *sdesc;
struct device *pdev; /* Platform device */
struct device *parent; /* Camera host device */
struct device *control; /* E.g., the i2c client */
@@ -46,9 +46,8 @@ struct soc_camera_device {
int num_user_formats;
enum v4l2_field field; /* Preserve field over close() */
void *host_priv; /* Per-device host private data */
- /* soc_camera.c private count. Only accessed with .video_lock held */
+ /* soc_camera.c private count. Only accessed with .host_lock held */
int use_count;
- struct mutex video_lock; /* Protects device data */
struct file *streamer; /* stream owner */
union {
struct videobuf_queue vb_vidq;
@@ -62,7 +61,7 @@ struct soc_camera_device {
struct soc_camera_host {
struct v4l2_device v4l2_dev;
struct list_head list;
- struct mutex host_lock; /* Protect during probing */
+ struct mutex host_lock; /* Protect pipeline modifications */
unsigned char nr; /* Host number */
u32 capabilities;
void *priv;
@@ -117,26 +116,72 @@ struct soc_camera_host_ops {
struct i2c_board_info;
struct regulator_bulk_data;
-struct soc_camera_link {
- /* Camera bus id, used to match a camera and a bus */
- int bus_id;
+struct soc_camera_subdev_desc {
/* Per camera SOCAM_SENSOR_* bus flags */
unsigned long flags;
- int i2c_adapter_id;
- struct i2c_board_info *board_info;
- const char *module_name;
- void *priv;
+
+ /* sensor driver private platform data */
+ void *drv_priv;
/* Optional regulators that have to be managed on power on/off events */
struct regulator_bulk_data *regulators;
int num_regulators;
+ /* Optional callbacks to power on or off and reset the sensor */
+ int (*power)(struct device *, int);
+ int (*reset)(struct device *);
+
+ /*
+ * Some platforms may support different data widths than the sensor's
+ * native ones due to different data line routing. Let the board code
+ * override the width flags.
+ */
+ int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags);
+ unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *);
+ void (*free_bus)(struct soc_camera_subdev_desc *);
+};
+
+struct soc_camera_host_desc {
+ /* Camera bus id, used to match a camera and a bus */
+ int bus_id;
+ int i2c_adapter_id;
+ struct i2c_board_info *board_info;
+ const char *module_name;
+
/*
* For non-I2C devices platform has to provide methods to add a device
* to the system and to remove it
*/
int (*add_device)(struct soc_camera_device *);
void (*del_device)(struct soc_camera_device *);
+};
+
+/*
+ * This MUST be kept binary-identical to struct soc_camera_link below, until
+ * it is completely replaced by this one, after which we can split it into its
+ * two components.
+ */
+struct soc_camera_desc {
+ struct soc_camera_subdev_desc subdev_desc;
+ struct soc_camera_host_desc host_desc;
+};
+
+/* Prepare to replace this struct: don't change its layout any more! */
+struct soc_camera_link {
+ /*
+ * Subdevice part - keep at top and compatible to
+ * struct soc_camera_subdev_desc
+ */
+
+ /* Per camera SOCAM_SENSOR_* bus flags */
+ unsigned long flags;
+
+ void *priv;
+
+ /* Optional regulators that have to be managed on power on/off events */
+ struct regulator_bulk_data *regulators;
+ int num_regulators;
+
/* Optional callbacks to power on or off and reset the sensor */
int (*power)(struct device *, int);
int (*reset)(struct device *);
@@ -148,6 +193,24 @@ struct soc_camera_link {
int (*set_bus_param)(struct soc_camera_link *, unsigned long flags);
unsigned long (*query_bus_param)(struct soc_camera_link *);
void (*free_bus)(struct soc_camera_link *);
+
+ /*
+ * Host part - keep at bottom and compatible to
+ * struct soc_camera_host_desc
+ */
+
+ /* Camera bus id, used to match a camera and a bus */
+ int bus_id;
+ int i2c_adapter_id;
+ struct i2c_board_info *board_info;
+ const char *module_name;
+
+ /*
+ * For non-I2C devices platform has to provide methods to add a device
+ * to the system and to remove it
+ */
+ int (*add_device)(struct soc_camera_device *);
+ void (*del_device)(struct soc_camera_device *);
};
static inline struct soc_camera_host *to_soc_camera_host(
@@ -158,10 +221,10 @@ static inline struct soc_camera_host *to_soc_camera_host(
return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev);
}
-static inline struct soc_camera_link *to_soc_camera_link(
+static inline struct soc_camera_desc *to_soc_camera_desc(
const struct soc_camera_device *icd)
{
- return icd->link;
+ return icd->sdesc;
}
static inline struct device *to_soc_camera_control(
@@ -251,19 +314,17 @@ static inline void soc_camera_limit_side(int *start, int *length,
*start = start_min + length_max - *length;
}
-unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
- unsigned long flags);
-unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
const struct v4l2_mbus_config *cfg);
-int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl);
-int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl);
+int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd);
+int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd);
static inline int soc_camera_set_power(struct device *dev,
- struct soc_camera_link *icl, bool on)
+ struct soc_camera_subdev_desc *ssdd, bool on)
{
- return on ? soc_camera_power_on(dev, icl)
- : soc_camera_power_off(dev, icl);
+ return on ? soc_camera_power_on(dev, ssdd)
+ : soc_camera_power_off(dev, ssdd);
}
/* This is only temporary here - until v4l2-subdev begins to link to video_device */
@@ -275,7 +336,7 @@ static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_clien
return icd ? icd->vdev : NULL;
}
-static inline struct soc_camera_link *soc_camera_i2c_to_link(const struct i2c_client *client)
+static inline struct soc_camera_subdev_desc *soc_camera_i2c_to_desc(const struct i2c_client *client)
{
return client->dev.platform_data;
}
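New board code can already describe a camera with the split soc_camera_desc while the transitional soc_camera_link stays around for existing users; the subdevice half and the host half are filled independently. A sketch with hypothetical values (board_info, power callbacks and the like would point at board-specific objects):

        #include <media/soc_camera.h>

        static struct soc_camera_desc demo_camera_desc = {
                .subdev_desc = {
                        .flags          = 0,    /* SOCAM_SENSOR_* bus flags */
                        /* .drv_priv, .power, .reset: board specific, omitted */
                },
                .host_desc = {
                        .bus_id         = 0,
                        .i2c_adapter_id = 1,
                        .module_name    = "demo_sensor",
                        /* .board_info: the sensor's i2c_board_info, omitted */
                },
        };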
diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h
index 8aa4200a0b1d..1e5065dab430 100644
--- a/include/media/soc_camera_platform.h
+++ b/include/media/soc_camera_platform.h
@@ -38,10 +38,12 @@ static inline int soc_camera_platform_add(struct soc_camera_device *icd,
void (*release)(struct device *dev),
int id)
{
- struct soc_camera_platform_info *info = plink->priv;
+ struct soc_camera_subdev_desc *ssdd =
+ (struct soc_camera_subdev_desc *)plink;
+ struct soc_camera_platform_info *info = ssdd->drv_priv;
int ret;
- if (icd->link != plink)
+ if (&icd->sdesc->subdev_desc != ssdd)
return -ENODEV;
if (*pdev)
@@ -70,7 +72,9 @@ static inline void soc_camera_platform_del(const struct soc_camera_device *icd,
struct platform_device *pdev,
const struct soc_camera_link *plink)
{
- if (icd->link != plink || !pdev)
+ const struct soc_camera_subdev_desc *ssdd =
+ (const struct soc_camera_subdev_desc *)plink;
+ if (&icd->sdesc->subdev_desc != ssdd || !pdev)
return;
platform_device_unregister(pdev);
diff --git a/include/media/tvp514x.h b/include/media/tvp514x.h
index 74387e83f5b9..86ed7e806830 100644
--- a/include/media/tvp514x.h
+++ b/include/media/tvp514x.h
@@ -96,12 +96,9 @@ enum tvp514x_output {
/**
* struct tvp514x_platform_data - Platform data values and access functions.
- * @power_set: Power state access function, zero is off, non-zero is on.
- * @ifparm: Interface parameters access function.
- * @priv_data_set: Device private data (pointer) access function.
* @clk_polarity: Clock polarity of the current interface.
- * @ hs_polarity: HSYNC Polarity configuration for current interface.
- * @ vs_polarity: VSYNC Polarity configuration for current interface.
+ * @hs_polarity: HSYNC Polarity configuration for current interface.
+ * @vs_polarity: VSYNC Polarity configuration for current interface.
*/
struct tvp514x_platform_data {
/* Interface control params */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1a0b2db4c5d3..ec7c9c00b256 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -225,4 +225,6 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
+void v4l2_get_timestamp(struct timeval *tv);
+
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 96509119f28f..f00d42bc01a6 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -53,6 +53,8 @@ struct v4l2_ctrl_ops {
int (*s_ctrl)(struct v4l2_ctrl *ctrl);
};
+typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
+
/** struct v4l2_ctrl - The control structure.
* @node: The list node.
* @ev_subs: The list of control event subscriptions.
@@ -72,6 +74,8 @@ struct v4l2_ctrl_ops {
* set this flag directly.
* @has_volatiles: If set, then one or more members of the cluster are volatile.
* Drivers should never touch this flag.
+ * @call_notify: If set, then call the handler's notify function whenever the
+ * control's value changes.
* @manual_mode_value: If the is_auto flag is set, then this is the value
* of the auto control that determines if that control is in
* manual mode. So if the value of the auto control equals this
@@ -119,6 +123,7 @@ struct v4l2_ctrl {
unsigned int is_private:1;
unsigned int is_auto:1;
unsigned int has_volatiles:1;
+ unsigned int call_notify:1;
unsigned int manual_mode_value:8;
const struct v4l2_ctrl_ops *ops;
@@ -177,6 +182,10 @@ struct v4l2_ctrl_ref {
* control is needed multiple times, so this is a simple
* optimization.
* @buckets: Buckets for the hashing. Allows for quick control lookup.
+ * @notify: A notify callback that is called whenever the control changes value.
+ * Note that the handler's lock is held when the notify function
+ * is called!
+ * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
* @nr_of_buckets: Total number of buckets in the array.
* @error: The error code of the first failed control addition.
*/
@@ -187,6 +196,8 @@ struct v4l2_ctrl_handler {
struct list_head ctrl_refs;
struct v4l2_ctrl_ref *cached;
struct v4l2_ctrl_ref **buckets;
+ v4l2_ctrl_notify_fnc notify;
+ void *notify_priv;
u16 nr_of_buckets;
int error;
};
@@ -507,6 +518,26 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
*/
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
+/** v4l2_ctrl_modify_range() - Update the range of a control.
+ * @ctrl: The control to update.
+ * @min: The control's minimum value.
+ * @max: The control's maximum value.
+ * @step: The control's step value
+ * @def: The control's default value.
+ *
+ * Update the range of a control on the fly. This works for control types
+ * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
+ * @step value is interpreted as a menu_skip_mask.
+ *
+ * An error is returned if one of the range arguments is invalid for this
+ * control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ */
+int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s32 min, s32 max, u32 step, s32 def);
+
/** v4l2_ctrl_lock() - Helper function to lock the handler
* associated with the control.
* @ctrl: The control to lock.
@@ -525,6 +556,20 @@ static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
mutex_unlock(ctrl->handler->lock);
}
+/** v4l2_ctrl_notify() - Function to set a notify callback for a control.
+ * @ctrl: The control.
+ * @notify: The callback function.
+ * @priv: The callback private handle, passed as argument to the callback.
+ *
+ * This function sets a callback function for the control. If @ctrl is NULL,
+ * then it will do nothing. If @notify is NULL, then the notify callback will
+ * be removed.
+ *
+ * There can be only one notify. If another already exists, then a WARN_ON
+ * will be issued and the function will do nothing.
+ */
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
+
/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
* @ctrl: The control.
*
@@ -609,4 +654,12 @@ int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs
int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
+/* Can be used as a subscribe_event function that just subscribes control
+ events. */
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+/* Log all controls owned by subdev's control handler. */
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd);
+
#endif
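Two of the additions above are meant to be called from drivers: v4l2_ctrl_modify_range() adjusts a control's range at run time and v4l2_ctrl_notify() attaches the single allowed notify callback. A hedged sketch; the driver structure and the concrete limits are made up:

        #include <media/v4l2-ctrls.h>

        struct demo_dev {                       /* hypothetical driver state */
                struct v4l2_ctrl *gain_ctrl;
                struct v4l2_ctrl *brightness_ctrl;
                s32 brightness;
        };

        static void demo_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
        {
                struct demo_dev *dev = priv;

                /* called with the handler's lock held, so keep this short */
                dev->brightness = ctrl->val;
        }

        static int demo_after_mode_switch(struct demo_dev *dev)
        {
                int ret;

                /* shrink the gain range for the new sensor mode */
                ret = v4l2_ctrl_modify_range(dev->gain_ctrl, 0, 127, 1, 32);
                if (ret)
                        return ret;

                v4l2_ctrl_notify(dev->brightness_ctrl, demo_ctrl_notify, dev);
                return 0;
        }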
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index eff85f934b24..be05d019de25 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -64,6 +64,7 @@
*/
struct v4l2_fh;
+struct v4l2_subdev;
struct v4l2_subscribed_event;
struct video_device;
@@ -129,5 +130,6 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub);
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);
-
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
#endif /* V4L2_EVENT_H */
diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h
new file mode 100644
index 000000000000..10daf92ff1ab
--- /dev/null
+++ b/include/media/v4l2-image-sizes.h
@@ -0,0 +1,34 @@
+/*
+ * Standard image size definitions
+ *
+ * Copyright (C) 2013, Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _IMAGE_SIZES_H
+#define _IMAGE_SIZES_H
+
+#define CIF_WIDTH 352
+#define CIF_HEIGHT 288
+
+#define QCIF_WIDTH 176
+#define QCIF_HEIGHT 144
+
+#define QQCIF_WIDTH 88
+#define QQCIF_HEIGHT 72
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+#define QVGA_WIDTH 320
+#define QVGA_HEIGHT 240
+
+#define SXGA_WIDTH 1280
+#define SXGA_HEIGHT 1024
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#endif /* _IMAGE_SIZES_H */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 7e82d2b193d5..d3eef01da648 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -125,7 +125,7 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma);
-struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops);
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 7184853ca360..27dfe85772b1 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -407,17 +407,17 @@ struct p9_wstat {
char *gid;
char *muid;
char *extension; /* 9p2000.u extensions */
- u32 n_uid; /* 9p2000.u extensions */
- u32 n_gid; /* 9p2000.u extensions */
- u32 n_muid; /* 9p2000.u extensions */
+ kuid_t n_uid; /* 9p2000.u extensions */
+ kgid_t n_gid; /* 9p2000.u extensions */
+ kuid_t n_muid; /* 9p2000.u extensions */
};
struct p9_stat_dotl {
u64 st_result_mask;
struct p9_qid qid;
u32 st_mode;
- u32 st_uid;
- u32 st_gid;
+ kuid_t st_uid;
+ kgid_t st_gid;
u64 st_nlink;
u64 st_rdev;
u64 st_size;
@@ -471,8 +471,8 @@ struct p9_stat_dotl {
struct p9_iattr_dotl {
u32 valid;
u32 mode;
- u32 uid;
- u32 gid;
+ kuid_t uid;
+ kgid_t gid;
u64 size;
u64 atime_sec;
u64 atime_nsec;
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index fc9b90b0c052..5ff70f433e87 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -187,7 +187,7 @@ struct p9_fid {
int mode;
struct p9_qid qid;
u32 iounit;
- uid_t uid;
+ kuid_t uid;
void *rdir;
@@ -220,17 +220,17 @@ void p9_client_destroy(struct p9_client *clnt);
void p9_client_disconnect(struct p9_client *clnt);
void p9_client_begin_disconnect(struct p9_client *clnt);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
- char *uname, u32 n_uname, char *aname);
+ char *uname, kuid_t n_uname, char *aname);
struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
char **wnames, int clone);
int p9_client_open(struct p9_fid *fid, int mode);
int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
char *extension);
int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname);
-int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, gid_t gid,
+int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid,
struct p9_qid *qid);
int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
- gid_t gid, struct p9_qid *qid);
+ kgid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
@@ -250,9 +250,9 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask);
int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode,
- dev_t rdev, gid_t gid, struct p9_qid *);
+ dev_t rdev, kgid_t gid, struct p9_qid *);
int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
- gid_t gid, struct p9_qid *);
+ kgid_t gid, struct p9_qid *);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 53539acbd81a..89ed9ac5701f 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -161,8 +161,8 @@ typedef struct ax25_uid_assoc {
ax25_address call;
} ax25_uid_assoc;
-#define ax25_uid_for_each(__ax25, node, list) \
- hlist_for_each_entry(__ax25, node, list, uid_node)
+#define ax25_uid_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, uid_node)
#define ax25_uid_hold(ax25) \
atomic_inc(&((ax25)->refcount))
@@ -247,8 +247,8 @@ typedef struct ax25_cb {
#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
-#define ax25_for_each(__ax25, node, list) \
- hlist_for_each_entry(__ax25, node, list, ax25_node)
+#define ax25_for_each(__ax25, list) \
+ hlist_for_each_entry(__ax25, list, ax25_node)
#define ax25_cb_hold(__ax25) \
atomic_inc(&((__ax25)->refcount))
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 9ac2524d1402..081439fd070e 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -41,6 +41,7 @@ struct net;
extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
extern int icmp_rcv(struct sk_buff *skb);
+extern void icmp_err(struct sk_buff *, u32 info);
extern int icmp_init(void);
extern void icmp_out_count(struct net *net, unsigned char type);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 7ca75cbbf75e..fd4ee016ba5c 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -28,16 +28,16 @@
struct inet_hashinfo;
-/* I have no idea if this is a good hash for v6 or not. -DaveM */
static inline unsigned int inet6_ehashfn(struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
- u32 ports = (lport ^ (__force u16)fport);
+ u32 ports = (((u32)lport) << 16) | (__force u32)fport;
return jhash_3words((__force u32)laddr->s6_addr32[3],
- (__force u32)faddr->s6_addr32[3],
- ports, inet_ehash_secret + net_hash_mix(net));
+ ipv6_addr_jhash(faddr),
+ ports,
+ inet_ehash_secret + net_hash_mix(net));
}
static inline int inet6_sk_ehashfn(const struct sock *sk)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 3f237db0a426..76c3fe5ecc2e 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -114,7 +114,13 @@ static inline void init_frag_mem_limit(struct netns_frags *nf)
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_sum_positive(&nf->mem);
+ int res;
+
+ local_bh_disable();
+ res = percpu_counter_sum_positive(&nf->mem);
+ local_bh_enable();
+
+ return res;
}
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7b2ae9d37076..ef83d9e844b5 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -94,8 +94,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
return read_pnet(&ib->ib_net);
}
-#define inet_bind_bucket_for_each(tb, pos, head) \
- hlist_for_each_entry(tb, pos, head, node)
+#define inet_bind_bucket_for_each(tb, head) \
+ hlist_for_each_entry(tb, head, node)
struct inet_bind_hashbucket {
spinlock_t lock;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a4196cbc84ec..7235ae73a1e8 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -203,6 +203,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
extern int inet_sk_rebuild_header(struct sock *sk);
extern u32 inet_ehash_secret;
+extern u32 ipv6_hash_secret;
extern void build_ehash_secret(void);
static inline unsigned int inet_ehashfn(struct net *net,
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 7d658d577368..f908dfc06505 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -178,11 +178,11 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
#define inet_twsk_for_each(tw, node, head) \
hlist_nulls_for_each_entry(tw, node, head, tw_node)
-#define inet_twsk_for_each_inmate(tw, node, jail) \
- hlist_for_each_entry(tw, node, jail, tw_death_node)
+#define inet_twsk_for_each_inmate(tw, jail) \
+ hlist_for_each_entry(tw, jail, tw_death_node)
-#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
- hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
+ hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 21947cf4fa46..fd19625ff99d 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -71,4 +71,21 @@ static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+static inline void tunnel_ip_select_ident(struct sk_buff *skb,
+ const struct iphdr *old_iph,
+ struct dst_entry *dst)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->frag_off & htons(IP_DF))
+ iph->id = 0;
+ else {
+ /* Use inner packet iph-id if possible. */
+ if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
+ iph->id = old_iph->id;
+ else
+ __ip_select_ident(iph, dst,
+ (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+ }
+}
#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 851d5412a299..64d12e77719a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -15,6 +15,7 @@
#include <linux/ipv6.h>
#include <linux/hardirq.h>
+#include <linux/jhash.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
@@ -514,6 +515,17 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
#endif
}
+/* a more secure variant of ipv6_addr_hash() */
+static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+{
+ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
+
+ return jhash_3words(v,
+ (__force u32)a->s6_addr32[2],
+ (__force u32)a->s6_addr32[3],
+ ipv6_hash_secret);
+}
+
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
diff --git a/include/net/netrom.h b/include/net/netrom.h
index f0793c1cb5f8..121dcf854db5 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
nr_node_put(nr_node);
}
-#define nr_neigh_for_each(__nr_neigh, node, list) \
- hlist_for_each_entry(__nr_neigh, node, list, neigh_node)
+#define nr_neigh_for_each(__nr_neigh, list) \
+ hlist_for_each_entry(__nr_neigh, list, neigh_node)
-#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \
- hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node)
+#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
+ hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
-#define nr_node_for_each(__nr_node, node, list) \
- hlist_for_each_entry(__nr_node, node, list, node_node)
+#define nr_node_for_each(__nr_node, list) \
+ hlist_for_each_entry(__nr_node, list, node_node)
-#define nr_node_for_each_safe(__nr_node, node, node2, list) \
- hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node)
+#define nr_node_for_each_safe(__nr_node, node2, list) \
+ hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
/*********************************************************************/
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2761c905504e..f10818fc8804 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -339,11 +339,10 @@ static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
- struct hlist_node *n;
unsigned int h;
h = qdisc_class_hash(id, hash->hashmask);
- hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
+ hlist_for_each_entry(cl, &hash->hash[h], hnode) {
if (cl->classid == id)
return cl;
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 7fdf298a47ef..df85a0c0f2d5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -675,8 +675,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
return h & (sctp_assoc_hashsize - 1);
}
-#define sctp_for_each_hentry(epb, node, head) \
- hlist_for_each_entry(epb, node, head, node)
+#define sctp_for_each_hentry(epb, head) \
+ hlist_for_each_entry(epb, head, node)
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
diff --git a/include/net/sock.h b/include/net/sock.h
index a66caa223d18..14f6e9d19dc7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -606,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
-#define sk_for_each(__sk, node, list) \
- hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu(__sk, node, list) \
- hlist_for_each_entry_rcu(__sk, node, list, sk_node)
+#define sk_for_each(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_node)
+#define sk_for_each_rcu(__sk, list) \
+ hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
-#define sk_for_each_from(__sk, node) \
- if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
- hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_for_each_from(__sk) \
+ hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
-#define sk_for_each_safe(__sk, node, tmp, list) \
- hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
-#define sk_for_each_bound(__sk, node, list) \
- hlist_for_each_entry(__sk, node, list, sk_bind_node)
+#define sk_for_each_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind_node)
static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
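These socket macros follow the tree-wide hlist_for_each_entry() signature change visible throughout this patch: the separate struct hlist_node cursor disappears and only the entry pointer and the list head remain. A caller-side sketch, lookup logic hypothetical:

        #include <net/sock.h>

        static struct sock *demo_lookup(struct hlist_head *chain, unsigned int hash)
        {
                struct sock *sk;

                /* was: struct hlist_node *node; sk_for_each(sk, node, chain) */
                sk_for_each(sk, chain) {
                        if (sk->sk_hash == hash)        /* hypothetical match test */
                                return sk;
                }
                return NULL;
        }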
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 260470e72483..21cdb0b7b0fb 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -78,9 +78,7 @@ TRACE_EVENT(mc_event,
TP_printk("%d %s error%s:%s%s on %s (mc:%d location:%d:%d:%d address:0x%08lx grain:%d syndrome:0x%08lx%s%s)",
__entry->error_count,
- (__entry->error_type == HW_EVENT_ERR_CORRECTED) ? "Corrected" :
- ((__entry->error_type == HW_EVENT_ERR_FATAL) ?
- "Fatal" : "Uncorrected"),
+ mc_event_error_type(__entry->error_type),
__entry->error_count > 1 ? "s" : "",
((char *)__get_str(msg))[0] ? " " : "",
__get_str(msg),
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 46bc045bbe15..98cc4b29fc5b 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -115,6 +115,8 @@ enum ib_device_cap_flags {
IB_DEVICE_XRC = (1<<20),
IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
+ IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
+ IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24)
};
enum ib_atomic_cap {
@@ -715,6 +717,11 @@ enum ib_mig_state {
IB_MIG_ARMED
};
+enum ib_mw_type {
+ IB_MW_TYPE_1 = 1,
+ IB_MW_TYPE_2 = 2
+};
+
struct ib_qp_attr {
enum ib_qp_state qp_state;
enum ib_qp_state cur_qp_state;
@@ -758,6 +765,7 @@ enum ib_wr_opcode {
IB_WR_FAST_REG_MR,
IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ IB_WR_BIND_MW,
};
enum ib_send_flags {
@@ -780,6 +788,23 @@ struct ib_fast_reg_page_list {
unsigned int max_page_list_len;
};
+/**
+ * struct ib_mw_bind_info - Parameters for a memory window bind operation.
+ * @mr: A memory region to bind the memory window to.
+ * @addr: The address where the memory window should begin.
+ * @length: The length of the memory window, in bytes.
+ * @mw_access_flags: Access flags from enum ib_access_flags for the window.
+ *
+ * This struct contains the shared parameters for type 1 and type 2
+ * memory window bind operations.
+ */
+struct ib_mw_bind_info {
+ struct ib_mr *mr;
+ u64 addr;
+ u64 length;
+ int mw_access_flags;
+};
+
struct ib_send_wr {
struct ib_send_wr *next;
u64 wr_id;
@@ -823,6 +848,12 @@ struct ib_send_wr {
int access_flags;
u32 rkey;
} fast_reg;
+ struct {
+ struct ib_mw *mw;
+ /* The new rkey for the memory window. */
+ u32 rkey;
+ struct ib_mw_bind_info bind_info;
+ } bind_mw;
} wr;
u32 xrc_remote_srq_num; /* XRC TGT QPs only */
};
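Putting the type 2 memory-window pieces together: a new rkey is derived with ib_inc_rkey() (added further below) and the bind is posted as an ordinary send work request with the new IB_WR_BIND_MW opcode and bind_mw member. A hedged sketch; the QP, MW and MR are assumed to exist already:

        #include <linux/string.h>
        #include <rdma/ib_verbs.h>

        static int demo_bind_type2_mw(struct ib_qp *qp, struct ib_mw *mw,
                                      struct ib_mr *mr, u64 addr, u64 length)
        {
                struct ib_send_wr wr, *bad_wr;

                memset(&wr, 0, sizeof(wr));
                wr.opcode                      = IB_WR_BIND_MW;
                wr.send_flags                  = IB_SEND_SIGNALED;
                wr.wr.bind_mw.mw               = mw;
                wr.wr.bind_mw.rkey             = ib_inc_rkey(mw->rkey);
                wr.wr.bind_mw.bind_info.mr     = mr;
                wr.wr.bind_mw.bind_info.addr   = addr;
                wr.wr.bind_mw.bind_info.length = length;
                wr.wr.bind_mw.bind_info.mw_access_flags =
                        IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;

                return ib_post_send(qp, &wr, &bad_wr);
        }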
@@ -839,7 +870,8 @@ enum ib_access_flags {
IB_ACCESS_REMOTE_WRITE = (1<<1),
IB_ACCESS_REMOTE_READ = (1<<2),
IB_ACCESS_REMOTE_ATOMIC = (1<<3),
- IB_ACCESS_MW_BIND = (1<<4)
+ IB_ACCESS_MW_BIND = (1<<4),
+ IB_ZERO_BASED = (1<<5)
};
struct ib_phys_buf {
@@ -862,13 +894,16 @@ enum ib_mr_rereg_flags {
IB_MR_REREG_ACCESS = (1<<2)
};
+/**
+ * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
+ * @wr_id: Work request id.
+ * @send_flags: Flags from ib_send_flags enum.
+ * @bind_info: The remaining bind parameters; see struct ib_mw_bind_info.
+ */
struct ib_mw_bind {
- struct ib_mr *mr;
- u64 wr_id;
- u64 addr;
- u32 length;
- int send_flags;
- int mw_access_flags;
+ u64 wr_id;
+ int send_flags;
+ struct ib_mw_bind_info bind_info;
};
struct ib_fmr_attr {
@@ -991,6 +1026,7 @@ struct ib_mw {
struct ib_pd *pd;
struct ib_uobject *uobject;
u32 rkey;
+ enum ib_mw_type type;
};
struct ib_fmr {
@@ -1202,7 +1238,8 @@ struct ib_device {
int num_phys_buf,
int mr_access_flags,
u64 *iova_start);
- struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
+ struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
+ enum ib_mw_type type);
int (*bind_mw)(struct ib_qp *qp,
struct ib_mw *mw,
struct ib_mw_bind *mw_bind);
@@ -2019,6 +2056,8 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
* ib_dereg_mr - Deregisters a memory region and removes it from the
* HCA translation table.
* @mr: The memory region to deregister.
+ *
+ * This function can fail if the memory region has memory windows bound to it.
*/
int ib_dereg_mr(struct ib_mr *mr);
@@ -2071,10 +2110,22 @@ static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
}
/**
+ * ib_inc_rkey - increments the key portion of the given rkey. Can be used
+ * for calculating a new rkey for type 2 memory windows.
+ * @rkey: the rkey to increment.
+ */
+static inline u32 ib_inc_rkey(u32 rkey)
+{
+ const u32 mask = 0x000000ff;
+ return ((rkey + 1) & mask) | (rkey & ~mask);
+}
+
+/**
* ib_alloc_mw - Allocates a memory window.
* @pd: The protection domain associated with the memory window.
+ * @type: The type of the memory window (1 or 2).
*/
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
/**
* ib_bind_mw - Posts a work request to the send queue of the specified
@@ -2084,6 +2135,10 @@ struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
* @mw: The memory window to bind.
* @mw_bind: Specifies information about the memory window, including
* its address range, remote access rights, and associated memory region.
+ *
+ * If there is no immediate error, the function will update the rkey member
+ * of the mw parameter to its new value. The bind operation can still fail
+ * asynchronously.
*/
static inline int ib_bind_mw(struct ib_qp *qp,
struct ib_mw *mw,
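A rough sketch of how a kernel consumer might use the type 2 memory window additions above: ib_alloc_mw() with a type, ib_inc_rkey() for the new key, and an IB_WR_BIND_MW work request carrying the bind_info. The QP, PD, MR and the address/length to expose are assumed to come from the caller; the function name is illustrative and error handling is trimmed:

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_bind_type2_mw(struct ib_qp *qp, struct ib_pd *pd,
                                 struct ib_mr *mr, u64 addr, u64 len,
                                 u32 *new_rkey)
{
    struct ib_send_wr wr, *bad_wr;
    struct ib_mw *mw;
    int ret;

    mw = ib_alloc_mw(pd, IB_MW_TYPE_2);
    if (IS_ERR(mw))
        return PTR_ERR(mw);

    memset(&wr, 0, sizeof(wr));
    wr.opcode = IB_WR_BIND_MW;
    wr.send_flags = IB_SEND_SIGNALED;
    wr.wr.bind_mw.mw = mw;
    /* the consumer owns the key portion of a type 2 MW rkey */
    wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
    wr.wr.bind_mw.bind_info.mr = mr;
    wr.wr.bind_mw.bind_info.addr = addr;
    wr.wr.bind_mw.bind_info.length = len;
    wr.wr.bind_mw.bind_info.mw_access_flags =
        IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;

    ret = ib_post_send(qp, &wr, &bad_wr);
    if (ret) {
        ib_dealloc_mw(mw);
        return ret;
    }

    /* usable by the peer once the bind WR completes successfully */
    *new_rkey = wr.wr.bind_mw.rkey;
    return 0;
}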
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
index f2b94918994d..562ff9d591b8 100644
--- a/include/scsi/Kbuild
+++ b/include/scsi/Kbuild
@@ -1,4 +1 @@
-header-y += scsi_netlink.h
-header-y += scsi_netlink_fc.h
-header-y += scsi_bsg_fc.h
header-y += fc/
diff --git a/include/scsi/fc/Kbuild b/include/scsi/fc/Kbuild
index 56603813c6cd..e69de29bb2d1 100644
--- a/include/scsi/fc/Kbuild
+++ b/include/scsi/fc/Kbuild
@@ -1,4 +0,0 @@
-header-y += fc_els.h
-header-y += fc_fs.h
-header-y += fc_gs.h
-header-y += fc_ns.h
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 604cb9bb3e76..7e2314870341 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -34,7 +34,8 @@ struct fcoe_sysfs_function_template {
void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
- void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+ void (*set_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+ int (*set_fcoe_ctlr_enabled)(struct fcoe_ctlr_device *);
void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
};
@@ -48,6 +49,12 @@ enum fip_conn_type {
FIP_CONN_TYPE_VN2VN,
};
+enum ctlr_enabled_state {
+ FCOE_CTLR_ENABLED,
+ FCOE_CTLR_DISABLED,
+ FCOE_CTLR_UNUSED,
+};
+
struct fcoe_ctlr_device {
u32 id;
@@ -64,6 +71,8 @@ struct fcoe_ctlr_device {
int fcf_dev_loss_tmo;
enum fip_conn_type mode;
+ enum ctlr_enabled_state enabled;
+
/* expected in host order for displaying */
struct fcoe_fc_els_lesb lesb;
};
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 8742d853a3b8..4427393115ea 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -260,6 +260,9 @@ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb,
struct net_device *netdev);
void fcoe_wwn_to_str(u64 wwn, char *buf, int len);
int fcoe_validate_vport_create(struct fc_vport *vport);
+int fcoe_link_speed_update(struct fc_lport *);
+void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
/**
* is_fip_mode() - returns true if FIP mode selected.
@@ -289,8 +292,11 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
* @attached: whether this transport is already attached
* @list: list linkage to all attached transports
* @match: handler to allow the transport driver to match up a given netdev
+ * @alloc: handler to allocate per-instance FCoE structures
+ * (no discovery or login)
* @create: handler to sysfs entry of create for FCoE instances
- * @destroy: handler to sysfs entry of destroy for FCoE instances
+ * @destroy: handler to delete per-instance FCoE structures
+ * (frees all memory)
* @enable: handler to sysfs entry of enable for FCoE instances
* @disable: handler to sysfs entry of disable for FCoE instances
*/
@@ -299,6 +305,7 @@ struct fcoe_transport {
bool attached;
struct list_head list;
bool (*match) (struct net_device *device);
+ int (*alloc) (struct net_device *device);
int (*create) (struct net_device *device, enum fip_state fip_mode);
int (*destroy) (struct net_device *device);
int (*enable) (struct net_device *device);
@@ -347,7 +354,20 @@ struct fcoe_port {
struct timer_list timer;
struct work_struct destroy_work;
u8 data_src_addr[ETH_ALEN];
+ struct net_device * (*get_netdev)(const struct fc_lport *lport);
};
+
+/**
+ * fcoe_get_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport)
+{
+ struct fcoe_port *port = ((struct fcoe_port *)lport_priv(lport));
+
+ return (port->get_netdev) ? port->get_netdev(lport) : NULL;
+}
+
void fcoe_clean_pending_queue(struct fc_lport *);
void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
void fcoe_queue_timer(ulong lport);
@@ -356,7 +376,7 @@ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
/* FCoE Sysfs helpers */
void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *);
/**
* struct netdev_list
@@ -372,4 +392,12 @@ struct fcoe_netdev_mapping {
int fcoe_transport_attach(struct fcoe_transport *ft);
int fcoe_transport_detach(struct fcoe_transport *ft);
+/* sysfs store handler for ctrl_control interface */
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count);
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count);
+
#endif /* _LIBFCOE_H */
+
+
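A small sketch of the consumer side of the new get_netdev() hook: once an LLD fills in fcoe_port->get_netdev at allocation time, common code can reach the underlying net_device without knowing which driver owns the lport. The function name and the printed fields are illustrative:

#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

static void example_print_link(struct fc_lport *lport)
{
    struct net_device *netdev = fcoe_get_netdev(lport);

    if (netdev)
        pr_info("%s: mtu %u\n", netdev->name, netdev->mtu);
}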
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index e65c62e82c5a..a7f9cba275e9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -157,10 +157,11 @@ struct scsi_device {
unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
unsigned try_rc_10_first:1; /* Try READ_CAPACACITY_10 first */
unsigned is_visible:1; /* is the device visible in sysfs */
- unsigned can_power_off:1; /* Device supports runtime power off */
unsigned wce_default_on:1; /* Cache is ON by default */
unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
+ atomic_t disk_events_disable_depth; /* disable depth for disk events */
+
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
struct list_head event_list; /* asserted events */
struct work_struct event_work;
@@ -397,6 +398,8 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *, int timeout, int retries,
int *resid);
+extern void sdev_disable_disk_events(struct scsi_device *sdev);
+extern void sdev_enable_disk_events(struct scsi_device *sdev);
#ifdef CONFIG_PM_RUNTIME
extern int scsi_autopm_get_device(struct scsi_device *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 49084807eb6b..2b6956e9853d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -873,7 +873,7 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
SHOST_DIF_TYPE2_PROTECTION,
SHOST_DIF_TYPE3_PROTECTION };
- if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+ if (target_type >= ARRAY_SIZE(cap))
return 0;
return shost->prot_capabilities & cap[target_type] ? target_type : 0;
@@ -887,7 +887,7 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
SHOST_DIX_TYPE2_PROTECTION,
SHOST_DIX_TYPE3_PROTECTION };
- if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+ if (target_type >= ARRAY_SIZE(cap))
return 0;
return shost->prot_capabilities & cap[target_type];
diff --git a/include/sound/aess.h b/include/sound/aess.h
new file mode 100644
index 000000000000..cee0d09fadbd
--- /dev/null
+++ b/include/sound/aess.h
@@ -0,0 +1,53 @@
+/*
+ * AESS IP block reset
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef __SOUND_AESS_H__
+#define __SOUND_AESS_H__
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+/*
+ * AESS_AUTO_GATING_ENABLE_OFFSET: offset in bytes of the AESS IP
+ * block's AESS_AUTO_GATING_ENABLE__1 register from the IP block's
+ * base address
+ */
+#define AESS_AUTO_GATING_ENABLE_OFFSET 0x07c
+
+/* Register bitfields in the AESS_AUTO_GATING_ENABLE__1 register */
+#define AESS_AUTO_GATING_ENABLE_SHIFT 0
+
+/**
+ * aess_enable_autogating - enable AESS internal autogating
+ * @base: AESS IP block register base address (as returned by ioremap())
+ *
+ * Enable internal autogating on the AESS. This allows the AESS to
+ * indicate that it is idle to the OMAP PRCM.
+ */
+static inline void aess_enable_autogating(void __iomem *base)
+{
+ u32 v;
+
+ /* Set AESS_AUTO_GATING_ENABLE__1.ENABLE to allow idle entry */
+ v = 1 << AESS_AUTO_GATING_ENABLE_SHIFT;
+ writel(v, base + AESS_AUTO_GATING_ENABLE_OFFSET);
+}
+
+#endif /* __SOUND_AESS_H__ */
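A minimal sketch of how a driver might use the new helper, assuming it knows the AESS physical base address; the parameter below is a placeholder, not a real OMAP address:

#include <linux/sizes.h>
#include <sound/aess.h>

static int example_aess_allow_idle(unsigned long aess_pa)
{
    void __iomem *base = ioremap(aess_pa, SZ_4K);

    if (!base)
        return -ENOMEM;
    aess_enable_autogating(base);
    iounmap(base);
    return 0;
}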
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 507910992c59..b128c20770bc 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -35,6 +35,7 @@ struct se_subsystem_api {
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_cmd *);
+ bool (*get_write_cache)(struct se_device *);
};
struct sbc_ops {
@@ -52,11 +53,13 @@ void target_complete_cmd(struct se_cmd *, u8);
sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
-sector_t spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
+sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 663e34a5383f..c4af592f7057 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -44,7 +44,7 @@
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF 256
/* Used to parse VPD into struct t10_vpd */
-#define VPD_TMP_BUF_SIZE 128
+#define VPD_TMP_BUF_SIZE 254
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN 6
#define READ_CAP_LEN 8
@@ -75,6 +75,8 @@
#define DA_MAX_WRITE_SAME_LEN 0
/* Default max transfer length */
#define DA_FABRIC_MAX_SECTORS 8192
+/* Use a model alias based on the configfs backend device name */
+#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for Direct Page Out */
#define DA_EMULATE_DPO 0
/* Emulation for Forced Unit Access WRITEs */
@@ -193,6 +195,7 @@ enum tcm_sense_reason_table {
TCM_RESERVATION_CONFLICT = R(0x10),
TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
TCM_OUT_OF_RESOURCES = R(0x12),
+ TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13),
#undef R
};
@@ -211,7 +214,6 @@ enum tcm_tmreq_table {
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
- TMR_FABRIC_TMR = 255,
};
/* fabric independent task management response values */
@@ -592,6 +594,7 @@ struct se_dev_entry {
};
struct se_dev_attrib {
+ int emulate_model_alias;
int emulate_dpo;
int emulate_fua_write;
int emulate_fua_read;
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 05c5e61f0a7c..9961726523d0 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -6,10 +6,61 @@
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#define RWBS_LEN 8
+DECLARE_EVENT_CLASS(block_buffer,
+
+ TP_PROTO(struct buffer_head *bh),
+
+ TP_ARGS(bh),
+
+ TP_STRUCT__entry (
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( size_t, size )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bh->b_bdev->bd_dev;
+ __entry->sector = bh->b_blocknr;
+ __entry->size = bh->b_size;
+ ),
+
+ TP_printk("%d,%d sector=%llu size=%zu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->sector, __entry->size
+ )
+);
+
+/**
+ * block_touch_buffer - mark a buffer accessed
+ * @bh: buffer_head being touched
+ *
+ * Called from touch_buffer().
+ */
+DEFINE_EVENT(block_buffer, block_touch_buffer,
+
+ TP_PROTO(struct buffer_head *bh),
+
+ TP_ARGS(bh)
+);
+
+/**
+ * block_dirty_buffer - mark a buffer dirty
+ * @bh: buffer_head being dirtied
+ *
+ * Called from mark_buffer_dirty().
+ */
+DEFINE_EVENT(block_buffer, block_dirty_buffer,
+
+ TP_PROTO(struct buffer_head *bh),
+
+ TP_ARGS(bh)
+);
+
DECLARE_EVENT_CLASS(block_rq_with_error,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -206,7 +257,6 @@ TRACE_EVENT(block_bio_bounce,
/**
* block_bio_complete - completed all work on the block operation
- * @q: queue holding the block operation
* @bio: block operation completed
* @error: io error value
*
@@ -215,9 +265,9 @@ TRACE_EVENT(block_bio_bounce,
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct bio *bio, int error),
- TP_ARGS(q, bio, error),
+ TP_ARGS(bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -228,7 +278,8 @@ TRACE_EVENT(block_bio_complete,
),
TP_fast_assign(
- __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->dev = bio->bi_bdev ?
+ bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
__entry->error = error;
@@ -241,11 +292,11 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio,
+DECLARE_EVENT_CLASS(block_bio_merge,
- TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
- TP_ARGS(q, bio),
+ TP_ARGS(q, rq, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -272,31 +323,33 @@ DECLARE_EVENT_CLASS(block_bio,
/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @q: queue holding operation
+ * @rq: request bio is being merged into
* @bio: new block operation to merge
*
* Merging block request @bio to the end of an existing block request
* in queue @q.
*/
-DEFINE_EVENT(block_bio, block_bio_backmerge,
+DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
- TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
- TP_ARGS(q, bio)
+ TP_ARGS(q, rq, bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
* @q: queue holding operation
+ * @rq: request bio is being merged into
* @bio: new block operation to merge
*
* Merging block IO operation @bio to the beginning of an existing block
* operation in queue @q.
*/
-DEFINE_EVENT(block_bio, block_bio_frontmerge,
+DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
- TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
- TP_ARGS(q, bio)
+ TP_ARGS(q, rq, bio)
);
/**
@@ -306,11 +359,32 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
*
* About to place the block IO operation @bio into queue @q.
*/
-DEFINE_EVENT(block_bio, block_bio_queue,
+TRACE_EVENT(block_bio_queue,
TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio)
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, RWBS_LEN )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
);
DECLARE_EVENT_CLASS(block_get_rq,
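Sketch of the caller side for the events reworked above: the buffer_head events are meant to be emitted from fs/buffer.c style helpers, and block_bio_complete now takes only the bio and the error code. This is not the verbatim kernel code; bh, bio and error are assumed to come from the surrounding function:

#include <linux/buffer_head.h>
#include <trace/events/block.h>

static void example_trace_hooks(struct buffer_head *bh, struct bio *bio,
                                int error)
{
    /* buffer_head accounting, per the block_buffer event class */
    trace_block_touch_buffer(bh);

    /* completion tracing no longer needs the request_queue */
    trace_block_bio_complete(bio, error);
}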
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 7e8c36bc7082..4ee471003859 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -1324,6 +1324,31 @@ TRACE_EVENT(ext4_fallocate_exit,
__entry->ret)
);
+TRACE_EVENT(ext4_punch_hole,
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+ TP_ARGS(inode, offset, len),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, offset )
+ __field( loff_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->len)
+);
+
TRACE_EVENT(ext4_unlink_enter,
TP_PROTO(struct inode *parent, struct dentry *dentry),
@@ -2068,103 +2093,210 @@ TRACE_EVENT(ext4_ext_remove_space_done,
);
TRACE_EVENT(ext4_es_insert_extent,
- TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+ TP_PROTO(struct inode *inode, struct extent_status *es),
- TP_ARGS(inode, start, len),
+ TP_ARGS(inode, es),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( loff_t, start )
- __field( loff_t, len )
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_lblk_t, len )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned long long, status )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->start = start;
- __entry->len = len;
+ __entry->lblk = es->es_lblk;
+ __entry->len = es->es_len;
+ __entry->pblk = ext4_es_pblock(es);
+ __entry->status = ext4_es_status(es);
),
- TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->start, __entry->len)
+ __entry->lblk, __entry->len,
+ __entry->pblk, __entry->status)
);
TRACE_EVENT(ext4_es_remove_extent,
- TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
- TP_ARGS(inode, start, len),
+ TP_ARGS(inode, lblk, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( loff_t, start )
+ __field( loff_t, lblk )
__field( loff_t, len )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->start = start;
+ __entry->lblk = lblk;
__entry->len = len;
),
TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->start, __entry->len)
+ __entry->lblk, __entry->len)
);
-TRACE_EVENT(ext4_es_find_extent_enter,
- TP_PROTO(struct inode *inode, ext4_lblk_t start),
+TRACE_EVENT(ext4_es_find_delayed_extent_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
- TP_ARGS(inode, start),
+ TP_ARGS(inode, lblk),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( ext4_lblk_t, start )
+ __field( ext4_lblk_t, lblk )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->start = start;
+ __entry->lblk = lblk;
),
- TP_printk("dev %d,%d ino %lu start %u",
+ TP_printk("dev %d,%d ino %lu lblk %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->start)
+ (unsigned long) __entry->ino, __entry->lblk)
);
-TRACE_EVENT(ext4_es_find_extent_exit,
- TP_PROTO(struct inode *inode, struct extent_status *es,
- ext4_lblk_t ret),
+TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
- TP_ARGS(inode, es, ret),
+ TP_ARGS(inode, es),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( ext4_lblk_t, start )
+ __field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
- __field( ext4_lblk_t, ret )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned long long, status )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->start = es->start;
- __entry->len = es->len;
- __entry->ret = ret;
+ __entry->lblk = es->es_lblk;
+ __entry->len = es->es_len;
+ __entry->pblk = ext4_es_pblock(es);
+ __entry->status = ext4_es_status(es);
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) ret %u",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->start, __entry->len, __entry->ret)
+ __entry->lblk, __entry->len,
+ __entry->pblk, __entry->status)
+);
+
+TRACE_EVENT(ext4_es_lookup_extent_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
+
+ TP_ARGS(inode, lblk),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, lblk )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblk = lblk;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->lblk)
+);
+
+TRACE_EVENT(ext4_es_lookup_extent_exit,
+ TP_PROTO(struct inode *inode, struct extent_status *es,
+ int found),
+
+ TP_ARGS(inode, es, found),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_lblk_t, len )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned long long, status )
+ __field( int, found )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblk = es->es_lblk;
+ __entry->len = es->es_len;
+ __entry->pblk = ext4_es_pblock(es);
+ __entry->status = ext4_es_status(es);
+ __entry->found = found;
+ ),
+
+ TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->found,
+ __entry->lblk, __entry->len,
+ __entry->found ? __entry->pblk : 0,
+ __entry->found ? __entry->status : 0)
+);
+
+TRACE_EVENT(ext4_es_shrink_enter,
+ TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
+
+ TP_ARGS(sb, nr_to_scan, cache_cnt),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, nr_to_scan )
+ __field( int, cache_cnt )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->cache_cnt = cache_cnt;
+ ),
+
+ TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_to_scan, __entry->cache_cnt)
+);
+
+TRACE_EVENT(ext4_es_shrink_exit,
+ TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
+
+ TP_ARGS(sb, shrunk_nr, cache_cnt),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, shrunk_nr )
+ __field( int, cache_cnt )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->shrunk_nr = shrunk_nr;
+ __entry->cache_cnt = cache_cnt;
+ ),
+
+ TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->shrunk_nr, __entry->cache_cnt)
);
#endif /* _TRACE_EXT4_H */
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 127993dbf322..070df49e4a1d 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -132,6 +132,104 @@ TRACE_EVENT(jbd2_submit_inode_data,
(unsigned long) __entry->ino)
);
+TRACE_EVENT(jbd2_handle_start,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned long, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, requested_blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->tid = tid;
+ __entry->type = type;
+ __entry->line_no = line_no;
+ __entry->requested_blocks = requested_blocks;
+ ),
+
+ TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ "requested_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->requested_blocks)
+);
+
+TRACE_EVENT(jbd2_handle_extend,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int buffer_credits,
+ int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned long, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, buffer_credits )
+ __field( int, requested_blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->tid = tid;
+ __entry->type = type;
+ __entry->line_no = line_no;
+ __entry->buffer_credits = buffer_credits;
+ __entry->requested_blocks = requested_blocks;
+ ),
+
+ TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ "buffer_credits %d requested_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->buffer_credits,
+ __entry->requested_blocks)
+);
+
+TRACE_EVENT(jbd2_handle_stats,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int interval, int sync,
+ int requested_blocks, int dirtied_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, interval, sync,
+ requested_blocks, dirtied_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned long, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, interval )
+ __field( int, sync )
+ __field( int, requested_blocks)
+ __field( int, dirtied_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->tid = tid;
+ __entry->type = type;
+ __entry->line_no = line_no;
+ __entry->interval = interval;
+ __entry->sync = sync;
+ __entry->requested_blocks = requested_blocks;
+ __entry->dirtied_blocks = dirtied_blocks;
+ ),
+
+ TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
+ "sync %d requested_blocks %d dirtied_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->interval,
+ __entry->sync, __entry->requested_blocks,
+ __entry->dirtied_blocks)
+);
+
TRACE_EVENT(jbd2_run_stats,
TP_PROTO(dev_t dev, unsigned long tid,
struct transaction_run_stats_s *stats),
@@ -142,6 +240,7 @@ TRACE_EVENT(jbd2_run_stats,
__field( dev_t, dev )
__field( unsigned long, tid )
__field( unsigned long, wait )
+ __field( unsigned long, request_delay )
__field( unsigned long, running )
__field( unsigned long, locked )
__field( unsigned long, flushing )
@@ -155,6 +254,7 @@ TRACE_EVENT(jbd2_run_stats,
__entry->dev = dev;
__entry->tid = tid;
__entry->wait = stats->rs_wait;
+ __entry->request_delay = stats->rs_request_delay;
__entry->running = stats->rs_running;
__entry->locked = stats->rs_locked;
__entry->flushing = stats->rs_flushing;
@@ -164,10 +264,12 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
- "logging %u handle_count %u blocks %u blocks_logged %u",
+ TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
+ "locked %u flushing %u logging %u handle_count %u "
+ "blocks %u blocks_logged %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->wait),
+ jiffies_to_msecs(__entry->request_delay),
jiffies_to_msecs(__entry->running),
jiffies_to_msecs(__entry->locked),
jiffies_to_msecs(__entry->flushing),
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7ef9e759f499..19911dddaeb7 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -14,7 +14,7 @@
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
- ERSN(S390_UCONTROL)
+ ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b453d92c2253..6a16fd2e70ed 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -32,6 +32,115 @@
struct wb_writeback_work;
+TRACE_EVENT(writeback_dirty_page,
+
+ TP_PROTO(struct page *page, struct address_space *mapping),
+
+ TP_ARGS(page, mapping),
+
+ TP_STRUCT__entry (
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(pgoff_t, index)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+ mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+ __entry->ino = mapping ? mapping->host->i_ino : 0;
+ __entry->index = page->index;
+ ),
+
+ TP_printk("bdi %s: ino=%lu index=%lu",
+ __entry->name,
+ __entry->ino,
+ __entry->index
+ )
+);
+
+DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
+
+ TP_PROTO(struct inode *inode, int flags),
+
+ TP_ARGS(inode, flags),
+
+ TP_STRUCT__entry (
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+
+ /* may be called for files on pseudo FSes w/ unregistered bdi */
+ strncpy(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ __entry->ino = inode->i_ino;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("bdi %s: ino=%lu flags=%s",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->flags)
+ )
+);
+
+DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
+
+ TP_PROTO(struct inode *inode, int flags),
+
+ TP_ARGS(inode, flags)
+);
+
+DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
+
+ TP_PROTO(struct inode *inode, int flags),
+
+ TP_ARGS(inode, flags)
+);
+
+DECLARE_EVENT_CLASS(writeback_write_inode_template,
+
+ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+ TP_ARGS(inode, wbc),
+
+ TP_STRUCT__entry (
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(int, sync_mode)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+ dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->sync_mode = wbc->sync_mode;
+ ),
+
+ TP_printk("bdi %s: ino=%lu sync_mode=%d",
+ __entry->name,
+ __entry->ino,
+ __entry->sync_mode
+ )
+);
+
+DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
+
+ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+ TP_ARGS(inode, wbc)
+);
+
+DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
+
+ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+ TP_ARGS(inode, wbc)
+);
+
DECLARE_EVENT_CLASS(writeback_work_class,
TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
TP_ARGS(bdi, work),
@@ -479,6 +588,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
)
);
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
+ TP_PROTO(struct inode *inode,
+ struct writeback_control *wbc,
+ unsigned long nr_to_write),
+ TP_ARGS(inode, wbc, nr_to_write)
+);
+
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index c4d2e9c74002..07d59419fe6b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -308,6 +308,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
typedef struct drm_i915_getparam {
int param;
@@ -628,7 +630,11 @@ struct drm_i915_gem_exec_object2 {
__u64 offset;
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
__u64 flags;
+
__u64 rsvd1;
__u64 rsvd2;
};
@@ -687,6 +693,20 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_IS_PINNED (1<<10)
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
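A userspace sketch of the two new execbuffer flags, assuming fd is an open i915 DRM node, the GEM handles and presumed offsets were set up earlier by the caller, and the i915_drm.h header path matches your libdrm install; error handling is trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_exec_noreloc(int fd, uint32_t batch_handle,
                                uint32_t target_handle, uint32_t batch_len)
{
    struct drm_i915_gem_exec_object2 objs[2];
    struct drm_i915_gem_execbuffer2 execbuf;

    memset(objs, 0, sizeof(objs));
    objs[0].handle = target_handle;
    objs[0].flags = EXEC_OBJECT_WRITE;   /* GPU will write this BO */
    objs[1].handle = batch_handle;       /* batch goes last */

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)objs;
    execbuf.buffer_count = 2;
    execbuf.batch_len = batch_len;
    /* presumed offsets are up to date; reloc handles index objs[] */
    execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
                    I915_EXEC_HANDLE_LUT;

    return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}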
diff --git a/drivers/staging/omapdrm/omap_drm.h b/include/uapi/drm/omap_drm.h
index f0ac34a8973e..1d0b1172664e 100644
--- a/drivers/staging/omapdrm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -1,5 +1,5 @@
/*
- * include/drm/omap_drm.h
+ * include/uapi/drm/omap_drm.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4e67194fd2c3..5c8a1d25e21c 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -68,6 +68,7 @@ header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpqether.h
header-y += bsg.h
+header-y += btrfs.h
header-y += can.h
header-y += capability.h
header-y += capi.h
diff --git a/fs/btrfs/ioctl.h b/include/uapi/linux/btrfs.h
index dabca9cc8c2e..fa3a5f9338fc 100644
--- a/fs/btrfs/ioctl.h
+++ b/include/uapi/linux/btrfs.h
@@ -16,8 +16,9 @@
* Boston, MA 021110-1307, USA.
*/
-#ifndef __IOCTL_
-#define __IOCTL_
+#ifndef _UAPI_LINUX_BTRFS_H
+#define _UAPI_LINUX_BTRFS_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define BTRFS_IOCTL_MAGIC 0x94
@@ -406,6 +407,13 @@ struct btrfs_ioctl_received_subvol_args {
__u64 reserved[16]; /* in */
};
+/*
+ * Caller doesn't want file data in the send stream, even if the
+ * search of clone sources doesn't find an extent. UPDATE_EXTENT
+ * commands will be sent instead of WRITE commands.
+ */
+#define BTRFS_SEND_FLAG_NO_FILE_DATA 0x1
+
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
__u64 clone_sources_count; /* in */
@@ -494,9 +502,13 @@ struct btrfs_ioctl_send_args {
struct btrfs_ioctl_qgroup_create_args)
#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
struct btrfs_ioctl_qgroup_limit_args)
+#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
+ char[BTRFS_LABEL_SIZE])
+#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
+ char[BTRFS_LABEL_SIZE])
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
struct btrfs_ioctl_get_dev_stats)
#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
struct btrfs_ioctl_dev_replace_args)
-#endif
+#endif /* _UAPI_LINUX_BTRFS_H */
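A userspace sketch of the new label ioctls, assuming fd refers to a file or directory on a mounted btrfs filesystem; the 256-byte buffer mirrors BTRFS_LABEL_SIZE, defined locally here in case your copy of the header does not provide it:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

#define EXAMPLE_LABEL_SIZE 256 /* matches BTRFS_LABEL_SIZE */

static int example_relabel(int fd, const char *new_label)
{
    char label[EXAMPLE_LABEL_SIZE];

    if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) < 0)
        return -1;
    printf("current label: %.*s\n", EXAMPLE_LABEL_SIZE, label);

    memset(label, 0, sizeof(label));
    strncpy(label, new_label, sizeof(label) - 1);
    return ioctl(fd, BTRFS_IOC_SET_FSLABEL, label);
}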
diff --git a/include/uapi/linux/cdrom.h b/include/uapi/linux/cdrom.h
index 898b866b300c..bd17ad5aa06d 100644
--- a/include/uapi/linux/cdrom.h
+++ b/include/uapi/linux/cdrom.h
@@ -908,5 +908,39 @@ struct mode_page_header {
__be16 desc_length;
};
+/* removable medium feature descriptor */
+struct rm_feature_desc {
+ __be16 feature_code;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved1:2;
+ __u8 feature_version:4;
+ __u8 persistent:1;
+ __u8 curr:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 curr:1;
+ __u8 persistent:1;
+ __u8 feature_version:4;
+ __u8 reserved1:2;
+#endif
+ __u8 add_len;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 mech_type:3;
+ __u8 load:1;
+ __u8 eject:1;
+ __u8 pvnt_jmpr:1;
+ __u8 dbml:1;
+ __u8 lock:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 lock:1;
+ __u8 dbml:1;
+ __u8 pvnt_jmpr:1;
+ __u8 eject:1;
+ __u8 load:1;
+ __u8 mech_type:3;
+#endif
+ __u8 reserved2;
+ __u8 reserved3;
+ __u8 reserved4;
+};
#endif /* _UAPI_LINUX_CDROM_H */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 539b179b349c..7e75b6fd8d45 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 23
-#define DM_VERSION_PATCHLEVEL 1
-#define DM_VERSION_EXTRA "-ioctl (2012-12-18)"
+#define DM_VERSION_MINOR 24
+#define DM_VERSION_PATCHLEVEL 0
+#define DM_VERSION_EXTRA "-ioctl (2013-01-15)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -336,4 +336,9 @@ enum {
*/
#define DM_SECURE_DATA_FLAG (1 << 15) /* In */
+/*
+ * If set, a message generated output data.
+ */
+#define DM_DATA_OUT_FLAG (1 << 16) /* Out */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index c12d452cb40d..c56d77c496a5 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -365,7 +365,17 @@ struct dvb_frontend_event {
#define DTV_INTERLEAVING 60
#define DTV_LNA 61
-#define DTV_MAX_COMMAND DTV_LNA
+/* Quality parameters */
+#define DTV_STAT_SIGNAL_STRENGTH 62
+#define DTV_STAT_CNR 63
+#define DTV_STAT_PRE_ERROR_BIT_COUNT 64
+#define DTV_STAT_PRE_TOTAL_BIT_COUNT 65
+#define DTV_STAT_POST_ERROR_BIT_COUNT 66
+#define DTV_STAT_POST_TOTAL_BIT_COUNT 67
+#define DTV_STAT_ERROR_BLOCK_COUNT 68
+#define DTV_STAT_TOTAL_BLOCK_COUNT 69
+
+#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT
typedef enum fe_pilot {
PILOT_ON,
@@ -452,11 +462,78 @@ struct dtv_cmds_h {
__u32 reserved:30; /* Align */
};
+/**
+ * Scale types for the quality parameters.
+ * @FE_SCALE_NOT_AVAILABLE: The QoS measure is not available. This
+ * could indicate a temporary or a permanent
+ * condition.
+ * @FE_SCALE_DECIBEL: The scale is measured in 0.0001 dB steps, typically
+ * used on signal measures.
+ * @FE_SCALE_RELATIVE: The scale is a relative percentage measure,
+ * ranging from 0 (0%) to 0xffff (100%).
+ * @FE_SCALE_COUNTER: The scale counts the occurrence of an event, like
+ * bit error, block error, lapsed time.
+ */
+enum fecap_scale_params {
+ FE_SCALE_NOT_AVAILABLE = 0,
+ FE_SCALE_DECIBEL,
+ FE_SCALE_RELATIVE,
+ FE_SCALE_COUNTER
+};
+
+/**
+ * struct dtv_stats - Used for reading a DTV status property
+ *
+ * @value: value of the measure, stored in uvalue or svalue depending on
+ * the scale; relative values range from 0 to 0xffff;
+ * @scale: Filled with enum fecap_scale_params - the scale
+ * in usage for that parameter
+ *
+ * For most delivery systems, this will return a single value for each
+ * parameter.
+ * Note, however, that newer OFDM delivery systems such as ISDB can use
+ * different modulation types for each group of carriers.
+ * On such standards, up to 8 groups of statistics can be provided, one
+ * for each carrier group (called "layer" on ISDB).
+ * In order to be consistent with other delivery systems, the first
+ * value refers to the entire set of carriers ("global").
+ * dtv_stats:scale should be set to FE_SCALE_NOT_AVAILABLE when the value
+ * for the entire group of carriers or from one specific layer
+ * is not provided by the hardware.
+ * st.len should be filled with the index of the latest filled status + 1.
+ *
+ * In other words, for ISDB, those values should be filled like:
+ * u.st.stat.svalue[0] = global statistics;
+ * u.st.stat.scale[0] = FE_SCALE_DECIBEL;
+ * u.st.stat.svalue[1] = layer A statistics;
+ * u.st.stat.scale[1] = FE_SCALE_NOT_AVAILABLE (if not available);
+ * u.st.stat.svalue[2] = layer B statistics;
+ * u.st.stat.scale[2] = FE_SCALE_DECIBEL;
+ * u.st.stat.svalue[3] = layer C statistics;
+ * u.st.stat.scale[3] = FE_SCALE_DECIBEL;
+ * u.st.len = 4;
+ */
+struct dtv_stats {
+ __u8 scale; /* enum fecap_scale_params type */
+ union {
+ __u64 uvalue; /* for counters and relative scales */
+ __s64 svalue; /* for 0.0001 dB measures */
+ };
+} __attribute__ ((packed));
+
+
+#define MAX_DTV_STATS 4
+
+struct dtv_fe_stats {
+ __u8 len;
+ struct dtv_stats stat[MAX_DTV_STATS];
+} __attribute__ ((packed));
+
struct dtv_property {
__u32 cmd;
__u32 reserved[3];
union {
__u32 data;
+ struct dtv_fe_stats st;
struct {
__u8 data[32];
__u32 len;
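A userspace sketch of reading one of the new statistics (CNR here) through FE_GET_PROPERTY and interpreting the per-layer scale, assuming fd is an open frontend device such as /dev/dvb/adapter0/frontend0:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

static void example_print_cnr(int fd)
{
    struct dtv_property prop;
    struct dtv_properties props = { .num = 1, .props = &prop };
    int i;

    memset(&prop, 0, sizeof(prop));
    prop.cmd = DTV_STAT_CNR;

    if (ioctl(fd, FE_GET_PROPERTY, &props) < 0)
        return;

    for (i = 0; i < prop.u.st.len; i++) {
        if (prop.u.st.stat[i].scale == FE_SCALE_DECIBEL)
            printf("CNR[%d]: %.4f dB\n", i,
                   prop.u.st.stat[i].svalue / 10000.0);
        else if (prop.u.st.stat[i].scale == FE_SCALE_NOT_AVAILABLE)
            printf("CNR[%d]: not available\n", i);
    }
}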
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 827cce7e33e3..e53e2ad4444f 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -24,6 +24,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 9
+#define DVB_API_VERSION_MINOR 10
#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 33fbc99b3812..7b26a62e5707 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -59,15 +59,7 @@
* if it becomes full and it is queried once a second to see if
* anything is in it. Incoming commands to the driver will get
* delivered as commands.
- *
- * This driver provides two main interfaces: one for in-kernel
- * applications and another for userland applications. The
- * capabilities are basically the same for both interface, although
- * the interfaces are somewhat different. The stuff in the
- * #ifdef __KERNEL__ below is the in-kernel interface. The userland
- * interface is defined later in the file. */
-
-
+ */
/*
* This is an overlay for all the address types, so it's easy to
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index c70577cf67bc..3c56ba3d80c1 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -169,6 +169,8 @@ struct kvm_pit_config {
#define KVM_EXIT_PAPR_HCALL 19
#define KVM_EXIT_S390_UCONTROL 20
#define KVM_EXIT_WATCHDOG 21
+#define KVM_EXIT_S390_TSCH 22
+#define KVM_EXIT_EPR 23
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -286,6 +288,19 @@ struct kvm_run {
__u64 ret;
__u64 args[9];
} papr_hcall;
+ /* KVM_EXIT_S390_TSCH */
+ struct {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+ __u32 ipb;
+ __u8 dequeued;
+ } s390_tsch;
+ /* KVM_EXIT_EPR */
+ struct {
+ __u32 epr;
+ } epr;
/* Fix the size of the union. */
char padding[256];
};
@@ -398,10 +413,20 @@ struct kvm_s390_psw {
#define KVM_S390_PROGRAM_INT 0xfffe0001u
#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_MCHK 0xfffe1000u
#define KVM_S390_INT_VIRTIO 0xffff2603u
#define KVM_S390_INT_SERVICE 0xffff2401u
#define KVM_S390_INT_EMERGENCY 0xffff1201u
#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
+/* Anything below 0xfffe0000u is taken by INT_IO */
+#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
+ (((schid)) | \
+ ((ssid) << 16) | \
+ ((cssid) << 18) | \
+ ((ai) << 26))
+#define KVM_S390_INT_IO_MIN 0x00000000u
+#define KVM_S390_INT_IO_MAX 0xfffdffffu
+
struct kvm_s390_interrupt {
__u32 type;
@@ -636,6 +661,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IRQFD_RESAMPLE 82
#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
#define KVM_CAP_PPC_HTAB_FD 84
+#define KVM_CAP_S390_CSS_SUPPORT 85
+#define KVM_CAP_PPC_EPR 86
#define KVM_CAP_ARM_PSCI 87
#define KVM_CAP_ARM_SET_DEVICE_ADDR 88
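A userspace sketch of composing an s390 I/O interrupt type with the new helper and injecting it as a floating interrupt; the VM file descriptor and subchannel identifiers are illustrative, and the interruption parameters are left at zero for brevity:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_inject_io(int vm_fd, unsigned int cssid,
                             unsigned int ssid, unsigned int schid)
{
    struct kvm_s390_interrupt irq;

    memset(&irq, 0, sizeof(irq));
    /* adapter interrupt (ai = 1) for the given subchannel */
    irq.type = KVM_S390_INT_IO(1, cssid, ssid, schid);
    return ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
}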
diff --git a/include/uapi/linux/meye.h b/include/uapi/linux/meye.h
index 0dd49954f746..8ff50fe9e481 100644
--- a/include/uapi/linux/meye.h
+++ b/include/uapi/linux/meye.h
@@ -57,10 +57,8 @@ struct meye_params {
#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOC_PRIVATE+5, int)
/* V4L2 private controls */
-#define V4L2_CID_AGC V4L2_CID_PRIVATE_BASE
-#define V4L2_CID_MEYE_SHARPNESS (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PICTURE (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_JPEGQUAL (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_FRAMERATE (V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_MEYE_AGC (V4L2_CID_USER_MEYE_BASE + 0)
+#define V4L2_CID_MEYE_PICTURE (V4L2_CID_USER_MEYE_BASE + 1)
+#define V4L2_CID_MEYE_FRAMERATE (V4L2_CID_USER_MEYE_BASE + 2)
#endif
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index 996719f82e28..f055e58b3147 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -87,6 +87,8 @@
#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \
&& le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2)
+#define FAT_STATE_DIRTY 0x01
+
struct __fat_dirent {
long d_ino;
__kernel_off_t d_off;
@@ -120,14 +122,34 @@ struct fat_boot_sector {
__le32 hidden; /* hidden sectors (unused) */
__le32 total_sect; /* number of sectors (if sectors == 0) */
- /* The following fields are only used by FAT32 */
- __le32 fat32_length; /* sectors/FAT */
- __le16 flags; /* bit 8: fat mirroring, low 4: active fat */
- __u8 version[2]; /* major, minor filesystem version */
- __le32 root_cluster; /* first cluster in root directory */
- __le16 info_sector; /* filesystem info sector */
- __le16 backup_boot; /* backup boot sector */
- __le16 reserved2[6]; /* Unused */
+ union {
+ struct {
+ /* Extended BPB Fields for FAT16 */
+ __u8 drive_number; /* Physical drive number */
+ __u8 state; /* undocumented, but used
+ for mount state. */
+ /* other fields are not added here */
+ } fat16;
+
+ struct {
+ /* only used by FAT32 */
+ __le32 length; /* sectors/FAT */
+ __le16 flags; /* bit 8: fat mirroring,
+ low 4: active fat */
+ __u8 version[2]; /* major, minor filesystem
+ version */
+ __le32 root_cluster; /* first cluster in
+ root directory */
+ __le16 info_sector; /* filesystem info sector */
+ __le16 backup_boot; /* backup boot sector */
+ __le16 reserved2[6]; /* Unused */
+ /* Extended BPB Fields for FAT32 */
+ __u8 drive_number; /* Physical drive number */
+ __u8 state; /* undocumented, but used
+ for mount state. */
+ /* other fields are not added here */
+ } fat32;
+ };
};
struct fat_boot_fsinfo {
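A short sketch of consuming the new union: the dirty flag lives in a different extended-BPB slot depending on the FAT variant, so a reader picks fat16.state or fat32.state accordingly; the is_fat32 flag is assumed to come from the usual BPB probing:

#include <linux/msdos_fs.h>

static bool example_fat_is_dirty(const struct fat_boot_sector *b,
                                 bool is_fat32)
{
    __u8 state = is_fat32 ? b->fat32.state : b->fat16.state;

    return state & FAT_STATE_DIRTY;
}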
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index dfb514472cbc..4f52549b23ff 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -33,13 +33,14 @@ enum {
NBD_CMD_READ = 0,
NBD_CMD_WRITE = 1,
NBD_CMD_DISC = 2,
- /* there is a gap here to match userspace */
+ NBD_CMD_FLUSH = 3,
NBD_CMD_TRIM = 4
};
/* values for flags field */
#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
+#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
/* there is a gap here to match userspace */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f56c945cecd4..dcd63745e83a 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -88,10 +88,6 @@
#define V4L2_CID_HFLIP (V4L2_CID_BASE+20)
#define V4L2_CID_VFLIP (V4L2_CID_BASE+21)
-/* Deprecated; use V4L2_CID_PAN_RESET and V4L2_CID_TILT_RESET */
-#define V4L2_CID_HCENTER (V4L2_CID_BASE+22)
-#define V4L2_CID_VCENTER (V4L2_CID_BASE+23)
-
#define V4L2_CID_POWER_LINE_FREQUENCY (V4L2_CID_BASE+24)
enum v4l2_power_line_frequency {
V4L2_CID_POWER_LINE_FREQUENCY_DISABLED = 0,
@@ -144,6 +140,11 @@ enum v4l2_colorfx {
/* last CID + 1 */
#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
+/* USER-class private control IDs */
+
+/* The base for the meye driver controls. See linux/meye.h for the list
+ * of controls. We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
/* MPEG-class control IDs */
@@ -782,6 +783,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
+
/* Image source controls */
#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
#define V4L2_CID_IMAGE_SOURCE_CLASS (V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
@@ -800,4 +802,27 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
+
+/* DV-class control IDs defined by V4L2 */
+#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
+#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1)
+
+#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
+#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
+#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
+#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
+enum v4l2_dv_tx_mode {
+ V4L2_DV_TX_MODE_DVI_D = 0,
+ V4L2_DV_TX_MODE_HDMI = 1,
+};
+#define V4L2_CID_DV_TX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 5)
+enum v4l2_dv_rgb_range {
+ V4L2_DV_RGB_RANGE_AUTO = 0,
+ V4L2_DV_RGB_RANGE_LIMITED = 1,
+ V4L2_DV_RGB_RANGE_FULL = 2,
+};
+
+#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
+#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101)
+
#endif
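A userspace sketch of reading one of the relocated DV controls; fd is assumed to be a video or subdevice node whose driver implements V4L2_CID_DV_TX_MODE:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_get_dv_tx_mode(int fd, int *is_hdmi)
{
    struct v4l2_control ctrl;

    memset(&ctrl, 0, sizeof(ctrl));
    ctrl.id = V4L2_CID_DV_TX_MODE;
    if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) < 0)
        return -1;

    *is_hdmi = (ctrl.value == V4L2_DV_TX_MODE_HDMI);
    return 0;
}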
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 7d64e0e1a18b..b9b7bea04537 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -47,8 +47,9 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007,
V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008,
- /* YUV (including grey) - next is 0x2014 */
+ /* YUV (including grey) - next is 0x2017 */
V4L2_MBUS_FMT_Y8_1X8 = 0x2001,
+ V4L2_MBUS_FMT_UV8_1X8 = 0x2015,
V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002,
V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003,
V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004,
@@ -65,14 +66,20 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010,
V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011,
V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012,
+ V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 0x2014,
V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d,
V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e,
+ V4L2_MBUS_FMT_YUV10_1X30 = 0x2016,
- /* Bayer - next is 0x3015 */
+ /* Bayer - next is 0x3019 */
V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001,
V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013,
V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002,
V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014,
+ V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 0x3015,
+ V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 0x3016,
+ V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 0x3017,
+ V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 0x3018,
V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b,
V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c,
V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009,
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 4758d1bfcf41..4f41f309911e 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -303,6 +303,15 @@ enum {
VFIO_PCI_BAR5_REGION_INDEX,
VFIO_PCI_ROM_REGION_INDEX,
VFIO_PCI_CONFIG_REGION_INDEX,
+ /*
+ * Expose VGA regions defined for PCI base class 03, subclass 00.
+ * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
+ * as well as the MMIO range 0xa0000 to 0xbffff. Each implemented
+ * range is found at its identity-mapped offset from the region
+ * offset, for example 0x3b0 is region_info.offset + 0x3b0. Areas
+ * between described ranges are unimplemented.
+ */
+ VFIO_PCI_VGA_REGION_INDEX,
VFIO_PCI_NUM_REGIONS
};
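A userspace sketch of the new VGA region: query its offset with VFIO_DEVICE_GET_REGION_INFO and then access the legacy ranges at their identity-mapped offsets. Port 0x3c0 below is just an example and device is assumed to be an open VFIO device fd:

#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

static int example_read_vga_port(int device, unsigned short port,
                                 unsigned char *val)
{
    struct vfio_region_info info = { .argsz = sizeof(info) };

    info.index = VFIO_PCI_VGA_REGION_INDEX;
    if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
        return -1;

    /* e.g. port 0x3c0 sits at region offset + 0x3c0 */
    return pread(device, val, 1, info.offset + port) == 1 ? 0 : -1;
}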
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3cf3e946e331..234d1d870914 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -334,6 +334,9 @@ struct v4l2_pix_format {
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
+/* Chrominance formats */
+#define V4L2_PIX_FMT_UV8 v4l2_fourcc('U', 'V', '8', ' ') /* 8 UV 4:4 */
+
/* Luminance+Chrominance formats */
#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
@@ -386,6 +389,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
+ /* 10bit raw bayer a-law compressed to 8 bits */
+#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
+#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
+#define V4L2_PIX_FMT_SGRBG10ALAW8 v4l2_fourcc('a', 'g', 'A', '8')
+#define V4L2_PIX_FMT_SRGGB10ALAW8 v4l2_fourcc('a', 'R', 'A', '8')
/* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8')
#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
@@ -693,6 +701,10 @@ struct v4l2_buffer {
/* Cache handling flags */
#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x0800
#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x1000
+/* Timestamp type */
+#define V4L2_BUF_FLAG_TIMESTAMP_MASK 0xe000
+#define V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN 0x0000
+#define V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC 0x2000
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1342,28 +1354,6 @@ struct v4l2_querymenu {
#define V4L2_CID_PRIVATE_BASE 0x08000000
-/* DV-class control IDs defined by V4L2 */
-#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
-#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1)
-
-#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
-#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
-#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
-#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
-enum v4l2_dv_tx_mode {
- V4L2_DV_TX_MODE_DVI_D = 0,
- V4L2_DV_TX_MODE_HDMI = 1,
-};
-#define V4L2_CID_DV_TX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 5)
-enum v4l2_dv_rgb_range {
- V4L2_DV_RGB_RANGE_AUTO = 0,
- V4L2_DV_RGB_RANGE_LIMITED = 1,
- V4L2_DV_RGB_RANGE_FULL = 2,
-};
-
-#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
-#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101)
-
/*
* T U N I N G
*/
@@ -1810,6 +1800,7 @@ struct v4l2_event_vsync {
/* Payload for V4L2_EVENT_CTRL */
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
+#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
struct v4l2_event_ctrl {
__u32 changes;
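The videodev2.h changes above also add timestamp-type buffer flags; a small userspace sketch of checking them after VIDIOC_DQBUF:

#include <linux/videodev2.h>

static int example_timestamp_is_monotonic(const struct v4l2_buffer *buf)
{
    return (buf->flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
           V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}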
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 26607bd965fa..e4629b93bdd6 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -15,19 +15,22 @@
/* Namespaces */
#define XATTR_OS2_PREFIX "os2."
-#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
+#define XATTR_OS2_PREFIX_LEN (sizeof(XATTR_OS2_PREFIX) - 1)
+
+#define XATTR_MAC_OSX_PREFIX "osx."
+#define XATTR_MAC_OSX_PREFIX_LEN (sizeof(XATTR_MAC_OSX_PREFIX) - 1)
#define XATTR_SECURITY_PREFIX "security."
-#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)
+#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
#define XATTR_SYSTEM_PREFIX "system."
-#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)
+#define XATTR_SYSTEM_PREFIX_LEN (sizeof(XATTR_SYSTEM_PREFIX) - 1)
#define XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
#define XATTR_USER_PREFIX "user."
-#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
+#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
/* Security namespace */
#define XATTR_EVM_SUFFIX "evm"
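
A small sketch of how the prefix macros are typically consumed (the helper name is illustrative, not from the patch):

/* Build a fully qualified "user." attribute name. */
#include <linux/string.h>
#include <linux/xattr.h>

static void build_user_xattr_name(char *dst, size_t dstlen, const char *name)
{
	/* The *_PREFIX_LEN macros exclude the terminating NUL, as the
	 * (sizeof(...) - 1) definitions above guarantee. */
	strlcpy(dst, XATTR_USER_PREFIX, dstlen);
	strlcat(dst, name, dstlen);
}
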
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 81aba3a73aa3..805711ea2005 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -261,6 +261,22 @@ struct ib_uverbs_dereg_mr {
__u32 mr_handle;
};
+struct ib_uverbs_alloc_mw {
+ __u64 response;
+ __u32 pd_handle;
+ __u8 mw_type;
+ __u8 reserved[3];
+};
+
+struct ib_uverbs_alloc_mw_resp {
+ __u32 mw_handle;
+ __u32 rkey;
+};
+
+struct ib_uverbs_dealloc_mw {
+ __u32 mw_handle;
+};
+
struct ib_uverbs_create_comp_channel {
__u64 response;
};
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 29a87dd26cfb..75746d52f208 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -1,2 +1,5 @@
# UAPI Header export list
header-y += fc/
+header-y += scsi_bsg_fc.h
+header-y += scsi_netlink.h
+header-y += scsi_netlink_fc.h
diff --git a/include/uapi/scsi/fc/Kbuild b/include/uapi/scsi/fc/Kbuild
index aafaa5aa54d4..5ead9fac265c 100644
--- a/include/uapi/scsi/fc/Kbuild
+++ b/include/uapi/scsi/fc/Kbuild
@@ -1 +1,5 @@
# UAPI Header export list
+header-y += fc_els.h
+header-y += fc_fs.h
+header-y += fc_gs.h
+header-y += fc_ns.h
diff --git a/include/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 481abbd48e39..481abbd48e39 100644
--- a/include/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
diff --git a/include/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h
index 50f28b143451..50f28b143451 100644
--- a/include/scsi/fc/fc_fs.h
+++ b/include/uapi/scsi/fc/fc_fs.h
diff --git a/include/scsi/fc/fc_gs.h b/include/uapi/scsi/fc/fc_gs.h
index a37346d47eb1..a37346d47eb1 100644
--- a/include/scsi/fc/fc_gs.h
+++ b/include/uapi/scsi/fc/fc_gs.h
diff --git a/include/scsi/fc/fc_ns.h b/include/uapi/scsi/fc/fc_ns.h
index f7751d53f1d3..f7751d53f1d3 100644
--- a/include/scsi/fc/fc_ns.h
+++ b/include/uapi/scsi/fc/fc_ns.h
diff --git a/include/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 3031b900b087..3031b900b087 100644
--- a/include/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
diff --git a/include/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
index 62b4edab15d3..62b4edab15d3 100644
--- a/include/scsi/scsi_netlink.h
+++ b/include/uapi/scsi/scsi_netlink.h
diff --git a/include/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index cbf76e479761..cbf76e479761 100644
--- a/include/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
new file mode 100644
index 000000000000..71e9a383a981
--- /dev/null
+++ b/include/video/display_timing.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * description of display timings
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_DISPLAY_TIMING_H
+#define __LINUX_DISPLAY_TIMING_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* VESA display monitor timing parameters */
+#define VESA_DMT_HSYNC_LOW BIT(0)
+#define VESA_DMT_HSYNC_HIGH BIT(1)
+#define VESA_DMT_VSYNC_LOW BIT(2)
+#define VESA_DMT_VSYNC_HIGH BIT(3)
+
+/* display specific flags */
+#define DISPLAY_FLAGS_DE_LOW BIT(0) /* data enable flag */
+#define DISPLAY_FLAGS_DE_HIGH BIT(1)
+#define DISPLAY_FLAGS_PIXDATA_POSEDGE BIT(2) /* drive data on pos. edge */
+#define DISPLAY_FLAGS_PIXDATA_NEGEDGE BIT(3) /* drive data on neg. edge */
+#define DISPLAY_FLAGS_INTERLACED BIT(4)
+#define DISPLAY_FLAGS_DOUBLESCAN BIT(5)
+
+/*
+ * A single signal can be specified via a range of minimal and maximal values
+ * with a typical value that lies somewhere in between.
+ */
+struct timing_entry {
+ u32 min;
+ u32 typ;
+ u32 max;
+};
+
+enum timing_entry_index {
+ TE_MIN = 0,
+ TE_TYP = 1,
+ TE_MAX = 2,
+};
+
+/*
+ * Single "mode" entry. This describes one set of signal timings a display can
+ * have in one setting. This struct can later be converted to struct videomode
+ * (see include/video/videomode.h). As each timing_entry can be defined as a
+ * range, one struct display_timing may become multiple struct videomodes.
+ *
+ * Example: hsync active high, vsync active low
+ *
+ * Active Video
+ * Video ______________________XXXXXXXXXXXXXXXXXXXXXX_____________________
+ * |<- sync ->|<- back ->|<----- active ----->|<- front ->|<- sync..
+ * | | porch | | porch |
+ *
+ * HSync _|¯¯¯¯¯¯¯¯¯¯|___________________________________________|¯¯¯¯¯¯¯¯¯
+ *
+ * VSync ¯|__________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|_________
+ */
+struct display_timing {
+ struct timing_entry pixelclock;
+
+ struct timing_entry hactive; /* hor. active video */
+ struct timing_entry hfront_porch; /* hor. front porch */
+ struct timing_entry hback_porch; /* hor. back porch */
+ struct timing_entry hsync_len; /* hor. sync len */
+
+ struct timing_entry vactive; /* ver. active video */
+ struct timing_entry vfront_porch; /* ver. front porch */
+ struct timing_entry vback_porch; /* ver. back porch */
+ struct timing_entry vsync_len; /* ver. sync len */
+
+ unsigned int dmt_flags; /* VESA DMT flags */
+ unsigned int data_flags; /* video data flags */
+};
+
+/*
+ * This describes all timing settings a display provides.
+ * The native_mode is the default setting for this display.
+ * Drivers that can handle multiple videomodes should work with this struct and
+ * convert each entry to the desired end result.
+ */
+struct display_timings {
+ unsigned int num_timings;
+ unsigned int native_mode;
+
+ struct display_timing **timings;
+};
+
+/* get value specified by index from struct timing_entry */
+static inline u32 display_timing_get_value(const struct timing_entry *te,
+ enum timing_entry_index index)
+{
+ switch (index) {
+ case TE_MIN:
+ return te->min;
+ break;
+ case TE_TYP:
+ return te->typ;
+ break;
+ case TE_MAX:
+ return te->max;
+ break;
+ default:
+ return te->typ;
+ }
+}
+
+/* get one entry from struct display_timings */
+static inline struct display_timing *display_timings_get(const struct
+ display_timings *disp,
+ unsigned int index)
+{
+ if (disp->num_timings > index)
+ return disp->timings[index];
+ else
+ return NULL;
+}
+
+void display_timings_release(struct display_timings *disp);
+
+#endif
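
A hedged usage sketch for the helpers above (the wrapper function is hypothetical):

/* Return the typical pixel clock of the display's native mode, or 0. */
#include <video/display_timing.h>

static u32 native_typ_pixelclock(const struct display_timings *disp)
{
	struct display_timing *dt;

	dt = display_timings_get(disp, disp->native_mode);
	if (!dt)
		return 0;

	/* TE_TYP selects the typical value out of the min/typ/max range. */
	return display_timing_get_value(&dt->pixelclock, TE_TYP);
}
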
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
new file mode 100644
index 000000000000..8016eb727cf3
--- /dev/null
+++ b/include/video/of_display_timing.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * OF (devicetree) helpers for display timings
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_DISPLAY_TIMING_H
+#define __LINUX_OF_DISPLAY_TIMING_H
+
+struct device_node;
+struct display_timings;
+
+#define OF_USE_NATIVE_MODE -1
+
+struct display_timings *of_get_display_timings(struct device_node *np);
+int of_display_timings_exist(struct device_node *np);
+
+#endif
diff --git a/include/video/of_videomode.h b/include/video/of_videomode.h
new file mode 100644
index 000000000000..a07efcc51424
--- /dev/null
+++ b/include/video/of_videomode.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * videomode OF (devicetree) helpers
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_VIDEOMODE_H
+#define __LINUX_OF_VIDEOMODE_H
+
+struct device_node;
+struct videomode;
+
+int of_get_videomode(struct device_node *np, struct videomode *vm,
+ int index);
+
+#endif /* __LINUX_OF_VIDEOMODE_H */
diff --git a/include/video/videomode.h b/include/video/videomode.h
new file mode 100644
index 000000000000..a42156234dd4
--- /dev/null
+++ b/include/video/videomode.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * generic videomode description
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_VIDEOMODE_H
+#define __LINUX_VIDEOMODE_H
+
+#include <linux/types.h>
+#include <video/display_timing.h>
+
+/*
+ * Subsystem independent description of a videomode.
+ * Can be generated from struct display_timing.
+ */
+struct videomode {
+ unsigned long pixelclock; /* pixelclock in Hz */
+
+ u32 hactive;
+ u32 hfront_porch;
+ u32 hback_porch;
+ u32 hsync_len;
+
+ u32 vactive;
+ u32 vfront_porch;
+ u32 vback_porch;
+ u32 vsync_len;
+
+ unsigned int dmt_flags; /* VESA DMT flags */
+ unsigned int data_flags; /* video data flags */
+};
+
+/**
+ * videomode_from_timing - convert display timing to videomode
+ * @disp: structure with all possible timing entries
+ * @vm: return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * DESCRIPTION:
+ * This function converts a struct display_timing to a struct videomode.
+ */
+int videomode_from_timing(const struct display_timings *disp,
+ struct videomode *vm, unsigned int index);
+
+#endif
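
A sketch of how a panel driver might consume these helpers together (the function and its error policy are assumptions, not taken from the patch):

#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>

/* Read the devicetree native mode of @np into @vm. */
static int panel_read_native_mode(struct device_node *np, struct videomode *vm)
{
	/* OF_USE_NATIVE_MODE picks the timing marked native-mode in the
	 * display-timings node. */
	return of_get_videomode(np, vm, OF_USE_NATIVE_MODE);
}
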
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 48a9c0171b65..68d73d09b770 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -40,6 +40,41 @@
#include <xen/xen.h>
#include <linux/acpi.h>
+#define ACPI_MEMORY_DEVICE_CLASS "memory"
+#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
+#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
+
+int xen_stub_memory_device_init(void);
+void xen_stub_memory_device_exit(void);
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
+#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
+
+int xen_stub_processor_init(void);
+void xen_stub_processor_exit(void);
+
+void xen_pcpu_hotplug_sync(void);
+int xen_pcpu_id(uint32_t acpi_id);
+
+static inline int xen_acpi_get_pxm(acpi_handle h)
+{
+ unsigned long long pxm;
+ acpi_status status;
+ acpi_handle handle;
+ acpi_handle phandle = h;
+
+ do {
+ handle = phandle;
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+ if (ACPI_SUCCESS(status))
+ return pxm;
+ status = acpi_get_parent(handle, &phandle);
+ } while (ACPI_SUCCESS(status));
+
+ return -ENXIO;
+}
+
int xen_acpi_notify_hypervisor_state(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd);
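
An illustrative caller for xen_acpi_get_pxm() (the surrounding hotplug code is assumed):

#include <linux/printk.h>
#include <xen/acpi.h>

/* Map an ACPI handle to its NUMA proximity domain, walking up the
 * namespace until a _PXM method is found. */
static int report_device_pxm(acpi_handle handle)
{
	int pxm = xen_acpi_get_pxm(handle);

	if (pxm < 0)
		return pxm;	/* -ENXIO: no _PXM anywhere up the tree */

	pr_info("device sits in proximity domain %d\n", pxm);
	return 0;
}
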
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index b40a4315cb8b..2ecfe4f700d9 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -190,6 +190,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
#define XENMEM_add_to_physmap_range 23
struct xen_add_to_physmap_range {
+ /* IN */
/* Which domain to change the mapping for. */
domid_t domid;
uint16_t space; /* => enum phys_map_space */
@@ -203,6 +204,11 @@ struct xen_add_to_physmap_range {
/* GPFN in domid where the source mapping page should appear. */
GUEST_HANDLE(xen_pfn_t) gpfns;
+
+ /* OUT */
+
+ /* Per index error code. */
+ GUEST_HANDLE(int) errs;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5e36932ab407..c57d5f67f702 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -324,10 +324,21 @@ struct xenpf_cpu_ol {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
-/*
- * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd,
- * which are already occupied at Xen hypervisor side.
- */
+#define XENPF_cpu_hotadd 58
+struct xenpf_cpu_hotadd {
+ uint32_t apic_id;
+ uint32_t acpi_id;
+ uint32_t pxm;
+};
+
+#define XENPF_mem_hotadd 59
+struct xenpf_mem_hotadd {
+ uint64_t spfn;
+ uint64_t epfn;
+ uint32_t pxm;
+ uint32_t flags;
+};
+
#define XENPF_core_parking 60
struct xenpf_core_parking {
/* IN variables */
@@ -357,6 +368,8 @@ struct xen_platform_op {
struct xenpf_set_processor_pminfo set_pminfo;
struct xenpf_pcpuinfo pcpu_info;
struct xenpf_cpu_ol cpu_ol;
+ struct xenpf_cpu_hotadd cpu_add;
+ struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
uint8_t pad[128];
} u;
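
A rough sketch of issuing the new memory hot-add op from dom0; HYPERVISOR_dom0_op and XENPF_INTERFACE_VERSION are the existing x86 Xen interfaces and are assumed here, not part of this hunk:

#include <asm/xen/hypercall.h>
#include <xen/interface/platform.h>

static int xen_mem_hotadd(u64 spfn, u64 epfn, u32 pxm)
{
	struct xen_platform_op op = {
		.cmd = XENPF_mem_hotadd,
		.interface_version = XENPF_INTERFACE_VERSION,
	};

	op.u.mem_add.spfn = spfn;	/* start page frame */
	op.u.mem_add.epfn = epfn;	/* end page frame */
	op.u.mem_add.pxm = pxm;		/* proximity domain */
	op.u.mem_add.flags = 0;

	return HYPERVISOR_dom0_op(&op);
}
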
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 886a5d80a18f..53ec4167bd0b 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -285,7 +285,7 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
* Event channel endpoints per domain:
* 1024 if a long is 32 bits; 4096 if a long is 64 bits.
*/
-#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+#define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
struct vcpu_time_info {
/*
@@ -341,7 +341,7 @@ struct vcpu_info {
*/
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
- unsigned long evtchn_pending_sel;
+ xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
@@ -384,8 +384,8 @@ struct shared_info {
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
- unsigned long evtchn_pending[sizeof(unsigned long) * 8];
- unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+ xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+ xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
diff --git a/init/Kconfig b/init/Kconfig
index 28c5b9dcc91e..22616cd434bc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1032,6 +1032,13 @@ config USER_NS
help
This allows containers, i.e. vservers, to use user namespaces
to provide different user info for different servers.
+
+ When user namespaces are enabled in the kernel, it is
+ recommended that the MEMCG and MEMCG_KMEM options also be
+ enabled and that user space use the memory control groups to
+ limit the amount of memory an unprivileged user can use.
+
If unsure, say N.
config PID_NS
@@ -1060,20 +1067,7 @@ config UIDGID_CONVERTED
bool
default y
- # Networking
- depends on NET_9P = n
-
# Filesystems
- depends on 9P_FS = n
- depends on AFS_FS = n
- depends on CEPH_FS = n
- depends on CIFS = n
- depends on CODA_FS = n
- depends on GFS2_FS = n
- depends on NCP_FS = n
- depends on NFSD = n
- depends on NFS_FS = n
- depends on OCFS2_FS = n
depends on XFS_FS = n
config UIDGID_STRICT_TYPE_CHECKS
@@ -1236,6 +1230,14 @@ config SYSCTL_ARCH_UNALIGN_NO_WARN
Allows arch to define/use @no_unaligned_warning to possibly warn
about unaligned access emulation going on under the hood.
+config SYSCTL_ARCH_UNALIGN_ALLOW
+ bool
+ help
+ Enable support for /proc/sys/kernel/unaligned-trap.
+ Allows arches to define/use @unaligned_enabled to toggle the
+ unaligned access emulation at runtime.
+ See arch/parisc/kernel/unaligned.c for reference.
+
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
@@ -1670,6 +1672,17 @@ config MODULE_SIG_FORCE
Reject unsigned modules or signed modules for which we don't have a
key. Without this, such modules will simply taint the kernel.
+config MODULE_SIG_ALL
+ bool "Automatically sign all modules"
+ default y
+ depends on MODULE_SIG
+ help
+ Sign all modules during make modules_install. Without this option,
+ modules must be signed manually, using the scripts/sign-file tool.
+
+comment "Do not forget to sign required modules with scripts/sign-file"
+ depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL
+
choice
prompt "Which hash algorithm should modules be signed with?"
depends on MODULE_SIG
@@ -1702,6 +1715,15 @@ config MODULE_SIG_SHA512
endchoice
+config MODULE_SIG_HASH
+ string
+ depends on MODULE_SIG
+ default "sha1" if MODULE_SIG_SHA1
+ default "sha224" if MODULE_SIG_SHA224
+ default "sha256" if MODULE_SIG_SHA256
+ default "sha384" if MODULE_SIG_SHA384
+ default "sha512" if MODULE_SIG_SHA512
+
endif # MODULES
config INIT_ALL_POSSIBLE
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 71a3ca18c873..e5c4f609f22c 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -477,7 +477,7 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
size_t count, loff_t *off)
{
- struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+ struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
char buffer[FILENT_SIZE];
ssize_t ret;
@@ -498,13 +498,13 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
if (ret <= 0)
return ret;
- filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
+ file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
return ret;
}
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
- struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+ struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
spin_lock(&info->lock);
if (task_tgid(current) == info->notify_owner)
@@ -516,7 +516,7 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id)
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
- struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+ struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
int retval = 0;
poll_wait(filp, &info->wait_q, poll_tab);
@@ -973,7 +973,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
goto out;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
if (unlikely(f.file->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
@@ -1089,7 +1089,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
goto out;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
if (unlikely(f.file->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
@@ -1249,7 +1249,7 @@ retry:
goto out;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
if (unlikely(f.file->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
@@ -1323,7 +1323,7 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
goto out;
}
- inode = f.file->f_path.dentry->d_inode;
+ inode = file_inode(f.file);
if (unlikely(f.file->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
@@ -1383,6 +1383,7 @@ static struct file_system_type mqueue_fs_type = {
.name = "mqueue",
.mount = mqueue_mount,
.kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
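
The hunks above follow the tree-wide switch to the file_inode() accessor; a minimal sketch of the pattern in a made-up file operation:

#include <linux/fs.h>

static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* New style: file_inode(filp) rather than
	 * filp->f_path.dentry->d_inode. */
	struct inode *inode = file_inode(filp);

	/* i_private is assumed to point at the backing buffer. */
	return simple_read_from_buffer(buf, count, ppos,
				       inode->i_private, inode->i_size);
}
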
diff --git a/ipc/shm.c b/ipc/shm.c
index be3ec9ae454e..cb858df061d3 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -193,7 +193,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
if (!is_file_hugepages(shp->shm_file))
shmem_lock(shp->shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
- user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
+ user_shm_unlock(file_inode(shp->shm_file)->i_size,
shp->mlock_user);
fput (shp->shm_file);
security_shm_free(shp);
@@ -529,7 +529,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
*/
- file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
+ file_inode(file)->i_ino = shp->shm_perm.id;
ns->shm_tot += numpages;
error = shp->shm_perm.id;
@@ -678,7 +678,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
{
struct inode *inode;
- inode = shp->shm_file->f_path.dentry->d_inode;
+ inode = file_inode(shp->shm_file);
if (is_file_hugepages(shp->shm_file)) {
struct address_space *mapping = inode->i_mapping;
@@ -1042,7 +1042,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
is_file_hugepages(shp->shm_file) ?
&shm_file_operations_huge :
&shm_file_operations);
- if (!file)
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
goto out_free;
file->private_data = sfd;
@@ -1175,7 +1176,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
- size = vma->vm_file->f_path.dentry->d_inode->i_size;
+ size = file_inode(vma->vm_file)->i_size;
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
/*
* We discovered the size of the shm segment, so
diff --git a/ipc/util.c b/ipc/util.c
index 74e1d9c7a98a..464a8abd779f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -252,7 +252,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
kuid_t euid;
kgid_t egid;
- int id, err;
+ int id;
int next_id = ids->next_id;
if (size > IPCMNI)
@@ -261,17 +261,21 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
if (ids->in_use >= size)
return -ENOSPC;
+ idr_preload(GFP_KERNEL);
+
spin_lock_init(&new->lock);
new->deleted = 0;
rcu_read_lock();
spin_lock(&new->lock);
- err = idr_get_new_above(&ids->ipcs_idr, new,
- (next_id < 0) ? 0 : ipcid_to_idx(next_id), &id);
- if (err) {
+ id = idr_alloc(&ids->ipcs_idr, new,
+ (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+ GFP_NOWAIT);
+ idr_preload_end();
+ if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
- return err;
+ return id;
}
ids->in_use++;
@@ -307,19 +311,10 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
int err;
-retry:
- err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
-
- if (!err)
- return -ENOMEM;
down_write(&ids->rw_mutex);
err = ops->getnew(ns, params);
up_write(&ids->rw_mutex);
-
- if (err == -EAGAIN)
- goto retry;
-
return err;
}
@@ -376,8 +371,6 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
struct kern_ipc_perm *ipcp;
int flg = params->flg;
int err;
-retry:
- err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
/*
* Take the lock as a writer since we are potentially going to add
@@ -389,8 +382,6 @@ retry:
/* key not used */
if (!(flg & IPC_CREAT))
err = -ENOENT;
- else if (!err)
- err = -ENOMEM;
else
err = ops->getnew(ns, params);
} else {
@@ -413,9 +404,6 @@ retry:
}
up_write(&ids->rw_mutex);
- if (err == -EAGAIN)
- goto retry;
-
return err;
}
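
The idr conversion above follows the new preload pattern; a generic sketch (names are illustrative, not from ipc/util.c):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static int example_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep; done unlocked */
	spin_lock(lock);
	/* ids start at 1; end == 0 means "no upper bound" */
	id = idr_alloc(idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* >= 1 on success, negative errno on failure */
}
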
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c072b6da239..bbde5f1a4486 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
rcupdate.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+ kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o \
async.o range.o groups.o lglock.o smpboot.o
@@ -25,9 +25,7 @@ endif
obj-y += sched/
obj-y += power/
-ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
-obj-$(CONFIG_X86) += kcmp.o
-endif
+obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -127,11 +125,19 @@ $(obj)/config_data.h: $(obj)/config_data.gz FORCE
$(obj)/time.o: $(obj)/timeconst.h
-quiet_cmd_timeconst = TIMEC $@
- cmd_timeconst = $(PERL) $< $(CONFIG_HZ) > $@
+quiet_cmd_hzfile = HZFILE $@
+ cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+ $(call if_changed,hzfile)
+
+quiet_cmd_bc = BC $@
+ cmd_bc = bc -q $(filter-out FORCE,$^) > $@
+
targets += timeconst.h
-$(obj)/timeconst.h: $(src)/timeconst.pl FORCE
- $(call if_changed,timeconst)
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+ $(call if_changed,bc)
ifeq ($(CONFIG_MODULE_SIG),y)
#
@@ -153,23 +159,7 @@ kernel/modsign_certificate.o: signing_key.x509 extra_certificates
# fail and that the kernel may be used afterwards.
#
###############################################################################
-sign_key_with_hash :=
-ifeq ($(CONFIG_MODULE_SIG_SHA1),y)
-sign_key_with_hash := -sha1
-endif
-ifeq ($(CONFIG_MODULE_SIG_SHA224),y)
-sign_key_with_hash := -sha224
-endif
-ifeq ($(CONFIG_MODULE_SIG_SHA256),y)
-sign_key_with_hash := -sha256
-endif
-ifeq ($(CONFIG_MODULE_SIG_SHA384),y)
-sign_key_with_hash := -sha384
-endif
-ifeq ($(CONFIG_MODULE_SIG_SHA512),y)
-sign_key_with_hash := -sha512
-endif
-ifeq ($(sign_key_with_hash),)
+ifndef CONFIG_MODULE_SIG_HASH
$(error Could not determine digest type to use from kernel config)
endif
@@ -182,8 +172,8 @@ signing_key.priv signing_key.x509: x509.genkey
@echo "### needs to be run as root, and uses a hardware random"
@echo "### number generator if one is available."
@echo "###"
- openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \
- -x509 -config x509.genkey \
+ openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
+ -batch -x509 -config x509.genkey \
-outform DER -out signing_key.x509 \
-keyout signing_key.priv
@echo "###"
diff --git a/kernel/acct.c b/kernel/acct.c
index e8b1627ab9c7..b9bd7f098ee5 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -205,7 +205,7 @@ static int acct_on(struct filename *pathname)
if (IS_ERR(file))
return PTR_ERR(file);
- if (!S_ISREG(file->f_path.dentry->d_inode->i_mode)) {
+ if (!S_ISREG(file_inode(file)->i_mode)) {
filp_close(file, NULL);
return -EACCES;
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b5c64327e712..a32f9432666c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
{
int i;
struct cgroupfs_root *root = cgrp->root;
- struct hlist_node *node;
struct css_set *cg;
unsigned long key;
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
}
key = css_set_hash(template);
- hash_for_each_possible(css_set_table, cg, node, hlist, key) {
+ hash_for_each_possible(css_set_table, cg, hlist, key) {
if (!compare_css_sets(cg, oldcg, cgrp, template))
continue;
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
struct cgroupfs_root *existing_root;
const struct cred *cred;
int i;
- struct hlist_node *node;
struct css_set *cg;
BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
- hash_for_each(css_set_table, i, node, cg, hlist)
+ hash_for_each(css_set_table, i, cg, hlist)
link_css_set(&tmp_cg_links, cg, root_cgrp);
write_unlock(&css_set_lock);
@@ -2645,7 +2643,7 @@ static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, un
*/
static inline struct cftype *__file_cft(struct file *file)
{
- if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
+ if (file_inode(file)->i_fop != &cgroup_file_operations)
return ERR_PTR(-EINVAL);
return __d_cft(file->f_dentry);
}
@@ -3902,7 +3900,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
- ret = inode_permission(cfile->f_path.dentry->d_inode, MAY_READ);
+ ret = inode_permission(file_inode(cfile), MAY_READ);
if (ret < 0)
goto fail;
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
int i, ret;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct css_set *cg;
unsigned long key;
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
* this is all done under the css_set_lock.
*/
write_lock(&css_set_lock);
- hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+ hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
/* skip entries that we already rehashed */
if (cg->subsys[ss->subsys_id])
continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
cg->subsys[ss->subsys_id] = css;
/* recompute hash and restore entry */
key = css_set_hash(cg->subsys);
- hash_add(css_set_table, node, key);
+ hash_add(css_set_table, &cg->hlist, key);
}
write_unlock(&css_set_lock);
@@ -4618,10 +4616,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
offline_css(ss, dummytop);
ss->active = 0;
- if (ss->use_id) {
- idr_remove_all(&ss->idr);
+ if (ss->use_id)
idr_destroy(&ss->idr);
- }
/* deassign the subsys_id */
subsys[ss->subsys_id] = NULL;
@@ -5322,7 +5318,7 @@ EXPORT_SYMBOL_GPL(free_css_id);
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
struct css_id *newid;
- int myid, error, size;
+ int ret, size;
BUG_ON(!ss->use_id);
@@ -5330,35 +5326,24 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
newid = kzalloc(size, GFP_KERNEL);
if (!newid)
return ERR_PTR(-ENOMEM);
- /* get id */
- if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
- error = -ENOMEM;
- goto err_out;
- }
+
+ idr_preload(GFP_KERNEL);
spin_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
- error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+ ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
spin_unlock(&ss->id_lock);
+ idr_preload_end();
/* Returns error when there are no free spaces for new ID.*/
- if (error) {
- error = -ENOSPC;
+ if (ret < 0)
goto err_out;
- }
- if (myid > CSS_ID_MAX)
- goto remove_idr;
- newid->id = myid;
+ newid->id = ret;
newid->depth = depth;
return newid;
-remove_idr:
- error = -ENOSPC;
- spin_lock(&ss->id_lock);
- idr_remove(&ss->idr, myid);
- spin_unlock(&ss->id_lock);
err_out:
kfree(newid);
- return ERR_PTR(error);
+ return ERR_PTR(ret);
}
@@ -5489,7 +5474,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
struct inode *inode;
struct cgroup_subsys_state *css;
- inode = f->f_dentry->d_inode;
+ inode = file_inode(f);
/* check in cgroup filesystem dir */
if (inode->i_op != &cgroup_dir_inode_operations)
return ERR_PTR(-EBADF);
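
The iterator changes above drop the separate struct hlist_node cursor; a stand-alone sketch of the new form (struct and lookup are stand-ins):

#include <linux/rculist.h>

struct item {
	struct hlist_node hlist;
	int key;
};

/* Caller must hold rcu_read_lock(). */
static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	/* New signature: the type-safe cursor is the entry itself. */
	hlist_for_each_entry_rcu(it, head, hlist) {
		if (it->key == key)
			return it;
	}
	return NULL;
}
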
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 3494c28a7e7a..2235967e78b0 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -72,6 +72,8 @@ extern int dbg_kdb_mode;
#ifdef CONFIG_KGDB_KDB
extern int kdb_stub(struct kgdb_state *ks);
extern int kdb_parse(const char *cmdstr);
+extern int kdb_common_init_state(struct kgdb_state *ks);
+extern int kdb_common_deinit_state(void);
#else /* ! CONFIG_KGDB_KDB */
static inline int kdb_stub(struct kgdb_state *ks)
{
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index 38573f35a5ad..19d9a578c753 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -783,7 +783,10 @@ static void gdb_cmd_query(struct kgdb_state *ks)
len = len / 2;
remcom_out_buffer[len++] = 0;
+ kdb_common_init_state(ks);
kdb_parse(remcom_out_buffer);
+ kdb_common_deinit_state();
+
strcpy(remcom_out_buffer, "OK");
}
break;
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 8418c2f8ec5d..70a504601dc3 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -486,11 +486,9 @@ static int kdb_bc(int argc, const char **argv)
/*
* kdb_ss
*
- * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
- * commands.
+ * Process the 'ss' (Single Step) command.
*
* ss
- * ssb
*
* Parameters:
* argc Argument count
@@ -498,35 +496,23 @@ static int kdb_bc(int argc, const char **argv)
* Outputs:
* None.
* Returns:
- * KDB_CMD_SS[B] for success, a kdb error if failure.
+ * KDB_CMD_SS for success, a kdb error if failure.
* Locking:
* None.
* Remarks:
*
* Set the arch specific option to trigger a debug trap after the next
* instruction.
- *
- * For 'ssb', set the trace flag in the debug trap handler
- * after printing the current insn and return directly without
- * invoking the kdb command processor, until a branch instruction
- * is encountered.
*/
static int kdb_ss(int argc, const char **argv)
{
- int ssb = 0;
-
- ssb = (strcmp(argv[0], "ssb") == 0);
if (argc != 0)
return KDB_ARGCOUNT;
/*
* Set trace flag and go.
*/
KDB_STATE_SET(DOING_SS);
- if (ssb) {
- KDB_STATE_SET(DOING_SSB);
- return KDB_CMD_SSB;
- }
return KDB_CMD_SS;
}
@@ -561,8 +547,6 @@ void __init kdb_initbptab(void)
kdb_register_repeat("ss", kdb_ss, "",
"Single Step", 1, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("ssb", kdb_ss, "",
- "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index be7b33b73d30..328d18ef31e4 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -34,6 +34,22 @@ EXPORT_SYMBOL_GPL(kdb_poll_idx);
static struct kgdb_state *kdb_ks;
+int kdb_common_init_state(struct kgdb_state *ks)
+{
+ kdb_initial_cpu = atomic_read(&kgdb_active);
+ kdb_current_task = kgdb_info[ks->cpu].task;
+ kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ return 0;
+}
+
+int kdb_common_deinit_state(void)
+{
+ kdb_initial_cpu = -1;
+ kdb_current_task = NULL;
+ kdb_current_regs = NULL;
+ return 0;
+}
+
int kdb_stub(struct kgdb_state *ks)
{
int error = 0;
@@ -94,13 +110,10 @@ int kdb_stub(struct kgdb_state *ks)
}
/* Set initial kdb state variables */
KDB_STATE_CLEAR(KGDB_TRANS);
- kdb_initial_cpu = atomic_read(&kgdb_active);
- kdb_current_task = kgdb_info[ks->cpu].task;
- kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ kdb_common_init_state(ks);
/* Remove any breakpoints as needed by kdb and clear single step */
kdb_bp_remove();
KDB_STATE_CLEAR(DOING_SS);
- KDB_STATE_CLEAR(DOING_SSB);
KDB_STATE_SET(PAGER);
/* zero out any offline cpu data */
for_each_present_cpu(i) {
@@ -125,9 +138,7 @@ int kdb_stub(struct kgdb_state *ks)
* Upon exit from the kdb main loop setup break points and restart
* the system based on the requested continue state
*/
- kdb_initial_cpu = -1;
- kdb_current_task = NULL;
- kdb_current_regs = NULL;
+ kdb_common_deinit_state();
KDB_STATE_CLEAR(PAGER);
kdbnearsym_cleanup();
if (error == KDB_CMD_KGDB) {
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 8875254120b6..00eb8f7fbf41 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -124,7 +124,7 @@ static kdbmsg_t kdbmsgs[] = {
};
#undef KDBMSG
-static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
/*
@@ -175,7 +175,7 @@ static char *__env[] = {
(char *)0,
};
-static const int __nenv = (sizeof(__env) / sizeof(char *));
+static const int __nenv = ARRAY_SIZE(__env);
struct task_struct *kdb_curr_task(int cpu)
{
@@ -681,34 +681,50 @@ static int kdb_defcmd(int argc, const char **argv)
}
if (argc != 3)
return KDB_ARGCOUNT;
- defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
- GFP_KDB);
- if (!defcmd_set) {
- kdb_printf("Could not allocate new defcmd_set entry for %s\n",
- argv[1]);
- defcmd_set = save_defcmd_set;
+ if (in_dbg_master()) {
+ kdb_printf("Command only available during kdb_init()\n");
return KDB_NOTIMP;
}
+ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+ GFP_KDB);
+ if (!defcmd_set)
+ goto fail_defcmd;
memcpy(defcmd_set, save_defcmd_set,
defcmd_set_count * sizeof(*defcmd_set));
- kfree(save_defcmd_set);
s = defcmd_set + defcmd_set_count;
memset(s, 0, sizeof(*s));
s->usable = 1;
s->name = kdb_strdup(argv[1], GFP_KDB);
+ if (!s->name)
+ goto fail_name;
s->usage = kdb_strdup(argv[2], GFP_KDB);
+ if (!s->usage)
+ goto fail_usage;
s->help = kdb_strdup(argv[3], GFP_KDB);
+ if (!s->help)
+ goto fail_help;
if (s->usage[0] == '"') {
- strcpy(s->usage, s->usage+1);
+ strcpy(s->usage, argv[2]+1);
s->usage[strlen(s->usage)-1] = '\0';
}
if (s->help[0] == '"') {
- strcpy(s->help, s->help+1);
+ strcpy(s->help, argv[3]+1);
s->help[strlen(s->help)-1] = '\0';
}
++defcmd_set_count;
defcmd_in_progress = 1;
+ kfree(save_defcmd_set);
return 0;
+fail_help:
+ kfree(s->usage);
+fail_usage:
+ kfree(s->name);
+fail_name:
+ kfree(defcmd_set);
+fail_defcmd:
+ kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]);
+ defcmd_set = save_defcmd_set;
+ return KDB_NOTIMP;
}
/*
@@ -1112,7 +1128,6 @@ void kdb_set_current_task(struct task_struct *p)
* KDB_CMD_GO User typed 'go'.
* KDB_CMD_CPU User switched to another cpu.
* KDB_CMD_SS Single step.
- * KDB_CMD_SSB Single step until branch.
*/
static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_dbtrap_t db_result)
@@ -1151,14 +1166,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
- case KDB_DB_SSB:
- /*
- * In the midst of ssb command. Just return.
- */
- KDB_DEBUG_STATE("kdb_local 3", reason);
- return KDB_CMD_SSB; /* Continue with SSB command */
-
- break;
case KDB_DB_SS:
break;
case KDB_DB_SSBPT:
@@ -1281,7 +1288,6 @@ do_full_getstr:
if (diag == KDB_CMD_GO
|| diag == KDB_CMD_CPU
|| diag == KDB_CMD_SS
- || diag == KDB_CMD_SSB
|| diag == KDB_CMD_KGDB)
break;
@@ -1368,12 +1374,6 @@ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
break;
}
- if (result == KDB_CMD_SSB) {
- KDB_STATE_SET(DOING_SS);
- KDB_STATE_SET(DOING_SSB);
- break;
- }
-
if (result == KDB_CMD_KGDB) {
if (!KDB_STATE(DOING_KGDB))
kdb_printf("Entering please attach debugger "
@@ -2350,69 +2350,6 @@ static int kdb_pid(int argc, const char **argv)
return 0;
}
-/*
- * kdb_ll - This function implements the 'll' command which follows a
- * linked list and executes an arbitrary command for each
- * element.
- */
-static int kdb_ll(int argc, const char **argv)
-{
- int diag = 0;
- unsigned long addr;
- long offset = 0;
- unsigned long va;
- unsigned long linkoffset;
- int nextarg;
- const char *command;
-
- if (argc != 3)
- return KDB_ARGCOUNT;
-
- nextarg = 1;
- diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
- if (diag)
- return diag;
-
- diag = kdbgetularg(argv[2], &linkoffset);
- if (diag)
- return diag;
-
- /*
- * Using the starting address as
- * the first element in the list, and assuming that
- * the list ends with a null pointer.
- */
-
- va = addr;
- command = kdb_strdup(argv[3], GFP_KDB);
- if (!command) {
- kdb_printf("%s: cannot duplicate command\n", __func__);
- return 0;
- }
- /* Recursive use of kdb_parse, do not use argv after this point */
- argv = NULL;
-
- while (va) {
- char buf[80];
-
- if (KDB_FLAG(CMD_INTERRUPT))
- goto out;
-
- sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
- diag = kdb_parse(buf);
- if (diag)
- goto out;
-
- addr = va + linkoffset;
- if (kdb_getword(&va, addr, sizeof(va)))
- goto out;
- }
-
-out:
- kfree(command);
- return diag;
-}
-
static int kdb_kgdb(int argc, const char **argv)
{
return KDB_CMD_KGDB;
@@ -2430,11 +2367,15 @@ static int kdb_help(int argc, const char **argv)
kdb_printf("-----------------------------"
"-----------------------------\n");
for_each_kdbcmd(kt, i) {
- if (kt->cmd_name)
- kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
- kt->cmd_usage, kt->cmd_help);
+ char *space = "";
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
+ if (!kt->cmd_name)
+ continue;
+ if (strlen(kt->cmd_usage) > 20)
+ space = "\n ";
+ kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
+ kt->cmd_usage, space, kt->cmd_help);
}
return 0;
}
@@ -2739,7 +2680,7 @@ int kdb_register_repeat(char *cmd,
(kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
kfree(kdb_commands);
}
- memset(new + kdb_max_commands, 0,
+ memset(new + kdb_max_commands - KDB_BASE_CMD_MAX, 0,
kdb_command_extend * sizeof(*new));
kdb_commands = new;
kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
@@ -2843,15 +2784,13 @@ static void __init kdb_inittab(void)
"Stack traceback", 1, KDB_REPEAT_NONE);
kdb_register_repeat("btp", kdb_bt, "<pid>",
"Display stack for process <pid>", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
- "Display stack all processes", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+ "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btc", kdb_bt, "",
"Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
KDB_REPEAT_NONE);
- kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
- "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("env", kdb_env, "",
"Show environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("set", kdb_set, "",
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 392ec6a25844..7afd3c8c41d5 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -19,7 +19,6 @@
#define KDB_CMD_GO (-1001)
#define KDB_CMD_CPU (-1002)
#define KDB_CMD_SS (-1003)
-#define KDB_CMD_SSB (-1004)
#define KDB_CMD_KGDB (-1005)
/* Internal debug flags */
@@ -125,8 +124,6 @@ extern int kdb_state;
* kdb control */
#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */
#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
-#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command,
- * DOING_SS is also set */
#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint
* after one ss, independent of
* DOING_SS */
@@ -191,7 +188,6 @@ extern void kdb_bp_remove(void);
typedef enum {
KDB_DB_BPT, /* Breakpoint */
KDB_DB_SS, /* Single-step trap */
- KDB_DB_SSB, /* Single step to branch */
KDB_DB_SSBPT, /* Single step over breakpoint */
KDB_DB_NOBPT /* Spurious breakpoint */
} kdb_dbtrap_t;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5c75791d7269..b0cd86501c30 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3691,7 +3691,7 @@ unlock:
static int perf_fasync(int fd, struct file *filp, int on)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct perf_event *event = filp->private_data;
int retval;
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
- struct hlist_node *node;
struct hlist_head *head;
rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
if (!head)
goto end;
- hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+ hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, data, regs);
}
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
{
struct perf_sample_data data;
struct perf_event *event;
- struct hlist_node *node;
struct perf_raw_record raw = {
.size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
perf_sample_data_init(&data, addr, 0);
data.raw = &raw;
- hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+ hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
@@ -5965,13 +5963,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
pmu->name = name;
if (type < 0) {
- int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
- if (!err)
- goto free_pdc;
-
- err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
- if (err) {
- ret = err;
+ type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
+ if (type < 0) {
+ ret = type;
goto free_pdc;
}
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 7dd20408707c..51e485ca9935 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -20,6 +20,7 @@
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
-#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
@@ -485,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!self.task) /* see coredump_finish() */
break;
- schedule();
+ freezable_schedule();
}
__set_task_state(tsk, TASK_RUNNING);
down_read(&mm->mmap_sem);
@@ -835,7 +835,7 @@ void do_exit(long code)
/*
* Make sure we are holding no locks:
*/
- debug_check_no_locks_held(tsk);
+ debug_check_no_locks_held();
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done
diff --git a/kernel/fork.c b/kernel/fork.c
index 4133876d8cd2..8d932b1c9056 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -413,7 +413,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct address_space *mapping = file->f_mapping;
get_file(file);
@@ -1861,10 +1861,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
exit_sem(current);
}
- if (new_nsproxy) {
+ if (new_nsproxy)
switch_task_namespaces(current, new_nsproxy);
- new_nsproxy = NULL;
- }
task_lock(current);
@@ -1894,9 +1892,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
}
}
- if (new_nsproxy)
- put_nsproxy(new_nsproxy);
-
bad_unshare_cleanup_cred:
if (new_cred)
put_cred(new_cred);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4bd4faa6323a..397db02209ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -76,7 +76,7 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
static ssize_t write_irq_affinity(int type, struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
+ unsigned int irq = (int)(long)PDE(file_inode(file))->data;
cpumask_var_t new_value;
int err;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2436ffcec91f..bddd3d7a74b6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -229,6 +229,8 @@ out:
}
+static void kimage_free_page_list(struct list_head *list);
+
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
unsigned long nr_segments,
struct kexec_segment __user *segments)
@@ -242,8 +244,6 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
if (result)
goto out;
- *rimage = image;
-
/*
* Find a location for the control code buffer, and add it
* the vector of segments so that it's pages will also be
@@ -254,22 +254,22 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
- goto out;
+ goto out_free;
}
image->swap_page = kimage_alloc_control_pages(image, 0);
if (!image->swap_page) {
printk(KERN_ERR "Could not allocate swap buffer\n");
- goto out;
+ goto out_free;
}
- result = 0;
- out:
- if (result == 0)
- *rimage = image;
- else
- kfree(image);
+ *rimage = image;
+ return 0;
+out_free:
+ kimage_free_page_list(&image->control_pages);
+ kfree(image);
+out:
return result;
}
@@ -316,7 +316,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
mend = mstart + image->segment[i].memsz - 1;
/* Ensure we are within the crash kernel limits */
if ((mstart < crashk_res.start) || (mend > crashk_res.end))
- goto out;
+ goto out_free;
}
/*
@@ -329,16 +329,15 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
- goto out;
+ goto out_free;
}
- result = 0;
-out:
- if (result == 0)
- *rimage = image;
- else
- kfree(image);
+ *rimage = image;
+ return 0;
+out_free:
+ kfree(image);
+out:
return result;
}
@@ -503,8 +502,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
break;
- if (hole_end > crashk_res.end)
- break;
/* See if I overlap any of the segments */
for (i = 0; i < image->nr_segments; i++) {
unsigned long mstart, mend;
@@ -1514,6 +1511,8 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(page, _count);
VMCOREINFO_OFFSET(page, mapping);
VMCOREINFO_OFFSET(page, lru);
+ VMCOREINFO_OFFSET(page, _mapcount);
+ VMCOREINFO_OFFSET(page, private);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1536,6 +1535,11 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_lru);
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);
+ VMCOREINFO_NUMBER(PG_slab);
+#ifdef CONFIG_MEMORY_FAILURE
+ VMCOREINFO_NUMBER(PG_hwpoison);
+#endif
+ VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
struct kprobe __kprobes *get_kprobe(void *addr)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ hlist_for_each_entry_rcu(p, head, hlist) {
if (p->addr == addr)
return p;
}
@@ -799,7 +798,6 @@ out:
static void __kprobes optimize_all_kprobes(void)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, node, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
static void __kprobes unoptimize_all_kprobes(void)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ hlist_for_each_entry_rcu(p, head, hlist) {
if (!kprobe_disabled(p))
unoptimize_kprobe(p, false);
}
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long hash, flags = 0;
if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
hash = hash_ptr(tk, KPROBE_HASH_BITS);
head = &kretprobe_inst_table[hash];
kretprobe_table_lock(hash, &flags);
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task == tk)
recycle_rp_inst(ri, &empty_rp);
}
kretprobe_table_unlock(hash, &flags);
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
static inline void free_rp_inst(struct kretprobe *rp)
{
struct kretprobe_instance *ri;
- struct hlist_node *pos, *next;
+ struct hlist_node *next;
- hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+ hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
unsigned long flags, hash;
struct kretprobe_instance *ri;
- struct hlist_node *pos, *next;
+ struct hlist_node *next;
struct hlist_head *head;
/* No race here */
for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
kretprobe_table_lock(hash, &flags);
head = &kretprobe_inst_table[hash];
- hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+ hlist_for_each_entry_safe(ri, next, head, hlist) {
if (ri->rp == rp)
ri->rp = NULL;
}
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
{
struct module *mod = data;
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, node, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist)
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p, *kp;
const char *sym = NULL;
unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
head = &kprobe_table[i];
preempt_disable();
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ hlist_for_each_entry_rcu(p, head, hlist) {
sym = kallsyms_lookup((unsigned long)p->addr, NULL,
&offset, &modname, namebuf);
if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
/* Arming kprobes doesn't optimize kprobe itself */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, node, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist)
if (!kprobe_disabled(p))
arm_kprobe(p);
}
@@ -2265,7 +2259,6 @@ already_enabled:
static void __kprobes disarm_all_kprobes(void)
{
struct hlist_head *head;
- struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ hlist_for_each_entry_rcu(p, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
disarm_kprobe(p, false);
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8a0efac4f99d..259db207b5d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
{
if (!debug_locks_off())
return;
@@ -4097,22 +4097,21 @@ static void print_held_locks_bug(struct task_struct *curr)
printk("\n");
printk("=====================================\n");
- printk("[ BUG: lock held at task exit time! ]\n");
+ printk("[ BUG: %s/%d still has locks held! ]\n",
+ current->comm, task_pid_nr(current));
print_kernel_ident();
printk("-------------------------------------\n");
- printk("%s/%d is exiting with locks still held!\n",
- curr->comm, task_pid_nr(curr));
- lockdep_print_held_locks(curr);
-
+ lockdep_print_held_locks(current);
printk("\nstack backtrace:\n");
dump_stack();
}
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
{
- if (unlikely(task->lockdep_depth > 0))
- print_held_locks_bug(task);
+ if (unlikely(current->lockdep_depth > 0))
+ print_held_locks_bug();
}
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
void debug_show_all_locks(void)
{
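
With the task argument gone and the symbol exported, any code path can assert "no locks held" for the current task; a hypothetical caller (the declaration is assumed to live in linux/debug_locks.h):

#include <linux/debug_locks.h>

static void example_return_to_user(void)
{
	/* Operates on current; emits the lockdep splat above if any
	 * lock is still held. */
	debug_check_no_locks_held();
}
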
diff --git a/kernel/module.c b/kernel/module.c
index eab08274ec9b..0925c9a71975 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -197,9 +197,10 @@ static inline int strong_try_module_get(struct module *mod)
return -ENOENT;
}
-static inline void add_taint_module(struct module *mod, unsigned flag)
+static inline void add_taint_module(struct module *mod, unsigned flag,
+ enum lockdep_ok lockdep_ok)
{
- add_taint(flag);
+ add_taint(flag, lockdep_ok);
mod->taints |= (1U << flag);
}
@@ -727,7 +728,7 @@ static inline int try_force_unload(unsigned int flags)
{
int ret = (flags & O_TRUNC);
if (ret)
- add_taint(TAINT_FORCED_RMMOD);
+ add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
return ret;
}
#else
@@ -1138,7 +1139,7 @@ static int try_to_force_load(struct module *mod, const char *reason)
if (!test_taint(TAINT_FORCED_MODULE))
printk(KERN_WARNING "%s: %s: kernel tainted.\n",
mod->name, reason);
- add_taint_module(mod, TAINT_FORCED_MODULE);
+ add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
return 0;
#else
return -ENOEXEC;
@@ -2147,7 +2148,8 @@ static void set_license(struct module *mod, const char *license)
if (!test_taint(TAINT_PROPRIETARY_MODULE))
printk(KERN_WARNING "%s: module license '%s' taints "
"kernel.\n", mod->name, license);
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
}
}
@@ -2539,7 +2541,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
if (err)
goto out;
- err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
+ err = vfs_getattr(&file->f_path, &stat);
if (err)
goto out;
@@ -2700,10 +2702,10 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
}
if (!get_modinfo(info, "intree"))
- add_taint_module(mod, TAINT_OOT_MODULE);
+ add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
if (get_modinfo(info, "staging")) {
- add_taint_module(mod, TAINT_CRAP);
+ add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
printk(KERN_WARNING "%s: module is from the staging directory,"
" the quality is unknown, you have been warned.\n",
mod->name);
@@ -2869,15 +2871,17 @@ static int check_module_license_and_versions(struct module *mod)
* using GPL-only symbols it needs.
*/
if (strcmp(mod->name, "ndiswrapper") == 0)
- add_taint(TAINT_PROPRIETARY_MODULE);
+ add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
/* driverloader was caught wrongly pretending to be under GPL */
if (strcmp(mod->name, "driverloader") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
/* lve claims to be GPL but upstream won't provide source */
if (strcmp(mod->name, "lve") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !mod->crcs)
@@ -3141,12 +3145,72 @@ static int may_init_module(void)
return 0;
}
+/*
+ * We try to place it in the list now to make sure it's unique before
+ * we dedicate too many resources.  In particular, this avoids exhausting
+ * temporary percpu memory.
+ */
+static int add_unformed_module(struct module *mod)
+{
+ int err;
+ struct module *old;
+
+ mod->state = MODULE_STATE_UNFORMED;
+
+again:
+ mutex_lock(&module_mutex);
+ if ((old = find_module_all(mod->name, true)) != NULL) {
+ if (old->state == MODULE_STATE_COMING
+ || old->state == MODULE_STATE_UNFORMED) {
+ /* Wait in case it fails to load. */
+ mutex_unlock(&module_mutex);
+ err = wait_event_interruptible(module_wq,
+ finished_loading(mod->name));
+ if (err)
+ goto out_unlocked;
+ goto again;
+ }
+ err = -EEXIST;
+ goto out;
+ }
+ list_add_rcu(&mod->list, &modules);
+ err = 0;
+
+out:
+ mutex_unlock(&module_mutex);
+out_unlocked:
+ return err;
+}
+
+static int complete_formation(struct module *mod, struct load_info *info)
+{
+ int err;
+
+ mutex_lock(&module_mutex);
+
+ /* Find duplicate symbols (must be called under lock). */
+ err = verify_export_symbols(mod);
+ if (err < 0)
+ goto out;
+
+ /* This relies on module_mutex for list integrity. */
+ module_bug_finalize(info->hdr, info->sechdrs, mod);
+
+ /* Mark state as coming so strong_try_module_get() ignores us,
+ * but kallsyms etc. can see us. */
+ mod->state = MODULE_STATE_COMING;
+
+out:
+ mutex_unlock(&module_mutex);
+ return err;
+}
+
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static int load_module(struct load_info *info, const char __user *uargs,
int flags)
{
- struct module *mod, *old;
+ struct module *mod;
long err;
err = module_sig_check(info);
@@ -3164,36 +3228,20 @@ static int load_module(struct load_info *info, const char __user *uargs,
goto free_copy;
}
- /*
- * We try to place it in the list now to make sure it's unique
- * before we dedicate too many resources. In particular,
- * temporary percpu memory exhaustion.
- */
- mod->state = MODULE_STATE_UNFORMED;
-again:
- mutex_lock(&module_mutex);
- if ((old = find_module_all(mod->name, true)) != NULL) {
- if (old->state == MODULE_STATE_COMING
- || old->state == MODULE_STATE_UNFORMED) {
- /* Wait in case it fails to load. */
- mutex_unlock(&module_mutex);
- err = wait_event_interruptible(module_wq,
- finished_loading(mod->name));
- if (err)
- goto free_module;
- goto again;
- }
- err = -EEXIST;
- mutex_unlock(&module_mutex);
+ /* Reserve our place in the list. */
+ err = add_unformed_module(mod);
+ if (err)
goto free_module;
- }
- list_add_rcu(&mod->list, &modules);
- mutex_unlock(&module_mutex);
#ifdef CONFIG_MODULE_SIG
mod->sig_ok = info->sig_ok;
- if (!mod->sig_ok)
- add_taint_module(mod, TAINT_FORCED_MODULE);
+ if (!mod->sig_ok) {
+ printk_once(KERN_NOTICE
+ "%s: module verification failed: signature and/or"
+ " required key missing - tainting kernel\n",
+ mod->name);
+ add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
+ }
#endif
/* Now module is in final location, initialize linked lists, etc. */
@@ -3236,21 +3284,11 @@ again:
dynamic_debug_setup(info->debug, info->num_debug);
- mutex_lock(&module_mutex);
- /* Find duplicate symbols (must be called under lock). */
- err = verify_export_symbols(mod);
- if (err < 0)
+ /* Finally it's fully formed, ready to start executing. */
+ err = complete_formation(mod, info);
+ if (err)
goto ddebug_cleanup;
- /* This relies on module_mutex for list integrity. */
- module_bug_finalize(info->hdr, info->sechdrs, mod);
-
- /* Mark state as coming so strong_try_module_get() ignores us,
- * but kallsyms etc. can see us. */
- mod->state = MODULE_STATE_COMING;
-
- mutex_unlock(&module_mutex);
-
/* Module is ready to execute: parsing args may do that. */
err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-32768, 32767, &ddebug_dyndbg_module_param_cb);
@@ -3274,8 +3312,8 @@ again:
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);
- ddebug_cleanup:
mutex_unlock(&module_mutex);
+ ddebug_cleanup:
dynamic_debug_remove(info->debug);
synchronize_sched();
kfree(mod->args);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index b781e66a8f2c..afc0456f227a 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -250,7 +250,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
return PTR_ERR(file);
err = -EINVAL;
- ei = PROC_I(file->f_dentry->d_inode);
+ ei = PROC_I(file_inode(file));
ops = ei->ns_ops;
if (nstype && (ops->type != nstype))
goto out;
diff --git a/kernel/panic.c b/kernel/panic.c
index e1b2822fff97..7c57cc9eee2c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -259,26 +259,19 @@ unsigned long get_taint(void)
return tainted_mask;
}
-void add_taint(unsigned flag)
+/**
+ * add_taint: add a taint flag if not already set.
+ * @flag: one of the TAINT_* constants.
+ * @lockdep_ok: whether lock debugging is still OK.
+ *
+ * If something bad has gone wrong, you'll want @lockdep_ok set to
+ * LOCKDEP_NOW_UNRELIABLE, but for some noteworthy-but-not-corrupting
+ * cases it can stay LOCKDEP_STILL_OK.
+ */
+void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
- /*
- * Can't trust the integrity of the kernel anymore.
- * We don't call directly debug_locks_off() because the issue
- * is not necessarily serious enough to set oops_in_progress to 1
- * Also we want to keep up lockdep for staging/out-of-tree
- * development and post-warning case.
- */
- switch (flag) {
- case TAINT_CRAP:
- case TAINT_OOT_MODULE:
- case TAINT_WARN:
- case TAINT_FIRMWARE_WORKAROUND:
- break;
-
- default:
- if (__debug_locks_off())
- printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
- }
+ if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
+ printk(KERN_WARNING
+ "Disabling lock debugging due to kernel taint\n");
set_bit(flag, &tainted_mask);
}
@@ -421,7 +414,8 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
print_modules();
dump_stack();
print_oops_end_marker();
- add_taint(taint);
+ /* Just a warning, don't kill lockdep. */
+ add_taint(taint, LOCKDEP_STILL_OK);
}
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
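
With the extra enum lockdep_ok argument, each add_taint()/add_taint_module() caller now states explicitly whether lock debugging remains trustworthy. A minimal sketch of the two cases, mirroring the call sites above (the wrapper function is illustrative):

#include <linux/kernel.h>

static void example_taint_calls(void)
{
	/* Kernel integrity is in doubt: lock debugging gets switched off. */
	add_taint(TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* Noteworthy but not corrupting (e.g. out-of-tree module): keep lockdep. */
	add_taint(TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
}
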
diff --git a/kernel/pid.c b/kernel/pid.c
index f2c6a6825098..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
- struct hlist_node *elem;
struct upid *pnr;
- hlist_for_each_entry_rcu(pnr, elem,
+ hlist_for_each_entry_rcu(pnr,
&pid_hash[pid_hashfn(nr, ns)], pid_chain)
if (pnr->nr == nr && pnr->ns == ns)
return container_of(pnr, struct pid,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 10349d5f2ec3..6edbb2c55c22 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -552,24 +552,22 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
return -EAGAIN;
spin_lock_init(&new_timer->it_lock);
- retry:
- if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
- error = -EAGAIN;
- goto out;
- }
+
+ idr_preload(GFP_KERNEL);
spin_lock_irq(&idr_lock);
- error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
+ error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
spin_unlock_irq(&idr_lock);
- if (error) {
- if (error == -EAGAIN)
- goto retry;
+ idr_preload_end();
+ if (error < 0) {
/*
* Weird looking, but we return EAGAIN if the IDR is
* full (proper POSIX return value for this)
*/
- error = -EAGAIN;
+ if (error == -ENOSPC)
+ error = -EAGAIN;
goto out;
}
+ new_timer_id = error;
it_id_set = IT_ID_SET;
new_timer->it_id = (timer_t) new_timer_id;
@@ -639,6 +637,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
struct k_itimer *timr;
+ /*
+ * timer_t could be any type >= int and we want to make sure any
+ * @timer_id outside positive int range fails lookup.
+ */
+ if ((unsigned long long)timer_id > INT_MAX)
+ return NULL;
+
rcu_read_lock();
timr = idr_find(&posix_timers_id, (int)timer_id);
if (timr) {
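
The timer_create() conversion above follows the idr_preload()/idr_alloc() pattern that replaces idr_pre_get()/idr_get_new(): preallocate with GFP_KERNEL outside the lock, then allocate atomically under it. A minimal sketch with hypothetical names (example_idr, example_lock, example_alloc_id):

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep, so done unlocked */
	spin_lock(&example_lock);
	/* start = 0, end = 0 means "any non-negative id". */
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOSPC or -ENOMEM on failure */
}
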
diff --git a/kernel/printk.c b/kernel/printk.c
index f24633afa46a..0b31715f335a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -88,6 +88,12 @@ static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map console_lock_dep_map = {
+ .name = "console_lock"
+};
+#endif
+
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
@@ -1919,6 +1925,7 @@ void console_lock(void)
return;
console_locked = 1;
console_may_schedule = 1;
+ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(console_lock);
@@ -1940,6 +1947,7 @@ int console_trylock(void)
}
console_locked = 1;
console_may_schedule = 0;
+ mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
return 1;
}
EXPORT_SYMBOL(console_trylock);
@@ -2102,6 +2110,7 @@ skip:
local_irq_restore(flags);
}
console_locked = 0;
+ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
/* Release the exclusive_console once it is used */
if (unlikely(exclusive_console))
diff --git a/kernel/relay.c b/kernel/relay.c
index e8cd2027abbd..01ab081ac53a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1139,7 +1139,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
if (!desc->count)
return 0;
- mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
+ mutex_lock(&file_inode(filp)->i_mutex);
do {
if (!relay_file_read_avail(buf, *ppos))
break;
@@ -1159,7 +1159,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
*ppos = relay_file_read_end_pos(buf, read_start, ret);
}
} while (desc->count && ret);
- mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);
+ mutex_unlock(&file_inode(filp)->i_mutex);
return desc->written;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 053dfd7692d1..7f12624a393c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
struct preempt_notifier *notifier;
- struct hlist_node *node;
- hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
struct preempt_notifier *notifier;
- struct hlist_node *node;
- hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_out(notifier, next);
}
@@ -1979,11 +1977,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
}
/*
- * nr_running, nr_uninterruptible and nr_context_switches:
+ * nr_running and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
- * threads, current number of uninterruptible-sleeping threads, total
- * number of context switches performed since bootup.
+ * threads, total number of context switches performed since bootup.
*/
unsigned long nr_running(void)
{
@@ -1995,23 +1992,6 @@ unsigned long nr_running(void)
return sum;
}
-unsigned long nr_uninterruptible(void)
-{
- unsigned long i, sum = 0;
-
- for_each_possible_cpu(i)
- sum += cpu_rq(i)->nr_uninterruptible;
-
- /*
- * Since we read the counters lockless, it might be slightly
- * inaccurate. Do not allow it to go below zero though:
- */
- if (unlikely((long)sum < 0))
- sum = 0;
-
- return sum;
-}
-
unsigned long long nr_context_switches(void)
{
int i;
@@ -2796,7 +2776,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
if (irqs_disabled())
print_irqtrace_events(prev);
dump_stack();
- add_taint(TAINT_WARN);
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
/*
@@ -3278,7 +3258,8 @@ void complete_all(struct completion *x)
EXPORT_SYMBOL(complete_all);
static inline long __sched
-do_wait_for_common(struct completion *x, long timeout, int state)
+do_wait_for_common(struct completion *x,
+ long (*action)(long), long timeout, int state)
{
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
@@ -3291,7 +3272,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
}
__set_current_state(state);
spin_unlock_irq(&x->wait.lock);
- timeout = schedule_timeout(timeout);
+ timeout = action(timeout);
spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
__remove_wait_queue(&x->wait, &wait);
@@ -3302,17 +3283,30 @@ do_wait_for_common(struct completion *x, long timeout, int state)
return timeout ?: 1;
}
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
+static inline long __sched
+__wait_for_common(struct completion *x,
+ long (*action)(long), long timeout, int state)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, timeout, state);
+ timeout = do_wait_for_common(x, action, timeout, state);
spin_unlock_irq(&x->wait.lock);
return timeout;
}
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
+{
+ return __wait_for_common(x, schedule_timeout, timeout, state);
+}
+
+static long __sched
+wait_for_common_io(struct completion *x, long timeout, int state)
+{
+ return __wait_for_common(x, io_schedule_timeout, timeout, state);
+}
+
/**
* wait_for_completion: - waits for completion of a task
* @x: holds the state of this particular completion
@@ -3349,6 +3343,39 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout)
EXPORT_SYMBOL(wait_for_completion_timeout);
/**
+ * wait_for_completion_io: - waits for completion of a task
+ * @x: holds the state of this particular completion
+ *
+ * This waits to be signaled for completion of a specific task. It is NOT
+ * interruptible and there is no timeout. The caller is accounted as waiting
+ * for IO.
+ */
+void __sched wait_for_completion_io(struct completion *x)
+{
+ wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io);
+
+/**
+ * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
+ * @x: holds the state of this particular completion
+ * @timeout: timeout value in jiffies
+ *
+ * This waits for either a completion of a specific task to be signaled or for a
+ * specified timeout to expire. The timeout is in jiffies. It is not
+ * interruptible. The caller is accounted as waiting for IO.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
+ */
+unsigned long __sched
+wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
+{
+ return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io_timeout);
+
+/**
* wait_for_completion_interruptible: - waits for completion of a task (w/intr)
* @x: holds the state of this particular completion
*
@@ -4374,7 +4401,10 @@ EXPORT_SYMBOL(yield);
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
- * Returns true if we indeed boosted the target task.
+ * Returns:
+ * true (>0) if we indeed boosted the target task.
+ * false (0) if we failed to boost the target.
+ * -ESRCH if there's no task to yield to.
*/
bool __sched yield_to(struct task_struct *p, bool preempt)
{
@@ -4388,6 +4418,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
again:
p_rq = task_rq(p);
+ /*
+ * If we're the only runnable task on the rq and target rq also
+ * has only one task, there's absolutely no point in yielding.
+ */
+ if (rq->nr_running == 1 && p_rq->nr_running == 1) {
+ yielded = -ESRCH;
+ goto out_irq;
+ }
+
double_rq_lock(rq, p_rq);
while (task_rq(p) != p_rq) {
double_rq_unlock(rq, p_rq);
@@ -4395,13 +4434,13 @@ again:
}
if (!curr->sched_class->yield_to_task)
- goto out;
+ goto out_unlock;
if (curr->sched_class != p->sched_class)
- goto out;
+ goto out_unlock;
if (task_running(p_rq, p) || p->state)
- goto out;
+ goto out_unlock;
yielded = curr->sched_class->yield_to_task(rq, p, preempt);
if (yielded) {
@@ -4414,11 +4453,12 @@ again:
resched_task(p_rq->curr);
}
-out:
+out_unlock:
double_rq_unlock(rq, p_rq);
+out_irq:
local_irq_restore(flags);
- if (yielded)
+ if (yielded > 0)
schedule();
return yielded;
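
wait_for_completion_io() and wait_for_completion_io_timeout() behave like their plain counterparts but account the sleeping task as waiting for IO, since they go through io_schedule_timeout(). A minimal sketch of a submit-and-wait path using the new helper; the request structure and handler names are hypothetical:

#include <linux/completion.h>

struct example_request {
	struct completion done;
	/* ... driver-specific fields ... */
};

static void example_submit_and_wait(struct example_request *req)
{
	init_completion(&req->done);

	/*
	 * Hand req to the hardware here; its completion path must call
	 * complete(&req->done).
	 */

	/* Uninterruptible sleep, charged to iowait. */
	wait_for_completion_io(&req->done);
}
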
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9857329ed280..ed12cbb135f4 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -604,7 +604,7 @@ static unsigned long long vtime_delta(struct task_struct *tsk)
{
unsigned long long clock;
- clock = sched_clock();
+ clock = local_clock();
if (clock < tsk->vtime_snap)
return 0;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 557e7b53b323..75024a673520 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -262,11 +262,11 @@ static void print_cpu(struct seq_file *m, int cpu)
{
unsigned int freq = cpu_khz ? : 1;
- SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+ SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
cpu, freq / 1000, (freq % 1000));
}
#else
- SEQ_printf(m, "\ncpu#%d\n", cpu);
+ SEQ_printf(m, "cpu#%d\n", cpu);
#endif
#define P(x) \
@@ -323,6 +323,7 @@ do { \
print_rq(m, rq, cpu);
rcu_read_unlock();
spin_unlock_irqrestore(&sched_debug_lock, flags);
+ SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
@@ -331,11 +332,10 @@ static const char *sched_tunable_scaling_names[] = {
"linear"
};
-static int sched_debug_show(struct seq_file *m, void *v)
+static void sched_debug_header(struct seq_file *m)
{
u64 ktime, sched_clk, cpu_clk;
unsigned long flags;
- int cpu;
local_irq_save(flags);
ktime = ktime_to_ns(ktime_get());
@@ -377,33 +377,101 @@ static int sched_debug_show(struct seq_file *m, void *v)
#undef PN
#undef P
- SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+ SEQ_printf(m, " .%-40s: %d (%s)\n",
+ "sysctl_sched_tunable_scaling",
sysctl_sched_tunable_scaling,
sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+ SEQ_printf(m, "\n");
+}
- for_each_online_cpu(cpu)
- print_cpu(m, cpu);
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+ int cpu = (unsigned long)(v - 2);
- SEQ_printf(m, "\n");
+ if (cpu != -1)
+ print_cpu(m, cpu);
+ else
+ sched_debug_header(m);
return 0;
}
void sysrq_sched_debug_show(void)
{
- sched_debug_show(NULL, NULL);
+ int cpu;
+
+ sched_debug_header(NULL);
+ for_each_online_cpu(cpu)
+ print_cpu(NULL, cpu);
+
+}
+
+/*
+ * This itererator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *sched_debug_start(struct seq_file *file, loff_t *offset)
+{
+ unsigned long n = *offset;
+
+ if (n == 0)
+ return (void *) 1;
+
+ n--;
+
+ if (n > 0)
+ n = cpumask_next(n - 1, cpu_online_mask);
+ else
+ n = cpumask_first(cpu_online_mask);
+
+ *offset = n + 1;
+
+ if (n < nr_cpu_ids)
+ return (void *)(unsigned long)(n + 2);
+ return NULL;
+}
+
+static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
+{
+ (*offset)++;
+ return sched_debug_start(file, offset);
+}
+
+static void sched_debug_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations sched_debug_sops = {
+ .start = sched_debug_start,
+ .next = sched_debug_next,
+ .stop = sched_debug_stop,
+ .show = sched_debug_show,
+};
+
+static int sched_debug_release(struct inode *inode, struct file *file)
+{
+ seq_release(inode, file);
+
+ return 0;
}
static int sched_debug_open(struct inode *inode, struct file *filp)
{
- return single_open(filp, sched_debug_show, NULL);
+ int ret = 0;
+
+ ret = seq_open(filp, &sched_debug_sops);
+
+ return ret;
}
static const struct file_operations sched_debug_fops = {
.open = sched_debug_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = sched_debug_release,
};
static int __init init_sched_debug_procfs(void)
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 903ffa9e8872..e036eda1a9c9 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -21,14 +21,17 @@ static int show_schedstat(struct seq_file *seq, void *v)
if (mask_str == NULL)
return -ENOMEM;
- seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
- seq_printf(seq, "timestamp %lu\n", jiffies);
- for_each_online_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
+ if (v == (void *)1) {
+ seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+ seq_printf(seq, "timestamp %lu\n", jiffies);
+ } else {
+ struct rq *rq;
#ifdef CONFIG_SMP
struct sched_domain *sd;
int dcount = 0;
#endif
+ cpu = (unsigned long)(v - 2);
+ rq = cpu_rq(cpu);
/* runqueue-specific stats */
seq_printf(seq,
@@ -77,30 +80,66 @@ static int show_schedstat(struct seq_file *seq, void *v)
return 0;
}
-static int schedstat_open(struct inode *inode, struct file *file)
+/*
+ * This iterator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
- unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
- char *buf = kmalloc(size, GFP_KERNEL);
- struct seq_file *m;
- int res;
+ unsigned long n = *offset;
- if (!buf)
- return -ENOMEM;
- res = single_open(file, show_schedstat, NULL);
- if (!res) {
- m = file->private_data;
- m->buf = buf;
- m->size = size;
- } else
- kfree(buf);
- return res;
+ if (n == 0)
+ return (void *) 1;
+
+ n--;
+
+ if (n > 0)
+ n = cpumask_next(n - 1, cpu_online_mask);
+ else
+ n = cpumask_first(cpu_online_mask);
+
+ *offset = n + 1;
+
+ if (n < nr_cpu_ids)
+ return (void *)(unsigned long)(n + 2);
+ return NULL;
+}
+
+static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
+{
+ (*offset)++;
+ return schedstat_start(file, offset);
+}
+
+static void schedstat_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations schedstat_sops = {
+ .start = schedstat_start,
+ .next = schedstat_next,
+ .stop = schedstat_stop,
+ .show = show_schedstat,
+};
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &schedstat_sops);
}
+static int schedstat_release(struct inode *inode, struct file *file)
+{
+ return 0;
+};
+
static const struct file_operations proc_schedstat_operations = {
.open = schedstat_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = schedstat_release,
};
static int __init proc_schedstat_init(void)
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a7ae2963185..2ec870a4c3c4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1157,11 +1157,11 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
static void print_fatal_signal(int signr)
{
struct pt_regs *regs = signal_pt_regs();
- printk("%s/%d: potentially unexpected fatal signal %d.\n",
+ printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
current->comm, task_pid_nr(current), signr);
#if defined(__i386__) && !defined(__arch_um__)
- printk("code at %08lx: ", regs->ip);
+ printk(KERN_INFO "code at %08lx: ", regs->ip);
{
int i;
for (i = 0; i < 16; i++) {
@@ -1169,11 +1169,11 @@ static void print_fatal_signal(int signr)
if (get_user(insn, (unsigned char *)(regs->ip + i)))
break;
- printk("%02x ", insn);
+ printk(KERN_CONT "%02x ", insn);
}
}
+ printk(KERN_CONT "\n");
#endif
- printk("\n");
preempt_disable();
show_regs(regs);
preempt_enable();
@@ -2653,7 +2653,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
if (oset) {
compat_sigset_t old32;
sigset_to_compat(&old32, &old_set);
- if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
+ if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
return -EFAULT;
}
return 0;
@@ -2996,7 +2996,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
- if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+ if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+ (task_pid_vnr(current) != pid)) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
@@ -3045,7 +3046,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
- if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+ if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
+ (task_pid_vnr(current) != pid)) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d4abac261779..b9bde5727829 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
continue;
}
- BUG_ON(td->cpu != smp_processor_id());
+ //BUG_ON(td->cpu != smp_processor_id());
/* Check for state change setup */
switch (td->status) {
diff --git a/kernel/sys.c b/kernel/sys.c
index 840cfdad7bfc..81f56445fba9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -434,11 +434,12 @@ static DEFINE_MUTEX(reboot_mutex);
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
void __user *, arg)
{
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
char buffer[256];
int ret = 0;
/* We only trust the superuser with rebooting the system. */
- if (!capable(CAP_SYS_BOOT))
+ if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
return -EPERM;
/* For safety, we require "magic" arguments. */
@@ -454,7 +455,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
* pid_namespace, the command is handled by reboot_pid_ns() which will
* call do_exit().
*/
- ret = reboot_pid_ns(task_active_pid_ns(current), cmd);
+ ret = reboot_pid_ns(pid_ns, cmd);
if (ret)
return ret;
@@ -1793,14 +1794,14 @@ SYSCALL_DEFINE1(umask, int, mask)
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
struct fd exe;
- struct dentry *dentry;
+ struct inode *inode;
int err;
exe = fdget(fd);
if (!exe.file)
return -EBADF;
- dentry = exe.file->f_path.dentry;
+ inode = file_inode(exe.file);
/*
* Because the original mm->exe_file points to executable file, make
@@ -1808,11 +1809,11 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
* overall picture.
*/
err = -EACCES;
- if (!S_ISREG(dentry->d_inode->i_mode) ||
+ if (!S_ISREG(inode->i_mode) ||
exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- err = inode_permission(dentry->d_inode, MAY_EXEC);
+ err = inode_permission(inode, MAY_EXEC);
if (err)
goto exit;
@@ -2184,11 +2185,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
-static void argv_cleanup(struct subprocess_info *info)
-{
- argv_free(info->argv);
-}
-
static int __orderly_poweroff(void)
{
int argc;
@@ -2208,9 +2204,8 @@ static int __orderly_poweroff(void)
}
ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
- NULL, argv_cleanup, NULL);
- if (ret == -ENOMEM)
- argv_free(argv);
+ NULL, NULL, NULL);
+ argv_free(argv);
return ret;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 95e9e55602a8..afc1dc60f3f8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -157,6 +157,9 @@ extern int sysctl_tsb_ratio;
#ifdef __hppa__
extern int pwrsw_enabled;
+#endif
+
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
extern int unaligned_enabled;
#endif
@@ -555,6 +558,8 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#endif
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
{
.procname = "unaligned-trap",
.data = &unaligned_enabled,
@@ -2018,7 +2023,7 @@ static int proc_taint(struct ctl_table *table, int write,
int i;
for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
if ((tmptaint >> i) & 1)
- add_taint(i);
+ add_taint(i, LOCKDEP_STILL_OK);
}
}
@@ -2095,7 +2100,7 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
static void validate_coredump_safety(void)
{
#ifdef CONFIG_COREDUMP
- if (suid_dumpable == SUID_DUMPABLE_SAFE &&
+ if (suid_dumpable == SUID_DUMP_ROOT &&
core_pattern[0] != '/' && core_pattern[0] != '|') {
printk(KERN_WARNING "Unsafe core_pattern used with "\
"suid_dumpable=2. Pipe handler or fully qualified "\
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b669ca1fa103..ebf72358e86a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -970,7 +970,6 @@ out:
static ssize_t bin_intvec(struct file *file,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
- mm_segment_t old_fs = get_fs();
ssize_t copied = 0;
char *buffer;
ssize_t result;
@@ -983,13 +982,10 @@ static ssize_t bin_intvec(struct file *file,
if (oldval && oldlen) {
unsigned __user *vec = oldval;
size_t length = oldlen / sizeof(*vec);
- loff_t pos = 0;
char *str, *end;
int i;
- set_fs(KERNEL_DS);
- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
- set_fs(old_fs);
+ result = kernel_read(file, 0, buffer, BUFSZ - 1);
if (result < 0)
goto out_kfree;
@@ -1016,7 +1012,6 @@ static ssize_t bin_intvec(struct file *file,
if (newval && newlen) {
unsigned __user *vec = newval;
size_t length = newlen / sizeof(*vec);
- loff_t pos = 0;
char *str, *end;
int i;
@@ -1032,9 +1027,7 @@ static ssize_t bin_intvec(struct file *file,
str += snprintf(str, end - str, "%lu\t", value);
}
- set_fs(KERNEL_DS);
- result = vfs_write(file, buffer, str - buffer, &pos);
- set_fs(old_fs);
+ result = kernel_write(file, buffer, str - buffer, 0);
if (result < 0)
goto out_kfree;
}
@@ -1048,7 +1041,6 @@ out:
static ssize_t bin_ulongvec(struct file *file,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
- mm_segment_t old_fs = get_fs();
ssize_t copied = 0;
char *buffer;
ssize_t result;
@@ -1061,13 +1053,10 @@ static ssize_t bin_ulongvec(struct file *file,
if (oldval && oldlen) {
unsigned long __user *vec = oldval;
size_t length = oldlen / sizeof(*vec);
- loff_t pos = 0;
char *str, *end;
int i;
- set_fs(KERNEL_DS);
- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
- set_fs(old_fs);
+ result = kernel_read(file, 0, buffer, BUFSZ - 1);
if (result < 0)
goto out_kfree;
@@ -1094,7 +1083,6 @@ static ssize_t bin_ulongvec(struct file *file,
if (newval && newlen) {
unsigned long __user *vec = newval;
size_t length = newlen / sizeof(*vec);
- loff_t pos = 0;
char *str, *end;
int i;
@@ -1110,9 +1098,7 @@ static ssize_t bin_ulongvec(struct file *file,
str += snprintf(str, end - str, "%lu\t", value);
}
- set_fs(KERNEL_DS);
- result = vfs_write(file, buffer, str - buffer, &pos);
- set_fs(old_fs);
+ result = kernel_write(file, buffer, str - buffer, 0);
if (result < 0)
goto out_kfree;
}
@@ -1126,19 +1112,15 @@ out:
static ssize_t bin_uuid(struct file *file,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
- mm_segment_t old_fs = get_fs();
ssize_t result, copied = 0;
/* Only supports reads */
if (oldval && oldlen) {
- loff_t pos = 0;
char buf[40], *str = buf;
unsigned char uuid[16];
int i;
- set_fs(KERNEL_DS);
- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
- set_fs(old_fs);
+ result = kernel_read(file, 0, buf, sizeof(buf) - 1);
if (result < 0)
goto out;
@@ -1174,18 +1156,14 @@ out:
static ssize_t bin_dn_node_address(struct file *file,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
- mm_segment_t old_fs = get_fs();
ssize_t result, copied = 0;
if (oldval && oldlen) {
- loff_t pos = 0;
char buf[15], *nodep;
unsigned long area, node;
__le16 dnaddr;
- set_fs(KERNEL_DS);
- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
- set_fs(old_fs);
+ result = kernel_read(file, 0, buf, sizeof(buf) - 1);
if (result < 0)
goto out;
@@ -1193,9 +1171,10 @@ static ssize_t bin_dn_node_address(struct file *file,
/* Convert the decnet address to binary */
result = -EIO;
- nodep = strchr(buf, '.') + 1;
+ nodep = strchr(buf, '.');
if (!nodep)
goto out;
+ ++nodep;
area = simple_strtoul(buf, NULL, 10);
node = simple_strtoul(nodep, NULL, 10);
@@ -1214,7 +1193,6 @@ static ssize_t bin_dn_node_address(struct file *file,
}
if (newval && newlen) {
- loff_t pos = 0;
__le16 dnaddr;
char buf[15];
int len;
@@ -1231,9 +1209,7 @@ static ssize_t bin_dn_node_address(struct file *file,
le16_to_cpu(dnaddr) >> 10,
le16_to_cpu(dnaddr) & 0x3ff);
- set_fs(KERNEL_DS);
- result = vfs_write(file, buf, len, &pos);
- set_fs(old_fs);
+ result = kernel_write(file, buf, len, 0);
if (result < 0)
goto out;
}
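
The sysctl_binary conversions above swap the set_fs(KERNEL_DS) plus vfs_read()/vfs_write() sequence for kernel_read()/kernel_write(), which take a kernel buffer directly. A minimal sketch of a read helper in the same style; the function name is illustrative:

static ssize_t example_read_into(struct file *file, char *buf, size_t len)
{
	ssize_t n;

	/* Read from offset 0 into a kernel buffer; no address-limit games. */
	n = kernel_read(file, 0, buf, len - 1);
	if (n < 0)
		return n;
	buf[n] = '\0';
	return n;
}
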
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 314b9ee07edf..a19a39952c1b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -554,6 +554,7 @@ void tick_nohz_idle_enter(void)
local_irq_enable();
}
+EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
/**
* tick_nohz_irq_exit - update next tick event from interrupt exit
@@ -685,6 +686,7 @@ void tick_nohz_idle_exit(void)
local_irq_enable();
}
+EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
diff --git a/kernel/timeconst.bc b/kernel/timeconst.bc
new file mode 100644
index 000000000000..511bdf2cafda
--- /dev/null
+++ b/kernel/timeconst.bc
@@ -0,0 +1,108 @@
+scale=0
+
+define gcd(a,b) {
+ auto t;
+ while (b) {
+ t = b;
+ b = a % b;
+ a = t;
+ }
+ return a;
+}
+
+/* Division by reciprocal multiplication. */
+define fmul(b,n,d) {
+ return (2^b*n+d-1)/d;
+}
+
+/* Adjustment factor when a ceiling value is used.  Use as:
+   (imul * n) + ((fmulxx * n + fadjxx) >> xx) */
+define fadj(b,n,d) {
+ auto v;
+ d = d/gcd(n,d);
+ v = 2^b*(d-1)/d;
+ return v;
+}
+
+/* Compute the appropriate mul/adj values as well as a shift count,
+ which brings the mul value into the range 2^(b-1) <= x < 2^b. Such
+ a shift value will be correct in the signed integer range and off
+ by at most one in the upper half of the unsigned range. */
+define fmuls(b,n,d) {
+ auto s, m;
+ for (s = 0; 1; s++) {
+ m = fmul(s,n,d);
+ if (m >= 2^(b-1))
+ return s;
+ }
+ return 0;
+}
+
+define timeconst(hz) {
+ print "/* Automatically generated by kernel/timeconst.bc */\n"
+ print "/* Time conversion constants for HZ == ", hz, " */\n"
+ print "\n"
+
+ print "#ifndef KERNEL_TIMECONST_H\n"
+ print "#define KERNEL_TIMECONST_H\n\n"
+
+ print "#include <linux/param.h>\n"
+ print "#include <linux/types.h>\n\n"
+
+ print "#if HZ != ", hz, "\n"
+ print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+ print "#endif\n\n"
+
+ if (hz < 2) {
+ print "#error Totally bogus HZ value!\n"
+ } else {
+ s=fmuls(32,1000,hz)
+ obase=16
+ print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
+ print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
+ obase=10
+ print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
+
+ s=fmuls(32,hz,1000)
+ obase=16
+ print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
+ print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
+ obase=10
+ print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
+
+ obase=10
+ cd=gcd(hz,1000)
+ print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
+ print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
+ print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+ print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
+ print "\n"
+
+ s=fmuls(32,1000000,hz)
+ obase=16
+ print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
+ print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
+ obase=10
+ print "#define HZ_TO_USEC_SHR32\t", s, "\n"
+
+ s=fmuls(32,hz,1000000)
+ obase=16
+ print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
+ print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
+ obase=10
+ print "#define USEC_TO_HZ_SHR32\t", s, "\n"
+
+ obase=10
+ cd=gcd(hz,1000000)
+ print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
+ print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
+ print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+ print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+ print "\n"
+
+ print "#endif /* KERNEL_TIMECONST_H */\n"
+ }
+ halt
+}
+
+timeconst(hz)
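
The generated MUL/ADJ/SHR constants implement division by reciprocal multiplication: a conversion becomes one 64-bit multiply, one add of the ceiling adjustment, and one shift. A minimal sketch of how a consumer would use them, assuming the usual generated macro names from timeconst.h:

static inline unsigned long example_msecs_to_jiffies(unsigned int m)
{
	/* jiffies = ceil(m * HZ / 1000), computed without a division */
	return ((u64)MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
			>> MSEC_TO_HZ_SHR32;
}
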
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
deleted file mode 100644
index 3f42652a6a37..000000000000
--- a/kernel/timeconst.pl
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/perl
-# -----------------------------------------------------------------------
-#
-# Copyright 2007-2008 rPath, Inc. - All Rights Reserved
-#
-# This file is part of the Linux kernel, and is made available under
-# the terms of the GNU General Public License version 2 or (at your
-# option) any later version; incorporated herein by reference.
-#
-# -----------------------------------------------------------------------
-#
-
-#
-# Usage: timeconst.pl HZ > timeconst.h
-#
-
-# Precomputed values for systems without Math::BigInt
-# Generated by:
-# timeconst.pl --can 24 32 48 64 100 122 128 200 250 256 300 512 1000 1024 1200
-%canned_values = (
- 24 => [
- '0xa6aaaaab','0x2aaaaaa',26,
- 125,3,
- '0xc49ba5e4','0x1fbe76c8b4',37,
- 3,125,
- '0xa2c2aaab','0xaaaa',16,
- 125000,3,
- '0xc9539b89','0x7fffbce4217d',47,
- 3,125000,
- ], 32 => [
- '0xfa000000','0x6000000',27,
- 125,4,
- '0x83126e98','0xfdf3b645a',36,
- 4,125,
- '0xf4240000','0x0',17,
- 31250,1,
- '0x8637bd06','0x3fff79c842fa',46,
- 1,31250,
- ], 48 => [
- '0xa6aaaaab','0x6aaaaaa',27,
- 125,6,
- '0xc49ba5e4','0xfdf3b645a',36,
- 6,125,
- '0xa2c2aaab','0x15555',17,
- 62500,3,
- '0xc9539b89','0x3fffbce4217d',46,
- 3,62500,
- ], 64 => [
- '0xfa000000','0xe000000',28,
- 125,8,
- '0x83126e98','0x7ef9db22d',35,
- 8,125,
- '0xf4240000','0x0',18,
- 15625,1,
- '0x8637bd06','0x1fff79c842fa',45,
- 1,15625,
- ], 100 => [
- '0xa0000000','0x0',28,
- 10,1,
- '0xcccccccd','0x733333333',35,
- 1,10,
- '0x9c400000','0x0',18,
- 10000,1,
- '0xd1b71759','0x1fff2e48e8a7',45,
- 1,10000,
- ], 122 => [
- '0x8325c53f','0xfbcda3a',28,
- 500,61,
- '0xf9db22d1','0x7fbe76c8b',35,
- 61,500,
- '0x8012e2a0','0x3ef36',18,
- 500000,61,
- '0xffda4053','0x1ffffbce4217',45,
- 61,500000,
- ], 128 => [
- '0xfa000000','0x1e000000',29,
- 125,16,
- '0x83126e98','0x3f7ced916',34,
- 16,125,
- '0xf4240000','0x40000',19,
- 15625,2,
- '0x8637bd06','0xfffbce4217d',44,
- 2,15625,
- ], 200 => [
- '0xa0000000','0x0',29,
- 5,1,
- '0xcccccccd','0x333333333',34,
- 1,5,
- '0x9c400000','0x0',19,
- 5000,1,
- '0xd1b71759','0xfff2e48e8a7',44,
- 1,5000,
- ], 250 => [
- '0x80000000','0x0',29,
- 4,1,
- '0x80000000','0x180000000',33,
- 1,4,
- '0xfa000000','0x0',20,
- 4000,1,
- '0x83126e98','0x7ff7ced9168',43,
- 1,4000,
- ], 256 => [
- '0xfa000000','0x3e000000',30,
- 125,32,
- '0x83126e98','0x1fbe76c8b',33,
- 32,125,
- '0xf4240000','0xc0000',20,
- 15625,4,
- '0x8637bd06','0x7ffde7210be',43,
- 4,15625,
- ], 300 => [
- '0xd5555556','0x2aaaaaaa',30,
- 10,3,
- '0x9999999a','0x1cccccccc',33,
- 3,10,
- '0xd0555556','0xaaaaa',20,
- 10000,3,
- '0x9d495183','0x7ffcb923a29',43,
- 3,10000,
- ], 512 => [
- '0xfa000000','0x7e000000',31,
- 125,64,
- '0x83126e98','0xfdf3b645',32,
- 64,125,
- '0xf4240000','0x1c0000',21,
- 15625,8,
- '0x8637bd06','0x3ffef39085f',42,
- 8,15625,
- ], 1000 => [
- '0x80000000','0x0',31,
- 1,1,
- '0x80000000','0x0',31,
- 1,1,
- '0xfa000000','0x0',22,
- 1000,1,
- '0x83126e98','0x1ff7ced9168',41,
- 1,1000,
- ], 1024 => [
- '0xfa000000','0xfe000000',32,
- 125,128,
- '0x83126e98','0x7ef9db22',31,
- 128,125,
- '0xf4240000','0x3c0000',22,
- 15625,16,
- '0x8637bd06','0x1fff79c842f',41,
- 16,15625,
- ], 1200 => [
- '0xd5555556','0xd5555555',32,
- 5,6,
- '0x9999999a','0x66666666',31,
- 6,5,
- '0xd0555556','0x2aaaaa',22,
- 2500,3,
- '0x9d495183','0x1ffcb923a29',41,
- 3,2500,
- ]
-);
-
-$has_bigint = eval 'use Math::BigInt qw(bgcd); 1;';
-
-sub bint($)
-{
- my($x) = @_;
- return Math::BigInt->new($x);
-}
-
-#
-# Constants for division by reciprocal multiplication.
-# (bits, numerator, denominator)
-#
-sub fmul($$$)
-{
- my ($b,$n,$d) = @_;
-
- $n = bint($n);
- $d = bint($d);
-
- return scalar (($n << $b)+$d-bint(1))/$d;
-}
-
-sub fadj($$$)
-{
- my($b,$n,$d) = @_;
-
- $n = bint($n);
- $d = bint($d);
-
- $d = $d/bgcd($n, $d);
- return scalar (($d-bint(1)) << $b)/$d;
-}
-
-sub fmuls($$$) {
- my($b,$n,$d) = @_;
- my($s,$m);
- my($thres) = bint(1) << ($b-1);
-
- $n = bint($n);
- $d = bint($d);
-
- for ($s = 0; 1; $s++) {
- $m = fmul($s,$n,$d);
- return $s if ($m >= $thres);
- }
- return 0;
-}
-
-# Generate a hex value if the result fits in 64 bits;
-# otherwise skip.
-sub bignum_hex($) {
- my($x) = @_;
- my $s = $x->as_hex();
-
- return (length($s) > 18) ? undef : $s;
-}
-
-# Provides mul, adj, and shr factors for a specific
-# (bit, time, hz) combination
-sub muladj($$$) {
- my($b, $t, $hz) = @_;
- my $s = fmuls($b, $t, $hz);
- my $m = fmul($s, $t, $hz);
- my $a = fadj($s, $t, $hz);
- return (bignum_hex($m), bignum_hex($a), $s);
-}
-
-# Provides numerator, denominator values
-sub numden($$) {
- my($n, $d) = @_;
- my $g = bgcd($n, $d);
- return ($n/$g, $d/$g);
-}
-
-# All values for a specific (time, hz) combo
-sub conversions($$) {
- my ($t, $hz) = @_;
- my @val = ();
-
- # HZ_TO_xx
- push(@val, muladj(32, $t, $hz));
- push(@val, numden($t, $hz));
-
- # xx_TO_HZ
- push(@val, muladj(32, $hz, $t));
- push(@val, numden($hz, $t));
-
- return @val;
-}
-
-sub compute_values($) {
- my($hz) = @_;
- my @val = ();
- my $s, $m, $a, $g;
-
- if (!$has_bigint) {
- die "$0: HZ == $hz not canned and ".
- "Math::BigInt not available\n";
- }
-
- # MSEC conversions
- push(@val, conversions(1000, $hz));
-
- # USEC conversions
- push(@val, conversions(1000000, $hz));
-
- return @val;
-}
-
-sub outputval($$)
-{
- my($name, $val) = @_;
- my $csuf;
-
- if (defined($val)) {
- if ($name !~ /SHR/) {
- $val = "U64_C($val)";
- }
- printf "#define %-23s %s\n", $name.$csuf, $val.$csuf;
- }
-}
-
-sub output($@)
-{
- my($hz, @val) = @_;
- my $pfx, $bit, $suf, $s, $m, $a;
-
- print "/* Automatically generated by kernel/timeconst.pl */\n";
- print "/* Conversion constants for HZ == $hz */\n";
- print "\n";
- print "#ifndef KERNEL_TIMECONST_H\n";
- print "#define KERNEL_TIMECONST_H\n";
- print "\n";
-
- print "#include <linux/param.h>\n";
- print "#include <linux/types.h>\n";
-
- print "\n";
- print "#if HZ != $hz\n";
- print "#error \"kernel/timeconst.h has the wrong HZ value!\"\n";
- print "#endif\n";
- print "\n";
-
- foreach $pfx ('HZ_TO_MSEC','MSEC_TO_HZ',
- 'HZ_TO_USEC','USEC_TO_HZ') {
- foreach $bit (32) {
- foreach $suf ('MUL', 'ADJ', 'SHR') {
- outputval("${pfx}_$suf$bit", shift(@val));
- }
- }
- foreach $suf ('NUM', 'DEN') {
- outputval("${pfx}_$suf", shift(@val));
- }
- }
-
- print "\n";
- print "#endif /* KERNEL_TIMECONST_H */\n";
-}
-
-# Pretty-print Perl values
-sub perlvals(@) {
- my $v;
- my @l = ();
-
- foreach $v (@_) {
- if (!defined($v)) {
- push(@l, 'undef');
- } elsif ($v =~ /^0x/) {
- push(@l, "\'".$v."\'");
- } else {
- push(@l, $v.'');
- }
- }
- return join(',', @l);
-}
-
-($hz) = @ARGV;
-
-# Use this to generate the %canned_values structure
-if ($hz eq '--can') {
- shift(@ARGV);
- @hzlist = sort {$a <=> $b} (@ARGV);
-
- print "# Precomputed values for systems without Math::BigInt\n";
- print "# Generated by:\n";
- print "# timeconst.pl --can ", join(' ', @hzlist), "\n";
- print "\%canned_values = (\n";
- my $pf = "\t";
- foreach $hz (@hzlist) {
- my @values = compute_values($hz);
- print "$pf$hz => [\n";
- while (scalar(@values)) {
- my $bit;
- foreach $bit (32) {
- my $m = shift(@values);
- my $a = shift(@values);
- my $s = shift(@values);
- print "\t\t", perlvals($m,$a,$s), ",\n";
- }
- my $n = shift(@values);
- my $d = shift(@values);
- print "\t\t", perlvals($n,$d), ",\n";
- }
- print "\t]";
- $pf = ', ';
- }
- print "\n);\n";
-} else {
- $hz += 0; # Force to number
- if ($hz < 1) {
- die "Usage: $0 HZ\n";
- }
-
- $cv = $canned_values{$hz};
- @val = defined($cv) ? @$cv : compute_values($hz);
- output($hz, @val);
-}
-exit 0;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 71259e2b6b61..9e5b8c272eec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -739,6 +739,12 @@ static void blk_add_trace_rq_complete(void *ignore,
struct request_queue *q,
struct request *rq)
{
+ struct blk_trace *bt = q->blk_trace;
+
+ /* if control ever passes through here, it's a request based driver */
+ if (unlikely(bt && !bt->rq_based))
+ bt->rq_based = true;
+
blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
@@ -774,15 +780,30 @@ static void blk_add_trace_bio_bounce(void *ignore,
blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}
-static void blk_add_trace_bio_complete(void *ignore,
- struct request_queue *q, struct bio *bio,
- int error)
+static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
{
+ struct request_queue *q;
+ struct blk_trace *bt;
+
+ if (!bio->bi_bdev)
+ return;
+
+ q = bdev_get_queue(bio->bi_bdev);
+ bt = q->blk_trace;
+
+ /*
+ * Request based drivers will generate both rq and bio completions.
+ * Ignore bio ones.
+ */
+ if (likely(!bt) || bt->rq_based)
+ return;
+
blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}
static void blk_add_trace_bio_backmerge(void *ignore,
struct request_queue *q,
+ struct request *rq,
struct bio *bio)
{
blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
@@ -790,6 +811,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
static void blk_add_trace_bio_frontmerge(void *ignore,
struct request_queue *q,
+ struct request *rq,
struct bio *bio)
{
blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ce8c3d68292f..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
struct ftrace_profile *rec;
struct hlist_head *hhd;
- struct hlist_node *n;
unsigned long key;
key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
if (hlist_empty(hhd))
return NULL;
- hlist_for_each_entry_rcu(rec, n, hhd, node) {
+ hlist_for_each_entry_rcu(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
unsigned long key;
struct ftrace_func_entry *entry;
struct hlist_head *hhd;
- struct hlist_node *n;
if (ftrace_hash_empty(hash))
return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
hhd = &hash->buckets[key];
- hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+ hlist_for_each_entry_rcu(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
struct hlist_head *hhd;
- struct hlist_node *tp, *tn;
+ struct hlist_node *tn;
struct ftrace_func_entry *entry;
int size = 1 << hash->size_bits;
int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
for (i = 0; i < size; i++) {
hhd = &hash->buckets[i];
- hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+ hlist_for_each_entry_safe(entry, tn, hhd, hlist)
free_hash_entry(hash, entry);
}
FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
struct ftrace_func_entry *entry;
struct ftrace_hash *new_hash;
- struct hlist_node *tp;
int size;
int ret;
int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
- hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
ret = add_hash_entry(new_hash, entry->ip);
if (ret < 0)
goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct ftrace_hash **dst, struct ftrace_hash *src)
{
struct ftrace_func_entry *entry;
- struct hlist_node *tp, *tn;
+ struct hlist_node *tn;
struct hlist_head *hhd;
struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
- hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+ hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
if (bits > 0)
key = hash_long(entry->ip, bits);
else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
{
struct ftrace_func_probe *entry;
struct hlist_head *hhd;
- struct hlist_node *n;
unsigned long key;
key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
- hlist_for_each_entry_rcu(entry, n, hhd, node) {
+ hlist_for_each_entry_rcu(entry, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
struct ftrace_func_probe *entry;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int type = MATCH_FULL;
int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
- hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+ hlist_for_each_entry_safe(entry, tmp, hhd, node) {
/* break up if statements for readability */
if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
@@ -3996,37 +3992,51 @@ static void ftrace_init_module(struct module *mod,
ftrace_process_locs(mod, start, end);
}
-static int ftrace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct module *mod = data;
- switch (val) {
- case MODULE_STATE_COMING:
+ if (val == MODULE_STATE_COMING)
ftrace_init_module(mod, mod->ftrace_callsites,
mod->ftrace_callsites +
mod->num_ftrace_callsites);
- break;
- case MODULE_STATE_GOING:
+ return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ if (val == MODULE_STATE_GOING)
ftrace_release_mod(mod);
- break;
- }
return 0;
}
#else
-static int ftrace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
{
return 0;
}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_nb = {
- .notifier_call = ftrace_module_notify,
+struct notifier_block ftrace_module_enter_nb = {
+ .notifier_call = ftrace_module_notify_enter,
.priority = INT_MAX, /* Run before anything that can use kprobes */
};
+struct notifier_block ftrace_module_exit_nb = {
+ .notifier_call = ftrace_module_notify_exit,
+ .priority = INT_MIN, /* Run after anything that can remove kprobes */
+};
+
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
@@ -4058,9 +4068,13 @@ void __init ftrace_init(void)
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_nb);
+ ret = register_module_notifier(&ftrace_module_enter_nb);
+ if (ret)
+ pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+ ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret)
- pr_warning("Failed to register trace ftrace module notifier\n");
+ pr_warning("Failed to register trace ftrace module exit notifier\n");
set_ftrace_early_filters();
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 194d79602dc7..697e88d13907 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
struct trace_event *ftrace_find_event(int type)
{
struct trace_event *event;
- struct hlist_node *n;
unsigned key;
key = type & (EVENT_HASHSIZE - 1);
- hlist_for_each_entry(event, n, &event_hash[key], node) {
+ hlist_for_each_entry(event, &event_hash[key], node) {
if (event->type == type)
return event;
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5329e13e74a1..7a809e321058 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,6 @@
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
+#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h> /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
@@ -47,6 +48,38 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
}
#endif
+#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+/*
+ * Some architectures that allow 32-bit applications
+ * to run on a 64-bit kernel do not map the syscalls for
+ * 32-bit tasks the same way they do for 64-bit tasks.
+ *
+ * *cough*x86*cough*
+ *
+ * In such a case, instead of reporting the wrong syscalls,
+ * simply ignore them.
+ *
+ * For an arch to ignore the compat syscalls it needs to
+ * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS and provide
+ * arch_trace_is_compat_syscall(), which lets the tracing
+ * system know which syscalls to ignore.
+ */
+static int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (unlikely(arch_trace_is_compat_syscall(regs)))
+ return -1;
+
+ return syscall_get_nr(task, regs);
+}
+#else
+static inline int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return syscall_get_nr(task, regs);
+}
+#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
+
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
@@ -276,10 +309,10 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
- int size;
int syscall_nr;
+ int size;
- syscall_nr = syscall_get_nr(current, regs);
+ syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
if (!test_bit(syscall_nr, enabled_enter_syscalls))
@@ -313,7 +346,7 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
struct ring_buffer *buffer;
int syscall_nr;
- syscall_nr = syscall_get_nr(current, regs);
+ syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
if (!test_bit(syscall_nr, enabled_exit_syscalls))
@@ -502,7 +535,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
int rctx;
int size;
- syscall_nr = syscall_get_nr(current, regs);
+ syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
@@ -578,7 +611,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
int rctx;
int size;
- syscall_nr = syscall_get_nr(current, regs);
+ syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..0c05a4592047 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
static struct tracepoint_entry *get_tracepoint(const char *name)
{
struct hlist_head *head;
- struct hlist_node *node;
struct tracepoint_entry *e;
u32 hash = jhash(name, strlen(name), 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ hlist_for_each_entry(e, head, hlist) {
if (!strcmp(name, e->name))
return e;
}
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
static struct tracepoint_entry *add_tracepoint(const char *name)
{
struct hlist_head *head;
- struct hlist_node *node;
struct tracepoint_entry *e;
size_t name_len = strlen(name) + 1;
u32 hash = jhash(name, name_len-1, 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ hlist_for_each_entry(e, head, hlist) {
if (!strcmp(name, e->name)) {
printk(KERN_NOTICE
"tracepoint %s busy\n", name);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 1744bb80f1fb..394f70b17162 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
void fire_user_return_notifiers(void)
{
struct user_return_notifier *urn;
- struct hlist_node *tmp1, *tmp2;
+ struct hlist_node *tmp2;
struct hlist_head *head;
head = &get_cpu_var(return_notifier_list);
- hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+ hlist_for_each_entry_safe(urn, tmp2, head, link)
urn->on_user_return(urn);
put_cpu_var(return_notifier_list);
}
diff --git a/kernel/user.c b/kernel/user.c
index 33acb5e53a5f..e81978e8c03b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
.count = 4294967295U,
},
},
- .kref = {
- .refcount = ATOMIC_INIT(3),
- },
+ .count = ATOMIC_INIT(3),
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.proc_inum = PROC_USER_INIT_INO,
@@ -107,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
- struct hlist_node *h;
- hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
atomic_inc(&user->__count);
return user;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 2b042c42fbc4..8b650837083e 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -78,7 +78,7 @@ int create_user_ns(struct cred *new)
return ret;
}
- kref_init(&ns->kref);
+ atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->owner = owner;
@@ -104,15 +104,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
return create_user_ns(cred);
}
-void free_user_ns(struct kref *kref)
+void free_user_ns(struct user_namespace *ns)
{
- struct user_namespace *parent, *ns =
- container_of(kref, struct user_namespace, kref);
+ struct user_namespace *parent;
- parent = ns->parent;
- proc_free_inum(ns->proc_inum);
- kmem_cache_free(user_ns_cachep, ns);
- put_user_ns(parent);
+ do {
+ parent = ns->parent;
+ proc_free_inum(ns->proc_inum);
+ kmem_cache_free(user_ns_cachep, ns);
+ ns = parent;
+ } while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
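
free_user_ns() now takes the namespace directly and walks the parent chain iteratively, so the put side presumably becomes a plain atomic_dec_and_test() rather than kref_put(). A sketch of the matching helper under that assumption (the real helper lives in the user_namespace header and is not shown in this hunk):

#include <linux/user_namespace.h>

/* Assumed shape of the put helper after the kref -> atomic_t switch:
 * drop one reference and free the namespace chain on the last one.
 */
static inline void put_user_ns_sketch(struct user_namespace *ns)
{
	if (ns && atomic_dec_and_test(&ns->count))
		free_user_ns(ns);
}
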
@@ -519,6 +520,42 @@ struct seq_operations proc_projid_seq_operations = {
.show = projid_m_show,
};
+static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent)
+{
+ u32 upper_first, lower_first, upper_last, lower_last;
+ unsigned idx;
+
+ upper_first = extent->first;
+ lower_first = extent->lower_first;
+ upper_last = upper_first + extent->count - 1;
+ lower_last = lower_first + extent->count - 1;
+
+ for (idx = 0; idx < new_map->nr_extents; idx++) {
+ u32 prev_upper_first, prev_lower_first;
+ u32 prev_upper_last, prev_lower_last;
+ struct uid_gid_extent *prev;
+
+ prev = &new_map->extent[idx];
+
+ prev_upper_first = prev->first;
+ prev_lower_first = prev->lower_first;
+ prev_upper_last = prev_upper_first + prev->count - 1;
+ prev_lower_last = prev_lower_first + prev->count - 1;
+
+ /* Does the upper range intersect a previous extent? */
+ if ((prev_upper_first <= upper_last) &&
+ (prev_upper_last >= upper_first))
+ return true;
+
+ /* Does the lower range intersect a previous extent? */
+ if ((prev_lower_first <= lower_last) &&
+ (prev_lower_last >= lower_first))
+ return true;
+ }
+ return false;
+}
+
+
static DEFINE_MUTEX(id_map_mutex);
static ssize_t map_write(struct file *file, const char __user *buf,
@@ -531,7 +568,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
struct user_namespace *ns = seq->private;
struct uid_gid_map new_map;
unsigned idx;
- struct uid_gid_extent *extent, *last = NULL;
+ struct uid_gid_extent *extent = NULL;
unsigned long page = 0;
char *kbuf, *pos, *next_line;
ssize_t ret = -EINVAL;
@@ -634,14 +671,11 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if ((extent->lower_first + extent->count) <= extent->lower_first)
goto out;
- /* For now only accept extents that are strictly in order */
- if (last &&
- (((last->first + last->count) > extent->first) ||
- ((last->lower_first + last->count) > extent->lower_first)))
+ /* Do the ranges in extent overlap any previous extents? */
+ if (mappings_overlap(&new_map, extent))
goto out;
new_map.nr_extents++;
- last = extent;
/* Fail if the file contains too many extents */
if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
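
mappings_overlap() treats each extent as two closed ranges, [first, first + count - 1] on the upper side and likewise on the lower side, and rejects the new extent if either range intersects any earlier one. A tiny standalone illustration of the intersection test it applies (values invented):

#include <linux/types.h>

/* Two closed ranges [a_first, a_last] and [b_first, b_last] intersect
 * iff a_first <= b_last && a_last >= b_first.
 */
static bool ranges_intersect(u32 a_first, u32 a_last, u32 b_first, u32 b_last)
{
	return a_first <= b_last && a_last >= b_first;
}

/* e.g. [1000, 1999] vs [1500, 2499] -> true; [1000, 1999] vs [2000, 2999] -> false */
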
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 08b197e8c485..a47fc5de3113 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -30,7 +30,7 @@ static struct uts_namespace *create_uts_ns(void)
/*
* Clone a new ns copying an original utsname, setting refcount to 1
* @old_ns: namespace to clone
- * Return NULL on error (failure to kmalloc), new ns otherwise
+ * Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
*/
static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
struct uts_namespace *old_ns)
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 63da38c2d820..4f69f9a5e221 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -15,6 +15,8 @@
#include <linux/sysctl.h>
#include <linux/wait.h>
+#ifdef CONFIG_PROC_SYSCTL
+
static void *get_uts(ctl_table *table, int write)
{
char *which = table->data;
@@ -38,7 +40,6 @@ static void put_uts(ctl_table *table, int write, void *which)
up_write(&uts_sem);
}
-#ifdef CONFIG_PROC_SYSCTL
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feacad3812..81f2457811eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
for ((pool) = &std_worker_pools(cpu)[0]; \
(pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
-#define for_each_busy_worker(worker, i, pos, pool) \
- hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool) \
+ hash_for_each(pool->busy_hash, i, worker, hentry)
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
struct work_struct *work)
{
struct worker *worker;
- struct hlist_node *tmp;
- hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+ hash_for_each_possible(pool->busy_hash, worker, hentry,
(unsigned long)work)
if (worker->current_work == work &&
worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
static void rebind_workers(struct worker_pool *pool)
{
struct worker *worker, *n;
- struct hlist_node *pos;
int i;
lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
}
/* rebind busy workers */
- for_each_busy_worker(worker, i, pos, pool) {
+ for_each_busy_worker(worker, i, pool) {
struct work_struct *rebind_work = &worker->rebind_work;
struct workqueue_struct *wq;
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
int cpu = smp_processor_id();
struct worker_pool *pool;
struct worker *worker;
- struct hlist_node *pos;
int i;
for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
list_for_each_entry(worker, &pool->idle_list, entry)
worker->flags |= WORKER_UNBOUND;
- for_each_busy_worker(worker, i, pos, pool)
+ for_each_busy_worker(worker, i, pool)
worker->flags |= WORKER_UNBOUND;
pool->flags |= POOL_DISASSOCIATED;
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index dbb58ae1b8e0..140e87824173 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -80,4 +80,22 @@ config KDB_KEYBOARD
help
KDB can use a PS/2 type keyboard for an input device
+config KDB_CONTINUE_CATASTROPHIC
+ int "KDB: continue after catastrophic errors"
+ depends on KGDB_KDB
+ default "0"
+ help
+ This integer controls the behaviour of kdb when the kernel gets a
+ catastrophic error, i.e. a panic or oops.
+ When KDB is active and a catastrophic error occurs, nothing extra
+ will happen until you type 'go'.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
+ you type 'go', you will be warned by kdb. The second time you type
+ 'go', KDB tries to continue. No guarantees that the
+ kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue.
+ No guarantees that the kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
+ If you are not sure, say 0.
+
endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7d..d7946ff75b2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
- bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o
+ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
diff --git a/lib/bug.c b/lib/bug.c
index d0cdf14c651a..168603477f02 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -166,7 +166,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
print_modules();
show_regs(regs);
print_oops_end_marker();
- add_taint(BUG_GET_TAINT(bug));
+ /* Just a warning, don't kill lockdep. */
+ add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK);
return BUG_TRAP_TYPE_WARN;
}
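
add_taint() grows a second argument saying whether lockdep can still be trusted after the taint; the hunk above passes LOCKDEP_STILL_OK because a WARN does not corrupt lock state. A small caller-side sketch of the updated signature (function name invented):

#include <linux/kernel.h>

/* Illustrative only: flag a non-fatal warning without shutting lockdep off. */
static void example_taint_on_warn(void)
{
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
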
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff20..129775eb6de6 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -102,6 +102,7 @@ out:
}
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
+#endif
/*
* computes the checksum of a memory block at buff, length len,
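
Wrapping the generic ip_fast_csum() in #ifndef lets an architecture override it from its checksum header by defining the symbol as a macro. A hypothetical arch-side fragment showing the usual override pattern (the actual optimized implementation would live elsewhere, e.g. in assembly):

#include <linux/types.h>

/* Hypothetical asm/checksum.h fragment: declare the arch's own version and
 * define the macro so lib/checksum.c skips its generic copy.
 */
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
#define ip_fast_csum ip_fast_csum
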
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
*/
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
- struct hlist_node *node;
struct debug_obj *obj;
int cnt = 0;
- hlist_for_each_entry(obj, node, &b->list, node) {
+ hlist_for_each_entry(obj, &b->list, node) {
cnt++;
if (obj->object == addr)
return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
- hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
raw_spin_lock_irqsave(&db->lock, flags);
- hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
- hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
static int __init debug_objects_replace_static_objects(void)
{
struct debug_bucket *db = obj_hash;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct debug_obj *obj, *new;
HLIST_HEAD(objects);
int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
local_irq_disable();
/* Remove the statically allocated objects from the pool */
- hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+ hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
hlist_del(&obj->node);
/* Move the allocated objects to the pool */
hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
hlist_move_list(&db->list, &objects);
- hlist_for_each_entry(obj, node, &objects, node) {
+ hlist_for_each_entry(obj, &objects, node) {
new = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&new->node);
/* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
obj_pool_used);
return 0;
free:
- hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+ hlist_for_each_entry_safe(obj, tmp, &objects, node) {
hlist_del(&obj->node);
kmem_cache_free(obj_cache, obj);
}
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 4531294fa62f..960183d4258f 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -31,7 +31,7 @@
*/
#ifdef STATIC
-#include "lzo/lzo1x_decompress.c"
+#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
#endif
diff --git a/lib/devres.c b/lib/devres.c
index 88ad75952a76..823533138fa0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
#ifdef CONFIG_PCI
/*
@@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
-#endif /* CONFIG_HAS_IOPORT */
diff --git a/lib/idr.c b/lib/idr.c
index 648239079dd2..73f4d53c02f3 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,10 +35,41 @@
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+
+/* Leave the possibility of an incomplete final layer */
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+/* Number of id_layer structs to leave in free list */
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
static struct kmem_cache *idr_layer_cache;
+static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
+/* the maximum ID which can be allocated given idr->layers */
+static int idr_max(int layers)
+{
+ int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
+
+ return (1 << bits) - 1;
+}
+
+/*
+ * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
+ * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
+ * so on.
+ */
+static int idr_layer_prefix_mask(int layer)
+{
+ return ~idr_max(layer + 1);
+}
+
static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
return(p);
}
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
+ * an idr_layer from @layer_idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with the per-pool preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+ struct idr_layer *new;
+
+ /* this is the old path, bypass to get_from_free_list() */
+ if (layer_idr)
+ return get_from_free_list(layer_idr);
+
+ /* try to allocate directly from kmem_cache */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ if (new)
+ return new;
+
+ /*
+ * Try to fetch one from the per-cpu preload buffer if in process
+ * context. See idr_preload() for details.
+ */
+ if (in_interrupt())
+ return NULL;
+
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ return new;
+}
+
static void idr_layer_rcu_free(struct rcu_head *head)
{
struct idr_layer *layer;
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head)
kmem_cache_free(idr_layer_cache, layer);
}
-static inline void free_layer(struct idr_layer *p)
+static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
+ if (idr->hint && idr->hint == p)
+ RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id)
struct idr_layer *p = pa[0];
int l = 0;
- __set_bit(id & IDR_MASK, &p->bitmap);
+ __set_bit(id & IDR_MASK, p->bitmap);
/*
* If this layer is full mark the bit in the layer above to
* show that this part of the radix tree is full. This may
* complete the layer above and require walking up the radix
* tree.
*/
- while (p->bitmap == IDR_FULL) {
+ while (bitmap_full(p->bitmap, IDR_SIZE)) {
if (!(p = pa[++l]))
break;
id = id >> IDR_BITS;
- __set_bit((id & IDR_MASK), &p->bitmap);
+ __set_bit((id & IDR_MASK), p->bitmap);
}
}
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_pre_get);
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth. Returns
+ *
+ * the allocated id >= 0 if successful,
+ * -EAGAIN if the tree needs to grow for allocation to succeed,
+ * -ENOSPC if the id space is exhausted,
+ * -ENOMEM if more idr_layers need to be allocated.
+ */
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+ gfp_t gfp_mask, struct idr *layer_idr)
{
int n, m, sh;
struct idr_layer *p, *new;
int l, id, oid;
- unsigned long bm;
id = *starting_id;
restart:
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
* We run around this while until we reach the leaf node...
*/
n = (id >> (IDR_BITS*l)) & IDR_MASK;
- bm = ~p->bitmap;
- m = find_next_bit(&bm, IDR_SIZE, n);
+ m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
if (m == IDR_SIZE) {
/* no space available go back to previous layer. */
l++;
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
/* if already at the top layer, we need to grow */
if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
- return IDR_NEED_TO_GROW;
+ return -EAGAIN;
}
p = pa[l];
BUG_ON(!p);
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_IDR_BIT) || (id < 0))
- return IDR_NOMORE_SPACE;
+ return -ENOSPC;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- new = get_from_free_list(idp);
+ new = idr_layer_alloc(gfp_mask, layer_idr);
if (!new)
- return -1;
+ return -ENOMEM;
new->layer = l-1;
+ new->prefix = id & idr_layer_prefix_mask(new->layer);
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
- struct idr_layer **pa)
+ struct idr_layer **pa, gfp_t gfp_mask,
+ struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
@@ -214,8 +309,8 @@ build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = get_from_free_list(idp)))
- return -1;
+ if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
+ return -ENOMEM;
p->layer = 0;
layers = 1;
}
@@ -223,7 +318,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while (id > idr_max(layers)) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -231,9 +326,10 @@ build_up:
* upwards.
*/
p->layer++;
+ WARN_ON_ONCE(p->prefix);
continue;
}
- if (!(new = get_from_free_list(idp))) {
+ if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
@@ -242,45 +338,42 @@ build_up:
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
- new->bitmap = new->count = 0;
+ new->count = 0;
+ bitmap_clear(new->bitmap, 0, IDR_SIZE);
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
- return -1;
+ return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
new->layer = layers-1;
- if (p->bitmap == IDR_FULL)
- __set_bit(0, &new->bitmap);
+ new->prefix = id & idr_layer_prefix_mask(new->layer);
+ if (bitmap_full(p->bitmap, IDR_SIZE))
+ __set_bit(0, new->bitmap);
p = new;
}
rcu_assign_pointer(idp->top, p);
idp->layers = layers;
- v = sub_alloc(idp, &id, pa);
- if (v == IDR_NEED_TO_GROW)
+ v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
+ if (v == -EAGAIN)
goto build_up;
return(v);
}
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+/*
+ * @id and @pa are from a successful allocation from idr_get_empty_slot().
+ * Install the user pointer @ptr and mark the slot full.
+ */
+static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+ struct idr_layer **pa)
{
- struct idr_layer *pa[MAX_IDR_LEVEL];
- int id;
-
- id = idr_get_empty_slot(idp, starting_id, pa);
- if (id >= 0) {
- /*
- * Successfully found an empty slot. Install the user
- * pointer and mark the slot full.
- */
- rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
- (struct idr_layer *)ptr);
- pa[0]->count++;
- idr_mark_full(pa, id);
- }
+ /* update hint used for lookup, cleared from free_layer() */
+ rcu_assign_pointer(idr->hint, pa[0]);
- return id;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
+ pa[0]->count++;
+ idr_mark_full(pa, id);
}
/**
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
- rv = idr_get_new_above_int(idp, ptr, starting_id);
- /*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
- */
+ rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
if (rv < 0)
- return _idr_rc_to_errno(rv);
+ return rv == -ENOMEM ? -EAGAIN : rv;
+
+ idr_fill_slot(idp, ptr, rv, pa);
*id = rv;
return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
*
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
+ * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end(). Note that preemption is disabled while preloaded.
*
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading. This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
+ *
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ * idr_preload(GFP_KERNEL);
+ * spin_lock(lock);
+ *
+ * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
*
- * @id returns a value in the range %0 ... %0x7fffffff
+ * spin_unlock(lock);
+ * idr_preload_end();
+ * if (id < 0)
+ * error;
*/
-int idr_get_new(struct idr *idp, void *ptr, int *id)
+void idr_preload(gfp_t gfp_mask)
{
- int rv;
+ /*
+ * Consuming preload buffer from non-process context breaks preload
+ * allocation guarantee. Disallow usage from those contexts.
+ */
+ WARN_ON_ONCE(in_interrupt());
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ preempt_disable();
- rv = idr_get_new_above_int(idp, ptr, 0);
/*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
+ * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+ * return value from idr_alloc() needs to be checked for failure
+ * anyway. Silently give up if allocation fails. The caller can
+ * treat failures from idr_alloc() as if idr_alloc() were called
+ * with @gfp_mask which should be enough.
*/
- if (rv < 0)
- return _idr_rc_to_errno(rv);
- *id = rv;
- return 0;
+ while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+ struct idr_layer *new;
+
+ preempt_enable();
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ preempt_disable();
+ if (!new)
+ break;
+
+ /* link the new one to per-cpu preload list */
+ new->ary[0] = __this_cpu_read(idr_preload_head);
+ __this_cpu_write(idr_preload_head, new);
+ __this_cpu_inc(idr_preload_cnt);
+ }
}
-EXPORT_SYMBOL(idr_get_new);
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr. If no ID is
+ * available in the specified range, returns -ENOSPC. On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0. This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr. However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in RCU-safe way after removal from idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+ int id;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ /* sanity checks */
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+ if (unlikely(max < start))
+ return -ENOSPC;
+
+ /* allocate id */
+ id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+ if (unlikely(id < 0))
+ return id;
+ if (unlikely(id > max))
+ return -ENOSPC;
+
+ idr_fill_slot(idr, ptr, id, pa);
+ return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
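
The kernel-doc above defines the idr_alloc() contract; a short usage sketch combining it with idr_preload(), mirroring the pattern documented in the idr_preload() comment (lock and idr names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

/* Allocate an id >= 1 (no upper bound) for @obj under a spinlock. */
static int example_add_object(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_lock);
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOMEM or -ENOSPC on failure */
}
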
static void idr_remove_warning(int id)
{
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_IDR_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL + 1];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id)
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
- if (likely(p != NULL && test_bit(n, &p->bitmap))){
- __clear_bit(n, &p->bitmap);
+ if (likely(p != NULL && test_bit(n, p->bitmap))) {
+ __clear_bit(n, p->bitmap);
rcu_assign_pointer(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
- free_layer(to_free);
+ free_layer(idp, to_free);
to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
if (to_free)
- free_layer(to_free);
+ free_layer(idp, to_free);
} else
idr_remove_warning(id);
}
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *p;
struct idr_layer *to_free;
- /* Mask off upper bits we don't use for the search. */
- id &= MAX_IDR_MASK;
+ /* see comment in idr_find_slowpath() */
+ if (WARN_ON_ONCE(id < 0))
+ return;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id)
p = idp->top->ary[0];
rcu_assign_pointer(idp->top, p);
--idp->layers;
- to_free->bitmap = to_free->count = 0;
- free_layer(to_free);
+ to_free->count = 0;
+ bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
+ free_layer(idp, to_free);
}
while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id)
}
EXPORT_SYMBOL(idr_remove);
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * idr_destroy() only frees up unused, cached idp_layers, but this
- * function will remove all id mappings and leave all idp_layers
- * unused.
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
rcu_assign_pointer(idp->top, NULL);
- max = 1 << n;
+ max = idr_max(idp->layers);
id = 0;
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp)
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (p)
- free_layer(p);
+ free_layer(idp, p);
n += IDR_BITS;
p = *--paa;
}
}
idp->layers = 0;
}
-EXPORT_SYMBOL(idr_remove_all);
+EXPORT_SYMBOL(__idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers. After this function, @idp is
+ * completely unused and can be freed / recycled. The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
{
+ __idr_remove_all(idp);
+
while (idp->id_free_cnt) {
struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp)
}
EXPORT_SYMBOL(idr_destroy);
-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
{
int n;
struct idr_layer *p;
+ /*
+ * If @id is negative, idr_find() used to ignore the sign bit and
+ * performed lookup with the rest of bits, which is weird and can
+ * lead to very obscure bugs. We're now returning NULL for all
+ * negative IDs but just in case somebody was depending on the sign
+ * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
+ * be detected and fixed. WARN_ON_ONCE() can later be removed.
+ */
+ if (WARN_ON_ONCE(id < 0))
+ return NULL;
+
p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer+1) * IDR_BITS;
- /* Mask off upper bits we don't use for the search. */
- id &= MAX_IDR_MASK;
-
- if (id >= (1 << n))
+ if (id > idr_max(p->layer + 1))
return NULL;
BUG_ON(n == 0);
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id)
}
return((void *)p);
}
-EXPORT_SYMBOL(idr_find);
+EXPORT_SYMBOL(idr_find_slowpath);
/**
* idr_for_each - iterate through all stored pointers
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = rcu_dereference_raw(idp->top);
- max = 1 << n;
+ max = idr_max(idp->layers);
id = 0;
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_IDR_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp)
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
- max = 1 << n;
+ max = idr_max(p->layer + 1);
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
return p;
}
- id += 1 << n;
+ /*
+ * Proceed to the next layer at the current level. Unlike
+ * idr_for_each(), @id isn't guaranteed to be aligned to
+ * layer boundary at this point and adding 1 << n may
+ * incorrectly skip IDs. Make sure we jump to the
+ * beginning of the next layer using round_up().
+ */
+ id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
int n;
struct idr_layer *p, *old_p;
+ /* see comment in idr_find_slowpath() */
+ if (WARN_ON_ONCE(id < 0))
+ return ERR_PTR(-EINVAL);
+
p = idp->top;
if (!p)
return ERR_PTR(-EINVAL);
n = (p->layer+1) * IDR_BITS;
- id &= MAX_IDR_MASK;
-
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
}
n = id & IDR_MASK;
- if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
restart:
/* get vacant slot */
- t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+ t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0)
- return _idr_rc_to_errno(t);
+ return t == -ENOMEM ? -EAGAIN : t;
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
@@ -852,25 +1025,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
EXPORT_SYMBOL(ida_get_new_above);
/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
- *
- * Allocate new ID. It should be called with any required locks.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return %-ENOSPC.
- *
- * @p_id returns a value in the range %0 ... %0x7fffffff.
- */
-int ida_get_new(struct ida *ida, int *p_id)
-{
- return ida_get_new_above(ida, 0, p_id);
-}
-EXPORT_SYMBOL(ida_get_new);
-
-/**
* ida_remove - remove the given ID
* @ida: ida handle
* @id: ID to free
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id)
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
p = p->ary[n];
shift -= IDR_BITS;
}
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id)
goto err;
n = idr_id & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
bitmap = (void *)p->ary[n];
if (!test_bit(offset, bitmap->bitmap))
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id)
/* update bitmap and remove it if empty */
__clear_bit(offset, bitmap->bitmap);
if (--bitmap->nr_busy == 0) {
- __set_bit(n, &p->bitmap); /* to please idr_remove() */
+ __set_bit(n, p->bitmap); /* to please idr_remove() */
idr_remove(&ida->idr, idr_id);
free_bitmap(ida, bitmap);
}
diff --git a/kernel/kfifo.c b/lib/kfifo.c
index 59dcf5b81d24..7b7f83027b7b 100644
--- a/kernel/kfifo.c
+++ b/lib/kfifo.c
@@ -42,8 +42,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
* round down to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
- if (!is_power_of_2(size))
- size = rounddown_pow_of_two(size);
+ size = roundup_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
@@ -83,8 +82,7 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
{
size /= esize;
- if (!is_power_of_2(size))
- size = rounddown_pow_of_two(size);
+ size = roundup_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
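
The rounding change means a caller that asks for a non-power-of-two size now gets the next larger power of two instead of silently losing capacity. A sketch of the visible effect (sizes are example values):

#include <linux/kfifo.h>
#include <linux/slab.h>

/* With the old rounddown_pow_of_two(), a request for 100 elements produced
 * a 64-element fifo; with roundup_pow_of_two() it becomes 128.
 */
static int example_alloc_fifo(struct kfifo *fifo)
{
	return kfifo_alloc(fifo, 100, GFP_KERNEL);
}
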
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index d71d89498943..8335d39d2ccd 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
bool include_changing)
{
- struct hlist_node *n;
struct lc_element *e;
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
- hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
/* "about to be changed" elements, pending transaction commit,
* are hashed by their "new number". "Normal" elements have
* lc_number == lc_new_number. */
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile
index e764116ea12d..f0f7d7ca2b83 100644
--- a/lib/lzo/Makefile
+++ b/lib/lzo/Makefile
@@ -1,5 +1,5 @@
lzo_compress-objs := lzo1x_compress.o
-lzo_decompress-objs := lzo1x_decompress.o
+lzo_decompress-objs := lzo1x_decompress_safe.o
obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index a6040990a62e..236eb21167b5 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -1,194 +1,243 @@
/*
- * LZO1X Compressor from MiniLZO
+ * LZO1X Compressor from LZO
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lzo.h>
#include <asm/unaligned.h>
+#include <linux/lzo.h>
#include "lzodefs.h"
static noinline size_t
-_lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len, void *wrkmem)
+lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ size_t ti, void *wrkmem)
{
+ const unsigned char *ip;
+ unsigned char *op;
const unsigned char * const in_end = in + in_len;
- const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5;
- const unsigned char ** const dict = wrkmem;
- const unsigned char *ip = in, *ii = ip;
- const unsigned char *end, *m, *m_pos;
- size_t m_off, m_len, dindex;
- unsigned char *op = out;
+ const unsigned char * const ip_end = in + in_len - 20;
+ const unsigned char *ii;
+ lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
- ip += 4;
+ op = out;
+ ip = in;
+ ii = ip;
+ ip += ti < 4 ? 4 - ti : 0;
for (;;) {
- dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK;
- m_pos = dict[dindex];
-
- if (m_pos < in)
- goto literal;
-
- if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
- goto literal;
-
- m_off = ip - m_pos;
- if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
- goto try_match;
-
- dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f);
- m_pos = dict[dindex];
-
- if (m_pos < in)
- goto literal;
-
- if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
- goto literal;
-
- m_off = ip - m_pos;
- if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
- goto try_match;
-
- goto literal;
-
-try_match:
- if (get_unaligned((const unsigned short *)m_pos)
- == get_unaligned((const unsigned short *)ip)) {
- if (likely(m_pos[2] == ip[2]))
- goto match;
- }
-
+ const unsigned char *m_pos;
+ size_t t, m_len, m_off;
+ u32 dv;
literal:
- dict[dindex] = ip;
- ++ip;
+ ip += 1 + ((ip - ii) >> 5);
+next:
if (unlikely(ip >= ip_end))
break;
- continue;
-
-match:
- dict[dindex] = ip;
- if (ip != ii) {
- size_t t = ip - ii;
+ dv = get_unaligned_le32(ip);
+ t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+ m_pos = in + dict[t];
+ dict[t] = (lzo_dict_t) (ip - in);
+ if (unlikely(dv != get_unaligned_le32(m_pos)))
+ goto literal;
+ ii -= ti;
+ ti = 0;
+ t = ip - ii;
+ if (t != 0) {
if (t <= 3) {
op[-2] |= t;
- } else if (t <= 18) {
+ COPY4(op, ii);
+ op += t;
+ } else if (t <= 16) {
*op++ = (t - 3);
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += t;
} else {
- size_t tt = t - 18;
-
- *op++ = 0;
- while (tt > 255) {
- tt -= 255;
+ if (t <= 18) {
+ *op++ = (t - 3);
+ } else {
+ size_t tt = t - 18;
*op++ = 0;
+ while (unlikely(tt > 255)) {
+ tt -= 255;
+ *op++ = 0;
+ }
+ *op++ = tt;
}
- *op++ = tt;
+ do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
+ *op++ = *ii++;
+ } while (--t > 0);
}
- do {
- *op++ = *ii++;
- } while (--t > 0);
}
- ip += 3;
- if (m_pos[3] != *ip++ || m_pos[4] != *ip++
- || m_pos[5] != *ip++ || m_pos[6] != *ip++
- || m_pos[7] != *ip++ || m_pos[8] != *ip++) {
- --ip;
- m_len = ip - ii;
+ m_len = 4;
+ {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
+ u64 v;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 8;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctzll(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clzll(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
+ u32 v;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (v != 0)
+ break;
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctz(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clz(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#else
+ if (unlikely(ip[m_len] == m_pos[m_len])) {
+ do {
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (ip[m_len] == m_pos[m_len]);
+ }
+#endif
+ }
+m_len_done:
- if (m_off <= M2_MAX_OFFSET) {
- m_off -= 1;
- *op++ = (((m_len - 1) << 5)
- | ((m_off & 7) << 2));
- *op++ = (m_off >> 3);
- } else if (m_off <= M3_MAX_OFFSET) {
- m_off -= 1;
+ m_off = ip - m_pos;
+ ip += m_len;
+ ii = ip;
+ if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
+ m_off -= 1;
+ *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
+ *op++ = (m_off >> 3);
+ } else if (m_off <= M3_MAX_OFFSET) {
+ m_off -= 1;
+ if (m_len <= M3_MAX_LEN)
*op++ = (M3_MARKER | (m_len - 2));
- goto m3_m4_offset;
- } else {
- m_off -= 0x4000;
-
- *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11)
- | (m_len - 2));
- goto m3_m4_offset;
+ else {
+ m_len -= M3_MAX_LEN;
+ *op++ = M3_MARKER | 0;
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
+ }
+ *op++ = (m_len);
}
+ *op++ = (m_off << 2);
+ *op++ = (m_off >> 6);
} else {
- end = in_end;
- m = m_pos + M2_MAX_LEN + 1;
-
- while (ip < end && *m == *ip) {
- m++;
- ip++;
- }
- m_len = ip - ii;
-
- if (m_off <= M3_MAX_OFFSET) {
- m_off -= 1;
- if (m_len <= 33) {
- *op++ = (M3_MARKER | (m_len - 2));
- } else {
- m_len -= 33;
- *op++ = M3_MARKER | 0;
- goto m3_m4_len;
- }
- } else {
- m_off -= 0x4000;
- if (m_len <= M4_MAX_LEN) {
- *op++ = (M4_MARKER
- | ((m_off & 0x4000) >> 11)
+ m_off -= 0x4000;
+ if (m_len <= M4_MAX_LEN)
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
- } else {
- m_len -= M4_MAX_LEN;
- *op++ = (M4_MARKER
- | ((m_off & 0x4000) >> 11));
-m3_m4_len:
- while (m_len > 255) {
- m_len -= 255;
- *op++ = 0;
- }
-
- *op++ = (m_len);
+ else {
+ m_len -= M4_MAX_LEN;
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8));
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
}
+ *op++ = (m_len);
}
-m3_m4_offset:
- *op++ = ((m_off & 63) << 2);
+ *op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
-
- ii = ip;
- if (unlikely(ip >= ip_end))
- break;
+ goto next;
}
-
*out_len = op - out;
- return in_end - ii;
+ return in_end - (ii - ti);
}
-int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out,
- size_t *out_len, void *wrkmem)
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
{
- const unsigned char *ii;
+ const unsigned char *ip = in;
unsigned char *op = out;
- size_t t;
+ size_t l = in_len;
+ size_t t = 0;
- if (unlikely(in_len <= M2_MAX_LEN + 5)) {
- t = in_len;
- } else {
- t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem);
+ while (l > 20) {
+ size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
+ uintptr_t ll_end = (uintptr_t) ip + ll;
+ if ((ll_end + ((t + ll) >> 5)) <= ll_end)
+ break;
+ BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
+ memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
+ t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+ ip += ll;
op += *out_len;
+ l -= ll;
}
+ t += l;
if (t > 0) {
- ii = in + in_len - t;
+ const unsigned char *ii = in + in_len - t;
if (op == out && t <= 238) {
*op++ = (17 + t);
@@ -198,16 +247,21 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out,
*op++ = (t - 3);
} else {
size_t tt = t - 18;
-
*op++ = 0;
while (tt > 255) {
tt -= 255;
*op++ = 0;
}
-
*op++ = tt;
}
- do {
+ if (t >= 16) do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
*op++ = *ii++;
} while (--t > 0);
}
@@ -223,4 +277,3 @@ EXPORT_SYMBOL_GPL(lzo1x_1_compress);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
-
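
The compressor rewrite keeps the exported entry point and its work-memory requirement. A hedged usage sketch, with buffer handling reduced to the essentials (names invented; the destination is assumed to be sized via lzo1x_worst_compress()):

#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_compress(const u8 *src, size_t src_len,
			    u8 *dst, size_t *dst_len)
{
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return -ENOMEM;
	/* dst must hold at least lzo1x_worst_compress(src_len) bytes */
	ret = lzo1x_1_compress(src, src_len, dst, dst_len, wrkmem);
	vfree(wrkmem);
	return ret == LZO_E_OK ? 0 : -EINVAL;
}
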
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
deleted file mode 100644
index f2fd09850223..000000000000
--- a/lib/lzo/lzo1x_decompress.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * LZO1X Decompressor from MiniLZO
- *
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
- *
- * The full LZO package can be found at:
- * http://www.oberhumer.com/opensource/lzo/
- *
- * Changed for kernel use by:
- * Nitin Gupta <nitingupta910@gmail.com>
- * Richard Purdie <rpurdie@openedhand.com>
- */
-
-#ifndef STATIC
-#include <linux/module.h>
-#include <linux/kernel.h>
-#endif
-
-#include <asm/unaligned.h>
-#include <linux/lzo.h>
-#include "lzodefs.h"
-
-#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
-#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x))
-#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op)
-
-#define COPY4(dst, src) \
- put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-
-int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len)
-{
- const unsigned char * const ip_end = in + in_len;
- unsigned char * const op_end = out + *out_len;
- const unsigned char *ip = in, *m_pos;
- unsigned char *op = out;
- size_t t;
-
- *out_len = 0;
-
- if (*ip > 17) {
- t = *ip++ - 17;
- if (t < 4)
- goto match_next;
- if (HAVE_OP(t, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 1, ip_end, ip))
- goto input_overrun;
- do {
- *op++ = *ip++;
- } while (--t > 0);
- goto first_literal_run;
- }
-
- while ((ip < ip_end)) {
- t = *ip++;
- if (t >= 16)
- goto match;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 15 + *ip++;
- }
- if (HAVE_OP(t + 3, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 4, ip_end, ip))
- goto input_overrun;
-
- COPY4(op, ip);
- op += 4;
- ip += 4;
- if (--t > 0) {
- if (t >= 4) {
- do {
- COPY4(op, ip);
- op += 4;
- ip += 4;
- t -= 4;
- } while (t >= 4);
- if (t > 0) {
- do {
- *op++ = *ip++;
- } while (--t > 0);
- }
- } else {
- do {
- *op++ = *ip++;
- } while (--t > 0);
- }
- }
-
-first_literal_run:
- t = *ip++;
- if (t >= 16)
- goto match;
- m_pos = op - (1 + M2_MAX_OFFSET);
- m_pos -= t >> 2;
- m_pos -= *ip++ << 2;
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
-
- if (HAVE_OP(3, op_end, op))
- goto output_overrun;
- *op++ = *m_pos++;
- *op++ = *m_pos++;
- *op++ = *m_pos;
-
- goto match_done;
-
- do {
-match:
- if (t >= 64) {
- m_pos = op - 1;
- m_pos -= (t >> 2) & 7;
- m_pos -= *ip++ << 3;
- t = (t >> 5) - 1;
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(t + 3 - 1, op_end, op))
- goto output_overrun;
- goto copy_match;
- } else if (t >= 32) {
- t &= 31;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 31 + *ip++;
- }
- m_pos = op - 1;
- m_pos -= get_unaligned_le16(ip) >> 2;
- ip += 2;
- } else if (t >= 16) {
- m_pos = op;
- m_pos -= (t & 8) << 11;
-
- t &= 7;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 7 + *ip++;
- }
- m_pos -= get_unaligned_le16(ip) >> 2;
- ip += 2;
- if (m_pos == op)
- goto eof_found;
- m_pos -= 0x4000;
- } else {
- m_pos = op - 1;
- m_pos -= t >> 2;
- m_pos -= *ip++ << 2;
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(2, op_end, op))
- goto output_overrun;
-
- *op++ = *m_pos++;
- *op++ = *m_pos;
- goto match_done;
- }
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(t + 3 - 1, op_end, op))
- goto output_overrun;
-
- if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) {
- COPY4(op, m_pos);
- op += 4;
- m_pos += 4;
- t -= 4 - (3 - 1);
- do {
- COPY4(op, m_pos);
- op += 4;
- m_pos += 4;
- t -= 4;
- } while (t >= 4);
- if (t > 0)
- do {
- *op++ = *m_pos++;
- } while (--t > 0);
- } else {
-copy_match:
- *op++ = *m_pos++;
- *op++ = *m_pos++;
- do {
- *op++ = *m_pos++;
- } while (--t > 0);
- }
-match_done:
- t = ip[-2] & 3;
- if (t == 0)
- break;
-match_next:
- if (HAVE_OP(t, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 1, ip_end, ip))
- goto input_overrun;
-
- *op++ = *ip++;
- if (t > 1) {
- *op++ = *ip++;
- if (t > 2)
- *op++ = *ip++;
- }
-
- t = *ip++;
- } while (ip < ip_end);
- }
-
- *out_len = op - out;
- return LZO_E_EOF_NOT_FOUND;
-
-eof_found:
- *out_len = op - out;
- return (ip == ip_end ? LZO_E_OK :
- (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
-input_overrun:
- *out_len = op - out;
- return LZO_E_INPUT_OVERRUN;
-
-output_overrun:
- *out_len = op - out;
- return LZO_E_OUTPUT_OVERRUN;
-
-lookbehind_overrun:
- *out_len = op - out;
- return LZO_E_LOOKBEHIND_OVERRUN;
-}
-#ifndef STATIC
-EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LZO1X Decompressor");
-
-#endif
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
new file mode 100644
index 000000000000..569985d522d5
--- /dev/null
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -0,0 +1,237 @@
+/*
+ * LZO1X Decompressor from LZO
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#ifndef STATIC
+#include <linux/module.h>
+#include <linux/kernel.h>
+#endif
+#include <asm/unaligned.h>
+#include <linux/lzo.h>
+#include "lzodefs.h"
+
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+
+int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+{
+ unsigned char *op;
+ const unsigned char *ip;
+ size_t t, next;
+ size_t state = 0;
+ const unsigned char *m_pos;
+ const unsigned char * const ip_end = in + in_len;
+ unsigned char * const op_end = out + *out_len;
+
+ op = out;
+ ip = in;
+
+ if (unlikely(in_len < 3))
+ goto input_overrun;
+ if (*ip > 17) {
+ t = *ip++ - 17;
+ if (t < 4) {
+ next = t;
+ goto match_next;
+ }
+ goto copy_literal_run;
+ }
+
+ for (;;) {
+ t = *ip++;
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1);
+ }
+ t += 15 + *ip++;
+ }
+ t += 3;
+copy_literal_run:
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ } while (ip < ie);
+ ip = ie;
+ op = oe;
+ } else
+#endif
+ {
+ NEED_OP(t);
+ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+ }
+ state = 4;
+ continue;
+ } else if (state != 4) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ goto match_next;
+ } else {
+ next = t & 3;
+ m_pos = op - (1 + M2_MAX_OFFSET);
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ t = 3;
+ }
+ } else if (t >= 64) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= (t >> 2) & 7;
+ m_pos -= *ip++ << 3;
+ t = (t >> 5) - 1 + (3 - 1);
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1);
+ }
+ t += 31 + *ip++;
+ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ } else {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1);
+ }
+ t += 7 + *ip++;
+ NEED_IP(2);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
+ }
+ TEST_LB(m_pos);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+ ip += next;
+ continue;
+ }
+ } else {
+ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+ } else
+#endif
+ {
+ unsigned char *oe = op + t;
+ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ m_pos += 2;
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+match_next:
+ state = next;
+ t = next;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+#endif
+ {
+ NEED_IP(t + 3);
+ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
+ }
+ }
+ }
+
+eof_found:
+ *out_len = op - out;
+ return (t != 3 ? LZO_E_ERROR :
+ ip == ip_end ? LZO_E_OK :
+ ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
+
+input_overrun:
+ *out_len = op - out;
+ return LZO_E_INPUT_OVERRUN;
+
+output_overrun:
+ *out_len = op - out;
+ return LZO_E_OUTPUT_OVERRUN;
+
+lookbehind_overrun:
+ *out_len = op - out;
+ return LZO_E_LOOKBEHIND_OVERRUN;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZO1X Decompressor");
+
+#endif
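Editor's note on the new safe decompressor above: the caller passes the compressed buffer plus an out_len that holds the destination capacity on entry and receives the number of bytes produced on return. A minimal in-kernel usage sketch; the buffer names and the -EINVAL mapping are illustrative only:

    #include <linux/kernel.h>
    #include <linux/lzo.h>

    /* Sketch only: comp/comp_len and plain/plain_cap are hypothetical buffers. */
    static int decompress_blob(const unsigned char *comp, size_t comp_len,
                               unsigned char *plain, size_t plain_cap)
    {
            size_t out_len = plain_cap;     /* in: capacity, out: bytes written */
            int ret;

            ret = lzo1x_decompress_safe(comp, comp_len, plain, &out_len);
            if (ret != LZO_E_OK)
                    return -EINVAL;         /* overrun, truncated stream, ... */

            pr_debug("decompressed %zu bytes\n", out_len);
            return 0;
    }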
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h
index b6d482c492ef..6710b83ce72e 100644
--- a/lib/lzo/lzodefs.h
+++ b/lib/lzo/lzodefs.h
@@ -1,19 +1,37 @@
/*
* lzodefs.h -- architecture, OS and compiler specific defines
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
-#define LZO_VERSION 0x2020
-#define LZO_VERSION_STRING "2.02"
-#define LZO_VERSION_DATE "Oct 17 2005"
+
+#define COPY4(dst, src) \
+ put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
+#if defined(__x86_64__)
+#define COPY8(dst, src) \
+ put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
+#else
+#define COPY8(dst, src) \
+ COPY4(dst, src); COPY4((dst) + 4, (src) + 4)
+#endif
+
+#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+#error "conflicting endian definitions"
+#elif defined(__x86_64__)
+#define LZO_USE_CTZ64 1
+#define LZO_USE_CTZ32 1
+#elif defined(__i386__) || defined(__powerpc__)
+#define LZO_USE_CTZ32 1
+#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#define LZO_USE_CTZ32 1
+#endif
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
@@ -34,10 +52,8 @@
#define M3_MARKER 32
#define M4_MARKER 16
-#define D_BITS 14
-#define D_MASK ((1u << D_BITS) - 1)
+#define lzo_dict_t unsigned short
+#define D_BITS 13
+#define D_SIZE (1u << D_BITS)
+#define D_MASK (D_SIZE - 1)
#define D_HIGH ((D_MASK >> 1) + 1)
-
-#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \
- << (s1)) ^ (p)[0])
-#define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0])
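The COPY4/COPY8 helpers introduced above are plain unaligned word copies; except on x86-64 the 8-byte variant is built from two 4-byte copies. A rough userspace-equivalent sketch (not the kernel macro itself), just to show the intent:

    #include <string.h>

    /* Portable stand-in for COPY8: a fixed-size memcpy lets the compiler
     * emit an unaligned 64-bit move where the target allows one. */
    static inline void copy8(unsigned char *dst, const unsigned char *src)
    {
            memcpy(dst, src, 8);
    }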
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7874b01e816e..b83c144d731f 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset)
+{
+ piter->__pg_advance = 0;
+ piter->__nents = nents;
+
+ piter->page = NULL;
+ piter->sg = sglist;
+ piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+ piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
{
memset(miter, 0, sizeof(struct sg_mapping_iter));
- miter->__sg = sgl;
- miter->__nents = nents;
- miter->__offset = 0;
+ __sg_page_iter_start(&miter->piter, sgl, nents, 0);
WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
miter->__flags = flags;
}
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start);
*/
bool sg_miter_next(struct sg_mapping_iter *miter)
{
- unsigned int off, len;
-
- /* check for end and drop resources from the last iteration */
- if (!miter->__nents)
- return false;
-
sg_miter_stop(miter);
- /* get to the next sg if necessary. __offset is adjusted by stop */
- while (miter->__offset == miter->__sg->length) {
- if (--miter->__nents) {
- miter->__sg = sg_next(miter->__sg);
- miter->__offset = 0;
- } else
+ /*
+ * Get to the next page if necessary.
+	 * __remaining and __offset are adjusted by sg_miter_stop
+ */
+ if (!miter->__remaining) {
+ struct scatterlist *sg;
+ unsigned long pgoffset;
+
+ if (!__sg_page_iter_next(&miter->piter))
return false;
- }
- /* map the next page */
- off = miter->__sg->offset + miter->__offset;
- len = miter->__sg->length - miter->__offset;
+ sg = miter->piter.sg;
+ pgoffset = miter->piter.sg_pgoffset;
- miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
- off &= ~PAGE_MASK;
- miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
- miter->consumed = miter->length;
+ miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__remaining = sg->offset + sg->length -
+ (pgoffset << PAGE_SHIFT) - miter->__offset;
+ miter->__remaining = min_t(unsigned long, miter->__remaining,
+ PAGE_SIZE - miter->__offset);
+ }
+ miter->page = miter->piter.page;
+ miter->consumed = miter->length = miter->__remaining;
if (miter->__flags & SG_MITER_ATOMIC)
- miter->addr = kmap_atomic(miter->page) + off;
+ miter->addr = kmap_atomic(miter->page) + miter->__offset;
else
- miter->addr = kmap(miter->page) + off;
+ miter->addr = kmap(miter->page) + miter->__offset;
return true;
}
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
/* drop resources from the last iteration */
if (miter->addr) {
miter->__offset += miter->consumed;
+ miter->__remaining -= miter->consumed;
if (miter->__flags & SG_MITER_TO_SG)
flush_kernel_dcache_page(miter->page);
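The newly exported __sg_page_iter_start()/__sg_page_iter_next() pair walks a scatterlist one page at a time, which is what sg_miter_next() is rebuilt on above. A minimal direct-caller sketch, assuming a filled sg_table named table; a header-side convenience macro wrapping this loop presumably accompanies the series but is outside this hunk:

    #include <linux/scatterlist.h>

    /* Sketch: visit every page backing a (hypothetical) sg_table. */
    static void touch_all_pages(struct sg_table *table)
    {
            struct sg_page_iter piter;

            __sg_page_iter_start(&piter, table->sgl, table->nents, 0);
            while (__sg_page_iter_next(&piter)) {
                    struct page *page = piter.page;

                    /* per-page work goes here */
                    (void)page;
            }
    }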
diff --git a/mm/Kconfig b/mm/Kconfig
index 2c7aea7106f9..ae55c1e04d10 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -287,7 +287,7 @@ config NR_QUICK
config VIRT_TO_BUS
def_bool y
- depends on !ARCH_NO_VIRT_TO_BUS
+ depends on HAVE_VIRT_TO_BUS
config MMU_NOTIFIER
bool
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 32e6f4136fa2..d76ba74be2d0 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -89,7 +89,7 @@ static int cleancache_get_key(struct inode *inode,
fhfn = sb->s_export_op->encode_fh;
if (fhfn) {
len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
- if (len <= 0 || len == 255)
+ if (len <= FILEID_ROOT || len == FILEID_INVALID)
return -1;
if (maxlen > CLEANCACHE_KEY_MAX)
return -1;
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 909ec558625c..7e092689a12a 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -39,7 +39,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
if (!f.file)
return -EBADF;
- if (S_ISFIFO(f.file->f_path.dentry->d_inode->i_mode)) {
+ if (S_ISFIFO(file_inode(f.file)->i_mode)) {
ret = -ESPIPE;
goto out;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index c610076c30e1..e1979fdca805 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1711,7 +1711,7 @@ EXPORT_SYMBOL(filemap_fault);
int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa142e67b1c..e2f7f5aaaafb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
- struct hlist_node *node;
- hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
+ hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
if (mm == mm_slot->mm)
return mm_slot;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cdb64e4d238a..0a0be33bb199 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -127,7 +127,7 @@ static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
- return subpool_inode(vma->vm_file->f_dentry->d_inode);
+ return subpool_inode(file_inode(vma->vm_file));
}
/*
@@ -2479,7 +2479,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
- mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+ mapping = file_inode(vma->vm_file)->i_mapping;
/*
* Take the mapping lock for the duration of the table walk. As
diff --git a/mm/internal.h b/mm/internal.h
index 1c0c4cc0fcf7..8562de0a5197 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
* must be called with vma's mmap_sem held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 83dd5fbf5e60..c8d7f3110fd0 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object)
*/
static void free_object_rcu(struct rcu_head *rcu)
{
- struct hlist_node *elem, *tmp;
+ struct hlist_node *tmp;
struct kmemleak_scan_area *area;
struct kmemleak_object *object =
container_of(rcu, struct kmemleak_object, rcu);
@@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu)
* Once use_count is 0 (guaranteed by put_object), there is no other
* code accessing this object, hence no need for locking.
*/
- hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
- hlist_del(elem);
+ hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
+ hlist_del(&area->node);
kmem_cache_free(scan_area_cache, area);
}
kmem_cache_free(object_cache, object);
@@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end,
static void scan_object(struct kmemleak_object *object)
{
struct kmemleak_scan_area *area;
- struct hlist_node *elem;
unsigned long flags;
/*
@@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object)
spin_lock_irqsave(&object->lock, flags);
}
} else
- hlist_for_each_entry(area, elem, &object->area_list, node)
+ hlist_for_each_entry(area, &object->area_list, node)
scan_block((void *)area->start,
(void *)(area->start + area->size),
object, 0);
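The kmemleak, ksm, mmu_notifier, 9p and atm hunks in this series all apply the same mechanical conversion: hlist_for_each_entry() and friends no longer take a separate struct hlist_node cursor. The shape of the change, sketched on a hypothetical struct foo whose hlist_node member is named link:

    #include <linux/list.h>

    struct foo {
            int val;
            struct hlist_node link;
    };

    static int sum_bucket(struct hlist_head *head)
    {
            struct foo *f;
            int sum = 0;

            /* old API: hlist_for_each_entry(f, pos, head, link) */
            hlist_for_each_entry(f, head, link)
                    sum += f->val;

            return sum;
    }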
diff --git a/mm/ksm.c b/mm/ksm.c
index ab2ba9ad3c59..85bfd4c16346 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
- struct hlist_node *node;
struct mm_slot *slot;
- hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+ hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
if (slot->mm == mm)
return slot;
@@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn)
static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
struct rmap_item *rmap_item;
- struct hlist_node *hlist;
- hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
if (rmap_item->hlist.next)
ksm_pages_sharing--;
else
@@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
- struct hlist_node *hlist;
unsigned int mapcount = page_mapcount(page);
int referenced = 0;
int search_new_forks = 0;
@@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
if (!stable_node)
return 0;
again:
- hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
@@ -1952,7 +1949,6 @@ out:
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
struct stable_node *stable_node;
- struct hlist_node *hlist;
struct rmap_item *rmap_item;
int ret = SWAP_AGAIN;
int search_new_forks = 0;
@@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
if (!stable_node)
return SWAP_FAIL;
again:
- hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
@@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg)
{
struct stable_node *stable_node;
- struct hlist_node *hlist;
struct rmap_item *rmap_item;
int ret = SWAP_AGAIN;
int search_new_forks = 0;
@@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
if (!stable_node)
return ret;
again:
- hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
diff --git a/mm/memblock.c b/mm/memblock.c
index 1bcd9b970564..b8d9147e5c08 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,58 +92,9 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
*
* Find @size free area aligned to @align in the specified range and node.
*
- * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check if the
- * memory we found if not in hotpluggable ranges.
- *
* RETURNS:
* Found address on success, %0 on failure.
*/
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
- phys_addr_t end, phys_addr_t size,
- phys_addr_t align, int nid)
-{
- phys_addr_t this_start, this_end, cand;
- u64 i;
- int curr = movablemem_map.nr_map - 1;
-
- /* pump up @end */
- if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
- end = memblock.current_limit;
-
- /* avoid allocating the first page */
- start = max_t(phys_addr_t, start, PAGE_SIZE);
- end = max(start, end);
-
- for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
- this_start = clamp(this_start, start, end);
- this_end = clamp(this_end, start, end);
-
-restart:
- if (this_end <= this_start || this_end < size)
- continue;
-
- for (; curr >= 0; curr--) {
- if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT)
- < this_end)
- break;
- }
-
- cand = round_down(this_end - size, align);
- if (curr >= 0 &&
- cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) {
- this_end = movablemem_map.map[curr].start_pfn
- << PAGE_SHIFT;
- goto restart;
- }
-
- if (cand >= this_start)
- return cand;
- }
-
- return 0;
-}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align, int nid)
@@ -172,7 +123,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
}
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* memblock_find_in_range - find free area in given range
diff --git a/mm/memory.c b/mm/memory.c
index 705473afc1f4..494526ae024a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -720,7 +720,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
(unsigned long)vma->vm_file->f_op->mmap);
dump_stack();
- add_taint(TAINT_BAD_PAGE);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static inline bool is_cow_mapping(vm_flags_t flags)
diff --git a/mm/mlock.c b/mm/mlock.c
index e6638f565d42..1c5e33fce639 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
* can't isolate the page, we leave it for putback_lru_page() and vmscan
* [page_referenced()/try_to_unmap()] to deal with.
*/
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
{
+ unsigned int page_mask = 0;
+
BUG_ON(!PageLocked(page));
if (TestClearPageMlocked(page)) {
- mod_zone_page_state(page_zone(page), NR_MLOCK,
- -hpage_nr_pages(page));
+ unsigned int nr_pages = hpage_nr_pages(page);
+ mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+ page_mask = nr_pages - 1;
if (!isolate_lru_page(page)) {
int ret = SWAP_AGAIN;
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}
}
+
+ return page_mask;
}
/**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long addr = start;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
- return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+ return __get_user_pages(current, mm, start, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long addr;
-
- lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
- for (addr = start; addr < end; addr += PAGE_SIZE) {
+ while (start < end) {
struct page *page;
+ unsigned int page_mask, page_increm;
+
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
* suits munlock very well (and if somehow an abnormal page
* has sneaked into the range, we won't oops here: great).
*/
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+ &page_mask);
if (page && !IS_ERR(page)) {
lock_page(page);
- munlock_vma_page(page);
+ lru_add_drain();
+ /*
+ * Any THP page found by follow_page_mask() may have
+ * gotten split before reaching munlock_vma_page(),
+ * so we need to recompute the page_mask here.
+ */
+ page_mask = munlock_vma_page(page);
unlock_page(page);
put_page(page);
}
+ page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ start += page_increm * PAGE_SIZE;
cond_resched();
}
}
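The page_increm arithmetic just above lets the munlock loop step over an entire THP in one pass instead of revisiting every tail page. A worked example with assumed numbers (4 KiB base pages, 2 MiB THP, so page_mask = 511):

    /*
     * start >> PAGE_SHIFT = 0x200f0   -> low 9 bits = 0xf0 = 240
     * ~(start >> PAGE_SHIFT) & 511    -> 511 - 240 = 271
     * page_increm = 1 + 271 = 272     -> 240 + 272 = 512, the next THP boundary
     *
     * For an ordinary page page_mask is 0, so page_increm stays 1.
     */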
diff --git a/mm/mmap.c b/mm/mmap.c
index 318e121affda..2664a47cec93 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -203,7 +203,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping)
{
if (vma->vm_flags & VM_DENYWRITE)
- atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+ atomic_inc(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable--;
@@ -576,7 +576,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
struct address_space *mapping = file->f_mapping;
if (vma->vm_flags & VM_DENYWRITE)
- atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
+ atomic_dec(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
@@ -1229,7 +1229,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
return -EAGAIN;
}
- inode = file ? file->f_path.dentry->d_inode : NULL;
+ inode = file ? file_inode(file) : NULL;
if (file) {
switch (flags & MAP_TYPE) {
@@ -1431,7 +1431,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
- struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+ struct inode *inode = file ? file_inode(file) : NULL;
/* Clear old maps */
error = -ENOMEM;
@@ -2185,9 +2185,28 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *next;
+
+ address &= PAGE_MASK;
+ next = vma->vm_next;
+ if (next && next->vm_start == address + PAGE_SIZE) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ }
return expand_upwards(vma, address);
}
@@ -2209,6 +2228,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *prev;
+
+ address &= PAGE_MASK;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end == address) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ }
return expand_downwards(vma, address);
}
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2175fb0d501c..be04122fb277 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->clear_flush_young)
young |= mn->ops->clear_flush_young(mn, mm, address);
}
@@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->test_young) {
young = mn->ops->test_young(mn, mm, address);
if (young)
@@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
pte_t pte)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->change_pte)
mn->ops->change_pte(mn, mm, address, pte);
}
@@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_page)
mn->ops->invalidate_page(mn, mm, address);
}
@@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_start)
mn->ops->invalidate_range_start(mn, mm, start, end);
}
@@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
}
diff --git a/mm/nommu.c b/mm/nommu.c
index da0d210fd403..e19328087534 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -943,7 +943,7 @@ static int validate_mmap_request(struct file *file,
*/
mapping = file->f_mapping;
if (!mapping)
- mapping = file->f_path.dentry->d_inode->i_mapping;
+ mapping = file_inode(file)->i_mapping;
capabilities = 0;
if (mapping && mapping->backing_dev_info)
@@ -952,7 +952,7 @@ static int validate_mmap_request(struct file *file,
if (!capabilities) {
/* no explicit capabilities set, so assume some
* defaults */
- switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
+ switch (file_inode(file)->i_mode & S_IFMT) {
case S_IFREG:
case S_IFBLK:
capabilities = BDI_CAP_MAP_COPY;
@@ -987,11 +987,11 @@ static int validate_mmap_request(struct file *file,
!(file->f_mode & FMODE_WRITE))
return -EACCES;
- if (IS_APPEND(file->f_path.dentry->d_inode) &&
+ if (IS_APPEND(file_inode(file)) &&
(file->f_mode & FMODE_WRITE))
return -EACCES;
- if (locks_verify_locked(file->f_path.dentry->d_inode))
+ if (locks_verify_locked(file_inode(file)))
return -EAGAIN;
if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -1327,8 +1327,8 @@ unsigned long do_mmap_pgoff(struct file *file,
continue;
/* search for overlapping mappings on the same file */
- if (pregion->vm_file->f_path.dentry->d_inode !=
- file->f_path.dentry->d_inode)
+ if (file_inode(pregion->vm_file) !=
+ file_inode(file))
continue;
if (pregion->vm_pgoff >= pgend)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index cdc377c456c0..efe68148f621 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -696,7 +696,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
* => fast response on large errors; small oscillation near setpoint
*/
setpoint = (freerun + limit) / 2;
- x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
+ x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
limit - setpoint + 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -1986,6 +1986,8 @@ int __set_page_dirty_no_writeback(struct page *page)
*/
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
+ trace_writeback_dirty_page(page, mapping);
+
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_zone_page_state(page, NR_DIRTIED);
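The (s64) casts added to bdi_position_ratio() matter because setpoint and dirty are unsigned page counts; once dirty climbs above setpoint the raw difference wraps before it is shifted. A sketch of the failure mode with made-up numbers:

    /*
     * unsigned long setpoint = 1000, dirty = 1500;   (64-bit)
     *
     *   setpoint - dirty            == 0xfffffffffffffe0c   (huge positive)
     *   (s64)setpoint - (s64)dirty  == -500                 (intended)
     *
     * so pos_ratio can actually go negative above the setpoint, as the
     * control formula in the surrounding comment expects.
     */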
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e9075fdef695..8fcced7823fa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -202,18 +202,11 @@ static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-/* Movable memory ranges, will also be used by memblock subsystem. */
-struct movablemem_map movablemem_map = {
- .acpi = false,
- .nr_map = 0,
-};
-
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -333,7 +326,7 @@ static void bad_page(struct page *page)
out:
/* Leave bad fields for debug, except PageBuddy could make trouble */
page_mapcount_reset(page); /* remove PageBuddy */
- add_taint(TAINT_BAD_PAGE);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
/*
@@ -4412,77 +4405,6 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
-/**
- * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
- *
- * zone_movable_limit is initialized as 0. This function will try to get
- * the first ZONE_MOVABLE pfn of each node from movablemem_map, and
- * assigne them to zone_movable_limit.
- * zone_movable_limit[nid] == 0 means no limit for the node.
- *
- * Note: Each range is represented as [start_pfn, end_pfn)
- */
-static void __meminit sanitize_zone_movable_limit(void)
-{
- int map_pos = 0, i, nid;
- unsigned long start_pfn, end_pfn;
-
- if (!movablemem_map.nr_map)
- return;
-
- /* Iterate all ranges from minimum to maximum */
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
- /*
- * If we have found lowest pfn of ZONE_MOVABLE of the node
- * specified by user, just go on to check next range.
- */
- if (zone_movable_limit[nid])
- continue;
-
-#ifdef CONFIG_ZONE_DMA
- /* Skip DMA memory. */
- if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
- start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
- /* Skip DMA32 memory. */
- if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
- start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
-#endif
-
-#ifdef CONFIG_HIGHMEM
- /* Skip lowmem if ZONE_MOVABLE is highmem. */
- if (zone_movable_is_highmem() &&
- start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
- start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
-#endif
-
- if (start_pfn >= end_pfn)
- continue;
-
- while (map_pos < movablemem_map.nr_map) {
- if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
- break;
-
- if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
- map_pos++;
- continue;
- }
-
- /*
- * The start_pfn of ZONE_MOVABLE is either the minimum
- * pfn specified by movablemem_map, or 0, which means
- * the node has no ZONE_MOVABLE.
- */
- zone_movable_limit[nid] = max(start_pfn,
- movablemem_map.map[map_pos].start_pfn);
-
- break;
- }
- }
-}
-
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
@@ -4500,6 +4422,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
+
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
@@ -4941,19 +4864,12 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}
- /*
- * If neither kernelcore/movablecore nor movablemem_map is specified,
- * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
- * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
- */
- if (!required_kernelcore) {
- if (movablemem_map.nr_map)
- memcpy(zone_movable_pfn, zone_movable_limit,
- sizeof(zone_movable_pfn));
+ /* If kernelcore was not specified, there is no ZONE_MOVABLE */
+ if (!required_kernelcore)
goto out;
- }
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+ find_usable_zone_for_movable();
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
restart:
@@ -4981,24 +4897,10 @@ restart:
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
- /*
- * Find more memory for kernelcore in
- * [zone_movable_pfn[nid], zone_movable_limit[nid]).
- */
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
- if (zone_movable_limit[nid]) {
- end_pfn = min(end_pfn, zone_movable_limit[nid]);
- /* No range left for kernelcore in this node */
- if (start_pfn >= end_pfn) {
- zone_movable_pfn[nid] =
- zone_movable_limit[nid];
- break;
- }
- }
-
/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
@@ -5058,12 +4960,12 @@ restart:
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
-out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+out:
/* restore the node_state */
node_states[N_MEMORY] = saved_node_state;
}
@@ -5126,8 +5028,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
- find_usable_zone_for_movable();
- sanitize_zone_movable_limit();
find_zone_movable_pfns_for_nodes();
/* Print out the zone ranges */
@@ -5211,181 +5111,6 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
-/**
- * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
- * @start_pfn: start pfn of the range to be checked
- * @end_pfn: end pfn of the range to be checked (exclusive)
- *
- * This function checks if a given memory range [start_pfn, end_pfn) overlaps
- * the movablemem_map.map[] array.
- *
- * Return: index of the first overlapped element in movablemem_map.map[]
- * or -1 if they don't overlap each other.
- */
-int __init movablemem_map_overlap(unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int overlap;
-
- if (!movablemem_map.nr_map)
- return -1;
-
- for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
- if (start_pfn < movablemem_map.map[overlap].end_pfn)
- break;
-
- if (overlap == movablemem_map.nr_map ||
- end_pfn <= movablemem_map.map[overlap].start_pfn)
- return -1;
-
- return overlap;
-}
-
-/**
- * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
- * @start_pfn: start pfn of the range
- * @end_pfn: end pfn of the range
- *
- * This function will also merge the overlapped ranges, and sort the array
- * by start_pfn in monotonic increasing order.
- */
-void __init insert_movablemem_map(unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int pos, overlap;
-
- /*
- * pos will be at the 1st overlapped range, or the position
- * where the element should be inserted.
- */
- for (pos = 0; pos < movablemem_map.nr_map; pos++)
- if (start_pfn <= movablemem_map.map[pos].end_pfn)
- break;
-
- /* If there is no overlapped range, just insert the element. */
- if (pos == movablemem_map.nr_map ||
- end_pfn < movablemem_map.map[pos].start_pfn) {
- /*
- * If pos is not the end of array, we need to move all
- * the rest elements backward.
- */
- if (pos < movablemem_map.nr_map)
- memmove(&movablemem_map.map[pos+1],
- &movablemem_map.map[pos],
- sizeof(struct movablemem_entry) *
- (movablemem_map.nr_map - pos));
- movablemem_map.map[pos].start_pfn = start_pfn;
- movablemem_map.map[pos].end_pfn = end_pfn;
- movablemem_map.nr_map++;
- return;
- }
-
- /* overlap will be at the last overlapped range */
- for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
- if (end_pfn < movablemem_map.map[overlap].start_pfn)
- break;
-
- /*
- * If there are more ranges overlapped, we need to merge them,
- * and move the rest elements forward.
- */
- overlap--;
- movablemem_map.map[pos].start_pfn = min(start_pfn,
- movablemem_map.map[pos].start_pfn);
- movablemem_map.map[pos].end_pfn = max(end_pfn,
- movablemem_map.map[overlap].end_pfn);
-
- if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
- memmove(&movablemem_map.map[pos+1],
- &movablemem_map.map[overlap+1],
- sizeof(struct movablemem_entry) *
- (movablemem_map.nr_map - overlap - 1));
-
- movablemem_map.nr_map -= overlap - pos;
-}
-
-/**
- * movablemem_map_add_region - Add a memory range into movablemem_map.
- * @start: physical start address of range
- * @end: physical end address of range
- *
- * This function transform the physical address into pfn, and then add the
- * range into movablemem_map by calling insert_movablemem_map().
- */
-static void __init movablemem_map_add_region(u64 start, u64 size)
-{
- unsigned long start_pfn, end_pfn;
-
- /* In case size == 0 or start + size overflows */
- if (start + size <= start)
- return;
-
- if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
- pr_err("movablemem_map: too many entries;"
- " ignoring [mem %#010llx-%#010llx]\n",
- (unsigned long long) start,
- (unsigned long long) (start + size - 1));
- return;
- }
-
- start_pfn = PFN_DOWN(start);
- end_pfn = PFN_UP(start + size);
- insert_movablemem_map(start_pfn, end_pfn);
-}
-
-/*
- * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
- * @p: The boot option of the following format:
- * movablemem_map=nn[KMG]@ss[KMG]
- *
- * This option sets the memory range [ss, ss+nn) to be used as movable memory.
- *
- * Return: 0 on success or -EINVAL on failure.
- */
-static int __init cmdline_parse_movablemem_map(char *p)
-{
- char *oldp;
- u64 start_at, mem_size;
-
- if (!p)
- goto err;
-
- if (!strcmp(p, "acpi"))
- movablemem_map.acpi = true;
-
- /*
- * If user decide to use info from BIOS, all the other user specified
- * ranges will be ingored.
- */
- if (movablemem_map.acpi) {
- if (movablemem_map.nr_map) {
- memset(movablemem_map.map, 0,
- sizeof(struct movablemem_entry)
- * movablemem_map.nr_map);
- movablemem_map.nr_map = 0;
- }
- return 0;
- }
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- goto err;
-
- if (*p == '@') {
- oldp = ++p;
- start_at = memparse(p, &p);
- if (p == oldp || *p != '\0')
- goto err;
-
- movablemem_map_add_region(start_at, mem_size);
- return 0;
- }
-err:
- return -EINVAL;
-}
-early_param("movablemem_map", cmdline_parse_movablemem_map);
-
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
diff --git a/mm/shmem.c b/mm/shmem.c
index 1ad79243cb7b..ed2befb4952e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1294,7 +1294,7 @@ unlock:
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
int error;
int ret = VM_FAULT_LOCKED;
@@ -1312,14 +1312,14 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}
static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
pgoff_t index;
index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -1329,7 +1329,7 @@ static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct shmem_inode_info *info = SHMEM_I(inode);
int retval = -ENOMEM;
@@ -1464,7 +1464,7 @@ shmem_write_end(struct file *file, struct address_space *mapping,
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
unsigned long offset;
@@ -1807,7 +1807,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_falloc shmem_falloc;
pgoff_t start, index, end;
@@ -2350,7 +2350,7 @@ static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
{
if (*len < 3) {
*len = 3;
- return 255;
+ return FILEID_INVALID;
}
if (inode_unhashed(inode)) {
@@ -2778,6 +2778,7 @@ static struct file_system_type shmem_fs_type = {
.name = "tmpfs",
.mount = shmem_mount,
.kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init shmem_init(void)
@@ -2835,6 +2836,7 @@ static struct file_system_type shmem_fs_type = {
.name = "tmpfs",
.mount = ramfs_mount,
.kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init shmem_init(void)
@@ -2877,6 +2879,16 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
+static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
+ dentry->d_name.name);
+}
+
+static struct dentry_operations anon_ops = {
+ .d_dname = shmem_dname
+};
+
/**
* shmem_file_setup - get an unlinked file living in tmpfs
* @name: name for dentry (to be seen in /proc/<pid>/maps)
@@ -2885,15 +2897,14 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
*/
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
- int error;
- struct file *file;
+ struct file *res;
struct inode *inode;
struct path path;
- struct dentry *root;
+ struct super_block *sb;
struct qstr this;
if (IS_ERR(shm_mnt))
- return (void *)shm_mnt;
+ return ERR_CAST(shm_mnt);
if (size < 0 || size > MAX_LFS_FILESIZE)
return ERR_PTR(-EINVAL);
@@ -2901,18 +2912,19 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (shmem_acct_size(flags, size))
return ERR_PTR(-ENOMEM);
- error = -ENOMEM;
+ res = ERR_PTR(-ENOMEM);
this.name = name;
this.len = strlen(name);
this.hash = 0; /* will go */
- root = shm_mnt->mnt_root;
- path.dentry = d_alloc(root, &this);
+ sb = shm_mnt->mnt_sb;
+ path.dentry = d_alloc_pseudo(sb, &this);
if (!path.dentry)
goto put_memory;
+ d_set_d_op(path.dentry, &anon_ops);
path.mnt = mntget(shm_mnt);
- error = -ENOSPC;
- inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
+ res = ERR_PTR(-ENOSPC);
+ inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
goto put_dentry;
@@ -2921,23 +2933,23 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
clear_nlink(inode); /* It is unlinked */
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
+ res = ERR_PTR(error);
if (error)
goto put_dentry;
#endif
- error = -ENFILE;
- file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+ res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&shmem_file_operations);
- if (!file)
+ if (IS_ERR(res))
goto put_dentry;
- return file;
+ return res;
put_dentry:
path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
- return ERR_PTR(error);
+ return res;
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
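With the rewrite above, shmem_file_setup() reports every failure through ERR_PTR() rather than a mix of NULL and error pointers, so a caller only needs IS_ERR(). A minimal caller sketch; the name, size and header location are assumptions, not taken from this patch:

    #include <linux/shmem_fs.h>
    #include <linux/err.h>

    /* Sketch: a hypothetical 1 MiB unlinked tmpfs file. */
    static struct file *make_scratch_file(void)
    {
            struct file *file = shmem_file_setup("scratch", 1 << 20, 0);

            if (IS_ERR(file))
                    return file;    /* -ENOMEM, -ENOSPC, -ENFILE, ... */

            /* shows up as "/scratch (deleted)" in /proc/<pid>/maps via shmem_dname() */
            return file;
    }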
diff --git a/mm/slab.c b/mm/slab.c
index e7667a3584bc..856e4a192d25 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -812,7 +812,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
dump_stack();
- add_taint(TAINT_BAD_PAGE);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif
diff --git a/mm/slub.c b/mm/slub.c
index ebcc44eb43b9..4aec53705e4f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -562,7 +562,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
printk(KERN_ERR "----------------------------------------"
"-------------------------------------\n\n");
- add_taint(TAINT_BAD_PAGE);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c72c648f750c..a1f7772a01fc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1774,7 +1774,7 @@ static int swap_show(struct seq_file *swap, void *v)
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
- S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
+ S_ISBLK(file_inode(file)->i_mode) ?
"partition" : "file\t",
si->pages << (PAGE_SHIFT - 10),
si->inuse_pages << (PAGE_SHIFT - 10),
diff --git a/net/9p/client.c b/net/9p/client.c
index 34d417670935..8eb75425e6e6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1100,7 +1100,7 @@ void p9_client_begin_disconnect(struct p9_client *clnt)
EXPORT_SYMBOL(p9_client_begin_disconnect);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
- char *uname, u32 n_uname, char *aname)
+ char *uname, kuid_t n_uname, char *aname)
{
int err = 0;
struct p9_req_t *req;
@@ -1117,7 +1117,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
goto error;
}
- req = p9_client_rpc(clnt, P9_TATTACH, "ddss?d", fid->fid,
+ req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1270,7 +1270,7 @@ error:
EXPORT_SYMBOL(p9_client_open);
int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
- gid_t gid, struct p9_qid *qid)
+ kgid_t gid, struct p9_qid *qid)
{
int err = 0;
struct p9_client *clnt;
@@ -1279,13 +1279,14 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
p9_debug(P9_DEBUG_9P,
">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
- ofid->fid, name, flags, mode, gid);
+ ofid->fid, name, flags, mode,
+ from_kgid(&init_user_ns, gid));
clnt = ofid->clnt;
if (ofid->mode != -1)
return -EINVAL;
- req = p9_client_rpc(clnt, P9_TLCREATE, "dsddd", ofid->fid, name, flags,
+ req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags,
mode, gid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1358,7 +1359,7 @@ error:
}
EXPORT_SYMBOL(p9_client_fcreate);
-int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
+int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid,
struct p9_qid *qid)
{
int err = 0;
@@ -1369,7 +1370,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
dfid->fid, name, symtgt);
clnt = dfid->clnt;
- req = p9_client_rpc(clnt, P9_TSYMLINK, "dssd", dfid->fid, name, symtgt,
+ req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt,
gid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1710,7 +1711,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
(unsigned long long)ret->qid.path, ret->qid.version, ret->mode,
ret->atime, ret->mtime, (unsigned long long)ret->length,
ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
- ret->n_uid, ret->n_gid, ret->n_muid);
+ from_kuid(&init_user_ns, ret->n_uid),
+ from_kgid(&init_user_ns, ret->n_gid),
+ from_kuid(&init_user_ns, ret->n_muid));
p9_free_req(clnt, req);
return ret;
@@ -1764,8 +1767,10 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
"<<< st_btime_sec=%lld st_btime_nsec=%lld\n"
"<<< st_gen=%lld st_data_version=%lld",
ret->st_result_mask, ret->qid.type, ret->qid.path,
- ret->qid.version, ret->st_mode, ret->st_nlink, ret->st_uid,
- ret->st_gid, ret->st_rdev, ret->st_size, ret->st_blksize,
+ ret->qid.version, ret->st_mode, ret->st_nlink,
+ from_kuid(&init_user_ns, ret->st_uid),
+ from_kgid(&init_user_ns, ret->st_gid),
+ ret->st_rdev, ret->st_size, ret->st_blksize,
ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec,
ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec,
ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec,
@@ -1828,7 +1833,9 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
(unsigned long long)wst->qid.path, wst->qid.version, wst->mode,
wst->atime, wst->mtime, (unsigned long long)wst->length,
wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
- wst->n_uid, wst->n_gid, wst->n_muid);
+ from_kuid(&init_user_ns, wst->n_uid),
+ from_kgid(&init_user_ns, wst->n_gid),
+ from_kuid(&init_user_ns, wst->n_muid));
req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
if (IS_ERR(req)) {
@@ -1857,7 +1864,9 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
" valid=%x mode=%x uid=%d gid=%d size=%lld\n"
" atime_sec=%lld atime_nsec=%lld\n"
" mtime_sec=%lld mtime_nsec=%lld\n",
- p9attr->valid, p9attr->mode, p9attr->uid, p9attr->gid,
+ p9attr->valid, p9attr->mode,
+ from_kuid(&init_user_ns, p9attr->uid),
+ from_kgid(&init_user_ns, p9attr->gid),
p9attr->size, p9attr->atime_sec, p9attr->atime_nsec,
p9attr->mtime_sec, p9attr->mtime_nsec);
@@ -2106,7 +2115,7 @@ error:
EXPORT_SYMBOL(p9_client_readdir);
int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
- dev_t rdev, gid_t gid, struct p9_qid *qid)
+ dev_t rdev, kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
@@ -2116,7 +2125,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
"minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
- req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode,
+ req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode,
MAJOR(rdev), MINOR(rdev), gid);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -2137,7 +2146,7 @@ error:
EXPORT_SYMBOL(p9_client_mknod_dotl);
int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
- gid_t gid, struct p9_qid *qid)
+ kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
@@ -2146,8 +2155,8 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
- fid->fid, name, mode, gid);
- req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode,
+ fid->fid, name, mode, from_kgid(&init_user_ns, gid));
+ req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode,
gid);
if (IS_ERR(req))
return PTR_ERR(req);
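The client now carries kuid_t/kgid_t end to end and only converts at the wire or log boundary with from_kuid()/from_kgid() against &init_user_ns. A sketch of that conversion pair, assuming the usual uidgid.h declarations:

    #include <linux/uidgid.h>

    /* Sketch: wire value <-> kernel-internal gid, always via init_user_ns here. */
    static u32 gid_to_wire(kgid_t gid)
    {
            return from_kgid(&init_user_ns, gid);
    }

    static kgid_t gid_from_wire(u32 v)
    {
            return make_kgid(&init_user_ns, v);
    }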
diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de76010f..126fd0dceea2 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
int p9_errstr2errno(char *errstr, int len)
{
int errno;
- struct hlist_node *p;
struct errormap *c;
int bucket;
errno = 0;
- p = NULL;
c = NULL;
bucket = jhash(errstr, len, 0) % ERRHASHSZ;
- hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+ hlist_for_each_entry(c, &hash_errmap[bucket], list) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3d33ecf13327..ab9127ec5b7a 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -85,6 +85,8 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
d - int32_t
q - int64_t
s - string
+ u - numeric uid
+ g - numeric gid
S - stat
Q - qid
D - data blob (int32_t size followed by void *, results are not freed)
@@ -163,6 +165,26 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
(*sptr)[len] = 0;
}
break;
+ case 'u': {
+ kuid_t *uid = va_arg(ap, kuid_t *);
+ __le32 le_val;
+ if (pdu_read(pdu, &le_val, sizeof(le_val))) {
+ errcode = -EFAULT;
+ break;
+ }
+ *uid = make_kuid(&init_user_ns,
+ le32_to_cpu(le_val));
+ } break;
+ case 'g': {
+ kgid_t *gid = va_arg(ap, kgid_t *);
+ __le32 le_val;
+ if (pdu_read(pdu, &le_val, sizeof(le_val))) {
+ errcode = -EFAULT;
+ break;
+ }
+ *gid = make_kgid(&init_user_ns,
+ le32_to_cpu(le_val));
+ } break;
case 'Q':{
struct p9_qid *qid =
va_arg(ap, struct p9_qid *);
@@ -177,11 +199,12 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_arg(ap, struct p9_wstat *);
memset(stbuf, 0, sizeof(struct p9_wstat));
- stbuf->n_uid = stbuf->n_gid = stbuf->n_muid =
- -1;
+ stbuf->n_uid = stbuf->n_muid = INVALID_UID;
+ stbuf->n_gid = INVALID_GID;
+
errcode =
p9pdu_readf(pdu, proto_version,
- "wwdQdddqssss?sddd",
+ "wwdQdddqssss?sugu",
&stbuf->size, &stbuf->type,
&stbuf->dev, &stbuf->qid,
&stbuf->mode, &stbuf->atime,
@@ -294,7 +317,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
memset(stbuf, 0, sizeof(struct p9_stat_dotl));
errcode =
p9pdu_readf(pdu, proto_version,
- "qQdddqqqqqqqqqqqqqqq",
+ "qQdugqqqqqqqqqqqqqqq",
&stbuf->st_result_mask,
&stbuf->qid,
&stbuf->st_mode,
@@ -377,6 +400,20 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
+ case 'u': {
+ kuid_t uid = va_arg(ap, kuid_t);
+ __le32 val = cpu_to_le32(
+ from_kuid(&init_user_ns, uid));
+ if (pdu_write(pdu, &val, sizeof(val)))
+ errcode = -EFAULT;
+ } break;
+ case 'g': {
+ kgid_t gid = va_arg(ap, kgid_t);
+ __le32 val = cpu_to_le32(
+ from_kgid(&init_user_ns, gid));
+ if (pdu_write(pdu, &val, sizeof(val)))
+ errcode = -EFAULT;
+ } break;
case 'Q':{
const struct p9_qid *qid =
va_arg(ap, const struct p9_qid *);
@@ -390,7 +427,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_arg(ap, const struct p9_wstat *);
errcode =
p9pdu_writef(pdu, proto_version,
- "wwdQdddqssss?sddd",
+ "wwdQdddqssss?sugu",
stbuf->size, stbuf->type,
stbuf->dev, &stbuf->qid,
stbuf->mode, stbuf->atime,
@@ -468,7 +505,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
struct p9_iattr_dotl *);
errcode = p9pdu_writef(pdu, proto_version,
- "ddddqqqqq",
+ "ddugqqqqq",
p9attr->valid,
p9attr->mode,
p9attr->uid,
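With the new 'u' and 'g' conversion characters, a caller hands the marshalling layer a kuid_t/kgid_t directly and the init_user_ns translation happens inside pdu_write/pdu_read. A fragmentary sketch of the write side; p9pdu_writef() is declared in the private net/9p/protocol.h, and the field choice here is hypothetical:

    #include <net/9p/9p.h>
    #include "protocol.h"   /* net/9p/protocol.h: p9pdu_writef() */

    /* Sketch: marshal a mode (d) and owning gid (g) for some request. */
    static int pack_owner(struct p9_fcall *pdu, int proto, u32 mode, kgid_t gid)
    {
            return p9pdu_writef(pdu, proto, "dg", mode, gid);
    }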
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index de2e950a0a7a..74dea377fe5b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
- .zc_request = p9_virtio_zc_request,
+ //.zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
/*
* We leave one entry for input and one entry for response
diff --git a/net/9p/util.c b/net/9p/util.c
index 6ceeeb384de7..59f278e64f58 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -87,23 +87,18 @@ EXPORT_SYMBOL(p9_idpool_destroy);
int p9_idpool_get(struct p9_idpool *p)
{
- int i = 0;
- int error;
+ int i;
unsigned long flags;
-retry:
- if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
- return -1;
-
+ idr_preload(GFP_NOFS);
spin_lock_irqsave(&p->lock, flags);
/* no need to store exactly p, we just need something non-null */
- error = idr_get_new(&p->pool, p, &i);
- spin_unlock_irqrestore(&p->lock, flags);
+ i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
- if (error == -EAGAIN)
- goto retry;
- else if (error)
+ spin_unlock_irqrestore(&p->lock, flags);
+ idr_preload_end();
+ if (i < 0)
return -1;
p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
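p9_idpool_get() is converted here to the preload-style IDR API: memory is set aside with idr_preload(), the id is taken under the spinlock with idr_alloc(..., GFP_NOWAIT), and idr_preload_end() re-enables preemption. The same pattern on a hypothetical pool:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    /* Sketch: allocate an id for 'ptr' in a caller-provided idr + lock. */
    static int alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* may sleep; done outside the lock */
            spin_lock(lock);
            id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
            spin_unlock(lock);
            idr_preload_end();

            return id;      /* >= 0 on success, -ENOMEM/-ENOSPC on failure */
    }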
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 33475291c9c1..4a141e3cf076 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
struct atalk_iface *atif)
{
struct sock *s;
- struct hlist_node *node;
read_lock_bh(&atalk_sockets_lock);
- sk_for_each(s, node, &atalk_sockets) {
+ sk_for_each(s, &atalk_sockets) {
struct atalk_sock *at = at_sk(s);
if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
struct sockaddr_at *sat)
{
struct sock *s;
- struct hlist_node *node;
struct atalk_sock *at;
write_lock_bh(&atalk_sockets_lock);
- sk_for_each(s, node, &atalk_sockets) {
+ sk_for_each(s, &atalk_sockets) {
at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
sat->sat_port < ATPORT_LAST;
sat->sat_port++) {
struct sock *s;
- struct hlist_node *node;
- sk_for_each(s, node, &atalk_sockets) {
+ sk_for_each(s, &atalk_sockets) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
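
For reference, sk_for_each() in the socket-hash hunks above and below now takes only the cursor and the list head; a minimal sketch, with count_socks() as a hypothetical caller (the real sites hold atalk_sockets_lock or the per-family equivalent):

#include <net/sock.h>

static int count_socks(struct hlist_head *head, int type)
{
	struct sock *s;
	int n = 0;

	sk_for_each(s, head)		/* was: sk_for_each(s, node, head) */
		if (s->sk_type == type)
			n++;
	return n;
}
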
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a40051..7b491006eaf4 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct sock *s;
struct atm_vcc *vcc;
- sk_for_each_safe(s, node, tmp, head) {
+ sk_for_each_safe(s, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
- struct hlist_node *node;
struct sock *s;
struct atm_vcc *walk;
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
walk = atm_sk(s);
if (walk->dev != vcc->dev)
continue;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2e3d942e77f1..f23916be18fb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
--*l;
}
- hlist_for_each_entry_from(tmp, e, next) {
+ tmp = container_of(e, struct lec_arp_table, next);
+
+ hlist_for_each_entry_from(tmp, next) {
if (--*l < 0)
break;
}
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
static int
lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
{
- struct hlist_node *node;
struct lec_arp_table *entry;
int i, remove_vcc = 1;
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
* ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
*/
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node,
+ hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (memcmp(to_remove->atm_addr,
entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
static void dump_arp_table(struct lec_priv *priv)
{
- struct hlist_node *node;
struct lec_arp_table *rulla;
char buf[256];
int i, j, offset;
pr_info("Dump %p:\n", priv);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(rulla, node,
+ hlist_for_each_entry(rulla,
&priv->lec_arp_tables[i], next) {
offset = 0;
offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
if (!hlist_empty(&priv->lec_no_forward))
pr_info("No forward\n");
- hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
+ hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
if (!hlist_empty(&priv->lec_arp_empty_ones))
pr_info("Empty ones\n");
- hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
+ hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
if (!hlist_empty(&priv->mcast_fwds))
pr_info("Multicast Forward VCCs\n");
- hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
+ hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
offset = 0;
offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
static void lec_arp_destroy(struct lec_priv *priv)
{
unsigned long flags;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry;
int i;
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
}
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
}
INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_no_forward, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
}
INIT_HLIST_HEAD(&priv->lec_no_forward);
- hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+ hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static void lec_arp_destroy(struct lec_priv *priv)
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
const unsigned char *mac_addr)
{
- struct hlist_node *node;
struct hlist_head *head;
struct lec_arp_table *entry;
pr_debug("%pM\n", mac_addr);
head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
- hlist_for_each_entry(entry, node, head, next) {
+ hlist_for_each_entry(entry, head, next) {
if (ether_addr_equal(mac_addr, entry->mac_addr))
return entry;
}
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
unsigned long flags;
struct lec_priv *priv =
container_of(work, struct lec_priv, lec_arp_work.work);
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry;
unsigned long now;
int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (__lec_arp_check_expire(entry, now, priv)) {
struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
unsigned long permanent)
{
unsigned long flags;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry;
int i;
pr_debug("\n");
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
(permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
unsigned int targetless_le_arp)
{
unsigned long flags;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry, *tmp;
int i;
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
* we have no entry in the cache. 7.1.30
*/
if (!hlist_empty(&priv->lec_arp_empty_ones)) {
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
del_timer(&entry->timer);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(tmp, node,
+ hlist_for_each_entry(tmp,
&priv->lec_arp_tables[i], next) {
if (entry != tmp &&
!memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
{
unsigned long flags;
- struct hlist_node *node;
struct lec_arp_table *entry;
int i, found_entry = 0;
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
ioc_data->atm_addr[16], ioc_data->atm_addr[17],
ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node,
+ hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (memcmp
(ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
{
unsigned long flags;
- struct hlist_node *node;
struct lec_arp_table *entry;
int i;
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node,
+ hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (entry->flush_tran_id == tran_id &&
entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
const unsigned char *atm_addr, unsigned long tran_id)
{
unsigned long flags;
- struct hlist_node *node;
struct lec_arp_table *entry;
int i;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
- hlist_for_each_entry(entry, node,
+ hlist_for_each_entry(entry,
&priv->lec_arp_tables[i], next) {
if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
{
unsigned long flags;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry;
int i;
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_tables[i], next) {
if (vcc == entry->vcc) {
lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
}
}
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (entry->vcc == vcc) {
lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
}
}
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_no_forward, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
}
}
- hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+ hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
struct atm_vcc *vcc, struct sk_buff *skb)
{
unsigned long flags;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
struct lec_arp_table *entry, *tmp;
struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
unsigned char *src = hdr->h_source;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
- hlist_for_each_entry_safe(entry, node, next,
+ hlist_for_each_entry_safe(entry, next,
&priv->lec_arp_empty_ones, next) {
if (vcc == entry->vcc) {
del_timer(&entry->timer);
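
For reference, a minimal sketch of the hlist iterator convention the lec.c hunks (and those that follow) convert to: the separate 'struct hlist_node *' cursor is gone from hlist_for_each_entry(), and deletion goes through the node embedded in the entry. 'struct item' and both functions are hypothetical:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int key;
	struct hlist_node node;
};

/* was: hlist_for_each_entry(it, pos, head, node) */
static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	hlist_for_each_entry(it, head, node)
		if (it->key == key)
			return it;
	return NULL;
}

/* the _safe variant still takes one spare cursor so the current entry
 * may be unlinked and freed during the walk */
static void drop_all(struct hlist_head *head)
{
	struct item *it;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(it, tmp, head, node) {
		hlist_del(&it->node);	/* delete via the embedded node */
		kfree(it);
	}
}
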
diff --git a/net/atm/proc.c b/net/atm/proc.c
index b4e75340b162..6ac35ff0d6b9 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -385,7 +385,7 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
- dev = PDE(file->f_path.dentry->d_inode)->data;
+ dev = PDE(file_inode(file))->data;
if (!dev->ops->proc_read)
length = -EINVAL;
else {
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 86767ca908a3..4176887e72eb 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
static void sigd_close(struct atm_vcc *vcc)
{
- struct hlist_node *node;
struct sock *s;
int i;
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
- sk_for_each(s, node, head) {
+ sk_for_each(s, head) {
vcc = atm_sk(s);
purge_vcc(vcc);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 69a06c47b648..7b11f8bc5071 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *s;
- struct hlist_node *node;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
spin_lock_bh(&ax25_list_lock);
again:
- ax25_for_each(s, node, &ax25_list) {
+ ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
s->ax25_dev = NULL;
spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
struct net_device *dev, int type)
{
ax25_cb *s;
- struct hlist_node *node;
spin_lock(&ax25_list_lock);
- ax25_for_each(s, node, &ax25_list) {
+ ax25_for_each(s, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
{
struct sock *sk = NULL;
ax25_cb *s;
- struct hlist_node *node;
spin_lock(&ax25_list_lock);
- ax25_for_each(s, node, &ax25_list) {
+ ax25_for_each(s, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
ax25_digi *digi, struct net_device *dev)
{
ax25_cb *s;
- struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- ax25_for_each(s, node, &ax25_list) {
+ ax25_for_each(s, &ax25_list) {
if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
ax25_cb *s;
struct sk_buff *copy;
- struct hlist_node *node;
spin_lock(&ax25_list_lock);
- ax25_for_each(s, node, &ax25_list) {
+ ax25_for_each(s, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
s->sk->sk_protocol == proto &&
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 5ea7fd3e2af9..e05bd57b5afd 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
void ax25_ds_enquiry_response(ax25_cb *ax25)
{
ax25_cb *ax25o;
- struct hlist_node *node;
/* Please note that neither DK4EG's nor DG2FEF's
* DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
ax25_ds_set_timer(ax25->ax25_dev);
spin_lock(&ax25_list_lock);
- ax25_for_each(ax25o, node, &ax25_list) {
+ ax25_for_each(ax25o, &ax25_list) {
if (ax25o == ax25)
continue;
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
{
ax25_cb *ax25;
int res = 0;
- struct hlist_node *node;
spin_lock(&ax25_list_lock);
- ax25_for_each(ax25, node, &ax25_list)
+ ax25_for_each(ax25, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 993c439b4f71..951cd57bb07d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
{
ax25_dev *ax25_dev = (struct ax25_dev *) arg;
ax25_cb *ax25;
- struct hlist_node *node;
if (ax25_dev == NULL || !ax25_dev->dama.slave)
return; /* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
}
spin_lock(&ax25_list_lock);
- ax25_for_each(ax25, node, &ax25_list) {
+ ax25_for_each(ax25, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 7d5f24b82cc8..7f16e8a931b2 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
void ax25_link_failed(ax25_cb *ax25, int reason)
{
struct ax25_linkfail *lf;
- struct hlist_node *node;
spin_lock_bh(&linkfail_lock);
- hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+ hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
lf->func(ax25, reason);
spin_unlock_bh(&linkfail_lock);
}
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 957999e43ff7..71c4badbc807 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
ax25_uid_assoc *ax25_uid, *res = NULL;
- struct hlist_node *node;
read_lock(&ax25_uid_lock);
- ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (uid_eq(ax25_uid->uid, uid)) {
ax25_uid_hold(ax25_uid);
res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
ax25_uid_assoc *ax25_uid;
- struct hlist_node *node;
ax25_uid_assoc *user;
unsigned long res;
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
case SIOCAX25GETUID:
res = -ENOENT;
read_lock(&ax25_uid_lock);
- ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
ax25_uid = NULL;
write_lock(&ax25_uid_lock);
- ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
break;
}
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
void __exit ax25_uid_free(void)
{
ax25_uid_assoc *ax25_uid;
- struct hlist_node *node;
write_lock(&ax25_uid_lock);
again:
- ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+ ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
hlist_del_init(&ax25_uid->uid_node);
ax25_uid_put(ax25_uid);
goto again;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 72fe1bbf7721..a0b253ecadaf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
*/
struct batadv_forw_packet *forw_packet_aggr = NULL;
struct batadv_forw_packet *forw_packet_pos = NULL;
- struct hlist_node *tmp_node;
struct batadv_ogm_packet *batadv_ogm_packet;
bool direct_link;
unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->forw_bat_list_lock);
/* own packets are not to be aggregated */
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node,
+ hlist_for_each_entry(forw_packet_pos,
&bat_priv->forw_bat_list, list) {
if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp;
- struct hlist_node *node;
int if_num;
uint8_t sum_orig, sum_neigh;
uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
"update_originator(): Searching and updating originator entry of received packet\n");
rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
neigh_addr = tmp_neigh_node->addr;
if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
- struct hlist_node *node;
uint8_t total_count;
uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
/* find corresponding one hop neighbor */
rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_neigh_node->neigh_list, list) {
if (!batadv_compare_eth(tmp_neigh_node->addr,
orig_neigh_node->orig))
@@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *tmp_neigh_node;
- struct hlist_node *node;
int is_duplicate = 0;
int32_t seq_diff;
int need_update = 0;
@@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
goto out;
rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
orig_node->last_real_seqno,
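
For reference, the RCU variant used throughout the batman-adv hunks follows the same convention; a minimal sketch with a hypothetical 'struct bucket_entry' (a real user would take a reference on the entry before dropping the read lock):

#include <linux/list.h>
#include <linux/rcupdate.h>

struct bucket_entry {
	int key;
	struct hlist_node hash_entry;
};

/* was: hlist_for_each_entry_rcu(e, node, head, hash_entry);
 * writers delete with hlist_del_rcu(&e->hash_entry) under the bucket lock */
static int bucket_contains(struct hlist_head *head, int key)
{
	struct bucket_entry *e;
	int found = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, hash_entry) {
		if (e->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
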
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 30f46526cbbd..6a4f728680ae 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -144,7 +144,6 @@ static struct batadv_bla_claim
{
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_bla_claim *claim;
struct batadv_bla_claim *claim_tmp = NULL;
int index;
@@ -156,7 +155,7 @@ static struct batadv_bla_claim
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
if (!batadv_compare_claim(&claim->hash_entry, data))
continue;
@@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_bla_backbone_gw search_entry, *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
int index;
@@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
&search_entry))
continue;
@@ -221,7 +219,7 @@ static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_hashtable *hash;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
struct batadv_bla_claim *claim;
int i;
@@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(claim, node, node_tmp,
+ hlist_for_each_entry_safe(claim, node_tmp,
head, hash_entry) {
if (claim->backbone_gw != backbone_gw)
continue;
batadv_claim_free_ref(claim);
- hlist_del_rcu(node);
+ hlist_del_rcu(&claim->hash_entry);
}
spin_unlock_bh(list_lock);
}
@@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
short vid)
{
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
struct batadv_bla_claim *claim;
@@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
/* only own claims are interesting */
if (claim->backbone_gw != backbone_gw)
continue;
@@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
struct batadv_bla_backbone_gw *backbone_gw;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+ hlist_for_each_entry_safe(backbone_gw, node_tmp,
head, hash_entry) {
if (now)
goto purge_now;
@@ -992,7 +989,7 @@ purge_now:
batadv_bla_del_backbone_claims(backbone_gw);
- hlist_del_rcu(node);
+ hlist_del_rcu(&backbone_gw->hash_entry);
batadv_backbone_gw_free_ref(backbone_gw);
}
spin_unlock_bh(list_lock);
@@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
int now)
{
struct batadv_bla_claim *claim;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
int i;
@@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
if (now)
goto purge_now;
if (!batadv_compare_eth(claim->backbone_gw->orig,
@@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
struct batadv_hard_iface *oldif)
{
struct batadv_bla_backbone_gw *backbone_gw;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
__be16 group;
@@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
/* own orig still holds the old value. */
if (!batadv_compare_eth(backbone_gw->orig,
oldif->net_dev->dev_addr))
@@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work)
struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
struct batadv_priv_bla *priv_bla;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hashtable *hash;
@@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
continue;
@@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_bla_backbone_gw *backbone_gw;
int i;
@@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (batadv_compare_eth(backbone_gw->orig, orig)) {
rcu_read_unlock();
return 1;
@@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
- struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
bool is_own;
@@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
is_own = batadv_compare_eth(claim->backbone_gw->orig,
primary_addr);
seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
@@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
- struct hlist_node *node;
struct hlist_head *head;
int secs, msecs;
uint32_t i;
@@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
msecs = jiffies_to_msecs(jiffies -
backbone_gw->lasttime);
secs = msecs / 1000;
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 761a59002e34..d54188a112ea 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
{
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_dat_entry *dat_entry;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
list_lock = &bat_priv->dat.hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
+ hlist_for_each_entry_safe(dat_entry, node_tmp, head,
hash_entry) {
/* if an helper function has been passed as parameter,
* ask it if the entry has to be purged or not
@@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
if (to_purge && !to_purge(dat_entry))
continue;
- hlist_del_rcu(node);
+ hlist_del_rcu(&dat_entry->hash_entry);
batadv_dat_entry_free_ref(dat_entry);
}
spin_unlock_bh(list_lock);
@@ -235,7 +235,6 @@ static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
{
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
struct batadv_hashtable *hash = bat_priv->dat.hash;
uint32_t index;
@@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
if (dat_entry->ip != ip)
continue;
@@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
batadv_dat_addr_t max = 0, tmp_max = 0;
struct batadv_orig_node *orig_node, *max_orig_node = NULL;
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
int i;
@@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
/* the dht space is a ring and addresses are unsigned */
tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
ip_key;
@@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_hashtable *hash = bat_priv->dat.hash;
struct batadv_dat_entry *dat_entry;
struct batadv_hard_iface *primary_if;
- struct hlist_node *node;
struct hlist_head *head;
unsigned long last_seen_jiffies;
int last_seen_msecs, last_seen_secs, last_seen_mins;
@@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
last_seen_jiffies = jiffies - dat_entry->last_update;
last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
last_seen_mins = last_seen_msecs / 60000;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 074107f2cfaa..34f99a46ec1d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -114,7 +114,6 @@ static struct batadv_gw_node *
batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_neigh_node *router;
- struct hlist_node *node;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
uint32_t gw_divisor;
@@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
gw_divisor *= 64;
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
continue;
@@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
uint8_t new_gwflags)
{
- struct hlist_node *node;
struct batadv_gw_node *gw_node, *curr_gw;
/* Note: We don't need a NULL check here, since curr_gw never gets
@@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (gw_node->orig_node != orig_node)
continue;
@@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
void batadv_gw_node_purge(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node, *curr_gw;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
int do_deselect = 0;
@@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
spin_lock_bh(&bat_priv->gw.list_lock);
- hlist_for_each_entry_safe(gw_node, node, node_tmp,
+ hlist_for_each_entry_safe(gw_node, node_tmp,
&bat_priv->gw.list, list) {
if (((!gw_node->deleted) ||
(time_before(jiffies, gw_node->deleted + timeout))) &&
@@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hard_iface *primary_if;
struct batadv_gw_node *gw_node;
- struct hlist_node *node;
int gw_count = 0;
primary_if = batadv_seq_print_text_primary_if_get(seq);
@@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
primary_if->net_dev->dev_addr, net_dev->name);
rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
if (gw_node->deleted)
continue;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 21fe6987733b..0488d70c8c35 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type)
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
- struct hlist_node *node;
- hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
if (strcmp(bat_algo_ops_tmp->name, name) != 0)
continue;
@@ -411,11 +410,10 @@ out:
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
struct batadv_algo_ops *bat_algo_ops;
- struct hlist_node *node;
seq_printf(seq, "Available routing algorithms:\n");
- hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
seq_printf(seq, "%s\n", bat_algo_ops->name);
}
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 457ea445217c..96fb80b724dc 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -118,7 +118,7 @@ out:
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
struct batadv_orig_node *orig_node;
@@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
}
/* for all neighbors towards this originator ... */
- hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+ hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_free_ref(neigh_node);
@@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
void batadv_originator_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
@@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(orig_node, node, node_tmp,
+ hlist_for_each_entry_safe(orig_node, node_tmp,
head, hash_entry) {
- hlist_del_rcu(node);
+ hlist_del_rcu(&orig_node->hash_entry);
batadv_orig_node_free_ref(orig_node);
}
spin_unlock_bh(list_lock);
@@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node **best_neigh_node)
{
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
bool neigh_purged = false;
unsigned long last_seen;
@@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
- hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+ hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
last_seen = neigh_node->last_seen;
if_incoming = neigh_node->if_incoming;
@@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
@@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(orig_node, node, node_tmp,
+ hlist_for_each_entry_safe(orig_node, node_tmp,
head, hash_entry) {
if (batadv_purge_orig_node(bat_priv, orig_node)) {
if (orig_node->gw_flags)
batadv_gw_node_delete(bat_priv,
orig_node);
- hlist_del_rcu(node);
+ hlist_del_rcu(&orig_node->hash_entry);
batadv_orig_node_free_ref(orig_node);
continue;
}
@@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct batadv_hard_iface *primary_if;
struct batadv_orig_node *orig_node;
@@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
continue;
@@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
neigh_node->addr,
neigh_node->if_incoming->net_dev->name);
- hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
+ hlist_for_each_entry_rcu(neigh_node_tmp,
&orig_node->neigh_list, list) {
seq_printf(seq, " %pM (%3i)",
neigh_node_tmp->addr,
@@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
uint32_t i;
@@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
ret = batadv_orig_node_add_if(orig_node, max_if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
@@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_hard_iface *hard_iface_tmp;
struct batadv_orig_node *orig_node;
@@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
ret = batadv_orig_node_del_if(orig_node, max_if_num,
hard_iface->if_num);
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 286bf743e76a..7df48fa7669d 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
int index;
@@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
if (!batadv_compare_eth(orig_node, data))
continue;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 60ba03fc8390..5ee21cebbbb0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
unsigned long *word;
@@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
word_index = hard_iface->if_num * BATADV_NUM_WORDS;
word = &(orig_node->bcast_own[word_index]);
@@ -146,7 +145,6 @@ out:
void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node)
{
- struct hlist_node *node;
struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
uint8_t interference_candidate = 0;
@@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
* interface. If we do, we won't select this candidate because of
* possible interference.
*/
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
if (tmp_neigh_node == neigh_node)
continue;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 80ca65fc89a1..a67cffde37ae 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface)
{
struct batadv_forw_packet *forw_packet;
- struct hlist_node *tmp_node, *safe_tmp_node;
+ struct hlist_node *safe_tmp_node;
bool pending;
if (hard_iface)
@@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
/* free bcast list */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+ hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
&bat_priv->forw_bcast_list, list) {
/* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
@@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
/* free batman packet list */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+ hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
&bat_priv->forw_bat_list, list) {
/* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d44672f4a349..98a66a021a60 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -56,7 +56,6 @@ static struct batadv_tt_common_entry *
batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
uint32_t index;
@@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
if (!batadv_compare_eth(tt_common_entry, data))
continue;
@@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct batadv_tt_local_entry *tt_local;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_orig_list_entry *orig_entry;
int hash_added;
bool roamed_back = false;
@@ -339,7 +337,7 @@ check_roaming:
/* These node are probably going to update their tt table */
head = &tt_global->orig_list;
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
batadv_send_roam_adv(bat_priv, tt_global->common.addr,
orig_entry->orig_node);
}
@@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
struct batadv_hard_iface *primary_if;
- struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int last_seen_secs;
@@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
@@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
{
struct batadv_tt_local_entry *tt_local_entry;
struct batadv_tt_common_entry *tt_common_entry;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
hash_entry) {
tt_local_entry = container_of(tt_common_entry,
struct batadv_tt_local_entry,
@@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp,
head, hash_entry) {
- hlist_del_rcu(node);
+ hlist_del_rcu(&tt_common_entry->hash_entry);
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
@@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
{
struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
const struct hlist_head *head;
- struct hlist_node *node;
rcu_read_lock();
head = &entry->orig_list;
- hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+ hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
if (tmp_orig_entry->orig_node != orig_node)
continue;
if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
@@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
{
struct batadv_neigh_node *router = NULL;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
int best_tq = 0;
head = &tt_global_entry->orig_list;
- hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
router = batadv_orig_node_get_router(orig_entry->orig_node);
if (!router)
continue;
@@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
struct seq_file *seq)
{
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
struct batadv_tt_common_entry *tt_common_entry;
uint16_t flags;
@@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
head = &tt_global_entry->orig_list;
- hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
if (best_entry == orig_entry)
continue;
@@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global;
struct batadv_hard_iface *primary_if;
- struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
@@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
@@ -1059,13 +1052,13 @@ static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
struct hlist_head *head;
- struct hlist_node *node, *safe;
+ struct hlist_node *safe;
struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
- hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
- hlist_del_rcu(node);
+ hlist_for_each_entry_safe(orig_entry, safe, head, list) {
+ hlist_del_rcu(&orig_entry->list);
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
const char *message)
{
struct hlist_head *head;
- struct hlist_node *node, *safe;
+ struct hlist_node *safe;
struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
- hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+ hlist_for_each_entry_safe(orig_entry, safe, head, list) {
if (orig_entry->orig_node == orig_node) {
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting %pM from global tt entry %pM: %s\n",
orig_node->orig,
tt_global_entry->common.addr, message);
- hlist_del_rcu(node);
+ hlist_del_rcu(&orig_entry->list);
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
}
@@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
{
bool last_entry = true;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_orig_list_entry *orig_entry;
/* no local entry exists, case 1:
@@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
rcu_read_lock();
head = &tt_global_entry->orig_list;
- hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
if (orig_entry->orig_node != orig_node) {
last_entry = false;
break;
@@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *tt_common_entry;
uint32_t i;
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
- struct hlist_node *node, *safe;
+ struct hlist_node *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, safe,
+ hlist_for_each_entry_safe(tt_common_entry, safe,
head, hash_entry) {
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
@@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM: %s\n",
tt_global->common.addr, message);
- hlist_del_rcu(node);
+ hlist_del_rcu(&tt_common_entry->hash_entry);
batadv_tt_global_entry_free_ref(tt_global);
}
}
@@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_head *head;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
char *msg = NULL;
@@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+ hlist_for_each_entry_safe(tt_common, node_tmp, head,
hash_entry) {
tt_global = container_of(tt_common,
struct batadv_tt_global_entry,
@@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
"Deleting global tt entry (%pM): %s\n",
tt_global->common.addr, msg);
- hlist_del_rcu(node);
+ hlist_del_rcu(&tt_common->hash_entry);
batadv_tt_global_entry_free_ref(tt_global);
}
@@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp,
head, hash_entry) {
- hlist_del_rcu(node);
+ hlist_del_rcu(&tt_common_entry->hash_entry);
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
@@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
- struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int j;
@@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
tt_global = container_of(tt_common,
struct batadv_tt_global_entry,
common);
@@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
uint16_t total = 0, total_one;
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
- struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int j;
@@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
/* not yet committed clients have not to be taken into
* account while computing the CRC
*/
@@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_query_packet *tt_response;
struct batadv_tt_change *tt_change;
- struct hlist_node *node;
struct hlist_head *head;
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
@@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
- hlist_for_each_entry_rcu(tt_common_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
if (tt_count == tt_tot)
break;
@@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
uint32_t i;
uint16_t changed_num = 0;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_tt_common_entry *tt_common_entry;
if (!hash)
@@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
if (enable) {
if ((tt_common_entry->flags & flags) == flags)
@@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+ hlist_for_each_entry_safe(tt_common, node_tmp, head,
hash_entry) {
if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
continue;
@@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
tt_common->addr);
atomic_dec(&bat_priv->tt.local_entry_num);
- hlist_del_rcu(node);
+ hlist_del_rcu(&tt_common->hash_entry);
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
common);
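The translation-table hunks above are part of the tree-wide hlist iterator cleanup: hlist_for_each_entry(), hlist_for_each_entry_rcu() and hlist_for_each_entry_safe() no longer take a separate struct hlist_node cursor, and removals pass the node embedded in the entry instead. A minimal sketch of the resulting pattern follows; my_entry, my_lookup and my_purge are made-up names, not the batman-adv code itself:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {                       /* hypothetical hash-table entry */
    int key;
    struct hlist_node hash_entry;
    struct rcu_head rcu;
};

/* Reader (runs under rcu_read_lock()): only the typed cursor remains. */
static struct my_entry *my_lookup(struct hlist_head *head, int key)
{
    struct my_entry *e;

    hlist_for_each_entry_rcu(e, head, hash_entry)   /* was (e, node, head, ...) */
        if (e->key == key)
            return e;
    return NULL;
}

/* Writer (caller holds the bucket lock): deletion now passes the embedded
 * node, &e->hash_entry, where the old code passed the iterator node. */
static void my_purge(struct hlist_head *head)
{
    struct my_entry *e;
    struct hlist_node *tmp;

    hlist_for_each_entry_safe(e, tmp, head, hash_entry) {
        hlist_del_rcu(&e->hash_entry);
        kfree_rcu(e, rcu);              /* free only after a grace period */
    }
}

The _safe variant still keeps one spare hlist_node pointer, because the current entry may be unlinked and freed before the iterator advances.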
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 22d2785177d1..c053244b97bd 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
{
struct batadv_hashtable *hash = bat_priv->vis.hash;
struct hlist_head *head;
- struct hlist_node *node;
struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
uint32_t index;
@@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
- if (!batadv_vis_info_cmp(node, data))
+ hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
+ if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
continue;
vis_info_tmp = vis_info;
@@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
bool primary)
{
struct batadv_vis_if_list_entry *entry;
- struct hlist_node *pos;
- hlist_for_each_entry(entry, pos, if_list, list) {
+ hlist_for_each_entry(entry, if_list, list) {
if (batadv_compare_eth(entry->addr, interface))
return;
}
@@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
const struct hlist_head *if_list)
{
struct batadv_vis_if_list_entry *entry;
- struct hlist_node *pos;
- hlist_for_each_entry(entry, pos, if_list, list) {
+ hlist_for_each_entry(entry, if_list, list) {
if (entry->primary)
seq_printf(seq, "PRIMARY, ");
else
@@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
{
int i;
struct batadv_vis_if_list_entry *entry;
- struct hlist_node *pos;
- hlist_for_each_entry(entry, pos, list, list) {
+ hlist_for_each_entry(entry, list, list) {
seq_printf(seq, "%pM,", entry->addr);
for (i = 0; i < packet->entries; i++)
@@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
const struct hlist_head *head)
{
- struct hlist_node *node;
struct batadv_vis_info *info;
struct batadv_vis_packet *packet;
uint8_t *entries_pos;
struct batadv_vis_info_entry *entries;
struct batadv_vis_if_list_entry *entry;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
HLIST_HEAD(vis_if_list);
- hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(info, head, hash_entry) {
packet = (struct batadv_vis_packet *)info->skb_packet->data;
entries_pos = (uint8_t *)packet + sizeof(*packet);
entries = (struct batadv_vis_info_entry *)entries_pos;
@@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
batadv_vis_data_read_entries(seq, &vis_if_list, packet,
entries);
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
hlist_del(&entry->list);
kfree(entry);
}
@@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct batadv_neigh_node *router;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
struct batadv_vis_packet *packet;
@@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
@@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *router;
@@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
@@ -644,7 +637,7 @@ next:
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node, head,
+ hlist_for_each_entry_rcu(tt_common_entry, head,
hash_entry) {
packet_pos = skb_put(info->skb_packet, sizeof(*entry));
entry = (struct batadv_vis_info_entry *)packet_pos;
@@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
{
uint32_t i;
struct batadv_hashtable *hash = bat_priv->vis.hash;
- struct hlist_node *node, *node_tmp;
+ struct hlist_node *node_tmp;
struct hlist_head *head;
struct batadv_vis_info *info;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
- hlist_for_each_entry_safe(info, node, node_tmp,
+ hlist_for_each_entry_safe(info, node_tmp,
head, hash_entry) {
/* never purge own data. */
if (info == bat_priv->vis.my_info)
@@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
if (batadv_has_timed_out(info->first_seen,
BATADV_VIS_TIMEOUT)) {
- hlist_del(node);
+ hlist_del(&info->hash_entry);
batadv_send_list_del(info);
kref_put(&info->refcount, batadv_free_info);
}
@@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
struct batadv_vis_info *info)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_node *node;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
struct batadv_vis_packet *packet;
@@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
/* if it's a vis server and reachable, send it. */
if (!(orig_node->flags & BATADV_VIS_SERVER))
continue;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 07f073935811..6a93614f2c49 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = {
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock *sk;
- struct hlist_node *node;
struct sk_buff *skb_copy = NULL;
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
+ sk_for_each(sk, &hci_sk_list.head) {
struct hci_filter *flt;
struct sk_buff *nskb;
@@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
struct sock *sk;
- struct hlist_node *node;
BT_DBG("len %d", skb->len);
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
+ sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
/* Skip the original socket */
@@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock *sk;
- struct hlist_node *node;
struct sk_buff *skb_copy = NULL;
__le16 opcode;
@@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
+ sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
if (sk->sk_state != BT_BOUND)
@@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
static void send_monitor_event(struct sk_buff *skb)
{
struct sock *sk;
- struct hlist_node *node;
BT_DBG("len %d", skb->len);
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
+ sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
if (sk->sk_state != BT_BOUND)
@@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
if (event == HCI_DEV_UNREG) {
struct sock *sk;
- struct hlist_node *node;
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
+ sk_for_each(sk, &hci_sk_list.head) {
bh_lock_sock_nested(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ce3f6658f4b2..c23bae86263b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
- struct hlist_node *node;
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
+ sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
}
- return node ? sk : NULL;
+ return sk ? sk : NULL;
}
/* Find socket with channel and source bdaddr.
@@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
read_lock(&rfcomm_sk_list.lock);
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
+ sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
@@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
read_unlock(&rfcomm_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
@@ -970,11 +968,10 @@ done:
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
- struct hlist_node *node;
read_lock(&rfcomm_sk_list.lock);
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
+ sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
&bt_sk(sk)->src, &bt_sk(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
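The rfcomm conversions above, like the sco.c and af_can.c ones further down, lean on a property of the reworked iterators: if the loop runs to completion without hitting a break, the cursor is left NULL, so the cursor itself answers "found or not" where the old code tested the spare node pointer. A small illustrative lookup, with hypothetical my_item/my_find names:

#include <linux/list.h>

struct my_item {                        /* hypothetical list element */
    int id;
    struct hlist_node node;
};

static struct my_item *my_find(struct hlist_head *head, int id)
{
    struct my_item *it;

    hlist_for_each_entry(it, head, node)
        if (it->id == id)
            break;                      /* 'it' still points at the match */

    /* If nothing matched, the loop ended because the cursor became NULL,
     * so 'it' doubles as the "found" result. */
    return it;
}

That is why return sk ? sk : sk1; and the later if (!r) check in can_rx_unregister() keep working without the old node variable.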
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b5178d62064e..79d87d8d4f51 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -259,10 +259,9 @@ drop:
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
- struct hlist_node *node;
struct sock *sk;
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
@@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
@@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
read_unlock(&sco_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
@@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
struct sock *sk;
- struct hlist_node *node;
int lm = 0;
BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
@@ -1018,11 +1015,10 @@ drop:
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
- struct hlist_node *node;
read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
&bt_sk(sk)->dst, sk->sk_state);
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 8117900af4de..b0812c91c0f0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data)
spin_lock(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
- struct hlist_node *h, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+ hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
unsigned long this_timer;
if (f->is_static)
continue;
@@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br)
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
- struct hlist_node *h, *n;
- hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+ struct hlist_node *n;
+ hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
if (!f->is_static)
fdb_delete(br, f);
}
@@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
- struct hlist_node *h;
struct net_bridge_fdb_entry *fdb;
- hlist_for_each_entry_rcu(fdb, h,
+ hlist_for_each_entry_rcu(fdb,
&br->hash[br_mac_hash(addr, vid)], hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid) {
@@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
{
struct __fdb_entry *fe = buf;
int i, num = 0;
- struct hlist_node *h;
struct net_bridge_fdb_entry *f;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
rcu_read_lock();
for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+ hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
@@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
const unsigned char *addr,
__u16 vid)
{
- struct hlist_node *h;
struct net_bridge_fdb_entry *fdb;
- hlist_for_each_entry(fdb, h, head, hlist) {
+ hlist_for_each_entry(fdb, head, hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid)
return fdb;
@@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
const unsigned char *addr,
__u16 vid)
{
- struct hlist_node *h;
struct net_bridge_fdb_entry *fdb;
- hlist_for_each_entry_rcu(fdb, h, head, hlist) {
+ hlist_for_each_entry_rcu(fdb, head, hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid)
return fdb;
@@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb,
goto out;
for (i = 0; i < BR_HASH_SIZE; i++) {
- struct hlist_node *h;
struct net_bridge_fdb_entry *f;
- hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+ hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
if (idx < cb->args[0])
goto skip;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 38991e03646d..9f97b850fc65 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
- struct hlist_node *n;
struct nlattr *nest;
if (!br->multicast_router || hlist_empty(&br->router_list))
@@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
if (nest == NULL)
return -EMSGSIZE;
- hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) {
+ hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
goto fail;
}
@@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
return -EMSGSIZE;
for (i = 0; i < mdb->max; i++) {
- struct hlist_node *h;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p, **pp;
struct net_bridge_port *port;
- hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) {
+ hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
if (idx < s_idx)
goto skip;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7d886b0a8b7b..10e6fce1bb62 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
struct net_bridge_mdb_entry *mp;
- struct hlist_node *p;
- hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
if (br_ip_equal(&mp->addr, dst))
return mp;
}
@@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
int elasticity)
{
struct net_bridge_mdb_entry *mp;
- struct hlist_node *p;
int maxlen;
int len;
int i;
for (i = 0; i < old->max; i++)
- hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
+ hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
hlist_add_head(&mp->hlist[new->ver],
&new->mhash[br_ip_hash(new, &mp->addr)]);
@@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
maxlen = 0;
for (i = 0; i < new->max; i++) {
len = 0;
- hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
+ hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
len++;
if (len > maxlen)
maxlen = len;
@@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
- struct hlist_node *p;
unsigned int count = 0;
unsigned int max;
int elasticity;
int err;
mdb = rcu_dereference_protected(br->mdb, 1);
- hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
count++;
if (unlikely(br_ip_equal(group, &mp->addr)))
return mp;
@@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
- struct hlist_node *p, *n;
+ struct hlist_node *n;
spin_lock(&br->multicast_lock);
- hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
+ hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
br_multicast_del_pg(br, pg);
if (!hlist_unhashed(&port->rlist))
@@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *port)
{
struct net_bridge_port *p;
- struct hlist_node *n, *slot = NULL;
+ struct hlist_node *slot = NULL;
- hlist_for_each_entry(p, n, &br->router_list, rlist) {
+ hlist_for_each_entry(p, &br->router_list, rlist) {
if ((unsigned long) port >= (unsigned long) p)
break;
- slot = n;
+ slot = &p->rlist;
}
if (slot)
@@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
- struct hlist_node *p, *n;
+ struct hlist_node *n;
u32 ver;
int i;
@@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br)
ver = mdb->ver;
for (i = 0; i < mdb->max; i++) {
- hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
+ hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
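In br_multicast_add_router() above, the saved insertion point used to be the spare iterator node; with that gone, the code remembers &p->rlist, the node embedded in the last entry that should stay ahead of the new port. A hedged sketch of the same keep-it-sorted insert, with invented names (my_port, prio) and the hlist_add_after() helper as it exists at this point in the tree:

#include <linux/list.h>

struct my_port {                        /* hypothetical */
    unsigned long prio;
    struct hlist_node rlist;
};

/* Insert 'newp' so the list stays sorted by descending prio.  'slot' is
 * the embedded node of the last entry that must precede it, instead of a
 * separate iterator node as in the old code. */
static void my_add_sorted(struct hlist_head *head, struct my_port *newp)
{
    struct my_port *p;
    struct hlist_node *slot = NULL;

    hlist_for_each_entry(p, head, rlist) {
        if (newp->prio >= p->prio)
            break;
        slot = &p->rlist;
    }

    if (slot)
        hlist_add_after(slot, &newp->rlist);
    else
        hlist_add_head(&newp->rlist, head);
}

Saving the embedded node rather than the entry pointer keeps the final hlist_add_after() call unchanged.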
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ddac1ee2ed20..c48e5220bbac 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
{
struct receiver *r = NULL;
struct hlist_head *rl;
- struct hlist_node *next;
struct dev_rcv_lists *d;
if (dev && dev->type != ARPHRD_CAN)
@@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
* been registered before.
*/
- hlist_for_each_entry_rcu(r, next, rl, list) {
+ hlist_for_each_entry_rcu(r, rl, list) {
if (r->can_id == can_id && r->mask == mask &&
r->func == func && r->data == data)
break;
@@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
* will be NULL, while r will point to the last item of the list.
*/
- if (!next) {
+ if (!r) {
printk(KERN_ERR "BUG: receive list entry not found for "
"dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
@@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
struct receiver *r;
- struct hlist_node *n;
int matches = 0;
struct can_frame *cf = (struct can_frame *)skb->data;
canid_t can_id = cf->can_id;
@@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
if (can_id & CAN_ERR_FLAG) {
/* check for error message frame entries only */
- hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
+ hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
if (can_id & r->mask) {
deliver(skb, r);
matches++;
@@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
}
/* check for unfiltered entries */
- hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
+ hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
deliver(skb, r);
matches++;
}
/* check for can_id/mask entries */
- hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
+ hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
if ((can_id & r->mask) == r->can_id) {
deliver(skb, r);
matches++;
@@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
}
/* check for inverted can_id/mask entries */
- hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
+ hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
if ((can_id & r->mask) != r->can_id) {
deliver(skb, r);
matches++;
@@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return matches;
if (can_id & CAN_EFF_FLAG) {
- hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
+ hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
if (r->can_id == can_id) {
deliver(skb, r);
matches++;
@@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
}
} else {
can_id &= CAN_SFF_MASK;
- hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
+ hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
deliver(skb, r);
matches++;
}
diff --git a/net/can/gw.c b/net/can/gw.c
index c185fcd5e828..2d117dc5ebea 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb,
if (msg == NETDEV_UNREGISTER) {
struct cgw_job *gwj = NULL;
- struct hlist_node *n, *nx;
+ struct hlist_node *nx;
ASSERT_RTNL();
- hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
@@ -575,12 +575,11 @@ cancel:
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
struct cgw_job *gwj = NULL;
- struct hlist_node *n;
int idx = 0;
int s_idx = cb->args[0];
rcu_read_lock();
- hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
+ hlist_for_each_entry_rcu(gwj, &cgw_list, list) {
if (idx < s_idx)
goto cont;
@@ -858,11 +857,11 @@ out:
static void cgw_remove_all_jobs(void)
{
struct cgw_job *gwj = NULL;
- struct hlist_node *n, *nx;
+ struct hlist_node *nx;
ASSERT_RTNL();
- hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
kfree(gwj);
@@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void)
static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct cgw_job *gwj = NULL;
- struct hlist_node *n, *nx;
+ struct hlist_node *nx;
struct rtcanmsg *r;
struct cf_mod mod;
struct can_can_gw ccgw;
@@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ASSERT_RTNL();
/* remove only the first matching entry */
- hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
if (gwj->flags != r->flags)
continue;
diff --git a/net/can/proc.c b/net/can/proc.c
index 497335892146..1ab8c888f102 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
struct net_device *dev)
{
struct receiver *r;
- struct hlist_node *n;
- hlist_for_each_entry_rcu(r, n, rx_list, list) {
+ hlist_for_each_entry_rcu(r, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
" %-5s %08x %08x %pK %pK %8ld %s\n" :
" %-5s %03x %08x %pK %pK %8ld %s\n";
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index ee71ea26777a..e65e6e4be38b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -15,6 +15,8 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
#include <linux/ceph/ceph_features.h>
@@ -26,6 +28,22 @@
#include "crypto.h"
+/*
+ * Module compatibility interface. For now it doesn't do anything,
+ * but its existence signals a certain level of functionality.
+ *
+ * The data buffer is used to pass information both to and from
+ * libceph. The return value indicates whether libceph determines
+ * it is compatible with the caller (from another kernel module),
+ * given the provided data.
+ *
+ * The data pointer can be null.
+ */
+bool libceph_compatible(void *data)
+{
+ return true;
+}
+EXPORT_SYMBOL(libceph_compatible);
/*
* find filename portion of a path (/foo/bar/baz -> baz)
@@ -292,6 +310,9 @@ ceph_parse_options(char *options, const char *dev_name,
int err = -ENOMEM;
substring_t argstr[MAX_OPT_ARGS];
+ if (current->nsproxy->net_ns != &init_net)
+ return ERR_PTR(-EINVAL);
+
opt = kzalloc(sizeof(*opt), GFP_KERNEL);
if (!opt)
return ERR_PTR(-ENOMEM);
@@ -585,10 +606,8 @@ static int __init init_ceph_lib(void)
if (ret < 0)
goto out_crypto;
- pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
- CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
- CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
- CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+ pr_info("loaded (mon/osd proto %d/%d)\n",
+ CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL);
return 0;
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 3fbda04de29c..1348df96fe15 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
@@ -21,9 +21,15 @@ const char *ceph_osd_op_name(int op)
switch (op) {
case CEPH_OSD_OP_READ: return "read";
case CEPH_OSD_OP_STAT: return "stat";
+ case CEPH_OSD_OP_MAPEXT: return "mapext";
+ case CEPH_OSD_OP_SPARSE_READ: return "sparse-read";
+ case CEPH_OSD_OP_NOTIFY: return "notify";
+ case CEPH_OSD_OP_NOTIFY_ACK: return "notify-ack";
+ case CEPH_OSD_OP_ASSERT_VER: return "assert-version";
case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
+ case CEPH_OSD_OP_CREATE: return "create";
case CEPH_OSD_OP_WRITE: return "write";
case CEPH_OSD_OP_DELETE: return "delete";
case CEPH_OSD_OP_TRUNCATE: return "truncate";
@@ -39,6 +45,11 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_TMAPUP: return "tmapup";
case CEPH_OSD_OP_TMAPGET: return "tmapget";
case CEPH_OSD_OP_TMAPPUT: return "tmapput";
+ case CEPH_OSD_OP_WATCH: return "watch";
+
+ case CEPH_OSD_OP_CLONERANGE: return "clonerange";
+ case CEPH_OSD_OP_ASSERT_SRC_VERSION: return "assert-src-version";
+ case CEPH_OSD_OP_SRC_CMPXATTR: return "src-cmpxattr";
case CEPH_OSD_OP_GETXATTR: return "getxattr";
case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
@@ -53,6 +64,10 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
case CEPH_OSD_OP_SCRUB: return "scrub";
+ case CEPH_OSD_OP_SCRUB_RESERVE: return "scrub-reserve";
+ case CEPH_OSD_OP_SCRUB_UNRESERVE: return "scrub-unreserve";
+ case CEPH_OSD_OP_SCRUB_STOP: return "scrub-stop";
+ case CEPH_OSD_OP_SCRUB_MAP: return "scrub-map";
case CEPH_OSD_OP_WRLOCK: return "wrlock";
case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
@@ -64,10 +79,34 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_CALL: return "call";
case CEPH_OSD_OP_PGLS: return "pgls";
+ case CEPH_OSD_OP_PGLS_FILTER: return "pgls-filter";
+ case CEPH_OSD_OP_OMAPGETKEYS: return "omap-get-keys";
+ case CEPH_OSD_OP_OMAPGETVALS: return "omap-get-vals";
+ case CEPH_OSD_OP_OMAPGETHEADER: return "omap-get-header";
+ case CEPH_OSD_OP_OMAPGETVALSBYKEYS: return "omap-get-vals-by-keys";
+ case CEPH_OSD_OP_OMAPSETVALS: return "omap-set-vals";
+ case CEPH_OSD_OP_OMAPSETHEADER: return "omap-set-header";
+ case CEPH_OSD_OP_OMAPCLEAR: return "omap-clear";
+ case CEPH_OSD_OP_OMAPRMKEYS: return "omap-rm-keys";
}
return "???";
}
+const char *ceph_osd_state_name(int s)
+{
+ switch (s) {
+ case CEPH_OSD_EXISTS:
+ return "exists";
+ case CEPH_OSD_UP:
+ return "up";
+ case CEPH_OSD_AUTOOUT:
+ return "autoout";
+ case CEPH_OSD_NEW:
+ return "new";
+ default:
+ return "???";
+ }
+}
const char *ceph_pool_op_name(int op)
{
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 35fce755ce10..cbd06a91941c 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -287,6 +287,7 @@ static int is_out(const struct crush_map *map, const __u32 *weight, int item, in
* @outpos: our position in that vector
* @firstn: true if choosing "first n" items, false if choosing "indep"
* @recurse_to_leaf: true if we want one device under each item of given type
+ * @descend_once: true if we should only try one descent before giving up
* @out2: second output vector for leaf items (if @recurse_to_leaf)
*/
static int crush_choose(const struct crush_map *map,
@@ -295,7 +296,7 @@ static int crush_choose(const struct crush_map *map,
int x, int numrep, int type,
int *out, int outpos,
int firstn, int recurse_to_leaf,
- int *out2)
+ int descend_once, int *out2)
{
int rep;
unsigned int ftotal, flocal;
@@ -391,7 +392,7 @@ static int crush_choose(const struct crush_map *map,
}
reject = 0;
- if (recurse_to_leaf) {
+ if (!collide && recurse_to_leaf) {
if (item < 0) {
if (crush_choose(map,
map->buckets[-1-item],
@@ -399,6 +400,7 @@ static int crush_choose(const struct crush_map *map,
x, outpos+1, 0,
out2, outpos,
firstn, 0,
+ map->chooseleaf_descend_once,
NULL) <= outpos)
/* didn't get leaf */
reject = 1;
@@ -422,7 +424,10 @@ reject:
ftotal++;
flocal++;
- if (collide && flocal <= map->choose_local_tries)
+ if (reject && descend_once)
+ /* let outer call try again */
+ skip_rep = 1;
+ else if (collide && flocal <= map->choose_local_tries)
/* retry locally a few times */
retry_bucket = 1;
else if (map->choose_local_fallback_tries > 0 &&
@@ -485,6 +490,7 @@ int crush_do_rule(const struct crush_map *map,
int i, j;
int numrep;
int firstn;
+ const int descend_once = 0;
if ((__u32)ruleno >= map->max_rules) {
dprintk(" bad ruleno %d\n", ruleno);
@@ -544,7 +550,8 @@ int crush_do_rule(const struct crush_map *map,
curstep->arg2,
o+osize, j,
firstn,
- recurse_to_leaf, c+osize);
+ recurse_to_leaf,
+ descend_once, c+osize);
}
if (recurse_to_leaf)
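The new descend_once argument to crush_choose() changes what happens on a rejected placement: instead of retrying locally up to choose_local_tries times, the call gives up after the first rejection and lets the outer, recursing call retry, which is what the chooseleaf_descend_once tunable asks for. The skeleton below only illustrates that control-flow choice, not the CRUSH selection logic; pick_candidate() and candidate_ok() are invented stand-ins:

/* Hypothetical stand-ins so the sketch is self-contained. */
static int pick_candidate(void)
{
    return 0;                           /* would propose an item */
}

static int candidate_ok(int item)
{
    return item >= 0;                   /* would check collision/overload */
}

static int choose_one(int descend_once, int local_tries)
{
    int flocal = 0;

    for (;;) {
        int item = pick_candidate();

        if (candidate_ok(item))
            return item;                /* accepted */

        if (descend_once)
            return -1;                  /* bail out, let the caller retry */

        if (++flocal <= local_tries)
            continue;                   /* retry locally a few times */

        return -1;                      /* local retries exhausted */
    }
}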
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index af14cb425164..6e7a236525b6 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -423,7 +423,8 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
}
}
-int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+static int ceph_key_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
size_t datalen = prep->datalen;
@@ -458,12 +459,12 @@ err:
return ret;
}
-int ceph_key_match(const struct key *key, const void *description)
+static int ceph_key_match(const struct key *key, const void *description)
{
return strcmp(key->description, description) == 0;
}
-void ceph_key_destroy(struct key *key) {
+static void ceph_key_destroy(struct key *key) {
struct ceph_crypto_key *ckey = key->payload.data;
ceph_crypto_key_destroy(ckey);
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 38b5dc1823d4..00d051f4894e 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -66,9 +66,9 @@ static int osdmap_show(struct seq_file *s, void *p)
for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pool =
rb_entry(n, struct ceph_pg_pool_info, node);
- seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
- pool->id, pool->v.pg_num, pool->pg_num_mask,
- pool->v.lpg_num, pool->lpg_num_mask);
+ seq_printf(s, "pg_pool %llu pg_num %d / %d\n",
+ (unsigned long long)pool->id, pool->pg_num,
+ pool->pg_num_mask);
}
for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
struct ceph_entity_addr *addr =
@@ -123,26 +123,16 @@ static int osdc_show(struct seq_file *s, void *pp)
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
struct ceph_osd_request *req;
- struct ceph_osd_request_head *head;
- struct ceph_osd_op *op;
- int num_ops;
- int opcode, olen;
+ int opcode;
int i;
req = rb_entry(p, struct ceph_osd_request, r_node);
- seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
+ seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1,
- le32_to_cpu(req->r_pgid.pool),
- le16_to_cpu(req->r_pgid.ps));
+ req->r_pgid.pool, req->r_pgid.seed);
- head = req->r_request->front.iov_base;
- op = (void *)(head + 1);
-
- num_ops = le16_to_cpu(head->num_ops);
- olen = le32_to_cpu(head->object_len);
- seq_printf(s, "%.*s", olen,
- (const char *)(head->ops + num_ops));
+ seq_printf(s, "%.*s", req->r_oid_len, req->r_oid);
if (req->r_reassert_version.epoch)
seq_printf(s, "\t%u'%llu",
@@ -151,10 +141,9 @@ static int osdc_show(struct seq_file *s, void *pp)
else
seq_printf(s, "\t");
- for (i = 0; i < num_ops; i++) {
- opcode = le16_to_cpu(op->op);
+ for (i = 0; i < req->r_num_ops; i++) {
+ opcode = le16_to_cpu(req->r_request_ops[i].op);
seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
- op++;
}
seq_printf(s, "\n");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5ccf87ed8d68..2c0669fb54e3 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -9,8 +9,9 @@
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
+#ifdef CONFIG_BLOCK
#include <linux/bio.h>
-#include <linux/blkdev.h>
+#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>
@@ -97,6 +98,57 @@
#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
+static bool con_flag_valid(unsigned long con_flag)
+{
+ switch (con_flag) {
+ case CON_FLAG_LOSSYTX:
+ case CON_FLAG_KEEPALIVE_PENDING:
+ case CON_FLAG_WRITE_PENDING:
+ case CON_FLAG_SOCK_CLOSED:
+ case CON_FLAG_BACKOFF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
+{
+ BUG_ON(!con_flag_valid(con_flag));
+
+ clear_bit(con_flag, &con->flags);
+}
+
+static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
+{
+ BUG_ON(!con_flag_valid(con_flag));
+
+ set_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
+{
+ BUG_ON(!con_flag_valid(con_flag));
+
+ return test_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test_and_clear(struct ceph_connection *con,
+ unsigned long con_flag)
+{
+ BUG_ON(!con_flag_valid(con_flag));
+
+ return test_and_clear_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test_and_set(struct ceph_connection *con,
+ unsigned long con_flag)
+{
+ BUG_ON(!con_flag_valid(con_flag));
+
+ return test_and_set_bit(con_flag, &con->flags);
+}
+
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
@@ -114,7 +166,7 @@ static struct lock_class_key socket_class;
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
-static void ceph_fault(struct ceph_connection *con);
+static void con_fault(struct ceph_connection *con);
/*
* Nicely render a sockaddr as a string. An array of formatted
@@ -171,7 +223,7 @@ static void encode_my_addr(struct ceph_messenger *msgr)
*/
static struct workqueue_struct *ceph_msgr_wq;
-void _ceph_msgr_exit(void)
+static void _ceph_msgr_exit(void)
{
if (ceph_msgr_wq) {
destroy_workqueue(ceph_msgr_wq);
@@ -308,7 +360,7 @@ static void ceph_sock_write_space(struct sock *sk)
* buffer. See net/ipv4/tcp_input.c:tcp_check_space()
* and net/core/stream.c:sk_stream_write_space().
*/
- if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
+ if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -333,7 +385,7 @@ static void ceph_sock_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
- set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+ con_flag_set(con, CON_FLAG_SOCK_CLOSED);
queue_con(con);
break;
case TCP_ESTABLISHED:
@@ -474,7 +526,7 @@ static int con_close_socket(struct ceph_connection *con)
* received a socket close event before we had the chance to
* shut the socket down.
*/
- clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+ con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
con_sock_state_closed(con);
return rc;
@@ -538,11 +590,10 @@ void ceph_con_close(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr));
con->state = CON_STATE_CLOSED;
- clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
- clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
- clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
- clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
- clear_bit(CON_FLAG_BACKOFF, &con->flags);
+ con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
+ con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
+ con_flag_clear(con, CON_FLAG_WRITE_PENDING);
+ con_flag_clear(con, CON_FLAG_BACKOFF);
reset_connection(con);
con->peer_global_seq = 0;
@@ -798,7 +849,7 @@ static void prepare_write_message(struct ceph_connection *con)
/* no, queue up footer too and be done */
prepare_write_message_footer(con);
- set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@@ -819,7 +870,7 @@ static void prepare_write_ack(struct ceph_connection *con)
&con->out_temp_ack);
con->out_more = 1; /* more will follow.. eventually.. */
- set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@@ -830,7 +881,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
dout("prepare_write_keepalive %p\n", con);
con_out_kvec_reset(con);
con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
- set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@@ -873,7 +924,7 @@ static void prepare_write_banner(struct ceph_connection *con)
&con->msgr->my_enc_addr);
con->out_more = 0;
- set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
static int prepare_write_connect(struct ceph_connection *con)
@@ -923,7 +974,7 @@ static int prepare_write_connect(struct ceph_connection *con)
auth->authorizer_buf);
con->out_more = 0;
- set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_set(con, CON_FLAG_WRITE_PENDING);
return 0;
}
@@ -1643,7 +1694,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.connect_seq));
if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
- set_bit(CON_FLAG_LOSSYTX, &con->flags);
+ con_flag_set(con, CON_FLAG_LOSSYTX);
con->delay = 0; /* reset backoff memory */
@@ -2080,15 +2131,14 @@ do_next:
prepare_write_ack(con);
goto more;
}
- if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
- &con->flags)) {
+ if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
prepare_write_keepalive(con);
goto more;
}
}
/* Nothing to do! */
- clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_clear(con, CON_FLAG_WRITE_PENDING);
dout("try_write nothing else to write.\n");
ret = 0;
out:
@@ -2268,7 +2318,7 @@ static void queue_con(struct ceph_connection *con)
static bool con_sock_closed(struct ceph_connection *con)
{
- if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
+ if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
return false;
#define CASE(x) \
@@ -2295,6 +2345,41 @@ static bool con_sock_closed(struct ceph_connection *con)
return true;
}
+static bool con_backoff(struct ceph_connection *con)
+{
+ int ret;
+
+ if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
+ return false;
+
+ ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+ if (ret) {
+ dout("%s: con %p FAILED to back off %lu\n", __func__,
+ con, con->delay);
+ BUG_ON(ret == -ENOENT);
+ con_flag_set(con, CON_FLAG_BACKOFF);
+ }
+
+ return true;
+}
+
+/* Finish fault handling; con->mutex must *not* be held here */
+
+static void con_fault_finish(struct ceph_connection *con)
+{
+ /*
+ * in case we faulted due to authentication, invalidate our
+ * current tickets so that we can get new ones.
+ */
+ if (con->auth_retry && con->ops->invalidate_authorizer) {
+ dout("calling invalidate_authorizer()\n");
+ con->ops->invalidate_authorizer(con);
+ }
+
+ if (con->ops->fault)
+ con->ops->fault(con);
+}
+
/*
* Do some work on a connection. Drop a connection ref when we're done.
*/
@@ -2302,73 +2387,68 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
- int ret;
+ bool fault;
mutex_lock(&con->mutex);
-restart:
- if (con_sock_closed(con))
- goto fault;
+ while (true) {
+ int ret;
- if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
- dout("con_work %p backing off\n", con);
- ret = queue_con_delay(con, round_jiffies_relative(con->delay));
- if (ret) {
- dout("con_work %p FAILED to back off %lu\n", con,
- con->delay);
- BUG_ON(ret == -ENOENT);
- set_bit(CON_FLAG_BACKOFF, &con->flags);
+ if ((fault = con_sock_closed(con))) {
+ dout("%s: con %p SOCK_CLOSED\n", __func__, con);
+ break;
+ }
+ if (con_backoff(con)) {
+ dout("%s: con %p BACKOFF\n", __func__, con);
+ break;
+ }
+ if (con->state == CON_STATE_STANDBY) {
+ dout("%s: con %p STANDBY\n", __func__, con);
+ break;
+ }
+ if (con->state == CON_STATE_CLOSED) {
+ dout("%s: con %p CLOSED\n", __func__, con);
+ BUG_ON(con->sock);
+ break;
+ }
+ if (con->state == CON_STATE_PREOPEN) {
+ dout("%s: con %p PREOPEN\n", __func__, con);
+ BUG_ON(con->sock);
}
- goto done;
- }
- if (con->state == CON_STATE_STANDBY) {
- dout("con_work %p STANDBY\n", con);
- goto done;
- }
- if (con->state == CON_STATE_CLOSED) {
- dout("con_work %p CLOSED\n", con);
- BUG_ON(con->sock);
- goto done;
- }
- if (con->state == CON_STATE_PREOPEN) {
- dout("con_work OPENING\n");
- BUG_ON(con->sock);
- }
+ ret = try_read(con);
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ continue;
+ con->error_msg = "socket error on read";
+ fault = true;
+ break;
+ }
- ret = try_read(con);
- if (ret == -EAGAIN)
- goto restart;
- if (ret < 0) {
- con->error_msg = "socket error on read";
- goto fault;
- }
+ ret = try_write(con);
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ continue;
+ con->error_msg = "socket error on write";
+ fault = true;
+ }
- ret = try_write(con);
- if (ret == -EAGAIN)
- goto restart;
- if (ret < 0) {
- con->error_msg = "socket error on write";
- goto fault;
+ break; /* If we make it to here, we're done */
}
-
-done:
+ if (fault)
+ con_fault(con);
mutex_unlock(&con->mutex);
-done_unlocked:
- con->ops->put(con);
- return;
-fault:
- ceph_fault(con); /* error/fault path */
- goto done_unlocked;
-}
+ if (fault)
+ con_fault_finish(con);
+ con->ops->put(con);
+}
/*
* Generic error/fault handler. A retry mechanism is used with
* exponential backoff
*/
-static void ceph_fault(struct ceph_connection *con)
- __releases(con->mutex)
+static void con_fault(struct ceph_connection *con)
{
pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
@@ -2381,10 +2461,10 @@ static void ceph_fault(struct ceph_connection *con)
con_close_socket(con);
- if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
+ if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
dout("fault on LOSSYTX channel, marking CLOSED\n");
con->state = CON_STATE_CLOSED;
- goto out_unlock;
+ return;
}
if (con->in_msg) {
@@ -2401,9 +2481,9 @@ static void ceph_fault(struct ceph_connection *con)
/* If there are no messages queued or keepalive pending, place
* the connection in a STANDBY state */
if (list_empty(&con->out_queue) &&
- !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
+ !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
- clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con_flag_clear(con, CON_FLAG_WRITE_PENDING);
con->state = CON_STATE_STANDBY;
} else {
/* retry after a delay. */
@@ -2412,23 +2492,9 @@ static void ceph_fault(struct ceph_connection *con)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
con->delay *= 2;
- set_bit(CON_FLAG_BACKOFF, &con->flags);
+ con_flag_set(con, CON_FLAG_BACKOFF);
queue_con(con);
}
-
-out_unlock:
- mutex_unlock(&con->mutex);
- /*
- * in case we faulted due to authentication, invalidate our
- * current tickets so that we can get new ones.
- */
- if (con->auth_retry && con->ops->invalidate_authorizer) {
- dout("calling invalidate_authorizer()\n");
- con->ops->invalidate_authorizer(con);
- }
-
- if (con->ops->fault)
- con->ops->fault(con);
}
@@ -2469,8 +2535,8 @@ static void clear_standby(struct ceph_connection *con)
dout("clear_standby %p and ++connect_seq\n", con);
con->state = CON_STATE_PREOPEN;
con->connect_seq++;
- WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
- WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
+ WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
+ WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
}
}
@@ -2511,7 +2577,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
/* if there wasn't anything waiting to send before, queue
* new work */
- if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
+ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
@@ -2600,8 +2666,8 @@ void ceph_con_keepalive(struct ceph_connection *con)
mutex_lock(&con->mutex);
clear_standby(con);
mutex_unlock(&con->mutex);
- if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
- test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
+ if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
+ con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
@@ -2651,9 +2717,11 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
m->page_alignment = 0;
m->pages = NULL;
m->pagelist = NULL;
+#ifdef CONFIG_BLOCK
m->bio = NULL;
m->bio_iter = NULL;
m->bio_seg = 0;
+#endif /* CONFIG_BLOCK */
m->trail = NULL;
/* front */
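The con_flag_*() helpers introduced above wrap the raw bit operations on con->flags with a BUG_ON() that the flag number is one of the known connection flags, so a stray constant can no longer silently toggle an unrelated bit. A small sketch of the same validate-then-bitop idea for an unrelated, made-up flag set:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

#define MY_FLAG_DIRTY   0               /* hypothetical flag numbers */
#define MY_FLAG_BUSY    1

struct my_obj {
    unsigned long flags;
};

static bool my_flag_valid(unsigned long flag)
{
    switch (flag) {
    case MY_FLAG_DIRTY:
    case MY_FLAG_BUSY:
        return true;
    default:
        return false;
    }
}

/* Centralising the sanity check keeps every set/clear/test call site
 * honest about which flags are legal for this object. */
static void my_flag_set(struct my_obj *obj, unsigned long flag)
{
    BUG_ON(!my_flag_valid(flag));
    set_bit(flag, &obj->flags);
}

static bool my_flag_test_and_clear(struct my_obj *obj, unsigned long flag)
{
    BUG_ON(!my_flag_valid(flag));
    return test_and_clear_bit(flag, &obj->flags);
}

Call sites then read as my_flag_set(obj, MY_FLAG_DIRTY) rather than set_bit(MY_FLAG_DIRTY, &obj->flags), matching the conversions in the hunks above.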
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 812eb3b46c1f..aef5b1062bee 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -697,7 +697,7 @@ int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
u32 pool, u64 snapid)
{
return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
- pool, snapid, 0, 0);
+ pool, snapid, NULL, 0);
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index eb9a44478764..d730dd4d8eb2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -23,7 +23,7 @@
static const struct ceph_connection_operations osd_con_ops;
-static void send_queued(struct ceph_osd_client *osdc);
+static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -32,64 +32,12 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
static void __send_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
-static int op_needs_trail(int op)
-{
- switch (op) {
- case CEPH_OSD_OP_GETXATTR:
- case CEPH_OSD_OP_SETXATTR:
- case CEPH_OSD_OP_CMPXATTR:
- case CEPH_OSD_OP_CALL:
- case CEPH_OSD_OP_NOTIFY:
- return 1;
- default:
- return 0;
- }
-}
-
static int op_has_extent(int op)
{
return (op == CEPH_OSD_OP_READ ||
op == CEPH_OSD_OP_WRITE);
}
-int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
- struct ceph_file_layout *layout,
- u64 snapid,
- u64 off, u64 *plen, u64 *bno,
- struct ceph_osd_request *req,
- struct ceph_osd_req_op *op)
-{
- struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
- u64 orig_len = *plen;
- u64 objoff, objlen; /* extent in object */
- int r;
-
- reqhead->snapid = cpu_to_le64(snapid);
-
- /* object extent? */
- r = ceph_calc_file_object_mapping(layout, off, plen, bno,
- &objoff, &objlen);
- if (r < 0)
- return r;
- if (*plen < orig_len)
- dout(" skipping last %llu, final file extent %llu~%llu\n",
- orig_len - *plen, off, *plen);
-
- if (op_has_extent(op->op)) {
- op->extent.offset = objoff;
- op->extent.length = objlen;
- }
- req->r_num_pages = calc_pages_for(off, *plen);
- req->r_page_alignment = off & ~PAGE_MASK;
- if (op->op == CEPH_OSD_OP_WRITE)
- op->payload_len = *plen;
-
- dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
- *bno, objoff, objlen, req->r_num_pages);
- return 0;
-}
-EXPORT_SYMBOL(ceph_calc_raw_layout);
-
/*
* Implement client access to distributed object storage cluster.
*
@@ -115,20 +63,48 @@ EXPORT_SYMBOL(ceph_calc_raw_layout);
*
* fill osd op in request message.
*/
-static int calc_layout(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
+static int calc_layout(struct ceph_vino vino,
struct ceph_file_layout *layout,
u64 off, u64 *plen,
struct ceph_osd_request *req,
struct ceph_osd_req_op *op)
{
- u64 bno;
+ u64 orig_len = *plen;
+ u64 bno = 0;
+ u64 objoff = 0;
+ u64 objlen = 0;
int r;
- r = ceph_calc_raw_layout(osdc, layout, vino.snap, off,
- plen, &bno, req, op);
+ /* object extent? */
+ r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno,
+ &objoff, &objlen);
if (r < 0)
return r;
+ if (objlen < orig_len) {
+ *plen = objlen;
+ dout(" skipping last %llu, final file extent %llu~%llu\n",
+ orig_len - *plen, off, *plen);
+ }
+
+ if (op_has_extent(op->op)) {
+ u32 osize = le32_to_cpu(layout->fl_object_size);
+ op->extent.offset = objoff;
+ op->extent.length = objlen;
+ if (op->extent.truncate_size <= off - objoff) {
+ op->extent.truncate_size = 0;
+ } else {
+ op->extent.truncate_size -= off - objoff;
+ if (op->extent.truncate_size > osize)
+ op->extent.truncate_size = osize;
+ }
+ }
+ req->r_num_pages = calc_pages_for(off, *plen);
+ req->r_page_alignment = off & ~PAGE_MASK;
+ if (op->op == CEPH_OSD_OP_WRITE)
+ op->payload_len = *plen;
+
+ dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
+ bno, objoff, objlen, req->r_num_pages);
snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
req->r_oid_len = strlen(req->r_oid);
@@ -148,25 +124,19 @@ void ceph_osdc_release_request(struct kref *kref)
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_con_filling_msg) {
- dout("%s revoking pages %p from con %p\n", __func__,
- req->r_pages, req->r_con_filling_msg);
+ dout("%s revoking msg %p from con %p\n", __func__,
+ req->r_reply, req->r_con_filling_msg);
ceph_msg_revoke_incoming(req->r_reply);
req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
+ req->r_con_filling_msg = NULL;
}
if (req->r_reply)
ceph_msg_put(req->r_reply);
if (req->r_own_pages)
ceph_release_page_vector(req->r_pages,
req->r_num_pages);
-#ifdef CONFIG_BLOCK
- if (req->r_bio)
- bio_put(req->r_bio);
-#endif
ceph_put_snap_context(req->r_snapc);
- if (req->r_trail) {
- ceph_pagelist_release(req->r_trail);
- kfree(req->r_trail);
- }
+ ceph_pagelist_release(&req->r_trail);
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else
@@ -174,37 +144,25 @@ void ceph_osdc_release_request(struct kref *kref)
}
EXPORT_SYMBOL(ceph_osdc_release_request);
-static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
-{
- int i = 0;
-
- if (needs_trail)
- *needs_trail = 0;
- while (ops[i].op) {
- if (needs_trail && op_needs_trail(ops[i].op))
- *needs_trail = 1;
- i++;
- }
-
- return i;
-}
-
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
- int flags,
struct ceph_snap_context *snapc,
- struct ceph_osd_req_op *ops,
+ unsigned int num_ops,
bool use_mempool,
- gfp_t gfp_flags,
- struct page **pages,
- struct bio *bio)
+ gfp_t gfp_flags)
{
struct ceph_osd_request *req;
struct ceph_msg *msg;
- int needs_trail;
- int num_op = get_num_ops(ops, &needs_trail);
- size_t msg_size = sizeof(struct ceph_osd_request_head);
-
- msg_size += num_op*sizeof(struct ceph_osd_op);
+ size_t msg_size;
+
+ msg_size = 4 + 4 + 8 + 8 + 4+8;
+ msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
+ msg_size += 1 + 8 + 4 + 4; /* pg_t */
+ msg_size += 4 + MAX_OBJ_NAME_SIZE;
+ msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
+ msg_size += 8; /* snapid */
+ msg_size += 8; /* snap_seq */
+ msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
+ msg_size += 4;
if (use_mempool) {
req = mempool_alloc(osdc->req_mempool, gfp_flags);
@@ -228,10 +186,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
INIT_LIST_HEAD(&req->r_req_lru_item);
INIT_LIST_HEAD(&req->r_osd_item);
- req->r_flags = flags;
-
- WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
-
/* create reply message */
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
@@ -244,20 +198,9 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
}
req->r_reply = msg;
- /* allocate space for the trailing data */
- if (needs_trail) {
- req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
- if (!req->r_trail) {
- ceph_osdc_put_request(req);
- return NULL;
- }
- ceph_pagelist_init(req->r_trail);
- }
+ ceph_pagelist_init(&req->r_trail);
/* create request message; allow space for oid */
- msg_size += MAX_OBJ_NAME_SIZE;
- if (snapc)
- msg_size += sizeof(u64) * snapc->num_snaps;
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
@@ -270,13 +213,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
- req->r_pages = pages;
-#ifdef CONFIG_BLOCK
- if (bio) {
- req->r_bio = bio;
- bio_get(req->r_bio);
- }
-#endif
return req;
}
@@ -289,6 +225,8 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
dst->op = cpu_to_le16(src->op);
switch (src->op) {
+ case CEPH_OSD_OP_STAT:
+ break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
dst->extent.offset =
@@ -300,52 +238,20 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
dst->extent.truncate_seq =
cpu_to_le32(src->extent.truncate_seq);
break;
-
- case CEPH_OSD_OP_GETXATTR:
- case CEPH_OSD_OP_SETXATTR:
- case CEPH_OSD_OP_CMPXATTR:
- BUG_ON(!req->r_trail);
-
- dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
- dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
- dst->xattr.cmp_op = src->xattr.cmp_op;
- dst->xattr.cmp_mode = src->xattr.cmp_mode;
- ceph_pagelist_append(req->r_trail, src->xattr.name,
- src->xattr.name_len);
- ceph_pagelist_append(req->r_trail, src->xattr.val,
- src->xattr.value_len);
- break;
case CEPH_OSD_OP_CALL:
- BUG_ON(!req->r_trail);
-
dst->cls.class_len = src->cls.class_len;
dst->cls.method_len = src->cls.method_len;
dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
- ceph_pagelist_append(req->r_trail, src->cls.class_name,
+ ceph_pagelist_append(&req->r_trail, src->cls.class_name,
src->cls.class_len);
- ceph_pagelist_append(req->r_trail, src->cls.method_name,
+ ceph_pagelist_append(&req->r_trail, src->cls.method_name,
src->cls.method_len);
- ceph_pagelist_append(req->r_trail, src->cls.indata,
+ ceph_pagelist_append(&req->r_trail, src->cls.indata,
src->cls.indata_len);
break;
- case CEPH_OSD_OP_ROLLBACK:
- dst->snap.snapid = cpu_to_le64(src->snap.snapid);
- break;
case CEPH_OSD_OP_STARTSYNC:
break;
- case CEPH_OSD_OP_NOTIFY:
- {
- __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
- __le32 timeout = cpu_to_le32(src->watch.timeout);
-
- BUG_ON(!req->r_trail);
-
- ceph_pagelist_append(req->r_trail,
- &prot_ver, sizeof(prot_ver));
- ceph_pagelist_append(req->r_trail,
- &timeout, sizeof(timeout));
- }
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
dst->watch.cookie = cpu_to_le64(src->watch.cookie);
@@ -356,6 +262,64 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
pr_err("unrecognized osd opcode %d\n", dst->op);
WARN_ON(1);
break;
+ case CEPH_OSD_OP_MAPEXT:
+ case CEPH_OSD_OP_MASKTRUNC:
+ case CEPH_OSD_OP_SPARSE_READ:
+ case CEPH_OSD_OP_NOTIFY:
+ case CEPH_OSD_OP_ASSERT_VER:
+ case CEPH_OSD_OP_WRITEFULL:
+ case CEPH_OSD_OP_TRUNCATE:
+ case CEPH_OSD_OP_ZERO:
+ case CEPH_OSD_OP_DELETE:
+ case CEPH_OSD_OP_APPEND:
+ case CEPH_OSD_OP_SETTRUNC:
+ case CEPH_OSD_OP_TRIMTRUNC:
+ case CEPH_OSD_OP_TMAPUP:
+ case CEPH_OSD_OP_TMAPPUT:
+ case CEPH_OSD_OP_TMAPGET:
+ case CEPH_OSD_OP_CREATE:
+ case CEPH_OSD_OP_ROLLBACK:
+ case CEPH_OSD_OP_OMAPGETKEYS:
+ case CEPH_OSD_OP_OMAPGETVALS:
+ case CEPH_OSD_OP_OMAPGETHEADER:
+ case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
+ case CEPH_OSD_OP_MODE_RD:
+ case CEPH_OSD_OP_OMAPSETVALS:
+ case CEPH_OSD_OP_OMAPSETHEADER:
+ case CEPH_OSD_OP_OMAPCLEAR:
+ case CEPH_OSD_OP_OMAPRMKEYS:
+ case CEPH_OSD_OP_OMAP_CMP:
+ case CEPH_OSD_OP_CLONERANGE:
+ case CEPH_OSD_OP_ASSERT_SRC_VERSION:
+ case CEPH_OSD_OP_SRC_CMPXATTR:
+ case CEPH_OSD_OP_GETXATTR:
+ case CEPH_OSD_OP_GETXATTRS:
+ case CEPH_OSD_OP_CMPXATTR:
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_SETXATTRS:
+ case CEPH_OSD_OP_RESETXATTRS:
+ case CEPH_OSD_OP_RMXATTR:
+ case CEPH_OSD_OP_PULL:
+ case CEPH_OSD_OP_PUSH:
+ case CEPH_OSD_OP_BALANCEREADS:
+ case CEPH_OSD_OP_UNBALANCEREADS:
+ case CEPH_OSD_OP_SCRUB:
+ case CEPH_OSD_OP_SCRUB_RESERVE:
+ case CEPH_OSD_OP_SCRUB_UNRESERVE:
+ case CEPH_OSD_OP_SCRUB_STOP:
+ case CEPH_OSD_OP_SCRUB_MAP:
+ case CEPH_OSD_OP_WRLOCK:
+ case CEPH_OSD_OP_WRUNLOCK:
+ case CEPH_OSD_OP_RDLOCK:
+ case CEPH_OSD_OP_RDUNLOCK:
+ case CEPH_OSD_OP_UPLOCK:
+ case CEPH_OSD_OP_DNLOCK:
+ case CEPH_OSD_OP_PGLS:
+ case CEPH_OSD_OP_PGLS_FILTER:
+ pr_err("unsupported osd opcode %s\n",
+ ceph_osd_op_name(dst->op));
+ WARN_ON(1);
+ break;
}
dst->payload_len = cpu_to_le32(src->payload_len);
}
@@ -365,75 +329,95 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
*
*/
void ceph_osdc_build_request(struct ceph_osd_request *req,
- u64 off, u64 *plen,
+ u64 off, u64 len, unsigned int num_ops,
struct ceph_osd_req_op *src_ops,
- struct ceph_snap_context *snapc,
- struct timespec *mtime,
- const char *oid,
- int oid_len)
+ struct ceph_snap_context *snapc, u64 snap_id,
+ struct timespec *mtime)
{
struct ceph_msg *msg = req->r_request;
- struct ceph_osd_request_head *head;
struct ceph_osd_req_op *src_op;
- struct ceph_osd_op *op;
void *p;
- int num_op = get_num_ops(src_ops, NULL);
- size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+ size_t msg_size;
int flags = req->r_flags;
- u64 data_len = 0;
+ u64 data_len;
int i;
- head = msg->front.iov_base;
- op = (void *)(head + 1);
- p = (void *)(op + num_op);
-
+ req->r_num_ops = num_ops;
+ req->r_snapid = snap_id;
req->r_snapc = ceph_get_snap_context(snapc);
- head->client_inc = cpu_to_le32(1); /* always, for now. */
- head->flags = cpu_to_le32(flags);
- if (flags & CEPH_OSD_FLAG_WRITE)
- ceph_encode_timespec(&head->mtime, mtime);
- head->num_ops = cpu_to_le16(num_op);
-
-
- /* fill in oid */
- head->object_len = cpu_to_le32(oid_len);
- memcpy(p, oid, oid_len);
- p += oid_len;
+ /* encode request */
+ msg->hdr.version = cpu_to_le16(4);
+ p = msg->front.iov_base;
+ ceph_encode_32(&p, 1); /* client_inc is always 1 */
+ req->r_request_osdmap_epoch = p;
+ p += 4;
+ req->r_request_flags = p;
+ p += 4;
+ if (req->r_flags & CEPH_OSD_FLAG_WRITE)
+ ceph_encode_timespec(p, mtime);
+ p += sizeof(struct ceph_timespec);
+ req->r_request_reassert_version = p;
+ p += sizeof(struct ceph_eversion); /* will get filled in */
+
+ /* oloc */
+ ceph_encode_8(&p, 4);
+ ceph_encode_8(&p, 4);
+ ceph_encode_32(&p, 8 + 4 + 4);
+ req->r_request_pool = p;
+ p += 8;
+ ceph_encode_32(&p, -1); /* preferred */
+ ceph_encode_32(&p, 0); /* key len */
+
+ ceph_encode_8(&p, 1);
+ req->r_request_pgid = p;
+ p += 8 + 4;
+ ceph_encode_32(&p, -1); /* preferred */
+
+ /* oid */
+ ceph_encode_32(&p, req->r_oid_len);
+ memcpy(p, req->r_oid, req->r_oid_len);
+ dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
+ p += req->r_oid_len;
+
+ /* ops */
+ ceph_encode_16(&p, num_ops);
src_op = src_ops;
- while (src_op->op) {
- osd_req_encode_op(req, op, src_op);
- src_op++;
- op++;
+ req->r_request_ops = p;
+ for (i = 0; i < num_ops; i++, src_op++) {
+ osd_req_encode_op(req, p, src_op);
+ p += sizeof(struct ceph_osd_op);
}
- if (req->r_trail)
- data_len += req->r_trail->length;
-
- if (snapc) {
- head->snap_seq = cpu_to_le64(snapc->seq);
- head->num_snaps = cpu_to_le32(snapc->num_snaps);
+ /* snaps */
+ ceph_encode_64(&p, req->r_snapid);
+ ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
+ ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
+ if (req->r_snapc) {
for (i = 0; i < snapc->num_snaps; i++) {
- put_unaligned_le64(snapc->snaps[i], p);
- p += sizeof(u64);
+ ceph_encode_64(&p, req->r_snapc->snaps[i]);
}
}
+ req->r_request_attempts = p;
+ p += 4;
+
+ data_len = req->r_trail.length;
if (flags & CEPH_OSD_FLAG_WRITE) {
req->r_request->hdr.data_off = cpu_to_le16(off);
- req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
- } else if (data_len) {
- req->r_request->hdr.data_off = 0;
- req->r_request->hdr.data_len = cpu_to_le32(data_len);
+ data_len += len;
}
-
+ req->r_request->hdr.data_len = cpu_to_le32(data_len);
req->r_request->page_alignment = req->r_page_alignment;
BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
msg_size = p - msg->front.iov_base;
msg->front.iov_len = msg_size;
msg->hdr.front_len = cpu_to_le32(msg_size);
+
+ dout("build_request msg_size was %d num_ops %d\n", (int)msg_size,
+ num_ops);
return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);
@@ -459,34 +443,33 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
u32 truncate_seq,
u64 truncate_size,
struct timespec *mtime,
- bool use_mempool, int num_reply,
+ bool use_mempool,
int page_align)
{
- struct ceph_osd_req_op ops[3];
+ struct ceph_osd_req_op ops[2];
struct ceph_osd_request *req;
+ unsigned int num_op = 1;
int r;
+ memset(&ops, 0, sizeof ops);
+
ops[0].op = opcode;
ops[0].extent.truncate_seq = truncate_seq;
ops[0].extent.truncate_size = truncate_size;
- ops[0].payload_len = 0;
if (do_sync) {
ops[1].op = CEPH_OSD_OP_STARTSYNC;
- ops[1].payload_len = 0;
- ops[2].op = 0;
- } else
- ops[1].op = 0;
-
- req = ceph_osdc_alloc_request(osdc, flags,
- snapc, ops,
- use_mempool,
- GFP_NOFS, NULL, NULL);
+ num_op++;
+ }
+
+ req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
+ GFP_NOFS);
if (!req)
return ERR_PTR(-ENOMEM);
+ req->r_flags = flags;
/* calculate max write size */
- r = calc_layout(osdc, vino, layout, off, plen, req, ops);
+ r = calc_layout(vino, layout, off, plen, req, ops);
if (r < 0)
return ERR_PTR(r);
req->r_file_layout = *layout; /* keep a copy */
@@ -496,10 +479,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
req->r_num_pages = calc_pages_for(page_align, *plen);
req->r_page_alignment = page_align;
- ceph_osdc_build_request(req, off, plen, ops,
- snapc,
- mtime,
- req->r_oid, req->r_oid_len);
+ ceph_osdc_build_request(req, off, *plen, num_op, ops,
+ snapc, vino.snap, mtime);
return req;
}
@@ -623,8 +604,8 @@ static void osd_reset(struct ceph_connection *con)
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
__kick_osd_requests(osdc, osd);
+ __send_queued(osdc);
mutex_unlock(&osdc->request_mutex);
- send_queued(osdc);
up_read(&osdc->map_sem);
}
@@ -739,31 +720,35 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
*/
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
- struct ceph_osd_request *req;
- int ret = 0;
+ struct ceph_entity_addr *peer_addr;
dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
if (list_empty(&osd->o_requests) &&
list_empty(&osd->o_linger_requests)) {
__remove_osd(osdc, osd);
- ret = -ENODEV;
- } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
- &osd->o_con.peer_addr,
- sizeof(osd->o_con.peer_addr)) == 0 &&
- !ceph_con_opened(&osd->o_con)) {
+
+ return -ENODEV;
+ }
+
+ peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
+ if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
+ !ceph_con_opened(&osd->o_con)) {
+ struct ceph_osd_request *req;
+
dout(" osd addr hasn't changed and connection never opened,"
" letting msgr retry");
/* touch each r_stamp for handle_timeout()'s benefit */
list_for_each_entry(req, &osd->o_requests, r_osd_item)
req->r_stamp = jiffies;
- ret = -EAGAIN;
- } else {
- ceph_con_close(&osd->o_con);
- ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
- &osdc->osdmap->osd_addr[osd->o_osd]);
- osd->o_incarnation++;
+
+ return -EAGAIN;
}
- return ret;
+
+ ceph_con_close(&osd->o_con);
+ ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
+ osd->o_incarnation++;
+
+ return 0;
}
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
@@ -961,20 +946,18 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
static int __map_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req, int force_resend)
{
- struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
struct ceph_pg pgid;
int acting[CEPH_PG_MAX_SIZE];
int o = -1, num = 0;
int err;
dout("map_request %p tid %lld\n", req, req->r_tid);
- err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
+ err = ceph_calc_object_layout(&pgid, req->r_oid,
&req->r_file_layout, osdc->osdmap);
if (err) {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
return err;
}
- pgid = reqhead->layout.ol_pgid;
req->r_pgid = pgid;
err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
@@ -991,8 +974,8 @@ static int __map_request(struct ceph_osd_client *osdc,
(req->r_osd == NULL && o == -1))
return 0; /* no change */
- dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
- req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
+ dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
+ req->r_tid, pgid.pool, pgid.seed, o,
req->r_osd ? req->r_osd->o_osd : -1);
/* record full pg acting set */
@@ -1041,15 +1024,22 @@ out:
static void __send_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
- struct ceph_osd_request_head *reqhead;
-
- dout("send_request %p tid %llu to osd%d flags %d\n",
- req, req->r_tid, req->r_osd->o_osd, req->r_flags);
+ void *p;
- reqhead = req->r_request->front.iov_base;
- reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
- reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
- reqhead->reassert_version = req->r_reassert_version;
+ dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
+ req, req->r_tid, req->r_osd->o_osd, req->r_flags,
+ (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
+
+ /* fill in message content that changes each time we send it */
+ put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
+ put_unaligned_le32(req->r_flags, req->r_request_flags);
+ put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
+ p = req->r_request_pgid;
+ ceph_encode_64(&p, req->r_pgid.pool);
+ ceph_encode_32(&p, req->r_pgid.seed);
+ put_unaligned_le64(1, req->r_request_attempts); /* FIXME */
+ memcpy(req->r_request_reassert_version, &req->r_reassert_version,
+ sizeof(req->r_reassert_version));
req->r_stamp = jiffies;
list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
@@ -1062,16 +1052,13 @@ static void __send_request(struct ceph_osd_client *osdc,
/*
* Send any requests in the queue (req_unsent).
*/
-static void send_queued(struct ceph_osd_client *osdc)
+static void __send_queued(struct ceph_osd_client *osdc)
{
struct ceph_osd_request *req, *tmp;
- dout("send_queued\n");
- mutex_lock(&osdc->request_mutex);
- list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
+ dout("__send_queued\n");
+ list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
__send_request(osdc, req);
- }
- mutex_unlock(&osdc->request_mutex);
}
/*
@@ -1123,8 +1110,8 @@ static void handle_timeout(struct work_struct *work)
}
__schedule_osd_timeout(osdc);
+ __send_queued(osdc);
mutex_unlock(&osdc->request_mutex);
- send_queued(osdc);
up_read(&osdc->map_sem);
}
@@ -1152,6 +1139,26 @@ static void complete_request(struct ceph_osd_request *req)
complete_all(&req->r_safe_completion); /* fsync waiter */
}
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid)
+{
+ __u8 v;
+
+ ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad);
+ v = ceph_decode_8(p);
+ if (v > 1) {
+ pr_warning("do not understand pg encoding %d > 1", v);
+ return -EINVAL;
+ }
+ pgid->pool = ceph_decode_64(p);
+ pgid->seed = ceph_decode_32(p);
+ *p += 4;
+ return 0;
+
+bad:
+ pr_warning("incomplete pg encoding");
+ return -EINVAL;
+}
+
/*
* handle osd op reply. either call the callback if it is specified,
* or do the completion to wake up the waiting thread.
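/*
 * Illustrative sketch, not part of the patch: __decode_pgid() above expects
 * the on-wire pgid layout "u8 version, le64 pool, le32 seed, le32 preferred
 * (ignored)".  A standalone little-endian walk over the same 17-byte layout;
 * the helper and struct names here are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct sketch_pg {		/* mirrors the new struct ceph_pg: 64-bit pool, 32-bit seed */
	uint64_t pool;
	uint32_t seed;
};

static uint64_t get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* same shape as __decode_pgid(): version, pool, seed, skipped "preferred" */
static int sketch_decode_pgid(const uint8_t *p, size_t len, struct sketch_pg *pgid)
{
	if (len < 1 + 8 + 4 + 4)
		return -1;	/* mirrors the ceph_decode_need() length check */
	if (p[0] > 1)
		return -1;	/* only pg encoding version 1 is understood */
	pgid->pool = get_le64(p + 1);
	pgid->seed = get_le32(p + 9);
	/* trailing le32 "preferred" at p + 13 is deliberately ignored */
	return 0;
}

int main(void)
{
	/* v1, pool 2, seed 0x2a, preferred -1 */
	const uint8_t buf[17] = { 1, 2,0,0,0,0,0,0,0, 0x2a,0,0,0, 0xff,0xff,0xff,0xff };
	struct sketch_pg pg;

	if (!sketch_decode_pgid(buf, sizeof(buf), &pg))
		printf("pg %llu.%x\n", (unsigned long long)pg.pool, pg.seed);
	return 0;
}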
@@ -1159,22 +1166,42 @@ static void complete_request(struct ceph_osd_request *req)
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
struct ceph_connection *con)
{
- struct ceph_osd_reply_head *rhead = msg->front.iov_base;
+ void *p, *end;
struct ceph_osd_request *req;
u64 tid;
- int numops, object_len, flags;
+ int object_len;
+ int numops, payload_len, flags;
s32 result;
+ s32 retry_attempt;
+ struct ceph_pg pg;
+ int err;
+ u32 reassert_epoch;
+ u64 reassert_version;
+ u32 osdmap_epoch;
+ int i;
tid = le64_to_cpu(msg->hdr.tid);
- if (msg->front.iov_len < sizeof(*rhead))
- goto bad;
- numops = le32_to_cpu(rhead->num_ops);
- object_len = le32_to_cpu(rhead->object_len);
- result = le32_to_cpu(rhead->result);
- if (msg->front.iov_len != sizeof(*rhead) + object_len +
- numops * sizeof(struct ceph_osd_op))
+ dout("handle_reply %p tid %llu\n", msg, tid);
+
+ p = msg->front.iov_base;
+ end = p + msg->front.iov_len;
+
+ ceph_decode_need(&p, end, 4, bad);
+ object_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, object_len, bad);
+ p += object_len;
+
+ err = __decode_pgid(&p, end, &pg);
+ if (err)
goto bad;
- dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
+
+ ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
+ flags = ceph_decode_64(&p);
+ result = ceph_decode_32(&p);
+ reassert_epoch = ceph_decode_32(&p);
+ reassert_version = ceph_decode_64(&p);
+ osdmap_epoch = ceph_decode_32(&p);
+
/* lookup */
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
@@ -1184,7 +1211,38 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
return;
}
ceph_osdc_get_request(req);
- flags = le32_to_cpu(rhead->flags);
+
+ dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
+ req, result);
+
+ ceph_decode_need(&p, end, 4, bad);
+ numops = ceph_decode_32(&p);
+ if (numops > CEPH_OSD_MAX_OP)
+ goto bad_put;
+ if (numops != req->r_num_ops)
+ goto bad_put;
+ payload_len = 0;
+ ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
+ for (i = 0; i < numops; i++) {
+ struct ceph_osd_op *op = p;
+ int len;
+
+ len = le32_to_cpu(op->payload_len);
+ req->r_reply_op_len[i] = len;
+ dout(" op %d has %d bytes\n", i, len);
+ payload_len += len;
+ p += sizeof(*op);
+ }
+ if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
+ pr_warning("sum of op payload lens %d != data_len %d",
+ payload_len, le32_to_cpu(msg->hdr.data_len));
+ goto bad_put;
+ }
+
+ ceph_decode_need(&p, end, 4 + numops * 4, bad);
+ retry_attempt = ceph_decode_32(&p);
+ for (i = 0; i < numops; i++)
+ req->r_reply_op_result[i] = ceph_decode_32(&p);
/*
* if this connection filled our message, drop our reference now, to
@@ -1199,7 +1257,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
if (!req->r_got_reply) {
unsigned int bytes;
- req->r_result = le32_to_cpu(rhead->result);
+ req->r_result = result;
bytes = le32_to_cpu(msg->hdr.data_len);
dout("handle_reply result %d bytes %d\n", req->r_result,
bytes);
@@ -1207,7 +1265,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
req->r_result = bytes;
/* in case this is a write and we need to replay, */
- req->r_reassert_version = rhead->reassert_version;
+ req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
+ req->r_reassert_version.version = cpu_to_le64(reassert_version);
req->r_got_reply = 1;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
@@ -1242,10 +1301,11 @@ done:
ceph_osdc_put_request(req);
return;
+bad_put:
+ ceph_osdc_put_request(req);
bad:
- pr_err("corrupt osd_op_reply got %d %d expected %d\n",
- (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
- (int)sizeof(*rhead));
+ pr_err("corrupt osd_op_reply got %d %d\n",
+ (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
ceph_msg_dump(msg);
}
@@ -1462,7 +1522,9 @@ done:
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
ceph_monc_request_next_osdmap(&osdc->client->monc);
- send_queued(osdc);
+ mutex_lock(&osdc->request_mutex);
+ __send_queued(osdc);
+ mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
wake_up_all(&osdc->client->auth_wq);
return;
@@ -1556,8 +1618,7 @@ static void __remove_event(struct ceph_osd_event *event)
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
void (*event_cb)(u64, u64, u8, void *),
- int one_shot, void *data,
- struct ceph_osd_event **pevent)
+ void *data, struct ceph_osd_event **pevent)
{
struct ceph_osd_event *event;
@@ -1567,14 +1628,13 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
dout("create_event %p\n", event);
event->cb = event_cb;
- event->one_shot = one_shot;
+ event->one_shot = 0;
event->data = data;
event->osdc = osdc;
INIT_LIST_HEAD(&event->osd_node);
RB_CLEAR_NODE(&event->node);
kref_init(&event->kref); /* one ref for us */
kref_get(&event->kref); /* one ref for the caller */
- init_completion(&event->completion);
spin_lock(&osdc->event_lock);
event->cookie = ++osdc->event_count;
@@ -1610,7 +1670,6 @@ static void do_event_work(struct work_struct *work)
dout("do_event_work completing %p\n", event);
event->cb(ver, notify_id, opcode, event->data);
- complete(&event->completion);
dout("do_event_work completed %p\n", event);
ceph_osdc_put_event(event);
kfree(event_work);
@@ -1620,7 +1679,8 @@ static void do_event_work(struct work_struct *work)
/*
* Process osd watch notifications
*/
-void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+static void handle_watch_notify(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg)
{
void *p, *end;
u8 proto_ver;
@@ -1641,9 +1701,8 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
spin_lock(&osdc->event_lock);
event = __find_event(osdc, cookie);
if (event) {
+ BUG_ON(event->one_shot);
get_event(event);
- if (event->one_shot)
- __remove_event(event);
}
spin_unlock(&osdc->event_lock);
dout("handle_watch_notify cookie %lld ver %lld event %p\n",
@@ -1668,7 +1727,6 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
return;
done_err:
- complete(&event->completion);
ceph_osdc_put_event(event);
return;
@@ -1677,21 +1735,6 @@ bad:
return;
}
-int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
-{
- int err;
-
- dout("wait_event %p\n", event);
- err = wait_for_completion_interruptible_timeout(&event->completion,
- timeout * HZ);
- ceph_osdc_put_event(event);
- if (err > 0)
- err = 0;
- dout("wait_event %p returns %d\n", event, err);
- return err;
-}
-EXPORT_SYMBOL(ceph_osdc_wait_event);
-
/*
* Register request, send initial attempt.
*/
@@ -1706,7 +1749,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
#ifdef CONFIG_BLOCK
req->r_request->bio = req->r_bio;
#endif
- req->r_request->trail = req->r_trail;
+ req->r_request->trail = &req->r_trail;
register_request(osdc, req);
@@ -1865,7 +1908,6 @@ out_mempool:
out:
return err;
}
-EXPORT_SYMBOL(ceph_osdc_init);
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
@@ -1882,7 +1924,6 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
-EXPORT_SYMBOL(ceph_osdc_stop);
/*
* Read some contiguous pages. If we cross a stripe boundary, shorten
@@ -1902,7 +1943,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
- false, 1, page_align);
+ false, page_align);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1931,8 +1972,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
struct timespec *mtime,
- struct page **pages, int num_pages,
- int flags, int do_sync, bool nofail)
+ struct page **pages, int num_pages)
{
struct ceph_osd_request *req;
int rc = 0;
@@ -1941,11 +1981,10 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
BUG_ON(vino.snap != CEPH_NOSNAP);
req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
CEPH_OSD_OP_WRITE,
- flags | CEPH_OSD_FLAG_ONDISK |
- CEPH_OSD_FLAG_WRITE,
- snapc, do_sync,
+ CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+ snapc, 0,
truncate_seq, truncate_size, mtime,
- nofail, 1, page_align);
+ true, page_align);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1954,7 +1993,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
dout("writepages %llu~%llu (%d pages)\n", off, len,
req->r_num_pages);
- rc = ceph_osdc_start_request(osdc, req, nofail);
+ rc = ceph_osdc_start_request(osdc, req, true);
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
@@ -2047,7 +2086,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
if (data_len > 0) {
int want = calc_pages_for(req->r_page_alignment, data_len);
- if (unlikely(req->r_num_pages < want)) {
+ if (req->r_pages && unlikely(req->r_num_pages < want)) {
pr_warning("tid %lld reply has %d bytes %d pages, we"
" had only %d pages ready\n", tid, data_len,
want, req->r_num_pages);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index de73214b5d26..69bc4bf89e3e 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -13,26 +13,18 @@
char *ceph_osdmap_state_str(char *str, int len, int state)
{
- int flag = 0;
-
if (!len)
- goto done;
-
- *str = '\0';
- if (state) {
- if (state & CEPH_OSD_EXISTS) {
- snprintf(str, len, "exists");
- flag = 1;
- }
- if (state & CEPH_OSD_UP) {
- snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
- "up");
- flag = 1;
- }
- } else {
+ return str;
+
+ if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
+ snprintf(str, len, "exists, up");
+ else if (state & CEPH_OSD_EXISTS)
+ snprintf(str, len, "exists");
+ else if (state & CEPH_OSD_UP)
+ snprintf(str, len, "up");
+ else
snprintf(str, len, "doesn't exist");
- }
-done:
+
return str;
}
@@ -53,13 +45,8 @@ static int calc_bits_of(unsigned int t)
*/
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
- pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
- pi->pgp_num_mask =
- (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
- pi->lpg_num_mask =
- (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
- pi->lpgp_num_mask =
- (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
+ pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
+ pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
/*
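/*
 * Illustrative sketch, not part of the patch: the masks computed above are
 * "next power of two minus one" values that ceph_stable_mod() uses to fold a
 * placement seed into pg_num buckets.  A standalone version of the same
 * arithmetic; the stable_mod definition is reproduced here as understood
 * from the ceph headers and should be treated as an assumption.
 */
#include <stdio.h>

/* count of bits needed to represent t (same shape as calc_bits_of()) */
static int bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t >>= 1;
		b++;
	}
	return b;
}

/* fold x into b buckets using the next-pow2-minus-1 mask bmask */
static unsigned int stable_mod(unsigned int x, unsigned int b, unsigned int bmask)
{
	if ((x & bmask) < b)
		return x & bmask;
	return x & (bmask >> 1);
}

int main(void)
{
	unsigned int pg_num = 12;
	unsigned int mask = (1u << bits_of(pg_num - 1)) - 1;	/* 15 for pg_num 12 */

	printf("pg_num %u -> mask 0x%x\n", pg_num, mask);
	printf("seed 13 -> pg %u\n", stable_mod(13, pg_num, mask));	/* 13 >= 12, so 13 & 7 = 5 */
	printf("seed  9 -> pg %u\n", stable_mod(9, pg_num, mask));	/* 9 < 12, kept as 9 */
	return 0;
}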
@@ -170,6 +157,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
c->choose_local_tries = 2;
c->choose_local_fallback_tries = 5;
c->choose_total_tries = 19;
+ c->chooseleaf_descend_once = 0;
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
@@ -336,6 +324,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
dout("crush decode tunable choose_total_tries = %d",
c->choose_total_tries);
+ ceph_decode_need(p, end, sizeof(u32), done);
+ c->chooseleaf_descend_once = ceph_decode_32(p);
+ dout("crush decode tunable chooseleaf_descend_once = %d",
+ c->chooseleaf_descend_once);
+
done:
dout("crush_decode success\n");
return c;
@@ -354,12 +347,13 @@ bad:
*/
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
- u64 a = *(u64 *)&l;
- u64 b = *(u64 *)&r;
-
- if (a < b)
+ if (l.pool < r.pool)
+ return -1;
+ if (l.pool > r.pool)
+ return 1;
+ if (l.seed < r.seed)
return -1;
- if (a > b)
+ if (l.seed > r.seed)
return 1;
return 0;
}
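/*
 * Illustrative note, not part of the patch: the old comparison punned the
 * packed struct through a u64, which only worked while the v1 ceph_pg fit in
 * exactly 64 bits.  With the pool id widened to 64 bits and a 32-bit seed,
 * the struct no longer fits in a single word, so the compare is done field
 * by field, pool first, then seed.
 */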
@@ -405,8 +399,8 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
} else if (c > 0) {
n = n->rb_right;
} else {
- dout("__lookup_pg_mapping %llx got %p\n",
- *(u64 *)&pgid, pg);
+ dout("__lookup_pg_mapping %lld.%x got %p\n",
+ pgid.pool, pgid.seed, pg);
return pg;
}
}
@@ -418,12 +412,13 @@ static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
if (pg) {
- dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
+ dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
+ pg);
rb_erase(&pg->node, root);
kfree(pg);
return 0;
}
- dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
+ dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
return -ENOENT;
}
@@ -452,7 +447,7 @@ static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
return 0;
}
-static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
+static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
struct ceph_pg_pool_info *pi;
struct rb_node *n = root->rb_node;
@@ -508,24 +503,57 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
- unsigned int n, m;
+ u8 ev, cv;
+ unsigned len, num;
+ void *pool_end;
+
+ ceph_decode_need(p, end, 2 + 4, bad);
+ ev = ceph_decode_8(p); /* encoding version */
+ cv = ceph_decode_8(p); /* compat version */
+ if (ev < 5) {
+ pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
+ return -EINVAL;
+ }
+ if (cv > 7) {
+ pr_warning("got v %d cv %d > 7 of ceph_pg_pool\n", ev, cv);
+ return -EINVAL;
+ }
+ len = ceph_decode_32(p);
+ ceph_decode_need(p, end, len, bad);
+ pool_end = *p + len;
- ceph_decode_copy(p, &pi->v, sizeof(pi->v));
- calc_pg_masks(pi);
+ pi->type = ceph_decode_8(p);
+ pi->size = ceph_decode_8(p);
+ pi->crush_ruleset = ceph_decode_8(p);
+ pi->object_hash = ceph_decode_8(p);
+
+ pi->pg_num = ceph_decode_32(p);
+ pi->pgp_num = ceph_decode_32(p);
+
+ *p += 4 + 4; /* skip lpg* */
+ *p += 4; /* skip last_change */
+ *p += 8 + 4; /* skip snap_seq, snap_epoch */
- /* num_snaps * snap_info_t */
- n = le32_to_cpu(pi->v.num_snaps);
- while (n--) {
- ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
- sizeof(struct ceph_timespec), bad);
- *p += sizeof(u64) + /* key */
- 1 + sizeof(u64) + /* u8, snapid */
- sizeof(struct ceph_timespec);
- m = ceph_decode_32(p); /* snap name */
- *p += m;
+ /* skip snaps */
+ num = ceph_decode_32(p);
+ while (num--) {
+ *p += 8; /* snapid key */
+ *p += 1 + 1; /* versions */
+ len = ceph_decode_32(p);
+ *p += len;
}
- *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
+ /* skip removed snaps */
+ num = ceph_decode_32(p);
+ *p += num * (8 + 8);
+
+ *p += 8; /* skip auid */
+ pi->flags = ceph_decode_64(p);
+
+ /* ignore the rest */
+
+ *p = pool_end;
+ calc_pg_masks(pi);
return 0;
bad:
@@ -535,14 +563,15 @@ bad:
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
struct ceph_pg_pool_info *pi;
- u32 num, len, pool;
+ u32 num, len;
+ u64 pool;
ceph_decode_32_safe(p, end, num, bad);
dout(" %d pool names\n", num);
while (num--) {
- ceph_decode_32_safe(p, end, pool, bad);
+ ceph_decode_64_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
- dout(" pool %d len %d\n", pool, len);
+ dout(" pool %llu len %d\n", pool, len);
ceph_decode_need(p, end, len, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
@@ -633,7 +662,6 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
struct ceph_osdmap *map;
u16 version;
u32 len, max, i;
- u8 ev;
int err = -EINVAL;
void *start = *p;
struct ceph_pg_pool_info *pi;
@@ -646,9 +674,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
map->pg_temp = RB_ROOT;
ceph_decode_16_safe(p, end, version, bad);
- if (version > CEPH_OSDMAP_VERSION) {
- pr_warning("got unknown v %d > %d of osdmap\n", version,
- CEPH_OSDMAP_VERSION);
+ if (version > 6) {
+ pr_warning("got unknown v %d > 6 of osdmap\n", version);
+ goto bad;
+ }
+ if (version < 6) {
+ pr_warning("got old v %d < 6 of osdmap\n", version);
goto bad;
}
@@ -660,20 +691,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
- ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+ ceph_decode_need(p, end, 8 + 2, bad);
err = -ENOMEM;
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
- pi->id = ceph_decode_32(p);
- err = -EINVAL;
- ev = ceph_decode_8(p); /* encoding version */
- if (ev > CEPH_PG_POOL_VERSION) {
- pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
- ev, CEPH_PG_POOL_VERSION);
- kfree(pi);
- goto bad;
- }
+ pi->id = ceph_decode_64(p);
err = __decode_pool(p, end, pi);
if (err < 0) {
kfree(pi);
@@ -682,12 +705,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__insert_pg_pool(&map->pg_pools, pi);
}
- if (version >= 5) {
- err = __decode_pool_names(p, end, map);
- if (err < 0) {
- dout("fail to decode pool names");
- goto bad;
- }
+ err = __decode_pool_names(p, end, map);
+ if (err < 0) {
+ dout("fail to decode pool names");
+ goto bad;
}
ceph_decode_32_safe(p, end, map->pool_max, bad);
@@ -724,10 +745,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
for (i = 0; i < len; i++) {
int n, j;
struct ceph_pg pgid;
+ struct ceph_pg_v1 pgid_v1;
struct ceph_pg_mapping *pg;
ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
- ceph_decode_copy(p, &pgid, sizeof(pgid));
+ ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
+ pgid.pool = le32_to_cpu(pgid_v1.pool);
+ pgid.seed = le16_to_cpu(pgid_v1.ps);
n = ceph_decode_32(p);
err = -EINVAL;
if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -745,7 +769,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
err = __insert_pg_mapping(pg, &map->pg_temp);
if (err)
goto bad;
- dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
+ dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed,
+ len);
}
/* crush */
@@ -784,16 +809,17 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
struct ceph_fsid fsid;
u32 epoch = 0;
struct ceph_timespec modified;
- u32 len, pool;
- __s32 new_pool_max, new_flags, max;
+ s32 len;
+ u64 pool;
+ __s64 new_pool_max;
+ __s32 new_flags, max;
void *start = *p;
int err = -EINVAL;
u16 version;
ceph_decode_16_safe(p, end, version, bad);
- if (version > CEPH_OSDMAP_INC_VERSION) {
- pr_warning("got unknown v %d > %d of inc osdmap\n", version,
- CEPH_OSDMAP_INC_VERSION);
+ if (version > 6) {
+ pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
goto bad;
}
@@ -803,7 +829,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
epoch = ceph_decode_32(p);
BUG_ON(epoch != map->epoch+1);
ceph_decode_copy(p, &modified, sizeof(modified));
- new_pool_max = ceph_decode_32(p);
+ new_pool_max = ceph_decode_64(p);
new_flags = ceph_decode_32(p);
/* full map? */
@@ -853,18 +879,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
/* new_pool */
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
- __u8 ev;
struct ceph_pg_pool_info *pi;
- ceph_decode_32_safe(p, end, pool, bad);
- ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
- ev = ceph_decode_8(p); /* encoding version */
- if (ev > CEPH_PG_POOL_VERSION) {
- pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
- ev, CEPH_PG_POOL_VERSION);
- err = -EINVAL;
- goto bad;
- }
+ ceph_decode_64_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (!pi) {
pi = kzalloc(sizeof(*pi), GFP_NOFS);
@@ -890,7 +907,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
while (len--) {
struct ceph_pg_pool_info *pi;
- ceph_decode_32_safe(p, end, pool, bad);
+ ceph_decode_64_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi)
__remove_pg_pool(&map->pg_pools, pi);
@@ -946,10 +963,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
while (len--) {
struct ceph_pg_mapping *pg;
int j;
+ struct ceph_pg_v1 pgid_v1;
struct ceph_pg pgid;
u32 pglen;
ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
- ceph_decode_copy(p, &pgid, sizeof(pgid));
+ ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
+ pgid.pool = le32_to_cpu(pgid_v1.pool);
+ pgid.seed = le16_to_cpu(pgid_v1.ps);
pglen = ceph_decode_32(p);
if (pglen) {
@@ -975,8 +995,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
kfree(pg);
goto bad;
}
- dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
- pglen);
+ dout(" added pg_temp %lld.%x len %d\n", pgid.pool,
+ pgid.seed, pglen);
} else {
/* remove */
__remove_pg_mapping(&map->pg_temp, pgid);
@@ -1010,7 +1030,7 @@ bad:
* pass a stride back to the caller.
*/
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
- u64 off, u64 *plen,
+ u64 off, u64 len,
u64 *ono,
u64 *oxoff, u64 *oxlen)
{
@@ -1021,7 +1041,7 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u32 su_per_object;
u64 t, su_offset;
- dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
+ dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
osize, su);
if (su == 0 || sc == 0)
goto invalid;
@@ -1054,11 +1074,10 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
/*
* Calculate the length of the extent being written to the selected
- * object. This is the minimum of the full length requested (plen) or
+ * object. This is the minimum of the full length requested (len) or
* the remainder of the current stripe being written to.
*/
- *oxlen = min_t(u64, *plen, su - su_offset);
- *plen = *oxlen;
+ *oxlen = min_t(u64, len, su - su_offset);
dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
return 0;
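/*
 * Illustrative sketch, not part of the patch: with the length now passed by
 * value, the function only reports how much of the requested extent lands in
 * the selected object; the caller decides how to continue.  The clipping rule
 * above, in isolation (su = stripe unit size, su_offset = offset within the
 * current stripe unit; both names taken from the surrounding code):
 */
#include <stdio.h>

static unsigned long long clip_to_stripe_unit(unsigned long long len,
					      unsigned long long su,
					      unsigned long long su_offset)
{
	unsigned long long remain = su - su_offset;

	return len < remain ? len : remain;	/* min(len, su - su_offset) */
}

int main(void)
{
	/* 64 KiB stripe units, write starting 34464 bytes into a unit */
	printf("%llu\n", clip_to_stripe_unit(100000, 65536, 34464));	/* 31072 */
	return 0;
}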
@@ -1076,33 +1095,24 @@ EXPORT_SYMBOL(ceph_calc_file_object_mapping);
* calculate an object layout (i.e. pgid) from an oid,
* file_layout, and osdmap
*/
-int ceph_calc_object_layout(struct ceph_object_layout *ol,
+int ceph_calc_object_layout(struct ceph_pg *pg,
const char *oid,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap)
{
unsigned int num, num_mask;
- struct ceph_pg pgid;
- int poolid = le32_to_cpu(fl->fl_pg_pool);
struct ceph_pg_pool_info *pool;
- unsigned int ps;
BUG_ON(!osdmap);
-
- pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+ pg->pool = le32_to_cpu(fl->fl_pg_pool);
+ pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool);
if (!pool)
return -EIO;
- ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
- num = le32_to_cpu(pool->v.pg_num);
+ pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid));
+ num = pool->pg_num;
num_mask = pool->pg_num_mask;
- pgid.ps = cpu_to_le16(ps);
- pgid.preferred = cpu_to_le16(-1);
- pgid.pool = fl->fl_pg_pool;
- dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
-
- ol->ol_pgid = pgid;
- ol->ol_stripe_unit = fl->fl_object_stripe_unit;
+ dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed);
return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);
@@ -1117,19 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
- unsigned int poolid, ps, pps, t, r;
-
- poolid = le32_to_cpu(pgid.pool);
- ps = le16_to_cpu(pgid.ps);
+ int r;
+ u32 pps;
- pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+ pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
if (!pool)
return NULL;
/* pg_temp? */
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
- pool->pgp_num_mask);
- pgid.ps = cpu_to_le16(t);
+ pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
+ pool->pgp_num_mask);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
*num = pg->len;
@@ -1137,26 +1144,39 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
}
/* crush */
- ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
- pool->v.type, pool->v.size);
+ ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
+ pool->type, pool->size);
if (ruleno < 0) {
- pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
- poolid, pool->v.crush_ruleset, pool->v.type,
- pool->v.size);
+ pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
+ pgid.pool, pool->crush_ruleset, pool->type,
+ pool->size);
return NULL;
}
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.pgp_num),
- pool->pgp_num_mask);
- pps += poolid;
+ if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+ /* hash pool id and seed so that pool PGs do not overlap */
+ pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
+ ceph_stable_mod(pgid.seed, pool->pgp_num,
+ pool->pgp_num_mask),
+ pgid.pool);
+ } else {
+ /*
+ * legacy behavior: add ps and pool together. this is
+ * not a great approach because the PGs from each pool
+ * will overlap on top of each other: 0.5 == 1.4 ==
+ * 2.3 == ...
+ */
+ pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
+ pool->pgp_num_mask) +
+ (unsigned)pgid.pool;
+ }
r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
- min_t(int, pool->v.size, *num),
+ min_t(int, pool->size, *num),
osdmap->osd_weight);
if (r < 0) {
- pr_err("error %d from crush rule: pool %d ruleset %d type %d"
- " size %d\n", r, poolid, pool->v.crush_ruleset,
- pool->v.type, pool->v.size);
+ pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
+ " size %d\n", r, pgid.pool, pool->crush_ruleset,
+ pool->type, pool->size);
return NULL;
}
*num = r;
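/*
 * Illustrative sketch, not part of the patch: the legacy branch above makes
 * placement seeds from different pools collide, which is exactly the
 * "0.5 == 1.4 == 2.3" overlap the comment describes.  A standalone
 * demonstration of that collision; stable_mod is reproduced as understood
 * from the ceph headers.  The HASHPSPOOL branch instead mixes seed and pool
 * through crush_hash32_2(), a Jenkins-style hash, which breaks this pattern.
 */
#include <stdio.h>

static unsigned int stable_mod(unsigned int x, unsigned int b, unsigned int bmask)
{
	return (x & bmask) < b ? (x & bmask) : (x & (bmask >> 1));
}

int main(void)
{
	unsigned int pgp_num = 8, mask = 7;
	unsigned long long pool;

	/* pool 0 seed 5, pool 1 seed 4, pool 2 seed 3 all map to pps 5 */
	for (pool = 0; pool <= 2; pool++) {
		unsigned int seed = 5 - (unsigned int)pool;
		unsigned int pps = stable_mod(seed, pgp_num, mask) + (unsigned int)pool;

		printf("pool %llu pg seed %u -> legacy pps %u\n", pool, seed, pps);
	}
	return 0;
}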
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index cd9c21df87d1..815a2249cfa9 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -12,7 +12,7 @@
/*
* build a vector of user pages
*/
-struct page **ceph_get_direct_page_vector(const char __user *data,
+struct page **ceph_get_direct_page_vector(const void __user *data,
int num_pages, bool write_page)
{
struct page **pages;
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(ceph_alloc_page_vector);
* copy user data into a page vector
*/
int ceph_copy_user_to_page_vector(struct page **pages,
- const char __user *data,
+ const void __user *data,
loff_t off, size_t len)
{
int i = 0;
@@ -118,17 +118,17 @@ int ceph_copy_user_to_page_vector(struct page **pages,
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
-int ceph_copy_to_page_vector(struct page **pages,
- const char *data,
+void ceph_copy_to_page_vector(struct page **pages,
+ const void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_CACHE_MASK;
size_t left = len;
- size_t l;
while (left > 0) {
- l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+
memcpy(page_address(pages[i]) + po, data, l);
data += l;
left -= l;
@@ -138,21 +138,20 @@ int ceph_copy_to_page_vector(struct page **pages,
i++;
}
}
- return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
-int ceph_copy_from_page_vector(struct page **pages,
- char *data,
+void ceph_copy_from_page_vector(struct page **pages,
+ void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_CACHE_MASK;
size_t left = len;
- size_t l;
while (left > 0) {
- l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+
memcpy(data, page_address(pages[i]) + po, l);
data += l;
left -= l;
@@ -162,7 +161,6 @@ int ceph_copy_from_page_vector(struct page **pages,
i++;
}
}
- return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
@@ -170,7 +168,7 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
* copy user data from a page vector into a user pointer
*/
int ceph_copy_page_vector_to_user(struct page **pages,
- char __user *data,
+ void __user *data,
loff_t off, size_t len)
{
int i = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 17bc535115d3..a06a7a58dd11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup);
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
- struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
- hlist_for_each_entry(dev, p, head, name_hlist)
+ hlist_for_each_entry(dev, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
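/*
 * Illustrative note, not part of the patch: this hunk and the many similar
 * ones below (net/core, net/decnet, net/ieee802154, net/ipv4, ...) are the
 * same mechanical conversion -- the hlist_for_each_entry*() and sk_for_each*()
 * macros no longer take a separate struct hlist_node cursor, so the local
 * 'p'/'node' variables are simply deleted:
 *
 *	old:  hlist_for_each_entry(dev, p, head, name_hlist)
 *	new:  hlist_for_each_entry(dev, head, name_hlist)
 */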
@@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
- struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
- hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+ hlist_for_each_entry_rcu(dev, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
@@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name);
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
- struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
- hlist_for_each_entry(dev, p, head, index_hlist)
+ hlist_for_each_entry(dev, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
@@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
- struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
- hlist_for_each_entry_rcu(dev, p, head, index_hlist)
+ hlist_for_each_entry_rcu(dev, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
@@ -1882,8 +1878,10 @@ int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
if (!new_dev_maps)
new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
- if (!new_dev_maps)
+ if (!new_dev_maps) {
+ mutex_unlock(&xps_map_mutex);
return -ENOMEM;
+ }
map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
NULL;
diff --git a/net/core/flow.c b/net/core/flow.c
index 43f7495df27a..c56ea6f7f6c7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc,
int shrink_to)
{
struct flow_cache_entry *fle;
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
LIST_HEAD(gc_list);
int i, deleted = 0;
for (i = 0; i < flow_cache_hash_size(fc); i++) {
int saved = 0;
- hlist_for_each_entry_safe(fle, entry, tmp,
+ hlist_for_each_entry_safe(fle, tmp,
&fcp->hash_table[i], u.hlist) {
if (saved < shrink_to &&
flow_entry_valid(fle)) {
@@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
struct flow_cache *fc = &flow_cache_global;
struct flow_cache_percpu *fcp;
struct flow_cache_entry *fle, *tfle;
- struct hlist_node *entry;
struct flow_cache_object *flo;
size_t keysize;
unsigned int hash;
@@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
flow_new_hash_rnd(fc, fcp);
hash = flow_hash_code(fc, fcp, key, keysize);
- hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
+ hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
if (tfle->net == net &&
tfle->family == family &&
tfle->dir == dir &&
@@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data)
struct flow_cache *fc = info->cache;
struct flow_cache_percpu *fcp;
struct flow_cache_entry *fle;
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
LIST_HEAD(gc_list);
int i, deleted = 0;
fcp = this_cpu_ptr(fc->percpu);
for (i = 0; i < flow_cache_hash_size(fc); i++) {
- hlist_for_each_entry_safe(fle, entry, tmp,
+ hlist_for_each_entry_safe(fle, tmp,
&fcp->hash_table[i], u.hlist) {
if (flow_entry_valid(fle))
continue;
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 0f6bb6f8d391..3174f1998ee6 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
{
struct net *net = seq_file_net(seq);
struct net_device *dev;
- struct hlist_node *p;
struct hlist_head *h;
unsigned int count = 0, offset = get_offset(*pos);
h = &net->dev_name_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
+ hlist_for_each_entry_rcu(dev, h, name_hlist) {
if (++count == offset)
return dev;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 8acce01b6dab..80e271d9e64b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -344,7 +344,7 @@ struct net *get_net_ns_by_fd(int fd)
if (IS_ERR(file))
return ERR_CAST(file);
- ei = PROC_I(file->f_dentry->d_inode);
+ ei = PROC_I(file_inode(file));
if (ei->ns_ops == &netns_operations)
net = get_net(ei->ns);
else
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8aa20f6a46e..b376410ff259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
int idx = 0, s_idx;
struct net_device *dev;
struct hlist_head *head;
- struct hlist_node *node;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
@@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
diff --git a/net/core/sock.c b/net/core/sock.c
index fe96c5d34299..b261a7977746 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -186,8 +186,10 @@ void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
+#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
+#endif
/*
* Make lock validator output more readable. (we pre-construct these
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 602cd637182e..a29e90cf36b7 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -97,21 +97,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
-{
- if (sock_diag_handlers[family] == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, family);
-
- mutex_lock(&sock_diag_table_mutex);
- return sock_diag_handlers[family];
-}
-
-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
-{
- mutex_unlock(&sock_diag_table_mutex);
-}
-
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
@@ -121,12 +106,20 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
- hndl = sock_diag_lock_handler(req->sdiag_family);
+ if (req->sdiag_family >= AF_MAX)
+ return -EINVAL;
+
+ if (sock_diag_handlers[req->sdiag_family] == NULL)
+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, req->sdiag_family);
+
+ mutex_lock(&sock_diag_table_mutex);
+ hndl = sock_diag_handlers[req->sdiag_family];
if (hndl == NULL)
err = -ENOENT;
else
err = hndl->dump(skb, nlh);
- sock_diag_unlock_handler(hndl);
+ mutex_unlock(&sock_diag_table_mutex);
return err;
}
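/*
 * Illustrative note, not part of the patch: besides folding the lock/unlock
 * helpers back into the caller, the rewritten path validates sdiag_family
 * against AF_MAX before it is used to index sock_diag_handlers[] (and to
 * build the module alias), so a malformed request can no longer reference
 * the handler table out of bounds.
 */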
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index c4a2def5b7bd..c21f200eed93 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk)
static int check_port(__le16 port)
{
struct sock *sk;
- struct hlist_node *node;
if (port == 0)
return -1;
- sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
+ sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
struct dn_scp *scp = DN_SK(sk);
if (scp->addrloc == port)
return -1;
@@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
{
struct hlist_head *list = listen_hash(addr);
- struct hlist_node *node;
struct sock *sk;
read_lock(&dn_hash_lock);
- sk_for_each(sk, node, list) {
+ sk_for_each(sk, list) {
struct dn_scp *scp = DN_SK(sk);
if (sk->sk_state != TCP_LISTEN)
continue;
@@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct sock *sk;
- struct hlist_node *node;
struct dn_scp *scp;
read_lock(&dn_hash_lock);
- sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
+ sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
scp = DN_SK(sk);
if (cb->src != dn_saddr2dn(&scp->peer))
continue;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f968c1b58f47..6c2445bcaba1 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct dn_fib_table *tb;
- struct hlist_node *node;
int dumped = 0;
if (!net_eq(net, &init_net))
@@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
e = 0;
- hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
+ hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
if (e < s_e)
goto next;
if (dumped)
@@ -828,7 +827,6 @@ out:
struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
struct dn_fib_table *t;
- struct hlist_node *node;
unsigned int h;
if (n < RT_TABLE_MIN)
@@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
h = n & (DN_FIB_TABLE_HASHSZ - 1);
rcu_read_lock();
- hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
+ hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
if (t->n == n) {
rcu_read_unlock();
return t;
@@ -885,11 +883,10 @@ void dn_fib_flush(void)
{
int flushed = 0;
struct dn_fib_table *tb;
- struct hlist_node *node;
unsigned int h;
for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
- hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
+ hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
flushed += tb->flush(tb);
}
@@ -908,12 +905,12 @@ void __init dn_fib_table_init(void)
void __exit dn_fib_table_cleanup(void)
{
struct dn_fib_table *t;
- struct hlist_node *node, *next;
+ struct hlist_node *next;
unsigned int h;
write_lock(&dn_fib_tables_lock);
for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
- hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
+ hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
hlist) {
hlist_del(&t->hlist);
kfree(t);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 16705611589a..e0da175f8e5b 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk, *prev = NULL;
- struct hlist_node *node;
int ret = NET_RX_SUCCESS;
u16 pan_id, short_addr;
@@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
read_lock(&dgram_lock);
- sk_for_each(sk, node, &dgram_head) {
+ sk_for_each(sk, &dgram_head) {
if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
dgram_sk(sk))) {
if (prev) {
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 50e823927d49..41f538b8e59c 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk;
- struct hlist_node *node;
read_lock(&raw_lock);
- sk_for_each(sk, node, &raw_head) {
+ sk_for_each(sk, &raw_head) {
bh_lock_sock(sk);
if (!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == dev->ifindex) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e225a4e5b572..68f6a94f7661 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen);
u32 inet_ehash_secret __read_mostly;
EXPORT_SYMBOL(inet_ehash_secret);
+u32 ipv6_hash_secret __read_mostly;
+EXPORT_SYMBOL(ipv6_hash_secret);
+
/*
- * inet_ehash_secret must be set exactly once
+ * inet_ehash_secret must be set exactly once, and to a non nul value
+ * ipv6_hash_secret must be set exactly once.
*/
void build_ehash_secret(void)
{
@@ -259,7 +263,8 @@ void build_ehash_secret(void)
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
- cmpxchg(&inet_ehash_secret, 0, rnd);
+ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
+ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
}
EXPORT_SYMBOL(build_ehash_secret);
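/*
 * Illustrative sketch, not part of the patch: the cmpxchg() above is an
 * "initialize exactly once" pattern, and keying the second secret off the
 * winner of that race is what guarantees ipv6_hash_secret is also written
 * only once.  The same shape in portable C11 atomics; random_nonzero() is a
 * stand-in for get_random_bytes() and the names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic unsigned int secret_a;
static unsigned int secret_b;

static unsigned int random_nonzero(void)
{
	unsigned int r;

	do {
		r = (unsigned int)rand();
	} while (r == 0);
	return r;
}

static void build_secrets_once(void)
{
	unsigned int expected = 0;
	unsigned int rnd = random_nonzero();

	/* only the caller that swings secret_a from 0 also sets secret_b */
	if (atomic_compare_exchange_strong(&secret_a, &expected, rnd))
		secret_b = (unsigned int)rand();
}

int main(void)
{
	build_secrets_once();
	build_secrets_once();	/* second call is a no-op for both secrets */
	printf("a=%u b=%u\n", atomic_load(&secret_a), secret_b);
	return 0;
}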
@@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (skb->next != NULL)
iph->frag_off |= htons(IP_MF);
offset += (skb->len - skb->mac_len - iph->ihl * 4);
- } else
- iph->id = htons(id++);
+ } else {
+ if (!(iph->frag_off & htons(IP_DF)))
+ iph->id = htons(id++);
+ }
iph->tot_len = htons(skb->len - skb->mac_len);
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
@@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = {
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
- .err_handler = ping_err,
+ .err_handler = icmp_err,
.no_policy = 1,
.netns_ok = 1,
};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5281314886c1..f678507bc829 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
u32 hash = inet_addr_hash(net, addr);
struct net_device *result = NULL;
struct in_ifaddr *ifa;
- struct hlist_node *node;
rcu_read_lock();
- hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+ hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
if (ifa->ifa_local == addr) {
struct net_device *dev = ifa->ifa_dev->dev;
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
struct in_ifaddr *ifa;
- struct hlist_node *node;
int i;
now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
rcu_read_lock();
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
- hlist_for_each_entry_rcu(ifa, node,
- &inet_addr_lst[i], hash) {
+ hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
unsigned long age;
if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
struct in_device *in_dev;
struct in_ifaddr *ifa;
struct hlist_head *head;
- struct hlist_node *node;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
- hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (h > s_h || idx > s_idx)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 99f00d39d10b..eb4bb12b3eb4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
struct fib_table *fib_get_table(struct net *net, u32 id)
{
struct fib_table *tb;
- struct hlist_node *node;
struct hlist_head *head;
unsigned int h;
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
rcu_read_lock();
head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (tb->tb_id == id) {
rcu_read_unlock();
return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
{
int flushed = 0;
struct fib_table *tb;
- struct hlist_node *node;
struct hlist_head *head;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry(tb, node, head, tb_hlist)
+ hlist_for_each_entry(tb, head, tb_hlist)
flushed += fib_table_flush(tb);
}
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
- struct hlist_node *node;
struct hlist_head *head;
int dumped = 0;
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry(tb, node, head, tb_hlist) {
+ hlist_for_each_entry(tb, head, tb_hlist) {
if (e < s_e)
goto next;
if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct fib_table *tb;
struct hlist_head *head;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
head = &net->ipv4.fib_table_hash[i];
- hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
- hlist_del(node);
+ hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+ hlist_del(&tb->tb_hlist);
fib_table_flush(tb);
fib_free_table(tb);
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4797a800faf8..8f6cb7a87cd6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
struct hlist_head *head;
- struct hlist_node *node;
struct fib_info *fi;
unsigned int hash;
hash = fib_info_hashfn(nfi);
head = &fib_info_hash[hash];
- hlist_for_each_entry(fi, node, head, fib_hash) {
+ hlist_for_each_entry(fi, head, fib_hash) {
if (!net_eq(fi->fib_net, nfi->fib_net))
continue;
if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
struct hlist_head *head;
- struct hlist_node *node;
struct fib_nh *nh;
unsigned int hash;
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
hash = fib_devindex_hashfn(dev->ifindex);
head = &fib_info_devhash[hash];
- hlist_for_each_entry(nh, node, head, nh_hash) {
+ hlist_for_each_entry(nh, head, nh_hash) {
if (nh->nh_dev == dev &&
nh->nh_gw == gw &&
!(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
for (i = 0; i < old_size; i++) {
struct hlist_head *head = &fib_info_hash[i];
- struct hlist_node *node, *n;
+ struct hlist_node *n;
struct fib_info *fi;
- hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
+ hlist_for_each_entry_safe(fi, n, head, fib_hash) {
struct hlist_head *dest;
unsigned int new_hash;
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
for (i = 0; i < old_size; i++) {
struct hlist_head *lhead = &fib_info_laddrhash[i];
- struct hlist_node *node, *n;
+ struct hlist_node *n;
struct fib_info *fi;
- hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
+ hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
struct hlist_head *ldest;
unsigned int new_hash;
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
int ret = 0;
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
- struct hlist_node *node;
struct fib_info *fi;
if (fib_info_laddrhash == NULL || local == 0)
return 0;
- hlist_for_each_entry(fi, node, head, fib_lhash) {
+ hlist_for_each_entry(fi, head, fib_lhash) {
if (!net_eq(fi->fib_net, net))
continue;
if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
struct fib_info *prev_fi = NULL;
unsigned int hash = fib_devindex_hashfn(dev->ifindex);
struct hlist_head *head = &fib_info_devhash[hash];
- struct hlist_node *node;
struct fib_nh *nh;
if (force)
scope = -1;
- hlist_for_each_entry(nh, node, head, nh_hash) {
+ hlist_for_each_entry(nh, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int dead;
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
struct fib_info *prev_fi;
unsigned int hash;
struct hlist_head *head;
- struct hlist_node *node;
struct fib_nh *nh;
int ret;
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
head = &fib_info_devhash[hash];
ret = 0;
- hlist_for_each_entry(nh, node, head, nh_hash) {
+ hlist_for_each_entry(nh, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int alive;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 61e03da3e1f5..ff06b7543d9f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -920,10 +920,9 @@ nomem:
static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
struct hlist_head *head = &l->list;
- struct hlist_node *node;
struct leaf_info *li;
- hlist_for_each_entry_rcu(li, node, head, hlist)
+ hlist_for_each_entry_rcu(li, head, hlist)
if (li->plen == plen)
return li;
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
struct leaf_info *li = NULL, *last = NULL;
- struct hlist_node *node;
if (hlist_empty(head)) {
hlist_add_head_rcu(&new->hlist, head);
} else {
- hlist_for_each_entry(li, node, head, hlist) {
+ hlist_for_each_entry(li, head, hlist) {
if (new->plen > li->plen)
break;
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
{
struct leaf_info *li;
struct hlist_head *hhead = &l->list;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(li, node, hhead, hlist) {
+ hlist_for_each_entry_rcu(li, hhead, hlist) {
struct fib_alias *fa;
if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
{
int found = 0;
struct hlist_head *lih = &l->list;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct leaf_info *li = NULL;
- hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
+ hlist_for_each_entry_safe(li, tmp, lih, hlist) {
found += trie_flush_list(&li->falh);
if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb)
{
struct leaf_info *li;
- struct hlist_node *node;
int i, s_i;
s_i = cb->args[4];
i = 0;
/* rcu_read_lock is held by caller */
- hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+ hlist_for_each_entry_rcu(li, &l->list, hlist) {
if (i < s_i) {
i++;
continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
if (IS_LEAF(n)) {
struct leaf *l = (struct leaf *)n;
struct leaf_info *li;
- struct hlist_node *tmp;
s->leaves++;
s->totdepth += iter.depth;
if (iter.depth > s->maxdepth)
s->maxdepth = iter.depth;
- hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
+ hlist_for_each_entry_rcu(li, &l->list, hlist)
++s->prefixes;
} else {
const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
- struct hlist_node *node;
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct trie *t = (struct trie *) tb->tb_data;
struct trie_stat stat;
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
- struct hlist_node *node;
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
struct rt_trie_node *n;
for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
/* new hash chain */
while (++h < FIB_TABLE_HASHSZ) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
if (n)
goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
} else {
struct leaf *l = (struct leaf *) n;
struct leaf_info *li;
- struct hlist_node *node;
__be32 val = htonl(l->key);
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %pI4\n", &val);
- hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+ hlist_for_each_entry_rcu(li, &l->list, hlist) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
{
struct leaf *l = v;
struct leaf_info *li;
- struct hlist_node *node;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
}
- hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+ hlist_for_each_entry_rcu(li, &l->list, hlist) {
struct fib_alias *fa;
__be32 mask, prefix;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 17ff9fd7cdda..3ac5dff79627 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,6 +934,29 @@ error:
goto drop;
}
+void icmp_err(struct sk_buff *skb, u32 info)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ int type = icmp_hdr(skb)->type;
+ int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+
+ /*
+ * Use ping_err to handle all icmp errors except those
+ * triggered by ICMP_ECHOREPLY, which is sent by the kernel.
+ */
+ if (icmph->type != ICMP_ECHOREPLY) {
+ ping_err(skb, info);
+ return;
+ }
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+ else if (type == ICMP_REDIRECT)
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+}
+
/*
* This table is the definition of how we handle ICMP.
*/
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11cb4979a465..7d1874be1df3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax)
{
struct sock *sk2;
- struct hlist_node *node;
int reuse = sk->sk_reuse;
int reuseport = sk->sk_reuseport;
kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
* one this bucket belongs to.
*/
- sk_for_each_bound(sk2, node, &tb->owners) {
+ sk_for_each_bound(sk2, &tb->owners) {
if (sk != sk2 &&
!inet_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
}
}
}
- return node != NULL;
+ return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_bind_hashbucket *head;
- struct hlist_node *node;
struct inet_bind_bucket *tb;
int ret, attempts = 5;
struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
+ inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == rover) {
if (((tb->fastreuse > 0 &&
sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
head = &hashinfo->bhash[inet_bhashfn(net, snum,
hashinfo->bhash_size)];
spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
+ inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == snum)
goto tb_found;
}
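
The return-value rewrites here (and in inet6_csk_bind_conflict later in this series) rely on a property of the new iterators: when the walk finishes without a break, the entry pointer ends up NULL, so it can be tested directly instead of the old node cursor. A short sketch, assuming a hypothetical conflict predicate:

static bool my_port_in_use(struct hlist_head *owners)
{
	struct sock *sk;

	sk_for_each_bound(sk, owners) {
		if (my_conflict(sk))	/* hypothetical predicate */
			break;		/* sk still points at the match */
	}
	/* sk is NULL here iff the list was exhausted without a match */
	return sk != NULL;
}
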
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 2e453bde6992..245ae078a07f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
get_random_bytes(&f->rnd, sizeof(u32));
for (i = 0; i < INETFRAGS_HASHSZ; i++) {
struct inet_frag_queue *q;
- struct hlist_node *p, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+ hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
unsigned int hval = f->hashfn(q);
if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
{
struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
- struct hlist_node *n;
#endif
unsigned int hash;
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
* such entry could be created on other cpu, while we
* promoted read lock to write lock.
*/
- hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+ hlist_for_each_entry(qp, &f->hash[hash], list) {
if (qp->net == nf && f->match(qp, arg)) {
atomic_inc(&qp->refcnt);
write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
__releases(&f->lock)
{
struct inet_frag_queue *q;
- struct hlist_node *n;
- hlist_for_each_entry(q, n, &f->hash[hash], list) {
+ hlist_for_each_entry(q, &f->hash[hash], list) {
if (q->net == nf && f->match(q, key)) {
atomic_inc(&q->refcnt);
read_unlock(&f->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0ce0595d9861..6af375afeeef 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
* that the listener socket's icsk_bind_hash is the same
* as that of the child socket. We have to look up or
* create a new bind bucket for the child here. */
- struct hlist_node *node;
- inet_bind_bucket_for_each(tb, node, &head->chain) {
+ inet_bind_bucket_for_each(tb, &head->chain) {
if (net_eq(ib_net(tb), sock_net(sk)) &&
tb->port == port)
break;
}
- if (!node) {
+ if (!tb) {
tb = inet_bind_bucket_create(table->bind_bucket_cachep,
sock_net(sk), head, port);
if (!tb) {
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
int i, remaining, low, high, port;
static u32 hint;
u32 offset = hint + port_offset;
- struct hlist_node *node;
struct inet_timewait_sock *tw = NULL;
inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
* because the established check is already
* unique enough.
*/
- inet_bind_bucket_for_each(tb, node, &head->chain) {
+ inet_bind_bucket_for_each(tb, &head->chain) {
if (net_eq(ib_net(tb), net) &&
tb->port == port) {
if (tb->fastreuse >= 0 ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2784db3155fb..1f27c9f4afd0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
const int slot)
{
struct inet_timewait_sock *tw;
- struct hlist_node *node;
unsigned int killed;
int ret;
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
killed = 0;
ret = 0;
rescan:
- inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
+ inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
__inet_twsk_del_dead_node(tw);
spin_unlock(&twdr->death_lock);
__inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
if (time_before_eq(j, now)) {
- struct hlist_node *node, *safe;
+ struct hlist_node *safe;
struct inet_timewait_sock *tw;
- inet_twsk_for_each_inmate_safe(tw, node, safe,
+ inet_twsk_for_each_inmate_safe(tw, safe,
&twdr->twcal_row[slot]) {
__inet_twsk_del_dead_node(tw);
__inet_twsk_kill(tw, twdr->hashinfo);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5ef4da780ac1..d0ef0e674ec5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,7 +735,7 @@ drop:
return 0;
}
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
int err;
@@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb)
goto error;
skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
return skb;
- }
- if (skb->ip_summed != CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ tunnel->parms.o_flags&GRE_CSUM) {
+ err = skb_checksum_help(skb);
+ if (unlikely(err))
+ goto error;
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
return skb;
@@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
int err;
int pkt_len;
- skb = handle_offloads(skb);
+ skb = handle_offloads(tunnel, skb);
if (IS_ERR(skb)) {
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
iph->daddr = fl4.daddr;
iph->saddr = fl4.saddr;
iph->ttl = ttl;
- iph->id = 0;
+
+ tunnel_ip_select_ident(skb, old_iph, &rt->dst);
if (ttl == 0) {
if (skb->protocol == htons(ETH_P_IP))
@@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
tunnel->hlen = addend;
/* TCP offload with GRE SEQ is not supported. */
if (!(tunnel->parms.o_flags & GRE_SEQ)) {
- /* device supports enc gso offload*/
- if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
- dev->features |= NETIF_F_TSO;
- dev->hw_features |= NETIF_F_TSO;
- } else {
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- }
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
return mtu;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 75e33a7048f8..5852b249054f 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -657,7 +657,7 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *ofs)
{
- struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
+ struct clusterip_config *c = PDE(file_inode(file))->data;
#define PROC_WRITELEN 10
char buffer[PROC_WRITELEN+1];
unsigned long nodenum;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 55c4ee1bba06..2e91006d6076 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info)
struct iphdr *iph = (struct iphdr *)skb->data;
struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
struct inet_sock *inet_sock;
- int type = icmph->type;
- int code = icmph->code;
+ int type = icmp_hdr(skb)->type;
+ int code = icmp_hdr(skb)->code;
struct net *net = dev_net(skb->dev);
struct sock *sk;
int harderr;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 53ddebc292b6..dd44e0ab600c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
unsigned short num, __be32 raddr, __be32 laddr, int dif)
{
- struct hlist_node *node;
-
- sk_for_each_from(sk, node) {
+ sk_for_each_from(sk) {
struct inet_sock *inet = inet_sk(sk);
if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
- struct hlist_node *node;
-
- sk_for_each(sk, node, &state->h->ht[state->bucket])
+ sk_for_each(sk, &state->h->ht[state->bucket])
if (sock_net(sk) == seq_file_net(seq))
goto found;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7a5ba48c2cc9..47e854fcae24 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1409,10 +1409,10 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
return;
last_issued = tp->ucopy.dma_cookie;
- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ dma_async_issue_pending(tp->ucopy.dma_chan);
do {
- if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+ if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
&used) == DMA_SUCCESS) {
/* Safe to free early-copied skbs now */
@@ -1754,7 +1754,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tcp_service_net_dma(sk, true);
tcp_cleanup_rbuf(sk, copied);
} else
- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ dma_async_issue_pending(tp->ucopy.dma_chan);
}
#endif
if (copied >= target) {
@@ -1847,7 +1847,7 @@ do_prequeue:
break;
}
- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ dma_async_issue_pending(tp->ucopy.dma_chan);
if ((offset + used) == skb->len)
copied_early = true;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 145d3bf8df86..4a8ec457310f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
- struct hlist_node *pos;
unsigned int size = sizeof(struct in_addr);
struct tcp_md5sig_info *md5sig;
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
- hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+ hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
struct tcp_md5sig_info *md5sig;
md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
if (!hlist_empty(&md5sig->head))
tcp_free_md5sig_pool();
- hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+ hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
hlist_del_rcu(&key->node);
atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
kfree_rcu(key, rcu);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fd0cea114b5d..e2b4461074da 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
return 0;
}
-/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+/* Calculate MSS, not accounting for any TCP options. */
+static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
/* Then reserve room for full set of TCP options and 8 bytes of data */
if (mss_now < 48)
mss_now = 48;
-
- /* Now subtract TCP options size, not including SACKs */
- mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
-
return mss_now;
}
+/* Calculate MSS. Not accounting for SACKs here. */
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+{
+ /* Subtract TCP options size, not including SACKs */
+ return __tcp_mtu_to_mss(sk, pmtu) -
+ (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+}
+
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
@@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
*/
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
- space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
MAX_TCP_OPTION_SPACE;
syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
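
Splitting __tcp_mtu_to_mss() out above lets tcp_send_syn_data() budget Fast Open payload against the full MAX_TCP_OPTION_SPACE, while tcp_mtu_to_mss() keeps subtracting only the options currently in use. A rough worked example, assuming plain IPv4 with no IP options and timestamps as the only standing TCP option (the 1500/12/40 figures are assumptions for illustration, not from the patch):

/*
 * pmtu                   = 1500
 * __tcp_mtu_to_mss()     = 1500 - 20 (IPv4) - 20 (TCP)       = 1460
 * tcp_mtu_to_mss()       = 1460 - (32 - 20) for timestamps   = 1448
 * SYN-data space above   = 1460 - MAX_TCP_OPTION_SPACE (40)  = 1420
 */
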
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4dc0d44a5d31..f2c7e615f902 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev, int strict)
{
struct inet6_ifaddr *ifp;
- struct hlist_node *node;
unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
{
unsigned int hash = inet6_addr_hash(addr);
struct inet6_ifaddr *ifp;
- struct hlist_node *node;
- hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
{
struct inet6_ifaddr *ifp, *result = NULL;
unsigned int hash = inet6_addr_hash(addr);
- struct hlist_node *node;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
/* Step 2: clear hash table */
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
struct hlist_head *h = &inet6_addr_lst[i];
- struct hlist_node *n;
spin_lock_bh(&addrconf_hash_lock);
restart:
- hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+ hlist_for_each_entry_rcu(ifa, h, addr_lst) {
if (ifa->idev == idev) {
hlist_del_init_rcu(&ifa->addr_lst);
addrconf_del_timer(ifa);
@@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
}
for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
- struct hlist_node *n;
- hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+ hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
@@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
{
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
- struct hlist_node *n = &ifa->addr_lst;
- hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+ hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
state->offset++;
@@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
while (++state->bucket < IN6_ADDR_HSIZE) {
state->offset = 0;
- hlist_for_each_entry_rcu_bh(ifa, n,
+ hlist_for_each_entry_rcu_bh(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
@@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
{
int ret = 0;
struct inet6_ifaddr *ifp = NULL;
- struct hlist_node *n;
unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo)
{
unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
- struct hlist_node *node;
int i;
rcu_read_lock_bh();
@@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo)
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- hlist_for_each_entry_rcu_bh(ifp, node,
+ hlist_for_each_entry_rcu_bh(ifp,
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
@@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev;
struct inet6_dev *idev;
struct hlist_head *head;
- struct hlist_node *node;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
@@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (h > s_h || idx > s_idx)
@@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
struct inet6_dev *idev;
struct hlist_head *head;
- struct hlist_node *node;
s_h = cb->args[0];
s_idx = cb->args[1];
@@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
idev = __in6_dev_get(dev);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ff76eecfd622..aad64352cb60 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
const struct in6_addr *addr,
int type, int ifindex)
{
- struct hlist_node *pos;
struct ip6addrlbl_entry *p;
- hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+ hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
if (__ip6addrlbl_match(net, p, addr, type, ifindex))
return p;
}
@@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
if (hlist_empty(&ip6addrlbl_table.head)) {
hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
} else {
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
struct ip6addrlbl_entry *p = NULL;
- hlist_for_each_entry_safe(p, pos, n,
+ hlist_for_each_entry_safe(p, n,
&ip6addrlbl_table.head, list) {
if (p->prefixlen == newp->prefixlen &&
net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
@@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net,
int ifindex)
{
struct ip6addrlbl_entry *p = NULL;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
int ret = -ESRCH;
ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
__func__, prefix, prefixlen, ifindex);
- hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+ hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
if (p->prefixlen == prefixlen &&
net_eq(ip6addrlbl_net(p), net) &&
p->ifindex == ifindex &&
@@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
static void __net_exit ip6addrlbl_net_exit(struct net *net)
{
struct ip6addrlbl_entry *p = NULL;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
/* Remove all labels belonging to the exiting net */
spin_lock(&ip6addrlbl_table.lock);
- hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+ hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
if (net_eq(ip6addrlbl_net(p), net)) {
hlist_del_rcu(&p->list);
ip6addrlbl_put(p);
@@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct ip6addrlbl_entry *p;
- struct hlist_node *pos;
int idx = 0, s_idx = cb->args[0];
int err;
rcu_read_lock();
- hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+ hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
if (idx >= s_idx &&
net_eq(ip6addrlbl_net(p), net)) {
if ((err = ip6addrlbl_fill(skb, p,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b386a2ce4c6f..9bfab19ff3c0 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax)
{
const struct sock *sk2;
- const struct hlist_node *node;
int reuse = sk->sk_reuse;
int reuseport = sk->sk_reuseport;
kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
* See comment in inet_csk_bind_conflict about sock lookup
* vs net namespaces issues.
*/
- sk_for_each_bound(sk2, node, &tb->owners) {
+ sk_for_each_bound(sk2, &tb->owners) {
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
@@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
}
}
- return node != NULL;
+ return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 710cafd2e1a9..192dd1a0e188 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
struct fib6_table *tb;
struct hlist_head *head;
- struct hlist_node *node;
unsigned int h;
if (id == 0)
@@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
h = id & (FIB6_TABLE_HASHSZ - 1);
rcu_read_lock();
head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
if (tb->tb6_id == id) {
rcu_read_unlock();
return tb;
@@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
struct rt6_rtnl_dump_arg arg;
struct fib6_walker_t *w;
struct fib6_table *tb;
- struct hlist_node *node;
struct hlist_head *head;
int res = 0;
@@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+ hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
if (e < s_e)
goto next;
res = fib6_dump_table(tb, skb, cb);
@@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg
int prune, void *arg)
{
struct fib6_table *table;
- struct hlist_node *node;
struct hlist_head *head;
unsigned int h;
rcu_read_lock();
for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
read_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
func, prune, arg);
@@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
int prune, void *arg)
{
struct fib6_table *table;
- struct hlist_node *node;
struct hlist_head *head;
unsigned int h;
rcu_read_lock();
for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
write_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
func, prune, arg);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c65907db8c44..330b5e7b7df6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
const struct in6_addr *rmt_addr, int dif)
{
- struct hlist_node *node;
bool is_multicast = ipv6_addr_is_multicast(loc_addr);
- sk_for_each_from(sk, node)
+ sk_for_each_from(sk)
if (inet_sk(sk)->inet_num == num) {
struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6cc48012b730..de2bcfaaf759 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
- struct hlist_node *pos;
- hlist_for_each_entry_rcu(x6spi, pos,
+ hlist_for_each_entry_rcu(x6spi,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr) {
if (xfrm6_addr_equal(&x6spi->addr, saddr))
@@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
int index = xfrm6_tunnel_spi_hash_byspi(spi);
- struct hlist_node *pos;
- hlist_for_each_entry(x6spi, pos,
+ hlist_for_each_entry(x6spi,
&xfrm6_tn->spi_byspi[index],
list_byspi) {
if (x6spi->spi == spi)
@@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
- hlist_for_each_entry_safe(x6spi, pos, n,
+ hlist_for_each_entry_safe(x6spi, n,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index dfd6faaf0ea7..f547a47d381c 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
__be16 port)
{
struct sock *s;
- struct hlist_node *node;
- sk_for_each(s, node, &intrfc->if_sklist)
+ sk_for_each(s, &intrfc->if_sklist)
if (ipx_sk(s)->port == port)
goto found;
s = NULL;
@@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
__be16 port)
{
struct sock *s;
- struct hlist_node *node;
ipxitf_hold(intrfc);
spin_lock_bh(&intrfc->if_sklist_lock);
- sk_for_each(s, node, &intrfc->if_sklist) {
+ sk_for_each(s, &intrfc->if_sklist) {
struct ipx_sock *ipxs = ipx_sk(s);
if (ipxs->port == port &&
@@ -282,14 +280,14 @@ found:
static void __ipxitf_down(struct ipx_interface *intrfc)
{
struct sock *s;
- struct hlist_node *node, *t;
+ struct hlist_node *t;
/* Delete all routes associated with this interface */
ipxrtr_del_routes(intrfc);
spin_lock_bh(&intrfc->if_sklist_lock);
/* error sockets */
- sk_for_each_safe(s, node, t, &intrfc->if_sklist) {
+ sk_for_each_safe(s, t, &intrfc->if_sklist) {
struct ipx_sock *ipxs = ipx_sk(s);
s->sk_err = ENOLINK;
@@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
IPX_NODE_LEN);
struct sock *s;
- struct hlist_node *node;
int rc;
spin_lock_bh(&intrfc->if_sklist_lock);
- sk_for_each(s, node, &intrfc->if_sklist) {
+ sk_for_each(s, &intrfc->if_sklist) {
struct ipx_sock *ipxs = ipx_sk(s);
if (ipxs->port == ipx->ipx_dest.sock &&
@@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
if (connection) {
- struct hlist_node *node;
/* Now we have to look for a special NCP connection handling
* socket. Only these sockets have ipx_ncp_conn != 0, set by
* SIOCIPXNCPCONN. */
spin_lock_bh(&intrfc->if_sklist_lock);
- sk_for_each(sk, node, &intrfc->if_sklist)
+ sk_for_each(sk, &intrfc->if_sklist)
if (ipx_sk(sk)->ipx_ncp_conn == connection) {
sock_hold(sk);
goto found;
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 02ff7f2f60d4..65e8833a2510 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -103,19 +103,18 @@ out:
static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
{
struct sock *s = NULL;
- struct hlist_node *node;
struct ipx_interface *i;
list_for_each_entry(i, &ipx_interfaces, node) {
spin_lock_bh(&i->if_sklist_lock);
- sk_for_each(s, node, &i->if_sklist) {
+ sk_for_each(s, &i->if_sklist) {
if (!pos)
break;
--pos;
}
spin_unlock_bh(&i->if_sklist_lock);
if (!pos) {
- if (node)
+ if (s)
goto found;
break;
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a991d80..a7d11ffe4284 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev)
{
struct iucv_sock *iucv;
struct sock *sk;
- struct hlist_node *node;
int err = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
switch (sk->sk_state) {
case IUCV_DISCONN:
@@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev)
static int afiucv_pm_restore_thaw(struct device *dev)
{
struct sock *sk;
- struct hlist_node *node;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
switch (sk->sk_state) {
case IUCV_CONNECTED:
sk->sk_err = EPIPE;
@@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
return sk;
@@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
unsigned char user_data[16];
unsigned char nuser_data[16];
unsigned char src_name[8];
- struct hlist_node *node;
struct sock *sk, *nsk;
struct iucv_sock *iucv, *niucv;
int err;
@@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
read_lock(&iucv_sk_list.lock);
iucv = NULL;
sk = NULL;
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (sk->sk_state == IUCV_LISTEN &&
!memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
/*
@@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- struct hlist_node *node;
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
@@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
iucv = NULL;
sk = NULL;
read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
if ((!memcmp(&iucv_sk(sk)->src_name,
trans_hdr->destAppName, 8)) &&
@@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
struct sk_buff *list_skb;
struct sk_buff *nskb;
unsigned long flags;
- struct hlist_node *node;
read_lock_irqsave(&iucv_sk_list.lock, flags);
- sk_for_each(sk, node, &iucv_sk_list.head)
+ sk_for_each(sk, &iucv_sk_list.head)
if (sk == isk) {
iucv = iucv_sk(sk);
break;
@@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = (struct net_device *)ptr;
- struct hlist_node *node;
struct sock *sk;
struct iucv_sock *iucv;
switch (event) {
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
- sk_for_each(sk, node, &iucv_sk_list.head) {
+ sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
if ((iucv->hs_dev == event_dev) &&
(sk->sk_state == IUCV_CONNECTED)) {
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9ef79851f297..556fdafdd1ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
- struct hlist_node *node;
struct sk_buff *skb2 = NULL;
int err = -ESRCH;
@@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
return -ENOMEM;
rcu_read_lock();
- sk_for_each_rcu(sk, node, &net_pfkey->table) {
+ sk_for_each_rcu(sk, &net_pfkey->table) {
struct pfkey_sock *pfk = pfkey_sk(sk);
int err2;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index dcfd64e83ab7..d36875f3427e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
struct hlist_head *session_list =
l2tp_session_id_hash_2(pn, session_id);
struct l2tp_session *session;
- struct hlist_node *walk;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ hlist_for_each_entry_rcu(session, session_list, global_hlist) {
if (session->session_id == session_id) {
rcu_read_unlock_bh();
return session;
@@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
{
struct hlist_head *session_list;
struct l2tp_session *session;
- struct hlist_node *walk;
/* In L2TPv3, session_ids are unique over all tunnels and we
* sometimes need to look them up before we know the
@@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
session_list = l2tp_session_id_hash(tunnel, session_id);
read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
+ hlist_for_each_entry(session, session_list, hlist) {
if (session->session_id == session_id) {
read_unlock_bh(&tunnel->hlist_lock);
return session;
@@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find);
struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
{
int hash;
- struct hlist_node *walk;
struct l2tp_session *session;
int count = 0;
read_lock_bh(&tunnel->hlist_lock);
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+ hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
read_unlock_bh(&tunnel->hlist_lock);
return session;
@@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
- struct hlist_node *walk;
struct l2tp_session *session;
rcu_read_lock_bh();
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
- hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+ hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
rcu_read_unlock_bh();
return session;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f7ac8f42fee2..7f41b7051269 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
- struct hlist_node *node;
struct sock *sk;
- sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+ sk_for_each_bound(sk, &l2tp_ip_bind_table) {
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8ee4a86ae996..41f2f8126ebc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
int dif, u32 tunnel_id)
{
- struct hlist_node *node;
struct sock *sk;
- sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+ sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
struct in6_addr *addr = inet6_rcv_saddr(sk);
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 7c5073badc73..78be45cda5c1 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap,
{
int i = 0, count = 256 / sizeof(struct sock *);
struct sock *sk, *stack[count];
- struct hlist_node *node;
struct llc_sock *llc;
struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
spin_lock_bh(&sap->sk_lock);
- hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
+ hlist_for_each_entry(llc, dev_hb, dev_hash_node) {
sk = &llc->sk;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d0dd11153a6c..1a8591b77a13 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -647,8 +647,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
- /* preallocate at least one entry */
- idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
sta_info_init(local);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6b3c4e119c63..dc7c8df40c2c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
* it's used twice. So it is illegal to do
* for_each_mesh_entry(rcu_dereference(...), ...)
*/
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
for (i = 0; i <= tbl->hash_mask; i++) \
- hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+ hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
static struct mesh_table *mesh_table_alloc(int size_order)
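
The for_each_mesh_entry() rework above keeps the caveat spelled out in the surrounding comment: the tbl argument is expanded twice, so callers must snapshot the RCU pointer into a local variable rather than passing rcu_dereference(...) inline. A sketch of the intended call pattern (the loop body is illustrative):

struct mesh_table *tbl;
struct mpath_node *node;
int i;

rcu_read_lock();
tbl = rcu_dereference(mesh_paths);	/* dereference exactly once */
for_each_mesh_entry(tbl, node, i) {
	/* ... inspect node->mpath ... */
}
rcu_read_unlock();
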
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
}
if (free_leafs) {
spin_lock_bh(&tbl->gates_lock);
- hlist_for_each_entry_safe(gate, p, q,
+ hlist_for_each_entry_safe(gate, q,
tbl->known_gates, list) {
hlist_del(&gate->list);
kfree(gate);
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
- struct hlist_node *n;
struct hlist_head *bucket;
struct mpath_node *node;
bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
- hlist_for_each_entry_rcu(node, n, bucket, list) {
+ hlist_for_each_entry_rcu(node, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
ether_addr_equal(dst, mpath->dst)) {
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
struct mesh_table *tbl = rcu_dereference(mesh_paths);
struct mpath_node *node;
- struct hlist_node *p;
int i;
int j = 0;
- for_each_mesh_entry(tbl, p, node, i) {
+ for_each_mesh_entry(tbl, node, i) {
if (sdata && node->mpath->sdata != sdata)
continue;
if (j++ == idx) {
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath)
{
struct mesh_table *tbl;
struct mpath_node *gate, *new_gate;
- struct hlist_node *n;
int err;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
- hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+ hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
if (gate->mpath == mpath) {
err = -EEXIST;
goto err_rcu;
@@ -460,9 +457,9 @@ err_rcu:
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
struct mpath_node *gate;
- struct hlist_node *p, *q;
+ struct hlist_node *q;
- hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
+ hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
if (gate->mpath != mpath)
continue;
spin_lock_bh(&tbl->gates_lock);
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
- struct hlist_node *n;
int grow = 0;
int err = 0;
u32 hash_idx;
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
spin_lock(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
- hlist_for_each_entry(node, n, bucket, list) {
+ hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
ether_addr_equal(dst, mpath->dst))
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
- struct hlist_node *n;
int grow = 0;
int err = 0;
u32 hash_idx;
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
spin_lock(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
- hlist_for_each_entry(node, n, bucket, list) {
+ hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
ether_addr_equal(dst, mpath->dst))
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta)
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
struct mpath_node *node;
- struct hlist_node *p;
struct ieee80211_sub_if_data *sdata = sta->sdata;
int i;
__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
- for_each_mesh_entry(tbl, p, node, i) {
+ for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
- struct hlist_node *p;
int i;
rcu_read_lock();
read_lock_bh(&pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths();
- for_each_mesh_entry(tbl, p, node, i) {
+ for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
if (rcu_dereference(mpath->next_hop) == sta) {
spin_lock(&tbl->hashwlock[i]);
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl,
{
struct mesh_path *mpath;
struct mpath_node *node;
- struct hlist_node *p;
int i;
WARN_ON(!rcu_read_lock_held());
- for_each_mesh_entry(tbl, p, node, i) {
+ for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
if (mpath->sdata != sdata)
continue;
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_head *bucket;
- struct hlist_node *n;
int hash_idx;
int err = 0;
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
bucket = &tbl->hash_buckets[hash_idx];
spin_lock(&tbl->hashwlock[hash_idx]);
- hlist_for_each_entry(node, n, bucket, list) {
+ hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
ether_addr_equal(addr, mpath->dst)) {
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
- struct hlist_node *n;
struct mesh_table *tbl;
struct mesh_path *from_mpath = mpath;
struct mpath_node *gate = NULL;
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
if (!known_gates)
return -EHOSTUNREACH;
- hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+ hlist_for_each_entry_rcu(gate, known_gates, list) {
if (gate->mpath->sdata != sdata)
continue;
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
}
}
- hlist_for_each_entry_rcu(gate, n, known_gates, list)
+ hlist_for_each_entry_rcu(gate, known_gates, list)
if (gate->mpath->sdata == sdata) {
mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
mesh_path_tx_pending(gate->mpath);
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
- struct hlist_node *p;
int i;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
- for_each_mesh_entry(tbl, p, node, i) {
+ for_each_mesh_entry(tbl, node, i) {
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5b9602b62405..de8548bf0a7f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2017,24 +2017,14 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
unsigned long flags;
- int id, r;
+ int id;
spin_lock_irqsave(&local->ack_status_lock, flags);
- r = idr_get_new_above(&local->ack_status_frames,
- orig_skb, 1, &id);
- if (r == -EAGAIN) {
- idr_pre_get(&local->ack_status_frames,
- GFP_ATOMIC);
- r = idr_get_new_above(&local->ack_status_frames,
- orig_skb, 1, &id);
- }
- if (WARN_ON(!id) || id > 0xffff) {
- idr_remove(&local->ack_status_frames, id);
- r = -ERANGE;
- }
+ id = idr_alloc(&local->ack_status_frames, orig_skb,
+ 1, 0x10000, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, flags);
- if (!r) {
+ if (id >= 0) {
info_id = id;
info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
} else if (skb_shared(skb)) {
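
The tx.c hunk above is part of the tree-wide move from the idr_pre_get()/idr_get_new_above() pair to idr_alloc(), which takes the id range and gfp flags directly and returns either the new id or a negative errno. A minimal sketch of the new call pattern (my_store_frame and its parameters are stand-ins for the real objects, not part of the patch):

static int my_store_frame(struct idr *frames, spinlock_t *frames_lock,
			  struct sk_buff *skb)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(frames_lock, flags);
	/* allocate an id in [1, 0xffff]; the end argument is exclusive */
	id = idr_alloc(frames, skb, 1, 0x10000, GFP_ATOMIC);
	spin_unlock_irqrestore(frames_lock, flags);

	return id;	/* >= 0 on success, -ENOMEM or -ENOSPC on failure */
}
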
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 9f00db7e03f2..704e514e02ab 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
- struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp;
- struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (!ip_vs_conn_net_eq(cp, p->net))
continue;
if (p->pe_data && p->pe->ct_match) {
@@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp, *ret=NULL;
- struct hlist_node *n;
/*
* Check for "full" addressed entries
@@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
ct_read_lock(hash);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
int idx;
struct ip_vs_conn *cp;
struct ip_vs_iter_state *iter = seq->private;
- struct hlist_node *n;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
@@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
- struct hlist_node *e;
struct hlist_head *l = iter->l;
int idx;
@@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
- if ((e = cp->c_list.next))
- return hlist_entry(e, struct ip_vs_conn, c_list);
+ if (cp->c_list.next)
+ return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
- hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
@@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net)
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned int hash = net_random() & ip_vs_conn_tab_mask;
- struct hlist_node *n;
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(hash);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
@@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net)
flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
- struct hlist_node *n;
-
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(idx);
- hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+ hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (!ip_vs_conn_net_eq(cp, net))
continue;
IP_VS_DBG(4, "del connection\n");
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3921e5bc1235..8c10e3db3d9b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
- struct hlist_node *n;
unsigned int h;
if (!net->ct.expect_count)
return NULL;
h = nf_ct_expect_dst_hash(tuple);
- hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
+ hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone)
return i;
@@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
- struct hlist_node *n;
unsigned int h;
if (!net->ct.expect_count)
return NULL;
h = nf_ct_expect_dst_hash(tuple);
- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+ hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone) {
@@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
/* Optimization: most connection never expect any others. */
if (!help)
return;
- hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+ hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
if (del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
@@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master,
{
struct nf_conn_help *master_help = nfct_help(master);
struct nf_conntrack_expect *exp, *last = NULL;
- struct hlist_node *n;
- hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
+ hlist_for_each_entry(exp, &master_help->expectations, lnode) {
if (exp->class == new->class)
last = exp;
}
@@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
struct nf_conn_help *master_help = nfct_help(master);
struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(expect);
- struct hlist_node *n, *next;
+ struct hlist_node *next;
unsigned int h;
int ret = 1;
@@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
goto out;
}
h = nf_ct_expect_dst_hash(&expect->tuple);
- hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+ hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
if (expect_matches(i, expect)) {
if (del_timer(&i->timeout)) {
nf_ct_unlink_expect(i);
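
The conntrack expectation hunks above show the matching change to the _safe variant: hlist_for_each_entry_safe() still needs a struct hlist_node *next for deletion-safe traversal, but the old first cursor is gone. An illustrative sketch (struct expect_ent and flush_bucket are made-up names):

#include <linux/list.h>
#include <linux/slab.h>

struct expect_ent {
	struct hlist_node node;
};

static void flush_bucket(struct hlist_head *bucket)
{
	struct expect_ent *e;
	struct hlist_node *next;

	/* "next" is saved before the body runs, so unlinking and
	 * freeing the current entry is safe. */
	hlist_for_each_entry_safe(e, next, bucket, node) {
		hlist_del(&e->node);
		kfree(e);
	}
}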
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 013cdf69fe29..a9740bd6fe54 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_helper *helper;
struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
- struct hlist_node *n;
unsigned int h;
if (!nf_ct_helper_count)
return NULL;
h = helper_hash(tuple);
- hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) {
+ hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
return helper;
}
@@ -134,11 +133,10 @@ struct nf_conntrack_helper *
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
struct nf_conntrack_helper *h;
- struct hlist_node *n;
unsigned int i;
for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
+ hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
if (!strcmp(h->name, name) &&
h->tuple.src.l3num == l3num &&
h->tuple.dst.protonum == protonum)
@@ -357,7 +355,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
int ret = 0;
struct nf_conntrack_helper *cur;
- struct hlist_node *n;
unsigned int h = helper_hash(&me->tuple);
BUG_ON(me->expect_policy == NULL);
@@ -365,7 +362,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
mutex_lock(&nf_ct_helper_mutex);
- hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+ hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
cur->tuple.src.l3num == me->tuple.src.l3num &&
cur->tuple.dst.protonum == me->tuple.dst.protonum) {
@@ -386,13 +383,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
{
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_expect *exp;
- const struct hlist_node *n, *next;
+ const struct hlist_node *next;
const struct hlist_nulls_node *nn;
unsigned int i;
/* Get rid of expectations */
for (i = 0; i < nf_ct_expect_hsize; i++) {
- hlist_for_each_entry_safe(exp, n, next,
+ hlist_for_each_entry_safe(exp, next,
&net->ct.expect_hash[i], hnode) {
struct nf_conn_help *help = nfct_help(exp->master);
if ((rcu_dereference_protected(
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5d60e04f9679..9904b15f600e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- struct hlist_node *n;
u_int8_t l3proto = nfmsg->nfgen_family;
rcu_read_lock();
last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
- hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
+ hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
hnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
@@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- struct hlist_node *n, *next;
+ struct hlist_node *next;
u_int8_t u3 = nfmsg->nfgen_family;
unsigned int i;
u16 zone;
@@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
/* delete all expectations for this helper */
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_ct_expect_hsize; i++) {
- hlist_for_each_entry_safe(exp, n, next,
+ hlist_for_each_entry_safe(exp, next,
&net->ct.expect_hash[i],
hnode) {
m_help = nfct_help(exp->master);
@@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
/* This basically means we have to flush everything*/
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_ct_expect_hsize; i++) {
- hlist_for_each_entry_safe(exp, n, next,
+ hlist_for_each_entry_safe(exp, next,
&net->ct.expect_hash[i],
hnode) {
if (del_timer(&exp->timeout)) {
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 069229d919b6..0e7d423324c3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
int found = 0;
spin_lock_bh(&nf_conntrack_lock);
- hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+ hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
if (exp->class != SIP_EXPECT_SIGNALLING ||
!nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
exp->tuple.dst.protonum != proto ||
@@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media)
{
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_expect *exp;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
spin_lock_bh(&nf_conntrack_lock);
- hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+ hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
continue;
if (!del_timer(&exp->timeout))
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5f2f9109f461..8d5769c6d16e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone,
unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
- const struct hlist_node *n;
- hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
+ hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
ct = nat->ct;
if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
/* Copy source part from reply tuple. */
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 945950a8b1f1..a191b6db657e 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
const char *helper_name;
struct nf_conntrack_helper *cur, *helper = NULL;
struct nf_conntrack_tuple tuple;
- struct hlist_node *n;
int ret = 0, i;
if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
@@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
rcu_read_lock();
for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
- hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+ hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
/* skip non-userspace conntrack helpers. */
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -452,13 +451,12 @@ static int
nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_conntrack_helper *cur, *last;
- struct hlist_node *n;
rcu_read_lock();
last = (struct nf_conntrack_helper *)cb->args[1];
for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
restart:
- hlist_for_each_entry_rcu(cur, n,
+ hlist_for_each_entry_rcu(cur,
&nf_ct_helper_hash[cb->args[0]], hnode) {
/* skip non-userspace conntrack helpers. */
@@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
{
int ret = -ENOENT, i;
struct nf_conntrack_helper *cur;
- struct hlist_node *n;
struct sk_buff *skb2;
char *helper_name = NULL;
struct nf_conntrack_tuple tuple;
@@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
}
for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+ hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
/* skip non-userspace conntrack helpers. */
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
{
char *helper_name = NULL;
struct nf_conntrack_helper *cur;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
struct nf_conntrack_tuple tuple;
bool tuple_set = false, found = false;
int i, j = 0, ret;
@@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
}
for (i = 0; i < nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
hnode) {
/* skip non-userspace conntrack helpers. */
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -654,13 +651,13 @@ err_out:
static void __exit nfnl_cthelper_exit(void)
{
struct nf_conntrack_helper *cur;
- struct hlist_node *n, *tmp;
+ struct hlist_node *tmp;
int i;
nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
for (i=0; i<nf_ct_helper_hsize; i++) {
- hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
hnode) {
/* skip non-userspace conntrack helpers. */
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 92fd8eca0d31..f248db572972 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -87,11 +87,10 @@ static struct nfulnl_instance *
__instance_lookup(u_int16_t group_num)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct nfulnl_instance *inst;
head = &instance_table[instance_hashfn(group_num)];
- hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+ hlist_for_each_entry_rcu(inst, head, hlist) {
if (inst->group_num == group_num)
return inst;
}
@@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
/* destroy all instances for this portid */
spin_lock_bh(&instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
- struct hlist_node *tmp, *t2;
+ struct hlist_node *t2;
struct nfulnl_instance *inst;
struct hlist_head *head = &instance_table[i];
- hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+ hlist_for_each_entry_safe(inst, t2, head, hlist) {
if ((net_eq(n->net, &init_net)) &&
(n->portid == inst->peer_portid))
__instance_destroy(inst);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 3158d87b56a8..858fd52c1040 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -80,11 +80,10 @@ static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct nfqnl_instance *inst;
head = &instance_table[instance_hashfn(queue_num)];
- hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+ hlist_for_each_entry_rcu(inst, head, hlist) {
if (inst->queue_num == queue_num)
return inst;
}
@@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex)
rcu_read_lock();
for (i = 0; i < INSTANCE_BUCKETS; i++) {
- struct hlist_node *tmp;
struct nfqnl_instance *inst;
struct hlist_head *head = &instance_table[i];
- hlist_for_each_entry_rcu(inst, tmp, head, hlist)
+ hlist_for_each_entry_rcu(inst, head, hlist)
nfqnl_flush(inst, dev_cmp, ifindex);
}
@@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
/* destroy all instances for this portid */
spin_lock(&instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
- struct hlist_node *tmp, *t2;
+ struct hlist_node *t2;
struct nfqnl_instance *inst;
struct hlist_head *head = &instance_table[i];
- hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+ hlist_for_each_entry_safe(inst, t2, head, hlist) {
if ((n->net == &init_net) &&
(n->portid == inst->peer_portid))
__instance_destroy(inst);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index f264032b8c56..370adf622cef 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
struct xt_rateest *xt_rateest_lookup(const char *name)
{
struct xt_rateest *est;
- struct hlist_node *n;
unsigned int h;
h = xt_rateest_hash(name);
mutex_lock(&xt_rateest_mutex);
- hlist_for_each_entry(est, n, &rateest_hash[h], list) {
+ hlist_for_each_entry(est, &rateest_hash[h], list) {
if (strcmp(est->name, name) == 0) {
est->refcnt++;
mutex_unlock(&xt_rateest_mutex);
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 70b5591a2586..c40b2695633b 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -101,7 +101,7 @@ static int count_them(struct net *net,
{
const struct nf_conntrack_tuple_hash *found;
struct xt_connlimit_conn *conn;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
struct nf_conn *found_ct;
struct hlist_head *hash;
bool addit = true;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
rcu_read_lock();
/* check the saved connections */
- hlist_for_each_entry_safe(conn, pos, n, hash, node) {
+ hlist_for_each_entry_safe(conn, n, hash, node) {
found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
&conn->tuple);
found_ct = NULL;
@@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_connlimit_info *info = par->matchinfo;
struct xt_connlimit_conn *conn;
- struct hlist_node *pos, *n;
+ struct hlist_node *n;
struct hlist_head *hash = info->data->iphash;
unsigned int i;
nf_ct_l3proto_module_put(par->family);
for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
- hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+ hlist_for_each_entry_safe(conn, n, &hash[i], node) {
hlist_del(&conn->node);
kfree(conn);
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 98218c896d2e..f330e8beaf69 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
const struct dsthash_dst *dst)
{
struct dsthash_ent *ent;
- struct hlist_node *pos;
u_int32_t hash = hash_dst(ht, dst);
if (!hlist_empty(&ht->hash[hash])) {
- hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+ hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
if (dst_cmp(ent, dst)) {
spin_lock(&ent->lock);
return ent;
@@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
spin_lock_bh(&ht->lock);
for (i = 0; i < ht->cfg.size; i++) {
struct dsthash_ent *dh;
- struct hlist_node *pos, *n;
- hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
+ struct hlist_node *n;
+ hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
if ((*select)(ht, dh))
dsthash_free(ht, dh);
}
@@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
- struct hlist_node *pos;
- hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
+ hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
if (!strcmp(name, hinfo->pde->name) &&
hinfo->family == family) {
hinfo->use++;
@@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v)
struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
- struct hlist_node *pos;
if (!hlist_empty(&htable->hash[*bucket])) {
- hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
+ hlist_for_each_entry(ent, &htable->hash[*bucket], node)
if (dl_seq_real_show(ent, htable->family, s))
return -1;
}
@@ -877,7 +874,6 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
struct xt_hashlimit_htable *hinfo;
- struct hlist_node *pos;
struct proc_dir_entry *pde;
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
@@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
if (pde == NULL)
pde = hashlimit_net->ip6t_hashlimit;
- hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
+ hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
remove_proc_entry(hinfo->pde->name, pde);
hashlimit_net->ipt_hashlimit = NULL;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 31bf233dae97..d9cad315229d 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -540,7 +540,7 @@ static ssize_t
recent_mt_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
{
- const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ const struct proc_dir_entry *pde = PDE(file_inode(file));
struct recent_table *t = pde->data;
struct recent_entry *e;
char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
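
The xt_recent hunk above (and the netlink_getsockbyfilp() hunk later in this diff) switches from open-coding file->f_path.dentry->d_inode to the file_inode() helper, which returns the inode backing a struct file. A small usage sketch with an illustrative function name:

#include <linux/fs.h>

static bool is_socket_file(struct file *filp)
{
	struct inode *inode = file_inode(filp);	/* was filp->f_path.dentry->d_inode */

	return S_ISSOCK(inode->i_mode);
}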
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3d55e0c713e2..1e3fd5bfcd86 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
struct nl_portid_hash *hash = &nl_table[protocol].hash;
struct hlist_head *head;
struct sock *sk;
- struct hlist_node *node;
read_lock(&nl_table_lock);
head = nl_portid_hashfn(hash, portid);
- sk_for_each(sk, node, head) {
+ sk_for_each(sk, head) {
if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
sock_hold(sk);
goto found;
@@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
for (i = 0; i <= omask; i++) {
struct sock *sk;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
- sk_for_each_safe(sk, node, tmp, &otable[i])
+ sk_for_each_safe(sk, tmp, &otable[i])
__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
}
@@ -344,7 +343,6 @@ static void
netlink_update_listeners(struct sock *sk)
{
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
- struct hlist_node *node;
unsigned long mask;
unsigned int i;
struct listeners *listeners;
@@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk)
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
- sk_for_each_bound(sk, node, &tbl->mc_list) {
+ sk_for_each_bound(sk, &tbl->mc_list) {
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
@@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
struct hlist_head *head;
int err = -EADDRINUSE;
struct sock *osk;
- struct hlist_node *node;
int len;
netlink_table_grab();
head = nl_portid_hashfn(hash, portid);
len = 0;
- sk_for_each(osk, node, head) {
+ sk_for_each(osk, head) {
if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
break;
len++;
}
- if (node)
+ if (osk)
goto err;
err = -EBUSY;
@@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock)
struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
struct hlist_head *head;
struct sock *osk;
- struct hlist_node *node;
s32 portid = task_tgid_vnr(current);
int err;
static s32 rover = -4097;
@@ -584,7 +580,7 @@ retry:
cond_resched();
netlink_table_grab();
head = nl_portid_hashfn(hash, portid);
- sk_for_each(osk, node, head) {
+ sk_for_each(osk, head) {
if (!net_eq(sock_net(osk), net))
continue;
if (nlk_sk(osk)->portid == portid) {
@@ -809,7 +805,7 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
struct sock *netlink_getsockbyfilp(struct file *filp)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct sock *sock;
if (!S_ISSOCK(inode->i_mode))
@@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
- struct hlist_node *node;
struct sock *sk;
skb = netlink_trim(skb, allocation);
@@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
netlink_lock_table();
- sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+ sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
do_one_broadcast(sk, &info);
consume_skb(skb);
@@ -1200,7 +1195,6 @@ out:
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
struct netlink_set_err_data info;
- struct hlist_node *node;
struct sock *sk;
int ret = 0;
@@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
read_lock(&nl_table_lock);
- sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+ sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
read_unlock(&nl_table_lock);
@@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
struct sock *sk;
- struct hlist_node *node;
struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
- sk_for_each_bound(sk, node, &tbl->mc_list)
+ sk_for_each_bound(sk, &tbl->mc_list)
netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
@@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
struct nl_seq_iter *iter = seq->private;
int i, j;
struct sock *s;
- struct hlist_node *node;
loff_t off = 0;
for (i = 0; i < MAX_LINKS; i++) {
struct nl_portid_hash *hash = &nl_table[i].hash;
for (j = 0; j <= hash->mask; j++) {
- sk_for_each(s, node, &hash->table[j]) {
+ sk_for_each(s, &hash->table[j]) {
if (sock_net(s) != seq_file_net(seq))
continue;
if (off == pos) {
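
One subtlety in the af_netlink.c diff above: netlink_insert() used to test the leftover struct hlist_node cursor ("if (node)") to tell whether the loop broke early. With the new sk_for_each(), the typed cursor itself is NULL once the chain is exhausted (it is fetched via hlist_entry_safe()), so "if (osk)" is the direct equivalent. A sketch of that idiom, with an illustrative helper name:

#include <net/sock.h>

static bool chain_contains(struct hlist_head *head, const struct sock *target)
{
	struct sock *osk;

	sk_for_each(osk, head) {
		if (osk == target)
			break;		/* osk still points at the match */
	}
	return osk != NULL;		/* NULL only if the walk ran off the end */
}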
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 297b07a029de..d1fa1d9ffd2e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk)
static void nr_kill_by_device(struct net_device *dev)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&nr_list_lock);
- sk_for_each(s, node, &nr_list)
+ sk_for_each(s, &nr_list)
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
spin_unlock_bh(&nr_list_lock);
@@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk)
static struct sock *nr_find_listener(ax25_address *addr)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&nr_list_lock);
- sk_for_each(s, node, &nr_list)
+ sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
bh_lock_sock(s);
@@ -170,10 +168,9 @@ found:
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&nr_list_lock);
- sk_for_each(s, node, &nr_list) {
+ sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
@@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
ax25_address *dest)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&nr_list_lock);
- sk_for_each(s, node, &nr_list) {
+ sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70ffff76a967..b976d5eff2de 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign)
{
struct nr_node *found = NULL;
struct nr_node *nr_node;
- struct hlist_node *node;
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each(nr_node, node, &nr_node_list)
+ nr_node_for_each(nr_node, &nr_node_list)
if (ax25cmp(callsign, &nr_node->callsign) == 0) {
nr_node_hold(nr_node);
found = nr_node;
@@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
{
struct nr_neigh *found = NULL;
struct nr_neigh *nr_neigh;
- struct hlist_node *node;
spin_lock_bh(&nr_neigh_list_lock);
- nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
+ nr_neigh_for_each(nr_neigh, &nr_neigh_list)
if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
nr_neigh->dev == dev) {
nr_neigh_hold(nr_neigh);
@@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
*/
if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
struct nr_node *nr_nodet;
- struct hlist_node *node;
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each(nr_nodet, node, &nr_node_list) {
+ nr_node_for_each(nr_nodet, &nr_node_list) {
nr_node_lock(nr_nodet);
for (i = 0; i < nr_nodet->count; i++)
if (nr_nodet->routes[i].neighbour == nr_neigh)
@@ -485,11 +482,11 @@ static int nr_dec_obs(void)
{
struct nr_neigh *nr_neigh;
struct nr_node *s;
- struct hlist_node *node, *nodet;
+ struct hlist_node *nodet;
int i;
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
+ nr_node_for_each_safe(s, nodet, &nr_node_list) {
nr_node_lock(s);
for (i = 0; i < s->count; i++) {
switch (s->routes[i].obs_count) {
@@ -540,15 +537,15 @@ static int nr_dec_obs(void)
void nr_rt_device_down(struct net_device *dev)
{
struct nr_neigh *s;
- struct hlist_node *node, *nodet, *node2, *node2t;
+ struct hlist_node *nodet, *node2t;
struct nr_node *t;
int i;
spin_lock_bh(&nr_neigh_list_lock);
- nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+ nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
if (s->dev == dev) {
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
+ nr_node_for_each_safe(t, node2t, &nr_node_list) {
nr_node_lock(t);
for (i = 0; i < t->count; i++) {
if (t->routes[i].neighbour == s) {
@@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
void nr_link_failed(ax25_cb *ax25, int reason)
{
struct nr_neigh *s, *nr_neigh = NULL;
- struct hlist_node *node;
struct nr_node *nr_node = NULL;
spin_lock_bh(&nr_neigh_list_lock);
- nr_neigh_for_each(s, node, &nr_neigh_list) {
+ nr_neigh_for_each(s, &nr_neigh_list) {
if (s->ax25 == ax25) {
nr_neigh_hold(s);
nr_neigh = s;
@@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
return;
}
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each(nr_node, node, &nr_node_list) {
+ nr_node_for_each(nr_node, &nr_node_list) {
nr_node_lock(nr_node);
if (nr_node->which < nr_node->count &&
nr_node->routes[nr_node->which].neighbour == nr_neigh)
@@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void)
{
struct nr_neigh *s = NULL;
struct nr_node *t = NULL;
- struct hlist_node *node, *nodet;
+ struct hlist_node *nodet;
spin_lock_bh(&nr_neigh_list_lock);
spin_lock_bh(&nr_node_list_lock);
- nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
+ nr_node_for_each_safe(t, nodet, &nr_node_list) {
nr_node_lock(t);
nr_remove_node_locked(t);
nr_node_unlock(t);
}
- nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+ nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
while(s->count) {
s->count--;
nr_neigh_put(s);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 746f5a2f9804..7f8266dd14cb 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
{
struct sock *sk;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct nfc_llcp_sock *llcp_sock;
skb_queue_purge(&local->tx_queue);
write_lock(&local->sockets.lock);
- sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
+ sk_for_each_safe(sk, tmp, &local->sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
@@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
u8 ssap, u8 dsap)
{
struct sock *sk;
- struct hlist_node *node;
struct nfc_llcp_sock *llcp_sock, *tmp_sock;
pr_debug("ssap dsap %d %d\n", ssap, dsap);
@@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
llcp_sock = NULL;
- sk_for_each(sk, node, &local->sockets.head) {
+ sk_for_each(sk, &local->sockets.head) {
tmp_sock = nfc_llcp_sock(sk);
if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
@@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
u8 *sn, size_t sn_len)
{
struct sock *sk;
- struct hlist_node *node;
struct nfc_llcp_sock *llcp_sock, *tmp_sock;
pr_debug("sn %zd %p\n", sn_len, sn);
@@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
llcp_sock = NULL;
- sk_for_each(sk, node, &local->sockets.head) {
+ sk_for_each(sk, &local->sockets.head) {
tmp_sock = nfc_llcp_sock(sk);
pr_debug("llcp sock %p\n", tmp_sock);
@@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
struct sk_buff *skb, u8 direction)
{
- struct hlist_node *node;
struct sk_buff *skb_copy = NULL, *nskb;
struct sock *sk;
u8 *data;
read_lock(&local->raw_sockets.lock);
- sk_for_each(sk, node, &local->raw_sockets.head) {
+ sk_for_each(sk, &local->raw_sockets.head) {
if (sk->sk_state != LLCP_BOUND)
continue;
@@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
{
struct sock *sk;
struct nfc_llcp_sock *llcp_sock;
- struct hlist_node *node;
read_lock(&local->connecting_sockets.lock);
- sk_for_each(sk, node, &local->connecting_sockets.head) {
+ sk_for_each(sk, &local->connecting_sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
if (llcp_sock->ssap == ssap) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 9dc537df46c4..e87a26506dba 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
struct vport *vport;
- struct hlist_node *n;
struct hlist_head *head;
head = vport_hash_bucket(dp, port_no);
- hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
if (vport->port_no == port_no)
return vport;
}
@@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp)
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
- struct hlist_node *node, *n;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+ hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
if (vport->port_no != OVSP_LOCAL)
ovs_dp_detach_port(vport);
}
@@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
- struct hlist_node *n;
j = 0;
- hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
if (j >= skip &&
ovs_vport_cmd_fill_info(vport, skb,
NETLINK_CB(cb->skb).portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c3294cebc4f2..20605ecf100b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
for (i = 0; i < table->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head = flex_array_get(table->buckets, i);
- struct hlist_node *node, *n;
+ struct hlist_node *n;
int ver = table->node_ver;
- hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+ hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
hlist_del_rcu(&flow->hash_node[ver]);
ovs_flow_free(flow);
}
@@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
{
struct sw_flow *flow;
struct hlist_head *head;
- struct hlist_node *n;
int ver;
int i;
@@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
while (*bucket < table->n_buckets) {
i = 0;
head = flex_array_get(table->buckets, *bucket);
- hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+ hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
if (i < *last) {
i++;
continue;
@@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
for (i = 0; i < old->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head;
- struct hlist_node *n;
head = flex_array_get(old->buckets, i);
- hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+ hlist_for_each_entry(flow, head, hash_node[old_ver])
ovs_flow_tbl_insert(new, flow);
}
old->keep_flows = true;
@@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
- struct hlist_node *n;
struct hlist_head *head;
u32 hash;
hash = ovs_flow_hash(key, key_len);
head = find_bucket(table, hash);
- hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+ hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
if (flow->hash == hash &&
!memcmp(&flow->key, key, key_len)) {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 70af0bedbac4..ba717cc038b3 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name)
{
struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+ hlist_for_each_entry_rcu(vport, bucket, hash_node)
if (!strcmp(name, vport->ops->get_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c7bfeff10767..1d6793dbfbae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
struct sock *sk;
- struct hlist_node *node;
struct net_device *dev = data;
struct net *net = dev_net(dev);
rcu_read_lock();
- sk_for_each_rcu(sk, node, &net->packet.sklist) {
+ sk_for_each_rcu(sk, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 8db6e21c46bd..d3fcd1ebef7e 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct packet_diag_req *req;
struct net *net;
struct sock *sk;
- struct hlist_node *node;
net = sock_net(skb->sk);
req = nlmsg_data(cb->nlh);
mutex_lock(&net->packet.sklist_lock);
- sk_for_each(sk, node, &net->packet.sklist) {
+ sk_for_each(sk, &net->packet.sklist) {
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 576f22c9c76e..e77411735de8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist,
const struct sockaddr_pn *dst,
u8 pipe_handle)
{
- struct hlist_node *node;
struct sock *sknode;
u16 dobj = pn_sockaddr_get_object(dst);
- sk_for_each(sknode, node, hlist) {
+ sk_for_each(sknode, hlist) {
struct pep_sock *pnnode = pep_sk(sknode);
/* Ports match, but addresses might not: */
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b7e982782255..1afd1381cdc7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj)
*/
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
- struct hlist_node *node;
struct sock *sknode;
struct sock *rval = NULL;
u16 obj = pn_sockaddr_get_object(spn);
@@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
struct hlist_head *hlist = pn_hash_list(obj);
rcu_read_lock();
- sk_for_each_rcu(sknode, node, hlist) {
+ sk_for_each_rcu(sknode, hlist) {
struct pn_sock *pn = pn_sk(sknode);
BUG_ON(!pn->sobject); /* unbound socket */
@@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
rcu_read_lock();
for (h = 0; h < PN_HASHSIZE; h++) {
- struct hlist_node *node;
struct sock *sknode;
- sk_for_each(sknode, node, hlist) {
+ sk_for_each(sknode, hlist) {
struct sk_buff *clone;
if (!net_eq(sock_net(sknode), net))
@@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
struct hlist_head *hlist = pnsocks.hlist;
- struct hlist_node *node;
struct sock *sknode;
unsigned int h;
for (h = 0; h < PN_HASHSIZE; h++) {
- sk_for_each_rcu(sknode, node, hlist) {
+ sk_for_each_rcu(sknode, hlist) {
if (!net_eq(net, sock_net(sknode)))
continue;
if (!pos)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 637bde56c9db..b5ad65a0067e 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
struct rds_sock *insert)
{
struct rds_sock *rs;
- struct hlist_node *node;
struct hlist_head *head = hash_to_bucket(addr, port);
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
rcu_read_lock();
- hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
+ hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9e07c756d1f9..642ad42c416b 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
struct rds_transport *trans)
{
struct rds_connection *conn, *ret = NULL;
- struct hlist_node *pos;
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
conn->c_trans == trans) {
ret = conn;
@@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
int want_send)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct list_head *list;
struct rds_connection *conn;
struct rds_message *rm;
@@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (want_send)
list = &conn->c_send_queue;
else
@@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
{
uint64_t buffer[(item_len + 7) / 8];
struct hlist_head *head;
- struct hlist_node *pos;
struct rds_connection *conn;
size_t i;
@@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, head, c_hash_node) {
/* XXX no c_lock usage.. */
if (!visitor(conn, buffer))
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b768fe9d5e7a..cf68e6e4054a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk)
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, node, &rose_list) {
+ sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->neighbour == neigh) {
@@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, node, &rose_list) {
+ sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->device == dev) {
@@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk)
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, node, &rose_list) {
+ sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
@@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
goto found;
}
- sk_for_each(s, node, &rose_list) {
+ sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
@@ -278,10 +275,9 @@ found:
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
struct sock *s;
- struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, node, &rose_list) {
+ sk_for_each(s, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a181b484812a..c297e2a8e2a1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
struct Qdisc_class_common *cl;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
struct hlist_head *nhash, *ohash;
unsigned int nsize, nmask, osize;
unsigned int i, h;
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
sch_tree_lock(sch);
for (i = 0; i < osize; i++) {
- hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
+ hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
h = qdisc_class_hash(cl->classid, nmask);
hlist_add_head(&cl->hnode, &nhash[h]);
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948470b8..13aa47aa2ffb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
struct cbq_class *cl;
- struct hlist_node *n;
unsigned int h;
if (q->quanta[prio] == 0)
return;
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffer of
* arithmetic overflows!
*/
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
continue;
for (h = 0; h < q->clhash.hashsize; h++) {
- struct hlist_node *n;
struct cbq_class *c;
- hlist_for_each_entry(c, n, &q->clhash.hash[h],
+ hlist_for_each_entry(c, &q->clhash.hash[h],
common.hnode) {
if (c->split == split && c->level < level &&
c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
- struct hlist_node *n;
int prio;
unsigned int h;
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
q->active[prio] = NULL;
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
qdisc_reset(cl->q);
cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
static void cbq_destroy(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct hlist_node *n, *next;
+ struct hlist_node *next;
struct cbq_class *cl;
unsigned int h;
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
* be bound to classes which have been destroyed already. --TGR '04
*/
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
tcf_destroy_chain(&cl->filter_list);
}
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+ hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
common.hnode)
cbq_destroy_class(sch, cl);
}
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
- struct hlist_node *n;
unsigned int h;
if (arg->stop)
return;
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 71e50c80315f..759b308d1a8d 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
- struct hlist_node *n;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
- struct hlist_node *n;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->qdisc->q.qlen)
list_del(&cl->alist);
qdisc_reset(cl->qdisc);
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
unsigned int i;
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+ hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode)
drr_destroy_class(sch, cl);
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c2ec4510540..9facea03faeb 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1389,7 +1389,6 @@ static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct hfsc_sched *q = qdisc_priv(sch);
- struct hlist_node *n;
struct hfsc_class *cl;
unsigned int i;
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i],
+ hlist_for_each_entry(cl, &q->clhash.hash[i],
cl_common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
- struct hlist_node *n;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
hfsc_reset_class(cl);
}
q->eligible = RB_ROOT;
@@ -1540,16 +1538,16 @@ static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
struct hfsc_sched *q = qdisc_priv(sch);
- struct hlist_node *n, *next;
+ struct hlist_node *next;
struct hfsc_class *cl;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
tcf_destroy_chain(&cl->filter_list);
}
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+ hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
cl_common.hnode)
hfsc_destroy_class(sch, cl);
}
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
struct hfsc_class *cl;
- struct hlist_node *n;
unsigned int i;
sch->qstats.backlog = 0;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
sch->qstats.backlog += cl->qdisc->qstats.backlog;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 03c2692ca01e..571f1d211f4d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
- struct hlist_node *n;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->level)
memset(&cl->un.inner, 0, sizeof(cl->un.inner));
else {
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
static void htb_destroy(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- struct hlist_node *n, *next;
+ struct hlist_node *next;
struct htb_class *cl;
unsigned int i;
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch)
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
tcf_destroy_chain(&cl->filter_list);
}
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+ hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode)
htb_destroy_class(sch, cl);
}
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
- struct hlist_node *n;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6ed37652a4c3..e9a77f621c3d 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
u32 lmax, u32 weight)
{
struct qfq_aggregate *agg;
- struct hlist_node *n;
- hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
+ hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
if (agg->lmax == lmax && agg->class_weight == weight)
return agg;
@@ -670,14 +669,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
- struct hlist_node *n;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
@@ -1376,11 +1374,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
struct hlist_head *slot)
{
struct qfq_aggregate *agg;
- struct hlist_node *n;
struct qfq_class *cl;
unsigned int len;
- hlist_for_each_entry(agg, n, slot, next) {
+ hlist_for_each_entry(agg, slot, next) {
list_for_each_entry(cl, &agg->active, alist) {
if (!cl->qdisc->ops->drop)
@@ -1459,11 +1456,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
- struct hlist_node *n;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->qdisc->q.qlen > 0)
qfq_deactivate_class(q, cl);
@@ -1477,13 +1473,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
- struct hlist_node *n, *next;
+ struct hlist_node *next;
unsigned int i;
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+ hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode) {
qfq_destroy_class(sch, cl);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2f95f5a5145d..43cd0dd9149d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1591,32 +1591,31 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
- int assoc_id;
- int error = 0;
+ bool preload = gfp & __GFP_WAIT;
+ int ret;
/* If the id is already assigned, keep it. */
if (asoc->assoc_id)
- return error;
-retry:
- if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
- return -ENOMEM;
+ return 0;
+ if (preload)
+ idr_preload(gfp);
spin_lock_bh(&sctp_assocs_id_lock);
- error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
- idr_low, &assoc_id);
- if (!error) {
- idr_low = assoc_id + 1;
+ /* 0 is not a valid id, idr_low is always >= 1 */
+ ret = idr_alloc(&sctp_assocs_id, asoc, idr_low, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ idr_low = ret + 1;
if (idr_low == INT_MAX)
idr_low = 1;
}
spin_unlock_bh(&sctp_assocs_id_lock);
- if (error == -EAGAIN)
- goto retry;
- else if (error)
- return error;
+ if (preload)
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
- asoc->assoc_id = (sctp_assoc_t) assoc_id;
- return error;
+ asoc->assoc_id = (sctp_assoc_t)ret;
+ return 0;
}
/* Free the ASCONF queue */
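
For reference (not part of the patch): the sctp_assoc_set_id() rewrite above follows the idr_preload()/idr_alloc() pattern that replaces idr_pre_get()/idr_get_new_above(): preload only for sleeping allocations, then allocate with GFP_NOWAIT under the spinlock. A minimal sketch; my_idr, my_lock and my_assign_id are hypothetical:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_assign_id(void *obj, gfp_t gfp)
{
	bool preload = gfp & __GFP_WAIT;
	int id;

	if (preload)			/* preallocate outside the lock */
		idr_preload(gfp);
	spin_lock_bh(&my_lock);
	/* ids start at 1; end == 0 means "no upper bound" */
	id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&my_lock);
	if (preload)
		idr_preload_end();

	return id;			/* >= 1 on success, -errno on failure */
}
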
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 73aad3d16a45..2b3ef03c6098 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
struct sctp_transport *t = NULL;
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
- struct hlist_node *node;
int hash;
int rport;
@@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
rport);
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
tmp = sctp_assoc(epb);
if (tmp->ep != ep || rport != tmp->peer.port)
continue;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 965bbbbe48d4..4b2c83146aa7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
- struct hlist_node *node;
int hash;
hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
ep = sctp_ep(epb);
if (sctp_endpoint_is_match(ep, net, laddr))
goto hit;
@@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association(
struct sctp_ep_common *epb;
struct sctp_association *asoc;
struct sctp_transport *transport;
- struct hlist_node *node;
int hash;
/* Optimize here for direct hit, only listening connections can
@@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association(
ntohs(peer->v4.sin_port));
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
asoc = sctp_assoc(epb);
transport = sctp_assoc_is_match(asoc, net, local, peer);
if (transport)
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 8c19e97262ca..ab3bba8cb0a8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
- struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_ep_hashsize)
@@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
head = &sctp_ep_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
ep = sctp_ep(epb);
sk = epb->sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_association *assoc;
struct sock *sk;
- struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_assoc_hashsize)
@@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
head = &sctp_assoc_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
assoc = sctp_assoc(epb);
sk = epb->sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct sctp_association *assoc;
- struct hlist_node *node;
struct sctp_transport *tsp;
int hash = *(loff_t *)v;
@@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
sctp_local_bh_disable();
read_lock(&head->lock);
rcu_read_lock();
- sctp_for_each_hentry(epb, node, &head->chain) {
+ sctp_for_each_hentry(epb, &head->chain) {
if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
continue;
assoc = sctp_assoc(epb);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cedd9bf67b8c..c99458df3f3f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
- struct sctp_bind_bucket *pp; /* hash list port iterator */
- struct hlist_node *node;
+ struct sctp_bind_bucket *pp;
unsigned short snum;
int ret;
@@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
sctp_spin_lock(&head->lock);
- sctp_for_each_hentry(pp, node, &head->chain)
+ sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
@@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
sctp_spin_lock(&head->lock);
- sctp_for_each_hentry(pp, node, &head->chain) {
+ sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
}
@@ -5970,7 +5969,7 @@ pp_found:
* that this port/socket (sk) combination are already
* in an endpoint.
*/
- sk_for_each_bound(sk2, node, &pp->owner) {
+ sk_for_each_bound(sk2, &pp->owner) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
diff --git a/net/socket.c b/net/socket.c
index ee0d029e5130..88f759adf3af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -369,16 +369,15 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
- if (unlikely(!file)) {
+ if (unlikely(IS_ERR(file))) {
/* drop dentry, keep inode */
ihold(path.dentry->d_inode);
path_put(&path);
- return ERR_PTR(-ENFILE);
+ return file;
}
sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
- file->f_pos = 0;
file->private_data = sock;
return file;
}
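
For reference (not part of the patch): the fix above works because alloc_file() reports failure as an errno encoded in the returned pointer, so the old NULL test could never fire. A minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, with a hypothetical struct foo:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int want;
};

static struct foo *foo_create(int want)
{
	struct foo *f;

	if (!want)
		return ERR_PTR(-EINVAL);	/* failure: errno encoded in the pointer */
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);
	f->want = want;
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_create(0);

	if (IS_ERR(f))				/* a NULL check would never trigger here */
		return PTR_ERR(f);
	kfree(f);
	return 0;
}
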
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d11418f97f1f..a622ad64acd8 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -17,7 +17,8 @@
*/
#include <net/ipv6.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index b5c067bccc45..f5294047df77 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -407,15 +407,14 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
{
LIST_HEAD(free);
struct rpc_cred_cache *cache = auth->au_credcache;
- struct hlist_node *pos;
struct rpc_cred *cred = NULL,
*entry, *new;
unsigned int nr;
- nr = hash_long(acred->uid, cache->hashbits);
+ nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
rcu_read_lock();
- hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
+ hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
spin_lock(&cache->lock);
@@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
}
spin_lock(&cache->lock);
- hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) {
+ hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) {
if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
cred = get_rpccred(entry);
@@ -519,8 +518,8 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred acred = {
- .uid = 0,
- .gid = 0,
+ .uid = GLOBAL_ROOT_UID,
+ .gid = GLOBAL_ROOT_GID,
};
dprintk("RPC: %5u looking up %s cred\n",
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 6ed6f201b022..b6badafc6494 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -18,8 +18,8 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-#define RPC_MACHINE_CRED_USERID ((uid_t)0)
-#define RPC_MACHINE_CRED_GROUPID ((gid_t)0)
+#define RPC_MACHINE_CRED_USERID GLOBAL_ROOT_UID
+#define RPC_MACHINE_CRED_GROUPID GLOBAL_ROOT_GID
struct generic_cred {
struct rpc_cred gc_base;
@@ -96,7 +96,9 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
gcred->acred.machine_cred ? "machine" : "generic",
- gcred, acred->uid, acred->gid);
+ gcred,
+ from_kuid(&init_user_ns, acred->uid),
+ from_kgid(&init_user_ns, acred->gid));
return &gcred->gc_base;
}
@@ -129,8 +131,8 @@ machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flag
{
if (!gcred->acred.machine_cred ||
gcred->acred.principal != acred->principal ||
- gcred->acred.uid != acred->uid ||
- gcred->acred.gid != acred->gid)
+ !uid_eq(gcred->acred.uid, acred->uid) ||
+ !gid_eq(gcred->acred.gid, acred->gid))
return 0;
return 1;
}
@@ -147,8 +149,8 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
if (acred->machine_cred)
return machine_cred_match(acred, gcred, flags);
- if (gcred->acred.uid != acred->uid ||
- gcred->acred.gid != acred->gid ||
+ if (!uid_eq(gcred->acred.uid, acred->uid) ||
+ !gid_eq(gcred->acred.gid, acred->gid) ||
gcred->acred.machine_cred != 0)
goto out_nomatch;
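
For reference (not part of the patch): the sunrpc auth conversions in this series swap raw uid_t/gid_t values for the typed kuid_t/kgid_t and go through the <linux/uidgid.h> helpers only at the user/wire boundary. A minimal sketch; parse_uid() and report_uid() are hypothetical:

#include <linux/uidgid.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/types.h>

static kuid_t parse_uid(u32 wire_id)
{
	/* wire/user value -> kernel-internal kuid_t; may yield INVALID_UID */
	return make_kuid(&init_user_ns, wire_id);
}

static int report_uid(kuid_t uid)
{
	if (!uid_valid(uid))			/* e.g. the INVALID_UID sentinel */
		return -EINVAL;
	if (uid_eq(uid, GLOBAL_ROOT_UID))	/* typed compare, never "==" */
		pr_debug("root credential\n");
	/* convert back only at the boundary, e.g. for printing or the wire */
	pr_debug("uid %u\n", from_kuid(&init_user_ns, uid));
	return 0;
}
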
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 911ef008b701..5257d2982ba5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -255,7 +255,7 @@ err:
struct gss_upcall_msg {
atomic_t count;
- uid_t uid;
+ kuid_t uid;
struct rpc_pipe_msg msg;
struct list_head list;
struct gss_auth *auth;
@@ -302,11 +302,11 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
}
static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, uid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
struct gss_upcall_msg *pos;
list_for_each_entry(pos, &pipe->in_downcall, list) {
- if (pos->uid != uid)
+ if (!uid_eq(pos->uid, uid))
continue;
atomic_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
@@ -394,8 +394,11 @@ gss_upcall_callback(struct rpc_task *task)
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
- gss_msg->msg.data = &gss_msg->uid;
- gss_msg->msg.len = sizeof(gss_msg->uid);
+ uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
+ memcpy(gss_msg->databuf, &uid, sizeof(uid));
+ gss_msg->msg.data = gss_msg->databuf;
+ gss_msg->msg.len = sizeof(uid);
+ BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
}
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
@@ -408,7 +411,7 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
mech->gm_name,
- gss_msg->uid);
+ from_kuid(&init_user_ns, gss_msg->uid));
p += gss_msg->msg.len;
if (clnt->cl_principal) {
len = sprintf(p, "target=%s ", clnt->cl_principal);
@@ -444,7 +447,7 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
- uid_t uid, const char *service_name)
+ kuid_t uid, const char *service_name)
{
struct gss_upcall_msg *gss_msg;
int vers;
@@ -474,7 +477,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
struct gss_cred *gss_cred = container_of(cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_new, *gss_msg;
- uid_t uid = cred->cr_uid;
+ kuid_t uid = cred->cr_uid;
gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
if (IS_ERR(gss_new))
@@ -516,7 +519,7 @@ gss_refresh_upcall(struct rpc_task *task)
int err = 0;
dprintk("RPC: %5u %s for uid %u\n",
- task->tk_pid, __func__, cred->cr_uid);
+ task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
@@ -548,7 +551,8 @@ gss_refresh_upcall(struct rpc_task *task)
gss_release_msg(gss_msg);
out:
dprintk("RPC: %5u %s for uid %u result %d\n",
- task->tk_pid, __func__, cred->cr_uid, err);
+ task->tk_pid, __func__,
+ from_kuid(&init_user_ns, cred->cr_uid), err);
return err;
}
@@ -561,7 +565,8 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
DEFINE_WAIT(wait);
int err = 0;
- dprintk("RPC: %s for uid %u\n", __func__, cred->cr_uid);
+ dprintk("RPC: %s for uid %u\n",
+ __func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
@@ -603,7 +608,7 @@ out_intr:
gss_release_msg(gss_msg);
out:
dprintk("RPC: %s for uid %u result %d\n",
- __func__, cred->cr_uid, err);
+ __func__, from_kuid(&init_user_ns, cred->cr_uid), err);
return err;
}
@@ -615,9 +620,10 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
const void *p, *end;
void *buf;
struct gss_upcall_msg *gss_msg;
- struct rpc_pipe *pipe = RPC_I(filp->f_dentry->d_inode)->pipe;
+ struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
struct gss_cl_ctx *ctx;
- uid_t uid;
+ uid_t id;
+ kuid_t uid;
ssize_t err = -EFBIG;
if (mlen > MSG_BUF_MAXSIZE)
@@ -632,12 +638,18 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
goto err;
end = (const void *)((char *)buf + mlen);
- p = simple_get_bytes(buf, end, &uid, sizeof(uid));
+ p = simple_get_bytes(buf, end, &id, sizeof(id));
if (IS_ERR(p)) {
err = PTR_ERR(p);
goto err;
}
+ uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(uid)) {
+ err = -EINVAL;
+ goto err;
+ }
+
err = -ENOMEM;
ctx = gss_alloc_context();
if (ctx == NULL)
@@ -1058,7 +1070,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
int err = -ENOMEM;
dprintk("RPC: %s for uid %d, flavor %d\n",
- __func__, acred->uid, auth->au_flavor);
+ __func__, from_kuid(&init_user_ns, acred->uid),
+ auth->au_flavor);
if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
goto out_err;
@@ -1114,7 +1127,7 @@ out:
}
if (gss_cred->gc_principal != NULL)
return 0;
- return rc->cr_uid == acred->uid;
+ return uid_eq(rc->cr_uid, acred->uid);
}
/*
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 107c4528654f..88edec929d73 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -574,6 +574,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+ /* Trim off the checksum blob */
+ xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
return GSS_S_COMPLETE;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 73e957386600..f7d34e7b6f81 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -182,12 +182,6 @@ static void rsi_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-
-
static int rsi_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
@@ -275,7 +269,7 @@ static struct cache_detail rsi_cache_template = {
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
.cache_put = rsi_put,
- .cache_upcall = rsi_upcall,
+ .cache_request = rsi_request,
.cache_parse = rsi_parse,
.match = rsi_match,
.init = rsi_init,
@@ -418,6 +412,7 @@ static int rsc_parse(struct cache_detail *cd,
{
/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
char *buf = mesg;
+ int id;
int len, rv;
struct rsc rsci, *rscp = NULL;
time_t expiry;
@@ -444,7 +439,7 @@ static int rsc_parse(struct cache_detail *cd,
goto out;
/* uid, or NEGATIVE */
- rv = get_int(&mesg, &rsci.cred.cr_uid);
+ rv = get_int(&mesg, &id);
if (rv == -EINVAL)
goto out;
if (rv == -ENOENT)
@@ -452,8 +447,16 @@ static int rsc_parse(struct cache_detail *cd,
else {
int N, i;
+ /* uid */
+ rsci.cred.cr_uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(rsci.cred.cr_uid))
+ goto out;
+
/* gid */
- if (get_int(&mesg, &rsci.cred.cr_gid))
+ if (get_int(&mesg, &id))
+ goto out;
+ rsci.cred.cr_gid = make_kgid(&init_user_ns, id);
+ if (!gid_valid(rsci.cred.cr_gid))
goto out;
/* number of additional gid's */
@@ -467,11 +470,10 @@ static int rsc_parse(struct cache_detail *cd,
/* gid's */
status = -EINVAL;
for (i=0; i<N; i++) {
- gid_t gid;
kgid_t kgid;
- if (get_int(&mesg, &gid))
+ if (get_int(&mesg, &id))
goto out;
- kgid = make_kgid(&init_user_ns, gid);
+ kgid = make_kgid(&init_user_ns, id);
if (!gid_valid(kgid))
goto out;
GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
@@ -817,13 +819,17 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
 * The server uses the base of the head iovec as its read pointer, while
 * the client uses a separate pointer. */
static int
-unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
int stat = -EINVAL;
u32 integ_len, maj_stat;
struct xdr_netobj mic;
struct xdr_buf integ_buf;
+ /* Did we already verify the signature on the original pass through? */
+ if (rqstp->rq_deferred)
+ return 0;
+
integ_len = svc_getnl(&buf->head[0]);
if (integ_len & 3)
return stat;
@@ -846,6 +852,8 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
goto out;
if (svc_getnl(&buf->head[0]) != seq)
goto out;
+ /* trim off the mic at the end before returning */
+ xdr_buf_trim(buf, mic.len + 4);
stat = 0;
out:
kfree(mic.data);
@@ -1190,7 +1198,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
/* placeholders for length and seq. number: */
svc_putnl(resv, 0);
svc_putnl(resv, 0);
- if (unwrap_integ_data(&rqstp->rq_arg,
+ if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
gc->gc_seq, rsci->mechctx))
goto garbage_args;
break;
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 52c5abdee211..dc37021fc3e5 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -18,8 +18,8 @@
struct unx_cred {
struct rpc_cred uc_base;
- gid_t uc_gid;
- gid_t uc_gids[NFS_NGROUPS];
+ kgid_t uc_gid;
+ kgid_t uc_gids[NFS_NGROUPS];
};
#define uc_uid uc_base.cr_uid
@@ -65,7 +65,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
unsigned int i;
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
- acred->uid, acred->gid);
+ from_kuid(&init_user_ns, acred->uid),
+ from_kgid(&init_user_ns, acred->gid));
if (!(cred = kmalloc(sizeof(*cred), GFP_NOFS)))
return ERR_PTR(-ENOMEM);
@@ -79,13 +80,10 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
groups = NFS_NGROUPS;
cred->uc_gid = acred->gid;
- for (i = 0; i < groups; i++) {
- gid_t gid;
- gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
- cred->uc_gids[i] = gid;
- }
+ for (i = 0; i < groups; i++)
+ cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
if (i < NFS_NGROUPS)
- cred->uc_gids[i] = NOGROUP;
+ cred->uc_gids[i] = INVALID_GID;
return &cred->uc_base;
}
@@ -123,21 +121,17 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
unsigned int i;
- if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
+ if (!uid_eq(cred->uc_uid, acred->uid) || !gid_eq(cred->uc_gid, acred->gid))
return 0;
if (acred->group_info != NULL)
groups = acred->group_info->ngroups;
if (groups > NFS_NGROUPS)
groups = NFS_NGROUPS;
- for (i = 0; i < groups ; i++) {
- gid_t gid;
- gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
- if (cred->uc_gids[i] != gid)
+ for (i = 0; i < groups ; i++)
+ if (!gid_eq(cred->uc_gids[i], GROUP_AT(acred->group_info, i)))
return 0;
- }
- if (groups < NFS_NGROUPS &&
- cred->uc_gids[groups] != NOGROUP)
+ if (groups < NFS_NGROUPS && gid_valid(cred->uc_gids[groups]))
return 0;
return 1;
}
@@ -163,11 +157,11 @@ unx_marshal(struct rpc_task *task, __be32 *p)
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
- *p++ = htonl((u32) cred->uc_uid);
- *p++ = htonl((u32) cred->uc_gid);
+ *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid));
+ *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid));
hold = p++;
- for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
- *p++ = htonl((u32) cred->uc_gids[i]);
+ for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++)
+ *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i]));
*hold = htonl(p - hold - 1); /* gid array length */
*base = htonl((p - base - 1) << 2); /* cred length */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 9afa4393c217..25d58e766014 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -196,9 +196,9 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_update);
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
- if (!cd->cache_upcall)
- return -EINVAL;
- return cd->cache_upcall(cd, h);
+ if (cd->cache_upcall)
+ return cd->cache_upcall(cd, h);
+ return sunrpc_cache_pipe_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
@@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
struct list_head pending;
- struct hlist_node *lp, *tmp;
+ struct hlist_node *tmp;
int hash = DFR_HASH(item);
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
- hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+ hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
if (dreq->item == item) {
__unhash_deferred_req(dreq);
list_add(&dreq->recent, &pending);
@@ -750,12 +750,24 @@ struct cache_reader {
int offset; /* if non-0, we have a refcnt on next request */
};
+static int cache_request(struct cache_detail *detail,
+ struct cache_request *crq)
+{
+ char *bp = crq->buf;
+ int len = PAGE_SIZE;
+
+ detail->cache_request(detail, crq->item, &bp, &len);
+ if (len < 0)
+ return -EAGAIN;
+ return PAGE_SIZE - len;
+}
+
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
loff_t *ppos, struct cache_detail *cd)
{
struct cache_reader *rp = filp->private_data;
struct cache_request *rq;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int err;
if (count == 0)
@@ -784,6 +796,13 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
rq->readers++;
spin_unlock(&queue_lock);
+ if (rq->len == 0) {
+ err = cache_request(cd, rq);
+ if (err < 0)
+ goto out;
+ rq->len = err;
+ }
+
if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
err = -EAGAIN;
spin_lock(&queue_lock);
@@ -886,7 +905,7 @@ static ssize_t cache_write(struct file *filp, const char __user *buf,
struct cache_detail *cd)
{
struct address_space *mapping = filp->f_mapping;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
ssize_t ret = -EINVAL;
if (!cd->cache_parse)
@@ -1140,17 +1159,14 @@ static bool cache_listeners_exist(struct cache_detail *detail)
*
* Each request is at most one page long.
*/
-int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
- void (*cache_request)(struct cache_detail *,
- struct cache_head *,
- char **,
- int *))
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
char *buf;
struct cache_request *crq;
- char *bp;
- int len;
+
+ if (!detail->cache_request)
+ return -EINVAL;
if (!cache_listeners_exist(detail)) {
warn_no_listener(detail);
@@ -1167,19 +1183,10 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
return -EAGAIN;
}
- bp = buf; len = PAGE_SIZE;
-
- cache_request(detail, h, &bp, &len);
-
- if (len < 0) {
- kfree(buf);
- kfree(crq);
- return -EAGAIN;
- }
crq->q.reader = 0;
crq->item = cache_get(h);
crq->buf = buf;
- crq->len = PAGE_SIZE - len;
+ crq->len = 0;
crq->readers = 0;
spin_lock(&queue_lock);
list_add_tail(&crq->q.list, &detail->queue);
@@ -1454,7 +1461,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct cache_detail *cd = PDE(file_inode(filp))->data;
return cache_read(filp, buf, count, ppos, cd);
}
@@ -1462,14 +1469,14 @@ static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct cache_detail *cd = PDE(file_inode(filp))->data;
return cache_write(filp, buf, count, ppos, cd);
}
static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct cache_detail *cd = PDE(file_inode(filp))->data;
return cache_poll(filp, wait, cd);
}
@@ -1477,7 +1484,7 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
static long cache_ioctl_procfs(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct cache_detail *cd = PDE(inode)->data;
return cache_ioctl(inode, filp, cmd, arg, cd);
@@ -1546,7 +1553,7 @@ static int release_flush_procfs(struct inode *inode, struct file *filp)
static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct cache_detail *cd = PDE(file_inode(filp))->data;
return read_flush(filp, buf, count, ppos, cd);
}
@@ -1555,7 +1562,7 @@ static ssize_t write_flush_procfs(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct cache_detail *cd = PDE(file_inode(filp))->data;
return write_flush(filp, buf, count, ppos, cd);
}
@@ -1605,7 +1612,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
if (p == NULL)
goto out_nomem;
- if (cd->cache_upcall || cd->cache_parse) {
+ if (cd->cache_request || cd->cache_parse) {
p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
cd->u.procfs.proc_ent,
&cache_file_operations_procfs, cd);
@@ -1614,7 +1621,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
goto out_nomem;
}
if (cd->cache_show) {
- p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
+ p = proc_create_data("content", S_IFREG|S_IRUSR,
cd->u.procfs.proc_ent,
&content_file_operations_procfs, cd);
cd->u.procfs.content_ent = p;
@@ -1686,7 +1693,7 @@ EXPORT_SYMBOL_GPL(cache_destroy_net);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+ struct cache_detail *cd = RPC_I(file_inode(filp))->private;
return cache_read(filp, buf, count, ppos, cd);
}
@@ -1694,14 +1701,14 @@ static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+ struct cache_detail *cd = RPC_I(file_inode(filp))->private;
return cache_write(filp, buf, count, ppos, cd);
}
static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
- struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+ struct cache_detail *cd = RPC_I(file_inode(filp))->private;
return cache_poll(filp, wait, cd);
}
@@ -1709,7 +1716,7 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
static long cache_ioctl_pipefs(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct cache_detail *cd = RPC_I(inode)->private;
return cache_ioctl(inode, filp, cmd, arg, cd);
@@ -1778,7 +1785,7 @@ static int release_flush_pipefs(struct inode *inode, struct file *filp)
static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+ struct cache_detail *cd = RPC_I(file_inode(filp))->private;
return read_flush(filp, buf, count, ppos, cd);
}
@@ -1787,7 +1794,7 @@ static ssize_t write_flush_pipefs(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+ struct cache_detail *cd = RPC_I(file_inode(filp))->private;
return write_flush(filp, buf, count, ppos, cd);
}
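
For reference (not part of the patch): the repeated f_path.dentry->d_inode replacements above all use the file_inode() accessor. A trivial sketch with a hypothetical demo_same_inode() helper:

#include <linux/fs.h>

static bool demo_same_inode(struct file *a, struct file *b)
{
	/* replaces the open-coded a->f_path.dentry->d_inode dereference */
	return file_inode(a) == file_inode(b);
}
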
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a9f7906c1a6a..dcc446e7fbf6 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -33,6 +33,7 @@
#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
@@ -1196,6 +1197,21 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
EXPORT_SYMBOL_GPL(rpc_max_payload);
/**
+ * rpc_get_timeout - Get timeout for transport in units of HZ
+ * @clnt: RPC client to query
+ */
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
+{
+ unsigned long ret;
+
+ rcu_read_lock();
+ ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_get_timeout);
+
+/**
* rpc_force_rebind - force transport to check that remote port is unchanged
* @clnt: client to rebind
*
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fd10981ea792..7b9b40224a27 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -284,7 +284,7 @@ out:
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct rpc_pipe *pipe;
struct rpc_pipe_msg *msg;
int res = 0;
@@ -328,7 +328,7 @@ out_unlock:
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int res;
mutex_lock(&inode->i_mutex);
@@ -342,7 +342,7 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct rpc_inode *rpci = RPC_I(inode);
unsigned int mask = POLLOUT | POLLWRNORM;
@@ -360,7 +360,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
static long
rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct rpc_pipe *pipe;
int len;
@@ -830,7 +830,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* responses to upcalls. They will result in calls to @msg->downcall.
*
* The @private argument passed here will be available to all these methods
- * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
+ * from the file pointer, via RPC_I(file_inode(file))->private.
*/
struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
void *private, struct rpc_pipe *pipe)
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 795a0f4e920b..3df764dc330c 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -26,6 +26,7 @@
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index dbf12ac5ecb7..89a588b4478b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
- /*
- * The set of xprts (contained in the sv_tempsocks and
- * sv_permsocks lists) is now constant, since it is modified
- * only by accepting new sockets (done by service threads in
- * svc_recv) or aging old ones (done by sv_temptimer), or
- * configuration changes (excluded by whatever locking the
- * caller is using--nfsd_mutex in the case of nfsd). So it's
- * safe to traverse those lists and shut everything down:
- */
svc_close_net(serv, net);
if (serv->sv_shutdown)
@@ -1042,6 +1033,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
/*
* dprintk the given error with the address of the client that caused it.
*/
+#ifdef RPC_DEBUG
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
@@ -1058,6 +1050,9 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
va_end(args);
}
+#else
+static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
+#endif
/*
* Common routine for processing the RPC request.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b8e47fac7315..80a6640f329b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -499,7 +499,8 @@ void svc_wake_up(struct svc_serv *serv)
rqstp->rq_xprt = NULL;
*/
wake_up(&rqstp->rq_wait);
- }
+ } else
+ pool->sp_task_pending = 1;
spin_unlock_bh(&pool->sp_lock);
}
}
@@ -634,7 +635,13 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
+ pool->sp_task_pending = 0;
} else {
+ if (pool->sp_task_pending) {
+ pool->sp_task_pending = 0;
+ spin_unlock_bh(&pool->sp_lock);
+ return ERR_PTR(-EAGAIN);
+ }
/* No data pending. Go to sleep */
svc_thread_enqueue(pool, rqstp);
@@ -856,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure)
struct svc_serv *serv = (struct svc_serv *)closure;
struct svc_xprt *xprt;
struct list_head *le, *next;
- LIST_HEAD(to_be_aged);
dprintk("svc_age_temp_xprts\n");
@@ -877,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure)
if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
test_bit(XPT_BUSY, &xprt->xpt_flags))
continue;
- svc_xprt_get(xprt);
- list_move(le, &to_be_aged);
+ list_del_init(le);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
set_bit(XPT_DETACHED, &xprt->xpt_flags);
- }
- spin_unlock_bh(&serv->sv_lock);
-
- while (!list_empty(&to_be_aged)) {
- le = to_be_aged.next;
- /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
- list_del_init(le);
- xprt = list_entry(le, struct svc_xprt, xpt_list);
-
dprintk("queuing xprt %p for closing\n", xprt);
/* a thread will dequeue and close it soon */
svc_xprt_enqueue(xprt);
- svc_xprt_put(xprt);
}
+ spin_unlock_bh(&serv->sv_lock);
mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
@@ -959,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
struct svc_xprt *xprt;
+ int ret = 0;
spin_lock(&serv->sv_lock);
list_for_each_entry(xprt, xprt_list, xpt_list) {
if (xprt->xpt_net != net)
continue;
+ ret++;
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- set_bit(XPT_BUSY, &xprt->xpt_flags);
+ svc_xprt_enqueue(xprt);
}
spin_unlock(&serv->sv_lock);
+ return ret;
}
-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
struct svc_pool *pool;
struct svc_xprt *xprt;
@@ -988,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
if (xprt->xpt_net != net)
continue;
list_del_init(&xprt->xpt_ready);
+ spin_unlock_bh(&pool->sp_lock);
+ return xprt;
}
spin_unlock_bh(&pool->sp_lock);
}
+ return NULL;
}
-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
- LIST_HEAD(victims);
- spin_lock(&serv->sv_lock);
- list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
- if (xprt->xpt_net != net)
- continue;
- list_move(&xprt->xpt_list, &victims);
- }
- spin_unlock(&serv->sv_lock);
-
- list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+ while ((xprt = svc_dequeue_net(serv, net))) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_delete_xprt(xprt);
+ }
}
+/*
+ * Server threads may still be running (especially in the case where the
+ * service is still running in other network namespaces).
+ *
+ * So we shut down sockets the same way we would on a running server, by
+ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
+ * the close. In the case where there are no such other threads
+ * running, svc_clean_up_xprts() does a simple version of a
+ * server's main event loop, and in the case where there are other
+ * threads, we may need to wait a little while and then check again to
+ * see if they're done.
+ */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
- svc_close_list(serv, &serv->sv_tempsocks, net);
- svc_close_list(serv, &serv->sv_permsocks, net);
+ int delay = 0;
- svc_clear_pools(serv, net);
- /*
- * At this point the sp_sockets lists will stay empty, since
- * svc_xprt_enqueue will not add new entries without taking the
- * sp_lock and checking XPT_BUSY.
- */
- svc_clear_list(serv, &serv->sv_tempsocks, net);
- svc_clear_list(serv, &serv->sv_permsocks, net);
+ while (svc_close_list(serv, &serv->sv_permsocks, net) +
+ svc_close_list(serv, &serv->sv_tempsocks, net)) {
+
+ svc_clean_up_xprts(serv, net);
+ msleep(delay++);
+ }
}
/*
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 7963569fc04f..2af7b0cba43a 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new)
{
struct auth_domain *hp;
struct hlist_head *head;
- struct hlist_node *np;
head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
spin_lock(&auth_domain_lock);
- hlist_for_each_entry(hp, np, head, hash) {
+ hlist_for_each_entry(hp, head, hash) {
if (strcmp(hp->name, name)==0) {
kref_get(&hp->ref);
spin_unlock(&auth_domain_lock);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4d0129203733..c3f9e1ef7f53 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -6,6 +6,7 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
@@ -17,7 +18,6 @@
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
-#include <linux/sunrpc/clnt.h>
#include "netns.h"
@@ -157,11 +157,6 @@ static void ip_map_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
-}
-
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
@@ -415,10 +410,15 @@ svcauth_unix_info_release(struct svc_xprt *xpt)
struct unix_gid {
struct cache_head h;
- uid_t uid;
+ kuid_t uid;
struct group_info *gi;
};
+static int unix_gid_hash(kuid_t uid)
+{
+ return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
+}
+
static void unix_gid_put(struct kref *kref)
{
struct cache_head *item = container_of(kref, struct cache_head, ref);
@@ -433,7 +433,7 @@ static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
struct unix_gid *orig = container_of(corig, struct unix_gid, h);
struct unix_gid *new = container_of(cnew, struct unix_gid, h);
- return orig->uid == new->uid;
+ return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
@@ -465,23 +465,19 @@ static void unix_gid_request(struct cache_detail *cd,
char tuid[20];
struct unix_gid *ug = container_of(h, struct unix_gid, h);
- snprintf(tuid, 20, "%u", ug->uid);
+ snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
qword_add(bpp, blen, tuid);
(*bpp)[-1] = '\n';
}
-static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
-}
-
-static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid);
+static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
static int unix_gid_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
/* uid expiry Ngid gid0 gid1 ... gidN-1 */
- int uid;
+ int id;
+ kuid_t uid;
int gids;
int rv;
int i;
@@ -493,9 +489,12 @@ static int unix_gid_parse(struct cache_detail *cd,
return -EINVAL;
mesg[mlen-1] = 0;
- rv = get_int(&mesg, &uid);
+ rv = get_int(&mesg, &id);
if (rv)
return -EINVAL;
+ uid = make_kuid(&init_user_ns, id);
+ if (!uid_valid(uid))
+ return -EINVAL;
ug.uid = uid;
expiry = get_expiry(&mesg);
@@ -530,7 +529,7 @@ static int unix_gid_parse(struct cache_detail *cd,
ug.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd,
&ug.h, &ugp->h,
- hash_long(uid, GID_HASHBITS));
+ unix_gid_hash(uid));
if (!ch)
err = -ENOMEM;
else {
@@ -549,7 +548,7 @@ static int unix_gid_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = &init_user_ns;
struct unix_gid *ug;
int i;
int glen;
@@ -565,7 +564,7 @@ static int unix_gid_show(struct seq_file *m,
else
glen = 0;
- seq_printf(m, "%u %d:", ug->uid, glen);
+ seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
for (i = 0; i < glen; i++)
seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i)));
seq_printf(m, "\n");
@@ -577,7 +576,7 @@ static struct cache_detail unix_gid_cache_template = {
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
.cache_put = unix_gid_put,
- .cache_upcall = unix_gid_upcall,
+ .cache_request = unix_gid_request,
.cache_parse = unix_gid_parse,
.cache_show = unix_gid_show,
.match = unix_gid_match,
@@ -615,20 +614,20 @@ void unix_gid_cache_destroy(struct net *net)
cache_destroy_net(cd, net);
}
-static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid)
+static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
struct unix_gid ug;
struct cache_head *ch;
ug.uid = uid;
- ch = sunrpc_cache_lookup(cd, &ug.h, hash_long(uid, GID_HASHBITS));
+ ch = sunrpc_cache_lookup(cd, &ug.h, unix_gid_hash(uid));
if (ch)
return container_of(ch, struct unix_gid, h);
else
return NULL;
}
-static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
+static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
struct unix_gid *ug;
struct group_info *gi;
@@ -750,8 +749,8 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
}
/* Signal that mapping to nobody uid/gid is required */
- cred->cr_uid = (uid_t) -1;
- cred->cr_gid = (gid_t) -1;
+ cred->cr_uid = INVALID_UID;
+ cred->cr_gid = INVALID_GID;
cred->cr_group_info = groups_alloc(0);
if (cred->cr_group_info == NULL)
return SVC_CLOSE; /* kmalloc failure - client must retry */
@@ -812,8 +811,10 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
argv->iov_len -= slen*4;
- cred->cr_uid = svc_getnl(argv); /* uid */
- cred->cr_gid = svc_getnl(argv); /* gid */
+ cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
+ cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
+ if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
+ goto badcred;
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
@@ -874,7 +875,7 @@ static struct cache_detail ip_map_cache_template = {
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
.cache_put = ip_map_put,
- .cache_upcall = ip_map_upcall,
+ .cache_request = ip_map_request,
.cache_parse = ip_map_parse,
.cache_show = ip_map_show,
.match = ip_map_match,
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 56055632f151..75edcfad6e26 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -879,6 +879,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+ size_t cur;
+ unsigned int trim = len;
+
+ if (buf->tail[0].iov_len) {
+ cur = min_t(size_t, buf->tail[0].iov_len, trim);
+ buf->tail[0].iov_len -= cur;
+ trim -= cur;
+ if (!trim)
+ goto fix_len;
+ }
+
+ if (buf->page_len) {
+ cur = min_t(unsigned int, buf->page_len, trim);
+ buf->page_len -= cur;
+ trim -= cur;
+ if (!trim)
+ goto fix_len;
+ }
+
+ if (buf->head[0].iov_len) {
+ cur = min_t(size_t, buf->head[0].iov_len, trim);
+ buf->head[0].iov_len -= cur;
+ trim -= cur;
+ }
+fix_len:
+ buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 846c34fdee9f..b7478d5e7ffd 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -487,13 +487,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
* @task: task to be put to sleep
* @action: function pointer to be executed after wait
+ *
+ * Note that we only set the timer for the case of RPC_IS_SOFT(), since
+ * we don't in general want to force a socket disconnection due to
+ * an incomplete RPC call transmission.
*/
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- task->tk_timeout = req->rq_timeout;
+ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index d0074289708e..794312f22b9b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -51,6 +51,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/sunrpc/addr.h>
#include "xprt_rdma.h"
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 745973b729af..93726560eaa8 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1086,7 +1086,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
case RPCRDMA_MEMWINDOWS:
/* Allocate one extra request's worth, for full cycling */
for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
- r->r.mw = ib_alloc_mw(ia->ri_pd);
+ r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1);
if (IS_ERR(r->r.mw)) {
rc = PTR_ERR(r->r.mw);
dprintk("RPC: %s: ib_alloc_mw"
@@ -1673,12 +1673,12 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
*nsegs = 1;
rpcrdma_map_one(ia, seg, writing);
- param.mr = ia->ri_bind_mem;
+ param.bind_info.mr = ia->ri_bind_mem;
param.wr_id = 0ULL; /* no send cookie */
- param.addr = seg->mr_dma;
- param.length = seg->mr_len;
+ param.bind_info.addr = seg->mr_dma;
+ param.bind_info.length = seg->mr_len;
param.send_flags = 0;
- param.mw_access_flags = mem_priv;
+ param.bind_info.mw_access_flags = mem_priv;
DECR_CQCOUNT(&r_xprt->rx_ep);
rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
@@ -1690,7 +1690,7 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
rpcrdma_unmap_one(ia, seg);
} else {
seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
- seg->mr_base = param.addr;
+ seg->mr_base = param.bind_info.addr;
seg->mr_nsegs = 1;
}
return rc;
@@ -1706,10 +1706,10 @@ rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg,
int rc;
BUG_ON(seg->mr_nsegs != 1);
- param.mr = ia->ri_bind_mem;
- param.addr = 0ULL; /* unbind */
- param.length = 0;
- param.mw_access_flags = 0;
+ param.bind_info.mr = ia->ri_bind_mem;
+ param.bind_info.addr = 0ULL; /* unbind */
+ param.bind_info.length = 0;
+ param.bind_info.mw_access_flags = 0;
if (*r) {
param.wr_id = (u64) (unsigned long) *r;
param.send_flags = IB_SEND_SIGNALED;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 37cbda63f45c..c1d8476b7692 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -33,6 +33,7 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
@@ -1867,13 +1868,9 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
* @xprt: RPC transport to connect
* @transport: socket transport to connect
* @create_sock: function to create a socket of the correct type
- *
- * Invoked by a work queue tasklet.
*/
-static void xs_local_setup_socket(struct work_struct *work)
+static int xs_local_setup_socket(struct sock_xprt *transport)
{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
struct socket *sock;
int status = -EIO;
@@ -1918,6 +1915,30 @@ out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
current->flags &= ~PF_FSTRANS;
+ return status;
+}
+
+static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ int ret;
+
+ if (RPC_IS_ASYNC(task)) {
+ /*
+ * We want the AF_LOCAL connect to be resolved in the
+ * filesystem namespace of the process making the rpc
+ * call. Thus we connect synchronously.
+ *
+ * If we want to support asynchronous AF_LOCAL calls,
+ * we'll need to figure out how to pass a namespace to
+ * connect.
+ */
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
+ ret = xs_local_setup_socket(transport);
+ if (ret && !RPC_IS_SOFTCONN(task))
+ msleep_interruptible(15000);
}
#ifdef CONFIG_SUNRPC_SWAP
@@ -2455,7 +2476,7 @@ static struct rpc_xprt_ops xs_local_ops = {
.alloc_slot = xprt_alloc_slot,
.rpcbind = xs_local_rpcbind,
.set_port = xs_local_set_port,
- .connect = xs_connect,
+ .connect = xs_local_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
.send_request = xs_local_send_request,
@@ -2628,8 +2649,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
goto out_err;
}
xprt_set_bound(xprt);
- INIT_DELAYED_WORK(&transport->connect_worker,
- xs_local_setup_socket);
xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
break;
default:
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46754779fd3d..24b167914311 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
static struct name_seq *nametbl_find_seq(u32 type)
{
struct hlist_head *seq_head;
- struct hlist_node *seq_node;
struct name_seq *ns;
seq_head = &table.types[hash(type)];
- hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+ hlist_for_each_entry(ns, seq_head, ns_list) {
if (ns->type == type)
return ns;
}
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
u32 type, u32 lowbound, u32 upbound)
{
struct hlist_head *seq_head;
- struct hlist_node *seq_node;
struct name_seq *seq;
int all_types;
int ret = 0;
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
upbound = ~0;
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+ hlist_for_each_entry(seq, seq_head, ns_list) {
ret += nameseq_list(seq, buf + ret, len - ret,
depth, seq->type,
lowbound, upbound, i);
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
ret += nametbl_header(buf + ret, len - ret, depth);
i = hash(type);
seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+ hlist_for_each_entry(seq, seq_head, ns_list) {
if (seq->type == type) {
ret += nameseq_list(seq, buf + ret, len - ret,
depth, type,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 48f39dd3eae8..6e6c434872e8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr)
struct tipc_node *tipc_node_find(u32 addr)
{
struct tipc_node *node;
- struct hlist_node *pos;
if (unlikely(!in_own_cluster_exact(addr)))
return NULL;
- hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
+ hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
if (node->addr == addr)
return node;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87d284289012..51be64f163ec 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net,
int len, int type, unsigned int hash)
{
struct sock *s;
- struct hlist_node *node;
- sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
+ sk_for_each(s, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
if (!net_eq(sock_net(s), net))
@@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
- struct hlist_node *node;
spin_lock(&unix_table_lock);
- sk_for_each(s, node,
+ sk_for_each(s,
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 5ac19dc1d5e4..d591091603bf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
slot < ARRAY_SIZE(unix_socket_table);
s_num = 0, slot++) {
struct sock *sk;
- struct hlist_node *node;
num = 0;
- sk_for_each(sk, node, &unix_socket_table[slot]) {
+ sk_for_each(sk, &unix_socket_table[slot]) {
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
@@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino)
spin_lock(&unix_table_lock);
for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
- struct hlist_node *node;
-
- sk_for_each(sk, node, &unix_socket_table[i])
+ sk_for_each(sk, &unix_socket_table[i])
if (ino == sock_i_ino(sk)) {
sock_hold(sk);
spin_unlock(&unix_table_lock);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index b6f4b994eb35..d0f6545b0010 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -99,7 +99,7 @@ unsigned int unix_tot_inflight;
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
/*
* Socket ?
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a306bc66000e..37ca9694aabe 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk)
static void x25_kill_by_device(struct net_device *dev)
{
struct sock *s;
- struct hlist_node *node;
write_lock_bh(&x25_list_lock);
- sk_for_each(s, node, &x25_list)
+ sk_for_each(s, &x25_list)
if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
x25_disconnect(s, ENETUNREACH, 0, 0);
@@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr,
{
struct sock *s;
struct sock *next_best;
- struct hlist_node *node;
read_lock_bh(&x25_list_lock);
next_best = NULL;
- sk_for_each(s, node, &x25_list)
+ sk_for_each(s, &x25_list)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
!strcmp(addr->x25_addr,
@@ -323,9 +321,8 @@ found:
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
struct sock *s;
- struct hlist_node *node;
- sk_for_each(s, node, &x25_list)
+ sk_for_each(s, &x25_list)
if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
sock_hold(s);
goto found;
@@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = {
void x25_kill_by_neigh(struct x25_neigh *nb)
{
struct sock *s;
- struct hlist_node *node;
write_lock_bh(&x25_list_lock);
- sk_for_each(s, node, &x25_list)
+ sk_for_each(s, &x25_list)
if (x25_sk(s)->neighbour == nb)
x25_disconnect(s, ENETUNREACH, 0, 0);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5b47180986f8..167c67d46c6a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp, *entry0 = NULL;
+ struct hlist_node *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
unsigned int h0 = 0;
redo:
- hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
+ hlist_for_each_entry_safe(pol, tmp, list, bydst) {
unsigned int h;
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask);
if (!entry0) {
- hlist_del(entry);
+ hlist_del(&pol->bydst);
hlist_add_head(&pol->bydst, ndsttable+h);
h0 = h;
} else {
if (h != h0)
continue;
- hlist_del(entry);
+ hlist_del(&pol->bydst);
hlist_add_after(entry0, &pol->bydst);
}
- entry0 = entry;
+ entry0 = &pol->bydst;
}
if (!hlist_empty(list)) {
entry0 = NULL;
@@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list,
struct hlist_head *nidxtable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
struct xfrm_policy *pol;
- hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
+ hlist_for_each_entry_safe(pol, tmp, list, byidx) {
unsigned int h;
h = __idx_hash(pol->index, nhashmask);
@@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir)
static u32 idx_generator;
for (;;) {
- struct hlist_node *entry;
struct hlist_head *list;
struct xfrm_policy *p;
u32 idx;
@@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir)
idx = 8;
list = net->xfrm.policy_byidx + idx_hash(net, idx);
found = 0;
- hlist_for_each_entry(p, entry, list, byidx) {
+ hlist_for_each_entry(p, list, byidx) {
if (p->index == idx) {
found = 1;
break;
@@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct xfrm_policy *pol;
struct xfrm_policy *delpol;
struct hlist_head *chain;
- struct hlist_node *entry, *newpos;
+ struct hlist_node *newpos;
write_lock_bh(&xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(policy, pol) &&
@@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
- struct hlist_node *entry;
*err = 0;
write_lock_bh(&xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
(mark & pol->mark.m) == pol->mark.v &&
!selector_cmp(sel, &pol->selector) &&
@@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
- struct hlist_node *entry;
*err = -ENOENT;
if (xfrm_policy_id2dir(id) != dir)
@@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
write_lock_bh(&xfrm_policy_lock);
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, byidx) {
+ hlist_for_each_entry(pol, chain, byidx) {
if (pol->type == type && pol->index == id &&
(mark & pol->mark.m) == pol->mark.v) {
xfrm_pol_hold(pol);
@@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy *pol;
- struct hlist_node *entry;
int i;
- hlist_for_each_entry(pol, entry,
+ hlist_for_each_entry(pol,
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
@@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
}
}
for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry,
+ hlist_for_each_entry(pol,
net->xfrm.policy_bydst[dir].table + i,
bydst) {
if (pol->type != type)
@@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy *pol;
- struct hlist_node *entry;
int i;
again1:
- hlist_for_each_entry(pol, entry,
+ hlist_for_each_entry(pol,
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
@@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
again2:
- hlist_for_each_entry(pol, entry,
+ hlist_for_each_entry(pol,
net->xfrm.policy_bydst[dir].table + i,
bydst) {
if (pol->type != type)
@@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
int err;
struct xfrm_policy *pol, *ret;
const xfrm_address_t *daddr, *saddr;
- struct hlist_node *entry;
struct hlist_head *chain;
u32 priority = ~0U;
@@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
read_lock_bh(&xfrm_policy_lock);
chain = policy_hash_direct(net, daddr, saddr, family, dir);
ret = NULL;
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
}
chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
u8 dir, u8 type)
{
struct xfrm_policy *pol, *ret = NULL;
- struct hlist_node *entry;
struct hlist_head *chain;
u32 priority = ~0U;
read_lock_bh(&xfrm_policy_lock);
chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
@@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
}
}
chain = &init_net.xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, entry, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type &&
pol->priority < priority) {
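
[editor's note] For the _safe variant only the separate cursor disappears; the tmp pointer needed for safe traversal stays, and removals inside the loop now operate on the entry's own member, as the xfrm_dst_hash_transfer hunk above does with &pol->bydst. A sketch with hypothetical bar/rehash names, assuming <linux/list.h>:

	struct bar {
		struct hlist_node node;
	};

	static struct hlist_head old_head, new_head;

	static void bar_rehash(void)
	{
		struct bar *b;
		struct hlist_node *tmp;

		/* tmp lets the current entry be unhashed mid-iteration */
		hlist_for_each_entry_safe(b, tmp, &old_head, node) {
			hlist_del(&b->node);            /* was hlist_del(entry) */
			hlist_add_head(&b->node, &new_head);
		}
	}
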
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ae01bdbcb294..2c341bdaf47c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *nspitable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
struct xfrm_state *x;
- hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
+ hlist_for_each_entry_safe(x, tmp, list, bydst) {
unsigned int h;
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
@@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_gc_work);
struct xfrm_state *x;
- struct hlist_node *entry, *tmp;
+ struct hlist_node *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
- hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
+ hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
wake_up(&net->xfrm.km_waitq);
@@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
int i, err = 0;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct hlist_node *entry;
struct xfrm_state *x;
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
if (xfrm_id_proto_match(x->id.proto, proto) &&
(err = security_xfrm_state_delete(x)) != 0) {
xfrm_audit_state_delete(x, 0,
@@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct hlist_node *entry;
struct xfrm_state *x;
restart:
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
if (!xfrm_state_kern(x) &&
xfrm_id_proto_match(x->id.proto, proto)) {
xfrm_state_hold(x);
@@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
- struct hlist_node *entry;
- hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
+ hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
@@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
{
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
- struct hlist_node *entry;
- hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
+ hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
static xfrm_address_t saddr_wildcard = { };
struct net *net = xp_net(pol);
unsigned int h, h_wildcard;
- struct hlist_node *entry;
struct xfrm_state *x, *x0, *to_put;
int acquire_in_progress = 0;
int error = 0;
@@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
spin_lock_bh(&xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark,
{
unsigned int h;
struct xfrm_state *rx = NULL, *x = NULL;
- struct hlist_node *entry;
spin_lock(&xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
unsigned short family = xnew->props.family;
u32 reqid = xnew->props.reqid;
struct xfrm_state *x;
- struct hlist_node *entry;
unsigned int h;
u32 mark = xnew->mark.v & xnew->mark.m;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
const xfrm_address_t *saddr, int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
- struct hlist_node *entry;
struct xfrm_state *x;
u32 mark = m->v & m->m;
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.reqid != reqid ||
x->props.mode != mode ||
x->props.family != family ||
@@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
unsigned int h;
struct xfrm_state *x;
- struct hlist_node *entry;
if (m->reqid) {
h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
m->reqid, m->old_family);
- hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
@@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
} else {
h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
m->old_family);
- hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
+ hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
@@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
int i;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
- struct hlist_node *entry;
struct xfrm_state *x;
- hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
if (x->km.seq == seq &&
(mark & x->mark.m) == x->mark.v &&
x->km.state == XFRM_STATE_ACQ) {
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 06ba4a70bd4d..25f216a841d5 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -7,15 +7,15 @@
#
# ==========================================================================
-# called may set destination dir (when installing to asm/)
-_dst := $(or $(destination-y),$(dst),$(obj))
-
# generated header directory
gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
kbuild-file := $(srctree)/$(obj)/Kbuild
include $(kbuild-file)
+# called may set destination dir (when installing to asm/)
+_dst := $(or $(destination-y),$(dst),$(obj))
+
old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild
ifneq ($(wildcard $(old-kbuild-file)),)
include $(old-kbuild-file)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index a1cb0222ebe6..cf82c832458f 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -66,10 +66,6 @@ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
# Stop after building .o files if NOFINAL is set. Makes compile tests quicker
_modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
-ifneq ($(KBUILD_BUILDHOST),$(ARCH))
- cross_build := 1
-endif
-
# Step 2), invoke modpost
# Includes step 3,4
modpost = scripts/mod/modpost \
@@ -80,8 +76,7 @@ modpost = scripts/mod/modpost \
$(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
- $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \
- $(if $(cross_build),-c)
+ $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
cmd_modpost = $(modpost) -s
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 747bcd768da0..b28cc384a5bc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2930,7 +2930,7 @@ sub process {
my $var = $1;
if ($var !~ /$Constant/ &&
$var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ &&
- $var !~ /^Page[A-Z]/ &&
+ $var !~ /"^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
!defined $camelcase{$var}) {
$camelcase{$var} = 1;
WARN("CAMELCASE",
diff --git a/scripts/coccicheck b/scripts/coccicheck
index 1a49d1c7ecfe..85d31899ad98 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -2,6 +2,15 @@
SPATCH="`which ${SPATCH:=spatch}`"
+# The verbosity may be set by the environmental parameter V=
+# as for example with 'make V=1 coccicheck'
+
+if [ -n "$V" -a "$V" != "0" ]; then
+ VERBOSE=1
+else
+ VERBOSE=0
+fi
+
if [ "$C" = "1" -o "$C" = "2" ]; then
ONLINE=1
@@ -46,6 +55,14 @@ if [ "$ONLINE" = "0" ] ; then
echo ''
fi
+run_cmd() {
+ if [ $VERBOSE -ne 0 ] ; then
+ echo "Running: $@"
+ fi
+ eval $@
+}
+
+
coccinelle () {
COCCI="$1"
@@ -55,7 +72,7 @@ coccinelle () {
#
# $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null
- if [ "$ONLINE" = "0" ] ; then
+ if [ $VERBOSE -ne 0 ] ; then
FILE=`echo $COCCI | sed "s|$srctree/||"`
@@ -91,15 +108,21 @@ coccinelle () {
fi
if [ "$MODE" = "chain" ] ; then
- $SPATCH -D patch $FLAGS -sp_file $COCCI $OPT $OPTIONS || \
- $SPATCH -D report $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \
- $SPATCH -D context $FLAGS -sp_file $COCCI $OPT $OPTIONS || \
- $SPATCH -D org $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1
+ run_cmd $SPATCH -D patch \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS || \
+ run_cmd $SPATCH -D report \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \
+ run_cmd $SPATCH -D context \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS || \
+ run_cmd $SPATCH -D org \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1
elif [ "$MODE" = "rep+ctxt" ] ; then
- $SPATCH -D report $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \
- $SPATCH -D context $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+ run_cmd $SPATCH -D report \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \
+ run_cmd $SPATCH -D context \
+ $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
else
- $SPATCH -D $MODE $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+ run_cmd $SPATCH -D $MODE $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
fi
}
diff --git a/scripts/coccinelle/misc/memcpy-assign.cocci b/scripts/coccinelle/misc/memcpy-assign.cocci
new file mode 100644
index 000000000000..afd058be497f
--- /dev/null
+++ b/scripts/coccinelle/misc/memcpy-assign.cocci
@@ -0,0 +1,103 @@
+//
+// Replace memcpy with struct assignment.
+//
+// Confidence: High
+// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual patch
+virtual report
+virtual context
+virtual org
+
+@r1 depends on !patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name from;
+struct struct_name *top;
+struct struct_name *fromp;
+position p;
+@@
+memcpy@p(\(&(to)\|top\), \(&(from)\|fromp\), \(sizeof(to)\|sizeof(from)\|sizeof(struct struct_name)\|sizeof(*top)\|sizeof(*fromp)\))
+
+@script:python depends on report@
+p << r1.p;
+@@
+coccilib.report.print_report(p[0],"Replace memcpy with struct assignment")
+
+@depends on context@
+position r1.p;
+@@
+*memcpy@p(...);
+
+@script:python depends on org@
+p << r1.p;
+@@
+cocci.print_main("Replace memcpy with struct assignment",p)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name from;
+@@
+(
+-memcpy(&(to), &(from), sizeof(to));
++to = from;
+|
+-memcpy(&(to), &(from), sizeof(from));
++to = from;
+|
+-memcpy(&(to), &(from), sizeof(struct struct_name));
++to = from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name *from;
+@@
+(
+-memcpy(&(to), from, sizeof(to));
++to = *from;
+|
+-memcpy(&(to), from, sizeof(*from));
++to = *from;
+|
+-memcpy(&(to), from, sizeof(struct struct_name));
++to = *from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name *to;
+struct struct_name from;
+@@
+(
+-memcpy(to, &(from), sizeof(*to));
++ *to = from;
+|
+-memcpy(to, &(from), sizeof(from));
++ *to = from;
+|
+-memcpy(to, &(from), sizeof(struct struct_name));
++ *to = from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name *to;
+struct struct_name *from;
+@@
+(
+-memcpy(to, from, sizeof(*to));
++ *to = *from;
+|
+-memcpy(to, from, sizeof(*from));
++ *to = *from;
+|
+-memcpy(to, from, sizeof(struct struct_name));
++ *to = *from;
+)
+
diff --git a/scripts/coccinelle/misc/orplus.cocci b/scripts/coccinelle/misc/orplus.cocci
new file mode 100644
index 000000000000..4a28cef1484e
--- /dev/null
+++ b/scripts/coccinelle/misc/orplus.cocci
@@ -0,0 +1,55 @@
+/// Check for constants that are added but are used elsewhere as bitmasks
+/// The results should be checked manually to ensure that the nonzero
+/// bits in the two constants are actually disjoint.
+///
+// Confidence: Moderate
+// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2013 Gilles Muller, INRIA/LIP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+constant c;
+identifier i;
+expression e;
+@@
+
+(
+e | c@i
+|
+e & c@i
+|
+e |= c@i
+|
+e &= c@i
+)
+
+@s@
+constant r.c,c1;
+identifier i1;
+position p;
+@@
+
+(
+ c1 + c - 1
+|
+*c1@i1 +@p c
+)
+
+@script:python depends on org@
+p << s.p;
+@@
+
+cocci.print_main("sum of probable bitmasks, consider |",p)
+
+@script:python depends on report@
+p << s.p;
+@@
+
+msg = "WARNING: sum of probable bitmasks, consider |"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/misc/semicolon.cocci b/scripts/coccinelle/misc/semicolon.cocci
new file mode 100644
index 000000000000..a47eba2edc9e
--- /dev/null
+++ b/scripts/coccinelle/misc/semicolon.cocci
@@ -0,0 +1,83 @@
+///
+/// Removes unneeded semicolon.
+///
+// Confidence: Moderate
+// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments: Some false positives on empty default cases in switch statements.
+// Options: --no-includes --include-headers
+
+virtual patch
+virtual report
+virtual context
+virtual org
+
+@r_default@
+position p;
+@@
+switch (...)
+{
+default: ...;@p
+}
+
+@r_case@
+position p;
+@@
+(
+switch (...)
+{
+case ...:;@p
+}
+|
+switch (...)
+{
+case ...:...
+case ...:;@p
+}
+|
+switch (...)
+{
+case ...:...
+case ...:
+case ...:;@p
+}
+)
+
+@r1@
+statement S;
+position p1;
+position p != {r_default.p, r_case.p};
+identifier label;
+@@
+(
+label:;
+|
+S@p1;@p
+)
+
+@script:python@
+p << r1.p;
+p1 << r1.p1;
+@@
+if p[0].line != p1[0].line_end:
+ cocci.include_match(False)
+
+@depends on patch@
+position r1.p;
+@@
+-;@p
+
+@script:python depends on report@
+p << r1.p;
+@@
+coccilib.report.print_report(p[0],"Unneeded semicolon")
+
+@depends on context@
+position r1.p;
+@@
+*;@p
+
+@script:python depends on org@
+p << r1.p;
+@@
+cocci.print_main("Unneeded semicolon",p)
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 2ae481703141..122599b1c13b 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -2,16 +2,36 @@
#
# A depmod wrapper used by the toplevel Makefile
-if test $# -ne 2; then
- echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+if test $# -ne 3; then
+ echo "Usage: $0 /sbin/depmod <kernelrelease> <symbolprefix>" >&2
exit 1
fi
DEPMOD=$1
KERNELRELEASE=$2
+SYMBOL_PREFIX=$3
if ! test -r System.map -a -x "$DEPMOD"; then
exit 0
fi
+
+# older versions of depmod don't support -P <symbol-prefix>
+# support was added in module-init-tools 3.13
+if test -n "$SYMBOL_PREFIX"; then
+ release=$("$DEPMOD" --version)
+ package=$(echo "$release" | cut -d' ' -f 1)
+ if test "$package" = "module-init-tools"; then
+ version=$(echo "$release" | cut -d' ' -f 2)
+ later=$(printf '%s\n' "$version" "3.13" | sort -V | tail -n 1)
+ if test "$later" != "$version"; then
+ # module-init-tools < 3.13, drop the symbol prefix
+ SYMBOL_PREFIX=""
+ fi
+ fi
+ if test -n "$SYMBOL_PREFIX"; then
+ SYMBOL_PREFIX="-P $SYMBOL_PREFIX"
+ fi
+fi
+
# older versions of depmod require the version string to start with three
# numbers, so we cheat with a symlink here
depmod_hack_needed=true
@@ -34,7 +54,7 @@ set -- -ae -F System.map
if test -n "$INSTALL_MOD_PATH"; then
set -- "$@" -b "$INSTALL_MOD_PATH"
fi
-"$DEPMOD" "$@" "$KERNELRELEASE"
+"$DEPMOD" "$@" "$KERNELRELEASE" $SYMBOL_PREFIX
ret=$?
if $depmod_hack_needed; then
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 18d4ab55606b..ce4cc837b748 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -611,6 +611,10 @@ sub get_maintainers {
$hash{$tvi} = $value_pd;
}
}
+ } elsif ($type eq 'K') {
+ if ($file =~ m/$value/x) {
+ $hash{$tvi} = 0;
+ }
}
}
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 3091794e9354..231b4759c714 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -11,6 +11,9 @@ else
Kconfig := Kconfig
endif
+# We need this, in case the user has it in its environment
+unexport CONFIG_
+
xconfig: $(obj)/qconf
$< $(Kconfig)
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 4da3b4adfad2..e39fcd8143ea 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -36,6 +36,7 @@ enum input_mode {
} input_mode = oldaskconfig;
static int indent = 1;
+static int tty_stdio;
static int valid_stdin = 1;
static int sync_kconfig;
static int conf_cnt;
@@ -108,6 +109,8 @@ static int conf_askvalue(struct symbol *sym, const char *def)
case oldaskconfig:
fflush(stdout);
xfgets(line, 128, stdin);
+ if (!tty_stdio)
+ printf("\n");
return 1;
default:
break;
@@ -495,6 +498,8 @@ int main(int ac, char **av)
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
+ tty_stdio = isatty(0) && isatty(1) && isatty(2);
+
while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
input_mode = (enum input_mode)opt;
switch (opt) {
@@ -621,7 +626,7 @@ int main(int ac, char **av)
return 1;
}
}
- valid_stdin = isatty(0) && isatty(1) && isatty(2);
+ valid_stdin = tty_stdio;
}
switch (input_mode) {
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 290ce41f8ba4..d6626521f9b9 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -13,7 +13,7 @@
struct expr *expr_alloc_symbol(struct symbol *sym)
{
- struct expr *e = calloc(1, sizeof(*e));
+ struct expr *e = xcalloc(1, sizeof(*e));
e->type = E_SYMBOL;
e->left.sym = sym;
return e;
@@ -21,7 +21,7 @@ struct expr *expr_alloc_symbol(struct symbol *sym)
struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
{
- struct expr *e = calloc(1, sizeof(*e));
+ struct expr *e = xcalloc(1, sizeof(*e));
e->type = type;
e->left.expr = ce;
return e;
@@ -29,7 +29,7 @@ struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2)
{
- struct expr *e = calloc(1, sizeof(*e));
+ struct expr *e = xcalloc(1, sizeof(*e));
e->type = type;
e->left.expr = e1;
e->right.expr = e2;
@@ -38,7 +38,7 @@ struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e
struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2)
{
- struct expr *e = calloc(1, sizeof(*e));
+ struct expr *e = xcalloc(1, sizeof(*e));
e->type = type;
e->left.sym = s1;
e->right.sym = s2;
@@ -66,7 +66,7 @@ struct expr *expr_copy(const struct expr *org)
if (!org)
return NULL;
- e = malloc(sizeof(*org));
+ e = xmalloc(sizeof(*org));
memcpy(e, org, sizeof(*org));
switch (org->type) {
case E_SYMBOL:
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index adc230638c5b..f2bee70e26f4 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -10,6 +10,7 @@
# include <config.h>
#endif
+#include <stdlib.h>
#include "lkc.h"
#include "images.c"
@@ -22,7 +23,6 @@
#include <string.h>
#include <unistd.h>
#include <time.h>
-#include <stdlib.h>
//#define DEBUG
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index c18f2bd9c095..f8aee5fc6d5e 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -39,6 +39,12 @@ extern "C" {
#ifndef CONFIG_
#define CONFIG_ "CONFIG_"
#endif
+static inline const char *CONFIG_prefix(void)
+{
+ return getenv( "CONFIG_" ) ?: CONFIG_;
+}
+#undef CONFIG_
+#define CONFIG_ CONFIG_prefix()
#define TF_COMMAND 0x0001
#define TF_PARAM 0x0002
@@ -116,6 +122,8 @@ void menu_set_type(int type);
/* util.c */
struct file *file_lookup(const char *name);
int file_write_dep(const char *name);
+void *xmalloc(size_t size);
+void *xcalloc(size_t nmemb, size_t size);
struct gstr {
size_t len;
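
[editor's note] The calloc()/malloc() call sites in expr.c and menu.c are switched to xcalloc()/xmalloc(), declared here and implemented in scripts/kconfig/util.c as fail-fast wrappers. A plausible shape for those wrappers (a sketch, not the util.c text):

	#include <stdio.h>
	#include <stdlib.h>

	/* Sketch of a fail-fast allocator: callers never check for NULL,
	 * an allocation failure aborts the configuration tool instead. */
	void *xmalloc(size_t size)
	{
		void *p = malloc(size);
		if (p)
			return p;
		fprintf(stderr, "Out of memory.\n");
		exit(1);
	}

	void *xcalloc(size_t nmemb, size_t size)
	{
		void *p = calloc(nmemb, size);
		if (p)
			return p;
		fprintf(stderr, "Out of memory.\n");
		exit(1);
	}
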
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
index c8e8a7154753..80788137c670 100644
--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -21,6 +21,7 @@ ccflags()
{
if [ -f /usr/include/ncursesw/curses.h ]; then
echo '-I/usr/include/ncursesw -DCURSES_LOC="<ncursesw/curses.h>"'
+ echo ' -DNCURSES_WIDECHAR=1'
elif [ -f /usr/include/ncurses/ncurses.h ]; then
echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"'
elif [ -f /usr/include/ncurses/curses.h ]; then
diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h
index ee17a5264d5b..307022a8beef 100644
--- a/scripts/kconfig/lxdialog/dialog.h
+++ b/scripts/kconfig/lxdialog/dialog.h
@@ -221,7 +221,6 @@ int dialog_menu(const char *title, const char *prompt,
const void *selected, int *s_scroll);
int dialog_checklist(const char *title, const char *prompt, int height,
int width, int list_height);
-extern char dialog_input_result[];
int dialog_inputbox(const char *title, const char *prompt, int height,
int width, const char *init);
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index dd8e587c50e2..21404a04d7c3 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -45,7 +45,8 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
const char *init)
{
int i, x, y, box_y, box_x, box_width;
- int input_x = 0, scroll = 0, key = 0, button = -1;
+ int input_x = 0, key = 0, button = -1;
+ int show_x, len, pos;
char *instr = dialog_input_result;
WINDOW *dialog;
@@ -97,14 +98,17 @@ do_resize:
wmove(dialog, box_y, box_x);
wattrset(dialog, dlg.inputbox.atr);
- input_x = strlen(instr);
+ len = strlen(instr);
+ pos = len;
- if (input_x >= box_width) {
- scroll = input_x - box_width + 1;
+ if (len >= box_width) {
+ show_x = len - box_width + 1;
input_x = box_width - 1;
for (i = 0; i < box_width - 1; i++)
- waddch(dialog, instr[scroll + i]);
+ waddch(dialog, instr[show_x + i]);
} else {
+ show_x = 0;
+ input_x = len;
waddstr(dialog, instr);
}
@@ -121,45 +125,104 @@ do_resize:
case KEY_UP:
case KEY_DOWN:
break;
- case KEY_LEFT:
- continue;
- case KEY_RIGHT:
- continue;
case KEY_BACKSPACE:
case 127:
- if (input_x || scroll) {
+ if (pos) {
wattrset(dialog, dlg.inputbox.atr);
- if (!input_x) {
- scroll = scroll < box_width - 1 ? 0 : scroll - (box_width - 1);
- wmove(dialog, box_y, box_x);
- for (i = 0; i < box_width; i++)
- waddch(dialog,
- instr[scroll + input_x + i] ?
- instr[scroll + input_x + i] : ' ');
- input_x = strlen(instr) - scroll;
+ if (input_x == 0) {
+ show_x--;
} else
input_x--;
- instr[scroll + input_x] = '\0';
- mvwaddch(dialog, box_y, input_x + box_x, ' ');
+
+ if (pos < len) {
+ for (i = pos - 1; i < len; i++) {
+ instr[i] = instr[i+1];
+ }
+ }
+
+ pos--;
+ len--;
+ instr[len] = '\0';
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width; i++) {
+ if (!instr[show_x + i]) {
+ waddch(dialog, ' ');
+ break;
+ }
+ waddch(dialog, instr[show_x + i]);
+ }
wmove(dialog, box_y, input_x + box_x);
wrefresh(dialog);
}
continue;
+ case KEY_LEFT:
+ if (pos > 0) {
+ if (input_x > 0) {
+ wmove(dialog, box_y, --input_x + box_x);
+ } else if (input_x == 0) {
+ show_x--;
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width; i++) {
+ if (!instr[show_x + i]) {
+ waddch(dialog, ' ');
+ break;
+ }
+ waddch(dialog, instr[show_x + i]);
+ }
+ wmove(dialog, box_y, box_x);
+ }
+ pos--;
+ }
+ continue;
+ case KEY_RIGHT:
+ if (pos < len) {
+ if (input_x < box_width - 1) {
+ wmove(dialog, box_y, ++input_x + box_x);
+ } else if (input_x == box_width - 1) {
+ show_x++;
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width; i++) {
+ if (!instr[show_x + i]) {
+ waddch(dialog, ' ');
+ break;
+ }
+ waddch(dialog, instr[show_x + i]);
+ }
+ wmove(dialog, box_y, input_x + box_x);
+ }
+ pos++;
+ }
+ continue;
default:
if (key < 0x100 && isprint(key)) {
- if (scroll + input_x < MAX_LEN) {
+ if (len < MAX_LEN) {
wattrset(dialog, dlg.inputbox.atr);
- instr[scroll + input_x] = key;
- instr[scroll + input_x + 1] = '\0';
+ if (pos < len) {
+ for (i = len; i > pos; i--)
+ instr[i] = instr[i-1];
+ instr[pos] = key;
+ } else {
+ instr[len] = key;
+ }
+ pos++;
+ len++;
+ instr[len] = '\0';
+
if (input_x == box_width - 1) {
- scroll++;
- wmove(dialog, box_y, box_x);
- for (i = 0; i < box_width - 1; i++)
- waddch(dialog, instr [scroll + i]);
+ show_x++;
} else {
- wmove(dialog, box_y, input_x++ + box_x);
- waddch(dialog, key);
+ input_x++;
+ }
+
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width; i++) {
+ if (!instr[show_x + i]) {
+ waddch(dialog, ' ');
+ break;
+ }
+ waddch(dialog, instr[show_x + i]);
}
+ wmove(dialog, box_y, input_x + box_x);
wrefresh(dialog);
} else
flash(); /* Alarm user about overflow */
diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
index 1d604738fa13..48d382e7e374 100644
--- a/scripts/kconfig/lxdialog/menubox.c
+++ b/scripts/kconfig/lxdialog/menubox.c
@@ -26,7 +26,7 @@
*
* *) A bugfix for the Page-Down problem
*
- * *) Formerly when I used Page Down and Page Up, the cursor would be set
+ * *) Formerly when I used Page Down and Page Up, the cursor would be set
* to the first position in the menu box. Now lxdialog is a bit
* smarter and works more like other menu systems (just have a look at
* it).
@@ -154,12 +154,14 @@ static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x,
*/
static void print_buttons(WINDOW * win, int height, int width, int selected)
{
- int x = width / 2 - 16;
+ int x = width / 2 - 28;
int y = height - 2;
print_button(win, gettext("Select"), y, x, selected == 0);
print_button(win, gettext(" Exit "), y, x + 12, selected == 1);
print_button(win, gettext(" Help "), y, x + 24, selected == 2);
+ print_button(win, gettext(" Save "), y, x + 36, selected == 3);
+ print_button(win, gettext(" Load "), y, x + 48, selected == 4);
wmove(win, y, x + 1 + 12 * selected);
wrefresh(win);
@@ -372,7 +374,7 @@ do_resize:
case TAB:
case KEY_RIGHT:
button = ((key == KEY_LEFT ? --button : ++button) < 0)
- ? 2 : (button > 2 ? 0 : button);
+ ? 4 : (button > 4 ? 0 : button);
print_buttons(dialog, height, width, button);
wrefresh(menu);
@@ -399,17 +401,17 @@ do_resize:
return 2;
case 's':
case 'y':
- return 3;
+ return 5;
case 'n':
- return 4;
+ return 6;
case 'm':
- return 5;
+ return 7;
case ' ':
- return 6;
+ return 8;
case '/':
- return 7;
+ return 9;
case 'z':
- return 8;
+ return 10;
case '\n':
return button;
}
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 53975cf87608..566288a76370 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -280,6 +280,7 @@ static struct menu *current_menu;
static int child_count;
static int single_menu_mode;
static int show_all_options;
+static int save_and_exit;
static void conf(struct menu *menu, struct menu *active_menu);
static void conf_choice(struct menu *menu);
@@ -348,15 +349,19 @@ static void search_conf(void)
{
struct symbol **sym_arr;
struct gstr res;
+ struct gstr title;
char *dialog_input;
int dres, vscroll = 0, hscroll = 0;
bool again;
+ title = str_new();
+ str_printf( &title, _("Enter %s (sub)string to search for "
+ "(with or without \"%s\")"), CONFIG_, CONFIG_);
+
again:
dialog_clear();
dres = dialog_inputbox(_("Search Configuration Parameter"),
- _("Enter " CONFIG_ " (sub)string to search for "
- "(with or without \"" CONFIG_ "\")"),
+ str_get(&title),
10, 75, "");
switch (dres) {
case 0:
@@ -365,6 +370,7 @@ again:
show_helptext(_("Search Configuration"), search_help);
goto again;
default:
+ str_free(&title);
return;
}
@@ -398,6 +404,7 @@ again:
str_free(&res);
} while (again);
free(sym_arr);
+ str_free(&title);
}
static void build_conf(struct menu *menu)
@@ -592,14 +599,6 @@ static void conf(struct menu *menu, struct menu *active_menu)
build_conf(menu);
if (!child_count)
break;
- if (menu == &rootmenu) {
- item_make("--- ");
- item_set_tag(':');
- item_make(_(" Load an Alternate Configuration File"));
- item_set_tag('L');
- item_make(_(" Save an Alternate Configuration File"));
- item_set_tag('S');
- }
dialog_clear();
res = dialog_menu(prompt ? _(prompt) : _("Main Menu"),
_(menu_instructions),
@@ -636,12 +635,6 @@ static void conf(struct menu *menu, struct menu *active_menu)
case 's':
conf_string(submenu);
break;
- case 'L':
- conf_load();
- break;
- case 'S':
- conf_save();
- break;
}
break;
case 2:
@@ -651,6 +644,12 @@ static void conf(struct menu *menu, struct menu *active_menu)
show_helptext(_("README"), _(mconf_readme));
break;
case 3:
+ conf_save();
+ break;
+ case 4:
+ conf_load();
+ break;
+ case 5:
if (item_is_tag('t')) {
if (sym_set_tristate_value(sym, yes))
break;
@@ -658,24 +657,24 @@ static void conf(struct menu *menu, struct menu *active_menu)
show_textbox(NULL, setmod_text, 6, 74);
}
break;
- case 4:
+ case 6:
if (item_is_tag('t'))
sym_set_tristate_value(sym, no);
break;
- case 5:
+ case 7:
if (item_is_tag('t'))
sym_set_tristate_value(sym, mod);
break;
- case 6:
+ case 8:
if (item_is_tag('t'))
sym_toggle_tristate_value(sym);
else if (item_is_tag('m'))
conf(submenu, NULL);
break;
- case 7:
+ case 9:
search_conf();
break;
- case 8:
+ case 10:
show_all_options = !show_all_options;
break;
}
@@ -702,6 +701,17 @@ static void show_helptext(const char *title, const char *text)
show_textbox(title, text, 0, 0);
}
+static void conf_message_callback(const char *fmt, va_list ap)
+{
+ char buf[PATH_MAX+1];
+
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ if (save_and_exit)
+ printf("%s", buf);
+ else
+ show_textbox(NULL, buf, 6, 60);
+}
+
static void show_help(struct menu *menu)
{
struct gstr help = str_new();
@@ -870,6 +880,7 @@ static int handle_exit(void)
{
int res;
+ save_and_exit = 1;
dialog_clear();
if (conf_get_changed())
res = dialog_yesno(NULL,
@@ -941,6 +952,7 @@ int main(int ac, char **av)
}
set_config_filename(conf_get_configname());
+ conf_set_message_callback(conf_message_callback);
do {
conf(&rootmenu, NULL);
res = handle_exit();
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index e98a05c8e508..f3bffa309333 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -48,7 +48,7 @@ void menu_add_entry(struct symbol *sym)
{
struct menu *menu;
- menu = malloc(sizeof(*menu));
+ menu = xmalloc(sizeof(*menu));
memset(menu, 0, sizeof(*menu));
menu->sym = sym;
menu->parent = current_menu;
@@ -531,7 +531,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
location = menu;
}
if (head && location) {
- jump = malloc(sizeof(struct jump_key));
+ jump = xmalloc(sizeof(struct jump_key));
if (menu_is_visible(prop->menu)) {
/*
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 974d5cb7e30a..05274fccb88e 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -32,11 +32,13 @@ usage() {
echo " -m only merge the fragments, do not execute the make command"
echo " -n use allnoconfig instead of alldefconfig"
echo " -r list redundant entries when merging fragments"
+ echo " -O dir to put generated output files"
}
MAKE=true
ALLTARGET=alldefconfig
WARNREDUN=false
+OUTPUT=.
while true; do
case $1 in
@@ -59,6 +61,16 @@ while true; do
shift
continue
;;
+ "-O")
+ if [ -d $2 ];then
+ OUTPUT=$(echo $2 | sed 's/\/*$//')
+ else
+ echo "output directory $2 does not exist" 1>&2
+ exit 1
+ fi
+ shift 2
+ continue
+ ;;
*)
break
;;
@@ -100,9 +112,9 @@ for MERGE_FILE in $MERGE_LIST ; do
done
if [ "$MAKE" = "false" ]; then
- cp $TMP_FILE .config
+ cp $TMP_FILE $OUTPUT/.config
echo "#"
- echo "# merged configuration written to .config (needs make)"
+ echo "# merged configuration written to $OUTPUT/.config (needs make)"
echo "#"
clean_up
exit
@@ -111,14 +123,14 @@ fi
# Use the merged file as the starting point for:
# alldefconfig: Fills in any missing symbols with Kconfig default
# allnoconfig: Fills in any missing symbols with # CONFIG_* is not set
-make KCONFIG_ALLCONFIG=$TMP_FILE $ALLTARGET
+make KCONFIG_ALLCONFIG=$TMP_FILE O=$OUTPUT $ALLTARGET
# Check all specified config values took (might have missed-dependency issues)
for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
- ACTUAL_VAL=$(grep -w -e "$CFG" .config)
+ ACTUAL_VAL=$(grep -w -e "$CFG" $OUTPUT/.config)
if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then
echo "Value requested for $CFG not in final .config"
echo "Requested value: $REQUESTED_VAL"
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 87d4b15da951..dbf31edd22b2 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -7,215 +7,208 @@
*/
#define _GNU_SOURCE
#include <string.h>
+#include <stdlib.h>
#include "lkc.h"
#include "nconf.h"
#include <ctype.h>
-static const char nconf_readme[] = N_(
-"Overview\n"
-"--------\n"
-"This interface let you select features and parameters for the build.\n"
-"Features can either be built-in, modularized, or ignored. Parameters\n"
-"must be entered in as decimal or hexadecimal numbers or text.\n"
+static const char nconf_global_help[] = N_(
+"Help windows\n"
+"------------\n"
+"o Global help: Unless in a data entry window, pressing <F1> will give \n"
+" you the global help window, which you are just reading.\n"
"\n"
-"Menu items beginning with following braces represent features that\n"
-" [ ] can be built in or removed\n"
-" < > can be built in, modularized or removed\n"
-" { } can be built in or modularized (selected by other feature)\n"
-" - - are selected by other feature,\n"
-" XXX cannot be selected. Use Symbol Info to find out why,\n"
-"while *, M or whitespace inside braces means to build in, build as\n"
-"a module or to exclude the feature respectively.\n"
+"o A short version of the global help is available by pressing <F3>.\n"
"\n"
-"To change any of these features, highlight it with the cursor\n"
-"keys and press <Y> to build it in, <M> to make it a module or\n"
-"<N> to removed it. You may also press the <Space Bar> to cycle\n"
-"through the available options (ie. Y->N->M->Y).\n"
+"o Local help: To get help related to the current menu entry, use any\n"
+" of <?> <h>, or if in a data entry window then press <F1>.\n"
"\n"
-"Some additional keyboard hints:\n"
"\n"
-"Menus\n"
-"----------\n"
-"o Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
-" you wish to change use <Enter> or <Space>. Goto submenu by \n"
-" pressing <Enter> of <right-arrow>. Use <Esc> or <left-arrow> to go back.\n"
-" Submenus are designated by \"--->\".\n"
-"\n"
-" Searching: pressing '/' triggers interactive search mode.\n"
-" nconfig performs a case insensitive search for the string\n"
-" in the menu prompts (no regex support).\n"
-" Pressing the up/down keys highlights the previous/next\n"
-" matching item. Backspace removes one character from the\n"
-" match string. Pressing either '/' again or ESC exits\n"
-" search mode. All other keys behave normally.\n"
+"Menu entries\n"
+"------------\n"
+"This interface lets you select features and parameters for the kernel\n"
+"build. Kernel features can either be built-in, modularized, or removed.\n"
+"Parameters must be entered as text or decimal or hexadecimal numbers.\n"
"\n"
-" You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
-" unseen options into view.\n"
+"Menu entries beginning with following braces represent features that\n"
+" [ ] can be built in or removed\n"
+" < > can be built in, modularized or removed\n"
+" { } can be built in or modularized, are selected by another feature\n"
+" - - are selected by another feature\n"
+" XXX cannot be selected. Symbol Info <F2> tells you why.\n"
+"*, M or whitespace inside braces means to build in, build as a module\n"
+"or to exclude the feature respectively.\n"
"\n"
-"o To exit a menu use the just press <ESC> <F5> <F8> or <left-arrow>.\n"
+"To change any of these features, highlight it with the movement keys\n"
+"listed below and press <y> to build it in, <m> to make it a module or\n"
+"<n> to remove it. You may press the <Space> key to cycle through the\n"
+"available options.\n"
"\n"
-"o To get help with an item, press <F1>\n"
-" Shortcut: Press <h> or <?>.\n"
+"A trailing \"--->\" designates a submenu.\n"
"\n"
"\n"
-"Radiolists (Choice lists)\n"
-"-----------\n"
-"o Use the cursor keys to select the option you wish to set and press\n"
-" <S> or the <SPACE BAR>.\n"
+"Menu navigation keys\n"
+"----------------------------------------------------------------------\n"
+"Linewise up <Up>\n"
+"Linewise down <Down>\n"
+"Pagewise up <Page Up>\n"
+"Pagewise down <Page Down>\n"
+"First entry <Home>\n"
+"Last entry <End>\n"
+"Enter a submenu <Right> <Enter>\n"
+"Go back to parent menu <Left> <Esc> <F5>\n"
+"Close a help window <Enter> <Esc> <F5>\n"
+"Close entry window, apply <Enter>\n"
+"Close entry window, forget <Esc> <F5>\n"
+"Start incremental, case-insensitive search for STRING in menu entries,\n"
+" no regex support, STRING is displayed in upper left corner\n"
+" </>STRING\n"
+" Remove last character <Backspace>\n"
+" Jump to next hit <Down>\n"
+" Jump to previous hit <Up>\n"
+"Exit menu search mode </> <Esc>\n"
+"Search for configuration variables with or without leading CONFIG_\n"
+" <F8>RegExpr<Enter>\n"
+"Verbose search help <F8><F1>\n"
+"----------------------------------------------------------------------\n"
"\n"
-" Shortcut: Press the first letter of the option you wish to set then\n"
-" press <S> or <SPACE BAR>.\n"
+"Unless in a data entry window, key <1> may be used instead of <F1>,\n"
+"<2> instead of <F2>, etc.\n"
"\n"
-"o To see available help for the item, press <F1>\n"
-" Shortcut: Press <H> or <?>.\n"
"\n"
+"Radiolist (Choice list)\n"
+"-----------------------\n"
+"Use the movement keys listed above to select the option you wish to set\n"
+"and press <Space>.\n"
"\n"
-"Data Entry\n"
-"-----------\n"
-"o Enter the requested information and press <ENTER>\n"
-" If you are entering hexadecimal values, it is not necessary to\n"
-" add the '0x' prefix to the entry.\n"
"\n"
-"o For help, press <F1>.\n"
+"Data entry\n"
+"----------\n"
+"Enter the requested information and press <Enter>. Hexadecimal values\n"
+"may be entered without the \"0x\" prefix.\n"
"\n"
"\n"
-"Text Box (Help Window)\n"
-"--------\n"
-"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
-" keys h,j,k,l function here as do <u>, <d> and <SPACE BAR> for\n"
-" those who are familiar with less and lynx.\n"
+"Text Box (Help Window)\n"
+"----------------------\n"
+"Use movement keys as listed in table above.\n"
"\n"
-"o Press <Enter>, <F1>, <F5>, <F9>, <q> or <Esc> to exit.\n"
+"Press any of <Enter> <Esc> <q> <F5> <F9> to exit.\n"
"\n"
"\n"
-"Alternate Configuration Files\n"
+"Alternate configuration files\n"
"-----------------------------\n"
-"nconfig supports the use of alternate configuration files for\n"
-"those who, for various reasons, find it necessary to switch\n"
-"between different configurations.\n"
+"nconfig supports switching between different configurations.\n"
+"Press <F6> to save your current configuration. Press <F7> and enter\n"
+"a file name to load a previously saved configuration.\n"
"\n"
-"At the end of the main menu you will find two options. One is\n"
-"for saving the current configuration to a file of your choosing.\n"
-"The other option is for loading a previously saved alternate\n"
-"configuration.\n"
"\n"
-"Even if you don't use alternate configuration files, but you\n"
-"find during a nconfig session that you have completely messed\n"
-"up your settings, you may use the \"Load Alternate...\" option to\n"
-"restore your previously saved settings from \".config\" without\n"
-"restarting nconfig.\n"
+"Terminal configuration\n"
+"----------------------\n"
+"If you use nconfig in a xterm window, make sure your TERM environment\n"
+"variable specifies a terminal configuration which supports at least\n"
+"16 colors. Otherwise nconfig will look rather bad.\n"
"\n"
-"Other information\n"
-"-----------------\n"
-"If you use nconfig in an XTERM window make sure you have your\n"
-"$TERM variable set to point to a xterm definition which supports color.\n"
-"Otherwise, nconfig will look rather bad. nconfig will not\n"
-"display correctly in a RXVT window because rxvt displays only one\n"
-"intensity of color, bright.\n"
+"If the \"stty size\" command reports the current terminalsize correctly,\n"
+"nconfig will adapt to sizes larger than the traditional 80x25 \"standard\"\n"
+"and display longer menus properly.\n"
"\n"
-"nconfig will display larger menus on screens or xterms which are\n"
-"set to display more than the standard 25 row by 80 column geometry.\n"
-"In order for this to work, the \"stty size\" command must be able to\n"
-"display the screen's current row and column geometry. I STRONGLY\n"
-"RECOMMEND that you make sure you do NOT have the shell variables\n"
-"LINES and COLUMNS exported into your environment. Some distributions\n"
-"export those variables via /etc/profile. Some ncurses programs can\n"
-"become confused when those variables (LINES & COLUMNS) don't reflect\n"
-"the true screen size.\n"
"\n"
-"Optional personality available\n"
-"------------------------------\n"
-"If you prefer to have all of the options listed in a single menu, rather\n"
-"than the default multimenu hierarchy, run the nconfig with NCONFIG_MODE\n"
-"environment variable set to single_menu. Example:\n"
+"Single menu mode\n"
+"----------------\n"
+"If you prefer to have all of the menu entries listed in a single menu,\n"
+"rather than the default multimenu hierarchy, run nconfig with\n"
+"NCONFIG_MODE environment variable set to single_menu. Example:\n"
"\n"
"make NCONFIG_MODE=single_menu nconfig\n"
"\n"
-"<Enter> will then unroll the appropriate category, or enfold it if it\n"
-"is already unrolled.\n"
+"<Enter> will then unfold the appropriate category, or fold it if it\n"
+"is already unfolded. Folded menu entries will be designated by a\n"
+"leading \"++>\" and unfolded entries by a leading \"-->\".\n"
"\n"
-"Note that this mode can eventually be a little more CPU expensive\n"
-"(especially with a larger number of unrolled categories) than the\n"
-"default mode.\n"
+"Note that this mode can eventually be a little more CPU expensive than\n"
+"the default mode, especially with a larger number of unfolded submenus.\n"
"\n"),
menu_no_f_instructions[] = N_(
-" You do not have function keys support. Please follow the\n"
-" following instructions:\n"
-" Arrow keys navigate the menu.\n"
-" <Enter> or <right-arrow> selects submenus --->.\n"
-" Capital Letters are hotkeys.\n"
-" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
-" Pressing SpaceBar toggles between the above options.\n"
-" Press <Esc> or <left-arrow> to go back one menu,\n"
-" <?> or <h> for Help, </> for Search.\n"
-" <1> is interchangeable with <F1>, <2> with <F2>, etc.\n"
-" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
-" <Esc> always leaves the current window.\n"),
+"Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+"Submenus are designated by a trailing \"--->\".\n"
+"\n"
+"Use the following keys to navigate the menus:\n"
+"Move up or down with <Up> and <Down>.\n"
+"Enter a submenu with <Enter> or <Right>.\n"
+"Exit a submenu to its parent menu with <Esc> or <Left>.\n"
+"Pressing <y> includes, <n> excludes, <m> modularizes features.\n"
+"Pressing <Space> cycles through the available options.\n"
+"To search for menu entries press </>.\n"
+"<Esc> always leaves the current window.\n"
+"\n"
+"You do not have function keys support.\n"
+"Press <1> instead of <F1>, <2> instead of <F2>, etc.\n"
+"For verbose global help use key <1>.\n"
+"For help related to the current menu entry press <?> or <h>.\n"),
menu_instructions[] = N_(
-" Arrow keys navigate the menu.\n"
-" <Enter> or <right-arrow> selects submenus --->.\n"
-" Capital Letters are hotkeys.\n"
-" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
-" Pressing SpaceBar toggles between the above options\n"
-" Press <Esc>, <F5> or <left-arrow> to go back one menu,\n"
-" <?>, <F1> or <h> for Help, </> for Search.\n"
-" <1> is interchangeable with <F1>, <2> with <F2>, etc.\n"
-" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
-" <Esc> always leaves the current window\n"),
+"Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+"Submenus are designated by a trailing \"--->\".\n"
+"\n"
+"Use the following keys to navigate the menus:\n"
+"Move up or down with <Up> or <Down>.\n"
+"Enter a submenu with <Enter> or <Right>.\n"
+"Exit a submenu to its parent menu with <Esc> or <Left>.\n"
+"Pressing <y> includes, <n> excludes, <m> modularizes features.\n"
+"Pressing <Space> cycles through the available options.\n"
+"To search for menu entries press </>.\n"
+"<Esc> always leaves the current window.\n"
+"\n"
+"Pressing <1> may be used instead of <F1>, <2> instead of <F2>, etc.\n"
+"For verbose global help press <F1>.\n"
+"For help related to the current menu entry press <?> or <h>.\n"),
radiolist_instructions[] = N_(
-" Use the arrow keys to navigate this window or\n"
-" press the hotkey of the item you wish to select\n"
-" followed by the <SPACE BAR>.\n"
-" Press <?>, <F1> or <h> for additional information about this option.\n"),
+"Press <Up>, <Down>, <Home> or <End> to navigate a radiolist, select\n"
+"with <Space>.\n"
+"For help related to the current entry press <?> or <h>.\n"
+"For global help press <F1>.\n"),
inputbox_instructions_int[] = N_(
"Please enter a decimal value.\n"
"Fractions will not be accepted.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
inputbox_instructions_hex[] = N_(
"Please enter a hexadecimal value.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
inputbox_instructions_string[] = N_(
"Please enter a string value.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
setmod_text[] = N_(
-"This feature depends on another which\n"
-"has been configured as a module.\n"
-"As a result, this feature will be built as a module."),
+"This feature depends on another feature which has been configured as a\n"
+"module. As a result, the current feature will be built as a module too."),
load_config_text[] = N_(
"Enter the name of the configuration file you wish to load.\n"
-"Accept the name shown to restore the configuration you\n"
-"last retrieved. Leave blank to abort."),
+"Accept the name shown to restore the configuration you last\n"
+"retrieved. Leave empty to abort."),
load_config_help[] = N_(
-"\n"
"For various reasons, one may wish to keep several different\n"
"configurations available on a single machine.\n"
"\n"
"If you have saved a previous configuration in a file other than the\n"
-"default one, entering its name here will allow you to modify that\n"
-"configuration.\n"
+"default one, entering its name here will allow you to load and modify\n"
+"that configuration.\n"
"\n"
-"If you are uncertain, then you have probably never used alternate\n"
-"configuration files. You should therefor leave this blank to abort.\n"),
+"Leave empty to abort.\n"),
save_config_text[] = N_(
"Enter a filename to which this configuration should be saved\n"
-"as an alternate. Leave blank to abort."),
+"as an alternate. Leave empty to abort."),
save_config_help[] = N_(
-"\n"
-"For various reasons, one may wish to keep different configurations\n"
-"available on a single machine.\n"
+"For various reasons, one may wish to keep several different\n"
+"configurations available on a single machine.\n"
"\n"
"Entering a file name here will allow you to later retrieve, modify\n"
"and use the current configuration as an alternate to whatever\n"
"configuration options you have selected at that time.\n"
"\n"
-"If you are uncertain what all this means then you should probably\n"
-"leave this blank.\n"),
+"Leave empty to abort.\n"),
search_help[] = N_(
-"\n"
-"Search for symbols and display their relations. Regular expressions\n"
-"are allowed.\n"
-"Example: search for \"^FOO\"\n"
+"Search for symbols (configuration variable names CONFIG_*) and display\n"
+"their relations. Regular expressions are supported.\n"
+"Example: Search for \"^FOO\".\n"
"Result:\n"
"-----------------------------------------------------------------\n"
"Symbol: FOO [ = m]\n"
@@ -229,26 +222,26 @@ search_help[] = N_(
"Selects: LIBCRC32\n"
"Selected by: BAR\n"
"-----------------------------------------------------------------\n"
-"o The line 'Prompt:' shows the text used in the menu structure for\n"
-" this symbol\n"
-"o The 'Defined at' line tell at what file / line number the symbol\n"
-" is defined\n"
-"o The 'Depends on:' line tell what symbols needs to be defined for\n"
-" this symbol to be visible in the menu (selectable)\n"
-"o The 'Location:' lines tell where in the menu structure this symbol\n"
-" is located\n"
-" A location followed by a [ = y] indicate that this is a selectable\n"
-" menu item - and current value is displayed inside brackets.\n"
-"o The 'Selects:' line tell what symbol will be automatically\n"
-" selected if this symbol is selected (y or m)\n"
-"o The 'Selected by' line tell what symbol has selected this symbol\n"
+"o The line 'Prompt:' shows the text displayed for this symbol in\n"
+" the menu hierarchy.\n"
+"o The 'Defined at' line tells at what file / line number the symbol is\n"
+" defined.\n"
+"o The 'Depends on:' line lists symbols that need to be defined for\n"
+" this symbol to be visible and selectable in the menu.\n"
+"o The 'Location:' lines tell, where in the menu structure this symbol\n"
+" is located. A location followed by a [ = y] indicates that this is\n"
+" a selectable menu item, and the current value is displayed inside\n"
+" brackets.\n"
+"o The 'Selects:' line tells, what symbol will be automatically selected\n"
+" if this symbol is selected (y or m).\n"
+"o The 'Selected by' line tells what symbol has selected this symbol.\n"
"\n"
"Only relevant lines are shown.\n"
"\n\n"
"Search examples:\n"
-"Examples: USB => find all symbols containing USB\n"
-" ^USB => find all symbols starting with USB\n"
-" USB$ => find all symbols ending with USB\n"
+"USB => find all symbols containing USB\n"
+"^USB => find all symbols starting with USB\n"
+"USB$ => find all symbols ending with USB\n"
"\n");
struct mitem {
@@ -319,19 +312,19 @@ struct function_keys function_keys[] = {
},
{
.key_str = "F2",
- .func = "Sym Info",
+ .func = "SymInfo",
.key = F_SYMBOL,
.handler = handle_f2,
},
{
.key_str = "F3",
- .func = "Insts",
+ .func = "Help 2",
.key = F_INSTS,
.handler = handle_f3,
},
{
.key_str = "F4",
- .func = "Config",
+ .func = "ShowAll",
.key = F_CONF,
.handler = handle_f4,
},
@@ -355,7 +348,7 @@ struct function_keys function_keys[] = {
},
{
.key_str = "F8",
- .func = "Sym Search",
+ .func = "SymSearch",
.key = F_SEARCH,
.handler = handle_f8,
},
@@ -392,7 +385,7 @@ static void print_function_line(void)
static void handle_f1(int *key, struct menu *current_item)
{
show_scroll_win(main_window,
- _("README"), _(nconf_readme));
+ _("Global help"), _(nconf_global_help));
return;
}
@@ -407,7 +400,7 @@ static void handle_f2(int *key, struct menu *current_item)
static void handle_f3(int *key, struct menu *current_item)
{
show_scroll_win(main_window,
- _("Instructions"),
+ _("Short help"),
_(current_instructions));
return;
}
@@ -696,13 +689,18 @@ static void search_conf(void)
{
struct symbol **sym_arr;
struct gstr res;
+ struct gstr title;
char *dialog_input;
int dres;
+
+ title = str_new();
+ str_printf(&title, _("Enter %s (sub)string to search for "
+ "(with or without \"%s\")"), CONFIG_, CONFIG_);
+
again:
dres = dialog_inputbox(main_window,
_("Search Configuration Parameter"),
- _("Enter " CONFIG_ " (sub)string to search for "
- "(with or without \"" CONFIG_ "\")"),
+ str_get(&title),
"", &dialog_input_result, &dialog_input_result_len);
switch (dres) {
case 0:
@@ -712,6 +710,7 @@ again:
_("Search Configuration"), search_help);
goto again;
default:
+ str_free(&title);
return;
}
@@ -726,6 +725,7 @@ again:
show_scroll_win(main_window,
_("Search Results"), str_get(&res));
str_free(&res);
+ str_free(&title);
}
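
The search_conf() hunk above replaces compile-time pasting of the CONFIG_ prefix with a
title string built at run time through the growable-string helpers from scripts/kconfig/util.c,
released on every exit path. A minimal sketch of that gstr pattern, assuming the declarations
from scripts/kconfig/lkc.h (the stand-alone main() and the hard-coded prefix are illustration only):

	#include <stdio.h>
	#include "lkc.h"	/* struct gstr, str_new(), str_printf(), str_get(), str_free() */

	#define CONFIG_ "CONFIG_"	/* in the real tool this comes from the environment */

	int main(void)
	{
		struct gstr title = str_new();

		/* format once; the buffer grows as needed */
		str_printf(&title, "Enter %s (sub)string to search for "
				   "(with or without \"%s\")", CONFIG_, CONFIG_);
		printf("%s\n", str_get(&title));

		/* every exit path must release the buffer, as the patch does
		 * in both the cancel branch and after showing the results */
		str_free(&title);
		return 0;
	}
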
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 379003c7a2b4..9f8c44ecc703 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -48,7 +48,7 @@ static void set_normal_colors(void)
init_pair(INPUT_FIELD, -1, -1);
init_pair(FUNCTION_HIGHLIGHT, -1, -1);
- init_pair(FUNCTION_TEXT, COLOR_BLUE, -1);
+ init_pair(FUNCTION_TEXT, COLOR_YELLOW, -1);
}
/* available attributes:
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index df274febb3e5..1500c38f0cca 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -6,6 +6,7 @@
#include <qglobal.h>
#if QT_VERSION < 0x040000
+#include <stddef.h>
#include <qmainwindow.h>
#include <qvbox.h>
#include <qvaluelist.h>
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 22a3c400fc41..ecc5aa5f865d 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -656,11 +656,11 @@ bool sym_set_string_value(struct symbol *sym, const char *newval)
size = strlen(newval) + 1;
if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) {
size += 2;
- sym->def[S_DEF_USER].val = val = malloc(size);
+ sym->def[S_DEF_USER].val = val = xmalloc(size);
*val++ = '0';
*val++ = 'x';
} else if (!oldval || strcmp(oldval, newval))
- sym->def[S_DEF_USER].val = val = malloc(size);
+ sym->def[S_DEF_USER].val = val = xmalloc(size);
else
return true;
@@ -812,7 +812,7 @@ struct symbol *sym_lookup(const char *name, int flags)
hash = 0;
}
- symbol = malloc(sizeof(*symbol));
+ symbol = xmalloc(sizeof(*symbol));
memset(symbol, 0, sizeof(*symbol));
symbol->name = new_name;
symbol->type = S_UNKNOWN;
@@ -863,7 +863,7 @@ const char *sym_expand_string_value(const char *in)
size_t reslen;
reslen = strlen(in) + 1;
- res = malloc(reslen);
+ res = xmalloc(reslen);
res[0] = '\0';
while ((src = strchr(in, '$'))) {
@@ -921,7 +921,7 @@ const char *sym_escape_string_value(const char *in)
p++;
}
- res = malloc(reslen);
+ res = xmalloc(reslen);
res[0] = '\0';
strcat(res, "\"");
@@ -1228,7 +1228,7 @@ struct property *prop_alloc(enum prop_type type, struct symbol *sym)
struct property *prop;
struct property **propp;
- prop = malloc(sizeof(*prop));
+ prop = xmalloc(sizeof(*prop));
memset(prop, 0, sizeof(*prop));
prop->type = type;
prop->sym = sym;
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index d0b8b2318e48..6e7fbf196809 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -23,7 +23,7 @@ struct file *file_lookup(const char *name)
}
}
- file = malloc(sizeof(*file));
+ file = xmalloc(sizeof(*file));
memset(file, 0, sizeof(*file));
file->name = file_name;
file->next = file_list;
@@ -81,7 +81,7 @@ int file_write_dep(const char *name)
struct gstr str_new(void)
{
struct gstr gs;
- gs.s = malloc(sizeof(char) * 64);
+ gs.s = xmalloc(sizeof(char) * 64);
gs.len = 64;
gs.max_width = 0;
strcpy(gs.s, "\0");
@@ -138,3 +138,22 @@ const char *str_get(struct gstr *gs)
return gs->s;
}
+void *xmalloc(size_t size)
+{
+ void *p = malloc(size);
+ if (p)
+ return p;
+ fprintf(stderr, "Out of memory.\n");
+ exit(1);
+}
+
+void *xcalloc(size_t nmemb, size_t size)
+{
+ void *p = calloc(nmemb, size);
+ if (p)
+ return p;
+ fprintf(stderr, "Out of memory.\n");
+ exit(1);
+}
+
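
The xmalloc()/xcalloc() wrappers added above centralize the out-of-memory handling that the
individual malloc() call sites in symbol.c, util.c and the lexer used to skip entirely; an
allocation failure now prints one message and exits. A rough before/after sketch of a call
site, assuming the prototypes end up in scripts/kconfig/lkc.h (the struct here is a
hypothetical stand-in):

	#include <string.h>
	#include "lkc.h"	/* assumed to declare xmalloc() after this patch */

	struct item {		/* hypothetical stand-in for e.g. struct symbol */
		char *name;
		int flags;
	};

	static struct item *item_alloc(void)
	{
		/* Before: plain malloc() with no NULL check, so a failed
		 * allocation crashed later.  After: xmalloc() either returns
		 * memory or terminates with "Out of memory." */
		struct item *it = xmalloc(sizeof(*it));

		memset(it, 0, sizeof(*it));
		return it;
	}
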
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 00f9d3a9cf8b..6555a475453b 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -40,7 +40,7 @@ static void zconf_endfile(void);
static void new_string(void)
{
- text = malloc(START_STRSIZE);
+ text = xmalloc(START_STRSIZE);
text_asize = START_STRSIZE;
text_size = 0;
*text = 0;
@@ -62,7 +62,7 @@ static void append_string(const char *str, int size)
static void alloc_string(const char *str, int size)
{
- text = malloc(size + 1);
+ text = xmalloc(size + 1);
memcpy(text, str, size);
text[size] = 0;
}
@@ -288,7 +288,7 @@ void zconf_initscan(const char *name)
exit(1);
}
- current_buf = malloc(sizeof(*current_buf));
+ current_buf = xmalloc(sizeof(*current_buf));
memset(current_buf, 0, sizeof(*current_buf));
current_file = file_lookup(name);
@@ -299,7 +299,7 @@ void zconf_nextfile(const char *name)
{
struct file *iter;
struct file *file = file_lookup(name);
- struct buffer *buf = malloc(sizeof(*buf));
+ struct buffer *buf = xmalloc(sizeof(*buf));
memset(buf, 0, sizeof(*buf));
current_buf->state = YY_CURRENT_BUFFER;
diff --git a/scripts/kconfig/zconf.lex.c_shipped b/scripts/kconfig/zconf.lex.c_shipped
index c32b1a49f5a3..a0521aa5974b 100644
--- a/scripts/kconfig/zconf.lex.c_shipped
+++ b/scripts/kconfig/zconf.lex.c_shipped
@@ -802,7 +802,7 @@ static void zconf_endfile(void);
static void new_string(void)
{
- text = malloc(START_STRSIZE);
+ text = xmalloc(START_STRSIZE);
text_asize = START_STRSIZE;
text_size = 0;
*text = 0;
@@ -824,7 +824,7 @@ static void append_string(const char *str, int size)
static void alloc_string(const char *str, int size)
{
- text = malloc(size + 1);
+ text = xmalloc(size + 1);
memcpy(text, str, size);
text[size] = 0;
}
@@ -2343,7 +2343,7 @@ void zconf_initscan(const char *name)
exit(1);
}
- current_buf = malloc(sizeof(*current_buf));
+ current_buf = xmalloc(sizeof(*current_buf));
memset(current_buf, 0, sizeof(*current_buf));
current_file = file_lookup(name);
@@ -2354,7 +2354,7 @@ void zconf_nextfile(const char *name)
{
struct file *iter;
struct file *file = file_lookup(name);
- struct buffer *buf = malloc(sizeof(*buf));
+ struct buffer *buf = xmalloc(sizeof(*buf));
memset(buf, 0, sizeof(*buf));
current_buf->state = YY_CURRENT_BUFFER;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index f565536a2bef..4305b2f2ec5e 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
# strip kmemcheck_bitfield_{begin,end}.*;
$members =~ s/kmemcheck_bitfield_.*?;//gos;
# strip attributes
- $members =~ s/__aligned\s*\(\d+\)//gos;
+ $members =~ s/__aligned\s*\(.+\)//gos;
create_parameterlist($members, ';', $file);
check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
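
The kernel-doc tweak above widens the attribute-stripping pattern from __aligned(<digits>) to
__aligned(<any argument>), because exported structs often pass an expression rather than a
literal. With hypothetical member names, all three forms below are now removed before the
member list is parsed, whereas the old \(\d+\) pattern only caught the first:

	/* hypothetical struct: only the __aligned() arguments matter here */
	struct example_ring {
		u32 head __aligned(64);			/* literal: old regex handled it   */
		u32 tail __aligned(L1_CACHE_BYTES);	/* macro: old regex left it behind */
		u64 slot[0] __aligned(sizeof(u64));	/* expression: old regex left it behind */
	};
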
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index b3d907eb93a9..3d569d6022c2 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -132,7 +132,14 @@ if [ "$1" = "clean" ]; then
fi
# We need access to CONFIG_ symbols
-. ./.config
+case "${KCONFIG_CONFIG}" in
+*/*)
+ . "${KCONFIG_CONFIG}"
+ ;;
+*)
+ # Force using a file from the current directory
+ . "./${KCONFIG_CONFIG}"
+esac
#link vmlinux.o
info LD vmlinux.o
diff --git a/scripts/mod/.gitignore b/scripts/mod/.gitignore
index e9b7abe7b95b..33bae0df4de5 100644
--- a/scripts/mod/.gitignore
+++ b/scripts/mod/.gitignore
@@ -1,4 +1,5 @@
elfconfig.h
mk_elfconfig
modpost
+devicetable-offsets.h
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index ff954f8168c1..9415b5663364 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -3,9 +3,44 @@ always := $(hostprogs-y) empty.o
modpost-objs := modpost.o file2alias.o sumversion.o
+devicetable-offsets-file := devicetable-offsets.h
+
+define sed-y
+ "/^->/{s:->#\(.*\):/* \1 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:->::; p;}"
+endef
+
+quiet_cmd_offsets = GEN $@
+define cmd_offsets
+ (set -e; \
+ echo "#ifndef __DEVICEVTABLE_OFFSETS_H__"; \
+ echo "#define __DEVICEVTABLE_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Kbuild"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+scripts/mod/devicetable-offsets.s: scripts/mod/devicetable-offsets.c FORCE
+ $(Q)mkdir -p $(dir $@)
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(devicetable-offsets-file): scripts/mod/devicetable-offsets.s
+ $(call cmd,offsets)
+
# dependencies on generated files need to be listed explicitly
$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
+$(obj)/file2alias.o: $(obj)/$(devicetable-offsets-file)
quiet_cmd_elfconfig = MKELF $@
cmd_elfconfig = $(obj)/mk_elfconfig < $< > $@
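
The rule above never runs devicetable-offsets.c; it only compiles it to assembler, where the
DEFINE() macro from include/linux/kbuild.h leaves one marker line per constant, and
cmd_offsets/sed-y rewrites those markers into a header. The macro is roughly:

	/* include/linux/kbuild.h (already in the tree) */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

So DEVID(usb_device_id) shows up in devicetable-offsets.s as a line along the lines of
"->SIZE_usb_device_id $32 sizeof(struct usb_device_id)" (the immediate is prefixed with $ or #
depending on the architecture, which is why sed-y tolerates either before the number), and the
sed expression turns it into "#define SIZE_usb_device_id 32", keeping the original expression
as a trailing comment. The value 32 is illustrative only.
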
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
new file mode 100644
index 000000000000..b45260bfeaa0
--- /dev/null
+++ b/scripts/mod/devicetable-offsets.c
@@ -0,0 +1,178 @@
+#include <linux/kbuild.h>
+#include <linux/mod_devicetable.h>
+
+#define DEVID(devid) DEFINE(SIZE_##devid, sizeof(struct devid))
+#define DEVID_FIELD(devid, field) \
+ DEFINE(OFF_##devid##_##field, offsetof(struct devid, field))
+
+int main(void)
+{
+ DEVID(usb_device_id);
+ DEVID_FIELD(usb_device_id, match_flags);
+ DEVID_FIELD(usb_device_id, idVendor);
+ DEVID_FIELD(usb_device_id, idProduct);
+ DEVID_FIELD(usb_device_id, bcdDevice_lo);
+ DEVID_FIELD(usb_device_id, bcdDevice_hi);
+ DEVID_FIELD(usb_device_id, bDeviceClass);
+ DEVID_FIELD(usb_device_id, bDeviceSubClass);
+ DEVID_FIELD(usb_device_id, bDeviceProtocol);
+ DEVID_FIELD(usb_device_id, bInterfaceClass);
+ DEVID_FIELD(usb_device_id, bInterfaceSubClass);
+ DEVID_FIELD(usb_device_id, bInterfaceProtocol);
+ DEVID_FIELD(usb_device_id, bInterfaceNumber);
+
+ DEVID(hid_device_id);
+ DEVID_FIELD(hid_device_id, bus);
+ DEVID_FIELD(hid_device_id, group);
+ DEVID_FIELD(hid_device_id, vendor);
+ DEVID_FIELD(hid_device_id, product);
+
+ DEVID(ieee1394_device_id);
+ DEVID_FIELD(ieee1394_device_id, match_flags);
+ DEVID_FIELD(ieee1394_device_id, vendor_id);
+ DEVID_FIELD(ieee1394_device_id, model_id);
+ DEVID_FIELD(ieee1394_device_id, specifier_id);
+ DEVID_FIELD(ieee1394_device_id, version);
+
+ DEVID(pci_device_id);
+ DEVID_FIELD(pci_device_id, vendor);
+ DEVID_FIELD(pci_device_id, device);
+ DEVID_FIELD(pci_device_id, subvendor);
+ DEVID_FIELD(pci_device_id, subdevice);
+ DEVID_FIELD(pci_device_id, class);
+ DEVID_FIELD(pci_device_id, class_mask);
+
+ DEVID(ccw_device_id);
+ DEVID_FIELD(ccw_device_id, match_flags);
+ DEVID_FIELD(ccw_device_id, cu_type);
+ DEVID_FIELD(ccw_device_id, cu_model);
+ DEVID_FIELD(ccw_device_id, dev_type);
+ DEVID_FIELD(ccw_device_id, dev_model);
+
+ DEVID(ap_device_id);
+ DEVID_FIELD(ap_device_id, dev_type);
+
+ DEVID(css_device_id);
+ DEVID_FIELD(css_device_id, type);
+
+ DEVID(serio_device_id);
+ DEVID_FIELD(serio_device_id, type);
+ DEVID_FIELD(serio_device_id, proto);
+ DEVID_FIELD(serio_device_id, id);
+ DEVID_FIELD(serio_device_id, extra);
+
+ DEVID(acpi_device_id);
+ DEVID_FIELD(acpi_device_id, id);
+
+ DEVID(pnp_device_id);
+ DEVID_FIELD(pnp_device_id, id);
+
+ DEVID(pnp_card_device_id);
+ DEVID_FIELD(pnp_card_device_id, devs);
+
+ DEVID(pcmcia_device_id);
+ DEVID_FIELD(pcmcia_device_id, match_flags);
+ DEVID_FIELD(pcmcia_device_id, manf_id);
+ DEVID_FIELD(pcmcia_device_id, card_id);
+ DEVID_FIELD(pcmcia_device_id, func_id);
+ DEVID_FIELD(pcmcia_device_id, function);
+ DEVID_FIELD(pcmcia_device_id, device_no);
+ DEVID_FIELD(pcmcia_device_id, prod_id_hash);
+
+ DEVID(of_device_id);
+ DEVID_FIELD(of_device_id, name);
+ DEVID_FIELD(of_device_id, type);
+ DEVID_FIELD(of_device_id, compatible);
+
+ DEVID(vio_device_id);
+ DEVID_FIELD(vio_device_id, type);
+ DEVID_FIELD(vio_device_id, compat);
+
+ DEVID(input_device_id);
+ DEVID_FIELD(input_device_id, flags);
+ DEVID_FIELD(input_device_id, bustype);
+ DEVID_FIELD(input_device_id, vendor);
+ DEVID_FIELD(input_device_id, product);
+ DEVID_FIELD(input_device_id, version);
+ DEVID_FIELD(input_device_id, evbit);
+ DEVID_FIELD(input_device_id, keybit);
+ DEVID_FIELD(input_device_id, relbit);
+ DEVID_FIELD(input_device_id, absbit);
+ DEVID_FIELD(input_device_id, mscbit);
+ DEVID_FIELD(input_device_id, ledbit);
+ DEVID_FIELD(input_device_id, sndbit);
+ DEVID_FIELD(input_device_id, ffbit);
+ DEVID_FIELD(input_device_id, swbit);
+
+ DEVID(eisa_device_id);
+ DEVID_FIELD(eisa_device_id, sig);
+
+ DEVID(parisc_device_id);
+ DEVID_FIELD(parisc_device_id, hw_type);
+ DEVID_FIELD(parisc_device_id, hversion);
+ DEVID_FIELD(parisc_device_id, hversion_rev);
+ DEVID_FIELD(parisc_device_id, sversion);
+
+ DEVID(sdio_device_id);
+ DEVID_FIELD(sdio_device_id, class);
+ DEVID_FIELD(sdio_device_id, vendor);
+ DEVID_FIELD(sdio_device_id, device);
+
+ DEVID(ssb_device_id);
+ DEVID_FIELD(ssb_device_id, vendor);
+ DEVID_FIELD(ssb_device_id, coreid);
+ DEVID_FIELD(ssb_device_id, revision);
+
+ DEVID(bcma_device_id);
+ DEVID_FIELD(bcma_device_id, manuf);
+ DEVID_FIELD(bcma_device_id, id);
+ DEVID_FIELD(bcma_device_id, rev);
+ DEVID_FIELD(bcma_device_id, class);
+
+ DEVID(virtio_device_id);
+ DEVID_FIELD(virtio_device_id, device);
+ DEVID_FIELD(virtio_device_id, vendor);
+
+ DEVID(hv_vmbus_device_id);
+ DEVID_FIELD(hv_vmbus_device_id, guid);
+
+ DEVID(i2c_device_id);
+ DEVID_FIELD(i2c_device_id, name);
+
+ DEVID(spi_device_id);
+ DEVID_FIELD(spi_device_id, name);
+
+ DEVID(dmi_system_id);
+ DEVID_FIELD(dmi_system_id, matches);
+
+ DEVID(platform_device_id);
+ DEVID_FIELD(platform_device_id, name);
+
+ DEVID(mdio_device_id);
+ DEVID_FIELD(mdio_device_id, phy_id);
+ DEVID_FIELD(mdio_device_id, phy_id_mask);
+
+ DEVID(zorro_device_id);
+ DEVID_FIELD(zorro_device_id, id);
+
+ DEVID(isapnp_device_id);
+ DEVID_FIELD(isapnp_device_id, vendor);
+ DEVID_FIELD(isapnp_device_id, function);
+
+ DEVID(ipack_device_id);
+ DEVID_FIELD(ipack_device_id, format);
+ DEVID_FIELD(ipack_device_id, vendor);
+ DEVID_FIELD(ipack_device_id, device);
+
+ DEVID(amba_id);
+ DEVID_FIELD(amba_id, id);
+ DEVID_FIELD(amba_id, mask);
+
+ DEVID(x86_cpu_id);
+ DEVID_FIELD(x86_cpu_id, feature);
+ DEVID_FIELD(x86_cpu_id, family);
+ DEVID_FIELD(x86_cpu_id, model);
+ DEVID_FIELD(x86_cpu_id, vendor);
+
+ return 0;
+}
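
The generated devicetable-offsets.h then consists purely of SIZE_* and OFF_* integer macros,
computed for the target architecture, so file2alias.c no longer needs the build host's struct
layout to match the kernel's. An illustrative excerpt (the numbers depend on the target ABI
and are not taken from any particular build):

	#define SIZE_usb_device_id 32			/* sizeof(struct usb_device_id) */
	#define OFF_usb_device_id_match_flags 0		/* offsetof(struct usb_device_id, match_flags) */
	#define OFF_usb_device_id_idVendor 2		/* offsetof(struct usb_device_id, idVendor) */
	#define OFF_usb_device_id_idProduct 4		/* offsetof(struct usb_device_id, idProduct) */
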
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index df4fc23dd836..771ac17f635d 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -11,6 +11,7 @@
*/
#include "modpost.h"
+#include "devicetable-offsets.h"
/* We use the ELF typedefs for kernel_ulong_t but bite the bullet and
* use either stdint.h or inttypes.h for the rest. */
@@ -84,13 +85,25 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
# define __used __attribute__((__used__))
#endif
+/* Define a variable f that holds the value of field f of struct devid
+ * based at address m.
+ */
+#define DEF_FIELD(m, devid, f) \
+ typeof(((struct devid *)0)->f) f = TO_NATIVE(*(typeof(f) *)((m) + OFF_##devid##_##f))
+/* Define a variable f that holds the address of field f of struct devid
+ * based at address m. Due to the way typeof works, for a field of type
+ * T[N] the variable has type T(*)[N], _not_ T*.
+ */
+#define DEF_FIELD_ADDR(m, devid, f) \
+ typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
+
/* Add a table entry. We test function type matches while we're here. */
#define ADD_TO_DEVTABLE(device_id, type, function) \
static struct devtable __cat(devtable,__LINE__) = { \
device_id + 0*sizeof((function)((const char *)NULL, \
- (type *)NULL, \
+ (void *)NULL, \
(char *)NULL)), \
- sizeof(type), (function) }; \
+ SIZE_##type, (function) }; \
static struct devtable *SECTION(__devtable) __used \
__cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
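
Expanded by hand, the two helper macros above read a field straight out of the raw
__mod_*_device_table section bytes using only those OFF_*/SIZE_* constants. A sketch for one
scalar field and one array field (the cast to char * just makes the pointer arithmetic
explicit; the macros themselves rely on GNU C void * arithmetic):

	static void def_field_example(void *symval)	/* illustration only */
	{
		/* DEF_FIELD(symval, usb_device_id, idVendor) behaves roughly like: */
		__u16 idVendor = TO_NATIVE(*(__u16 *)((char *)symval + OFF_usb_device_id_idVendor));

		/* DEF_FIELD_ADDR(symval, of_device_id, name): the field is
		 * char name[32], so typeof() yields char (*)[32], not char *,
		 * which is why the converted handlers below write (*name)[0]
		 * and *name when they use it. */
		char (*name)[32] = (void *)((char *)symval + OFF_of_device_id_name);

		(void)idVendor;
		(void)name;
	}
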
@@ -116,7 +129,6 @@ static inline void add_wildcard(char *str)
strcat(str + len, "*");
}
-unsigned int cross_build = 0;
/**
* Check that sizeof(device_id type) are consistent with size of section
* in .o file. If in-consistent then userspace and kernel does not agree
@@ -131,8 +143,6 @@ static void device_id_check(const char *modname, const char *device_id,
int i;
if (size % id_size || size < id_size) {
- if (cross_build != 0)
- return;
fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
"of the size of section __mod_%s_device_table=%lu.\n"
"Fix definition of struct %s_device_id "
@@ -157,17 +167,29 @@ static void device_id_check(const char *modname, const char *device_id,
/* USB is special because the bcdDevice can be matched against a numeric range */
/* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
-static void do_usb_entry(struct usb_device_id *id,
+static void do_usb_entry(void *symval,
unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
unsigned char range_lo, unsigned char range_hi,
unsigned char max, struct module *mod)
{
char alias[500];
+ DEF_FIELD(symval, usb_device_id, match_flags);
+ DEF_FIELD(symval, usb_device_id, idVendor);
+ DEF_FIELD(symval, usb_device_id, idProduct);
+ DEF_FIELD(symval, usb_device_id, bcdDevice_lo);
+ DEF_FIELD(symval, usb_device_id, bDeviceClass);
+ DEF_FIELD(symval, usb_device_id, bDeviceSubClass);
+ DEF_FIELD(symval, usb_device_id, bDeviceProtocol);
+ DEF_FIELD(symval, usb_device_id, bInterfaceClass);
+ DEF_FIELD(symval, usb_device_id, bInterfaceSubClass);
+ DEF_FIELD(symval, usb_device_id, bInterfaceProtocol);
+ DEF_FIELD(symval, usb_device_id, bInterfaceNumber);
+
strcpy(alias, "usb:");
- ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR,
- id->idVendor);
- ADD(alias, "p", id->match_flags&USB_DEVICE_ID_MATCH_PRODUCT,
- id->idProduct);
+ ADD(alias, "v", match_flags&USB_DEVICE_ID_MATCH_VENDOR,
+ idVendor);
+ ADD(alias, "p", match_flags&USB_DEVICE_ID_MATCH_PRODUCT,
+ idProduct);
strcat(alias, "d");
if (bcdDevice_initial_digits)
@@ -190,29 +212,23 @@ static void do_usb_entry(struct usb_device_id *id,
range_lo);
}
}
- if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
+ if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
strcat(alias, "*");
- ADD(alias, "dc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS,
- id->bDeviceClass);
- ADD(alias, "dsc",
- id->match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS,
- id->bDeviceSubClass);
- ADD(alias, "dp",
- id->match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL,
- id->bDeviceProtocol);
- ADD(alias, "ic",
- id->match_flags&USB_DEVICE_ID_MATCH_INT_CLASS,
- id->bInterfaceClass);
- ADD(alias, "isc",
- id->match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- id->bInterfaceSubClass);
- ADD(alias, "ip",
- id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL,
- id->bInterfaceProtocol);
- ADD(alias, "in",
- id->match_flags&USB_DEVICE_ID_MATCH_INT_NUMBER,
- id->bInterfaceNumber);
+ ADD(alias, "dc", match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS,
+ bDeviceClass);
+ ADD(alias, "dsc", match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS,
+ bDeviceSubClass);
+ ADD(alias, "dp", match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL,
+ bDeviceProtocol);
+ ADD(alias, "ic", match_flags&USB_DEVICE_ID_MATCH_INT_CLASS,
+ bInterfaceClass);
+ ADD(alias, "isc", match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ bInterfaceSubClass);
+ ADD(alias, "ip", match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL,
+ bInterfaceProtocol);
+ ADD(alias, "in", match_flags&USB_DEVICE_ID_MATCH_INT_NUMBER,
+ bInterfaceNumber);
add_wildcard(alias);
buf_printf(&mod->dev_table_buf,
@@ -258,24 +274,28 @@ static unsigned int incbcd(unsigned int *bcd,
return init;
}
-static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
+static void do_usb_entry_multi(void *symval, struct module *mod)
{
unsigned int devlo, devhi;
unsigned char chi, clo, max;
int ndigits;
- id->match_flags = TO_NATIVE(id->match_flags);
- id->idVendor = TO_NATIVE(id->idVendor);
- id->idProduct = TO_NATIVE(id->idProduct);
+ DEF_FIELD(symval, usb_device_id, match_flags);
+ DEF_FIELD(symval, usb_device_id, idVendor);
+ DEF_FIELD(symval, usb_device_id, idProduct);
+ DEF_FIELD(symval, usb_device_id, bcdDevice_lo);
+ DEF_FIELD(symval, usb_device_id, bcdDevice_hi);
+ DEF_FIELD(symval, usb_device_id, bDeviceClass);
+ DEF_FIELD(symval, usb_device_id, bInterfaceClass);
- devlo = id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO ?
- TO_NATIVE(id->bcdDevice_lo) : 0x0U;
- devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
- TO_NATIVE(id->bcdDevice_hi) : ~0x0U;
+ devlo = match_flags & USB_DEVICE_ID_MATCH_DEV_LO ?
+ bcdDevice_lo : 0x0U;
+ devhi = match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
+ bcdDevice_hi : ~0x0U;
/* Figure out if this entry is in bcd or hex format */
max = 0x9; /* Default to decimal format */
- for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) {
+ for (ndigits = 0 ; ndigits < sizeof(bcdDevice_lo) * 2 ; ndigits++) {
clo = (devlo >> (ndigits << 2)) & 0xf;
chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf;
if (clo > max || chi > max) {
@@ -288,11 +308,11 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
* Some modules (visor) have empty slots as placeholder for
* run-time specification that results in catch-all alias
*/
- if (!(id->idVendor | id->idProduct | id->bDeviceClass | id->bInterfaceClass))
+ if (!(idVendor | idProduct | bDeviceClass | bInterfaceClass))
return;
/* Convert numeric bcdDevice range into fnmatch-able pattern(s) */
- for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
+ for (ndigits = sizeof(bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
clo = devlo & 0xf;
chi = devhi & 0xf;
if (chi > max) /* If we are in bcd mode, truncate if necessary */
@@ -301,20 +321,20 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
devhi >>= 4;
if (devlo == devhi || !ndigits) {
- do_usb_entry(id, devlo, ndigits, clo, chi, max, mod);
+ do_usb_entry(symval, devlo, ndigits, clo, chi, max, mod);
break;
}
if (clo > 0x0)
- do_usb_entry(id,
+ do_usb_entry(symval,
incbcd(&devlo, 1, max,
- sizeof(id->bcdDevice_lo) * 2),
+ sizeof(bcdDevice_lo) * 2),
ndigits, clo, max, max, mod);
if (chi < max)
- do_usb_entry(id,
+ do_usb_entry(symval,
incbcd(&devhi, -1, max,
- sizeof(id->bcdDevice_lo) * 2),
+ sizeof(bcdDevice_lo) * 2),
ndigits, 0x0, chi, max, mod);
}
}
@@ -323,7 +343,7 @@ static void do_usb_table(void *symval, unsigned long size,
struct module *mod)
{
unsigned int i;
- const unsigned long id_size = sizeof(struct usb_device_id);
+ const unsigned long id_size = SIZE_usb_device_id;
device_id_check(mod->name, "usb", size, id_size, symval);
@@ -336,81 +356,81 @@ static void do_usb_table(void *symval, unsigned long size,
/* Looks like: hid:bNvNpN */
static int do_hid_entry(const char *filename,
- struct hid_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->bus = TO_NATIVE(id->bus);
- id->group = TO_NATIVE(id->group);
- id->vendor = TO_NATIVE(id->vendor);
- id->product = TO_NATIVE(id->product);
+ DEF_FIELD(symval, hid_device_id, bus);
+ DEF_FIELD(symval, hid_device_id, group);
+ DEF_FIELD(symval, hid_device_id, vendor);
+ DEF_FIELD(symval, hid_device_id, product);
sprintf(alias, "hid:");
- ADD(alias, "b", id->bus != HID_BUS_ANY, id->bus);
- ADD(alias, "g", id->group != HID_GROUP_ANY, id->group);
- ADD(alias, "v", id->vendor != HID_ANY_ID, id->vendor);
- ADD(alias, "p", id->product != HID_ANY_ID, id->product);
+ ADD(alias, "b", bus != HID_BUS_ANY, bus);
+ ADD(alias, "g", group != HID_GROUP_ANY, group);
+ ADD(alias, "v", vendor != HID_ANY_ID, vendor);
+ ADD(alias, "p", product != HID_ANY_ID, product);
return 1;
}
-ADD_TO_DEVTABLE("hid", struct hid_device_id, do_hid_entry);
+ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
/* Looks like: ieee1394:venNmoNspNverN */
static int do_ieee1394_entry(const char *filename,
- struct ieee1394_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->match_flags = TO_NATIVE(id->match_flags);
- id->vendor_id = TO_NATIVE(id->vendor_id);
- id->model_id = TO_NATIVE(id->model_id);
- id->specifier_id = TO_NATIVE(id->specifier_id);
- id->version = TO_NATIVE(id->version);
+ DEF_FIELD(symval, ieee1394_device_id, match_flags);
+ DEF_FIELD(symval, ieee1394_device_id, vendor_id);
+ DEF_FIELD(symval, ieee1394_device_id, model_id);
+ DEF_FIELD(symval, ieee1394_device_id, specifier_id);
+ DEF_FIELD(symval, ieee1394_device_id, version);
strcpy(alias, "ieee1394:");
- ADD(alias, "ven", id->match_flags & IEEE1394_MATCH_VENDOR_ID,
- id->vendor_id);
- ADD(alias, "mo", id->match_flags & IEEE1394_MATCH_MODEL_ID,
- id->model_id);
- ADD(alias, "sp", id->match_flags & IEEE1394_MATCH_SPECIFIER_ID,
- id->specifier_id);
- ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION,
- id->version);
+ ADD(alias, "ven", match_flags & IEEE1394_MATCH_VENDOR_ID,
+ vendor_id);
+ ADD(alias, "mo", match_flags & IEEE1394_MATCH_MODEL_ID,
+ model_id);
+ ADD(alias, "sp", match_flags & IEEE1394_MATCH_SPECIFIER_ID,
+ specifier_id);
+ ADD(alias, "ver", match_flags & IEEE1394_MATCH_VERSION,
+ version);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ieee1394", struct ieee1394_device_id, do_ieee1394_entry);
+ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
/* Looks like: pci:vNdNsvNsdNbcNscNiN. */
static int do_pci_entry(const char *filename,
- struct pci_device_id *id, char *alias)
+ void *symval, char *alias)
{
/* Class field can be divided into these three. */
unsigned char baseclass, subclass, interface,
baseclass_mask, subclass_mask, interface_mask;
- id->vendor = TO_NATIVE(id->vendor);
- id->device = TO_NATIVE(id->device);
- id->subvendor = TO_NATIVE(id->subvendor);
- id->subdevice = TO_NATIVE(id->subdevice);
- id->class = TO_NATIVE(id->class);
- id->class_mask = TO_NATIVE(id->class_mask);
+ DEF_FIELD(symval, pci_device_id, vendor);
+ DEF_FIELD(symval, pci_device_id, device);
+ DEF_FIELD(symval, pci_device_id, subvendor);
+ DEF_FIELD(symval, pci_device_id, subdevice);
+ DEF_FIELD(symval, pci_device_id, class);
+ DEF_FIELD(symval, pci_device_id, class_mask);
strcpy(alias, "pci:");
- ADD(alias, "v", id->vendor != PCI_ANY_ID, id->vendor);
- ADD(alias, "d", id->device != PCI_ANY_ID, id->device);
- ADD(alias, "sv", id->subvendor != PCI_ANY_ID, id->subvendor);
- ADD(alias, "sd", id->subdevice != PCI_ANY_ID, id->subdevice);
-
- baseclass = (id->class) >> 16;
- baseclass_mask = (id->class_mask) >> 16;
- subclass = (id->class) >> 8;
- subclass_mask = (id->class_mask) >> 8;
- interface = id->class;
- interface_mask = id->class_mask;
+ ADD(alias, "v", vendor != PCI_ANY_ID, vendor);
+ ADD(alias, "d", device != PCI_ANY_ID, device);
+ ADD(alias, "sv", subvendor != PCI_ANY_ID, subvendor);
+ ADD(alias, "sd", subdevice != PCI_ANY_ID, subdevice);
+
+ baseclass = (class) >> 16;
+ baseclass_mask = (class_mask) >> 16;
+ subclass = (class) >> 8;
+ subclass_mask = (class_mask) >> 8;
+ interface = class;
+ interface_mask = class_mask;
if ((baseclass_mask != 0 && baseclass_mask != 0xFF)
|| (subclass_mask != 0 && subclass_mask != 0xFF)
|| (interface_mask != 0 && interface_mask != 0xFF)) {
warn("Can't handle masks in %s:%04X\n",
- filename, id->class_mask);
+ filename, class_mask);
return 0;
}
@@ -420,101 +440,105 @@ static int do_pci_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("pci", struct pci_device_id, do_pci_entry);
+ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
/* looks like: "ccw:tNmNdtNdmN" */
static int do_ccw_entry(const char *filename,
- struct ccw_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->match_flags = TO_NATIVE(id->match_flags);
- id->cu_type = TO_NATIVE(id->cu_type);
- id->cu_model = TO_NATIVE(id->cu_model);
- id->dev_type = TO_NATIVE(id->dev_type);
- id->dev_model = TO_NATIVE(id->dev_model);
+ DEF_FIELD(symval, ccw_device_id, match_flags);
+ DEF_FIELD(symval, ccw_device_id, cu_type);
+ DEF_FIELD(symval, ccw_device_id, cu_model);
+ DEF_FIELD(symval, ccw_device_id, dev_type);
+ DEF_FIELD(symval, ccw_device_id, dev_model);
strcpy(alias, "ccw:");
- ADD(alias, "t", id->match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE,
- id->cu_type);
- ADD(alias, "m", id->match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL,
- id->cu_model);
- ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
- id->dev_type);
- ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
- id->dev_model);
+ ADD(alias, "t", match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE,
+ cu_type);
+ ADD(alias, "m", match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL,
+ cu_model);
+ ADD(alias, "dt", match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
+ dev_type);
+ ADD(alias, "dm", match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
+ dev_model);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ccw", struct ccw_device_id, do_ccw_entry);
+ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
/* looks like: "ap:tN" */
static int do_ap_entry(const char *filename,
- struct ap_device_id *id, char *alias)
+ void *symval, char *alias)
{
- sprintf(alias, "ap:t%02X*", id->dev_type);
+ DEF_FIELD(symval, ap_device_id, dev_type);
+
+ sprintf(alias, "ap:t%02X*", dev_type);
return 1;
}
-ADD_TO_DEVTABLE("ap", struct ap_device_id, do_ap_entry);
+ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
/* looks like: "css:tN" */
static int do_css_entry(const char *filename,
- struct css_device_id *id, char *alias)
+ void *symval, char *alias)
{
- sprintf(alias, "css:t%01X", id->type);
+ DEF_FIELD(symval, css_device_id, type);
+
+ sprintf(alias, "css:t%01X", type);
return 1;
}
-ADD_TO_DEVTABLE("css", struct css_device_id, do_css_entry);
+ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
/* Looks like: "serio:tyNprNidNexN" */
static int do_serio_entry(const char *filename,
- struct serio_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->type = TO_NATIVE(id->type);
- id->proto = TO_NATIVE(id->proto);
- id->id = TO_NATIVE(id->id);
- id->extra = TO_NATIVE(id->extra);
+ DEF_FIELD(symval, serio_device_id, type);
+ DEF_FIELD(symval, serio_device_id, proto);
+ DEF_FIELD(symval, serio_device_id, id);
+ DEF_FIELD(symval, serio_device_id, extra);
strcpy(alias, "serio:");
- ADD(alias, "ty", id->type != SERIO_ANY, id->type);
- ADD(alias, "pr", id->proto != SERIO_ANY, id->proto);
- ADD(alias, "id", id->id != SERIO_ANY, id->id);
- ADD(alias, "ex", id->extra != SERIO_ANY, id->extra);
+ ADD(alias, "ty", type != SERIO_ANY, type);
+ ADD(alias, "pr", proto != SERIO_ANY, proto);
+ ADD(alias, "id", id != SERIO_ANY, id);
+ ADD(alias, "ex", extra != SERIO_ANY, extra);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("serio", struct serio_device_id, do_serio_entry);
+ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
static int do_acpi_entry(const char *filename,
- struct acpi_device_id *id, char *alias)
+ void *symval, char *alias)
{
- sprintf(alias, "acpi*:%s:*", id->id);
+ DEF_FIELD_ADDR(symval, acpi_device_id, id);
+ sprintf(alias, "acpi*:%s:*", *id);
return 1;
}
-ADD_TO_DEVTABLE("acpi", struct acpi_device_id, do_acpi_entry);
+ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
/* looks like: "pnp:dD" */
static void do_pnp_device_entry(void *symval, unsigned long size,
struct module *mod)
{
- const unsigned long id_size = sizeof(struct pnp_device_id);
+ const unsigned long id_size = SIZE_pnp_device_id;
const unsigned int count = (size / id_size)-1;
- const struct pnp_device_id *devs = symval;
unsigned int i;
device_id_check(mod->name, "pnp", size, id_size, symval);
for (i = 0; i < count; i++) {
- const char *id = (char *)devs[i].id;
- char acpi_id[sizeof(devs[0].id)];
+ DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
+ char acpi_id[sizeof(*id)];
int j;
buf_printf(&mod->dev_table_buf,
- "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
/* fix broken pnp bus lowercasing */
for (j = 0; j < sizeof(acpi_id); j++)
- acpi_id[j] = toupper(id[j]);
+ acpi_id[j] = toupper((*id)[j]);
buf_printf(&mod->dev_table_buf,
"MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id);
}
@@ -524,19 +548,18 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
static void do_pnp_card_entries(void *symval, unsigned long size,
struct module *mod)
{
- const unsigned long id_size = sizeof(struct pnp_card_device_id);
+ const unsigned long id_size = SIZE_pnp_card_device_id;
const unsigned int count = (size / id_size)-1;
- const struct pnp_card_device_id *cards = symval;
unsigned int i;
device_id_check(mod->name, "pnp", size, id_size, symval);
for (i = 0; i < count; i++) {
unsigned int j;
- const struct pnp_card_device_id *card = &cards[i];
+ DEF_FIELD_ADDR(symval + i*id_size, pnp_card_device_id, devs);
for (j = 0; j < PNP_MAX_DEVICES; j++) {
- const char *id = (char *)card->devs[j].id;
+ const char *id = (char *)(*devs)[j].id;
int i2, j2;
int dup = 0;
@@ -545,10 +568,10 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
/* find duplicate, already added value */
for (i2 = 0; i2 < i && !dup; i2++) {
- const struct pnp_card_device_id *card2 = &cards[i2];
+ DEF_FIELD_ADDR(symval + i2*id_size, pnp_card_device_id, devs);
for (j2 = 0; j2 < PNP_MAX_DEVICES; j2++) {
- const char *id2 = (char *)card2->devs[j2].id;
+ const char *id2 = (char *)(*devs)[j2].id;
if (!id2[0])
break;
@@ -562,7 +585,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
/* add an individual alias for every device entry */
if (!dup) {
- char acpi_id[sizeof(card->devs[0].id)];
+ char acpi_id[PNP_ID_LEN];
int k;
buf_printf(&mod->dev_table_buf,
@@ -580,54 +603,58 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
/* Looks like: pcmcia:mNcNfNfnNpfnNvaNvbNvcNvdN. */
static int do_pcmcia_entry(const char *filename,
- struct pcmcia_device_id *id, char *alias)
+ void *symval, char *alias)
{
unsigned int i;
-
- id->match_flags = TO_NATIVE(id->match_flags);
- id->manf_id = TO_NATIVE(id->manf_id);
- id->card_id = TO_NATIVE(id->card_id);
- id->func_id = TO_NATIVE(id->func_id);
- id->function = TO_NATIVE(id->function);
- id->device_no = TO_NATIVE(id->device_no);
+ DEF_FIELD(symval, pcmcia_device_id, match_flags);
+ DEF_FIELD(symval, pcmcia_device_id, manf_id);
+ DEF_FIELD(symval, pcmcia_device_id, card_id);
+ DEF_FIELD(symval, pcmcia_device_id, func_id);
+ DEF_FIELD(symval, pcmcia_device_id, function);
+ DEF_FIELD(symval, pcmcia_device_id, device_no);
+ DEF_FIELD_ADDR(symval, pcmcia_device_id, prod_id_hash);
for (i=0; i<4; i++) {
- id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]);
- }
-
- strcpy(alias, "pcmcia:");
- ADD(alias, "m", id->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
- id->manf_id);
- ADD(alias, "c", id->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
- id->card_id);
- ADD(alias, "f", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID,
- id->func_id);
- ADD(alias, "fn", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION,
- id->function);
- ADD(alias, "pfn", id->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
- id->device_no);
- ADD(alias, "pa", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, id->prod_id_hash[0]);
- ADD(alias, "pb", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, id->prod_id_hash[1]);
- ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]);
- ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]);
+ (*prod_id_hash)[i] = TO_NATIVE((*prod_id_hash)[i]);
+ }
+
+ strcpy(alias, "pcmcia:");
+ ADD(alias, "m", match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
+ manf_id);
+ ADD(alias, "c", match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
+ card_id);
+ ADD(alias, "f", match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID,
+ func_id);
+ ADD(alias, "fn", match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION,
+ function);
+ ADD(alias, "pfn", match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
+ device_no);
+ ADD(alias, "pa", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, (*prod_id_hash)[0]);
+ ADD(alias, "pb", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, (*prod_id_hash)[1]);
+ ADD(alias, "pc", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, (*prod_id_hash)[2]);
+ ADD(alias, "pd", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, (*prod_id_hash)[3]);
add_wildcard(alias);
- return 1;
+ return 1;
}
-ADD_TO_DEVTABLE("pcmcia", struct pcmcia_device_id, do_pcmcia_entry);
+ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
-static int do_of_entry (const char *filename, struct of_device_id *of, char *alias)
+static int do_of_entry (const char *filename, void *symval, char *alias)
{
int len;
char *tmp;
+ DEF_FIELD_ADDR(symval, of_device_id, name);
+ DEF_FIELD_ADDR(symval, of_device_id, type);
+ DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
len = sprintf (alias, "of:N%sT%s",
- of->name[0] ? of->name : "*",
- of->type[0] ? of->type : "*");
+ (*name)[0] ? *name : "*",
+ (*type)[0] ? *type : "*");
- if (of->compatible[0])
+ if ((*compatible)[0])
sprintf (&alias[len], "%sC%s",
- of->type[0] ? "*" : "",
- of->compatible);
+ (*type)[0] ? "*" : "",
+ *compatible);
/* Replace all whitespace with underscores */
for (tmp = alias; tmp && *tmp; tmp++)
@@ -637,15 +664,17 @@ static int do_of_entry (const char *filename, struct of_device_id *of, char *ali
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("of", struct of_device_id, do_of_entry);
+ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-static int do_vio_entry(const char *filename, struct vio_device_id *vio,
+static int do_vio_entry(const char *filename, void *symval,
char *alias)
{
char *tmp;
+ DEF_FIELD_ADDR(symval, vio_device_id, type);
+ DEF_FIELD_ADDR(symval, vio_device_id, compat);
- sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*",
- vio->compat[0] ? vio->compat : "*");
+ sprintf(alias, "vio:T%sS%s", (*type)[0] ? *type : "*",
+ (*compat)[0] ? *compat : "*");
/* Replace all whitespace with underscores */
for (tmp = alias; tmp && *tmp; tmp++)
@@ -655,7 +684,7 @@ static int do_vio_entry(const char *filename, struct vio_device_id *vio,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("vio", struct vio_device_id, do_vio_entry);
+ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -664,154 +693,172 @@ static void do_input(char *alias,
{
unsigned int i;
+ for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
+ arr[i] = TO_NATIVE(arr[i]);
for (i = min; i < max; i++)
if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG)))
sprintf(alias + strlen(alias), "%X,*", i);
}
/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */
-static int do_input_entry(const char *filename, struct input_device_id *id,
+static int do_input_entry(const char *filename, void *symval,
char *alias)
{
+ DEF_FIELD(symval, input_device_id, flags);
+ DEF_FIELD(symval, input_device_id, bustype);
+ DEF_FIELD(symval, input_device_id, vendor);
+ DEF_FIELD(symval, input_device_id, product);
+ DEF_FIELD(symval, input_device_id, version);
+ DEF_FIELD_ADDR(symval, input_device_id, evbit);
+ DEF_FIELD_ADDR(symval, input_device_id, keybit);
+ DEF_FIELD_ADDR(symval, input_device_id, relbit);
+ DEF_FIELD_ADDR(symval, input_device_id, absbit);
+ DEF_FIELD_ADDR(symval, input_device_id, mscbit);
+ DEF_FIELD_ADDR(symval, input_device_id, ledbit);
+ DEF_FIELD_ADDR(symval, input_device_id, sndbit);
+ DEF_FIELD_ADDR(symval, input_device_id, ffbit);
+ DEF_FIELD_ADDR(symval, input_device_id, swbit);
+
sprintf(alias, "input:");
- ADD(alias, "b", id->flags & INPUT_DEVICE_ID_MATCH_BUS, id->bustype);
- ADD(alias, "v", id->flags & INPUT_DEVICE_ID_MATCH_VENDOR, id->vendor);
- ADD(alias, "p", id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT, id->product);
- ADD(alias, "e", id->flags & INPUT_DEVICE_ID_MATCH_VERSION, id->version);
+ ADD(alias, "b", flags & INPUT_DEVICE_ID_MATCH_BUS, bustype);
+ ADD(alias, "v", flags & INPUT_DEVICE_ID_MATCH_VENDOR, vendor);
+ ADD(alias, "p", flags & INPUT_DEVICE_ID_MATCH_PRODUCT, product);
+ ADD(alias, "e", flags & INPUT_DEVICE_ID_MATCH_VERSION, version);
sprintf(alias + strlen(alias), "-e*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT)
- do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_EVBIT)
+ do_input(alias, *evbit, 0, INPUT_DEVICE_ID_EV_MAX);
sprintf(alias + strlen(alias), "k*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT)
- do_input(alias, id->keybit,
+ if (flags & INPUT_DEVICE_ID_MATCH_KEYBIT)
+ do_input(alias, *keybit,
INPUT_DEVICE_ID_KEY_MIN_INTERESTING,
INPUT_DEVICE_ID_KEY_MAX);
sprintf(alias + strlen(alias), "r*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT)
- do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_RELBIT)
+ do_input(alias, *relbit, 0, INPUT_DEVICE_ID_REL_MAX);
sprintf(alias + strlen(alias), "a*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT)
- do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_ABSBIT)
+ do_input(alias, *absbit, 0, INPUT_DEVICE_ID_ABS_MAX);
sprintf(alias + strlen(alias), "m*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT)
- do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_MSCIT)
+ do_input(alias, *mscbit, 0, INPUT_DEVICE_ID_MSC_MAX);
sprintf(alias + strlen(alias), "l*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT)
- do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_LEDBIT)
+ do_input(alias, *ledbit, 0, INPUT_DEVICE_ID_LED_MAX);
sprintf(alias + strlen(alias), "s*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT)
- do_input(alias, id->sndbit, 0, INPUT_DEVICE_ID_SND_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_SNDBIT)
+ do_input(alias, *sndbit, 0, INPUT_DEVICE_ID_SND_MAX);
sprintf(alias + strlen(alias), "f*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT)
- do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_FFBIT)
+ do_input(alias, *ffbit, 0, INPUT_DEVICE_ID_FF_MAX);
sprintf(alias + strlen(alias), "w*");
- if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT)
- do_input(alias, id->swbit, 0, INPUT_DEVICE_ID_SW_MAX);
+ if (flags & INPUT_DEVICE_ID_MATCH_SWBIT)
+ do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
return 1;
}
-ADD_TO_DEVTABLE("input", struct input_device_id, do_input_entry);
+ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
-static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa,
+static int do_eisa_entry(const char *filename, void *symval,
char *alias)
{
- if (eisa->sig[0])
- sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig);
+ DEF_FIELD_ADDR(symval, eisa_device_id, sig);
+ if ((*sig)[0])
+ sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig);
else
strcat(alias, "*");
return 1;
}
-ADD_TO_DEVTABLE("eisa", struct eisa_device_id, do_eisa_entry);
+ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
/* Looks like: parisc:tNhvNrevNsvN */
-static int do_parisc_entry(const char *filename, struct parisc_device_id *id,
+static int do_parisc_entry(const char *filename, void *symval,
char *alias)
{
- id->hw_type = TO_NATIVE(id->hw_type);
- id->hversion = TO_NATIVE(id->hversion);
- id->hversion_rev = TO_NATIVE(id->hversion_rev);
- id->sversion = TO_NATIVE(id->sversion);
+ DEF_FIELD(symval, parisc_device_id, hw_type);
+ DEF_FIELD(symval, parisc_device_id, hversion);
+ DEF_FIELD(symval, parisc_device_id, hversion_rev);
+ DEF_FIELD(symval, parisc_device_id, sversion);
strcpy(alias, "parisc:");
- ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type);
- ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion);
- ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev);
- ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion);
+ ADD(alias, "t", hw_type != PA_HWTYPE_ANY_ID, hw_type);
+ ADD(alias, "hv", hversion != PA_HVERSION_ANY_ID, hversion);
+ ADD(alias, "rev", hversion_rev != PA_HVERSION_REV_ANY_ID, hversion_rev);
+ ADD(alias, "sv", sversion != PA_SVERSION_ANY_ID, sversion);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("parisc", struct parisc_device_id, do_parisc_entry);
+ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
/* Looks like: sdio:cNvNdN. */
static int do_sdio_entry(const char *filename,
- struct sdio_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->class = TO_NATIVE(id->class);
- id->vendor = TO_NATIVE(id->vendor);
- id->device = TO_NATIVE(id->device);
+ DEF_FIELD(symval, sdio_device_id, class);
+ DEF_FIELD(symval, sdio_device_id, vendor);
+ DEF_FIELD(symval, sdio_device_id, device);
strcpy(alias, "sdio:");
- ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class);
- ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor);
- ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device);
+ ADD(alias, "c", class != (__u8)SDIO_ANY_ID, class);
+ ADD(alias, "v", vendor != (__u16)SDIO_ANY_ID, vendor);
+ ADD(alias, "d", device != (__u16)SDIO_ANY_ID, device);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("sdio", struct sdio_device_id, do_sdio_entry);
+ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
/* Looks like: ssb:vNidNrevN. */
static int do_ssb_entry(const char *filename,
- struct ssb_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->vendor = TO_NATIVE(id->vendor);
- id->coreid = TO_NATIVE(id->coreid);
- id->revision = TO_NATIVE(id->revision);
+ DEF_FIELD(symval, ssb_device_id, vendor);
+ DEF_FIELD(symval, ssb_device_id, coreid);
+ DEF_FIELD(symval, ssb_device_id, revision);
strcpy(alias, "ssb:");
- ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor);
- ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid);
- ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision);
+ ADD(alias, "v", vendor != SSB_ANY_VENDOR, vendor);
+ ADD(alias, "id", coreid != SSB_ANY_ID, coreid);
+ ADD(alias, "rev", revision != SSB_ANY_REV, revision);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ssb", struct ssb_device_id, do_ssb_entry);
+ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
/* Looks like: bcma:mNidNrevNclN. */
static int do_bcma_entry(const char *filename,
- struct bcma_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->manuf = TO_NATIVE(id->manuf);
- id->id = TO_NATIVE(id->id);
- id->rev = TO_NATIVE(id->rev);
- id->class = TO_NATIVE(id->class);
+ DEF_FIELD(symval, bcma_device_id, manuf);
+ DEF_FIELD(symval, bcma_device_id, id);
+ DEF_FIELD(symval, bcma_device_id, rev);
+ DEF_FIELD(symval, bcma_device_id, class);
strcpy(alias, "bcma:");
- ADD(alias, "m", id->manuf != BCMA_ANY_MANUF, id->manuf);
- ADD(alias, "id", id->id != BCMA_ANY_ID, id->id);
- ADD(alias, "rev", id->rev != BCMA_ANY_REV, id->rev);
- ADD(alias, "cl", id->class != BCMA_ANY_CLASS, id->class);
+ ADD(alias, "m", manuf != BCMA_ANY_MANUF, manuf);
+ ADD(alias, "id", id != BCMA_ANY_ID, id);
+ ADD(alias, "rev", rev != BCMA_ANY_REV, rev);
+ ADD(alias, "cl", class != BCMA_ANY_CLASS, class);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("bcma", struct bcma_device_id, do_bcma_entry);
+ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
/* Looks like: virtio:dNvN */
-static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
+static int do_virtio_entry(const char *filename, void *symval,
char *alias)
{
- id->device = TO_NATIVE(id->device);
- id->vendor = TO_NATIVE(id->vendor);
+ DEF_FIELD(symval, virtio_device_id, device);
+ DEF_FIELD(symval, virtio_device_id, vendor);
strcpy(alias, "virtio:");
- ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device);
- ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);
+ ADD(alias, "d", device != VIRTIO_DEV_ANY_ID, device);
+ ADD(alias, "v", vendor != VIRTIO_DEV_ANY_ID, vendor);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry);
+ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
/*
* Looks like: vmbus:guid
@@ -819,41 +866,44 @@ ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry);
* in the name.
*/
-static int do_vmbus_entry(const char *filename, struct hv_vmbus_device_id *id,
+static int do_vmbus_entry(const char *filename, void *symval,
char *alias)
{
int i;
- char guid_name[((sizeof(id->guid) + 1)) * 2];
+ DEF_FIELD_ADDR(symval, hv_vmbus_device_id, guid);
+ char guid_name[(sizeof(*guid) + 1) * 2];
- for (i = 0; i < (sizeof(id->guid) * 2); i += 2)
- sprintf(&guid_name[i], "%02x", id->guid[i/2]);
+ for (i = 0; i < (sizeof(*guid) * 2); i += 2)
+ sprintf(&guid_name[i], "%02x", TO_NATIVE((*guid)[i/2]));
strcpy(alias, "vmbus:");
strcat(alias, guid_name);
return 1;
}
-ADD_TO_DEVTABLE("vmbus", struct hv_vmbus_device_id, do_vmbus_entry);
+ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
/* Looks like: i2c:S */
-static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
+static int do_i2c_entry(const char *filename, void *symval,
char *alias)
{
- sprintf(alias, I2C_MODULE_PREFIX "%s", id->name);
+ DEF_FIELD_ADDR(symval, i2c_device_id, name);
+ sprintf(alias, I2C_MODULE_PREFIX "%s", *name);
return 1;
}
-ADD_TO_DEVTABLE("i2c", struct i2c_device_id, do_i2c_entry);
+ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
/* Looks like: spi:S */
-static int do_spi_entry(const char *filename, struct spi_device_id *id,
+static int do_spi_entry(const char *filename, void *symval,
char *alias)
{
- sprintf(alias, SPI_MODULE_PREFIX "%s", id->name);
+ DEF_FIELD_ADDR(symval, spi_device_id, name);
+ sprintf(alias, SPI_MODULE_PREFIX "%s", *name);
return 1;
}
-ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry);
+ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
static const struct dmifield {
const char *prefix;
@@ -885,21 +935,21 @@ static void dmi_ascii_filter(char *d, const char *s)
}
-static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+static int do_dmi_entry(const char *filename, void *symval,
char *alias)
{
int i, j;
-
+ DEF_FIELD_ADDR(symval, dmi_system_id, matches);
sprintf(alias, "dmi*");
for (i = 0; i < ARRAY_SIZE(dmi_fields); i++) {
for (j = 0; j < 4; j++) {
- if (id->matches[j].slot &&
- id->matches[j].slot == dmi_fields[i].field) {
+ if ((*matches)[j].slot &&
+ (*matches)[j].slot == dmi_fields[i].field) {
sprintf(alias + strlen(alias), ":%s*",
dmi_fields[i].prefix);
dmi_ascii_filter(alias + strlen(alias),
- id->matches[j].substr);
+ (*matches)[j].substr);
strcat(alias, "*");
}
}
@@ -908,27 +958,30 @@ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
strcat(alias, ":");
return 1;
}
-ADD_TO_DEVTABLE("dmi", struct dmi_system_id, do_dmi_entry);
+ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
static int do_platform_entry(const char *filename,
- struct platform_device_id *id, char *alias)
+ void *symval, char *alias)
{
- sprintf(alias, PLATFORM_MODULE_PREFIX "%s", id->name);
+ DEF_FIELD_ADDR(symval, platform_device_id, name);
+ sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
return 1;
}
-ADD_TO_DEVTABLE("platform", struct platform_device_id, do_platform_entry);
+ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
static int do_mdio_entry(const char *filename,
- struct mdio_device_id *id, char *alias)
+ void *symval, char *alias)
{
int i;
+ DEF_FIELD(symval, mdio_device_id, phy_id);
+ DEF_FIELD(symval, mdio_device_id, phy_id_mask);
alias += sprintf(alias, MDIO_MODULE_PREFIX);
for (i = 0; i < 32; i++) {
- if (!((id->phy_id_mask >> (31-i)) & 1))
+ if (!((phy_id_mask >> (31-i)) & 1))
*(alias++) = '?';
- else if ((id->phy_id >> (31-i)) & 1)
+ else if ((phy_id >> (31-i)) & 1)
*(alias++) = '1';
else
*(alias++) = '0';
@@ -939,47 +992,50 @@ static int do_mdio_entry(const char *filename,
return 1;
}
-ADD_TO_DEVTABLE("mdio", struct mdio_device_id, do_mdio_entry);
+ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
/* Looks like: zorro:iN. */
-static int do_zorro_entry(const char *filename, struct zorro_device_id *id,
+static int do_zorro_entry(const char *filename, void *symval,
char *alias)
{
- id->id = TO_NATIVE(id->id);
+ DEF_FIELD(symval, zorro_device_id, id);
strcpy(alias, "zorro:");
- ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id);
+ ADD(alias, "i", id != ZORRO_WILDCARD, id);
return 1;
}
-ADD_TO_DEVTABLE("zorro", struct zorro_device_id, do_zorro_entry);
+ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
/* looks like: "pnp:dD" */
static int do_isapnp_entry(const char *filename,
- struct isapnp_device_id *id, char *alias)
+ void *symval, char *alias)
{
+ DEF_FIELD(symval, isapnp_device_id, vendor);
+ DEF_FIELD(symval, isapnp_device_id, function);
sprintf(alias, "pnp:d%c%c%c%x%x%x%x*",
- 'A' + ((id->vendor >> 2) & 0x3f) - 1,
- 'A' + (((id->vendor & 3) << 3) | ((id->vendor >> 13) & 7)) - 1,
- 'A' + ((id->vendor >> 8) & 0x1f) - 1,
- (id->function >> 4) & 0x0f, id->function & 0x0f,
- (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f);
+ 'A' + ((vendor >> 2) & 0x3f) - 1,
+ 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
+ 'A' + ((vendor >> 8) & 0x1f) - 1,
+ (function >> 4) & 0x0f, function & 0x0f,
+ (function >> 12) & 0x0f, (function >> 8) & 0x0f);
return 1;
}
-ADD_TO_DEVTABLE("isapnp", struct isapnp_device_id, do_isapnp_entry);
+ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
/* Looks like: "ipack:fNvNdN". */
static int do_ipack_entry(const char *filename,
- struct ipack_device_id *id, char *alias)
+ void *symval, char *alias)
{
- id->vendor = TO_NATIVE(id->vendor);
- id->device = TO_NATIVE(id->device);
+ DEF_FIELD(symval, ipack_device_id, format);
+ DEF_FIELD(symval, ipack_device_id, vendor);
+ DEF_FIELD(symval, ipack_device_id, device);
strcpy(alias, "ipack:");
- ADD(alias, "f", id->format != IPACK_ANY_FORMAT, id->format);
- ADD(alias, "v", id->vendor != IPACK_ANY_ID, id->vendor);
- ADD(alias, "d", id->device != IPACK_ANY_ID, id->device);
+ ADD(alias, "f", format != IPACK_ANY_FORMAT, format);
+ ADD(alias, "v", vendor != IPACK_ANY_ID, vendor);
+ ADD(alias, "d", device != IPACK_ANY_ID, device);
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ipack", struct ipack_device_id, do_ipack_entry);
+ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
/*
* Append a match expression for a single masked hex digit.
@@ -1030,25 +1086,27 @@ static void append_nibble_mask(char **outp,
* a ? or [] pattern matching exactly one digit.
*/
static int do_amba_entry(const char *filename,
- struct amba_id *id, char *alias)
+ void *symval, char *alias)
{
unsigned int digit;
char *p = alias;
+ DEF_FIELD(symval, amba_id, id);
+ DEF_FIELD(symval, amba_id, mask);
- if ((id->id & id->mask) != id->id)
+ if ((id & mask) != id)
fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: "
"id=0x%08X, mask=0x%08X. Please fix this driver.\n",
- filename, id->id, id->mask);
+ filename, id, mask);
p += sprintf(alias, "amba:d");
for (digit = 0; digit < 8; digit++)
append_nibble_mask(&p,
- (id->id >> (4 * (7 - digit))) & 0xf,
- (id->mask >> (4 * (7 - digit))) & 0xf);
+ (id >> (4 * (7 - digit))) & 0xf,
+ (mask >> (4 * (7 - digit))) & 0xf);
return 1;
}
-ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry);
+ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
/* LOOKS like x86cpu:vendor:VVVV:family:FFFF:model:MMMM:feature:*,FEAT,*
* All fields are numbers. It would be nicer to use strings for vendor
@@ -1056,24 +1114,24 @@ ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry);
* complicated.
*/
-static int do_x86cpu_entry(const char *filename, struct x86_cpu_id *id,
+static int do_x86cpu_entry(const char *filename, void *symval,
char *alias)
{
- id->feature = TO_NATIVE(id->feature);
- id->family = TO_NATIVE(id->family);
- id->model = TO_NATIVE(id->model);
- id->vendor = TO_NATIVE(id->vendor);
+ DEF_FIELD(symval, x86_cpu_id, feature);
+ DEF_FIELD(symval, x86_cpu_id, family);
+ DEF_FIELD(symval, x86_cpu_id, model);
+ DEF_FIELD(symval, x86_cpu_id, vendor);
strcpy(alias, "x86cpu:");
- ADD(alias, "vendor:", id->vendor != X86_VENDOR_ANY, id->vendor);
- ADD(alias, ":family:", id->family != X86_FAMILY_ANY, id->family);
- ADD(alias, ":model:", id->model != X86_MODEL_ANY, id->model);
+ ADD(alias, "vendor:", vendor != X86_VENDOR_ANY, vendor);
+ ADD(alias, ":family:", family != X86_FAMILY_ANY, family);
+ ADD(alias, ":model:", model != X86_MODEL_ANY, model);
strcat(alias, ":feature:*");
- if (id->feature != X86_FEATURE_ANY)
- sprintf(alias + strlen(alias), "%04X*", id->feature);
+ if (feature != X86_FEATURE_ANY)
+ sprintf(alias + strlen(alias), "%04X*", feature);
return 1;
}
-ADD_TO_DEVTABLE("x86cpu", struct x86_cpu_id, do_x86cpu_entry);
+ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
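The DEF_FIELD()/DEF_FIELD_ADDR() conversions above make each handler pull its device-table fields straight out of the raw section data passed in as a plain void *symval; the old explicit TO_NATIVE() calls disappear, so byte-order handling moves into the macros. The aliases the handlers emit can be checked on any built module; a quick sketch, assuming a kmod-style modinfo and using a module touched later in this series as the example:

    modinfo -F alias snd-bt87x | head    # prints the pci: aliases generated by file2alias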
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index ff36c508a10e..78b30c1548e9 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -830,6 +830,8 @@ static const char *section_white_list[] =
".toc*",
".xt.prop", /* xtensa */
".xt.lit", /* xtensa */
+ ".arcextmap*", /* arc */
+ ".gnu.linkonce.arcext*", /* arc : modules */
NULL
};
@@ -2126,7 +2128,7 @@ int main(int argc, char **argv)
struct ext_sym_list *extsym_iter;
struct ext_sym_list *extsym_start = NULL;
- while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:e:msSo:awM:K:")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
@@ -2135,9 +2137,6 @@ int main(int argc, char **argv)
module_read = optarg;
external_module = 1;
break;
- case 'c':
- cross_build = 1;
- break;
case 'e':
external_module = 1;
extsym_iter =
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 4bf17ddf7c7f..fbbfd08853d3 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -95,7 +95,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
echo "%endif"
echo "%endif"
-echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
+echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install'
echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index bd6dca8a0ab2..84b88f109b80 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -108,7 +108,7 @@ scm_version()
fi
# Check for svn and a svn repo.
- if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then
+ if rev=`LANG= LC_ALL= LC_MESSAGES=C svn info 2>/dev/null | grep '^Last Changed Rev'`; then
rev=`echo $rev | awk '{print $NF}'`
printf -- '-svn%s' "$rev"
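The locale override matters because a localized 'svn info' translates its field names, so the '^Last Changed Rev' grep silently matches nothing and no -svn suffix gets appended. A quick way to see the difference (the locale name is only an example):

    LANG=de_DE.UTF-8 svn info | grep '^Last Changed Rev'          # may match nothing
    LANG= LC_ALL= LC_MESSAGES=C svn info | grep '^Last Changed Rev'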
diff --git a/scripts/sign-file b/scripts/sign-file
index 974a20b661b7..2b7c4484d46c 100755
--- a/scripts/sign-file
+++ b/scripts/sign-file
@@ -2,51 +2,45 @@
#
# Sign a module file using the given key.
#
-# Format:
-#
-# ./scripts/sign-file [-v] <key> <x509> <module> [<dest>]
-#
-#
+
+my $USAGE =
+"Usage: scripts/sign-file [-v] <hash algo> <key> <x509> <module> [<dest>]\n" .
+" scripts/sign-file [-v] -s <raw sig> <hash algo> <x509> <module> [<dest>]\n";
+
use strict;
use FileHandle;
use IPC::Open2;
+use Getopt::Std;
-my $verbose = 0;
-if ($#ARGV >= 0 && $ARGV[0] eq "-v") {
- $verbose = 1;
- shift;
-}
+my %opts;
+getopts('vs:', \%opts) or die $USAGE;
+my $verbose = $opts{'v'};
+my $signature_file = $opts{'s'};
-die "Format: ./scripts/sign-file [-v] <key> <x509> <module> [<dest>]\n"
- if ($#ARGV != 2 && $#ARGV != 3);
+die $USAGE if ($#ARGV > 4);
+die $USAGE if (!$signature_file && $#ARGV < 3 || $signature_file && $#ARGV < 2);
-my $private_key = $ARGV[0];
-my $x509 = $ARGV[1];
-my $module = $ARGV[2];
-my $dest = ($#ARGV == 3) ? $ARGV[3] : $ARGV[2] . "~";
+my $dgst = shift @ARGV;
+my $private_key;
+if (!$signature_file) {
+ $private_key = shift @ARGV;
+}
+my $x509 = shift @ARGV;
+my $module = shift @ARGV;
+my ($dest, $keep_orig);
+if (@ARGV) {
+ $dest = $ARGV[0];
+ $keep_orig = 1;
+} else {
+ $dest = $module . "~";
+}
-die "Can't read private key\n" unless (-r $private_key);
+die "Can't read private key\n" if (!$signature_file && !-r $private_key);
+die "Can't read signature file\n" if ($signature_file && !-r $signature_file);
die "Can't read X.509 certificate\n" unless (-r $x509);
die "Can't read module\n" unless (-r $module);
#
-# Read the kernel configuration
-#
-my %config = (
- CONFIG_MODULE_SIG_SHA512 => 1
- );
-
-if (-r ".config") {
- open(FD, "<.config") || die ".config";
- while (<FD>) {
- if ($_ =~ /^(CONFIG_.*)=[ym]/) {
- $config{$1} = 1;
- }
- }
- close(FD);
-}
-
-#
# Function to read the contents of a file into a variable.
#
sub read_file($)
@@ -321,73 +315,71 @@ my $id_type = 1; # Identifier type: X.509
#
# Digest the data
#
-my ($dgst, $prologue) = ();
-if (exists $config{"CONFIG_MODULE_SIG_SHA1"}) {
+my $prologue;
+if ($dgst eq "sha1") {
$prologue = pack("C*",
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2B, 0x0E, 0x03, 0x02, 0x1A,
0x05, 0x00, 0x04, 0x14);
- $dgst = "-sha1";
$hash = 2;
-} elsif (exists $config{"CONFIG_MODULE_SIG_SHA224"}) {
+} elsif ($dgst eq "sha224") {
$prologue = pack("C*",
0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
0x05, 0x00, 0x04, 0x1C);
- $dgst = "-sha224";
$hash = 7;
-} elsif (exists $config{"CONFIG_MODULE_SIG_SHA256"}) {
+} elsif ($dgst eq "sha256") {
$prologue = pack("C*",
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
0x05, 0x00, 0x04, 0x20);
- $dgst = "-sha256";
$hash = 4;
-} elsif (exists $config{"CONFIG_MODULE_SIG_SHA384"}) {
+} elsif ($dgst eq "sha384") {
$prologue = pack("C*",
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
0x05, 0x00, 0x04, 0x30);
- $dgst = "-sha384";
$hash = 5;
-} elsif (exists $config{"CONFIG_MODULE_SIG_SHA512"}) {
+} elsif ($dgst eq "sha512") {
$prologue = pack("C*",
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
0x05, 0x00, 0x04, 0x40);
- $dgst = "-sha512";
$hash = 6;
} else {
- die "Can't determine hash algorithm";
+ die "Unknown hash algorithm: $dgst\n";
}
-#
-# Generate the digest and read from openssl's stdout
-#
-my $digest;
-$digest = readpipe("openssl dgst $dgst -binary $module") || die "openssl dgst";
-
-#
-# Generate the binary signature, which will be just the integer that comprises
-# the signature with no metadata attached.
-#
-my $pid;
-$pid = open2(*read_from, *write_to,
- "openssl rsautl -sign -inkey $private_key -keyform PEM") ||
- die "openssl rsautl";
-binmode write_to;
-print write_to $prologue . $digest || die "pipe to openssl rsautl";
-close(write_to) || die "pipe to openssl rsautl";
-
-binmode read_from;
my $signature;
-read(read_from, $signature, 4096) || die "pipe from openssl rsautl";
-close(read_from) || die "pipe from openssl rsautl";
+if ($signature_file) {
+ $signature = read_file($signature_file);
+} else {
+ #
+ # Generate the digest and read from openssl's stdout
+ #
+ my $digest;
+ $digest = readpipe("openssl dgst -$dgst -binary $module") || die "openssl dgst";
+
+ #
+ # Generate the binary signature, which will be just the integer that
+ # comprises the signature with no metadata attached.
+ #
+ my $pid;
+ $pid = open2(*read_from, *write_to,
+ "openssl rsautl -sign -inkey $private_key -keyform PEM") ||
+ die "openssl rsautl";
+ binmode write_to;
+ print write_to $prologue . $digest || die "pipe to openssl rsautl";
+ close(write_to) || die "pipe to openssl rsautl";
+
+ binmode read_from;
+ read(read_from, $signature, 4096) || die "pipe from openssl rsautl";
+ close(read_from) || die "pipe from openssl rsautl";
+ waitpid($pid, 0) || die;
+ die "openssl rsautl died: $?" if ($? >> 8);
+}
$signature = pack("n", length($signature)) . $signature,
-waitpid($pid, 0) || die;
-die "openssl rsautl died: $?" if ($? >> 8);
-
#
# Build the signed binary
#
@@ -424,6 +416,6 @@ print FD
;
close FD || die $dest;
-if ($#ARGV != 3) {
+if (!$keep_orig) {
rename($dest, $module) || die $module;
}
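With this rewrite the hash algorithm is passed on the command line instead of being guessed from .config. A minimal invocation sketch, assuming the usual in-tree key and certificate names (all file names here are placeholders):

    perl scripts/sign-file sha512 signing_key.priv signing_key.x509 module.ko
    # or, with a detached raw signature produced elsewhere (module.sig is hypothetical):
    perl scripts/sign-file -s module.sig sha512 signing_key.x509 module.ko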
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 65f9595acea9..26a87e68afed 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -217,34 +217,34 @@ exuberant()
emacs()
{
all_target_sources | xargs $1 -a \
- --regex='/^(ENTRY|_GLOBAL)(\([^)]*\)).*/\2/' \
+ --regex='/^\(ENTRY\|_GLOBAL\)(\([^)]*\)).*/\2/' \
--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/' \
- --regex='/PAGEFLAG\(([^,)]*).*/Page\1/' \
- --regex='/PAGEFLAG\(([^,)]*).*/SetPage\1/' \
- --regex='/PAGEFLAG\(([^,)]*).*/ClearPage\1/' \
- --regex='/TESTSETFLAG\(([^,)]*).*/TestSetPage\1/' \
- --regex='/TESTPAGEFLAG\(([^,)]*).*/Page\1/' \
- --regex='/SETPAGEFLAG\(([^,)]*).*/SetPage\1/' \
- --regex='/__SETPAGEFLAG\(([^,)]*).*/__SetPage\1/' \
- --regex='/TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
- --regex='/__TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
- --regex='/CLEARPAGEFLAG\(([^,)]*).*/ClearPage\1/' \
- --regex='/__CLEARPAGEFLAG\(([^,)]*).*/__ClearPage\1/' \
- --regex='/__PAGEFLAG\(([^,)]*).*/__SetPage\1/' \
- --regex='/__PAGEFLAG\(([^,)]*).*/__ClearPage\1/' \
- --regex='/PAGEFLAG_FALSE\(([^,)]*).*/Page\1/' \
- --regex='/TESTSCFLAG\(([^,)]*).*/TestSetPage\1/' \
- --regex='/TESTSCFLAG\(([^,)]*).*/TestClearPage\1/' \
- --regex='/SETPAGEFLAG_NOOP\(([^,)]*).*/SetPage\1/' \
- --regex='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/' \
- --regex='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
- --regex='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
- --regex='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/' \
- --regex='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/' \
- --regex='/PCI_OP_READ\(([a-z]*[a-z]).*[1-4]\)/pci_bus_read_config_\1/' \
- --regex='/PCI_OP_WRITE\(([a-z]*[a-z]).*[1-4]\)/pci_bus_write_config_\1/'
+ --regex='/PAGEFLAG(\([^,)]*\).*/Page\1/' \
+ --regex='/PAGEFLAG(\([^,)]*\).*/SetPage\1/' \
+ --regex='/PAGEFLAG(\([^,)]*\).*/ClearPage\1/' \
+ --regex='/TESTSETFLAG(\([^,)]*\).*/TestSetPage\1/' \
+ --regex='/TESTPAGEFLAG(\([^,)]*\).*/Page\1/' \
+ --regex='/SETPAGEFLAG(\([^,)]*\).*/SetPage\1/' \
+ --regex='/__SETPAGEFLAG(\([^,)]*\).*/__SetPage\1/' \
+ --regex='/TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/' \
+ --regex='/__TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/' \
+ --regex='/CLEARPAGEFLAG(\([^,)]*\).*/ClearPage\1/' \
+ --regex='/__CLEARPAGEFLAG(\([^,)]*\).*/__ClearPage\1/' \
+ --regex='/__PAGEFLAG(\([^,)]*\).*/__SetPage\1/' \
+ --regex='/__PAGEFLAG(\([^,)]*\).*/__ClearPage\1/' \
+ --regex='/PAGEFLAG_FALSE(\([^,)]*\).*/Page\1/' \
+ --regex='/TESTSCFLAG(\([^,)]*\).*/TestSetPage\1/' \
+ --regex='/TESTSCFLAG(\([^,)]*\).*/TestClearPage\1/' \
+ --regex='/SETPAGEFLAG_NOOP(\([^,)]*\).*/SetPage\1/' \
+ --regex='/CLEARPAGEFLAG_NOOP(\([^,)]*\).*/ClearPage\1/' \
+ --regex='/__CLEARPAGEFLAG_NOOP(\([^,)]*\).*/__ClearPage\1/' \
+ --regex='/TESTCLEARFLAG_FALSE(\([^,)]*\).*/TestClearPage\1/' \
+ --regex='/__TESTCLEARFLAG_FALSE(\([^,)]*\).*/__TestClearPage\1/' \
+ --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \
+ --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \
+ --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'
all_kconfigs | xargs $1 -a \
--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
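These etags expressions are exercised through the kernel's normal tag targets; a rough sketch (which helper function runs depends on the ctags/etags flavour installed):

    make TAGS    # Emacs tag table; uses the emacs() regexes above with plain GNU etags
    make tags    # ctags table; handled by the exuberant() branch when Exuberant Ctags is installed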
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 60f0c76a27d3..859abdaac1ea 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -349,8 +349,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
unsigned int state;
struct file_perms perms = {};
struct path_cond cond = {
- bprm->file->f_path.dentry->d_inode->i_uid,
- bprm->file->f_path.dentry->d_inode->i_mode
+ file_inode(bprm->file)->i_uid,
+ file_inode(bprm->file)->i_mode
};
const char *name = NULL, *target = NULL, *info = NULL;
int error = cap_bprm_set_creds(bprm);
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index cd21ec5b90af..fdaa50cb1876 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -449,8 +449,8 @@ int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
u32 request)
{
struct path_cond cond = {
- .uid = file->f_path.dentry->d_inode->i_uid,
- .mode = file->f_path.dentry->d_inode->i_mode
+ .uid = file_inode(file)->i_uid,
+ .mode = file_inode(file)->i_mode
};
return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8c2a7f6b35e2..b21830eced41 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
struct aa_profile *profile;
int error = 0;
- if (!mediated_filesystem(file->f_path.dentry->d_inode))
+ if (!mediated_filesystem(file_inode(file)))
return 0;
/* If in exec, permission is handled by bprm hooks.
@@ -394,7 +394,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
profile = aa_cred_profile(cred);
if (!unconfined(profile)) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct path_cond cond = { inode->i_uid, inode->i_mode };
error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
BUG_ON(!fprofile);
if (!file->f_path.mnt ||
- !mediated_filesystem(file->f_path.dentry->d_inode))
+ !mediated_filesystem(file_inode(file)))
return 0;
profile = __aa_current_profile();
diff --git a/security/commoncap.c b/security/commoncap.c
index 7ee08c756d6b..c44b6fe6648e 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -440,7 +440,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_c
if (!file_caps_enabled)
return 0;
- if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
return 0;
dentry = dget(bprm->file->f_dentry);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index d9030b29d84d..1c03e8f1e0e1 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -140,12 +140,12 @@ int ima_must_measure(struct inode *inode, int mask, int function)
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
const char *filename = file->f_dentry->d_name.name;
int result = 0;
if (!(iint->flags & IMA_COLLECTED)) {
- u64 i_version = file->f_dentry->d_inode->i_version;
+ u64 i_version = file_inode(file)->i_version;
iint->ima_xattr.type = IMA_XATTR_DIGEST;
result = ima_calc_file_hash(file, iint->ima_xattr.digest);
@@ -182,7 +182,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
int result = -ENOMEM;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
int violation = 0;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index b691e0f3830c..a02e0791cf15 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -66,7 +66,7 @@ int ima_calc_file_hash(struct file *file, char *digest)
file->f_mode |= FMODE_READ;
read = 1;
}
- i_size = i_size_read(file->f_dentry->d_inode);
+ i_size = i_size_read(file_inode(file));
while (offset < i_size) {
int rbuf_len;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 5127afcc4b89..3b3b7e6bf8da 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -126,7 +126,7 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
*/
void ima_file_free(struct file *file)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
if (!iint_initialized || !S_ISREG(inode->i_mode))
@@ -142,7 +142,7 @@ void ima_file_free(struct file *file)
static int process_measurement(struct file *file, const char *filename,
int mask, int function)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
char *pathbuf = NULL;
const char *pathname = NULL;
@@ -284,7 +284,8 @@ int ima_module_check(struct file *file)
{
if (!file) {
#ifndef CONFIG_MODULE_SIG_FORCE
- if (ima_appraise & IMA_APPRAISE_MODULES)
+ if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES; /* INTEGRITY_UNKNOWN */
#endif
return 0; /* We rely on module signature checking */
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index b27535a13a79..399433ad614e 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -176,7 +176,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
if ((rule->flags & IMA_FSUUID) &&
- memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+ memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
return false;
if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
return false;
@@ -530,14 +530,15 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
ima_log_string(ab, "fsuuid", args[0].from);
if (memchr_inv(entry->fsuuid, 0x00,
- sizeof(entry->fsuuid))) {
+ sizeof(entry->fsuuid))) {
result = -EINVAL;
break;
}
- part_pack_uuid(args[0].from, entry->fsuuid);
- entry->flags |= IMA_FSUUID;
- result = 0;
+ result = blk_part_pack_uuid(args[0].from,
+ entry->fsuuid);
+ if (!result)
+ entry->flags |= IMA_FSUUID;
break;
case Opt_uid:
ima_log_string(ab, "uid", args[0].from);
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 55a6271bce7a..ff63fe00c195 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -45,12 +45,11 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
{
struct ima_queue_entry *qe, *ret = NULL;
unsigned int key;
- struct hlist_node *pos;
int rc;
key = ima_hash_key(digest_value);
rcu_read_lock();
- hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) {
+ hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 4d3fab47e643..dad36a6ab45f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -188,11 +188,9 @@ int avc_get_hash_stats(char *page)
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
if (!hlist_empty(head)) {
- struct hlist_node *next;
-
slots_used++;
chain_len = 0;
- hlist_for_each_entry_rcu(node, next, head, list)
+ hlist_for_each_entry_rcu(node, head, list)
chain_len++;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
@@ -241,7 +239,6 @@ static inline int avc_reclaim_node(void)
int hvalue, try, ecx;
unsigned long flags;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -253,7 +250,7 @@ static inline int avc_reclaim_node(void)
continue;
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list) {
+ hlist_for_each_entry(node, head, list) {
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
@@ -301,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
- struct hlist_node *next;
hvalue = avc_hash(ssid, tsid, tclass);
head = &avc_cache.slots[hvalue];
- hlist_for_each_entry_rcu(node, next, head, list) {
+ hlist_for_each_entry_rcu(node, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
@@ -394,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
node = avc_alloc_node();
if (node) {
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
hvalue = avc_hash(ssid, tsid, tclass);
@@ -404,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
@@ -541,7 +536,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
unsigned long flag;
struct avc_node *pos, *node, *orig = NULL;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
node = avc_alloc_node();
@@ -558,7 +552,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (ssid == pos->ae.ssid &&
tsid == pos->ae.tsid &&
tclass == pos->ae.tclass &&
@@ -614,7 +608,6 @@ out:
static void avc_flush(void)
{
struct hlist_head *head;
- struct hlist_node *next;
struct avc_node *node;
spinlock_t *lock;
unsigned long flag;
@@ -630,7 +623,7 @@ static void avc_flush(void)
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list)
+ hlist_for_each_entry(node, head, list)
avc_node_delete(node);
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ef26e9611ffb..84b591711eec 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1528,7 +1528,7 @@ static int file_has_perm(const struct cred *cred,
u32 av)
{
struct file_security_struct *fsec = file->f_security;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
@@ -1957,7 +1957,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
- struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(bprm->file);
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -2929,7 +2929,7 @@ static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
static int selinux_revalidate_file_permission(struct file *file, int mask)
{
const struct cred *cred = current_cred();
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
@@ -2941,7 +2941,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
@@ -3218,7 +3218,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
struct inode_security_struct *isec;
fsec = file->f_security;
- isec = file->f_path.dentry->d_inode->i_security;
+ isec = file_inode(file)->i_security;
/*
* Save inode label and policy sequence number
* at open-time so that selinux_file_permission
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 3a6e8731646c..ff427733c290 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -202,7 +202,7 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
{
char tmpbuf[TMPBUFLEN];
ssize_t length;
- ino_t ino = filp->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(filp)->i_ino;
int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
security_get_reject_unknown() : !security_get_allow_unknown();
@@ -671,7 +671,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
{
- ino_t ino = file->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(file)->i_ino;
char *data;
ssize_t rv;
@@ -1042,8 +1042,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ssize_t length;
ssize_t ret;
int cur_enforcing;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -1077,8 +1076,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -1486,13 +1484,11 @@ static int sel_make_avc_files(struct dentry *dir)
static ssize_t sel_read_initcon(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode *inode;
char *con;
u32 sid, len;
ssize_t ret;
- inode = file->f_path.dentry->d_inode;
- sid = inode->i_ino&SEL_INO_MASK;
+ sid = file_inode(file)->i_ino&SEL_INO_MASK;
ret = security_sid_to_context(sid, &con, &len);
if (ret)
return ret;
@@ -1553,7 +1549,7 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
static ssize_t sel_read_class(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
+ unsigned long ino = file_inode(file)->i_ino;
char res[TMPBUFLEN];
ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
return simple_read_from_buffer(buf, count, ppos, res, len);
@@ -1567,7 +1563,7 @@ static const struct file_operations sel_class_ops = {
static ssize_t sel_read_perm(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
+ unsigned long ino = file_inode(file)->i_ino;
char res[TMPBUFLEN];
ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
return simple_read_from_buffer(buf, count, ppos, res, len);
@@ -1584,7 +1580,7 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf,
int value;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- unsigned long i_ino = file->f_path.dentry->d_inode->i_ino;
+ unsigned long i_ino = file_inode(file)->i_ino;
value = security_policycap_supported(i_ino & SEL_INO_MASK);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 38be92ce901e..fa64740abb59 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -456,7 +456,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
*/
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(bprm->file);
struct task_smack *bsp = bprm->cred->security;
struct inode_smack *isp;
int rc;
@@ -1187,21 +1187,15 @@ static int smack_mmap_file(struct file *file,
char *msmack;
char *osmack;
struct inode_smack *isp;
- struct dentry *dp;
int may;
int mmay;
int tmay;
int rc;
- if (file == NULL || file->f_dentry == NULL)
- return 0;
-
- dp = file->f_dentry;
-
- if (dp->d_inode == NULL)
+ if (file == NULL)
return 0;
- isp = dp->d_inode->i_security;
+ isp = file_inode(file)->i_security;
if (isp->smk_mmap == NULL)
return 0;
msmack = isp->smk_mmap;
@@ -1359,7 +1353,7 @@ static int smack_file_receive(struct file *file)
*/
static int smack_file_open(struct file *file, const struct cred *cred)
{
- struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+ struct inode_smack *isp = file_inode(file)->i_security;
file->f_security = isp->smk_inode;
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 8592f2fc6ebb..fcf32783b66b 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -135,7 +135,7 @@ static const struct file_operations tomoyo_self_operations = {
*/
static int tomoyo_open(struct inode *inode, struct file *file)
{
- const int key = ((u8 *) file->f_path.dentry->d_inode->i_private)
+ const int key = ((u8 *) file_inode(file)->i_private)
- ((u8 *) NULL);
return tomoyo_open_control(key, file);
}
diff --git a/sound/core/info.c b/sound/core/info.c
index 6b368d25073b..5bb97e7d325a 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -496,7 +496,7 @@ static long snd_info_entry_ioctl(struct file *file, unsigned int cmd,
static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct snd_info_private_data *data;
struct snd_info_entry *entry;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 09b4286c65f9..71ae86ca64ac 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1586,7 +1586,7 @@ static struct file *snd_pcm_file_fd(int fd, int *fput_needed)
file = fget_light(fd, fput_needed);
if (!file)
return NULL;
- inode = file->f_path.dentry->d_inode;
+ inode = file_inode(file);
if (!S_ISCHR(inode->i_mode) ||
imajor(inode) != snd_major) {
fput_light(file, *fput_needed);
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index 536c4c0514d3..11ff7c55240c 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -642,7 +642,7 @@ static int mixer_ioctl(unsigned int cmd, unsigned long arg)
static long dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- int minor = iminor(file->f_path.dentry->d_inode);
+ int minor = iminor(file_inode(file));
int ret;
if (cmd == OSS_GETVERSION) {
@@ -1012,7 +1012,7 @@ static int dsp_write(const char __user *buf, size_t len)
static ssize_t dev_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
- int minor = iminor(file->f_path.dentry->d_inode);
+ int minor = iminor(file_inode(file));
if (minor == dev.dsp_minor)
return dsp_read(buf, count);
else
@@ -1021,7 +1021,7 @@ static ssize_t dev_read(struct file *file, char __user *buf, size_t count, loff_
static ssize_t dev_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
- int minor = iminor(file->f_path.dentry->d_inode);
+ int minor = iminor(file_inode(file));
if (minor == dev.dsp_minor)
return dsp_write(buf, count);
else
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 7c7793a0eb25..e7780349cc55 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -143,7 +143,7 @@ static int get_mixer_levels(void __user * arg)
static ssize_t sound_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
int ret = -EINVAL;
/*
@@ -176,7 +176,7 @@ static ssize_t sound_read(struct file *file, char __user *buf, size_t count, lof
static ssize_t sound_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
int ret = -EINVAL;
mutex_lock(&soundcard_mutex);
@@ -333,7 +333,7 @@ static int sound_mixer_ioctl(int mixdev, unsigned int cmd, void __user *arg)
static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int len = 0, dtype;
- int dev = iminor(file->f_dentry->d_inode);
+ int dev = iminor(file_inode(file));
long ret = -EINVAL;
void __user *p = (void __user *)arg;
@@ -406,7 +406,7 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static unsigned int sound_poll(struct file *file, poll_table * wait)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int dev = iminor(inode);
DEB(printk("sound_poll(dev=%d)\n", dev));
@@ -431,7 +431,7 @@ static int sound_mmap(struct file *file, struct vm_area_struct *vma)
int dev_class;
unsigned long size;
struct dma_buffparms *dmap = NULL;
- int dev = iminor(file->f_path.dentry->d_inode);
+ int dev = iminor(file_inode(file));
dev_class = dev & 0x0f;
dev >>= 4;
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index cdd100dae855..9febe5509748 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -836,6 +836,8 @@ static struct {
{0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
};
+static struct pci_driver driver;
+
/* return the id of the card, or a negative value if it's blacklisted */
static int snd_bt87x_detect_card(struct pci_dev *pci)
{
@@ -962,11 +964,24 @@ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = {
{ }
};
-static struct pci_driver bt87x_driver = {
+static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_bt87x_ids,
.probe = snd_bt87x_probe,
.remove = snd_bt87x_remove,
};
-module_pci_driver(bt87x_driver);
+static int __init alsa_card_bt87x_init(void)
+{
+ if (load_all)
+ driver.id_table = snd_bt87x_default_ids;
+ return pci_register_driver(&driver);
+}
+
+static void __exit alsa_card_bt87x_exit(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+module_init(alsa_card_bt87x_init)
+module_exit(alsa_card_bt87x_exit)
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index a7c296a36a17..e6b016693240 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -862,6 +862,12 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
filename, emu->firmware->size);
}
+ err = snd_emu1010_load_firmware(emu);
+ if (err != 0) {
+ snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
+ return err;
+ }
+
/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
if ((reg & 0x3f) != 0x15) {
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 748a286277eb..5ae1d045bdcb 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1127,7 +1127,7 @@ static int snd_emu10k1_playback_open(struct snd_pcm_substream *substream)
struct snd_emu10k1_pcm *epcm;
struct snd_emu10k1_pcm_mixer *mix;
struct snd_pcm_runtime *runtime = substream->runtime;
- int i, err;
+ int i, err, sample_rate;
epcm = kzalloc(sizeof(*epcm), GFP_KERNEL);
if (epcm == NULL)
@@ -1146,7 +1146,11 @@ static int snd_emu10k1_playback_open(struct snd_pcm_substream *substream)
kfree(epcm);
return err;
}
- err = snd_pcm_hw_rule_noresample(runtime, 48000);
+ if (emu->card_capabilities->emu_model && emu->emu1010.internal_clock == 0)
+ sample_rate = 44100;
+ else
+ sample_rate = 48000;
+ err = snd_pcm_hw_rule_noresample(runtime, sample_rate);
if (err < 0) {
kfree(epcm);
return err;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 21425fb51fe0..78e1827d0a95 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1640,6 +1640,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
if (pcmdev > 0)
sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
+ if (!is_jack_detectable(codec, per_pin->pin_nid))
+ strncat(hdmi_str, " Phantom",
+ sizeof(hdmi_str) - strlen(hdmi_str) - 1);
return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 61478fd82565..2d4237bc0d8e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -928,6 +928,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
static const struct snd_pci_quirk beep_white_list[] = {
SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x8376, "EeePC", 1),
SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
diff --git a/sound/pci/ice1712/revo.c b/sound/pci/ice1712/revo.c
index 7641080a9b5d..1112ec1953be 100644
--- a/sound/pci/ice1712/revo.c
+++ b/sound/pci/ice1712/revo.c
@@ -35,6 +35,7 @@
struct revo51_spec {
struct snd_i2c_device *dev;
struct snd_pt2258 *pt2258;
+ struct ak4114 *ak4114;
};
static void revo_i2s_mclk_changed(struct snd_ice1712 *ice)
@@ -359,9 +360,9 @@ static struct snd_ak4xxx_private akm_ap192_priv = {
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
.clk_mask = VT1724_REVO_CCLK,
- .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1,
- .cs_addr = VT1724_REVO_CS1,
- .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1,
+ .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS3,
+ .cs_addr = VT1724_REVO_CS3,
+ .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS3,
.add_flags = VT1724_REVO_CCLK, /* high at init */
.mask_flags = 0,
};
@@ -372,7 +373,7 @@ static struct snd_ak4xxx_private akm_ap192_priv = {
* CCLK (pin 34) -- GPIO1 pin 51 (shared with AK4358)
* CSN (pin 35) -- GPIO7 pin 59
*/
-#define AK4114_ADDR 0x02
+#define AK4114_ADDR 0x00
static void write_data(struct snd_ice1712 *ice, unsigned int gpio,
unsigned int data, int idx)
@@ -426,7 +427,7 @@ static unsigned int ap192_4wire_start(struct snd_ice1712 *ice)
tmp = snd_ice1712_gpio_read(ice);
tmp |= VT1724_REVO_CCLK; /* high at init */
tmp |= VT1724_REVO_CS0;
- tmp &= ~VT1724_REVO_CS1;
+ tmp &= ~VT1724_REVO_CS3;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
return tmp;
@@ -434,7 +435,7 @@ static unsigned int ap192_4wire_start(struct snd_ice1712 *ice)
static void ap192_4wire_finish(struct snd_ice1712 *ice, unsigned int tmp)
{
- tmp |= VT1724_REVO_CS1;
+ tmp |= VT1724_REVO_CS3;
tmp |= VT1724_REVO_CS0;
snd_ice1712_gpio_write(ice, tmp);
udelay(1);
@@ -470,27 +471,32 @@ static unsigned char ap192_ak4114_read(void *private_data, unsigned char addr)
static int ap192_ak4114_init(struct snd_ice1712 *ice)
{
static const unsigned char ak4114_init_vals[] = {
- AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1,
+ AK4114_RST | AK4114_PWN | AK4114_OCKS0,
AK4114_DIF_I24I2S,
AK4114_TX1E,
- AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(1),
+ AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(0),
0,
0
};
static const unsigned char ak4114_init_txcsb[] = {
0x41, 0x02, 0x2c, 0x00, 0x00
};
- struct ak4114 *ak;
int err;
+ struct revo51_spec *spec;
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ ice->spec = spec;
+
err = snd_ak4114_create(ice->card,
ap192_ak4114_read,
ap192_ak4114_write,
ak4114_init_vals, ak4114_init_txcsb,
- ice, &ak);
+ ice, &spec->ak4114);
/* AK4114 in Revo cannot detect external rate correctly.
* No reason to stop capture stream due to incorrect checks */
- ak->check_flags = AK4114_CHECK_NO_RATE;
+ spec->ak4114->check_flags = AK4114_CHECK_NO_RATE;
return 0; /* error ignored; it's no fatal error */
}
@@ -562,6 +568,9 @@ static int revo_init(struct snd_ice1712 *ice)
ice);
if (err < 0)
return err;
+ err = ap192_ak4114_init(ice);
+ if (err < 0)
+ return err;
/* unmute all codecs */
snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE,
@@ -575,7 +584,7 @@ static int revo_init(struct snd_ice1712 *ice)
static int revo_add_controls(struct snd_ice1712 *ice)
{
- struct revo51_spec *spec;
+ struct revo51_spec *spec = ice->spec;
int err;
switch (ice->eeprom.subvendor) {
@@ -597,7 +606,9 @@ static int revo_add_controls(struct snd_ice1712 *ice)
err = snd_ice1712_akm4xxx_build_controls(ice);
if (err < 0)
return err;
- err = ap192_ak4114_init(ice);
+ /* only capture SPDIF over AK4114 */
+ err = snd_ak4114_build(spec->ak4114, NULL,
+ ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
if (err < 0)
return err;
break;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 8df1b3feaf2b..b7e84a7cd9ee 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -251,7 +251,7 @@ static ssize_t codec_reg_write_file(struct file *file,
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER);
+ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
snd_soc_write(codec, reg, value);
return buf_size;
diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c
index 37711a5d0d6b..e14903468051 100644
--- a/sound/sound_firmware.c
+++ b/sound/sound_firmware.c
@@ -19,7 +19,7 @@ static int do_mod_firmware_load(const char *fn, char **fp)
printk(KERN_INFO "Unable to load '%s'.\n", fn);
return 0;
}
- l = i_size_read(filp->f_path.dentry->d_inode);
+ l = i_size_read(file_inode(filp));
if (l <= 0 || l > 131072)
{
printk(KERN_INFO "Invalid firmware '%s'\n", fn);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c2206c87fc9f..d5818c98d051 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -94,6 +94,12 @@
#define CPUINFO_PROC "cpu model"
#endif
+#ifdef __arc__
+#define rmb() asm volatile("" ::: "memory")
+#define cpu_relax() rmb()
+#define CPUINFO_PROC "Processor"
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index bc4ad7977438..c8be0fbc5145 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -314,7 +314,6 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
struct hlist_head *head;
- struct hlist_node *pos;
struct perf_sample_id *sid;
int hash;
@@ -324,7 +323,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
- hlist_for_each_entry(sid, pos, head, node)
+ hlist_for_each_entry(sid, head, node)
if (sid->id == id)
return sid->evsel;
diff --git a/tools/testing/ktest/examples/include/patchcheck.conf b/tools/testing/ktest/examples/include/patchcheck.conf
index 339d3e1700ff..0eb0a5ac77da 100644
--- a/tools/testing/ktest/examples/include/patchcheck.conf
+++ b/tools/testing/ktest/examples/include/patchcheck.conf
@@ -14,6 +14,16 @@
PATCH_START := HEAD~3
PATCH_END := HEAD
+# Use the oldconfig if build_type wasn't defined
+DEFAULTS IF NOT DEFINED BUILD_TYPE
+DO_BUILD_TYPE := oldconfig
+
+DEFAULTS ELSE
+DO_BUILD_TYPE := ${BUILD_TYPE}
+
+DEFAULTS
+
+
# Change PATCH_CHECKOUT to be the branch you want to test. The test will
# do a git checkout of this branch before starting. Obviously both
# PATCH_START and PATCH_END must be in this branch (and PATCH_START must
@@ -43,6 +53,31 @@ PATCH_TEST_TYPE := boot
# (space delimited)
#IGNORE_WARNINGS = 39eaf7ef884dcc44f7ff1bac803ca2a1dcf43544 6edb2a8a385f0cdef51dae37ff23e74d76d8a6ce
+# Instead of just checking for warnings in the files that are changed,
+# it can be advantageous to check for any new warnings. If a
+# header file is changed, it could cause a warning in a file not
+# touched by the commit. To detect these kinds of warnings, you
+# can use the WARNINGS_FILE option.
+#
+# If the variable CREATE_WARNINGS_FILE is set, this config will
+# enable the WARNINGS_FILE during the patchcheck test. Also,
+# before running the patchcheck test, it will create the
+# warnings file.
+#
+DEFAULTS IF DEFINED CREATE_WARNINGS_FILE
+WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file
+
+TEST_START IF DEFINED CREATE_WARNINGS_FILE
+# WARNINGS_FILE is already set by the DEFAULTS above
+TEST_TYPE = make_warnings_file
+# Checkout the commit before the patches to test,
+# and record all the warnings that exist before the patches
+# to test are added
+CHECKOUT = ${PATCHCHECK_START}~1
+# Force a full build
+BUILD_NOCLEAN = 0
+BUILD_TYPE = ${DO_BUILD_TYPE}
+
# If you are running a multi test, and the test failed on the first
# test but on, say the 5th patch. If you want to restart on the
# fifth patch, set PATCH_START1. This will make the first test start
@@ -61,6 +96,7 @@ PATCHCHECK_TYPE = ${PATCH_TEST_TYPE}
PATCHCHECK_START = ${PATCH_START1}
PATCHCHECK_END = ${PATCH_END}
CHECKOUT = ${PATCH_CHECKOUT}
+BUILD_TYPE = ${DO_BUILD_TYPE}
TEST_START IF ${TEST} == patchcheck && ${MULTI}
TEST_TYPE = patchcheck
@@ -72,3 +108,4 @@ PATCHCHECK_END = ${PATCH_END}
CHECKOUT = ${PATCH_CHECKOUT}
# Use multi to test different compilers?
MAKE_CMD = CC=gcc-4.5.1 make
+BUILD_TYPE = ${DO_BUILD_TYPE}
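This include file is meant to be pulled in from a small top-level ktest config; a minimal sketch with hypothetical file names:

    cd tools/testing/ktest
    # my-patchcheck.conf defines MACHINE, BUILD_DIR, etc. and INCLUDEs
    # examples/include/patchcheck.conf, optionally with CREATE_WARNINGS_FILE set
    ./ktest.pl my-patchcheck.conf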
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 35fc584a4ffe..4e67d52eb3a2 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -126,6 +126,7 @@ my $start_minconfig_defined;
my $output_minconfig;
my $minconfig_type;
my $use_output_minconfig;
+my $warnings_file;
my $ignore_config;
my $ignore_errors;
my $addconfig;
@@ -193,6 +194,9 @@ my $patchcheck_end;
# which would require more options.
my $buildonly = 1;
+# tell build not to worry about warnings, even when WARNINGS_FILE is set
+my $warnings_ok = 0;
+
# set when creating a new config
my $newconfig = 0;
@@ -235,6 +239,7 @@ my %option_map = (
"START_MIN_CONFIG" => \$start_minconfig,
"MIN_CONFIG_TYPE" => \$minconfig_type,
"USE_OUTPUT_MIN_CONFIG" => \$use_output_minconfig,
+ "WARNINGS_FILE" => \$warnings_file,
"IGNORE_CONFIG" => \$ignore_config,
"TEST" => \$run_test,
"ADD_CONFIG" => \$addconfig,
@@ -619,6 +624,18 @@ sub set_value {
# Note if a test is something other than build, then we
# will need other mandatory options.
if ($prvalue ne "install") {
+ # for bisect, we need to check BISECT_TYPE
+ if ($prvalue ne "bisect") {
+ $buildonly = 0;
+ }
+ } else {
+ # install still limits some mandatory options.
+ $buildonly = 2;
+ }
+ }
+
+ if ($buildonly && $lvalue =~ /^BISECT_TYPE(\[.*\])?$/ && $prvalue ne "build") {
+ if ($prvalue ne "install") {
$buildonly = 0;
} else {
# install still limits some mandatory options.
@@ -1062,7 +1079,7 @@ sub read_config {
}
sub __eval_option {
- my ($option, $i) = @_;
+ my ($name, $option, $i) = @_;
# Add space to evaluate the character before $
$option = " $option";
@@ -1094,7 +1111,11 @@ sub __eval_option {
my $o = "$var\[$i\]";
my $parento = "$var\[$parent\]";
- if (defined($opt{$o})) {
+ # If a variable contains itself, use the default var
+ if (($var eq $name) && defined($opt{$var})) {
+ $o = $opt{$var};
+ $retval = "$retval$o";
+ } elsif (defined($opt{$o})) {
$o = $opt{$o};
$retval = "$retval$o";
} elsif ($repeated && defined($opt{$parento})) {
@@ -1118,7 +1139,7 @@ sub __eval_option {
}
sub eval_option {
- my ($option, $i) = @_;
+ my ($name, $option, $i) = @_;
my $prev = "";
@@ -1134,7 +1155,7 @@ sub eval_option {
"Check for recursive variables\n";
}
$prev = $option;
- $option = __eval_option($option, $i);
+ $option = __eval_option($name, $option, $i);
}
return $option;
@@ -1191,11 +1212,24 @@ sub reboot {
}
if (defined($time)) {
- if (wait_for_monitor($time, $reboot_success_line)) {
+
+ # We only want to get to the new kernel, don't fail
+ # if we stumble over a call trace.
+ my $save_ignore_errors = $ignore_errors;
+ $ignore_errors = 1;
+
+ # Look for the good kernel to boot
+ if (wait_for_monitor($time, "Linux version")) {
# reboot got stuck?
doprint "Reboot did not finish. Forcing power cycle\n";
run_command "$power_cycle";
}
+
+ $ignore_errors = $save_ignore_errors;
+
+ # Still need to wait for the reboot to finish
+ wait_for_monitor($time, $reboot_success_line);
+
end_monitor;
}
}
@@ -1279,6 +1313,7 @@ sub start_monitor {
}
sub end_monitor {
+ return if (!defined $console);
if (--$monitor_cnt) {
return;
}
@@ -1585,7 +1620,7 @@ sub wait_for_input
$rin = '';
vec($rin, fileno($fp), 1) = 1;
- $ready = select($rin, undef, undef, $time);
+ ($ready, $time) = select($rin, undef, undef, $time);
$line = "";
@@ -1891,23 +1926,102 @@ sub get_version {
sub start_monitor_and_boot {
# Make sure the stable kernel has finished booting
- start_monitor;
- wait_for_monitor 5;
- end_monitor;
+
+ # Install bisects, don't need console
+ if (defined $console) {
+ start_monitor;
+ wait_for_monitor 5;
+ end_monitor;
+ }
get_grub_index;
get_version;
install;
- start_monitor;
+ start_monitor if (defined $console);
return monitor;
}
+my $check_build_re = ".*:.*(warning|error|Error):.*";
+my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})";
+
+sub process_warning_line {
+ my ($line) = @_;
+
+ chomp $line;
+
+ # for distcc heterogeneous systems, some compilers
+ # do things differently causing warning lines
+ # to be slightly different. This makes an attempt
+ # to fix those issues.
+
+ # chop off the index into the line
+ # using distcc, some compilers give different indexes
+ # depending on white space
+ $line =~ s/^(\s*\S+:\d+:)\d+/$1/;
+
+ # Some compilers use UTF-8 extended for quotes and some don't.
+ $line =~ s/$utf8_quote/'/g;
+
+ return $line;
+}
+
+# Read buildlog and check against warnings file for any
+# new warnings.
+#
+# Returns 1 if OK
+# 0 otherwise
sub check_buildlog {
+ return 1 if (!defined $warnings_file);
+
+ my %warnings_list;
+
+ # Failed builds should not reboot the target
+ my $save_no_reboot = $no_reboot;
+ $no_reboot = 1;
+
+ if (-f $warnings_file) {
+ open(IN, $warnings_file) or
+ dodie "Error opening $warnings_file";
+
+ while (<IN>) {
+ if (/$check_build_re/) {
+ my $warning = process_warning_line $_;
+
+ $warnings_list{$warning} = 1;
+ }
+ }
+ close(IN);
+ }
+
+ # If the warnings file didn't exist and WARNINGS_FILE is set,
+ # then we fail on any warning!
+
+ open(IN, $buildlog) or dodie "Can't open $buildlog";
+ while (<IN>) {
+ if (/$check_build_re/) {
+ my $warning = process_warning_line $_;
+
+ if (!defined $warnings_list{$warning}) {
+ fail "New warning found (not in $warnings_file)\n$_\n";
+ $no_reboot = $save_no_reboot;
+ return 0;
+ }
+ }
+ }
+ $no_reboot = $save_no_reboot;
+ close(IN);
+}
+
+sub check_patch_buildlog {
my ($patch) = @_;
my @files = `git show $patch | diffstat -l`;
+ foreach my $file (@files) {
+ chomp $file;
+ }
+
open(IN, "git show $patch |") or
dodie "failed to show $patch";
while (<IN>) {
@@ -3055,11 +3169,13 @@ sub patchcheck {
build "oldconfig" or return 0;
}
-
- if (!defined($ignored_warnings{$sha1})) {
- check_buildlog $sha1 or return 0;
+ # No need to do per patch checking if warnings file exists
+ if (!defined($warnings_file) && !defined($ignored_warnings{$sha1})) {
+ check_patch_buildlog $sha1 or return 0;
}
+ check_buildlog or return 0;
+
next if ($type eq "build");
my $failed = 0;
@@ -3617,6 +3733,39 @@ sub make_min_config {
return 1;
}
+sub make_warnings_file {
+ my ($i) = @_;
+
+ if (!defined($warnings_file)) {
+ dodie "Must define WARNINGS_FILE for make_warnings_file test";
+ }
+
+ if ($build_type eq "nobuild") {
+ dodie "BUILD_TYPE can not be 'nobuild' for make_warnings_file test";
+ }
+
+ build $build_type or dodie "Failed to build";
+
+ open(OUT, ">$warnings_file") or dodie "Can't create $warnings_file";
+
+ open(IN, $buildlog) or dodie "Can't open $buildlog";
+ while (<IN>) {
+
+ # Some compilers use UTF-8 extended for quotes
+ # for distcc heterogeneous systems, this causes issues
+ s/$utf8_quote/'/g;
+
+ if (/$check_build_re/) {
+ print OUT;
+ }
+ }
+ close(IN);
+
+ close(OUT);
+
+ success $i;
+}
+
$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl config-file\n";
if ($#ARGV == 0) {
@@ -3662,7 +3811,7 @@ EOF
read_config $ktest_config;
if (defined($opt{"LOG_FILE"})) {
- $opt{"LOG_FILE"} = eval_option($opt{"LOG_FILE"}, -1);
+ $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
}
# Append any configs entered in manually to the config file.
@@ -3739,7 +3888,7 @@ sub set_test_option {
my $option = __set_test_option($name, $i);
return $option if (!defined($option));
- return eval_option($option, $i);
+ return eval_option($name, $option, $i);
}
# First we need to do is the builds
@@ -3818,9 +3967,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$run_type = $bisect_type;
} elsif ($test_type eq "config_bisect") {
$run_type = $config_bisect_type;
- }
-
- if ($test_type eq "make_min_config") {
+ } elsif ($test_type eq "make_min_config") {
+ $run_type = "";
+ } elsif ($test_type eq "make_warnings_file") {
$run_type = "";
}
@@ -3877,10 +4026,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
} elsif ($test_type eq "make_min_config") {
make_min_config $i;
next;
+ } elsif ($test_type eq "make_warnings_file") {
+ $no_reboot = 1;
+ make_warnings_file $i;
+ next;
}
if ($build_type ne "nobuild") {
build $build_type or next;
+ check_buildlog or next;
}
if ($test_type eq "install") {
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 4012e9330344..0a290fb4cd5e 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -793,6 +793,20 @@
# Example for a virtual guest call "Guest".
#POWER_OFF = virsh destroy Guest
+# To have the build fail on "new" warnings, create a file that
+# contains a list of all known warnings (they must match exactly
+# to the line with 'warning:', 'error:' or 'Error:'). If the option
+# WARNINGS_FILE is set, then that file will be read, and if the
+# build detects a warning, it will examine this file and if the
+# warning does not exist in it, it will fail the build.
+#
+# Note, if this option is defined to a file that does not exist
+# then any warning will fail the build.
+# (see make_warnings_file below)
+#
+# (optional, default undefined)
+#WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file
+
# The way to execute a command on the target
# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND";)
# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
@@ -1222,3 +1236,33 @@
# MIN_CONFIG_TYPE = test
# TEST = ssh ${USER}@${MACHINE} echo hi
#
+#
+#
+#
+# For TEST_TYPE = make_warnings_file
+#
+# If you want the build to fail when a new warning is discovered
+# you set the WARNINGS_FILE to point to a file of known warnings.
+#
+# The test "make_warnings_file" will let you create a new warnings
+# file before you run other tests, like patchcheck.
+#
+# What this test does is to run just a build. You still need to
+# specify BUILD_TYPE to tell the test what type of config to use.
+# A BUILD_TYPE of nobuild will fail this test.
+#
+# The test will do the build and scan for all warnings. Any warning
+# it discovers will be saved in the WARNINGS_FILE (required) option.
+#
+# It is recommended (but not necessary) to make sure BUILD_NOCLEAN is
+# off, so that a full build is done (make mrproper is performed).
+# That way, all warnings will be captured.
+#
+# Example:
+#
+# TEST_TYPE = make_warnings_file
+# WARNINGS_FILE = ${OUTPUT_DIR}
+# BUILD_TYPE = useconfig:oldconfig
+# CHECKOUT = v3.8
+# BUILD_NOCLEAN = 0
+#
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 85baf11e2acd..3cc0ad7ae863 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,10 @@
-TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
+TARGETS = breakpoints
+TARGETS += kcmp
+TARGETS += mqueue
+TARGETS += vm
+TARGETS += cpu-hotplug
+TARGETS += memory-hotplug
+TARGETS += efivarfs
all:
for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/README.txt b/tools/testing/selftests/README.txt
new file mode 100644
index 000000000000..5e2faf9c55d3
--- /dev/null
+++ b/tools/testing/selftests/README.txt
@@ -0,0 +1,42 @@
+Linux Kernel Selftests
+
+The kernel contains a set of "self tests" under the tools/testing/selftests/
+directory. These are intended to be small unit tests to exercise individual
+code paths in the kernel.
+
+Running the selftests
+=====================
+
+To build the tests:
+
+ $ make -C tools/testing/selftests
+
+
+To run the tests:
+
+ $ make -C tools/testing/selftests run_tests
+
+- note that some tests will require root privileges.
+
+
+To run only tests targeted for a single subsystem:
+
+ $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
+
+See the top-level tools/testing/selftests/Makefile for the list of all possible
+targets.
+
+
+Contributing new tests
+======================
+
+In general, the rules for selftests are
+
+ * Do as much as you can if you're not root;
+
+ * Don't take too long;
+
+ * Don't break the build on any architecture, and
+
+ * Don't cause the top-level "make run_tests" to fail if your feature is
+ unconfigured.
diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile
new file mode 100644
index 000000000000..29e8c6bc81b0
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/Makefile
@@ -0,0 +1,12 @@
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall
+
+test_objs = open-unlink create-read
+
+all: $(test_objs)
+
+run_tests: all
+ @/bin/bash ./efivarfs.sh || echo "efivarfs selftests: [FAIL]"
+
+clean:
+ rm -f $(test_objs)
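Since efivarfs is also added to the top-level TARGETS list above, the whole suite can be invoked on its own through the selftests Makefile, following the pattern shown in the README (run as root, with efivarfs mounted):

 $ make -C tools/testing/selftests TARGETS=efivarfs run_tests

This ends up running efivarfs.sh via the run_tests target of this Makefile.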
diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
new file mode 100644
index 000000000000..7feef1880968
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/create-read.c
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+ const char *path;
+ char buf[4];
+ int fd, rc;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <path>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ path = argv[1];
+
+ /* create a test variable */
+ fd = open(path, O_RDWR | O_CREAT, 0600);
+ if (fd < 0) {
+ perror("open(O_WRONLY)");
+ return EXIT_FAILURE;
+ }
+
+ rc = read(fd, buf, sizeof(buf));
+ if (rc != 0) {
+ fprintf(stderr, "Reading a new var should return EOF\n");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
new file mode 100644
index 000000000000..880cdd5dc63f
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+efivarfs_mount=/sys/firmware/efi/efivars
+test_guid=210be57c-9849-4fc7-a635-e6382d1aec27
+
+check_prereqs()
+{
+ local msg="skip all tests:"
+
+ if [ $UID != 0 ]; then
+ echo $msg must be run as root >&2
+ exit 0
+ fi
+
+ if ! grep -q "^\S\+ $efivarfs_mount efivarfs" /proc/mounts; then
+ echo $msg efivarfs is not mounted on $efivarfs_mount >&2
+ exit 0
+ fi
+}
+
+run_test()
+{
+ local test="$1"
+
+ echo "--------------------"
+ echo "running $test"
+ echo "--------------------"
+
+ if [ "$(type -t $test)" = 'function' ]; then
+ ( $test )
+ else
+ ( ./$test )
+ fi
+
+ if [ $? -ne 0 ]; then
+ echo " [FAIL]"
+ rc=1
+ else
+ echo " [PASS]"
+ fi
+}
+
+test_create()
+{
+ local attrs='\x07\x00\x00\x00'
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+ printf "$attrs\x00" > $file
+
+ if [ ! -e $file ]; then
+ echo "$file couldn't be created" >&2
+ exit 1
+ fi
+
+ if [ $(stat -c %s $file) -ne 5 ]; then
+ echo "$file has invalid size" >&2
+ exit 1
+ fi
+}
+
+test_create_empty()
+{
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+ : > $file
+
+ if [ ! -e $file ]; then
+ echo "$file can not be created without writing" >&2
+ exit 1
+ fi
+}
+
+test_create_read()
+{
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ ./create-read $file
+}
+
+test_delete()
+{
+ local attrs='\x07\x00\x00\x00'
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+ printf "$attrs\x00" > $file
+
+ if [ ! -e $file ]; then
+ echo "$file couldn't be created" >&2
+ exit 1
+ fi
+
+ rm $file
+
+ if [ -e $file ]; then
+ echo "$file couldn't be deleted" >&2
+ exit 1
+ fi
+
+}
+
+# test that we can remove a variable by issuing a write with only
+# attributes specified
+test_zero_size_delete()
+{
+ local attrs='\x07\x00\x00\x00'
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+ printf "$attrs\x00" > $file
+
+ if [ ! -e $file ]; then
+ echo "$file does not exist" >&2
+ exit 1
+ fi
+
+ printf "$attrs" > $file
+
+ if [ -e $file ]; then
+ echo "$file should have been deleted" >&2
+ exit 1
+ fi
+}
+
+test_open_unlink()
+{
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ ./open-unlink $file
+}
+
+check_prereqs
+
+rc=0
+
+run_test test_create
+run_test test_create_empty
+run_test test_create_read
+run_test test_delete
+run_test test_zero_size_delete
+run_test test_open_unlink
+
+exit $rc
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
new file mode 100644
index 000000000000..8c0764407b3c
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+int main(int argc, char **argv)
+{
+ const char *path;
+ char buf[5];
+ int fd, rc;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <path>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ path = argv[1];
+
+ /* attributes: EFI_VARIABLE_NON_VOLATILE |
+ * EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ * EFI_VARIABLE_RUNTIME_ACCESS
+ */
+ *(uint32_t *)buf = 0x7;
+ buf[4] = 0;
+
+ /* create a test variable */
+ fd = open(path, O_WRONLY | O_CREAT, 0600);
+ if (fd < 0) {
+ perror("open(O_WRONLY)");
+ return EXIT_FAILURE;
+ }
+
+ rc = write(fd, buf, sizeof(buf));
+ if (rc != sizeof(buf)) {
+ perror("write");
+ return EXIT_FAILURE;
+ }
+
+ close(fd);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (unlink(path) < 0) {
+ perror("unlink");
+ return EXIT_FAILURE;
+ }
+
+ rc = read(fd, buf, sizeof(buf));
+ if (rc > 0) {
+ fprintf(stderr, "reading from an unlinked variable "
+ "shouldn't be possible\n");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b6eea5cc7b34..adb17f266b28 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -268,14 +268,13 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
struct kvm_irq_routing_table *irq_rt)
{
struct kvm_kernel_irq_routing_entry *e;
- struct hlist_node *n;
if (irqfd->gsi >= irq_rt->nr_rt_entries) {
rcu_assign_pointer(irqfd->irq_entry, NULL);
return;
}
- hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+ hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
/* Only fast-path MSI. */
if (e->type == KVM_IRQ_ROUTING_MSI)
rcu_assign_pointer(irqfd->irq_entry, e);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index cfb7e4d52dc2..ce82b9401958 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -35,6 +35,7 @@
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
smp_wmb();
}
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ u64 *eoi_exit_bitmap)
+{
+ struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+ union kvm_ioapic_redirect_entry *e;
+ struct kvm_lapic_irq irqe;
+ int index;
+
+ spin_lock(&ioapic->lock);
+ /* traverse ioapic entries to set the eoi exit bitmap */
+ for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+ e = &ioapic->redirtbl[index];
+ if (!e->fields.mask &&
+ (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+ kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+ index))) {
+ irqe.dest_id = e->fields.dest_id;
+ irqe.vector = e->fields.vector;
+ irqe.dest_mode = e->fields.dest_mode;
+ irqe.delivery_mode = e->fields.delivery_mode << 8;
+ kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+ }
+ }
+ spin_unlock(&ioapic->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
+
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+ return;
+ kvm_make_update_eoibitmap_request(kvm);
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
&& ioapic->irr & (1 << index))
ioapic_service(ioapic, index);
+ kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
break;
}
}
@@ -179,15 +217,6 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.level = 1;
irqe.shorthand = 0;
-#ifdef CONFIG_X86
- /* Always delivery PIT interrupt to vcpu 0 */
- if (irq == 0) {
- irqe.dest_mode = 0; /* Physical mode. */
- /* need to read apic_id from apic regiest since
- * it can be rewritten */
- irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
- }
-#endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
@@ -464,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
update_handled_vectors(ioapic);
+ kvm_ioapic_make_eoibitmap_request(kvm);
spin_unlock(&ioapic->lock);
return 0;
}
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index a30abfe6ed16..0400a466c50c 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ u64 *eoi_exit_bitmap);
+
#endif
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 4a340cb23013..72a130bc448a 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,7 +76,9 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
- flags = IOMMU_READ | IOMMU_WRITE;
+ flags = IOMMU_READ;
+ if (!(slot->flags & KVM_MEM_READONLY))
+ flags |= IOMMU_WRITE;
if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
flags |= IOMMU_CACHE;
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 656fa455e154..e9073cf4d040 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <trace/events/kvm.h>
#include <asm/msidef.h>
@@ -172,7 +173,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
int ret = -1, i = 0;
struct kvm_irq_routing_table *irq_rt;
- struct hlist_node *n;
trace_kvm_set_irq(irq, level, irq_source_id);
@@ -183,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
rcu_read_lock();
irq_rt = rcu_dereference(kvm->irq_routing);
if (irq < irq_rt->nr_rt_entries)
- hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
+ hlist_for_each_entry(e, &irq_rt->map[irq], link)
irq_set[i++] = *e;
rcu_read_unlock();
@@ -211,7 +211,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
struct kvm_kernel_irq_routing_entry *e;
int ret = -EINVAL;
struct kvm_irq_routing_table *irq_rt;
- struct hlist_node *n;
trace_kvm_set_irq(irq, level, irq_source_id);
@@ -226,7 +225,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
rcu_read_lock();
irq_rt = rcu_dereference(kvm->irq_routing);
if (irq < irq_rt->nr_rt_entries)
- hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+ hlist_for_each_entry(e, &irq_rt->map[irq], link) {
if (likely(e->type == KVM_IRQ_ROUTING_MSI))
ret = kvm_set_msi_inatomic(e, kvm);
else
@@ -237,10 +236,30 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
return ret;
}
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ struct kvm_irq_ack_notifier *kian;
+ int gsi;
+
+ rcu_read_lock();
+ gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+ if (gsi != -1)
+ hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ link)
+ if (kian->gsi == gsi) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
struct kvm_irq_ack_notifier *kian;
- struct hlist_node *n;
int gsi;
trace_kvm_ack_irq(irqchip, pin);
@@ -248,7 +267,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
rcu_read_lock();
gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
if (gsi != -1)
- hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+ hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
link)
if (kian->gsi == gsi)
kian->irq_acked(kian);
@@ -261,6 +280,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
+ kvm_ioapic_make_eoibitmap_request(kvm);
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -270,6 +290,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_rcu();
+ kvm_ioapic_make_eoibitmap_request(kvm);
}
int kvm_request_irq_source_id(struct kvm *kvm)
@@ -344,13 +365,12 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
bool mask)
{
struct kvm_irq_mask_notifier *kimn;
- struct hlist_node *n;
int gsi;
rcu_read_lock();
gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
+ hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
if (kimn->irq == gsi)
kimn->func(kimn, mask);
rcu_read_unlock();
@@ -371,13 +391,12 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
int delta;
unsigned max_pin;
struct kvm_kernel_irq_routing_entry *ei;
- struct hlist_node *n;
/*
* Do not allow GSI to be mapped to the same irqchip more than once.
* Allow only one to one mapping between GSI and MSI.
*/
- hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+ hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
if (ei->type == KVM_IRQ_ROUTING_MSI ||
ue->type == KVM_IRQ_ROUTING_MSI ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cd693a76a51..adc68feb5c5a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+ make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
@@ -474,6 +479,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
+ BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+
r = -ENOMEM;
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
@@ -670,7 +677,8 @@ static void sort_memslots(struct kvm_memslots *slots)
slots->id_to_index[slots->memslots[i].id] = i;
}
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+ u64 last_generation)
{
if (new) {
int id = new->id;
@@ -682,7 +690,7 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
sort_memslots(slots);
}
- slots->generation++;
+ slots->generation = last_generation + 1;
}
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
@@ -699,6 +707,35 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
return 0;
}
+static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+ struct kvm_memslots *slots, struct kvm_memory_slot *new)
+{
+ struct kvm_memslots *old_memslots = kvm->memslots;
+
+ update_memslots(slots, new, kvm->memslots->generation);
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ return old_memslots;
+}
+
+/*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ * -- move it in the guest physical memory space
+ * -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+ KVM_MR_CREATE,
+ KVM_MR_DELETE,
+ KVM_MR_MOVE,
+ KVM_MR_FLAGS_ONLY,
+};
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -709,14 +746,15 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
- struct kvm_memory_slot *memslot, *slot;
+ struct kvm_memory_slot *slot;
struct kvm_memory_slot old, new;
- struct kvm_memslots *slots, *old_memslots;
+ struct kvm_memslots *slots = NULL, *old_memslots;
+ enum kvm_mr_change change;
r = check_memory_region_flags(mem);
if (r)
@@ -740,7 +778,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- memslot = id_to_memslot(kvm->memslots, mem->slot);
+ slot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
@@ -751,26 +789,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
- new = old = *memslot;
+ new = old = *slot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
- /* Disallow changing a memory slot's size. */
r = -EINVAL;
- if (npages && old.npages && npages != old.npages)
- goto out_free;
+ if (npages) {
+ if (!old.npages)
+ change = KVM_MR_CREATE;
+ else { /* Modify an existing slot. */
+ if ((mem->userspace_addr != old.userspace_addr) ||
+ (npages != old.npages) ||
+ ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+ goto out;
- /* Check for overlaps */
- r = -EEXIST;
- kvm_for_each_memslot(slot, kvm->memslots) {
- if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
- continue;
- if (!((base_gfn + npages <= slot->base_gfn) ||
- (base_gfn >= slot->base_gfn + slot->npages)))
- goto out_free;
+ if (base_gfn != old.base_gfn)
+ change = KVM_MR_MOVE;
+ else if (new.flags != old.flags)
+ change = KVM_MR_FLAGS_ONLY;
+ else { /* Nothing to change. */
+ r = 0;
+ goto out;
+ }
+ }
+ } else if (old.npages) {
+ change = KVM_MR_DELETE;
+ } else /* Modify a non-existent slot: disallowed. */
+ goto out;
+
+ if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+ /* Check for overlaps */
+ r = -EEXIST;
+ kvm_for_each_memslot(slot, kvm->memslots) {
+ if ((slot->id >= KVM_USER_MEM_SLOTS) ||
+ (slot->id == mem->slot))
+ continue;
+ if (!((base_gfn + npages <= slot->base_gfn) ||
+ (base_gfn >= slot->base_gfn + slot->npages)))
+ goto out;
+ }
}
/* Free page dirty bitmap if unneeded */
@@ -778,10 +838,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.dirty_bitmap = NULL;
r = -ENOMEM;
-
- /* Allocate if a slot is being created */
- if (npages && !old.npages) {
- new.user_alloc = user_alloc;
+ if (change == KVM_MR_CREATE) {
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
@@ -792,12 +849,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
- /* destroy any largepage mappings for dirty tracking */
}
- if (!npages || base_gfn != old.base_gfn) {
- struct kvm_memory_slot *slot;
-
+ if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
@@ -806,11 +860,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
- update_memslots(slots, NULL);
+ old_memslots = install_new_memslots(kvm, slots, NULL);
- old_memslots = kvm->memslots;
- rcu_assign_pointer(kvm->memslots, slots);
- synchronize_srcu_expedited(&kvm->srcu);
+ /* slot was deleted or moved, clear iommu mapping */
+ kvm_iommu_unmap_pages(kvm, &old);
/* From this point no new shadow pages pointing to a deleted,
* or moved, memslot will be created.
*
@@ -819,37 +872,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
- kfree(old_memslots);
+ slots = old_memslots;
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
- goto out_free;
+ goto out_slots;
- /* map/unmap the pages in iommu page table */
- if (npages) {
- r = kvm_iommu_map_pages(kvm, &new);
- if (r)
+ r = -ENOMEM;
+ /*
+ * We can re-use the old_memslots from above, the only difference
+ * from the currently installed memslots is the invalid flag. This
+ * will get overwritten by update_memslots anyway.
+ */
+ if (!slots) {
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
+ if (!slots)
goto out_free;
- } else
- kvm_iommu_unmap_pages(kvm, &old);
+ }
- r = -ENOMEM;
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
+ /*
+ * IOMMU mapping: New slots need to be mapped. Old slots need to be
+ * un-mapped and re-mapped if their base changes. Since base change
+ * unmapping is handled above with slot deletion, mapping alone is
+ * needed here. Anything else the iommu might care about for existing
+ * slots (size changes, userspace addr changes and read-only flag
+ * changes) is disallowed above, so any other attribute changes getting
+ * here can be skipped.
+ */
+ if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ if (r)
+ goto out_slots;
+ }
/* actual memory is freed via old in kvm_free_physmem_slot below */
- if (!npages) {
+ if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
- update_memslots(slots, &new);
- old_memslots = kvm->memslots;
- rcu_assign_pointer(kvm->memslots, slots);
- synchronize_srcu_expedited(&kvm->srcu);
+ old_memslots = install_new_memslots(kvm, slots, &new);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
@@ -858,17 +922,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
return 0;
+out_slots:
+ kfree(slots);
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
-
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
int r;
@@ -882,9 +947,9 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct
kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
- if (mem->slot >= KVM_MEMORY_SLOTS)
+ if (mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
return kvm_set_memory_region(kvm, mem, user_alloc);
}
@@ -898,7 +963,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
unsigned long any = 0;
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
@@ -944,7 +1009,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
- if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+ if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
memslot->flags & KVM_MEMSLOT_INVALID)
return 0;
@@ -1641,6 +1706,7 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
struct pid *pid;
struct task_struct *task = NULL;
+ bool ret = false;
rcu_read_lock();
pid = rcu_dereference(target->pid);
@@ -1648,17 +1714,15 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
task = get_pid_task(target->pid, PIDTYPE_PID);
rcu_read_unlock();
if (!task)
- return false;
+ return ret;
if (task->flags & PF_VCPU) {
put_task_struct(task);
- return false;
- }
- if (yield_to(task, 1)) {
- put_task_struct(task);
- return true;
+ return ret;
}
+ ret = yield_to(task, 1);
put_task_struct(task);
- return false;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
@@ -1699,12 +1763,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
return eligible;
}
#endif
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
int yielded = 0;
+ int try = 3;
int pass;
int i;
@@ -1716,7 +1782,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
* VCPU is holding the lock that we need and will release it.
* We approximate round-robin by starting at the last boosted VCPU.
*/
- for (pass = 0; pass < 2 && !yielded; pass++) {
+ for (pass = 0; pass < 2 && !yielded && try; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
@@ -1729,10 +1795,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
- if (kvm_vcpu_yield_to(vcpu)) {
+
+ yielded = kvm_vcpu_yield_to(vcpu);
+ if (yielded > 0) {
kvm->last_boosted_vcpu = i;
- yielded = 1;
break;
+ } else if (yielded < 0) {
+ try--;
+ if (!try)
+ break;
}
}
}
@@ -2127,7 +2198,7 @@ static long kvm_vm_ioctl(struct file *filp,
sizeof kvm_userspace_mem))
goto out;
- r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+ r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
break;
}
case KVM_GET_DIRTY_LOG: {